Diffstat (limited to 'drivers/net')
1794 files changed, 126177 insertions, 44907 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 368c6f5b327e..af0da4bb429b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -332,6 +332,28 @@ config NETCONSOLE_DYNAMIC
       at runtime through a userspace interface exported using configfs.
       See <file:Documentation/networking/netconsole.rst> for details.
 
+config NETCONSOLE_EXTENDED_LOG
+    bool "Set kernel extended message by default"
+    depends on NETCONSOLE
+    default n
+    help
+      Set extended log support for netconsole message. If this option is
+      set, log messages are transmitted with extended metadata header in a
+      format similar to /dev/kmsg. See
+      <file:Documentation/networking/netconsole.rst> for details.
+
+config NETCONSOLE_PREPEND_RELEASE
+    bool "Prepend kernel release version in the message by default"
+    depends on NETCONSOLE_EXTENDED_LOG
+    default n
+    help
+      Set kernel release to be prepended to each netconsole message by
+      default. If this option is set, the kernel release is prepended into
+      the first field of every netconsole message, so, the netconsole
+      server/peer can easily identify what kernel release is logging each
+      message. See <file:Documentation/networking/netconsole.rst> for
+      details.
+
 config NETPOLL
     def_bool NETCONSOLE
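As a point of reference for the two options above (illustrative, not part of
the commit): an extended netconsole message carries a /dev/kmsg-style
"<level>,<sequence>,<timestamp-usec>,<flags>;<text>" header, and with
NETCONSOLE_PREPEND_RELEASE the kernel release becomes one more leading
comma-separated field, per the help text. Hypothetical sample datagrams
(sequence/timestamp values made up):

    6,201,419224,-;netconsole: network logging started
    6.7.0,6,201,419224,-;netconsole: network logging started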
@@ -426,6 +448,15 @@ config NLMON
       diagnostics, etc. This is mostly intended for developers or support
       to debug netlink issues. If unsure, say N.
 
+config NETKIT
+    bool "BPF-programmable network device"
+    depends on BPF_SYSCALL
+    help
+      The netkit device is a virtual networking device where BPF programs
+      can be attached to the device(s) transmission routine in order to
+      implement the driver's internal logic. The device can be configured
+      to operate in L3 or L2 mode. If unsure, say N.
+
 config NET_VRF
     tristate "Virtual Routing and Forwarding (Lite)"
     depends on IP_MULTIPLE_TABLES
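To make the terse help text above concrete, here is a minimal sketch (not part
of the commit) of the kind of BPF program a netkit device runs from its
transmission routine. The SEC("netkit/primary") section name and the tcx-style
verdict codes are assumptions based on recent libbpf/uapi conventions; check
them against your toolchain:

    /* netkit_pass.c: minimal netkit program; lets every packet through. */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("netkit/primary")
    int nk_pass(struct __sk_buff *skb)
    {
        /* 0 is the pass/accept verdict in tcx-style return codes; a real
         * policy program would inspect skb and could drop or redirect. */
        return 0;
    }

    char _license[] SEC("license") = "GPL";

The device pair itself is created from userspace (e.g. "ip link add type
netkit" with a recent iproute2), and the program is attached as a bpf_link
using the new BPF_NETKIT_PRIMARY / BPF_NETKIT_PEER attach types.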
@@ -571,6 +602,7 @@ config VMXNET3
     tristate "VMware VMXNET3 ethernet driver"
     depends on PCI && INET
     depends on PAGE_SIZE_LESS_THAN_64KB
+    select PAGE_POOL
     help
       This driver supports VMware's vmxnet3 virtual ethernet NIC.
       To compile this driver as a module, choose M here: the
@@ -592,6 +624,7 @@ config NETDEVSIM
     depends on INET
     depends on IPV6 || IPV6=n
     depends on PSAMPLE || PSAMPLE=n
+    depends on PTP_1588_CLOCK_MOCK || PTP_1588_CLOCK_MOCK=n
     select NET_DEVLINK
     help
       This driver is a developer testing tool and software model that can
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index e26f98f897c5..7cab36f94782 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_MDIO) += mdio.o
 obj-$(CONFIG_NET) += loopback.o
 obj-$(CONFIG_NETDEV_LEGACY_INIT) += Space.o
 obj-$(CONFIG_NETCONSOLE) += netconsole.o
+obj-$(CONFIG_NETKIT) += netkit.o
 obj-y += phy/
 obj-y += pse-pd/
 obj-y += mdio/
@@ -45,7 +46,6 @@ obj-$(CONFIG_MHI_NET) += mhi_net.o
 # Networking Drivers
 #
 obj-$(CONFIG_ARCNET) += arcnet/
-obj-$(CONFIG_DEV_APPLETALK) += appletalk/
 obj-$(CONFIG_CAIF) += caif/
 obj-$(CONFIG_CAN) += can/
 obj-$(CONFIG_NET_DSA) += dsa/
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 83214e2e70ab..dc50797a2ed0 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -247,12 +247,6 @@ static int __init net_olddevs_init(void)
     for (num = 0; num < 8; ++num)
         ethif_probe2(num);
 
-#ifdef CONFIG_COPS
-    cops_probe(0);
-    cops_probe(1);
-    cops_probe(2);
-#endif
-
     return 0;
 }
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index 2d20be6ffb7e..53415e83821c 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -3449,5 +3449,6 @@ static void __exit amt_fini(void)
 module_exit(amt_fini);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Driver for Automatic Multicast Tunneling (AMT)");
 MODULE_AUTHOR("Taehee Yoo <ap420073@gmail.com>");
 MODULE_ALIAS_RTNL_LINK("amt");
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
deleted file mode 100644
index b38ed52b82bc..000000000000
--- a/drivers/net/appletalk/Kconfig
+++ /dev/null
@@ -1,102 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Appletalk driver configuration
-#
-config ATALK
-    tristate "Appletalk protocol support"
-    select LLC
-    help
-      AppleTalk is the protocol that Apple computers can use to communicate
-      on a network. If your Linux box is connected to such a network and you
-      wish to connect to it, say Y. You will need to use the netatalk package
-      so that your Linux box can act as a print and file server for Macs as
-      well as access AppleTalk printers. Check out
-      <http://www.zettabyte.net/netatalk/> on the WWW for details.
-      EtherTalk is the name used for AppleTalk over Ethernet and the
-      cheaper and slower LocalTalk is AppleTalk over a proprietary Apple
-      network using serial links. EtherTalk and LocalTalk are fully
-      supported by Linux.
-
-      General information about how to connect Linux, Windows machines and
-      Macs is on the WWW at <http://www.eats.com/linux_mac_win.html>. The
-      NET3-4-HOWTO, available from
-      <http://www.tldp.org/docs.html#howto>, contains valuable
-      information as well.
-
-      To compile this driver as a module, choose M here: the module will be
-      called appletalk. You almost certainly want to compile it as a
-      module so you can restart your AppleTalk stack without rebooting
-      your machine. I hear that the GNU boycott of Apple is over, so
-      even politically correct people are allowed to say Y here.
-
-config DEV_APPLETALK
-    tristate "Appletalk interfaces support"
-    depends on ATALK
-    help
-      AppleTalk is the protocol that Apple computers can use to communicate
-      on a network. If your Linux box is connected to such a network, and wish
-      to do IP over it, or you have a LocalTalk card and wish to use it to
-      connect to the AppleTalk network, say Y.
-
-
-config COPS
-    tristate "COPS LocalTalk PC support"
-    depends on DEV_APPLETALK && ISA
-    depends on NETDEVICES
-    select NETDEV_LEGACY_INIT
-    help
-      This allows you to use COPS AppleTalk cards to connect to LocalTalk
-      networks. You also need version 1.3.3 or later of the netatalk
-      package. This driver is experimental, which means that it may not
-      work. This driver will only work if you choose "AppleTalk DDP"
-      networking support, above.
-      Please read the file
-      <file:Documentation/networking/device_drivers/appletalk/cops.rst>.
-
-config COPS_DAYNA
-    bool "Dayna firmware support"
-    depends on COPS
-    help
-      Support COPS compatible cards with Dayna style firmware (Dayna
-      DL2000/ Daynatalk/PC (half length), COPS LT-95, Farallon PhoneNET PC
-      III, Farallon PhoneNET PC II).
-
-config COPS_TANGENT
-    bool "Tangent firmware support"
-    depends on COPS
-    help
-      Support COPS compatible cards with Tangent style firmware (Tangent
-      ATB_II, Novell NL-1000, Daystar Digital LT-200.
-
-config IPDDP
-    tristate "Appletalk-IP driver support"
-    depends on DEV_APPLETALK && ATALK
-    help
-      This allows IP networking for users who only have AppleTalk
-      networking available. This feature is experimental. With this
-      driver, you can encapsulate IP inside AppleTalk (e.g. if your Linux
-      box is stuck on an AppleTalk only network) or decapsulate (e.g. if
-      you want your Linux box to act as an Internet gateway for a zoo of
-      AppleTalk connected Macs). Please see the file
-      <file:Documentation/networking/ipddp.rst> for more information.
-
-      If you say Y here, the AppleTalk-IP support will be compiled into
-      the kernel. In this case, you can either use encapsulation or
-      decapsulation, but not both. With the following two questions, you
-      decide which one you want.
-
-      To compile the AppleTalk-IP support as a module, choose M here: the
-      module will be called ipddp.
-      In this case, you will be able to use both encapsulation and
-      decapsulation simultaneously, by loading two copies of the module
-      and specifying different values for the module option ipddp_mode.
-
-config IPDDP_ENCAP
-    bool "IP to Appletalk-IP Encapsulation support"
-    depends on IPDDP
-    help
-      If you say Y here, the AppleTalk-IP code will be able to encapsulate
-      IP packets inside AppleTalk frames; this is useful if your Linux box
-      is stuck on an AppleTalk network (which hopefully contains a
-      decapsulator somewhere). Please see
-      <file:Documentation/networking/ipddp.rst> for more information.
diff --git a/drivers/net/appletalk/Makefile b/drivers/net/appletalk/Makefile
deleted file mode 100644
index 6db2943ce5d6..000000000000
--- a/drivers/net/appletalk/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for drivers/net/appletalk
-#
-
-obj-$(CONFIG_IPDDP) += ipddp.o
-obj-$(CONFIG_COPS) += cops.o
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
deleted file mode 100644
index 97f254bdbb16..000000000000
--- a/drivers/net/appletalk/cops.c
+++ /dev/null
@@ -1,1005 +0,0 @@
-/* cops.c: LocalTalk driver for Linux.
- *
- *    Authors:
- *    - Jay Schulist <jschlst@samba.org>
- *
- *    With more than a little help from;
- *    - Alan Cox <alan@lxorguk.ukuu.org.uk>
- *
- *    Derived from:
- *    - skeleton.c: A network driver outline for linux.
- *      Written 1993-94 by Donald Becker.
- *    - ltpc.c: A driver for the LocalTalk PC card.
- * Written by Bradford W. Johnson. - * - * Copyright 1993 United States Government as represented by the - * Director, National Security Agency. - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - * Changes: - * 19970608 Alan Cox Allowed dual card type support - * Can set board type in insmod - * Hooks for cops_setup routine - * (not yet implemented). - * 19971101 Jay Schulist Fixes for multiple lt* devices. - * 19980607 Steven Hirsch Fixed the badly broken support - * for Tangent type cards. Only - * tested on Daystar LT200. Some - * cleanup of formatting and program - * logic. Added emacs 'local-vars' - * setup for Jay's brace style. - * 20000211 Alan Cox Cleaned up for softnet - */ - -static const char *version = -"cops.c:v0.04 6/7/98 Jay Schulist <jschlst@samba.org>\n"; -/* - * Sources: - * COPS Localtalk SDK. This provides almost all of the information - * needed. - */ - -/* - * insmod/modprobe configurable stuff. - * - IO Port, choose one your card supports or 0 if you dare. - * - IRQ, also choose one your card supports or nothing and let - * the driver figure it out. - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/fcntl.h> -#include <linux/interrupt.h> -#include <linux/ptrace.h> -#include <linux/ioport.h> -#include <linux/in.h> -#include <linux/string.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/if_arp.h> -#include <linux/if_ltalk.h> -#include <linux/delay.h> /* For udelay() */ -#include <linux/atalk.h> -#include <linux/spinlock.h> -#include <linux/bitops.h> -#include <linux/jiffies.h> - -#include <net/Space.h> - -#include <asm/io.h> -#include <asm/dma.h> - -#include "cops.h" /* Our Stuff */ -#include "cops_ltdrv.h" /* Firmware code for Tangent type cards. */ -#include "cops_ffdrv.h" /* Firmware code for Dayna type cards. */ - -/* - * The name of the card. Is used for messages and in the requests for - * io regions, irqs and dma channels - */ - -static const char *cardname = "cops"; - -#ifdef CONFIG_COPS_DAYNA -static int board_type = DAYNA; /* Module exported */ -#else -static int board_type = TANGENT; -#endif - -static int io = 0x240; /* Default IO for Dayna */ -static int irq = 5; /* Default IRQ */ - -/* - * COPS Autoprobe information. - * Right now if port address is right but IRQ is not 5 this will - * return a 5 no matter what since we will still get a status response. - * Need one more additional check to narrow down after we have gotten - * the ioaddr. But since only other possible IRQs is 3 and 4 so no real - * hurry on this. I *STRONGLY* recommend using IRQ 5 for your card with - * this driver. - * - * This driver has 2 modes and they are: Dayna mode and Tangent mode. - * Each mode corresponds with the type of card. It has been found - * that there are 2 main types of cards and all other cards are - * the same and just have different names or only have minor differences - * such as more IO ports. As this driver is tested it will - * become more clear on exactly what cards are supported. The driver - * defaults to using Dayna mode. To change the drivers mode, simply - * select Dayna or Tangent mode when configuring the kernel. 
- * - * This driver should support: - * TANGENT driver mode: - * Tangent ATB-II, Novell NL-1000, Daystar Digital LT-200, - * COPS LT-1 - * DAYNA driver mode: - * Dayna DL2000/DaynaTalk PC (Half Length), COPS LT-95, - * Farallon PhoneNET PC III, Farallon PhoneNET PC II - * Other cards possibly supported mode unknown though: - * Dayna DL2000 (Full length), COPS LT/M (Micro-Channel) - * - * Cards NOT supported by this driver but supported by the ltpc.c - * driver written by Bradford W. Johnson <johns393@maroon.tc.umn.edu> - * Farallon PhoneNET PC - * Original Apple LocalTalk PC card - * - * N.B. - * - * The Daystar Digital LT200 boards do not support interrupt-driven - * IO. You must specify 'irq=0xff' as a module parameter to invoke - * polled mode. I also believe that the port probing logic is quite - * dangerous at best and certainly hopeless for a polled card. Best to - * specify both. - Steve H. - * - */ - -/* - * Zero terminated list of IO ports to probe. - */ - -static unsigned int ports[] = { - 0x240, 0x340, 0x200, 0x210, 0x220, 0x230, 0x260, - 0x2A0, 0x300, 0x310, 0x320, 0x330, 0x350, 0x360, - 0 -}; - -/* - * Zero terminated list of IRQ ports to probe. - */ - -static int cops_irqlist[] = { - 5, 4, 3, 0 -}; - -static struct timer_list cops_timer; -static struct net_device *cops_timer_dev; - -/* use 0 for production, 1 for verification, 2 for debug, 3 for verbose debug */ -#ifndef COPS_DEBUG -#define COPS_DEBUG 1 -#endif -static unsigned int cops_debug = COPS_DEBUG; - -/* The number of low I/O ports used by the card. */ -#define COPS_IO_EXTENT 8 - -/* Information that needs to be kept for each board. */ - -struct cops_local -{ - int board; /* Holds what board type is. */ - int nodeid; /* Set to 1 once have nodeid. */ - unsigned char node_acquire; /* Node ID when acquired. */ - struct atalk_addr node_addr; /* Full node address */ - spinlock_t lock; /* RX/TX lock */ -}; - -/* Index to functions, as function prototypes. */ -static int cops_probe1 (struct net_device *dev, int ioaddr); -static int cops_irq (int ioaddr, int board); - -static int cops_open (struct net_device *dev); -static int cops_jumpstart (struct net_device *dev); -static void cops_reset (struct net_device *dev, int sleep); -static void cops_load (struct net_device *dev); -static int cops_nodeid (struct net_device *dev, int nodeid); - -static irqreturn_t cops_interrupt (int irq, void *dev_id); -static void cops_poll(struct timer_list *t); -static void cops_timeout(struct net_device *dev, unsigned int txqueue); -static void cops_rx (struct net_device *dev); -static netdev_tx_t cops_send_packet (struct sk_buff *skb, - struct net_device *dev); -static void set_multicast_list (struct net_device *dev); -static int cops_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); -static int cops_close (struct net_device *dev); - -static void cleanup_card(struct net_device *dev) -{ - if (dev->irq) - free_irq(dev->irq, dev); - release_region(dev->base_addr, COPS_IO_EXTENT); -} - -/* - * Check for a network adaptor of this type, and return '0' iff one exists. - * If dev->base_addr == 0, probe all likely locations. - * If dev->base_addr in [1..0x1ff], always return failure. - * otherwise go with what we pass in. 
- */ -struct net_device * __init cops_probe(int unit) -{ - struct net_device *dev; - unsigned *port; - int base_addr; - int err = 0; - - dev = alloc_ltalkdev(sizeof(struct cops_local)); - if (!dev) - return ERR_PTR(-ENOMEM); - - if (unit >= 0) { - sprintf(dev->name, "lt%d", unit); - netdev_boot_setup_check(dev); - irq = dev->irq; - base_addr = dev->base_addr; - } else { - base_addr = dev->base_addr = io; - } - - if (base_addr > 0x1ff) { /* Check a single specified location. */ - err = cops_probe1(dev, base_addr); - } else if (base_addr != 0) { /* Don't probe at all. */ - err = -ENXIO; - } else { - /* FIXME Does this really work for cards which generate irq? - * It's definitely N.G. for polled Tangent. sh - * Dayna cards don't autoprobe well at all, but if your card is - * at IRQ 5 & IO 0x240 we find it every time. ;) JS - */ - for (port = ports; *port && cops_probe1(dev, *port) < 0; port++) - ; - if (!*port) - err = -ENODEV; - } - if (err) - goto out; - err = register_netdev(dev); - if (err) - goto out1; - return dev; -out1: - cleanup_card(dev); -out: - free_netdev(dev); - return ERR_PTR(err); -} - -static const struct net_device_ops cops_netdev_ops = { - .ndo_open = cops_open, - .ndo_stop = cops_close, - .ndo_start_xmit = cops_send_packet, - .ndo_tx_timeout = cops_timeout, - .ndo_do_ioctl = cops_ioctl, - .ndo_set_rx_mode = set_multicast_list, -}; - -/* - * This is the real probe routine. Linux has a history of friendly device - * probes on the ISA bus. A good device probes avoids doing writes, and - * verifies that the correct device exists and functions. - */ -static int __init cops_probe1(struct net_device *dev, int ioaddr) -{ - struct cops_local *lp; - static unsigned version_printed; - int board = board_type; - int retval; - - if(cops_debug && version_printed++ == 0) - printk("%s", version); - - /* Grab the region so no one else tries to probe our ioports. */ - if (!request_region(ioaddr, COPS_IO_EXTENT, dev->name)) - return -EBUSY; - - /* - * Since this board has jumpered interrupts, allocate the interrupt - * vector now. There is no point in waiting since no other device - * can use the interrupt, and this marks the irq as busy. Jumpered - * interrupts are typically not reported by the boards, and we must - * used AutoIRQ to find them. - */ - dev->irq = irq; - switch (dev->irq) - { - case 0: - /* COPS AutoIRQ routine */ - dev->irq = cops_irq(ioaddr, board); - if (dev->irq) - break; - fallthrough; /* Once no IRQ found on this port */ - case 1: - retval = -EINVAL; - goto err_out; - - /* Fixup for users that don't know that IRQ 2 is really - * IRQ 9, or don't know which one to set. - */ - case 2: - dev->irq = 9; - break; - - /* Polled operation requested. Although irq of zero passed as - * a parameter tells the init routines to probe, we'll - * overload it to denote polled operation at runtime. - */ - case 0xff: - dev->irq = 0; - break; - - default: - break; - } - - dev->base_addr = ioaddr; - - /* Reserve any actual interrupt. */ - if (dev->irq) { - retval = request_irq(dev->irq, cops_interrupt, 0, dev->name, dev); - if (retval) - goto err_out; - } - - lp = netdev_priv(dev); - spin_lock_init(&lp->lock); - - /* Copy local board variable to lp struct. */ - lp->board = board; - - dev->netdev_ops = &cops_netdev_ops; - dev->watchdog_timeo = HZ * 2; - - - /* Tell the user where the card is and what mode we're in. 
*/ - if(board==DAYNA) - printk("%s: %s at %#3x, using IRQ %d, in Dayna mode.\n", - dev->name, cardname, ioaddr, dev->irq); - if(board==TANGENT) { - if(dev->irq) - printk("%s: %s at %#3x, IRQ %d, in Tangent mode\n", - dev->name, cardname, ioaddr, dev->irq); - else - printk("%s: %s at %#3x, using polled IO, in Tangent mode.\n", - dev->name, cardname, ioaddr); - - } - return 0; - -err_out: - release_region(ioaddr, COPS_IO_EXTENT); - return retval; -} - -static int __init cops_irq (int ioaddr, int board) -{ /* - * This does not use the IRQ to determine where the IRQ is. We just - * assume that when we get a correct status response that it's the IRQ. - * This really just verifies the IO port but since we only have access - * to such a small number of IRQs (5, 4, 3) this is not bad. - * This will probably not work for more than one card. - */ - int irqaddr=0; - int i, x, status; - - if(board==DAYNA) - { - outb(0, ioaddr+DAYNA_RESET); - inb(ioaddr+DAYNA_RESET); - mdelay(333); - } - if(board==TANGENT) - { - inb(ioaddr); - outb(0, ioaddr); - outb(0, ioaddr+TANG_RESET); - } - - for(i=0; cops_irqlist[i] !=0; i++) - { - irqaddr = cops_irqlist[i]; - for(x = 0xFFFF; x>0; x --) /* wait for response */ - { - if(board==DAYNA) - { - status = (inb(ioaddr+DAYNA_CARD_STATUS)&3); - if(status == 1) - return irqaddr; - } - if(board==TANGENT) - { - if((inb(ioaddr+TANG_CARD_STATUS)& TANG_TX_READY) !=0) - return irqaddr; - } - } - } - return 0; /* no IRQ found */ -} - -/* - * Open/initialize the board. This is called (in the current kernel) - * sometime after booting when the 'ifconfig' program is run. - */ -static int cops_open(struct net_device *dev) -{ - struct cops_local *lp = netdev_priv(dev); - - if(dev->irq==0) - { - /* - * I don't know if the Dayna-style boards support polled - * operation. For now, only allow it for Tangent. - */ - if(lp->board==TANGENT) /* Poll 20 times per second */ - { - cops_timer_dev = dev; - timer_setup(&cops_timer, cops_poll, 0); - cops_timer.expires = jiffies + HZ/20; - add_timer(&cops_timer); - } - else - { - printk(KERN_WARNING "%s: No irq line set\n", dev->name); - return -EAGAIN; - } - } - - cops_jumpstart(dev); /* Start the card up. */ - - netif_start_queue(dev); - return 0; -} - -/* - * This allows for a dynamic start/restart of the entire card. - */ -static int cops_jumpstart(struct net_device *dev) -{ - struct cops_local *lp = netdev_priv(dev); - - /* - * Once the card has the firmware loaded and has acquired - * the nodeid, if it is reset it will lose it all. - */ - cops_reset(dev,1); /* Need to reset card before load firmware. */ - cops_load(dev); /* Load the firmware. */ - - /* - * If atalkd already gave us a nodeid we will use that - * one again, else we wait for atalkd to give us a nodeid - * in cops_ioctl. This may cause a problem if someone steals - * our nodeid while we are resetting. - */ - if(lp->nodeid == 1) - cops_nodeid(dev,lp->node_acquire); - - return 0; -} - -static void tangent_wait_reset(int ioaddr) -{ - int timeout=0; - - while(timeout++ < 5 && (inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0) - mdelay(1); /* Wait 1 second */ -} - -/* - * Reset the LocalTalk board. - */ -static void cops_reset(struct net_device *dev, int sleep) -{ - struct cops_local *lp = netdev_priv(dev); - int ioaddr=dev->base_addr; - - if(lp->board==TANGENT) - { - inb(ioaddr); /* Clear request latch. */ - outb(0,ioaddr); /* Clear the TANG_TX_READY flop. */ - outb(0, ioaddr+TANG_RESET); /* Reset the adapter. 
*/ - - tangent_wait_reset(ioaddr); - outb(0, ioaddr+TANG_CLEAR_INT); - } - if(lp->board==DAYNA) - { - outb(0, ioaddr+DAYNA_RESET); /* Assert the reset port */ - inb(ioaddr+DAYNA_RESET); /* Clear the reset */ - if (sleep) - msleep(333); - else - mdelay(333); - } - - netif_wake_queue(dev); -} - -static void cops_load (struct net_device *dev) -{ - struct ifreq ifr; - struct ltfirmware *ltf= (struct ltfirmware *)&ifr.ifr_ifru; - struct cops_local *lp = netdev_priv(dev); - int ioaddr=dev->base_addr; - int length, i = 0; - - strcpy(ifr.ifr_name,"lt0"); - - /* Get card's firmware code and do some checks on it. */ -#ifdef CONFIG_COPS_DAYNA - if(lp->board==DAYNA) - { - ltf->length=sizeof(ffdrv_code); - ltf->data=ffdrv_code; - } - else -#endif -#ifdef CONFIG_COPS_TANGENT - if(lp->board==TANGENT) - { - ltf->length=sizeof(ltdrv_code); - ltf->data=ltdrv_code; - } - else -#endif - { - printk(KERN_INFO "%s; unsupported board type.\n", dev->name); - return; - } - - /* Check to make sure firmware is correct length. */ - if(lp->board==DAYNA && ltf->length!=5983) - { - printk(KERN_WARNING "%s: Firmware is not length of FFDRV.BIN.\n", dev->name); - return; - } - if(lp->board==TANGENT && ltf->length!=2501) - { - printk(KERN_WARNING "%s: Firmware is not length of DRVCODE.BIN.\n", dev->name); - return; - } - - if(lp->board==DAYNA) - { - /* - * We must wait for a status response - * with the DAYNA board. - */ - while(++i<65536) - { - if((inb(ioaddr+DAYNA_CARD_STATUS)&3)==1) - break; - } - - if(i==65536) - return; - } - - /* - * Upload the firmware and kick. Byte-by-byte works nicely here. - */ - i=0; - length = ltf->length; - while(length--) - { - outb(ltf->data[i], ioaddr); - i++; - } - - if(cops_debug > 1) - printk("%s: Uploaded firmware - %d bytes of %d bytes.\n", - dev->name, i, ltf->length); - - if(lp->board==DAYNA) /* Tell Dayna to run the firmware code. */ - outb(1, ioaddr+DAYNA_INT_CARD); - else /* Tell Tang to run the firmware code. */ - inb(ioaddr); - - if(lp->board==TANGENT) - { - tangent_wait_reset(ioaddr); - inb(ioaddr); /* Clear initial ready signal. */ - } -} - -/* - * Get the LocalTalk Nodeid from the card. We can suggest - * any nodeid 1-254. The card will try and get that exact - * address else we can specify 0 as the nodeid and the card - * will autoprobe for a nodeid. - */ -static int cops_nodeid (struct net_device *dev, int nodeid) -{ - struct cops_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - if(lp->board == DAYNA) - { - /* Empty any pending adapter responses. */ - while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0) - { - outb(0, ioaddr+COPS_CLEAR_INT); /* Clear interrupts. */ - if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_REQUEST) - cops_rx(dev); /* Kick any packets waiting. */ - schedule(); - } - - outb(2, ioaddr); /* Output command packet length as 2. */ - outb(0, ioaddr); - outb(LAP_INIT, ioaddr); /* Send LAP_INIT command byte. */ - outb(nodeid, ioaddr); /* Suggest node address. */ - } - - if(lp->board == TANGENT) - { - /* Empty any pending adapter responses. */ - while(inb(ioaddr+TANG_CARD_STATUS)&TANG_RX_READY) - { - outb(0, ioaddr+COPS_CLEAR_INT); /* Clear interrupt. */ - cops_rx(dev); /* Kick out packets waiting. */ - schedule(); - } - - /* Not sure what Tangent does if nodeid picked is used. */ - if(nodeid == 0) /* Seed. */ - nodeid = jiffies&0xFF; /* Get a random try */ - outb(2, ioaddr); /* Command length LSB */ - outb(0, ioaddr); /* Command length MSB */ - outb(LAP_INIT, ioaddr); /* Send LAP_INIT byte */ - outb(nodeid, ioaddr); /* LAP address hint. 
*/ - outb(0xFF, ioaddr); /* Int. level to use */ - } - - lp->node_acquire=0; /* Set nodeid holder to 0. */ - while(lp->node_acquire==0) /* Get *True* nodeid finally. */ - { - outb(0, ioaddr+COPS_CLEAR_INT); /* Clear any interrupt. */ - - if(lp->board == DAYNA) - { - if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_REQUEST) - cops_rx(dev); /* Grab the nodeid put in lp->node_acquire. */ - } - if(lp->board == TANGENT) - { - if(inb(ioaddr+TANG_CARD_STATUS)&TANG_RX_READY) - cops_rx(dev); /* Grab the nodeid put in lp->node_acquire. */ - } - schedule(); - } - - if(cops_debug > 1) - printk(KERN_DEBUG "%s: Node ID %d has been acquired.\n", - dev->name, lp->node_acquire); - - lp->nodeid=1; /* Set got nodeid to 1. */ - - return 0; -} - -/* - * Poll the Tangent type cards to see if we have work. - */ - -static void cops_poll(struct timer_list *unused) -{ - int ioaddr, status; - int boguscount = 0; - struct net_device *dev = cops_timer_dev; - - del_timer(&cops_timer); - - if(dev == NULL) - return; /* We've been downed */ - - ioaddr = dev->base_addr; - do { - status=inb(ioaddr+TANG_CARD_STATUS); - if(status & TANG_RX_READY) - cops_rx(dev); - if(status & TANG_TX_READY) - netif_wake_queue(dev); - status = inb(ioaddr+TANG_CARD_STATUS); - } while((++boguscount < 20) && (status&(TANG_RX_READY|TANG_TX_READY))); - - /* poll 20 times per second */ - cops_timer.expires = jiffies + HZ/20; - add_timer(&cops_timer); -} - -/* - * The typical workload of the driver: - * Handle the network interface interrupts. - */ -static irqreturn_t cops_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct cops_local *lp; - int ioaddr, status; - int boguscount = 0; - - ioaddr = dev->base_addr; - lp = netdev_priv(dev); - - if(lp->board==DAYNA) - { - do { - outb(0, ioaddr + COPS_CLEAR_INT); - status=inb(ioaddr+DAYNA_CARD_STATUS); - if((status&0x03)==DAYNA_RX_REQUEST) - cops_rx(dev); - netif_wake_queue(dev); - } while(++boguscount < 20); - } - else - { - do { - status=inb(ioaddr+TANG_CARD_STATUS); - if(status & TANG_RX_READY) - cops_rx(dev); - if(status & TANG_TX_READY) - netif_wake_queue(dev); - status=inb(ioaddr+TANG_CARD_STATUS); - } while((++boguscount < 20) && (status&(TANG_RX_READY|TANG_TX_READY))); - } - - return IRQ_HANDLED; -} - -/* - * We have a good packet(s), get it/them out of the buffers. - */ -static void cops_rx(struct net_device *dev) -{ - int pkt_len = 0; - int rsp_type = 0; - struct sk_buff *skb = NULL; - struct cops_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - int boguscount = 0; - unsigned long flags; - - - spin_lock_irqsave(&lp->lock, flags); - - if(lp->board==DAYNA) - { - outb(0, ioaddr); /* Send out Zero length. */ - outb(0, ioaddr); - outb(DATA_READ, ioaddr); /* Send read command out. */ - - /* Wait for DMA to turn around. */ - while(++boguscount<1000000) - { - barrier(); - if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_READY) - break; - } - - if(boguscount==1000000) - { - printk(KERN_WARNING "%s: DMA timed out.\n",dev->name); - spin_unlock_irqrestore(&lp->lock, flags); - return; - } - } - - /* Get response length. */ - pkt_len = inb(ioaddr); - pkt_len |= (inb(ioaddr) << 8); - /* Input IO code. */ - rsp_type=inb(ioaddr); - - /* Malloc up new buffer. 
*/ - skb = dev_alloc_skb(pkt_len); - if(skb == NULL) - { - printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", - dev->name); - dev->stats.rx_dropped++; - while(pkt_len--) /* Discard packet */ - inb(ioaddr); - spin_unlock_irqrestore(&lp->lock, flags); - return; - } - skb->dev = dev; - skb_put(skb, pkt_len); - skb->protocol = htons(ETH_P_LOCALTALK); - - insb(ioaddr, skb->data, pkt_len); /* Eat the Data */ - - if(lp->board==DAYNA) - outb(1, ioaddr+DAYNA_INT_CARD); /* Interrupt the card */ - - spin_unlock_irqrestore(&lp->lock, flags); /* Restore interrupts. */ - - /* Check for bad response length */ - if(pkt_len < 0 || pkt_len > MAX_LLAP_SIZE) - { - printk(KERN_WARNING "%s: Bad packet length of %d bytes.\n", - dev->name, pkt_len); - dev->stats.tx_errors++; - dev_kfree_skb_any(skb); - return; - } - - /* Set nodeid and then get out. */ - if(rsp_type == LAP_INIT_RSP) - { /* Nodeid taken from received packet. */ - lp->node_acquire = skb->data[0]; - dev_kfree_skb_any(skb); - return; - } - - /* One last check to make sure we have a good packet. */ - if(rsp_type != LAP_RESPONSE) - { - printk(KERN_WARNING "%s: Bad packet type %d.\n", dev->name, rsp_type); - dev->stats.tx_errors++; - dev_kfree_skb_any(skb); - return; - } - - skb_reset_mac_header(skb); /* Point to entire packet. */ - skb_pull(skb,3); - skb_reset_transport_header(skb); /* Point to data (Skip header). */ - - /* Update the counters. */ - dev->stats.rx_packets++; - dev->stats.rx_bytes += skb->len; - - /* Send packet to a higher place. */ - netif_rx(skb); -} - -static void cops_timeout(struct net_device *dev, unsigned int txqueue) -{ - struct cops_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - - dev->stats.tx_errors++; - if(lp->board==TANGENT) - { - if((inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0) - printk(KERN_WARNING "%s: No TX complete interrupt.\n", dev->name); - } - printk(KERN_WARNING "%s: Transmit timed out.\n", dev->name); - cops_jumpstart(dev); /* Restart the card. */ - netif_trans_update(dev); /* prevent tx timeout */ - netif_wake_queue(dev); -} - - -/* - * Make the card transmit a LocalTalk packet. - */ - -static netdev_tx_t cops_send_packet(struct sk_buff *skb, - struct net_device *dev) -{ - struct cops_local *lp = netdev_priv(dev); - int ioaddr = dev->base_addr; - unsigned long flags; - - /* - * Block a timer-based transmit from overlapping. - */ - - netif_stop_queue(dev); - - spin_lock_irqsave(&lp->lock, flags); - if(lp->board == DAYNA) /* Wait for adapter transmit buffer. */ - while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0) - cpu_relax(); - if(lp->board == TANGENT) /* Wait for adapter transmit buffer. */ - while((inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0) - cpu_relax(); - - /* Output IO length. */ - outb(skb->len, ioaddr); - outb(skb->len >> 8, ioaddr); - - /* Output IO code. */ - outb(LAP_WRITE, ioaddr); - - if(lp->board == DAYNA) /* Check the transmit buffer again. */ - while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0); - - outsb(ioaddr, skb->data, skb->len); /* Send out the data. */ - - if(lp->board==DAYNA) /* Dayna requires you kick the card */ - outb(1, ioaddr+DAYNA_INT_CARD); - - spin_unlock_irqrestore(&lp->lock, flags); /* Restore interrupts. */ - - /* Done sending packet, update counters and cleanup. */ - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; - dev_kfree_skb (skb); - return NETDEV_TX_OK; -} - -/* - * Dummy function to keep the Appletalk layer happy. 
- */ - -static void set_multicast_list(struct net_device *dev) -{ - if(cops_debug >= 3) - printk("%s: set_multicast_list executed\n", dev->name); -} - -/* - * System ioctls for the COPS LocalTalk card. - */ - -static int cops_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct cops_local *lp = netdev_priv(dev); - struct sockaddr_at *sa = (struct sockaddr_at *)&ifr->ifr_addr; - struct atalk_addr *aa = &lp->node_addr; - - switch(cmd) - { - case SIOCSIFADDR: - /* Get and set the nodeid and network # atalkd wants. */ - cops_nodeid(dev, sa->sat_addr.s_node); - aa->s_net = sa->sat_addr.s_net; - aa->s_node = lp->node_acquire; - - /* Set broardcast address. */ - dev->broadcast[0] = 0xFF; - - /* Set hardware address. */ - dev->addr_len = 1; - dev_addr_set(dev, &aa->s_node); - return 0; - - case SIOCGIFADDR: - sa->sat_addr.s_net = aa->s_net; - sa->sat_addr.s_node = aa->s_node; - return 0; - - default: - return -EOPNOTSUPP; - } -} - -/* - * The inverse routine to cops_open(). - */ - -static int cops_close(struct net_device *dev) -{ - struct cops_local *lp = netdev_priv(dev); - - /* If we were running polled, yank the timer. - */ - if(lp->board==TANGENT && dev->irq==0) - del_timer(&cops_timer); - - netif_stop_queue(dev); - return 0; -} - - -#ifdef MODULE -static struct net_device *cops_dev; - -MODULE_LICENSE("GPL"); -module_param_hw(io, int, ioport, 0); -module_param_hw(irq, int, irq, 0); -module_param_hw(board_type, int, other, 0); - -static int __init cops_module_init(void) -{ - if (io == 0) - printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n", - cardname); - cops_dev = cops_probe(-1); - return PTR_ERR_OR_ZERO(cops_dev); -} - -static void __exit cops_module_exit(void) -{ - unregister_netdev(cops_dev); - cleanup_card(cops_dev); - free_netdev(cops_dev); -} -module_init(cops_module_init); -module_exit(cops_module_exit); -#endif /* MODULE */ diff --git a/drivers/net/appletalk/cops.h b/drivers/net/appletalk/cops.h deleted file mode 100644 index 7a0bfb351929..000000000000 --- a/drivers/net/appletalk/cops.h +++ /dev/null @@ -1,61 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* cops.h: LocalTalk driver for Linux. - * - * Authors: - * - Jay Schulist <jschlst@samba.org> - */ - -#ifndef __LINUX_COPSLTALK_H -#define __LINUX_COPSLTALK_H - -#ifdef __KERNEL__ - -/* Max LLAP size we will accept. */ -#define MAX_LLAP_SIZE 603 - -/* Tangent */ -#define TANG_CARD_STATUS 1 -#define TANG_CLEAR_INT 1 -#define TANG_RESET 3 - -#define TANG_TX_READY 1 -#define TANG_RX_READY 2 - -/* Dayna */ -#define DAYNA_CMD_DATA 0 -#define DAYNA_CLEAR_INT 1 -#define DAYNA_CARD_STATUS 2 -#define DAYNA_INT_CARD 3 -#define DAYNA_RESET 4 - -#define DAYNA_RX_READY 0 -#define DAYNA_TX_READY 1 -#define DAYNA_RX_REQUEST 3 - -/* Same on both card types */ -#define COPS_CLEAR_INT 1 - -/* LAP response codes received from the cards. */ -#define LAP_INIT 1 /* Init cmd */ -#define LAP_INIT_RSP 2 /* Init response */ -#define LAP_WRITE 3 /* Write cmd */ -#define DATA_READ 4 /* Data read */ -#define LAP_RESPONSE 4 /* Received ALAP frame response */ -#define LAP_GETSTAT 5 /* Get LAP and HW status */ -#define LAP_RSPSTAT 6 /* Status response */ - -#endif - -/* - * Structure to hold the firmware information. 
- */ -struct ltfirmware -{ - unsigned int length; - const unsigned char *data; -}; - -#define DAYNA 1 -#define TANGENT 2 - -#endif diff --git a/drivers/net/appletalk/cops_ffdrv.h b/drivers/net/appletalk/cops_ffdrv.h deleted file mode 100644 index b02005087c1b..000000000000 --- a/drivers/net/appletalk/cops_ffdrv.h +++ /dev/null @@ -1,532 +0,0 @@ - -/* - * The firmware this driver downloads into the Localtalk card is a - * separate program and is not GPL'd source code, even though the Linux - * side driver and the routine that loads this data into the card are. - * - * It is taken from the COPS SDK and is under the following license - * - * This material is licensed to you strictly for use in conjunction with - * the use of COPS LocalTalk adapters. - * There is no charge for this SDK. And no waranty express or implied - * about its fitness for any purpose. However, we will cheerefully - * refund every penny you paid for this SDK... - * Regards, - * - * Thomas F. Divine - * Chief Scientist - */ - - -/* cops_ffdrv.h: LocalTalk driver firmware dump for Linux. - * - * Authors: - * - Jay Schulist <jschlst@samba.org> - */ - - -#ifdef CONFIG_COPS_DAYNA - -static const unsigned char ffdrv_code[] = { - 58,3,0,50,228,149,33,255,255,34,226,149, - 249,17,40,152,33,202,154,183,237,82,77,68, - 11,107,98,19,54,0,237,176,175,50,80,0, - 62,128,237,71,62,32,237,57,51,62,12,237, - 57,50,237,57,54,62,6,237,57,52,62,12, - 237,57,49,33,107,137,34,32,128,33,83,130, - 34,40,128,33,86,130,34,42,128,33,112,130, - 34,36,128,33,211,130,34,38,128,62,0,237, - 57,16,33,63,148,34,34,128,237,94,205,15, - 130,251,205,168,145,24,141,67,111,112,121,114, - 105,103,104,116,32,40,67,41,32,49,57,56, - 56,32,45,32,68,97,121,110,97,32,67,111, - 109,109,117,110,105,99,97,116,105,111,110,115, - 32,32,32,65,108,108,32,114,105,103,104,116, - 115,32,114,101,115,101,114,118,101,100,46,32, - 32,40,68,40,68,7,16,8,34,7,22,6, - 16,5,12,4,8,3,6,140,0,16,39,128, - 0,4,96,10,224,6,0,7,126,2,64,11, - 118,12,6,13,0,14,193,15,0,5,96,3, - 192,1,64,9,8,62,9,211,66,62,192,211, - 66,62,100,61,32,253,6,28,33,205,129,14, - 66,237,163,194,253,129,6,28,33,205,129,14, - 64,237,163,194,9,130,201,62,47,50,71,152, - 62,47,211,68,58,203,129,237,57,20,58,204, - 129,237,57,21,33,77,152,54,132,205,233,129, - 58,228,149,254,209,40,6,56,4,62,0,24, - 2,219,96,33,233,149,119,230,62,33,232,149, - 119,213,33,8,152,17,7,0,25,119,19,25, - 119,209,201,251,237,77,245,197,213,229,221,229, - 205,233,129,62,1,50,106,137,205,158,139,221, - 225,225,209,193,241,251,237,77,245,197,213,219, - 72,237,56,16,230,46,237,57,16,237,56,12, - 58,72,152,183,32,26,6,20,17,128,2,237, - 56,46,187,32,35,237,56,47,186,32,29,219, - 72,230,1,32,3,5,32,232,175,50,72,152, - 229,221,229,62,1,50,106,137,205,158,139,221, - 225,225,24,25,62,1,50,72,152,58,201,129, - 237,57,12,58,202,129,237,57,13,237,56,16, - 246,17,237,57,16,209,193,241,251,237,77,245, - 197,229,213,221,229,237,56,16,230,17,237,57, - 16,237,56,20,58,34,152,246,16,246,8,211, - 68,62,6,61,32,253,58,34,152,246,8,211, - 68,58,203,129,237,57,20,58,204,129,237,57, - 21,237,56,16,246,34,237,57,16,221,225,209, - 225,193,241,251,237,77,33,2,0,57,126,230, - 3,237,100,1,40,2,246,128,230,130,245,62, - 5,211,64,241,211,64,201,229,213,243,237,56, - 16,230,46,237,57,16,237,56,12,251,70,35, - 35,126,254,175,202,77,133,254,129,202,15,133, - 230,128,194,191,132,43,58,44,152,119,33,76, - 152,119,35,62,132,119,120,254,255,40,4,58, - 49,152,119,219,72,43,43,112,17,3,0,237, - 56,52,230,248,237,57,52,219,72,230,1,194, - 
141,131,209,225,237,56,52,246,6,237,57,52, - 62,1,55,251,201,62,3,211,66,62,192,211, - 66,62,48,211,66,0,0,219,66,230,1,40, - 4,219,67,24,240,205,203,135,58,75,152,254, - 255,202,128,132,58,49,152,254,161,250,207,131, - 58,34,152,211,68,62,10,211,66,62,128,211, - 66,62,11,211,66,62,6,211,66,24,0,62, - 14,211,66,62,33,211,66,62,1,211,66,62, - 64,211,66,62,3,211,66,62,209,211,66,62, - 100,71,219,66,230,1,32,6,5,32,247,195, - 248,132,219,67,71,58,44,152,184,194,248,132, - 62,100,71,219,66,230,1,32,6,5,32,247, - 195,248,132,219,67,62,100,71,219,66,230,1, - 32,6,5,32,247,195,248,132,219,67,254,133, - 32,7,62,0,50,74,152,24,17,254,173,32, - 7,62,1,50,74,152,24,6,254,141,194,248, - 132,71,209,225,58,49,152,254,132,32,10,62, - 50,205,2,134,205,144,135,24,27,254,140,32, - 15,62,110,205,2,134,62,141,184,32,5,205, - 144,135,24,8,62,10,205,2,134,205,8,134, - 62,1,50,106,137,205,158,139,237,56,52,246, - 6,237,57,52,175,183,251,201,62,20,135,237, - 57,20,175,237,57,21,237,56,16,246,2,237, - 57,16,237,56,20,95,237,56,21,123,254,10, - 48,244,237,56,16,230,17,237,57,16,209,225, - 205,144,135,62,1,50,106,137,205,158,139,237, - 56,52,246,6,237,57,52,175,183,251,201,209, - 225,243,219,72,230,1,40,13,62,10,211,66, - 0,0,219,66,230,192,202,226,132,237,56,52, - 246,6,237,57,52,62,1,55,251,201,205,203, - 135,62,1,50,106,137,205,158,139,237,56,52, - 246,6,237,57,52,183,251,201,209,225,62,1, - 50,106,137,205,158,139,237,56,52,246,6,237, - 57,52,62,2,55,251,201,209,225,243,219,72, - 230,1,202,213,132,62,10,211,66,0,0,219, - 66,230,192,194,213,132,229,62,1,50,106,137, - 42,40,152,205,65,143,225,17,3,0,205,111, - 136,62,6,211,66,58,44,152,211,66,237,56, - 52,246,6,237,57,52,183,251,201,209,197,237, - 56,52,230,248,237,57,52,219,72,230,1,32, - 15,193,225,237,56,52,246,6,237,57,52,62, - 1,55,251,201,14,23,58,37,152,254,0,40, - 14,14,2,254,1,32,5,62,140,119,24,3, - 62,132,119,43,43,197,205,203,135,193,62,1, - 211,66,62,64,211,66,62,3,211,66,62,193, - 211,66,62,100,203,39,71,219,66,230,1,32, - 6,5,32,247,195,229,133,33,238,151,219,67, - 71,58,44,152,184,194,229,133,119,62,100,71, - 219,66,230,1,32,6,5,32,247,195,229,133, - 219,67,35,119,13,32,234,193,225,62,1,50, - 106,137,205,158,139,237,56,52,246,6,237,57, - 52,175,183,251,201,33,234,151,35,35,62,255, - 119,193,225,62,1,50,106,137,205,158,139,237, - 56,52,246,6,237,57,52,175,251,201,243,61, - 32,253,251,201,62,3,211,66,62,192,211,66, - 58,49,152,254,140,32,19,197,229,213,17,181, - 129,33,185,129,1,2,0,237,176,209,225,193, - 24,27,229,213,33,187,129,58,49,152,230,15, - 87,30,2,237,92,25,17,181,129,126,18,19, - 35,126,18,209,225,58,34,152,246,8,211,68, - 58,49,152,254,165,40,14,254,164,40,10,62, - 10,211,66,62,224,211,66,24,25,58,74,152, - 254,0,40,10,62,10,211,66,62,160,211,66, - 24,8,62,10,211,66,62,128,211,66,62,11, - 211,66,62,6,211,66,205,147,143,62,5,211, - 66,62,224,211,66,62,5,211,66,62,96,211, - 66,62,5,61,32,253,62,5,211,66,62,224, - 211,66,62,14,61,32,253,62,5,211,66,62, - 233,211,66,62,128,211,66,58,181,129,61,32, - 253,62,1,211,66,62,192,211,66,1,254,19, - 237,56,46,187,32,6,13,32,247,195,226,134, - 62,192,211,66,0,0,219,66,203,119,40,250, - 219,66,203,87,40,250,243,237,56,16,230,17, - 237,57,16,237,56,20,251,62,5,211,66,62, - 224,211,66,58,182,129,61,32,253,229,33,181, - 129,58,183,129,203,63,119,35,58,184,129,119, - 225,62,10,211,66,62,224,211,66,62,11,211, - 66,62,118,211,66,62,47,211,68,62,5,211, - 66,62,233,211,66,58,181,129,61,32,253,62, - 5,211,66,62,224,211,66,58,182,129,61,32, - 253,62,5,211,66,62,96,211,66,201,229,213, - 
58,50,152,230,15,87,30,2,237,92,33,187, - 129,25,17,181,129,126,18,35,19,126,18,209, - 225,58,71,152,246,8,211,68,58,50,152,254, - 165,40,14,254,164,40,10,62,10,211,66,62, - 224,211,66,24,8,62,10,211,66,62,128,211, - 66,62,11,211,66,62,6,211,66,195,248,135, - 62,3,211,66,62,192,211,66,197,229,213,17, - 181,129,33,183,129,1,2,0,237,176,209,225, - 193,62,47,211,68,62,10,211,66,62,224,211, - 66,62,11,211,66,62,118,211,66,62,1,211, - 66,62,0,211,66,205,147,143,195,16,136,62, - 3,211,66,62,192,211,66,197,229,213,17,181, - 129,33,183,129,1,2,0,237,176,209,225,193, - 62,47,211,68,62,10,211,66,62,224,211,66, - 62,11,211,66,62,118,211,66,205,147,143,62, - 5,211,66,62,224,211,66,62,5,211,66,62, - 96,211,66,62,5,61,32,253,62,5,211,66, - 62,224,211,66,62,14,61,32,253,62,5,211, - 66,62,233,211,66,62,128,211,66,58,181,129, - 61,32,253,62,1,211,66,62,192,211,66,1, - 254,19,237,56,46,187,32,6,13,32,247,195, - 88,136,62,192,211,66,0,0,219,66,203,119, - 40,250,219,66,203,87,40,250,62,5,211,66, - 62,224,211,66,58,182,129,61,32,253,62,5, - 211,66,62,96,211,66,201,197,14,67,6,0, - 62,3,211,66,62,192,211,66,62,48,211,66, - 0,0,219,66,230,1,40,4,219,67,24,240, - 62,5,211,66,62,233,211,66,62,128,211,66, - 58,181,129,61,32,253,237,163,29,62,192,211, - 66,219,66,230,4,40,250,237,163,29,32,245, - 219,66,230,4,40,250,62,255,71,219,66,230, - 4,40,3,5,32,247,219,66,230,4,40,250, - 62,5,211,66,62,224,211,66,58,182,129,61, - 32,253,62,5,211,66,62,96,211,66,58,71, - 152,254,1,202,18,137,62,16,211,66,62,56, - 211,66,62,14,211,66,62,33,211,66,62,1, - 211,66,62,248,211,66,237,56,48,246,153,230, - 207,237,57,48,62,3,211,66,62,221,211,66, - 193,201,58,71,152,211,68,62,10,211,66,62, - 128,211,66,62,11,211,66,62,6,211,66,62, - 6,211,66,58,44,152,211,66,62,16,211,66, - 62,56,211,66,62,48,211,66,0,0,62,14, - 211,66,62,33,211,66,62,1,211,66,62,248, - 211,66,237,56,48,246,145,246,8,230,207,237, - 57,48,62,3,211,66,62,221,211,66,193,201, - 44,3,1,0,70,69,1,245,197,213,229,175, - 50,72,152,237,56,16,230,46,237,57,16,237, - 56,12,62,1,211,66,0,0,219,66,95,230, - 160,32,3,195,20,139,123,230,96,194,72,139, - 62,48,211,66,62,1,211,66,62,64,211,66, - 237,91,40,152,205,207,143,25,43,55,237,82, - 218,70,139,34,42,152,98,107,58,44,152,190, - 194,210,138,35,35,62,130,190,194,200,137,62, - 1,50,48,152,62,175,190,202,82,139,62,132, - 190,32,44,50,50,152,62,47,50,71,152,229, - 175,50,106,137,42,40,152,205,65,143,225,54, - 133,43,70,58,44,152,119,43,112,17,3,0, - 62,10,205,2,134,205,111,136,195,158,138,62, - 140,190,32,19,50,50,152,58,233,149,230,4, - 202,222,138,62,1,50,71,152,195,219,137,126, - 254,160,250,185,138,254,166,242,185,138,50,50, - 152,43,126,35,229,213,33,234,149,95,22,0, - 25,126,254,132,40,18,254,140,40,14,58,50, - 152,230,15,87,126,31,21,242,65,138,56,2, - 175,119,58,50,152,230,15,87,58,233,149,230, - 62,31,21,242,85,138,218,98,138,209,225,195, - 20,139,58,50,152,33,100,137,230,15,95,22, - 0,25,126,50,71,152,209,225,58,50,152,254, - 164,250,135,138,58,73,152,254,0,40,4,54, - 173,24,2,54,133,43,70,58,44,152,119,43, - 112,17,3,0,205,70,135,175,50,106,137,205, - 208,139,58,199,129,237,57,12,58,200,129,237, - 57,13,237,56,16,246,17,237,57,16,225,209, - 193,241,251,237,77,62,129,190,194,227,138,54, - 130,43,70,58,44,152,119,43,112,17,3,0, - 205,144,135,195,20,139,35,35,126,254,132,194, - 227,138,175,50,106,137,205,158,139,24,42,58, - 201,154,254,1,40,7,62,1,50,106,137,24, - 237,58,106,137,254,1,202,222,138,62,128,166, - 194,222,138,221,229,221,33,67,152,205,127,142, - 205,109,144,221,225,225,209,193,241,251,237,77, - 
58,106,137,254,1,202,44,139,58,50,152,254, - 164,250,44,139,58,73,152,238,1,50,73,152, - 221,229,221,33,51,152,205,127,142,221,225,62, - 1,50,106,137,205,158,139,195,13,139,24,208, - 24,206,24,204,230,64,40,3,195,20,139,195, - 20,139,43,126,33,8,152,119,35,58,44,152, - 119,43,237,91,35,152,205,203,135,205,158,139, - 195,13,139,175,50,78,152,62,3,211,66,62, - 192,211,66,201,197,33,4,0,57,126,35,102, - 111,62,1,50,106,137,219,72,205,141,139,193, - 201,62,1,50,78,152,34,40,152,54,0,35, - 35,54,0,195,163,139,58,78,152,183,200,229, - 33,181,129,58,183,129,119,35,58,184,129,119, - 225,62,47,211,68,62,14,211,66,62,193,211, - 66,62,10,211,66,62,224,211,66,62,11,211, - 66,62,118,211,66,195,3,140,58,78,152,183, - 200,58,71,152,211,68,254,69,40,4,254,70, - 32,17,58,73,152,254,0,40,10,62,10,211, - 66,62,160,211,66,24,8,62,10,211,66,62, - 128,211,66,62,11,211,66,62,6,211,66,62, - 6,211,66,58,44,152,211,66,62,16,211,66, - 62,56,211,66,62,48,211,66,0,0,219,66, - 230,1,40,4,219,67,24,240,62,14,211,66, - 62,33,211,66,42,40,152,205,65,143,62,1, - 211,66,62,248,211,66,237,56,48,246,145,246, - 8,230,207,237,57,48,62,3,211,66,62,221, - 211,66,201,62,16,211,66,62,56,211,66,62, - 48,211,66,0,0,219,66,230,1,40,4,219, - 67,24,240,62,14,211,66,62,33,211,66,62, - 1,211,66,62,248,211,66,237,56,48,246,153, - 230,207,237,57,48,62,3,211,66,62,221,211, - 66,201,229,213,33,234,149,95,22,0,25,126, - 254,132,40,4,254,140,32,2,175,119,123,209, - 225,201,6,8,14,0,31,48,1,12,16,250, - 121,201,33,4,0,57,94,35,86,33,2,0, - 57,126,35,102,111,221,229,34,89,152,237,83, - 91,152,221,33,63,152,205,127,142,58,81,152, - 50,82,152,58,80,152,135,50,80,152,205,162, - 140,254,3,56,16,58,81,152,135,60,230,15, - 50,81,152,175,50,80,152,24,23,58,79,152, - 205,162,140,254,3,48,13,58,81,152,203,63, - 50,81,152,62,255,50,79,152,58,81,152,50, - 82,152,58,79,152,135,50,79,152,62,32,50, - 83,152,50,84,152,237,56,16,230,17,237,57, - 16,219,72,62,192,50,93,152,62,93,50,94, - 152,58,93,152,61,50,93,152,32,9,58,94, - 152,61,50,94,152,40,44,62,170,237,57,20, - 175,237,57,21,237,56,16,246,2,237,57,16, - 219,72,230,1,202,29,141,237,56,20,71,237, - 56,21,120,254,10,48,237,237,56,16,230,17, - 237,57,16,243,62,14,211,66,62,65,211,66, - 251,58,39,152,23,23,60,50,39,152,71,58, - 82,152,160,230,15,40,22,71,14,10,219,66, - 230,16,202,186,141,219,72,230,1,202,186,141, - 13,32,239,16,235,42,89,152,237,91,91,152, - 205,47,131,48,7,61,202,186,141,195,227,141, - 221,225,33,0,0,201,221,33,55,152,205,127, - 142,58,84,152,61,50,84,152,40,19,58,82, - 152,246,1,50,82,152,58,79,152,246,1,50, - 79,152,195,29,141,221,225,33,1,0,201,221, - 33,59,152,205,127,142,58,80,152,246,1,50, - 80,152,58,82,152,135,246,1,50,82,152,58, - 83,152,61,50,83,152,194,29,141,221,225,33, - 2,0,201,221,229,33,0,0,57,17,4,0, - 25,126,50,44,152,230,128,50,85,152,58,85, - 152,183,40,6,221,33,88,2,24,4,221,33, - 150,0,58,44,152,183,40,53,60,40,50,60, - 40,47,61,61,33,86,152,119,35,119,35,54, - 129,175,50,48,152,221,43,221,229,225,124,181, - 40,42,33,86,152,17,3,0,205,189,140,17, - 232,3,27,123,178,32,251,58,48,152,183,40, - 224,58,44,152,71,62,7,128,230,127,71,58, - 85,152,176,50,44,152,24,162,221,225,201,183, - 221,52,0,192,221,52,1,192,221,52,2,192, - 221,52,3,192,55,201,245,62,1,211,100,241, - 201,245,62,1,211,96,241,201,33,2,0,57, - 126,35,102,111,237,56,48,230,175,237,57,48, - 62,48,237,57,49,125,237,57,32,124,237,57, - 33,62,0,237,57,34,62,88,237,57,35,62, - 0,237,57,36,237,57,37,33,128,2,125,237, - 57,38,124,237,57,39,237,56,48,246,97,230, - 207,237,57,48,62,0,237,57,0,62,0,211, - 
96,211,100,201,33,2,0,57,126,35,102,111, - 237,56,48,230,175,237,57,48,62,12,237,57, - 49,62,76,237,57,32,62,0,237,57,33,237, - 57,34,125,237,57,35,124,237,57,36,62,0, - 237,57,37,33,128,2,125,237,57,38,124,237, - 57,39,237,56,48,246,97,230,207,237,57,48, - 62,1,211,96,201,33,2,0,57,126,35,102, - 111,229,237,56,48,230,87,237,57,48,125,237, - 57,40,124,237,57,41,62,0,237,57,42,62, - 67,237,57,43,62,0,237,57,44,58,106,137, - 254,1,32,5,33,6,0,24,3,33,128,2, - 125,237,57,46,124,237,57,47,237,56,50,230, - 252,246,2,237,57,50,225,201,33,4,0,57, - 94,35,86,33,2,0,57,126,35,102,111,237, - 56,48,230,87,237,57,48,125,237,57,40,124, - 237,57,41,62,0,237,57,42,62,67,237,57, - 43,62,0,237,57,44,123,237,57,46,122,237, - 57,47,237,56,50,230,244,246,0,237,57,50, - 237,56,48,246,145,230,207,237,57,48,201,213, - 237,56,46,95,237,56,47,87,237,56,46,111, - 237,56,47,103,183,237,82,32,235,33,128,2, - 183,237,82,209,201,213,237,56,38,95,237,56, - 39,87,237,56,38,111,237,56,39,103,183,237, - 82,32,235,33,128,2,183,237,82,209,201,245, - 197,1,52,0,237,120,230,253,237,121,193,241, - 201,245,197,1,52,0,237,120,246,2,237,121, - 193,241,201,33,2,0,57,126,35,102,111,126, - 35,110,103,201,33,0,0,34,102,152,34,96, - 152,34,98,152,33,202,154,34,104,152,237,91, - 104,152,42,226,149,183,237,82,17,0,255,25, - 34,100,152,203,124,40,6,33,0,125,34,100, - 152,42,104,152,35,35,35,229,205,120,139,193, - 201,205,186,149,229,42,40,152,35,35,35,229, - 205,39,144,193,124,230,3,103,221,117,254,221, - 116,255,237,91,42,152,35,35,35,183,237,82, - 32,12,17,5,0,42,42,152,205,171,149,242, - 169,144,42,40,152,229,205,120,139,193,195,198, - 149,237,91,42,152,42,98,152,25,34,98,152, - 19,19,19,42,102,152,25,34,102,152,237,91, - 100,152,33,158,253,25,237,91,102,152,205,171, - 149,242,214,144,33,0,0,34,102,152,62,1, - 50,95,152,205,225,144,195,198,149,58,95,152, - 183,200,237,91,96,152,42,102,152,205,171,149, - 242,5,145,237,91,102,152,33,98,2,25,237, - 91,96,152,205,171,149,250,37,145,237,91,96, - 152,42,102,152,183,237,82,32,7,42,98,152, - 125,180,40,13,237,91,102,152,42,96,152,205, - 171,149,242,58,145,237,91,104,152,42,102,152, - 25,35,35,35,229,205,120,139,193,175,50,95, - 152,201,195,107,139,205,206,149,250,255,243,205, - 225,144,251,58,230,149,183,194,198,149,17,1, - 0,42,98,152,205,171,149,250,198,149,62,1, - 50,230,149,237,91,96,152,42,104,152,25,221, - 117,252,221,116,253,237,91,104,152,42,96,152, - 25,35,35,35,221,117,254,221,116,255,35,35, - 35,229,205,39,144,124,230,3,103,35,35,35, - 221,117,250,221,116,251,235,221,110,252,221,102, - 253,115,35,114,35,54,4,62,1,211,100,211, - 84,195,198,149,33,0,0,34,102,152,34,96, - 152,34,98,152,33,202,154,34,104,152,237,91, - 104,152,42,226,149,183,237,82,17,0,255,25, - 34,100,152,33,109,152,54,0,33,107,152,229, - 205,240,142,193,62,47,50,34,152,62,132,50, - 49,152,205,241,145,205,61,145,58,39,152,60, - 50,39,152,24,241,205,206,149,251,255,33,109, - 152,126,183,202,198,149,110,221,117,251,33,109, - 152,54,0,221,126,251,254,1,40,28,254,3, - 40,101,254,4,202,190,147,254,5,202,147,147, - 254,8,40,87,33,107,152,229,205,240,142,195, - 198,149,58,201,154,183,32,21,33,111,152,126, - 50,229,149,205,52,144,33,110,152,110,38,0, - 229,205,11,142,193,237,91,96,152,42,104,152, - 25,221,117,254,221,116,255,35,35,54,2,17, - 2,0,43,43,115,35,114,58,44,152,35,35, - 119,58,228,149,35,119,62,1,211,100,211,84, - 62,1,50,201,154,24,169,205,153,142,58,231, - 149,183,40,250,175,50,231,149,33,110,152,126, - 254,255,40,91,58,233,149,230,63,183,40,83, - 94,22,0,33,234,149,25,126,183,40,13,33, - 
110,152,94,33,234,150,25,126,254,3,32,36, - 205,81,148,125,180,33,110,152,94,22,0,40, - 17,33,234,149,25,54,0,33,107,152,229,205, - 240,142,193,195,198,149,33,234,150,25,54,0, - 33,110,152,94,22,0,33,234,149,25,126,50, - 49,152,254,132,32,37,62,47,50,34,152,42, - 107,152,229,33,110,152,229,205,174,140,193,193, - 125,180,33,110,152,94,22,0,33,234,150,202, - 117,147,25,52,195,120,147,58,49,152,254,140, - 32,7,62,1,50,34,152,24,210,62,32,50, - 106,152,24,19,58,49,152,95,58,106,152,163, - 183,58,106,152,32,11,203,63,50,106,152,58, - 106,152,183,32,231,254,2,40,51,254,4,40, - 38,254,8,40,26,254,16,40,13,254,32,32, - 158,62,165,50,49,152,62,69,24,190,62,164, - 50,49,152,62,70,24,181,62,163,50,49,152, - 175,24,173,62,162,50,49,152,62,1,24,164, - 62,161,50,49,152,62,3,24,155,25,54,0, - 221,126,251,254,8,40,7,58,230,149,183,202, - 32,146,33,107,152,229,205,240,142,193,211,84, - 195,198,149,237,91,96,152,42,104,152,25,221, - 117,254,221,116,255,35,35,54,6,17,2,0, - 43,43,115,35,114,58,228,149,35,35,119,58, - 233,149,35,119,205,146,142,195,32,146,237,91, - 96,152,42,104,152,25,229,205,160,142,193,58, - 231,149,183,40,250,175,50,231,149,243,237,91, - 96,152,42,104,152,25,221,117,254,221,116,255, - 78,35,70,221,113,252,221,112,253,89,80,42, - 98,152,183,237,82,34,98,152,203,124,40,19, - 33,0,0,34,98,152,34,102,152,34,96,152, - 62,1,50,95,152,24,40,221,94,252,221,86, - 253,19,19,19,42,96,152,25,34,96,152,237, - 91,100,152,33,158,253,25,237,91,96,152,205, - 171,149,242,55,148,33,0,0,34,96,152,175, - 50,230,149,251,195,32,146,245,62,1,50,231, - 149,62,16,237,57,0,211,80,241,251,237,77, - 201,205,186,149,229,229,33,0,0,34,37,152, - 33,110,152,126,50,234,151,58,44,152,33,235, - 151,119,221,54,253,0,221,54,254,0,195,230, - 148,33,236,151,54,175,33,3,0,229,33,234, - 151,229,205,174,140,193,193,33,236,151,126,254, - 255,40,74,33,245,151,110,221,117,255,33,249, - 151,126,221,166,255,221,119,255,33,253,151,126, - 221,166,255,221,119,255,58,232,149,95,221,126, - 255,163,221,119,255,183,40,15,230,191,33,110, - 152,94,22,0,33,234,149,25,119,24,12,33, - 110,152,94,22,0,33,234,149,25,54,132,33, - 0,0,195,198,149,221,110,253,221,102,254,35, - 221,117,253,221,116,254,17,32,0,221,110,253, - 221,102,254,205,171,149,250,117,148,58,233,149, - 203,87,40,84,33,1,0,34,37,152,221,54, - 253,0,221,54,254,0,24,53,33,236,151,54, - 175,33,3,0,229,33,234,151,229,205,174,140, - 193,193,33,236,151,126,254,255,40,14,33,110, - 152,94,22,0,33,234,149,25,54,140,24,159, - 221,110,253,221,102,254,35,221,117,253,221,116, - 254,17,32,0,221,110,253,221,102,254,205,171, - 149,250,12,149,33,2,0,34,37,152,221,54, - 253,0,221,54,254,0,24,54,33,236,151,54, - 175,33,3,0,229,33,234,151,229,205,174,140, - 193,193,33,236,151,126,254,255,40,15,33,110, - 152,94,22,0,33,234,149,25,54,132,195,211, - 148,221,110,253,221,102,254,35,221,117,253,221, - 116,254,17,32,0,221,110,253,221,102,254,205, - 171,149,250,96,149,33,1,0,195,198,149,124, - 170,250,179,149,237,82,201,124,230,128,237,82, - 60,201,225,253,229,221,229,221,33,0,0,221, - 57,233,221,249,221,225,253,225,201,233,225,253, - 229,221,229,221,33,0,0,221,57,94,35,86, - 35,235,57,249,235,233,0,0,0,0,0,0, - 62,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 
0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 175,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,133,1,0,0,0,63, - 255,255,255,255,0,0,0,63,0,0,0,0, - 0,0,0,0,0,0,0,24,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0 - } ; - -#endif diff --git a/drivers/net/appletalk/cops_ltdrv.h b/drivers/net/appletalk/cops_ltdrv.h deleted file mode 100644 index c699b1ad31da..000000000000 --- a/drivers/net/appletalk/cops_ltdrv.h +++ /dev/null @@ -1,241 +0,0 @@ -/* - * The firmware this driver downloads into the Localtalk card is a - * separate program and is not GPL'd source code, even though the Linux - * side driver and the routine that loads this data into the card are. - * - * It is taken from the COPS SDK and is under the following license - * - * This material is licensed to you strictly for use in conjunction with - * the use of COPS LocalTalk adapters. - * There is no charge for this SDK. And no waranty express or implied - * about its fitness for any purpose. However, we will cheerefully - * refund every penny you paid for this SDK... - * Regards, - * - * Thomas F. Divine - * Chief Scientist - */ - - -/* cops_ltdrv.h: LocalTalk driver firmware dump for Linux. 
- * - * Authors: - * - Jay Schulist <jschlst@samba.org> - */ - - -#ifdef CONFIG_COPS_TANGENT - -static const unsigned char ltdrv_code[] = { - 58,3,0,50,148,10,33,143,15,62,85,119, - 190,32,9,62,170,119,190,32,3,35,24,241, - 34,146,10,249,17,150,10,33,143,15,183,237, - 82,77,68,11,107,98,19,54,0,237,176,62, - 16,237,57,51,62,0,237,57,50,237,57,54, - 62,12,237,57,49,62,195,33,39,2,50,56, - 0,34,57,0,237,86,205,30,2,251,205,60, - 10,24,169,67,111,112,121,114,105,103,104,116, - 32,40,99,41,32,49,57,56,56,45,49,57, - 57,50,44,32,80,114,105,110,116,105,110,103, - 32,67,111,109,109,117,110,105,99,97,116,105, - 111,110,115,32,65,115,115,111,99,105,97,116, - 101,115,44,32,73,110,99,46,65,108,108,32, - 114,105,103,104,116,115,32,114,101,115,101,114, - 118,101,100,46,32,32,4,4,22,40,255,60, - 4,96,10,224,6,0,7,126,2,64,11,246, - 12,6,13,0,14,193,15,0,5,96,3,192, - 1,0,9,8,62,3,211,82,62,192,211,82, - 201,62,3,211,82,62,213,211,82,201,62,5, - 211,82,62,224,211,82,201,62,5,211,82,62, - 224,211,82,201,62,5,211,82,62,96,211,82, - 201,6,28,33,180,1,14,82,237,163,194,4, - 2,33,39,2,34,64,0,58,3,0,230,1, - 192,62,11,237,121,62,118,237,121,201,33,182, - 10,54,132,205,253,1,201,245,197,213,229,42, - 150,10,14,83,17,98,2,67,20,237,162,58, - 179,1,95,219,82,230,1,32,6,29,32,247, - 195,17,3,62,1,211,82,219,82,95,230,160, - 32,10,237,162,32,225,21,32,222,195,15,3, - 237,162,123,230,96,194,21,3,62,48,211,82, - 62,1,211,82,175,211,82,237,91,150,10,43, - 55,237,82,218,19,3,34,152,10,98,107,58, - 154,10,190,32,81,62,1,50,158,10,35,35, - 62,132,190,32,44,54,133,43,70,58,154,10, - 119,43,112,17,3,0,205,137,3,62,16,211, - 82,62,56,211,82,205,217,1,42,150,10,14, - 83,17,98,2,67,20,58,178,1,95,195,59, - 2,62,129,190,194,227,2,54,130,43,70,58, - 154,10,119,43,112,17,3,0,205,137,3,195, - 254,2,35,35,126,254,132,194,227,2,205,61, - 3,24,20,62,128,166,194,222,2,221,229,221, - 33,175,10,205,93,6,205,144,7,221,225,225, - 209,193,241,251,237,77,221,229,221,33,159,10, - 205,93,6,221,225,205,61,3,195,247,2,24, - 237,24,235,24,233,230,64,40,2,24,227,24, - 225,175,50,179,10,205,208,1,201,197,33,4, - 0,57,126,35,102,111,205,51,3,193,201,62, - 1,50,179,10,34,150,10,54,0,58,179,10, - 183,200,62,14,211,82,62,193,211,82,62,10, - 211,82,62,224,211,82,62,6,211,82,58,154, - 10,211,82,62,16,211,82,62,56,211,82,62, - 48,211,82,219,82,230,1,40,4,219,83,24, - 242,62,14,211,82,62,33,211,82,62,1,211, - 82,62,9,211,82,62,32,211,82,205,217,1, - 201,14,83,205,208,1,24,23,14,83,205,208, - 1,205,226,1,58,174,1,61,32,253,205,244, - 1,58,174,1,61,32,253,205,226,1,58,175, - 1,61,32,253,62,5,211,82,62,233,211,82, - 62,128,211,82,58,176,1,61,32,253,237,163, - 27,62,192,211,82,219,82,230,4,40,250,237, - 163,27,122,179,32,243,219,82,230,4,40,250, - 58,178,1,71,219,82,230,4,40,3,5,32, - 247,219,82,230,4,40,250,205,235,1,58,177, - 1,61,32,253,205,244,1,201,229,213,35,35, - 126,230,128,194,145,4,43,58,154,10,119,43, - 70,33,181,10,119,43,112,17,3,0,243,62, - 10,211,82,219,82,230,128,202,41,4,209,225, - 62,1,55,251,201,205,144,3,58,180,10,254, - 255,202,127,4,205,217,1,58,178,1,71,219, - 82,230,1,32,6,5,32,247,195,173,4,219, - 83,71,58,154,10,184,194,173,4,58,178,1, - 71,219,82,230,1,32,6,5,32,247,195,173, - 4,219,83,58,178,1,71,219,82,230,1,32, - 6,5,32,247,195,173,4,219,83,254,133,194, - 173,4,58,179,1,24,4,58,179,1,135,61, - 32,253,209,225,205,137,3,205,61,3,183,251, - 201,209,225,243,62,10,211,82,219,82,230,128, - 202,164,4,62,1,55,251,201,205,144,3,205, - 61,3,183,251,201,209,225,62,2,55,251,201, - 243,62,14,211,82,62,33,211,82,251,201,33, - 
4,0,57,94,35,86,33,2,0,57,126,35, - 102,111,221,229,34,193,10,237,83,195,10,221, - 33,171,10,205,93,6,58,185,10,50,186,10, - 58,184,10,135,50,184,10,205,112,6,254,3, - 56,16,58,185,10,135,60,230,15,50,185,10, - 175,50,184,10,24,23,58,183,10,205,112,6, - 254,3,48,13,58,185,10,203,63,50,185,10, - 62,255,50,183,10,58,185,10,50,186,10,58, - 183,10,135,50,183,10,62,32,50,187,10,50, - 188,10,6,255,219,82,230,16,32,3,5,32, - 247,205,180,4,6,40,219,82,230,16,40,3, - 5,32,247,62,10,211,82,219,82,230,128,194, - 46,5,219,82,230,16,40,214,237,95,71,58, - 186,10,160,230,15,40,32,71,14,10,62,10, - 211,82,219,82,230,128,202,119,5,205,180,4, - 195,156,5,219,82,230,16,202,156,5,13,32, - 229,16,225,42,193,10,237,91,195,10,205,252, - 3,48,7,61,202,156,5,195,197,5,221,225, - 33,0,0,201,221,33,163,10,205,93,6,58, - 188,10,61,50,188,10,40,19,58,186,10,246, - 1,50,186,10,58,183,10,246,1,50,183,10, - 195,46,5,221,225,33,1,0,201,221,33,167, - 10,205,93,6,58,184,10,246,1,50,184,10, - 58,186,10,135,246,1,50,186,10,58,187,10, - 61,50,187,10,194,46,5,221,225,33,2,0, - 201,221,229,33,0,0,57,17,4,0,25,126, - 50,154,10,230,128,50,189,10,58,189,10,183, - 40,6,221,33,88,2,24,4,221,33,150,0, - 58,154,10,183,40,49,60,40,46,61,33,190, - 10,119,35,119,35,54,129,175,50,158,10,221, - 43,221,229,225,124,181,40,42,33,190,10,17, - 3,0,205,206,4,17,232,3,27,123,178,32, - 251,58,158,10,183,40,224,58,154,10,71,62, - 7,128,230,127,71,58,189,10,176,50,154,10, - 24,166,221,225,201,183,221,52,0,192,221,52, - 1,192,221,52,2,192,221,52,3,192,55,201, - 6,8,14,0,31,48,1,12,16,250,121,201, - 33,2,0,57,94,35,86,35,78,35,70,35, - 126,35,102,105,79,120,68,103,237,176,201,33, - 2,0,57,126,35,102,111,62,17,237,57,48, - 125,237,57,40,124,237,57,41,62,0,237,57, - 42,62,64,237,57,43,62,0,237,57,44,33, - 128,2,125,237,57,46,124,237,57,47,62,145, - 237,57,48,211,68,58,149,10,211,66,201,33, - 2,0,57,126,35,102,111,62,33,237,57,48, - 62,64,237,57,32,62,0,237,57,33,237,57, - 34,125,237,57,35,124,237,57,36,62,0,237, - 57,37,33,128,2,125,237,57,38,124,237,57, - 39,62,97,237,57,48,211,67,58,149,10,211, - 66,201,237,56,46,95,237,56,47,87,237,56, - 46,111,237,56,47,103,183,237,82,32,235,33, - 128,2,183,237,82,201,237,56,38,95,237,56, - 39,87,237,56,38,111,237,56,39,103,183,237, - 82,32,235,33,128,2,183,237,82,201,205,106, - 10,221,110,6,221,102,7,126,35,110,103,195, - 118,10,205,106,10,33,0,0,34,205,10,34, - 198,10,34,200,10,33,143,15,34,207,10,237, - 91,207,10,42,146,10,183,237,82,17,0,255, - 25,34,203,10,203,124,40,6,33,0,125,34, - 203,10,42,207,10,229,205,37,3,195,118,10, - 205,106,10,229,42,150,10,35,35,35,229,205, - 70,7,193,124,230,3,103,221,117,254,221,116, - 255,237,91,152,10,35,35,35,183,237,82,32, - 12,17,5,0,42,152,10,205,91,10,242,203, - 7,42,150,10,229,205,37,3,195,118,10,237, - 91,152,10,42,200,10,25,34,200,10,42,205, - 10,25,34,205,10,237,91,203,10,33,158,253, - 25,237,91,205,10,205,91,10,242,245,7,33, - 0,0,34,205,10,62,1,50,197,10,205,5, - 8,33,0,0,57,249,195,118,10,205,106,10, - 58,197,10,183,202,118,10,237,91,198,10,42, - 205,10,205,91,10,242,46,8,237,91,205,10, - 33,98,2,25,237,91,198,10,205,91,10,250, - 78,8,237,91,198,10,42,205,10,183,237,82, - 32,7,42,200,10,125,180,40,13,237,91,205, - 10,42,198,10,205,91,10,242,97,8,237,91, - 207,10,42,205,10,25,229,205,37,3,175,50, - 197,10,195,118,10,205,29,3,33,0,0,57, - 249,195,118,10,205,106,10,58,202,10,183,40, - 22,205,14,7,237,91,209,10,19,19,19,205, - 91,10,242,139,8,33,1,0,195,118,10,33, - 0,0,195,118,10,205,126,10,252,255,205,108, - 8,125,180,194,118,10,237,91,200,10,33,0, - 
0,205,91,10,242,118,10,237,91,207,10,42, - 198,10,25,221,117,254,221,116,255,35,35,35, - 229,205,70,7,193,124,230,3,103,35,35,35, - 221,117,252,221,116,253,229,221,110,254,221,102, - 255,229,33,212,10,229,205,124,6,193,193,221, - 110,252,221,102,253,34,209,10,33,211,10,54, - 4,33,209,10,227,205,147,6,193,62,1,50, - 202,10,243,221,94,252,221,86,253,42,200,10, - 183,237,82,34,200,10,203,124,40,17,33,0, - 0,34,200,10,34,205,10,34,198,10,50,197, - 10,24,37,221,94,252,221,86,253,42,198,10, - 25,34,198,10,237,91,203,10,33,158,253,25, - 237,91,198,10,205,91,10,242,68,9,33,0, - 0,34,198,10,205,5,8,33,0,0,57,249, - 251,195,118,10,205,106,10,33,49,13,126,183, - 40,16,205,42,7,237,91,47,13,19,19,19, - 205,91,10,242,117,9,58,142,15,198,1,50, - 142,15,195,118,10,33,49,13,126,254,1,40, - 25,254,3,202,7,10,254,5,202,21,10,33, - 49,13,54,0,33,47,13,229,205,207,6,195, - 118,10,58,141,15,183,32,72,33,51,13,126, - 50,149,10,205,86,7,33,50,13,126,230,127, - 183,32,40,58,142,15,230,127,50,142,15,183, - 32,5,198,1,50,142,15,33,50,13,126,111, - 23,159,103,203,125,58,142,15,40,5,198,128, - 50,142,15,33,50,13,119,33,50,13,126,111, - 23,159,103,229,205,237,5,193,33,211,10,54, - 2,33,2,0,34,209,10,58,154,10,33,212, - 10,119,58,148,10,33,213,10,119,33,209,10, - 229,205,147,6,193,24,128,42,47,13,229,33, - 50,13,229,205,191,4,193,24,239,33,211,10, - 54,6,33,3,0,34,209,10,58,154,10,33, - 212,10,119,58,148,10,33,213,10,119,33,214, - 10,54,5,33,209,10,229,205,147,6,24,200, - 205,106,10,33,49,13,54,0,33,47,13,229, - 205,207,6,33,209,10,227,205,147,6,193,205, - 80,9,205,145,8,24,248,124,170,250,99,10, - 237,82,201,124,230,128,237,82,60,201,225,253, - 229,221,229,221,33,0,0,221,57,233,221,249, - 221,225,253,225,201,233,225,253,229,221,229,221, - 33,0,0,221,57,94,35,86,35,235,57,249, - 235,233,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0 - } ; - -#endif diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c deleted file mode 100644 index d558535390f9..000000000000 --- a/drivers/net/appletalk/ipddp.c +++ /dev/null @@ -1,345 +0,0 @@ -/* - * ipddp.c: IP to Appletalk-IP Encapsulation driver for Linux - * Appletalk-IP to IP Decapsulation driver for Linux - * - * Authors: - * - DDP-IP Encap by: Bradford W. Johnson <johns393@maroon.tc.umn.edu> - * - DDP-IP Decap by: Jay Schulist <jschlst@samba.org> - * - * Derived from: - * - Almost all code already existed in net/appletalk/ddp.c I just - * moved/reorginized it into a driver file. Original IP-over-DDP code - * was done by Bradford W. Johnson <johns393@maroon.tc.umn.edu> - * - skeleton.c: A network driver outline for linux. - * Written 1993-94 by Donald Becker. - * - dummy.c: A dummy net driver. By Nick Holloway. - * - MacGate: A user space Daemon for Appletalk-IP Decap for - * Linux by Jay Schulist <jschlst@samba.org> - * - * Copyright 1993 United States Government as represented by the - * Director, National Security Agency. - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. 
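
A note on the framing the deleted ipddp driver reserved headroom for: Ethernet (14 bytes) + SNAP (8 bytes) + an extended DDP header + one DDP type byte. The arithmetic below reproduces the two magic numbers that appear in the deleted source that follows, the hard_header_len computation and the 35 in skb_pull(); the 12-byte size of struct ddpehdr is an assumption based on its field layout (4 x __be16 + 4 x __u8), not something stated in this diff.

	#include <stdio.h>

	int main(void)
	{
		/* assumed: struct ddpehdr packs to 12 bytes */
		unsigned int ddpehdr_size = 12;
		unsigned int hard_header_len = 14 + 8 + ddpehdr_size + 1;

		printf("hard_header_len = %u\n", hard_header_len);	/* 35 */
		/* skb_pull(skb, 35 - (sizeof(struct ddpehdr) + 1)) strips
		 * back down to the Ethernet + SNAP share of that budget: */
		printf("pulled          = %u\n",
		       hard_header_len - (ddpehdr_size + 1));		/* 22 */
		return 0;
	}
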
- */ - -#include <linux/compat.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/ip.h> -#include <linux/atalk.h> -#include <linux/if_arp.h> -#include <linux/slab.h> -#include <net/route.h> -#include <linux/uaccess.h> - -#include "ipddp.h" /* Our stuff */ - -static const char version[] = KERN_INFO "ipddp.c:v0.01 8/28/97 Bradford W. Johnson <johns393@maroon.tc.umn.edu>\n"; - -static struct ipddp_route *ipddp_route_list; -static DEFINE_SPINLOCK(ipddp_route_lock); - -#ifdef CONFIG_IPDDP_ENCAP -static int ipddp_mode = IPDDP_ENCAP; -#else -static int ipddp_mode = IPDDP_DECAP; -#endif - -/* Index to functions, as function prototypes. */ -static netdev_tx_t ipddp_xmit(struct sk_buff *skb, - struct net_device *dev); -static int ipddp_create(struct ipddp_route *new_rt); -static int ipddp_delete(struct ipddp_route *rt); -static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt); -static int ipddp_siocdevprivate(struct net_device *dev, struct ifreq *ifr, - void __user *data, int cmd); - -static const struct net_device_ops ipddp_netdev_ops = { - .ndo_start_xmit = ipddp_xmit, - .ndo_siocdevprivate = ipddp_siocdevprivate, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static struct net_device * __init ipddp_init(void) -{ - static unsigned version_printed; - struct net_device *dev; - int err; - - dev = alloc_etherdev(0); - if (!dev) - return ERR_PTR(-ENOMEM); - - netif_keep_dst(dev); - strcpy(dev->name, "ipddp%d"); - - if (version_printed++ == 0) - printk(version); - - /* Initialize the device structure. */ - dev->netdev_ops = &ipddp_netdev_ops; - - dev->type = ARPHRD_IPDDP; /* IP over DDP tunnel */ - dev->mtu = 585; - dev->flags |= IFF_NOARP; - - /* - * The worst case header we will need is currently a - * ethernet header (14 bytes) and a ddp header (sizeof ddpehdr+1) - * We send over SNAP so that takes another 8 bytes. - */ - dev->hard_header_len = 14+8+sizeof(struct ddpehdr)+1; - - err = register_netdev(dev); - if (err) { - free_netdev(dev); - return ERR_PTR(err); - } - - /* Let the user now what mode we are in */ - if(ipddp_mode == IPDDP_ENCAP) - printk("%s: Appletalk-IP Encap. mode by Bradford W. Johnson <johns393@maroon.tc.umn.edu>\n", - dev->name); - if(ipddp_mode == IPDDP_DECAP) - printk("%s: Appletalk-IP Decap. mode by Jay Schulist <jschlst@samba.org>\n", - dev->name); - - return dev; -} - - -/* - * Transmit LLAP/ELAP frame using aarp_send_ddp. - */ -static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct rtable *rtable = skb_rtable(skb); - __be32 paddr = 0; - struct ddpehdr *ddp; - struct ipddp_route *rt; - struct atalk_addr *our_addr; - - if (rtable->rt_gw_family == AF_INET) - paddr = rtable->rt_gw4; - - spin_lock(&ipddp_route_lock); - - /* - * Find appropriate route to use, based only on IP number. - */ - for(rt = ipddp_route_list; rt != NULL; rt = rt->next) - { - if(rt->ip == paddr) - break; - } - if(rt == NULL) { - spin_unlock(&ipddp_route_lock); - return NETDEV_TX_OK; - } - - our_addr = atalk_find_dev_addr(rt->dev); - - if(ipddp_mode == IPDDP_DECAP) - /* - * Pull off the excess room that should not be there. - * This is due to a hard-header problem. This is the - * quick fix for now though, till it breaks. 
- */ - skb_pull(skb, 35-(sizeof(struct ddpehdr)+1)); - - /* Create the Extended DDP header */ - ddp = (struct ddpehdr *)skb->data; - ddp->deh_len_hops = htons(skb->len + (1<<10)); - ddp->deh_sum = 0; - - /* - * For Localtalk we need aarp_send_ddp to strip the - * long DDP header and place a shot DDP header on it. - */ - if(rt->dev->type == ARPHRD_LOCALTLK) - { - ddp->deh_dnet = 0; /* FIXME more hops?? */ - ddp->deh_snet = 0; - } - else - { - ddp->deh_dnet = rt->at.s_net; /* FIXME more hops?? */ - ddp->deh_snet = our_addr->s_net; - } - ddp->deh_dnode = rt->at.s_node; - ddp->deh_snode = our_addr->s_node; - ddp->deh_dport = 72; - ddp->deh_sport = 72; - - *((__u8 *)(ddp+1)) = 22; /* ddp type = IP */ - - skb->protocol = htons(ETH_P_ATALK); /* Protocol has changed */ - - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; - - aarp_send_ddp(rt->dev, skb, &rt->at, NULL); - - spin_unlock(&ipddp_route_lock); - - return NETDEV_TX_OK; -} - -/* - * Create a routing entry. We first verify that the - * record does not already exist. If it does we return -EEXIST - */ -static int ipddp_create(struct ipddp_route *new_rt) -{ - struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL); - - if (rt == NULL) - return -ENOMEM; - - rt->ip = new_rt->ip; - rt->at = new_rt->at; - rt->next = NULL; - if ((rt->dev = atrtr_get_dev(&rt->at)) == NULL) { - kfree(rt); - return -ENETUNREACH; - } - - spin_lock_bh(&ipddp_route_lock); - if (__ipddp_find_route(rt)) { - spin_unlock_bh(&ipddp_route_lock); - kfree(rt); - return -EEXIST; - } - - rt->next = ipddp_route_list; - ipddp_route_list = rt; - - spin_unlock_bh(&ipddp_route_lock); - - return 0; -} - -/* - * Delete a route, we only delete a FULL match. - * If route does not exist we return -ENOENT. - */ -static int ipddp_delete(struct ipddp_route *rt) -{ - struct ipddp_route **r = &ipddp_route_list; - struct ipddp_route *tmp; - - spin_lock_bh(&ipddp_route_lock); - while((tmp = *r) != NULL) - { - if(tmp->ip == rt->ip && - tmp->at.s_net == rt->at.s_net && - tmp->at.s_node == rt->at.s_node) - { - *r = tmp->next; - spin_unlock_bh(&ipddp_route_lock); - kfree(tmp); - return 0; - } - r = &tmp->next; - } - - spin_unlock_bh(&ipddp_route_lock); - return -ENOENT; -} - -/* - * Find a routing entry, we only return a FULL match - */ -static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt) -{ - struct ipddp_route *f; - - for(f = ipddp_route_list; f != NULL; f = f->next) - { - if(f->ip == rt->ip && - f->at.s_net == rt->at.s_net && - f->at.s_node == rt->at.s_node) - return f; - } - - return NULL; -} - -static int ipddp_siocdevprivate(struct net_device *dev, struct ifreq *ifr, - void __user *data, int cmd) -{ - struct ipddp_route rcp, rcp2, *rp; - - if (in_compat_syscall()) - return -EOPNOTSUPP; - - if(!capable(CAP_NET_ADMIN)) - return -EPERM; - - if (copy_from_user(&rcp, data, sizeof(rcp))) - return -EFAULT; - - switch(cmd) - { - case SIOCADDIPDDPRT: - return ipddp_create(&rcp); - - case SIOCFINDIPDDPRT: - spin_lock_bh(&ipddp_route_lock); - rp = __ipddp_find_route(&rcp); - if (rp) { - memset(&rcp2, 0, sizeof(rcp2)); - rcp2.ip = rp->ip; - rcp2.at = rp->at; - rcp2.flags = rp->flags; - } - spin_unlock_bh(&ipddp_route_lock); - - if (rp) { - if (copy_to_user(data, &rcp2, - sizeof(struct ipddp_route))) - return -EFAULT; - return 0; - } else - return -ENOENT; - - case SIOCDELIPDDPRT: - return ipddp_delete(&rcp); - - default: - return -EINVAL; - } -} - -static struct net_device *dev_ipddp; - -MODULE_LICENSE("GPL"); -module_param(ipddp_mode, int, 0); - -static int __init 
ipddp_init_module(void) -{ - dev_ipddp = ipddp_init(); - return PTR_ERR_OR_ZERO(dev_ipddp); -} - -static void __exit ipddp_cleanup_module(void) -{ - struct ipddp_route *p; - - unregister_netdev(dev_ipddp); - free_netdev(dev_ipddp); - - while (ipddp_route_list) { - p = ipddp_route_list->next; - kfree(ipddp_route_list); - ipddp_route_list = p; - } -} - -module_init(ipddp_init_module); -module_exit(ipddp_cleanup_module); diff --git a/drivers/net/appletalk/ipddp.h b/drivers/net/appletalk/ipddp.h deleted file mode 100644 index 9a8e45a46925..000000000000 --- a/drivers/net/appletalk/ipddp.h +++ /dev/null @@ -1,28 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * ipddp.h: Header for IP-over-DDP driver for Linux. - */ - -#ifndef __LINUX_IPDDP_H -#define __LINUX_IPDDP_H - -#ifdef __KERNEL__ - -#define SIOCADDIPDDPRT (SIOCDEVPRIVATE) -#define SIOCDELIPDDPRT (SIOCDEVPRIVATE+1) -#define SIOCFINDIPDDPRT (SIOCDEVPRIVATE+2) - -struct ipddp_route -{ - struct net_device *dev; /* Carrier device */ - __be32 ip; /* IP address */ - struct atalk_addr at; /* Gateway appletalk address */ - int flags; - struct ipddp_route *next; -}; - -#define IPDDP_ENCAP 1 -#define IPDDP_DECAP 2 - -#endif /* __KERNEL__ */ -#endif /* __LINUX_IPDDP_H */ diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h index 19e996a829c9..b54275389f8a 100644 --- a/drivers/net/arcnet/arcdevice.h +++ b/drivers/net/arcnet/arcdevice.h @@ -186,6 +186,8 @@ do { \ #define ARC_IS_5MBIT 1 /* card default speed is 5MBit */ #define ARC_CAN_10MBIT 2 /* card uses COM20022, supporting 10MBit, but default is 2.5MBit. */ +#define ARC_HAS_LED 4 /* card has software controlled LEDs */ +#define ARC_HAS_ROTARY 8 /* card has rotary encoder */ /* information needed to define an encapsulation driver */ struct ArcProto { diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 99265667538c..d9e052c49ba1 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -464,7 +464,7 @@ static void arcnet_reply_tasklet(struct tasklet_struct *t) ret = sock_queue_err_skb(sk, ackskb); if (ret) - kfree_skb(ackskb); + dev_kfree_skb_irq(ackskb); local_irq_enable(); }; diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index c580acb8b1d3..7b5c8bb02f11 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -213,12 +213,13 @@ static int com20020pci_probe(struct pci_dev *pdev, if (!strncmp(ci->name, "EAE PLX-PCI FB2", 15)) lp->backplane = 1; - /* Get the dev_id from the PLX rotary coder */ - if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15)) - dev_id_mask = 0x3; - dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask; - - snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i); + if (ci->flags & ARC_HAS_ROTARY) { + /* Get the dev_id from the PLX rotary coder */ + if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15)) + dev_id_mask = 0x3; + dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask; + snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i); + } if (arcnet_inb(ioaddr, COM20020_REG_R_STATUS) == 0xFF) { pr_err("IO address %Xh is empty!\n", ioaddr); @@ -230,6 +231,10 @@ static int com20020pci_probe(struct pci_dev *pdev, goto err_free_arcdev; } + ret = com20020_found(dev, IRQF_SHARED); + if (ret) + goto err_free_arcdev; + card = devm_kzalloc(&pdev->dev, sizeof(struct com20020_dev), GFP_KERNEL); if (!card) { @@ -239,41 +244,39 @@ static int com20020pci_probe(struct pci_dev *pdev, card->index = i; card->pci_priv = priv; - 
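
The ARC_HAS_LED / ARC_HAS_ROTARY bits added to arcdevice.h above let com20020pci_probe() skip rotary-encoder reads and LED registration on cards that lack the hardware, as the gated blocks around this point show. A standalone sketch of the capability-flag pattern (the flag values are copied from the patch; the card table and probe body here are illustrative, not the driver's code):

	#include <stdio.h>

	#define ARC_IS_5MBIT	1	/* card default speed is 5MBit */
	#define ARC_CAN_10MBIT	2	/* card supports 10MBit */
	#define ARC_HAS_LED	4	/* card has software controlled LEDs */
	#define ARC_HAS_ROTARY	8	/* card has rotary encoder */

	struct card_info_example {
		const char *name;
		unsigned int flags;
	};

	static void probe_example(const struct card_info_example *ci)
	{
		if (ci->flags & ARC_HAS_ROTARY)
			printf("%s: read dev_id from rotary encoder\n", ci->name);
		if (ci->flags & ARC_HAS_LED)
			printf("%s: register tx/recon LEDs\n", ci->name);
	}

	int main(void)
	{
		struct card_info_example eae = { "EAE PLX-PCI MA1",
			ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT };
		struct card_info_example sohard = { "SOHARD SH ARC-PCI", 0 };

		probe_example(&eae);	/* sets up rotary dev_id and LEDs */
		probe_example(&sohard);	/* plain probe, no optional hardware */
		return 0;
	}
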
card->tx_led.brightness_set = led_tx_set; - card->tx_led.default_trigger = devm_kasprintf(&pdev->dev, - GFP_KERNEL, "arc%d-%d-tx", - dev->dev_id, i); - card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, - "pci:green:tx:%d-%d", - dev->dev_id, i); - - card->tx_led.dev = &dev->dev; - card->recon_led.brightness_set = led_recon_set; - card->recon_led.default_trigger = devm_kasprintf(&pdev->dev, - GFP_KERNEL, "arc%d-%d-recon", - dev->dev_id, i); - card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, - "pci:red:recon:%d-%d", - dev->dev_id, i); - card->recon_led.dev = &dev->dev; - card->dev = dev; - - ret = devm_led_classdev_register(&pdev->dev, &card->tx_led); - if (ret) - goto err_free_arcdev; - ret = devm_led_classdev_register(&pdev->dev, &card->recon_led); - if (ret) - goto err_free_arcdev; - - dev_set_drvdata(&dev->dev, card); - - ret = com20020_found(dev, IRQF_SHARED); - if (ret) - goto err_free_arcdev; - - devm_arcnet_led_init(dev, dev->dev_id, i); + if (ci->flags & ARC_HAS_LED) { + card->tx_led.brightness_set = led_tx_set; + card->tx_led.default_trigger = devm_kasprintf(&pdev->dev, + GFP_KERNEL, "arc%d-%d-tx", + dev->dev_id, i); + card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, + "pci:green:tx:%d-%d", + dev->dev_id, i); + + card->tx_led.dev = &dev->dev; + card->recon_led.brightness_set = led_recon_set; + card->recon_led.default_trigger = devm_kasprintf(&pdev->dev, + GFP_KERNEL, "arc%d-%d-recon", + dev->dev_id, i); + card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, + "pci:red:recon:%d-%d", + dev->dev_id, i); + card->recon_led.dev = &dev->dev; + + ret = devm_led_classdev_register(&pdev->dev, &card->tx_led); + if (ret) + goto err_free_arcdev; + + ret = devm_led_classdev_register(&pdev->dev, &card->recon_led); + if (ret) + goto err_free_arcdev; + + dev_set_drvdata(&dev->dev, card); + devm_arcnet_led_init(dev, dev->dev_id, i); + } + card->dev = dev; list_add(&card->list, &priv->list_dev); continue; @@ -329,7 +332,7 @@ static struct com20020_pci_card_info card_info_5mbit = { }; static struct com20020_pci_card_info card_info_sohard = { - .name = "PLX-PCI", + .name = "SOHARD SH ARC-PCI", .devcount = 1, /* SOHARD needs PCI base addr 4 */ .chan_map_tbl = { @@ -364,7 +367,7 @@ static struct com20020_pci_card_info card_info_eae_arc1 = { }, }, .rotary = 0x0, - .flags = ARC_CAN_10MBIT, + .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT, }; static struct com20020_pci_card_info card_info_eae_ma1 = { @@ -396,7 +399,7 @@ static struct com20020_pci_card_info card_info_eae_ma1 = { }, }, .rotary = 0x0, - .flags = ARC_CAN_10MBIT, + .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT, }; static struct com20020_pci_card_info card_info_eae_fb2 = { @@ -421,7 +424,7 @@ static struct com20020_pci_card_info card_info_eae_fb2 = { }, }, .rotary = 0x0, - .flags = ARC_CAN_10MBIT, + .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT, }; static const struct pci_device_id com20020pci_id_table[] = { diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 683203f87ae2..31377bb1cc97 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -306,8 +306,13 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (!sock) return -ESHUTDOWN; - rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr, info, - IPPROTO_UDP, use_cache); + sport = udp_flow_src_port(bareudp->net, skb, + bareudp->sport_min, USHRT_MAX, + true); + rt = udp_tunnel_dst_lookup(skb, dev, bareudp->net, 0, &saddr, &info->key, + sport, bareudp->port, key->tos, + use_cache 
? + (struct dst_cache *)&info->dst_cache : NULL); if (IS_ERR(rt)) return PTR_ERR(rt); @@ -315,9 +320,6 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev, skb_tunnel_check_pmtu(skb, &rt->dst, BAREUDP_IPV4_HLEN + info->options_len, false); - sport = udp_flow_src_port(bareudp->net, skb, - bareudp->sport_min, USHRT_MAX, - true); tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; @@ -369,17 +371,19 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (!sock) return -ESHUTDOWN; - dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock, &saddr, info, - IPPROTO_UDP, use_cache); + sport = udp_flow_src_port(bareudp->net, skb, + bareudp->sport_min, USHRT_MAX, + true); + dst = udp_tunnel6_dst_lookup(skb, dev, bareudp->net, sock, 0, &saddr, + key, sport, bareudp->port, key->tos, + use_cache ? + (struct dst_cache *) &info->dst_cache : NULL); if (IS_ERR(dst)) return PTR_ERR(dst); skb_tunnel_check_pmtu(skb, dst, BAREUDP_IPV6_HLEN + info->options_len, false); - sport = udp_flow_src_port(bareudp->net, skb, - bareudp->sport_min, USHRT_MAX, - true); prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; @@ -476,15 +480,21 @@ static int bareudp_fill_metadata_dst(struct net_device *dev, struct ip_tunnel_info *info = skb_tunnel_info(skb); struct bareudp_dev *bareudp = netdev_priv(dev); bool use_cache; + __be16 sport; use_cache = ip_tunnel_dst_cache_usable(skb, info); + sport = udp_flow_src_port(bareudp->net, skb, + bareudp->sport_min, USHRT_MAX, + true); if (!ipv6_mod_enabled() || ip_tunnel_info_af(info) == AF_INET) { struct rtable *rt; __be32 saddr; - rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr, - info, IPPROTO_UDP, use_cache); + rt = udp_tunnel_dst_lookup(skb, dev, bareudp->net, 0, &saddr, + &info->key, sport, bareudp->port, + info->key.tos, + use_cache ? &info->dst_cache : NULL); if (IS_ERR(rt)) return PTR_ERR(rt); @@ -495,9 +505,10 @@ static int bareudp_fill_metadata_dst(struct net_device *dev, struct in6_addr saddr; struct socket *sock = rcu_dereference(bareudp->sock); - dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock, - &saddr, info, IPPROTO_UDP, - use_cache); + dst = udp_tunnel6_dst_lookup(skb, dev, bareudp->net, sock, + 0, &saddr, &info->key, + sport, bareudp->port, info->key.tos, + use_cache ? 
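
In both bareudp transmit paths the UDP source port is now derived up front and passed into the new udp_tunnel_dst_lookup()/udp_tunnel6_dst_lookup() helpers, so the route lookup keys on the same flow the packet will actually use. A self-contained sketch of the idea behind udp_flow_src_port(), mapping a per-flow hash into [min, max] so ECMP sees entropy while a given flow keeps a stable port; the hash input and the scaling are illustrative stand-ins, not the kernel's implementation:

	#include <stdint.h>
	#include <stdio.h>

	/* scale a flow hash into [min, max] without modulo bias */
	static uint16_t flow_src_port(uint32_t flow_hash, uint16_t min,
				      uint16_t max)
	{
		return min + (uint16_t)(((uint64_t)flow_hash *
					 (max - min)) >> 32);
	}

	int main(void)
	{
		uint32_t h = 0xdeadbeef;	/* stand-in for skb_get_hash() */

		/* same flow hash -> same source port on every transmit */
		printf("%u %u\n", flow_src_port(h, 32768, 65535),
		       flow_src_port(h, 32768, 65535));
		return 0;
	}
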
&info->dst_cache : NULL); if (IS_ERR(dst)) return PTR_ERR(dst); @@ -507,9 +518,7 @@ static int bareudp_fill_metadata_dst(struct net_device *dev, return -EINVAL; } - info->key.tp_src = udp_flow_src_port(bareudp->net, skb, - bareudp->sport_min, - USHRT_MAX, true); + info->key.tp_src = sport; info->key.tp_dst = bareudp->port; return 0; } diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index fc5da5d7744d..dc2c7b979656 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -668,7 +668,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) dev = ip_dev_find(dev_net(bond->dev), arp->ip_src); if (dev) { - if (netif_is_bridge_master(dev)) { + if (netif_is_any_bridge_master(dev)) { dev_put(dev); return NULL; } diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c index 594094526648..b19492a7f6ad 100644 --- a/drivers/net/bonding/bond_debugfs.c +++ b/drivers/net/bonding/bond_debugfs.c @@ -49,9 +49,6 @@ DEFINE_SHOW_ATTRIBUTE(bond_debug_rlb_hash); void bond_debug_register(struct bonding *bond) { - if (!bonding_debug_root) - return; - bond->debug_dir = debugfs_create_dir(bond->dev->name, bonding_debug_root); @@ -61,9 +58,6 @@ void bond_debug_register(struct bonding *bond) void bond_debug_unregister(struct bonding *bond) { - if (!bonding_debug_root) - return; - debugfs_remove_recursive(bond->debug_dir); } @@ -71,9 +65,6 @@ void bond_debug_reregister(struct bonding *bond) { struct dentry *d; - if (!bonding_debug_root) - return; - d = debugfs_rename(bonding_debug_root, bond->debug_dir, bonding_debug_root, bond->dev->name); if (!IS_ERR(d)) { @@ -84,11 +75,11 @@ void bond_debug_reregister(struct bonding *bond) } } -void bond_create_debugfs(void) +void __init bond_create_debugfs(void) { bonding_debug_root = debugfs_create_dir("bonding", NULL); - if (!bonding_debug_root) + if (IS_ERR(bonding_debug_root)) pr_warn("Warning: Cannot create bonding directory in debugfs\n"); } @@ -113,7 +104,7 @@ void bond_debug_reregister(struct bonding *bond) { } -void bond_create_debugfs(void) +void __init bond_create_debugfs(void) { } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 447b06ea4fc9..8e6cc0e133b7 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -90,6 +90,7 @@ #include <net/tls.h> #endif #include <net/ip6_route.h> +#include <net/xdp.h> #include "bonding_priv.h" @@ -1499,6 +1500,10 @@ done: static void bond_setup_by_slave(struct net_device *bond_dev, struct net_device *slave_dev) { + bool was_up = !!(bond_dev->flags & IFF_UP); + + dev_close(bond_dev); + bond_dev->header_ops = slave_dev->header_ops; bond_dev->type = slave_dev->type; @@ -1513,6 +1518,8 @@ static void bond_setup_by_slave(struct net_device *bond_dev, bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP); } + if (was_up) + dev_open(bond_dev, NULL); } /* On bonding slaves other than the currently active slave, suppress @@ -4022,7 +4029,7 @@ static inline const void *bond_pull_data(struct sk_buff *skb, if (likely(n <= hlen)) return data; else if (skb && likely(pskb_may_pull(skb, n))) - return skb->head; + return skb->data; return NULL; } @@ -4446,11 +4453,6 @@ static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cm { struct bonding *bond = netdev_priv(bond_dev); struct mii_ioctl_data *mii = NULL; - const struct net_device_ops *ops; - struct net_device *real_dev; - struct hwtstamp_config cfg; - struct 
ifreq ifrr; - int res = 0; netdev_dbg(bond_dev, "bond_eth_ioctl: cmd=%d\n", cmd); @@ -4477,44 +4479,11 @@ static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cm } break; - case SIOCSHWTSTAMP: - if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) - return -EFAULT; - - if (!(cfg.flags & HWTSTAMP_FLAG_BONDED_PHC_INDEX)) - return -EOPNOTSUPP; - - fallthrough; - case SIOCGHWTSTAMP: - real_dev = bond_option_active_slave_get_rcu(bond); - if (!real_dev) - return -EOPNOTSUPP; - - strscpy_pad(ifrr.ifr_name, real_dev->name, IFNAMSIZ); - ifrr.ifr_ifru = ifr->ifr_ifru; - - ops = real_dev->netdev_ops; - if (netif_device_present(real_dev) && ops->ndo_eth_ioctl) { - res = ops->ndo_eth_ioctl(real_dev, &ifrr, cmd); - if (res) - return res; - - ifr->ifr_ifru = ifrr.ifr_ifru; - if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) - return -EFAULT; - - /* Set the BOND_PHC_INDEX flag to notify user space */ - cfg.flags |= HWTSTAMP_FLAG_BONDED_PHC_INDEX; - - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? - -EFAULT : 0; - } - fallthrough; default: - res = -EOPNOTSUPP; + return -EOPNOTSUPP; } - return res; + return 0; } static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd) @@ -5083,19 +5052,7 @@ static void bond_set_slave_arr(struct bonding *bond, static void bond_reset_slave_arr(struct bonding *bond) { - struct bond_up_slave *usable, *all; - - usable = rtnl_dereference(bond->usable_slaves); - if (usable) { - RCU_INIT_POINTER(bond->usable_slaves, NULL); - kfree_rcu(usable, rcu); - } - - all = rtnl_dereference(bond->all_slaves); - if (all) { - RCU_INIT_POINTER(bond->all_slaves, NULL); - kfree_rcu(all, rcu); - } + bond_set_slave_arr(bond, NULL, NULL); } /* Build the usable slaves array in control path for modes that use xmit-hash @@ -5688,6 +5645,67 @@ static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed) return speed; } +/* Set the BOND_PHC_INDEX flag to notify user space */ +static int bond_set_phc_index_flag(struct kernel_hwtstamp_config *kernel_cfg) +{ + struct ifreq *ifr = kernel_cfg->ifr; + struct hwtstamp_config cfg; + + if (kernel_cfg->copied_to_user) { + /* Lower device has a legacy implementation */ + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) + return -EFAULT; + + cfg.flags |= HWTSTAMP_FLAG_BONDED_PHC_INDEX; + if (copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg))) + return -EFAULT; + } else { + kernel_cfg->flags |= HWTSTAMP_FLAG_BONDED_PHC_INDEX; + } + + return 0; +} + +static int bond_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *cfg) +{ + struct bonding *bond = netdev_priv(dev); + struct net_device *real_dev; + int err; + + real_dev = bond_option_active_slave_get_rcu(bond); + if (!real_dev) + return -EOPNOTSUPP; + + err = generic_hwtstamp_get_lower(real_dev, cfg); + if (err) + return err; + + return bond_set_phc_index_flag(cfg); +} + +static int bond_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) +{ + struct bonding *bond = netdev_priv(dev); + struct net_device *real_dev; + int err; + + if (!(cfg->flags & HWTSTAMP_FLAG_BONDED_PHC_INDEX)) + return -EOPNOTSUPP; + + real_dev = bond_option_active_slave_get_rcu(bond); + if (!real_dev) + return -EOPNOTSUPP; + + err = generic_hwtstamp_set_lower(real_dev, cfg, extack); + if (err) + return err; + + return bond_set_phc_index_flag(cfg); +} + static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev, struct ethtool_link_ksettings *cmd) { @@ -5706,6 +5724,7 @@ static int 
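
The legacy SIOCSHWTSTAMP/SIOCGHWTSTAMP branch removed above is replaced by the bond_hwtstamp_get()/bond_hwtstamp_set() callbacks added in the same hunk: delegate to the active slave via generic_hwtstamp_get_lower()/generic_hwtstamp_set_lower(), then tag the result so user space knows the PHC is bonded. A userspace sketch of that delegate-then-tag shape (the config type and the lower-device call are simplified stand-ins for kernel_hwtstamp_config and the generic_hwtstamp_*_lower() helpers, and the flag value is illustrative):

	#include <stdio.h>

	#define HWTSTAMP_FLAG_BONDED_PHC_INDEX 0x1	/* value illustrative */

	struct hwtstamp_cfg_example {
		unsigned int flags;
	};

	/* stand-in for generic_hwtstamp_get_lower() on the active slave */
	static int lower_get(struct hwtstamp_cfg_example *cfg)
	{
		cfg->flags = 0;
		return 0;
	}

	static int bond_get_example(struct hwtstamp_cfg_example *cfg)
	{
		int err = lower_get(cfg);

		if (err)
			return err;
		/* tell user space the result came from a bonded PHC */
		cfg->flags |= HWTSTAMP_FLAG_BONDED_PHC_INDEX;
		return 0;
	}

	int main(void)
	{
		struct hwtstamp_cfg_example cfg;
		int ret = bond_get_example(&cfg);

		printf("ret=%d flags=%#x\n", ret, cfg.flags);
		return 0;
	}
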
bond_ethtool_get_link_ksettings(struct net_device *bond_dev, */ bond_for_each_slave(bond, slave, iter) { if (bond_slave_can_tx(slave)) { + bond_update_speed_duplex(slave); if (slave->speed != SPEED_UNKNOWN) { if (BOND_MODE(bond) == BOND_MODE_BROADCAST) speed = bond_mode_bcast_speed(slave, @@ -5836,6 +5855,8 @@ static const struct net_device_ops bond_netdev_ops = { .ndo_bpf = bond_xdp, .ndo_xdp_xmit = bond_xdp_xmit, .ndo_xdp_get_xmit_slave = bond_xdp_get_xmit_slave, + .ndo_hwtstamp_get = bond_hwtstamp_get, + .ndo_hwtstamp_set = bond_hwtstamp_set, }; static const struct device_type bond_type = { @@ -5849,8 +5870,7 @@ static void bond_destructor(struct net_device *bond_dev) if (bond->wq) destroy_workqueue(bond->wq); - if (bond->rr_tx_counter) - free_percpu(bond->rr_tx_counter); + free_percpu(bond->rr_tx_counter); } void bond_setup(struct net_device *bond_dev) @@ -5925,7 +5945,6 @@ void bond_setup(struct net_device *bond_dev) static void bond_uninit(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); - struct bond_up_slave *usable, *all; struct list_head *iter; struct slave *slave; @@ -5936,17 +5955,7 @@ static void bond_uninit(struct net_device *bond_dev) __bond_release_one(bond_dev, slave->dev, true, true); netdev_info(bond_dev, "Released all slaves\n"); - usable = rtnl_dereference(bond->usable_slaves); - if (usable) { - RCU_INIT_POINTER(bond->usable_slaves, NULL); - kfree_rcu(usable, rcu); - } - - all = rtnl_dereference(bond->all_slaves); - if (all) { - RCU_INIT_POINTER(bond->all_slaves, NULL); - kfree_rcu(all, rcu); - } + bond_set_slave_arr(bond, NULL, NULL); list_del(&bond->bond_list); @@ -5955,7 +5964,7 @@ static void bond_uninit(struct net_device *bond_dev) /*------------------------- Module initialization ---------------------------*/ -static int bond_check_params(struct bond_params *params) +static int __init bond_check_params(struct bond_params *params) { int arp_validate_value, fail_over_mac_value, primary_reselect_value, i; struct bond_opt_value newval; diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index 27cbe148f0db..cfa74cf8bb1a 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -85,7 +85,7 @@ nla_put_failure: } /* Limit the max delay range to 300s */ -static struct netlink_range_validation delay_range = { +static const struct netlink_range_validation delay_range = { .max = 300000, }; diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 0bb59da24922..2805135a7205 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -803,7 +803,7 @@ static const struct attribute_group bonding_group = { /* Initialize sysfs. This sets up the bonding_masters file in * /sys/class/net. */ -int bond_create_sysfs(struct bond_net *bn) +int __net_init bond_create_sysfs(struct bond_net *bn) { int ret; @@ -836,7 +836,7 @@ int bond_create_sysfs(struct bond_net *bn) } /* Remove /sys/class/net/bonding_masters. 
*/ -void bond_destroy_sysfs(struct bond_net *bn) +void __net_exit bond_destroy_sysfs(struct bond_net *bn) { netdev_class_remove_file_ns(&bn->class_attr_bonding_masters, bn->net); } diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c index 688075859ae4..ed3a589def6b 100644 --- a/drivers/net/caif/caif_serial.c +++ b/drivers/net/caif/caif_serial.c @@ -159,7 +159,7 @@ static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size) #endif static void ldisc_receive(struct tty_struct *tty, const u8 *data, - const char *flags, int count) + const u8 *flags, size_t count) { struct sk_buff *skb = NULL; struct ser_device *ser; diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index a5c5036dfb94..eb410714afc2 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -89,6 +89,7 @@ config CAN_RX_OFFLOAD config CAN_AT91 tristate "Atmel AT91 onchip CAN controller" depends on (ARCH_AT91 || COMPILE_TEST) && HAS_IOMEM + select CAN_RX_OFFLOAD help This is a driver for the SoC CAN controller in Atmel's AT91SAM9263 and AT91SAM9X5 processors. @@ -160,8 +161,13 @@ config CAN_KVASER_PCIEFD Kvaser PCIEcan 4xHS Kvaser PCIEcan 2xHS v2 Kvaser PCIEcan HS v2 + Kvaser PCIEcan 1xCAN v3 + Kvaser PCIEcan 2xCAN v3 + Kvaser PCIEcan 4xCAN v2 Kvaser Mini PCI Express HS v2 Kvaser Mini PCI Express 2xHS v2 + Kvaser Mini PCI Express 1xCAN v3 + Kvaser Mini PCI Express 2xCAN v3 config CAN_SLCAN tristate "Serial / USB serial CAN Adaptors (slcan)" @@ -185,10 +191,10 @@ config CAN_SLCAN config CAN_SUN4I tristate "Allwinner A10 CAN controller" - depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST + depends on MACH_SUN4I || MACH_SUN7I || (RISCV && ARCH_SUNXI) || COMPILE_TEST help Say Y here if you want to use CAN controller found on Allwinner - A10/A20 SoCs. + A10/A20/D1 SoCs. To compile this driver as a module, choose M here: the module will be called sun4i_can. diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 4621266851ed..11f434d708b3 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -3,9 +3,10 @@ * at91_can.c - CAN network driver for AT91 SoC CAN controller * * (C) 2007 by Hans J. 
Koch <hjk@hansjkoch.de> - * (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde <kernel@pengutronix.de> + * (C) 2008, 2009, 2010, 2011, 2023 by Marc Kleine-Budde <kernel@pengutronix.de> */ +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/errno.h> #include <linux/ethtool.h> @@ -15,6 +16,7 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> +#include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> @@ -24,90 +26,115 @@ #include <linux/can/dev.h> #include <linux/can/error.h> +#include <linux/can/rx-offload.h> -#define AT91_MB_MASK(i) ((1 << (i)) - 1) +#define AT91_MB_MASK(i) ((1 << (i)) - 1) /* Common registers */ enum at91_reg { - AT91_MR = 0x000, - AT91_IER = 0x004, - AT91_IDR = 0x008, - AT91_IMR = 0x00C, - AT91_SR = 0x010, - AT91_BR = 0x014, - AT91_TIM = 0x018, - AT91_TIMESTP = 0x01C, - AT91_ECR = 0x020, - AT91_TCR = 0x024, - AT91_ACR = 0x028, + AT91_MR = 0x000, + AT91_IER = 0x004, + AT91_IDR = 0x008, + AT91_IMR = 0x00C, + AT91_SR = 0x010, + AT91_BR = 0x014, + AT91_TIM = 0x018, + AT91_TIMESTP = 0x01C, + AT91_ECR = 0x020, + AT91_TCR = 0x024, + AT91_ACR = 0x028, }; /* Mailbox registers (0 <= i <= 15) */ -#define AT91_MMR(i) ((enum at91_reg)(0x200 + ((i) * 0x20))) -#define AT91_MAM(i) ((enum at91_reg)(0x204 + ((i) * 0x20))) -#define AT91_MID(i) ((enum at91_reg)(0x208 + ((i) * 0x20))) -#define AT91_MFID(i) ((enum at91_reg)(0x20C + ((i) * 0x20))) -#define AT91_MSR(i) ((enum at91_reg)(0x210 + ((i) * 0x20))) -#define AT91_MDL(i) ((enum at91_reg)(0x214 + ((i) * 0x20))) -#define AT91_MDH(i) ((enum at91_reg)(0x218 + ((i) * 0x20))) -#define AT91_MCR(i) ((enum at91_reg)(0x21C + ((i) * 0x20))) +#define AT91_MMR(i) ((enum at91_reg)(0x200 + ((i) * 0x20))) +#define AT91_MAM(i) ((enum at91_reg)(0x204 + ((i) * 0x20))) +#define AT91_MID(i) ((enum at91_reg)(0x208 + ((i) * 0x20))) +#define AT91_MFID(i) ((enum at91_reg)(0x20C + ((i) * 0x20))) +#define AT91_MSR(i) ((enum at91_reg)(0x210 + ((i) * 0x20))) +#define AT91_MDL(i) ((enum at91_reg)(0x214 + ((i) * 0x20))) +#define AT91_MDH(i) ((enum at91_reg)(0x218 + ((i) * 0x20))) +#define AT91_MCR(i) ((enum at91_reg)(0x21C + ((i) * 0x20))) /* Register bits */ -#define AT91_MR_CANEN BIT(0) -#define AT91_MR_LPM BIT(1) -#define AT91_MR_ABM BIT(2) -#define AT91_MR_OVL BIT(3) -#define AT91_MR_TEOF BIT(4) -#define AT91_MR_TTM BIT(5) -#define AT91_MR_TIMFRZ BIT(6) -#define AT91_MR_DRPT BIT(7) - -#define AT91_SR_RBSY BIT(29) - -#define AT91_MMR_PRIO_SHIFT (16) - -#define AT91_MID_MIDE BIT(29) - -#define AT91_MSR_MRTR BIT(20) -#define AT91_MSR_MABT BIT(22) -#define AT91_MSR_MRDY BIT(23) -#define AT91_MSR_MMI BIT(24) - -#define AT91_MCR_MRTR BIT(20) -#define AT91_MCR_MTCR BIT(23) +#define AT91_MR_CANEN BIT(0) +#define AT91_MR_LPM BIT(1) +#define AT91_MR_ABM BIT(2) +#define AT91_MR_OVL BIT(3) +#define AT91_MR_TEOF BIT(4) +#define AT91_MR_TTM BIT(5) +#define AT91_MR_TIMFRZ BIT(6) +#define AT91_MR_DRPT BIT(7) + +#define AT91_SR_RBSY BIT(29) +#define AT91_SR_TBSY BIT(30) +#define AT91_SR_OVLSY BIT(31) + +#define AT91_BR_PHASE2_MASK GENMASK(2, 0) +#define AT91_BR_PHASE1_MASK GENMASK(6, 4) +#define AT91_BR_PROPAG_MASK GENMASK(10, 8) +#define AT91_BR_SJW_MASK GENMASK(13, 12) +#define AT91_BR_BRP_MASK GENMASK(22, 16) +#define AT91_BR_SMP BIT(24) + +#define AT91_TIM_TIMER_MASK GENMASK(15, 0) + +#define AT91_ECR_REC_MASK GENMASK(8, 0) +#define AT91_ECR_TEC_MASK GENMASK(23, 16) + +#define AT91_TCR_TIMRST BIT(31) + +#define AT91_MMR_MTIMEMARK_MASK GENMASK(15, 0) +#define AT91_MMR_PRIOR_MASK 
GENMASK(19, 16) +#define AT91_MMR_MOT_MASK GENMASK(26, 24) + +#define AT91_MID_MIDVB_MASK GENMASK(17, 0) +#define AT91_MID_MIDVA_MASK GENMASK(28, 18) +#define AT91_MID_MIDE BIT(29) + +#define AT91_MSR_MTIMESTAMP_MASK GENMASK(15, 0) +#define AT91_MSR_MDLC_MASK GENMASK(19, 16) +#define AT91_MSR_MRTR BIT(20) +#define AT91_MSR_MABT BIT(22) +#define AT91_MSR_MRDY BIT(23) +#define AT91_MSR_MMI BIT(24) + +#define AT91_MCR_MDLC_MASK GENMASK(19, 16) +#define AT91_MCR_MRTR BIT(20) +#define AT91_MCR_MACR BIT(22) +#define AT91_MCR_MTCR BIT(23) /* Mailbox Modes */ enum at91_mb_mode { - AT91_MB_MODE_DISABLED = 0, - AT91_MB_MODE_RX = 1, - AT91_MB_MODE_RX_OVRWR = 2, - AT91_MB_MODE_TX = 3, - AT91_MB_MODE_CONSUMER = 4, - AT91_MB_MODE_PRODUCER = 5, + AT91_MB_MODE_DISABLED = 0, + AT91_MB_MODE_RX = 1, + AT91_MB_MODE_RX_OVRWR = 2, + AT91_MB_MODE_TX = 3, + AT91_MB_MODE_CONSUMER = 4, + AT91_MB_MODE_PRODUCER = 5, }; /* Interrupt mask bits */ -#define AT91_IRQ_ERRA BIT(16) -#define AT91_IRQ_WARN BIT(17) -#define AT91_IRQ_ERRP BIT(18) -#define AT91_IRQ_BOFF BIT(19) -#define AT91_IRQ_SLEEP BIT(20) -#define AT91_IRQ_WAKEUP BIT(21) -#define AT91_IRQ_TOVF BIT(22) -#define AT91_IRQ_TSTP BIT(23) -#define AT91_IRQ_CERR BIT(24) -#define AT91_IRQ_SERR BIT(25) -#define AT91_IRQ_AERR BIT(26) -#define AT91_IRQ_FERR BIT(27) -#define AT91_IRQ_BERR BIT(28) - -#define AT91_IRQ_ERR_ALL (0x1fff0000) -#define AT91_IRQ_ERR_FRAME (AT91_IRQ_CERR | AT91_IRQ_SERR | \ - AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR) -#define AT91_IRQ_ERR_LINE (AT91_IRQ_ERRA | AT91_IRQ_WARN | \ - AT91_IRQ_ERRP | AT91_IRQ_BOFF) - -#define AT91_IRQ_ALL (0x1fffffff) +#define AT91_IRQ_ERRA BIT(16) +#define AT91_IRQ_WARN BIT(17) +#define AT91_IRQ_ERRP BIT(18) +#define AT91_IRQ_BOFF BIT(19) +#define AT91_IRQ_SLEEP BIT(20) +#define AT91_IRQ_WAKEUP BIT(21) +#define AT91_IRQ_TOVF BIT(22) +#define AT91_IRQ_TSTP BIT(23) +#define AT91_IRQ_CERR BIT(24) +#define AT91_IRQ_SERR BIT(25) +#define AT91_IRQ_AERR BIT(26) +#define AT91_IRQ_FERR BIT(27) +#define AT91_IRQ_BERR BIT(28) + +#define AT91_IRQ_ERR_ALL (0x1fff0000) +#define AT91_IRQ_ERR_FRAME (AT91_IRQ_CERR | AT91_IRQ_SERR | \ + AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR) +#define AT91_IRQ_ERR_LINE (AT91_IRQ_ERRA | AT91_IRQ_WARN | \ + AT91_IRQ_ERRP | AT91_IRQ_BOFF) + +#define AT91_IRQ_ALL (0x1fffffff) enum at91_devtype { AT91_DEVTYPE_SAM9263, @@ -116,7 +143,6 @@ enum at91_devtype { struct at91_devtype_data { unsigned int rx_first; - unsigned int rx_split; unsigned int rx_last; unsigned int tx_shift; enum at91_devtype type; @@ -124,14 +150,13 @@ struct at91_devtype_data { struct at91_priv { struct can_priv can; /* must be the first member! 
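
The register-bit rewrite in this file replaces open-coded shifts with GENMASK() field masks consumed through FIELD_PREP()/FIELD_GET(), as the converted at91_get_berr_counter() further down shows. A userspace double of those linux/bitfield.h helpers, simplified (the kernel versions add compile-time sanity checks), exercising the error-counter masks from this patch:

	#include <stdint.h>
	#include <stdio.h>

	/* simplified doubles of the linux/bitfield.h macros; __builtin_ctz
	 * (GCC/Clang) finds the field's low bit from its mask */
	#define GENMASK(h, l)    (((~0u) << (l)) & (~0u >> (31 - (h))))
	#define FIELD_PREP(m, v) (((uint32_t)(v) << __builtin_ctz(m)) & (m))
	#define FIELD_GET(m, r)  (((uint32_t)(r) & (m)) >> __builtin_ctz(m))

	#define AT91_ECR_REC_MASK GENMASK(8, 0)		/* rx error counter */
	#define AT91_ECR_TEC_MASK GENMASK(23, 16)	/* tx error counter */

	int main(void)
	{
		/* pack two fields into one register value, then unpack */
		uint32_t reg_ecr = FIELD_PREP(AT91_ECR_TEC_MASK, 42) |
				   FIELD_PREP(AT91_ECR_REC_MASK, 7);

		printf("tec=%u rec=%u\n",
		       FIELD_GET(AT91_ECR_TEC_MASK, reg_ecr),
		       FIELD_GET(AT91_ECR_REC_MASK, reg_ecr));
		return 0;
	}
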
*/ - struct napi_struct napi; + struct can_rx_offload offload; + struct phy *transceiver; void __iomem *reg_base; - u32 reg_sr; - unsigned int tx_next; - unsigned int tx_echo; - unsigned int rx_next; + unsigned int tx_head; + unsigned int tx_tail; struct at91_devtype_data devtype_data; struct clk *clk; @@ -140,9 +165,13 @@ struct at91_priv { canid_t mb0_id; }; +static inline struct at91_priv *rx_offload_to_priv(struct can_rx_offload *offload) +{ + return container_of(offload, struct at91_priv, offload); +} + static const struct at91_devtype_data at91_at91sam9263_data = { .rx_first = 1, - .rx_split = 8, .rx_last = 11, .tx_shift = 2, .type = AT91_DEVTYPE_SAM9263, @@ -150,7 +179,6 @@ static const struct at91_devtype_data at91_at91sam9263_data = { static const struct at91_devtype_data at91_at91sam9x5_data = { .rx_first = 0, - .rx_split = 4, .rx_last = 5, .tx_shift = 1, .type = AT91_DEVTYPE_SAM9X5, @@ -187,27 +215,6 @@ static inline unsigned int get_mb_rx_last(const struct at91_priv *priv) return priv->devtype_data.rx_last; } -static inline unsigned int get_mb_rx_split(const struct at91_priv *priv) -{ - return priv->devtype_data.rx_split; -} - -static inline unsigned int get_mb_rx_num(const struct at91_priv *priv) -{ - return get_mb_rx_last(priv) - get_mb_rx_first(priv) + 1; -} - -static inline unsigned int get_mb_rx_low_last(const struct at91_priv *priv) -{ - return get_mb_rx_split(priv) - 1; -} - -static inline unsigned int get_mb_rx_low_mask(const struct at91_priv *priv) -{ - return AT91_MB_MASK(get_mb_rx_split(priv)) & - ~AT91_MB_MASK(get_mb_rx_first(priv)); -} - static inline unsigned int get_mb_tx_shift(const struct at91_priv *priv) { return priv->devtype_data.tx_shift; @@ -228,24 +235,24 @@ static inline unsigned int get_mb_tx_last(const struct at91_priv *priv) return get_mb_tx_first(priv) + get_mb_tx_num(priv) - 1; } -static inline unsigned int get_next_prio_shift(const struct at91_priv *priv) +static inline unsigned int get_head_prio_shift(const struct at91_priv *priv) { return get_mb_tx_shift(priv); } -static inline unsigned int get_next_prio_mask(const struct at91_priv *priv) +static inline unsigned int get_head_prio_mask(const struct at91_priv *priv) { return 0xf << get_mb_tx_shift(priv); } -static inline unsigned int get_next_mb_mask(const struct at91_priv *priv) +static inline unsigned int get_head_mb_mask(const struct at91_priv *priv) { return AT91_MB_MASK(get_mb_tx_shift(priv)); } -static inline unsigned int get_next_mask(const struct at91_priv *priv) +static inline unsigned int get_head_mask(const struct at91_priv *priv) { - return get_next_mb_mask(priv) | get_next_prio_mask(priv); + return get_head_mb_mask(priv) | get_head_prio_mask(priv); } static inline unsigned int get_irq_mb_rx(const struct at91_priv *priv) @@ -260,19 +267,19 @@ static inline unsigned int get_irq_mb_tx(const struct at91_priv *priv) ~AT91_MB_MASK(get_mb_tx_first(priv)); } -static inline unsigned int get_tx_next_mb(const struct at91_priv *priv) +static inline unsigned int get_tx_head_mb(const struct at91_priv *priv) { - return (priv->tx_next & get_next_mb_mask(priv)) + get_mb_tx_first(priv); + return (priv->tx_head & get_head_mb_mask(priv)) + get_mb_tx_first(priv); } -static inline unsigned int get_tx_next_prio(const struct at91_priv *priv) +static inline unsigned int get_tx_head_prio(const struct at91_priv *priv) { - return (priv->tx_next >> get_next_prio_shift(priv)) & 0xf; + return (priv->tx_head >> get_head_prio_shift(priv)) & 0xf; } -static inline unsigned int get_tx_echo_mb(const struct at91_priv 
*priv) +static inline unsigned int get_tx_tail_mb(const struct at91_priv *priv) { - return (priv->tx_echo & get_next_mb_mask(priv)) + get_mb_tx_first(priv); + return (priv->tx_tail & get_head_mb_mask(priv)) + get_mb_tx_first(priv); } static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg) @@ -288,9 +295,12 @@ static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg, static inline void set_mb_mode_prio(const struct at91_priv *priv, unsigned int mb, enum at91_mb_mode mode, - int prio) + u8 prio) { - at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16)); + const u32 reg_mmr = FIELD_PREP(AT91_MMR_MOT_MASK, mode) | + FIELD_PREP(AT91_MMR_PRIOR_MASK, prio); + + at91_write(priv, AT91_MMR(mb), reg_mmr); } static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb, @@ -304,9 +314,10 @@ static inline u32 at91_can_id_to_reg_mid(canid_t can_id) u32 reg_mid; if (can_id & CAN_EFF_FLAG) - reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE; + reg_mid = FIELD_PREP(AT91_MID_MIDVA_MASK | AT91_MID_MIDVB_MASK, can_id) | + AT91_MID_MIDE; else - reg_mid = (can_id & CAN_SFF_MASK) << 18; + reg_mid = FIELD_PREP(AT91_MID_MIDVA_MASK, can_id); return reg_mid; } @@ -318,8 +329,8 @@ static void at91_setup_mailboxes(struct net_device *dev) u32 reg_mid; /* Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first - * mailbox is disabled. The next 11 mailboxes are used as a - * reception FIFO. The last mailbox is configured with + * mailbox is disabled. The next mailboxes are used as a + * reception FIFO. The last of the RX mailboxes is configured with * overwrite option. The overwrite flag indicates a FIFO * overflow. */ @@ -340,27 +351,30 @@ static void at91_setup_mailboxes(struct net_device *dev) at91_write(priv, AT91_MID(i), AT91_MID_MIDE); } - /* The last 4 mailboxes are used for transmitting. */ + /* The last mailboxes are used for transmitting. */ for (i = get_mb_tx_first(priv); i <= get_mb_tx_last(priv); i++) set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0); - /* Reset tx and rx helper pointers */ - priv->tx_next = priv->tx_echo = 0; - priv->rx_next = get_mb_rx_first(priv); + /* Reset tx helper pointers */ + priv->tx_head = priv->tx_tail = 0; } static int at91_set_bittiming(struct net_device *dev) { const struct at91_priv *priv = netdev_priv(dev); const struct can_bittiming *bt = &priv->can.bittiming; - u32 reg_br; + u32 reg_br = 0; + + if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) + reg_br |= AT91_BR_SMP; - reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 
1 << 24 : 0) | - ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) | - ((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) | - ((bt->phase_seg2 - 1) << 0); + reg_br |= FIELD_PREP(AT91_BR_BRP_MASK, bt->brp - 1) | + FIELD_PREP(AT91_BR_SJW_MASK, bt->sjw - 1) | + FIELD_PREP(AT91_BR_PROPAG_MASK, bt->prop_seg - 1) | + FIELD_PREP(AT91_BR_PHASE1_MASK, bt->phase_seg1 - 1) | + FIELD_PREP(AT91_BR_PHASE2_MASK, bt->phase_seg2 - 1); - netdev_info(dev, "writing AT91_BR: 0x%08x\n", reg_br); + netdev_dbg(dev, "writing AT91_BR: 0x%08x\n", reg_br); at91_write(priv, AT91_BR, reg_br); @@ -373,8 +387,8 @@ static int at91_get_berr_counter(const struct net_device *dev, const struct at91_priv *priv = netdev_priv(dev); u32 reg_ecr = at91_read(priv, AT91_ECR); - bec->rxerr = reg_ecr & 0xff; - bec->txerr = reg_ecr >> 16; + bec->rxerr = FIELD_GET(AT91_ECR_REC_MASK, reg_ecr); + bec->txerr = FIELD_GET(AT91_ECR_TEC_MASK, reg_ecr); return 0; } @@ -403,9 +417,13 @@ static void at91_chip_start(struct net_device *dev) priv->can.state = CAN_STATE_ERROR_ACTIVE; + /* Dummy read to clear latched line error interrupts on + * sam9x5 and newer SoCs. + */ + at91_read(priv, AT91_SR); + /* Enable interrupts */ - reg_ier = get_irq_mb_rx(priv) | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME; - at91_write(priv, AT91_IDR, AT91_IRQ_ALL); + reg_ier = get_irq_mb_rx(priv) | AT91_IRQ_ERR_LINE | AT91_IRQ_ERR_FRAME; at91_write(priv, AT91_IER, reg_ier); } @@ -414,6 +432,11 @@ static void at91_chip_stop(struct net_device *dev, enum can_state state) struct at91_priv *priv = netdev_priv(dev); u32 reg_mr; + /* Abort any pending TX requests. However this doesn't seem to + * work in case of bus-off on sama5d3. + */ + at91_write(priv, AT91_ACR, get_irq_mb_tx(priv)); + /* disable interrupts */ at91_write(priv, AT91_IDR, AT91_IRQ_ALL); @@ -437,11 +460,11 @@ static void at91_chip_stop(struct net_device *dev, enum can_state state) * stop sending, waiting for all messages to be delivered, then start * again with mailbox AT91_MB_TX_FIRST prio 0. * - * We use the priv->tx_next as counter for the next transmission + * We use the priv->tx_head as counter for the next transmission * mailbox, but without the offset AT91_MB_TX_FIRST. The lower bits * encode the mailbox number, the upper 4 bits the mailbox priority: * - * priv->tx_next = (prio << get_next_prio_shift(priv)) | + * priv->tx_head = (prio << get_next_prio_shift(priv)) | * (mb - get_mb_tx_first(priv)); * */ @@ -455,8 +478,8 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; - mb = get_tx_next_mb(priv); - prio = get_tx_next_prio(priv); + mb = get_tx_head_mb(priv); + prio = get_tx_head_prio(priv); if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) { netif_stop_queue(dev); @@ -465,8 +488,12 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } reg_mid = at91_can_id_to_reg_mid(cf->can_id); - reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) | - (cf->len << 16) | AT91_MCR_MTCR; + + reg_mcr = FIELD_PREP(AT91_MCR_MDLC_MASK, cf->len) | + AT91_MCR_MTCR; + + if (cf->can_id & CAN_RTR_FLAG) + reg_mcr |= AT91_MCR_MRTR; /* disable MB while writing ID (see datasheet) */ set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED); @@ -484,15 +511,15 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) /* we have to stop the queue and deliver all messages in case * of a prio+mb counter wrap around. 
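
The head counter this comment describes packs the TX mailbox offset into its low bits and the 4-bit hardware priority above them; tx_shift is 2 on sam9263 and 1 on sam9x5, per the devtype data earlier in the file. A small walk-through of the encoding behind get_tx_head_mb()/get_tx_head_prio(), using the sam9263 value:

	#include <stdio.h>

	#define TX_SHIFT 2			/* sam9263: 1 << 2 = 4 TX mailboxes */
	#define MB_MASK  ((1u << TX_SHIFT) - 1)

	int main(void)
	{
		unsigned int tx_head;

		for (tx_head = 0; tx_head < 6; tx_head++)
			printf("tx_head=%u -> mb offset=%u prio=%u\n", tx_head,
			       tx_head & MB_MASK,		/* get_tx_head_mb() */
			       (tx_head >> TX_SHIFT) & 0xf);	/* get_tx_head_prio() */
		return 0;
	}
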
This is the case if - * tx_next buffer prio and mailbox equals 0. + * tx_head buffer prio and mailbox equals 0. * * also stop the queue if next buffer is still in use * (== not ready) */ - priv->tx_next++; - if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) & + priv->tx_head++; + if (!(at91_read(priv, AT91_MSR(get_tx_head_mb(priv))) & AT91_MSR_MRDY) || - (priv->tx_next & get_next_mask(priv)) == 0) + (priv->tx_head & get_head_mask(priv)) == 0) netif_stop_queue(dev); /* Enable interrupt for this mailbox */ @@ -501,32 +528,20 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -/** - * at91_activate_rx_low - activate lower rx mailboxes - * @priv: a91 context - * - * Reenables the lower mailboxes for reception of new CAN messages - */ -static inline void at91_activate_rx_low(const struct at91_priv *priv) +static inline u32 at91_get_timestamp(const struct at91_priv *priv) { - u32 mask = get_mb_rx_low_mask(priv); - - at91_write(priv, AT91_TCR, mask); + return at91_read(priv, AT91_TIM); } -/** - * at91_activate_rx_mb - reactive single rx mailbox - * @priv: a91 context - * @mb: mailbox to reactivate - * - * Reenables given mailbox for reception of new CAN messages - */ -static inline void at91_activate_rx_mb(const struct at91_priv *priv, - unsigned int mb) +static inline struct sk_buff * +at91_alloc_can_err_skb(struct net_device *dev, + struct can_frame **cf, u32 *timestamp) { - u32 mask = 1 << mb; + const struct at91_priv *priv = netdev_priv(dev); - at91_write(priv, AT91_TCR, mask); + *timestamp = at91_get_timestamp(priv); + + return alloc_can_err_skb(dev, cf); } /** @@ -537,45 +552,71 @@ static void at91_rx_overflow_err(struct net_device *dev) { struct net_device_stats *stats = &dev->stats; struct sk_buff *skb; + struct at91_priv *priv = netdev_priv(dev); struct can_frame *cf; + u32 timestamp; + int err; netdev_dbg(dev, "RX buffer overflow\n"); stats->rx_over_errors++; stats->rx_errors++; - skb = alloc_can_err_skb(dev, &cf); + skb = at91_alloc_can_err_skb(dev, &cf, ×tamp); if (unlikely(!skb)) return; cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - netif_receive_skb(skb); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + if (err) + stats->rx_fifo_errors++; } /** - * at91_read_mb - read CAN msg from mailbox (lowlevel impl) - * @dev: net device + * at91_mailbox_read - read CAN msg from mailbox + * @offload: rx-offload * @mb: mailbox number to read from - * @cf: can frame where to store message + * @timestamp: pointer to 32 bit timestamp + * @drop: true indicated mailbox to mark as read and drop frame * - * Reads a CAN message from the given mailbox and stores data into - * given can frame. "mb" and "cf" must be valid. + * Reads a CAN message from the given mailbox if not empty. 
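
The new callback's contract, restated: return NULL when the mailbox is empty, an ERR_PTR() when the mailbox must be consumed without delivering a frame (drop requested, or skb allocation failure), and a real skb otherwise; in every non-NULL case the mailbox is marked as read. A userspace sketch of that three-way return, with ERR_PTR()/IS_ERR() re-implemented here purely for the demo:

	#include <errno.h>
	#include <stdio.h>

	static inline void *ERR_PTR(long err) { return (void *)err; }
	static inline int IS_ERR(const void *p)
	{
		return (unsigned long)p >= (unsigned long)-4095;
	}

	static void *mailbox_read(int ready, int drop)
	{
		static char frame[8];

		if (!ready)
			return NULL;		/* empty, mailbox stays armed */
		if (drop)
			return ERR_PTR(-ENOBUFS); /* consume it, count a drop */
		return frame;			/* hand frame to the caller */
	}

	int main(void)
	{
		printf("empty: %p\n", mailbox_read(0, 0));
		printf("drop is error: %d\n", IS_ERR(mailbox_read(1, 1)));
		printf("frame: %p\n", mailbox_read(1, 0));
		return 0;
	}
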
*/ -static void at91_read_mb(struct net_device *dev, unsigned int mb, - struct can_frame *cf) +static struct sk_buff *at91_mailbox_read(struct can_rx_offload *offload, + unsigned int mb, u32 *timestamp, + bool drop) { - const struct at91_priv *priv = netdev_priv(dev); + const struct at91_priv *priv = rx_offload_to_priv(offload); + struct can_frame *cf; + struct sk_buff *skb; u32 reg_msr, reg_mid; + reg_msr = at91_read(priv, AT91_MSR(mb)); + if (!(reg_msr & AT91_MSR_MRDY)) + return NULL; + + if (unlikely(drop)) { + skb = ERR_PTR(-ENOBUFS); + goto mark_as_read; + } + + skb = alloc_can_skb(offload->dev, &cf); + if (unlikely(!skb)) { + skb = ERR_PTR(-ENOMEM); + goto mark_as_read; + } + reg_mid = at91_read(priv, AT91_MID(mb)); if (reg_mid & AT91_MID_MIDE) - cf->can_id = ((reg_mid >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG; + cf->can_id = FIELD_GET(AT91_MID_MIDVA_MASK | AT91_MID_MIDVB_MASK, reg_mid) | + CAN_EFF_FLAG; else - cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK; + cf->can_id = FIELD_GET(AT91_MID_MIDVA_MASK, reg_mid); - reg_msr = at91_read(priv, AT91_MSR(mb)); - cf->len = can_cc_dlc2len((reg_msr >> 16) & 0xf); + /* extend timestamp to full 32 bit */ + *timestamp = FIELD_GET(AT91_MSR_MTIMESTAMP_MASK, reg_msr) << 16; + + cf->len = can_cc_dlc2len(FIELD_GET(AT91_MSR_MDLC_MASK, reg_msr)); if (reg_msr & AT91_MSR_MRTR) { cf->can_id |= CAN_RTR_FLAG; @@ -588,234 +629,21 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb, at91_write(priv, AT91_MID(mb), AT91_MID_MIDE); if (unlikely(mb == get_mb_rx_last(priv) && reg_msr & AT91_MSR_MMI)) - at91_rx_overflow_err(dev); -} - -/** - * at91_read_msg - read CAN message from mailbox - * @dev: net device - * @mb: mail box to read from - * - * Reads a CAN message from given mailbox, and put into linux network - * RX queue, does all housekeeping chores (stats, ...) - */ -static void at91_read_msg(struct net_device *dev, unsigned int mb) -{ - struct net_device_stats *stats = &dev->stats; - struct can_frame *cf; - struct sk_buff *skb; - - skb = alloc_can_skb(dev, &cf); - if (unlikely(!skb)) { - stats->rx_dropped++; - return; - } - - at91_read_mb(dev, mb, cf); - - stats->rx_packets++; - if (!(cf->can_id & CAN_RTR_FLAG)) - stats->rx_bytes += cf->len; - - netif_receive_skb(skb); -} - -/** - * at91_poll_rx - read multiple CAN messages from mailboxes - * @dev: net device - * @quota: max number of pkgs we're allowed to receive - * - * Theory of Operation: - * - * About 3/4 of the mailboxes (get_mb_rx_first()...get_mb_rx_last()) - * on the chip are reserved for RX. We split them into 2 groups. The - * lower group ranges from get_mb_rx_first() to get_mb_rx_low_last(). - * - * Like it or not, but the chip always saves a received CAN message - * into the first free mailbox it finds (starting with the - * lowest). This makes it very difficult to read the messages in the - * right order from the chip. This is how we work around that problem: - * - * The first message goes into mb nr. 1 and issues an interrupt. All - * rx ints are disabled in the interrupt handler and a napi poll is - * scheduled. We read the mailbox, but do _not_ re-enable the mb (to - * receive another message). - * - * lower mbxs upper - * ____^______ __^__ - * / \ / \ - * +-+-+-+-+-+-+-+-++-+-+-+-+ - * | |x|x|x|x|x|x|x|| | | | | - * +-+-+-+-+-+-+-+-++-+-+-+-+ - * 0 0 0 0 0 0 0 0 0 0 1 1 \ mail - * 0 1 2 3 4 5 6 7 8 9 0 1 / box - * ^ - * | - * \ - * unused, due to chip bug - * - * The variable priv->rx_next points to the next mailbox to read a - * message from. 
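at91_mailbox_read() above widens the 16-bit mailbox timestamp into the upper half of a 32-bit value so the rx-offload core can order frames from different mailboxes. With free-running counters the usual comparison is a signed difference, which stays correct across wraparound; a sketch of that idiom (not code from the driver):

```c
#include <stdint.h>
#include <stdio.h>

static int ts_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;	/* true if a is newer than b */
}

int main(void)
{
	uint32_t before = 0xffff0000u;	/* shortly before wraparound */
	uint32_t after  = 0x00010000u;	/* shortly after */

	printf("%d\n", ts_after(after, before));	/* prints 1 */
	return 0;
}
```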
As long we're in the lower mailboxes we just read the - * mailbox but not re-enable it. - * - * With completion of the last of the lower mailboxes, we re-enable the - * whole first group, but continue to look for filled mailboxes in the - * upper mailboxes. Imagine the second group like overflow mailboxes, - * which takes CAN messages if the lower goup is full. While in the - * upper group we re-enable the mailbox right after reading it. Giving - * the chip more room to store messages. - * - * After finishing we look again in the lower group if we've still - * quota. - * - */ -static int at91_poll_rx(struct net_device *dev, int quota) -{ - struct at91_priv *priv = netdev_priv(dev); - u32 reg_sr = at91_read(priv, AT91_SR); - const unsigned long *addr = (unsigned long *)®_sr; - unsigned int mb; - int received = 0; - - if (priv->rx_next > get_mb_rx_low_last(priv) && - reg_sr & get_mb_rx_low_mask(priv)) - netdev_info(dev, - "order of incoming frames cannot be guaranteed\n"); - - again: - for (mb = find_next_bit(addr, get_mb_tx_first(priv), priv->rx_next); - mb < get_mb_tx_first(priv) && quota > 0; - reg_sr = at91_read(priv, AT91_SR), - mb = find_next_bit(addr, get_mb_tx_first(priv), ++priv->rx_next)) { - at91_read_msg(dev, mb); - - /* reactivate mailboxes */ - if (mb == get_mb_rx_low_last(priv)) - /* all lower mailboxed, if just finished it */ - at91_activate_rx_low(priv); - else if (mb > get_mb_rx_low_last(priv)) - /* only the mailbox we read */ - at91_activate_rx_mb(priv, mb); - - received++; - quota--; - } - - /* upper group completed, look again in lower */ - if (priv->rx_next > get_mb_rx_low_last(priv) && - mb > get_mb_rx_last(priv)) { - priv->rx_next = get_mb_rx_first(priv); - if (quota > 0) - goto again; - } - - return received; -} - -static void at91_poll_err_frame(struct net_device *dev, - struct can_frame *cf, u32 reg_sr) -{ - struct at91_priv *priv = netdev_priv(dev); - - /* CRC error */ - if (reg_sr & AT91_IRQ_CERR) { - netdev_dbg(dev, "CERR irq\n"); - dev->stats.rx_errors++; - priv->can.can_stats.bus_error++; - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - } - - /* Stuffing Error */ - if (reg_sr & AT91_IRQ_SERR) { - netdev_dbg(dev, "SERR irq\n"); - dev->stats.rx_errors++; - priv->can.can_stats.bus_error++; - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - cf->data[2] |= CAN_ERR_PROT_STUFF; - } - - /* Acknowledgement Error */ - if (reg_sr & AT91_IRQ_AERR) { - netdev_dbg(dev, "AERR irq\n"); - dev->stats.tx_errors++; - cf->can_id |= CAN_ERR_ACK; - } - - /* Form error */ - if (reg_sr & AT91_IRQ_FERR) { - netdev_dbg(dev, "FERR irq\n"); - dev->stats.rx_errors++; - priv->can.can_stats.bus_error++; - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - cf->data[2] |= CAN_ERR_PROT_FORM; - } - - /* Bit Error */ - if (reg_sr & AT91_IRQ_BERR) { - netdev_dbg(dev, "BERR irq\n"); - dev->stats.tx_errors++; - priv->can.can_stats.bus_error++; - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - cf->data[2] |= CAN_ERR_PROT_BIT; - } -} - -static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr) -{ - struct sk_buff *skb; - struct can_frame *cf; - - if (quota == 0) - return 0; - - skb = alloc_can_err_skb(dev, &cf); - if (unlikely(!skb)) - return 0; - - at91_poll_err_frame(dev, cf, reg_sr); - - netif_receive_skb(skb); - - return 1; -} - -static int at91_poll(struct napi_struct *napi, int quota) -{ - struct net_device *dev = napi->dev; - const struct at91_priv *priv = netdev_priv(dev); - u32 reg_sr = at91_read(priv, AT91_SR); - int work_done = 0; - - if (reg_sr & get_irq_mb_rx(priv)) - 
work_done += at91_poll_rx(dev, quota - work_done); - - /* The error bits are clear on read, - * so use saved value from irq handler. - */ - reg_sr |= priv->reg_sr; - if (reg_sr & AT91_IRQ_ERR_FRAME) - work_done += at91_poll_err(dev, quota - work_done, reg_sr); - - if (work_done < quota) { - /* enable IRQs for frame errors and all mailboxes >= rx_next */ - u32 reg_ier = AT91_IRQ_ERR_FRAME; + at91_rx_overflow_err(offload->dev); - reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next); + mark_as_read: + at91_write(priv, AT91_MCR(mb), AT91_MCR_MTCR); - napi_complete_done(napi, work_done); - at91_write(priv, AT91_IER, reg_ier); - } - - return work_done; + return skb; } /* theory of operation: * - * priv->tx_echo holds the number of the oldest can_frame put for + * priv->tx_tail holds the number of the oldest can_frame put for * transmission into the hardware, but not yet ACKed by the CAN tx * complete IRQ. * - * We iterate from priv->tx_echo to priv->tx_next and check if the + * We iterate from priv->tx_tail to priv->tx_head and check if the * packet has been transmitted, echo it back to the CAN framework. If * we discover a not yet transmitted package, stop looking for more. * @@ -826,10 +654,8 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr) u32 reg_msr; unsigned int mb; - /* masking of reg_sr not needed, already done by at91_irq */ - - for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { - mb = get_tx_echo_mb(priv); + for (/* nix */; (priv->tx_head - priv->tx_tail) > 0; priv->tx_tail++) { + mb = get_tx_tail_mb(priv); /* no event in mailbox? */ if (!(reg_sr & (1 << mb))) @@ -844,236 +670,202 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr) * parked in the echo queue. */ reg_msr = at91_read(priv, AT91_MSR(mb)); - if (likely(reg_msr & AT91_MSR_MRDY && - ~reg_msr & AT91_MSR_MABT)) { - /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */ - dev->stats.tx_bytes += - can_get_echo_skb(dev, - mb - get_mb_tx_first(priv), - NULL); - dev->stats.tx_packets++; - } + if (unlikely(!(reg_msr & AT91_MSR_MRDY && + ~reg_msr & AT91_MSR_MABT))) + continue; + + /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */ + dev->stats.tx_bytes += + can_get_echo_skb(dev, mb - get_mb_tx_first(priv), NULL); + dev->stats.tx_packets++; } /* restart queue if we don't have a wrap around but restart if * we get a TX int for the last can frame directly before a * wrap around. 
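All of the hand-rolled NAPI machinery deleted above (at91_poll(), at91_poll_rx(), the mailbox reactivation helpers) is replaced by the generic rx-offload core: the driver supplies a mailbox_read() callback and forwards pending RX bits from the hard IRQ. A hedged sketch of that contract against <linux/can/rx-offload.h>; the my_* names are placeholders and error handling is elided:

```c
#include <linux/can/rx-offload.h>
#include <linux/interrupt.h>

static struct sk_buff *my_mailbox_read(struct can_rx_offload *offload,
				       unsigned int mb, u32 *timestamp,
				       bool drop)
{
	/* Return NULL if mailbox @mb is empty, an ERR_PTR() to discard
	 * the frame, or an skb with *timestamp set for sorting.
	 */
	return NULL;
}

static irqreturn_t my_irq(int irq, void *dev_id)
{
	struct can_rx_offload *offload = dev_id;
	u64 pending = 0;	/* read RX pending bits from the hardware */

	if (pending)
		can_rx_offload_irq_offload_timestamp(offload, pending);
	can_rx_offload_irq_finish(offload);	/* kick the NAPI poll */

	return IRQ_HANDLED;
}
```

The core sorts queued skbs by timestamp before handing them to the stack, which is exactly the frame-ordering problem the old at91_poll_rx() comment describes.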
*/ - if ((priv->tx_next & get_next_mask(priv)) != 0 || - (priv->tx_echo & get_next_mask(priv)) == 0) + if ((priv->tx_head & get_head_mask(priv)) != 0 || + (priv->tx_tail & get_head_mask(priv)) == 0) netif_wake_queue(dev); } -static void at91_irq_err_state(struct net_device *dev, - struct can_frame *cf, enum can_state new_state) +static void at91_irq_err_line(struct net_device *dev, const u32 reg_sr) { + struct net_device_stats *stats = &dev->stats; + enum can_state new_state, rx_state, tx_state; struct at91_priv *priv = netdev_priv(dev); - u32 reg_idr = 0, reg_ier = 0; struct can_berr_counter bec; + struct sk_buff *skb; + struct can_frame *cf; + u32 timestamp; + int err; at91_get_berr_counter(dev, &bec); + can_state_get_by_berr_counter(dev, &bec, &tx_state, &rx_state); - switch (priv->can.state) { - case CAN_STATE_ERROR_ACTIVE: - /* from: ERROR_ACTIVE - * to : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF - * => : there was a warning int - */ - if (new_state >= CAN_STATE_ERROR_WARNING && - new_state <= CAN_STATE_BUS_OFF) { - netdev_dbg(dev, "Error Warning IRQ\n"); - priv->can.can_stats.error_warning++; - - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = (bec.txerr > bec.rxerr) ? - CAN_ERR_CRTL_TX_WARNING : - CAN_ERR_CRTL_RX_WARNING; - } - fallthrough; - case CAN_STATE_ERROR_WARNING: - /* from: ERROR_ACTIVE, ERROR_WARNING - * to : ERROR_PASSIVE, BUS_OFF - * => : error passive int - */ - if (new_state >= CAN_STATE_ERROR_PASSIVE && - new_state <= CAN_STATE_BUS_OFF) { - netdev_dbg(dev, "Error Passive IRQ\n"); - priv->can.can_stats.error_passive++; - - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = (bec.txerr > bec.rxerr) ? - CAN_ERR_CRTL_TX_PASSIVE : - CAN_ERR_CRTL_RX_PASSIVE; - } - break; - case CAN_STATE_BUS_OFF: - /* from: BUS_OFF - * to : ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE - */ - if (new_state <= CAN_STATE_ERROR_PASSIVE) { - cf->can_id |= CAN_ERR_RESTARTED; + /* The chip automatically recovers from bus-off after 128 + * occurrences of 11 consecutive recessive bits. + * + * After an auto-recovered bus-off, the error counters no + * longer reflect this fact. On the sam9263 the state bits in + * the SR register show the current state (based on the + * current error counters), while on sam9x5 and newer SoCs + * these bits are latched. + * + * Take any latched bus-off information from the SR register + * into account when calculating the CAN new state, to start + * the standard CAN bus off handling. + */ + if (reg_sr & AT91_IRQ_BOFF) + rx_state = CAN_STATE_BUS_OFF; - netdev_dbg(dev, "restarted\n"); - priv->can.can_stats.restarts++; + new_state = max(tx_state, rx_state); - netif_carrier_on(dev); - netif_wake_queue(dev); - } - break; - default: - break; - } + /* state hasn't changed */ + if (likely(new_state == priv->can.state)) + return; - /* process state changes depending on the new state */ - switch (new_state) { - case CAN_STATE_ERROR_ACTIVE: - /* actually we want to enable AT91_IRQ_WARN here, but - * it screws up the system under certain - * circumstances. 
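at91_irq_err_line() above derives the new CAN state from the error counters via can_state_get_by_berr_counter() and takes the worse of the TX and RX directions. The per-counter mapping (added to dev.c further down) uses the classic CAN fault-confinement thresholds; a userspace model:

```c
#include <stdio.h>

enum can_state { ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE, BUS_OFF };

static enum can_state err_to_state(unsigned int err)
{
	if (err < 96)			/* CAN_ERROR_WARNING_THRESHOLD */
		return ERROR_ACTIVE;
	if (err < 128)			/* CAN_ERROR_PASSIVE_THRESHOLD */
		return ERROR_WARNING;
	if (err < 256)			/* CAN_BUS_OFF_THRESHOLD */
		return ERROR_PASSIVE;
	return BUS_OFF;
}

int main(void)
{
	enum can_state tx = err_to_state(130);	/* passive on TX side */
	enum can_state rx = err_to_state(40);	/* still active on RX */

	/* The controller state is the worse of the two directions. */
	printf("state=%d\n", tx > rx ? tx : rx);	/* 2 == ERROR_PASSIVE */
	return 0;
}
```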
so just enable AT91_IRQ_ERRP, thus - * the "fallthrough" - */ - netdev_dbg(dev, "Error Active\n"); - cf->can_id |= CAN_ERR_PROT; - cf->data[2] = CAN_ERR_PROT_ACTIVE; - fallthrough; - case CAN_STATE_ERROR_WARNING: - reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF; - reg_ier = AT91_IRQ_ERRP; - break; - case CAN_STATE_ERROR_PASSIVE: - reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_ERRP; - reg_ier = AT91_IRQ_BOFF; - break; - case CAN_STATE_BUS_OFF: - reg_idr = AT91_IRQ_ERRA | AT91_IRQ_ERRP | - AT91_IRQ_WARN | AT91_IRQ_BOFF; - reg_ier = 0; + /* The skb allocation might fail, but can_change_state() + * handles cf == NULL. + */ + skb = at91_alloc_can_err_skb(dev, &cf, ×tamp); + can_change_state(dev, cf, tx_state, rx_state); - cf->can_id |= CAN_ERR_BUSOFF; + if (new_state == CAN_STATE_BUS_OFF) { + at91_chip_stop(dev, CAN_STATE_BUS_OFF); + can_bus_off(dev); + } - netdev_dbg(dev, "bus-off\n"); - netif_carrier_off(dev); - priv->can.can_stats.bus_off++; + if (unlikely(!skb)) + return; - /* turn off chip, if restart is disabled */ - if (!priv->can.restart_ms) { - at91_chip_stop(dev, CAN_STATE_BUS_OFF); - return; - } - break; - default: - break; + if (new_state != CAN_STATE_BUS_OFF) { + cf->can_id |= CAN_ERR_CNT; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; } - at91_write(priv, AT91_IDR, reg_idr); - at91_write(priv, AT91_IER, reg_ier); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + if (err) + stats->rx_fifo_errors++; } -static int at91_get_state_by_bec(const struct net_device *dev, - enum can_state *state) +static void at91_irq_err_frame(struct net_device *dev, const u32 reg_sr) { - struct can_berr_counter bec; + struct net_device_stats *stats = &dev->stats; + struct at91_priv *priv = netdev_priv(dev); + struct can_frame *cf; + struct sk_buff *skb; + u32 timestamp; int err; - err = at91_get_berr_counter(dev, &bec); - if (err) - return err; + priv->can.can_stats.bus_error++; - if (bec.txerr < 96 && bec.rxerr < 96) - *state = CAN_STATE_ERROR_ACTIVE; - else if (bec.txerr < 128 && bec.rxerr < 128) - *state = CAN_STATE_ERROR_WARNING; - else if (bec.txerr < 256 && bec.rxerr < 256) - *state = CAN_STATE_ERROR_PASSIVE; - else - *state = CAN_STATE_BUS_OFF; + skb = at91_alloc_can_err_skb(dev, &cf, ×tamp); + if (cf) + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - return 0; -} + if (reg_sr & AT91_IRQ_CERR) { + netdev_dbg(dev, "CRC error\n"); -static void at91_irq_err(struct net_device *dev) -{ - struct at91_priv *priv = netdev_priv(dev); - struct sk_buff *skb; - struct can_frame *cf; - enum can_state new_state; - u32 reg_sr; - int err; + stats->rx_errors++; + if (cf) + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; + } + + if (reg_sr & AT91_IRQ_SERR) { + netdev_dbg(dev, "Stuff error\n"); + + stats->rx_errors++; + if (cf) + cf->data[2] |= CAN_ERR_PROT_STUFF; + } - if (at91_is_sam9263(priv)) { - reg_sr = at91_read(priv, AT91_SR); - - /* we need to look at the unmasked reg_sr */ - if (unlikely(reg_sr & AT91_IRQ_BOFF)) { - new_state = CAN_STATE_BUS_OFF; - } else if (unlikely(reg_sr & AT91_IRQ_ERRP)) { - new_state = CAN_STATE_ERROR_PASSIVE; - } else if (unlikely(reg_sr & AT91_IRQ_WARN)) { - new_state = CAN_STATE_ERROR_WARNING; - } else if (likely(reg_sr & AT91_IRQ_ERRA)) { - new_state = CAN_STATE_ERROR_ACTIVE; - } else { - netdev_err(dev, "BUG! 
hardware in undefined state\n"); - return; + if (reg_sr & AT91_IRQ_AERR) { + netdev_dbg(dev, "NACK error\n"); + + stats->tx_errors++; + if (cf) { + cf->can_id |= CAN_ERR_ACK; + cf->data[2] |= CAN_ERR_PROT_TX; } - } else { - err = at91_get_state_by_bec(dev, &new_state); - if (err) - return; } - /* state hasn't changed */ - if (likely(new_state == priv->can.state)) - return; + if (reg_sr & AT91_IRQ_FERR) { + netdev_dbg(dev, "Format error\n"); - skb = alloc_can_err_skb(dev, &cf); - if (unlikely(!skb)) + stats->rx_errors++; + if (cf) + cf->data[2] |= CAN_ERR_PROT_FORM; + } + + if (reg_sr & AT91_IRQ_BERR) { + netdev_dbg(dev, "Bit error\n"); + + stats->tx_errors++; + if (cf) + cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT; + } + + if (!cf) return; - at91_irq_err_state(dev, cf, new_state); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + if (err) + stats->rx_fifo_errors++; +} + +static u32 at91_get_reg_sr_rx(const struct at91_priv *priv, u32 *reg_sr_p) +{ + const u32 reg_sr = at91_read(priv, AT91_SR); - netif_rx(skb); + *reg_sr_p |= reg_sr; - priv->can.state = new_state; + return reg_sr & get_irq_mb_rx(priv); } -/* interrupt handler - */ static irqreturn_t at91_irq(int irq, void *dev_id) { struct net_device *dev = dev_id; struct at91_priv *priv = netdev_priv(dev); irqreturn_t handled = IRQ_NONE; - u32 reg_sr, reg_imr; + u32 reg_sr = 0, reg_sr_rx; + int ret; - reg_sr = at91_read(priv, AT91_SR); - reg_imr = at91_read(priv, AT91_IMR); - - /* Ignore masked interrupts */ - reg_sr &= reg_imr; - if (!reg_sr) - goto exit; - - handled = IRQ_HANDLED; + /* Receive interrupt + * Some bits of AT91_SR are cleared on read, keep them in reg_sr. + */ + while ((reg_sr_rx = at91_get_reg_sr_rx(priv, ®_sr))) { + ret = can_rx_offload_irq_offload_timestamp(&priv->offload, + reg_sr_rx); + handled = IRQ_HANDLED; - /* Receive or error interrupt? -> napi */ - if (reg_sr & (get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME)) { - /* The error bits are clear on read, - * save for later use. 
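The rewritten at91_irq() above has to cope with AT91_SR being clear-on-read on newer SoCs: every read is OR-ed into a running reg_sr so the TX, line-error and frame-error paths still see bits already consumed by the RX loop. The idea in miniature, with a fake register:

```c
#include <stdint.h>
#include <stdio.h>

static uint32_t hw_sr = 0x13;	/* pretend: some pending status bits */

static uint32_t read_sr(void)
{
	uint32_t v = hw_sr;

	hw_sr = 0;		/* reading clears the bits */
	return v;
}

int main(void)
{
	uint32_t reg_sr = 0;

	reg_sr |= read_sr();	/* first consumer gets 0x13 */
	reg_sr |= read_sr();	/* later reads return 0 */
	printf("accumulated SR = 0x%02x\n", reg_sr);	/* still 0x13 */
	return 0;
}
```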
- */ - priv->reg_sr = reg_sr; - at91_write(priv, AT91_IDR, - get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME); - napi_schedule(&priv->napi); + if (!ret) + break; } /* Transmission complete interrupt */ - if (reg_sr & get_irq_mb_tx(priv)) + if (reg_sr & get_irq_mb_tx(priv)) { at91_irq_tx(dev, reg_sr); + handled = IRQ_HANDLED; + } - at91_irq_err(dev); + /* Line Error interrupt */ + if (reg_sr & AT91_IRQ_ERR_LINE || + priv->can.state > CAN_STATE_ERROR_ACTIVE) { + at91_irq_err_line(dev, reg_sr); + handled = IRQ_HANDLED; + } + + /* Frame Error Interrupt */ + if (reg_sr & AT91_IRQ_ERR_FRAME) { + at91_irq_err_frame(dev, reg_sr); + handled = IRQ_HANDLED; + } + + if (handled) + can_rx_offload_irq_finish(&priv->offload); - exit: return handled; } @@ -1082,33 +874,38 @@ static int at91_open(struct net_device *dev) struct at91_priv *priv = netdev_priv(dev); int err; - err = clk_prepare_enable(priv->clk); + err = phy_power_on(priv->transceiver); if (err) return err; /* check or determine and set bittime */ err = open_candev(dev); if (err) - goto out; + goto out_phy_power_off; + + err = clk_prepare_enable(priv->clk); + if (err) + goto out_close_candev; /* register interrupt handler */ - if (request_irq(dev->irq, at91_irq, IRQF_SHARED, - dev->name, dev)) { - err = -EAGAIN; - goto out_close; - } + err = request_irq(dev->irq, at91_irq, IRQF_SHARED, + dev->name, dev); + if (err) + goto out_clock_disable_unprepare; /* start chip and queuing */ at91_chip_start(dev); - napi_enable(&priv->napi); + can_rx_offload_enable(&priv->offload); netif_start_queue(dev); return 0; - out_close: - close_candev(dev); - out: + out_clock_disable_unprepare: clk_disable_unprepare(priv->clk); + out_close_candev: + close_candev(dev); + out_phy_power_off: + phy_power_off(priv->transceiver); return err; } @@ -1120,11 +917,12 @@ static int at91_close(struct net_device *dev) struct at91_priv *priv = netdev_priv(dev); netif_stop_queue(dev); - napi_disable(&priv->napi); + can_rx_offload_disable(&priv->offload); at91_chip_stop(dev, CAN_STATE_STOPPED); free_irq(dev->irq, dev); clk_disable_unprepare(priv->clk); + phy_power_off(priv->transceiver); close_candev(dev); @@ -1249,6 +1047,7 @@ static const struct at91_devtype_data *at91_can_get_driver_data(struct platform_ static int at91_can_probe(struct platform_device *pdev) { const struct at91_devtype_data *devtype_data; + struct phy *transceiver; struct net_device *dev; struct at91_priv *priv; struct resource *res; @@ -1297,6 +1096,13 @@ static int at91_can_probe(struct platform_device *pdev) goto exit_iounmap; } + transceiver = devm_phy_optional_get(&pdev->dev, NULL); + if (IS_ERR(transceiver)) { + err = PTR_ERR(transceiver); + dev_err_probe(&pdev->dev, err, "failed to get phy\n"); + goto exit_iounmap; + } + dev->netdev_ops = &at91_netdev_ops; dev->ethtool_ops = &at91_ethtool_ops; dev->irq = irq; @@ -1314,8 +1120,14 @@ static int at91_can_probe(struct platform_device *pdev) priv->clk = clk; priv->pdata = dev_get_platdata(&pdev->dev); priv->mb0_id = 0x7ff; + priv->offload.mailbox_read = at91_mailbox_read; + priv->offload.mb_first = devtype_data->rx_first; + priv->offload.mb_last = devtype_data->rx_last; + + can_rx_offload_add_timestamp(dev, &priv->offload); - netif_napi_add_weight(dev, &priv->napi, at91_poll, get_mb_rx_num(priv)); + if (transceiver) + priv->can.bitrate_max = transceiver->attrs.max_link_rate; if (at91_is_sam9263(priv)) dev->sysfs_groups[0] = &at91_sysfs_attr_group; diff --git a/drivers/net/can/bxcan.c b/drivers/net/can/bxcan.c index 39de7164bc4e..49cf9682b925 100644 --- 
a/drivers/net/can/bxcan.c +++ b/drivers/net/can/bxcan.c @@ -23,7 +23,6 @@ #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/regmap.h> diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index 925930b6c4ca..f44ba2600415 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -285,8 +285,8 @@ static int c_can_plat_probe(struct platform_device *pdev) /* get the platform data */ irq = platform_get_irq(pdev, 0); - if (irq <= 0) { - ret = -ENODEV; + if (irq < 0) { + ret = irq; goto exit; } diff --git a/drivers/net/can/can327.c b/drivers/net/can/can327.c index dc7192ecb001..24af63961030 100644 --- a/drivers/net/can/can327.c +++ b/drivers/net/can/can327.c @@ -885,10 +885,10 @@ static bool can327_is_valid_rx_char(u8 c) * This will not be re-entered while running, but other ldisc * functions may be called in parallel. */ -static void can327_ldisc_rx(struct tty_struct *tty, const unsigned char *cp, - const char *fp, int count) +static void can327_ldisc_rx(struct tty_struct *tty, const u8 *cp, + const u8 *fp, size_t count) { - struct can327 *elm = (struct can327 *)tty->disc_data; + struct can327 *elm = tty->disc_data; size_t first_new_char_idx; if (elm->uart_side_failure) @@ -901,15 +901,17 @@ static void can327_ldisc_rx(struct tty_struct *tty, const unsigned char *cp, */ first_new_char_idx = elm->rxfill; - while (count-- && elm->rxfill < CAN327_SIZE_RXBUF) { + while (count--) { + if (elm->rxfill >= CAN327_SIZE_RXBUF) { + netdev_err(elm->dev, + "Receive buffer overflowed. Bad chip or wiring? count = %zu", + count); + goto uart_failure; + } if (fp && *fp++) { netdev_err(elm->dev, "Error in received character stream. Check your wiring."); - - can327_uart_side_failure(elm); - - spin_unlock_bh(&elm->lock); - return; + goto uart_failure; } /* Ignore NUL characters, which the PIC microcontroller may @@ -925,10 +927,7 @@ static void can327_ldisc_rx(struct tty_struct *tty, const unsigned char *cp, netdev_err(elm->dev, "Received illegal character %02x.\n", *cp); - can327_uart_side_failure(elm); - - spin_unlock_bh(&elm->lock); - return; + goto uart_failure; } elm->rxbuf[elm->rxfill++] = *cp; @@ -937,19 +936,13 @@ static void can327_ldisc_rx(struct tty_struct *tty, const unsigned char *cp, cp++; } - if (count >= 0) { - netdev_err(elm->dev, - "Receive buffer overflowed. Bad chip or wiring? count = %i", - count); - - can327_uart_side_failure(elm); - - spin_unlock_bh(&elm->lock); - return; - } - can327_parse_rxbuf(elm, first_new_char_idx); spin_unlock_bh(&elm->lock); + + return; +uart_failure: + can327_uart_side_failure(elm); + spin_unlock_bh(&elm->lock); } /* Write out remaining transmit buffer. @@ -990,7 +983,7 @@ static void can327_ldisc_tx_worker(struct work_struct *work) /* Called by the driver when there's room for more data. */ static void can327_ldisc_tx_wakeup(struct tty_struct *tty) { - struct can327 *elm = (struct can327 *)tty->disc_data; + struct can327 *elm = tty->disc_data; schedule_work(&elm->tx_work); } @@ -1067,7 +1060,7 @@ static int can327_ldisc_open(struct tty_struct *tty) */ static void can327_ldisc_close(struct tty_struct *tty) { - struct can327 *elm = (struct can327 *)tty->disc_data; + struct can327 *elm = tty->disc_data; /* unregister_netdev() calls .ndo_stop() so we don't have to. 
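The c_can_platform hunk above, like the flexcan and m_can ones further down, stops flattening platform_get_irq() failures into -ENODEV and propagates the negative errno instead; platform_get_irq() never returns 0 in practice and already logs its own error message. A hedged sketch of the resulting probe pattern, not the exact driver code:

```c
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* keeps -EPROBE_DEFER and friends intact */

	/* ... devm_request_irq(&pdev->dev, irq, ...) ... */
	return 0;
}
```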
*/ unregister_candev(elm->dev); @@ -1092,7 +1085,7 @@ static void can327_ldisc_close(struct tty_struct *tty) static int can327_ldisc_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { - struct can327 *elm = (struct can327 *)tty->disc_data; + struct can327 *elm = tty->disc_data; unsigned int tmp; switch (cmd) { diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c index 7f9334a8af50..3a3be5cdfc1f 100644 --- a/drivers/net/can/dev/dev.c +++ b/drivers/net/can/dev/dev.c @@ -90,6 +90,28 @@ const char *can_get_state_str(const enum can_state state) } EXPORT_SYMBOL_GPL(can_get_state_str); +static enum can_state can_state_err_to_state(u16 err) +{ + if (err < CAN_ERROR_WARNING_THRESHOLD) + return CAN_STATE_ERROR_ACTIVE; + if (err < CAN_ERROR_PASSIVE_THRESHOLD) + return CAN_STATE_ERROR_WARNING; + if (err < CAN_BUS_OFF_THRESHOLD) + return CAN_STATE_ERROR_PASSIVE; + + return CAN_STATE_BUS_OFF; +} + +void can_state_get_by_berr_counter(const struct net_device *dev, + const struct can_berr_counter *bec, + enum can_state *tx_state, + enum can_state *rx_state) +{ + *tx_state = can_state_err_to_state(bec->txerr); + *rx_state = can_state_err_to_state(bec->rxerr); +} +EXPORT_SYMBOL_GPL(can_state_get_by_berr_counter); + void can_change_state(struct net_device *dev, struct can_frame *cf, enum can_state tx_state, enum can_state rx_state) { @@ -132,7 +154,8 @@ static void can_restart(struct net_device *dev) struct can_frame *cf; int err; - BUG_ON(netif_carrier_ok(dev)); + if (netif_carrier_ok(dev)) + netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n"); /* No synchronization needed because the device is bus-off and * no messages can come in or go out. @@ -141,23 +164,21 @@ static void can_restart(struct net_device *dev) /* send restart message upstream */ skb = alloc_can_err_skb(dev, &cf); - if (!skb) - goto restart; - - cf->can_id |= CAN_ERR_RESTARTED; - - netif_rx(skb); - -restart: - netdev_dbg(dev, "restarted\n"); - priv->can_stats.restarts++; + if (skb) { + cf->can_id |= CAN_ERR_RESTARTED; + netif_rx(skb); + } /* Now restart the device */ - err = priv->do_set_mode(dev, CAN_MODE_START); - netif_carrier_on(dev); - if (err) - netdev_err(dev, "Error %d during restart", err); + err = priv->do_set_mode(dev, CAN_MODE_START); + if (err) { + netdev_err(dev, "Restart failed, error %pe\n", ERR_PTR(err)); + netif_carrier_off(dev); + } else { + netdev_dbg(dev, "Restarted\n"); + priv->can_stats.restarts++; + } } static void can_restart_work(struct work_struct *work) diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c index 161e45a7e8c1..46e7b6db4a1e 100644 --- a/drivers/net/can/dev/rx-offload.c +++ b/drivers/net/can/dev/rx-offload.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2014 Protonic Holland, * David Jander - * Copyright (C) 2014-2021 Pengutronix, + * Copyright (C) 2014-2021, 2023 Pengutronix, * Marc Kleine-Budde <kernel@pengutronix.de> */ @@ -67,7 +67,7 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota) /* Check if there was another interrupt */ if (!skb_queue_empty(&offload->skb_queue)) - napi_reschedule(&offload->napi); + napi_schedule(&offload->napi); } return work_done; @@ -240,9 +240,10 @@ int can_rx_offload_queue_timestamp(struct can_rx_offload *offload, } EXPORT_SYMBOL_GPL(can_rx_offload_queue_timestamp); -unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, - unsigned int idx, u32 timestamp, - unsigned int *frame_len_ptr) +unsigned int 
+can_rx_offload_get_echo_skb_queue_timestamp(struct can_rx_offload *offload, + unsigned int idx, u32 timestamp, + unsigned int *frame_len_ptr) { struct net_device *dev = offload->dev; struct net_device_stats *stats = &dev->stats; @@ -262,7 +263,7 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, return len; } -EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb); +EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb_queue_timestamp); int can_rx_offload_queue_tail(struct can_rx_offload *offload, struct sk_buff *skb) @@ -279,6 +280,31 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload, } EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail); +unsigned int +can_rx_offload_get_echo_skb_queue_tail(struct can_rx_offload *offload, + unsigned int idx, + unsigned int *frame_len_ptr) +{ + struct net_device *dev = offload->dev; + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb; + unsigned int len; + int err; + + skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr); + if (!skb) + return 0; + + err = can_rx_offload_queue_tail(offload, skb); + if (err) { + stats->rx_errors++; + stats->tx_fifo_errors++; + } + + return len; +} +EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb_queue_tail); + void can_rx_offload_irq_finish(struct can_rx_offload *offload) { unsigned long flags; diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c index f6d05b3ef59a..3ebd4f779b9b 100644 --- a/drivers/net/can/dev/skb.c +++ b/drivers/net/can/dev/skb.c @@ -49,7 +49,11 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, { struct can_priv *priv = netdev_priv(dev); - BUG_ON(idx >= priv->echo_skb_max); + if (idx >= priv->echo_skb_max) { + netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", + __func__, idx, priv->echo_skb_max); + return -EINVAL; + } /* check flag whether this packet has to be looped back */ if (!(dev->flags & IFF_ECHO) || diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c index ff0fc18baf13..d15f85a40c1e 100644 --- a/drivers/net/can/flexcan/flexcan-core.c +++ b/drivers/net/can/flexcan/flexcan-core.c @@ -348,7 +348,7 @@ static struct flexcan_devtype_data fsl_imx8mp_devtype_data = { static struct flexcan_devtype_data fsl_imx93_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX | - FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_AUTO_STOP_MODE | + FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR | FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, @@ -544,11 +544,6 @@ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv) } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) { regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr, 1 << priv->stm.req_bit, 1 << priv->stm.req_bit); - } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE) { - /* For the auto stop mode, software do nothing, hardware will cover - * all the operation automatically after system go into low power mode. - */ - return 0; } return flexcan_low_power_enter_ack(priv); @@ -574,12 +569,6 @@ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv) reg_mcr &= ~FLEXCAN_MCR_SLF_WAK; priv->write(reg_mcr, ®s->mcr); - /* For the auto stop mode, hardware will exist stop mode - * automatically after system go out of low power mode. 
- */ - if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE) - return 0; - return flexcan_low_power_exit_ack(priv); } @@ -1097,8 +1086,8 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) handled = IRQ_HANDLED; stats->tx_bytes += - can_rx_offload_get_echo_skb(&priv->offload, 0, - reg_ctrl << 16, NULL); + can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload, 0, + reg_ctrl << 16, NULL); stats->tx_packets++; /* after sending a RTR frame MB is in RX mode */ @@ -1994,13 +1983,18 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev) ret = flexcan_setup_stop_mode_scfw(pdev); else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) ret = flexcan_setup_stop_mode_gpr(pdev); - else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE) - ret = 0; else /* return 0 directly if doesn't support stop mode feature */ return 0; - if (ret) + /* If ret is -EINVAL, this means SoC claim to support stop mode, but + * dts file lack the stop mode property definition. For this case, + * directly return 0, this will skip the wakeup capable setting and + * will not block the driver probe. + */ + if (ret == -EINVAL) + return 0; + else if (ret) return ret; device_set_wakeup_capable(&pdev->dev, true); @@ -2089,8 +2083,8 @@ static int flexcan_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (irq <= 0) - return -ENODEV; + if (irq < 0) + return irq; regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(regs)) @@ -2167,13 +2161,13 @@ static int flexcan_probe(struct platform_device *pdev) if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) { priv->irq_boff = platform_get_irq(pdev, 1); - if (priv->irq_boff <= 0) { - err = -ENODEV; + if (priv->irq_boff < 0) { + err = priv->irq_boff; goto failed_platform_get_irq; } priv->irq_err = platform_get_irq(pdev, 2); - if (priv->irq_err <= 0) { - err = -ENODEV; + if (priv->irq_err < 0) { + err = priv->irq_err; goto failed_platform_get_irq; } } @@ -2320,16 +2314,8 @@ static int __maybe_unused flexcan_noirq_suspend(struct device *device) if (netif_running(dev)) { int err; - if (device_may_wakeup(device)) { + if (device_may_wakeup(device)) flexcan_enable_wakeup_irq(priv, true); - /* For auto stop mode, need to keep the clock on before - * system go into low power mode. After system go into - * low power mode, hardware will config the flexcan into - * stop mode, and gate off the clock automatically. - */ - if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE) - return 0; - } err = pm_runtime_force_suspend(device); if (err) @@ -2347,15 +2333,9 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device) if (netif_running(dev)) { int err; - /* For the wakeup in auto stop mode, no need to gate on the - * clock here, hardware will do this automatically. 
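With FLEXCAN_QUIRK_AUTO_STOP_MODE removed above, flexcan_setup_stop_mode() instead tolerates -EINVAL, i.e. the SoC claims stop-mode support but the device tree lacks the property, and simply skips the wakeup setup rather than failing the probe. The control flow, sketched with a hypothetical my_setup_stop_mode_gpr() stand-in for the real per-SoC setup:

```c
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>

static int my_setup_stop_mode_gpr(struct platform_device *pdev);

static int my_setup_stop_mode(struct platform_device *pdev)
{
	int ret = my_setup_stop_mode_gpr(pdev);

	if (ret == -EINVAL)
		return 0;	/* DT lacks the property: degrade gracefully */
	else if (ret)
		return ret;	/* a real error still fails the probe */

	device_set_wakeup_capable(&pdev->dev, true);
	return 0;
}
```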
- */ - if (!(device_may_wakeup(device) && - priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)) { - err = pm_runtime_force_resume(device); - if (err) - return err; - } + err = pm_runtime_force_resume(device); + if (err) + return err; if (device_may_wakeup(device)) flexcan_enable_wakeup_irq(priv, false); diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h index 91402977780b..025c3417031f 100644 --- a/drivers/net/can/flexcan/flexcan.h +++ b/drivers/net/can/flexcan/flexcan.h @@ -68,8 +68,6 @@ #define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR BIT(15) /* Device supports RX via FIFO */ #define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16) -/* auto enter stop mode to support wakeup */ -#define FLEXCAN_QUIRK_AUTO_STOP_MODE BIT(17) struct flexcan_devtype_data { u32 quirks; /* quirks needed for different IP cores */ diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c index 3174efdae271..6d3ba71a6a73 100644 --- a/drivers/net/can/grcan.c +++ b/drivers/net/can/grcan.c @@ -30,8 +30,9 @@ #include <linux/ethtool.h> #include <linux/io.h> #include <linux/can/dev.h> +#include <linux/platform_device.h> #include <linux/spinlock.h> -#include <linux/of_platform.h> +#include <linux/of.h> #include <linux/of_irq.h> #include <linux/dma-mapping.h> diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 1d6642c94f2f..72307297d75e 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -20,7 +20,6 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/can/dev.h> diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index db6256f2b1b3..a57005faa04f 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause /* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved. 
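Both SerDes DMA-map helpers added further down split the DMA buffer address into two 32-bit register writes; the SmartFusion2 variant additionally masks the low word to its 4 KiB-aligned part (KVASER_PCIEFD_SF2_DMA_LSB_MASK, GENMASK(31, 12)). The arithmetic in userspace terms, with an invented example address:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x000000012345f000ULL;		/* example only */
	uint32_t lsb = (uint32_t)addr & 0xfffff000u;	/* GENMASK(31, 12) */
	uint32_t msb = (uint32_t)(addr >> 32);	/* 0 without 64-bit DMA */

	printf("register writes: lsb=0x%08x msb=0x%08x\n", lsb, msb);
	return 0;
}
```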
* Parts of this driver are based on the following: - * - Kvaser linux pciefd driver (version 5.25) + * - Kvaser linux pciefd driver (version 5.42) * - PEAK linux canfd driver */ @@ -33,37 +33,27 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); #define KVASER_PCIEFD_DMA_SIZE (4U * 1024U) #define KVASER_PCIEFD_VENDOR 0x1a07 +/* Altera based devices */ #define KVASER_PCIEFD_4HS_DEVICE_ID 0x000d #define KVASER_PCIEFD_2HS_V2_DEVICE_ID 0x000e #define KVASER_PCIEFD_HS_V2_DEVICE_ID 0x000f #define KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID 0x0010 #define KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID 0x0011 -/* PCIe IRQ registers */ -#define KVASER_PCIEFD_IRQ_REG 0x40 -#define KVASER_PCIEFD_IEN_REG 0x50 -/* DMA address translation map register base */ -#define KVASER_PCIEFD_DMA_MAP_BASE 0x1000 -/* Loopback control register */ -#define KVASER_PCIEFD_LOOP_REG 0x1f000 -/* System identification and information registers */ -#define KVASER_PCIEFD_SYSID_BASE 0x1f020 -#define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8) -#define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc) -#define KVASER_PCIEFD_SYSID_BUSFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0x10) -#define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14) -/* Shared receive buffer registers */ -#define KVASER_PCIEFD_SRB_BASE 0x1f200 -#define KVASER_PCIEFD_SRB_FIFO_LAST_REG (KVASER_PCIEFD_SRB_BASE + 0x1f4) -#define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200) -#define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204) -#define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c) -#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210) -#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG (KVASER_PCIEFD_SRB_BASE + 0x214) -#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218) +/* SmartFusion2 based devices */ +#define KVASER_PCIEFD_2CAN_V3_DEVICE_ID 0x0012 +#define KVASER_PCIEFD_1CAN_V3_DEVICE_ID 0x0013 +#define KVASER_PCIEFD_4CAN_V2_DEVICE_ID 0x0014 +#define KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID 0x0015 +#define KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID 0x0016 + +/* Altera SerDes Enable 64-bit DMA address translation */ +#define KVASER_PCIEFD_ALTERA_DMA_64BIT BIT(0) + +/* SmartFusion2 SerDes LSB address translation mask */ +#define KVASER_PCIEFD_SF2_DMA_LSB_MASK GENMASK(31, 12) + /* Kvaser KCAN CAN controller registers */ -#define KVASER_PCIEFD_KCAN0_BASE 0x10000 -#define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000 #define KVASER_PCIEFD_KCAN_FIFO_REG 0x100 #define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180 #define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0 @@ -77,13 +67,20 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); #define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424 #define KVASER_PCIEFD_KCAN_BTRD_REG 0x428 #define KVASER_PCIEFD_KCAN_PWM_REG 0x430 - -/* PCI interrupt fields */ -#define KVASER_PCIEFD_IRQ_SRB BIT(4) -#define KVASER_PCIEFD_IRQ_ALL_MASK GENMASK(4, 0) - -/* Enable 64-bit DMA address translation */ -#define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0) +/* System identification and information registers */ +#define KVASER_PCIEFD_SYSID_VERSION_REG 0x8 +#define KVASER_PCIEFD_SYSID_CANFREQ_REG 0xc +#define KVASER_PCIEFD_SYSID_BUSFREQ_REG 0x10 +#define KVASER_PCIEFD_SYSID_BUILD_REG 0x14 +/* Shared receive buffer FIFO registers */ +#define KVASER_PCIEFD_SRB_FIFO_LAST_REG 0x1f4 +/* Shared receive buffer registers */ +#define KVASER_PCIEFD_SRB_CMD_REG 0x0 +#define KVASER_PCIEFD_SRB_IEN_REG 0x04 +#define KVASER_PCIEFD_SRB_IRQ_REG 0x0c +#define 
KVASER_PCIEFD_SRB_STAT_REG 0x10 +#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG 0x14 +#define KVASER_PCIEFD_SRB_CTRL_REG 0x18 /* System build information fields */ #define KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK GENMASK(31, 24) @@ -253,7 +250,122 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); /* KCAN Error detected packet, second word */ #define KVASER_PCIEFD_EPACK_DIR_TX BIT(0) +/* Macros for calculating addresses of registers */ +#define KVASER_PCIEFD_GET_BLOCK_ADDR(pcie, block) \ + ((pcie)->reg_base + (pcie)->driver_data->address_offset->block) +#define KVASER_PCIEFD_PCI_IEN_ADDR(pcie) \ + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_ien)) +#define KVASER_PCIEFD_PCI_IRQ_ADDR(pcie) \ + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_irq)) +#define KVASER_PCIEFD_SERDES_ADDR(pcie) \ + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), serdes)) +#define KVASER_PCIEFD_SYSID_ADDR(pcie) \ + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), sysid)) +#define KVASER_PCIEFD_LOOPBACK_ADDR(pcie) \ + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), loopback)) +#define KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) \ + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb_fifo)) +#define KVASER_PCIEFD_SRB_ADDR(pcie) \ + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb)) +#define KVASER_PCIEFD_KCAN_CH0_ADDR(pcie) \ + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch0)) +#define KVASER_PCIEFD_KCAN_CH1_ADDR(pcie) \ + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch1)) +#define KVASER_PCIEFD_KCAN_CHANNEL_SPAN(pcie) \ + (KVASER_PCIEFD_KCAN_CH1_ADDR((pcie)) - KVASER_PCIEFD_KCAN_CH0_ADDR((pcie))) +#define KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i) \ + (KVASER_PCIEFD_KCAN_CH0_ADDR((pcie)) + (i) * KVASER_PCIEFD_KCAN_CHANNEL_SPAN((pcie))) + struct kvaser_pciefd; +static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie, + dma_addr_t addr, int index); +static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie, + dma_addr_t addr, int index); + +struct kvaser_pciefd_address_offset { + u32 serdes; + u32 pci_ien; + u32 pci_irq; + u32 sysid; + u32 loopback; + u32 kcan_srb_fifo; + u32 kcan_srb; + u32 kcan_ch0; + u32 kcan_ch1; +}; + +struct kvaser_pciefd_dev_ops { + void (*kvaser_pciefd_write_dma_map)(struct kvaser_pciefd *pcie, + dma_addr_t addr, int index); +}; + +struct kvaser_pciefd_irq_mask { + u32 kcan_rx0; + u32 kcan_tx[KVASER_PCIEFD_MAX_CAN_CHANNELS]; + u32 all; +}; + +struct kvaser_pciefd_driver_data { + const struct kvaser_pciefd_address_offset *address_offset; + const struct kvaser_pciefd_irq_mask *irq_mask; + const struct kvaser_pciefd_dev_ops *ops; +}; + +static const struct kvaser_pciefd_address_offset kvaser_pciefd_altera_address_offset = { + .serdes = 0x1000, + .pci_ien = 0x50, + .pci_irq = 0x40, + .sysid = 0x1f020, + .loopback = 0x1f000, + .kcan_srb_fifo = 0x1f200, + .kcan_srb = 0x1f400, + .kcan_ch0 = 0x10000, + .kcan_ch1 = 0x11000, +}; + +static const struct kvaser_pciefd_address_offset kvaser_pciefd_sf2_address_offset = { + .serdes = 0x280c8, + .pci_ien = 0x102004, + .pci_irq = 0x102008, + .sysid = 0x100000, + .loopback = 0x103000, + .kcan_srb_fifo = 0x120000, + .kcan_srb = 0x121000, + .kcan_ch0 = 0x140000, + .kcan_ch1 = 0x142000, +}; + +static const struct kvaser_pciefd_irq_mask kvaser_pciefd_altera_irq_mask = { + .kcan_rx0 = BIT(4), + .kcan_tx = { BIT(0), BIT(1), BIT(2), BIT(3) }, + .all = GENMASK(4, 0), +}; + +static const struct kvaser_pciefd_irq_mask kvaser_pciefd_sf2_irq_mask = { + .kcan_rx0 = BIT(4), + .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19) }, + .all = GENMASK(19, 16) | BIT(4), +}; + +static const struct 
kvaser_pciefd_dev_ops kvaser_pciefd_altera_dev_ops = { + .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_altera, +}; + +static const struct kvaser_pciefd_dev_ops kvaser_pciefd_sf2_dev_ops = { + .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_sf2, +}; + +static const struct kvaser_pciefd_driver_data kvaser_pciefd_altera_driver_data = { + .address_offset = &kvaser_pciefd_altera_address_offset, + .irq_mask = &kvaser_pciefd_altera_irq_mask, + .ops = &kvaser_pciefd_altera_dev_ops, +}; + +static const struct kvaser_pciefd_driver_data kvaser_pciefd_sf2_driver_data = { + .address_offset = &kvaser_pciefd_sf2_address_offset, + .irq_mask = &kvaser_pciefd_sf2_irq_mask, + .ops = &kvaser_pciefd_sf2_dev_ops, +}; struct kvaser_pciefd_can { struct can_priv can; @@ -273,6 +385,7 @@ struct kvaser_pciefd { struct pci_dev *pci; void __iomem *reg_base; struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS]; + const struct kvaser_pciefd_driver_data *driver_data; void *dma_data[KVASER_PCIEFD_DMA_COUNT]; u8 nr_channels; u32 bus_freq; @@ -305,18 +418,43 @@ static const struct can_bittiming_const kvaser_pciefd_bittiming_const = { static struct pci_device_id kvaser_pciefd_id_table[] = { { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_DEVICE_ID), + .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, }, { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_V2_DEVICE_ID), + .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, }, { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_V2_DEVICE_ID), + .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, }, { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID), + .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, }, { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID), + .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, + }, + { + PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2CAN_V3_DEVICE_ID), + .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, + }, + { + PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_1CAN_V3_DEVICE_ID), + .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, + }, + { + PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4CAN_V2_DEVICE_ID), + .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, + }, + { + PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID), + .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, + }, + { + PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID), + .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, }, { 0, @@ -783,8 +921,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) can = netdev_priv(netdev); netdev->netdev_ops = &kvaser_pciefd_netdev_ops; netdev->ethtool_ops = &kvaser_pciefd_ethtool_ops; - can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE + - i * KVASER_PCIEFD_KCAN_BASE_OFFSET; + can->reg_base = KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i); can->kv_pcie = pcie; can->cmd_seq = 0; can->err_rep_cnt = 0; @@ -865,20 +1002,37 @@ static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie) return 0; } -static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie, - dma_addr_t addr, int offset) +static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie, + dma_addr_t addr, int index) { + void __iomem *serdes_base; u32 word1, word2; #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - word1 = addr | KVASER_PCIEFD_64BIT_DMA_BIT; + word1 = addr | 
KVASER_PCIEFD_ALTERA_DMA_64BIT; word2 = addr >> 32; #else word1 = addr; word2 = 0; #endif - iowrite32(word1, pcie->reg_base + offset); - iowrite32(word2, pcie->reg_base + offset + 4); + serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index; + iowrite32(word1, serdes_base); + iowrite32(word2, serdes_base + 0x4); +} + +static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie, + dma_addr_t addr, int index) +{ + void __iomem *serdes_base; + u32 lsb = addr & KVASER_PCIEFD_SF2_DMA_LSB_MASK; + u32 msb = 0x0; + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + msb = addr >> 32; +#endif + serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x10 * index; + iowrite32(lsb, serdes_base); + iowrite32(msb, serdes_base + 0x4); } static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie) @@ -889,10 +1043,8 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie) dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT]; /* Disable the DMA */ - iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG); + iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) { - unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i; - pcie->dma_data[i] = dmam_alloc_coherent(&pcie->pci->dev, KVASER_PCIEFD_DMA_SIZE, &dma_addr[i], @@ -903,24 +1055,25 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie) KVASER_PCIEFD_DMA_SIZE); return -ENOMEM; } - kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset); + pcie->driver_data->ops->kvaser_pciefd_write_dma_map(pcie, dma_addr[i], i); } /* Reset Rx FIFO, and both DMA buffers */ iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 | KVASER_PCIEFD_SRB_CMD_RDB1, - pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); /* Empty Rx FIFO */ srb_packet_count = FIELD_GET(KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK, - ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG)); + ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + + KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG)); while (srb_packet_count) { /* Drop current packet in FIFO */ - ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_FIFO_LAST_REG); + ioread32(KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) + KVASER_PCIEFD_SRB_FIFO_LAST_REG); srb_packet_count--; } - srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG); + srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG); if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) { dev_err(&pcie->pci->dev, "DMA not idle before enabling\n"); return -EIO; @@ -928,7 +1081,7 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie) /* Enable the DMA */ iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE, - pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG); + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); return 0; } @@ -937,30 +1090,29 @@ static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie) { u32 version, srb_status, build; - version = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG); + version = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_VERSION_REG); pcie->nr_channels = min(KVASER_PCIEFD_MAX_CAN_CHANNELS, FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK, version)); - build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG); + build = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUILD_REG); dev_dbg(&pcie->pci->dev, "Version %lu.%lu.%lu\n", FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK, version), FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK, version), 
FIELD_GET(KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK, build)); - srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG); + srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG); if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) { dev_err(&pcie->pci->dev, "Hardware without DMA is not supported\n"); return -ENODEV; } - pcie->bus_freq = ioread32(pcie->reg_base + - KVASER_PCIEFD_SYSID_BUSFREQ_REG); - pcie->freq = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG); + pcie->bus_freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUSFREQ_REG); + pcie->freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_CANFREQ_REG); pcie->freq_to_ticks_div = pcie->freq / 1000000; if (pcie->freq_to_ticks_div == 0) pcie->freq_to_ticks_div = 1; /* Turn off all loopback functionality */ - iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG); + iowrite32(0, KVASER_PCIEFD_LOOPBACK_ADDR(pcie)); return 0; } @@ -1430,21 +1582,20 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf) static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) { - u32 irq; + u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); - irq = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG); if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) { kvaser_pciefd_read_buffer(pcie, 0); /* Reset DMA buffer 0 */ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, - pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); } if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) { kvaser_pciefd_read_buffer(pcie, 1); /* Reset DMA buffer 1 */ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, - pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); } if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 || @@ -1453,7 +1604,7 @@ static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) irq & KVASER_PCIEFD_SRB_IRQ_DUF1) dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq); - iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG); + iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); } static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can) @@ -1479,15 +1630,14 @@ static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can) static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) { struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev; - u32 board_irq; + const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask; + u32 board_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie)); int i; - board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG); - - if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MASK)) + if (!(board_irq & irq_mask->all)) return IRQ_NONE; - if (board_irq & KVASER_PCIEFD_IRQ_SRB) + if (board_irq & irq_mask->kcan_rx0) kvaser_pciefd_receive_irq(pcie); for (i = 0; i < pcie->nr_channels; i++) { @@ -1498,7 +1648,7 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) } /* Check that mask matches channel (i) IRQ mask */ - if (board_irq & (1 << i)) + if (board_irq & irq_mask->kcan_tx[i]) kvaser_pciefd_transmit_irq(pcie->can[i]); } @@ -1525,6 +1675,8 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, { int err; struct kvaser_pciefd *pcie; + const struct kvaser_pciefd_irq_mask *irq_mask; + void __iomem *irq_en_base; pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) @@ -1532,6 +1684,8 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, pcie); pcie->pci = pdev; 
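Right below, the probe stashes id->driver_data as the per-variant description; every register access then goes through the KVASER_PCIEFD_*_ADDR() macros instead of hard-coded offsets, which is what lets the Altera- and SmartFusion2-based boards share one driver. A small userspace model of that table-driven layout, with the offsets taken from the tables above and the struct names simplified:

```c
#include <stdint.h>
#include <stdio.h>

struct address_offset {
	uint32_t sysid;
	uint32_t kcan_srb;
};

static const struct address_offset altera_offsets = { 0x1f020, 0x1f400 };
static const struct address_offset sf2_offsets    = { 0x100000, 0x121000 };

struct board {
	uintptr_t reg_base;			/* mapped PCI BAR */
	const struct address_offset *offsets;	/* picked via driver_data */
};

int main(void)
{
	struct board b = { 0x10000000, &sf2_offsets };

	printf("SYSID block at %#lx\n",
	       (unsigned long)(b.reg_base + b.offsets->sysid));
	return 0;
}
```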
+ pcie->driver_data = (const struct kvaser_pciefd_driver_data *)id->driver_data; + irq_mask = pcie->driver_data->irq_mask; err = pci_enable_device(pdev); if (err) @@ -1567,22 +1721,21 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, goto err_teardown_can_ctrls; iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1, - pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG); + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 | KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 | KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1, - pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG); + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG); /* Enable PCI interrupts */ - iowrite32(KVASER_PCIEFD_IRQ_ALL_MASK, - pcie->reg_base + KVASER_PCIEFD_IEN_REG); - + irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie); + iowrite32(irq_mask->all, irq_en_base); /* Ready the DMA buffers */ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, - pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, - pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); err = kvaser_pciefd_reg_candev(pcie); if (err) @@ -1592,12 +1745,12 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, err_free_irq: /* Disable PCI interrupts */ - iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG); + iowrite32(0, irq_en_base); free_irq(pcie->pci->irq, pcie); err_teardown_can_ctrls: kvaser_pciefd_teardown_can_ctrls(pcie); - iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG); + iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); pci_clear_master(pdev); err_pci_iounmap: @@ -1636,8 +1789,8 @@ static void kvaser_pciefd_remove(struct pci_dev *pdev) kvaser_pciefd_remove_all_ctrls(pcie); /* Disable interrupts */ - iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG); - iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG); + iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); + iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); free_irq(pcie->pci->irq, pcie); diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index c5af92bcc9c9..16ecc11c7f62 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -11,6 +11,7 @@ #include <linux/bitfield.h> #include <linux/can/dev.h> #include <linux/ethtool.h> +#include <linux/hrtimer.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> @@ -18,7 +19,6 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/phy/phy.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> @@ -308,6 +308,9 @@ enum m_can_reg { #define TX_EVENT_MM_MASK GENMASK(31, 24) #define TX_EVENT_TXTS_MASK GENMASK(15, 0) +/* Hrtimer polling interval */ +#define HRTIMER_POLL_INTERVAL_MS 1 + /* The ID and DLC registers are adjacent in M_CAN FIFO memory, * and we can save a (potentially slow) bus round trip by combining * reads and writes to them. 
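For M_CAN instances wired up without an interrupt line, the hunks below start a pinned 1 ms hrtimer that drives the ISR by polling (and cancel it again in m_can_stop()). A hedged sketch of that pattern with the standard <linux/hrtimer.h> calls; the my_* names are placeholders and the ISR body is elided:

```c
#include <linux/hrtimer.h>
#include <linux/ktime.h>

#define POLL_INTERVAL_MS 1

static enum hrtimer_restart my_poll(struct hrtimer *timer)
{
	/* ... call the interrupt service routine here ... */

	hrtimer_forward_now(timer, ms_to_ktime(POLL_INTERVAL_MS));
	return HRTIMER_RESTART;	/* keep firing */
}

static void my_start_polling(struct hrtimer *timer)
{
	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	timer->function = my_poll;
	hrtimer_start(timer, ms_to_ktime(POLL_INTERVAL_MS),
		      HRTIMER_MODE_REL_PINNED);
}
```

Pinning the timer (HRTIMER_MODE_REL_PINNED) keeps the poll on one CPU, mirroring what the hunks below request.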
@@ -1013,10 +1016,10 @@ static void m_can_tx_update_stats(struct m_can_classdev *cdev, if (cdev->is_peripheral) stats->tx_bytes += - can_rx_offload_get_echo_skb(&cdev->offload, - msg_mark, - timestamp, - NULL); + can_rx_offload_get_echo_skb_queue_timestamp(&cdev->offload, + msg_mark, + timestamp, + NULL); else stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL); @@ -1414,6 +1417,12 @@ static int m_can_start(struct net_device *dev) m_can_enable_all_interrupts(cdev); + if (!dev->irq) { + dev_dbg(cdev->dev, "Start hrtimer\n"); + hrtimer_start(&cdev->hrtimer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS), + HRTIMER_MODE_REL_PINNED); + } + return 0; } @@ -1568,6 +1577,11 @@ static void m_can_stop(struct net_device *dev) { struct m_can_classdev *cdev = netdev_priv(dev); + if (!dev->irq) { + dev_dbg(cdev->dev, "Stop hrtimer\n"); + hrtimer_cancel(&cdev->hrtimer); + } + /* disable all interrupts */ m_can_disable_all_interrupts(cdev); @@ -1793,6 +1807,18 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } +static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer) +{ + struct m_can_classdev *cdev = container_of(timer, struct + m_can_classdev, hrtimer); + + m_can_isr(0, cdev->net); + + hrtimer_forward_now(timer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS)); + + return HRTIMER_RESTART; +} + static int m_can_open(struct net_device *dev) { struct m_can_classdev *cdev = netdev_priv(dev); @@ -1831,7 +1857,7 @@ static int m_can_open(struct net_device *dev) err = request_threaded_irq(dev->irq, NULL, m_can_isr, IRQF_ONESHOT, dev->name, dev); - } else { + } else if (dev->irq) { err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name, dev); } @@ -1887,6 +1913,22 @@ static int register_m_can_dev(struct net_device *dev) return register_candev(dev); } +int m_can_check_mram_cfg(struct m_can_classdev *cdev, u32 mram_max_size) +{ + u32 total_size; + + total_size = cdev->mcfg[MRAM_TXB].off - cdev->mcfg[MRAM_SIDF].off + + cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE; + if (total_size > mram_max_size) { + dev_err(cdev->dev, "Total size of mram config(%u) exceeds mram(%u)\n", + total_size, mram_max_size); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(m_can_check_mram_cfg); + static void m_can_of_parse_mram(struct m_can_classdev *cdev, const u32 *mram_config_vals) { @@ -2027,6 +2069,9 @@ int m_can_class_register(struct m_can_classdev *cdev) goto clk_disable; } + if (!cdev->net->irq) + cdev->hrtimer.function = &hrtimer_callback; + ret = m_can_dev_setup(cdev); if (ret) goto rx_offload_del; diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h index a839dc71dc9b..520e14277dff 100644 --- a/drivers/net/can/m_can/m_can.h +++ b/drivers/net/can/m_can/m_can.h @@ -15,6 +15,7 @@ #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/freezer.h> +#include <linux/hrtimer.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> @@ -22,7 +23,6 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/phy/phy.h> #include <linux/pinctrl/consumer.h> #include <linux/pm_runtime.h> @@ -93,6 +93,8 @@ struct m_can_classdev { int is_peripheral; struct mram_cfg mcfg[MRAM_CFG_NUM]; + + struct hrtimer hrtimer; }; struct m_can_classdev *m_can_class_allocate_dev(struct device *dev, int sizeof_priv); @@ -101,6 +103,7 @@ int m_can_class_register(struct m_can_classdev *cdev); void m_can_class_unregister(struct m_can_classdev *cdev); int m_can_class_get_clocks(struct 
m_can_classdev *cdev); int m_can_init_ram(struct m_can_classdev *priv); +int m_can_check_mram_cfg(struct m_can_classdev *cdev, u32 mram_max_size); int m_can_class_suspend(struct device *dev); int m_can_class_resume(struct device *dev); diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c index 94dc82644113..cdb28d6a092c 100644 --- a/drivers/net/can/m_can/m_can_platform.c +++ b/drivers/net/can/m_can/m_can_platform.c @@ -5,6 +5,7 @@ // // Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/ +#include <linux/hrtimer.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> @@ -82,7 +83,7 @@ static int m_can_plat_probe(struct platform_device *pdev) void __iomem *addr; void __iomem *mram_addr; struct phy *transceiver; - int irq, ret = 0; + int irq = 0, ret = 0; mcan_class = m_can_class_allocate_dev(&pdev->dev, sizeof(struct m_can_plat_priv)); @@ -96,12 +97,24 @@ static int m_can_plat_probe(struct platform_device *pdev) goto probe_fail; addr = devm_platform_ioremap_resource_byname(pdev, "m_can"); - irq = platform_get_irq_byname(pdev, "int0"); - if (IS_ERR(addr) || irq < 0) { - ret = -EINVAL; + if (IS_ERR(addr)) { + ret = PTR_ERR(addr); goto probe_fail; } + if (device_property_present(mcan_class->dev, "interrupts") || + device_property_present(mcan_class->dev, "interrupt-names")) { + irq = platform_get_irq_byname(pdev, "int0"); + if (irq < 0) { + ret = irq; + goto probe_fail; + } + } else { + dev_dbg(mcan_class->dev, "Polling enabled, initialize hrtimer"); + hrtimer_init(&mcan_class->hrtimer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED); + } + /* message ram could be shared */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram"); if (!res) { diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c index 2342aa011647..ae8c42f5debd 100644 --- a/drivers/net/can/m_can/tcan4x5x-core.c +++ b/drivers/net/can/m_can/tcan4x5x-core.c @@ -6,8 +6,9 @@ #define TCAN4X5X_EXT_CLK_DEF 40000000 -#define TCAN4X5X_DEV_ID0 0x00 -#define TCAN4X5X_DEV_ID1 0x04 +#define TCAN4X5X_DEV_ID1 0x00 +#define TCAN4X5X_DEV_ID1_TCAN 0x4e414354 /* ASCII TCAN */ +#define TCAN4X5X_DEV_ID2 0x04 #define TCAN4X5X_REV 0x08 #define TCAN4X5X_STATUS 0x0C #define TCAN4X5X_ERROR_STATUS_MASK 0x10 @@ -80,6 +81,7 @@ TCAN4X5X_MCAN_IR_RF1F) #define TCAN4X5X_MRAM_START 0x8000 +#define TCAN4X5X_MRAM_SIZE 0x800 #define TCAN4X5X_MCAN_OFFSET 0x1000 #define TCAN4X5X_CLEAR_ALL_INT 0xffffffff @@ -102,6 +104,37 @@ #define TCAN4X5X_WD_3_S_TIMER BIT(29) #define TCAN4X5X_WD_6_S_TIMER (BIT(28) | BIT(29)) +struct tcan4x5x_version_info { + const char *name; + u32 id2_register; + + bool has_wake_pin; + bool has_state_pin; +}; + +enum { + TCAN4552 = 0, + TCAN4553, + TCAN4X5X, +}; + +static const struct tcan4x5x_version_info tcan4x5x_versions[] = { + [TCAN4552] = { + .name = "4552", + .id2_register = 0x32353534, + }, + [TCAN4553] = { + .name = "4553", + .id2_register = 0x33353534, + }, + /* generic version with no id2_register at the end */ + [TCAN4X5X] = { + .name = "generic", + .has_wake_pin = true, + .has_state_pin = true, + }, +}; + static inline struct tcan4x5x_priv *cdev_to_priv(struct m_can_classdev *cdev) { return container_of(cdev, struct tcan4x5x_priv, cdev); @@ -253,18 +286,53 @@ static int tcan4x5x_disable_state(struct m_can_classdev *cdev) TCAN4X5X_DISABLE_INH_MSK, 0x01); } -static int tcan4x5x_get_gpios(struct m_can_classdev *cdev) +static const struct tcan4x5x_version_info +*tcan4x5x_find_version(struct tcan4x5x_priv *priv) +{ + u32 
val; + int ret; + + ret = regmap_read(priv->regmap, TCAN4X5X_DEV_ID1, &val); + if (ret) + return ERR_PTR(ret); + + if (val != TCAN4X5X_DEV_ID1_TCAN) { + dev_err(&priv->spi->dev, "Not a tcan device %x\n", val); + return ERR_PTR(-ENODEV); + } + + ret = regmap_read(priv->regmap, TCAN4X5X_DEV_ID2, &val); + if (ret) + return ERR_PTR(ret); + + for (int i = 0; i != ARRAY_SIZE(tcan4x5x_versions); ++i) { + const struct tcan4x5x_version_info *vinfo = &tcan4x5x_versions[i]; + + if (!vinfo->id2_register || val == vinfo->id2_register) { + dev_info(&priv->spi->dev, "Detected TCAN device version %s\n", + vinfo->name); + return vinfo; + } + } + + return &tcan4x5x_versions[TCAN4X5X]; +} + +static int tcan4x5x_get_gpios(struct m_can_classdev *cdev, + const struct tcan4x5x_version_info *version_info) { struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev); int ret; - tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake", - GPIOD_OUT_HIGH); - if (IS_ERR(tcan4x5x->device_wake_gpio)) { - if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (version_info->has_wake_pin) { + tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake", + GPIOD_OUT_HIGH); + if (IS_ERR(tcan4x5x->device_wake_gpio)) { + if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER) + return -EPROBE_DEFER; - tcan4x5x_disable_wake(cdev); + tcan4x5x_disable_wake(cdev); + } } tcan4x5x->reset_gpio = devm_gpiod_get_optional(cdev->dev, "reset", @@ -276,12 +344,14 @@ static int tcan4x5x_get_gpios(struct m_can_classdev *cdev) if (ret) return ret; - tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev, - "device-state", - GPIOD_IN); - if (IS_ERR(tcan4x5x->device_state_gpio)) { - tcan4x5x->device_state_gpio = NULL; - tcan4x5x_disable_state(cdev); + if (version_info->has_state_pin) { + tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev, + "device-state", + GPIOD_IN); + if (IS_ERR(tcan4x5x->device_state_gpio)) { + tcan4x5x->device_state_gpio = NULL; + tcan4x5x_disable_state(cdev); + } } return 0; @@ -298,6 +368,7 @@ static struct m_can_ops tcan4x5x_ops = { static int tcan4x5x_can_probe(struct spi_device *spi) { + const struct tcan4x5x_version_info *version_info; struct tcan4x5x_priv *priv; struct m_can_classdev *mcan_class; int freq, ret; @@ -307,6 +378,10 @@ static int tcan4x5x_can_probe(struct spi_device *spi) if (!mcan_class) return -ENOMEM; + ret = m_can_check_mram_cfg(mcan_class, TCAN4X5X_MRAM_SIZE); + if (ret) + goto out_m_can_class_free_dev; + priv = cdev_to_priv(mcan_class); priv->power = devm_regulator_get_optional(&spi->dev, "vsup"); @@ -327,6 +402,8 @@ static int tcan4x5x_can_probe(struct spi_device *spi) /* Sanity check */ if (freq < 20000000 || freq > TCAN4X5X_EXT_CLK_DEF) { + dev_err(&spi->dev, "Clock frequency is out of supported range %d\n", + freq); ret = -ERANGE; goto out_m_can_class_free_dev; } @@ -345,28 +422,49 @@ static int tcan4x5x_can_probe(struct spi_device *spi) /* Configure the SPI bus */ spi->bits_per_word = 8; ret = spi_setup(spi); - if (ret) + if (ret) { + dev_err(&spi->dev, "SPI setup failed %pe\n", ERR_PTR(ret)); goto out_m_can_class_free_dev; + } ret = tcan4x5x_regmap_init(priv); - if (ret) + if (ret) { + dev_err(&spi->dev, "regmap init failed %pe\n", ERR_PTR(ret)); goto out_m_can_class_free_dev; + } ret = tcan4x5x_power_enable(priv->power, 1); - if (ret) + if (ret) { + dev_err(&spi->dev, "Enabling regulator failed %pe\n", + ERR_PTR(ret)); goto out_m_can_class_free_dev; + } - ret = tcan4x5x_get_gpios(mcan_class); - if (ret) + version_info = 
tcan4x5x_find_version(priv); + if (IS_ERR(version_info)) { + ret = PTR_ERR(version_info); + goto out_power; + } + + ret = tcan4x5x_get_gpios(mcan_class, version_info); + if (ret) { + dev_err(&spi->dev, "Getting gpios failed %pe\n", ERR_PTR(ret)); goto out_power; + } ret = tcan4x5x_init(mcan_class); - if (ret) + if (ret) { + dev_err(&spi->dev, "tcan initialization failed %pe\n", + ERR_PTR(ret)); goto out_power; + } ret = m_can_class_register(mcan_class); - if (ret) + if (ret) { + dev_err(&spi->dev, "Failed registering m_can device %pe\n", + ERR_PTR(ret)); goto out_power; + } netdev_info(mcan_class->net, "TCAN4X5X successfully initialized.\n"); return 0; diff --git a/drivers/net/can/m_can/tcan4x5x-regmap.c b/drivers/net/can/m_can/tcan4x5x-regmap.c index 2b218ce04e9f..fafa6daa67e6 100644 --- a/drivers/net/can/m_can/tcan4x5x-regmap.c +++ b/drivers/net/can/m_can/tcan4x5x-regmap.c @@ -95,7 +95,6 @@ static const struct regmap_range tcan4x5x_reg_table_wr_range[] = { regmap_reg_range(0x000c, 0x0010), /* Device configuration registers and Interrupt Flags*/ regmap_reg_range(0x0800, 0x080c), - regmap_reg_range(0x0814, 0x0814), regmap_reg_range(0x0820, 0x0820), regmap_reg_range(0x0830, 0x0830), /* M_CAN */ diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index e4d748913439..b82842718735 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -34,7 +34,6 @@ #include <linux/moduleparam.h> #include <linux/netdevice.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/reset.h> diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c index c56e27223e5f..5bca719d61f5 100644 --- a/drivers/net/can/sja1000/ems_pci.c +++ b/drivers/net/can/sja1000/ems_pci.c @@ -108,11 +108,6 @@ struct ems_pci_card { #define EMS_PCI_BASE_SIZE 4096 /* size of controller area */ -#ifndef PCI_VENDOR_ID_ASIX -#define PCI_VENDOR_ID_ASIX 0x125b -#define PCI_DEVICE_ID_ASIX_9110 0x9110 -#define PCI_SUBVENDOR_ID_ASIX 0xa000 -#endif #define PCI_SUBDEVICE_ID_EMS 0x4010 static const struct pci_device_id ems_pci_tbl[] = { @@ -123,7 +118,7 @@ static const struct pci_device_id ems_pci_tbl[] = { /* CPC-104P v2 */ {PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4002}, /* CPC-PCIe v3 */ - {PCI_VENDOR_ID_ASIX, PCI_DEVICE_ID_ASIX_9110, PCI_SUBVENDOR_ID_ASIX, PCI_SUBDEVICE_ID_EMS}, + {PCI_VENDOR_ID_ASIX, PCI_DEVICE_ID_ASIX_AX99100_LB, 0xa000, PCI_SUBDEVICE_ID_EMS}, {0,} }; MODULE_DEVICE_TABLE(pci, ems_pci_tbl); @@ -148,7 +143,7 @@ static void ems_pci_v1_write_reg(const struct sja1000_priv *priv, static void ems_pci_v1_post_irq(const struct sja1000_priv *priv) { - struct ems_pci_card *card = (struct ems_pci_card *)priv->priv; + struct ems_pci_card *card = priv->priv; /* reset int flag of pita */ writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0, @@ -168,7 +163,7 @@ static void ems_pci_v2_write_reg(const struct sja1000_priv *priv, static void ems_pci_v2_post_irq(const struct sja1000_priv *priv) { - struct ems_pci_card *card = (struct ems_pci_card *)priv->priv; + struct ems_pci_card *card = priv->priv; writel(PLX_ICSR_ENA_CLR, card->conf_addr + PLX_ICSR); } @@ -186,7 +181,7 @@ static void ems_pci_v3_write_reg(const struct sja1000_priv *priv, static void ems_pci_v3_post_irq(const struct sja1000_priv *priv) { - struct ems_pci_card *card = (struct ems_pci_card *)priv->priv; + struct ems_pci_card *card = priv->priv; writel(ASIX_LINTSR_INT0AC, card->conf_addr + ASIX_LINTSR); } 
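A side note on the error-reporting style adopted throughout the tcan4x5x probe path above: the %pe printk format specifier prints a symbolic errno name (for example "-ENODEV") when handed an error pointer, which is why each negative errno is wrapped in ERR_PTR() before logging. Below is a minimal sketch of that pattern, not taken from the patch; the helper name and the ID register offset are hypothetical, while regmap_read(), dev_err() and ERR_PTR() are the real kernel APIs:

#include <linux/dev_printk.h>
#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/types.h>

static int example_read_device_id(struct device *dev, struct regmap *regmap)
{
	u32 val;
	int ret;

	/* Hypothetical ID register at offset 0x00, in the spirit of TCAN4X5X_DEV_ID1 */
	ret = regmap_read(regmap, 0x00, &val);
	if (ret) {
		/* ERR_PTR() turns the errno into a pointer so %pe can decode it */
		dev_err(dev, "reading device ID failed %pe\n", ERR_PTR(ret));
		return ret;
	}

	dev_info(dev, "device ID 0x%08x\n", val);
	return 0;
}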
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index 84f34020aafb..da396d641e24 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -462,7 +462,7 @@ static int peak_pciec_probe(struct pci_dev *pdev, struct net_device *dev) card->led_chip.owner = THIS_MODULE; card->led_chip.dev.parent = &pdev->dev; card->led_chip.algo_data = &card->i2c_bit; - strncpy(card->led_chip.name, "peak_i2c", + strscpy(card->led_chip.name, "peak_i2c", sizeof(card->led_chip.name)); card->i2c_bit = peak_pciec_i2c_bit_ops; diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 0ada0e160e93..ddb3247948ad 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -206,7 +206,7 @@ static void sja1000_start(struct net_device *dev) { struct sja1000_priv *priv = netdev_priv(dev); - /* leave reset mode */ + /* enter reset mode */ if (priv->can.state != CAN_STATE_STOPPED) set_reset_mode(dev); @@ -392,7 +392,13 @@ static irqreturn_t sja1000_reset_interrupt(int irq, void *dev_id) struct net_device *dev = (struct net_device *)dev_id; netdev_dbg(dev, "performing a soft reset upon overrun\n"); - sja1000_start(dev); + + netif_tx_lock(dev); + + can_free_echo_skb(dev, 0, NULL); + sja1000_set_mode(dev, CAN_MODE_START); + + netif_tx_unlock(dev); return IRQ_HANDLED; } diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c index 4e59952c66d4..33f0e46ab1c2 100644 --- a/drivers/net/can/sja1000/sja1000_platform.c +++ b/drivers/net/can/sja1000/sja1000_platform.c @@ -17,7 +17,6 @@ #include <linux/clk.h> #include <linux/io.h> #include <linux/of.h> -#include <linux/of_device.h> #include "sja1000.h" diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c index f4db77007c13..24c6622d36bd 100644 --- a/drivers/net/can/slcan/slcan-core.c +++ b/drivers/net/can/slcan/slcan-core.c @@ -583,7 +583,7 @@ static void slcan_transmit(struct work_struct *work) */ static void slcan_write_wakeup(struct tty_struct *tty) { - struct slcan *sl = (struct slcan *)tty->disc_data; + struct slcan *sl = tty->disc_data; schedule_work(&sl->tx_work); } @@ -774,11 +774,10 @@ static const struct net_device_ops slcan_netdev_ops = { * be re-entered while running but other ldisc functions may be called * in parallel */ -static void slcan_receive_buf(struct tty_struct *tty, - const unsigned char *cp, const char *fp, - int count) +static void slcan_receive_buf(struct tty_struct *tty, const u8 *cp, + const u8 *fp, size_t count) { - struct slcan *sl = (struct slcan *)tty->disc_data; + struct slcan *sl = tty->disc_data; if (!netif_running(sl->dev)) return; @@ -862,7 +861,7 @@ static int slcan_open(struct tty_struct *tty) */ static void slcan_close(struct tty_struct *tty) { - struct slcan *sl = (struct slcan *)tty->disc_data; + struct slcan *sl = tty->disc_data; unregister_candev(sl->dev); @@ -886,7 +885,7 @@ static void slcan_close(struct tty_struct *tty) static int slcan_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { - struct slcan *sl = (struct slcan *)tty->disc_data; + struct slcan *sl = tty->disc_data; unsigned int tmp; switch (cmd) { diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c index 237617b0c125..e5bd57b65aaf 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c @@ -111,9 +111,9 @@ mcp251xfd_handle_tefif_one(struct 
mcp251xfd_priv *priv, if (skb) mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts); stats->tx_bytes += - can_rx_offload_get_echo_skb(&priv->offload, - tef_tail, hw_tef_obj->ts, - frame_len_ptr); + can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload, + tef_tail, hw_tef_obj->ts, + frame_len_ptr); stats->tx_packets++; priv->tef->tail++; diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index 0827830bbf28..ab8d01784686 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -59,7 +59,6 @@ #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/reset.h> @@ -91,6 +90,8 @@ #define SUN4I_REG_BUF12_ADDR 0x0070 /* CAN Tx/Rx Buffer 12 */ #define SUN4I_REG_ACPC_ADDR 0x0040 /* CAN Acceptance Code 0 */ #define SUN4I_REG_ACPM_ADDR 0x0044 /* CAN Acceptance Mask 0 */ +#define SUN4I_REG_ACPC_ADDR_D1 0x0028 /* CAN Acceptance Code 0 on the D1 */ +#define SUN4I_REG_ACPM_ADDR_D1 0x002C /* CAN Acceptance Mask 0 on the D1 */ #define SUN4I_REG_RBUF_RBACK_START_ADDR 0x0180 /* CAN transmit buffer start */ #define SUN4I_REG_RBUF_RBACK_END_ADDR 0x01b0 /* CAN transmit buffer end */ @@ -205,9 +206,11 @@ * struct sun4ican_quirks - Differences between SoC variants. * * @has_reset: SoC needs reset deasserted. + * @acp_offset: Offset of ACPC and ACPM registers */ struct sun4ican_quirks { bool has_reset; + int acp_offset; }; struct sun4ican_priv { @@ -216,6 +219,7 @@ struct sun4ican_priv { struct clk *clk; struct reset_control *reset; spinlock_t cmdreg_lock; /* lock for concurrent cmd register writes */ + int acp_offset; }; static const struct can_bittiming_const sun4ican_bittiming_const = { @@ -338,8 +342,8 @@ static int sun4i_can_start(struct net_device *dev) } /* set filters - we accept all */ - writel(0x00000000, priv->base + SUN4I_REG_ACPC_ADDR); - writel(0xFFFFFFFF, priv->base + SUN4I_REG_ACPM_ADDR); + writel(0x00000000, priv->base + SUN4I_REG_ACPC_ADDR + priv->acp_offset); + writel(0xFFFFFFFF, priv->base + SUN4I_REG_ACPM_ADDR + priv->acp_offset); /* clear error counters and error code capture */ writel(0, priv->base + SUN4I_REG_ERRC_ADDR); @@ -768,10 +772,17 @@ static const struct ethtool_ops sun4ican_ethtool_ops = { static const struct sun4ican_quirks sun4ican_quirks_a10 = { .has_reset = false, + .acp_offset = 0, }; static const struct sun4ican_quirks sun4ican_quirks_r40 = { .has_reset = true, + .acp_offset = 0, +}; + +static const struct sun4ican_quirks sun4ican_quirks_d1 = { + .has_reset = true, + .acp_offset = (SUN4I_REG_ACPC_ADDR_D1 - SUN4I_REG_ACPC_ADDR), }; static const struct of_device_id sun4ican_of_match[] = { @@ -785,6 +796,9 @@ static const struct of_device_id sun4ican_of_match[] = { .compatible = "allwinner,sun8i-r40-can", .data = &sun4ican_quirks_r40 }, { + .compatible = "allwinner,sun20i-d1-can", + .data = &sun4ican_quirks_d1 + }, { /* sentinel */ }, }; @@ -870,6 +884,7 @@ static int sun4ican_probe(struct platform_device *pdev) priv->base = addr; priv->clk = clk; priv->reset = reset; + priv->acp_offset = quirks->acp_offset; spin_lock_init(&priv->cmdreg_lock); platform_set_drvdata(pdev, dev); @@ -907,4 +922,4 @@ module_platform_driver(sun4i_can_driver); MODULE_AUTHOR("Peter Chen <xingkongcp@gmail.com>"); MODULE_AUTHOR("Gerhard Bertelsmann <info@gerhard-bertelsmann.de>"); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_DESCRIPTION("CAN driver for Allwinner SoCs (A10/A20)"); +MODULE_DESCRIPTION("CAN driver for Allwinner SoCs (A10/A20/D1)"); diff --git 
a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index 54284661992e..5aab440074c6 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -21,7 +21,6 @@ #include <linux/clk.h> #include <linux/io.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/regulator/consumer.h> #include <linux/can/dev.h> @@ -748,8 +747,8 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id) spin_unlock_irqrestore(&priv->mbx_lock, flags); stamp = hecc_read_stamp(priv, mbxno); stats->tx_bytes += - can_rx_offload_get_echo_skb(&priv->offload, - mbxno, stamp, NULL); + can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload, + mbxno, stamp, NULL); stats->tx_packets++; --priv->tx_tail; } diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig index 58fcd2b34820..d1450722cb3c 100644 --- a/drivers/net/can/usb/Kconfig +++ b/drivers/net/can/usb/Kconfig @@ -52,6 +52,7 @@ config CAN_F81604 config CAN_GS_USB tristate "Geschwister Schneider UG and candleLight compatible interfaces" + select CAN_RX_OFFLOAD help This driver supports the Geschwister Schneider and bytewerk.org candleLight compatible diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c index 6201637ac0ff..41a0e4261d15 100644 --- a/drivers/net/can/usb/esd_usb.c +++ b/drivers/net/can/usb/esd_usb.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * CAN driver for esd electronics gmbh CAN-USB/2 and CAN-USB/Micro + * CAN driver for esd electronics gmbh CAN-USB/2, CAN-USB/3 and CAN-USB/Micro * * Copyright (C) 2010-2012 esd electronic system design gmbh, Matthias Fuchs <socketcan@esd.eu> * Copyright (C) 2022-2023 esd electronics gmbh, Frank Jungclaus <frank.jungclaus@esd.eu> @@ -19,17 +19,19 @@ MODULE_AUTHOR("Matthias Fuchs <socketcan@esd.eu>"); MODULE_AUTHOR("Frank Jungclaus <frank.jungclaus@esd.eu>"); -MODULE_DESCRIPTION("CAN driver for esd electronics gmbh CAN-USB/2 and CAN-USB/Micro interfaces"); +MODULE_DESCRIPTION("CAN driver for esd electronics gmbh CAN-USB/2, CAN-USB/3 and CAN-USB/Micro interfaces"); MODULE_LICENSE("GPL v2"); /* USB vendor and product ID */ #define ESD_USB_ESDGMBH_VENDOR_ID 0x0ab4 #define ESD_USB_CANUSB2_PRODUCT_ID 0x0010 #define ESD_USB_CANUSBM_PRODUCT_ID 0x0011 +#define ESD_USB_CANUSB3_PRODUCT_ID 0x0014 /* CAN controller clock frequencies */ #define ESD_USB_2_CAN_CLOCK (60 * MEGA) /* Hz */ #define ESD_USB_M_CAN_CLOCK (36 * MEGA) /* Hz */ +#define ESD_USB_3_CAN_CLOCK (80 * MEGA) /* Hz */ /* Maximum number of CAN nets */ #define ESD_USB_MAX_NETS 2 @@ -44,6 +46,9 @@ MODULE_LICENSE("GPL v2"); /* esd CAN message flags - dlc field */ #define ESD_USB_RTR BIT(4) +#define ESD_USB_NO_BRS BIT(4) +#define ESD_USB_ESI BIT(5) +#define ESD_USB_FD BIT(7) /* esd CAN message flags - id field */ #define ESD_USB_EXTID BIT(29) @@ -65,6 +70,9 @@ MODULE_LICENSE("GPL v2"); #define ESD_USB_M_SJW_SHIFT 24 #define ESD_USB_TRIPLE_SAMPLES BIT(23) +/* Transmitter Delay Compensation */ +#define ESD_USB_3_TDC_MODE_AUTO 0 + /* esd IDADD message */ #define ESD_USB_ID_ENABLE BIT(7) #define ESD_USB_MAX_ID_SEGMENT 64 @@ -88,6 +96,21 @@ MODULE_LICENSE("GPL v2"); #define ESD_USB_MAX_RX_URBS 4 #define ESD_USB_MAX_TX_URBS 16 /* must be power of 2 */ +/* Modes for CAN-USB/3, to be used for esd_usb_3_set_baudrate_msg_x.mode */ +#define ESD_USB_3_BAUDRATE_MODE_DISABLE 0 /* remove from bus */ +#define ESD_USB_3_BAUDRATE_MODE_INDEX 1 /* ESD (CiA) bit rate idx */ +#define ESD_USB_3_BAUDRATE_MODE_BTR_CTRL 2 /* BTR values (controller)*/ +#define ESD_USB_3_BAUDRATE_MODE_BTR_CANONICAL 3 /* BTR 
values (canonical) */ +#define ESD_USB_3_BAUDRATE_MODE_NUM 4 /* numerical bit rate */ +#define ESD_USB_3_BAUDRATE_MODE_AUTOBAUD 5 /* autobaud */ + +/* Flags for CAN-USB/3, to be used for esd_usb_3_set_baudrate_msg_x.flags */ +#define ESD_USB_3_BAUDRATE_FLAG_FD BIT(0) /* enable CAN FD mode */ +#define ESD_USB_3_BAUDRATE_FLAG_LOM BIT(1) /* enable listen only mode */ +#define ESD_USB_3_BAUDRATE_FLAG_STM BIT(2) /* enable self test mode */ +#define ESD_USB_3_BAUDRATE_FLAG_TRS BIT(3) /* enable triple sampling */ +#define ESD_USB_3_BAUDRATE_FLAG_TXP BIT(4) /* enable transmit pause */ + struct esd_usb_header_msg { u8 len; /* total message length in 32bit words */ u8 cmd; @@ -122,6 +145,7 @@ struct esd_usb_rx_msg { __le32 id; /* upper 3 bits contain flags */ union { u8 data[CAN_MAX_DLEN]; + u8 data_fd[CANFD_MAX_DLEN]; struct { u8 status; /* CAN Controller Status */ u8 ecc; /* Error Capture Register */ @@ -138,7 +162,10 @@ struct esd_usb_tx_msg { u8 dlc; u32 hnd; /* opaque handle, not used by device */ __le32 id; /* upper 3 bits contain flags */ - u8 data[CAN_MAX_DLEN]; + union { + u8 data[CAN_MAX_DLEN]; + u8 data_fd[CANFD_MAX_DLEN]; + }; }; struct esd_usb_tx_done_msg { @@ -166,6 +193,50 @@ struct esd_usb_set_baudrate_msg { __le32 baud; }; +/* CAN-USB/3 baudrate configuration, used for nominal as well as for data bit rate */ +struct esd_usb_3_baudrate_cfg { + __le16 brp; /* bit rate pre-scaler */ + __le16 tseg1; /* time segment before sample point */ + __le16 tseg2; /* time segment after sample point */ + __le16 sjw; /* synchronization jump width */ +}; + +/* In principle, the esd CAN-USB/3 supports Transmitter Delay Compensation (TDC), + * but currently only the automatic TDC mode is supported by this driver. + * An implementation for manual TDC configuration will follow. + * + * For information about struct esd_usb_3_tdc_cfg, see + * NTCAN Application Developers Manual, 6.2.25 NTCAN_TDC_CFG + related chapters + * https://esd.eu/fileadmin/esd/docs/manuals/NTCAN_Part1_Function_API_Manual_en_56.pdf + */ +struct esd_usb_3_tdc_cfg { + u8 tdc_mode; /* transmitter delay compensation mode */ + u8 ssp_offset; /* secondary sample point offset in mtq */ + s8 ssp_shift; /* secondary sample point shift in mtq */ + u8 tdc_filter; /* TDC filter in mtq */ +}; + +/* Extended version of the above set_baudrate_msg for a CAN-USB/3 + * to define the CAN bit timing configuration of the CAN controller in + * CAN FD mode as well as in Classical CAN mode. + * + * The payload of this command is an NTCAN_BAUDRATE_X structure according to + * esd electronics gmbh, NTCAN Application Developers Manual, 6.2.15 NTCAN_BAUDRATE_X + * https://esd.eu/fileadmin/esd/docs/manuals/NTCAN_Part1_Function_API_Manual_en_56.pdf + */ +struct esd_usb_3_set_baudrate_msg_x { + u8 len; /* total message length in 32bit words */ + u8 cmd; + u8 net; + u8 rsvd; /* reserved */ + /* Payload ...
*/ + __le16 mode; /* mode word, see ESD_USB_3_BAUDRATE_MODE_xxx */ + __le16 flags; /* control flags, see ESD_USB_3_BAUDRATE_FLAG_xxx */ + struct esd_usb_3_tdc_cfg tdc; /* TDC configuration */ + struct esd_usb_3_baudrate_cfg nom; /* nominal bit rate */ + struct esd_usb_3_baudrate_cfg data; /* data bit rate */ +}; + /* Main message type used between library and application */ union __packed esd_usb_msg { struct esd_usb_header_msg hdr; @@ -175,12 +246,14 @@ union __packed esd_usb_msg { struct esd_usb_tx_msg tx; struct esd_usb_tx_done_msg txdone; struct esd_usb_set_baudrate_msg setbaud; + struct esd_usb_3_set_baudrate_msg_x setbaud_x; struct esd_usb_id_filter_msg filter; }; static struct usb_device_id esd_usb_table[] = { {USB_DEVICE(ESD_USB_ESDGMBH_VENDOR_ID, ESD_USB_CANUSB2_PRODUCT_ID)}, {USB_DEVICE(ESD_USB_ESDGMBH_VENDOR_ID, ESD_USB_CANUSBM_PRODUCT_ID)}, + {USB_DEVICE(ESD_USB_ESDGMBH_VENDOR_ID, ESD_USB_CANUSB3_PRODUCT_ID)}, {} }; MODULE_DEVICE_TABLE(usb, esd_usb_table); @@ -321,9 +394,10 @@ static void esd_usb_rx_can_msg(struct esd_usb_net_priv *priv, { struct net_device_stats *stats = &priv->netdev->stats; struct can_frame *cf; + struct canfd_frame *cfd; struct sk_buff *skb; - int i; u32 id; + u8 len; if (!netif_device_present(priv->netdev)) return; @@ -333,27 +407,42 @@ static void esd_usb_rx_can_msg(struct esd_usb_net_priv *priv, if (id & ESD_USB_EVENT) { esd_usb_rx_event(priv, msg); } else { - skb = alloc_can_skb(priv->netdev, &cf); + if (msg->rx.dlc & ESD_USB_FD) { + skb = alloc_canfd_skb(priv->netdev, &cfd); + } else { + skb = alloc_can_skb(priv->netdev, &cf); + cfd = (struct canfd_frame *)cf; + } + if (skb == NULL) { stats->rx_dropped++; return; } - cf->can_id = id & ESD_USB_IDMASK; - can_frame_set_cc_len(cf, msg->rx.dlc & ~ESD_USB_RTR, - priv->can.ctrlmode); - - if (id & ESD_USB_EXTID) - cf->can_id |= CAN_EFF_FLAG; + cfd->can_id = id & ESD_USB_IDMASK; - if (msg->rx.dlc & ESD_USB_RTR) { - cf->can_id |= CAN_RTR_FLAG; + if (msg->rx.dlc & ESD_USB_FD) { + /* masking by 0x0F is already done within can_fd_dlc2len() */ + cfd->len = can_fd_dlc2len(msg->rx.dlc); + len = cfd->len; + if ((msg->rx.dlc & ESD_USB_NO_BRS) == 0) + cfd->flags |= CANFD_BRS; + if (msg->rx.dlc & ESD_USB_ESI) + cfd->flags |= CANFD_ESI; } else { - for (i = 0; i < cf->len; i++) - cf->data[i] = msg->rx.data[i]; - - stats->rx_bytes += cf->len; + can_frame_set_cc_len(cf, msg->rx.dlc & ~ESD_USB_RTR, priv->can.ctrlmode); + len = cf->len; + if (msg->rx.dlc & ESD_USB_RTR) { + cf->can_id |= CAN_RTR_FLAG; + len = 0; + } } + + if (id & ESD_USB_EXTID) + cfd->can_id |= CAN_EFF_FLAG; + + memcpy(cfd->data, msg->rx.data_fd, len); + stats->rx_bytes += len; stats->rx_packets++; netif_rx(skb); @@ -728,7 +817,7 @@ static netdev_tx_t esd_usb_start_xmit(struct sk_buff *skb, struct esd_usb *dev = priv->usb; struct esd_tx_urb_context *context = NULL; struct net_device_stats *stats = &netdev->stats; - struct can_frame *cf = (struct can_frame *)skb->data; + struct canfd_frame *cfd = (struct canfd_frame *)skb->data; union esd_usb_msg *msg; struct urb *urb; u8 *buf; @@ -762,20 +851,29 @@ static netdev_tx_t esd_usb_start_xmit(struct sk_buff *skb, msg->hdr.len = offsetof(struct esd_usb_tx_msg, data) / sizeof(u32); msg->hdr.cmd = ESD_USB_CMD_CAN_TX; msg->tx.net = priv->index; - msg->tx.dlc = can_get_cc_dlc(cf, priv->can.ctrlmode); - msg->tx.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK); - if (cf->can_id & CAN_RTR_FLAG) - msg->tx.dlc |= ESD_USB_RTR; + if (can_is_canfd_skb(skb)) { + msg->tx.dlc = can_fd_len2dlc(cfd->len); + msg->tx.dlc |= ESD_USB_FD; + + if 
((cfd->flags & CANFD_BRS) == 0) + msg->tx.dlc |= ESD_USB_NO_BRS; + } else { + msg->tx.dlc = can_get_cc_dlc((struct can_frame *)cfd, priv->can.ctrlmode); + + if (cfd->can_id & CAN_RTR_FLAG) + msg->tx.dlc |= ESD_USB_RTR; + } + + msg->tx.id = cpu_to_le32(cfd->can_id & CAN_ERR_MASK); - if (cf->can_id & CAN_EFF_FLAG) + if (cfd->can_id & CAN_EFF_FLAG) msg->tx.id |= cpu_to_le32(ESD_USB_EXTID); - for (i = 0; i < cf->len; i++) - msg->tx.data[i] = cf->data[i]; + memcpy(msg->tx.data_fd, cfd->data, cfd->len); /* round up, then divide by 4 to add the payload length as # of 32bit words */ - msg->hdr.len += DIV_ROUND_UP(cf->len, sizeof(u32)); + msg->hdr.len += DIV_ROUND_UP(cfd->len, sizeof(u32)); for (i = 0; i < ESD_USB_MAX_TX_URBS; i++) { if (priv->tx_contexts[i].echo_index == ESD_USB_MAX_TX_URBS) { @@ -962,6 +1060,105 @@ static int esd_usb_2_set_bittiming(struct net_device *netdev) return err; } +/* Nominal bittiming constants, see + * Microchip SAM E70/S70/V70/V71, Data Sheet, Rev. G - 07/2022 + * 48.6.8 MCAN Nominal Bit Timing and Prescaler Register + */ +static const struct can_bittiming_const esd_usb_3_nom_bittiming_const = { + .name = "esd_usb_3", + .tseg1_min = 2, + .tseg1_max = 256, + .tseg2_min = 2, + .tseg2_max = 128, + .sjw_max = 128, + .brp_min = 1, + .brp_max = 512, + .brp_inc = 1, +}; + +/* Data bittiming constants, see + * Microchip SAM E70/S70/V70/V71, Data Sheet, Rev. G - 07/2022 + * 48.6.4 MCAN Data Bit Timing and Prescaler Register + */ +static const struct can_bittiming_const esd_usb_3_data_bittiming_const = { + .name = "esd_usb_3", + .tseg1_min = 2, + .tseg1_max = 32, + .tseg2_min = 1, + .tseg2_max = 16, + .sjw_max = 8, + .brp_min = 1, + .brp_max = 32, + .brp_inc = 1, +}; + +static int esd_usb_3_set_bittiming(struct net_device *netdev) +{ + const struct can_bittiming_const *nom_btc = &esd_usb_3_nom_bittiming_const; + const struct can_bittiming_const *data_btc = &esd_usb_3_data_bittiming_const; + struct esd_usb_net_priv *priv = netdev_priv(netdev); + struct can_bittiming *nom_bt = &priv->can.bittiming; + struct can_bittiming *data_bt = &priv->can.data_bittiming; + struct esd_usb_3_set_baudrate_msg_x *baud_x; + union esd_usb_msg *msg; + u16 flags = 0; + int err; + + msg = kmalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) + return -ENOMEM; + + baud_x = &msg->setbaud_x; + + /* Canonical is the most reasonable mode for SocketCAN on CAN-USB/3 ... 
*/ + baud_x->mode = cpu_to_le16(ESD_USB_3_BAUDRATE_MODE_BTR_CANONICAL); + + if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + flags |= ESD_USB_3_BAUDRATE_FLAG_LOM; + + if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) + flags |= ESD_USB_3_BAUDRATE_FLAG_TRS; + + baud_x->nom.brp = cpu_to_le16(nom_bt->brp & (nom_btc->brp_max - 1)); + baud_x->nom.sjw = cpu_to_le16(nom_bt->sjw & (nom_btc->sjw_max - 1)); + baud_x->nom.tseg1 = cpu_to_le16((nom_bt->prop_seg + nom_bt->phase_seg1) + & (nom_btc->tseg1_max - 1)); + baud_x->nom.tseg2 = cpu_to_le16(nom_bt->phase_seg2 & (nom_btc->tseg2_max - 1)); + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + baud_x->data.brp = cpu_to_le16(data_bt->brp & (data_btc->brp_max - 1)); + baud_x->data.sjw = cpu_to_le16(data_bt->sjw & (data_btc->sjw_max - 1)); + baud_x->data.tseg1 = cpu_to_le16((data_bt->prop_seg + data_bt->phase_seg1) + & (data_btc->tseg1_max - 1)); + baud_x->data.tseg2 = cpu_to_le16(data_bt->phase_seg2 & (data_btc->tseg2_max - 1)); + flags |= ESD_USB_3_BAUDRATE_FLAG_FD; + } + + /* Currently this driver only supports the automatic TDC mode */ + baud_x->tdc.tdc_mode = ESD_USB_3_TDC_MODE_AUTO; + baud_x->tdc.ssp_offset = 0; + baud_x->tdc.ssp_shift = 0; + baud_x->tdc.tdc_filter = 0; + + baud_x->flags = cpu_to_le16(flags); + baud_x->net = priv->index; + baud_x->rsvd = 0; + + /* set len as # of 32bit words */ + msg->hdr.len = sizeof(struct esd_usb_3_set_baudrate_msg_x) / sizeof(u32); + msg->hdr.cmd = ESD_USB_CMD_SETBAUD; + + netdev_dbg(netdev, + "ctrlmode=%#x/%#x, esd-net=%u, esd-mode=%#x, esd-flags=%#x\n", + priv->can.ctrlmode, priv->can.ctrlmode_supported, + priv->index, le16_to_cpu(baud_x->mode), flags); + + err = esd_usb_send_msg(priv->usb, msg); + + kfree(msg); + return err; +} + static int esd_usb_get_berr_counter(const struct net_device *netdev, struct can_berr_counter *bec) { @@ -1019,16 +1216,32 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index) CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_BERR_REPORTING; - if (le16_to_cpu(dev->udev->descriptor.idProduct) == - ESD_USB_CANUSBM_PRODUCT_ID) + switch (le16_to_cpu(dev->udev->descriptor.idProduct)) { + case ESD_USB_CANUSB3_PRODUCT_ID: + priv->can.clock.freq = ESD_USB_3_CAN_CLOCK; + priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; + priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD; + priv->can.bittiming_const = &esd_usb_3_nom_bittiming_const; + priv->can.data_bittiming_const = &esd_usb_3_data_bittiming_const; + priv->can.do_set_bittiming = esd_usb_3_set_bittiming; + priv->can.do_set_data_bittiming = esd_usb_3_set_bittiming; + break; + + case ESD_USB_CANUSBM_PRODUCT_ID: priv->can.clock.freq = ESD_USB_M_CAN_CLOCK; - else { + priv->can.bittiming_const = &esd_usb_2_bittiming_const; + priv->can.do_set_bittiming = esd_usb_2_set_bittiming; + break; + + case ESD_USB_CANUSB2_PRODUCT_ID: + default: priv->can.clock.freq = ESD_USB_2_CAN_CLOCK; priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; + priv->can.bittiming_const = &esd_usb_2_bittiming_const; + priv->can.do_set_bittiming = esd_usb_2_set_bittiming; + break; } - priv->can.bittiming_const = &esd_usb_2_bittiming_const; - priv->can.do_set_bittiming = esd_usb_2_set_bittiming; priv->can.do_set_mode = esd_usb_set_mode; priv->can.do_get_berr_counter = esd_usb_get_berr_counter; diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c index 0c7f7505632c..5e3a72b7c469 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_core.c +++ b/drivers/net/can/usb/etas_es58x/es58x_core.c @@ -2230,6 +2230,7 @@ static 
int es58x_probe(struct usb_interface *intf, for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++) { int ret = es58x_init_netdev(es58x_dev, ch_idx); + if (ret) { es58x_free_netdevs(es58x_dev); return ret; diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h index c1ba1a4e8857..2e183bdeedd7 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_core.h +++ b/drivers/net/can/usb/etas_es58x/es58x_core.h @@ -378,13 +378,13 @@ struct es58x_sw_version { /** * struct es58x_hw_revision - Hardware revision number. - * @letter: Revision letter. + * @letter: Revision letter, an alphanumeric character. * @major: Version major number, represented on three digits. * @minor: Version minor number, represented on three digits. * * The hardware revision uses its own format: "axxx/xxx" where 'a' is - * a letter and 'x' a digit. It can be retrieved from the product - * information string. + * an alphanumeric character and 'x' a digit. It can be retrieved from + * the product information string. */ struct es58x_hw_revision { char letter; diff --git a/drivers/net/can/usb/etas_es58x/es58x_devlink.c b/drivers/net/can/usb/etas_es58x/es58x_devlink.c index 9fba29e2f57c..635edeb8f68c 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_devlink.c +++ b/drivers/net/can/usb/etas_es58x/es58x_devlink.c @@ -125,14 +125,28 @@ static int es58x_parse_hw_rev(struct es58x_device *es58x_dev, * firmware version, the bootloader version and the hardware * revision. * - * If the function fails, simply emit a log message and continue - * because product information is not critical for the driver to - * operate. + * If the function fails, set the version or revision to an invalid + * value and emit an informational message. Continue probing because the + * product information is not critical for the driver to operate. */ void es58x_parse_product_info(struct es58x_device *es58x_dev) { + static const struct es58x_sw_version sw_version_not_set = { + .major = -1, + .minor = -1, + .revision = -1, + }; + static const struct es58x_hw_revision hw_revision_not_set = { + .letter = '\0', + .major = -1, + .minor = -1, + }; char *prod_info; + es58x_dev->firmware_version = sw_version_not_set; + es58x_dev->bootloader_version = sw_version_not_set; + es58x_dev->hardware_revision = hw_revision_not_set; + prod_info = usb_cache_string(es58x_dev->udev, ES58X_PROD_INFO_IDX); if (!prod_info) { dev_warn(es58x_dev->dev, @@ -150,29 +164,36 @@ void es58x_parse_product_info(struct es58x_device *es58x_dev) } /** - * es58x_sw_version_is_set() - Check if the version is a valid number. + * es58x_sw_version_is_valid() - Check if the version is a valid number. * @sw_ver: Version number of either the firmware or the bootloader. * - * If &es58x_sw_version.major, &es58x_sw_version.minor and - * &es58x_sw_version.revision are all zero, the product string could - * not be parsed and the version number is invalid. + * If any of the software version sub-numbers do not fit on two + * digits, the version is invalid, most probably because the product + * string could not be parsed. + * + * Return: @true if the software version is valid, @false otherwise. */ -static inline bool es58x_sw_version_is_set(struct es58x_sw_version *sw_ver) +static inline bool es58x_sw_version_is_valid(struct es58x_sw_version *sw_ver) { - return sw_ver->major || sw_ver->minor || sw_ver->revision; + return sw_ver->major < 100 && sw_ver->minor < 100 && + sw_ver->revision < 100; } /** - * es58x_hw_revision_is_set() - Check if the revision is a valid number.
+ * es58x_hw_revision_is_valid() - Check if the revision is a valid number. * @hw_rev: Revision number of the hardware. * - * If &es58x_hw_revision.letter is the null character, the product - * string could not be parsed and the hardware revision number is - * invalid. + * If &es58x_hw_revision.letter is not an alphanumeric character or if + * any of the hardware revision sub-numbers do not fit on three + * digits, the revision is invalid, most probably because the product + * string could not be parsed. + * + * Return: @true if the hardware revision is valid, @false otherwise. */ -static inline bool es58x_hw_revision_is_set(struct es58x_hw_revision *hw_rev) +static inline bool es58x_hw_revision_is_valid(struct es58x_hw_revision *hw_rev) { - return hw_rev->letter != '\0'; + return isalnum(hw_rev->letter) && hw_rev->major < 1000 && + hw_rev->minor < 1000; } /** @@ -197,7 +218,7 @@ static int es58x_devlink_info_get(struct devlink *devlink, char buf[max(sizeof("xx.xx.xx"), sizeof("axxx/xxx"))]; int ret = 0; - if (es58x_sw_version_is_set(fw_ver)) { + if (es58x_sw_version_is_valid(fw_ver)) { snprintf(buf, sizeof(buf), "%02u.%02u.%02u", fw_ver->major, fw_ver->minor, fw_ver->revision); ret = devlink_info_version_running_put(req, @@ -207,7 +228,7 @@ static int es58x_devlink_info_get(struct devlink *devlink, return ret; } - if (es58x_sw_version_is_set(bl_ver)) { + if (es58x_sw_version_is_valid(bl_ver)) { snprintf(buf, sizeof(buf), "%02u.%02u.%02u", bl_ver->major, bl_ver->minor, bl_ver->revision); ret = devlink_info_version_running_put(req, @@ -217,7 +238,7 @@ static int es58x_devlink_info_get(struct devlink *devlink, return ret; } - if (es58x_hw_revision_is_set(hw_rev)) { + if (es58x_hw_revision_is_valid(hw_rev)) { snprintf(buf, sizeof(buf), "%c%03u/%03u", hw_rev->letter, hw_rev->major, hw_rev->minor); ret = devlink_info_version_fixed_put(req, diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index bd9eb066ecf1..95b0fdb602c8 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -5,6 +5,7 @@ * Copyright (C) 2013-2016 Geschwister Schneider Technologie-, * Entwicklungs- und Vertriebs UG (Haftungsbeschränkt). * Copyright (C) 2016 Hubert Denkmair + * Copyright (c) 2023 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> * * Many thanks to all socketcan devs! */ @@ -24,6 +25,7 @@ #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> +#include <linux/can/rx-offload.h> /* Device specific constants */ #define USB_GS_USB_1_VENDOR_ID 0x1d50 @@ -282,6 +284,8 @@ struct gs_host_frame { #define GS_MAX_TX_URBS 10 /* Only launch a max of GS_MAX_RX_URBS usb requests at a time. */ #define GS_MAX_RX_URBS 30 +#define GS_NAPI_WEIGHT 32 + /* Maximum number of interfaces the driver supports per device. * Current hardware only supports 3 interfaces. The future may vary.
*/ @@ -295,6 +299,7 @@ struct gs_tx_context { struct gs_can { struct can_priv can; /* must be the first member */ + struct can_rx_offload offload; struct gs_usb *parent; struct net_device *netdev; @@ -506,27 +511,64 @@ static void gs_update_state(struct gs_can *dev, struct can_frame *cf) } } -static void gs_usb_set_timestamp(struct gs_can *dev, struct sk_buff *skb, - const struct gs_host_frame *hf) +static u32 gs_usb_set_timestamp(struct gs_can *dev, struct sk_buff *skb, + const struct gs_host_frame *hf) { u32 timestamp; - if (!(dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)) - return; - if (hf->flags & GS_CAN_FLAG_FD) timestamp = le32_to_cpu(hf->canfd_ts->timestamp_us); else timestamp = le32_to_cpu(hf->classic_can_ts->timestamp_us); - gs_usb_skb_set_timestamp(dev, skb, timestamp); + if (skb) + gs_usb_skb_set_timestamp(dev, skb, timestamp); + + return timestamp; +} + +static void gs_usb_rx_offload(struct gs_can *dev, struct sk_buff *skb, + const struct gs_host_frame *hf) +{ + struct can_rx_offload *offload = &dev->offload; + int rc; + + if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) { + const u32 ts = gs_usb_set_timestamp(dev, skb, hf); + + rc = can_rx_offload_queue_timestamp(offload, skb, ts); + } else { + rc = can_rx_offload_queue_tail(offload, skb); + } + + if (rc) + dev->netdev->stats.rx_fifo_errors++; +} + +static unsigned int +gs_usb_get_echo_skb(struct gs_can *dev, struct sk_buff *skb, + const struct gs_host_frame *hf) +{ + struct can_rx_offload *offload = &dev->offload; + const u32 echo_id = hf->echo_id; + unsigned int len; + + if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) { + const u32 ts = gs_usb_set_timestamp(dev, skb, hf); + + len = can_rx_offload_get_echo_skb_queue_timestamp(offload, echo_id, + ts, NULL); + } else { + len = can_rx_offload_get_echo_skb_queue_tail(offload, echo_id, + NULL); + } - return; + return len; } static void gs_usb_receive_bulk_callback(struct urb *urb) { - struct gs_usb *usbcan = urb->context; + struct gs_usb *parent = urb->context; struct gs_can *dev; struct net_device *netdev; int rc; @@ -537,7 +579,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) struct canfd_frame *cfd; struct sk_buff *skb; - BUG_ON(!usbcan); + BUG_ON(!parent); switch (urb->status) { case 0: /* success */ @@ -554,7 +596,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) if (hf->channel >= GS_MAX_INTF) goto device_detach; - dev = usbcan->canch[hf->channel]; + dev = parent->canch[hf->channel]; netdev = dev->netdev; stats = &netdev->stats; @@ -567,7 +609,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) if (hf->echo_id == -1) { /* normal rx */ if (hf->flags & GS_CAN_FLAG_FD) { - skb = alloc_canfd_skb(dev->netdev, &cfd); + skb = alloc_canfd_skb(netdev, &cfd); if (!skb) return; @@ -580,7 +622,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) memcpy(cfd->data, hf->canfd->data, cfd->len); } else { - skb = alloc_can_skb(dev->netdev, &cf); + skb = alloc_can_skb(netdev, &cf); if (!skb) return; @@ -594,12 +636,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) gs_update_state(dev, cf); } - gs_usb_set_timestamp(dev, skb, hf); - - netdev->stats.rx_packets++; - netdev->stats.rx_bytes += hf->can_dlc; - - netif_rx(skb); + gs_usb_rx_offload(dev, skb, hf); } else { /* echo_id == hf->echo_id */ if (hf->echo_id >= GS_MAX_TX_URBS) { netdev_err(netdev, @@ -619,12 +656,8 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) } skb = dev->can.echo_skb[hf->echo_id]; - gs_usb_set_timestamp(dev, skb, hf); - - netdev->stats.tx_packets++; - 
netdev->stats.tx_bytes += can_get_echo_skb(netdev, hf->echo_id, - NULL); - + stats->tx_packets++; + stats->tx_bytes += gs_usb_get_echo_skb(dev, skb, hf); gs_free_tx_context(txc); atomic_dec(&dev->active_tx_urbs); @@ -633,6 +666,9 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) } if (hf->flags & GS_CAN_FLAG_OVERFLOW) { + stats->rx_over_errors++; + stats->rx_errors++; + skb = alloc_can_err_skb(netdev, &cf); if (!skb) goto resubmit_urb; @@ -640,25 +676,26 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) cf->can_id |= CAN_ERR_CRTL; cf->len = CAN_ERR_DLC; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - stats->rx_over_errors++; - stats->rx_errors++; - netif_rx(skb); + + gs_usb_rx_offload(dev, skb, hf); } - resubmit_urb: - usb_fill_bulk_urb(urb, usbcan->udev, - usb_rcvbulkpipe(usbcan->udev, GS_USB_ENDPOINT_IN), + can_rx_offload_irq_finish(&dev->offload); + +resubmit_urb: + usb_fill_bulk_urb(urb, parent->udev, + usb_rcvbulkpipe(parent->udev, GS_USB_ENDPOINT_IN), hf, dev->parent->hf_size_rx, - gs_usb_receive_bulk_callback, usbcan); + gs_usb_receive_bulk_callback, parent); rc = usb_submit_urb(urb, GFP_ATOMIC); /* USB failure take down all interfaces */ if (rc == -ENODEV) { - device_detach: +device_detach: for (rc = 0; rc < GS_MAX_INTF; rc++) { - if (usbcan->canch[rc]) - netif_device_detach(usbcan->canch[rc]->netdev); + if (parent->canch[rc]) + netif_device_detach(parent->canch[rc]->netdev); } } } @@ -742,10 +779,8 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, goto nomem_urb; hf = kmalloc(dev->hf_size_tx, GFP_ATOMIC); - if (!hf) { - netdev_err(netdev, "No memory left for USB buffer\n"); + if (!hf) goto nomem_hf; - } idx = txc->echo_id; @@ -818,12 +853,12 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; - badidx: +badidx: kfree(hf); - nomem_hf: +nomem_hf: usb_free_urb(urb); - nomem_urb: +nomem_urb: gs_free_tx_context(txc); dev_kfree_skb(skb); stats->tx_dropped++; @@ -860,6 +895,8 @@ static int gs_can_open(struct net_device *netdev) dev->hf_size_tx = struct_size(hf, classic_can, 1); } + can_rx_offload_enable(&dev->offload); + if (!parent->active_channels) { if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) gs_usb_timestamp_init(parent); @@ -878,8 +915,6 @@ static int gs_can_open(struct net_device *netdev) buf = kmalloc(dev->parent->hf_size_rx, GFP_KERNEL); if (!buf) { - netdev_err(netdev, - "No memory left for USB buffer\n"); rc = -ENOMEM; goto out_usb_free_urb; } @@ -902,7 +937,8 @@ static int gs_can_open(struct net_device *netdev) netif_device_detach(dev->netdev); netdev_err(netdev, - "usb_submit failed (err=%d)\n", rc); + "usb_submit_urb() failed, error %pe\n", + ERR_PTR(rc)); goto out_usb_unanchor_urb; } @@ -969,6 +1005,7 @@ out_usb_kill_anchored_urbs: gs_usb_timestamp_stop(parent); } + can_rx_offload_disable(&dev->offload); close_candev(netdev); return rc; @@ -1033,9 +1070,7 @@ static int gs_can_close(struct net_device *netdev) dev->can.state = CAN_STATE_STOPPED; /* reset the device */ - rc = gs_cmd_reset(dev); - if (rc < 0) - netdev_warn(netdev, "Couldn't shutdown device (err=%d)", rc); + gs_cmd_reset(dev); /* reset tx contexts */ for (rc = 0; rc < GS_MAX_TX_URBS; rc++) { @@ -1043,6 +1078,8 @@ static int gs_can_close(struct net_device *netdev) dev->tx_context[rc].echo_id = GS_MAX_TX_URBS; } + can_rx_offload_disable(&dev->offload); + /* close the netdev */ close_candev(netdev); @@ -1342,6 +1379,7 @@ static struct gs_can *gs_make_candev(unsigned int channel, dev->can.data_bittiming_const = &dev->data_bt_const; } + 
can_rx_offload_add_manual(netdev, &dev->offload, GS_NAPI_WEIGHT); SET_NETDEV_DEV(netdev, &intf->dev); rc = register_candev(dev->netdev); @@ -1349,12 +1387,14 @@ static struct gs_can *gs_make_candev(unsigned int channel, dev_err(&intf->dev, "Couldn't register candev for channel %d (%pe)\n", channel, ERR_PTR(rc)); - goto out_free_candev; + goto out_can_rx_offload_del; } return dev; - out_free_candev: +out_can_rx_offload_del: + can_rx_offload_del(&dev->offload); +out_free_candev: free_candev(dev->netdev); return ERR_PTR(rc); } @@ -1362,7 +1402,7 @@ static struct gs_can *gs_make_candev(unsigned int channel, static void gs_destroy_candev(struct gs_can *dev) { unregister_candev(dev->netdev); - usb_kill_anchored_urbs(&dev->tx_submitted); + can_rx_offload_del(&dev->offload); free_candev(dev->netdev); } @@ -1371,7 +1411,7 @@ static int gs_usb_probe(struct usb_interface *intf, { struct usb_device *udev = interface_to_usbdev(intf); struct gs_host_frame *hf; - struct gs_usb *dev; + struct gs_usb *parent; struct gs_host_config hconf = { .byte_order = cpu_to_le32(0x0000beef), }; @@ -1414,49 +1454,49 @@ static int gs_usb_probe(struct usb_interface *intf, return -EINVAL; } - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) + parent = kzalloc(sizeof(*parent), GFP_KERNEL); + if (!parent) return -ENOMEM; - init_usb_anchor(&dev->rx_submitted); + init_usb_anchor(&parent->rx_submitted); - usb_set_intfdata(intf, dev); - dev->udev = udev; + usb_set_intfdata(intf, parent); + parent->udev = udev; for (i = 0; i < icount; i++) { unsigned int hf_size_rx = 0; - dev->canch[i] = gs_make_candev(i, intf, &dconf); - if (IS_ERR_OR_NULL(dev->canch[i])) { + parent->canch[i] = gs_make_candev(i, intf, &dconf); + if (IS_ERR_OR_NULL(parent->canch[i])) { /* save error code to return later */ - rc = PTR_ERR(dev->canch[i]); + rc = PTR_ERR(parent->canch[i]); /* on failure destroy previously created candevs */ icount = i; for (i = 0; i < icount; i++) - gs_destroy_candev(dev->canch[i]); + gs_destroy_candev(parent->canch[i]); - usb_kill_anchored_urbs(&dev->rx_submitted); - kfree(dev); + usb_kill_anchored_urbs(&parent->rx_submitted); + kfree(parent); return rc; } - dev->canch[i]->parent = dev; + parent->canch[i]->parent = parent; /* set RX packet size based on FD and if hardware - * timestamps are supported. - */ - if (dev->canch[i]->can.ctrlmode_supported & CAN_CTRLMODE_FD) { - if (dev->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP) + * timestamps are supported. 
+ */ + if (parent->canch[i]->can.ctrlmode_supported & CAN_CTRLMODE_FD) { + if (parent->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP) hf_size_rx = struct_size(hf, canfd_ts, 1); else hf_size_rx = struct_size(hf, canfd, 1); } else { - if (dev->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP) + if (parent->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP) hf_size_rx = struct_size(hf, classic_can_ts, 1); else hf_size_rx = struct_size(hf, classic_can, 1); } - dev->hf_size_rx = max(dev->hf_size_rx, hf_size_rx); + parent->hf_size_rx = max(parent->hf_size_rx, hf_size_rx); } return 0; @@ -1464,22 +1504,21 @@ static int gs_usb_probe(struct usb_interface *intf, static void gs_usb_disconnect(struct usb_interface *intf) { - struct gs_usb *dev = usb_get_intfdata(intf); + struct gs_usb *parent = usb_get_intfdata(intf); unsigned int i; usb_set_intfdata(intf, NULL); - if (!dev) { + if (!parent) { dev_err(&intf->dev, "Disconnect (nodata)\n"); return; } for (i = 0; i < GS_MAX_INTF; i++) - if (dev->canch[i]) - gs_destroy_candev(dev->canch[i]); + if (parent->canch[i]) + gs_destroy_candev(parent->canch[i]); - usb_kill_anchored_urbs(&dev->rx_submitted); - kfree(dev); + kfree(parent); } static const struct usb_device_id gs_usb_table[] = { diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index d881e1d30183..24ad9f593a77 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -214,19 +214,6 @@ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time) } } -/* - * post received skb after having set any hw timestamp - */ -int peak_usb_netif_rx(struct sk_buff *skb, - struct peak_time_ref *time_ref, u32 ts_low) -{ - struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb); - - peak_usb_get_ts_time(time_ref, ts_low, &hwts->hwtstamp); - - return netif_rx(skb); -} - /* post received skb with native 64-bit hw timestamp */ int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high) { diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h index 980e315186cf..f6cf84bb718f 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h @@ -142,8 +142,6 @@ void peak_usb_init_time_ref(struct peak_time_ref *time_ref, void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now); void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now); void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *tv); -int peak_usb_netif_rx(struct sk_buff *skb, - struct peak_time_ref *time_ref, u32 ts_low); int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high); void peak_usb_async_complete(struct urb *urb); void peak_usb_restart_complete(struct peak_usb_device *dev); diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c index a0f7bcec719c..39a63b7313a4 100644 --- a/drivers/net/can/usb/ucan.c +++ b/drivers/net/can/usb/ucan.c @@ -284,7 +284,7 @@ struct ucan_priv { */ spinlock_t echo_skb_lock; - /* usb device information information */ + /* usb device information */ u8 intf_index; u8 in_ep_addr; u8 out_ep_addr; diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 4d3283db3a13..abe58f103043 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -30,6 +30,7 @@ #include <linux/can/error.h> #include <linux/phy/phy.h> #include <linux/pm_runtime.h> +#include <linux/reset.h> #define DRIVER_NAME 
"xilinx_can" @@ -200,6 +201,7 @@ struct xcan_devtype_data { * @can_clk: Pointer to struct clk * @devtype: Device type specific constants * @transceiver: Optional pointer to associated CAN transceiver + * @rstc: Pointer to reset control */ struct xcan_priv { struct can_priv can; @@ -218,6 +220,7 @@ struct xcan_priv { struct clk *can_clk; struct xcan_devtype_data devtype; struct phy *transceiver; + struct reset_control *rstc; }; /* CAN Bittiming constants as per Xilinx CAN specs */ @@ -1799,6 +1802,16 @@ static int xcan_probe(struct platform_device *pdev) priv->can.do_get_berr_counter = xcan_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_BERR_REPORTING; + priv->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); + if (IS_ERR(priv->rstc)) { + dev_err(&pdev->dev, "Cannot get CAN reset.\n"); + ret = PTR_ERR(priv->rstc); + goto err_free; + } + + ret = reset_control_reset(priv->rstc); + if (ret) + goto err_free; if (devtype->cantype == XAXI_CANFD) { priv->can.data_bittiming_const = @@ -1827,7 +1840,7 @@ static int xcan_probe(struct platform_device *pdev) /* Get IRQ for the device */ ret = platform_get_irq(pdev, 0); if (ret < 0) - goto err_free; + goto err_reset; ndev->irq = ret; @@ -1843,21 +1856,21 @@ static int xcan_probe(struct platform_device *pdev) if (IS_ERR(priv->can_clk)) { ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->can_clk), "device clock not found\n"); - goto err_free; + goto err_reset; } priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name); if (IS_ERR(priv->bus_clk)) { ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->bus_clk), "bus clock not found\n"); - goto err_free; + goto err_reset; } transceiver = devm_phy_optional_get(&pdev->dev, NULL); if (IS_ERR(transceiver)) { ret = PTR_ERR(transceiver); dev_err_probe(&pdev->dev, ret, "failed to get phy\n"); - goto err_free; + goto err_reset; } priv->transceiver = transceiver; @@ -1904,6 +1917,8 @@ static int xcan_probe(struct platform_device *pdev) err_disableclks: pm_runtime_put(priv->dev); pm_runtime_disable(&pdev->dev); +err_reset: + reset_control_assert(priv->rstc); err_free: free_candev(ndev); err: @@ -1920,9 +1935,11 @@ err: static void xcan_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); + struct xcan_priv *priv = netdev_priv(ndev); unregister_candev(ndev); pm_runtime_disable(&pdev->dev); + reset_control_assert(priv->rstc); free_candev(ndev); } diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 3ed5391bb18d..f8c1d73b251d 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -37,7 +37,6 @@ config NET_DSA_LANTIQ_GSWIP config NET_DSA_MT7530 tristate "MediaTek MT7530 and MT7531 Ethernet switch support" select NET_DSA_TAG_MTK - select MEDIATEK_GE_PHY imply NET_DSA_MT7530_MDIO imply NET_DSA_MT7530_MMIO help @@ -49,6 +48,7 @@ config NET_DSA_MT7530 config NET_DSA_MT7530_MDIO tristate "MediaTek MT7530 MDIO interface driver" depends on NET_DSA_MT7530 + imply MEDIATEK_GE_PHY select PCS_MTK_LYNXI help This enables support for the MediaTek MT7530 and MT7531 switch @@ -60,6 +60,7 @@ config NET_DSA_MT7530_MMIO tristate "MediaTek MT7530 MMIO interface driver" depends on NET_DSA_MT7530 depends on HAS_IOMEM + imply MEDIATEK_GE_SOC_PHY help This enables support for the built-in Ethernet switch found in the MediaTek MT7988 SoC. 
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 3464ce5e7470..0d628b35fd5c 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -757,7 +757,7 @@ int b53_configure_vlan(struct dsa_switch *ds) /* Create an untagged VLAN entry for the default PVID in case * CONFIG_VLAN_8021Q is disabled and there are no calls to - * dsa_slave_vlan_rx_add_vid() to create the default VLAN + * dsa_user_vlan_rx_add_vid() to create the default VLAN * entry. Do this only when the tagging protocol is not * DSA_TAG_PROTO_NONE */ @@ -958,7 +958,7 @@ static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port) return NULL; } - return mdiobus_get_phy(ds->slave_mii_bus, port); + return mdiobus_get_phy(ds->user_mii_bus, port); } void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset, @@ -1393,12 +1393,6 @@ static void b53_phylink_get_caps(struct dsa_switch *ds, int port, /* Get the implementation specific capabilities */ if (dev->ops->phylink_get_caps) dev->ops->phylink_get_caps(dev, port, config); - - /* This driver does not make use of the speed, duplex, pause or the - * advertisement in its mac_config, so it is safe to mark this driver - * as non-legacy. - */ - config->legacy_pre_march2020 = false; } static struct phylink_pcs *b53_phylink_mac_select_pcs(struct dsa_switch *ds, diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c index 8b422b298cd5..897e5e8b3d69 100644 --- a/drivers/net/dsa/b53/b53_mdio.c +++ b/drivers/net/dsa/b53/b53_mdio.c @@ -19,6 +19,7 @@ #include <linux/kernel.h> #include <linux/phy.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/delay.h> #include <linux/brcmphy.h> #include <linux/rtnetlink.h> @@ -328,7 +329,7 @@ static int b53_mdio_probe(struct mdio_device *mdiodev) * layer setup */ if (of_machine_is_compatible("brcm,bcm7445d0") && - strcmp(mdiodev->bus->name, "sf2 slave mii")) + strcmp(mdiodev->bus->name, "sf2 user mii")) return -EPROBE_DEFER; dev = b53_switch_alloc(&mdiodev->dev, &b53_mdio_ops, mdiodev->bus); diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c index 5db1ed26f03a..3a89349dc918 100644 --- a/drivers/net/dsa/b53/b53_mmap.c +++ b/drivers/net/dsa/b53/b53_mmap.c @@ -19,6 +19,7 @@ #include <linux/bits.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/platform_data/b53.h> @@ -323,14 +324,12 @@ static int b53_mmap_probe(struct platform_device *pdev) return b53_switch_register(dev); } -static int b53_mmap_remove(struct platform_device *pdev) +static void b53_mmap_remove(struct platform_device *pdev) { struct b53_device *dev = platform_get_drvdata(pdev); if (dev) b53_switch_remove(dev); - - return 0; } static void b53_mmap_shutdown(struct platform_device *pdev) @@ -371,7 +370,7 @@ MODULE_DEVICE_TABLE(of, b53_mmap_of_table); static struct platform_driver b53_mmap_driver = { .probe = b53_mmap_probe, - .remove = b53_mmap_remove, + .remove_new = b53_mmap_remove, .shutdown = b53_mmap_shutdown, .driver = { .name = "b53-switch", diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c index b0ccebcd3ffa..3f8a491ce885 100644 --- a/drivers/net/dsa/b53/b53_serdes.c +++ b/drivers/net/dsa/b53/b53_serdes.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Northstar Plus switch SerDes/SGMII PHY main logic * diff --git 
a/drivers/net/dsa/b53/b53_serdes.h b/drivers/net/dsa/b53/b53_serdes.h index ef81f5da5f81..3d367c4df4d9 100644 --- a/drivers/net/dsa/b53/b53_serdes.h +++ b/drivers/net/dsa/b53/b53_serdes.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Northstar Plus switch SerDes/SGMII PHY definitions * diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c index bcb44034404d..f3f95332ff17 100644 --- a/drivers/net/dsa/b53/b53_srab.c +++ b/drivers/net/dsa/b53/b53_srab.c @@ -657,17 +657,15 @@ static int b53_srab_probe(struct platform_device *pdev) return b53_switch_register(dev); } -static int b53_srab_remove(struct platform_device *pdev) +static void b53_srab_remove(struct platform_device *pdev) { struct b53_device *dev = platform_get_drvdata(pdev); if (!dev) - return 0; + return; b53_srab_intr_set(dev->priv, false); b53_switch_remove(dev); - - return 0; } static void b53_srab_shutdown(struct platform_device *pdev) @@ -684,7 +682,7 @@ static void b53_srab_shutdown(struct platform_device *pdev) static struct platform_driver b53_srab_driver = { .probe = b53_srab_probe, - .remove = b53_srab_remove, + .remove_new = b53_srab_remove, .shutdown = b53_srab_shutdown, .driver = { .name = "b53-srab-switch", diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 72374b066f64..cadee5505c29 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -617,26 +617,25 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio"); priv->master_mii_bus = of_mdio_find_bus(dn); if (!priv->master_mii_bus) { - of_node_put(dn); - return -EPROBE_DEFER; + err = -EPROBE_DEFER; + goto err_of_node_put; } - get_device(&priv->master_mii_bus->dev); priv->master_mii_dn = dn; - priv->slave_mii_bus = mdiobus_alloc(); - if (!priv->slave_mii_bus) { - of_node_put(dn); - return -ENOMEM; + priv->user_mii_bus = mdiobus_alloc(); + if (!priv->user_mii_bus) { + err = -ENOMEM; + goto err_put_master_mii_bus_dev; } - priv->slave_mii_bus->priv = priv; - priv->slave_mii_bus->name = "sf2 slave mii"; - priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read; - priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write; - snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d", + priv->user_mii_bus->priv = priv; + priv->user_mii_bus->name = "sf2 user mii"; + priv->user_mii_bus->read = bcm_sf2_sw_mdio_read; + priv->user_mii_bus->write = bcm_sf2_sw_mdio_write; + snprintf(priv->user_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d", index++); - priv->slave_mii_bus->dev.of_node = dn; + priv->user_mii_bus->dev.of_node = dn; /* Include the pseudo-PHY address to divert reads towards our * workaround. 
This is only required for 7445D0, since 7445E0 @@ -654,9 +653,9 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) priv->indir_phy_mask = 0; ds->phys_mii_mask = priv->indir_phy_mask; - ds->slave_mii_bus = priv->slave_mii_bus; - priv->slave_mii_bus->parent = ds->dev->parent; - priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask; + ds->user_mii_bus = priv->user_mii_bus; + priv->user_mii_bus->parent = ds->dev->parent; + priv->user_mii_bus->phy_mask = ~priv->indir_phy_mask; /* We need to make sure that of_phy_connect() will not work by * removing the 'phandle' and 'linux,phandle' properties and @@ -683,20 +682,26 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) phy_device_remove(phydev); } - err = mdiobus_register(priv->slave_mii_bus); - if (err && dn) { - mdiobus_free(priv->slave_mii_bus); - of_node_put(dn); - } + err = mdiobus_register(priv->user_mii_bus); + if (err && dn) + goto err_free_user_mii_bus; + + return 0; +err_free_user_mii_bus: + mdiobus_free(priv->user_mii_bus); +err_put_master_mii_bus_dev: + put_device(&priv->master_mii_bus->dev); +err_of_node_put: + of_node_put(dn); return err; } static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv) { - mdiobus_unregister(priv->slave_mii_bus); - mdiobus_free(priv->slave_mii_bus); - of_node_put(priv->master_mii_dn); + mdiobus_unregister(priv->user_mii_bus); + mdiobus_free(priv->user_mii_bus); + put_device(&priv->master_mii_bus->dev); } static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) @@ -909,7 +914,7 @@ static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port, * state machine and make it go in PHY_FORCING state instead. */ if (!status->link) - netif_carrier_off(dsa_to_port(ds, port)->slave); + netif_carrier_off(dsa_to_port(ds, port)->user); status->duplex = DUPLEX_FULL; } else { status->link = true; @@ -983,7 +988,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds) static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, struct ethtool_wolinfo *wol) { - struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port)); + struct net_device *p = dsa_port_to_conduit(dsa_to_port(ds, port)); struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_wolinfo pwol = { }; @@ -1007,7 +1012,7 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, struct ethtool_wolinfo *wol) { - struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port)); + struct net_device *p = dsa_port_to_conduit(dsa_to_port(ds, port)); struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; struct ethtool_wolinfo pwol = { }; @@ -1537,12 +1542,12 @@ out_clk: return ret; } -static int bcm_sf2_sw_remove(struct platform_device *pdev) +static void bcm_sf2_sw_remove(struct platform_device *pdev) { struct bcm_sf2_priv *priv = platform_get_drvdata(pdev); if (!priv) - return 0; + return; priv->wol_ports_mask = 0; /* Disable interrupts */ @@ -1554,8 +1559,6 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev) clk_disable_unprepare(priv->clk); if (priv->type == BCM7278_DEVICE_ID) reset_control_assert(priv->rcdev); - - return 0; } static void bcm_sf2_sw_shutdown(struct platform_device *pdev) @@ -1601,7 +1604,7 @@ static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops, static struct platform_driver bcm_sf2_driver = { .probe = bcm_sf2_sw_probe, - .remove = bcm_sf2_sw_remove, + .remove_new = bcm_sf2_sw_remove, .shutdown = bcm_sf2_sw_shutdown, .driver = { .name = "brcm-sf2", diff --git 
a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index 00afc94ce522..424f896b5a6f 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -108,7 +108,7 @@ struct bcm_sf2_priv { /* Master and slave MDIO bus controller */ unsigned int indir_phy_mask; struct device_node *master_mii_dn; - struct mii_bus *slave_mii_bus; + struct mii_bus *user_mii_bus; struct mii_bus *master_mii_bus; /* Bitmask of ports needing BRCM tags */ diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index c4010b7bf089..c88ee3dd4299 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -1102,7 +1102,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv, int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port, struct ethtool_rxnfc *nfc, u32 *rule_locs) { - struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port)); + struct net_device *p = dsa_port_to_conduit(dsa_to_port(ds, port)); struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); int ret = 0; @@ -1145,7 +1145,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port, int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port, struct ethtool_rxnfc *nfc) { - struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port)); + struct net_device *p = dsa_port_to_conduit(dsa_to_port(ds, port)); struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); int ret = 0; diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index 5b139f2206b6..c70ed67cc188 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -277,6 +277,14 @@ static int dsa_loop_port_max_mtu(struct dsa_switch *ds, int port) return ETH_MAX_MTU; } +static void dsa_loop_phylink_get_caps(struct dsa_switch *dsa, int port, + struct phylink_config *config) +{ + bitmap_fill(config->supported_interfaces, PHY_INTERFACE_MODE_MAX); + __clear_bit(PHY_INTERFACE_MODE_NA, config->supported_interfaces); + config->mac_capabilities = ~0; +} + static const struct dsa_switch_ops dsa_loop_driver = { .get_tag_protocol = dsa_loop_get_protocol, .setup = dsa_loop_setup, @@ -295,6 +303,7 @@ static const struct dsa_switch_ops dsa_loop_driver = { .port_vlan_del = dsa_loop_port_vlan_del, .port_change_mtu = dsa_loop_port_change_mtu, .port_max_mtu = dsa_loop_port_max_mtu, + .phylink_get_caps = dsa_loop_phylink_get_caps, }; static int dsa_loop_drv_probe(struct mdio_device *mdiodev) diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c index af50001ccdd4..beda1e9d350f 100644 --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: (GPL-2.0 or MIT) +// SPDX-License-Identifier: (GPL-2.0 OR MIT) /* * DSA driver for: * Hirschmann Hellcreek TSN switch. 
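Several drivers in this range (b53_mmap, b53_srab, bcm_sf2, and later hellcreek and gswip) receive the same mechanical conversion: the platform .remove callback becomes void and is registered through .remove_new, since the driver core ignores the old int return value anyway. A minimal sketch of the converted shape, with hypothetical demo_* names standing in for the per-driver pieces:

#include <linux/module.h>
#include <linux/platform_device.h>

struct demo_priv {
	int dummy; /* stand-in for real driver state */
};

static int demo_probe(struct platform_device *pdev)
{
	/* allocation of demo_priv and platform_set_drvdata() omitted */
	return 0;
}

static void demo_remove(struct platform_device *pdev)
{
	struct demo_priv *priv = platform_get_drvdata(pdev);

	/* Guard against a probe that failed half-way; with a void
	 * return there is no error code to propagate anyway.
	 */
	if (!priv)
		return;

	/* per-driver teardown would go here */
}

static struct platform_driver demo_driver = {
	.probe      = demo_probe,
	.remove_new = demo_remove,
	.driver = {
		.name = "demo-switch",
	},
};
module_platform_driver(demo_driver);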
@@ -11,7 +11,6 @@ #include <linux/module.h> #include <linux/device.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_mdio.h> #include <linux/platform_device.h> #include <linux/bitops.h> @@ -2061,18 +2060,16 @@ err_ptp_setup: return ret; } -static int hellcreek_remove(struct platform_device *pdev) +static void hellcreek_remove(struct platform_device *pdev) { struct hellcreek *hellcreek = platform_get_drvdata(pdev); if (!hellcreek) - return 0; + return; hellcreek_hwtstamp_free(hellcreek); hellcreek_ptp_free(hellcreek); dsa_unregister_switch(hellcreek->ds); - - return 0; } static void hellcreek_shutdown(struct platform_device *pdev) @@ -2108,7 +2105,7 @@ MODULE_DEVICE_TABLE(of, hellcreek_of_match); static struct platform_driver hellcreek_driver = { .probe = hellcreek_probe, - .remove = hellcreek_remove, + .remove_new = hellcreek_remove, .shutdown = hellcreek_shutdown, .driver = { .name = "hellcreek", diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h index 4a678f7d61ae..6874cb9dc361 100644 --- a/drivers/net/dsa/hirschmann/hellcreek.h +++ b/drivers/net/dsa/hirschmann/hellcreek.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 or MIT) */ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ /* * DSA driver for: * Hirschmann Hellcreek TSN switch. diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c index ffd06cf8c44f..bd7aacc71a63 100644 --- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c +++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c @@ -298,17 +298,10 @@ static void hellcreek_get_rxts(struct hellcreek *hellcreek, struct sk_buff_head received; unsigned long flags; - /* The latched timestamp belongs to one of the received frames. */ + /* Construct Rx timestamps for all received PTP packets. */ __skb_queue_head_init(&received); - - /* Lock & disable interrupts */ spin_lock_irqsave(&rxq->lock, flags); - - /* Add the reception queue "rxq" to the "received" queue an reintialize - * "rxq". 
From now on, we deal with "received" not with "rxq" - */ skb_queue_splice_tail_init(rxq, &received); - spin_unlock_irqrestore(&rxq->lock, flags); for (; skb; skb = __skb_dequeue(&received)) { diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.c b/drivers/net/dsa/hirschmann/hellcreek_ptp.c index 3e44ccb7db84..5249a1c2a80b 100644 --- a/drivers/net/dsa/hirschmann/hellcreek_ptp.c +++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.c @@ -9,6 +9,7 @@ * Kurt Kanzenbach <kurt@linutronix.de> */ +#include <linux/of.h> #include <linux/ptp_clock_kernel.h> #include "hellcreek.h" #include "hellcreek_ptp.h" diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index ff76444057d2..fcb20eac332a 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -8,6 +8,7 @@ #include <linux/regmap.h> #include <linux/mutex.h> #include <linux/mii.h> +#include <linux/of.h> #include <linux/phy.h> #include <linux/if_bridge.h> #include <linux/if_vlan.h> @@ -1083,7 +1084,7 @@ static int lan9303_port_enable(struct dsa_switch *ds, int port, if (!dsa_port_is_user(dp)) return 0; - vlan_vid_add(dsa_port_to_master(dp), htons(ETH_P_8021Q), port); + vlan_vid_add(dsa_port_to_conduit(dp), htons(ETH_P_8021Q), port); return lan9303_enable_processing_port(chip, port); } @@ -1096,7 +1097,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port) if (!dsa_port_is_user(dp)) return; - vlan_vid_del(dsa_port_to_master(dp), htons(ETH_P_8021Q), port); + vlan_vid_del(dsa_port_to_conduit(dp), htons(ETH_P_8021Q), port); lan9303_disable_processing_port(chip, port); lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN); @@ -1290,12 +1291,6 @@ static void lan9303_phylink_get_caps(struct dsa_switch *ds, int port, __set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces); } - - /* This driver does not make use of the speed, duplex, pause or the - * advertisement in its mac_config, so it is safe to mark this driver - * as non-legacy. 
- */ - config->legacy_pre_march2020 = false; } static void lan9303_phylink_mac_link_up(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c index d8ab2b77d201..167a86f39f27 100644 --- a/drivers/net/dsa/lan9303_mdio.c +++ b/drivers/net/dsa/lan9303_mdio.c @@ -32,7 +32,7 @@ static int lan9303_mdio_write(void *ctx, uint32_t reg, uint32_t val) struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx; reg <<= 2; /* reg num to offset */ - mutex_lock(&sw_dev->device->bus->mdio_lock); + mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED); lan9303_mdio_real_write(sw_dev->device, reg, val & 0xffff); lan9303_mdio_real_write(sw_dev->device, reg + 2, (val >> 16) & 0xffff); mutex_unlock(&sw_dev->device->bus->mdio_lock); @@ -50,7 +50,7 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val) struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx; reg <<= 2; /* reg num to offset */ - mutex_lock(&sw_dev->device->bus->mdio_lock); + mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED); *val = lan9303_mdio_real_read(sw_dev->device, reg); *val |= (lan9303_mdio_real_read(sw_dev->device, reg + 2) << 16); mutex_unlock(&sw_dev->device->bus->mdio_lock); diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 3c76a1a14aee..9c185c9f0963 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -510,22 +510,22 @@ static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np) struct dsa_switch *ds = priv->ds; int err; - ds->slave_mii_bus = mdiobus_alloc(); - if (!ds->slave_mii_bus) + ds->user_mii_bus = mdiobus_alloc(); + if (!ds->user_mii_bus) return -ENOMEM; - ds->slave_mii_bus->priv = priv; - ds->slave_mii_bus->read = gswip_mdio_rd; - ds->slave_mii_bus->write = gswip_mdio_wr; - ds->slave_mii_bus->name = "lantiq,xrx200-mdio"; - snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii", + ds->user_mii_bus->priv = priv; + ds->user_mii_bus->read = gswip_mdio_rd; + ds->user_mii_bus->write = gswip_mdio_wr; + ds->user_mii_bus->name = "lantiq,xrx200-mdio"; + snprintf(ds->user_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev)); - ds->slave_mii_bus->parent = priv->dev; - ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask; + ds->user_mii_bus->parent = priv->dev; + ds->user_mii_bus->phy_mask = ~ds->phys_mii_mask; - err = of_mdiobus_register(ds->slave_mii_bus, mdio_np); + err = of_mdiobus_register(ds->user_mii_bus, mdio_np); if (err) - mdiobus_free(ds->slave_mii_bus); + mdiobus_free(ds->user_mii_bus); return err; } @@ -1759,8 +1759,7 @@ static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset, return; for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) - strncpy(data + i * ETH_GSTRING_LEN, gswip_rmon_cnt[i].name, - ETH_GSTRING_LEN); + ethtool_sprintf(&data, "%s", gswip_rmon_cnt[i].name); } static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table, @@ -2197,8 +2196,8 @@ disable_switch: dsa_unregister_switch(priv->ds); mdio_bus: if (mdio_np) { - mdiobus_unregister(priv->ds->slave_mii_bus); - mdiobus_free(priv->ds->slave_mii_bus); + mdiobus_unregister(priv->ds->user_mii_bus); + mdiobus_free(priv->ds->user_mii_bus); } put_mdio_node: of_node_put(mdio_np); @@ -2207,29 +2206,27 @@ put_mdio_node: return err; } -static int gswip_remove(struct platform_device *pdev) +static void gswip_remove(struct platform_device *pdev) { struct gswip_priv *priv = platform_get_drvdata(pdev); int i; if (!priv) - return 0; + return; /* disable the 
switch */ gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB); dsa_unregister_switch(priv->ds); - if (priv->ds->slave_mii_bus) { - mdiobus_unregister(priv->ds->slave_mii_bus); - of_node_put(priv->ds->slave_mii_bus->dev.of_node); - mdiobus_free(priv->ds->slave_mii_bus); + if (priv->ds->user_mii_bus) { + mdiobus_unregister(priv->ds->user_mii_bus); + of_node_put(priv->ds->user_mii_bus->dev.of_node); + mdiobus_free(priv->ds->user_mii_bus); } for (i = 0; i < priv->num_gphy_fw; i++) gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]); - - return 0; } static void gswip_shutdown(struct platform_device *pdev) @@ -2266,7 +2263,7 @@ MODULE_DEVICE_TABLE(of, gswip_of_match); static struct platform_driver gswip_driver = { .probe = gswip_probe, - .remove = gswip_remove, + .remove_new = gswip_remove, .shutdown = gswip_shutdown, .driver = { .name = "gswip", diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile index 48360cc9fc68..49459a50dbc8 100644 --- a/drivers/net/dsa/microchip/Makefile +++ b/drivers/net/dsa/microchip/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_switch.o ksz_switch-objs := ksz_common.o -ksz_switch-objs += ksz9477.o +ksz_switch-objs += ksz9477.o ksz9477_acl.o ksz9477_tc_flower.o ksz_switch-objs += ksz8795.o ksz_switch-objs += lan937x_main.o diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h index e68465fdf6b9..4cea811e73ac 100644 --- a/drivers/net/dsa/microchip/ksz8.h +++ b/drivers/net/dsa/microchip/ksz8.h @@ -48,13 +48,11 @@ int ksz8_port_mirror_add(struct ksz_device *dev, int port, bool ingress, struct netlink_ext_ack *extack); void ksz8_port_mirror_del(struct ksz_device *dev, int port, struct dsa_mall_mirror_tc_entry *mirror); -int ksz8_get_stp_reg(void); void ksz8_get_caps(struct ksz_device *dev, int port, struct phylink_config *config); void ksz8_config_cpu_port(struct dsa_switch *ds); int ksz8_enable_stp_addr(struct ksz_device *dev); int ksz8_reset_switch(struct ksz_device *dev); -int ksz8_switch_detect(struct ksz_device *dev); int ksz8_switch_init(struct ksz_device *dev); void ksz8_switch_exit(struct ksz_device *dev); int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu); diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index 91aba470fb2f..4bf4d67557dc 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -632,6 +632,50 @@ static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan) ksz8_w_table(dev, TABLE_VLAN, addr, buf); } +/** + * ksz8_r_phy_ctrl - Translates and reads from the SMI interface to a MIIM PHY + * Control register (Reg. 31). + * @dev: The KSZ device instance. + * @port: The port number to be read. + * @val: The value read from the SMI interface. + * + * This function reads the SMI interface and translates the hardware register + * bit values into their corresponding control settings for a MIIM PHY Control + * register. + * + * Return: 0 on success, error code on failure. 
+ */ +static int ksz8_r_phy_ctrl(struct ksz_device *dev, int port, u16 *val) +{ + const u16 *regs = dev->info->regs; + u8 reg_val; + int ret; + + *val = 0; + + ret = ksz_pread8(dev, port, regs[P_LINK_STATUS], &reg_val); + if (ret < 0) + return ret; + + if (reg_val & PORT_MDIX_STATUS) + *val |= KSZ886X_CTRL_MDIX_STAT; + + ret = ksz_pread8(dev, port, REG_PORT_LINK_MD_CTRL, &reg_val); + if (ret < 0) + return ret; + + if (reg_val & PORT_FORCE_LINK) + *val |= KSZ886X_CTRL_FORCE_LINK; + + if (reg_val & PORT_POWER_SAVING) + *val |= KSZ886X_CTRL_PWRSAVE; + + if (reg_val & PORT_PHY_REMOTE_LOOPBACK) + *val |= KSZ886X_CTRL_REMOTE_LOOPBACK; + + return 0; +} + int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val) { u8 restart, speed, ctrl, link; @@ -769,12 +813,10 @@ int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val) FIELD_GET(PORT_CABLE_FAULT_COUNTER_L, val2)); break; case PHY_REG_PHY_CTRL: - ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link); + ret = ksz8_r_phy_ctrl(dev, p, &data); if (ret) return ret; - if (link & PORT_MDIX_STATUS) - data |= KSZ886X_CTRL_MDIX_STAT; break; default: processed = false; @@ -786,6 +828,38 @@ int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val) return 0; } +/** + * ksz8_w_phy_ctrl - Translates and writes to the SMI interface from a MIIM PHY + * Control register (Reg. 31). + * @dev: The KSZ device instance. + * @port: The port number to be configured. + * @val: The register value to be written. + * + * This function translates control settings from a MIIM PHY Control register + * into their corresponding hardware register bit values for the SMI + * interface. + * + * Return: 0 on success, error code on failure. + */ +static int ksz8_w_phy_ctrl(struct ksz_device *dev, int port, u16 val) +{ + u8 reg_val = 0; + int ret; + + if (val & KSZ886X_CTRL_FORCE_LINK) + reg_val |= PORT_FORCE_LINK; + + if (val & KSZ886X_CTRL_PWRSAVE) + reg_val |= PORT_POWER_SAVING; + + if (val & KSZ886X_CTRL_REMOTE_LOOPBACK) + reg_val |= PORT_PHY_REMOTE_LOOPBACK; + + ret = ksz_prmw8(dev, port, REG_PORT_LINK_MD_CTRL, PORT_FORCE_LINK | + PORT_POWER_SAVING | PORT_PHY_REMOTE_LOOPBACK, reg_val); + return ret; +} + int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val) { u8 restart, speed, ctrl, data; @@ -926,6 +1000,12 @@ int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val) if (val & PHY_START_CABLE_DIAG) ksz_port_cfg(dev, p, REG_PORT_LINK_MD_CTRL, PORT_START_CABLE_DIAG, true); break; + + case PHY_REG_PHY_CTRL: + ret = ksz8_w_phy_ctrl(dev, p, val); + if (ret) + return ret; + break; default: break; } diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h index 7a57c6088f80..3c9dae53e4d8 100644 --- a/drivers/net/dsa/microchip/ksz8795_reg.h +++ b/drivers/net/dsa/microchip/ksz8795_reg.h @@ -323,13 +323,6 @@ ((addr) + REG_PORT_1_CTRL_0 + (port) * \ (REG_PORT_2_CTRL_0 - REG_PORT_1_CTRL_0)) -#define REG_SW_MAC_ADDR_0 0x68 -#define REG_SW_MAC_ADDR_1 0x69 -#define REG_SW_MAC_ADDR_2 0x6A -#define REG_SW_MAC_ADDR_3 0x6B -#define REG_SW_MAC_ADDR_4 0x6C -#define REG_SW_MAC_ADDR_5 0x6D - #define TABLE_EXT_SELECT_S 5 #define TABLE_EEE_V 1 #define TABLE_ACL_V 2 @@ -442,20 +435,6 @@ #define TOS_PRIO_M KS_PRIO_M #define TOS_PRIO_S KS_PRIO_S -#define REG_SW_CTRL_20 0xA3 - -#define SW_GMII_DRIVE_STRENGTH_S 4 -#define SW_DRIVE_STRENGTH_M 0x7 -#define SW_DRIVE_STRENGTH_2MA 0 -#define SW_DRIVE_STRENGTH_4MA 1 -#define SW_DRIVE_STRENGTH_8MA 2 -#define SW_DRIVE_STRENGTH_12MA 3 -#define SW_DRIVE_STRENGTH_16MA 4 -#define 
SW_DRIVE_STRENGTH_20MA 5 -#define SW_DRIVE_STRENGTH_24MA 6 -#define SW_DRIVE_STRENGTH_28MA 7 -#define SW_MII_DRIVE_STRENGTH_S 0 - #define REG_SW_CTRL_21 0xA4 #define SW_IPV6_MLD_OPTION BIT(3) diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c index fd6e2e69a42a..5711a59e2ac9 100644 --- a/drivers/net/dsa/microchip/ksz8863_smi.c +++ b/drivers/net/dsa/microchip/ksz8863_smi.c @@ -5,6 +5,9 @@ * Copyright (C) 2019 Pengutronix, Michael Grzeschik <kernel@pengutronix.de> */ +#include <linux/mod_devicetable.h> +#include <linux/property.h> + #include "ksz8.h" #include "ksz_common.h" diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 83b7f2d5c1ea..7f745628c84d 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -56,6 +56,187 @@ int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu) REG_SW_MTU_MASK, frame_size); } +/** + * ksz9477_handle_wake_reason - Handle wake reason on a specified port. + * @dev: The device structure. + * @port: The port number. + * + * This function reads the PME (Power Management Event) status register of a + * specified port to determine the wake reason. If there is no wake event, it + * returns early. Otherwise, it logs the wake reason which could be due to a + * "Magic Packet", "Link Up", or "Energy Detect" event. The PME status register + * is then cleared to acknowledge the handling of the wake event. + * + * Return: 0 on success, or an error code on failure. + */ +static int ksz9477_handle_wake_reason(struct ksz_device *dev, int port) +{ + u8 pme_status; + int ret; + + ret = ksz_pread8(dev, port, REG_PORT_PME_STATUS, &pme_status); + if (ret) + return ret; + + if (!pme_status) + return 0; + + dev_dbg(dev->dev, "Wake event on port %d due to:%s%s%s\n", port, + pme_status & PME_WOL_MAGICPKT ? " \"Magic Packet\"" : "", + pme_status & PME_WOL_LINKUP ? " \"Link Up\"" : "", + pme_status & PME_WOL_ENERGY ? " \"Energy detect\"" : ""); + + return ksz_pwrite8(dev, port, REG_PORT_PME_STATUS, pme_status); +} + +/** + * ksz9477_get_wol - Get Wake-on-LAN settings for a specified port. + * @dev: The device structure. + * @port: The port number. + * @wol: Pointer to ethtool Wake-on-LAN settings structure. + * + * This function checks the PME Pin Control Register to see if PME Pin Output + * Enable is set, indicating PME is enabled. If enabled, it sets the supported + * and active WoL flags. + */ +void ksz9477_get_wol(struct ksz_device *dev, int port, + struct ethtool_wolinfo *wol) +{ + u8 pme_ctrl; + int ret; + + if (!dev->wakeup_source) + return; + + wol->supported = WAKE_PHY; + + /* Check if the current MAC address on this port can be set + * as global for WAKE_MAGIC support. The result may vary + * dynamically based on other ports configurations. + */ + if (ksz_is_port_mac_global_usable(dev->ds, port)) + wol->supported |= WAKE_MAGIC; + + ret = ksz_pread8(dev, port, REG_PORT_PME_CTRL, &pme_ctrl); + if (ret) + return; + + if (pme_ctrl & PME_WOL_MAGICPKT) + wol->wolopts |= WAKE_MAGIC; + if (pme_ctrl & (PME_WOL_LINKUP | PME_WOL_ENERGY)) + wol->wolopts |= WAKE_PHY; +} + +/** + * ksz9477_set_wol - Set Wake-on-LAN settings for a specified port. + * @dev: The device structure. + * @port: The port number. + * @wol: Pointer to ethtool Wake-on-LAN settings structure. + * + * This function configures Wake-on-LAN (WoL) settings for a specified port. 
+ * It validates the provided WoL options, checks if PME is enabled via the + * switch's PME Pin Control Register, clears any previous wake reasons, + * and sets the Magic Packet flag in the port's PME control register if + * specified. + * + * Return: 0 on success, or other error codes on failure. + */ +int ksz9477_set_wol(struct ksz_device *dev, int port, + struct ethtool_wolinfo *wol) +{ + u8 pme_ctrl = 0, pme_ctrl_old = 0; + bool magic_switched_off; + bool magic_switched_on; + int ret; + + if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) + return -EINVAL; + + if (!dev->wakeup_source) + return -EOPNOTSUPP; + + ret = ksz9477_handle_wake_reason(dev, port); + if (ret) + return ret; + + if (wol->wolopts & WAKE_MAGIC) + pme_ctrl |= PME_WOL_MAGICPKT; + if (wol->wolopts & WAKE_PHY) + pme_ctrl |= PME_WOL_LINKUP | PME_WOL_ENERGY; + + ret = ksz_pread8(dev, port, REG_PORT_PME_CTRL, &pme_ctrl_old); + if (ret) + return ret; + + if (pme_ctrl_old == pme_ctrl) + return 0; + + magic_switched_off = (pme_ctrl_old & PME_WOL_MAGICPKT) && + !(pme_ctrl & PME_WOL_MAGICPKT); + magic_switched_on = !(pme_ctrl_old & PME_WOL_MAGICPKT) && + (pme_ctrl & PME_WOL_MAGICPKT); + + /* To keep the reference count of the MAC address balanced, do this + * operation only on a change of the WOL settings. + */ + if (magic_switched_on) { + ret = ksz_switch_macaddr_get(dev->ds, port, NULL); + if (ret) + return ret; + } else if (magic_switched_off) { + ksz_switch_macaddr_put(dev->ds); + } + + ret = ksz_pwrite8(dev, port, REG_PORT_PME_CTRL, pme_ctrl); + if (ret) { + if (magic_switched_on) + ksz_switch_macaddr_put(dev->ds); + return ret; + } + + return 0; +} + +/** + * ksz9477_wol_pre_shutdown - Prepares the switch device for shutdown while + * considering Wake-on-LAN (WoL) settings. + * @dev: The switch device structure. + * @wol_enabled: Pointer to a boolean which will be set to true if WoL is + * enabled on any port. + * + * This function prepares the switch device for a safe shutdown while taking + * into account the Wake-on-LAN (WoL) settings on the user ports. It updates + * the wol_enabled flag accordingly to reflect whether WoL is active on any + * port. + */ +void ksz9477_wol_pre_shutdown(struct ksz_device *dev, bool *wol_enabled) +{ + struct dsa_port *dp; + int ret; + + *wol_enabled = false; + + if (!dev->wakeup_source) + return; + + dsa_switch_for_each_user_port(dp, dev->ds) { + u8 pme_ctrl = 0; + + ret = ksz_pread8(dev, dp->index, REG_PORT_PME_CTRL, &pme_ctrl); + if (!ret && pme_ctrl) + *wol_enabled = true; + + /* make sure there are no pending wake events which would + * prevent the device from going to sleep/shutdown. + */ + ksz9477_handle_wake_reason(dev, dp->index); + } + + /* Now we are safe to enable the PME pin. */ + if (*wol_enabled) + ksz_write8(dev, REG_SW_PME_CTRL, PME_ENABLE); +} + static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev) { unsigned int val; @@ -1004,6 +1185,16 @@ void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port) /* clear pending interrupts */ if (dev->info->internal_phy[port]) ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16); + + ksz9477_port_acl_init(dev, port); + + /* clear pending wake flags */ + ksz9477_handle_wake_reason(dev, port); + + /* Disable all WoL options by default. Otherwise + * ksz_switch_macaddr_get/put logic will not work properly. 
+ */ + ksz_pwrite8(dev, port, REG_PORT_PME_CTRL, 0); } void ksz9477_config_cpu_port(struct dsa_switch *ds) @@ -1126,6 +1317,12 @@ int ksz9477_setup(struct dsa_switch *ds) /* enable global MIB counter freeze function */ ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true); + /* Make sure PME (WoL) is not enabled. If requested, it will be + * enabled by ksz9477_wol_pre_shutdown(). Otherwise, some PMICs do not + * like PME event changes before shutdown. + */ + ksz_write8(dev, REG_SW_PME_CTRL, 0); + return 0; } @@ -1141,6 +1338,83 @@ int ksz9477_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val) return ksz_pwrite16(dev, port, REG_PORT_MTI_CREDIT_INCREMENT, val); } +/* The KSZ9477 provides the following HW features to accelerate + * HSR frame handling: + * + * 1. TX PACKET DUPLICATION FROM HOST TO SWITCH + * 2. RX PACKET DUPLICATION DISCARDING + * 3. PREVENTING PACKET LOOP IN THE RING BY SELF-ADDRESS FILTERING + * + * Only the feature from point 1 has a NETIF_F* flag available. + * + * The features from points 2 and 3 are "best effort" - i.e. they will + * work correctly most of the time, but some frames may not be caught. + * More specifically, there is a race condition in hardware such that, + * when duplicate packets are received on member ports very close in + * time to each other, the hardware fails to detect that they are + * duplicates. + * + * Hence, the SW needs to handle those special cases. However, the + * speedup is considerable when the above features are used. + * + * Moreover, the NETIF_F_HW_HSR_FWD feature is also enabled, as HSR frames + * can be forwarded in the switch fabric between HSR ports. + */ +#define KSZ9477_SUPPORTED_HSR_FEATURES (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_FWD) + +void ksz9477_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr) +{ + struct ksz_device *dev = ds->priv; + struct net_device *user; + struct dsa_port *hsr_dp; + u8 data, hsr_ports = 0; + + /* Program which port(s) shall support HSR */ + ksz_rmw32(dev, REG_HSR_PORT_MAP__4, BIT(port), BIT(port)); + + /* Forward frames between HSR ports (i.e. bridge together HSR ports) */ + if (dev->hsr_ports) { + dsa_hsr_foreach_port(hsr_dp, ds, hsr) + hsr_ports |= BIT(hsr_dp->index); + + hsr_ports |= BIT(dsa_upstream_port(ds, port)); + dsa_hsr_foreach_port(hsr_dp, ds, hsr) + ksz9477_cfg_port_member(dev, hsr_dp->index, hsr_ports); + } + + if (!dev->hsr_ports) { + /* Enable discarding of received HSR frames */ + ksz_read8(dev, REG_HSR_ALU_CTRL_0__1, &data); + data |= HSR_DUPLICATE_DISCARD; + data &= ~HSR_NODE_UNICAST; + ksz_write8(dev, REG_HSR_ALU_CTRL_0__1, data); + } + + /* Enable per port self-address filtering. + * The global self-address filtering has already been enabled in the + * ksz9477_reset_switch() function. 
+ */ + ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, PORT_SRC_ADDR_FILTER, true); + + /* Setup HW supported features for lan HSR ports */ + user = dsa_to_port(ds, port)->user; + user->features |= KSZ9477_SUPPORTED_HSR_FEATURES; +} + +void ksz9477_hsr_leave(struct dsa_switch *ds, int port, struct net_device *hsr) +{ + struct ksz_device *dev = ds->priv; + + /* Clear port HSR support */ + ksz_rmw32(dev, REG_HSR_PORT_MAP__4, BIT(port), 0); + + /* Disable forwarding frames between HSR ports */ + ksz9477_cfg_port_member(dev, port, BIT(dsa_upstream_port(ds, port))); + + /* Disable per port self-address filtering */ + ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, PORT_SRC_ADDR_FILTER, false); +} + int ksz9477_switch_init(struct ksz_device *dev) { u8 data8; diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h index b6f7e3c46e3f..ce1e656b800b 100644 --- a/drivers/net/dsa/microchip/ksz9477.h +++ b/drivers/net/dsa/microchip/ksz9477.h @@ -36,7 +36,6 @@ int ksz9477_port_mirror_add(struct ksz_device *dev, int port, bool ingress, struct netlink_ext_ack *extack); void ksz9477_port_mirror_del(struct ksz_device *dev, int port, struct dsa_mall_mirror_tc_entry *mirror); -int ksz9477_get_stp_reg(void); void ksz9477_get_caps(struct ksz_device *dev, int port, struct phylink_config *config); int ksz9477_fdb_dump(struct ksz_device *dev, int port, @@ -54,9 +53,51 @@ void ksz9477_config_cpu_port(struct dsa_switch *ds); int ksz9477_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val); int ksz9477_enable_stp_addr(struct ksz_device *dev); int ksz9477_reset_switch(struct ksz_device *dev); -int ksz9477_dsa_init(struct ksz_device *dev); int ksz9477_switch_init(struct ksz_device *dev); void ksz9477_switch_exit(struct ksz_device *dev); void ksz9477_port_queue_split(struct ksz_device *dev, int port); +void ksz9477_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr); +void ksz9477_hsr_leave(struct dsa_switch *ds, int port, struct net_device *hsr); +void ksz9477_get_wol(struct ksz_device *dev, int port, + struct ethtool_wolinfo *wol); +int ksz9477_set_wol(struct ksz_device *dev, int port, + struct ethtool_wolinfo *wol); +void ksz9477_wol_pre_shutdown(struct ksz_device *dev, bool *wol_enabled); + +int ksz9477_port_acl_init(struct ksz_device *dev, int port); +void ksz9477_port_acl_free(struct ksz_device *dev, int port); +int ksz9477_cls_flower_add(struct dsa_switch *ds, int port, + struct flow_cls_offload *cls, bool ingress); +int ksz9477_cls_flower_del(struct dsa_switch *ds, int port, + struct flow_cls_offload *cls, bool ingress); + +#define KSZ9477_ACL_ENTRY_SIZE 18 +#define KSZ9477_ACL_MAX_ENTRIES 16 + +struct ksz9477_acl_entry { + u8 entry[KSZ9477_ACL_ENTRY_SIZE]; + unsigned long cookie; + u32 prio; +}; + +struct ksz9477_acl_entries { + struct ksz9477_acl_entry entries[KSZ9477_ACL_MAX_ENTRIES]; + int entries_count; +}; + +struct ksz9477_acl_priv { + struct ksz9477_acl_entries acles; +}; + +void ksz9477_acl_remove_entries(struct ksz_device *dev, int port, + struct ksz9477_acl_entries *acles, + unsigned long cookie); +int ksz9477_acl_write_list(struct ksz_device *dev, int port); +int ksz9477_sort_acl_entries(struct ksz_device *dev, int port); +void ksz9477_acl_action_rule_cfg(u8 *entry, bool force_prio, u8 prio_val); +void ksz9477_acl_processing_rule_set_action(u8 *entry, u8 action_idx); +void ksz9477_acl_match_process_l2(struct ksz_device *dev, int port, + u16 ethtype, u8 *src_mac, u8 *dst_mac, + unsigned long cookie, u32 prio); #endif diff --git 
a/drivers/net/dsa/microchip/ksz9477_acl.c b/drivers/net/dsa/microchip/ksz9477_acl.c new file mode 100644 index 000000000000..7ba778df63ac --- /dev/null +++ b/drivers/net/dsa/microchip/ksz9477_acl.c @@ -0,0 +1,1436 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2023 Pengutronix, Oleksij Rempel <kernel@pengutronix.de> + +/* Access Control List (ACL) structure: + * + * There are multiple groups of registers involved in ACL configuration: + * + * - Matching Rules: These registers define the criteria for matching incoming + * packets based on their header information (Layer 2 MAC, Layer 3 IP, or + * Layer 4 TCP/UDP). Different register settings are used depending on the + * matching rule mode (MD) and the Enable (ENB) settings. + * + * - Action Rules: These registers define how the ACL should modify the packet's + * priority, VLAN tag priority, and forwarding map once a matching rule has + * been triggered. The settings vary depending on whether the matching rule is + * in Count Mode (MD = 01 and ENB = 00) or not. + * + * - Processing Rules: These registers control the overall behavior of the ACL, + * such as selecting which matching rule to apply first, enabling/disabling + * specific rules, or specifying actions for matched packets. + * + * ACL Structure: + * +----------------------+ + * +----------------------+ | (optional) | + * | Matching Rules | | Matching Rules | + * | (Layer 2, 3, 4) | | (Layer 2, 3, 4) | + * +----------------------+ +----------------------+ + * | | + * \___________________________/ + * v + * +----------------------+ + * | Processing Rules | + * | (action idx, | + * | matching rule set) | + * +----------------------+ + * | + * v + * +----------------------+ + * | Action Rules | + * | (Modify Priority, | + * | Forwarding Map, | + * | VLAN tag, etc) | + * +----------------------+ + */ + +#include <linux/bitops.h> + +#include "ksz9477.h" +#include "ksz9477_reg.h" +#include "ksz_common.h" + +#define KSZ9477_PORT_ACL_0 0x600 + +enum ksz9477_acl_port_access { + KSZ9477_ACL_PORT_ACCESS_0 = 0x00, + KSZ9477_ACL_PORT_ACCESS_1 = 0x01, + KSZ9477_ACL_PORT_ACCESS_2 = 0x02, + KSZ9477_ACL_PORT_ACCESS_3 = 0x03, + KSZ9477_ACL_PORT_ACCESS_4 = 0x04, + KSZ9477_ACL_PORT_ACCESS_5 = 0x05, + KSZ9477_ACL_PORT_ACCESS_6 = 0x06, + KSZ9477_ACL_PORT_ACCESS_7 = 0x07, + KSZ9477_ACL_PORT_ACCESS_8 = 0x08, + KSZ9477_ACL_PORT_ACCESS_9 = 0x09, + KSZ9477_ACL_PORT_ACCESS_A = 0x0A, + KSZ9477_ACL_PORT_ACCESS_B = 0x0B, + KSZ9477_ACL_PORT_ACCESS_C = 0x0C, + KSZ9477_ACL_PORT_ACCESS_D = 0x0D, + KSZ9477_ACL_PORT_ACCESS_E = 0x0E, + KSZ9477_ACL_PORT_ACCESS_F = 0x0F, + KSZ9477_ACL_PORT_ACCESS_10 = 0x10, + KSZ9477_ACL_PORT_ACCESS_11 = 0x11 +}; + +#define KSZ9477_ACL_MD_MASK GENMASK(5, 4) +#define KSZ9477_ACL_MD_DISABLE 0 +#define KSZ9477_ACL_MD_L2_MAC 1 +#define KSZ9477_ACL_MD_L3_IP 2 +#define KSZ9477_ACL_MD_L4_TCP_UDP 3 + +#define KSZ9477_ACL_ENB_MASK GENMASK(3, 2) +#define KSZ9477_ACL_ENB_L2_COUNTER 0 +#define KSZ9477_ACL_ENB_L2_TYPE 1 +#define KSZ9477_ACL_ENB_L2_MAC 2 +#define KSZ9477_ACL_ENB_L2_MAC_TYPE 3 + +/* only IPv4 src or dst can be used with mask */ +#define KSZ9477_ACL_ENB_L3_IPV4_ADDR_MASK 1 +/* only IPv4 src and dst can be used without mask */ +#define KSZ9477_ACL_ENB_L3_IPV4_ADDR_SRC_DST 2 + +#define KSZ9477_ACL_ENB_L4_IP_PROTO 0 +#define KSZ9477_ACL_ENB_L4_TCP_SRC_DST_PORT 1 +#define KSZ9477_ACL_ENB_L4_UDP_SRC_DST_PORT 2 +#define KSZ9477_ACL_ENB_L4_TCP_SEQ_NUMBER 3 + +#define KSZ9477_ACL_SD_SRC BIT(1) +#define KSZ9477_ACL_SD_DST 0 +#define KSZ9477_ACL_EQ_EQUAL BIT(0) +#define 
KSZ9477_ACL_EQ_NOT_EQUAL 0 + +#define KSZ9477_ACL_PM_M GENMASK(7, 6) +#define KSZ9477_ACL_PM_DISABLE 0 +#define KSZ9477_ACL_PM_HIGHER 1 +#define KSZ9477_ACL_PM_LOWER 2 +#define KSZ9477_ACL_PM_REPLACE 3 +#define KSZ9477_ACL_P_M GENMASK(5, 3) + +#define KSZ9477_PORT_ACL_CTRL_0 0x0612 + +#define KSZ9477_ACL_WRITE_DONE BIT(6) +#define KSZ9477_ACL_READ_DONE BIT(5) +#define KSZ9477_ACL_WRITE BIT(4) +#define KSZ9477_ACL_INDEX_M GENMASK(3, 0) + +/** + * ksz9477_dump_acl_index - Print the ACL entry at the specified index + * + * @dev: Pointer to the ksz9477 device structure. + * @acle: Pointer to the ACL entry array. + * @index: The index of the ACL entry to print. + * + * This function prints the details of an ACL entry, located at a particular + * index within the ksz9477 device's ACL table. It omits printing entries that + * are empty. + * + * Return: 1 if the entry is non-empty and printed, 0 otherwise. + */ +static int ksz9477_dump_acl_index(struct ksz_device *dev, + struct ksz9477_acl_entry *acle, int index) +{ + bool empty = true; + char buf[64]; + u8 *entry; + int i; + + entry = &acle[index].entry[0]; + for (i = 0; i <= KSZ9477_ACL_PORT_ACCESS_11; i++) { + if (entry[i]) + empty = false; + + sprintf(buf + (i * 3), "%02x ", entry[i]); + } + + /* no need to print empty entries */ + if (empty) + return 0; + + dev_err(dev->dev, " Entry %02d, prio: %02d : %s", index, + acle[index].prio, buf); + + return 1; +} + +/** + * ksz9477_dump_acl - Print ACL entries + * + * @dev: Pointer to the device structure. + * @acle: Pointer to the ACL entry array. + */ +static void ksz9477_dump_acl(struct ksz_device *dev, + struct ksz9477_acl_entry *acle) +{ + int count = 0; + int i; + + for (i = 0; i < KSZ9477_ACL_MAX_ENTRIES; i++) + count += ksz9477_dump_acl_index(dev, acle, i); + + if (count != KSZ9477_ACL_MAX_ENTRIES - 1) + dev_err(dev->dev, " Empty ACL entries were skipped\n"); +} + +/** + * ksz9477_acl_is_valid_matching_rule - Check if an ACL entry contains a valid + * matching rule. + * + * @entry: Pointer to ACL entry buffer + * + * This function checks if the given ACL entry buffer contains a valid + * matching rule by inspecting the Mode (MD) and Enable (ENB) fields. + * + * Returns: True if it's a valid matching rule, false otherwise. + */ +static bool ksz9477_acl_is_valid_matching_rule(u8 *entry) +{ + u8 val1, md, enb; + + val1 = entry[KSZ9477_ACL_PORT_ACCESS_1]; + + md = FIELD_GET(KSZ9477_ACL_MD_MASK, val1); + if (md == KSZ9477_ACL_MD_DISABLE) + return false; + + if (md == KSZ9477_ACL_MD_L2_MAC) { + /* The L2 counter is not supported, so it is not a valid rule for now */ + enb = FIELD_GET(KSZ9477_ACL_ENB_MASK, val1); + if (enb == KSZ9477_ACL_ENB_L2_COUNTER) + return false; + } + + return true; +} + +/** + * ksz9477_acl_get_cont_entr - Get count of contiguous ACL entries and validate + * the matching rules. + * @dev: Pointer to the KSZ9477 device structure. + * @port: Port number. + * @index: Index of the starting ACL entry. + * + * Based on the KSZ9477 switch's Access Control List (ACL) system, the RuleSet + * in an ACL entry indicates which entries contain Matching rules linked to it. + * This RuleSet is represented by two registers: KSZ9477_ACL_PORT_ACCESS_E and + * KSZ9477_ACL_PORT_ACCESS_F. Each bit set in these registers corresponds to + * an entry containing a Matching rule for this RuleSet. + * + * For a single Matching rule linked, only one bit is set. However, when an + * entry links multiple Matching rules, forming what's termed a 'complex rule', + * multiple bits are set in these registers. 
+ * + * This function checks that, for complex rules, the entries containing the + * linked Matching rules are contiguous in terms of their indices. It calculates + * and returns the number of these contiguous entries. + * + * Returns: + * - 0 if the entry is empty and can be safely overwritten + * - 1 if the entry represents a simple rule + * - The number of contiguous entries if it is the root entry of a complex + * rule + * - -ENOTEMPTY if the entry is part of a complex rule but not the root + * entry + * - -EINVAL if the validation fails + */ +static int ksz9477_acl_get_cont_entr(struct ksz_device *dev, int port, + int index) +{ + struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv; + struct ksz9477_acl_entries *acles = &acl->acles; + int start_idx, end_idx, contiguous_count; + unsigned long val; + u8 vale, valf; + u8 *entry; + int i; + + entry = &acles->entries[index].entry[0]; + vale = entry[KSZ9477_ACL_PORT_ACCESS_E]; + valf = entry[KSZ9477_ACL_PORT_ACCESS_F]; + + val = (vale << 8) | valf; + + /* If no bits are set, return an appropriate value or error */ + if (!val) { + if (ksz9477_acl_is_valid_matching_rule(entry)) { + /* Looks like we are about to corrupt some complex rule. + * Do not print an error here, as this is a normal case + * when we are trying to find a free or starting entry. + */ + dev_dbg(dev->dev, "ACL: entry %d starting with a valid matching rule, but no bits set in RuleSet\n", + index); + return -ENOTEMPTY; + } + + /* This entry does not contain a valid matching rule */ + return 0; + } + + start_idx = find_first_bit((unsigned long *)&val, 16); + end_idx = find_last_bit((unsigned long *)&val, 16); + + /* Calculate the contiguous count */ + contiguous_count = end_idx - start_idx + 1; + + /* Check if the number of bits set in val matches our calculated count */ + if (contiguous_count != hweight16(val)) { + /* Probably we have a fragmented complex rule, which is not + * supported by this driver. + */ + dev_err(dev->dev, "ACL: number of bits set in RuleSet does not match calculated count\n"); + return -EINVAL; + } + + /* loop over the contiguous entries and check for valid matching rules */ + for (i = start_idx; i <= end_idx; i++) { + u8 *current_entry = &acles->entries[i].entry[0]; + + if (!ksz9477_acl_is_valid_matching_rule(current_entry)) { + /* we have something linked without a valid matching + * rule. ACL table? + */ + dev_err(dev->dev, "ACL: entry %d does not contain a valid matching rule\n", + i); + return -EINVAL; + } + + if (i > start_idx) { + vale = current_entry[KSZ9477_ACL_PORT_ACCESS_E]; + valf = current_entry[KSZ9477_ACL_PORT_ACCESS_F]; + /* Following entry should have empty linkage list */ + if (vale || valf) { + dev_err(dev->dev, "ACL: entry %d has non-empty RuleSet linkage\n", + i); + return -EINVAL; + } + } + } + + return contiguous_count; +} + +/** + * ksz9477_acl_update_linkage - Update the RuleSet linkage for an ACL entry + * after a move operation. + * + * @dev: Pointer to the ksz_device. + * @entry: Pointer to the ACL entry array. + * @old_idx: The original index of the ACL entry before moving. + * @new_idx: The new index of the ACL entry after moving. + * + * This function updates the RuleSet linkage bits for an ACL entry when + * it's moved from one position to another in the ACL table. The RuleSet + * linkage is represented by two 8-bit registers, which are combined + * into a 16-bit value for easier manipulation. The linkage bits are shifted + * based on the difference between the old and new index. 
If any bits are lost + * during the shift operation, an error is returned. + * + * Note: Fragmentation within a RuleSet is not supported. Hence, entries must + * be moved as complete blocks, maintaining the integrity of the RuleSet. + * + * Returns: 0 on success, or -EINVAL if any RuleSet linkage bits are lost + * during the move. + */ +static int ksz9477_acl_update_linkage(struct ksz_device *dev, u8 *entry, + u16 old_idx, u16 new_idx) +{ + unsigned int original_bit_count; + unsigned long rule_linkage; + u8 vale, valf, val0; + int shift; + + val0 = entry[KSZ9477_ACL_PORT_ACCESS_0]; + vale = entry[KSZ9477_ACL_PORT_ACCESS_E]; + valf = entry[KSZ9477_ACL_PORT_ACCESS_F]; + + /* Combine the two u8 values into one u16 for easier manipulation */ + rule_linkage = (vale << 8) | valf; + original_bit_count = hweight16(rule_linkage); + + /* Even if HW is able to handle fragmented RuleSet, we don't support it. + * RuleSet is filled only for the first entry of the set. + */ + if (!rule_linkage) + return 0; + + if (val0 != old_idx) { + dev_err(dev->dev, "ACL: entry %d has unexpected ActionRule linkage: %d\n", + old_idx, val0); + return -EINVAL; + } + + val0 = new_idx; + + /* Calculate the number of positions to shift */ + shift = new_idx - old_idx; + + /* Shift the RuleSet */ + if (shift > 0) + rule_linkage <<= shift; + else + rule_linkage >>= -shift; + + /* Check that no bits were lost in the process */ + if (original_bit_count != hweight16(rule_linkage)) { + dev_err(dev->dev, "ACL RuleSet linkage bits lost during move\n"); + return -EINVAL; + } + + entry[KSZ9477_ACL_PORT_ACCESS_0] = val0; + + /* Update the RuleSet bitfields in the entry */ + entry[KSZ9477_ACL_PORT_ACCESS_E] = (rule_linkage >> 8) & 0xFF; + entry[KSZ9477_ACL_PORT_ACCESS_F] = rule_linkage & 0xFF; + + return 0; +} + +/** + * ksz9477_validate_and_get_src_count - Validate source and destination indices + * and determine the source entry count. + * @dev: Pointer to the KSZ device structure. + * @port: Port number on the KSZ device where the ACL entries reside. + * @src_idx: Index of the starting ACL entry that needs to be validated. + * @dst_idx: Index of the destination where the source entries are intended to + * be moved. + * @src_count: Pointer to the variable that will hold the number of contiguous + * source entries if the validation passes. + * @dst_count: Pointer to the variable that will hold the number of contiguous + * destination entries if the validation passes. + * + * This function performs validation on the source and destination indices + * provided for ACL entries. It checks if the indices are within the valid + * range, and if the source entries are contiguous. Additionally, the function + * ensures that there's adequate space at the destination for the source entries + * and that the destination index isn't in the middle of a RuleSet. If all + * validations pass, the function returns the number of contiguous source and + * destination entries. + * + * Return: 0 on success, otherwise returns a negative error code if any + * validation check fails. 
+ */ +static int ksz9477_validate_and_get_src_count(struct ksz_device *dev, int port, + int src_idx, int dst_idx, + int *src_count, int *dst_count) +{ + int ret; + + if (src_idx >= KSZ9477_ACL_MAX_ENTRIES || + dst_idx >= KSZ9477_ACL_MAX_ENTRIES) { + dev_err(dev->dev, "ACL: invalid entry index\n"); + return -EINVAL; + } + + /* Validate if the source entries are contiguous */ + ret = ksz9477_acl_get_cont_entr(dev, port, src_idx); + if (ret < 0) + return ret; + *src_count = ret; + + if (!*src_count) { + dev_err(dev->dev, "ACL: source entry is empty\n"); + return -EINVAL; + } + + if (dst_idx + *src_count >= KSZ9477_ACL_MAX_ENTRIES) { + dev_err(dev->dev, "ACL: Not enough space at the destination. Move operation will fail.\n"); + return -EINVAL; + } + + /* Validate if the destination entry is empty or not in the middle of + * a RuleSet. + */ + ret = ksz9477_acl_get_cont_entr(dev, port, dst_idx); + if (ret < 0) + return ret; + *dst_count = ret; + + return 0; +} + +/** + * ksz9477_move_entries_downwards - Move a range of ACL entries downwards in + * the list. + * @dev: Pointer to the KSZ device structure. + * @acles: Pointer to the structure encapsulating all the ACL entries. + * @start_idx: Starting index of the entries to be relocated. + * @num_entries_to_move: Number of consecutive entries to be relocated. + * @end_idx: Destination index where the first entry should be situated post + * relocation. + * + * This function is responsible for rearranging a specific block of ACL entries + * by shifting them downwards in the list based on the supplied source and + * destination indices. It ensures that the linkage between the ACL entries is + * maintained accurately after the relocation. + * + * Return: 0 on successful relocation of entries, otherwise returns a negative + * error code. + */ +static int ksz9477_move_entries_downwards(struct ksz_device *dev, + struct ksz9477_acl_entries *acles, + u16 start_idx, + u16 num_entries_to_move, + u16 end_idx) +{ + struct ksz9477_acl_entry *e; + int ret, i; + + for (i = start_idx; i < end_idx; i++) { + e = &acles->entries[i]; + *e = acles->entries[i + num_entries_to_move]; + + ret = ksz9477_acl_update_linkage(dev, &e->entry[0], + i + num_entries_to_move, i); + if (ret < 0) + return ret; + } + + return 0; +} + +/** + * ksz9477_move_entries_upwards - Move a range of ACL entries upwards in the + * list. + * @dev: Pointer to the KSZ device structure. + * @acles: Pointer to the structure holding all the ACL entries. + * @start_idx: The starting index of the entries to be moved. + * @num_entries_to_move: Number of contiguous entries to be moved. + * @target_idx: The destination index where the first entry should be placed + * after moving. + * + * This function rearranges a chunk of ACL entries by moving them upwards + * in the list based on the given source and destination indices. The reordering + * process preserves the linkage between entries by updating it accordingly. + * + * Return: 0 if the entries were successfully moved, otherwise a negative error + * code. 
+ */ +static int ksz9477_move_entries_upwards(struct ksz_device *dev, + struct ksz9477_acl_entries *acles, + u16 start_idx, u16 num_entries_to_move, + u16 target_idx) +{ + struct ksz9477_acl_entry *e; + int ret, i, b; + + for (i = start_idx; i > target_idx; i--) { + b = i + num_entries_to_move - 1; + + e = &acles->entries[b]; + *e = acles->entries[i - 1]; + + ret = ksz9477_acl_update_linkage(dev, &e->entry[0], i - 1, b); + if (ret < 0) + return ret; + } + + return 0; +} + +/** + * ksz9477_acl_move_entries - Move a block of contiguous ACL entries from a + * source to a destination index. + * @dev: Pointer to the KSZ9477 device structure. + * @port: Port number. + * @src_idx: Index of the starting source ACL entry. + * @dst_idx: Index of the starting destination ACL entry. + * + * This function aims to move a block of contiguous ACL entries from the source + * index to the destination index while ensuring the integrity and validity of + * the ACL table. + * + * In case of any errors during the adjustments or copying, the function will + * restore the ACL entries to their original state from the backup. + * + * Return: 0 if the move operation is successful. Returns -EINVAL for validation + * errors or other error codes based on specific failure conditions. + */ +static int ksz9477_acl_move_entries(struct ksz_device *dev, int port, + u16 src_idx, u16 dst_idx) +{ + struct ksz9477_acl_entry buffer[KSZ9477_ACL_MAX_ENTRIES]; + struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv; + struct ksz9477_acl_entries *acles = &acl->acles; + int src_count, ret, dst_count; + + /* Nothing to do */ + if (src_idx == dst_idx) + return 0; + + ret = ksz9477_validate_and_get_src_count(dev, port, src_idx, dst_idx, + &src_count, &dst_count); + if (ret) + return ret; + + /* In case dst_index is greater than src_index, we need to adjust the + * destination index to account for the entries that will be moved + * downwards and the size of the entry located at dst_idx. + */ + if (dst_idx > src_idx) + dst_idx = dst_idx + dst_count - src_count; + + /* Copy source block to buffer and update its linkage */ + for (int i = 0; i < src_count; i++) { + buffer[i] = acles->entries[src_idx + i]; + ret = ksz9477_acl_update_linkage(dev, &buffer[i].entry[0], + src_idx + i, dst_idx + i); + if (ret < 0) + return ret; + } + + /* Adjust other entries and their linkage based on destination */ + if (dst_idx > src_idx) { + ret = ksz9477_move_entries_downwards(dev, acles, src_idx, + src_count, dst_idx); + } else { + ret = ksz9477_move_entries_upwards(dev, acles, src_idx, + src_count, dst_idx); + } + if (ret < 0) + return ret; + + /* Copy buffer to destination block */ + for (int i = 0; i < src_count; i++) + acles->entries[dst_idx + i] = buffer[i]; + + return 0; +} + +/** + * ksz9477_get_next_block_start - Identify the starting index of the next ACL + * block. + * @dev: Pointer to the device structure. + * @port: The port number on which the ACL entries are being checked. + * @start: The starting index from which the search begins. + * + * This function looks for the next valid ACL block starting from the provided + * 'start' index and returns the beginning index of that block. If the block is + * invalid or if it reaches the end of the ACL entries without finding another + * block, it returns the maximum ACL entries count. + * + * Returns: + * - The starting index of the next valid ACL block. + * - KSZ9477_ACL_MAX_ENTRIES if no other valid blocks are found after 'start'. + * - A negative error code if an error occurs while checking. 
+ */ +static int ksz9477_get_next_block_start(struct ksz_device *dev, int port, + int start) +{ + int block_size; + + for (int i = start; i < KSZ9477_ACL_MAX_ENTRIES;) { + block_size = ksz9477_acl_get_cont_entr(dev, port, i); + if (block_size < 0 && block_size != -ENOTEMPTY) + return block_size; + + if (block_size > 0) + return i; + + i++; + } + return KSZ9477_ACL_MAX_ENTRIES; +} + +/** + * ksz9477_swap_acl_blocks - Swap two ACL blocks + * @dev: Pointer to the device structure. + * @port: The port number on which the ACL blocks are to be swapped. + * @i: The starting index of the first ACL block. + * @j: The starting index of the second ACL block. + * + * This function is used to swap two ACL blocks present at given indices. The + * main purpose is to aid in the sorting and reordering of ACL blocks based on + * certain criteria, e.g., priority. It checks the validity of the block at + * index 'i', ensuring it's not an empty block, and then proceeds to swap it + * with the block at index 'j'. + * + * Returns: + * - 0 on successful swapping of blocks. + * - -EINVAL if the block at index 'i' is empty. + * - A negative error code if any other error occurs during the swap. + */ +static int ksz9477_swap_acl_blocks(struct ksz_device *dev, int port, int i, + int j) +{ + int ret, current_block_size; + + current_block_size = ksz9477_acl_get_cont_entr(dev, port, i); + if (current_block_size < 0) + return current_block_size; + + if (!current_block_size) { + dev_err(dev->dev, "ACL: swapping empty entry %d\n", i); + return -EINVAL; + } + + ret = ksz9477_acl_move_entries(dev, port, i, j); + if (ret) + return ret; + + ret = ksz9477_acl_move_entries(dev, port, j - current_block_size, i); + if (ret) + return ret; + + return 0; +} + +/** + * ksz9477_sort_acl_entr_no_back - Sort ACL entries for a given port based on + * priority without backing up entries. + * @dev: Pointer to the device structure. + * @port: The port number whose ACL entries need to be sorted. + * + * This function sorts ACL entries of the specified port using a variant of the + * bubble sort algorithm. It operates on blocks of ACL entries rather than + * individual entries. Each block's starting point is identified and then + * compared with subsequent blocks based on their priority. If the current + * block has a lower priority than the subsequent block, the two blocks are + * swapped. + * + * This is done in order to maintain an organized order of ACL entries based on + * priority, ensuring efficient and predictable ACL rule application. + * + * Returns: + * - 0 on successful sorting of entries. + * - A negative error code if any issue arises during sorting, e.g., + * if the function is unable to get the next block start. 
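+ *
+ * For example (hypothetical table state): if the block starting at entry 0
+ * has prio 5 and the block starting at entry 3 has prio 2, the two blocks
+ * are swapped, so the table ends up ordered by ascending prio.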
+ */
+static int ksz9477_sort_acl_entr_no_back(struct ksz_device *dev, int port)
+{
+	struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv;
+	struct ksz9477_acl_entries *acles = &acl->acles;
+	struct ksz9477_acl_entry *curr, *next;
+	int i, j, ret;
+
+	/* Bubble sort */
+	for (i = 0; i < KSZ9477_ACL_MAX_ENTRIES;) {
+		curr = &acles->entries[i];
+
+		j = ksz9477_get_next_block_start(dev, port, i + 1);
+		if (j < 0)
+			return j;
+
+		while (j < KSZ9477_ACL_MAX_ENTRIES) {
+			next = &acles->entries[j];
+
+			if (curr->prio > next->prio) {
+				ret = ksz9477_swap_acl_blocks(dev, port, i, j);
+				if (ret)
+					return ret;
+			}
+
+			j = ksz9477_get_next_block_start(dev, port, j + 1);
+			if (j < 0)
+				return j;
+		}
+
+		i = ksz9477_get_next_block_start(dev, port, i + 1);
+		if (i < 0)
+			return i;
+	}
+
+	return 0;
+}
+
+/**
+ * ksz9477_sort_acl_entries - Sort the ACL entries for a given port.
+ * @dev: Pointer to the KSZ device.
+ * @port: Port number.
+ *
+ * This function sorts the Access Control List (ACL) entries for a specified
+ * port. Before sorting, a backup of the original entries is created. If the
+ * sorting process fails, the function will log error messages displaying both
+ * the original and attempted sorted entries, and then restore the original
+ * entries from the backup.
+ *
+ * Return: 0 if the sorting succeeds, otherwise a negative error code.
+ */
+int ksz9477_sort_acl_entries(struct ksz_device *dev, int port)
+{
+	struct ksz9477_acl_entry backup[KSZ9477_ACL_MAX_ENTRIES];
+	struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv;
+	struct ksz9477_acl_entries *acles = &acl->acles;
+	int ret;
+
+	/* Create a backup of the ACL entries so that we can restore them if
+	 * something goes wrong during sorting.
+	 */
+	memcpy(backup, acles->entries, sizeof(backup));
+
+	ret = ksz9477_sort_acl_entr_no_back(dev, port);
+	if (ret) {
+		dev_err(dev->dev, "ACL: failed to sort entries for port %d\n",
+			port);
+		dev_err(dev->dev, "ACL dump before sorting:\n");
+		ksz9477_dump_acl(dev, backup);
+		dev_err(dev->dev, "ACL dump after sorting:\n");
+		ksz9477_dump_acl(dev, acles->entries);
+		/* Restore the original entries */
+		memcpy(acles->entries, backup, sizeof(backup));
+	}
+
+	return ret;
+}
+
+/**
+ * ksz9477_acl_wait_ready - Waits for the ACL operation to complete on a given
+ *			    port.
+ * @dev: The ksz_device instance.
+ * @port: The port number to wait for.
+ *
+ * This function checks if the ACL write or read operation is completed by
+ * polling the specified register.
+ *
+ * Returns: 0 if the operation is successful, or a negative error code if an
+ * error occurs.
+ */
+static int ksz9477_acl_wait_ready(struct ksz_device *dev, int port)
+{
+	unsigned int wr_mask = KSZ9477_ACL_WRITE_DONE | KSZ9477_ACL_READ_DONE;
+	unsigned int val, reg;
+	int ret;
+
+	reg = dev->dev_ops->get_port_addr(port, KSZ9477_PORT_ACL_CTRL_0);
+
+	ret = regmap_read_poll_timeout(dev->regmap[0], reg, val,
+				       (val & wr_mask) == wr_mask, 1000, 10000);
+	if (ret)
+		dev_err(dev->dev, "Failed to read/write ACL table\n");
+
+	return ret;
+}
+
+/**
+ * ksz9477_acl_entry_write - Writes an ACL entry to a given port at the
+ *			     specified index.
+ * @dev: The ksz_device instance.
+ * @port: The port number to write the ACL entry to.
+ * @entry: A pointer to the ACL entry data.
+ * @idx: The index at which to write the ACL entry.
+ *
+ * This function writes the provided ACL entry to the specified port at the
+ * given index.
+ *
+ * Returns: 0 if the operation is successful, or a negative error code if an
+ * error occurs.
+ */
+static int ksz9477_acl_entry_write(struct ksz_device *dev, int port, u8 *entry,
+				   int idx)
+{
+	int ret, i;
+	u8 val;
+
+	for (i = 0; i < KSZ9477_ACL_ENTRY_SIZE; i++) {
+		ret = ksz_pwrite8(dev, port, KSZ9477_PORT_ACL_0 + i, entry[i]);
+		if (ret) {
+			dev_err(dev->dev, "Failed to write ACL entry %d\n", i);
+			return ret;
+		}
+	}
+
+	/* write everything down */
+	val = FIELD_PREP(KSZ9477_ACL_INDEX_M, idx) | KSZ9477_ACL_WRITE;
+	ret = ksz_pwrite8(dev, port, KSZ9477_PORT_ACL_CTRL_0, val);
+	if (ret)
+		return ret;
+
+	/* wait until everything is written */
+	return ksz9477_acl_wait_ready(dev, port);
+}
+
+/**
+ * ksz9477_acl_port_enable - Enables ACL functionality on a given port.
+ * @dev: The ksz_device instance.
+ * @port: The port number on which to enable ACL functionality.
+ *
+ * This function enables ACL functionality on the specified port by configuring
+ * the appropriate control registers. It returns 0 if the operation is
+ * successful, or a negative error code if an error occurs.
+ *
+ * 0xn801 - KSZ9477S 5.2.8.2 Port Priority Control Register
+ *	Bit 7 - Highest Priority
+ *	Bit 6 - OR'ed Priority
+ *	Bit 4 - MAC Address Priority Classification
+ *	Bit 3 - VLAN Priority Classification
+ *	Bit 2 - 802.1p Priority Classification
+ *	Bit 1 - Diffserv Priority Classification
+ *	Bit 0 - ACL Priority Classification
+ *
+ * The current driver implementation sets 802.1p priority classification by
+ * default. In this function we add ACL priority classification with OR'ed
+ * priority. According to testing, a priority set by the ACL will supersede
+ * the 802.1p priority.
+ *
+ * 0xn803 - KSZ9477S 5.2.8.4 Port Authentication Control Register
+ *	Bit 2 - Access Control List (ACL) Enable
+ *	Bits 1:0 - Authentication Mode
+ *		00 = Reserved
+ *		01 = Block Mode. Authentication is enabled. When ACL is
+ *		     enabled, all traffic that misses the ACL rules is
+ *		     blocked; otherwise ACL actions apply.
+ *		10 = Pass Mode. Authentication is disabled. When ACL is
+ *		     enabled, all traffic that misses the ACL rules is
+ *		     forwarded; otherwise ACL actions apply.
+ *		11 = Trap Mode. Authentication is enabled. All traffic is
+ *		     forwarded to the host port. When ACL is enabled, all
+ *		     traffic that misses the ACL rules is blocked; otherwise
+ *		     ACL actions apply.
+ *
+ * We are using Pass Mode in this function.
+ *
+ * Returns: 0 if the operation is successful, or a negative error code if an
+ * error occurs.
+ */
+static int ksz9477_acl_port_enable(struct ksz_device *dev, int port)
+{
+	int ret;
+
+	ret = ksz_prmw8(dev, port, P_PRIO_CTRL, 0, PORT_ACL_PRIO_ENABLE |
+			PORT_OR_PRIO);
+	if (ret)
+		return ret;
+
+	return ksz_pwrite8(dev, port, REG_PORT_MRI_AUTHEN_CTRL,
+			   PORT_ACL_ENABLE |
+			   FIELD_PREP(PORT_AUTHEN_MODE, PORT_AUTHEN_PASS));
+}
+
+/**
+ * ksz9477_acl_port_disable - Disables ACL functionality on a given port.
+ * @dev: The ksz_device instance.
+ * @port: The port number on which to disable ACL functionality.
+ *
+ * This function disables ACL functionality on the specified port by writing a
+ * value of 0 to the REG_PORT_MRI_AUTHEN_CTRL control register and clearing the
+ * PORT_ACL_PRIO_ENABLE bit in the P_PRIO_CTRL register.
+ *
+ * Returns: 0 if the operation is successful, or a negative error code if an
+ * error occurs.
+ */
+static int ksz9477_acl_port_disable(struct ksz_device *dev, int port)
+{
+	int ret;
+
+	ret = ksz_prmw8(dev, port, P_PRIO_CTRL, PORT_ACL_PRIO_ENABLE, 0);
+	if (ret)
+		return ret;
+
+	return ksz_pwrite8(dev, port, REG_PORT_MRI_AUTHEN_CTRL, 0);
+}
+
+/**
+ * ksz9477_acl_write_list - Write a list of ACL entries to a given port.
+ * @dev: The ksz_device instance.
+ * @port: The port number on which to write ACL entries.
+ *
+ * This function enables ACL functionality on the specified port, writes a
+ * list of ACL entries to the port, and disables ACL functionality if there
+ * are no entries.
+ *
+ * Returns: 0 if the operation is successful, or a negative error code if an
+ * error occurs.
+ */
+int ksz9477_acl_write_list(struct ksz_device *dev, int port)
+{
+	struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv;
+	struct ksz9477_acl_entries *acles = &acl->acles;
+	int ret, i;
+
+	/* ACL should be enabled before writing entries */
+	ret = ksz9477_acl_port_enable(dev, port);
+	if (ret)
+		return ret;
+
+	/* write all entries */
+	for (i = 0; i < ARRAY_SIZE(acles->entries); i++) {
+		u8 *entry = acles->entries[i].entry;
+
+		/* Check if the entry was removed and should be zeroed.
+		 * If the last fields of the entry are not zero, it was
+		 * removed locally but is not yet synced with the HW, so
+		 * write it down to the HW to remove it.
+		 */
+		if (i >= acles->entries_count &&
+		    entry[KSZ9477_ACL_PORT_ACCESS_10] == 0 &&
+		    entry[KSZ9477_ACL_PORT_ACCESS_11] == 0)
+			continue;
+
+		ret = ksz9477_acl_entry_write(dev, port, entry, i);
+		if (ret)
+			return ret;
+
+		/* The removed entry is now clean on the HW side, so it can
+		 * be cleared in the cache too.
+		 */
+		if (i >= acles->entries_count &&
+		    entry[KSZ9477_ACL_PORT_ACCESS_10] != 0 &&
+		    entry[KSZ9477_ACL_PORT_ACCESS_11] != 0) {
+			entry[KSZ9477_ACL_PORT_ACCESS_10] = 0;
+			entry[KSZ9477_ACL_PORT_ACCESS_11] = 0;
+		}
+	}
+
+	if (!acles->entries_count)
+		return ksz9477_acl_port_disable(dev, port);
+
+	return 0;
+}
+
+/**
+ * ksz9477_acl_remove_entries - Remove ACL entries with a given cookie from a
+ *				specified ksz9477_acl_entries structure.
+ * @dev: The ksz_device instance.
+ * @port: The port number on which to remove ACL entries.
+ * @acles: The ksz9477_acl_entries instance.
+ * @cookie: The cookie value to match for entry removal.
+ *
+ * This function iterates through the entries array, removing any entries with
+ * a matching cookie value. The remaining entries are then shifted down to fill
+ * the gap.
+ */
+void ksz9477_acl_remove_entries(struct ksz_device *dev, int port,
+				struct ksz9477_acl_entries *acles,
+				unsigned long cookie)
+{
+	int entries_count = acles->entries_count;
+	int ret, i, src_count;
+	int src_idx = -1;
+
+	if (!entries_count)
+		return;
+
+	/* Search for the first position with the cookie */
+	for (i = 0; i < entries_count; i++) {
+		if (acles->entries[i].cookie == cookie) {
+			src_idx = i;
+			break;
+		}
+	}
+
+	/* No entries with the matching cookie found */
+	if (src_idx == -1)
+		return;
+
+	/* Get the size of the cookie entry. We may have complex entries.
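+	 * A rule that matches both a source and a destination MAC, for
+	 * instance, occupies two contiguous entries.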
+	 */
+	src_count = ksz9477_acl_get_cont_entr(dev, port, src_idx);
+	if (src_count <= 0)
+		return;
+
+	/* Move all entries down to overwrite the removed entry with the
+	 * cookie.
+	 */
+	ret = ksz9477_move_entries_downwards(dev, acles, src_idx,
+					     src_count,
+					     entries_count - src_count);
+	if (ret) {
+		dev_err(dev->dev, "Failed to move ACL entries down\n");
+		return;
+	}
+
+	/* Overwrite the newly freed places at the end of the list with zeros
+	 * to make sure no unexpected things will happen and no unexplored
+	 * quirks will come out.
+	 */
+	for (i = entries_count - src_count; i < entries_count; i++) {
+		struct ksz9477_acl_entry *entry = &acles->entries[i];
+
+		memset(entry, 0, sizeof(*entry));
+
+		/* Set all access bits to be able to write the zeroed entry
+		 * to the HW.
+		 */
+		entry->entry[KSZ9477_ACL_PORT_ACCESS_10] = 0xff;
+		entry->entry[KSZ9477_ACL_PORT_ACCESS_11] = 0xff;
+	}
+
+	/* Adjust the total entries count */
+	acles->entries_count -= src_count;
+}
+
+/**
+ * ksz9477_port_acl_init - Initialize the ACL for a specified port on a ksz
+ *			   device.
+ * @dev: The ksz_device instance.
+ * @port: The port number to initialize the ACL for.
+ *
+ * This function allocates memory for an acl structure, associates it with the
+ * specified port, and initializes the ACL entries to a default state. The
+ * entries are then written using the ksz9477_acl_write_list function, ensuring
+ * the ACL has a predictable initial hardware state.
+ *
+ * Returns: 0 on success, or an error code on failure.
+ */
+int ksz9477_port_acl_init(struct ksz_device *dev, int port)
+{
+	struct ksz9477_acl_entries *acles;
+	struct ksz9477_acl_priv *acl;
+	int ret, i;
+
+	acl = kzalloc(sizeof(*acl), GFP_KERNEL);
+	if (!acl)
+		return -ENOMEM;
+
+	dev->ports[port].acl_priv = acl;
+
+	acles = &acl->acles;
+	/* write all entries */
+	for (i = 0; i < ARRAY_SIZE(acles->entries); i++) {
+		u8 *entry = acles->entries[i].entry;
+
+		/* Set all access bits to be able to write the zeroed
+		 * entry.
+		 */
+		entry[KSZ9477_ACL_PORT_ACCESS_10] = 0xff;
+		entry[KSZ9477_ACL_PORT_ACCESS_11] = 0xff;
+	}
+
+	ret = ksz9477_acl_write_list(dev, port);
+	if (ret)
+		goto free_acl;
+
+	return 0;
+
+free_acl:
+	kfree(dev->ports[port].acl_priv);
+	dev->ports[port].acl_priv = NULL;
+
+	return ret;
+}
+
+/**
+ * ksz9477_port_acl_free - Free the ACL resources for a specified port on a ksz
+ *			   device.
+ * @dev: The ksz_device instance.
+ * @port: The port number to free the ACL resources for.
+ *
+ * This function disables the ACL for the specified port and frees the
+ * associated memory.
+ */
+void ksz9477_port_acl_free(struct ksz_device *dev, int port)
+{
+	if (!dev->ports[port].acl_priv)
+		return;
+
+	ksz9477_acl_port_disable(dev, port);
+
+	kfree(dev->ports[port].acl_priv);
+	dev->ports[port].acl_priv = NULL;
+}
+
+/**
+ * ksz9477_acl_set_reg - Set entry[16] and entry[17] depending on the updated
+ *			 entry[]
+ * @entry: An array containing the entries
+ * @reg: The register of the entry that needs to be updated
+ * @value: The value to be assigned to the updated entry
+ *
+ * This function updates the entry[] array based on the provided register and
+ * value. It also sets entry[0x10] and entry[0x11] according to the ACL byte
+ * enable rules.
+ *
+ * 0x10 - Byte Enable [15:8]
+ *
+ * Each bit enables accessing one of the ACL bytes when a read or write is
+ * initiated by writing to the Port ACL Byte Enable LSB Register.
+ * Bit 0 applies to the Port ACL Access 7 Register
+ * Bit 1 applies to the Port ACL Access 6 Register, etc.
+ * Bit 7 applies to the Port ACL Access 0 Register + * 1 = Byte is selected for read/write + * 0 = Byte is not selected + * + * 0x11 - Byte Enable [7:0] + * + * Each bit enables accessing one of the ACL bytes when a read or write is + * initiated by writing to the Port ACL Byte Enable LSB Register. + * Bit 0 applies to the Port ACL Access F Register + * Bit 1 applies to the Port ACL Access E Register, etc. + * Bit 7 applies to the Port ACL Access 8 Register + * 1 = Byte is selected for read/write + * 0 = Byte is not selected + */ +static void ksz9477_acl_set_reg(u8 *entry, enum ksz9477_acl_port_access reg, + u8 value) +{ + if (reg >= KSZ9477_ACL_PORT_ACCESS_0 && + reg <= KSZ9477_ACL_PORT_ACCESS_7) { + entry[KSZ9477_ACL_PORT_ACCESS_10] |= + BIT(KSZ9477_ACL_PORT_ACCESS_7 - reg); + } else if (reg >= KSZ9477_ACL_PORT_ACCESS_8 && + reg <= KSZ9477_ACL_PORT_ACCESS_F) { + entry[KSZ9477_ACL_PORT_ACCESS_11] |= + BIT(KSZ9477_ACL_PORT_ACCESS_F - reg); + } else { + WARN_ON(1); + return; + } + + entry[reg] = value; +} + +/** + * ksz9477_acl_matching_rule_cfg_l2 - Configure an ACL filtering entry to match + * L2 types of Ethernet frames + * @entry: Pointer to ACL entry buffer + * @ethertype: Ethertype value + * @eth_addr: Pointer to Ethernet address + * @is_src: If true, match the source MAC address; if false, match the + * destination MAC address + * + * This function configures an Access Control List (ACL) filtering + * entry to match Layer 2 types of Ethernet frames based on the provided + * ethertype and Ethernet address. Additionally, it can match either the source + * or destination MAC address depending on the value of the is_src parameter. + * + * Register Descriptions for MD = 01 and ENB != 00 (Layer 2 MAC header + * filtering) + * + * 0x01 - Mode and Enable + * Bits 5:4 - MD (Mode) + * 01 = Layer 2 MAC header or counter filtering + * Bits 3:2 - ENB (Enable) + * 01 = Comparison is performed only on the TYPE value + * 10 = Comparison is performed only on the MAC Address value + * 11 = Both the MAC Address and TYPE are tested + * Bit 1 - S/D (Source / Destination) + * 0 = Destination address + * 1 = Source address + * Bit 0 - EQ (Equal / Not Equal) + * 0 = Not Equal produces true result + * 1 = Equal produces true result + * + * 0x02-0x07 - MAC Address + * 0x02 - MAC Address [47:40] + * 0x03 - MAC Address [39:32] + * 0x04 - MAC Address [31:24] + * 0x05 - MAC Address [23:16] + * 0x06 - MAC Address [15:8] + * 0x07 - MAC Address [7:0] + * + * 0x08-0x09 - EtherType + * 0x08 - EtherType [15:8] + * 0x09 - EtherType [7:0] + */ +static void ksz9477_acl_matching_rule_cfg_l2(u8 *entry, u16 ethertype, + u8 *eth_addr, bool is_src) +{ + u8 enb = 0; + u8 val; + + if (ethertype) + enb |= KSZ9477_ACL_ENB_L2_TYPE; + if (eth_addr) + enb |= KSZ9477_ACL_ENB_L2_MAC; + + val = FIELD_PREP(KSZ9477_ACL_MD_MASK, KSZ9477_ACL_MD_L2_MAC) | + FIELD_PREP(KSZ9477_ACL_ENB_MASK, enb) | + FIELD_PREP(KSZ9477_ACL_SD_SRC, is_src) | KSZ9477_ACL_EQ_EQUAL; + ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_1, val); + + if (eth_addr) { + int i; + + for (i = 0; i < ETH_ALEN; i++) { + ksz9477_acl_set_reg(entry, + KSZ9477_ACL_PORT_ACCESS_2 + i, + eth_addr[i]); + } + } + + ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_8, ethertype >> 8); + ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_9, ethertype & 0xff); +} + +/** + * ksz9477_acl_action_rule_cfg - Set action for an ACL entry + * @entry: Pointer to the ACL entry + * @force_prio: If true, force the priority value + * @prio_val: Priority value + * + * This function sets the action 
for the specified ACL entry. It prepares + * the priority mode and traffic class values and updates the entry's + * action registers accordingly. Currently, there is no port or VLAN PCP + * remapping. + * + * ACL Action Rule Parameters for Non-Count Modes (MD ≠01 or ENB ≠00) + * + * 0x0A - PM, P, RPE, RP[2:1] + * Bits 7:6 - PM[1:0] - Priority Mode + * 00 = ACL does not specify the packet priority. Priority is + * determined by standard QoS functions. + * 01 = Change packet priority to P[2:0] if it is greater than QoS + * result. + * 10 = Change packet priority to P[2:0] if it is smaller than the + * QoS result. + * 11 = Always change packet priority to P[2:0]. + * Bits 5:3 - P[2:0] - Priority value + * Bit 2 - RPE - Remark Priority Enable + * Bits 1:0 - RP[2:1] - Remarked Priority value (bits 2:1) + * 0 = Disable priority remarking + * 1 = Enable priority remarking. VLAN tag priority (PCP) bits are + * replaced by RP[2:0]. + * + * 0x0B - RP[0], MM + * Bit 7 - RP[0] - Remarked Priority value (bit 0) + * Bits 6:5 - MM[1:0] - Map Mode + * 00 = No forwarding remapping + * 01 = The forwarding map in FORWARD is OR'ed with the forwarding + * map from the Address Lookup Table. + * 10 = The forwarding map in FORWARD is AND'ed with the forwarding + * map from the Address Lookup Table. + * 11 = The forwarding map in FORWARD replaces the forwarding map + * from the Address Lookup Table. + * 0x0D - FORWARD[n:0] + * Bits 7:0 - FORWARD[n:0] - Forwarding map. Bit 0 = port 1, + * bit 1 = port 2, etc. + * 1 = enable forwarding to this port + * 0 = do not forward to this port + */ +void ksz9477_acl_action_rule_cfg(u8 *entry, bool force_prio, u8 prio_val) +{ + u8 prio_mode, val; + + if (force_prio) + prio_mode = KSZ9477_ACL_PM_REPLACE; + else + prio_mode = KSZ9477_ACL_PM_DISABLE; + + val = FIELD_PREP(KSZ9477_ACL_PM_M, prio_mode) | + FIELD_PREP(KSZ9477_ACL_P_M, prio_val); + ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_A, val); + + /* no port or VLAN PCP remapping for now */ + ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_B, 0); + ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_D, 0); +} + +/** + * ksz9477_acl_processing_rule_set_action - Set the action for the processing + * rule set. + * @entry: Pointer to the ACL entry + * @action_idx: Index of the action to be applied + * + * This function sets the action for the processing rule set by updating the + * appropriate register in the entry. There can be only one action per + * processing rule. + * + * Access Control List (ACL) Processing Rule Registers: + * + * 0x00 - First Rule Number (FRN) + * Bits 3:0 - First Rule Number. Pointer to an Action rule entry. + */ +void ksz9477_acl_processing_rule_set_action(u8 *entry, u8 action_idx) +{ + ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_0, action_idx); +} + +/** + * ksz9477_acl_processing_rule_add_match - Add a matching rule to the rule set + * @entry: Pointer to the ACL entry + * @match_idx: Index of the matching rule to be added + * + * This function adds a matching rule to the rule set by updating the + * appropriate bits in the entry's rule set registers. + * + * Access Control List (ACL) Processing Rule Registers: + * + * 0x0E - RuleSet [15:8] + * Bits 7:0 - RuleSet [15:8] Specifies a set of one or more Matching rule + * entries. RuleSet has one bit for each of the 16 Matching rule entries. + * If multiple Matching rules are selected, then all conditions will be + * AND'ed to produce a final match result. 
+ * 0 = Matching rule not selected + * 1 = Matching rule selected + * + * 0x0F - RuleSet [7:0] + * Bits 7:0 - RuleSet [7:0] + */ +static void ksz9477_acl_processing_rule_add_match(u8 *entry, u8 match_idx) +{ + u8 vale = entry[KSZ9477_ACL_PORT_ACCESS_E]; + u8 valf = entry[KSZ9477_ACL_PORT_ACCESS_F]; + + if (match_idx < 8) + valf |= BIT(match_idx); + else + vale |= BIT(match_idx - 8); + + ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_E, vale); + ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_F, valf); +} + +/** + * ksz9477_acl_get_init_entry - Get a new uninitialized entry for a specified + * port on a ksz_device. + * @dev: The ksz_device instance. + * @port: The port number to get the uninitialized entry for. + * @cookie: The cookie to associate with the entry. + * @prio: The priority to associate with the entry. + * + * This function retrieves the next available ACL entry for the specified port, + * clears all access flags, and associates it with the current cookie. + * + * Returns: A pointer to the new uninitialized ACL entry. + */ +static struct ksz9477_acl_entry * +ksz9477_acl_get_init_entry(struct ksz_device *dev, int port, + unsigned long cookie, u32 prio) +{ + struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv; + struct ksz9477_acl_entries *acles = &acl->acles; + struct ksz9477_acl_entry *entry; + + entry = &acles->entries[acles->entries_count]; + entry->cookie = cookie; + entry->prio = prio; + + /* clear all access flags */ + entry->entry[KSZ9477_ACL_PORT_ACCESS_10] = 0; + entry->entry[KSZ9477_ACL_PORT_ACCESS_11] = 0; + + return entry; +} + +/** + * ksz9477_acl_match_process_l2 - Configure Layer 2 ACL matching rules and + * processing rules. + * @dev: Pointer to the ksz_device. + * @port: Port number. + * @ethtype: Ethernet type. + * @src_mac: Source MAC address. + * @dst_mac: Destination MAC address. + * @cookie: The cookie to associate with the entry. + * @prio: The priority of the entry. + * + * This function sets up matching and processing rules for Layer 2 ACLs. + * It takes into account that only one MAC per entry is supported. + */ +void ksz9477_acl_match_process_l2(struct ksz_device *dev, int port, + u16 ethtype, u8 *src_mac, u8 *dst_mac, + unsigned long cookie, u32 prio) +{ + struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv; + struct ksz9477_acl_entries *acles = &acl->acles; + struct ksz9477_acl_entry *entry; + + entry = ksz9477_acl_get_init_entry(dev, port, cookie, prio); + + /* ACL supports only one MAC per entry */ + if (src_mac && dst_mac) { + ksz9477_acl_matching_rule_cfg_l2(entry->entry, ethtype, src_mac, + true); + + /* Add both match entries to first processing rule */ + ksz9477_acl_processing_rule_add_match(entry->entry, + acles->entries_count); + acles->entries_count++; + ksz9477_acl_processing_rule_add_match(entry->entry, + acles->entries_count); + + entry = ksz9477_acl_get_init_entry(dev, port, cookie, prio); + ksz9477_acl_matching_rule_cfg_l2(entry->entry, 0, dst_mac, + false); + acles->entries_count++; + } else { + u8 *mac = src_mac ? src_mac : dst_mac; + bool is_src = src_mac ? 
true : false; + + ksz9477_acl_matching_rule_cfg_l2(entry->entry, ethtype, mac, + is_src); + ksz9477_acl_processing_rule_add_match(entry->entry, + acles->entries_count); + acles->entries_count++; + } +} diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c index 2710afad4f3a..cac4a607e54a 100644 --- a/drivers/net/dsa/microchip/ksz9477_i2c.c +++ b/drivers/net/dsa/microchip/ksz9477_i2c.c @@ -66,10 +66,7 @@ static void ksz9477_i2c_shutdown(struct i2c_client *i2c) if (!dev) return; - if (dev->dev_ops->reset) - dev->dev_ops->reset(dev); - - dsa_switch_shutdown(dev->ds); + ksz_switch_shutdown(dev); i2c_set_clientdata(i2c, NULL); } diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h index cba3dba58bc3..f3a205ee483f 100644 --- a/drivers/net/dsa/microchip/ksz9477_reg.h +++ b/drivers/net/dsa/microchip/ksz9477_reg.h @@ -112,19 +112,6 @@ #define REG_SW_IBA_SYNC__1 0x010C -#define REG_SW_IO_STRENGTH__1 0x010D -#define SW_DRIVE_STRENGTH_M 0x7 -#define SW_DRIVE_STRENGTH_2MA 0 -#define SW_DRIVE_STRENGTH_4MA 1 -#define SW_DRIVE_STRENGTH_8MA 2 -#define SW_DRIVE_STRENGTH_12MA 3 -#define SW_DRIVE_STRENGTH_16MA 4 -#define SW_DRIVE_STRENGTH_20MA 5 -#define SW_DRIVE_STRENGTH_24MA 6 -#define SW_DRIVE_STRENGTH_28MA 7 -#define SW_HI_SPEED_DRIVE_STRENGTH_S 4 -#define SW_LO_SPEED_DRIVE_STRENGTH_S 0 - #define REG_SW_IBA_STATUS__4 0x0110 #define SW_IBA_REQ BIT(31) @@ -166,13 +153,6 @@ #define SW_DOUBLE_TAG BIT(7) #define SW_RESET BIT(1) -#define REG_SW_MAC_ADDR_0 0x0302 -#define REG_SW_MAC_ADDR_1 0x0303 -#define REG_SW_MAC_ADDR_2 0x0304 -#define REG_SW_MAC_ADDR_3 0x0305 -#define REG_SW_MAC_ADDR_4 0x0306 -#define REG_SW_MAC_ADDR_5 0x0307 - #define REG_SW_MTU__2 0x0308 #define REG_SW_MTU_MASK GENMASK(13, 0) diff --git a/drivers/net/dsa/microchip/ksz9477_tc_flower.c b/drivers/net/dsa/microchip/ksz9477_tc_flower.c new file mode 100644 index 000000000000..8b2f5be667e0 --- /dev/null +++ b/drivers/net/dsa/microchip/ksz9477_tc_flower.c @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2023 Pengutronix, Oleksij Rempel <kernel@pengutronix.de> + +#include "ksz9477.h" +#include "ksz9477_reg.h" +#include "ksz_common.h" + +#define ETHER_TYPE_FULL_MASK cpu_to_be16(~0) +#define KSZ9477_MAX_TC 7 + +/** + * ksz9477_flower_parse_key_l2 - Parse Layer 2 key from flow rule and configure + * ACL entries accordingly. + * @dev: Pointer to the ksz_device. + * @port: Port number. + * @extack: Pointer to the netlink_ext_ack. + * @rule: Pointer to the flow_rule. + * @cookie: The cookie to associate with the entry. + * @prio: The priority of the entry. + * + * This function parses the Layer 2 key from the flow rule and configures + * the corresponding ACL entries. It checks for unsupported offloads and + * available entries before proceeding with the configuration. + * + * Returns: 0 on success or a negative error code on failure. 
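+ *
+ * A rule of the following shape (interface name is illustrative) would be
+ * accepted by this parser:
+ *
+ *   tc filter add dev lan1 ingress flower src_mac 00:11:22:33:44:55 \
+ *	action skbedit priority 3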
+ */ +static int ksz9477_flower_parse_key_l2(struct ksz_device *dev, int port, + struct netlink_ext_ack *extack, + struct flow_rule *rule, + unsigned long cookie, u32 prio) +{ + struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv; + struct flow_match_eth_addrs ematch; + struct ksz9477_acl_entries *acles; + int required_entries; + u8 *src_mac = NULL; + u8 *dst_mac = NULL; + u16 ethtype = 0; + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + + if (match.key->n_proto) { + if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) { + NL_SET_ERR_MSG_MOD(extack, + "ethernet type mask must be a full mask"); + return -EINVAL; + } + + ethtype = be16_to_cpu(match.key->n_proto); + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + flow_rule_match_eth_addrs(rule, &ematch); + + if (!is_zero_ether_addr(ematch.key->src)) { + if (!is_broadcast_ether_addr(ematch.mask->src)) + goto not_full_mask_err; + + src_mac = ematch.key->src; + } + + if (!is_zero_ether_addr(ematch.key->dst)) { + if (!is_broadcast_ether_addr(ematch.mask->dst)) + goto not_full_mask_err; + + dst_mac = ematch.key->dst; + } + } + + acles = &acl->acles; + /* ACL supports only one MAC per entry */ + required_entries = src_mac && dst_mac ? 2 : 1; + + /* Check if there are enough available entries */ + if (acles->entries_count + required_entries > KSZ9477_ACL_MAX_ENTRIES) { + NL_SET_ERR_MSG_MOD(extack, "ACL entry limit reached"); + return -EOPNOTSUPP; + } + + ksz9477_acl_match_process_l2(dev, port, ethtype, src_mac, dst_mac, + cookie, prio); + + return 0; + +not_full_mask_err: + NL_SET_ERR_MSG_MOD(extack, "MAC address mask must be a full mask"); + return -EOPNOTSUPP; +} + +/** + * ksz9477_flower_parse_key - Parse flow rule keys for a specified port on a + * ksz_device. + * @dev: The ksz_device instance. + * @port: The port number to parse the flow rule keys for. + * @extack: The netlink extended ACK for reporting errors. + * @rule: The flow_rule to parse. + * @cookie: The cookie to associate with the entry. + * @prio: The priority of the entry. + * + * This function checks if the used keys in the flow rule are supported by + * the device and parses the L2 keys if they match. If unsupported keys are + * used, an error message is set in the extended ACK. + * + * Returns: 0 on success or a negative error code on failure. + */ +static int ksz9477_flower_parse_key(struct ksz_device *dev, int port, + struct netlink_ext_ack *extack, + struct flow_rule *rule, + unsigned long cookie, u32 prio) +{ + struct flow_dissector *dissector = rule->match.dissector; + int ret; + + if (dissector->used_keys & + ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL))) { + NL_SET_ERR_MSG_MOD(extack, + "Unsupported keys used"); + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) || + flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + ret = ksz9477_flower_parse_key_l2(dev, port, extack, rule, + cookie, prio); + if (ret) + return ret; + } + + return 0; +} + +/** + * ksz9477_flower_parse_action - Parse flow rule actions for a specified port + * on a ksz_device. + * @dev: The ksz_device instance. + * @port: The port number to parse the flow rule actions for. + * @extack: The netlink extended ACK for reporting errors. + * @cls: The flow_cls_offload instance containing the flow rule. + * @entry_idx: The index of the ACL entry to store the action. 
+ * + * This function checks if the actions in the flow rule are supported by + * the device. Currently, only actions that change priorities are supported. + * If unsupported actions are encountered, an error message is set in the + * extended ACK. + * + * Returns: 0 on success or a negative error code on failure. + */ +static int ksz9477_flower_parse_action(struct ksz_device *dev, int port, + struct netlink_ext_ack *extack, + struct flow_cls_offload *cls, + int entry_idx) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv; + const struct flow_action_entry *act; + struct ksz9477_acl_entry *entry; + bool prio_force = false; + u8 prio_val = 0; + int i; + + if (TC_H_MIN(cls->classid)) { + NL_SET_ERR_MSG_MOD(extack, "hw_tc is not supported. Use: action skbedit prio"); + return -EOPNOTSUPP; + } + + flow_action_for_each(i, act, &rule->action) { + switch (act->id) { + case FLOW_ACTION_PRIORITY: + if (act->priority > KSZ9477_MAX_TC) { + NL_SET_ERR_MSG_MOD(extack, "Priority value is too high"); + return -EOPNOTSUPP; + } + prio_force = true; + prio_val = act->priority; + break; + default: + NL_SET_ERR_MSG_MOD(extack, "action not supported"); + return -EOPNOTSUPP; + } + } + + /* pick entry to store action */ + entry = &acl->acles.entries[entry_idx]; + + ksz9477_acl_action_rule_cfg(entry->entry, prio_force, prio_val); + ksz9477_acl_processing_rule_set_action(entry->entry, entry_idx); + + return 0; +} + +/** + * ksz9477_cls_flower_add - Add a flow classification rule for a specified port + * on a ksz_device. + * @ds: The DSA switch instance. + * @port: The port number to add the flow classification rule to. + * @cls: The flow_cls_offload instance containing the flow rule. + * @ingress: A flag indicating if the rule is applied on the ingress path. + * + * This function adds a flow classification rule for a specified port on a + * ksz_device. It checks if the ACL offloading is supported and parses the flow + * keys and actions. If the ACL is not supported, it returns an error. If there + * are unprocessed entries, it parses the action for the rule. + * + * Returns: 0 on success or a negative error code on failure. + */ +int ksz9477_cls_flower_add(struct dsa_switch *ds, int port, + struct flow_cls_offload *cls, bool ingress) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct netlink_ext_ack *extack = cls->common.extack; + struct ksz_device *dev = ds->priv; + struct ksz9477_acl_priv *acl; + int action_entry_idx; + int ret; + + acl = dev->ports[port].acl_priv; + + if (!acl) { + NL_SET_ERR_MSG_MOD(extack, "ACL offloading is not supported"); + return -EOPNOTSUPP; + } + + /* A complex rule set can take multiple entries. Use first entry + * to store the action. + */ + action_entry_idx = acl->acles.entries_count; + + ret = ksz9477_flower_parse_key(dev, port, extack, rule, cls->cookie, + cls->common.prio); + if (ret) + return ret; + + ret = ksz9477_flower_parse_action(dev, port, extack, cls, + action_entry_idx); + if (ret) + return ret; + + ret = ksz9477_sort_acl_entries(dev, port); + if (ret) + return ret; + + return ksz9477_acl_write_list(dev, port); +} + +/** + * ksz9477_cls_flower_del - Remove a flow classification rule for a specified + * port on a ksz_device. + * @ds: The DSA switch instance. + * @port: The port number to remove the flow classification rule from. + * @cls: The flow_cls_offload instance containing the flow rule. + * @ingress: A flag indicating if the rule is applied on the ingress path. 
+ *
+ * This function removes a flow classification rule for a specified port on a
+ * ksz_device. It checks if the ACL is initialized, and if not, returns an
+ * error. If the ACL is initialized, it removes entries with the specified
+ * cookie and rewrites the ACL list.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int ksz9477_cls_flower_del(struct dsa_switch *ds, int port,
+			   struct flow_cls_offload *cls, bool ingress)
+{
+	unsigned long cookie = cls->cookie;
+	struct ksz_device *dev = ds->priv;
+	struct ksz9477_acl_priv *acl;
+
+	acl = dev->ports[port].acl_priv;
+
+	if (!acl)
+		return -EOPNOTSUPP;
+
+	ksz9477_acl_remove_entries(dev, port, &acl->acles, cookie);
+
+	return ksz9477_acl_write_list(dev, port);
+}
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 6c0623f88654..ff4b39601c93 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -16,10 +16,11 @@
 #include <linux/etherdevice.h>
 #include <linux/if_bridge.h>
 #include <linux/if_vlan.h>
+#include <linux/if_hsr.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
+#include <linux/of.h>
 #include <linux/of_mdio.h>
-#include <linux/of_device.h>
 #include <linux/of_net.h>
 #include <linux/micrel_phy.h>
 #include <net/dsa.h>
@@ -186,6 +187,72 @@ static const struct ksz_mib_names ksz9477_mib_names[] = {
 	{ 0x83, "tx_discards" },
 };
 
+struct ksz_driver_strength_prop {
+	const char *name;
+	int offset;
+	int value;
+};
+
+enum ksz_driver_strength_type {
+	KSZ_DRIVER_STRENGTH_HI,
+	KSZ_DRIVER_STRENGTH_LO,
+	KSZ_DRIVER_STRENGTH_IO,
+};
+
+/**
+ * struct ksz_drive_strength - drive strength mapping
+ * @reg_val: register value
+ * @microamp: microamp value
+ */
+struct ksz_drive_strength {
+	u32 reg_val;
+	u32 microamp;
+};
+
+/* ksz9477_drive_strengths - Drive strength mapping for KSZ9477 variants
+ *
+ * These values are not documented for the KSZ9477 variants, but Microchip has
+ * confirmed that KSZ9477, KSZ9567, KSZ8567, KSZ9897, KSZ9896, KSZ9563, KSZ9893
+ * and KSZ8563 use the same drive strength register settings as the KSZ8795.
+ *
+ * The KSZ8795CLX documentation provides more information, with some
+ * recommendations:
+ * - for high speed signals
+ *   1. 4 mA or 8 mA is often used for MII, RMII, and SPI interfaces with
+ *      2.5V or 3.3V VDDIO.
+ *   2. 12 mA or 16 mA is often used for MII, RMII, and SPI interfaces with
+ *      1.8V VDDIO.
+ *   3. 20 mA or 24 mA is often used for GMII/RGMII interfaces with 2.5V or
+ *      3.3V VDDIO.
+ *   4. 28 mA is often used for GMII/RGMII interfaces with 1.8V VDDIO.
+ *   5. Within the same interface, heavier loading should use the higher of
+ *      the drive strengths.
+ * - for low speed signals
+ *   1. 3.3V VDDIO, use either 4 mA or 8 mA.
+ *   2. 2.5V VDDIO, use either 8 mA or 12 mA.
+ *   3. 1.8V VDDIO, use either 12 mA or 16 mA.
+ *   4. For heavy loading, a higher drive strength can be used.
+ */
+static const struct ksz_drive_strength ksz9477_drive_strengths[] = {
+	{ SW_DRIVE_STRENGTH_2MA,  2000 },
+	{ SW_DRIVE_STRENGTH_4MA,  4000 },
+	{ SW_DRIVE_STRENGTH_8MA,  8000 },
+	{ SW_DRIVE_STRENGTH_12MA, 12000 },
+	{ SW_DRIVE_STRENGTH_16MA, 16000 },
+	{ SW_DRIVE_STRENGTH_20MA, 20000 },
+	{ SW_DRIVE_STRENGTH_24MA, 24000 },
+	{ SW_DRIVE_STRENGTH_28MA, 28000 },
+};
+
+/* ksz8830_drive_strengths - Drive strength mapping for KSZ8830, KSZ8873, ..
+ * variants.
+ * These values are documented in the KSZ8873 and KSZ8863 datasheets.
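+ * For example, a request for 16000 microamp maps to
+ * KSZ8873_DRIVE_STRENGTH_16MA; any value other than 8000 or 16000 microamp
+ * is rejected by ksz_drive_strength_to_reg().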
+ */ +static const struct ksz_drive_strength ksz8830_drive_strengths[] = { + { 0, 8000 }, + { KSZ8873_DRIVE_STRENGTH_16MA, 16000 }, +}; + static const struct ksz_dev_ops ksz8_dev_ops = { .setup = ksz8_setup, .get_port_addr = ksz8_get_port_addr, @@ -252,6 +319,9 @@ static const struct ksz_dev_ops ksz9477_dev_ops = { .mdb_del = ksz9477_mdb_del, .change_mtu = ksz9477_change_mtu, .phylink_mac_link_up = ksz9477_phylink_mac_link_up, + .get_wol = ksz9477_get_wol, + .set_wol = ksz9477_set_wol, + .wol_pre_shutdown = ksz9477_wol_pre_shutdown, .config_cpu_port = ksz9477_config_cpu_port, .tc_cbs_set_cinc = ksz9477_tc_cbs_set_cinc, .enable_stp_addr = ksz9477_enable_stp_addr, @@ -298,6 +368,7 @@ static const struct ksz_dev_ops lan937x_dev_ops = { }; static const u16 ksz8795_regs[] = { + [REG_SW_MAC_ADDR] = 0x68, [REG_IND_CTRL_0] = 0x6E, [REG_IND_DATA_8] = 0x70, [REG_IND_DATA_CHECK] = 0x72, @@ -373,6 +444,7 @@ static const u8 ksz8795_shifts[] = { }; static const u16 ksz8863_regs[] = { + [REG_SW_MAC_ADDR] = 0x70, [REG_IND_CTRL_0] = 0x79, [REG_IND_DATA_8] = 0x7B, [REG_IND_DATA_CHECK] = 0x7B, @@ -426,6 +498,7 @@ static u8 ksz8863_shifts[] = { }; static const u16 ksz9477_regs[] = { + [REG_SW_MAC_ADDR] = 0x0302, [P_STP_CTRL] = 0x0B04, [S_START_CTRL] = 0x0300, [S_BROADCAST_CTRL] = 0x0332, @@ -1619,8 +1692,6 @@ static void ksz_phylink_get_caps(struct dsa_switch *ds, int port, { struct ksz_device *dev = ds->priv; - config->legacy_pre_march2020 = false; - if (dev->info->supports_mii[port]) __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces); @@ -1878,14 +1949,14 @@ static int ksz_irq_phy_setup(struct ksz_device *dev) ret = irq; goto out; } - ds->slave_mii_bus->irq[phy] = irq; + ds->user_mii_bus->irq[phy] = irq; } } return 0; out: while (phy--) if (BIT(phy) & ds->phys_mii_mask) - irq_dispose_mapping(ds->slave_mii_bus->irq[phy]); + irq_dispose_mapping(ds->user_mii_bus->irq[phy]); return ret; } @@ -1897,7 +1968,7 @@ static void ksz_irq_phy_free(struct ksz_device *dev) for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++) if (BIT(phy) & ds->phys_mii_mask) - irq_dispose_mapping(ds->slave_mii_bus->irq[phy]); + irq_dispose_mapping(ds->user_mii_bus->irq[phy]); } static int ksz_mdio_register(struct ksz_device *dev) @@ -1920,12 +1991,12 @@ static int ksz_mdio_register(struct ksz_device *dev) bus->priv = dev; bus->read = ksz_sw_mdio_read; bus->write = ksz_sw_mdio_write; - bus->name = "ksz slave smi"; + bus->name = "ksz user smi"; snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index); bus->parent = ds->dev; bus->phy_mask = ~ds->phys_mii_mask; - ds->slave_mii_bus = bus; + ds->user_mii_bus = bus; if (dev->irq > 0) { ret = ksz_irq_phy_setup(dev); @@ -2277,7 +2348,7 @@ static void ksz_mib_read_work(struct work_struct *work) if (!p->read) { const struct dsa_port *dp = dsa_to_port(dev->ds, i); - if (!netif_carrier_ok(dp->slave)) + if (!netif_carrier_ok(dp->user)) mib->cnt_ptr = dev->info->reg_mib_cnt; } port_r_cnt(dev, i); @@ -2337,13 +2408,27 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port) { struct ksz_device *dev = ds->priv; - if (dev->chip_id == KSZ8830_CHIP_ID) { + switch (dev->chip_id) { + case KSZ8830_CHIP_ID: /* Silicon Errata Sheet (DS80000830A): * Port 1 does not work with LinkMD Cable-Testing. * Port 1 does not respond to received PAUSE control frames. 
*/ if (!port) return MICREL_KSZ8_P1_ERRATA; + break; + case KSZ9477_CHIP_ID: + /* KSZ9477 Errata DS80000754C + * + * Module 4: Energy Efficient Ethernet (EEE) feature select must + * be manually disabled + * The EEE feature is enabled by default, but it is not fully + * operational. It must be manually disabled through register + * controls. If not disabled, the PHY ports can auto-negotiate + * to enable EEE, and this feature can cause link drops when + * linked to another device supporting EEE. + */ + return MICREL_NO_EEE; } return 0; @@ -2383,7 +2468,7 @@ static void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, mutex_lock(&mib->cnt_mutex); /* Only read dropped counters if no link. */ - if (!netif_carrier_ok(dp->slave)) + if (!netif_carrier_ok(dp->user)) mib->cnt_ptr = dev->info->reg_mib_cnt; port_r_cnt(dev, port); memcpy(buf, mib->counters, dev->info->mib_cnt * sizeof(u64)); @@ -2486,15 +2571,14 @@ static int ksz_port_mdb_del(struct dsa_switch *ds, int port, return dev->dev_ops->mdb_del(dev, port, mdb, db); } -static int ksz_enable_port(struct dsa_switch *ds, int port, - struct phy_device *phy) +static int ksz_port_setup(struct dsa_switch *ds, int port) { struct ksz_device *dev = ds->priv; if (!dsa_is_user_port(ds, port)) return 0; - /* setup slave port */ + /* setup user port */ dev->dev_ops->port_setup(dev, port, false); /* port_stp_state_set() will be called after to enable the port so @@ -2550,6 +2634,23 @@ void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) ksz_update_port_member(dev, port); } +static void ksz_port_teardown(struct dsa_switch *ds, int port) +{ + struct ksz_device *dev = ds->priv; + + switch (dev->chip_id) { + case KSZ8563_CHIP_ID: + case KSZ9477_CHIP_ID: + case KSZ9563_CHIP_ID: + case KSZ9567_CHIP_ID: + case KSZ9893_CHIP_ID: + case KSZ9896_CHIP_ID: + case KSZ9897_CHIP_ID: + if (dsa_is_user_port(ds, port)) + ksz9477_port_acl_free(dev, port); + } +} + static int ksz_port_pre_bridge_flags(struct dsa_switch *ds, int port, struct switchdev_brport_flags flags, struct netlink_ext_ack *extack) @@ -2612,10 +2713,18 @@ static int ksz_connect_tag_protocol(struct dsa_switch *ds, { struct ksz_tagger_data *tagger_data; - tagger_data = ksz_tagger_data(ds); - tagger_data->xmit_work_fn = ksz_port_deferred_xmit; - - return 0; + switch (proto) { + case DSA_TAG_PROTO_KSZ8795: + return 0; + case DSA_TAG_PROTO_KSZ9893: + case DSA_TAG_PROTO_KSZ9477: + case DSA_TAG_PROTO_LAN937X: + tagger_data = ksz_tagger_data(ds); + tagger_data->xmit_work_fn = ksz_port_deferred_xmit; + return 0; + default: + return -EPROTONOSUPPORT; + } } static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port, @@ -3095,6 +3204,44 @@ static int ksz_switch_detect(struct ksz_device *dev) return 0; } +static int ksz_cls_flower_add(struct dsa_switch *ds, int port, + struct flow_cls_offload *cls, bool ingress) +{ + struct ksz_device *dev = ds->priv; + + switch (dev->chip_id) { + case KSZ8563_CHIP_ID: + case KSZ9477_CHIP_ID: + case KSZ9563_CHIP_ID: + case KSZ9567_CHIP_ID: + case KSZ9893_CHIP_ID: + case KSZ9896_CHIP_ID: + case KSZ9897_CHIP_ID: + return ksz9477_cls_flower_add(ds, port, cls, ingress); + } + + return -EOPNOTSUPP; +} + +static int ksz_cls_flower_del(struct dsa_switch *ds, int port, + struct flow_cls_offload *cls, bool ingress) +{ + struct ksz_device *dev = ds->priv; + + switch (dev->chip_id) { + case KSZ8563_CHIP_ID: + case KSZ9477_CHIP_ID: + case KSZ9563_CHIP_ID: + case KSZ9567_CHIP_ID: + case KSZ9893_CHIP_ID: + case KSZ9896_CHIP_ID: + case KSZ9897_CHIP_ID: + return 
ksz9477_cls_flower_del(ds, port, cls, ingress); + } + + return -EOPNOTSUPP; +} + /* Bandwidth is calculated by idle slope/transmission speed. Then the Bandwidth * is converted to Hex-decimal using the successive multiplication method. On * every step, integer part is taken and decimal part is carry forwarded. @@ -3407,6 +3554,224 @@ static int ksz_setup_tc(struct dsa_switch *ds, int port, } } +static void ksz_get_wol(struct dsa_switch *ds, int port, + struct ethtool_wolinfo *wol) +{ + struct ksz_device *dev = ds->priv; + + if (dev->dev_ops->get_wol) + dev->dev_ops->get_wol(dev, port, wol); +} + +static int ksz_set_wol(struct dsa_switch *ds, int port, + struct ethtool_wolinfo *wol) +{ + struct ksz_device *dev = ds->priv; + + if (dev->dev_ops->set_wol) + return dev->dev_ops->set_wol(dev, port, wol); + + return -EOPNOTSUPP; +} + +static int ksz_port_set_mac_address(struct dsa_switch *ds, int port, + const unsigned char *addr) +{ + struct dsa_port *dp = dsa_to_port(ds, port); + struct ethtool_wolinfo wol; + + if (dp->hsr_dev) { + dev_err(ds->dev, + "Cannot change MAC address on port %d with active HSR offload\n", + port); + return -EBUSY; + } + + ksz_get_wol(ds, dp->index, &wol); + if (wol.wolopts & WAKE_MAGIC) { + dev_err(ds->dev, + "Cannot change MAC address on port %d with active Wake on Magic Packet\n", + port); + return -EBUSY; + } + + return 0; +} + +/** + * ksz_is_port_mac_global_usable - Check if the MAC address on a given port + * can be used as a global address. + * @ds: Pointer to the DSA switch structure. + * @port: The port number on which the MAC address is to be checked. + * + * This function examines the MAC address set on the specified port and + * determines if it can be used as a global address for the switch. + * + * Return: true if the port's MAC address can be used as a global address, false + * otherwise. + */ +bool ksz_is_port_mac_global_usable(struct dsa_switch *ds, int port) +{ + struct net_device *user = dsa_to_port(ds, port)->user; + const unsigned char *addr = user->dev_addr; + struct ksz_switch_macaddr *switch_macaddr; + struct ksz_device *dev = ds->priv; + + ASSERT_RTNL(); + + switch_macaddr = dev->switch_macaddr; + if (switch_macaddr && !ether_addr_equal(switch_macaddr->addr, addr)) + return false; + + return true; +} + +/** + * ksz_switch_macaddr_get - Program the switch's MAC address register. + * @ds: DSA switch instance. + * @port: Port number. + * @extack: Netlink extended acknowledgment. + * + * This function programs the switch's MAC address register with the MAC address + * of the requesting user port. This single address is used by the switch for + * multiple features like HSR self-address filtering and WoL. Other user ports + * can share ownership of this address as long as their MAC address is the same. + * The MAC addresses of user ports must not change while they have ownership of + * the switch MAC address. + * + * Return: 0 on success, or other error codes on failure. 
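+ *
+ * Callers must hold RTNL (the function asserts this) and pair each
+ * successful call with a later ksz_switch_macaddr_put() to drop the
+ * reference again.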
+ */ +int ksz_switch_macaddr_get(struct dsa_switch *ds, int port, + struct netlink_ext_ack *extack) +{ + struct net_device *user = dsa_to_port(ds, port)->user; + const unsigned char *addr = user->dev_addr; + struct ksz_switch_macaddr *switch_macaddr; + struct ksz_device *dev = ds->priv; + const u16 *regs = dev->info->regs; + int i, ret; + + /* Make sure concurrent MAC address changes are blocked */ + ASSERT_RTNL(); + + switch_macaddr = dev->switch_macaddr; + if (switch_macaddr) { + if (!ether_addr_equal(switch_macaddr->addr, addr)) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "Switch already configured for MAC address %pM", + switch_macaddr->addr); + return -EBUSY; + } + + refcount_inc(&switch_macaddr->refcount); + return 0; + } + + switch_macaddr = kzalloc(sizeof(*switch_macaddr), GFP_KERNEL); + if (!switch_macaddr) + return -ENOMEM; + + ether_addr_copy(switch_macaddr->addr, addr); + refcount_set(&switch_macaddr->refcount, 1); + dev->switch_macaddr = switch_macaddr; + + /* Program the switch MAC address to hardware */ + for (i = 0; i < ETH_ALEN; i++) { + ret = ksz_write8(dev, regs[REG_SW_MAC_ADDR] + i, addr[i]); + if (ret) + goto macaddr_drop; + } + + return 0; + +macaddr_drop: + dev->switch_macaddr = NULL; + refcount_set(&switch_macaddr->refcount, 0); + kfree(switch_macaddr); + + return ret; +} + +void ksz_switch_macaddr_put(struct dsa_switch *ds) +{ + struct ksz_switch_macaddr *switch_macaddr; + struct ksz_device *dev = ds->priv; + const u16 *regs = dev->info->regs; + int i; + + /* Make sure concurrent MAC address changes are blocked */ + ASSERT_RTNL(); + + switch_macaddr = dev->switch_macaddr; + if (!refcount_dec_and_test(&switch_macaddr->refcount)) + return; + + for (i = 0; i < ETH_ALEN; i++) + ksz_write8(dev, regs[REG_SW_MAC_ADDR] + i, 0); + + dev->switch_macaddr = NULL; + kfree(switch_macaddr); +} + +static int ksz_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr, + struct netlink_ext_ack *extack) +{ + struct ksz_device *dev = ds->priv; + enum hsr_version ver; + int ret; + + ret = hsr_get_version(hsr, &ver); + if (ret) + return ret; + + if (dev->chip_id != KSZ9477_CHIP_ID) { + NL_SET_ERR_MSG_MOD(extack, "Chip does not support HSR offload"); + return -EOPNOTSUPP; + } + + /* KSZ9477 can support HW offloading of only 1 HSR device */ + if (dev->hsr_dev && hsr != dev->hsr_dev) { + NL_SET_ERR_MSG_MOD(extack, "Offload supported for a single HSR"); + return -EOPNOTSUPP; + } + + /* KSZ9477 only supports HSR v0 and v1 */ + if (!(ver == HSR_V0 || ver == HSR_V1)) { + NL_SET_ERR_MSG_MOD(extack, "Only HSR v0 and v1 supported"); + return -EOPNOTSUPP; + } + + /* Self MAC address filtering, to avoid frames traversing + * the HSR ring more than once. 
+ */ + ret = ksz_switch_macaddr_get(ds, port, extack); + if (ret) + return ret; + + ksz9477_hsr_join(ds, port, hsr); + dev->hsr_dev = hsr; + dev->hsr_ports |= BIT(port); + + return 0; +} + +static int ksz_hsr_leave(struct dsa_switch *ds, int port, + struct net_device *hsr) +{ + struct ksz_device *dev = ds->priv; + + WARN_ON(dev->chip_id != KSZ9477_CHIP_ID); + + ksz9477_hsr_leave(ds, port, hsr); + dev->hsr_ports &= ~BIT(port); + if (!dev->hsr_ports) + dev->hsr_dev = NULL; + + ksz_switch_macaddr_put(ds); + + return 0; +} + static const struct dsa_switch_ops ksz_switch_ops = { .get_tag_protocol = ksz_get_tag_protocol, .connect_tag_protocol = ksz_connect_tag_protocol, @@ -3419,14 +3784,18 @@ static const struct dsa_switch_ops ksz_switch_ops = { .phylink_mac_config = ksz_phylink_mac_config, .phylink_mac_link_up = ksz_phylink_mac_link_up, .phylink_mac_link_down = ksz_mac_link_down, - .port_enable = ksz_enable_port, + .port_setup = ksz_port_setup, .set_ageing_time = ksz_set_ageing_time, .get_strings = ksz_get_strings, .get_ethtool_stats = ksz_get_ethtool_stats, .get_sset_count = ksz_sset_count, .port_bridge_join = ksz_port_bridge_join, .port_bridge_leave = ksz_port_bridge_leave, + .port_hsr_join = ksz_hsr_join, + .port_hsr_leave = ksz_hsr_leave, + .port_set_mac_address = ksz_port_set_mac_address, .port_stp_state_set = ksz_port_stp_state_set, + .port_teardown = ksz_port_teardown, .port_pre_bridge_flags = ksz_port_pre_bridge_flags, .port_bridge_flags = ksz_port_bridge_flags, .port_fast_age = ksz_port_fast_age, @@ -3444,11 +3813,15 @@ static const struct dsa_switch_ops ksz_switch_ops = { .get_pause_stats = ksz_get_pause_stats, .port_change_mtu = ksz_change_mtu, .port_max_mtu = ksz_max_mtu, + .get_wol = ksz_get_wol, + .set_wol = ksz_set_wol, .get_ts_info = ksz_get_ts_info, .port_hwtstamp_get = ksz_hwtstamp_get, .port_hwtstamp_set = ksz_hwtstamp_set, .port_txtstamp = ksz_port_txtstamp, .port_rxtstamp = ksz_port_rxtstamp, + .cls_flower_add = ksz_cls_flower_add, + .cls_flower_del = ksz_cls_flower_del, .port_setup_tc = ksz_setup_tc, .get_mac_eee = ksz_get_mac_eee, .set_mac_eee = ksz_set_mac_eee, @@ -3481,6 +3854,30 @@ struct ksz_device *ksz_switch_alloc(struct device *base, void *priv) } EXPORT_SYMBOL(ksz_switch_alloc); +/** + * ksz_switch_shutdown - Shutdown routine for the switch device. + * @dev: The switch device structure. + * + * This function is responsible for initiating a shutdown sequence for the + * switch device. It invokes the reset operation defined in the device + * operations, if available, to reset the switch. Subsequently, it calls the + * DSA framework's shutdown function to ensure a proper shutdown of the DSA + * switch. + */ +void ksz_switch_shutdown(struct ksz_device *dev) +{ + bool wol_enabled = false; + + if (dev->dev_ops->wol_pre_shutdown) + dev->dev_ops->wol_pre_shutdown(dev, &wol_enabled); + + if (dev->dev_ops->reset && !wol_enabled) + dev->dev_ops->reset(dev); + + dsa_switch_shutdown(dev->ds); +} +EXPORT_SYMBOL(ksz_switch_shutdown); + static void ksz_parse_rgmii_delay(struct ksz_device *dev, int port_num, struct device_node *port_dn) { @@ -3518,6 +3915,245 @@ static void ksz_parse_rgmii_delay(struct ksz_device *dev, int port_num, dev->ports[port_num].rgmii_tx_val = tx_delay; } +/** + * ksz_drive_strength_to_reg() - Convert drive strength value to corresponding + * register value. + * @array: The array of drive strength values to search. + * @array_size: The size of the array. + * @microamp: The drive strength value in microamp to be converted. 
+ * + * This function searches the array of drive strength values for the given + * microamp value and returns the corresponding register value for that drive. + * + * Returns: If found, the corresponding register value for that drive strength + * is returned. Otherwise, -EINVAL is returned indicating an invalid value. + */ +static int ksz_drive_strength_to_reg(const struct ksz_drive_strength *array, + size_t array_size, int microamp) +{ + int i; + + for (i = 0; i < array_size; i++) { + if (array[i].microamp == microamp) + return array[i].reg_val; + } + + return -EINVAL; +} + +/** + * ksz_drive_strength_error() - Report invalid drive strength value + * @dev: ksz device + * @array: The array of drive strength values to search. + * @array_size: The size of the array. + * @microamp: Invalid drive strength value in microamp + * + * This function logs an error message when an unsupported drive strength value + * is detected. It lists out all the supported drive strength values for + * reference in the error message. + */ +static void ksz_drive_strength_error(struct ksz_device *dev, + const struct ksz_drive_strength *array, + size_t array_size, int microamp) +{ + char supported_values[100]; + size_t remaining_size; + int added_len; + char *ptr; + int i; + + remaining_size = sizeof(supported_values); + ptr = supported_values; + + for (i = 0; i < array_size; i++) { + added_len = snprintf(ptr, remaining_size, + i == 0 ? "%d" : ", %d", array[i].microamp); + + if (added_len >= remaining_size) + break; + + ptr += added_len; + remaining_size -= added_len; + } + + dev_err(dev->dev, "Invalid drive strength %d, supported values are %s\n", + microamp, supported_values); +} + +/** + * ksz9477_drive_strength_write() - Set the drive strength for specific KSZ9477 + * chip variants. + * @dev: ksz device + * @props: Array of drive strength properties to be applied + * @num_props: Number of properties in the array + * + * This function configures the drive strength for various KSZ9477 chip variants + * based on the provided properties. It handles chip-specific nuances and + * ensures only valid drive strengths are written to the respective chip. + * + * Return: 0 on successful configuration, a negative error code on failure. + */ +static int ksz9477_drive_strength_write(struct ksz_device *dev, + struct ksz_driver_strength_prop *props, + int num_props) +{ + size_t array_size = ARRAY_SIZE(ksz9477_drive_strengths); + int i, ret, reg; + u8 mask = 0; + u8 val = 0; + + if (props[KSZ_DRIVER_STRENGTH_IO].value != -1) + dev_warn(dev->dev, "%s is not supported by this chip variant\n", + props[KSZ_DRIVER_STRENGTH_IO].name); + + if (dev->chip_id == KSZ8795_CHIP_ID || + dev->chip_id == KSZ8794_CHIP_ID || + dev->chip_id == KSZ8765_CHIP_ID) + reg = KSZ8795_REG_SW_CTRL_20; + else + reg = KSZ9477_REG_SW_IO_STRENGTH; + + for (i = 0; i < num_props; i++) { + if (props[i].value == -1) + continue; + + ret = ksz_drive_strength_to_reg(ksz9477_drive_strengths, + array_size, props[i].value); + if (ret < 0) { + ksz_drive_strength_error(dev, ksz9477_drive_strengths, + array_size, props[i].value); + return ret; + } + + mask |= SW_DRIVE_STRENGTH_M << props[i].offset; + val |= ret << props[i].offset; + } + + return ksz_rmw8(dev, reg, mask, val); +} + +/** + * ksz8830_drive_strength_write() - Set the drive strength configuration for + * KSZ8830 compatible chip variants. 
+/** + * ksz8830_drive_strength_write() - Set the drive strength configuration for + * KSZ8830 compatible chip variants. + * @dev: ksz device + * @props: Array of drive strength properties to be set + * @num_props: Number of properties in the array + * + * This function applies the specified drive strength settings to KSZ8830 chip + * variants (KSZ8873, KSZ8863). It ensures the configuration aligns with what + * the chip variant supports and warns or errors out on unsupported settings. + * + * Return: 0 on success, error code otherwise + */ +static int ksz8830_drive_strength_write(struct ksz_device *dev, + struct ksz_driver_strength_prop *props, + int num_props) +{ + size_t array_size = ARRAY_SIZE(ksz8830_drive_strengths); + int microamp; + int i, ret; + + for (i = 0; i < num_props; i++) { + if (props[i].value == -1 || i == KSZ_DRIVER_STRENGTH_IO) + continue; + + dev_warn(dev->dev, "%s is not supported by this chip variant\n", + props[i].name); + } + + microamp = props[KSZ_DRIVER_STRENGTH_IO].value; + ret = ksz_drive_strength_to_reg(ksz8830_drive_strengths, array_size, + microamp); + if (ret < 0) { + ksz_drive_strength_error(dev, ksz8830_drive_strengths, + array_size, microamp); + return ret; + } + + return ksz_rmw8(dev, KSZ8873_REG_GLOBAL_CTRL_12, + KSZ8873_DRIVE_STRENGTH_16MA, ret); +} + +/** + * ksz_parse_drive_strength() - Extract and apply drive strength configurations + * from device tree properties. + * @dev: ksz device + * + * This function reads the specified drive strength properties from the + * device tree, validates them against the supported chip variants, and sets + * them accordingly. Errors are treated as critical here, as the drive + * strength settings are crucial for EMI compliance. + * + * Return: 0 on success, error code otherwise + */ +static int ksz_parse_drive_strength(struct ksz_device *dev) +{ + struct ksz_driver_strength_prop of_props[] = { + [KSZ_DRIVER_STRENGTH_HI] = { + .name = "microchip,hi-drive-strength-microamp", + .offset = SW_HI_SPEED_DRIVE_STRENGTH_S, + .value = -1, + }, + [KSZ_DRIVER_STRENGTH_LO] = { + .name = "microchip,lo-drive-strength-microamp", + .offset = SW_LO_SPEED_DRIVE_STRENGTH_S, + .value = -1, + }, + [KSZ_DRIVER_STRENGTH_IO] = { + .name = "microchip,io-drive-strength-microamp", + .offset = 0, /* don't care */ + .value = -1, + }, + }; + struct device_node *np = dev->dev->of_node; + bool have_any_prop = false; + int i, ret; + + for (i = 0; i < ARRAY_SIZE(of_props); i++) { + ret = of_property_read_u32(np, of_props[i].name, + &of_props[i].value); + if (ret && ret != -EINVAL) + dev_warn(dev->dev, "Failed to read %s\n", + of_props[i].name); + if (ret) + continue; + + have_any_prop = true; + } + + if (!have_any_prop) + return 0; + + switch (dev->chip_id) { + case KSZ8830_CHIP_ID: + return ksz8830_drive_strength_write(dev, of_props, + ARRAY_SIZE(of_props)); + case KSZ8795_CHIP_ID: + case KSZ8794_CHIP_ID: + case KSZ8765_CHIP_ID: + case KSZ8563_CHIP_ID: + case KSZ9477_CHIP_ID: + case KSZ9563_CHIP_ID: + case KSZ9567_CHIP_ID: + case KSZ9893_CHIP_ID: + case KSZ9896_CHIP_ID: + case KSZ9897_CHIP_ID: + return ksz9477_drive_strength_write(dev, of_props, + ARRAY_SIZE(of_props)); + default: + for (i = 0; i < ARRAY_SIZE(of_props); i++) { + if (of_props[i].value == -1) + continue; + + dev_warn(dev->dev, "%s is not supported by this chip variant\n", + of_props[i].name); + } + } + + return 0; +} + int ksz_switch_register(struct ksz_device *dev) { const struct ksz_chip_data *info; @@ -3600,6 +4236,10 @@ int ksz_switch_register(struct ksz_device *dev) for (port_num = 0; port_num < dev->info->port_cnt; ++port_num) dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA; if
(dev->dev->of_node) { + ret = ksz_parse_drive_strength(dev); + if (ret) + return ret; + ret = of_get_phy_mode(dev->dev->of_node, &interface); if (ret == 0) dev->compat_interface = interface; @@ -3631,6 +4271,9 @@ int ksz_switch_register(struct ksz_device *dev) dev_err(dev->dev, "inconsistent synclko settings\n"); return -EINVAL; } + + dev->wakeup_source = of_property_read_bool(dev->dev->of_node, + "wakeup-source"); } ret = dsa_register_switch(dev->ds); diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index a4de58847dea..b7e8a403a132 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -101,6 +101,11 @@ struct ksz_ptp_irq { int num; }; +struct ksz_switch_macaddr { + unsigned char addr[ETH_ALEN]; + refcount_t refcount; +}; + struct ksz_port { bool remove_tag; /* Remove Tag flag set, for ksz8795 only */ bool learning; @@ -117,6 +122,7 @@ struct ksz_port { u32 rgmii_tx_val; u32 rgmii_rx_val; struct ksz_device *ksz_dev; + void *acl_priv; struct ksz_irq pirq; u8 num; #if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_PTP) @@ -157,6 +163,7 @@ struct ksz_device { phy_interface_t compat_interface; bool synclko_125; bool synclko_disable; + bool wakeup_source; struct vlan_table *vlan_cache; @@ -169,6 +176,10 @@ struct ksz_device { struct mutex lock_irq; /* IRQ Access */ struct ksz_irq girq; struct ksz_ptp_data ptp_data; + + struct ksz_switch_macaddr *switch_macaddr; + struct net_device *hsr_dev; /* HSR */ + u8 hsr_ports; }; /* List of supported models */ @@ -211,6 +222,7 @@ enum ksz_chip_id { }; enum ksz_regs { + REG_SW_MAC_ADDR, REG_IND_CTRL_0, REG_IND_DATA_8, REG_IND_DATA_CHECK, @@ -362,6 +374,11 @@ struct ksz_dev_ops { int duplex, bool tx_pause, bool rx_pause); void (*setup_rgmii_delay)(struct ksz_device *dev, int port); int (*tc_cbs_set_cinc)(struct ksz_device *dev, int port, u32 val); + void (*get_wol)(struct ksz_device *dev, int port, + struct ethtool_wolinfo *wol); + int (*set_wol)(struct ksz_device *dev, int port, + struct ethtool_wolinfo *wol); + void (*wol_pre_shutdown)(struct ksz_device *dev, bool *wol_enabled); void (*config_cpu_port)(struct dsa_switch *ds); int (*enable_stp_addr)(struct ksz_device *dev); int (*reset)(struct ksz_device *dev); @@ -374,12 +391,17 @@ int ksz_switch_register(struct ksz_device *dev); void ksz_switch_remove(struct ksz_device *dev); void ksz_init_mib_timer(struct ksz_device *dev); +bool ksz_is_port_mac_global_usable(struct dsa_switch *ds, int port); void ksz_r_mib_stats64(struct ksz_device *dev, int port); void ksz88xx_r_mib_stats64(struct ksz_device *dev, int port); void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state); bool ksz_get_gbit(struct ksz_device *dev, int port); phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit); extern const struct ksz_chip_data ksz_switch_chips[]; +int ksz_switch_macaddr_get(struct dsa_switch *ds, int port, + struct netlink_ext_ack *extack); +void ksz_switch_macaddr_put(struct dsa_switch *ds); +void ksz_switch_shutdown(struct ksz_device *dev); /* Common register access functions */ static inline struct regmap *ksz_regmap_8(struct ksz_device *dev) @@ -689,6 +711,26 @@ static inline int is_lan937x(struct ksz_device *dev) #define KSZ8_LEGAL_PACKET_SIZE 1518 #define KSZ9477_MAX_FRAME_SIZE 9000 +#define KSZ8873_REG_GLOBAL_CTRL_12 0x0e +/* Drive Strength of I/O Pad + * 0: 8mA, 1: 16mA + */ +#define KSZ8873_DRIVE_STRENGTH_16MA BIT(6) + +#define KSZ8795_REG_SW_CTRL_20 0xa3 +#define KSZ9477_REG_SW_IO_STRENGTH 0x010d 
+#define SW_DRIVE_STRENGTH_M 0x7 +#define SW_DRIVE_STRENGTH_2MA 0 +#define SW_DRIVE_STRENGTH_4MA 1 +#define SW_DRIVE_STRENGTH_8MA 2 +#define SW_DRIVE_STRENGTH_12MA 3 +#define SW_DRIVE_STRENGTH_16MA 4 +#define SW_DRIVE_STRENGTH_20MA 5 +#define SW_DRIVE_STRENGTH_24MA 6 +#define SW_DRIVE_STRENGTH_28MA 7 +#define SW_HI_SPEED_DRIVE_STRENGTH_S 4 +#define SW_LO_SPEED_DRIVE_STRENGTH_S 0 + #define KSZ9477_REG_PORT_OUT_RATE_0 0x0420 #define KSZ9477_OUT_RATE_NO_LIMIT 0 diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c index 4e22a695a64c..1fe105913c75 100644 --- a/drivers/net/dsa/microchip/ksz_ptp.c +++ b/drivers/net/dsa/microchip/ksz_ptp.c @@ -557,7 +557,7 @@ static void ksz_ptp_txtstamp_skb(struct ksz_device *dev, struct skb_shared_hwtstamps hwtstamps = {}; int ret; - /* timeout must include DSA master to transmit data, tstamp latency, + /* timeout must include time for the DSA conduit to transmit data, tstamp latency, * IRQ latency and time for reading the time stamp. */ ret = wait_for_completion_timeout(&prt->tstamp_msg_comp, diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c index 279338451621..6f6d878e742c 100644 --- a/drivers/net/dsa/microchip/ksz_spi.c +++ b/drivers/net/dsa/microchip/ksz_spi.c @@ -114,10 +114,7 @@ static void ksz_spi_shutdown(struct spi_device *spi) if (!dev) return; - if (dev->dev_ops->reset) - dev->dev_ops->reset(dev); - - dsa_switch_shutdown(dev->ds); + ksz_switch_shutdown(dev); spi_set_drvdata(spi, NULL); } diff --git a/drivers/net/dsa/mt7530-mmio.c b/drivers/net/dsa/mt7530-mmio.c index 1a3d4b692f34..b74a230a3f13 100644 --- a/drivers/net/dsa/mt7530-mmio.c +++ b/drivers/net/dsa/mt7530-mmio.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only +#include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/reset.h> @@ -62,15 +63,12 @@ mt7988_probe(struct platform_device *pdev) return dsa_register_switch(priv->ds); } -static int -mt7988_remove(struct platform_device *pdev) +static void mt7988_remove(struct platform_device *pdev) { struct mt7530_priv *priv = platform_get_drvdata(pdev); if (priv) mt7530_remove_common(priv); - - return 0; } static void mt7988_shutdown(struct platform_device *pdev) @@ -87,7 +85,7 @@ static void mt7988_shutdown(struct platform_device *pdev) static struct platform_driver mt7988_platform_driver = { .probe = mt7988_probe, - .remove = mt7988_remove, + .remove_new = mt7988_remove, .shutdown = mt7988_shutdown, .driver = { .name = "mt7530-mmio", diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index b8bb9f3b3609..d27c6b70a2f6 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -836,8 +836,7 @@ mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset, return; for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) - strncpy(data + i * ETH_GSTRING_LEN, mt7530_mib[i].name, - ETH_GSTRING_LEN); + ethtool_sprintf(&data, "%s", mt7530_mib[i].name); } static void @@ -1114,7 +1113,7 @@ mt7530_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) u32 val; /* When a new MTU is set, DSA always sets the CPU port's MTU to the - * largest MTU of the slave ports. Because the switch only has a global + * largest MTU of the user ports. Because the switch only has a global * RX length register, only allowing CPU port here is enough. */ if (!dsa_is_cpu_port(ds, port))
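The mt7530_get_strings() hunk above replaces manual strncpy() indexing with ethtool_sprintf(), which formats a string into the current ETH_GSTRING_LEN slot and advances the caller's cursor past it. A minimal userspace sketch of that contract (the helper body here is a stand-in for the kernel's implementation):

#include <stdio.h>

#define ETH_GSTRING_LEN 32

/* Userspace stand-in for the kernel helper: format one string into the
 * current ETH_GSTRING_LEN slot and advance the cursor to the next slot. */
static void ethtool_sprintf_demo(char **data, const char *fmt, const char *s)
{
	snprintf(*data, ETH_GSTRING_LEN, fmt, s);
	*data += ETH_GSTRING_LEN;
}

int main(void)
{
	/* Hypothetical MIB counter names, just for the demo. */
	static const char *mib_names[] = { "TxDrop", "TxCrcErr", "TxUnicast" };
	char buf[3 * ETH_GSTRING_LEN] = { 0 };
	char *data = buf;

	for (size_t i = 0; i < 3; i++)
		ethtool_sprintf_demo(&data, "%s", mib_names[i]);

	for (size_t i = 0; i < 3; i++)
		printf("%s\n", buf + i * ETH_GSTRING_LEN);
	return 0;
}

Since the helper owns the cursor arithmetic, the caller no longer computes data + i * ETH_GSTRING_LEN by hand, which is the class of off-by-one this conversion removes.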
@@ -2070,7 +2069,7 @@ mt7530_setup_mdio_irq(struct mt7530_priv *priv) unsigned int irq; irq = irq_create_mapping(priv->irq_domain, p); - ds->slave_mii_bus->irq[p] = irq; + ds->user_mii_bus->irq[p] = irq; } } } @@ -2164,7 +2163,7 @@ mt7530_setup_mdio(struct mt7530_priv *priv) if (!bus) return -ENOMEM; - ds->slave_mii_bus = bus; + ds->user_mii_bus = bus; bus->priv = priv; bus->name = KBUILD_MODNAME "-mii"; snprintf(bus->id, MII_BUS_ID_SIZE, KBUILD_MODNAME "-%d", idx++); @@ -2201,20 +2200,20 @@ mt7530_setup(struct dsa_switch *ds) u32 id, val; int ret, i; - /* The parent node of master netdev which holds the common system + /* The parent node of the conduit netdev which holds the common system * controller is also the container for two GMAC nodes, represented * as two netdev instances. */ dsa_switch_for_each_cpu_port(cpu_dp, ds) { - dn = cpu_dp->master->dev.of_node->parent; + dn = cpu_dp->conduit->dev.of_node->parent; /* It doesn't matter which CPU port is found first, - * their masters should share the same parent OF node + * their conduits should share the same parent OF node */ break; } if (!dn) { - dev_err(ds->dev, "parent OF node of DSA master not found"); + dev_err(ds->dev, "parent OF node of DSA conduit not found"); return -EINVAL; } @@ -2489,7 +2488,7 @@ mt7531_setup(struct dsa_switch *ds) if (mt7531_dual_sgmii_supported(priv)) { priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII; - /* Let ds->slave_mii_bus be able to access external phy. */ + /* Let ds->user_mii_bus access the external PHY. */ mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK, MT7531_EXT_P_MDC_11); mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK, @@ -2718,7 +2717,7 @@ mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode, case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: dp = dsa_to_port(ds, port); - phydev = dp->slave->phydev; + phydev = dp->user->phydev; return mt7531_rgmii_setup(priv, port, interface, phydev); case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_NA: @@ -2824,15 +2823,6 @@ static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port, mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK); } -static void mt753x_phylink_pcs_link_up(struct phylink_pcs *pcs, - unsigned int mode, - phy_interface_t interface, - int speed, int duplex) -{ - if (pcs->ops->pcs_link_up) - pcs->ops->pcs_link_up(pcs, mode, interface, speed, duplex); -} - static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface, @@ -2921,8 +2911,6 @@ mt7531_cpu_port_config(struct dsa_switch *ds, int port) return ret; mt7530_write(priv, MT7530_PMCR_P(port), PMCR_CPU_PORT_SETTING(priv->id)); - mt753x_phylink_pcs_link_up(&priv->pcs[port].pcs, MLO_AN_FIXED, - interface, speed, DUPLEX_FULL); mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, interface, NULL, speed, DUPLEX_FULL, true, true); @@ -2953,12 +2941,6 @@ static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port, config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD; - /* This driver does not make use of the speed, duplex, pause or the - * advertisement in its mac_config, so it is safe to mark this driver - * as non-legacy.
- */ - config->legacy_pre_march2020 = false; - priv->info->mac_port_get_caps(ds, port, config); } diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index fdda62d6eb16..294312b58e4f 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -247,11 +247,56 @@ mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) return reg_write(priv, addr, regnum, val); } +static void mv88e6060_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) +{ + unsigned long *interfaces = config->supported_interfaces; + struct mv88e6060_priv *priv = ds->priv; + int addr = REG_PORT(port); + int ret; + + ret = reg_read(priv, addr, PORT_STATUS); + if (ret < 0) { + dev_err(ds->dev, + "port %d: unable to read status register: %pe\n", + port, ERR_PTR(ret)); + return; + } + + /* If the port is configured in SNI mode (acts as a 10Mbps PHY), + * it should have phy-mode = "sni", but that doesn't yet exist, so + * forcibly fail validation until the need arises to introduce it. + */ + if (!(ret & PORT_STATUS_PORTMODE)) { + dev_warn(ds->dev, "port %d: SNI mode not supported\n", port); + return; + } + + config->mac_capabilities = MAC_100 | MAC_10 | MAC_SYM_PAUSE; + + if (port >= 4) { + /* Ports 4 and 5 can support MII, REVMII and REVRMII modes */ + __set_bit(PHY_INTERFACE_MODE_MII, interfaces); + __set_bit(PHY_INTERFACE_MODE_REVMII, interfaces); + __set_bit(PHY_INTERFACE_MODE_REVRMII, interfaces); + } + if (port <= 4) { + /* Ports 0 to 3 have internal PHYs, and port 4 can optionally + * use an internal PHY. + */ + /* Internal PHY */ + __set_bit(PHY_INTERFACE_MODE_INTERNAL, interfaces); + /* Default phylib interface mode */ + __set_bit(PHY_INTERFACE_MODE_GMII, interfaces); + } +} + static const struct dsa_switch_ops mv88e6060_switch_ops = { .get_tag_protocol = mv88e6060_get_tag_protocol, .setup = mv88e6060_setup, .phy_read = mv88e6060_phy_read, .phy_write = mv88e6060_phy_write, + .phylink_get_caps = mv88e6060_phylink_get_caps, }; static int mv88e6060_probe(struct mdio_device *mdiodev) diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile index 1409e691ab77..a9a9651187db 100644 --- a/drivers/net/dsa/mv88e6xxx/Makefile +++ b/drivers/net/dsa/mv88e6xxx/Makefile @@ -9,6 +9,9 @@ mv88e6xxx-objs += global2.o mv88e6xxx-objs += global2_avb.o mv88e6xxx-objs += global2_scratch.o mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += hwtstamp.o +mv88e6xxx-objs += pcs-6185.o +mv88e6xxx-objs += pcs-6352.o +mv88e6xxx-objs += pcs-639x.o mv88e6xxx-objs += phy.o mv88e6xxx-objs += port.o mv88e6xxx-objs += port_hidden.o diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 7af2f08a62f1..07a22c74fe81 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -23,7 +23,7 @@ #include <linux/list.h> #include <linux/mdio.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/platform_data/mv88e6xxx.h> @@ -499,81 +499,6 @@ static int mv88e6xxx_port_ppu_updates(struct mv88e6xxx_chip *chip, int port) return !!(reg & MV88E6XXX_PORT_STS_PHY_DETECT); } -static int mv88e6xxx_serdes_pcs_get_state(struct dsa_switch *ds, int port, - struct phylink_link_state *state) -{ - struct mv88e6xxx_chip *chip = ds->priv; - int lane; - int err; - - mv88e6xxx_reg_lock(chip); - lane = mv88e6xxx_serdes_get_lane(chip, port); - if (lane >= 0 && chip->info->ops->serdes_pcs_get_state) - err = 
chip->info->ops->serdes_pcs_get_state(chip, port, lane, - state); - else - err = -EOPNOTSUPP; - mv88e6xxx_reg_unlock(chip); - - return err; -} - -static int mv88e6xxx_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, - unsigned int mode, - phy_interface_t interface, - const unsigned long *advertise) -{ - const struct mv88e6xxx_ops *ops = chip->info->ops; - int lane; - - if (ops->serdes_pcs_config) { - lane = mv88e6xxx_serdes_get_lane(chip, port); - if (lane >= 0) - return ops->serdes_pcs_config(chip, port, lane, mode, - interface, advertise); - } - - return 0; -} - -static void mv88e6xxx_serdes_pcs_an_restart(struct dsa_switch *ds, int port) -{ - struct mv88e6xxx_chip *chip = ds->priv; - const struct mv88e6xxx_ops *ops; - int err = 0; - int lane; - - ops = chip->info->ops; - - if (ops->serdes_pcs_an_restart) { - mv88e6xxx_reg_lock(chip); - lane = mv88e6xxx_serdes_get_lane(chip, port); - if (lane >= 0) - err = ops->serdes_pcs_an_restart(chip, port, lane); - mv88e6xxx_reg_unlock(chip); - - if (err) - dev_err(ds->dev, "p%d: failed to restart AN\n", port); - } -} - -static int mv88e6xxx_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port, - unsigned int mode, - int speed, int duplex) -{ - const struct mv88e6xxx_ops *ops = chip->info->ops; - int lane; - - if (!phylink_autoneg_inband(mode) && ops->serdes_pcs_link_up) { - lane = mv88e6xxx_serdes_get_lane(chip, port); - if (lane >= 0) - return ops->serdes_pcs_link_up(chip, port, lane, - speed, duplex); - } - - return 0; -} - static const u8 mv88e6185_phy_interface_modes[] = { [MV88E6185_PORT_STS_CMODE_GMII_FD] = PHY_INTERFACE_MODE_GMII, [MV88E6185_PORT_STS_CMODE_MII_100_FD_PS] = PHY_INTERFACE_MODE_MII, @@ -652,6 +577,18 @@ static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port, config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100; } +static void mv88e6351_phylink_get_caps(struct mv88e6xxx_chip *chip, int port, + struct phylink_config *config) +{ + unsigned long *supported = config->supported_interfaces; + + /* Translate the default cmode */ + mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported); + + config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | + MAC_1000FD; +} + static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip) { u16 reg, val; @@ -853,6 +790,20 @@ static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port, } } +static struct phylink_pcs *mv88e6xxx_mac_select_pcs(struct dsa_switch *ds, + int port, + phy_interface_t interface) +{ + struct mv88e6xxx_chip *chip = ds->priv; + struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP); + + if (chip->info->ops->pcs_ops) + pcs = chip->info->ops->pcs_ops->pcs_select(chip, port, + interface); + + return pcs; +} + static int mv88e6xxx_mac_prepare(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface) { @@ -889,16 +840,6 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, state->interface); if (err && err != -EOPNOTSUPP) goto err_unlock; - - err = mv88e6xxx_serdes_pcs_config(chip, port, mode, - state->interface, - state->advertising); - /* FIXME: we should restart negotiation if something changed - - * which is something we get if we convert to using phylinks - * PCS operations. 
- */ - if (err > 0) - err = 0; } err_unlock: @@ -982,17 +923,6 @@ static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port, */ if (!mv88e6xxx_port_ppu_updates(chip, port) || mode == MLO_AN_FIXED) { - /* FIXME: for an automedia port, should we force the link - * down here - what if the link comes up due to "other" media - * while we're bringing the port up, how is the exclusivity - * handled in the Marvell hardware? E.g. port 2 on 88E6390 - * shared between internal PHY and Serdes. - */ - err = mv88e6xxx_serdes_pcs_link_up(chip, port, mode, speed, - duplex); - if (err) - goto error; - if (ops->port_set_speed_duplex) { err = ops->port_set_speed_duplex(chip, port, speed, duplex); @@ -2568,7 +2498,7 @@ static int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, else member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_TAGGED; - /* net/dsa/slave.c will call dsa_port_vlan_add() for the affected port + /* net/dsa/user.c will call dsa_port_vlan_add() for the affected port * and then the CPU port. Do not warn for duplicates for the CPU port. */ warn = !dsa_is_cpu_port(ds, port) && !dsa_is_dsa_port(ds, port); @@ -3040,14 +2970,16 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip) * from the wrong location resulting in the switch booting * to wrong mode and inoperable. */ - mv88e6xxx_g1_wait_eeprom_done(chip); + if (chip->info->ops->get_eeprom) + mv88e6xxx_g2_eeprom_wait(chip); gpiod_set_value_cansleep(gpiod, 1); usleep_range(10000, 20000); gpiod_set_value_cansleep(gpiod, 0); usleep_range(10000, 20000); - mv88e6xxx_g1_wait_eeprom_done(chip); + if (chip->info->ops->get_eeprom) + mv88e6xxx_g2_eeprom_wait(chip); } } @@ -3171,102 +3103,6 @@ static int mv88e6xxx_setup_egress_floods(struct mv88e6xxx_chip *chip, int port) return 0; } -static irqreturn_t mv88e6xxx_serdes_irq_thread_fn(int irq, void *dev_id) -{ - struct mv88e6xxx_port *mvp = dev_id; - struct mv88e6xxx_chip *chip = mvp->chip; - irqreturn_t ret = IRQ_NONE; - int port = mvp->port; - int lane; - - mv88e6xxx_reg_lock(chip); - lane = mv88e6xxx_serdes_get_lane(chip, port); - if (lane >= 0) - ret = mv88e6xxx_serdes_irq_status(chip, port, lane); - mv88e6xxx_reg_unlock(chip); - - return ret; -} - -static int mv88e6xxx_serdes_irq_request(struct mv88e6xxx_chip *chip, int port, - int lane) -{ - struct mv88e6xxx_port *dev_id = &chip->ports[port]; - unsigned int irq; - int err; - - /* Nothing to request if this SERDES port has no IRQ */ - irq = mv88e6xxx_serdes_irq_mapping(chip, port); - if (!irq) - return 0; - - snprintf(dev_id->serdes_irq_name, sizeof(dev_id->serdes_irq_name), - "mv88e6xxx-%s-serdes-%d", dev_name(chip->dev), port); - - /* Requesting the IRQ will trigger IRQ callbacks, so release the lock */ - mv88e6xxx_reg_unlock(chip); - err = request_threaded_irq(irq, NULL, mv88e6xxx_serdes_irq_thread_fn, - IRQF_ONESHOT, dev_id->serdes_irq_name, - dev_id); - mv88e6xxx_reg_lock(chip); - if (err) - return err; - - dev_id->serdes_irq = irq; - - return mv88e6xxx_serdes_irq_enable(chip, port, lane); -} - -static int mv88e6xxx_serdes_irq_free(struct mv88e6xxx_chip *chip, int port, - int lane) -{ - struct mv88e6xxx_port *dev_id = &chip->ports[port]; - unsigned int irq = dev_id->serdes_irq; - int err; - - /* Nothing to free if no IRQ has been requested */ - if (!irq) - return 0; - - err = mv88e6xxx_serdes_irq_disable(chip, port, lane); - - /* Freeing the IRQ will trigger IRQ callbacks, so release the lock */ - mv88e6xxx_reg_unlock(chip); - free_irq(irq, dev_id); - mv88e6xxx_reg_lock(chip); - - dev_id->serdes_irq = 0; - - return err; -} - 
-static int mv88e6xxx_serdes_power(struct mv88e6xxx_chip *chip, int port, - bool on) -{ - int lane; - int err; - - lane = mv88e6xxx_serdes_get_lane(chip, port); - if (lane < 0) - return 0; - - if (on) { - err = mv88e6xxx_serdes_power_up(chip, port, lane); - if (err) - return err; - - err = mv88e6xxx_serdes_irq_request(chip, port, lane); - } else { - err = mv88e6xxx_serdes_irq_free(chip, port, lane); - if (err) - return err; - - err = mv88e6xxx_serdes_power_down(chip, port, lane); - } - - return err; -} - static int mv88e6xxx_set_egress_port(struct mv88e6xxx_chip *chip, enum mv88e6xxx_egress_direction direction, int port) @@ -3330,56 +3166,17 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) { struct device_node *phy_handle = NULL; struct dsa_switch *ds = chip->ds; - phy_interface_t mode; struct dsa_port *dp; - int tx_amp, speed; + int tx_amp; int err; u16 reg; chip->ports[port].chip = chip; chip->ports[port].port = port; - dp = dsa_to_port(ds, port); - - /* MAC Forcing register: don't force link, speed, duplex or flow control - * state to any particular values on physical ports, but force the CPU - * port and all DSA ports to their maximum bandwidth and full duplex. - */ - if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) { - struct phylink_config pl_config = {}; - unsigned long caps; - - chip->info->ops->phylink_get_caps(chip, port, &pl_config); - - caps = pl_config.mac_capabilities; - - if (chip->info->ops->port_max_speed_mode) - mode = chip->info->ops->port_max_speed_mode(chip, port); - else - mode = PHY_INTERFACE_MODE_NA; - - if (caps & MAC_10000FD) - speed = SPEED_10000; - else if (caps & MAC_5000FD) - speed = SPEED_5000; - else if (caps & MAC_2500FD) - speed = SPEED_2500; - else if (caps & MAC_1000) - speed = SPEED_1000; - else if (caps & MAC_100) - speed = SPEED_100; - else - speed = SPEED_10; - - err = mv88e6xxx_port_setup_mac(chip, port, LINK_FORCED_UP, - speed, DUPLEX_FULL, - PAUSE_OFF, mode); - } else { - err = mv88e6xxx_port_setup_mac(chip, port, LINK_UNFORCED, - SPEED_UNFORCED, DUPLEX_UNFORCED, - PAUSE_ON, - PHY_INTERFACE_MODE_NA); - } + err = mv88e6xxx_port_setup_mac(chip, port, LINK_UNFORCED, + SPEED_UNFORCED, DUPLEX_UNFORCED, + PAUSE_ON, PHY_INTERFACE_MODE_NA); if (err) return err; @@ -3556,6 +3353,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) } if (chip->info->ops->serdes_set_tx_amplitude) { + dp = dsa_to_port(ds, port); if (dp) phy_handle = of_parse_phandle(dp->dn, "phy-handle", 0); @@ -3629,29 +3427,6 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu) return ret; } -static int mv88e6xxx_port_enable(struct dsa_switch *ds, int port, - struct phy_device *phydev) -{ - struct mv88e6xxx_chip *chip = ds->priv; - int err; - - mv88e6xxx_reg_lock(chip); - err = mv88e6xxx_serdes_power(chip, port, true); - mv88e6xxx_reg_unlock(chip); - - return err; -} - -static void mv88e6xxx_port_disable(struct dsa_switch *ds, int port) -{ - struct mv88e6xxx_chip *chip = ds->priv; - - mv88e6xxx_reg_lock(chip); - if (mv88e6xxx_serdes_power(chip, port, false)) - dev_err(chip->dev, "failed to power off SERDES\n"); - mv88e6xxx_reg_unlock(chip); -} - static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds, unsigned int ageing_time) { @@ -3956,7 +3731,7 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) return err; chip->ds = ds; - ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip); + ds->user_mii_bus = mv88e6xxx_default_mdio_bus(chip); /* Since virtual bridges are mapped in the PVT, the number we 
support * depends on the physical switch topology. We need to let DSA figure @@ -4114,12 +3889,28 @@ out_mdios: static int mv88e6xxx_port_setup(struct dsa_switch *ds, int port) { + struct mv88e6xxx_chip *chip = ds->priv; + int err; + + if (chip->info->ops->pcs_ops && + chip->info->ops->pcs_ops->pcs_init) { + err = chip->info->ops->pcs_ops->pcs_init(chip, port); + if (err) + return err; + } + return mv88e6xxx_setup_devlink_regions_port(ds, port); } static void mv88e6xxx_port_teardown(struct dsa_switch *ds, int port) { + struct mv88e6xxx_chip *chip = ds->priv; + mv88e6xxx_teardown_devlink_regions_port(ds, port); + + if (chip->info->ops->pcs_ops && + chip->info->ops->pcs_ops->pcs_teardown) + chip->info->ops->pcs_ops->pcs_teardown(chip, port); } static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds) @@ -4236,15 +4027,13 @@ static const struct mv88e6xxx_ops mv88e6095_ops = { .stats_get_strings = mv88e6095_stats_get_strings, .stats_get_stats = mv88e6095_stats_get_stats, .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu, - .serdes_power = mv88e6185_serdes_power, - .serdes_get_lane = mv88e6185_serdes_get_lane, - .serdes_pcs_get_state = mv88e6185_serdes_pcs_get_state, .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, .vtu_getnext = mv88e6185_g1_vtu_getnext, .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, .phylink_get_caps = mv88e6095_phylink_get_caps, + .pcs_ops = &mv88e6185_pcs_ops, .set_max_frame_size = mv88e6185_g1_set_max_frame_size, }; @@ -4282,18 +4071,14 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, - .serdes_power = mv88e6185_serdes_power, - .serdes_get_lane = mv88e6185_serdes_get_lane, - .serdes_pcs_get_state = mv88e6185_serdes_pcs_get_state, .serdes_irq_mapping = mv88e6390_serdes_irq_mapping, - .serdes_irq_enable = mv88e6097_serdes_irq_enable, - .serdes_irq_status = mv88e6097_serdes_irq_status, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6085_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .phylink_get_caps = mv88e6095_phylink_get_caps, + .pcs_ops = &mv88e6185_pcs_ops, .stu_getnext = mv88e6352_g1_stu_getnext, .stu_loadpurge = mv88e6352_g1_stu_loadpurge, .set_max_frame_size = mv88e6185_g1_set_max_frame_size, @@ -4429,16 +4214,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .stu_getnext = mv88e6352_g1_stu_getnext, .stu_loadpurge = mv88e6352_g1_stu_loadpurge, - .serdes_power = mv88e6390_serdes_power, .serdes_get_lane = mv88e6341_serdes_get_lane, - /* Check status register pause & lpa register */ - .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state, - .serdes_pcs_config = mv88e6390_serdes_pcs_config, - .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart, - .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up, .serdes_irq_mapping = mv88e6390_serdes_irq_mapping, - .serdes_irq_enable = mv88e6390_serdes_irq_enable, - .serdes_irq_status = mv88e6390_serdes_irq_status, .gpio_ops = &mv88e6352_gpio_ops, .serdes_get_sset_count = mv88e6390_serdes_get_sset_count, .serdes_get_strings = mv88e6390_serdes_get_strings, @@ -4446,6 +4223,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, .serdes_get_regs = mv88e6390_serdes_get_regs, .phylink_get_caps = mv88e6341_phylink_get_caps, + .pcs_ops = 
&mv88e6390_pcs_ops, }; static const struct mv88e6xxx_ops mv88e6161_ops = { @@ -4576,7 +4354,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = { .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .stu_getnext = mv88e6352_g1_stu_getnext, .stu_loadpurge = mv88e6352_g1_stu_loadpurge, - .phylink_get_caps = mv88e6185_phylink_get_caps, + .phylink_get_caps = mv88e6351_phylink_get_caps, }; static const struct mv88e6xxx_ops mv88e6172_ops = { @@ -4626,16 +4404,11 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .stu_getnext = mv88e6352_g1_stu_getnext, .stu_loadpurge = mv88e6352_g1_stu_loadpurge, - .serdes_get_lane = mv88e6352_serdes_get_lane, - .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state, - .serdes_pcs_config = mv88e6352_serdes_pcs_config, - .serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart, - .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up, - .serdes_power = mv88e6352_serdes_power, .serdes_get_regs_len = mv88e6352_serdes_get_regs_len, .serdes_get_regs = mv88e6352_serdes_get_regs, .gpio_ops = &mv88e6352_gpio_ops, .phylink_get_caps = mv88e6352_phylink_get_caps, + .pcs_ops = &mv88e6352_pcs_ops, }; static const struct mv88e6xxx_ops mv88e6175_ops = { @@ -4681,7 +4454,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = { .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .stu_getnext = mv88e6352_g1_stu_getnext, .stu_loadpurge = mv88e6352_g1_stu_loadpurge, - .phylink_get_caps = mv88e6185_phylink_get_caps, + .phylink_get_caps = mv88e6351_phylink_get_caps, }; static const struct mv88e6xxx_ops mv88e6176_ops = { @@ -4731,20 +4504,13 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .stu_getnext = mv88e6352_g1_stu_getnext, .stu_loadpurge = mv88e6352_g1_stu_loadpurge, - .serdes_get_lane = mv88e6352_serdes_get_lane, - .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state, - .serdes_pcs_config = mv88e6352_serdes_pcs_config, - .serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart, - .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up, - .serdes_power = mv88e6352_serdes_power, .serdes_irq_mapping = mv88e6352_serdes_irq_mapping, - .serdes_irq_enable = mv88e6352_serdes_irq_enable, - .serdes_irq_status = mv88e6352_serdes_irq_status, .serdes_get_regs_len = mv88e6352_serdes_get_regs_len, .serdes_get_regs = mv88e6352_serdes_get_regs, .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude, .gpio_ops = &mv88e6352_gpio_ops, .phylink_get_caps = mv88e6352_phylink_get_caps, + .pcs_ops = &mv88e6352_pcs_ops, }; static const struct mv88e6xxx_ops mv88e6185_ops = { @@ -4774,9 +4540,6 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu, - .serdes_power = mv88e6185_serdes_power, - .serdes_get_lane = mv88e6185_serdes_get_lane, - .serdes_pcs_get_state = mv88e6185_serdes_pcs_get_state, .set_cascade_port = mv88e6185_g1_set_cascade_port, .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, @@ -4784,6 +4547,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { .vtu_getnext = mv88e6185_g1_vtu_getnext, .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, .phylink_get_caps = mv88e6185_phylink_get_caps, + .pcs_ops = &mv88e6185_pcs_ops, .set_max_frame_size = mv88e6185_g1_set_max_frame_size, }; @@ -4834,22 +4598,15 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .stu_getnext = mv88e6390_g1_stu_getnext, 
.stu_loadpurge = mv88e6390_g1_stu_loadpurge, - .serdes_power = mv88e6390_serdes_power, .serdes_get_lane = mv88e6390_serdes_get_lane, - /* Check status register pause & lpa register */ - .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state, - .serdes_pcs_config = mv88e6390_serdes_pcs_config, - .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart, - .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up, .serdes_irq_mapping = mv88e6390_serdes_irq_mapping, - .serdes_irq_enable = mv88e6390_serdes_irq_enable, - .serdes_irq_status = mv88e6390_serdes_irq_status, .serdes_get_strings = mv88e6390_serdes_get_strings, .serdes_get_stats = mv88e6390_serdes_get_stats, .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, .serdes_get_regs = mv88e6390_serdes_get_regs, .gpio_ops = &mv88e6352_gpio_ops, .phylink_get_caps = mv88e6390_phylink_get_caps, + .pcs_ops = &mv88e6390_pcs_ops, }; static const struct mv88e6xxx_ops mv88e6190x_ops = { @@ -4899,22 +4656,15 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .stu_getnext = mv88e6390_g1_stu_getnext, .stu_loadpurge = mv88e6390_g1_stu_loadpurge, - .serdes_power = mv88e6390_serdes_power, .serdes_get_lane = mv88e6390x_serdes_get_lane, - /* Check status register pause & lpa register */ - .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state, - .serdes_pcs_config = mv88e6390_serdes_pcs_config, - .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart, - .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up, .serdes_irq_mapping = mv88e6390_serdes_irq_mapping, - .serdes_irq_enable = mv88e6390_serdes_irq_enable, - .serdes_irq_status = mv88e6390_serdes_irq_status, .serdes_get_strings = mv88e6390_serdes_get_strings, .serdes_get_stats = mv88e6390_serdes_get_stats, .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, .serdes_get_regs = mv88e6390_serdes_get_regs, .gpio_ops = &mv88e6352_gpio_ops, .phylink_get_caps = mv88e6390x_phylink_get_caps, + .pcs_ops = &mv88e6390_pcs_ops, }; static const struct mv88e6xxx_ops mv88e6191_ops = { @@ -4962,16 +4712,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .stu_getnext = mv88e6390_g1_stu_getnext, .stu_loadpurge = mv88e6390_g1_stu_loadpurge, - .serdes_power = mv88e6390_serdes_power, .serdes_get_lane = mv88e6390_serdes_get_lane, - /* Check status register pause & lpa register */ - .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state, - .serdes_pcs_config = mv88e6390_serdes_pcs_config, - .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart, - .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up, .serdes_irq_mapping = mv88e6390_serdes_irq_mapping, - .serdes_irq_enable = mv88e6390_serdes_irq_enable, - .serdes_irq_status = mv88e6390_serdes_irq_status, .serdes_get_strings = mv88e6390_serdes_get_strings, .serdes_get_stats = mv88e6390_serdes_get_stats, .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, @@ -4979,6 +4721,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, .phylink_get_caps = mv88e6390_phylink_get_caps, + .pcs_ops = &mv88e6390_pcs_ops, }; static const struct mv88e6xxx_ops mv88e6240_ops = { @@ -5028,15 +4771,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .stu_getnext = mv88e6352_g1_stu_getnext, .stu_loadpurge = mv88e6352_g1_stu_loadpurge, - .serdes_get_lane = mv88e6352_serdes_get_lane, - .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state, - .serdes_pcs_config = mv88e6352_serdes_pcs_config, - 
.serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart, - .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up, - .serdes_power = mv88e6352_serdes_power, .serdes_irq_mapping = mv88e6352_serdes_irq_mapping, - .serdes_irq_enable = mv88e6352_serdes_irq_enable, - .serdes_irq_status = mv88e6352_serdes_irq_status, .serdes_get_regs_len = mv88e6352_serdes_get_regs_len, .serdes_get_regs = mv88e6352_serdes_get_regs, .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude, @@ -5044,6 +4779,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .avb_ops = &mv88e6352_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, .phylink_get_caps = mv88e6352_phylink_get_caps, + .pcs_ops = &mv88e6352_pcs_ops, }; static const struct mv88e6xxx_ops mv88e6250_ops = { @@ -5135,16 +4871,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .stu_getnext = mv88e6390_g1_stu_getnext, .stu_loadpurge = mv88e6390_g1_stu_loadpurge, - .serdes_power = mv88e6390_serdes_power, .serdes_get_lane = mv88e6390_serdes_get_lane, - /* Check status register pause & lpa register */ - .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state, - .serdes_pcs_config = mv88e6390_serdes_pcs_config, - .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart, - .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up, .serdes_irq_mapping = mv88e6390_serdes_irq_mapping, - .serdes_irq_enable = mv88e6390_serdes_irq_enable, - .serdes_irq_status = mv88e6390_serdes_irq_status, .serdes_get_strings = mv88e6390_serdes_get_strings, .serdes_get_stats = mv88e6390_serdes_get_stats, .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, @@ -5153,6 +4881,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6390_ptp_ops, .phylink_get_caps = mv88e6390_phylink_get_caps, + .pcs_ops = &mv88e6390_pcs_ops, }; static const struct mv88e6xxx_ops mv88e6320_ops = { @@ -5297,16 +5026,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .stu_getnext = mv88e6352_g1_stu_getnext, .stu_loadpurge = mv88e6352_g1_stu_loadpurge, - .serdes_power = mv88e6390_serdes_power, .serdes_get_lane = mv88e6341_serdes_get_lane, - /* Check status register pause & lpa register */ - .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state, - .serdes_pcs_config = mv88e6390_serdes_pcs_config, - .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart, - .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up, .serdes_irq_mapping = mv88e6390_serdes_irq_mapping, - .serdes_irq_enable = mv88e6390_serdes_irq_enable, - .serdes_irq_status = mv88e6390_serdes_irq_status, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, @@ -5316,6 +5037,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, .serdes_get_regs = mv88e6390_serdes_get_regs, .phylink_get_caps = mv88e6341_phylink_get_caps, + .pcs_ops = &mv88e6390_pcs_ops, }; static const struct mv88e6xxx_ops mv88e6350_ops = { @@ -5361,7 +5083,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = { .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .stu_getnext = mv88e6352_g1_stu_getnext, .stu_loadpurge = mv88e6352_g1_stu_loadpurge, - .phylink_get_caps = mv88e6185_phylink_get_caps, + .phylink_get_caps = mv88e6351_phylink_get_caps, }; static const struct mv88e6xxx_ops mv88e6351_ops = { @@ -5409,7 +5131,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = { .stu_loadpurge = mv88e6352_g1_stu_loadpurge, .avb_ops = &mv88e6352_avb_ops, .ptp_ops = 
&mv88e6352_ptp_ops, - .phylink_get_caps = mv88e6185_phylink_get_caps, + .phylink_get_caps = mv88e6351_phylink_get_caps, }; static const struct mv88e6xxx_ops mv88e6352_ops = { @@ -5459,15 +5181,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .stu_getnext = mv88e6352_g1_stu_getnext, .stu_loadpurge = mv88e6352_g1_stu_loadpurge, - .serdes_get_lane = mv88e6352_serdes_get_lane, - .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state, - .serdes_pcs_config = mv88e6352_serdes_pcs_config, - .serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart, - .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up, - .serdes_power = mv88e6352_serdes_power, .serdes_irq_mapping = mv88e6352_serdes_irq_mapping, - .serdes_irq_enable = mv88e6352_serdes_irq_enable, - .serdes_irq_status = mv88e6352_serdes_irq_status, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, @@ -5478,6 +5192,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .serdes_get_regs = mv88e6352_serdes_get_regs, .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude, .phylink_get_caps = mv88e6352_phylink_get_caps, + .pcs_ops = &mv88e6352_pcs_ops, }; static const struct mv88e6xxx_ops mv88e6390_ops = { @@ -5528,16 +5243,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .stu_getnext = mv88e6390_g1_stu_getnext, .stu_loadpurge = mv88e6390_g1_stu_loadpurge, - .serdes_power = mv88e6390_serdes_power, .serdes_get_lane = mv88e6390_serdes_get_lane, - /* Check status register pause & lpa register */ - .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state, - .serdes_pcs_config = mv88e6390_serdes_pcs_config, - .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart, - .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up, .serdes_irq_mapping = mv88e6390_serdes_irq_mapping, - .serdes_irq_enable = mv88e6390_serdes_irq_enable, - .serdes_irq_status = mv88e6390_serdes_irq_status, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6390_ptp_ops, @@ -5547,6 +5254,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, .serdes_get_regs = mv88e6390_serdes_get_regs, .phylink_get_caps = mv88e6390_phylink_get_caps, + .pcs_ops = &mv88e6390_pcs_ops, }; static const struct mv88e6xxx_ops mv88e6390x_ops = { @@ -5597,15 +5305,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .stu_getnext = mv88e6390_g1_stu_getnext, .stu_loadpurge = mv88e6390_g1_stu_loadpurge, - .serdes_power = mv88e6390_serdes_power, .serdes_get_lane = mv88e6390x_serdes_get_lane, - .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state, - .serdes_pcs_config = mv88e6390_serdes_pcs_config, - .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart, - .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up, .serdes_irq_mapping = mv88e6390_serdes_irq_mapping, - .serdes_irq_enable = mv88e6390_serdes_irq_enable, - .serdes_irq_status = mv88e6390_serdes_irq_status, .serdes_get_sset_count = mv88e6390_serdes_get_sset_count, .serdes_get_strings = mv88e6390_serdes_get_strings, .serdes_get_stats = mv88e6390_serdes_get_stats, @@ -5615,11 +5316,11 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6390_ptp_ops, .phylink_get_caps = mv88e6390x_phylink_get_caps, + .pcs_ops = &mv88e6390_pcs_ops, }; static const struct mv88e6xxx_ops mv88e6393x_ops = { /* MV88E6XXX_FAMILY_6393 */ - .setup_errata = 
mv88e6393x_serdes_setup_errata, .irl_init_all = mv88e6390_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, @@ -5669,20 +5370,14 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = { .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .stu_getnext = mv88e6390_g1_stu_getnext, .stu_loadpurge = mv88e6390_g1_stu_loadpurge, - .serdes_power = mv88e6393x_serdes_power, .serdes_get_lane = mv88e6393x_serdes_get_lane, - .serdes_pcs_get_state = mv88e6393x_serdes_pcs_get_state, - .serdes_pcs_config = mv88e6390_serdes_pcs_config, - .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart, - .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up, .serdes_irq_mapping = mv88e6390_serdes_irq_mapping, - .serdes_irq_enable = mv88e6393x_serdes_irq_enable, - .serdes_irq_status = mv88e6393x_serdes_irq_status, /* TODO: serdes stats */ .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, .phylink_get_caps = mv88e6393x_phylink_get_caps, + .pcs_ops = &mv88e6393x_pcs_ops, }; static const struct mv88e6xxx_info mv88e6xxx_table[] = { @@ -7114,18 +6809,15 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .port_setup = mv88e6xxx_port_setup, .port_teardown = mv88e6xxx_port_teardown, .phylink_get_caps = mv88e6xxx_get_caps, - .phylink_mac_link_state = mv88e6xxx_serdes_pcs_get_state, + .phylink_mac_select_pcs = mv88e6xxx_mac_select_pcs, .phylink_mac_prepare = mv88e6xxx_mac_prepare, .phylink_mac_config = mv88e6xxx_mac_config, .phylink_mac_finish = mv88e6xxx_mac_finish, - .phylink_mac_an_restart = mv88e6xxx_serdes_pcs_an_restart, .phylink_mac_link_down = mv88e6xxx_mac_link_down, .phylink_mac_link_up = mv88e6xxx_mac_link_up, .get_strings = mv88e6xxx_get_strings, .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, .get_sset_count = mv88e6xxx_get_sset_count, - .port_enable = mv88e6xxx_port_enable, - .port_disable = mv88e6xxx_port_disable, .port_max_mtu = mv88e6xxx_get_max_mtu, .port_change_mtu = mv88e6xxx_change_mtu, .get_mac_eee = mv88e6xxx_get_mac_eee, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 0ad34b2d8913..44383a03ef2f 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -205,6 +205,7 @@ struct mv88e6xxx_irq_ops; struct mv88e6xxx_gpio_ops; struct mv88e6xxx_avb_ops; struct mv88e6xxx_ptp_ops; +struct mv88e6xxx_pcs_ops; struct mv88e6xxx_irq { u16 masked; @@ -285,9 +286,8 @@ struct mv88e6xxx_port { u8 cmode; bool mirror_ingress; bool mirror_egress; - unsigned int serdes_irq; - char serdes_irq_name[64]; struct devlink_region *region; + void *pcs_private; /* MacAuth Bypass control flag */ bool mab; @@ -590,31 +590,12 @@ struct mv88e6xxx_ops { int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip); - /* Power on/off a SERDES interface */ - int (*serdes_power)(struct mv88e6xxx_chip *chip, int port, int lane, - bool up); - /* SERDES lane mapping */ int (*serdes_get_lane)(struct mv88e6xxx_chip *chip, int port); - int (*serdes_pcs_get_state)(struct mv88e6xxx_chip *chip, int port, - int lane, struct phylink_link_state *state); - int (*serdes_pcs_config)(struct mv88e6xxx_chip *chip, int port, - int lane, unsigned int mode, - phy_interface_t interface, - const unsigned long *advertise); - int (*serdes_pcs_an_restart)(struct mv88e6xxx_chip *chip, int port, - int lane); - int (*serdes_pcs_link_up)(struct mv88e6xxx_chip *chip, int port, - int lane, int speed, int duplex); - /* SERDES interrupt handling */ unsigned int (*serdes_irq_mapping)(struct mv88e6xxx_chip *chip, int port); - 
int (*serdes_irq_enable)(struct mv88e6xxx_chip *chip, int port, int lane, - bool enable); - irqreturn_t (*serdes_irq_status)(struct mv88e6xxx_chip *chip, int port, - int lane); /* Statistics from the SERDES interface */ int (*serdes_get_sset_count)(struct mv88e6xxx_chip *chip, int port); @@ -664,6 +645,8 @@ struct mv88e6xxx_ops { void (*phylink_get_caps)(struct mv88e6xxx_chip *chip, int port, struct phylink_config *config); + const struct mv88e6xxx_pcs_ops *pcs_ops; + /* Max Frame Size */ int (*set_max_frame_size)(struct mv88e6xxx_chip *chip, int mtu); }; @@ -736,6 +719,14 @@ struct mv88e6xxx_ptp_ops { u32 cc_mult_dem; }; +struct mv88e6xxx_pcs_ops { + int (*pcs_init)(struct mv88e6xxx_chip *chip, int port); + void (*pcs_teardown)(struct mv88e6xxx_chip *chip, int port); + struct phylink_pcs *(*pcs_select)(struct mv88e6xxx_chip *chip, int port, + phy_interface_t mode); + +}; + #define STATS_TYPE_PORT BIT(0) #define STATS_TYPE_BANK0 BIT(1) #define STATS_TYPE_BANK1 BIT(2) diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index 2fa55a643591..174c773b38c2 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -75,37 +75,6 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip) return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1); } -void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip) -{ - const unsigned long timeout = jiffies + 1 * HZ; - u16 val; - int err; - - /* Wait up to 1 second for the switch to finish reading the - * EEPROM. - */ - while (time_before(jiffies, timeout)) { - err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val); - if (err) { - dev_err(chip->dev, "Error reading status"); - return; - } - - /* If the switch is still resetting, it may not - * respond on the bus, and so MDIO read returns - * 0xffff. Differentiate between that, and waiting for - * the EEPROM to be done by bit 0 being set. 
- */ - if (val != 0xffff && - val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE)) - return; - - usleep_range(1000, 2000); - } - - dev_err(chip->dev, "Timeout waiting for EEPROM done"); -} - /* Offset 0x01: Switch MAC Address Register Bytes 0 & 1 * Offset 0x02: Switch MAC Address Register Bytes 2 & 3 * Offset 0x03: Switch MAC Address Register Bytes 4 & 5 diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index c99ddd117fe6..1095261f5b49 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -282,7 +282,6 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr); int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip); int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip); int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip); -void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip); int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip); int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip); diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 937a01f2ba75..b2b5f6ba438f 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -340,7 +340,7 @@ int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip) * Offset 0x15: EEPROM Addr (for 8-bit data access) */ -static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip) +int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip) { int bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_BUSY); int err; diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 7e091965582b..d9434f7cae53 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -365,6 +365,7 @@ int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip); int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target, int port); +int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip); extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops; extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops; diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6185.c b/drivers/net/dsa/mv88e6xxx/pcs-6185.c new file mode 100644 index 000000000000..4d677f836807 --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/pcs-6185.c @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Marvell 88E6185 family SERDES PCS support + * + * Copyright (c) 2008 Marvell Semiconductor + * + * Copyright (c) 2017 Andrew Lunn <andrew@lunn.ch> + */ +#include <linux/phylink.h> + +#include "global2.h" +#include "port.h" +#include "serdes.h" + +struct mv88e6185_pcs { + struct phylink_pcs phylink_pcs; + unsigned int irq; + char name[64]; + + struct mv88e6xxx_chip *chip; + int port; +}; + +static struct mv88e6185_pcs *pcs_to_mv88e6185_pcs(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct mv88e6185_pcs, phylink_pcs); +} + +static irqreturn_t mv88e6185_pcs_handle_irq(int irq, void *dev_id) +{ + struct mv88e6185_pcs *mpcs = dev_id; + struct mv88e6xxx_chip *chip; + irqreturn_t ret = IRQ_NONE; + bool link_up; + u16 status; + int port; + int err; + + chip = mpcs->chip; + port = mpcs->port; + + mv88e6xxx_reg_lock(chip); + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &status); + mv88e6xxx_reg_unlock(chip); + + if (!err) { + link_up = !!(status & MV88E6XXX_PORT_STS_LINK); + + phylink_pcs_change(&mpcs->phylink_pcs, link_up); + + ret = IRQ_HANDLED; + } + + return ret; +} + +static void mv88e6185_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state 
*state) +{ + struct mv88e6185_pcs *mpcs = pcs_to_mv88e6185_pcs(pcs); + struct mv88e6xxx_chip *chip = mpcs->chip; + int port = mpcs->port; + u16 status; + int err; + + mv88e6xxx_reg_lock(chip); + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &status); + mv88e6xxx_reg_unlock(chip); + + if (err) + status = 0; + + state->link = !!(status & MV88E6XXX_PORT_STS_LINK); + if (state->link) { + state->duplex = status & MV88E6XXX_PORT_STS_DUPLEX ? + DUPLEX_FULL : DUPLEX_HALF; + + switch (status & MV88E6XXX_PORT_STS_SPEED_MASK) { + case MV88E6XXX_PORT_STS_SPEED_1000: + state->speed = SPEED_1000; + break; + + case MV88E6XXX_PORT_STS_SPEED_100: + state->speed = SPEED_100; + break; + + case MV88E6XXX_PORT_STS_SPEED_10: + state->speed = SPEED_10; + break; + + default: + state->link = false; + break; + } + } +} + +static int mv88e6185_pcs_config(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + return 0; +} + +static void mv88e6185_pcs_an_restart(struct phylink_pcs *pcs) +{ +} + +static const struct phylink_pcs_ops mv88e6185_phylink_pcs_ops = { + .pcs_get_state = mv88e6185_pcs_get_state, + .pcs_config = mv88e6185_pcs_config, + .pcs_an_restart = mv88e6185_pcs_an_restart, +}; + +static int mv88e6185_pcs_init(struct mv88e6xxx_chip *chip, int port) +{ + struct mv88e6185_pcs *mpcs; + struct device *dev; + unsigned int irq; + int err; + + /* There are no configurable serdes lanes on this switch chip, so + * we use the static cmode configuration to determine whether we + * have a PCS or not. + */ + if (chip->ports[port].cmode != MV88E6185_PORT_STS_CMODE_SERDES && + chip->ports[port].cmode != MV88E6185_PORT_STS_CMODE_1000BASE_X) + return 0; + + dev = chip->dev; + + mpcs = kzalloc(sizeof(*mpcs), GFP_KERNEL); + if (!mpcs) + return -ENOMEM; + + mpcs->chip = chip; + mpcs->port = port; + mpcs->phylink_pcs.ops = &mv88e6185_phylink_pcs_ops; + + irq = mv88e6xxx_serdes_irq_mapping(chip, port); + if (irq) { + snprintf(mpcs->name, sizeof(mpcs->name), + "mv88e6xxx-%s-serdes-%d", dev_name(dev), port); + + err = request_threaded_irq(irq, NULL, mv88e6185_pcs_handle_irq, + IRQF_ONESHOT, mpcs->name, mpcs); + if (err) { + kfree(mpcs); + return err; + } + + mpcs->irq = irq; + } else { + mpcs->phylink_pcs.poll = true; + } + + chip->ports[port].pcs_private = &mpcs->phylink_pcs; + + return 0; +} + +static void mv88e6185_pcs_teardown(struct mv88e6xxx_chip *chip, int port) +{ + struct mv88e6185_pcs *mpcs; + + mpcs = chip->ports[port].pcs_private; + if (!mpcs) + return; + + if (mpcs->irq) + free_irq(mpcs->irq, mpcs); + + kfree(mpcs); + + chip->ports[port].pcs_private = NULL; +} + +static struct phylink_pcs *mv88e6185_pcs_select(struct mv88e6xxx_chip *chip, + int port, + phy_interface_t interface) +{ + return chip->ports[port].pcs_private; +} + +const struct mv88e6xxx_pcs_ops mv88e6185_pcs_ops = { + .pcs_init = mv88e6185_pcs_init, + .pcs_teardown = mv88e6185_pcs_teardown, + .pcs_select = mv88e6185_pcs_select, +}; diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6352.c b/drivers/net/dsa/mv88e6xxx/pcs-6352.c new file mode 100644 index 000000000000..88f624b65470 --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/pcs-6352.c @@ -0,0 +1,390 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Marvell 88E6352 family SERDES PCS support + * + * Copyright (c) 2008 Marvell Semiconductor + * + * Copyright (c) 2017 Andrew Lunn <andrew@lunn.ch> + */ +#include <linux/phylink.h> + +#include "global2.h" +#include "port.h" +#include "serdes.h" + +/* Definitions 
from drivers/net/phy/marvell.c, which would be good to reuse. */ +#define MII_M1011_PHY_STATUS 17 +#define MII_M1011_IMASK 18 +#define MII_M1011_IMASK_LINK_CHANGE BIT(10) +#define MII_M1011_IEVENT 19 +#define MII_M1011_IEVENT_LINK_CHANGE BIT(10) +#define MII_MARVELL_PHY_PAGE 22 +#define MII_MARVELL_FIBER_PAGE 1 + +struct marvell_c22_pcs { + struct mdio_device mdio; + struct phylink_pcs phylink_pcs; + unsigned int irq; + char name[64]; + bool (*link_check)(struct marvell_c22_pcs *mpcs); + struct mv88e6xxx_port *port; +}; + +static struct marvell_c22_pcs *pcs_to_marvell_c22_pcs(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct marvell_c22_pcs, phylink_pcs); +} + +static int marvell_c22_pcs_set_fiber_page(struct marvell_c22_pcs *mpcs) +{ + u16 page; + int err; + + mutex_lock(&mpcs->mdio.bus->mdio_lock); + + err = __mdiodev_read(&mpcs->mdio, MII_MARVELL_PHY_PAGE); + if (err < 0) { + dev_err(mpcs->mdio.dev.parent, + "%s: can't read Serdes page register: %pe\n", + mpcs->name, ERR_PTR(err)); + return err; + } + + page = err; + + err = __mdiodev_write(&mpcs->mdio, MII_MARVELL_PHY_PAGE, + MII_MARVELL_FIBER_PAGE); + if (err) { + dev_err(mpcs->mdio.dev.parent, + "%s: can't set Serdes page register: %pe\n", + mpcs->name, ERR_PTR(err)); + return err; + } + + return page; +} + +static int marvell_c22_pcs_restore_page(struct marvell_c22_pcs *mpcs, + int oldpage, int ret) +{ + int err; + + if (oldpage >= 0) { + err = __mdiodev_write(&mpcs->mdio, MII_MARVELL_PHY_PAGE, + oldpage); + if (err) + dev_err(mpcs->mdio.dev.parent, + "%s: can't restore Serdes page register: %pe\n", + mpcs->name, ERR_PTR(err)); + if (!err || ret < 0) + err = ret; + } else { + err = oldpage; + } + mutex_unlock(&mpcs->mdio.bus->mdio_lock); + + return err; +} + +static irqreturn_t marvell_c22_pcs_handle_irq(int irq, void *dev_id) +{ + struct marvell_c22_pcs *mpcs = dev_id; + irqreturn_t status = IRQ_NONE; + int err, oldpage; + + oldpage = marvell_c22_pcs_set_fiber_page(mpcs); + if (oldpage < 0) + goto fail; + + err = __mdiodev_read(&mpcs->mdio, MII_M1011_IEVENT); + if (err >= 0 && err & MII_M1011_IEVENT_LINK_CHANGE) { + phylink_pcs_change(&mpcs->phylink_pcs, true); + status = IRQ_HANDLED; + } + +fail: + marvell_c22_pcs_restore_page(mpcs, oldpage, 0); + + return status; +} + +static int marvell_c22_pcs_modify(struct marvell_c22_pcs *mpcs, u8 reg, + u16 mask, u16 val) +{ + int oldpage, err = 0; + + oldpage = marvell_c22_pcs_set_fiber_page(mpcs); + if (oldpage >= 0) + err = __mdiodev_modify(&mpcs->mdio, reg, mask, val); + + return marvell_c22_pcs_restore_page(mpcs, oldpage, err); +} + +static int marvell_c22_pcs_power(struct marvell_c22_pcs *mpcs, + bool on) +{ + u16 val = on ? 0 : BMCR_PDOWN; + + return marvell_c22_pcs_modify(mpcs, MII_BMCR, BMCR_PDOWN, val); +} + +static int marvell_c22_pcs_control_irq(struct marvell_c22_pcs *mpcs, + bool enable) +{ + u16 val = enable ? 
MII_M1011_IMASK_LINK_CHANGE : 0; + + return marvell_c22_pcs_modify(mpcs, MII_M1011_IMASK, + MII_M1011_IMASK_LINK_CHANGE, val); +} + +static int marvell_c22_pcs_enable(struct phylink_pcs *pcs) +{ + struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs); + int err; + + err = marvell_c22_pcs_power(mpcs, true); + if (err) + return err; + + return marvell_c22_pcs_control_irq(mpcs, !!mpcs->irq); +} + +static void marvell_c22_pcs_disable(struct phylink_pcs *pcs) +{ + struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs); + + marvell_c22_pcs_control_irq(mpcs, false); + marvell_c22_pcs_power(mpcs, false); +} + +static void marvell_c22_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) +{ + struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs); + int oldpage, bmsr, lpa, status; + + state->link = false; + + if (mpcs->link_check && !mpcs->link_check(mpcs)) + return; + + oldpage = marvell_c22_pcs_set_fiber_page(mpcs); + if (oldpage >= 0) { + bmsr = __mdiodev_read(&mpcs->mdio, MII_BMSR); + lpa = __mdiodev_read(&mpcs->mdio, MII_LPA); + status = __mdiodev_read(&mpcs->mdio, MII_M1011_PHY_STATUS); + } + + if (marvell_c22_pcs_restore_page(mpcs, oldpage, 0) >= 0 && + bmsr >= 0 && lpa >= 0 && status >= 0) + mv88e6xxx_pcs_decode_state(mpcs->mdio.dev.parent, bmsr, lpa, + status, state); +} + +static int marvell_c22_pcs_config(struct phylink_pcs *pcs, + unsigned int neg_mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs); + int oldpage, adv, err, ret = 0; + u16 bmcr; + + adv = phylink_mii_c22_pcs_encode_advertisement(interface, advertising); + if (adv < 0) + return 0; + + bmcr = neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED ? BMCR_ANENABLE : 0; + + oldpage = marvell_c22_pcs_set_fiber_page(mpcs); + if (oldpage < 0) + goto restore; + + err = __mdiodev_modify_changed(&mpcs->mdio, MII_ADVERTISE, 0xffff, adv); + ret = err; + if (err < 0) + goto restore; + + err = __mdiodev_modify_changed(&mpcs->mdio, MII_BMCR, BMCR_ANENABLE, + bmcr); + if (err < 0) { + ret = err; + goto restore; + } + + /* If the ANENABLE bit was changed, the PHY will restart negotiation, + * so we don't need to flag a change to trigger its own restart. 
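+ *
+ * A condensed restatement of the convention in play here (sketch only):
+ * __mdiodev_modify_changed() returns a negative errno on a bus error,
+ * 0 if the bit was already in the requested state, and 1 if it was
+ * changed, in which case the PHY restarts autonegotiation on its own:
+ *
+ *	err = __mdiodev_modify_changed(&mpcs->mdio, MII_BMCR,
+ *				       BMCR_ANENABLE, bmcr);
+ *	if (err > 0)
+ *		ret = 0;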
+ */ + if (err) + ret = 0; + +restore: + return marvell_c22_pcs_restore_page(mpcs, oldpage, ret); +} + +static void marvell_c22_pcs_an_restart(struct phylink_pcs *pcs) +{ + struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs); + + marvell_c22_pcs_modify(mpcs, MII_BMCR, BMCR_ANRESTART, BMCR_ANRESTART); +} + +static void marvell_c22_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, int speed, + int duplex) +{ + struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs); + u16 bmcr; + int err; + + if (phylink_autoneg_inband(mode)) + return; + + bmcr = mii_bmcr_encode_fixed(speed, duplex); + + err = marvell_c22_pcs_modify(mpcs, MII_BMCR, BMCR_SPEED100 | + BMCR_FULLDPLX | BMCR_SPEED1000, bmcr); + if (err) + dev_err(mpcs->mdio.dev.parent, + "%s: failed to configure mpcs: %pe\n", mpcs->name, + ERR_PTR(err)); +} + +static const struct phylink_pcs_ops marvell_c22_pcs_ops = { + .pcs_enable = marvell_c22_pcs_enable, + .pcs_disable = marvell_c22_pcs_disable, + .pcs_get_state = marvell_c22_pcs_get_state, + .pcs_config = marvell_c22_pcs_config, + .pcs_an_restart = marvell_c22_pcs_an_restart, + .pcs_link_up = marvell_c22_pcs_link_up, +}; + +static struct marvell_c22_pcs *marvell_c22_pcs_alloc(struct device *dev, + struct mii_bus *bus, + unsigned int addr) +{ + struct marvell_c22_pcs *mpcs; + + mpcs = kzalloc(sizeof(*mpcs), GFP_KERNEL); + if (!mpcs) + return NULL; + + mpcs->mdio.dev.parent = dev; + mpcs->mdio.bus = bus; + mpcs->mdio.addr = addr; + mpcs->phylink_pcs.ops = &marvell_c22_pcs_ops; + mpcs->phylink_pcs.neg_mode = true; + + return mpcs; +} + +static int marvell_c22_pcs_setup_irq(struct marvell_c22_pcs *mpcs, + unsigned int irq) +{ + int err; + + mpcs->phylink_pcs.poll = !irq; + mpcs->irq = irq; + + if (irq) { + err = request_threaded_irq(irq, NULL, + marvell_c22_pcs_handle_irq, + IRQF_ONESHOT, mpcs->name, mpcs); + if (err) + return err; + } + + return 0; +} + +/* mv88e6352 specifics */ + +static bool mv88e6352_pcs_link_check(struct marvell_c22_pcs *mpcs) +{ + struct mv88e6xxx_port *port = mpcs->port; + struct mv88e6xxx_chip *chip = port->chip; + u8 cmode; + + /* Port 4 can be in auto-media mode. Check that the port is + * associated with the mpcs. 
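+ *
+ * For reference, the hook is assigned in mv88e6352_pcs_init() below and
+ * consulted at the top of marvell_c22_pcs_get_state() above:
+ *
+ *	mpcs->link_check = mv88e6352_pcs_link_check;
+ *	...
+ *	if (mpcs->link_check && !mpcs->link_check(mpcs))
+ *		return;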
+ */ + mv88e6xxx_reg_lock(chip); + chip->info->ops->port_get_cmode(chip, port->port, &cmode); + mv88e6xxx_reg_unlock(chip); + + return cmode == MV88E6XXX_PORT_STS_CMODE_100BASEX || + cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX || + cmode == MV88E6XXX_PORT_STS_CMODE_SGMII; +} + +static int mv88e6352_pcs_init(struct mv88e6xxx_chip *chip, int port) +{ + struct marvell_c22_pcs *mpcs; + struct mii_bus *bus; + struct device *dev; + unsigned int irq; + int err; + + mv88e6xxx_reg_lock(chip); + err = mv88e6352_g2_scratch_port_has_serdes(chip, port); + mv88e6xxx_reg_unlock(chip); + if (err <= 0) + return err; + + irq = mv88e6xxx_serdes_irq_mapping(chip, port); + bus = mv88e6xxx_default_mdio_bus(chip); + dev = chip->dev; + + mpcs = marvell_c22_pcs_alloc(dev, bus, MV88E6352_ADDR_SERDES); + if (!mpcs) + return -ENOMEM; + + snprintf(mpcs->name, sizeof(mpcs->name), + "mv88e6xxx-%s-serdes-%d", dev_name(dev), port); + + mpcs->link_check = mv88e6352_pcs_link_check; + mpcs->port = &chip->ports[port]; + + err = marvell_c22_pcs_setup_irq(mpcs, irq); + if (err) { + kfree(mpcs); + return err; + } + + chip->ports[port].pcs_private = &mpcs->phylink_pcs; + + return 0; +} + +static void mv88e6352_pcs_teardown(struct mv88e6xxx_chip *chip, int port) +{ + struct marvell_c22_pcs *mpcs; + struct phylink_pcs *pcs; + + pcs = chip->ports[port].pcs_private; + if (!pcs) + return; + + mpcs = pcs_to_marvell_c22_pcs(pcs); + + if (mpcs->irq) + free_irq(mpcs->irq, mpcs); + + kfree(mpcs); + + chip->ports[port].pcs_private = NULL; +} + +static struct phylink_pcs *mv88e6352_pcs_select(struct mv88e6xxx_chip *chip, + int port, + phy_interface_t interface) +{ + return chip->ports[port].pcs_private; +} + +const struct mv88e6xxx_pcs_ops mv88e6352_pcs_ops = { + .pcs_init = mv88e6352_pcs_init, + .pcs_teardown = mv88e6352_pcs_teardown, + .pcs_select = mv88e6352_pcs_select, +}; diff --git a/drivers/net/dsa/mv88e6xxx/pcs-639x.c b/drivers/net/dsa/mv88e6xxx/pcs-639x.c new file mode 100644 index 000000000000..d758a6c1b226 --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/pcs-639x.c @@ -0,0 +1,970 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Marvell 88E6352 family SERDES PCS support + * + * Copyright (c) 2008 Marvell Semiconductor + * + * Copyright (c) 2017 Andrew Lunn <andrew@lunn.ch> + */ +#include <linux/interrupt.h> +#include <linux/irqdomain.h> +#include <linux/mii.h> + +#include "chip.h" +#include "global2.h" +#include "phy.h" +#include "port.h" +#include "serdes.h" + +struct mv88e639x_pcs { + struct mdio_device mdio; + struct phylink_pcs sgmii_pcs; + struct phylink_pcs xg_pcs; + bool erratum_3_14; + bool supports_5g; + phy_interface_t interface; + unsigned int irq; + char name[64]; + irqreturn_t (*handle_irq)(struct mv88e639x_pcs *mpcs); +}; + +static int mv88e639x_read(struct mv88e639x_pcs *mpcs, u16 regnum, u16 *val) +{ + int err; + + err = mdiodev_c45_read(&mpcs->mdio, MDIO_MMD_PHYXS, regnum); + if (err < 0) + return err; + + *val = err; + + return 0; +} + +static int mv88e639x_write(struct mv88e639x_pcs *mpcs, u16 regnum, u16 val) +{ + return mdiodev_c45_write(&mpcs->mdio, MDIO_MMD_PHYXS, regnum, val); +} + +static int mv88e639x_modify(struct mv88e639x_pcs *mpcs, u16 regnum, u16 mask, + u16 val) +{ + return mdiodev_c45_modify(&mpcs->mdio, MDIO_MMD_PHYXS, regnum, mask, + val); +} + +static int mv88e639x_modify_changed(struct mv88e639x_pcs *mpcs, u16 regnum, + u16 mask, u16 set) +{ + return mdiodev_c45_modify_changed(&mpcs->mdio, MDIO_MMD_PHYXS, regnum, + mask, set); +} + +static struct mv88e639x_pcs * +mv88e639x_pcs_alloc(struct 
device *dev, struct mii_bus *bus, unsigned int addr, + int port) +{ + struct mv88e639x_pcs *mpcs; + + mpcs = kzalloc(sizeof(*mpcs), GFP_KERNEL); + if (!mpcs) + return NULL; + + mpcs->mdio.dev.parent = dev; + mpcs->mdio.bus = bus; + mpcs->mdio.addr = addr; + + snprintf(mpcs->name, sizeof(mpcs->name), + "mv88e6xxx-%s-serdes-%d", dev_name(dev), port); + + return mpcs; +} + +static irqreturn_t mv88e639x_pcs_handle_irq(int irq, void *dev_id) +{ + struct mv88e639x_pcs *mpcs = dev_id; + irqreturn_t (*handler)(struct mv88e639x_pcs *); + + handler = READ_ONCE(mpcs->handle_irq); + if (!handler) + return IRQ_NONE; + + return handler(mpcs); +} + +static int mv88e639x_pcs_setup_irq(struct mv88e639x_pcs *mpcs, + struct mv88e6xxx_chip *chip, int port) +{ + unsigned int irq; + + irq = mv88e6xxx_serdes_irq_mapping(chip, port); + if (!irq) { + /* Use polling mode */ + mpcs->sgmii_pcs.poll = true; + mpcs->xg_pcs.poll = true; + return 0; + } + + mpcs->irq = irq; + + return request_threaded_irq(irq, NULL, mv88e639x_pcs_handle_irq, + IRQF_ONESHOT, mpcs->name, mpcs); +} + +static void mv88e639x_pcs_teardown(struct mv88e6xxx_chip *chip, int port) +{ + struct mv88e639x_pcs *mpcs = chip->ports[port].pcs_private; + + if (!mpcs) + return; + + if (mpcs->irq) + free_irq(mpcs->irq, mpcs); + + kfree(mpcs); + + chip->ports[port].pcs_private = NULL; +} + +static struct mv88e639x_pcs *sgmii_pcs_to_mv88e639x_pcs(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct mv88e639x_pcs, sgmii_pcs); +} + +static irqreturn_t mv88e639x_sgmii_handle_irq(struct mv88e639x_pcs *mpcs) +{ + u16 int_status; + int err; + + err = mv88e639x_read(mpcs, MV88E6390_SGMII_INT_STATUS, &int_status); + if (err) + return IRQ_NONE; + + if (int_status & (MV88E6390_SGMII_INT_LINK_DOWN | + MV88E6390_SGMII_INT_LINK_UP)) { + phylink_pcs_change(&mpcs->sgmii_pcs, + int_status & MV88E6390_SGMII_INT_LINK_UP); + + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static int mv88e639x_sgmii_pcs_control_irq(struct mv88e639x_pcs *mpcs, + bool enable) +{ + u16 val = 0; + + if (enable) + val |= MV88E6390_SGMII_INT_LINK_DOWN | + MV88E6390_SGMII_INT_LINK_UP; + + return mv88e639x_modify(mpcs, MV88E6390_SGMII_INT_ENABLE, + MV88E6390_SGMII_INT_LINK_DOWN | + MV88E6390_SGMII_INT_LINK_UP, val); +} + +static int mv88e639x_sgmii_pcs_control_pwr(struct mv88e639x_pcs *mpcs, + bool enable) +{ + u16 mask, val; + + if (enable) { + mask = BMCR_RESET | BMCR_LOOPBACK | BMCR_PDOWN; + val = 0; + } else { + mask = val = BMCR_PDOWN; + } + + return mv88e639x_modify(mpcs, MV88E6390_SGMII_BMCR, mask, val); +} + +static int mv88e639x_sgmii_pcs_enable(struct phylink_pcs *pcs) +{ + struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs); + + /* power enable done in post_config */ + mpcs->handle_irq = mv88e639x_sgmii_handle_irq; + + return mv88e639x_sgmii_pcs_control_irq(mpcs, !!mpcs->irq); +} + +static void mv88e639x_sgmii_pcs_disable(struct phylink_pcs *pcs) +{ + struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs); + + mv88e639x_sgmii_pcs_control_irq(mpcs, false); + mv88e639x_sgmii_pcs_control_pwr(mpcs, false); +} + +static void mv88e639x_sgmii_pcs_pre_config(struct phylink_pcs *pcs, + phy_interface_t interface) +{ + struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs); + + mv88e639x_sgmii_pcs_control_pwr(mpcs, false); +} + +static int mv88e6390_erratum_3_14(struct mv88e639x_pcs *mpcs) +{ + static const int lanes[] = { MV88E6390_PORT9_LANE0, MV88E6390_PORT9_LANE1, + MV88E6390_PORT9_LANE2, MV88E6390_PORT9_LANE3, + MV88E6390_PORT10_LANE0, MV88E6390_PORT10_LANE1, + 
MV88E6390_PORT10_LANE2, MV88E6390_PORT10_LANE3 }; + int err, i; + + /* 88e6190x and 88e6390x errata 3.14: + * After chip reset, SERDES reconfiguration or SERDES core + * Software Reset, the SERDES lanes may not be properly aligned + * resulting in CRC errors + */ + + for (i = 0; i < ARRAY_SIZE(lanes); i++) { + err = mdiobus_c45_write(mpcs->mdio.bus, lanes[i], + MDIO_MMD_PHYXS, + 0xf054, 0x400C); + if (err) + return err; + + err = mdiobus_c45_write(mpcs->mdio.bus, lanes[i], + MDIO_MMD_PHYXS, + 0xf054, 0x4000); + if (err) + return err; + } + + return 0; +} + +static int mv88e639x_sgmii_pcs_post_config(struct phylink_pcs *pcs, + phy_interface_t interface) +{ + struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs); + int err; + + mv88e639x_sgmii_pcs_control_pwr(mpcs, true); + + if (mpcs->erratum_3_14) { + err = mv88e6390_erratum_3_14(mpcs); + if (err) + dev_err(mpcs->mdio.dev.parent, + "failed to apply erratum 3.14: %pe\n", + ERR_PTR(err)); + } + + return 0; +} + +static void mv88e639x_sgmii_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) +{ + struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs); + u16 bmsr, lpa, status; + int err; + + err = mv88e639x_read(mpcs, MV88E6390_SGMII_BMSR, &bmsr); + if (err) { + dev_err(mpcs->mdio.dev.parent, + "can't read Serdes PHY %s: %pe\n", + "BMSR", ERR_PTR(err)); + state->link = false; + return; + } + + err = mv88e639x_read(mpcs, MV88E6390_SGMII_LPA, &lpa); + if (err) { + dev_err(mpcs->mdio.dev.parent, + "can't read Serdes PHY %s: %pe\n", + "LPA", ERR_PTR(err)); + state->link = false; + return; + } + + err = mv88e639x_read(mpcs, MV88E6390_SGMII_PHY_STATUS, &status); + if (err) { + dev_err(mpcs->mdio.dev.parent, + "can't read Serdes PHY %s: %pe\n", + "status", ERR_PTR(err)); + state->link = false; + return; + } + + mv88e6xxx_pcs_decode_state(mpcs->mdio.dev.parent, bmsr, lpa, status, + state); +} + +static int mv88e639x_sgmii_pcs_config(struct phylink_pcs *pcs, + unsigned int neg_mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs); + u16 val, bmcr; + bool changed; + int adv, err; + + adv = phylink_mii_c22_pcs_encode_advertisement(interface, advertising); + if (adv < 0) + return 0; + + mpcs->interface = interface; + + err = mv88e639x_modify_changed(mpcs, MV88E6390_SGMII_ADVERTISE, + 0xffff, adv); + if (err < 0) + return err; + + changed = err > 0; + + err = mv88e639x_read(mpcs, MV88E6390_SGMII_BMCR, &val); + if (err) + return err; + + if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) + bmcr = val | BMCR_ANENABLE; + else + bmcr = val & ~BMCR_ANENABLE; + + /* setting ANENABLE triggers a restart of negotiation */ + if (bmcr == val) + return changed; + + return mv88e639x_write(mpcs, MV88E6390_SGMII_BMCR, bmcr); +} + +static void mv88e639x_sgmii_pcs_an_restart(struct phylink_pcs *pcs) +{ + struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs); + + mv88e639x_modify(mpcs, MV88E6390_SGMII_BMCR, + BMCR_ANRESTART, BMCR_ANRESTART); +} + +static void mv88e639x_sgmii_pcs_link_up(struct phylink_pcs *pcs, + unsigned int mode, + phy_interface_t interface, + int speed, int duplex) +{ + struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs); + u16 bmcr; + int err; + + if (phylink_autoneg_inband(mode)) + return; + + bmcr = mii_bmcr_encode_fixed(speed, duplex); + + err = mv88e639x_modify(mpcs, MV88E6390_SGMII_BMCR, + BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_FULLDPLX, + bmcr); + if (err) + dev_err(mpcs->mdio.dev.parent, + 
"can't access Serdes PHY %s: %pe\n", + "BMCR", ERR_PTR(err)); +} + +static const struct phylink_pcs_ops mv88e639x_sgmii_pcs_ops = { + .pcs_enable = mv88e639x_sgmii_pcs_enable, + .pcs_disable = mv88e639x_sgmii_pcs_disable, + .pcs_pre_config = mv88e639x_sgmii_pcs_pre_config, + .pcs_post_config = mv88e639x_sgmii_pcs_post_config, + .pcs_get_state = mv88e639x_sgmii_pcs_get_state, + .pcs_an_restart = mv88e639x_sgmii_pcs_an_restart, + .pcs_config = mv88e639x_sgmii_pcs_config, + .pcs_link_up = mv88e639x_sgmii_pcs_link_up, +}; + +static struct mv88e639x_pcs *xg_pcs_to_mv88e639x_pcs(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct mv88e639x_pcs, xg_pcs); +} + +static int mv88e639x_xg_pcs_enable(struct mv88e639x_pcs *mpcs) +{ + return mv88e639x_modify(mpcs, MV88E6390_10G_CTRL1, + MDIO_CTRL1_RESET | MDIO_PCS_CTRL1_LOOPBACK | + MDIO_CTRL1_LPOWER, 0); +} + +static void mv88e639x_xg_pcs_disable(struct mv88e639x_pcs *mpcs) +{ + mv88e639x_modify(mpcs, MV88E6390_10G_CTRL1, MDIO_CTRL1_LPOWER, + MDIO_CTRL1_LPOWER); +} + +static void mv88e639x_xg_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) +{ + struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs); + u16 status; + int err; + + state->link = false; + + err = mv88e639x_read(mpcs, MV88E6390_10G_STAT1, &status); + if (err) { + dev_err(mpcs->mdio.dev.parent, + "can't read Serdes PHY %s: %pe\n", + "STAT1", ERR_PTR(err)); + return; + } + + state->link = !!(status & MDIO_STAT1_LSTATUS); + if (state->link) { + switch (state->interface) { + case PHY_INTERFACE_MODE_5GBASER: + state->speed = SPEED_5000; + break; + + case PHY_INTERFACE_MODE_10GBASER: + case PHY_INTERFACE_MODE_RXAUI: + case PHY_INTERFACE_MODE_XAUI: + state->speed = SPEED_10000; + break; + + default: + state->link = false; + return; + } + + state->duplex = DUPLEX_FULL; + } +} + +static int mv88e639x_xg_pcs_config(struct phylink_pcs *pcs, + unsigned int neg_mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + return 0; +} + +static struct phylink_pcs * +mv88e639x_pcs_select(struct mv88e6xxx_chip *chip, int port, + phy_interface_t mode) +{ + struct mv88e639x_pcs *mpcs; + + mpcs = chip->ports[port].pcs_private; + if (!mpcs) + return NULL; + + switch (mode) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + return &mpcs->sgmii_pcs; + + case PHY_INTERFACE_MODE_5GBASER: + if (!mpcs->supports_5g) + return NULL; + fallthrough; + case PHY_INTERFACE_MODE_10GBASER: + case PHY_INTERFACE_MODE_XAUI: + case PHY_INTERFACE_MODE_RXAUI: + case PHY_INTERFACE_MODE_USXGMII: + return &mpcs->xg_pcs; + + default: + return NULL; + } +} + +/* Marvell 88E6390 Specific support */ + +static irqreturn_t mv88e6390_xg_handle_irq(struct mv88e639x_pcs *mpcs) +{ + u16 int_status; + int err; + + err = mv88e639x_read(mpcs, MV88E6390_10G_INT_STATUS, &int_status); + if (err) + return IRQ_NONE; + + if (int_status & (MV88E6390_10G_INT_LINK_DOWN | + MV88E6390_10G_INT_LINK_UP)) { + phylink_pcs_change(&mpcs->xg_pcs, + int_status & MV88E6390_10G_INT_LINK_UP); + + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static int mv88e6390_xg_control_irq(struct mv88e639x_pcs *mpcs, bool enable) +{ + u16 val = 0; + + if (enable) + val = MV88E6390_10G_INT_LINK_DOWN | MV88E6390_10G_INT_LINK_UP; + + return mv88e639x_modify(mpcs, MV88E6390_10G_INT_ENABLE, + MV88E6390_10G_INT_LINK_DOWN | + MV88E6390_10G_INT_LINK_UP, val); +} + +static int mv88e6390_xg_pcs_enable(struct phylink_pcs *pcs) +{ + struct 
mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
+	int err;
+
+	err = mv88e639x_xg_pcs_enable(mpcs);
+	if (err)
+		return err;
+
+	mpcs->handle_irq = mv88e6390_xg_handle_irq;
+
+	return mv88e6390_xg_control_irq(mpcs, !!mpcs->irq);
+}
+
+static void mv88e6390_xg_pcs_disable(struct phylink_pcs *pcs)
+{
+	struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
+
+	mv88e6390_xg_control_irq(mpcs, false);
+	mv88e639x_xg_pcs_disable(mpcs);
+}
+
+static const struct phylink_pcs_ops mv88e6390_xg_pcs_ops = {
+	.pcs_enable = mv88e6390_xg_pcs_enable,
+	.pcs_disable = mv88e6390_xg_pcs_disable,
+	.pcs_get_state = mv88e639x_xg_pcs_get_state,
+	.pcs_config = mv88e639x_xg_pcs_config,
+};
+
+static int mv88e6390_pcs_enable_checker(struct mv88e639x_pcs *mpcs)
+{
+	return mv88e639x_modify(mpcs, MV88E6390_PG_CONTROL,
+				MV88E6390_PG_CONTROL_ENABLE_PC,
+				MV88E6390_PG_CONTROL_ENABLE_PC);
+}
+
+static int mv88e6390_pcs_init(struct mv88e6xxx_chip *chip, int port)
+{
+	struct mv88e639x_pcs *mpcs;
+	struct mii_bus *bus;
+	struct device *dev;
+	int lane, err;
+
+	lane = mv88e6xxx_serdes_get_lane(chip, port);
+	if (lane < 0)
+		return 0;
+
+	bus = mv88e6xxx_default_mdio_bus(chip);
+	dev = chip->dev;
+
+	mpcs = mv88e639x_pcs_alloc(dev, bus, lane, port);
+	if (!mpcs)
+		return -ENOMEM;
+
+	mpcs->sgmii_pcs.ops = &mv88e639x_sgmii_pcs_ops;
+	mpcs->sgmii_pcs.neg_mode = true;
+	mpcs->xg_pcs.ops = &mv88e6390_xg_pcs_ops;
+	mpcs->xg_pcs.neg_mode = true;
+
+	if (chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6190X ||
+	    chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6390X)
+		mpcs->erratum_3_14 = true;
+
+	err = mv88e639x_pcs_setup_irq(mpcs, chip, port);
+	if (err)
+		goto err_free;
+
+	/* 6390 and 6390x have the checker; 6393x doesn't appear to. */
+	/* This is to enable gathering the statistics. Maybe this
+	 * should call out to a helper? Or we could do this at init time.
+	 */
+	err = mv88e6390_pcs_enable_checker(mpcs);
+	if (err)
+		goto err_free;
+
+	chip->ports[port].pcs_private = mpcs;
+
+	return 0;
+
+err_free:
+	kfree(mpcs);
+	return err;
+}
+
+const struct mv88e6xxx_pcs_ops mv88e6390_pcs_ops = {
+	.pcs_init = mv88e6390_pcs_init,
+	.pcs_teardown = mv88e639x_pcs_teardown,
+	.pcs_select = mv88e639x_pcs_select,
+};
+
+/* Marvell 88E6393X Specific support */
+
+static int mv88e6393x_power_lane(struct mv88e639x_pcs *mpcs, bool enable)
+{
+	u16 val = MV88E6393X_SERDES_CTRL1_TX_PDOWN |
+		  MV88E6393X_SERDES_CTRL1_RX_PDOWN;
+
+	return mv88e639x_modify(mpcs, MV88E6393X_SERDES_CTRL1, val,
+				enable ? 0 : val);
+}
+
+/* mv88e6393x family errata 4.6:
+ * Cannot clear PwrDn bit on SERDES if the device is configured in CPU_MGD
+ * mode or P0_mode is configured for [x]MII.
+ * Workaround: Set SERDES register 4.F002 bit 5=0 and bit 15=1.
+ *
+ * It seems that after this workaround the SERDES is automatically powered up
+ * (the bit is cleared), so power it down.
+ */
+static int mv88e6393x_erratum_4_6(struct mv88e639x_pcs *mpcs)
+{
+	int err;
+
+	err = mv88e639x_modify(mpcs, MV88E6393X_SERDES_POC,
+			       MV88E6393X_SERDES_POC_PDOWN |
+			       MV88E6393X_SERDES_POC_RESET,
+			       MV88E6393X_SERDES_POC_RESET);
+	if (err)
+		return err;
+
+	err = mv88e639x_modify(mpcs, MV88E6390_SGMII_BMCR,
+			       BMCR_PDOWN, BMCR_PDOWN);
+	if (err)
+		return err;
+
+	err = mv88e639x_sgmii_pcs_control_pwr(mpcs, false);
+	if (err)
+		return err;
+
+	return mv88e6393x_power_lane(mpcs, false);
+}
+
+/* mv88e6393x family errata 4.8:
+ * When a SERDES port is operating in 1000BASE-X or SGMII mode, link may not
+ * come up after hardware reset or software reset of the SERDES core. The
+ * workaround is to write SERDES register 4.F074.14=1 for only those modes
+ * and 0 in all other modes.
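+ *
+ * For context, the 88E6393X applies this from its .pcs_post_config
+ * hook, before powering the lane back up (sketch of the call order in
+ * mv88e6393x_sgmii_pcs_post_config() further down in this file, with
+ * error handling elided):
+ *
+ *	mv88e6393x_erratum_4_8(mpcs);
+ *	mv88e6393x_sgmii_apply_2500basex_an(mpcs, interface, true);
+ *	mv88e6393x_power_lane(mpcs, true);
+ *	mv88e639x_sgmii_pcs_post_config(pcs, interface);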
+ */
+static int mv88e6393x_erratum_4_8(struct mv88e639x_pcs *mpcs)
+{
+	u16 reg, poc;
+	int err;
+
+	err = mv88e639x_read(mpcs, MV88E6393X_SERDES_POC, &poc);
+	if (err)
+		return err;
+
+	poc &= MV88E6393X_SERDES_POC_PCS_MASK;
+	if (poc == MV88E6393X_SERDES_POC_PCS_1000BASEX ||
+	    poc == MV88E6393X_SERDES_POC_PCS_SGMII_PHY ||
+	    poc == MV88E6393X_SERDES_POC_PCS_SGMII_MAC)
+		reg = MV88E6393X_ERRATA_4_8_BIT;
+	else
+		reg = 0;
+
+	return mv88e639x_modify(mpcs, MV88E6393X_ERRATA_4_8_REG,
+				MV88E6393X_ERRATA_4_8_BIT, reg);
+}
+
+/* mv88e6393x family errata 5.2:
+ * For optimal signal integrity, the following sequence should be applied to
+ * SERDES operating in 10G mode. These registers only apply to 10G operation
+ * and have no effect on other speeds.
+ */
+static int mv88e6393x_erratum_5_2(struct mv88e639x_pcs *mpcs)
+{
+	static const struct {
+		u16 dev, reg, val, mask;
+	} fixes[] = {
+		{ MDIO_MMD_VEND1, 0x8093, 0xcb5a, 0xffff },
+		{ MDIO_MMD_VEND1, 0x8171, 0x7088, 0xffff },
+		{ MDIO_MMD_VEND1, 0x80c9, 0x311a, 0xffff },
+		{ MDIO_MMD_VEND1, 0x80a2, 0x8000, 0xff7f },
+		{ MDIO_MMD_VEND1, 0x80a9, 0x0000, 0xfff0 },
+		{ MDIO_MMD_VEND1, 0x80a3, 0x0000, 0xf8ff },
+		{ MDIO_MMD_PHYXS, MV88E6393X_SERDES_POC,
+		  MV88E6393X_SERDES_POC_RESET, MV88E6393X_SERDES_POC_RESET },
+	};
+	int err, i;
+
+	for (i = 0; i < ARRAY_SIZE(fixes); ++i) {
+		err = mdiodev_c45_modify(&mpcs->mdio, fixes[i].dev,
+					 fixes[i].reg, fixes[i].mask,
+					 fixes[i].val);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+/* Inband AN is broken on Amethyst in 2500base-x mode when set by the standard
+ * mechanism (via cmode).
+ * We can get around this by configuring the PCS mode to 1000base-x and then
+ * writing value 0x58 to register 1e.8000. (This must be done while the SerDes
+ * receiver and transmitter are disabled, which is the case when this function
+ * is called.)
+ * It seems that when we do this configuration to 2500base-x mode (by changing
+ * PCS mode to 1000base-x and frequency to 3.125 GHz from 1.25 GHz) and then
+ * configure to sgmii or 1000base-x, the device thinks that it already has
+ * SerDes at 1.25 GHz and does not change the 1e.8000 register, leaving SerDes
+ * at 3.125 GHz.
+ * To avoid this, change PCS mode back to 2500base-x when disabling SerDes from
+ * 2500base-x mode.
+ */
+static int mv88e6393x_fix_2500basex_an(struct mv88e639x_pcs *mpcs, bool on)
+{
+	u16 reg;
+	int err;
+
+	if (on)
+		reg = MV88E6393X_SERDES_POC_PCS_1000BASEX |
+		      MV88E6393X_SERDES_POC_AN;
+	else
+		reg = MV88E6393X_SERDES_POC_PCS_2500BASEX;
+
+	reg |= MV88E6393X_SERDES_POC_RESET;
+
+	err = mv88e639x_modify(mpcs, MV88E6393X_SERDES_POC,
+			       MV88E6393X_SERDES_POC_PCS_MASK |
+			       MV88E6393X_SERDES_POC_AN |
+			       MV88E6393X_SERDES_POC_RESET, reg);
+	if (err)
+		return err;
+
+	return mdiodev_c45_write(&mpcs->mdio, MDIO_MMD_VEND1, 0x8000, 0x58);
+}
+
+static int mv88e6393x_sgmii_apply_2500basex_an(struct mv88e639x_pcs *mpcs,
+					       phy_interface_t interface,
+					       bool enable)
+{
+	int err;
+
+	if (interface != PHY_INTERFACE_MODE_2500BASEX)
+		return 0;
+
+	err = mv88e6393x_fix_2500basex_an(mpcs, enable);
+	if (err)
+		dev_err(mpcs->mdio.dev.parent,
+			"failed to %s 2500basex fix: %pe\n",
+			enable ?
"enable" : "disable", ERR_PTR(err)); + + return err; +} + +static void mv88e6393x_sgmii_pcs_disable(struct phylink_pcs *pcs) +{ + struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs); + + mv88e639x_sgmii_pcs_disable(pcs); + mv88e6393x_power_lane(mpcs, false); + mv88e6393x_sgmii_apply_2500basex_an(mpcs, mpcs->interface, false); +} + +static void mv88e6393x_sgmii_pcs_pre_config(struct phylink_pcs *pcs, + phy_interface_t interface) +{ + struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs); + + mv88e639x_sgmii_pcs_pre_config(pcs, interface); + mv88e6393x_power_lane(mpcs, false); + mv88e6393x_sgmii_apply_2500basex_an(mpcs, mpcs->interface, false); +} + +static int mv88e6393x_sgmii_pcs_post_config(struct phylink_pcs *pcs, + phy_interface_t interface) +{ + struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs); + int err; + + err = mv88e6393x_erratum_4_8(mpcs); + if (err) + return err; + + err = mv88e6393x_sgmii_apply_2500basex_an(mpcs, interface, true); + if (err) + return err; + + err = mv88e6393x_power_lane(mpcs, true); + if (err) + return err; + + return mv88e639x_sgmii_pcs_post_config(pcs, interface); +} + +static const struct phylink_pcs_ops mv88e6393x_sgmii_pcs_ops = { + .pcs_enable = mv88e639x_sgmii_pcs_enable, + .pcs_disable = mv88e6393x_sgmii_pcs_disable, + .pcs_pre_config = mv88e6393x_sgmii_pcs_pre_config, + .pcs_post_config = mv88e6393x_sgmii_pcs_post_config, + .pcs_get_state = mv88e639x_sgmii_pcs_get_state, + .pcs_an_restart = mv88e639x_sgmii_pcs_an_restart, + .pcs_config = mv88e639x_sgmii_pcs_config, + .pcs_link_up = mv88e639x_sgmii_pcs_link_up, +}; + +static irqreturn_t mv88e6393x_xg_handle_irq(struct mv88e639x_pcs *mpcs) +{ + u16 int_status, stat1; + bool link_down; + int err; + + err = mv88e639x_read(mpcs, MV88E6393X_10G_INT_STATUS, &int_status); + if (err) + return IRQ_NONE; + + if (int_status & MV88E6393X_10G_INT_LINK_CHANGE) { + err = mv88e639x_read(mpcs, MV88E6390_10G_STAT1, &stat1); + if (err) + return IRQ_NONE; + + link_down = !(stat1 & MDIO_STAT1_LSTATUS); + + phylink_pcs_change(&mpcs->xg_pcs, !link_down); + + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static int mv88e6393x_xg_control_irq(struct mv88e639x_pcs *mpcs, bool enable) +{ + u16 val = 0; + + if (enable) + val = MV88E6393X_10G_INT_LINK_CHANGE; + + return mv88e639x_modify(mpcs, MV88E6393X_10G_INT_ENABLE, + MV88E6393X_10G_INT_LINK_CHANGE, val); +} + +static int mv88e6393x_xg_pcs_enable(struct phylink_pcs *pcs) +{ + struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs); + + mpcs->handle_irq = mv88e6393x_xg_handle_irq; + + return mv88e6393x_xg_control_irq(mpcs, !!mpcs->irq); +} + +static void mv88e6393x_xg_pcs_disable(struct phylink_pcs *pcs) +{ + struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs); + + mv88e6393x_xg_control_irq(mpcs, false); + mv88e639x_xg_pcs_disable(mpcs); + mv88e6393x_power_lane(mpcs, false); +} + +/* The PCS has to be powered down while CMODE is changed */ +static void mv88e6393x_xg_pcs_pre_config(struct phylink_pcs *pcs, + phy_interface_t interface) +{ + struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs); + + mv88e639x_xg_pcs_disable(mpcs); + mv88e6393x_power_lane(mpcs, false); +} + +static int mv88e6393x_xg_pcs_post_config(struct phylink_pcs *pcs, + phy_interface_t interface) +{ + struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs); + int err; + + if (interface == PHY_INTERFACE_MODE_10GBASER || + interface == PHY_INTERFACE_MODE_USXGMII) { + err = mv88e6393x_erratum_5_2(mpcs); + if (err) + return err; + } + + err = mv88e6393x_power_lane(mpcs, 
true); + if (err) + return err; + + return mv88e639x_xg_pcs_enable(mpcs); +} + +static void mv88e6393x_xg_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) +{ + struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs); + u16 status, lp_status; + int err; + + if (state->interface != PHY_INTERFACE_MODE_USXGMII) + return mv88e639x_xg_pcs_get_state(pcs, state); + + state->link = false; + + err = mv88e639x_read(mpcs, MV88E6390_USXGMII_PHY_STATUS, &status); + err = err ? : mv88e639x_read(mpcs, MV88E6390_USXGMII_LP_STATUS, &lp_status); + if (err) { + dev_err(mpcs->mdio.dev.parent, + "can't read USXGMII status: %pe\n", ERR_PTR(err)); + return; + } + + state->link = !!(status & MDIO_USXGMII_LINK); + state->an_complete = state->link; + phylink_decode_usxgmii_word(state, lp_status); +} + +static const struct phylink_pcs_ops mv88e6393x_xg_pcs_ops = { + .pcs_enable = mv88e6393x_xg_pcs_enable, + .pcs_disable = mv88e6393x_xg_pcs_disable, + .pcs_pre_config = mv88e6393x_xg_pcs_pre_config, + .pcs_post_config = mv88e6393x_xg_pcs_post_config, + .pcs_get_state = mv88e6393x_xg_pcs_get_state, + .pcs_config = mv88e639x_xg_pcs_config, +}; + +static int mv88e6393x_pcs_init(struct mv88e6xxx_chip *chip, int port) +{ + struct mv88e639x_pcs *mpcs; + struct mii_bus *bus; + struct device *dev; + int lane, err; + + lane = mv88e6xxx_serdes_get_lane(chip, port); + if (lane < 0) + return 0; + + bus = mv88e6xxx_default_mdio_bus(chip); + dev = chip->dev; + + mpcs = mv88e639x_pcs_alloc(dev, bus, lane, port); + if (!mpcs) + return -ENOMEM; + + mpcs->sgmii_pcs.ops = &mv88e6393x_sgmii_pcs_ops; + mpcs->sgmii_pcs.neg_mode = true; + mpcs->xg_pcs.ops = &mv88e6393x_xg_pcs_ops; + mpcs->xg_pcs.neg_mode = true; + mpcs->supports_5g = true; + + err = mv88e6393x_erratum_4_6(mpcs); + if (err) + goto err_free; + + err = mv88e639x_pcs_setup_irq(mpcs, chip, port); + if (err) + goto err_free; + + chip->ports[port].pcs_private = mpcs; + + return 0; + +err_free: + kfree(mpcs); + return err; +} + +const struct mv88e6xxx_pcs_ops mv88e6393x_pcs_ops = { + .pcs_init = mv88e6393x_pcs_init, + .pcs_teardown = mv88e639x_pcs_teardown, + .pcs_select = mv88e639x_pcs_select, +}; diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index dd66ec902d4c..5394a8cf7bf1 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -524,7 +524,6 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode, bool force) { u16 cmode; - int lane; u16 reg; int err; @@ -577,19 +576,6 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, if (cmode == chip->ports[port].cmode && !force) return 0; - lane = mv88e6xxx_serdes_get_lane(chip, port); - if (lane >= 0) { - if (chip->ports[port].serdes_irq) { - err = mv88e6xxx_serdes_irq_disable(chip, port, lane); - if (err) - return err; - } - - err = mv88e6xxx_serdes_power_down(chip, port, lane); - if (err) - return err; - } - chip->ports[port].cmode = 0; if (cmode) { @@ -605,22 +591,6 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port, return err; chip->ports[port].cmode = cmode; - - lane = mv88e6xxx_serdes_get_lane(chip, port); - if (lane == -ENODEV) - return 0; - if (lane < 0) - return lane; - - err = mv88e6xxx_serdes_power_up(chip, port, lane); - if (err) - return err; - - if (chip->ports[port].serdes_irq) { - err = mv88e6xxx_serdes_irq_enable(chip, port, lane); - if (err) - return err; - } } return 0; diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c 
b/drivers/net/dsa/mv88e6xxx/ptp.c index ea17231dc34e..56391e09b325 100644 --- a/drivers/net/dsa/mv88e6xxx/ptp.c +++ b/drivers/net/dsa/mv88e6xxx/ptp.c @@ -182,6 +182,10 @@ static void mv88e6352_tai_event_work(struct work_struct *ugly) mv88e6xxx_reg_lock(chip); err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_EVENT_STATUS, status[0]); mv88e6xxx_reg_unlock(chip); + if (err) { + dev_err(chip->dev, "failed to write TAI status register\n"); + return; + } /* This is an external timestamp */ ev.type = PTP_CLOCK_EXTTS; diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index 80167d53212f..3b4b42651fa3 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -39,15 +39,8 @@ static int mv88e6390_serdes_read(struct mv88e6xxx_chip *chip, return mv88e6xxx_phy_read_c45(chip, lane, device, reg, val); } -static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip, - int lane, int device, int reg, u16 val) -{ - return mv88e6xxx_phy_write_c45(chip, lane, device, reg, val); -} - -static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, - u16 bmsr, u16 lpa, u16 status, - struct phylink_link_state *state) +int mv88e6xxx_pcs_decode_state(struct device *dev, u16 bmsr, u16 lpa, + u16 status, struct phylink_link_state *state) { state->link = false; @@ -88,7 +81,7 @@ static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, state->speed = SPEED_10; break; default: - dev_err(chip->dev, "invalid PHY speed\n"); + dev_err(dev, "invalid PHY speed\n"); return -EINVAL; } } else if (state->link && @@ -117,160 +110,6 @@ static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, return 0; } -int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, - bool up) -{ - u16 val, new_val; - int err; - - err = mv88e6352_serdes_read(chip, MII_BMCR, &val); - if (err) - return err; - - if (up) - new_val = val & ~BMCR_PDOWN; - else - new_val = val | BMCR_PDOWN; - - if (val != new_val) - err = mv88e6352_serdes_write(chip, MII_BMCR, new_val); - - return err; -} - -int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, - int lane, unsigned int mode, - phy_interface_t interface, - const unsigned long *advertise) -{ - u16 adv, bmcr, val; - bool changed; - int err; - - switch (interface) { - case PHY_INTERFACE_MODE_SGMII: - adv = 0x0001; - break; - - case PHY_INTERFACE_MODE_1000BASEX: - adv = linkmode_adv_to_mii_adv_x(advertise, - ETHTOOL_LINK_MODE_1000baseX_Full_BIT); - break; - - default: - return 0; - } - - err = mv88e6352_serdes_read(chip, MII_ADVERTISE, &val); - if (err) - return err; - - changed = val != adv; - if (changed) { - err = mv88e6352_serdes_write(chip, MII_ADVERTISE, adv); - if (err) - return err; - } - - err = mv88e6352_serdes_read(chip, MII_BMCR, &val); - if (err) - return err; - - if (phylink_autoneg_inband(mode)) - bmcr = val | BMCR_ANENABLE; - else - bmcr = val & ~BMCR_ANENABLE; - - if (bmcr == val) - return changed; - - return mv88e6352_serdes_write(chip, MII_BMCR, bmcr); -} - -int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, - int lane, struct phylink_link_state *state) -{ - u16 bmsr, lpa, status; - int err; - - err = mv88e6352_serdes_read(chip, MII_BMSR, &bmsr); - if (err) { - dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err); - return err; - } - - err = mv88e6352_serdes_read(chip, 0x11, &status); - if (err) { - dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err); - return err; - } - - err = mv88e6352_serdes_read(chip, MII_LPA, &lpa); - 
if (err) { - dev_err(chip->dev, "can't read Serdes PHY LPA: %d\n", err); - return err; - } - - return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state); -} - -int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port, - int lane) -{ - u16 bmcr; - int err; - - err = mv88e6352_serdes_read(chip, MII_BMCR, &bmcr); - if (err) - return err; - - return mv88e6352_serdes_write(chip, MII_BMCR, bmcr | BMCR_ANRESTART); -} - -int mv88e6352_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port, - int lane, int speed, int duplex) -{ - u16 val, bmcr; - int err; - - err = mv88e6352_serdes_read(chip, MII_BMCR, &val); - if (err) - return err; - - bmcr = val & ~(BMCR_SPEED100 | BMCR_FULLDPLX | BMCR_SPEED1000); - switch (speed) { - case SPEED_1000: - bmcr |= BMCR_SPEED1000; - break; - case SPEED_100: - bmcr |= BMCR_SPEED100; - break; - case SPEED_10: - break; - } - - if (duplex == DUPLEX_FULL) - bmcr |= BMCR_FULLDPLX; - - if (bmcr == val) - return 0; - - return mv88e6352_serdes_write(chip, MII_BMCR, bmcr); -} - -int mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) -{ - u8 cmode = chip->ports[port].cmode; - int lane = -ENODEV; - - if ((cmode == MV88E6XXX_PORT_STS_CMODE_100BASEX) || - (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX) || - (cmode == MV88E6XXX_PORT_STS_CMODE_SGMII)) - lane = 0xff; /* Unused */ - - return lane; -} - struct mv88e6352_serdes_hw_stat { char string[ETH_GSTRING_LEN]; int sizeof_stat; @@ -363,51 +202,6 @@ int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, return ARRAY_SIZE(mv88e6352_serdes_hw_stats); } -static void mv88e6352_serdes_irq_link(struct mv88e6xxx_chip *chip, int port) -{ - u16 bmsr; - int err; - - /* If the link has dropped, we want to know about it. */ - err = mv88e6352_serdes_read(chip, MII_BMSR, &bmsr); - if (err) { - dev_err(chip->dev, "can't read Serdes BMSR: %d\n", err); - return; - } - - dsa_port_phylink_mac_change(chip->ds, port, !!(bmsr & BMSR_LSTATUS)); -} - -irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, - int lane) -{ - irqreturn_t ret = IRQ_NONE; - u16 status; - int err; - - err = mv88e6352_serdes_read(chip, MV88E6352_SERDES_INT_STATUS, &status); - if (err) - return ret; - - if (status & MV88E6352_SERDES_INT_LINK_CHANGE) { - ret = IRQ_HANDLED; - mv88e6352_serdes_irq_link(chip, port); - } - - return ret; -} - -int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane, - bool enable) -{ - u16 val = 0; - - if (enable) - val |= MV88E6352_SERDES_INT_LINK_CHANGE; - - return mv88e6352_serdes_write(chip, MV88E6352_SERDES_INT_ENABLE, val); -} - unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port) { return irq_find_mapping(chip->g2_irq.domain, MV88E6352_SERDES_IRQ); @@ -461,115 +255,6 @@ int mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) return lane; } -int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, - bool up) -{ - /* The serdes power can't be controlled on this switch chip but we need - * to supply this function to avoid returning -EOPNOTSUPP in - * mv88e6xxx_serdes_power_up/mv88e6xxx_serdes_power_down - */ - return 0; -} - -int mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) -{ - /* There are no configurable serdes lanes on this switch chip but we - * need to return a non-negative lane number so that callers of - * mv88e6xxx_serdes_get_lane() know this is a serdes port. 
- */ - switch (chip->ports[port].cmode) { - case MV88E6185_PORT_STS_CMODE_SERDES: - case MV88E6185_PORT_STS_CMODE_1000BASE_X: - return 0; - default: - return -ENODEV; - } -} - -int mv88e6185_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, - int lane, struct phylink_link_state *state) -{ - int err; - u16 status; - - err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &status); - if (err) - return err; - - state->link = !!(status & MV88E6XXX_PORT_STS_LINK); - - if (state->link) { - state->duplex = status & MV88E6XXX_PORT_STS_DUPLEX ? DUPLEX_FULL : DUPLEX_HALF; - - switch (status & MV88E6XXX_PORT_STS_SPEED_MASK) { - case MV88E6XXX_PORT_STS_SPEED_1000: - state->speed = SPEED_1000; - break; - case MV88E6XXX_PORT_STS_SPEED_100: - state->speed = SPEED_100; - break; - case MV88E6XXX_PORT_STS_SPEED_10: - state->speed = SPEED_10; - break; - default: - dev_err(chip->dev, "invalid PHY speed\n"); - return -EINVAL; - } - } else { - state->duplex = DUPLEX_UNKNOWN; - state->speed = SPEED_UNKNOWN; - } - - return 0; -} - -int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane, - bool enable) -{ - u8 cmode = chip->ports[port].cmode; - - /* The serdes interrupts are enabled in the G2_INT_MASK register. We - * need to return 0 to avoid returning -EOPNOTSUPP in - * mv88e6xxx_serdes_irq_enable/mv88e6xxx_serdes_irq_disable - */ - switch (cmode) { - case MV88E6185_PORT_STS_CMODE_SERDES: - case MV88E6185_PORT_STS_CMODE_1000BASE_X: - return 0; - } - - return -EOPNOTSUPP; -} - -static void mv88e6097_serdes_irq_link(struct mv88e6xxx_chip *chip, int port) -{ - u16 status; - int err; - - err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &status); - if (err) { - dev_err(chip->dev, "can't read port status: %d\n", err); - return; - } - - dsa_port_phylink_mac_change(chip->ds, port, !!(status & MV88E6XXX_PORT_STS_LINK)); -} - -irqreturn_t mv88e6097_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, - int lane) -{ - u8 cmode = chip->ports[port].cmode; - - switch (cmode) { - case MV88E6185_PORT_STS_CMODE_SERDES: - case MV88E6185_PORT_STS_CMODE_1000BASE_X: - mv88e6097_serdes_irq_link(chip, port); - return IRQ_HANDLED; - } - - return IRQ_NONE; -} - int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) { u8 cmode = chip->ports[port].cmode; @@ -690,57 +375,6 @@ int mv88e6393x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) return lane; } -/* Set power up/down for 10GBASE-R and 10GBASE-X4/X2 */ -static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, int lane, - bool up) -{ - u16 val, new_val; - int err; - - err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_10G_CTRL1, &val); - - if (err) - return err; - - if (up) - new_val = val & ~(MDIO_CTRL1_RESET | - MDIO_PCS_CTRL1_LOOPBACK | - MDIO_CTRL1_LPOWER); - else - new_val = val | MDIO_CTRL1_LPOWER; - - if (val != new_val) - err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_10G_CTRL1, new_val); - - return err; -} - -/* Set power up/down for SGMII and 1000Base-X */ -static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, int lane, - bool up) -{ - u16 val, new_val; - int err; - - err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_BMCR, &val); - if (err) - return err; - - if (up) - new_val = val & ~(BMCR_RESET | BMCR_LOOPBACK | BMCR_PDOWN); - else - new_val = val | BMCR_PDOWN; - - if (val != new_val) - err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_BMCR, new_val); - - return err; -} - struct 
mv88e6390_serdes_hw_stat { char string[ETH_GSTRING_LEN]; int reg; @@ -814,484 +448,6 @@ int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, return ARRAY_SIZE(mv88e6390_serdes_hw_stats); } -static int mv88e6390_serdes_enable_checker(struct mv88e6xxx_chip *chip, int lane) -{ - u16 reg; - int err; - - err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_PG_CONTROL, ®); - if (err) - return err; - - reg |= MV88E6390_PG_CONTROL_ENABLE_PC; - return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_PG_CONTROL, reg); -} - -int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, - bool up) -{ - u8 cmode = chip->ports[port].cmode; - int err; - - switch (cmode) { - case MV88E6XXX_PORT_STS_CMODE_SGMII: - case MV88E6XXX_PORT_STS_CMODE_1000BASEX: - case MV88E6XXX_PORT_STS_CMODE_2500BASEX: - err = mv88e6390_serdes_power_sgmii(chip, lane, up); - break; - case MV88E6XXX_PORT_STS_CMODE_XAUI: - case MV88E6XXX_PORT_STS_CMODE_RXAUI: - err = mv88e6390_serdes_power_10g(chip, lane, up); - break; - default: - err = -EINVAL; - break; - } - - if (!err && up) - err = mv88e6390_serdes_enable_checker(chip, lane); - - return err; -} - -int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, - int lane, unsigned int mode, - phy_interface_t interface, - const unsigned long *advertise) -{ - u16 val, bmcr, adv; - bool changed; - int err; - - switch (interface) { - case PHY_INTERFACE_MODE_SGMII: - adv = 0x0001; - break; - - case PHY_INTERFACE_MODE_1000BASEX: - adv = linkmode_adv_to_mii_adv_x(advertise, - ETHTOOL_LINK_MODE_1000baseX_Full_BIT); - break; - - case PHY_INTERFACE_MODE_2500BASEX: - adv = linkmode_adv_to_mii_adv_x(advertise, - ETHTOOL_LINK_MODE_2500baseX_Full_BIT); - break; - - default: - return 0; - } - - err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_ADVERTISE, &val); - if (err) - return err; - - changed = val != adv; - if (changed) { - err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_ADVERTISE, adv); - if (err) - return err; - } - - err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_BMCR, &val); - if (err) - return err; - - if (phylink_autoneg_inband(mode)) - bmcr = val | BMCR_ANENABLE; - else - bmcr = val & ~BMCR_ANENABLE; - - /* setting ANENABLE triggers a restart of negotiation */ - if (bmcr == val) - return changed; - - return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_BMCR, bmcr); -} - -static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip, - int port, int lane, struct phylink_link_state *state) -{ - u16 bmsr, lpa, status; - int err; - - err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_BMSR, &bmsr); - if (err) { - dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err); - return err; - } - - err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_PHY_STATUS, &status); - if (err) { - dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err); - return err; - } - - err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_LPA, &lpa); - if (err) { - dev_err(chip->dev, "can't read Serdes PHY LPA: %d\n", err); - return err; - } - - return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state); -} - -static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip, - int port, int lane, struct phylink_link_state *state) -{ - u16 status; - int err; - - err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_10G_STAT1, 
&status); - if (err) - return err; - - state->link = !!(status & MDIO_STAT1_LSTATUS); - if (state->link) { - state->speed = SPEED_10000; - state->duplex = DUPLEX_FULL; - } - - return 0; -} - -static int mv88e6393x_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip, - int port, int lane, - struct phylink_link_state *state) -{ - u16 status; - int err; - - err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_10G_STAT1, &status); - if (err) - return err; - - state->link = !!(status & MDIO_STAT1_LSTATUS); - if (state->link) { - if (state->interface == PHY_INTERFACE_MODE_5GBASER) - state->speed = SPEED_5000; - else - state->speed = SPEED_10000; - state->duplex = DUPLEX_FULL; - } - return 0; -} - -/* USXGMII registers for Marvell switch 88e639x are undocumented and this function is based - * on some educated guesses. It appears that there are no status bits related to - * autonegotiation complete or flow control. - */ -static int mv88e639x_serdes_pcs_get_state_usxgmii(struct mv88e6xxx_chip *chip, - int port, int lane, - struct phylink_link_state *state) -{ - u16 status, lp_status; - int err; - - err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_USXGMII_PHY_STATUS, &status); - if (err) { - dev_err(chip->dev, "can't read Serdes USXGMII PHY status: %d\n", err); - return err; - } - dev_dbg(chip->dev, "USXGMII PHY status: 0x%x\n", status); - - state->link = !!(status & MDIO_USXGMII_LINK); - state->an_complete = state->link; - - if (state->link) { - err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_USXGMII_LP_STATUS, &lp_status); - if (err) { - dev_err(chip->dev, "can't read Serdes USXGMII LP status: %d\n", err); - return err; - } - dev_dbg(chip->dev, "USXGMII LP status: 0x%x\n", lp_status); - /* lp_status appears to include the "link" bit as per USXGMII spec. 
*/ - phylink_decode_usxgmii_word(state, lp_status); - } - return 0; -} - -int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, - int lane, struct phylink_link_state *state) -{ - switch (state->interface) { - case PHY_INTERFACE_MODE_SGMII: - case PHY_INTERFACE_MODE_1000BASEX: - case PHY_INTERFACE_MODE_2500BASEX: - return mv88e6390_serdes_pcs_get_state_sgmii(chip, port, lane, - state); - case PHY_INTERFACE_MODE_XAUI: - case PHY_INTERFACE_MODE_RXAUI: - return mv88e6390_serdes_pcs_get_state_10g(chip, port, lane, - state); - - default: - return -EOPNOTSUPP; - } -} - -int mv88e6393x_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, - int lane, struct phylink_link_state *state) -{ - switch (state->interface) { - case PHY_INTERFACE_MODE_SGMII: - case PHY_INTERFACE_MODE_1000BASEX: - case PHY_INTERFACE_MODE_2500BASEX: - return mv88e6390_serdes_pcs_get_state_sgmii(chip, port, lane, - state); - case PHY_INTERFACE_MODE_5GBASER: - case PHY_INTERFACE_MODE_10GBASER: - return mv88e6393x_serdes_pcs_get_state_10g(chip, port, lane, - state); - case PHY_INTERFACE_MODE_USXGMII: - return mv88e639x_serdes_pcs_get_state_usxgmii(chip, port, lane, - state); - - default: - return -EOPNOTSUPP; - } -} - -int mv88e6390_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port, - int lane) -{ - u16 bmcr; - int err; - - err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_BMCR, &bmcr); - if (err) - return err; - - return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_BMCR, - bmcr | BMCR_ANRESTART); -} - -int mv88e6390_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port, - int lane, int speed, int duplex) -{ - u16 val, bmcr; - int err; - - err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_BMCR, &val); - if (err) - return err; - - bmcr = val & ~(BMCR_SPEED100 | BMCR_FULLDPLX | BMCR_SPEED1000); - switch (speed) { - case SPEED_2500: - case SPEED_1000: - bmcr |= BMCR_SPEED1000; - break; - case SPEED_100: - bmcr |= BMCR_SPEED100; - break; - case SPEED_10: - break; - } - - if (duplex == DUPLEX_FULL) - bmcr |= BMCR_FULLDPLX; - - if (bmcr == val) - return 0; - - return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_BMCR, bmcr); -} - -static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip, - int port, int lane) -{ - u16 bmsr; - int err; - - /* If the link has dropped, we want to know about it. */ - err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_BMSR, &bmsr); - if (err) { - dev_err(chip->dev, "can't read Serdes BMSR: %d\n", err); - return; - } - - dsa_port_phylink_mac_change(chip->ds, port, !!(bmsr & BMSR_LSTATUS)); -} - -static void mv88e6393x_serdes_irq_link_10g(struct mv88e6xxx_chip *chip, - int port, u8 lane) -{ - u16 status; - int err; - - /* If the link has dropped, we want to know about it. 
*/ - err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_10G_STAT1, &status); - if (err) { - dev_err(chip->dev, "can't read Serdes STAT1: %d\n", err); - return; - } - - dsa_port_phylink_mac_change(chip->ds, port, !!(status & MDIO_STAT1_LSTATUS)); -} - -static int mv88e6390_serdes_irq_enable_sgmii(struct mv88e6xxx_chip *chip, - int lane, bool enable) -{ - u16 val = 0; - - if (enable) - val |= MV88E6390_SGMII_INT_LINK_DOWN | - MV88E6390_SGMII_INT_LINK_UP; - - return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_INT_ENABLE, val); -} - -int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane, - bool enable) -{ - u8 cmode = chip->ports[port].cmode; - - switch (cmode) { - case MV88E6XXX_PORT_STS_CMODE_SGMII: - case MV88E6XXX_PORT_STS_CMODE_1000BASEX: - case MV88E6XXX_PORT_STS_CMODE_2500BASEX: - return mv88e6390_serdes_irq_enable_sgmii(chip, lane, enable); - } - - return 0; -} - -static int mv88e6390_serdes_irq_status_sgmii(struct mv88e6xxx_chip *chip, - int lane, u16 *status) -{ - int err; - - err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_INT_STATUS, status); - - return err; -} - -static int mv88e6393x_serdes_irq_enable_10g(struct mv88e6xxx_chip *chip, - u8 lane, bool enable) -{ - u16 val = 0; - - if (enable) - val |= MV88E6393X_10G_INT_LINK_CHANGE; - - return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, - MV88E6393X_10G_INT_ENABLE, val); -} - -int mv88e6393x_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, - int lane, bool enable) -{ - u8 cmode = chip->ports[port].cmode; - - switch (cmode) { - case MV88E6XXX_PORT_STS_CMODE_SGMII: - case MV88E6XXX_PORT_STS_CMODE_1000BASEX: - case MV88E6XXX_PORT_STS_CMODE_2500BASEX: - return mv88e6390_serdes_irq_enable_sgmii(chip, lane, enable); - case MV88E6393X_PORT_STS_CMODE_5GBASER: - case MV88E6393X_PORT_STS_CMODE_10GBASER: - case MV88E6393X_PORT_STS_CMODE_USXGMII: - return mv88e6393x_serdes_irq_enable_10g(chip, lane, enable); - } - - return 0; -} - -static int mv88e6393x_serdes_irq_status_10g(struct mv88e6xxx_chip *chip, - u8 lane, u16 *status) -{ - int err; - - err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6393X_10G_INT_STATUS, status); - - return err; -} - -irqreturn_t mv88e6393x_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, - int lane) -{ - u8 cmode = chip->ports[port].cmode; - irqreturn_t ret = IRQ_NONE; - u16 status; - int err; - - switch (cmode) { - case MV88E6XXX_PORT_STS_CMODE_SGMII: - case MV88E6XXX_PORT_STS_CMODE_1000BASEX: - case MV88E6XXX_PORT_STS_CMODE_2500BASEX: - err = mv88e6390_serdes_irq_status_sgmii(chip, lane, &status); - if (err) - return ret; - if (status & (MV88E6390_SGMII_INT_LINK_DOWN | - MV88E6390_SGMII_INT_LINK_UP)) { - ret = IRQ_HANDLED; - mv88e6390_serdes_irq_link_sgmii(chip, port, lane); - } - break; - case MV88E6393X_PORT_STS_CMODE_5GBASER: - case MV88E6393X_PORT_STS_CMODE_10GBASER: - case MV88E6393X_PORT_STS_CMODE_USXGMII: - err = mv88e6393x_serdes_irq_status_10g(chip, lane, &status); - if (err) - return err; - if (status & MV88E6393X_10G_INT_LINK_CHANGE) { - ret = IRQ_HANDLED; - mv88e6393x_serdes_irq_link_10g(chip, port, lane); - } - break; - } - - return ret; -} - -irqreturn_t mv88e6390_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, - int lane) -{ - u8 cmode = chip->ports[port].cmode; - irqreturn_t ret = IRQ_NONE; - u16 status; - int err; - - switch (cmode) { - case MV88E6XXX_PORT_STS_CMODE_SGMII: - case MV88E6XXX_PORT_STS_CMODE_1000BASEX: - case MV88E6XXX_PORT_STS_CMODE_2500BASEX: - 
err = mv88e6390_serdes_irq_status_sgmii(chip, lane, &status); - if (err) - return ret; - if (status & (MV88E6390_SGMII_INT_LINK_DOWN | - MV88E6390_SGMII_INT_LINK_UP)) { - ret = IRQ_HANDLED; - mv88e6390_serdes_irq_link_sgmii(chip, port, lane); - } - } - - return ret; -} - unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port) { return irq_find_mapping(chip->g2_irq.domain, port); @@ -1390,259 +546,3 @@ int mv88e6352_serdes_set_tx_amplitude(struct mv88e6xxx_chip *chip, int port, return mv88e6352_serdes_write(chip, MV88E6352_SERDES_SPEC_CTRL2, ctrl); } - -static int mv88e6393x_serdes_power_lane(struct mv88e6xxx_chip *chip, int lane, - bool on) -{ - u16 reg; - int err; - - err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6393X_SERDES_CTRL1, ®); - if (err) - return err; - - if (on) - reg &= ~(MV88E6393X_SERDES_CTRL1_TX_PDOWN | - MV88E6393X_SERDES_CTRL1_RX_PDOWN); - else - reg |= MV88E6393X_SERDES_CTRL1_TX_PDOWN | - MV88E6393X_SERDES_CTRL1_RX_PDOWN; - - return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, - MV88E6393X_SERDES_CTRL1, reg); -} - -static int mv88e6393x_serdes_erratum_4_6(struct mv88e6xxx_chip *chip, int lane) -{ - u16 reg; - int err; - - /* mv88e6393x family errata 4.6: - * Cannot clear PwrDn bit on SERDES if device is configured CPU_MGD - * mode or P0_mode is configured for [x]MII. - * Workaround: Set SERDES register 4.F002 bit 5=0 and bit 15=1. - * - * It seems that after this workaround the SERDES is automatically - * powered up (the bit is cleared), so power it down. - */ - err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6393X_SERDES_POC, ®); - if (err) - return err; - - reg &= ~MV88E6393X_SERDES_POC_PDOWN; - reg |= MV88E6393X_SERDES_POC_RESET; - - err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, - MV88E6393X_SERDES_POC, reg); - if (err) - return err; - - err = mv88e6390_serdes_power_sgmii(chip, lane, false); - if (err) - return err; - - return mv88e6393x_serdes_power_lane(chip, lane, false); -} - -int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip) -{ - int err; - - err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT0_LANE); - if (err) - return err; - - err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT9_LANE); - if (err) - return err; - - return mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT10_LANE); -} - -static int mv88e6393x_serdes_erratum_4_8(struct mv88e6xxx_chip *chip, int lane) -{ - u16 reg, pcs; - int err; - - /* mv88e6393x family errata 4.8: - * When a SERDES port is operating in 1000BASE-X or SGMII mode link may - * not come up after hardware reset or software reset of SERDES core. - * Workaround is to write SERDES register 4.F074.14=1 for only those - * modes and 0 in all other modes. 
- */ - err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6393X_SERDES_POC, &pcs); - if (err) - return err; - - pcs &= MV88E6393X_SERDES_POC_PCS_MASK; - - err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6393X_ERRATA_4_8_REG, ®); - if (err) - return err; - - if (pcs == MV88E6393X_SERDES_POC_PCS_1000BASEX || - pcs == MV88E6393X_SERDES_POC_PCS_SGMII_PHY || - pcs == MV88E6393X_SERDES_POC_PCS_SGMII_MAC) - reg |= MV88E6393X_ERRATA_4_8_BIT; - else - reg &= ~MV88E6393X_ERRATA_4_8_BIT; - - return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, - MV88E6393X_ERRATA_4_8_REG, reg); -} - -static int mv88e6393x_serdes_erratum_5_2(struct mv88e6xxx_chip *chip, int lane, - u8 cmode) -{ - static const struct { - u16 dev, reg, val, mask; - } fixes[] = { - { MDIO_MMD_VEND1, 0x8093, 0xcb5a, 0xffff }, - { MDIO_MMD_VEND1, 0x8171, 0x7088, 0xffff }, - { MDIO_MMD_VEND1, 0x80c9, 0x311a, 0xffff }, - { MDIO_MMD_VEND1, 0x80a2, 0x8000, 0xff7f }, - { MDIO_MMD_VEND1, 0x80a9, 0x0000, 0xfff0 }, - { MDIO_MMD_VEND1, 0x80a3, 0x0000, 0xf8ff }, - { MDIO_MMD_PHYXS, MV88E6393X_SERDES_POC, - MV88E6393X_SERDES_POC_RESET, MV88E6393X_SERDES_POC_RESET }, - }; - int err, i; - u16 reg; - - /* mv88e6393x family errata 5.2: - * For optimal signal integrity the following sequence should be applied - * to SERDES operating in 10G mode. These registers only apply to 10G - * operation and have no effect on other speeds. - */ - if (cmode != MV88E6393X_PORT_STS_CMODE_10GBASER && - cmode != MV88E6393X_PORT_STS_CMODE_USXGMII) - return 0; - - for (i = 0; i < ARRAY_SIZE(fixes); ++i) { - err = mv88e6390_serdes_read(chip, lane, fixes[i].dev, - fixes[i].reg, ®); - if (err) - return err; - - reg &= ~fixes[i].mask; - reg |= fixes[i].val; - - err = mv88e6390_serdes_write(chip, lane, fixes[i].dev, - fixes[i].reg, reg); - if (err) - return err; - } - - return 0; -} - -static int mv88e6393x_serdes_fix_2500basex_an(struct mv88e6xxx_chip *chip, - int lane, u8 cmode, bool on) -{ - u16 reg; - int err; - - if (cmode != MV88E6XXX_PORT_STS_CMODE_2500BASEX) - return 0; - - /* Inband AN is broken on Amethyst in 2500base-x mode when set by - * standard mechanism (via cmode). - * We can get around this by configuring the PCS mode to 1000base-x - * and then writing value 0x58 to register 1e.8000. (This must be done - * while SerDes receiver and transmitter are disabled, which is, when - * this function is called.) - * It seem that when we do this configuration to 2500base-x mode (by - * changing PCS mode to 1000base-x and frequency to 3.125 GHz from - * 1.25 GHz) and then configure to sgmii or 1000base-x, the device - * thinks that it already has SerDes at 1.25 GHz and does not change - * the 1e.8000 register, leaving SerDes at 3.125 GHz. - * To avoid this, change PCS mode back to 2500base-x when disabling - * SerDes from 2500base-x mode. 
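Erratum 5.2 above is applied as a table of masked register updates, one read/modify/write per fixes[] entry. A sketch isolating that single step, using the same mv88e6390_serdes_read()/write() accessors the loop uses:

/* Sketch only: one masked update, as performed per fixes[] entry above. */
static int example_serdes_rmw(struct mv88e6xxx_chip *chip, int lane,
			      int dev, int addr, u16 mask, u16 val)
{
	u16 reg;
	int err;

	err = mv88e6390_serdes_read(chip, lane, dev, addr, &reg);
	if (err)
		return err;

	reg &= ~mask;	/* clear the field covered by the mask... */
	reg |= val;	/* ...then set the new value */

	return mv88e6390_serdes_write(chip, lane, dev, addr, reg);
}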
- */ - err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6393X_SERDES_POC, ®); - if (err) - return err; - - reg &= ~(MV88E6393X_SERDES_POC_PCS_MASK | MV88E6393X_SERDES_POC_AN); - if (on) - reg |= MV88E6393X_SERDES_POC_PCS_1000BASEX | - MV88E6393X_SERDES_POC_AN; - else - reg |= MV88E6393X_SERDES_POC_PCS_2500BASEX; - reg |= MV88E6393X_SERDES_POC_RESET; - - err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, - MV88E6393X_SERDES_POC, reg); - if (err) - return err; - - err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_VEND1, 0x8000, 0x58); - if (err) - return err; - - return 0; -} - -int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, - bool on) -{ - u8 cmode = chip->ports[port].cmode; - int err; - - if (port != 0 && port != 9 && port != 10) - return -EOPNOTSUPP; - - if (on) { - err = mv88e6393x_serdes_erratum_4_8(chip, lane); - if (err) - return err; - - err = mv88e6393x_serdes_erratum_5_2(chip, lane, cmode); - if (err) - return err; - - err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode, - true); - if (err) - return err; - - err = mv88e6393x_serdes_power_lane(chip, lane, true); - if (err) - return err; - } - - switch (cmode) { - case MV88E6XXX_PORT_STS_CMODE_SGMII: - case MV88E6XXX_PORT_STS_CMODE_1000BASEX: - case MV88E6XXX_PORT_STS_CMODE_2500BASEX: - err = mv88e6390_serdes_power_sgmii(chip, lane, on); - break; - case MV88E6393X_PORT_STS_CMODE_5GBASER: - case MV88E6393X_PORT_STS_CMODE_10GBASER: - case MV88E6393X_PORT_STS_CMODE_USXGMII: - err = mv88e6390_serdes_power_10g(chip, lane, on); - break; - default: - err = -EINVAL; - break; - } - - if (err) - return err; - - if (!on) { - err = mv88e6393x_serdes_power_lane(chip, lane, false); - if (err) - return err; - - err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode, - false); - } - - return err; -} diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h index e245687ddb1d..aac95cab46e3 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.h +++ b/drivers/net/dsa/mv88e6xxx/serdes.h @@ -12,6 +12,8 @@ #include "chip.h" +struct phylink_link_state; + #define MV88E6352_ADDR_SERDES 0x0f #define MV88E6352_SERDES_PAGE_FIBER 0x01 #define MV88E6352_SERDES_IRQ 0x0b @@ -44,6 +46,10 @@ /* 10GBASE-R and 10GBASE-X4/X2 */ #define MV88E6390_10G_CTRL1 (0x1000 + MDIO_CTRL1) #define MV88E6390_10G_STAT1 (0x1000 + MDIO_STAT1) +#define MV88E6390_10G_INT_ENABLE 0x9001 +#define MV88E6390_10G_INT_LINK_DOWN BIT(3) +#define MV88E6390_10G_INT_LINK_UP BIT(2) +#define MV88E6390_10G_INT_STATUS 0x9003 #define MV88E6393X_10G_INT_ENABLE 0x9000 #define MV88E6393X_10G_INT_LINK_CHANGE BIT(2) #define MV88E6393X_10G_INT_STATUS 0x9001 @@ -107,65 +113,17 @@ #define MV88E6393X_ERRATA_4_8_REG 0xF074 #define MV88E6393X_ERRATA_4_8_BIT BIT(14) -int mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); +int mv88e6xxx_pcs_decode_state(struct device *dev, u16 bmsr, u16 lpa, + u16 status, struct phylink_link_state *state); + int mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); -int mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); int mv88e6393x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); -int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, - int lane, unsigned int mode, - phy_interface_t interface, - const unsigned long *advertise); -int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, - 
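The reworked serdes.h below forward-declares struct phylink_link_state and exports mv88e6xxx_pcs_decode_state(), which turns three raw register words into a phylink state. A hedged sketch of what a caller presumably looks like; the reads are hypothetical, and the real callers live in the PCS implementation files this conversion introduces, which are not part of this hunk:

/* Sketch, assuming a hypothetical example_read() accessor: feed the raw
 * BMSR/LPA/status words into the shared decoder declared in serdes.h.
 */
static void example_pcs_get_state(struct mv88e6xxx_chip *chip, int lane,
				  struct phylink_link_state *state)
{
	u16 bmsr, lpa, status;

	example_read(chip, lane, MII_BMSR, &bmsr);	/* hypothetical */
	example_read(chip, lane, MII_LPA, &lpa);	/* hypothetical */
	example_read(chip, lane, EXAMPLE_STATUS, &status);

	mv88e6xxx_pcs_decode_state(chip->dev, bmsr, lpa, status, state);
}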
int lane, unsigned int mode, - phy_interface_t interface, - const unsigned long *advertise); -int mv88e6185_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, - int lane, struct phylink_link_state *state); -int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, - int lane, struct phylink_link_state *state); -int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, - int lane, struct phylink_link_state *state); -int mv88e6393x_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, - int lane, struct phylink_link_state *state); -int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port, - int lane); -int mv88e6390_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port, - int lane); -int mv88e6352_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port, - int lane, int speed, int duplex); -int mv88e6390_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port, - int lane, int speed, int duplex); unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port); unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port); -int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, - bool up); -int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, - bool on); -int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, - bool on); -int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, - bool on); -int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip); -int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane, - bool enable); -int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane, - bool enable); -int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane, - bool enable); -int mv88e6393x_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, - int lane, bool enable); -irqreturn_t mv88e6097_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, - int lane); -irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, - int lane); -irqreturn_t mv88e6390_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, - int lane); -irqreturn_t mv88e6393x_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, - int lane); int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port); int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip, int port, uint8_t *data); @@ -195,24 +153,6 @@ static inline int mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip, return chip->info->ops->serdes_get_lane(chip, port); } -static inline int mv88e6xxx_serdes_power_up(struct mv88e6xxx_chip *chip, - int port, int lane) -{ - if (!chip->info->ops->serdes_power) - return -EOPNOTSUPP; - - return chip->info->ops->serdes_power(chip, port, lane, true); -} - -static inline int mv88e6xxx_serdes_power_down(struct mv88e6xxx_chip *chip, - int port, int lane) -{ - if (!chip->info->ops->serdes_power) - return -EOPNOTSUPP; - - return chip->info->ops->serdes_power(chip, port, lane, false); -} - static inline unsigned int mv88e6xxx_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port) { @@ -222,31 +162,9 @@ mv88e6xxx_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port) return chip->info->ops->serdes_irq_mapping(chip, port); } -static inline int mv88e6xxx_serdes_irq_enable(struct mv88e6xxx_chip *chip, - int port, int lane) -{ - if (!chip->info->ops->serdes_irq_enable) - return -EOPNOTSUPP; - - return 
chip->info->ops->serdes_irq_enable(chip, port, lane, true); -} - -static inline int mv88e6xxx_serdes_irq_disable(struct mv88e6xxx_chip *chip, - int port, int lane) -{ - if (!chip->info->ops->serdes_irq_enable) - return -EOPNOTSUPP; - - return chip->info->ops->serdes_irq_enable(chip, port, lane, false); -} - -static inline irqreturn_t -mv88e6xxx_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, int lane) -{ - if (!chip->info->ops->serdes_irq_status) - return IRQ_NONE; - - return chip->info->ops->serdes_irq_status(chip, port, lane); -} +extern const struct mv88e6xxx_pcs_ops mv88e6185_pcs_ops; +extern const struct mv88e6xxx_pcs_ops mv88e6352_pcs_ops; +extern const struct mv88e6xxx_pcs_ops mv88e6390_pcs_ops; +extern const struct mv88e6xxx_pcs_ops mv88e6393x_pcs_ops; #endif diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index bef879c6d500..61e95487732d 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -42,22 +42,22 @@ static struct net_device *felix_classify_db(struct dsa_db db) } } -static int felix_cpu_port_for_master(struct dsa_switch *ds, - struct net_device *master) +static int felix_cpu_port_for_conduit(struct dsa_switch *ds, + struct net_device *conduit) { struct ocelot *ocelot = ds->priv; struct dsa_port *cpu_dp; int lag; - if (netif_is_lag_master(master)) { + if (netif_is_lag_master(conduit)) { mutex_lock(&ocelot->fwd_domain_lock); - lag = ocelot_bond_get_id(ocelot, master); + lag = ocelot_bond_get_id(ocelot, conduit); mutex_unlock(&ocelot->fwd_domain_lock); return lag; } - cpu_dp = master->dsa_ptr; + cpu_dp = conduit->dsa_ptr; return cpu_dp->index; } @@ -366,7 +366,7 @@ static int felix_update_trapping_destinations(struct dsa_switch *ds, * is the mode through which frames can be injected from and extracted to an * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU * running Linux, and this forms a DSA setup together with the enetc or fman - * DSA master. + * DSA conduit. */ static void felix_npi_port_init(struct ocelot *ocelot, int port) { @@ -441,16 +441,16 @@ static unsigned long felix_tag_npi_get_host_fwd_mask(struct dsa_switch *ds) return BIT(ocelot->num_phys_ports); } -static int felix_tag_npi_change_master(struct dsa_switch *ds, int port, - struct net_device *master, - struct netlink_ext_ack *extack) +static int felix_tag_npi_change_conduit(struct dsa_switch *ds, int port, + struct net_device *conduit, + struct netlink_ext_ack *extack) { struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; struct ocelot *ocelot = ds->priv; - if (netif_is_lag_master(master)) { + if (netif_is_lag_master(conduit)) { NL_SET_ERR_MSG_MOD(extack, - "LAG DSA master only supported using ocelot-8021q"); + "LAG DSA conduit only supported using ocelot-8021q"); return -EOPNOTSUPP; } @@ -459,24 +459,24 @@ static int felix_tag_npi_change_master(struct dsa_switch *ds, int port, * come back up until they're all changed to the new one. 
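The inline wrappers deleted above all used the same guard: if the chip's ops table does not provide the hook, return -EOPNOTSUPP (or IRQ_NONE for the IRQ case) rather than dereferencing a NULL function pointer. That idiom in isolation, with hypothetical names:

/* Sketch of the optional-ops guard the removed wrappers shared. */
struct example_ops {
	int (*do_thing)(int arg);	/* optional hook, may be NULL */
};

static inline int example_optional_op(const struct example_ops *ops, int arg)
{
	if (!ops->do_thing)
		return -EOPNOTSUPP;	/* fail soft instead of oopsing */

	return ops->do_thing(arg);
}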
*/ dsa_switch_for_each_user_port(other_dp, ds) { - struct net_device *slave = other_dp->slave; + struct net_device *user = other_dp->user; - if (other_dp != dp && (slave->flags & IFF_UP) && - dsa_port_to_master(other_dp) != master) { + if (other_dp != dp && (user->flags & IFF_UP) && + dsa_port_to_conduit(other_dp) != conduit) { NL_SET_ERR_MSG_MOD(extack, - "Cannot change while old master still has users"); + "Cannot change while old conduit still has users"); return -EOPNOTSUPP; } } felix_npi_port_deinit(ocelot, ocelot->npi); - felix_npi_port_init(ocelot, felix_cpu_port_for_master(ds, master)); + felix_npi_port_init(ocelot, felix_cpu_port_for_conduit(ds, conduit)); return 0; } /* Alternatively to using the NPI functionality, that same hardware MAC - * connected internally to the enetc or fman DSA master can be configured to + * connected internally to the enetc or fman DSA conduit can be configured to * use the software-defined tag_8021q frame format. As far as the hardware is * concerned, it thinks it is a "dumb switch" - the queues of the CPU port * module are now disconnected from it, but can still be accessed through @@ -486,7 +486,7 @@ static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = { .setup = felix_tag_npi_setup, .teardown = felix_tag_npi_teardown, .get_host_fwd_mask = felix_tag_npi_get_host_fwd_mask, - .change_master = felix_tag_npi_change_master, + .change_conduit = felix_tag_npi_change_conduit, }; static int felix_tag_8021q_setup(struct dsa_switch *ds) @@ -561,11 +561,11 @@ static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds) return dsa_cpu_ports(ds); } -static int felix_tag_8021q_change_master(struct dsa_switch *ds, int port, - struct net_device *master, - struct netlink_ext_ack *extack) +static int felix_tag_8021q_change_conduit(struct dsa_switch *ds, int port, + struct net_device *conduit, + struct netlink_ext_ack *extack) { - int cpu = felix_cpu_port_for_master(ds, master); + int cpu = felix_cpu_port_for_conduit(ds, conduit); struct ocelot *ocelot = ds->priv; ocelot_port_unassign_dsa_8021q_cpu(ocelot, port); @@ -578,7 +578,7 @@ static const struct felix_tag_proto_ops felix_tag_8021q_proto_ops = { .setup = felix_tag_8021q_setup, .teardown = felix_tag_8021q_teardown, .get_host_fwd_mask = felix_tag_8021q_get_host_fwd_mask, - .change_master = felix_tag_8021q_change_master, + .change_conduit = felix_tag_8021q_change_conduit, }; static void felix_set_host_flood(struct dsa_switch *ds, unsigned long mask, @@ -741,14 +741,14 @@ static void felix_port_set_host_flood(struct dsa_switch *ds, int port, !!felix->host_flood_mc_mask, true); } -static int felix_port_change_master(struct dsa_switch *ds, int port, - struct net_device *master, - struct netlink_ext_ack *extack) +static int felix_port_change_conduit(struct dsa_switch *ds, int port, + struct net_device *conduit, + struct netlink_ext_ack *extack) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); - return felix->tag_proto_ops->change_master(ds, port, master, extack); + return felix->tag_proto_ops->change_conduit(ds, port, conduit, extack); } static int felix_set_ageing_time(struct dsa_switch *ds, @@ -953,7 +953,7 @@ static int felix_lag_join(struct dsa_switch *ds, int port, if (!dsa_is_cpu_port(ds, port)) return 0; - return felix_port_change_master(ds, port, lag.dev, extack); + return felix_port_change_conduit(ds, port, lag.dev, extack); } static int felix_lag_leave(struct dsa_switch *ds, int port, @@ -967,7 +967,7 @@ static int felix_lag_leave(struct 
dsa_switch *ds, int port, if (!dsa_is_cpu_port(ds, port)) return 0; - return felix_port_change_master(ds, port, lag.dev, NULL); + return felix_port_change_conduit(ds, port, lag.dev, NULL); } static int felix_lag_change(struct dsa_switch *ds, int port) @@ -1042,12 +1042,6 @@ static void felix_phylink_get_caps(struct dsa_switch *ds, int port, { struct ocelot *ocelot = ds->priv; - /* This driver does not make use of the speed, duplex, pause or the - * advertisement in its mac_config, so it is safe to mark this driver - * as non-legacy. - */ - config->legacy_pre_march2020 = false; - config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD; @@ -1122,10 +1116,10 @@ static int felix_port_enable(struct dsa_switch *ds, int port, return 0; if (ocelot->npi >= 0) { - struct net_device *master = dsa_port_to_master(dp); + struct net_device *conduit = dsa_port_to_conduit(dp); - if (felix_cpu_port_for_master(ds, master) != ocelot->npi) { - dev_err(ds->dev, "Multiple masters are not allowed\n"); + if (felix_cpu_port_for_conduit(ds, conduit) != ocelot->npi) { + dev_err(ds->dev, "Multiple conduits are not allowed\n"); return -EINVAL; } } @@ -2170,7 +2164,7 @@ const struct dsa_switch_ops felix_switch_ops = { .port_add_dscp_prio = felix_port_add_dscp_prio, .port_del_dscp_prio = felix_port_del_dscp_prio, .port_set_host_flood = felix_port_set_host_flood, - .port_change_master = felix_port_change_master, + .port_change_conduit = felix_port_change_conduit, }; EXPORT_SYMBOL_GPL(felix_switch_ops); @@ -2182,7 +2176,7 @@ struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port) if (!dsa_is_user_port(ds, port)) return NULL; - return dsa_to_port(ds, port)->slave; + return dsa_to_port(ds, port)->user; } EXPORT_SYMBOL_GPL(felix_port_to_netdev); diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h index 1d4befe7cfe8..dbf5872fe367 100644 --- a/drivers/net/dsa/ocelot/felix.h +++ b/drivers/net/dsa/ocelot/felix.h @@ -77,9 +77,9 @@ struct felix_tag_proto_ops { int (*setup)(struct dsa_switch *ds); void (*teardown)(struct dsa_switch *ds); unsigned long (*get_host_fwd_mask)(struct dsa_switch *ds); - int (*change_master)(struct dsa_switch *ds, int port, - struct net_device *master, - struct netlink_ext_ack *extack); + int (*change_conduit)(struct dsa_switch *ds, int port, + struct net_device *conduit, + struct netlink_ext_ack *extack); }; extern const struct dsa_switch_ops felix_switch_ops; diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index f16daa9b1765..3c5509e75a54 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -16,6 +16,7 @@ #include <net/pkt_sched.h> #include <linux/iopoll.h> #include <linux/mdio.h> +#include <linux/of.h> #include <linux/pci.h> #include <linux/time.h> #include "felix.h" @@ -1748,10 +1749,10 @@ static int vsc9959_stream_identify(struct flow_cls_offload *f, struct flow_dissector *dissector = rule->match.dissector; if (dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_VLAN) | - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) + ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) return -EOPNOTSUPP; if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { diff --git a/drivers/net/dsa/ocelot/ocelot_ext.c b/drivers/net/dsa/ocelot/ocelot_ext.c index 
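Note that felix above (and qca8k later in this section) drops the config->legacy_pre_march2020 = false assignment from phylink_get_caps. Once drivers were converted to the modern phylink contract, that flag was on its way out of struct phylink_config upstream, so the assignment became dead code; what remains of the hook is the substantive part, filling mac_capabilities and the supported interfaces. A trimmed sketch, with the MAC_* values mirroring the felix hunk above:

/* Sketch of a post-cleanup phylink_get_caps: capabilities only. */
static void example_phylink_get_caps(struct dsa_switch *ds, int port,
				     struct phylink_config *config)
{
	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
				   MAC_10 | MAC_100 | MAC_1000FD |
				   MAC_2500FD;
}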
c29bee5a5c48..22187d831c4b 100644 --- a/drivers/net/dsa/ocelot/ocelot_ext.c +++ b/drivers/net/dsa/ocelot/ocelot_ext.c @@ -115,19 +115,17 @@ err_free_felix: return err; } -static int ocelot_ext_remove(struct platform_device *pdev) +static void ocelot_ext_remove(struct platform_device *pdev) { struct felix *felix = dev_get_drvdata(&pdev->dev); if (!felix) - return 0; + return; dsa_unregister_switch(felix->ds); kfree(felix->ds); kfree(felix); - - return 0; } static void ocelot_ext_shutdown(struct platform_device *pdev) @@ -154,7 +152,7 @@ static struct platform_driver ocelot_ext_switch_driver = { .of_match_table = ocelot_ext_switch_of_match, }, .probe = ocelot_ext_probe, - .remove = ocelot_ext_remove, + .remove_new = ocelot_ext_remove, .shutdown = ocelot_ext_shutdown, }; module_platform_driver(ocelot_ext_switch_driver); diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c index 15003b2af264..049930da0521 100644 --- a/drivers/net/dsa/ocelot/seville_vsc9953.c +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c @@ -2,13 +2,14 @@ /* Distributed Switch Architecture VSC9953 driver * Copyright (C) 2020, Maxim Kochetkov <fido_max@inbox.ru> */ +#include <linux/platform_device.h> #include <linux/types.h> #include <soc/mscc/ocelot_vcap.h> #include <soc/mscc/ocelot_sys.h> #include <soc/mscc/ocelot.h> #include <linux/mdio/mdio-mscc-miim.h> +#include <linux/mod_devicetable.h> #include <linux/of_mdio.h> -#include <linux/of_platform.h> #include <linux/pcs-lynx.h> #include <linux/dsa/ocelot.h> #include <linux/iopoll.h> @@ -1028,19 +1029,17 @@ err_alloc_felix: return err; } -static int seville_remove(struct platform_device *pdev) +static void seville_remove(struct platform_device *pdev) { struct felix *felix = platform_get_drvdata(pdev); if (!felix) - return 0; + return; dsa_unregister_switch(felix->ds); kfree(felix->ds); kfree(felix); - - return 0; } static void seville_shutdown(struct platform_device *pdev) @@ -1063,7 +1062,7 @@ MODULE_DEVICE_TABLE(of, seville_of_match); static struct platform_driver seville_vsc9953_driver = { .probe = seville_probe, - .remove = seville_remove, + .remove_new = seville_remove, .shutdown = seville_shutdown, .driver = { .name = "mscc_seville", diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c index 3b0937031499..8d9d271ac3af 100644 --- a/drivers/net/dsa/qca/ar9331.c +++ b/drivers/net/dsa/qca/ar9331.c @@ -1012,7 +1012,7 @@ static const struct regmap_config ar9331_mdio_regmap_config = { .wr_table = &ar9331_register_set, .rd_table = &ar9331_register_set, - .cache_type = REGCACHE_RBTREE, + .cache_type = REGCACHE_MAPLE, }; static struct regmap_bus ar9331_sw_bus = { diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c index efe9380d4a15..ec57d9d52072 100644 --- a/drivers/net/dsa/qca/qca8k-8xxx.c +++ b/drivers/net/dsa/qca/qca8k-8xxx.c @@ -323,14 +323,14 @@ static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len) mutex_lock(&mgmt_eth_data->mutex); - /* Check mgmt_master if is operational */ - if (!priv->mgmt_master) { + /* Check if the mgmt_conduit if is operational */ + if (!priv->mgmt_conduit) { kfree_skb(skb); mutex_unlock(&mgmt_eth_data->mutex); return -EINVAL; } - skb->dev = priv->mgmt_master; + skb->dev = priv->mgmt_conduit; reinit_completion(&mgmt_eth_data->rw_done); @@ -375,14 +375,14 @@ static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len) mutex_lock(&mgmt_eth_data->mutex); - /* Check mgmt_master if is operational */ - if 
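ocelot_ext and seville above (and realtek-smi and rzn1_a5psw later in this section) migrate from .remove, whose int return value the driver core largely ignored, to .remove_new, which returns void and therefore cannot pretend to fail. A sketch of the converted shape, with example_probe() and example_priv hypothetical:

/* Sketch of the .remove_new pattern used by these conversions. */
static void example_remove(struct platform_device *pdev)
{
	struct example_priv *priv = platform_get_drvdata(pdev);

	if (!priv)		/* probe may have bailed out early */
		return;

	/* ...tear down resources; there is no status to return... */
}

static struct platform_driver example_driver = {
	.probe = example_probe,		/* hypothetical */
	.remove_new = example_remove,	/* void return, not int */
};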
(!priv->mgmt_master) { + /* Check if the mgmt_conduit if is operational */ + if (!priv->mgmt_conduit) { kfree_skb(skb); mutex_unlock(&mgmt_eth_data->mutex); return -EINVAL; } - skb->dev = priv->mgmt_master; + skb->dev = priv->mgmt_conduit; reinit_completion(&mgmt_eth_data->rw_done); @@ -505,10 +505,10 @@ qca8k_bulk_read(void *ctx, const void *reg_buf, size_t reg_len, void *val_buf, size_t val_len) { int i, count = val_len / sizeof(u32), ret; - u32 reg = *(u32 *)reg_buf & U16_MAX; struct qca8k_priv *priv = ctx; + u32 reg = *(u16 *)reg_buf; - if (priv->mgmt_master && + if (priv->mgmt_conduit && !qca8k_read_eth(priv, reg, val_buf, val_len)) return 0; @@ -527,11 +527,11 @@ qca8k_bulk_gather_write(void *ctx, const void *reg_buf, size_t reg_len, const void *val_buf, size_t val_len) { int i, count = val_len / sizeof(u32), ret; - u32 reg = *(u32 *)reg_buf & U16_MAX; struct qca8k_priv *priv = ctx; + u32 reg = *(u16 *)reg_buf; u32 *val = (u32 *)val_buf; - if (priv->mgmt_master && + if (priv->mgmt_conduit && !qca8k_write_eth(priv, reg, val, val_len)) return 0; @@ -626,7 +626,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy, struct sk_buff *write_skb, *clear_skb, *read_skb; struct qca8k_mgmt_eth_data *mgmt_eth_data; u32 write_val, clear_val = 0, val; - struct net_device *mgmt_master; + struct net_device *mgmt_conduit; int ret, ret1; bool ack; @@ -666,6 +666,15 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy, goto err_read_skb; } + /* It seems that accessing the switch's internal PHYs via management + * packets still uses the MDIO bus within the switch internally, and + * these accesses can conflict with external MDIO accesses to other + * devices on the MDIO bus. + * We therefore need to lock the MDIO bus onto which the switch is + * connected. + */ + mutex_lock(&priv->bus->mdio_lock); + /* Actually start the request: * 1. Send mdio master packet * 2. 
Busy Wait for mdio master command @@ -674,17 +683,18 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy, */ mutex_lock(&mgmt_eth_data->mutex); - /* Check if mgmt_master is operational */ - mgmt_master = priv->mgmt_master; - if (!mgmt_master) { + /* Check if mgmt_conduit is operational */ + mgmt_conduit = priv->mgmt_conduit; + if (!mgmt_conduit) { mutex_unlock(&mgmt_eth_data->mutex); + mutex_unlock(&priv->bus->mdio_lock); ret = -EINVAL; - goto err_mgmt_master; + goto err_mgmt_conduit; } - read_skb->dev = mgmt_master; - clear_skb->dev = mgmt_master; - write_skb->dev = mgmt_master; + read_skb->dev = mgmt_conduit; + clear_skb->dev = mgmt_conduit; + write_skb->dev = mgmt_conduit; reinit_completion(&mgmt_eth_data->rw_done); @@ -765,11 +775,12 @@ exit: QCA8K_ETHERNET_TIMEOUT); mutex_unlock(&mgmt_eth_data->mutex); + mutex_unlock(&priv->bus->mdio_lock); return ret; /* Error handling before lock */ -err_mgmt_master: +err_mgmt_conduit: kfree_skb(read_skb); err_read_skb: kfree_skb(clear_skb); @@ -948,12 +959,12 @@ qca8k_mdio_register(struct qca8k_priv *priv) ds->dst->index, ds->index); bus->parent = ds->dev; bus->phy_mask = ~ds->phys_mii_mask; - ds->slave_mii_bus = bus; + ds->user_mii_bus = bus; /* Check if the devicetree declare the port:phy mapping */ mdio = of_get_child_by_name(priv->dev->of_node, "mdio"); if (of_device_is_available(mdio)) { - bus->name = "qca8k slave mii"; + bus->name = "qca8k user mii"; bus->read = qca8k_internal_mdio_read; bus->write = qca8k_internal_mdio_write; return devm_of_mdiobus_register(priv->dev, bus, mdio); @@ -962,7 +973,7 @@ qca8k_mdio_register(struct qca8k_priv *priv) /* If a mapping can't be found the legacy mapping is used, * using the qca8k_port_to_phy function */ - bus->name = "qca8k-legacy slave mii"; + bus->name = "qca8k-legacy user mii"; bus->read = qca8k_legacy_mdio_read; bus->write = qca8k_legacy_mdio_write; return devm_mdiobus_register(priv->dev, bus); @@ -1400,8 +1411,6 @@ static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port, config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD; - - config->legacy_pre_march2020 = false; } static void @@ -1719,10 +1728,10 @@ qca8k_get_tag_protocol(struct dsa_switch *ds, int port, } static void -qca8k_master_change(struct dsa_switch *ds, const struct net_device *master, - bool operational) +qca8k_conduit_change(struct dsa_switch *ds, const struct net_device *conduit, + bool operational) { - struct dsa_port *dp = master->dsa_ptr; + struct dsa_port *dp = conduit->dsa_ptr; struct qca8k_priv *priv = ds->priv; /* Ethernet MIB/MDIO is only supported for CPU port 0 */ @@ -1732,7 +1741,7 @@ qca8k_master_change(struct dsa_switch *ds, const struct net_device *master, mutex_lock(&priv->mgmt_eth_data.mutex); mutex_lock(&priv->mib_eth_data.mutex); - priv->mgmt_master = operational ? (struct net_device *)master : NULL; + priv->mgmt_conduit = operational ? (struct net_device *)conduit : NULL; mutex_unlock(&priv->mib_eth_data.mutex); mutex_unlock(&priv->mgmt_eth_data.mutex); @@ -1758,11 +1767,52 @@ static int qca8k_connect_tag_protocol(struct dsa_switch *ds, return 0; } +static void qca8k_setup_hol_fixup(struct qca8k_priv *priv, int port) +{ + u32 mask; + + switch (port) { + /* The 2 CPU port and port 5 requires some different + * priority than any other ports. 
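The mdio_lock newly taken in qca8k_phy_eth_command() nests outside the driver's own mgmt_eth_data->mutex, and both the exit path and the error path above release the two in reverse order. Keeping one canonical order is what prevents an AB/BA deadlock against plain MDIO users of the same bus; condensed from the function above:

/* Lock ordering sketch, condensed from qca8k_phy_eth_command():
 * bus->mdio_lock first, driver mutex second, release in reverse.
 */
mutex_lock(&priv->bus->mdio_lock);	/* serialize vs external MDIO */
mutex_lock(&mgmt_eth_data->mutex);	/* serialize mgmt frames */

/* ... send the read/clear/write management frames ... */

mutex_unlock(&mgmt_eth_data->mutex);
mutex_unlock(&priv->bus->mdio_lock);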
+ */ + case 0: + case 5: + case 6: + mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) | + QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) | + QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) | + QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) | + QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) | + QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) | + QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e); + break; + default: + mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) | + QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) | + QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) | + QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) | + QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19); + } + regmap_write(priv->regmap, QCA8K_REG_PORT_HOL_CTRL0(port), mask); + + mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) | + QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN | + QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | + QCA8K_PORT_HOL_CTRL1_WRED_EN; + regmap_update_bits(priv->regmap, QCA8K_REG_PORT_HOL_CTRL1(port), + QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK | + QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN | + QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | + QCA8K_PORT_HOL_CTRL1_WRED_EN, + mask); +} + static int qca8k_setup(struct dsa_switch *ds) { struct qca8k_priv *priv = ds->priv; - int cpu_port, ret, i; + struct dsa_port *dp; + int cpu_port, ret; u32 mask; cpu_port = qca8k_find_cpu_port(ds); @@ -1817,27 +1867,27 @@ qca8k_setup(struct dsa_switch *ds) dev_warn(priv->dev, "mib init failed"); /* Initial setup of all ports */ - for (i = 0; i < QCA8K_NUM_PORTS; i++) { + dsa_switch_for_each_port(dp, ds) { /* Disable forwarding by default on all ports */ - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(dp->index), QCA8K_PORT_LOOKUP_MEMBER, 0); if (ret) return ret; + } - /* Enable QCA header mode on all cpu ports */ - if (dsa_is_cpu_port(ds, i)) { - ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i), - FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) | - FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL)); - if (ret) { - dev_err(priv->dev, "failed enabling QCA header mode"); - return ret; - } + /* Disable MAC by default on all user ports */ + dsa_switch_for_each_user_port(dp, ds) + qca8k_port_set_status(priv, dp->index, 0); + + /* Enable QCA header mode on all cpu ports */ + dsa_switch_for_each_cpu_port(dp, ds) { + ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(dp->index), + FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) | + FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL)); + if (ret) { + dev_err(priv->dev, "failed enabling QCA header mode on port %d", dp->index); + return ret; } - - /* Disable MAC by default on all user ports */ - if (dsa_is_user_port(ds, i)) - qca8k_port_set_status(priv, i, 0); } /* Forward all unknown frames to CPU port for Linux processing @@ -1852,92 +1902,55 @@ qca8k_setup(struct dsa_switch *ds) if (ret) return ret; + /* CPU port gets connected to all user ports of the switch */ + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(cpu_port), + QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds)); + if (ret) + return ret; + /* Setup connection between CPU port & user ports - * Configure specific switch configuration for ports + * Individual user ports get connected to CPU port only */ - for (i = 0; i < QCA8K_NUM_PORTS; i++) { - /* CPU port gets connected to all user ports of the switch */ - if (dsa_is_cpu_port(ds, i)) { - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), - QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds)); - if (ret) - return ret; - } + dsa_switch_for_each_user_port(dp, ds) { + u8 port = dp->index; - /* Individual user ports get connected to CPU port only */ - if (dsa_is_user_port(ds, i)) { - ret 
= qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), - QCA8K_PORT_LOOKUP_MEMBER, - BIT(cpu_port)); - if (ret) - return ret; - - /* Enable ARP Auto-learning by default */ - ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i), - QCA8K_PORT_LOOKUP_LEARN); - if (ret) - return ret; - - /* For port based vlans to work we need to set the - * default egress vid - */ - ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i), - QCA8K_EGREES_VLAN_PORT_MASK(i), - QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF)); - if (ret) - return ret; - - ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i), - QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) | - QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF)); - if (ret) - return ret; - } + ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), + QCA8K_PORT_LOOKUP_MEMBER, + BIT(cpu_port)); + if (ret) + return ret; + + ret = regmap_clear_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(port), + QCA8K_PORT_LOOKUP_LEARN); + if (ret) + return ret; - /* The port 5 of the qca8337 have some problem in flood condition. The - * original legacy driver had some specific buffer and priority settings - * for the different port suggested by the QCA switch team. Add this - * missing settings to improve switch stability under load condition. - * This problem is limited to qca8337 and other qca8k switch are not affected. + /* For port based vlans to work we need to set the + * default egress vid */ - if (priv->switch_id == QCA8K_ID_QCA8337) { - switch (i) { - /* The 2 CPU port and port 5 requires some different - * priority than any other ports. - */ - case 0: - case 5: - case 6: - mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) | - QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) | - QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) | - QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) | - QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) | - QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) | - QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e); - break; - default: - mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) | - QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) | - QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) | - QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) | - QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19); - } - qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask); - - mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) | - QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN | - QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | - QCA8K_PORT_HOL_CTRL1_WRED_EN; - qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i), - QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK | - QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN | - QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN | - QCA8K_PORT_HOL_CTRL1_WRED_EN, - mask); - } + ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port), + QCA8K_EGREES_VLAN_PORT_MASK(port), + QCA8K_EGREES_VLAN_PORT(port, QCA8K_PORT_VID_DEF)); + if (ret) + return ret; + + ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port), + QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) | + QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF)); + if (ret) + return ret; } + /* The port 5 of the qca8337 have some problem in flood condition. The + * original legacy driver had some specific buffer and priority settings + * for the different port suggested by the QCA switch team. Add this + * missing settings to improve switch stability under load condition. + * This problem is limited to qca8337 and other qca8k switch are not affected. 
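The qca8k_setup() rework above replaces one index loop full of dsa_is_cpu_port()/dsa_is_user_port() tests with the dedicated DSA port iterators, so each block states up front which class of port it touches. The iterators side by side, with hypothetical setup_*() callees:

/* Sketch: the DSA port iterators the rework switches to; dp->index is
 * the port number the register helpers take.
 */
struct dsa_port *dp;

dsa_switch_for_each_port(dp, ds)		/* every port */
	setup_common(priv, dp->index);		/* hypothetical */

dsa_switch_for_each_user_port(dp, ds)		/* user-facing ports only */
	setup_user(priv, dp->index);		/* hypothetical */

dsa_switch_for_each_cpu_port(dp, ds)		/* CPU/conduit-facing ports */
	setup_cpu(priv, dp->index);		/* hypothetical */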
+ */ + if (priv->switch_id == QCA8K_ID_QCA8337) + dsa_switch_for_each_available_port(dp, ds) + qca8k_setup_hol_fixup(priv, dp->index); + /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */ if (priv->switch_id == QCA8K_ID_QCA8327) { mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) | @@ -1980,6 +1993,8 @@ static const struct dsa_switch_ops qca8k_switch_ops = { .port_change_mtu = qca8k_port_change_mtu, .port_max_mtu = qca8k_port_max_mtu, .port_stp_state_set = qca8k_port_stp_state_set, + .port_pre_bridge_flags = qca8k_port_pre_bridge_flags, + .port_bridge_flags = qca8k_port_bridge_flags, .port_bridge_join = qca8k_port_bridge_join, .port_bridge_leave = qca8k_port_bridge_leave, .port_fast_age = qca8k_port_fast_age, @@ -2001,7 +2016,7 @@ static const struct dsa_switch_ops qca8k_switch_ops = { .get_phy_flags = qca8k_get_phy_flags, .port_lag_join = qca8k_port_lag_join, .port_lag_leave = qca8k_port_lag_leave, - .master_state_change = qca8k_master_change, + .conduit_state_change = qca8k_conduit_change, .connect_tag_protocol = qca8k_connect_tag_protocol, }; diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c index 13b8452ce5b2..9243eff8918d 100644 --- a/drivers/net/dsa/qca/qca8k-common.c +++ b/drivers/net/dsa/qca/qca8k-common.c @@ -487,8 +487,7 @@ void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, return; for (i = 0; i < priv->info->mib_count; i++) - strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name, - ETH_GSTRING_LEN); + ethtool_sprintf(&data, "%s", ar8327_mib[i].name); } void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port, @@ -500,7 +499,7 @@ void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port, u32 hi = 0; int ret; - if (priv->mgmt_master && priv->info->ops->autocast_mib && + if (priv->mgmt_conduit && priv->info->ops->autocast_mib && priv->info->ops->autocast_mib(ds, port, data) > 0) return; @@ -565,9 +564,26 @@ int qca8k_get_mac_eee(struct dsa_switch *ds, int port, return 0; } +static int qca8k_port_configure_learning(struct dsa_switch *ds, int port, + bool learning) +{ + struct qca8k_priv *priv = ds->priv; + + if (learning) + return regmap_set_bits(priv->regmap, + QCA8K_PORT_LOOKUP_CTRL(port), + QCA8K_PORT_LOOKUP_LEARN); + else + return regmap_clear_bits(priv->regmap, + QCA8K_PORT_LOOKUP_CTRL(port), + QCA8K_PORT_LOOKUP_LEARN); +} + void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) { + struct dsa_port *dp = dsa_to_port(ds, port); struct qca8k_priv *priv = ds->priv; + bool learning = false; u32 stp_state; switch (state) { @@ -582,8 +598,11 @@ void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) break; case BR_STATE_LEARNING: stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING; + learning = dp->learning; break; case BR_STATE_FORWARDING: + learning = dp->learning; + fallthrough; default: stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD; break; @@ -591,6 +610,34 @@ void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), QCA8K_PORT_LOOKUP_STATE_MASK, stp_state); + + qca8k_port_configure_learning(ds, port, learning); +} + +int qca8k_port_pre_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + if (flags.mask & ~BR_LEARNING) + return -EINVAL; + + return 0; +} + +int qca8k_port_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + int ret; + + if (flags.mask & BR_LEARNING) { + ret = 
qca8k_port_configure_learning(ds, port, + flags.val & BR_LEARNING); + if (ret) + return ret; + } + + return 0; } int qca8k_port_bridge_join(struct dsa_switch *ds, int port, @@ -714,7 +761,7 @@ int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) int ret; /* We have only have a general MTU setting. - * DSA always set the CPU port's MTU to the largest MTU of the slave + * DSA always set the CPU port's MTU to the largest MTU of the user * ports. * Setting MTU just for the CPU port is sufficient to correctly set a * value for every port. diff --git a/drivers/net/dsa/qca/qca8k-leds.c b/drivers/net/dsa/qca/qca8k-leds.c index 1261e0bb21ef..90e30c2909e4 100644 --- a/drivers/net/dsa/qca/qca8k-leds.c +++ b/drivers/net/dsa/qca/qca8k-leds.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#include <linux/property.h> #include <linux/regmap.h> #include <net/dsa.h> @@ -355,8 +356,8 @@ static struct device *qca8k_cled_hw_control_get_device(struct led_classdev *ldev dp = dsa_to_port(priv->ds, qca8k_phy_to_port(led->port_num)); if (!dp) return NULL; - if (dp->slave) - return &dp->slave->dev; + if (dp->user) + return &dp->user->dev; return NULL; } @@ -428,7 +429,7 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p init_data.default_label = ":port"; init_data.fwnode = led; init_data.devname_mandatory = true; - init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d", ds->slave_mii_bus->id, + init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d", ds->user_mii_bus->id, port_num); if (!init_data.devicename) return -ENOMEM; diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h index c5cc8a172d65..2ac7e88f8da5 100644 --- a/drivers/net/dsa/qca/qca8k.h +++ b/drivers/net/dsa/qca/qca8k.h @@ -458,7 +458,7 @@ struct qca8k_priv { struct mutex reg_mutex; struct device *dev; struct gpio_desc *reset_gpio; - struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */ + struct net_device *mgmt_conduit; /* Track if mdio/mib Ethernet is available */ struct qca8k_mgmt_eth_data mgmt_eth_data; struct qca8k_mib_eth_data mib_eth_data; struct qca8k_mdio_cache mdio_cache; @@ -522,6 +522,12 @@ int qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e); /* Common bridge function */ void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state); +int qca8k_port_pre_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack); +int qca8k_port_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack); int qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, bool *tx_fwd_offload, diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c index 5a8fe707ca25..292e6d087e8b 100644 --- a/drivers/net/dsa/realtek/realtek-mdio.c +++ b/drivers/net/dsa/realtek/realtek-mdio.c @@ -20,7 +20,7 @@ */ #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/overflow.h> #include <linux/regmap.h> @@ -276,7 +276,7 @@ MODULE_DEVICE_TABLE(of, realtek_mdio_of_match); static struct mdio_driver realtek_mdio_driver = { .mdiodrv.driver = { .name = "realtek-mdio", - .of_match_table = of_match_ptr(realtek_mdio_of_match), + .of_match_table = realtek_mdio_of_match, }, .probe = realtek_mdio_probe, .remove = realtek_mdio_remove, diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c index 
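realtek-mdio above (and realtek-smi and rzn1_a5psw below) also stops wrapping its match table in of_match_ptr(). With CONFIG_OF disabled that macro evaluates to NULL, leaving the table unreferenced, so passing the table directly keeps it in use on every configuration. Paraphrased from <linux/of.h>:

/* What of_match_ptr() does, paraphrased: */
#ifdef CONFIG_OF
#define of_match_ptr(_ptr)	(_ptr)
#else
#define of_match_ptr(_ptr)	NULL	/* table silently dropped */
#endif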
1b447d96b9c4..755546ed8db6 100644 --- a/drivers/net/dsa/realtek/realtek-smi.c +++ b/drivers/net/dsa/realtek/realtek-smi.c @@ -31,7 +31,6 @@ #include <linux/spinlock.h> #include <linux/skbuff.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_mdio.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> @@ -379,25 +378,25 @@ static int realtek_smi_setup_mdio(struct dsa_switch *ds) return -ENODEV; } - priv->slave_mii_bus = devm_mdiobus_alloc(priv->dev); - if (!priv->slave_mii_bus) { + priv->user_mii_bus = devm_mdiobus_alloc(priv->dev); + if (!priv->user_mii_bus) { ret = -ENOMEM; goto err_put_node; } - priv->slave_mii_bus->priv = priv; - priv->slave_mii_bus->name = "SMI slave MII"; - priv->slave_mii_bus->read = realtek_smi_mdio_read; - priv->slave_mii_bus->write = realtek_smi_mdio_write; - snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d", + priv->user_mii_bus->priv = priv; + priv->user_mii_bus->name = "SMI user MII"; + priv->user_mii_bus->read = realtek_smi_mdio_read; + priv->user_mii_bus->write = realtek_smi_mdio_write; + snprintf(priv->user_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index); - priv->slave_mii_bus->dev.of_node = mdio_np; - priv->slave_mii_bus->parent = priv->dev; - ds->slave_mii_bus = priv->slave_mii_bus; + priv->user_mii_bus->dev.of_node = mdio_np; + priv->user_mii_bus->parent = priv->dev; + ds->user_mii_bus = priv->user_mii_bus; - ret = devm_of_mdiobus_register(priv->dev, priv->slave_mii_bus, mdio_np); + ret = devm_of_mdiobus_register(priv->dev, priv->user_mii_bus, mdio_np); if (ret) { dev_err(priv->dev, "unable to register MDIO bus %s\n", - priv->slave_mii_bus->id); + priv->user_mii_bus->id); goto err_put_node; } @@ -507,22 +506,20 @@ static int realtek_smi_probe(struct platform_device *pdev) return 0; } -static int realtek_smi_remove(struct platform_device *pdev) +static void realtek_smi_remove(struct platform_device *pdev) { struct realtek_priv *priv = platform_get_drvdata(pdev); if (!priv) - return 0; + return; dsa_unregister_switch(priv->ds); - if (priv->slave_mii_bus) - of_node_put(priv->slave_mii_bus->dev.of_node); + if (priv->user_mii_bus) + of_node_put(priv->user_mii_bus->dev.of_node); /* leave the device reset asserted */ if (priv->reset) gpiod_set_value(priv->reset, 1); - - return 0; } static void realtek_smi_shutdown(struct platform_device *pdev) @@ -557,10 +554,10 @@ MODULE_DEVICE_TABLE(of, realtek_smi_of_match); static struct platform_driver realtek_smi_driver = { .driver = { .name = "realtek-smi", - .of_match_table = of_match_ptr(realtek_smi_of_match), + .of_match_table = realtek_smi_of_match, }, .probe = realtek_smi_probe, - .remove = realtek_smi_remove, + .remove_new = realtek_smi_remove, .shutdown = realtek_smi_shutdown, }; module_platform_driver(realtek_smi_driver); diff --git a/drivers/net/dsa/realtek/realtek.h b/drivers/net/dsa/realtek/realtek.h index 4fa7c6ba874a..790488e9c667 100644 --- a/drivers/net/dsa/realtek/realtek.h +++ b/drivers/net/dsa/realtek/realtek.h @@ -54,7 +54,7 @@ struct realtek_priv { struct regmap *map; struct regmap *map_nolock; struct mutex map_lock; - struct mii_bus *slave_mii_bus; + struct mii_bus *user_mii_bus; struct mii_bus *bus; int mdio_addr; diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c index 41ea3b5a42b1..0875e4fc9f57 100644 --- a/drivers/net/dsa/realtek/rtl8365mb.c +++ b/drivers/net/dsa/realtek/rtl8365mb.c @@ -1144,7 +1144,7 @@ static int rtl8365mb_port_change_mtu(struct dsa_switch *ds, int port, int frame_size; /* When a new MTU is set, DSA 
always sets the CPU port's MTU to the - * largest MTU of the slave ports. Because the switch only has a global + * largest MTU of the user ports. Because the switch only has a global * RX length register, only allowing CPU port here is enough. */ if (!dsa_is_cpu_port(ds, port)) @@ -1303,8 +1303,7 @@ static void rtl8365mb_get_strings(struct dsa_switch *ds, int port, u32 stringset for (i = 0; i < RTL8365MB_MIB_END; i++) { struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i]; - - strncpy(data + i * ETH_GSTRING_LEN, mib->name, ETH_GSTRING_LEN); + ethtool_sprintf(&data, "%s", mib->name); } } diff --git a/drivers/net/dsa/realtek/rtl8366-core.c b/drivers/net/dsa/realtek/rtl8366-core.c index dc5f75be3017..82e267b8fddb 100644 --- a/drivers/net/dsa/realtek/rtl8366-core.c +++ b/drivers/net/dsa/realtek/rtl8366-core.c @@ -395,17 +395,13 @@ void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) { struct realtek_priv *priv = ds->priv; - struct rtl8366_mib_counter *mib; int i; if (port >= priv->num_ports) return; - for (i = 0; i < priv->num_mib_counters; i++) { - mib = &priv->mib_counters[i]; - strncpy(data + i * ETH_GSTRING_LEN, - mib->name, ETH_GSTRING_LEN); - } + for (i = 0; i < priv->num_mib_counters; i++) + ethtool_sprintf(&data, "%s", priv->mib_counters[i].name); } EXPORT_SYMBOL_GPL(rtl8366_get_strings); diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c index 25f88022b9e4..b39b719a5b8f 100644 --- a/drivers/net/dsa/realtek/rtl8366rb.c +++ b/drivers/net/dsa/realtek/rtl8366rb.c @@ -95,12 +95,6 @@ #define RTL8366RB_PAACR_RX_PAUSE BIT(6) #define RTL8366RB_PAACR_AN BIT(7) -#define RTL8366RB_PAACR_CPU_PORT (RTL8366RB_PAACR_SPEED_1000M | \ - RTL8366RB_PAACR_FULL_DUPLEX | \ - RTL8366RB_PAACR_LINK_UP | \ - RTL8366RB_PAACR_TX_PAUSE | \ - RTL8366RB_PAACR_RX_PAUSE) - /* bits 0..7 = port 0, bits 8..15 = port 1 */ #define RTL8366RB_PSTAT0 0x0014 /* bits 0..7 = port 2, bits 8..15 = port 3 */ @@ -1049,35 +1043,93 @@ static enum dsa_tag_protocol rtl8366_get_tag_protocol(struct dsa_switch *ds, return DSA_TAG_PROTO_RTL4_A; } +static void rtl8366rb_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) +{ + unsigned long *interfaces = config->supported_interfaces; + struct realtek_priv *priv = ds->priv; + + if (port == priv->cpu_port) { + __set_bit(PHY_INTERFACE_MODE_MII, interfaces); + __set_bit(PHY_INTERFACE_MODE_GMII, interfaces); + /* REVMII only supports 100M FD */ + __set_bit(PHY_INTERFACE_MODE_REVMII, interfaces); + /* RGMII only supports 1G FD */ + phy_interface_set_rgmii(interfaces); + + config->mac_capabilities = MAC_1000 | MAC_100 | + MAC_SYM_PAUSE; + } else { + /* RSGMII port, but we don't have that, and we don't + * specify in DT, so phylib uses the default of GMII + */ + __set_bit(PHY_INTERFACE_MODE_GMII, interfaces); + config->mac_capabilities = MAC_1000 | MAC_100 | MAC_10 | + MAC_SYM_PAUSE | MAC_ASYM_PAUSE; + } +} + static void rtl8366rb_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface, struct phy_device *phydev, int speed, int duplex, bool tx_pause, bool rx_pause) { struct realtek_priv *priv = ds->priv; + unsigned int val; int ret; + /* Allow forcing the mode on the fixed CPU port, no autonegotiation. + * We assume autonegotiation works on the PHY-facing ports. 
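Three drivers in this section (qca8k, rtl8365mb, rtl8366) replace strncpy()-into-a-fixed-slot with ethtool_sprintf(), which formats into the current ETH_GSTRING_LEN slot and advances the data pointer itself, removing the manual i * ETH_GSTRING_LEN indexing. A sketch of the converted loop, with a hypothetical example_mib type:

/* Sketch of the converted get_strings loop; ethtool_sprintf() bumps
 * the data pointer by ETH_GSTRING_LEN after each string.
 */
struct example_mib {
	const char *name;	/* hypothetical counter descriptor */
};

static void example_get_strings(u8 *data, const struct example_mib *mibs,
				int count)
{
	int i;

	for (i = 0; i < count; i++)
		ethtool_sprintf(&data, "%s", mibs[i].name);
}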
+ */ if (port != priv->cpu_port) return; dev_dbg(priv->dev, "MAC link up on CPU port (%d)\n", port); - /* Force the fixed CPU port into 1Gbit mode, no autonegotiation */ ret = regmap_update_bits(priv->map, RTL8366RB_MAC_FORCE_CTRL_REG, BIT(port), BIT(port)); if (ret) { - dev_err(priv->dev, "failed to force 1Gbit on CPU port\n"); + dev_err(priv->dev, "failed to force CPU port\n"); return; } + /* Conjure port config */ + switch (speed) { + case SPEED_10: + val = RTL8366RB_PAACR_SPEED_10M; + break; + case SPEED_100: + val = RTL8366RB_PAACR_SPEED_100M; + break; + case SPEED_1000: + val = RTL8366RB_PAACR_SPEED_1000M; + break; + default: + val = RTL8366RB_PAACR_SPEED_1000M; + break; + } + + if (duplex == DUPLEX_FULL) + val |= RTL8366RB_PAACR_FULL_DUPLEX; + + if (tx_pause) + val |= RTL8366RB_PAACR_TX_PAUSE; + + if (rx_pause) + val |= RTL8366RB_PAACR_RX_PAUSE; + + val |= RTL8366RB_PAACR_LINK_UP; + ret = regmap_update_bits(priv->map, RTL8366RB_PAACR2, 0xFF00U, - RTL8366RB_PAACR_CPU_PORT << 8); + val << 8); if (ret) { dev_err(priv->dev, "failed to set PAACR on CPU port\n"); return; } + dev_dbg(priv->dev, "set PAACR to %04x\n", val); + /* Enable the CPU port */ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port), 0); @@ -1796,6 +1848,7 @@ static int rtl8366rb_detect(struct realtek_priv *priv) static const struct dsa_switch_ops rtl8366rb_switch_ops_smi = { .get_tag_protocol = rtl8366_get_tag_protocol, .setup = rtl8366rb_setup, + .phylink_get_caps = rtl8366rb_phylink_get_caps, .phylink_mac_link_up = rtl8366rb_mac_link_up, .phylink_mac_link_down = rtl8366rb_mac_link_down, .get_strings = rtl8366_get_strings, @@ -1821,6 +1874,7 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops_mdio = { .setup = rtl8366rb_setup, .phy_read = rtl8366rb_dsa_phy_read, .phy_write = rtl8366rb_dsa_phy_write, + .phylink_get_caps = rtl8366rb_phylink_get_caps, .phylink_mac_link_up = rtl8366rb_mac_link_up, .phylink_mac_link_down = rtl8366rb_mac_link_down, .get_strings = rtl8366_get_strings, diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c index c37d2e537230..10092ea85e46 100644 --- a/drivers/net/dsa/rzn1_a5psw.c +++ b/drivers/net/dsa/rzn1_a5psw.c @@ -331,13 +331,9 @@ static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port, A5PSW_MCAST_DEF_MASK}; int i; - if (set) - a5psw->bridged_ports |= BIT(port); - else - a5psw->bridged_ports &= ~BIT(port); - for (i = 0; i < ARRAY_SIZE(offsets); i++) - a5psw_reg_writel(a5psw, offsets[i], a5psw->bridged_ports); + a5psw_reg_rmw(a5psw, offsets[i], BIT(port), + set ? 
BIT(port) : 0); } static void a5psw_port_set_standalone(struct a5psw *a5psw, int port, @@ -365,6 +361,8 @@ static int a5psw_port_bridge_join(struct dsa_switch *ds, int port, a5psw->br_dev = bridge.dev; a5psw_port_set_standalone(a5psw, port, false); + a5psw->bridged_ports |= BIT(port); + return 0; } @@ -373,6 +371,8 @@ static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port, { struct a5psw *a5psw = ds->priv; + a5psw->bridged_ports &= ~BIT(port); + a5psw_port_set_standalone(a5psw, port, true); /* No more ports bridged */ @@ -380,9 +380,63 @@ static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port, a5psw->br_dev = NULL; } +static int a5psw_port_pre_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | + BR_BCAST_FLOOD)) + return -EINVAL; + + return 0; +} + +static int +a5psw_port_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + struct a5psw *a5psw = ds->priv; + u32 val; + + /* If a port is set as standalone, we do not want to be able to + * configure flooding nor learning which would result in joining the + * unique bridge. This can happen when a port leaves the bridge, in + * which case the DSA core will try to "clear" all flags for the + * standalone port (ie enable flooding, disable learning). In that case + * do not fail but do not apply the flags. + */ + if (!(a5psw->bridged_ports & BIT(port))) + return 0; + + if (flags.mask & BR_LEARNING) { + val = flags.val & BR_LEARNING ? 0 : A5PSW_INPUT_LEARN_DIS(port); + a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, + A5PSW_INPUT_LEARN_DIS(port), val); + } + + if (flags.mask & BR_FLOOD) { + val = flags.val & BR_FLOOD ? BIT(port) : 0; + a5psw_reg_rmw(a5psw, A5PSW_UCAST_DEF_MASK, BIT(port), val); + } + + if (flags.mask & BR_MCAST_FLOOD) { + val = flags.val & BR_MCAST_FLOOD ? BIT(port) : 0; + a5psw_reg_rmw(a5psw, A5PSW_MCAST_DEF_MASK, BIT(port), val); + } + + if (flags.mask & BR_BCAST_FLOOD) { + val = flags.val & BR_BCAST_FLOOD ? BIT(port) : 0; + a5psw_reg_rmw(a5psw, A5PSW_BCAST_DEF_MASK, BIT(port), val); + } + + return 0; +} + static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) { bool learning_enabled, rx_enabled, tx_enabled; + struct dsa_port *dp = dsa_to_port(ds, port); struct a5psw *a5psw = ds->priv; switch (state) { @@ -396,12 +450,12 @@ static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) case BR_STATE_LEARNING: rx_enabled = false; tx_enabled = false; - learning_enabled = true; + learning_enabled = dp->learning; break; case BR_STATE_FORWARDING: rx_enabled = true; tx_enabled = true; - learning_enabled = true; + learning_enabled = dp->learning; break; default: dev_err(ds->dev, "invalid STP state: %d\n", state); @@ -585,6 +639,140 @@ out_unlock: return ret; } +static int a5psw_port_vlan_filtering(struct dsa_switch *ds, int port, + bool vlan_filtering, + struct netlink_ext_ack *extack) +{ + u32 mask = BIT(port + A5PSW_VLAN_VERI_SHIFT) | + BIT(port + A5PSW_VLAN_DISC_SHIFT); + u32 val = vlan_filtering ? mask : 0; + struct a5psw *a5psw = ds->priv; + + /* Disable/enable vlan tagging */ + a5psw_reg_rmw(a5psw, A5PSW_VLAN_IN_MODE_ENA, BIT(port), + vlan_filtering ? 
BIT(port) : 0); + + /* Disable/enable vlan input filtering */ + a5psw_reg_rmw(a5psw, A5PSW_VLAN_VERIFY, mask, val); + + return 0; +} + +static int a5psw_find_vlan_entry(struct a5psw *a5psw, u16 vid) +{ + u32 vlan_res; + int i; + + /* Find vlan for this port */ + for (i = 0; i < A5PSW_VLAN_COUNT; i++) { + vlan_res = a5psw_reg_readl(a5psw, A5PSW_VLAN_RES(i)); + if (FIELD_GET(A5PSW_VLAN_RES_VLANID, vlan_res) == vid) + return i; + } + + return -1; +} + +static int a5psw_new_vlan_res_entry(struct a5psw *a5psw, u16 newvid) +{ + u32 vlan_res; + int i; + + /* Find a free VLAN entry */ + for (i = 0; i < A5PSW_VLAN_COUNT; i++) { + vlan_res = a5psw_reg_readl(a5psw, A5PSW_VLAN_RES(i)); + if (!(FIELD_GET(A5PSW_VLAN_RES_PORTMASK, vlan_res))) { + vlan_res = FIELD_PREP(A5PSW_VLAN_RES_VLANID, newvid); + a5psw_reg_writel(a5psw, A5PSW_VLAN_RES(i), vlan_res); + return i; + } + } + + return -1; +} + +static void a5psw_port_vlan_tagged_cfg(struct a5psw *a5psw, + unsigned int vlan_res_id, int port, + bool set) +{ + u32 mask = A5PSW_VLAN_RES_WR_PORTMASK | A5PSW_VLAN_RES_RD_TAGMASK | + BIT(port); + u32 vlan_res_off = A5PSW_VLAN_RES(vlan_res_id); + u32 val = A5PSW_VLAN_RES_WR_TAGMASK, reg; + + if (set) + val |= BIT(port); + + /* Toggle tag mask read */ + a5psw_reg_writel(a5psw, vlan_res_off, A5PSW_VLAN_RES_RD_TAGMASK); + reg = a5psw_reg_readl(a5psw, vlan_res_off); + a5psw_reg_writel(a5psw, vlan_res_off, A5PSW_VLAN_RES_RD_TAGMASK); + + reg &= ~mask; + reg |= val; + a5psw_reg_writel(a5psw, vlan_res_off, reg); +} + +static void a5psw_port_vlan_cfg(struct a5psw *a5psw, unsigned int vlan_res_id, + int port, bool set) +{ + u32 mask = A5PSW_VLAN_RES_WR_TAGMASK | BIT(port); + u32 reg = A5PSW_VLAN_RES_WR_PORTMASK; + + if (set) + reg |= BIT(port); + + a5psw_reg_rmw(a5psw, A5PSW_VLAN_RES(vlan_res_id), mask, reg); +} + +static int a5psw_port_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) +{ + bool tagged = !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED); + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + struct a5psw *a5psw = ds->priv; + u16 vid = vlan->vid; + int vlan_res_id; + + vlan_res_id = a5psw_find_vlan_entry(a5psw, vid); + if (vlan_res_id < 0) { + vlan_res_id = a5psw_new_vlan_res_entry(a5psw, vid); + if (vlan_res_id < 0) + return -ENOSPC; + } + + a5psw_port_vlan_cfg(a5psw, vlan_res_id, port, true); + if (tagged) + a5psw_port_vlan_tagged_cfg(a5psw, vlan_res_id, port, true); + + /* Configure port to tag with corresponding VID, but do not enable it + * yet: wait for vlan filtering to be enabled to enable vlan port + * tagging + */ + if (pvid) + a5psw_reg_writel(a5psw, A5PSW_SYSTEM_TAGINFO(port), vid); + + return 0; +} + +static int a5psw_port_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct a5psw *a5psw = ds->priv; + u16 vid = vlan->vid; + int vlan_res_id; + + vlan_res_id = a5psw_find_vlan_entry(a5psw, vid); + if (vlan_res_id < 0) + return -EINVAL; + + a5psw_port_vlan_cfg(a5psw, vlan_res_id, port, false); + a5psw_port_vlan_tagged_cfg(a5psw, vlan_res_id, port, false); + + return 0; +} + static u64 a5psw_read_stat(struct a5psw *a5psw, u32 offset, int port) { u32 reg_lo, reg_hi; @@ -702,6 +890,27 @@ static void a5psw_get_eth_ctrl_stats(struct dsa_switch *ds, int port, ctrl_stats->MACControlFramesReceived = stat; } +static void a5psw_vlan_setup(struct a5psw *a5psw, int port) +{ + u32 reg; + + /* Enable TAG always mode for the port, this is actually controlled + * by VLAN_IN_MODE_ENA field which will be 
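a5psw_find_vlan_entry() and a5psw_new_vlan_res_entry() above are the usual find-or-allocate walk over a fixed-size hardware table: match on the VID first, otherwise claim the first slot whose port mask is empty. The driver does it as two separate scans; the same logic in one self-contained pass (table layout invented for illustration):

#define NENTRIES 32

struct vlan_res {
        unsigned short vid;
        unsigned int portmask;          /* 0 means the slot is free */
};

static int find_or_alloc(struct vlan_res *tbl, unsigned short vid)
{
        int i, free_slot = -1;

        for (i = 0; i < NENTRIES; i++) {
                if (tbl[i].portmask && tbl[i].vid == vid)
                        return i;               /* already present */
                if (!tbl[i].portmask && free_slot < 0)
                        free_slot = i;          /* first free slot */
        }

        if (free_slot >= 0)
                tbl[free_slot].vid = vid;       /* claim it */

        return free_slot;                       /* -1 maps to -ENOSPC */
}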
used for PVID insertion + */ + reg = A5PSW_VLAN_IN_MODE_TAG_ALWAYS; + reg <<= A5PSW_VLAN_IN_MODE_PORT_SHIFT(port); + a5psw_reg_rmw(a5psw, A5PSW_VLAN_IN_MODE, A5PSW_VLAN_IN_MODE_PORT(port), + reg); + + /* Set transparent mode for output frame manipulation, this will depend + * on the VLAN_RES configuration mode + */ + reg = A5PSW_VLAN_OUT_MODE_TRANSPARENT; + reg <<= A5PSW_VLAN_OUT_MODE_PORT_SHIFT(port); + a5psw_reg_rmw(a5psw, A5PSW_VLAN_OUT_MODE, + A5PSW_VLAN_OUT_MODE_PORT(port), reg); +} + static int a5psw_setup(struct dsa_switch *ds) { struct a5psw *a5psw = ds->priv; @@ -776,6 +985,8 @@ static int a5psw_setup(struct dsa_switch *ds) /* Enable standalone mode for user ports */ if (dsa_port_is_user(dp)) a5psw_port_set_standalone(a5psw, port, true); + + a5psw_vlan_setup(a5psw, port); } return 0; @@ -801,8 +1012,13 @@ static const struct dsa_switch_ops a5psw_switch_ops = { .set_ageing_time = a5psw_set_ageing_time, .port_bridge_join = a5psw_port_bridge_join, .port_bridge_leave = a5psw_port_bridge_leave, + .port_pre_bridge_flags = a5psw_port_pre_bridge_flags, + .port_bridge_flags = a5psw_port_bridge_flags, .port_stp_state_set = a5psw_port_stp_state_set, .port_fast_age = a5psw_port_fast_age, + .port_vlan_filtering = a5psw_port_vlan_filtering, + .port_vlan_add = a5psw_port_vlan_add, + .port_vlan_del = a5psw_port_vlan_del, .port_fdb_add = a5psw_port_fdb_add, .port_fdb_del = a5psw_port_fdb_del, .port_fdb_dump = a5psw_port_fdb_dump, @@ -992,6 +1208,8 @@ static int a5psw_probe(struct platform_device *pdev) if (IS_ERR(a5psw->base)) return PTR_ERR(a5psw->base); + a5psw->bridged_ports = BIT(A5PSW_CPU_PORT); + ret = a5psw_pcs_get(a5psw); if (ret) return ret; @@ -1054,19 +1272,17 @@ free_pcs: return ret; } -static int a5psw_remove(struct platform_device *pdev) +static void a5psw_remove(struct platform_device *pdev) { struct a5psw *a5psw = platform_get_drvdata(pdev); if (!a5psw) - return 0; + return; dsa_unregister_switch(&a5psw->ds); a5psw_pcs_free(a5psw); clk_disable_unprepare(a5psw->hclk); clk_disable_unprepare(a5psw->clk); - - return 0; } static void a5psw_shutdown(struct platform_device *pdev) @@ -1090,10 +1306,10 @@ MODULE_DEVICE_TABLE(of, a5psw_of_mtable); static struct platform_driver a5psw_driver = { .driver = { .name = "rzn1_a5psw", - .of_match_table = of_match_ptr(a5psw_of_mtable), + .of_match_table = a5psw_of_mtable, }, .probe = a5psw_probe, - .remove = a5psw_remove, + .remove_new = a5psw_remove, .shutdown = a5psw_shutdown, }; module_platform_driver(a5psw_driver); diff --git a/drivers/net/dsa/rzn1_a5psw.h b/drivers/net/dsa/rzn1_a5psw.h index b869192eef3f..d54acedac194 100644 --- a/drivers/net/dsa/rzn1_a5psw.h +++ b/drivers/net/dsa/rzn1_a5psw.h @@ -51,7 +51,9 @@ #define A5PSW_VLAN_IN_MODE_TAG_ALWAYS 0x2 #define A5PSW_VLAN_OUT_MODE 0x2C -#define A5PSW_VLAN_OUT_MODE_PORT(port) (GENMASK(1, 0) << ((port) * 2)) +#define A5PSW_VLAN_OUT_MODE_PORT_SHIFT(port) ((port) * 2) +#define A5PSW_VLAN_OUT_MODE_PORT(port) (GENMASK(1, 0) << \ + A5PSW_VLAN_OUT_MODE_PORT_SHIFT(port)) #define A5PSW_VLAN_OUT_MODE_DIS 0x0 #define A5PSW_VLAN_OUT_MODE_STRIP 0x1 #define A5PSW_VLAN_OUT_MODE_TAG_THROUGH 0x2 @@ -60,7 +62,7 @@ #define A5PSW_VLAN_IN_MODE_ENA 0x30 #define A5PSW_VLAN_TAG_ID 0x34 -#define A5PSW_SYSTEM_TAGINFO(port) (0x200 + A5PSW_PORT_OFFSET(port)) +#define A5PSW_SYSTEM_TAGINFO(port) (0x200 + 4 * (port)) #define A5PSW_AUTH_PORT(port) (0x240 + 4 * (port)) #define A5PSW_AUTH_PORT_AUTHORIZED BIT(0) @@ -69,7 +71,7 @@ #define A5PSW_VLAN_RES_WR_PORTMASK BIT(30) #define A5PSW_VLAN_RES_WR_TAGMASK BIT(29) #define 
A5PSW_VLAN_RES_RD_TAGMASK BIT(28) -#define A5PSW_VLAN_RES_ID GENMASK(16, 5) +#define A5PSW_VLAN_RES_VLANID GENMASK(16, 5) #define A5PSW_VLAN_RES_PORTMASK GENMASK(4, 0) #define A5PSW_RXMATCH_CONFIG(port) (0x3e80 + 4 * (port)) diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h index dee35ba924ad..8c66d3bf61f0 100644 --- a/drivers/net/dsa/sja1105/sja1105.h +++ b/drivers/net/dsa/sja1105/sja1105.h @@ -132,6 +132,8 @@ struct sja1105_info { int max_frame_mem; int num_ports; bool multiple_cascade_ports; + /* Every {port, TXQ} has its own CBS shaper */ + bool fixed_cbs_mapping; enum dsa_tag_protocol tag_proto; const struct sja1105_dynamic_table_ops *dyn_ops; const struct sja1105_table_ops *static_ops; @@ -264,6 +266,8 @@ struct sja1105_private { * the switch doesn't confuse them with one another. */ struct mutex mgmt_lock; + /* Serializes accesses to the FDB */ + struct mutex fdb_lock; /* PTP two-step TX timestamp ID, and its serialization lock */ spinlock_t ts_id_lock; u8 ts_id; diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c index e3699f76f6d7..08a3e7b96254 100644 --- a/drivers/net/dsa/sja1105/sja1105_clocking.c +++ b/drivers/net/dsa/sja1105/sja1105_clocking.c @@ -153,14 +153,14 @@ static int sja1105_cgu_mii_tx_clk_config(struct sja1105_private *priv, { const struct sja1105_regs *regs = priv->info->regs; struct sja1105_cgu_mii_ctrl mii_tx_clk; - const int mac_clk_sources[] = { + static const int mac_clk_sources[] = { CLKSRC_MII0_TX_CLK, CLKSRC_MII1_TX_CLK, CLKSRC_MII2_TX_CLK, CLKSRC_MII3_TX_CLK, CLKSRC_MII4_TX_CLK, }; - const int phy_clk_sources[] = { + static const int phy_clk_sources[] = { CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2, @@ -194,7 +194,7 @@ sja1105_cgu_mii_rx_clk_config(struct sja1105_private *priv, int port) const struct sja1105_regs *regs = priv->info->regs; struct sja1105_cgu_mii_ctrl mii_rx_clk; u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; - const int clk_sources[] = { + static const int clk_sources[] = { CLKSRC_MII0_RX_CLK, CLKSRC_MII1_RX_CLK, CLKSRC_MII2_RX_CLK, @@ -221,7 +221,7 @@ sja1105_cgu_mii_ext_tx_clk_config(struct sja1105_private *priv, int port) const struct sja1105_regs *regs = priv->info->regs; struct sja1105_cgu_mii_ctrl mii_ext_tx_clk; u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; - const int clk_sources[] = { + static const int clk_sources[] = { CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2, @@ -248,7 +248,7 @@ sja1105_cgu_mii_ext_rx_clk_config(struct sja1105_private *priv, int port) const struct sja1105_regs *regs = priv->info->regs; struct sja1105_cgu_mii_ctrl mii_ext_rx_clk; u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; - const int clk_sources[] = { + static const int clk_sources[] = { CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2, @@ -349,8 +349,13 @@ static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv, if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) { clksrc = CLKSRC_PLL0; } else { - int clk_sources[] = {CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2, - CLKSRC_IDIV3, CLKSRC_IDIV4}; + static const int clk_sources[] = { + CLKSRC_IDIV0, + CLKSRC_IDIV1, + CLKSRC_IDIV2, + CLKSRC_IDIV3, + CLKSRC_IDIV4, + }; clksrc = clk_sources[port]; } @@ -638,7 +643,7 @@ static int sja1105_cgu_rmii_ref_clk_config(struct sja1105_private *priv, const struct sja1105_regs *regs = priv->info->regs; struct sja1105_cgu_mii_ctrl ref_clk; u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; - const int clk_sources[] = { + static const int clk_sources[] = { CLKSRC_MII0_TX_CLK, CLKSRC_MII1_TX_CLK, 
CLKSRC_MII2_TX_CLK, diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c index 7729d3f8b7f5..984c0e604e8d 100644 --- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c +++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c @@ -1175,18 +1175,15 @@ const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = { static int sja1105_dynamic_config_poll_valid(struct sja1105_private *priv, - struct sja1105_dyn_cmd *cmd, - const struct sja1105_dynamic_table_ops *ops) + const struct sja1105_dynamic_table_ops *ops, + void *entry, bool check_valident, + bool check_errors) { u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {}; + struct sja1105_dyn_cmd cmd = {}; int rc; - /* We don't _need_ to read the full entry, just the command area which - * is a fixed SJA1105_SIZE_DYN_CMD. But our cmd_packing() API expects a - * buffer that contains the full entry too. Additionally, our API - * doesn't really know how many bytes into the buffer does the command - * area really begin. So just read back the whole entry. - */ + /* Read back the whole entry + command structure. */ rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf, ops->packed_size); if (rc) @@ -1195,11 +1192,25 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv, /* Unpack the command structure, and return it to the caller in case it * needs to perform further checks on it (VALIDENT). */ - memset(cmd, 0, sizeof(*cmd)); - ops->cmd_packing(packed_buf, cmd, UNPACK); + ops->cmd_packing(packed_buf, &cmd, UNPACK); /* Hardware hasn't cleared VALID => still working on it */ - return cmd->valid ? -EAGAIN : 0; + if (cmd.valid) + return -EAGAIN; + + if (check_valident && !cmd.valident && !(ops->access & OP_VALID_ANYWAY)) + return -ENOENT; + + if (check_errors && cmd.errors) + return -EINVAL; + + /* Don't dereference possibly NULL pointer - maybe caller + * only wanted to see whether the entry existed or not. + */ + if (entry) + ops->entry_packing(packed_buf, entry, UNPACK); + + return 0; } /* Poll the dynamic config entry's control area until the hardware has @@ -1208,16 +1219,19 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv, */ static int sja1105_dynamic_config_wait_complete(struct sja1105_private *priv, - struct sja1105_dyn_cmd *cmd, - const struct sja1105_dynamic_table_ops *ops) + const struct sja1105_dynamic_table_ops *ops, + void *entry, bool check_valident, + bool check_errors) { - int rc; - - return read_poll_timeout(sja1105_dynamic_config_poll_valid, - rc, rc != -EAGAIN, - SJA1105_DYNAMIC_CONFIG_SLEEP_US, - SJA1105_DYNAMIC_CONFIG_TIMEOUT_US, - false, priv, cmd, ops); + int err, rc; + + err = read_poll_timeout(sja1105_dynamic_config_poll_valid, + rc, rc != -EAGAIN, + SJA1105_DYNAMIC_CONFIG_SLEEP_US, + SJA1105_DYNAMIC_CONFIG_TIMEOUT_US, + false, priv, ops, entry, check_valident, + check_errors); + return err < 0 ? 
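The rework of sja1105_dynamic_config_wait_complete() below leans on a common iopoll idiom: the polled function reports "hardware still busy" as -EAGAIN, read_poll_timeout() is told to stop on any other value, and the caller then folds the two error sources together — the poll machinery's own -ETIMEDOUT versus the settled status of the operation. The shape of it (foo_* names hypothetical; read_poll_timeout() is from <linux/iopoll.h>):

/* returns -EAGAIN while busy, then 0 or a real errno once settled */
static int foo_poll_valid(struct foo *foo);

static int foo_wait_complete(struct foo *foo)
{
        int err, rc;

        err = read_poll_timeout(foo_poll_valid, rc, rc != -EAGAIN,
                                FOO_SLEEP_US, FOO_TIMEOUT_US, false, foo);

        /* err < 0: never settled (-ETIMEDOUT); rc: the settled verdict */
        return err < 0 ? err : rc;
}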
err : rc; } /* Provides read access to the settings through the dynamic interface @@ -1286,25 +1300,14 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv, mutex_lock(&priv->dynamic_config_lock); rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf, ops->packed_size); - if (rc < 0) { - mutex_unlock(&priv->dynamic_config_lock); - return rc; - } - - rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops); - mutex_unlock(&priv->dynamic_config_lock); if (rc < 0) - return rc; + goto out; - if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY)) - return -ENOENT; + rc = sja1105_dynamic_config_wait_complete(priv, ops, entry, true, false); +out: + mutex_unlock(&priv->dynamic_config_lock); - /* Don't dereference possibly NULL pointer - maybe caller - * only wanted to see whether the entry existed or not. - */ - if (entry) - ops->entry_packing(packed_buf, entry, UNPACK); - return 0; + return rc; } int sja1105_dynamic_config_write(struct sja1105_private *priv, @@ -1356,22 +1359,14 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv, mutex_lock(&priv->dynamic_config_lock); rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf, ops->packed_size); - if (rc < 0) { - mutex_unlock(&priv->dynamic_config_lock); - return rc; - } - - rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops); - mutex_unlock(&priv->dynamic_config_lock); if (rc < 0) - return rc; + goto out; - cmd = (struct sja1105_dyn_cmd) {0}; - ops->cmd_packing(packed_buf, &cmd, UNPACK); - if (cmd.errors) - return -EINVAL; + rc = sja1105_dynamic_config_wait_complete(priv, ops, NULL, false, true); +out: + mutex_unlock(&priv->dynamic_config_lock); - return 0; + return rc; } static u8 sja1105_crc8_add(u8 crc, u8 byte, u8 poly) diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c index fad5afe3819c..9e8ca182c722 100644 --- a/drivers/net/dsa/sja1105/sja1105_flower.c +++ b/drivers/net/dsa/sja1105/sja1105_flower.c @@ -205,10 +205,10 @@ static int sja1105_flower_parse_key(struct sja1105_private *priv, u16 pcp = U16_MAX; if (dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_VLAN) | - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) { + ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) { NL_SET_ERR_MSG_MOD(extack, "Unsupported keys used"); return -EOPNOTSUPP; diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 3529a565b4aa..74cee39d73df 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -15,7 +15,6 @@ #include <linux/of.h> #include <linux/of_net.h> #include <linux/of_mdio.h> -#include <linux/of_device.h> #include <linux/pcs/pcs-xpcs.h> #include <linux/netdev_features.h> #include <linux/netdevice.h> @@ -1396,12 +1395,6 @@ static void sja1105_phylink_get_caps(struct dsa_switch *ds, int port, struct sja1105_xmii_params_entry *mii; phy_interface_t phy_mode; - /* This driver does not make use of the speed, duplex, pause or the - * advertisement in its mac_config, so it is safe to mark this driver - * as non-legacy. 
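The __sja1105_fdb_del()/sja1105_fdb_del() split above follows the kernel's double-underscore convention: the underscored variant assumes the caller already holds the lock (sja1105_fast_age() below holds fdb_lock across its whole FDB walk and deletes entries as it goes), while the plain-named wrapper takes and drops it around a single call. Schematically (foo_* names hypothetical):

/* caller must hold priv->fdb_lock */
static int __foo_fdb_del(struct foo *priv, const u8 *addr, u16 vid)
{
        return foo_hw_fdb_del(priv, addr, vid); /* hypothetical hw op */
}

static int foo_fdb_del(struct foo *priv, const u8 *addr, u16 vid)
{
        int rc;

        mutex_lock(&priv->fdb_lock);
        rc = __foo_fdb_del(priv, addr, vid);
        mutex_unlock(&priv->fdb_lock);

        return rc;
}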
- */ - config->legacy_pre_march2020 = false; - phy_mode = priv->phy_mode[port]; if (phy_mode == PHY_INTERFACE_MODE_SGMII || phy_mode == PHY_INTERFACE_MODE_2500BASEX) { @@ -1805,6 +1798,7 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port, struct dsa_db db) { struct sja1105_private *priv = ds->priv; + int rc; if (!vid) { switch (db.type) { @@ -1819,12 +1813,16 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port, } } - return priv->info->fdb_add_cmd(ds, port, addr, vid); + mutex_lock(&priv->fdb_lock); + rc = priv->info->fdb_add_cmd(ds, port, addr, vid); + mutex_unlock(&priv->fdb_lock); + + return rc; } -static int sja1105_fdb_del(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid, - struct dsa_db db) +static int __sja1105_fdb_del(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, + struct dsa_db db) { struct sja1105_private *priv = ds->priv; @@ -1844,6 +1842,20 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port, return priv->info->fdb_del_cmd(ds, port, addr, vid); } +static int sja1105_fdb_del(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, + struct dsa_db db) +{ + struct sja1105_private *priv = ds->priv; + int rc; + + mutex_lock(&priv->fdb_lock); + rc = __sja1105_fdb_del(ds, port, addr, vid, db); + mutex_unlock(&priv->fdb_lock); + + return rc; +} + static int sja1105_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, void *data) { @@ -1875,13 +1887,14 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port, if (!(l2_lookup.destports & BIT(port))) continue; - /* We need to hide the FDB entry for unknown multicast */ - if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST && - l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST) - continue; - u64_to_ether_addr(l2_lookup.macaddr, macaddr); + /* Hardware FDB is shared for fdb and mdb, "bridge fdb show" + * only wants to see unicast + */ + if (is_multicast_ether_addr(macaddr)) + continue; + /* We need to hide the dsa_8021q VLANs from the user. 
*/ if (vid_is_dsa_8021q(l2_lookup.vlanid)) l2_lookup.vlanid = 0; @@ -1905,6 +1918,8 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port) }; int i; + mutex_lock(&priv->fdb_lock); + for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) { struct sja1105_l2_lookup_entry l2_lookup = {0}; u8 macaddr[ETH_ALEN]; @@ -1918,7 +1933,7 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port) if (rc) { dev_err(ds->dev, "Failed to read FDB: %pe\n", ERR_PTR(rc)); - return; + break; } if (!(l2_lookup.destports & BIT(port))) @@ -1930,14 +1945,16 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port) u64_to_ether_addr(l2_lookup.macaddr, macaddr); - rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db); + rc = __sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db); if (rc) { dev_err(ds->dev, "Failed to delete FDB entry %pM vid %lld: %pe\n", macaddr, l2_lookup.vlanid, ERR_PTR(rc)); - return; + break; } } + + mutex_unlock(&priv->fdb_lock); } static int sja1105_mdb_add(struct dsa_switch *ds, int port, @@ -2122,11 +2139,36 @@ static void sja1105_bridge_leave(struct dsa_switch *ds, int port, } #define BYTES_PER_KBIT (1000LL / 8) +/* Port 0 (the uC port) does not have CBS shapers */ +#define SJA1110_FIXED_CBS(port, prio) ((((port) - 1) * SJA1105_NUM_TC) + (prio)) + +static int sja1105_find_cbs_shaper(struct sja1105_private *priv, + int port, int prio) +{ + int i; + + if (priv->info->fixed_cbs_mapping) { + i = SJA1110_FIXED_CBS(port, prio); + if (i >= 0 && i < priv->info->num_cbs_shapers) + return i; + + return -1; + } + + for (i = 0; i < priv->info->num_cbs_shapers; i++) + if (priv->cbs[i].port == port && priv->cbs[i].prio == prio) + return i; + + return -1; +} static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv) { int i; + if (priv->info->fixed_cbs_mapping) + return -1; + for (i = 0; i < priv->info->num_cbs_shapers; i++) if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope) return i; @@ -2157,14 +2199,20 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port, { struct sja1105_private *priv = ds->priv; struct sja1105_cbs_entry *cbs; + s64 port_transmit_rate_kbps; int index; if (!offload->enable) return sja1105_delete_cbs_shaper(priv, port, offload->queue); - index = sja1105_find_unused_cbs_shaper(priv); - if (index < 0) - return -ENOSPC; + /* The user may be replacing an existing shaper */ + index = sja1105_find_cbs_shaper(priv, port, offload->queue); + if (index < 0) { + /* That isn't the case - see if we can allocate a new one */ + index = sja1105_find_unused_cbs_shaper(priv); + if (index < 0) + return -ENOSPC; + } cbs = &priv->cbs[index]; cbs->port = port; @@ -2174,9 +2222,17 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port, */ cbs->credit_hi = offload->hicredit; cbs->credit_lo = abs(offload->locredit); - /* User space is in kbits/sec, hardware in bytes/sec */ - cbs->idle_slope = offload->idleslope * BYTES_PER_KBIT; - cbs->send_slope = abs(offload->sendslope * BYTES_PER_KBIT); + /* User space is in kbits/sec, while the hardware in bytes/sec times + * link speed. Since the given offload->sendslope is good only for the + * current link speed anyway, and user space is likely to reprogram it + * when that changes, don't even bother to track the port's link speed, + * but deduce the port transmit rate from idleslope - sendslope. 
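Concretely, with made-up figures: per IEEE 802.1Q credit-based shaping, sendslope = idleslope - portTransmitRate, so idleslope - sendslope recovers the port rate. Reserving 20 Mbit/s on a 1 Gbit/s port, the conversion below lands on hardware values of 2 and 122 (standalone, compilable check of the arithmetic):

#include <stdio.h>
#include <stdlib.h>

#define BYTES_PER_KBIT (1000LL / 8)                     /* 125 */

int main(void)
{
        long long idleslope = 20000;                    /* kbit/s, from tc-cbs */
        long long sendslope = idleslope - 1000000;      /* -980000 kbit/s */
        long long rate = idleslope - sendslope;         /* 1000000 kbit/s */

        long long idle_hw = idleslope * BYTES_PER_KBIT / rate;
        long long send_hw = llabs(sendslope * BYTES_PER_KBIT) / rate;

        printf("idle_slope=%lld send_slope=%lld\n", idle_hw, send_hw);
        return 0;                                       /* prints 2 and 122 */
}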
+ */ + port_transmit_rate_kbps = offload->idleslope - offload->sendslope; + cbs->idle_slope = div_s64(offload->idleslope * BYTES_PER_KBIT, + port_transmit_rate_kbps); + cbs->send_slope = div_s64(abs(offload->sendslope * BYTES_PER_KBIT), + port_transmit_rate_kbps); /* Convert the negative values from 64-bit 2's complement * to 32-bit 2's complement (for the case of 0x80000000 whose * negative is still negative). @@ -2241,6 +2297,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv, int rc, i; s64 now; + mutex_lock(&priv->fdb_lock); mutex_lock(&priv->mgmt_lock); mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; @@ -2353,6 +2410,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv, goto out; out: mutex_unlock(&priv->mgmt_lock); + mutex_unlock(&priv->fdb_lock); return rc; } @@ -2630,7 +2688,7 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot, } /* Transfer skb to the host port. */ - dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave); + dsa_enqueue_skb(skb, dsa_to_port(ds, port)->user); /* Wait until the switch has processed the frame */ do { @@ -2922,7 +2980,9 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to, { struct sja1105_l2_lookup_entry *l2_lookup; struct sja1105_table *table; - int match; + int match, rc; + + mutex_lock(&priv->fdb_lock); table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; l2_lookup = table->entries; @@ -2935,7 +2995,8 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to, if (match == table->entry_count) { NL_SET_ERR_MSG_MOD(extack, "Could not find FDB entry for unknown multicast"); - return -ENOSPC; + rc = -ENOSPC; + goto out; } if (flags.val & BR_MCAST_FLOOD) @@ -2943,10 +3004,13 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to, else l2_lookup[match].destports &= ~BIT(to); - return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, - l2_lookup[match].index, - &l2_lookup[match], - true); + rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, + l2_lookup[match].index, + &l2_lookup[match], true); +out: + mutex_unlock(&priv->fdb_lock); + + return rc; } static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port, @@ -3017,7 +3081,7 @@ static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port, * ref_clk pin. So port clocking needs to be initialized early, before * connecting to PHYs is attempted, otherwise they won't respond through MDIO. * Setting correct PHY link speed does not matter now. - * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY + * But dsa_user_phy_setup is called later than sja1105_setup, so the PHY * bindings are not yet parsed by DSA core. We need to parse early so that we * can populate the xMII mode parameters table. 
*/ @@ -3316,6 +3380,7 @@ static int sja1105_probe(struct spi_device *spi) mutex_init(&priv->ptp_data.lock); mutex_init(&priv->dynamic_config_lock); mutex_init(&priv->mgmt_lock); + mutex_init(&priv->fdb_lock); spin_lock_init(&priv->ts_id_lock); rc = sja1105_parse_dt(priv); diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c index 5ce29c8057a4..834b5c1b4db0 100644 --- a/drivers/net/dsa/sja1105/sja1105_spi.c +++ b/drivers/net/dsa/sja1105/sja1105_spi.c @@ -781,6 +781,7 @@ const struct sja1105_info sja1110a_info = { .tag_proto = DSA_TAG_PROTO_SJA1110, .can_limit_mcast_flood = true, .multiple_cascade_ports = true, + .fixed_cbs_mapping = true, .ptp_ts_bits = 32, .ptpegr_ts_bytes = 8, .max_frame_mem = SJA1110_MAX_FRAME_MEMORY, @@ -831,6 +832,7 @@ const struct sja1105_info sja1110b_info = { .tag_proto = DSA_TAG_PROTO_SJA1110, .can_limit_mcast_flood = true, .multiple_cascade_ports = true, + .fixed_cbs_mapping = true, .ptp_ts_bits = 32, .ptpegr_ts_bytes = 8, .max_frame_mem = SJA1110_MAX_FRAME_MEMORY, @@ -881,6 +883,7 @@ const struct sja1105_info sja1110c_info = { .tag_proto = DSA_TAG_PROTO_SJA1110, .can_limit_mcast_flood = true, .multiple_cascade_ports = true, + .fixed_cbs_mapping = true, .ptp_ts_bits = 32, .ptpegr_ts_bytes = 8, .max_frame_mem = SJA1110_MAX_FRAME_MEMORY, @@ -931,6 +934,7 @@ const struct sja1105_info sja1110d_info = { .tag_proto = DSA_TAG_PROTO_SJA1110, .can_limit_mcast_flood = true, .multiple_cascade_ports = true, + .fixed_cbs_mapping = true, .ptp_ts_bits = 32, .ptpegr_ts_bytes = 8, .max_frame_mem = SJA1110_MAX_FRAME_MEMORY, diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c index ef1a4a7c47b2..e6f29e4e508c 100644 --- a/drivers/net/dsa/vitesse-vsc73xx-core.c +++ b/drivers/net/dsa/vitesse-vsc73xx-core.c @@ -18,7 +18,6 @@ #include <linux/module.h> #include <linux/device.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_mdio.h> #include <linux/bitops.h> #include <linux/if_bridge.h> @@ -929,7 +928,8 @@ static void vsc73xx_get_strings(struct dsa_switch *ds, int port, u32 stringset, const struct vsc73xx_counter *cnt; struct vsc73xx *vsc = ds->priv; u8 indices[6]; - int i, j; + u8 *buf = data; + int i; u32 val; int ret; @@ -949,10 +949,7 @@ static void vsc73xx_get_strings(struct dsa_switch *ds, int port, u32 stringset, indices[5] = ((val >> 26) & 0x1f); /* TX counter 2 */ /* The first counters is the RX octets */ - j = 0; - strncpy(data + j * ETH_GSTRING_LEN, - "RxEtherStatsOctets", ETH_GSTRING_LEN); - j++; + ethtool_sprintf(&buf, "RxEtherStatsOctets"); /* Each port supports recording 3 RX counters and 3 TX counters, * figure out what counters we use in this set-up and return the @@ -962,23 +959,16 @@ static void vsc73xx_get_strings(struct dsa_switch *ds, int port, u32 stringset, */ for (i = 0; i < 3; i++) { cnt = vsc73xx_find_counter(vsc, indices[i], false); - if (cnt) - strncpy(data + j * ETH_GSTRING_LEN, - cnt->name, ETH_GSTRING_LEN); - j++; + ethtool_sprintf(&buf, "%s", cnt ? cnt->name : ""); } /* TX stats begins with the number of TX octets */ - strncpy(data + j * ETH_GSTRING_LEN, - "TxEtherStatsOctets", ETH_GSTRING_LEN); - j++; + ethtool_sprintf(&buf, "TxEtherStatsOctets"); for (i = 3; i < 6; i++) { cnt = vsc73xx_find_counter(vsc, indices[i], true); - if (cnt) - strncpy(data + j * ETH_GSTRING_LEN, - cnt->name, ETH_GSTRING_LEN); - j++; + ethtool_sprintf(&buf, "%s", cnt ? 
cnt->name : ""); + } } @@ -1038,6 +1028,31 @@ static int vsc73xx_get_max_mtu(struct dsa_switch *ds, int port) return 9600 - ETH_HLEN - ETH_FCS_LEN; } +static void vsc73xx_phylink_get_caps(struct dsa_switch *dsa, int port, + struct phylink_config *config) +{ + unsigned long *interfaces = config->supported_interfaces; + + if (port == 5) + return; + + if (port == CPU_PORT) { + __set_bit(PHY_INTERFACE_MODE_MII, interfaces); + __set_bit(PHY_INTERFACE_MODE_REVMII, interfaces); + __set_bit(PHY_INTERFACE_MODE_GMII, interfaces); + __set_bit(PHY_INTERFACE_MODE_RGMII, interfaces); + } + + if (port <= 4) { + /* Internal PHYs */ + __set_bit(PHY_INTERFACE_MODE_INTERNAL, interfaces); + /* phylib default */ + __set_bit(PHY_INTERFACE_MODE_GMII, interfaces); + } + + config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000; +} + static const struct dsa_switch_ops vsc73xx_ds_ops = { .get_tag_protocol = vsc73xx_get_tag_protocol, .setup = vsc73xx_setup, @@ -1051,6 +1066,7 @@ static const struct dsa_switch_ops vsc73xx_ds_ops = { .port_disable = vsc73xx_port_disable, .port_change_mtu = vsc73xx_change_mtu, .port_max_mtu = vsc73xx_get_max_mtu, + .phylink_get_caps = vsc73xx_phylink_get_caps, }; static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset) diff --git a/drivers/net/dsa/vitesse-vsc73xx-platform.c b/drivers/net/dsa/vitesse-vsc73xx-platform.c index bd4206e8f9af..755b7895a15a 100644 --- a/drivers/net/dsa/vitesse-vsc73xx-platform.c +++ b/drivers/net/dsa/vitesse-vsc73xx-platform.c @@ -112,16 +112,14 @@ static int vsc73xx_platform_probe(struct platform_device *pdev) return vsc73xx_probe(&vsc_platform->vsc); } -static int vsc73xx_platform_remove(struct platform_device *pdev) +static void vsc73xx_platform_remove(struct platform_device *pdev) { struct vsc73xx_platform *vsc_platform = platform_get_drvdata(pdev); if (!vsc_platform) - return 0; + return; vsc73xx_remove(&vsc_platform->vsc); - - return 0; } static void vsc73xx_platform_shutdown(struct platform_device *pdev) @@ -160,7 +158,7 @@ MODULE_DEVICE_TABLE(of, vsc73xx_of_match); static struct platform_driver vsc73xx_platform_driver = { .probe = vsc73xx_platform_probe, - .remove = vsc73xx_platform_remove, + .remove_new = vsc73xx_platform_remove, .shutdown = vsc73xx_platform_shutdown, .driver = { .name = "vsc73xx-platform", diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c index fa622639d640..96db032b478f 100644 --- a/drivers/net/dsa/xrs700x/xrs700x.c +++ b/drivers/net/dsa/xrs700x/xrs700x.c @@ -7,7 +7,7 @@ #include <net/dsa.h> #include <linux/etherdevice.h> #include <linux/if_bridge.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/netdev_features.h> #include <linux/if_hsr.h> #include "xrs700x.h" @@ -548,12 +548,13 @@ static void xrs700x_bridge_leave(struct dsa_switch *ds, int port, } static int xrs700x_hsr_join(struct dsa_switch *ds, int port, - struct net_device *hsr) + struct net_device *hsr, + struct netlink_ext_ack *extack) { unsigned int val = XRS_HSR_CFG_HSR_PRP; struct dsa_port *partner = NULL, *dp; struct xrs700x *priv = ds->priv; - struct net_device *slave; + struct net_device *user; int ret, i, hsr_pair[2]; enum hsr_version ver; bool fwd = false; @@ -562,16 +563,21 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port, if (ret) return ret; - /* Only ports 1 and 2 can be HSR/PRP redundant ports. 
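The vsc73xx conversion just above swaps the manual "data + j * ETH_GSTRING_LEN" bookkeeping for ethtool_sprintf(), which formats into the current slot and advances the cursor by one ETH_GSTRING_LEN per call — the index variable disappears entirely. Reduced to its skeleton (stat names invented):

static void foo_get_strings(struct dsa_switch *ds, int port,
                            u32 stringset, u8 *data)
{
        u8 *buf = data;         /* ethtool_sprintf() advances this */
        int i;

        if (stringset != ETH_SS_STATS)
                return;

        ethtool_sprintf(&buf, "RxOctets");
        for (i = 0; i < 3; i++)
                ethtool_sprintf(&buf, "rx_counter%d", i);
}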
*/ - if (port != 1 && port != 2) + if (port != 1 && port != 2) { + NL_SET_ERR_MSG_MOD(extack, + "Only ports 1 and 2 can offload HSR/PRP"); return -EOPNOTSUPP; + } - if (ver == HSR_V1) + if (ver == HSR_V1) { val |= XRS_HSR_CFG_HSR; - else if (ver == PRP_V1) + } else if (ver == PRP_V1) { val |= XRS_HSR_CFG_PRP; - else + } else { + NL_SET_ERR_MSG_MOD(extack, + "Only HSR v1 and PRP v1 can be offloaded"); return -EOPNOTSUPP; + } dsa_hsr_foreach_port(dp, ds, hsr) { if (dp->index != port) { @@ -632,8 +638,8 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port, hsr_pair[0] = port; hsr_pair[1] = partner->index; for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) { - slave = dsa_to_port(ds, hsr_pair[i])->slave; - slave->features |= XRS7000X_SUPPORTED_HSR_FEATURES; + user = dsa_to_port(ds, hsr_pair[i])->user; + user->features |= XRS7000X_SUPPORTED_HSR_FEATURES; } return 0; @@ -644,7 +650,7 @@ static int xrs700x_hsr_leave(struct dsa_switch *ds, int port, { struct dsa_port *partner = NULL, *dp; struct xrs700x *priv = ds->priv; - struct net_device *slave; + struct net_device *user; int i, hsr_pair[2]; unsigned int val; @@ -686,8 +692,8 @@ static int xrs700x_hsr_leave(struct dsa_switch *ds, int port, hsr_pair[0] = port; hsr_pair[1] = partner->index; for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) { - slave = dsa_to_port(ds, hsr_pair[i])->slave; - slave->features &= ~XRS7000X_SUPPORTED_HSR_FEATURES; + user = dsa_to_port(ds, hsr_pair[i])->user; + user->features &= ~XRS7000X_SUPPORTED_HSR_FEATURES; } return 0; diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index c4b1b0aa438a..768454aa36d6 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -202,4 +202,5 @@ static void __exit dummy_cleanup_module(void) module_init(dummy_init_module); module_exit(dummy_cleanup_module); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Dummy netdevice driver which discards all packets sent to it"); MODULE_ALIAS_RTNL_LINK(DRV_NAME); diff --git a/drivers/net/eql.c b/drivers/net/eql.c index ca3e4700a813..3c2efda916f1 100644 --- a/drivers/net/eql.c +++ b/drivers/net/eql.c @@ -607,4 +607,5 @@ static void __exit eql_cleanup_module(void) module_init(eql_init_module); module_exit(eql_cleanup_module); +MODULE_DESCRIPTION("Equalizer Load-balancer for serial network interfaces"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index af603256b724..2874680ef24d 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -811,7 +811,7 @@ static int ax_init_dev(struct net_device *dev) return ret; } -static int ax_remove(struct platform_device *pdev) +static void ax_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct ei_device *ei_local = netdev_priv(dev); @@ -832,8 +832,6 @@ static int ax_remove(struct platform_device *pdev) platform_set_drvdata(pdev, NULL); free_netdev(dev); - - return 0; } /* @@ -1011,7 +1009,7 @@ static struct platform_driver axdrv = { .name = "ax88796", }, .probe = ax_probe, - .remove = ax_remove, + .remove_new = ax_remove, .suspend = ax_suspend, .resume = ax_resume, }; diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c index 217838b28220..5a0fa995e643 100644 --- a/drivers/net/ethernet/8390/mcf8390.c +++ b/drivers/net/ethernet/8390/mcf8390.c @@ -441,7 +441,7 @@ static int mcf8390_probe(struct platform_device *pdev) return 0; } -static int mcf8390_remove(struct platform_device *pdev) +static void mcf8390_remove(struct platform_device 
*pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct resource *mem; @@ -450,7 +450,6 @@ static int mcf8390_remove(struct platform_device *pdev) mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(mem->start, resource_size(mem)); free_netdev(dev); - return 0; } static struct platform_driver mcf8390_drv = { @@ -458,7 +457,7 @@ static struct platform_driver mcf8390_drv = { .name = "mcf8390", }, .probe = mcf8390_probe, - .remove = mcf8390_remove, + .remove_new = mcf8390_remove, }; module_platform_driver(mcf8390_drv); diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index 7d89ec1cf273..350683a09d2e 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -823,7 +823,7 @@ static int __init ne_drv_probe(struct platform_device *pdev) return 0; } -static int ne_drv_remove(struct platform_device *pdev) +static void ne_drv_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); @@ -842,7 +842,6 @@ static int ne_drv_remove(struct platform_device *pdev) release_region(dev->base_addr, NE_IO_EXTENT); free_netdev(dev); } - return 0; } /* Remove unused devices or all if true. */ @@ -895,7 +894,7 @@ static int ne_drv_resume(struct platform_device *pdev) #endif static struct platform_driver ne_driver = { - .remove = ne_drv_remove, + .remove_new = ne_drv_remove, .suspend = ne_drv_suspend, .resume = ne_drv_resume, .driver = { diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c index 2c6bd36d2f31..65f56a98c0a0 100644 --- a/drivers/net/ethernet/8390/ne2k-pci.c +++ b/drivers/net/ethernet/8390/ne2k-pci.c @@ -731,18 +731,4 @@ static struct pci_driver ne2k_driver = { .id_table = ne2k_pci_tbl, .driver.pm = &ne2k_pci_pm_ops, }; - - -static int __init ne2k_pci_init(void) -{ - return pci_register_driver(&ne2k_driver); -} - - -static void __exit ne2k_pci_cleanup(void) -{ - pci_unregister_driver(&ne2k_driver); -} - -module_init(ne2k_pci_init); -module_exit(ne2k_pci_cleanup); +module_pci_driver(ne2k_driver); diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c index c6f8f852bff1..e03193da5874 100644 --- a/drivers/net/ethernet/actions/owl-emac.c +++ b/drivers/net/ethernet/actions/owl-emac.c @@ -1582,15 +1582,13 @@ static int owl_emac_probe(struct platform_device *pdev) return 0; } -static int owl_emac_remove(struct platform_device *pdev) +static void owl_emac_remove(struct platform_device *pdev) { struct owl_emac_priv *priv = platform_get_drvdata(pdev); netif_napi_del(&priv->napi); phy_disconnect(priv->netdev->phydev); cancel_work_sync(&priv->mac_reset_task); - - return 0; } static const struct of_device_id owl_emac_of_match[] = { @@ -1609,7 +1607,7 @@ static struct platform_driver owl_emac_driver = { .pm = &owl_emac_pm_ops, }, .probe = owl_emac_probe, - .remove = owl_emac_remove, + .remove_new = owl_emac_remove, }; module_platform_driver(owl_emac_driver); diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c index f5c2d7a9abc1..d7c274af6d4d 100644 --- a/drivers/net/ethernet/adi/adin1110.c +++ b/drivers/net/ethernet/adi/adin1110.c @@ -294,7 +294,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv) { struct adin1110_priv *priv = port_priv->priv; u32 header_len = ADIN1110_RD_HEADER_LEN; - struct spi_transfer t; + struct spi_transfer t = {0}; u32 frame_size_no_fcs; struct sk_buff *rxb; u32 frame_size; @@ -739,7 +739,7 @@ static int adin1110_broadcasts_filter(struct 
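The .remove → .remove_new churn running through this section (mcf8390, ne, au1000, owl-emac, greth, vsc73xx, rzn1_a5psw, ...) is the tree-wide migration to a void-returning platform remove callback: the old int return was ignored by the driver core anyway — the device gets unbound regardless — so the new prototype stops pretending errors can be reported. The target shape (foo driver hypothetical):

static void foo_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);

        unregister_netdev(ndev);
        free_netdev(ndev);
        /* nothing to return: unbinding cannot fail from here */
}

static struct platform_driver foo_driver = {
        .probe          = foo_probe,
        .remove_new     = foo_remove,
        .driver         = { .name = "foo" },
};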
adin1110_port_priv *port_priv, u32 port_rules = 0; u8 mask[ETH_ALEN]; - memset(mask, 0xFF, ETH_ALEN); + eth_broadcast_addr(mask); if (accept_broadcast && port_priv->state == BR_STATE_FORWARDING) port_rules = adin1110_port_rules(port_priv, true, true); @@ -760,7 +760,7 @@ static int adin1110_set_mac_address(struct net_device *netdev, return -EADDRNOTAVAIL; eth_hw_addr_set(netdev, dev_addr); - memset(mask, 0xFF, ETH_ALEN); + eth_broadcast_addr(mask); mac_slot = (!port_priv->nr) ? ADIN_MAC_P1_ADDR_SLOT : ADIN_MAC_P2_ADDR_SLOT; port_rules = adin1110_port_rules(port_priv, true, false); @@ -1271,7 +1271,7 @@ static int adin1110_port_set_blocking_state(struct adin1110_port_priv *port_priv goto out; /* Allow only BPDUs to be passed to the CPU */ - memset(mask, 0xFF, ETH_ALEN); + eth_broadcast_addr(mask); port_rules = adin1110_port_rules(port_priv, true, false); ret = adin1110_write_mac_address(port_priv, mac_slot, mac, mask, port_rules); @@ -1385,8 +1385,8 @@ static int adin1110_fdb_add(struct adin1110_port_priv *port_priv, return -ENOMEM; other_port = priv->ports[!port_priv->nr]; - port_rules = adin1110_port_rules(port_priv, false, true); - memset(mask, 0xFF, ETH_ALEN); + port_rules = adin1110_port_rules(other_port, false, true); + eth_broadcast_addr(mask); return adin1110_write_mac_address(other_port, mac_nr, (u8 *)fdb->addr, mask, port_rules); diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index aa0d2f3aaeaa..27af7746d645 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -29,9 +29,9 @@ #include <linux/io.h> #include <linux/crc32.h> #include <linux/mii.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/of_net.h> -#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/slab.h> #include <asm/cacheflush.h> #include <asm/byteorder.h> @@ -1525,7 +1525,7 @@ error1: return err; } -static int greth_of_remove(struct platform_device *of_dev) +static void greth_of_remove(struct platform_device *of_dev) { struct net_device *ndev = platform_get_drvdata(of_dev); struct greth_private *greth = netdev_priv(ndev); @@ -1544,8 +1544,6 @@ static int greth_of_remove(struct platform_device *of_dev) of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0])); free_netdev(ndev); - - return 0; } static const struct of_device_id greth_of_match[] = { @@ -1566,7 +1564,7 @@ static struct platform_driver greth_of_driver = { .of_match_table = greth_of_match, }, .probe = greth_of_probe, - .remove = greth_of_remove, + .remove_new = greth_of_remove, }; module_platform_driver(greth_of_driver); diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index 5fab589b3ddf..3d9220f9c9fe 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -3982,8 +3982,7 @@ static int et131x_pci_setup(struct pci_dev *pdev, } adapter->mii_bus->name = "et131x_eth_mii"; - snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x", - (adapter->pdev->bus->number << 8) | adapter->pdev->devfn); + snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(adapter->pdev)); adapter->mii_bus->priv = netdev; adapter->mii_bus->read = et131x_mdio_read; adapter->mii_bus->write = et131x_mdio_write; diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c index a30d0f172986..78231c85234d 100644 --- a/drivers/net/ethernet/alacritech/slicoss.c +++ b/drivers/net/ethernet/alacritech/slicoss.c @@ 
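The adin1110 hunks above replace memset(mask, 0xFF, ETH_ALEN) with the <linux/etherdevice.h> helper, which states the intent — an all-ones MAC address mask — at the call site:

u8 mask[ETH_ALEN];

eth_broadcast_addr(mask);       /* mask = ff:ff:ff:ff:ff:ff */
/* byte-for-byte identical to memset(mask, 0xff, ETH_ALEN) */

Note the last adin1110 hunk also carries a real fix alongside the cleanup: the port rules for the mirrored FDB entry are now computed from other_port rather than port_priv.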
-1520,10 +1520,8 @@ static void slic_get_ethtool_stats(struct net_device *dev, static void slic_get_strings(struct net_device *dev, u32 stringset, u8 *data) { - if (stringset == ETH_SS_STATS) { + if (stringset == ETH_SS_STATS) memcpy(data, slic_stats_strings, sizeof(slic_stats_strings)); - data += sizeof(slic_stats_strings); - } } static void slic_get_drvinfo(struct net_device *dev, diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index a94c62956eed..d761c08fe5c1 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -1083,7 +1083,7 @@ out: return ret; } -static int emac_remove(struct platform_device *pdev) +static void emac_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct emac_board_info *db = netdev_priv(ndev); @@ -1101,7 +1101,6 @@ static int emac_remove(struct platform_device *pdev) free_netdev(ndev); dev_dbg(&pdev->dev, "released and freed device\n"); - return 0; } static int emac_suspend(struct platform_device *dev, pm_message_t state) @@ -1143,7 +1142,7 @@ static struct platform_driver emac_driver = { .of_match_table = emac_of_match, }, .probe = emac_probe, - .remove = emac_remove, + .remove_new = emac_remove, .suspend = emac_suspend, .resume = emac_resume, }; diff --git a/drivers/net/ethernet/altera/Kconfig b/drivers/net/ethernet/altera/Kconfig index 17985319088c..4ef819a9a1ad 100644 --- a/drivers/net/ethernet/altera/Kconfig +++ b/drivers/net/ethernet/altera/Kconfig @@ -2,6 +2,7 @@ config ALTERA_TSE tristate "Altera Triple-Speed Ethernet MAC support" depends on HAS_DMA + depends on HAS_IOMEM select PHYLIB select PHYLINK select PCS_LYNX diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h index db5eed06e92d..82f2363a45cd 100644 --- a/drivers/net/ethernet/altera/altera_tse.h +++ b/drivers/net/ethernet/altera/altera_tse.h @@ -472,7 +472,7 @@ struct altera_tse_private { /* ethtool msglvl option */ u32 msg_enable; - struct altera_dmaops *dmaops; + const struct altera_dmaops *dmaops; struct phylink *phylink; struct phylink_config phylink_config; diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 2e15800e5310..1c8763be0e4b 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -29,13 +29,13 @@ #include <linux/mii.h> #include <linux/mdio/mdio-regmap.h> #include <linux/netdevice.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/of_mdio.h> #include <linux/of_net.h> -#include <linux/of_platform.h> #include <linux/pcs-lynx.h> #include <linux/phy.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/regmap.h> #include <linux/skbuff.h> #include <asm/cacheflush.h> @@ -82,8 +82,6 @@ MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list"); #define TXQUEUESTOP_THRESHHOLD 2 -static const struct of_device_id altera_tse_ids[]; - static inline u32 tse_tx_avail(struct altera_tse_private *priv) { return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1; @@ -1133,7 +1131,6 @@ static int request_and_map(struct platform_device *pdev, const char *name, */ static int altera_tse_probe(struct platform_device *pdev) { - const struct of_device_id *of_id = NULL; struct regmap_config pcs_regmap_cfg; struct altera_tse_private *priv; struct mdio_regmap_config mrc; @@ -1159,11 +1156,7 @@ static int altera_tse_probe(struct 
platform_device *pdev) priv->dev = ndev; priv->msg_enable = netif_msg_init(debug, default_msg_level); - of_id = of_match_device(altera_tse_ids, &pdev->dev); - - if (of_id) - priv->dmaops = (struct altera_dmaops *)of_id->data; - + priv->dmaops = device_get_match_data(&pdev->dev); if (priv->dmaops && priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) { @@ -1464,7 +1457,7 @@ err_free_netdev: /* Remove Altera TSE MAC device */ -static int altera_tse_remove(struct platform_device *pdev) +static void altera_tse_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct altera_tse_private *priv = netdev_priv(ndev); @@ -1476,8 +1469,6 @@ static int altera_tse_remove(struct platform_device *pdev) lynx_pcs_destroy(priv->pcs); free_netdev(ndev); - - return 0; } static const struct altera_dmaops altera_dtype_sgdma = { @@ -1528,7 +1519,7 @@ MODULE_DEVICE_TABLE(of, altera_tse_ids); static struct platform_driver altera_tse_driver = { .probe = altera_tse_probe, - .remove = altera_tse_remove, + .remove_new = altera_tse_remove, .suspend = NULL, .resume = NULL, .driver = { diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c index 3d6f0a466a9e..f9f886289b97 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c @@ -328,9 +328,6 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, * compare it to the stored version, just create the meta */ if (io_sq->disable_meta_caching) { - if (unlikely(!ena_tx_ctx->meta_valid)) - return -EINVAL; - *have_meta = true; return ena_com_create_meta(io_sq, ena_meta); } diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index d19593fae226..c44c44e26ddf 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -74,6 +74,8 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info); static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter, int first_index, int count); +static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter, + int first_index, int count); /* Increase a stat by cnt while holding syncp seqlock on 32bit machines */ static void ena_increase_stat(u64 *statp, u64 cnt, @@ -457,23 +459,22 @@ static void ena_init_all_xdp_queues(struct ena_adapter *adapter) static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter) { + u32 xdp_first_ring = adapter->xdp_first_ring; + u32 xdp_num_queues = adapter->xdp_num_queues; int rc = 0; - rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring, - adapter->xdp_num_queues); + rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues); if (rc) goto setup_err; - rc = ena_create_io_tx_queues_in_range(adapter, - adapter->xdp_first_ring, - adapter->xdp_num_queues); + rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues); if (rc) goto create_err; return 0; create_err: - ena_free_all_io_tx_resources(adapter); + ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues); setup_err: return rc; } @@ -1492,11 +1493,6 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, if (unlikely(!skb)) return NULL; - /* sync this buffer for CPU use */ - dma_sync_single_for_cpu(rx_ring->dev, - dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset, - len, - DMA_FROM_DEVICE); skb_copy_to_linear_data(skb, buf_addr + 
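altera_tse_probe() above drops the of_match_device() dance in favor of device_get_match_data() from <linux/property.h>, which returns the matched id entry's .data pointer directly (NULL when nothing matched) and works for OF and ACPI alike — this is also what lets the forward declaration of altera_tse_ids go away:

/* before: look the table up, then unpack ->data by hand */
of_id = of_match_device(altera_tse_ids, &pdev->dev);
if (of_id)
        priv->dmaops = (struct altera_dmaops *)of_id->data;

/* after: one call; the cast also disappears because dmaops is now
 * declared const in struct altera_tse_private */
priv->dmaops = device_get_match_data(&pdev->dev);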
buf_offset, len); dma_sync_single_for_device(rx_ring->dev, dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset, @@ -1515,17 +1511,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom); - pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr); - /* If XDP isn't loaded try to reuse part of the RX buffer */ reuse_rx_buf_page = !is_xdp_loaded && ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset); - dma_sync_single_for_cpu(rx_ring->dev, - pre_reuse_paddr + pkt_offset, - len, - DMA_FROM_DEVICE); - if (!reuse_rx_buf_page) ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC); @@ -1671,20 +1660,23 @@ static void ena_set_rx_hash(struct ena_ring *rx_ring, } } -static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp) +static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u16 num_descs) { struct ena_rx_buffer *rx_info; int ret; + /* XDP multi-buffer packets not supported */ + if (unlikely(num_descs > 1)) { + netdev_err_once(rx_ring->adapter->netdev, + "xdp: dropped unsupported multi-buffer packets\n"); + ena_increase_stat(&rx_ring->rx_stats.xdp_drop, 1, &rx_ring->syncp); + return ENA_XDP_DROP; + } + rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; xdp_prepare_buff(xdp, page_address(rx_info->page), rx_info->buf_offset, rx_ring->ena_bufs[0].len, false); - /* If for some reason we received a bigger packet than - * we expect, then we simply drop it - */ - if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU)) - return ENA_XDP_DROP; ret = ena_xdp_execute(rx_ring, xdp); @@ -1719,6 +1711,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, int xdp_flags = 0; int total_len = 0; int xdp_verdict; + u8 pkt_offset; int rc = 0; int i; @@ -1745,15 +1738,21 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, /* First descriptor might have an offset set by the device */ rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; - rx_info->buf_offset += ena_rx_ctx.pkt_offset; + pkt_offset = ena_rx_ctx.pkt_offset; + rx_info->buf_offset += pkt_offset; netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, "rx_poll: q %d got packet from ena. 
descs #: %d l3 proto %d l4 proto %d hash: %x\n", rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, ena_rx_ctx.l4_proto, ena_rx_ctx.hash); + dma_sync_single_for_cpu(rx_ring->dev, + dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset, + rx_ring->ena_bufs[0].len, + DMA_FROM_DEVICE); + if (ena_xdp_present_ring(rx_ring)) - xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp); + xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp, ena_rx_ctx.descs); /* allocate skb and fill it */ if (xdp_verdict == ENA_XDP_PASS) @@ -1777,7 +1776,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, if (xdp_verdict & ENA_XDP_FORWARDED) { ena_unmap_rx_buff_attrs(rx_ring, &rx_ring->rx_buffer_info[req_id], - 0); + DMA_ATTR_SKIP_CPU_SYNC); rx_ring->rx_buffer_info[req_id].page = NULL; } } @@ -1828,11 +1827,14 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, } if (xdp_flags & ENA_XDP_REDIRECT) - xdp_do_flush_map(); + xdp_do_flush(); return work_done; error: + if (xdp_flags & ENA_XDP_REDIRECT) + xdp_do_flush(); + adapter = netdev_priv(rx_ring->netdev); if (rc == -ENOSPC) { @@ -3267,7 +3269,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd host_info = ena_dev->host_attr.host_info; - host_info->bdf = (pdev->bus->number << 8) | pdev->devfn; + host_info->bdf = pci_dev_id(pdev); host_info->os_type = ENA_ADMIN_OS_LINUX; host_info->kernel_ver = LINUX_VERSION_CODE; strscpy(host_info->kernel_ver_str, utsname()->version, diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 248b715b4d68..33c923e1261a 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -14,6 +14,7 @@ #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/skbuff.h> +#include <net/xdp.h> #include <uapi/linux/bpf.h> #include "ena_com.h" diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c index ec704222925d..751454d305c6 100644 --- a/drivers/net/ethernet/amd/atarilance.c +++ b/drivers/net/ethernet/amd/atarilance.c @@ -367,7 +367,7 @@ static void *slow_memcpy( void *dst, const void *src, size_t len ) } -struct net_device * __init atarilance_probe(void) +static struct net_device * __init atarilance_probe(void) { int i; static int found; diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index c5cec4e79489..85c978149bf6 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1323,7 +1323,7 @@ out: return err; } -static int au1000_remove(struct platform_device *pdev) +static void au1000_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct au1000_private *aup = netdev_priv(dev); @@ -1359,13 +1359,11 @@ static int au1000_remove(struct platform_device *pdev) release_mem_region(macen->start, resource_size(macen)); free_netdev(dev); - - return 0; } static struct platform_driver au1000_eth_driver = { .probe = au1000_probe, - .remove = au1000_remove, + .remove_new = au1000_remove, .driver = { .name = "au1000-eth", }, diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c index 045fe133f6ee..5beadabc2136 100644 --- a/drivers/net/ethernet/amd/pds_core/adminq.c +++ b/drivers/net/ethernet/amd/pds_core/adminq.c @@ -146,7 +146,7 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data) } queue_work(pdsc->wq, &qcq->work); - 
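Two small idioms surface in the ENA hunk above. xdp_do_flush_map() is the older alias of xdp_do_flush(); besides the rename, the error path now also flushes pending XDP redirects. And host_info->bdf switches to pci_dev_id(), the <linux/pci.h> helper for the open-coded bus/devfn packing that the et131x hunk earlier in this section replaces the same way:

/* pci_dev_id(pdev) is exactly this 16-bit BDF packing: */
static inline u16 foo_bdf(const struct pci_dev *pdev)
{
        return (pdev->bus->number << 8) | pdev->devfn;
}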
pds_core_intr_mask(&pdsc->intr_ctrl[irq], PDS_CORE_INTR_MASK_CLEAR); + pds_core_intr_mask(&pdsc->intr_ctrl[qcq->intx], PDS_CORE_INTR_MASK_CLEAR); return IRQ_HANDLED; } diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c index 561af8e5b3ea..11c23a7f3172 100644 --- a/drivers/net/ethernet/amd/pds_core/auxbus.c +++ b/drivers/net/ethernet/amd/pds_core/auxbus.c @@ -8,24 +8,19 @@ /** * pds_client_register - Link the client to the firmware - * @pf_pdev: ptr to the PF driver struct + * @pf: ptr to the PF driver's private data struct * @devname: name that includes service into, e.g. pds_core.vDPA * - * Return: 0 on success, or + * Return: positive client ID (ci) on success, or * negative for error */ -int pds_client_register(struct pci_dev *pf_pdev, char *devname) +int pds_client_register(struct pdsc *pf, char *devname) { union pds_core_adminq_comp comp = {}; union pds_core_adminq_cmd cmd = {}; - struct pdsc *pf; int err; u16 ci; - pf = pci_get_drvdata(pf_pdev); - if (pf->state) - return -ENXIO; - cmd.client_reg.opcode = PDS_AQ_CMD_CLIENT_REG; strscpy(cmd.client_reg.devname, devname, sizeof(cmd.client_reg.devname)); @@ -53,23 +48,18 @@ EXPORT_SYMBOL_GPL(pds_client_register); /** * pds_client_unregister - Unlink the client from the firmware - * @pf_pdev: ptr to the PF driver struct + * @pf: ptr to the PF driver's private data struct * @client_id: id returned from pds_client_register() * * Return: 0 on success, or * negative for error */ -int pds_client_unregister(struct pci_dev *pf_pdev, u16 client_id) +int pds_client_unregister(struct pdsc *pf, u16 client_id) { union pds_core_adminq_comp comp = {}; union pds_core_adminq_cmd cmd = {}; - struct pdsc *pf; int err; - pf = pci_get_drvdata(pf_pdev); - if (pf->state) - return -ENXIO; - cmd.client_unreg.opcode = PDS_AQ_CMD_CLIENT_UNREG; cmd.client_unreg.client_id = cpu_to_le16(client_id); @@ -198,7 +188,7 @@ int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf) padev = pf->vfs[cf->vf_id].padev; if (padev) { - pds_client_unregister(pf->pdev, padev->client_id); + pds_client_unregister(pf, padev->client_id); auxiliary_device_delete(&padev->aux_dev); auxiliary_device_uninit(&padev->aux_dev); padev->client_id = 0; @@ -243,7 +233,7 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf) */ snprintf(devname, sizeof(devname), "%s.%s.%d", PDS_CORE_DRV_NAME, pf->viftype_status[vt].name, cf->uid); - client_id = pds_client_register(pf->pdev, devname); + client_id = pds_client_register(pf, devname); if (client_id < 0) { err = client_id; goto out_unlock; @@ -252,7 +242,7 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf) padev = pdsc_auxbus_dev_register(cf, pf, client_id, pf->viftype_status[vt].name); if (IS_ERR(padev)) { - pds_client_unregister(pf->pdev, client_id); + pds_client_unregister(pf, client_id); err = PTR_ERR(padev); goto out_unlock; } diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c index f2c79456d745..0d2091e9eb28 100644 --- a/drivers/net/ethernet/amd/pds_core/core.c +++ b/drivers/net/ethernet/amd/pds_core/core.c @@ -152,11 +152,8 @@ void pdsc_qcq_free(struct pdsc *pdsc, struct pdsc_qcq *qcq) dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa); - if (qcq->cq.info) - vfree(qcq->cq.info); - - if (qcq->q.info) - vfree(qcq->q.info); + vfree(qcq->cq.info); + vfree(qcq->q.info); memset(qcq, 0, sizeof(*qcq)); } @@ -445,12 +442,13 @@ int pdsc_setup(struct pdsc *pdsc, bool init) goto err_out_teardown; /* Set up the VIFs */ - err = 
pdsc_viftypes_init(pdsc); - if (err) - goto err_out_teardown; + if (init) { + err = pdsc_viftypes_init(pdsc); + if (err) + goto err_out_teardown; - if (init) pdsc_debugfs_add_viftype(pdsc); + } clear_bit(PDSC_S_FW_DEAD, &pdsc->state); return 0; @@ -464,12 +462,15 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing) { int i; - pdsc_devcmd_reset(pdsc); + if (!pdsc->pdev->is_virtfn) + pdsc_devcmd_reset(pdsc); pdsc_qcq_free(pdsc, &pdsc->notifyqcq); pdsc_qcq_free(pdsc, &pdsc->adminqcq); - kfree(pdsc->viftype_status); - pdsc->viftype_status = NULL; + if (removing) { + kfree(pdsc->viftype_status); + pdsc->viftype_status = NULL; + } if (pdsc->intr_info) { for (i = 0; i < pdsc->nintrs; i++) @@ -511,7 +512,7 @@ void pdsc_stop(struct pdsc *pdsc) PDS_CORE_INTR_MASK_SET); } -static void pdsc_fw_down(struct pdsc *pdsc) +void pdsc_fw_down(struct pdsc *pdsc) { union pds_core_notifyq_comp reset_event = { .reset.ecode = cpu_to_le16(PDS_EVENT_RESET), @@ -519,19 +520,23 @@ static void pdsc_fw_down(struct pdsc *pdsc) }; if (test_and_set_bit(PDSC_S_FW_DEAD, &pdsc->state)) { - dev_err(pdsc->dev, "%s: already happening\n", __func__); + dev_warn(pdsc->dev, "%s: already happening\n", __func__); return; } + if (pdsc->pdev->is_virtfn) + return; + /* Notify clients of fw_down */ - devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc); + if (pdsc->fw_reporter) + devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc); pdsc_notify(PDS_EVENT_RESET, &reset_event); pdsc_stop(pdsc); pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY); } -static void pdsc_fw_up(struct pdsc *pdsc) +void pdsc_fw_up(struct pdsc *pdsc) { union pds_core_notifyq_comp reset_event = { .reset.ecode = cpu_to_le16(PDS_EVENT_RESET), @@ -544,6 +549,11 @@ static void pdsc_fw_up(struct pdsc *pdsc) return; } + if (pdsc->pdev->is_virtfn) { + clear_bit(PDSC_S_FW_DEAD, &pdsc->state); + return; + } + err = pdsc_setup(pdsc, PDSC_SETUP_RECOVERY); if (err) goto err_out; @@ -554,8 +564,9 @@ static void pdsc_fw_up(struct pdsc *pdsc) /* Notify clients of fw_up */ pdsc->fw_recoveries++; - devlink_health_reporter_state_update(pdsc->fw_reporter, - DEVLINK_HEALTH_REPORTER_STATE_HEALTHY); + if (pdsc->fw_reporter) + devlink_health_reporter_state_update(pdsc->fw_reporter, + DEVLINK_HEALTH_REPORTER_STATE_HEALTHY); pdsc_notify(PDS_EVENT_RESET, &reset_event); return; @@ -564,6 +575,18 @@ err_out: pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY); } +static void pdsc_check_pci_health(struct pdsc *pdsc) +{ + u8 fw_status = ioread8(&pdsc->info_regs->fw_status); + + /* is PCI broken? 
*/ + if (fw_status != PDS_RC_BAD_PCI) + return; + + pdsc_reset_prepare(pdsc->pdev); + pdsc_reset_done(pdsc->pdev); +} + void pdsc_health_thread(struct work_struct *work) { struct pdsc *pdsc = container_of(work, struct pdsc, health_work); @@ -590,6 +613,8 @@ void pdsc_health_thread(struct work_struct *work) pdsc_fw_down(pdsc); } + pdsc_check_pci_health(pdsc); + pdsc->fw_generation = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION; out_unlock: diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h index e545fafc4819..e35d3e7006bf 100644 --- a/drivers/net/ethernet/amd/pds_core/core.h +++ b/drivers/net/ethernet/amd/pds_core/core.h @@ -15,7 +15,7 @@ #define PDSC_DRV_DESCRIPTION "AMD/Pensando Core Driver" #define PDSC_WATCHDOG_SECS 5 -#define PDSC_QUEUE_NAME_MAX_SZ 32 +#define PDSC_QUEUE_NAME_MAX_SZ 16 #define PDSC_ADMINQ_MIN_LENGTH 16 /* must be a power of two */ #define PDSC_NOTIFYQ_LENGTH 64 /* must be a power of two */ #define PDSC_TEARDOWN_RECOVERY false @@ -283,6 +283,9 @@ int pdsc_devcmd_reset(struct pdsc *pdsc); int pdsc_dev_reinit(struct pdsc *pdsc); int pdsc_dev_init(struct pdsc *pdsc); +void pdsc_reset_prepare(struct pci_dev *pdev); +void pdsc_reset_done(struct pci_dev *pdev); + int pdsc_intr_alloc(struct pdsc *pdsc, char *name, irq_handler_t handler, void *data); void pdsc_intr_free(struct pdsc *pdsc, int index); @@ -309,4 +312,8 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data); int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw, struct netlink_ext_ack *extack); + +void pdsc_fw_down(struct pdsc *pdsc); +void pdsc_fw_up(struct pdsc *pdsc); + #endif /* _PDSC_H_ */ diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c index debe5216fe29..31940b857e0e 100644 --- a/drivers/net/ethernet/amd/pds_core/dev.c +++ b/drivers/net/ethernet/amd/pds_core/dev.c @@ -42,6 +42,8 @@ int pdsc_err_to_errno(enum pds_core_status_code code) return -ERANGE; case PDS_RC_BAD_ADDR: return -EFAULT; + case PDS_RC_BAD_PCI: + return -ENXIO; case PDS_RC_EOPCODE: case PDS_RC_EINTR: case PDS_RC_DEV_CMD: @@ -62,7 +64,7 @@ bool pdsc_is_fw_running(struct pdsc *pdsc) /* Firmware is useful only if the running bit is set and * fw_status != 0xff (bad PCI read) */ - return (pdsc->fw_status != 0xff) && + return (pdsc->fw_status != PDS_RC_BAD_PCI) && (pdsc->fw_status & PDS_CORE_FW_STS_F_RUNNING); } @@ -121,24 +123,26 @@ static const char *pdsc_devcmd_str(int opcode) } } -static int pdsc_devcmd_wait(struct pdsc *pdsc, int max_seconds) +static int pdsc_devcmd_wait(struct pdsc *pdsc, u8 opcode, int max_seconds) { struct device *dev = pdsc->dev; unsigned long start_time; unsigned long max_wait; unsigned long duration; int timeout = 0; + bool running; int done = 0; int err = 0; int status; - int opcode; - - opcode = ioread8(&pdsc->cmd_regs->cmd.opcode); start_time = jiffies; max_wait = start_time + (max_seconds * HZ); while (!done && !timeout) { + running = pdsc_is_fw_running(pdsc); + if (!running) + break; + done = pdsc_devcmd_done(pdsc); if (done) break; @@ -155,7 +159,7 @@ static int pdsc_devcmd_wait(struct pdsc *pdsc, int max_seconds) dev_dbg(dev, "DEVCMD %d %s after %ld secs\n", opcode, pdsc_devcmd_str(opcode), duration / HZ); - if (!done || timeout) { + if ((!done || timeout) && running) { dev_err(dev, "DEVCMD %d %s timeout, done %d timeout %d max_seconds=%d\n", opcode, pdsc_devcmd_str(opcode), done, timeout, max_seconds); @@ -180,10 +184,10 @@ int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd, 
memcpy_toio(&pdsc->cmd_regs->cmd, cmd, sizeof(*cmd)); pdsc_devcmd_dbell(pdsc); - err = pdsc_devcmd_wait(pdsc, max_seconds); + err = pdsc_devcmd_wait(pdsc, cmd->opcode, max_seconds); memcpy_fromio(comp, &pdsc->cmd_regs->comp, sizeof(*comp)); - if (err == -ENXIO || err == -ETIMEDOUT) + if ((err == -ENXIO || err == -ETIMEDOUT) && pdsc->wq) queue_work(pdsc->wq, &pdsc->health_work); return err; @@ -257,10 +261,14 @@ static int pdsc_identify(struct pdsc *pdsc) struct pds_core_drv_identity drv = {}; size_t sz; int err; + int n; drv.drv_type = cpu_to_le32(PDS_DRIVER_LINUX); - snprintf(drv.driver_ver_str, sizeof(drv.driver_ver_str), - "%s %s", PDS_CORE_DRV_NAME, utsname()->release); + /* Catching the return quiets a Wformat-truncation complaint */ + n = snprintf(drv.driver_ver_str, sizeof(drv.driver_ver_str), + "%s %s", PDS_CORE_DRV_NAME, utsname()->release); + if (n > sizeof(drv.driver_ver_str)) + dev_dbg(pdsc->dev, "release name truncated, don't care\n"); /* Next let's get some info about the device * We use the devcmd_lock at this level in order to diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c index 9c6b3653c1c7..e9948ea5bbcd 100644 --- a/drivers/net/ethernet/amd/pds_core/devlink.c +++ b/drivers/net/ethernet/amd/pds_core/devlink.c @@ -10,6 +10,9 @@ pdsc_viftype *pdsc_dl_find_viftype_by_id(struct pdsc *pdsc, { int vt; + if (!pdsc->viftype_status) + return NULL; + for (vt = 0; vt < PDS_DEV_TYPE_MAX; vt++) { if (pdsc->viftype_status[vt].dl_id == dl_id) return &pdsc->viftype_status[vt]; @@ -101,7 +104,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req, struct pds_core_fw_list_info fw_list; struct pdsc *pdsc = devlink_priv(dl); union pds_core_dev_comp comp; - char buf[16]; + char buf[32]; int listlen; int err; int i; @@ -121,6 +124,8 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req, snprintf(buf, sizeof(buf), "fw.slot_%d", i); err = devlink_info_version_stored_put(req, buf, fw_list.fw_names[i].fw_version); + if (err) + return err; } err = devlink_info_version_running_put(req, @@ -151,33 +156,20 @@ int pdsc_fw_reporter_diagnose(struct devlink_health_reporter *reporter, struct netlink_ext_ack *extack) { struct pdsc *pdsc = devlink_health_reporter_priv(reporter); - int err; mutex_lock(&pdsc->config_lock); - if (test_bit(PDSC_S_FW_DEAD, &pdsc->state)) - err = devlink_fmsg_string_pair_put(fmsg, "Status", "dead"); + devlink_fmsg_string_pair_put(fmsg, "Status", "dead"); else if (!pdsc_is_fw_good(pdsc)) - err = devlink_fmsg_string_pair_put(fmsg, "Status", "unhealthy"); + devlink_fmsg_string_pair_put(fmsg, "Status", "unhealthy"); else - err = devlink_fmsg_string_pair_put(fmsg, "Status", "healthy"); - + devlink_fmsg_string_pair_put(fmsg, "Status", "healthy"); mutex_unlock(&pdsc->config_lock); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "State", + pdsc->fw_status & ~PDS_CORE_FW_STS_F_GENERATION); + devlink_fmsg_u32_pair_put(fmsg, "Generation", pdsc->fw_generation >> 4); + devlink_fmsg_u32_pair_put(fmsg, "Recoveries", pdsc->fw_recoveries); - err = devlink_fmsg_u32_pair_put(fmsg, "State", - pdsc->fw_status & - ~PDS_CORE_FW_STS_F_GENERATION); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "Generation", - pdsc->fw_generation >> 4); - if (err) - return err; - - return devlink_fmsg_u32_pair_put(fmsg, "Recoveries", - pdsc->fw_recoveries); + return 0; } diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c index 672757932246..3080898d7b95 
100644 --- a/drivers/net/ethernet/amd/pds_core/main.c +++ b/drivers/net/ethernet/amd/pds_core/main.c @@ -367,14 +367,13 @@ static int pdsc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = pdsc_init_vf(pdsc); if (err) { dev_err(dev, "Cannot init device: %pe\n", ERR_PTR(err)); - goto err_out_clear_master; + goto err_out_disable_device; } clear_bit(PDSC_S_INITING_DRIVER, &pdsc->state); return 0; -err_out_clear_master: - pci_clear_master(pdev); +err_out_disable_device: pci_disable_device(pdev); err_out_free_ida: ida_free(&pdsc_ida, pdsc->uid); @@ -439,7 +438,6 @@ static void pdsc_remove(struct pci_dev *pdev) pci_release_regions(pdev); } - pci_clear_master(pdev); pci_disable_device(pdev); ida_free(&pdsc_ida, pdsc->uid); @@ -447,12 +445,62 @@ static void pdsc_remove(struct pci_dev *pdev) devlink_free(dl); } +void pdsc_reset_prepare(struct pci_dev *pdev) +{ + struct pdsc *pdsc = pci_get_drvdata(pdev); + + pdsc_fw_down(pdsc); + + pci_free_irq_vectors(pdev); + pdsc_unmap_bars(pdsc); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +void pdsc_reset_done(struct pci_dev *pdev) +{ + struct pdsc *pdsc = pci_get_drvdata(pdev); + struct device *dev = pdsc->dev; + int err; + + err = pci_enable_device(pdev); + if (err) { + dev_err(dev, "Cannot enable PCI device: %pe\n", ERR_PTR(err)); + return; + } + pci_set_master(pdev); + + if (!pdev->is_virtfn) { + pcie_print_link_status(pdsc->pdev); + + err = pci_request_regions(pdsc->pdev, PDS_CORE_DRV_NAME); + if (err) { + dev_err(pdsc->dev, "Cannot request PCI regions: %pe\n", + ERR_PTR(err)); + return; + } + + err = pdsc_map_bars(pdsc); + if (err) + return; + } + + pdsc_fw_up(pdsc); +} + +static const struct pci_error_handlers pdsc_err_handler = { + /* FLR handling */ + .reset_prepare = pdsc_reset_prepare, + .reset_done = pdsc_reset_done, +}; + static struct pci_driver pdsc_driver = { .name = PDS_CORE_DRV_NAME, .id_table = pdsc_id_table, .probe = pdsc_probe, .remove = pdsc_remove, .sriov_configure = pdsc_sriov_configure, + .err_handler = &pdsc_err_handler, }; void *pdsc_get_pf_struct(struct pci_dev *vf_pdev) diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c index 68ca1225eedc..c78706d21a6a 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ b/drivers/net/ethernet/amd/sunlance.c @@ -92,7 +92,7 @@ static char lancestr[] = "LANCE"; #include <linux/bitops.h> #include <linux/dma-mapping.h> #include <linux/of.h> -#include <linux/of_device.h> +#include <linux/platform_device.h> #include <linux/gfp.h> #include <linux/pgtable.h> @@ -1487,7 +1487,7 @@ static int sunlance_sbus_probe(struct platform_device *op) return err; } -static int sunlance_sbus_remove(struct platform_device *op) +static void sunlance_sbus_remove(struct platform_device *op) { struct lance_private *lp = platform_get_drvdata(op); struct net_device *net_dev = lp->dev; @@ -1497,8 +1497,6 @@ static int sunlance_sbus_remove(struct platform_device *op) lance_free_hwresources(lp); free_netdev(net_dev); - - return 0; } static const struct of_device_id sunlance_sbus_match[] = { @@ -1516,7 +1514,7 @@ static struct platform_driver sunlance_sbus_driver = { .of_match_table = sunlance_sbus_match, }, .probe = sunlance_sbus_probe, - .remove = sunlance_sbus_remove, + .remove_new = sunlance_sbus_remove, }; module_platform_driver(sunlance_sbus_driver); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 614c0278419b..6b73648b3779 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ 
b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -682,10 +682,24 @@ static void xgbe_service(struct work_struct *work) static void xgbe_service_timer(struct timer_list *t) { struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer); + struct xgbe_channel *channel; + unsigned int i; queue_work(pdata->dev_workqueue, &pdata->service_work); mod_timer(&pdata->service_timer, jiffies + HZ); + + if (!pdata->tx_usecs) + return; + + for (i = 0; i < pdata->channel_count; i++) { + channel = pdata->channel[i]; + if (!channel->tx_ring || channel->tx_timer_active) + break; + channel->tx_timer_active = 1; + mod_timer(&channel->tx_timer, + jiffies + usecs_to_jiffies(pdata->tx_usecs)); + } } static void xgbe_init_timers(struct xgbe_prv_data *pdata) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 6e83ff59172a..32fab5e77246 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -314,10 +314,15 @@ static int xgbe_get_link_ksettings(struct net_device *netdev, cmd->base.phy_address = pdata->phy.address; - cmd->base.autoneg = pdata->phy.autoneg; - cmd->base.speed = pdata->phy.speed; - cmd->base.duplex = pdata->phy.duplex; + if (netif_carrier_ok(netdev)) { + cmd->base.speed = pdata->phy.speed; + cmd->base.duplex = pdata->phy.duplex; + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + cmd->base.autoneg = pdata->phy.autoneg; cmd->base.port = PORT_NONE; XGBE_LM_COPY(cmd, supported, lks, supported); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 32d2c6fac652..4a2dc705b528 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -1193,7 +1193,19 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata) if (pdata->phy.duplex != DUPLEX_FULL) return -EINVAL; - xgbe_set_mode(pdata, mode); + /* Force the mode change for SFI in Fixed PHY config. + * Fixed PHY configs need the PLL to be enabled while doing a mode set. + * When the SFP module isn't connected during boot, the driver assumes + * AN is ON and attempts autonegotiation. However, if the connected + * SFP comes up in Fixed PHY config, the link will not come up as the + * PLL isn't enabled while the initial mode set command is issued. + * So, force the mode change for SFI in Fixed PHY configuration to + * fix link issues. + */ + if (mode == XGBE_MODE_SFI) + xgbe_change_mode(pdata, mode); + else + xgbe_set_mode(pdata, mode); return 0; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c index 4d790a89fe77..9131020d06af 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c @@ -123,9 +123,7 @@ #include <linux/io.h> #include <linux/of.h> #include <linux/of_net.h> -#include <linux/of_address.h> #include <linux/of_platform.h> -#include <linux/of_device.h> #include <linux/clk.h> #include <linux/property.h> #include <linux/acpi.h> @@ -135,17 +133,6 @@ #include "xgbe-common.h" #ifdef CONFIG_ACPI -static const struct acpi_device_id xgbe_acpi_match[]; - -static struct xgbe_version_data *xgbe_acpi_vdata(struct xgbe_prv_data *pdata) -{ - const struct acpi_device_id *id; - - id = acpi_match_device(xgbe_acpi_match, pdata->dev); - - return id ? 
(struct xgbe_version_data *)id->driver_data : NULL; -} - static int xgbe_acpi_support(struct xgbe_prv_data *pdata) { struct device *dev = pdata->dev; @@ -173,11 +160,6 @@ static int xgbe_acpi_support(struct xgbe_prv_data *pdata) return 0; } #else /* CONFIG_ACPI */ -static struct xgbe_version_data *xgbe_acpi_vdata(struct xgbe_prv_data *pdata) -{ - return NULL; -} - static int xgbe_acpi_support(struct xgbe_prv_data *pdata) { return -EINVAL; @@ -185,17 +167,6 @@ static int xgbe_acpi_support(struct xgbe_prv_data *pdata) #endif /* CONFIG_ACPI */ #ifdef CONFIG_OF -static const struct of_device_id xgbe_of_match[]; - -static struct xgbe_version_data *xgbe_of_vdata(struct xgbe_prv_data *pdata) -{ - const struct of_device_id *id; - - id = of_match_device(xgbe_of_match, pdata->dev); - - return id ? (struct xgbe_version_data *)id->data : NULL; -} - static int xgbe_of_support(struct xgbe_prv_data *pdata) { struct device *dev = pdata->dev; @@ -244,11 +215,6 @@ static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata) return phy_pdev; } #else /* CONFIG_OF */ -static struct xgbe_version_data *xgbe_of_vdata(struct xgbe_prv_data *pdata) -{ - return NULL; -} - static int xgbe_of_support(struct xgbe_prv_data *pdata) { return -EINVAL; @@ -290,12 +256,6 @@ static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata) return phy_pdev; } -static struct xgbe_version_data *xgbe_get_vdata(struct xgbe_prv_data *pdata) -{ - return pdata->use_acpi ? xgbe_acpi_vdata(pdata) - : xgbe_of_vdata(pdata); -} - static int xgbe_platform_probe(struct platform_device *pdev) { struct xgbe_prv_data *pdata; @@ -321,7 +281,7 @@ static int xgbe_platform_probe(struct platform_device *pdev) pdata->use_acpi = dev->of_node ? 0 : 1; /* Get the version data */ - pdata->vdata = xgbe_get_vdata(pdata); + pdata->vdata = (struct xgbe_version_data *)device_get_match_data(dev); phy_pdev = xgbe_get_phy_pdev(pdata); if (!phy_pdev) { @@ -512,7 +472,7 @@ err_alloc: return ret; } -static int xgbe_platform_remove(struct platform_device *pdev) +static void xgbe_platform_remove(struct platform_device *pdev) { struct xgbe_prv_data *pdata = platform_get_drvdata(pdev); @@ -521,8 +481,6 @@ static int xgbe_platform_remove(struct platform_device *pdev) platform_device_put(pdata->phy_platdev); xgbe_free_pdata(pdata); - - return 0; } #ifdef CONFIG_PM_SLEEP @@ -615,7 +573,7 @@ static struct platform_driver xgbe_driver = { .pm = &xgbe_platform_pm_ops, }, .probe = xgbe_platform_probe, - .remove = xgbe_platform_remove, + .remove_new = xgbe_platform_remove, }; int xgbe_platform_init(void) diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c index 379d19d18dbe..9e90c2381491 100644 --- a/drivers/net/ethernet/apm/xgene-v2/main.c +++ b/drivers/net/ethernet/apm/xgene-v2/main.c @@ -690,7 +690,7 @@ err: return ret; } -static int xge_remove(struct platform_device *pdev) +static void xge_remove(struct platform_device *pdev) { struct xge_pdata *pdata; struct net_device *ndev; @@ -706,8 +706,6 @@ static int xge_remove(struct platform_device *pdev) xge_mdio_remove(ndev); unregister_netdev(ndev); free_netdev(ndev); - - return 0; } static void xge_shutdown(struct platform_device *pdev) @@ -736,7 +734,7 @@ static struct platform_driver xge_driver = { .acpi_match_table = ACPI_PTR(xge_acpi_match), }, .probe = xge_probe, - .remove = xge_remove, + .remove_new = xge_remove, .shutdown = xge_shutdown, }; module_platform_driver(xge_driver); diff --git a/drivers/net/ethernet/apm/xgene-v2/main.h 
b/drivers/net/ethernet/apm/xgene-v2/main.h index b3985a7be59d..7be6f83e22fe 100644 --- a/drivers/net/ethernet/apm/xgene-v2/main.h +++ b/drivers/net/ethernet/apm/xgene-v2/main.h @@ -22,6 +22,7 @@ #include <linux/of_mdio.h> #include <linux/prefetch.h> #include <linux/phy.h> +#include <linux/platform_device.h> #include <net/ip.h> #include "mac.h" #include "enet.h" diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 390671640388..44900026d11b 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -1632,7 +1632,7 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata) for (i = 0; i < max_irqs; i++) { ret = platform_get_irq(pdev, i); - if (ret <= 0) { + if (ret < 0) { if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { max_irqs = i; pdata->rxq_cnt = max_irqs / 2; @@ -1640,7 +1640,7 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata) pdata->cq_cnt = max_irqs / 2; break; } - return ret ? : -ENXIO; + return ret; } pdata->irqs[i] = ret; } @@ -2018,7 +2018,6 @@ static int xgene_enet_probe(struct platform_device *pdev) struct xgene_enet_pdata *pdata; struct device *dev = &pdev->dev; void (*link_state)(struct work_struct *); - const struct of_device_id *of_id; int ret; ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata), @@ -2039,19 +2038,7 @@ static int xgene_enet_probe(struct platform_device *pdev) NETIF_F_GRO | NETIF_F_SG; - of_id = of_match_device(xgene_enet_of_match, &pdev->dev); - if (of_id) { - pdata->enet_id = (enum xgene_enet_id)of_id->data; - } -#ifdef CONFIG_ACPI - else { - const struct acpi_device_id *acpi_id; - - acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev); - if (acpi_id) - pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data; - } -#endif + pdata->enet_id = (enum xgene_enet_id)device_get_match_data(&pdev->dev); if (!pdata->enet_id) { ret = -ENODEV; goto err; @@ -2127,7 +2114,7 @@ err: return ret; } -static int xgene_enet_remove(struct platform_device *pdev) +static void xgene_enet_remove(struct platform_device *pdev) { struct xgene_enet_pdata *pdata; struct net_device *ndev; @@ -2149,8 +2136,6 @@ static int xgene_enet_remove(struct platform_device *pdev) xgene_enet_delete_desc_rings(pdata); pdata->port_ops->shutdown(pdata); free_netdev(ndev); - - return 0; } static void xgene_enet_shutdown(struct platform_device *pdev) @@ -2170,11 +2155,11 @@ static void xgene_enet_shutdown(struct platform_device *pdev) static struct platform_driver xgene_enet_driver = { .driver = { .name = "xgene-enet", - .of_match_table = of_match_ptr(xgene_enet_of_match), + .of_match_table = xgene_enet_of_match, .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match), }, .probe = xgene_enet_probe, - .remove = xgene_enet_remove, + .remove_new = xgene_enet_remove, .shutdown = xgene_enet_shutdown, }; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index 643f5e646740..bce2c19e3f22 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -15,9 +15,10 @@ #include <linux/efi.h> #include <linux/irq.h> #include <linux/io.h> -#include <linux/of_platform.h> +#include <linux/of.h> #include <linux/of_net.h> #include <linux/of_mdio.h> +#include <linux/platform_device.h> #include <linux/mdio/mdio-xgene.h> #include <linux/module.h> #include <net/ip.h> diff --git a/drivers/net/ethernet/apple/macmace.c 
b/drivers/net/ethernet/apple/macmace.c index 8fcaf1639920..766ab78256fe 100644 --- a/drivers/net/ethernet/apple/macmace.c +++ b/drivers/net/ethernet/apple/macmace.c @@ -77,7 +77,7 @@ struct mace_frame { u8 pad4; u32 pad5; u32 pad6; - u8 data[1]; + DECLARE_FLEX_ARRAY(u8, data); /* And frame continues.. */ }; @@ -739,7 +739,7 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Macintosh MACE ethernet driver"); MODULE_ALIAS("platform:macmace"); -static int mac_mace_device_remove(struct platform_device *pdev) +static void mac_mace_device_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct mace_data *mp = netdev_priv(dev); @@ -755,13 +755,11 @@ static int mac_mace_device_remove(struct platform_device *pdev) mp->tx_ring, mp->tx_ring_phys); free_netdev(dev); - - return 0; } static struct platform_driver mac_mace_driver = { .probe = mace_probe, - .remove = mac_mace_device_remove, + .remove_new = mac_mace_device_remove, .driver = { .name = mac_mace_string, }, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c index 80b44043e6c5..28c9b6f1a54f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c @@ -553,17 +553,17 @@ void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp) /* aq_ptp_rx_hwtstamp - utility function which checks for RX time stamp * @adapter: pointer to adapter struct - * @skb: particular skb to send timestamp with + * @shhwtstamps: particular skb_shared_hwtstamps to save timestamp * * if the timestamp is valid, we convert it into the timecounter ns * value, then store that result into the hwtstamps structure which * is passed up the network stack */ -static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct sk_buff *skb, +static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct skb_shared_hwtstamps *shhwtstamps, u64 timestamp) { timestamp -= atomic_read(&aq_ptp->offset_ingress); - aq_ptp_convert_to_hwtstamp(aq_ptp, skb_hwtstamps(skb), timestamp); + aq_ptp_convert_to_hwtstamp(aq_ptp, shhwtstamps, timestamp); } void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp, @@ -639,7 +639,7 @@ bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring) &aq_ptp->ptp_rx == ring || &aq_ptp->hwts_rx == ring; } -u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p, +u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct skb_shared_hwtstamps *shhwtstamps, u8 *p, unsigned int len) { struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp; @@ -648,7 +648,7 @@ u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p, p, len, &timestamp); if (ret > 0) - aq_ptp_rx_hwtstamp(aq_ptp, skb, timestamp); + aq_ptp_rx_hwtstamp(aq_ptp, shhwtstamps, timestamp); return ret; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h index 28ccb7ca2df9..210b723f2207 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h @@ -67,7 +67,7 @@ int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp, /* Return whether the ring belongs to PTP or not */ bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring); -u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p, +u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct skb_shared_hwtstamps *shhwtstamps, u8 *p, unsigned int len); struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp); @@ -143,7 +143,7 @@ static inline bool 
aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring) } static inline u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, - struct sk_buff *skb, u8 *p, + struct skb_shared_hwtstamps *shhwtstamps, u8 *p, unsigned int len) { return 0; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 4de22eed099a..e1885c1eb100 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -647,7 +647,7 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi, } if (is_ptp_ring) buff->len -= - aq_ptp_extract_ts(self->aq_nic, skb, + aq_ptp_extract_ts(self->aq_nic, skb_hwtstamps(skb), aq_buf_vaddr(&buff->rxdata), buff->len); @@ -742,6 +742,8 @@ static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring, struct aq_ring_buff_s *buff = &rx_ring->buff_ring[rx_ring->sw_head]; bool is_ptp_ring = aq_ptp_ring(rx_ring->aq_nic, rx_ring); struct aq_ring_buff_s *buff_ = NULL; + u16 ptp_hwtstamp_len = 0; + struct skb_shared_hwtstamps shhwtstamps; struct sk_buff *skb = NULL; unsigned int next_ = 0U; struct xdp_buff xdp; @@ -810,11 +812,12 @@ static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring, hard_start = page_address(buff->rxdata.page) + buff->rxdata.pg_off - rx_ring->page_offset; - if (is_ptp_ring) - buff->len -= - aq_ptp_extract_ts(rx_ring->aq_nic, skb, - aq_buf_vaddr(&buff->rxdata), - buff->len); + if (is_ptp_ring) { + ptp_hwtstamp_len = aq_ptp_extract_ts(rx_ring->aq_nic, &shhwtstamps, + aq_buf_vaddr(&buff->rxdata), + buff->len); + buff->len -= ptp_hwtstamp_len; + } xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset, @@ -834,6 +837,9 @@ static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring, if (IS_ERR(skb) || !skb) continue; + if (ptp_hwtstamp_len > 0) + *skb_hwtstamps(skb) = shhwtstamps; + if (buff->is_vlan) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), buff->vlan_rx_tag); @@ -932,11 +938,14 @@ void aq_ring_free(struct aq_ring_s *self) return; kfree(self->buff_ring); + self->buff_ring = NULL; - if (self->dx_ring) + if (self->dx_ring) { dma_free_coherent(aq_nic_get_dev(self->aq_nic), self->size * self->dx_size, self->dx_ring, self->dx_ring_pa); + self->dx_ring = NULL; + } } unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c index 5dfc751572ed..220400a633f5 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c @@ -93,7 +93,7 @@ static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self) static int hw_atl2_hw_reset(struct aq_hw_s *self) { - struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; + struct hw_atl2_priv *priv = self->priv; int err; err = hw_atl2_utils_soft_reset(self); @@ -378,8 +378,8 @@ static int hw_atl2_hw_init_tx_path(struct aq_hw_s *self) static void hw_atl2_hw_init_new_rx_filters(struct aq_hw_s *self) { - struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; u8 *prio_tc_map = self->aq_nic_cfg->prio_tc_map; + struct hw_atl2_priv *priv = self->priv; u16 action; u8 index; int i; @@ -433,7 +433,7 @@ static void hw_atl2_hw_new_rx_filter_vlan_promisc(struct aq_hw_s *self, u16 off_action = (!promisc && !hw_atl_rpfl2promiscuous_mode_en_get(self)) ? 
HW_ATL2_ACTION_DROP : HW_ATL2_ACTION_DISABLE; - struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; + struct hw_atl2_priv *priv = self->priv; u8 index; index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX; @@ -445,7 +445,7 @@ static void hw_atl2_hw_new_rx_filter_vlan_promisc(struct aq_hw_s *self, static void hw_atl2_hw_new_rx_filter_promisc(struct aq_hw_s *self, bool promisc) { u16 off_action = promisc ? HW_ATL2_ACTION_DISABLE : HW_ATL2_ACTION_DROP; - struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; + struct hw_atl2_priv *priv = self->priv; bool vlan_promisc_enable; u8 index; @@ -539,8 +539,8 @@ static int hw_atl2_hw_init(struct aq_hw_s *self, const u8 *mac_addr) [AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U }, }; - struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg; + struct hw_atl2_priv *priv = self->priv; u8 base_index, count; int err; @@ -770,7 +770,7 @@ static struct aq_stats_s *hw_atl2_utils_get_hw_stats(struct aq_hw_s *self) static int hw_atl2_hw_vlan_set(struct aq_hw_s *self, struct aq_rx_filter_vlan *aq_vlans) { - struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; + struct hw_atl2_priv *priv = self->priv; u32 queue; u8 index; int i; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c index 674683b54304..52e2070a4a2f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c @@ -413,8 +413,8 @@ do { \ static int aq_a2_fw_update_stats(struct aq_hw_s *self) { - struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; struct aq_stats_s *cs = &self->curr_stats; + struct hw_atl2_priv *priv = self->priv; struct statistics_s stats; struct version_s version; int err; diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c index ce3147e886a1..a3afddb23ee8 100644 --- a/drivers/net/ethernet/arc/emac_arc.c +++ b/drivers/net/ethernet/arc/emac_arc.c @@ -58,14 +58,12 @@ out_netdev: return err; } -static int emac_arc_remove(struct platform_device *pdev) +static void emac_arc_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); arc_emac_remove(ndev); free_netdev(ndev); - - return 0; } static const struct of_device_id emac_arc_dt_ids[] = { @@ -76,7 +74,7 @@ MODULE_DEVICE_TABLE(of, emac_arc_dt_ids); static struct platform_driver emac_arc_driver = { .probe = emac_arc_probe, - .remove = emac_arc_remove, + .remove_new = emac_arc_remove, .driver = { .name = DRV_NAME, .of_match_table = emac_arc_dt_ids, diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 2b427d8a1831..31ee477dd131 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -15,11 +15,11 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_net.h> -#include <linux/of_platform.h> #include "emac.h" diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c index 509101112279..493d6356c8ca 100644 --- a/drivers/net/ethernet/arc/emac_rockchip.c +++ b/drivers/net/ethernet/arc/emac_rockchip.c @@ -244,7 +244,7 @@ out_netdev: return err; } -static int emac_rockchip_remove(struct platform_device *pdev) +static void 
emac_rockchip_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct rockchip_priv_data *priv = netdev_priv(ndev); @@ -260,12 +260,11 @@ static int emac_rockchip_remove(struct platform_device *pdev) clk_disable_unprepare(priv->macclk); free_netdev(ndev); - return 0; } static struct platform_driver emac_rockchip_driver = { .probe = emac_rockchip_probe, - .remove = emac_rockchip_remove, + .remove_new = emac_rockchip_remove, .driver = { .name = DRV_NAME, .of_match_table = emac_rockchip_dt_ids, diff --git a/drivers/net/ethernet/asix/ax88796c_ioctl.c b/drivers/net/ethernet/asix/ax88796c_ioctl.c index 916ae380a004..7d2fe2e5af92 100644 --- a/drivers/net/ethernet/asix/ax88796c_ioctl.c +++ b/drivers/net/ethernet/asix/ax88796c_ioctl.c @@ -24,7 +24,7 @@ static void ax88796c_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { /* Inherit standard device info */ - strncpy(info->driver, DRV_NAME, sizeof(info->driver)); + strscpy(info->driver, DRV_NAME, sizeof(info->driver)); } static u32 ax88796c_get_msglevel(struct net_device *ndev) diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index ff1a5edf8df1..0f2f400b5bc4 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -29,9 +29,10 @@ #include <linux/if_vlan.h> #include <linux/mfd/syscon.h> +#include <linux/of.h> #include <linux/of_mdio.h> #include <linux/of_net.h> -#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/phylink.h> #include <linux/regmap.h> #include <linux/reset.h> @@ -1967,21 +1968,19 @@ err_put_clk: return err; } -static int ag71xx_remove(struct platform_device *pdev) +static void ag71xx_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct ag71xx *ag; if (!ndev) - return 0; + return; ag = netdev_priv(ndev); unregister_netdev(ndev); ag71xx_mdio_remove(ag); clk_disable_unprepare(ag->clk_eth); platform_set_drvdata(pdev, NULL); - - return 0; } static const u32 ar71xx_fifo_ar7100[] = { @@ -2068,7 +2067,7 @@ static const struct of_device_id ag71xx_match[] = { static struct platform_driver ag71xx_driver = { .probe = ag71xx_probe, - .remove = ag71xx_remove, + .remove_new = ag71xx_remove, .driver = { .name = "ag71xx", .of_match_table = ag71xx_match, diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c index b716adacd815..7f6b69a52367 100644 --- a/drivers/net/ethernet/atheros/alx/ethtool.c +++ b/drivers/net/ethernet/atheros/alx/ethtool.c @@ -292,9 +292,8 @@ static void alx_get_ethtool_stats(struct net_device *netdev, spin_lock(&alx->stats_lock); alx_update_hw_stats(hw); - BUILD_BUG_ON(sizeof(hw->stats) - offsetof(struct alx_hw_stats, rx_ok) < - ALX_NUM_STATS * sizeof(u64)); - memcpy(data, &hw->stats.rx_ok, ALX_NUM_STATS * sizeof(u64)); + BUILD_BUG_ON(sizeof(hw->stats) != ALX_NUM_STATS * sizeof(u64)); + memcpy(data, &hw->stats, sizeof(hw->stats)); spin_unlock(&alx->stats_lock); } diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h index 43d821fe7a54..63ba64dbb731 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c.h +++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h @@ -504,15 +504,12 @@ struct atl1c_rrd_ring { u16 next_to_use; u16 next_to_clean; struct napi_struct napi; - struct page *rx_page; - unsigned int rx_page_offset; }; /* board specific private data structure */ struct atl1c_adapter { struct net_device *netdev; struct pci_dev 
*pdev; - unsigned int rx_frag_size; struct atl1c_hw hw; struct atl1c_hw_stats hw_stats; struct mii_if_info mii; /* MII interface info */ diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 940c5d1ff9cf..46cdc32b4e31 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -483,15 +483,10 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p) static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter, struct net_device *dev) { - unsigned int head_size; int mtu = dev->mtu; adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ? roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE; - - head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD + NET_IP_ALIGN) + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - adapter->rx_frag_size = roundup_pow_of_two(head_size); } static netdev_features_t atl1c_fix_features(struct net_device *netdev, @@ -847,7 +842,8 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter) } static inline void atl1c_clean_buffer(struct pci_dev *pdev, - struct atl1c_buffer *buffer_info) + struct atl1c_buffer *buffer_info, + int budget) { u16 pci_driection; if (buffer_info->flags & ATL1C_BUFFER_FREE) @@ -866,7 +862,7 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev, buffer_info->length, pci_driection); } if (buffer_info->skb) - dev_consume_skb_any(buffer_info->skb); + napi_consume_skb(buffer_info->skb, budget); buffer_info->dma = 0; buffer_info->skb = NULL; ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); @@ -887,7 +883,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter, ring_count = tpd_ring->count; for (index = 0; index < ring_count; index++) { buffer_info = &tpd_ring->buffer_info[index]; - atl1c_clean_buffer(pdev, buffer_info); + atl1c_clean_buffer(pdev, buffer_info, 0); } netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue)); @@ -914,7 +910,7 @@ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter, u32 queue) for (j = 0; j < rfd_ring->count; j++) { buffer_info = &rfd_ring->buffer_info[j]; - atl1c_clean_buffer(pdev, buffer_info); + atl1c_clean_buffer(pdev, buffer_info, 0); } /* zero out the descriptor ring */ memset(rfd_ring->desc, 0, rfd_ring->size); @@ -964,7 +960,6 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter) static void atl1c_free_ring_resources(struct atl1c_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; - int i; dma_free_coherent(&pdev->dev, adapter->ring_header.size, adapter->ring_header.desc, adapter->ring_header.dma); @@ -977,12 +972,6 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter) kfree(adapter->tpd_ring[0].buffer_info); adapter->tpd_ring[0].buffer_info = NULL; } - for (i = 0; i < adapter->rx_queue_count; ++i) { - if (adapter->rrd_ring[i].rx_page) { - put_page(adapter->rrd_ring[i].rx_page); - adapter->rrd_ring[i].rx_page = NULL; - } - } } /** @@ -1619,7 +1608,7 @@ static int atl1c_clean_tx(struct napi_struct *napi, int budget) total_bytes += buffer_info->skb->len; total_packets++; } - atl1c_clean_buffer(pdev, buffer_info); + atl1c_clean_buffer(pdev, buffer_info, budget); if (++next_to_clean == tpd_ring->count) next_to_clean = 0; atomic_set(&tpd_ring->next_to_clean, next_to_clean); @@ -1754,48 +1743,11 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter, skb_checksum_none_assert(skb); } -static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter, - u32 queue, bool 
napi_mode) -{ - struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue]; - struct sk_buff *skb; - struct page *page; - - if (adapter->rx_frag_size > PAGE_SIZE) { - if (likely(napi_mode)) - return napi_alloc_skb(&rrd_ring->napi, - adapter->rx_buffer_len); - else - return netdev_alloc_skb_ip_align(adapter->netdev, - adapter->rx_buffer_len); - } - - page = rrd_ring->rx_page; - if (!page) { - page = alloc_page(GFP_ATOMIC); - if (unlikely(!page)) - return NULL; - rrd_ring->rx_page = page; - rrd_ring->rx_page_offset = 0; - } - - skb = build_skb(page_address(page) + rrd_ring->rx_page_offset, - adapter->rx_frag_size); - if (likely(skb)) { - skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); - rrd_ring->rx_page_offset += adapter->rx_frag_size; - if (rrd_ring->rx_page_offset >= PAGE_SIZE) - rrd_ring->rx_page = NULL; - else - get_page(page); - } - return skb; -} - static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue, bool napi_mode) { struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[queue]; + struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue]; struct pci_dev *pdev = adapter->pdev; struct atl1c_buffer *buffer_info, *next_info; struct sk_buff *skb; @@ -1814,13 +1766,27 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue, while (next_info->flags & ATL1C_BUFFER_FREE) { rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use); - skb = atl1c_alloc_skb(adapter, queue, napi_mode); + /* When DMA RX address is set to something like + * 0x....fc0, it will be very likely to cause DMA + * RFD overflow issue. + * + * To work around it, we apply rx skb with 64 bytes + * longer space, and offset the address whenever + * 0x....fc0 is detected. + */ + if (likely(napi_mode)) + skb = napi_alloc_skb(&rrd_ring->napi, adapter->rx_buffer_len + 64); + else + skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len + 64); if (unlikely(!skb)) { if (netif_msg_rx_err(adapter)) dev_warn(&pdev->dev, "alloc rx buffer failed\n"); break; } + if (((unsigned long)skb->data & 0xfff) == 0xfc0) + skb_reserve(skb, 64); + /* * Make buffer alignment 2 beyond a 16 byte boundary * this will result in a 16 byte aligned IP header after @@ -2186,7 +2152,7 @@ static void atl1c_tx_rollback(struct atl1c_adapter *adpt, while (index != tpd_ring->next_to_use) { tpd = ATL1C_TPD_DESC(tpd_ring, index); buffer_info = &tpd_ring->buffer_info[index]; - atl1c_clean_buffer(adpt->pdev, buffer_info); + atl1c_clean_buffer(adpt->pdev, buffer_info, 0); memset(tpd, 0, sizeof(struct atl1c_tpd_desc)); if (++index == tpd_ring->count) index = 0; diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 5935be190b9e..5f2a6fcba967 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -866,10 +866,13 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter) netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n", offset, adapter->ring_size); err = -1; - goto failed; + goto free_buffer; } return 0; +free_buffer: + kfree(tx_ring->tx_buffer); + tx_ring->tx_buffer = NULL; failed: if (adapter->ring_vir_addr != NULL) { dma_free_coherent(&pdev->dev, adapter->ring_size, diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 02aa6fd8ebc2..a9014d7932db 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -2446,7 +2446,7 @@ static int atl1_rings_clean(struct napi_struct *napi, int budget) 
static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter) { - if (!napi_schedule_prep(&adapter->napi)) + if (!napi_schedule(&adapter->napi)) /* It is possible in case even the RX/TX ints are disabled via IMR * register the ISR bits are set anyway (but do not produce IRQ). * To handle such situation the napi functions used to check is @@ -2454,8 +2454,6 @@ static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter) */ return 0; - __napi_schedule(&adapter->napi); - /* * Disable RX/TX ints via IMR register if it is * allowed. NAPI handler must reenable them in same diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index 1b487c071cb6..bcfc9488125b 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -1377,7 +1377,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->watchdog_timeo = 5 * HZ; netdev->min_mtu = 40; netdev->max_mtu = ETH_DATA_LEN + VLAN_HLEN; - strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); netdev->mem_start = mmio_start; netdev->mem_end = mmio_start + mmio_len; diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 948586bf1b5b..75ca3ddda1f5 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -255,4 +255,16 @@ config BNXT_HWMON Say Y if you want to expose the thermal sensor data on NetXtreme-C/E devices, via the hwmon sysfs interface. +config BCMASP + tristate "Broadcom ASP 2.0 Ethernet support" + depends on ARCH_BRCMSTB || COMPILE_TEST + default ARCH_BRCMSTB + depends on OF + select MII + select PHYLIB + select MDIO_BCM_UNIMAC + help + This configuration enables the Broadcom ASP 2.0 Ethernet controller + driver which is present in Broadcom STB SoCs such as 72165. 
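[Editor's note] The two strncpy() conversions in this section (ax88796c_get_drvinfo() and atl2_probe() above) follow the same hardening pattern. A minimal sketch of the difference, using a hypothetical example_set_name() helper that is not part of any patch here: strscpy() always NUL-terminates its destination and returns either the number of bytes copied or -E2BIG on truncation, whereas strncpy() can leave the buffer unterminated when the source fills it.

	#include <linux/string.h>
	#include <linux/printk.h>

	/* Illustrative sketch only, not part of the patches above. */
	static void example_set_name(char *dst, size_t dst_len, const char *src)
	{
		ssize_t n;

		n = strscpy(dst, src, dst_len);	/* dst is always NUL-terminated */
		if (n == -E2BIG)
			pr_debug("source truncated to %zu bytes\n", dst_len - 1);
	}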
+ endif # NET_VENDOR_BROADCOM diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile index 0ddfb5b5d53c..bac5cb6ad0cd 100644 --- a/drivers/net/ethernet/broadcom/Makefile +++ b/drivers/net/ethernet/broadcom/Makefile @@ -17,3 +17,4 @@ obj-$(CONFIG_BGMAC_BCMA) += bgmac-bcma.o bgmac-bcma-mdio.o obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o obj-$(CONFIG_BNXT) += bnxt/ +obj-$(CONFIG_BCMASP) += asp2/ diff --git a/drivers/net/ethernet/broadcom/asp2/Makefile b/drivers/net/ethernet/broadcom/asp2/Makefile new file mode 100644 index 000000000000..e07550315f83 --- /dev/null +++ b/drivers/net/ethernet/broadcom/asp2/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_BCMASP) += bcm-asp.o +bcm-asp-objs := bcmasp.o bcmasp_intf.o bcmasp_ethtool.o diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c new file mode 100644 index 000000000000..29b04a274d07 --- /dev/null +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c @@ -0,0 +1,1441 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Broadcom STB ASP 2.0 Driver + * + * Copyright (c) 2023 Broadcom + */ +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/clk.h> + +#include "bcmasp.h" +#include "bcmasp_intf_defs.h" + +static void _intr2_mask_clear(struct bcmasp_priv *priv, u32 mask) +{ + intr2_core_wl(priv, mask, ASP_INTR2_MASK_CLEAR); + priv->irq_mask &= ~mask; +} + +static void _intr2_mask_set(struct bcmasp_priv *priv, u32 mask) +{ + intr2_core_wl(priv, mask, ASP_INTR2_MASK_SET); + priv->irq_mask |= mask; +} + +void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en) +{ + struct bcmasp_priv *priv = intf->parent; + + if (en) + _intr2_mask_clear(priv, ASP_INTR2_TX_DESC(intf->channel)); + else + _intr2_mask_set(priv, ASP_INTR2_TX_DESC(intf->channel)); +} +EXPORT_SYMBOL_GPL(bcmasp_enable_tx_irq); + +void bcmasp_enable_rx_irq(struct bcmasp_intf *intf, int en) +{ + struct bcmasp_priv *priv = intf->parent; + + if (en) + _intr2_mask_clear(priv, ASP_INTR2_RX_ECH(intf->channel)); + else + _intr2_mask_set(priv, ASP_INTR2_RX_ECH(intf->channel)); +} +EXPORT_SYMBOL_GPL(bcmasp_enable_rx_irq); + +static void bcmasp_intr2_mask_set_all(struct bcmasp_priv *priv) +{ + _intr2_mask_set(priv, 0xffffffff); + priv->irq_mask = 0xffffffff; +} + +static void bcmasp_intr2_clear_all(struct bcmasp_priv *priv) +{ + intr2_core_wl(priv, 0xffffffff, ASP_INTR2_CLEAR); +} + +static void bcmasp_intr2_handling(struct bcmasp_intf *intf, u32 status) +{ + if (status & ASP_INTR2_RX_ECH(intf->channel)) { + if (likely(napi_schedule_prep(&intf->rx_napi))) { + bcmasp_enable_rx_irq(intf, 0); + __napi_schedule_irqoff(&intf->rx_napi); + } + } + + if (status & ASP_INTR2_TX_DESC(intf->channel)) { + if (likely(napi_schedule_prep(&intf->tx_napi))) { + bcmasp_enable_tx_irq(intf, 0); + __napi_schedule_irqoff(&intf->tx_napi); + } + } +} + +static irqreturn_t bcmasp_isr(int irq, void *data) +{ + struct bcmasp_priv *priv = data; + struct bcmasp_intf *intf; + u32 status; + + status = intr2_core_rl(priv, ASP_INTR2_STATUS) & + ~intr2_core_rl(priv, ASP_INTR2_MASK_STATUS); + + intr2_core_wl(priv, status, ASP_INTR2_CLEAR); + + if (unlikely(status == 0)) { + dev_warn(&priv->pdev->dev, "l2 spurious interrupt\n"); + return IRQ_NONE; + } + + /* Handle 
interfaces */ + list_for_each_entry(intf, &priv->intfs, list) + bcmasp_intr2_handling(intf, status); + + return IRQ_HANDLED; +} + +void bcmasp_flush_rx_port(struct bcmasp_intf *intf) +{ + struct bcmasp_priv *priv = intf->parent; + u32 mask; + + switch (intf->port) { + case 0: + mask = ASP_CTRL_UMAC0_FLUSH_MASK; + break; + case 1: + mask = ASP_CTRL_UMAC1_FLUSH_MASK; + break; + case 2: + mask = ASP_CTRL_SPB_FLUSH_MASK; + break; + default: + /* Not valid port */ + return; + } + + rx_ctrl_core_wl(priv, mask, priv->hw_info->rx_ctrl_flush); +} + +static void bcmasp_netfilt_hw_en_wake(struct bcmasp_priv *priv, + struct bcmasp_net_filter *nfilt) +{ + rx_filter_core_wl(priv, ASP_RX_FILTER_NET_OFFSET_L3_1(64), + ASP_RX_FILTER_NET_OFFSET(nfilt->hw_index)); + + rx_filter_core_wl(priv, ASP_RX_FILTER_NET_OFFSET_L2(32) | + ASP_RX_FILTER_NET_OFFSET_L3_0(32) | + ASP_RX_FILTER_NET_OFFSET_L3_1(96) | + ASP_RX_FILTER_NET_OFFSET_L4(32), + ASP_RX_FILTER_NET_OFFSET(nfilt->hw_index + 1)); + + rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) | + ASP_RX_FILTER_NET_CFG_EN | + ASP_RX_FILTER_NET_CFG_L2_EN | + ASP_RX_FILTER_NET_CFG_L3_EN | + ASP_RX_FILTER_NET_CFG_L4_EN | + ASP_RX_FILTER_NET_CFG_L3_FRM(2) | + ASP_RX_FILTER_NET_CFG_L4_FRM(2) | + ASP_RX_FILTER_NET_CFG_UMC(nfilt->port), + ASP_RX_FILTER_NET_CFG(nfilt->hw_index)); + + rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) | + ASP_RX_FILTER_NET_CFG_EN | + ASP_RX_FILTER_NET_CFG_L2_EN | + ASP_RX_FILTER_NET_CFG_L3_EN | + ASP_RX_FILTER_NET_CFG_L4_EN | + ASP_RX_FILTER_NET_CFG_L3_FRM(2) | + ASP_RX_FILTER_NET_CFG_L4_FRM(2) | + ASP_RX_FILTER_NET_CFG_UMC(nfilt->port), + ASP_RX_FILTER_NET_CFG(nfilt->hw_index + 1)); +} + +#define MAX_WAKE_FILTER_SIZE 256 +enum asp_netfilt_reg_type { + ASP_NETFILT_MATCH = 0, + ASP_NETFILT_MASK, + ASP_NETFILT_MAX +}; + +static int bcmasp_netfilt_get_reg_offset(struct bcmasp_priv *priv, + struct bcmasp_net_filter *nfilt, + enum asp_netfilt_reg_type reg_type, + u32 offset) +{ + u32 block_index, filter_sel; + + if (offset < 32) { + block_index = ASP_RX_FILTER_NET_L2; + filter_sel = nfilt->hw_index; + } else if (offset < 64) { + block_index = ASP_RX_FILTER_NET_L2; + filter_sel = nfilt->hw_index + 1; + } else if (offset < 96) { + block_index = ASP_RX_FILTER_NET_L3_0; + filter_sel = nfilt->hw_index; + } else if (offset < 128) { + block_index = ASP_RX_FILTER_NET_L3_0; + filter_sel = nfilt->hw_index + 1; + } else if (offset < 160) { + block_index = ASP_RX_FILTER_NET_L3_1; + filter_sel = nfilt->hw_index; + } else if (offset < 192) { + block_index = ASP_RX_FILTER_NET_L3_1; + filter_sel = nfilt->hw_index + 1; + } else if (offset < 224) { + block_index = ASP_RX_FILTER_NET_L4; + filter_sel = nfilt->hw_index; + } else if (offset < 256) { + block_index = ASP_RX_FILTER_NET_L4; + filter_sel = nfilt->hw_index + 1; + } else { + return -EINVAL; + } + + switch (reg_type) { + case ASP_NETFILT_MATCH: + return ASP_RX_FILTER_NET_PAT(filter_sel, block_index, + (offset % 32)); + case ASP_NETFILT_MASK: + return ASP_RX_FILTER_NET_MASK(filter_sel, block_index, + (offset % 32)); + default: + return -EINVAL; + } +} + +static void bcmasp_netfilt_wr(struct bcmasp_priv *priv, + struct bcmasp_net_filter *nfilt, + enum asp_netfilt_reg_type reg_type, + u32 val, u32 offset) +{ + int reg_offset; + + /* HW only accepts 4 byte aligned writes */ + if (!IS_ALIGNED(offset, 4) || offset > MAX_WAKE_FILTER_SIZE) + return; + + reg_offset = bcmasp_netfilt_get_reg_offset(priv, nfilt, reg_type, + offset); + + rx_filter_core_wl(priv, val, reg_offset); +} + 
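+/* Editor's note, illustrative comment rather than patch content: the
+ * 256-byte wake-filter match/mask space handled above is carved into
+ * eight 32-byte windows. Each window selects one of the L2, L3_0,
+ * L3_1 or L4 register blocks on either the first (hw_index) or second
+ * (hw_index + 1) filter of the pair, and a byte's position inside the
+ * window is offset % 32. For example, offset 100 falls in the 96..127
+ * window, so it maps to ASP_RX_FILTER_NET_L3_0 on filter hw_index + 1,
+ * four bytes into that block.
+ */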
+static u32 bcmasp_netfilt_rd(struct bcmasp_priv *priv, + struct bcmasp_net_filter *nfilt, + enum asp_netfilt_reg_type reg_type, + u32 offset) +{ + int reg_offset; + + /* HW only accepts 4 byte aligned writes */ + if (!IS_ALIGNED(offset, 4) || offset > MAX_WAKE_FILTER_SIZE) + return 0; + + reg_offset = bcmasp_netfilt_get_reg_offset(priv, nfilt, reg_type, + offset); + + return rx_filter_core_rl(priv, reg_offset); +} + +static int bcmasp_netfilt_wr_m_wake(struct bcmasp_priv *priv, + struct bcmasp_net_filter *nfilt, + u32 offset, void *match, void *mask, + size_t size) +{ + u32 shift, mask_val = 0, match_val = 0; + bool first_byte = true; + + if ((offset + size) > MAX_WAKE_FILTER_SIZE) + return -EINVAL; + + while (size--) { + /* The HW only accepts 4 byte aligned writes, so if we + * begin unaligned or if remaining bytes less than 4, + * we need to read then write to avoid losing current + * register state + */ + if (first_byte && (!IS_ALIGNED(offset, 4) || size < 3)) { + match_val = bcmasp_netfilt_rd(priv, nfilt, + ASP_NETFILT_MATCH, + ALIGN_DOWN(offset, 4)); + mask_val = bcmasp_netfilt_rd(priv, nfilt, + ASP_NETFILT_MASK, + ALIGN_DOWN(offset, 4)); + } + + shift = (3 - (offset % 4)) * 8; + match_val &= ~GENMASK(shift + 7, shift); + mask_val &= ~GENMASK(shift + 7, shift); + match_val |= (u32)(*((u8 *)match) << shift); + mask_val |= (u32)(*((u8 *)mask) << shift); + + /* If last byte or last byte of word, write to reg */ + if (!size || ((offset % 4) == 3)) { + bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MATCH, + match_val, ALIGN_DOWN(offset, 4)); + bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MASK, + mask_val, ALIGN_DOWN(offset, 4)); + first_byte = true; + } else { + first_byte = false; + } + + offset++; + match++; + mask++; + } + + return 0; +} + +static void bcmasp_netfilt_reset_hw(struct bcmasp_priv *priv, + struct bcmasp_net_filter *nfilt) +{ + int i; + + for (i = 0; i < MAX_WAKE_FILTER_SIZE; i += 4) { + bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MATCH, 0, i); + bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MASK, 0, i); + } +} + +static void bcmasp_netfilt_tcpip4_wr(struct bcmasp_priv *priv, + struct bcmasp_net_filter *nfilt, + struct ethtool_tcpip4_spec *match, + struct ethtool_tcpip4_spec *mask, + u32 offset) +{ + __be16 val_16, mask_16; + + val_16 = htons(ETH_P_IP); + mask_16 = htons(0xFFFF); + bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset, + &val_16, &mask_16, sizeof(val_16)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 1, + &match->tos, &mask->tos, + sizeof(match->tos)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 12, + &match->ip4src, &mask->ip4src, + sizeof(match->ip4src)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 16, + &match->ip4dst, &mask->ip4dst, + sizeof(match->ip4dst)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 20, + &match->psrc, &mask->psrc, + sizeof(match->psrc)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 22, + &match->pdst, &mask->pdst, + sizeof(match->pdst)); +} + +static void bcmasp_netfilt_tcpip6_wr(struct bcmasp_priv *priv, + struct bcmasp_net_filter *nfilt, + struct ethtool_tcpip6_spec *match, + struct ethtool_tcpip6_spec *mask, + u32 offset) +{ + __be16 val_16, mask_16; + + val_16 = htons(ETH_P_IPV6); + mask_16 = htons(0xFFFF); + bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset, + &val_16, &mask_16, sizeof(val_16)); + val_16 = htons(match->tclass << 4); + mask_16 = htons(mask->tclass << 4); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset, + &val_16, 
&mask_16, sizeof(val_16)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 8, + &match->ip6src, &mask->ip6src, + sizeof(match->ip6src)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 24, + &match->ip6dst, &mask->ip6dst, + sizeof(match->ip6dst)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 40, + &match->psrc, &mask->psrc, + sizeof(match->psrc)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 42, + &match->pdst, &mask->pdst, + sizeof(match->pdst)); +} + +static int bcmasp_netfilt_wr_to_hw(struct bcmasp_priv *priv, + struct bcmasp_net_filter *nfilt) +{ + struct ethtool_rx_flow_spec *fs = &nfilt->fs; + unsigned int offset = 0; + __be16 val_16, mask_16; + u8 val_8, mask_8; + + /* Currently only supports wake filters */ + if (!nfilt->wake_filter) + return -EINVAL; + + bcmasp_netfilt_reset_hw(priv, nfilt); + + if (fs->flow_type & FLOW_MAC_EXT) { + bcmasp_netfilt_wr_m_wake(priv, nfilt, 0, &fs->h_ext.h_dest, + &fs->m_ext.h_dest, + sizeof(fs->h_ext.h_dest)); + } + + if ((fs->flow_type & FLOW_EXT) && + (fs->m_ext.vlan_etype || fs->m_ext.vlan_tci)) { + bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2), + &fs->h_ext.vlan_etype, + &fs->m_ext.vlan_etype, + sizeof(fs->h_ext.vlan_etype)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ((ETH_ALEN * 2) + 2), + &fs->h_ext.vlan_tci, + &fs->m_ext.vlan_tci, + sizeof(fs->h_ext.vlan_tci)); + offset += VLAN_HLEN; + } + + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case ETHER_FLOW: + bcmasp_netfilt_wr_m_wake(priv, nfilt, 0, + &fs->h_u.ether_spec.h_dest, + &fs->m_u.ether_spec.h_dest, + sizeof(fs->h_u.ether_spec.h_dest)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_ALEN, + &fs->h_u.ether_spec.h_source, + &fs->m_u.ether_spec.h_source, + sizeof(fs->h_u.ether_spec.h_source)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset, + &fs->h_u.ether_spec.h_proto, + &fs->m_u.ether_spec.h_proto, + sizeof(fs->h_u.ether_spec.h_proto)); + + break; + case IP_USER_FLOW: + val_16 = htons(ETH_P_IP); + mask_16 = htons(0xFFFF); + bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset, + &val_16, &mask_16, sizeof(val_16)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 1, + &fs->h_u.usr_ip4_spec.tos, + &fs->m_u.usr_ip4_spec.tos, + sizeof(fs->h_u.usr_ip4_spec.tos)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9, + &fs->h_u.usr_ip4_spec.proto, + &fs->m_u.usr_ip4_spec.proto, + sizeof(fs->h_u.usr_ip4_spec.proto)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 12, + &fs->h_u.usr_ip4_spec.ip4src, + &fs->m_u.usr_ip4_spec.ip4src, + sizeof(fs->h_u.usr_ip4_spec.ip4src)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 16, + &fs->h_u.usr_ip4_spec.ip4dst, + &fs->m_u.usr_ip4_spec.ip4dst, + sizeof(fs->h_u.usr_ip4_spec.ip4dst)); + if (!fs->m_u.usr_ip4_spec.l4_4_bytes) + break; + + /* Only supports 20 byte IPv4 header */ + val_8 = 0x45; + mask_8 = 0xFF; + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset, + &val_8, &mask_8, sizeof(val_8)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, + ETH_HLEN + 20 + offset, + &fs->h_u.usr_ip4_spec.l4_4_bytes, + &fs->m_u.usr_ip4_spec.l4_4_bytes, + sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes) + ); + break; + case TCP_V4_FLOW: + val_8 = IPPROTO_TCP; + mask_8 = 0xFF; + bcmasp_netfilt_tcpip4_wr(priv, nfilt, &fs->h_u.tcp_ip4_spec, + &fs->m_u.tcp_ip4_spec, offset); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9, + &val_8, &mask_8, sizeof(val_8)); + break; + case UDP_V4_FLOW: + val_8 = IPPROTO_UDP; + mask_8 = 0xFF; + 
bcmasp_netfilt_tcpip4_wr(priv, nfilt, &fs->h_u.udp_ip4_spec, + &fs->m_u.udp_ip4_spec, offset); + + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9, + &val_8, &mask_8, sizeof(val_8)); + break; + case TCP_V6_FLOW: + val_8 = IPPROTO_TCP; + mask_8 = 0xFF; + bcmasp_netfilt_tcpip6_wr(priv, nfilt, &fs->h_u.tcp_ip6_spec, + &fs->m_u.tcp_ip6_spec, offset); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 6, + &val_8, &mask_8, sizeof(val_8)); + break; + case UDP_V6_FLOW: + val_8 = IPPROTO_UDP; + mask_8 = 0xFF; + bcmasp_netfilt_tcpip6_wr(priv, nfilt, &fs->h_u.udp_ip6_spec, + &fs->m_u.udp_ip6_spec, offset); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 6, + &val_8, &mask_8, sizeof(val_8)); + break; + } + + bcmasp_netfilt_hw_en_wake(priv, nfilt); + + return 0; +} + +void bcmasp_netfilt_suspend(struct bcmasp_intf *intf) +{ + struct bcmasp_priv *priv = intf->parent; + bool write = false; + int ret, i; + + /* Write all filters to HW */ + for (i = 0; i < NUM_NET_FILTERS; i++) { + /* If the filter does not match the port, skip programming. */ + if (!priv->net_filters[i].claimed || + priv->net_filters[i].port != intf->port) + continue; + + if (i > 0 && (i % 2) && + priv->net_filters[i].wake_filter && + priv->net_filters[i - 1].wake_filter) + continue; + + ret = bcmasp_netfilt_wr_to_hw(priv, &priv->net_filters[i]); + if (!ret) + write = true; + } + + /* Successfully programmed at least one wake filter + * so enable top level wake config + */ + if (write) + rx_filter_core_wl(priv, (ASP_RX_FILTER_OPUT_EN | + ASP_RX_FILTER_LNR_MD | + ASP_RX_FILTER_GEN_WK_EN | + ASP_RX_FILTER_NT_FLT_EN), + ASP_RX_FILTER_BLK_CTRL); +} + +int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs, + u32 *rule_cnt) +{ + struct bcmasp_priv *priv = intf->parent; + int j = 0, i; + + for (i = 0; i < NUM_NET_FILTERS; i++) { + if (j == *rule_cnt) + return -EMSGSIZE; + + if (!priv->net_filters[i].claimed || + priv->net_filters[i].port != intf->port) + continue; + + if (i > 0 && (i % 2) && + priv->net_filters[i].wake_filter && + priv->net_filters[i - 1].wake_filter) + continue; + + rule_locs[j++] = priv->net_filters[i].fs.location; + } + + *rule_cnt = j; + + return 0; +} + +int bcmasp_netfilt_get_active(struct bcmasp_intf *intf) +{ + struct bcmasp_priv *priv = intf->parent; + int cnt = 0, i; + + for (i = 0; i < NUM_NET_FILTERS; i++) { + if (!priv->net_filters[i].claimed || + priv->net_filters[i].port != intf->port) + continue; + + /* Skip over a wake filter pair */ + if (i > 0 && (i % 2) && + priv->net_filters[i].wake_filter && + priv->net_filters[i - 1].wake_filter) + continue; + + cnt++; + } + + return cnt; +} + +bool bcmasp_netfilt_check_dup(struct bcmasp_intf *intf, + struct ethtool_rx_flow_spec *fs) +{ + struct bcmasp_priv *priv = intf->parent; + struct ethtool_rx_flow_spec *cur; + size_t fs_size = 0; + int i; + + for (i = 0; i < NUM_NET_FILTERS; i++) { + if (!priv->net_filters[i].claimed || + priv->net_filters[i].port != intf->port) + continue; + + cur = &priv->net_filters[i].fs; + + if (cur->flow_type != fs->flow_type || + cur->ring_cookie != fs->ring_cookie) + continue; + + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case ETHER_FLOW: + fs_size = sizeof(struct ethhdr); + break; + case IP_USER_FLOW: + fs_size = sizeof(struct ethtool_usrip4_spec); + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + fs_size = sizeof(struct ethtool_tcpip6_spec); + break; + case TCP_V4_FLOW: + case UDP_V4_FLOW: + fs_size = sizeof(struct ethtool_tcpip4_spec); + break; + default: + continue; + } 
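+
+		/* Flow type and destination ring matched above; the rules
+		 * are duplicates only if the type-specific header values
+		 * and masks are identical too.
+		 */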
+		if (memcmp(&cur->h_u, &fs->h_u, fs_size) ||
+		    memcmp(&cur->m_u, &fs->m_u, fs_size))
+			continue;
+
+		if (cur->flow_type & FLOW_EXT) {
+			if (cur->h_ext.vlan_etype != fs->h_ext.vlan_etype ||
+			    cur->m_ext.vlan_etype != fs->m_ext.vlan_etype ||
+			    cur->h_ext.vlan_tci != fs->h_ext.vlan_tci ||
+			    cur->m_ext.vlan_tci != fs->m_ext.vlan_tci ||
+			    cur->h_ext.data[0] != fs->h_ext.data[0])
+				continue;
+		}
+		if (cur->flow_type & FLOW_MAC_EXT) {
+			if (memcmp(&cur->h_ext.h_dest,
+				   &fs->h_ext.h_dest, ETH_ALEN) ||
+			    memcmp(&cur->m_ext.h_dest,
+				   &fs->m_ext.h_dest, ETH_ALEN))
+				continue;
+		}
+
+		return true;
+	}
+
+	return false;
+}
+
+/* If no matching network filter is found, return an open (unclaimed)
+ * filter. If no open filters remain, return an error pointer.
+ */
+struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
+						  u32 loc, bool wake_filter,
+						  bool init)
+{
+	struct bcmasp_net_filter *nfilter = NULL;
+	struct bcmasp_priv *priv = intf->parent;
+	int i, open_index = -1;
+
+	/* Check whether we exceed the filter table capacity */
+	if (loc != RX_CLS_LOC_ANY && loc >= NUM_NET_FILTERS)
+		return ERR_PTR(-EINVAL);
+
+	/* If the filter location is busy (already claimed) and we are
+	 * initializing the filter (insertion), return a busy error code.
+	 */
+	if (loc != RX_CLS_LOC_ANY && init && priv->net_filters[loc].claimed)
+		return ERR_PTR(-EBUSY);
+
+	/* We need two filters for wake-up, so we cannot use an odd filter */
+	if (wake_filter && loc != RX_CLS_LOC_ANY && (loc % 2))
+		return ERR_PTR(-EINVAL);
+
+	/* Initialize the loop index based on the desired location or from 0 */
+	i = loc == RX_CLS_LOC_ANY ? 0 : loc;
+
+	for ( ; i < NUM_NET_FILTERS; i++) {
+		/* Found matching network filter */
+		if (!init &&
+		    priv->net_filters[i].claimed &&
+		    priv->net_filters[i].hw_index == i &&
+		    priv->net_filters[i].port == intf->port)
+			return &priv->net_filters[i];
+
+		/* If we don't need a new filter or an open slot was already
+		 * found, keep looking for a match.
+		 */
+		if (!init || open_index >= 0)
+			continue;
+
+		/* A wake filter consolidates two filters to cover more bytes.
+		 * A wake filter slot is therefore open only if...
+		 * 1. It is an even filter
+		 * 2. Neither the current nor the next filter is claimed
+		 */
+		if (wake_filter && !(i % 2) && !priv->net_filters[i].claimed &&
+		    !priv->net_filters[i + 1].claimed)
+			open_index = i;
+		else if (!priv->net_filters[i].claimed)
+			open_index = i;
+	}
+
+	if (open_index >= 0) {
+		nfilter = &priv->net_filters[open_index];
+		nfilter->claimed = true;
+		nfilter->port = intf->port;
+		nfilter->hw_index = open_index;
+	}
+
+	if (wake_filter && open_index >= 0) {
+		/* Claim next filter */
+		priv->net_filters[open_index + 1].claimed = true;
+		priv->net_filters[open_index + 1].wake_filter = true;
+		nfilter->wake_filter = true;
+	}
+
+	return nfilter ? nfilter : ERR_PTR(-EINVAL);
+}
+
+void bcmasp_netfilt_release(struct bcmasp_intf *intf,
+			    struct bcmasp_net_filter *nfilt)
+{
+	struct bcmasp_priv *priv = intf->parent;
+
+	if (nfilt->wake_filter) {
+		memset(&priv->net_filters[nfilt->hw_index + 1], 0,
+		       sizeof(struct bcmasp_net_filter));
+	}
+
+	memset(nfilt, 0, sizeof(struct bcmasp_net_filter));
+}
+
+static void bcmasp_addr_to_uint(unsigned char *addr, u32 *high, u32 *low)
+{
+	*high = (u32)(addr[0] << 8 | addr[1]);
+	*low = (u32)(addr[2] << 24 | addr[3] << 16 | addr[4] << 8 |
+		     addr[5]);
+}
+
+static void bcmasp_set_mda_filter(struct bcmasp_intf *intf,
+				  const unsigned char *addr,
+				  unsigned char *mask,
+				  unsigned int i)
+{
+	struct bcmasp_priv *priv = intf->parent;
+	u32 addr_h, addr_l, mask_h, mask_l;
+
+	/* Set local copy */
+	ether_addr_copy(priv->mda_filters[i].mask, mask);
+	ether_addr_copy(priv->mda_filters[i].addr, addr);
+
+	/* Write to HW */
+	bcmasp_addr_to_uint(priv->mda_filters[i].mask, &mask_h, &mask_l);
+	bcmasp_addr_to_uint(priv->mda_filters[i].addr, &addr_h, &addr_l);
+	rx_filter_core_wl(priv, addr_h, ASP_RX_FILTER_MDA_PAT_H(i));
+	rx_filter_core_wl(priv, addr_l, ASP_RX_FILTER_MDA_PAT_L(i));
+	rx_filter_core_wl(priv, mask_h, ASP_RX_FILTER_MDA_MSK_H(i));
+	rx_filter_core_wl(priv, mask_l, ASP_RX_FILTER_MDA_MSK_L(i));
+}
+
+static void bcmasp_en_mda_filter(struct bcmasp_intf *intf, bool en,
+				 unsigned int i)
+{
+	struct bcmasp_priv *priv = intf->parent;
+
+	if (priv->mda_filters[i].en == en)
+		return;
+
+	priv->mda_filters[i].en = en;
+	priv->mda_filters[i].port = intf->port;
+
+	rx_filter_core_wl(priv, ((intf->channel + 8) |
+			  (en << ASP_RX_FILTER_MDA_CFG_EN_SHIFT) |
+			  ASP_RX_FILTER_MDA_CFG_UMC_SEL(intf->port)),
+			  ASP_RX_FILTER_MDA_CFG(i));
+}
+
+/* There are 32 MDA filters shared between all ports; we reserve 4 filters per
+ * port for the following:
+ * - Promisc: Filter to allow all packets when promisc is enabled
+ * - All Multicast
+ * - Broadcast
+ * - Own address
+ *
+ * The reserved filters are identified as follows.
+ * - Promisc: (index * 4) + 0 + * - All Multicast: (index * 4) + 1 + * - Broadcast: (index * 4) + 2 + * - Own address: (index * 4) + 3 + */ +enum asp_rx_filter_id { + ASP_RX_FILTER_MDA_PROMISC = 0, + ASP_RX_FILTER_MDA_ALLMULTI, + ASP_RX_FILTER_MDA_BROADCAST, + ASP_RX_FILTER_MDA_OWN_ADDR, + ASP_RX_FILTER_MDA_RES_MAX, +}; + +#define ASP_RX_FILT_MDA(intf, name) (((intf)->index * \ + ASP_RX_FILTER_MDA_RES_MAX) \ + + ASP_RX_FILTER_MDA_##name) + +static int bcmasp_total_res_mda_cnt(struct bcmasp_priv *priv) +{ + return list_count_nodes(&priv->intfs) * ASP_RX_FILTER_MDA_RES_MAX; +} + +void bcmasp_set_promisc(struct bcmasp_intf *intf, bool en) +{ + unsigned int i = ASP_RX_FILT_MDA(intf, PROMISC); + unsigned char promisc[ETH_ALEN]; + + eth_zero_addr(promisc); + /* Set mask to 00:00:00:00:00:00 to match all packets */ + bcmasp_set_mda_filter(intf, promisc, promisc, i); + bcmasp_en_mda_filter(intf, en, i); +} + +void bcmasp_set_allmulti(struct bcmasp_intf *intf, bool en) +{ + unsigned char allmulti[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00}; + unsigned int i = ASP_RX_FILT_MDA(intf, ALLMULTI); + + /* Set mask to 01:00:00:00:00:00 to match all multicast */ + bcmasp_set_mda_filter(intf, allmulti, allmulti, i); + bcmasp_en_mda_filter(intf, en, i); +} + +void bcmasp_set_broad(struct bcmasp_intf *intf, bool en) +{ + unsigned int i = ASP_RX_FILT_MDA(intf, BROADCAST); + unsigned char addr[ETH_ALEN]; + + eth_broadcast_addr(addr); + bcmasp_set_mda_filter(intf, addr, addr, i); + bcmasp_en_mda_filter(intf, en, i); +} + +void bcmasp_set_oaddr(struct bcmasp_intf *intf, const unsigned char *addr, + bool en) +{ + unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + unsigned int i = ASP_RX_FILT_MDA(intf, OWN_ADDR); + + bcmasp_set_mda_filter(intf, addr, mask, i); + bcmasp_en_mda_filter(intf, en, i); +} + +void bcmasp_disable_all_filters(struct bcmasp_intf *intf) +{ + struct bcmasp_priv *priv = intf->parent; + unsigned int i; + int res_count; + + res_count = bcmasp_total_res_mda_cnt(intf->parent); + + /* Disable all filters held by this port */ + for (i = res_count; i < NUM_MDA_FILTERS; i++) { + if (priv->mda_filters[i].en && + priv->mda_filters[i].port == intf->port) + bcmasp_en_mda_filter(intf, 0, i); + } +} + +static int bcmasp_combine_set_filter(struct bcmasp_intf *intf, + unsigned char *addr, unsigned char *mask, + int i) +{ + struct bcmasp_priv *priv = intf->parent; + u64 addr1, addr2, mask1, mask2, mask3; + + /* Switch to u64 to help with the calculations */ + addr1 = ether_addr_to_u64(priv->mda_filters[i].addr); + mask1 = ether_addr_to_u64(priv->mda_filters[i].mask); + addr2 = ether_addr_to_u64(addr); + mask2 = ether_addr_to_u64(mask); + + /* Check if one filter resides within the other */ + mask3 = mask1 & mask2; + if (mask3 == mask1 && ((addr1 & mask1) == (addr2 & mask1))) { + /* Filter 2 resides within filter 1, so everything is good */ + return 0; + } else if (mask3 == mask2 && ((addr1 & mask2) == (addr2 & mask2))) { + /* Filter 1 resides within filter 2, so swap filters */ + bcmasp_set_mda_filter(intf, addr, mask, i); + return 0; + } + + /* Unable to combine */ + return -EINVAL; +} + +int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr, + unsigned char *mask) +{ + struct bcmasp_priv *priv = intf->parent; + int ret, res_count; + unsigned int i; + + res_count = bcmasp_total_res_mda_cnt(intf->parent); + + for (i = res_count; i < NUM_MDA_FILTERS; i++) { + /* If filter not enabled or belongs to another port skip */ + if (!priv->mda_filters[i].en || + priv->mda_filters[i].port != 
intf->port) + continue; + + /* Attempt to combine filters */ + ret = bcmasp_combine_set_filter(intf, addr, mask, i); + if (!ret) { + intf->mib.filters_combine_cnt++; + return 0; + } + } + + /* Create new filter if possible */ + for (i = res_count; i < NUM_MDA_FILTERS; i++) { + if (priv->mda_filters[i].en) + continue; + + bcmasp_set_mda_filter(intf, addr, mask, i); + bcmasp_en_mda_filter(intf, 1, i); + return 0; + } + + /* No room for new filter */ + return -EINVAL; +} + +static void bcmasp_core_init_filters(struct bcmasp_priv *priv) +{ + unsigned int i; + + /* Disable all filters and reset software view since the HW + * can lose context while in deep sleep suspend states + */ + for (i = 0; i < NUM_MDA_FILTERS; i++) { + rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_MDA_CFG(i)); + priv->mda_filters[i].en = 0; + } + + for (i = 0; i < NUM_NET_FILTERS; i++) + rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_NET_CFG(i)); + + /* Top level filter enable bit should be enabled at all times, set + * GEN_WAKE_CLEAR to clear the network filter wake-up which would + * otherwise be sticky + */ + rx_filter_core_wl(priv, (ASP_RX_FILTER_OPUT_EN | + ASP_RX_FILTER_MDA_EN | + ASP_RX_FILTER_GEN_WK_CLR | + ASP_RX_FILTER_NT_FLT_EN), + ASP_RX_FILTER_BLK_CTRL); +} + +/* ASP core initialization */ +static void bcmasp_core_init(struct bcmasp_priv *priv) +{ + tx_analytics_core_wl(priv, 0x0, ASP_TX_ANALYTICS_CTRL); + rx_analytics_core_wl(priv, 0x4, ASP_RX_ANALYTICS_CTRL); + + rx_edpkt_core_wl(priv, (ASP_EDPKT_HDR_SZ_128 << ASP_EDPKT_HDR_SZ_SHIFT), + ASP_EDPKT_HDR_CFG); + rx_edpkt_core_wl(priv, + (ASP_EDPKT_ENDI_BT_SWP_WD << ASP_EDPKT_ENDI_DESC_SHIFT), + ASP_EDPKT_ENDI); + + rx_edpkt_core_wl(priv, 0x1b, ASP_EDPKT_BURST_BUF_PSCAL_TOUT); + rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_WRITE_TOUT); + rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_READ_TOUT); + + rx_edpkt_core_wl(priv, ASP_EDPKT_ENABLE_EN, ASP_EDPKT_ENABLE); + + /* Disable and clear both UniMAC's wake-up interrupts to avoid + * sticky interrupts. + */ + _intr2_mask_set(priv, ASP_INTR2_UMC0_WAKE | ASP_INTR2_UMC1_WAKE); + intr2_core_wl(priv, ASP_INTR2_UMC0_WAKE | ASP_INTR2_UMC1_WAKE, + ASP_INTR2_CLEAR); +} + +static void bcmasp_core_clock_select(struct bcmasp_priv *priv, bool slow) +{ + u32 reg; + + reg = ctrl_core_rl(priv, ASP_CTRL_CORE_CLOCK_SELECT); + if (slow) + reg &= ~ASP_CTRL_CORE_CLOCK_SELECT_MAIN; + else + reg |= ASP_CTRL_CORE_CLOCK_SELECT_MAIN; + ctrl_core_wl(priv, reg, ASP_CTRL_CORE_CLOCK_SELECT); +} + +static void bcmasp_core_clock_set_ll(struct bcmasp_priv *priv, u32 clr, u32 set) +{ + u32 reg; + + reg = ctrl_core_rl(priv, ASP_CTRL_CLOCK_CTRL); + reg &= ~clr; + reg |= set; + ctrl_core_wl(priv, reg, ASP_CTRL_CLOCK_CTRL); + + reg = ctrl_core_rl(priv, ASP_CTRL_SCRATCH_0); + reg &= ~clr; + reg |= set; + ctrl_core_wl(priv, reg, ASP_CTRL_SCRATCH_0); +} + +static void bcmasp_core_clock_set(struct bcmasp_priv *priv, u32 clr, u32 set) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->clk_lock, flags); + bcmasp_core_clock_set_ll(priv, clr, set); + spin_unlock_irqrestore(&priv->clk_lock, flags); +} + +void bcmasp_core_clock_set_intf(struct bcmasp_intf *intf, bool en) +{ + u32 intf_mask = ASP_CTRL_CLOCK_CTRL_ASP_RGMII_DIS(intf->port); + struct bcmasp_priv *priv = intf->parent; + unsigned long flags; + u32 reg; + + /* When enabling an interface, if the RX or TX clocks were not enabled, + * enable them. Conversely, while disabling an interface, if this is + * the last one enabled, we can turn off the shared RX and TX clocks as + * well. 
We control enable bits which is why we test for equality on + * the RGMII clock bit mask. + */ + spin_lock_irqsave(&priv->clk_lock, flags); + if (en) { + intf_mask |= ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE | + ASP_CTRL_CLOCK_CTRL_ASP_RX_DISABLE; + bcmasp_core_clock_set_ll(priv, intf_mask, 0); + } else { + reg = ctrl_core_rl(priv, ASP_CTRL_SCRATCH_0) | intf_mask; + if ((reg & ASP_CTRL_CLOCK_CTRL_ASP_RGMII_MASK) == + ASP_CTRL_CLOCK_CTRL_ASP_RGMII_MASK) + intf_mask |= ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE | + ASP_CTRL_CLOCK_CTRL_ASP_RX_DISABLE; + bcmasp_core_clock_set_ll(priv, 0, intf_mask); + } + spin_unlock_irqrestore(&priv->clk_lock, flags); +} + +static irqreturn_t bcmasp_isr_wol(int irq, void *data) +{ + struct bcmasp_priv *priv = data; + u32 status; + + /* No L3 IRQ, so we good */ + if (priv->wol_irq <= 0) + goto irq_handled; + + status = wakeup_intr2_core_rl(priv, ASP_WAKEUP_INTR2_STATUS) & + ~wakeup_intr2_core_rl(priv, ASP_WAKEUP_INTR2_MASK_STATUS); + wakeup_intr2_core_wl(priv, status, ASP_WAKEUP_INTR2_CLEAR); + +irq_handled: + pm_wakeup_event(&priv->pdev->dev, 0); + return IRQ_HANDLED; +} + +static int bcmasp_get_and_request_irq(struct bcmasp_priv *priv, int i) +{ + struct platform_device *pdev = priv->pdev; + int irq, ret; + + irq = platform_get_irq_optional(pdev, i); + if (irq < 0) + return irq; + + ret = devm_request_irq(&pdev->dev, irq, bcmasp_isr_wol, 0, + pdev->name, priv); + if (ret) + return ret; + + return irq; +} + +static void bcmasp_init_wol_shared(struct bcmasp_priv *priv) +{ + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; + int irq; + + irq = bcmasp_get_and_request_irq(priv, 1); + if (irq < 0) { + dev_warn(dev, "Failed to init WoL irq: %d\n", irq); + return; + } + + priv->wol_irq = irq; + priv->wol_irq_enabled_mask = 0; + device_set_wakeup_capable(&pdev->dev, 1); +} + +static void bcmasp_enable_wol_shared(struct bcmasp_intf *intf, bool en) +{ + struct bcmasp_priv *priv = intf->parent; + struct device *dev = &priv->pdev->dev; + + if (en) { + if (priv->wol_irq_enabled_mask) { + set_bit(intf->port, &priv->wol_irq_enabled_mask); + return; + } + + /* First enable */ + set_bit(intf->port, &priv->wol_irq_enabled_mask); + enable_irq_wake(priv->wol_irq); + device_set_wakeup_enable(dev, 1); + } else { + if (!priv->wol_irq_enabled_mask) + return; + + clear_bit(intf->port, &priv->wol_irq_enabled_mask); + if (priv->wol_irq_enabled_mask) + return; + + /* Last disable */ + disable_irq_wake(priv->wol_irq); + device_set_wakeup_enable(dev, 0); + } +} + +static void bcmasp_wol_irq_destroy_shared(struct bcmasp_priv *priv) +{ + if (priv->wol_irq > 0) + free_irq(priv->wol_irq, priv); +} + +static void bcmasp_init_wol_per_intf(struct bcmasp_priv *priv) +{ + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; + struct bcmasp_intf *intf; + int irq; + + list_for_each_entry(intf, &priv->intfs, list) { + irq = bcmasp_get_and_request_irq(priv, intf->port + 1); + if (irq < 0) { + dev_warn(dev, "Failed to init WoL irq(port %d): %d\n", + intf->port, irq); + continue; + } + + intf->wol_irq = irq; + intf->wol_irq_enabled = false; + device_set_wakeup_capable(&pdev->dev, 1); + } +} + +static void bcmasp_enable_wol_per_intf(struct bcmasp_intf *intf, bool en) +{ + struct device *dev = &intf->parent->pdev->dev; + + if (en ^ intf->wol_irq_enabled) + irq_set_irq_wake(intf->wol_irq, en); + + intf->wol_irq_enabled = en; + device_set_wakeup_enable(dev, en); +} + +static void bcmasp_wol_irq_destroy_per_intf(struct bcmasp_priv *priv) +{ + struct bcmasp_intf *intf; 
+ + list_for_each_entry(intf, &priv->intfs, list) { + if (intf->wol_irq > 0) + free_irq(intf->wol_irq, priv); + } +} + +static struct bcmasp_hw_info v20_hw_info = { + .rx_ctrl_flush = ASP_RX_CTRL_FLUSH, + .umac2fb = UMAC2FB_OFFSET, + .rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT, + .rx_ctrl_fb_filt_out_frame_count = ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT, + .rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH, +}; + +static const struct bcmasp_plat_data v20_plat_data = { + .init_wol = bcmasp_init_wol_per_intf, + .enable_wol = bcmasp_enable_wol_per_intf, + .destroy_wol = bcmasp_wol_irq_destroy_per_intf, + .hw_info = &v20_hw_info, +}; + +static struct bcmasp_hw_info v21_hw_info = { + .rx_ctrl_flush = ASP_RX_CTRL_FLUSH_2_1, + .umac2fb = UMAC2FB_OFFSET_2_1, + .rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT_2_1, + .rx_ctrl_fb_filt_out_frame_count = + ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT_2_1, + .rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH_2_1, +}; + +static const struct bcmasp_plat_data v21_plat_data = { + .init_wol = bcmasp_init_wol_shared, + .enable_wol = bcmasp_enable_wol_shared, + .destroy_wol = bcmasp_wol_irq_destroy_shared, + .hw_info = &v21_hw_info, +}; + +static const struct of_device_id bcmasp_of_match[] = { + { .compatible = "brcm,asp-v2.0", .data = &v20_plat_data }, + { .compatible = "brcm,asp-v2.1", .data = &v21_plat_data }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, bcmasp_of_match); + +static const struct of_device_id bcmasp_mdio_of_match[] = { + { .compatible = "brcm,asp-v2.1-mdio", }, + { .compatible = "brcm,asp-v2.0-mdio", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, bcmasp_mdio_of_match); + +static void bcmasp_remove_intfs(struct bcmasp_priv *priv) +{ + struct bcmasp_intf *intf, *n; + + list_for_each_entry_safe(intf, n, &priv->intfs, list) { + list_del(&intf->list); + bcmasp_interface_destroy(intf); + } +} + +static int bcmasp_probe(struct platform_device *pdev) +{ + struct device_node *ports_node, *intf_node; + const struct bcmasp_plat_data *pdata; + struct device *dev = &pdev->dev; + struct bcmasp_priv *priv; + struct bcmasp_intf *intf; + int ret = 0, count = 0; + unsigned int i; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->irq = platform_get_irq(pdev, 0); + if (priv->irq <= 0) + return -EINVAL; + + priv->clk = devm_clk_get_optional_enabled(dev, "sw_asp"); + if (IS_ERR(priv->clk)) + return dev_err_probe(dev, PTR_ERR(priv->clk), + "failed to request clock\n"); + + /* Base from parent node */ + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return dev_err_probe(dev, PTR_ERR(priv->base), "failed to iomap\n"); + + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + if (ret) + return dev_err_probe(dev, ret, "unable to set DMA mask: %d\n", ret); + + dev_set_drvdata(&pdev->dev, priv); + priv->pdev = pdev; + spin_lock_init(&priv->mda_lock); + spin_lock_init(&priv->clk_lock); + mutex_init(&priv->wol_lock); + mutex_init(&priv->net_lock); + INIT_LIST_HEAD(&priv->intfs); + + pdata = device_get_match_data(&pdev->dev); + if (!pdata) + return dev_err_probe(dev, -EINVAL, "unable to find platform data\n"); + + priv->init_wol = pdata->init_wol; + priv->enable_wol = pdata->enable_wol; + priv->destroy_wol = pdata->destroy_wol; + priv->hw_info = pdata->hw_info; + + /* Enable all clocks to ensure successful probing */ + bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0); + + /* Switch to the main clock */ + 
bcmasp_core_clock_select(priv, false);
+
+	bcmasp_intr2_mask_set_all(priv);
+	bcmasp_intr2_clear_all(priv);
+
+	ret = devm_request_irq(&pdev->dev, priv->irq, bcmasp_isr, 0,
+			       pdev->name, priv);
+	if (ret)
+		return dev_err_probe(dev, ret, "failed to request ASP interrupt: %d", ret);
+
+	/* Register mdio child nodes */
+	of_platform_populate(dev->of_node, bcmasp_mdio_of_match, NULL, dev);
+
+	/* ASP-specific initialization; needs to be done regardless of
+	 * how many interfaces come up.
+	 */
+	bcmasp_core_init(priv);
+	bcmasp_core_init_filters(priv);
+
+	ports_node = of_find_node_by_name(dev->of_node, "ethernet-ports");
+	if (!ports_node) {
+		dev_warn(dev, "No ports found\n");
+		return -EINVAL;
+	}
+
+	i = 0;
+	for_each_available_child_of_node(ports_node, intf_node) {
+		intf = bcmasp_interface_create(priv, intf_node, i);
+		if (!intf) {
+			dev_err(dev, "Cannot create eth interface %d\n", i);
+			bcmasp_remove_intfs(priv);
+			of_node_put(intf_node);
+			ret = -ENOMEM;
+			goto of_put_exit;
+		}
+		list_add_tail(&intf->list, &priv->intfs);
+		i++;
+	}
+
+	/* Check and enable WoL */
+	priv->init_wol(priv);
+
+	/* Drop the clock reference count now and let ndo_open()/ndo_close()
+	 * manage it for us from now on.
+	 */
+	bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE);
+
+	clk_disable_unprepare(priv->clk);
+
+	/* Now do the registration of the network ports which will take care
+	 * of managing the clock properly.
+	 */
+	list_for_each_entry(intf, &priv->intfs, list) {
+		ret = register_netdev(intf->ndev);
+		if (ret) {
+			netdev_err(intf->ndev,
+				   "failed to register net_device: %d\n", ret);
+			priv->destroy_wol(priv);
+			bcmasp_remove_intfs(priv);
+			goto of_put_exit;
+		}
+		count++;
+	}
+
+	dev_info(dev, "Initialized %d port(s)\n", count);
+
+of_put_exit:
+	of_node_put(ports_node);
+	return ret;
+}
+
+static void bcmasp_remove(struct platform_device *pdev)
+{
+	struct bcmasp_priv *priv = dev_get_drvdata(&pdev->dev);
+
+	if (!priv)
+		return;
+
+	priv->destroy_wol(priv);
+	bcmasp_remove_intfs(priv);
+}
+
+static void bcmasp_shutdown(struct platform_device *pdev)
+{
+	bcmasp_remove(pdev);
+}
+
+static int __maybe_unused bcmasp_suspend(struct device *d)
+{
+	struct bcmasp_priv *priv = dev_get_drvdata(d);
+	struct bcmasp_intf *intf;
+	int ret;
+
+	list_for_each_entry(intf, &priv->intfs, list) {
+		ret = bcmasp_interface_suspend(intf);
+		if (ret)
+			break;
+	}
+
+	ret = clk_prepare_enable(priv->clk);
+	if (ret)
+		return ret;
+
+	/* Whether Wake-on-LAN is enabled or not, we can always disable
+	 * the shared TX clock
+	 */
+	bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE);
+
+	bcmasp_core_clock_select(priv, true);
+
+	clk_disable_unprepare(priv->clk);
+
+	return ret;
+}
+
+static int __maybe_unused bcmasp_resume(struct device *d)
+{
+	struct bcmasp_priv *priv = dev_get_drvdata(d);
+	struct bcmasp_intf *intf;
+	int ret;
+
+	ret = clk_prepare_enable(priv->clk);
+	if (ret)
+		return ret;
+
+	/* Switch to the main clock domain */
+	bcmasp_core_clock_select(priv, false);
+
+	/* Re-enable all clocks for re-initialization */
+	bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);
+
+	bcmasp_core_init(priv);
+	bcmasp_core_init_filters(priv);
+
+	/* And disable them to let the network devices take care of them */
+	bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE);
+
+	clk_disable_unprepare(priv->clk);
+
+	list_for_each_entry(intf, &priv->intfs, list) {
+		ret = bcmasp_interface_resume(intf);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(bcmasp_pm_ops,
+
bcmasp_suspend, bcmasp_resume); + +static struct platform_driver bcmasp_driver = { + .probe = bcmasp_probe, + .remove_new = bcmasp_remove, + .shutdown = bcmasp_shutdown, + .driver = { + .name = "brcm,asp-v2", + .of_match_table = bcmasp_of_match, + .pm = &bcmasp_pm_ops, + }, +}; +module_platform_driver(bcmasp_driver); + +MODULE_DESCRIPTION("Broadcom ASP 2.0 Ethernet controller driver"); +MODULE_ALIAS("platform:brcm,asp-v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h new file mode 100644 index 000000000000..ec90add6b03e --- /dev/null +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h @@ -0,0 +1,586 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __BCMASP_H +#define __BCMASP_H + +#include <linux/netdevice.h> +#include <linux/phy.h> +#include <linux/io-64-nonatomic-hi-lo.h> +#include <uapi/linux/ethtool.h> + +#define ASP_INTR2_OFFSET 0x1000 +#define ASP_INTR2_STATUS 0x0 +#define ASP_INTR2_SET 0x4 +#define ASP_INTR2_CLEAR 0x8 +#define ASP_INTR2_MASK_STATUS 0xc +#define ASP_INTR2_MASK_SET 0x10 +#define ASP_INTR2_MASK_CLEAR 0x14 + +#define ASP_INTR2_RX_ECH(intr) BIT(intr) +#define ASP_INTR2_TX_DESC(intr) BIT((intr) + 14) +#define ASP_INTR2_UMC0_WAKE BIT(22) +#define ASP_INTR2_UMC1_WAKE BIT(28) + +#define ASP_WAKEUP_INTR2_OFFSET 0x1200 +#define ASP_WAKEUP_INTR2_STATUS 0x0 +#define ASP_WAKEUP_INTR2_SET 0x4 +#define ASP_WAKEUP_INTR2_CLEAR 0x8 +#define ASP_WAKEUP_INTR2_MASK_STATUS 0xc +#define ASP_WAKEUP_INTR2_MASK_SET 0x10 +#define ASP_WAKEUP_INTR2_MASK_CLEAR 0x14 +#define ASP_WAKEUP_INTR2_MPD_0 BIT(0) +#define ASP_WAKEUP_INTR2_MPD_1 BIT(1) +#define ASP_WAKEUP_INTR2_FILT_0 BIT(2) +#define ASP_WAKEUP_INTR2_FILT_1 BIT(3) +#define ASP_WAKEUP_INTR2_FW BIT(4) + +#define ASP_TX_ANALYTICS_OFFSET 0x4c000 +#define ASP_TX_ANALYTICS_CTRL 0x0 + +#define ASP_RX_ANALYTICS_OFFSET 0x98000 +#define ASP_RX_ANALYTICS_CTRL 0x0 + +#define ASP_RX_CTRL_OFFSET 0x9f000 +#define ASP_RX_CTRL_UMAC_0_FRAME_COUNT 0x8 +#define ASP_RX_CTRL_UMAC_1_FRAME_COUNT 0xc +#define ASP_RX_CTRL_FB_0_FRAME_COUNT 0x14 +#define ASP_RX_CTRL_FB_1_FRAME_COUNT 0x18 +#define ASP_RX_CTRL_FB_8_FRAME_COUNT 0x1c +/* asp2.1 diverges offsets here */ +/* ASP2.0 */ +#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT 0x20 +#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT 0x24 +#define ASP_RX_CTRL_FLUSH 0x28 +#define ASP_CTRL_UMAC0_FLUSH_MASK (BIT(0) | BIT(12)) +#define ASP_CTRL_UMAC1_FLUSH_MASK (BIT(1) | BIT(13)) +#define ASP_CTRL_SPB_FLUSH_MASK (BIT(8) | BIT(20)) +#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH 0x30 +/* ASP2.1 */ +#define ASP_RX_CTRL_FB_9_FRAME_COUNT_2_1 0x20 +#define ASP_RX_CTRL_FB_10_FRAME_COUNT_2_1 0x24 +#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT_2_1 0x28 +#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT_2_1 0x2c +#define ASP_RX_CTRL_FLUSH_2_1 0x30 +#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH_2_1 0x38 + +#define ASP_RX_FILTER_OFFSET 0x80000 +#define ASP_RX_FILTER_BLK_CTRL 0x0 +#define ASP_RX_FILTER_OPUT_EN BIT(0) +#define ASP_RX_FILTER_MDA_EN BIT(1) +#define ASP_RX_FILTER_LNR_MD BIT(2) +#define ASP_RX_FILTER_GEN_WK_EN BIT(3) +#define ASP_RX_FILTER_GEN_WK_CLR BIT(4) +#define ASP_RX_FILTER_NT_FLT_EN BIT(5) +#define ASP_RX_FILTER_MDA_CFG(sel) (((sel) * 0x14) + 0x100) +#define ASP_RX_FILTER_MDA_CFG_EN_SHIFT 8 +#define ASP_RX_FILTER_MDA_CFG_UMC_SEL(sel) ((sel) > 1 ? 
BIT(17) : \
+						BIT((sel) + 9))
+#define ASP_RX_FILTER_MDA_PAT_H(sel)		(((sel) * 0x14) + 0x104)
+#define ASP_RX_FILTER_MDA_PAT_L(sel)		(((sel) * 0x14) + 0x108)
+#define ASP_RX_FILTER_MDA_MSK_H(sel)		(((sel) * 0x14) + 0x10c)
+#define ASP_RX_FILTER_MDA_MSK_L(sel)		(((sel) * 0x14) + 0x110)
+#define ASP_RX_FILTER_NET_CFG(sel)		(((sel) * 0xa04) + 0x400)
+#define ASP_RX_FILTER_NET_CFG_CH(sel)		((sel) << 0)
+#define ASP_RX_FILTER_NET_CFG_EN		BIT(9)
+#define ASP_RX_FILTER_NET_CFG_L2_EN		BIT(10)
+#define ASP_RX_FILTER_NET_CFG_L3_EN		BIT(11)
+#define ASP_RX_FILTER_NET_CFG_L4_EN		BIT(12)
+#define ASP_RX_FILTER_NET_CFG_L3_FRM(sel)	((sel) << 13)
+#define ASP_RX_FILTER_NET_CFG_L4_FRM(sel)	((sel) << 15)
+#define ASP_RX_FILTER_NET_CFG_UMC(sel)		BIT((sel) + 19)
+#define ASP_RX_FILTER_NET_CFG_DMA_EN		BIT(27)
+
+#define ASP_RX_FILTER_NET_OFFSET_MAX		32
+#define ASP_RX_FILTER_NET_PAT(sel, block, off) \
+		(((sel) * 0xa04) + ((block) * 0x200) + (off) + 0x600)
+#define ASP_RX_FILTER_NET_MASK(sel, block, off) \
+		(((sel) * 0xa04) + ((block) * 0x200) + (off) + 0x700)
+
+#define ASP_RX_FILTER_NET_OFFSET(sel)		(((sel) * 0xa04) + 0xe00)
+#define ASP_RX_FILTER_NET_OFFSET_L2(val)	((val) << 0)
+#define ASP_RX_FILTER_NET_OFFSET_L3_0(val)	((val) << 8)
+#define ASP_RX_FILTER_NET_OFFSET_L3_1(val)	((val) << 16)
+#define ASP_RX_FILTER_NET_OFFSET_L4(val)	((val) << 24)
+
+enum asp_rx_net_filter_block {
+	ASP_RX_FILTER_NET_L2 = 0,
+	ASP_RX_FILTER_NET_L3_0,
+	ASP_RX_FILTER_NET_L3_1,
+	ASP_RX_FILTER_NET_L4,
+	ASP_RX_FILTER_NET_BLOCK_MAX
+};
+
+#define ASP_EDPKT_OFFSET			0x9c000
+#define ASP_EDPKT_ENABLE			0x4
+#define ASP_EDPKT_ENABLE_EN			BIT(0)
+#define ASP_EDPKT_HDR_CFG			0xc
+#define ASP_EDPKT_HDR_SZ_SHIFT			2
+#define ASP_EDPKT_HDR_SZ_32			0
+#define ASP_EDPKT_HDR_SZ_64			1
+#define ASP_EDPKT_HDR_SZ_96			2
+#define ASP_EDPKT_HDR_SZ_128			3
+#define ASP_EDPKT_BURST_BUF_PSCAL_TOUT		0x10
+#define ASP_EDPKT_BURST_BUF_WRITE_TOUT		0x14
+#define ASP_EDPKT_BURST_BUF_READ_TOUT		0x18
+#define ASP_EDPKT_RX_TS_COUNTER			0x38
+#define ASP_EDPKT_ENDI				0x48
+#define ASP_EDPKT_ENDI_DESC_SHIFT		8
+#define ASP_EDPKT_ENDI_NO_BT_SWP		0
+#define ASP_EDPKT_ENDI_BT_SWP_WD		1
+#define ASP_EDPKT_RX_PKT_CNT			0x138
+#define ASP_EDPKT_HDR_EXTR_CNT			0x13c
+#define ASP_EDPKT_HDR_OUT_CNT			0x140
+
+#define ASP_CTRL				0x101000
+#define ASP_CTRL_ASP_SW_INIT			0x04
+#define ASP_CTRL_ASP_SW_INIT_ACPUSS_CORE	BIT(0)
+#define ASP_CTRL_ASP_SW_INIT_ASP_TX		BIT(1)
+#define ASP_CTRL_ASP_SW_INIT_AS_RX		BIT(2)
+#define ASP_CTRL_ASP_SW_INIT_ASP_RGMII_UMAC0	BIT(3)
+#define ASP_CTRL_ASP_SW_INIT_ASP_RGMII_UMAC1	BIT(4)
+#define ASP_CTRL_ASP_SW_INIT_ASP_XMEMIF	BIT(5)
+#define ASP_CTRL_CLOCK_CTRL			0x04
+#define ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE	BIT(0)
+#define ASP_CTRL_CLOCK_CTRL_ASP_RX_DISABLE	BIT(1)
+#define ASP_CTRL_CLOCK_CTRL_ASP_RGMII_SHIFT	2
+#define ASP_CTRL_CLOCK_CTRL_ASP_RGMII_MASK	(0x7 << ASP_CTRL_CLOCK_CTRL_ASP_RGMII_SHIFT)
+#define ASP_CTRL_CLOCK_CTRL_ASP_RGMII_DIS(x)	BIT(ASP_CTRL_CLOCK_CTRL_ASP_RGMII_SHIFT + (x))
+#define ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE	GENMASK(4, 0)
+#define ASP_CTRL_CORE_CLOCK_SELECT		0x08
+#define ASP_CTRL_CORE_CLOCK_SELECT_MAIN	BIT(0)
+#define ASP_CTRL_SCRATCH_0			0x0c
+
+struct bcmasp_tx_cb {
+	struct sk_buff *skb;
+	unsigned int bytes_sent;
+	bool last;
+
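+	/* DMA address/length recorded at map time so the TX completion
+	 * path can unmap the buffer
+	 */
+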
DEFINE_DMA_UNMAP_ADDR(dma_addr); + DEFINE_DMA_UNMAP_LEN(dma_len); +}; + +struct bcmasp_res { + /* Per interface resources */ + /* Port */ + void __iomem *umac; + void __iomem *umac2fb; + void __iomem *rgmii; + + /* TX slowpath/configuration */ + void __iomem *tx_spb_ctrl; + void __iomem *tx_spb_top; + void __iomem *tx_epkt_core; + void __iomem *tx_pause_ctrl; +}; + +#define DESC_ADDR(x) ((x) & GENMASK_ULL(39, 0)) +#define DESC_FLAGS(x) ((x) & GENMASK_ULL(63, 40)) + +struct bcmasp_desc { + u64 buf; + #define DESC_CHKSUM BIT_ULL(40) + #define DESC_CRC_ERR BIT_ULL(41) + #define DESC_RX_SYM_ERR BIT_ULL(42) + #define DESC_NO_OCT_ALN BIT_ULL(43) + #define DESC_PKT_TRUC BIT_ULL(44) + /* 39:0 (TX/RX) bits 0-39 of buf addr + * 40 (RX) checksum + * 41 (RX) crc_error + * 42 (RX) rx_symbol_error + * 43 (RX) non_octet_aligned + * 44 (RX) pkt_truncated + * 45 Reserved + * 56:46 (RX) mac_filter_id + * 60:57 (RX) rx_port_num (0-unicmac0, 1-unimac1) + * 61 Reserved + * 63:62 (TX) forward CRC, overwrite CRC + */ + u32 size; + u32 flags; + #define DESC_INT_EN BIT(0) + #define DESC_SOF BIT(1) + #define DESC_EOF BIT(2) + #define DESC_EPKT_CMD BIT(3) + #define DESC_SCRAM_ST BIT(8) + #define DESC_SCRAM_END BIT(9) + #define DESC_PCPP BIT(10) + #define DESC_PPPP BIT(11) + /* 0 (TX) tx_int_en + * 1 (TX/RX) SOF + * 2 (TX/RX) EOF + * 3 (TX) epkt_command + * 6:4 (TX) PA + * 7 (TX) pause at desc end + * 8 (TX) scram_start + * 9 (TX) scram_end + * 10 (TX) PCPP + * 11 (TX) PPPP + * 14:12 Reserved + * 15 (TX) pid ch Valid + * 19:16 (TX) data_pkt_type + * 32:20 (TX) pid_channel (RX) nw_filter_id + */ +}; + +struct bcmasp_intf; + +struct bcmasp_intf_stats64 { + /* Rx Stats */ + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + u64_stats_t rx_errors; + u64_stats_t rx_dropped; + u64_stats_t rx_crc_errs; + u64_stats_t rx_sym_errs; + + /* Tx Stats*/ + u64_stats_t tx_packets; + u64_stats_t tx_bytes; + + struct u64_stats_sync syncp; +}; + +struct bcmasp_mib_counters { + u32 edpkt_ts; + u32 edpkt_rx_pkt_cnt; + u32 edpkt_hdr_ext_cnt; + u32 edpkt_hdr_out_cnt; + u32 umac_frm_cnt; + u32 fb_frm_cnt; + u32 fb_rx_fifo_depth; + u32 fb_out_frm_cnt; + u32 fb_filt_out_frm_cnt; + u32 alloc_rx_skb_failed; + u32 tx_dma_failed; + u32 mc_filters_full_cnt; + u32 uc_filters_full_cnt; + u32 filters_combine_cnt; + u32 promisc_filters_cnt; + u32 tx_realloc_offload_failed; + u32 tx_timeout_cnt; +}; + +struct bcmasp_intf_ops { + unsigned long (*rx_desc_read)(struct bcmasp_intf *intf); + void (*rx_buffer_write)(struct bcmasp_intf *intf, dma_addr_t addr); + void (*rx_desc_write)(struct bcmasp_intf *intf, dma_addr_t addr); + unsigned long (*tx_read)(struct bcmasp_intf *intf); + void (*tx_write)(struct bcmasp_intf *intf, dma_addr_t addr); +}; + +struct bcmasp_priv; + +struct bcmasp_intf { + struct list_head list; + struct net_device *ndev; + struct bcmasp_priv *parent; + + /* ASP Ch */ + int channel; + int port; + const struct bcmasp_intf_ops *ops; + + /* Used for splitting shared resources */ + int index; + + struct napi_struct tx_napi; + /* TX ring, starts on a new cacheline boundary */ + void __iomem *tx_spb_dma; + int tx_spb_index; + int tx_spb_clean_index; + struct bcmasp_desc *tx_spb_cpu; + dma_addr_t tx_spb_dma_addr; + dma_addr_t tx_spb_dma_valid; + dma_addr_t tx_spb_dma_read; + struct bcmasp_tx_cb *tx_cbs; + + /* RX ring, starts on a new cacheline boundary */ + void __iomem *rx_edpkt_cfg; + void __iomem *rx_edpkt_dma; + int rx_edpkt_index; + int rx_buf_order; + struct bcmasp_desc *rx_edpkt_cpu; + dma_addr_t rx_edpkt_dma_addr; + dma_addr_t 
rx_edpkt_dma_read; + + /* RX buffer prefetcher ring*/ + void *rx_ring_cpu; + dma_addr_t rx_ring_dma; + dma_addr_t rx_ring_dma_valid; + struct napi_struct rx_napi; + + struct bcmasp_res res; + unsigned int crc_fwd; + + /* PHY device */ + struct device_node *phy_dn; + struct device_node *ndev_dn; + phy_interface_t phy_interface; + bool internal_phy; + int old_pause; + int old_link; + int old_duplex; + + u32 msg_enable; + + /* Statistics */ + struct bcmasp_intf_stats64 stats64; + struct bcmasp_mib_counters mib; + + u32 wolopts; + u8 sopass[SOPASS_MAX]; + /* Used if per intf wol irq */ + int wol_irq; + unsigned int wol_irq_enabled:1; + + struct ethtool_eee eee; +}; + +#define NUM_NET_FILTERS 32 +struct bcmasp_net_filter { + struct ethtool_rx_flow_spec fs; + + bool claimed; + bool wake_filter; + + int port; + unsigned int hw_index; +}; + +#define NUM_MDA_FILTERS 32 +struct bcmasp_mda_filter { + /* Current owner of this filter */ + int port; + bool en; + u8 addr[ETH_ALEN]; + u8 mask[ETH_ALEN]; +}; + +struct bcmasp_hw_info { + u32 rx_ctrl_flush; + u32 umac2fb; + u32 rx_ctrl_fb_out_frame_count; + u32 rx_ctrl_fb_filt_out_frame_count; + u32 rx_ctrl_fb_rx_fifo_depth; +}; + +struct bcmasp_plat_data { + void (*init_wol)(struct bcmasp_priv *priv); + void (*enable_wol)(struct bcmasp_intf *intf, bool en); + void (*destroy_wol)(struct bcmasp_priv *priv); + struct bcmasp_hw_info *hw_info; +}; + +struct bcmasp_priv { + struct platform_device *pdev; + struct clk *clk; + + int irq; + u32 irq_mask; + + /* Used if shared wol irq */ + struct mutex wol_lock; + int wol_irq; + unsigned long wol_irq_enabled_mask; + + void (*init_wol)(struct bcmasp_priv *priv); + void (*enable_wol)(struct bcmasp_intf *intf, bool en); + void (*destroy_wol)(struct bcmasp_priv *priv); + + void __iomem *base; + struct bcmasp_hw_info *hw_info; + + struct list_head intfs; + + struct bcmasp_mda_filter mda_filters[NUM_MDA_FILTERS]; + + /* MAC destination address filters lock */ + spinlock_t mda_lock; + + /* Protects accesses to ASP_CTRL_CLOCK_CTRL */ + spinlock_t clk_lock; + + struct bcmasp_net_filter net_filters[NUM_NET_FILTERS]; + + /* Network filter lock */ + struct mutex net_lock; +}; + +static inline unsigned long bcmasp_intf_rx_desc_read(struct bcmasp_intf *intf) +{ + return intf->ops->rx_desc_read(intf); +} + +static inline void bcmasp_intf_rx_buffer_write(struct bcmasp_intf *intf, + dma_addr_t addr) +{ + intf->ops->rx_buffer_write(intf, addr); +} + +static inline void bcmasp_intf_rx_desc_write(struct bcmasp_intf *intf, + dma_addr_t addr) +{ + intf->ops->rx_desc_write(intf, addr); +} + +static inline unsigned long bcmasp_intf_tx_read(struct bcmasp_intf *intf) +{ + return intf->ops->tx_read(intf); +} + +static inline void bcmasp_intf_tx_write(struct bcmasp_intf *intf, + dma_addr_t addr) +{ + intf->ops->tx_write(intf, addr); +} + +#define __BCMASP_IO_MACRO(name, m) \ +static inline u32 name##_rl(struct bcmasp_intf *intf, u32 off) \ +{ \ + u32 reg = readl_relaxed(intf->m + off); \ + return reg; \ +} \ +static inline void name##_wl(struct bcmasp_intf *intf, u32 val, u32 off)\ +{ \ + writel_relaxed(val, intf->m + off); \ +} + +#define BCMASP_IO_MACRO(name) __BCMASP_IO_MACRO(name, res.name) +#define BCMASP_FP_IO_MACRO(name) __BCMASP_IO_MACRO(name, name) + +BCMASP_IO_MACRO(umac); +BCMASP_IO_MACRO(umac2fb); +BCMASP_IO_MACRO(rgmii); +BCMASP_FP_IO_MACRO(tx_spb_dma); +BCMASP_IO_MACRO(tx_spb_ctrl); +BCMASP_IO_MACRO(tx_spb_top); +BCMASP_IO_MACRO(tx_epkt_core); +BCMASP_IO_MACRO(tx_pause_ctrl); +BCMASP_FP_IO_MACRO(rx_edpkt_dma); 
+BCMASP_FP_IO_MACRO(rx_edpkt_cfg); + +#define __BCMASP_FP_IO_MACRO_Q(name, m) \ +static inline u64 name##_rq(struct bcmasp_intf *intf, u32 off) \ +{ \ + u64 reg = readq_relaxed(intf->m + off); \ + return reg; \ +} \ +static inline void name##_wq(struct bcmasp_intf *intf, u64 val, u32 off)\ +{ \ + writeq_relaxed(val, intf->m + off); \ +} + +#define BCMASP_FP_IO_MACRO_Q(name) __BCMASP_FP_IO_MACRO_Q(name, name) + +BCMASP_FP_IO_MACRO_Q(tx_spb_dma); +BCMASP_FP_IO_MACRO_Q(rx_edpkt_dma); +BCMASP_FP_IO_MACRO_Q(rx_edpkt_cfg); + +#define PKT_OFFLOAD_NOP (0 << 28) +#define PKT_OFFLOAD_HDR_OP (1 << 28) +#define PKT_OFFLOAD_HDR_WRBACK BIT(19) +#define PKT_OFFLOAD_HDR_COUNT(x) ((x) << 16) +#define PKT_OFFLOAD_HDR_SIZE_1(x) ((x) << 4) +#define PKT_OFFLOAD_HDR_SIZE_2(x) (x) +#define PKT_OFFLOAD_HDR2_SIZE_2(x) ((x) << 24) +#define PKT_OFFLOAD_HDR2_SIZE_3(x) ((x) << 12) +#define PKT_OFFLOAD_HDR2_SIZE_4(x) (x) +#define PKT_OFFLOAD_EPKT_OP (2 << 28) +#define PKT_OFFLOAD_EPKT_WRBACK BIT(23) +#define PKT_OFFLOAD_EPKT_IP(x) ((x) << 21) +#define PKT_OFFLOAD_EPKT_TP(x) ((x) << 19) +#define PKT_OFFLOAD_EPKT_LEN(x) ((x) << 16) +#define PKT_OFFLOAD_EPKT_CSUM_L3 BIT(15) +#define PKT_OFFLOAD_EPKT_CSUM_L2 BIT(14) +#define PKT_OFFLOAD_EPKT_ID(x) ((x) << 12) +#define PKT_OFFLOAD_EPKT_SEQ(x) ((x) << 10) +#define PKT_OFFLOAD_EPKT_TS(x) ((x) << 8) +#define PKT_OFFLOAD_EPKT_BLOC(x) (x) +#define PKT_OFFLOAD_END_OP (7 << 28) + +struct bcmasp_pkt_offload { + __be32 nop; + __be32 header; + __be32 header2; + __be32 epkt; + __be32 end; +}; + +#define BCMASP_CORE_IO_MACRO(name, offset) \ +static inline u32 name##_core_rl(struct bcmasp_priv *priv, \ + u32 off) \ +{ \ + u32 reg = readl_relaxed(priv->base + (offset) + off); \ + return reg; \ +} \ +static inline void name##_core_wl(struct bcmasp_priv *priv, \ + u32 val, u32 off) \ +{ \ + writel_relaxed(val, priv->base + (offset) + off); \ +} + +BCMASP_CORE_IO_MACRO(intr2, ASP_INTR2_OFFSET); +BCMASP_CORE_IO_MACRO(wakeup_intr2, ASP_WAKEUP_INTR2_OFFSET); +BCMASP_CORE_IO_MACRO(tx_analytics, ASP_TX_ANALYTICS_OFFSET); +BCMASP_CORE_IO_MACRO(rx_analytics, ASP_RX_ANALYTICS_OFFSET); +BCMASP_CORE_IO_MACRO(rx_ctrl, ASP_RX_CTRL_OFFSET); +BCMASP_CORE_IO_MACRO(rx_filter, ASP_RX_FILTER_OFFSET); +BCMASP_CORE_IO_MACRO(rx_edpkt, ASP_EDPKT_OFFSET); +BCMASP_CORE_IO_MACRO(ctrl, ASP_CTRL); + +struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv, + struct device_node *ndev_dn, int i); + +void bcmasp_interface_destroy(struct bcmasp_intf *intf); + +void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en); + +void bcmasp_enable_rx_irq(struct bcmasp_intf *intf, int en); + +void bcmasp_flush_rx_port(struct bcmasp_intf *intf); + +extern const struct ethtool_ops bcmasp_ethtool_ops; + +int bcmasp_interface_suspend(struct bcmasp_intf *intf); + +int bcmasp_interface_resume(struct bcmasp_intf *intf); + +void bcmasp_set_promisc(struct bcmasp_intf *intf, bool en); + +void bcmasp_set_allmulti(struct bcmasp_intf *intf, bool en); + +void bcmasp_set_broad(struct bcmasp_intf *intf, bool en); + +void bcmasp_set_oaddr(struct bcmasp_intf *intf, const unsigned char *addr, + bool en); + +int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr, + unsigned char *mask); + +void bcmasp_disable_all_filters(struct bcmasp_intf *intf); + +void bcmasp_core_clock_set_intf(struct bcmasp_intf *intf, bool en); + +struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf, + u32 loc, bool wake_filter, + bool init); + +bool bcmasp_netfilt_check_dup(struct bcmasp_intf *intf, + struct 
ethtool_rx_flow_spec *fs); + +void bcmasp_netfilt_release(struct bcmasp_intf *intf, + struct bcmasp_net_filter *nfilt); + +int bcmasp_netfilt_get_active(struct bcmasp_intf *intf); + +int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs, + u32 *rule_cnt); + +void bcmasp_netfilt_suspend(struct bcmasp_intf *intf); + +void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable); +#endif diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c new file mode 100644 index 000000000000..ce6a3d56fb23 --- /dev/null +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c @@ -0,0 +1,503 @@ +// SPDX-License-Identifier: GPL-2.0 +#define pr_fmt(fmt) "bcmasp_ethtool: " fmt + +#include <asm-generic/unaligned.h> +#include <linux/ethtool.h> +#include <linux/netdevice.h> +#include <linux/platform_device.h> + +#include "bcmasp.h" +#include "bcmasp_intf_defs.h" + +enum bcmasp_stat_type { + BCMASP_STAT_RX_EDPKT, + BCMASP_STAT_RX_CTRL, + BCMASP_STAT_RX_CTRL_PER_INTF, + BCMASP_STAT_SOFT, +}; + +struct bcmasp_stats { + char stat_string[ETH_GSTRING_LEN]; + enum bcmasp_stat_type type; + u32 reg_offset; +}; + +#define STAT_BCMASP_SOFT_MIB(str) { \ + .stat_string = str, \ + .type = BCMASP_STAT_SOFT, \ +} + +#define STAT_BCMASP_OFFSET(str, _type, offset) { \ + .stat_string = str, \ + .type = _type, \ + .reg_offset = offset, \ +} + +#define STAT_BCMASP_RX_EDPKT(str, offset) \ + STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_EDPKT, offset) +#define STAT_BCMASP_RX_CTRL(str, offset) \ + STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_CTRL, offset) +#define STAT_BCMASP_RX_CTRL_PER_INTF(str, offset) \ + STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_CTRL_PER_INTF, offset) + +/* Must match the order of struct bcmasp_mib_counters */ +static const struct bcmasp_stats bcmasp_gstrings_stats[] = { + /* EDPKT counters */ + STAT_BCMASP_RX_EDPKT("RX Time Stamp", ASP_EDPKT_RX_TS_COUNTER), + STAT_BCMASP_RX_EDPKT("RX PKT Count", ASP_EDPKT_RX_PKT_CNT), + STAT_BCMASP_RX_EDPKT("RX PKT Buffered", ASP_EDPKT_HDR_EXTR_CNT), + STAT_BCMASP_RX_EDPKT("RX PKT Pushed to DRAM", ASP_EDPKT_HDR_OUT_CNT), + /* ASP RX control */ + STAT_BCMASP_RX_CTRL_PER_INTF("Frames From Unimac", + ASP_RX_CTRL_UMAC_0_FRAME_COUNT), + STAT_BCMASP_RX_CTRL_PER_INTF("Frames From Port", + ASP_RX_CTRL_FB_0_FRAME_COUNT), + STAT_BCMASP_RX_CTRL_PER_INTF("RX Buffer FIFO Depth", + ASP_RX_CTRL_FB_RX_FIFO_DEPTH), + STAT_BCMASP_RX_CTRL("Frames Out(Buffer)", + ASP_RX_CTRL_FB_OUT_FRAME_COUNT), + STAT_BCMASP_RX_CTRL("Frames Out(Filters)", + ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT), + /* Software maintained statistics */ + STAT_BCMASP_SOFT_MIB("RX SKB Alloc Failed"), + STAT_BCMASP_SOFT_MIB("TX DMA Failed"), + STAT_BCMASP_SOFT_MIB("Multicast Filters Full"), + STAT_BCMASP_SOFT_MIB("Unicast Filters Full"), + STAT_BCMASP_SOFT_MIB("MDA Filters Combined"), + STAT_BCMASP_SOFT_MIB("Promisc Filter Set"), + STAT_BCMASP_SOFT_MIB("TX Realloc For Offload Failed"), + STAT_BCMASP_SOFT_MIB("Tx Timeout Count"), +}; + +#define BCMASP_STATS_LEN ARRAY_SIZE(bcmasp_gstrings_stats) + +static u16 bcmasp_stat_fixup_offset(struct bcmasp_intf *intf, + const struct bcmasp_stats *s) +{ + struct bcmasp_priv *priv = intf->parent; + + if (!strcmp("Frames Out(Buffer)", s->stat_string)) + return priv->hw_info->rx_ctrl_fb_out_frame_count; + + if (!strcmp("Frames Out(Filters)", s->stat_string)) + return priv->hw_info->rx_ctrl_fb_filt_out_frame_count; + + if (!strcmp("RX Buffer FIFO Depth", s->stat_string)) + return priv->hw_info->rx_ctrl_fb_rx_fifo_depth; + + 
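/* All other counters read from the same offset on both ASP revisions */
+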
return s->reg_offset; +} + +static int bcmasp_get_sset_count(struct net_device *dev, int string_set) +{ + switch (string_set) { + case ETH_SS_STATS: + return BCMASP_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void bcmasp_get_strings(struct net_device *dev, u32 stringset, + u8 *data) +{ + unsigned int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < BCMASP_STATS_LEN; i++) { + memcpy(data + i * ETH_GSTRING_LEN, + bcmasp_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + } + break; + default: + return; + } +} + +static void bcmasp_update_mib_counters(struct bcmasp_intf *intf) +{ + unsigned int i; + + for (i = 0; i < BCMASP_STATS_LEN; i++) { + const struct bcmasp_stats *s; + u32 offset, val; + char *p; + + s = &bcmasp_gstrings_stats[i]; + offset = bcmasp_stat_fixup_offset(intf, s); + switch (s->type) { + case BCMASP_STAT_SOFT: + continue; + case BCMASP_STAT_RX_EDPKT: + val = rx_edpkt_core_rl(intf->parent, offset); + break; + case BCMASP_STAT_RX_CTRL: + val = rx_ctrl_core_rl(intf->parent, offset); + break; + case BCMASP_STAT_RX_CTRL_PER_INTF: + offset += sizeof(u32) * intf->port; + val = rx_ctrl_core_rl(intf->parent, offset); + break; + default: + continue; + } + p = (char *)(&intf->mib) + (i * sizeof(u32)); + put_unaligned(val, (u32 *)p); + } +} + +static void bcmasp_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + u64 *data) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + unsigned int i; + char *p; + + if (netif_running(dev)) + bcmasp_update_mib_counters(intf); + + for (i = 0; i < BCMASP_STATS_LEN; i++) { + p = (char *)(&intf->mib) + (i * sizeof(u32)); + data[i] = *(u32 *)p; + } +} + +static void bcmasp_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strscpy(info->driver, "bcmasp", sizeof(info->driver)); + strscpy(info->bus_info, dev_name(dev->dev.parent), + sizeof(info->bus_info)); +} + +static u32 bcmasp_get_msglevel(struct net_device *dev) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + + return intf->msg_enable; +} + +static void bcmasp_set_msglevel(struct net_device *dev, u32 level) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + + intf->msg_enable = level; +} + +#define BCMASP_SUPPORTED_WAKE (WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER) +static void bcmasp_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + + wol->supported = BCMASP_SUPPORTED_WAKE; + wol->wolopts = intf->wolopts; + memset(wol->sopass, 0, sizeof(wol->sopass)); + + if (wol->wolopts & WAKE_MAGICSECURE) + memcpy(wol->sopass, intf->sopass, sizeof(intf->sopass)); +} + +static int bcmasp_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + struct bcmasp_priv *priv = intf->parent; + struct device *kdev = &priv->pdev->dev; + + if (!device_can_wakeup(kdev)) + return -EOPNOTSUPP; + + /* Interface Specific */ + intf->wolopts = wol->wolopts; + if (intf->wolopts & WAKE_MAGICSECURE) + memcpy(intf->sopass, wol->sopass, sizeof(wol->sopass)); + + mutex_lock(&priv->wol_lock); + priv->enable_wol(intf, !!intf->wolopts); + mutex_unlock(&priv->wol_lock); + + return 0; +} + +static int bcmasp_flow_insert(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + struct bcmasp_net_filter *nfilter; + u32 loc = cmd->fs.location; + bool wake = false; + + if (cmd->fs.ring_cookie == RX_CLS_FLOW_WAKE) + wake = true; + + /* Currently only supports WAKE filters */ + if (!wake) + return 
-EOPNOTSUPP; + + switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case ETHER_FLOW: + case IP_USER_FLOW: + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + break; + default: + return -EOPNOTSUPP; + } + + /* Check if filter already exists */ + if (bcmasp_netfilt_check_dup(intf, &cmd->fs)) + return -EINVAL; + + nfilter = bcmasp_netfilt_get_init(intf, loc, wake, true); + if (IS_ERR(nfilter)) + return PTR_ERR(nfilter); + + /* Return the location where we did insert the filter */ + cmd->fs.location = nfilter->hw_index; + memcpy(&nfilter->fs, &cmd->fs, sizeof(struct ethtool_rx_flow_spec)); + + /* Since we only support wake filters, defer register programming till + * suspend time. + */ + return 0; +} + +static int bcmasp_flow_delete(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + struct bcmasp_net_filter *nfilter; + + nfilter = bcmasp_netfilt_get_init(intf, cmd->fs.location, false, false); + if (IS_ERR(nfilter)) + return PTR_ERR(nfilter); + + bcmasp_netfilt_release(intf, nfilter); + + return 0; +} + +static int bcmasp_flow_get(struct bcmasp_intf *intf, struct ethtool_rxnfc *cmd) +{ + struct bcmasp_net_filter *nfilter; + + nfilter = bcmasp_netfilt_get_init(intf, cmd->fs.location, false, false); + if (IS_ERR(nfilter)) + return PTR_ERR(nfilter); + + memcpy(&cmd->fs, &nfilter->fs, sizeof(nfilter->fs)); + + cmd->data = NUM_NET_FILTERS; + + return 0; +} + +static int bcmasp_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + mutex_lock(&intf->parent->net_lock); + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = bcmasp_flow_insert(dev, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = bcmasp_flow_delete(dev, cmd); + break; + default: + break; + } + + mutex_unlock(&intf->parent->net_lock); + + return ret; +} + +static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + int err = 0; + + mutex_lock(&intf->parent->net_lock); + + switch (cmd->cmd) { + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = bcmasp_netfilt_get_active(intf); + /* We support specifying rule locations */ + cmd->data |= RX_CLS_LOC_SPECIAL; + break; + case ETHTOOL_GRXCLSRULE: + err = bcmasp_flow_get(intf, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + err = bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt); + cmd->data = NUM_NET_FILTERS; + break; + default: + err = -EOPNOTSUPP; + break; + } + + mutex_unlock(&intf->parent->net_lock); + + return err; +} + +void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable) +{ + u32 reg; + + reg = umac_rl(intf, UMC_EEE_CTRL); + if (enable) + reg |= EEE_EN; + else + reg &= ~EEE_EN; + umac_wl(intf, reg, UMC_EEE_CTRL); + + intf->eee.eee_enabled = enable; + intf->eee.eee_active = enable; +} + +static int bcmasp_get_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + struct ethtool_eee *p = &intf->eee; + + if (!dev->phydev) + return -ENODEV; + + e->eee_enabled = p->eee_enabled; + e->eee_active = p->eee_active; + e->tx_lpi_enabled = p->tx_lpi_enabled; + e->tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER); + + return phy_ethtool_get_eee(dev->phydev, e); +} + +static int bcmasp_set_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + struct ethtool_eee *p = &intf->eee; + int ret; + + if (!dev->phydev) + return 
-ENODEV; + + if (!p->eee_enabled) { + bcmasp_eee_enable_set(intf, false); + } else { + ret = phy_init_eee(dev->phydev, 0); + if (ret) { + netif_err(intf, hw, dev, + "EEE initialization failed: %d\n", ret); + return ret; + } + + umac_wl(intf, e->tx_lpi_timer, UMC_EEE_LPI_TIMER); + intf->eee.eee_active = ret >= 0; + intf->eee.tx_lpi_enabled = e->tx_lpi_enabled; + bcmasp_eee_enable_set(intf, true); + } + + return phy_ethtool_set_eee(dev->phydev, e); +} + +static void bcmasp_get_eth_mac_stats(struct net_device *dev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + + mac_stats->FramesTransmittedOK = umac_rl(intf, UMC_GTPOK); + mac_stats->SingleCollisionFrames = umac_rl(intf, UMC_GTSCL); + mac_stats->MultipleCollisionFrames = umac_rl(intf, UMC_GTMCL); + mac_stats->FramesReceivedOK = umac_rl(intf, UMC_GRPOK); + mac_stats->FrameCheckSequenceErrors = umac_rl(intf, UMC_GRFCS); + mac_stats->AlignmentErrors = umac_rl(intf, UMC_GRALN); + mac_stats->OctetsTransmittedOK = umac_rl(intf, UMC_GTBYT); + mac_stats->FramesWithDeferredXmissions = umac_rl(intf, UMC_GTDRF); + mac_stats->LateCollisions = umac_rl(intf, UMC_GTLCL); + mac_stats->FramesAbortedDueToXSColls = umac_rl(intf, UMC_GTXCL); + mac_stats->OctetsReceivedOK = umac_rl(intf, UMC_GRBYT); + mac_stats->MulticastFramesXmittedOK = umac_rl(intf, UMC_GTMCA); + mac_stats->BroadcastFramesXmittedOK = umac_rl(intf, UMC_GTBCA); + mac_stats->FramesWithExcessiveDeferral = umac_rl(intf, UMC_GTEDF); + mac_stats->MulticastFramesReceivedOK = umac_rl(intf, UMC_GRMCA); + mac_stats->BroadcastFramesReceivedOK = umac_rl(intf, UMC_GRBCA); +} + +static const struct ethtool_rmon_hist_range bcmasp_rmon_ranges[] = { + { 0, 64}, + { 65, 127}, + { 128, 255}, + { 256, 511}, + { 512, 1023}, + { 1024, 1518}, + { 1519, 1522}, + {} +}; + +static void bcmasp_get_rmon_stats(struct net_device *dev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + + *ranges = bcmasp_rmon_ranges; + + rmon_stats->undersize_pkts = umac_rl(intf, UMC_RRUND); + rmon_stats->oversize_pkts = umac_rl(intf, UMC_GROVR); + rmon_stats->fragments = umac_rl(intf, UMC_RRFRG); + rmon_stats->jabbers = umac_rl(intf, UMC_GRJBR); + + rmon_stats->hist[0] = umac_rl(intf, UMC_GR64); + rmon_stats->hist[1] = umac_rl(intf, UMC_GR127); + rmon_stats->hist[2] = umac_rl(intf, UMC_GR255); + rmon_stats->hist[3] = umac_rl(intf, UMC_GR511); + rmon_stats->hist[4] = umac_rl(intf, UMC_GR1023); + rmon_stats->hist[5] = umac_rl(intf, UMC_GR1518); + rmon_stats->hist[6] = umac_rl(intf, UMC_GRMGV); + + rmon_stats->hist_tx[0] = umac_rl(intf, UMC_TR64); + rmon_stats->hist_tx[1] = umac_rl(intf, UMC_TR127); + rmon_stats->hist_tx[2] = umac_rl(intf, UMC_TR255); + rmon_stats->hist_tx[3] = umac_rl(intf, UMC_TR511); + rmon_stats->hist_tx[4] = umac_rl(intf, UMC_TR1023); + rmon_stats->hist_tx[5] = umac_rl(intf, UMC_TR1518); + rmon_stats->hist_tx[6] = umac_rl(intf, UMC_TRMGV); +} + +static void bcmasp_get_eth_ctrl_stats(struct net_device *dev, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + + ctrl_stats->MACControlFramesTransmitted = umac_rl(intf, UMC_GTXCF); + ctrl_stats->MACControlFramesReceived = umac_rl(intf, UMC_GRXCF); + ctrl_stats->UnsupportedOpcodesReceived = umac_rl(intf, UMC_GRXUO); +} + +const struct ethtool_ops bcmasp_ethtool_ops = { + .get_drvinfo = bcmasp_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + 
.set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_msglevel = bcmasp_get_msglevel, + .set_msglevel = bcmasp_set_msglevel, + .get_wol = bcmasp_get_wol, + .set_wol = bcmasp_set_wol, + .get_rxnfc = bcmasp_get_rxnfc, + .set_rxnfc = bcmasp_set_rxnfc, + .set_eee = bcmasp_set_eee, + .get_eee = bcmasp_get_eee, + .get_eth_mac_stats = bcmasp_get_eth_mac_stats, + .get_rmon_stats = bcmasp_get_rmon_stats, + .get_eth_ctrl_stats = bcmasp_get_eth_ctrl_stats, + .get_strings = bcmasp_get_strings, + .get_ethtool_stats = bcmasp_get_ethtool_stats, + .get_sset_count = bcmasp_get_sset_count, +}; diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c new file mode 100644 index 000000000000..53e542881255 --- /dev/null +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c @@ -0,0 +1,1415 @@ +// SPDX-License-Identifier: GPL-2.0 +#define pr_fmt(fmt) "bcmasp_intf: " fmt + +#include <asm/byteorder.h> +#include <linux/brcmphy.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/of_net.h> +#include <linux/of_mdio.h> +#include <linux/phy.h> +#include <linux/phy_fixed.h> +#include <linux/ptp_classify.h> +#include <linux/platform_device.h> +#include <net/ip.h> +#include <net/ipv6.h> + +#include "bcmasp.h" +#include "bcmasp_intf_defs.h" + +static int incr_ring(int index, int ring_count) +{ + index++; + if (index == ring_count) + return 0; + + return index; +} + +/* Points to last byte of descriptor */ +static dma_addr_t incr_last_byte(dma_addr_t addr, dma_addr_t beg, + int ring_count) +{ + dma_addr_t end = beg + (ring_count * DESC_SIZE); + + addr += DESC_SIZE; + if (addr > end) + return beg + DESC_SIZE - 1; + + return addr; +} + +/* Points to first byte of descriptor */ +static dma_addr_t incr_first_byte(dma_addr_t addr, dma_addr_t beg, + int ring_count) +{ + dma_addr_t end = beg + (ring_count * DESC_SIZE); + + addr += DESC_SIZE; + if (addr >= end) + return beg; + + return addr; +} + +static void bcmasp_enable_tx(struct bcmasp_intf *intf, int en) +{ + if (en) { + tx_spb_ctrl_wl(intf, TX_SPB_CTRL_ENABLE_EN, TX_SPB_CTRL_ENABLE); + tx_epkt_core_wl(intf, (TX_EPKT_C_CFG_MISC_EN | + TX_EPKT_C_CFG_MISC_PT | + (intf->port << TX_EPKT_C_CFG_MISC_PS_SHIFT)), + TX_EPKT_C_CFG_MISC); + } else { + tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE); + tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC); + } +} + +static void bcmasp_enable_rx(struct bcmasp_intf *intf, int en) +{ + if (en) + rx_edpkt_cfg_wl(intf, RX_EDPKT_CFG_ENABLE_EN, + RX_EDPKT_CFG_ENABLE); + else + rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE); +} + +static void bcmasp_set_rx_mode(struct net_device *dev) +{ + unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + struct bcmasp_intf *intf = netdev_priv(dev); + struct netdev_hw_addr *ha; + int ret; + + spin_lock_bh(&intf->parent->mda_lock); + + bcmasp_disable_all_filters(intf); + + if (dev->flags & IFF_PROMISC) + goto set_promisc; + + bcmasp_set_promisc(intf, 0); + + bcmasp_set_broad(intf, 1); + + bcmasp_set_oaddr(intf, dev->dev_addr, 1); + + if (dev->flags & IFF_ALLMULTI) { + bcmasp_set_allmulti(intf, 1); + } else { + bcmasp_set_allmulti(intf, 0); + + netdev_for_each_mc_addr(ha, dev) { + ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask); + if (ret) { + intf->mib.mc_filters_full_cnt++; + goto set_promisc; + } + } + } + + netdev_for_each_uc_addr(ha, dev) { + ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask); + if (ret) { + intf->mib.uc_filters_full_cnt++; + goto 
set_promisc; + } + } + + spin_unlock_bh(&intf->parent->mda_lock); + return; + +set_promisc: + bcmasp_set_promisc(intf, 1); + intf->mib.promisc_filters_cnt++; + + /* disable all filters used by this port */ + bcmasp_disable_all_filters(intf); + + spin_unlock_bh(&intf->parent->mda_lock); +} + +static void bcmasp_clean_txcb(struct bcmasp_intf *intf, int index) +{ + struct bcmasp_tx_cb *txcb = &intf->tx_cbs[index]; + + txcb->skb = NULL; + dma_unmap_addr_set(txcb, dma_addr, 0); + dma_unmap_len_set(txcb, dma_len, 0); + txcb->last = false; +} + +static int tx_spb_ring_full(struct bcmasp_intf *intf, int cnt) +{ + int next_index, i; + + /* Check if we have enough room for cnt descriptors */ + for (i = 0; i < cnt; i++) { + next_index = incr_ring(intf->tx_spb_index, DESC_RING_COUNT); + if (next_index == intf->tx_spb_clean_index) + return 1; + } + + return 0; +} + +static struct sk_buff *bcmasp_csum_offload(struct net_device *dev, + struct sk_buff *skb, + bool *csum_hw) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + u32 header = 0, header2 = 0, epkt = 0; + struct bcmasp_pkt_offload *offload; + unsigned int header_cnt = 0; + u8 ip_proto; + int ret; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return skb; + + ret = skb_cow_head(skb, sizeof(*offload)); + if (ret < 0) { + intf->mib.tx_realloc_offload_failed++; + goto help; + } + + switch (skb->protocol) { + case htons(ETH_P_IP): + header |= PKT_OFFLOAD_HDR_SIZE_2((ip_hdrlen(skb) >> 8) & 0xf); + header2 |= PKT_OFFLOAD_HDR2_SIZE_2(ip_hdrlen(skb) & 0xff); + epkt |= PKT_OFFLOAD_EPKT_IP(0) | PKT_OFFLOAD_EPKT_CSUM_L2; + ip_proto = ip_hdr(skb)->protocol; + header_cnt += 2; + break; + case htons(ETH_P_IPV6): + header |= PKT_OFFLOAD_HDR_SIZE_2((IP6_HLEN >> 8) & 0xf); + header2 |= PKT_OFFLOAD_HDR2_SIZE_2(IP6_HLEN & 0xff); + epkt |= PKT_OFFLOAD_EPKT_IP(1) | PKT_OFFLOAD_EPKT_CSUM_L2; + ip_proto = ipv6_hdr(skb)->nexthdr; + header_cnt += 2; + break; + default: + goto help; + } + + switch (ip_proto) { + case IPPROTO_TCP: + header2 |= PKT_OFFLOAD_HDR2_SIZE_3(tcp_hdrlen(skb)); + epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L3; + header_cnt++; + break; + case IPPROTO_UDP: + header2 |= PKT_OFFLOAD_HDR2_SIZE_3(UDP_HLEN); + epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L3; + header_cnt++; + break; + default: + goto help; + } + + offload = (struct bcmasp_pkt_offload *)skb_push(skb, sizeof(*offload)); + + header |= PKT_OFFLOAD_HDR_OP | PKT_OFFLOAD_HDR_COUNT(header_cnt) | + PKT_OFFLOAD_HDR_SIZE_1(ETH_HLEN); + epkt |= PKT_OFFLOAD_EPKT_OP; + + offload->nop = htonl(PKT_OFFLOAD_NOP); + offload->header = htonl(header); + offload->header2 = htonl(header2); + offload->epkt = htonl(epkt); + offload->end = htonl(PKT_OFFLOAD_END_OP); + *csum_hw = true; + + return skb; + +help: + skb_checksum_help(skb); + + return skb; +} + +static unsigned long bcmasp_rx_edpkt_dma_rq(struct bcmasp_intf *intf) +{ + return rx_edpkt_dma_rq(intf, RX_EDPKT_DMA_VALID); +} + +static void bcmasp_rx_edpkt_cfg_wq(struct bcmasp_intf *intf, dma_addr_t addr) +{ + rx_edpkt_cfg_wq(intf, addr, RX_EDPKT_RING_BUFFER_READ); +} + +static void bcmasp_rx_edpkt_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr) +{ + rx_edpkt_dma_wq(intf, addr, RX_EDPKT_DMA_READ); +} + +static unsigned long bcmasp_tx_spb_dma_rq(struct bcmasp_intf *intf) +{ + return tx_spb_dma_rq(intf, TX_SPB_DMA_READ); +} + +static void bcmasp_tx_spb_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr) +{ + tx_spb_dma_wq(intf, addr, TX_SPB_DMA_VALID); +} + +static const struct bcmasp_intf_ops bcmasp_intf_ops = { + .rx_desc_read = 
bcmasp_rx_edpkt_dma_rq, + .rx_buffer_write = bcmasp_rx_edpkt_cfg_wq, + .rx_desc_write = bcmasp_rx_edpkt_dma_wq, + .tx_read = bcmasp_tx_spb_dma_rq, + .tx_write = bcmasp_tx_spb_dma_wq, +}; + +static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + unsigned int total_bytes, size; + int spb_index, nr_frags, i, j; + struct bcmasp_tx_cb *txcb; + dma_addr_t mapping, valid; + struct bcmasp_desc *desc; + bool csum_hw = false; + struct device *kdev; + skb_frag_t *frag; + + kdev = &intf->parent->pdev->dev; + + nr_frags = skb_shinfo(skb)->nr_frags; + + if (tx_spb_ring_full(intf, nr_frags + 1)) { + netif_stop_queue(dev); + if (net_ratelimit()) + netdev_err(dev, "Tx Ring Full!\n"); + return NETDEV_TX_BUSY; + } + + /* Save skb len before adding csum offload header */ + total_bytes = skb->len; + skb = bcmasp_csum_offload(dev, skb, &csum_hw); + if (!skb) + return NETDEV_TX_OK; + + spb_index = intf->tx_spb_index; + valid = intf->tx_spb_dma_valid; + for (i = 0; i <= nr_frags; i++) { + if (!i) { + size = skb_headlen(skb); + if (!nr_frags && size < (ETH_ZLEN + ETH_FCS_LEN)) { + if (skb_put_padto(skb, ETH_ZLEN + ETH_FCS_LEN)) + return NETDEV_TX_OK; + size = skb->len; + } + mapping = dma_map_single(kdev, skb->data, size, + DMA_TO_DEVICE); + } else { + frag = &skb_shinfo(skb)->frags[i - 1]; + size = skb_frag_size(frag); + mapping = skb_frag_dma_map(kdev, frag, 0, size, + DMA_TO_DEVICE); + } + + if (dma_mapping_error(kdev, mapping)) { + intf->mib.tx_dma_failed++; + spb_index = intf->tx_spb_index; + for (j = 0; j < i; j++) { + bcmasp_clean_txcb(intf, spb_index); + spb_index = incr_ring(spb_index, + DESC_RING_COUNT); + } + /* Rewind so we do not have a hole */ + spb_index = intf->tx_spb_index; + /* Nothing was posted to the hardware yet, drop the skb */ + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + txcb = &intf->tx_cbs[spb_index]; + desc = &intf->tx_spb_cpu[spb_index]; + memset(desc, 0, sizeof(*desc)); + txcb->skb = skb; + txcb->bytes_sent = total_bytes; + dma_unmap_addr_set(txcb, dma_addr, mapping); + dma_unmap_len_set(txcb, dma_len, size); + if (!i) { + desc->flags |= DESC_SOF; + if (csum_hw) + desc->flags |= DESC_EPKT_CMD; + } + + if (i == nr_frags) { + desc->flags |= DESC_EOF; + txcb->last = true; + } + + desc->buf = mapping; + desc->size = size; + desc->flags |= DESC_INT_EN; + + netif_dbg(intf, tx_queued, dev, + "%s dma_buf=%pad dma_len=0x%x flags=0x%x index=0x%x\n", + __func__, &mapping, desc->size, desc->flags, + spb_index); + + spb_index = incr_ring(spb_index, DESC_RING_COUNT); + valid = incr_last_byte(valid, intf->tx_spb_dma_addr, + DESC_RING_COUNT); + } + + /* Ensure all descriptors have been written to DRAM for the + * hardware to see up-to-date contents.
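+ * + * The descriptor ring was allocated with dma_alloc_coherent(), so no + * cache maintenance is needed here; the barrier only orders the + * descriptor writes above ahead of the tx_spb_dma_valid doorbell + * write below.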
+ */ + wmb(); + + intf->tx_spb_index = spb_index; + intf->tx_spb_dma_valid = valid; + bcmasp_intf_tx_write(intf, intf->tx_spb_dma_valid); + + if (tx_spb_ring_full(intf, MAX_SKB_FRAGS + 1)) + netif_stop_queue(dev); + + return NETDEV_TX_OK; +} + +static void bcmasp_netif_start(struct net_device *dev) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + + bcmasp_set_rx_mode(dev); + napi_enable(&intf->tx_napi); + napi_enable(&intf->rx_napi); + + bcmasp_enable_rx_irq(intf, 1); + bcmasp_enable_tx_irq(intf, 1); + + phy_start(dev->phydev); +} + +static void umac_reset(struct bcmasp_intf *intf) +{ + umac_wl(intf, 0x0, UMC_CMD); + umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD); + usleep_range(10, 100); + umac_wl(intf, 0x0, UMC_CMD); +} + +static void umac_set_hw_addr(struct bcmasp_intf *intf, + const unsigned char *addr) +{ + u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | + addr[3]; + u32 mac1 = (addr[4] << 8) | addr[5]; + + umac_wl(intf, mac0, UMC_MAC0); + umac_wl(intf, mac1, UMC_MAC1); +} + +static void umac_enable_set(struct bcmasp_intf *intf, u32 mask, + unsigned int enable) +{ + u32 reg; + + reg = umac_rl(intf, UMC_CMD); + if (enable) + reg |= mask; + else + reg &= ~mask; + umac_wl(intf, reg, UMC_CMD); + + /* UniMAC stops on a packet boundary, wait for a full-sized packet + * to be processed (1 msec). + */ + if (enable == 0) + usleep_range(1000, 2000); +} + +static void umac_init(struct bcmasp_intf *intf) +{ + umac_wl(intf, 0x800, UMC_FRM_LEN); + umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL); + umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ); + umac_enable_set(intf, UMC_CMD_PROMISC, 1); +} + +static int bcmasp_tx_poll(struct napi_struct *napi, int budget) +{ + struct bcmasp_intf *intf = + container_of(napi, struct bcmasp_intf, tx_napi); + struct bcmasp_intf_stats64 *stats = &intf->stats64; + struct device *kdev = &intf->parent->pdev->dev; + unsigned long read, released = 0; + struct bcmasp_tx_cb *txcb; + struct bcmasp_desc *desc; + dma_addr_t mapping; + + read = bcmasp_intf_tx_read(intf); + while (intf->tx_spb_dma_read != read) { + txcb = &intf->tx_cbs[intf->tx_spb_clean_index]; + mapping = dma_unmap_addr(txcb, dma_addr); + + dma_unmap_single(kdev, mapping, + dma_unmap_len(txcb, dma_len), + DMA_TO_DEVICE); + + if (txcb->last) { + dev_consume_skb_any(txcb->skb); + + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_packets); + u64_stats_add(&stats->tx_bytes, txcb->bytes_sent); + u64_stats_update_end(&stats->syncp); + } + + desc = &intf->tx_spb_cpu[intf->tx_spb_clean_index]; + + netif_dbg(intf, tx_done, intf->ndev, + "%s dma_buf=%pad dma_len=0x%x flags=0x%x c_index=0x%x\n", + __func__, &mapping, desc->size, desc->flags, + intf->tx_spb_clean_index); + + bcmasp_clean_txcb(intf, intf->tx_spb_clean_index); + released++; + + intf->tx_spb_clean_index = incr_ring(intf->tx_spb_clean_index, + DESC_RING_COUNT); + intf->tx_spb_dma_read = incr_first_byte(intf->tx_spb_dma_read, + intf->tx_spb_dma_addr, + DESC_RING_COUNT); + } + + /* Ensure all descriptors have been written to DRAM for the hardware + * to see updated contents. 
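+ * + * It also orders the control-block cleanup and clean-index updates + * above before the queue is woken below, so a concurrent + * bcmasp_xmit() observes the freed ring slots.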
+ */ + wmb(); + + napi_complete(&intf->tx_napi); + + bcmasp_enable_tx_irq(intf, 1); + + if (released) + netif_wake_queue(intf->ndev); + + return 0; +} + +static int bcmasp_rx_poll(struct napi_struct *napi, int budget) +{ + struct bcmasp_intf *intf = + container_of(napi, struct bcmasp_intf, rx_napi); + struct bcmasp_intf_stats64 *stats = &intf->stats64; + struct device *kdev = &intf->parent->pdev->dev; + unsigned long processed = 0; + struct bcmasp_desc *desc; + struct sk_buff *skb; + dma_addr_t valid; + void *data; + u64 flags; + u32 len; + + valid = bcmasp_intf_rx_desc_read(intf) + 1; + if (valid == intf->rx_edpkt_dma_addr + DESC_RING_SIZE) + valid = intf->rx_edpkt_dma_addr; + + while ((processed < budget) && (valid != intf->rx_edpkt_dma_read)) { + desc = &intf->rx_edpkt_cpu[intf->rx_edpkt_index]; + + /* Ensure that descriptor has been fully written to DRAM by + * hardware before reading by the CPU + */ + rmb(); + + /* Calculate virt addr by offsetting from physical addr */ + data = intf->rx_ring_cpu + + (DESC_ADDR(desc->buf) - intf->rx_ring_dma); + + flags = DESC_FLAGS(desc->buf); + if (unlikely(flags & (DESC_CRC_ERR | DESC_RX_SYM_ERR))) { + if (net_ratelimit()) { + netif_err(intf, rx_status, intf->ndev, + "flags=0x%llx\n", flags); + } + + u64_stats_update_begin(&stats->syncp); + if (flags & DESC_CRC_ERR) + u64_stats_inc(&stats->rx_crc_errs); + if (flags & DESC_RX_SYM_ERR) + u64_stats_inc(&stats->rx_sym_errs); + u64_stats_update_end(&stats->syncp); + + goto next; + } + + dma_sync_single_for_cpu(kdev, DESC_ADDR(desc->buf), desc->size, + DMA_FROM_DEVICE); + + len = desc->size; + + skb = napi_alloc_skb(napi, len); + if (!skb) { + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_dropped); + u64_stats_update_end(&stats->syncp); + intf->mib.alloc_rx_skb_failed++; + + goto next; + } + + skb_put(skb, len); + memcpy(skb->data, data, len); + + skb_pull(skb, 2); + len -= 2; + if (likely(intf->crc_fwd)) { + skb_trim(skb, len - ETH_FCS_LEN); + len -= ETH_FCS_LEN; + } + + if ((intf->ndev->features & NETIF_F_RXCSUM) && + (desc->buf & DESC_CHKSUM)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + skb->protocol = eth_type_trans(skb, intf->ndev); + + napi_gro_receive(napi, skb); + + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_packets); + u64_stats_add(&stats->rx_bytes, len); + u64_stats_update_end(&stats->syncp); + +next: + bcmasp_intf_rx_buffer_write(intf, (DESC_ADDR(desc->buf) + + desc->size)); + + processed++; + intf->rx_edpkt_dma_read = + incr_first_byte(intf->rx_edpkt_dma_read, + intf->rx_edpkt_dma_addr, + DESC_RING_COUNT); + intf->rx_edpkt_index = incr_ring(intf->rx_edpkt_index, + DESC_RING_COUNT); + } + + bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read); + + if (processed < budget) { + napi_complete_done(&intf->rx_napi, processed); + bcmasp_enable_rx_irq(intf, 1); + } + + return processed; +} + +static void bcmasp_adj_link(struct net_device *dev) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; + u32 cmd_bits = 0, reg; + int changed = 0; + + if (intf->old_link != phydev->link) { + changed = 1; + intf->old_link = phydev->link; + } + + if (intf->old_duplex != phydev->duplex) { + changed = 1; + intf->old_duplex = phydev->duplex; + } + + switch (phydev->speed) { + case SPEED_2500: + cmd_bits = UMC_CMD_SPEED_2500; + break; + case SPEED_1000: + cmd_bits = UMC_CMD_SPEED_1000; + break; + case SPEED_100: + cmd_bits = UMC_CMD_SPEED_100; + break; + case SPEED_10: + cmd_bits = UMC_CMD_SPEED_10; + break; + default: + break; + 
} + cmd_bits <<= UMC_CMD_SPEED_SHIFT; + + if (phydev->duplex == DUPLEX_HALF) + cmd_bits |= UMC_CMD_HD_EN; + + if (intf->old_pause != phydev->pause) { + changed = 1; + intf->old_pause = phydev->pause; + } + + if (!phydev->pause) + cmd_bits |= UMC_CMD_RX_PAUSE_IGNORE | UMC_CMD_TX_PAUSE_IGNORE; + + if (!changed) + return; + + if (phydev->link) { + reg = umac_rl(intf, UMC_CMD); + reg &= ~((UMC_CMD_SPEED_MASK << UMC_CMD_SPEED_SHIFT) | + UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE | + UMC_CMD_TX_PAUSE_IGNORE); + reg |= cmd_bits; + umac_wl(intf, reg, UMC_CMD); + + intf->eee.eee_active = phy_init_eee(phydev, 0) >= 0; + bcmasp_eee_enable_set(intf, intf->eee.eee_active); + } + + reg = rgmii_rl(intf, RGMII_OOB_CNTRL); + if (phydev->link) + reg |= RGMII_LINK; + else + reg &= ~RGMII_LINK; + rgmii_wl(intf, reg, RGMII_OOB_CNTRL); + + if (changed) + phy_print_status(phydev); +} + +static int bcmasp_init_rx(struct bcmasp_intf *intf) +{ + struct device *kdev = &intf->parent->pdev->dev; + struct page *buffer_pg; + dma_addr_t dma; + void *p; + u32 reg; + int ret; + + intf->rx_buf_order = get_order(RING_BUFFER_SIZE); + buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order); + if (!buffer_pg) + return -ENOMEM; + + dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(kdev, dma)) { + __free_pages(buffer_pg, intf->rx_buf_order); + return -ENOMEM; + } + intf->rx_ring_cpu = page_to_virt(buffer_pg); + intf->rx_ring_dma = dma; + intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1; + + p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->rx_edpkt_dma_addr, + GFP_KERNEL); + if (!p) { + ret = -ENOMEM; + goto free_rx_ring; + } + intf->rx_edpkt_cpu = p; + + netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll); + + intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr; + intf->rx_edpkt_index = 0; + + /* Make sure channels are disabled */ + rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE); + + /* Rx SPB */ + rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_READ); + rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_WRITE); + rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_BASE); + rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid, + RX_EDPKT_RING_BUFFER_END); + rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid, + RX_EDPKT_RING_BUFFER_VALID); + + /* EDPKT */ + rx_edpkt_cfg_wl(intf, (RX_EDPKT_CFG_CFG0_RBUF_4K << + RX_EDPKT_CFG_CFG0_DBUF_SHIFT) | + (RX_EDPKT_CFG_CFG0_64_ALN << + RX_EDPKT_CFG_CFG0_BALN_SHIFT) | + (RX_EDPKT_CFG_CFG0_EFRM_STUF), + RX_EDPKT_CFG_CFG0); + rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE); + rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ); + rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE); + rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1), + RX_EDPKT_DMA_END); + rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1), + RX_EDPKT_DMA_VALID); + + reg = UMAC2FB_CFG_DEFAULT_EN | + ((intf->channel + 11) << UMAC2FB_CFG_CHID_SHIFT); + reg |= (0xd << UMAC2FB_CFG_OK_SEND_SHIFT); + umac2fb_wl(intf, reg, UMAC2FB_CFG); + + return 0; + +free_rx_ring: + dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE, + DMA_FROM_DEVICE); + __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order); + + return ret; +} + +static void bcmasp_reclaim_free_all_rx(struct bcmasp_intf *intf) +{ + struct device *kdev = &intf->parent->pdev->dev; + + dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu, + intf->rx_edpkt_dma_addr); + dma_unmap_page(kdev, intf->rx_ring_dma,
RING_BUFFER_SIZE, + DMA_FROM_DEVICE); + __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order); +} + +static int bcmasp_init_tx(struct bcmasp_intf *intf) +{ + struct device *kdev = &intf->parent->pdev->dev; + void *p; + int ret; + + p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->tx_spb_dma_addr, + GFP_KERNEL); + if (!p) + return -ENOMEM; + + intf->tx_spb_cpu = p; + intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1; + intf->tx_spb_dma_read = intf->tx_spb_dma_addr; + + intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb), + GFP_KERNEL); + if (!intf->tx_cbs) { + ret = -ENOMEM; + goto free_tx_spb; + } + + intf->tx_spb_index = 0; + intf->tx_spb_clean_index = 0; + + netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll); + + /* Make sure channels are disabled */ + tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE); + tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC); + + /* Tx SPB */ + tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT), + TX_SPB_CTRL_XF_CTRL2); + tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR); + tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT); + tx_spb_top_wl(intf, 0x0, TX_SPB_TOP_SPRE_BW_CTRL); + + tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ); + tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE); + tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END); + tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID); + + return 0; + +free_tx_spb: + dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu, + intf->tx_spb_dma_addr); + + return ret; +} + +static void bcmasp_reclaim_free_all_tx(struct bcmasp_intf *intf) +{ + struct device *kdev = &intf->parent->pdev->dev; + + /* Free descriptors */ + dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu, + intf->tx_spb_dma_addr); + + /* Free cbs */ + kfree(intf->tx_cbs); +} + +static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable) +{ + u32 mask = RGMII_EPHY_CFG_IDDQ_BIAS | RGMII_EPHY_CFG_EXT_PWRDOWN | + RGMII_EPHY_CFG_IDDQ_GLOBAL; + u32 reg; + + reg = rgmii_rl(intf, RGMII_EPHY_CNTRL); + if (enable) { + reg &= ~RGMII_EPHY_CK25_DIS; + rgmii_wl(intf, reg, RGMII_EPHY_CNTRL); + mdelay(1); + + reg &= ~mask; + reg |= RGMII_EPHY_RESET; + rgmii_wl(intf, reg, RGMII_EPHY_CNTRL); + mdelay(1); + + reg &= ~RGMII_EPHY_RESET; + } else { + reg |= mask | RGMII_EPHY_RESET; + rgmii_wl(intf, reg, RGMII_EPHY_CNTRL); + mdelay(1); + reg |= RGMII_EPHY_CK25_DIS; + } + rgmii_wl(intf, reg, RGMII_EPHY_CNTRL); + mdelay(1); + + /* Set or clear the LED control override to avoid lighting up LEDs + * while the EPHY is powered off and drawing unnecessary current. 
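+ * + * The override is asserted below while the EPHY is powered down and + * released again when it is re-enabled.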
+ */ + reg = rgmii_rl(intf, RGMII_SYS_LED_CNTRL); + if (enable) + reg &= ~RGMII_SYS_LED_CNTRL_LINK_OVRD; + else + reg |= RGMII_SYS_LED_CNTRL_LINK_OVRD; + rgmii_wl(intf, reg, RGMII_SYS_LED_CNTRL); +} + +static void bcmasp_rgmii_mode_en_set(struct bcmasp_intf *intf, bool enable) +{ + u32 reg; + + reg = rgmii_rl(intf, RGMII_OOB_CNTRL); + reg &= ~RGMII_OOB_DIS; + if (enable) + reg |= RGMII_MODE_EN; + else + reg &= ~RGMII_MODE_EN; + rgmii_wl(intf, reg, RGMII_OOB_CNTRL); +} + +static void bcmasp_netif_deinit(struct net_device *dev) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + u32 reg, timeout = 1000; + + napi_disable(&intf->tx_napi); + + bcmasp_enable_tx(intf, 0); + + /* Flush any TX packets in the pipe */ + tx_spb_dma_wl(intf, TX_SPB_DMA_FIFO_FLUSH, TX_SPB_DMA_FIFO_CTRL); + do { + reg = tx_spb_dma_rl(intf, TX_SPB_DMA_FIFO_STATUS); + if (!(reg & TX_SPB_DMA_FIFO_FLUSH)) + break; + usleep_range(1000, 2000); + } while (timeout-- > 0); + tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL); + + umac_enable_set(intf, UMC_CMD_TX_EN, 0); + + phy_stop(dev->phydev); + + umac_enable_set(intf, UMC_CMD_RX_EN, 0); + + bcmasp_flush_rx_port(intf); + usleep_range(1000, 2000); + bcmasp_enable_rx(intf, 0); + + napi_disable(&intf->rx_napi); + + /* Disable interrupts */ + bcmasp_enable_tx_irq(intf, 0); + bcmasp_enable_rx_irq(intf, 0); + + netif_napi_del(&intf->tx_napi); + bcmasp_reclaim_free_all_tx(intf); + + netif_napi_del(&intf->rx_napi); + bcmasp_reclaim_free_all_rx(intf); +} + +static int bcmasp_stop(struct net_device *dev) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + + netif_dbg(intf, ifdown, dev, "bcmasp stop\n"); + + /* Stop tx from updating HW */ + netif_tx_disable(dev); + + bcmasp_netif_deinit(dev); + + phy_disconnect(dev->phydev); + + /* Disable internal EPHY or external PHY */ + if (intf->internal_phy) + bcmasp_ephy_enable_set(intf, false); + else + bcmasp_rgmii_mode_en_set(intf, false); + + /* Disable the interface clocks */ + bcmasp_core_clock_set_intf(intf, false); + + clk_disable_unprepare(intf->parent->clk); + + return 0; +} + +static void bcmasp_configure_port(struct bcmasp_intf *intf) +{ + u32 reg, id_mode_dis = 0; + + reg = rgmii_rl(intf, RGMII_PORT_CNTRL); + reg &= ~RGMII_PORT_MODE_MASK; + + switch (intf->phy_interface) { + case PHY_INTERFACE_MODE_RGMII: + /* RGMII_NO_ID: TXC transitions at the same time as TXD + * (requires PCB or receiver-side delay) + * RGMII: Add 2ns delay on TXC (90 degree shift) + * + * ID is implicitly disabled for 100Mbps (RG)MII operation. 
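+ * + * The plain RGMII case therefore disables the internal TXC delay by + * setting RGMII_ID_MODE_DIS and falls through to share the external + * GPHY port mode with RGMII_TXID.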
+ */ + id_mode_dis = RGMII_ID_MODE_DIS; + fallthrough; + case PHY_INTERFACE_MODE_RGMII_TXID: + reg |= RGMII_PORT_MODE_EXT_GPHY; + break; + case PHY_INTERFACE_MODE_MII: + reg |= RGMII_PORT_MODE_EXT_EPHY; + break; + default: + break; + } + + if (intf->internal_phy) + reg |= RGMII_PORT_MODE_EPHY; + + rgmii_wl(intf, reg, RGMII_PORT_CNTRL); + + reg = rgmii_rl(intf, RGMII_OOB_CNTRL); + reg &= ~RGMII_ID_MODE_DIS; + reg |= id_mode_dis; + rgmii_wl(intf, reg, RGMII_OOB_CNTRL); +} + +static int bcmasp_netif_init(struct net_device *dev, bool phy_connect) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + phy_interface_t phy_iface = intf->phy_interface; + u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE | + PHY_BRCM_DIS_TXCRXC_NOENRGY | + PHY_BRCM_IDDQ_SUSPEND; + struct phy_device *phydev = NULL; + int ret; + + /* Always enable interface clocks */ + bcmasp_core_clock_set_intf(intf, true); + + /* Enable internal PHY or external PHY before any MAC activity */ + if (intf->internal_phy) + bcmasp_ephy_enable_set(intf, true); + else + bcmasp_rgmii_mode_en_set(intf, true); + bcmasp_configure_port(intf); + + /* This is an ugly quirk but we have not been correctly + * interpreting the phy_interface values and we have done that + * across different drivers, so at least we are consistent in + * our mistakes. + * + * When the Generic PHY driver is in use either the PHY has + * been strapped or programmed correctly by the boot loader so + * we should stick to our incorrect interpretation since we + * have validated it. + * + * Now when a dedicated PHY driver is in use, we need to + * reverse the meaning of the phy_interface_mode values to + * something that the PHY driver will interpret and act on such + * that we have two mistakes canceling themselves so to speak. + * We only do this for the two modes that GENET driver + * officially supports on Broadcom STB chips: + * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID. + * Other modes are not *officially* supported with the boot + * loader and the scripted environment generating Device Tree + * blobs for those platforms. + * + * Note that internal PHY and fixed-link configurations are not + * affected because they use different phy_interface_t values + * or the Generic PHY driver. 
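+ * + * The switch statement below implements that reversal, mapping RGMII + * to RGMII_ID and RGMII_TXID to RGMII_RXID before the PHY is + * connected.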
+ */ + switch (phy_iface) { + case PHY_INTERFACE_MODE_RGMII: + phy_iface = PHY_INTERFACE_MODE_RGMII_ID; + break; + case PHY_INTERFACE_MODE_RGMII_TXID: + phy_iface = PHY_INTERFACE_MODE_RGMII_RXID; + break; + default: + break; + } + + if (phy_connect) { + phydev = of_phy_connect(dev, intf->phy_dn, + bcmasp_adj_link, phy_flags, + phy_iface); + if (!phydev) { + ret = -ENODEV; + netdev_err(dev, "could not attach to PHY\n"); + goto err_phy_disable; + } + } else if (!intf->wolopts) { + ret = phy_resume(dev->phydev); + if (ret) + goto err_phy_disable; + } + + umac_reset(intf); + + umac_init(intf); + + /* Disable the UniMAC RX/TX */ + umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 0); + + umac_set_hw_addr(intf, dev->dev_addr); + + intf->old_duplex = -1; + intf->old_link = -1; + intf->old_pause = -1; + + ret = bcmasp_init_tx(intf); + if (ret) + goto err_phy_disconnect; + + /* Turn on asp */ + bcmasp_enable_tx(intf, 1); + + ret = bcmasp_init_rx(intf); + if (ret) + goto err_reclaim_tx; + + bcmasp_enable_rx(intf, 1); + + /* Turn on UniMAC TX/RX */ + umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 1); + + intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD); + + bcmasp_netif_start(dev); + + netif_start_queue(dev); + + return 0; + +err_reclaim_tx: + bcmasp_reclaim_free_all_tx(intf); +err_phy_disconnect: + if (phydev) + phy_disconnect(phydev); +err_phy_disable: + if (intf->internal_phy) + bcmasp_ephy_enable_set(intf, false); + else + bcmasp_rgmii_mode_en_set(intf, false); + return ret; +} + +static int bcmasp_open(struct net_device *dev) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + int ret; + + netif_dbg(intf, ifup, dev, "bcmasp open\n"); + + ret = clk_prepare_enable(intf->parent->clk); + if (ret) + return ret; + + ret = bcmasp_netif_init(dev, true); + if (ret) + clk_disable_unprepare(intf->parent->clk); + + return ret; +} + +static void bcmasp_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + + netif_dbg(intf, tx_err, dev, "transmit timeout!\n"); + intf->mib.tx_timeout_cnt++; +} + +static int bcmasp_get_phys_port_name(struct net_device *dev, + char *name, size_t len) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + + if (snprintf(name, len, "p%d", intf->port) >= len) + return -EINVAL; + + return 0; +} + +static void bcmasp_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + struct bcmasp_intf_stats64 *lstats; + unsigned int start; + + lstats = &intf->stats64; + + do { + start = u64_stats_fetch_begin(&lstats->syncp); + stats->rx_packets = u64_stats_read(&lstats->rx_packets); + stats->rx_bytes = u64_stats_read(&lstats->rx_bytes); + stats->rx_dropped = u64_stats_read(&lstats->rx_dropped); + stats->rx_crc_errors = u64_stats_read(&lstats->rx_crc_errs); + stats->rx_frame_errors = u64_stats_read(&lstats->rx_sym_errs); + stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors; + + stats->tx_packets = u64_stats_read(&lstats->tx_packets); + stats->tx_bytes = u64_stats_read(&lstats->tx_bytes); + } while (u64_stats_fetch_retry(&lstats->syncp, start)); +} + +static const struct net_device_ops bcmasp_netdev_ops = { + .ndo_open = bcmasp_open, + .ndo_stop = bcmasp_stop, + .ndo_start_xmit = bcmasp_xmit, + .ndo_tx_timeout = bcmasp_tx_timeout, + .ndo_set_rx_mode = bcmasp_set_rx_mode, + .ndo_get_phys_port_name = bcmasp_get_phys_port_name, + .ndo_eth_ioctl = phy_do_ioctl_running, + .ndo_set_mac_address = eth_mac_addr, + .ndo_get_stats64 = 
bcmasp_get_stats64, +}; + +static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf) +{ + /* Per port */ + intf->res.umac = priv->base + UMC_OFFSET(intf); + intf->res.umac2fb = priv->base + (priv->hw_info->umac2fb + + (intf->port * 0x4)); + intf->res.rgmii = priv->base + RGMII_OFFSET(intf); + + /* Per ch */ + intf->tx_spb_dma = priv->base + TX_SPB_DMA_OFFSET(intf); + intf->res.tx_spb_ctrl = priv->base + TX_SPB_CTRL_OFFSET(intf); + intf->res.tx_spb_top = priv->base + TX_SPB_TOP_OFFSET(intf); + intf->res.tx_epkt_core = priv->base + TX_EPKT_C_OFFSET(intf); + intf->res.tx_pause_ctrl = priv->base + TX_PAUSE_CTRL_OFFSET(intf); + + intf->rx_edpkt_dma = priv->base + RX_EDPKT_DMA_OFFSET(intf); + intf->rx_edpkt_cfg = priv->base + RX_EDPKT_CFG_OFFSET(intf); +} + +#define MAX_IRQ_STR_LEN 64 +struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv, + struct device_node *ndev_dn, int i) +{ + struct device *dev = &priv->pdev->dev; + struct bcmasp_intf *intf; + struct net_device *ndev; + int ch, port, ret; + + if (of_property_read_u32(ndev_dn, "reg", &port)) { + dev_warn(dev, "%s: invalid port number\n", ndev_dn->name); + goto err; + } + + if (of_property_read_u32(ndev_dn, "brcm,channel", &ch)) { + dev_warn(dev, "%s: invalid ch number\n", ndev_dn->name); + goto err; + } + + ndev = alloc_etherdev(sizeof(struct bcmasp_intf)); + if (!ndev) { + dev_warn(dev, "%s: unable to alloc ndev\n", ndev_dn->name); + goto err; + } + intf = netdev_priv(ndev); + + intf->parent = priv; + intf->ndev = ndev; + intf->channel = ch; + intf->port = port; + intf->ndev_dn = ndev_dn; + intf->index = i; + + ret = of_get_phy_mode(ndev_dn, &intf->phy_interface); + if (ret < 0) { + dev_err(dev, "invalid PHY mode property\n"); + goto err_free_netdev; + } + + if (intf->phy_interface == PHY_INTERFACE_MODE_INTERNAL) + intf->internal_phy = true; + + intf->phy_dn = of_parse_phandle(ndev_dn, "phy-handle", 0); + if (!intf->phy_dn && of_phy_is_fixed_link(ndev_dn)) { + ret = of_phy_register_fixed_link(ndev_dn); + if (ret) { + dev_warn(dev, "%s: failed to register fixed PHY\n", + ndev_dn->name); + goto err_free_netdev; + } + intf->phy_dn = ndev_dn; + } + + /* Map resource */ + bcmasp_map_res(priv, intf); + + if ((!phy_interface_mode_is_rgmii(intf->phy_interface) && + intf->phy_interface != PHY_INTERFACE_MODE_MII && + intf->phy_interface != PHY_INTERFACE_MODE_INTERNAL) || + (intf->port != 1 && intf->internal_phy)) { + netdev_err(intf->ndev, "invalid PHY mode: %s for port %d\n", + phy_modes(intf->phy_interface), intf->port); + ret = -EINVAL; + goto err_free_netdev; + } + + ret = of_get_ethdev_address(ndev_dn, ndev); + if (ret) { + netdev_warn(ndev, "using random Ethernet MAC\n"); + eth_hw_addr_random(ndev); + } + + SET_NETDEV_DEV(ndev, dev); + intf->ops = &bcmasp_intf_ops; + ndev->netdev_ops = &bcmasp_netdev_ops; + ndev->ethtool_ops = &bcmasp_ethtool_ops; + intf->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV | + NETIF_MSG_PROBE | + NETIF_MSG_LINK); + ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | + NETIF_F_RXCSUM; + ndev->hw_features |= ndev->features; + ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload); + + return intf; + +err_free_netdev: + free_netdev(ndev); +err: + return NULL; +} + +void bcmasp_interface_destroy(struct bcmasp_intf *intf) +{ + if (intf->ndev->reg_state == NETREG_REGISTERED) + unregister_netdev(intf->ndev); + if (of_phy_is_fixed_link(intf->ndev_dn)) + of_phy_deregister_fixed_link(intf->ndev_dn); + free_netdev(intf->ndev); +} + +static void 
bcmasp_suspend_to_wol(struct bcmasp_intf *intf) +{ + struct net_device *ndev = intf->ndev; + u32 reg; + + reg = umac_rl(intf, UMC_MPD_CTRL); + if (intf->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) + reg |= UMC_MPD_CTRL_MPD_EN; + reg &= ~UMC_MPD_CTRL_PSW_EN; + if (intf->wolopts & WAKE_MAGICSECURE) { + /* Program the SecureOn password */ + umac_wl(intf, get_unaligned_be16(&intf->sopass[0]), + UMC_PSW_MS); + umac_wl(intf, get_unaligned_be32(&intf->sopass[2]), + UMC_PSW_LS); + reg |= UMC_MPD_CTRL_PSW_EN; + } + umac_wl(intf, reg, UMC_MPD_CTRL); + + if (intf->wolopts & WAKE_FILTER) + bcmasp_netfilt_suspend(intf); + + /* UniMAC receive needs to be turned on */ + umac_enable_set(intf, UMC_CMD_RX_EN, 1); + + if (intf->parent->wol_irq > 0) { + wakeup_intr2_core_wl(intf->parent, 0xffffffff, + ASP_WAKEUP_INTR2_MASK_CLEAR); + } + + netif_dbg(intf, wol, ndev, "entered WOL mode\n"); +} + +int bcmasp_interface_suspend(struct bcmasp_intf *intf) +{ + struct device *kdev = &intf->parent->pdev->dev; + struct net_device *dev = intf->ndev; + int ret = 0; + + if (!netif_running(dev)) + return 0; + + netif_device_detach(dev); + + bcmasp_netif_deinit(dev); + + if (!intf->wolopts) { + ret = phy_suspend(dev->phydev); + if (ret) + goto out; + + if (intf->internal_phy) + bcmasp_ephy_enable_set(intf, false); + else + bcmasp_rgmii_mode_en_set(intf, false); + + /* If Wake-on-LAN is disabled, we can safely + * disable the network interface clocks. + */ + bcmasp_core_clock_set_intf(intf, false); + } + + if (device_may_wakeup(kdev) && intf->wolopts) + bcmasp_suspend_to_wol(intf); + + clk_disable_unprepare(intf->parent->clk); + + return ret; + +out: + bcmasp_netif_init(dev, false); + return ret; +} + +static void bcmasp_resume_from_wol(struct bcmasp_intf *intf) +{ + u32 reg; + + reg = umac_rl(intf, UMC_MPD_CTRL); + reg &= ~UMC_MPD_CTRL_MPD_EN; + umac_wl(intf, reg, UMC_MPD_CTRL); + + if (intf->parent->wol_irq > 0) { + wakeup_intr2_core_wl(intf->parent, 0xffffffff, + ASP_WAKEUP_INTR2_MASK_SET); + } +} + +int bcmasp_interface_resume(struct bcmasp_intf *intf) +{ + struct net_device *dev = intf->ndev; + int ret; + + if (!netif_running(dev)) + return 0; + + ret = clk_prepare_enable(intf->parent->clk); + if (ret) + return ret; + + ret = bcmasp_netif_init(dev, false); + if (ret) + goto out; + + bcmasp_resume_from_wol(intf); + + if (intf->eee.eee_enabled) + bcmasp_eee_enable_set(intf, true); + + netif_device_attach(dev); + + return 0; + +out: + clk_disable_unprepare(intf->parent->clk); + return ret; +} diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h new file mode 100644 index 000000000000..ad742612895f --- /dev/null +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h @@ -0,0 +1,257 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __BCMASP_INTF_DEFS_H +#define __BCMASP_INTF_DEFS_H + +#define UMC_OFFSET(intf) \ + ((((intf)->port) * 0x800) + 0xc000) +#define UMC_CMD 0x008 +#define UMC_CMD_TX_EN BIT(0) +#define UMC_CMD_RX_EN BIT(1) +#define UMC_CMD_SPEED_SHIFT 0x2 +#define UMC_CMD_SPEED_MASK 0x3 +#define UMC_CMD_SPEED_10 0x0 +#define UMC_CMD_SPEED_100 0x1 +#define UMC_CMD_SPEED_1000 0x2 +#define UMC_CMD_SPEED_2500 0x3 +#define UMC_CMD_PROMISC BIT(4) +#define UMC_CMD_PAD_EN BIT(5) +#define UMC_CMD_CRC_FWD BIT(6) +#define UMC_CMD_PAUSE_FWD BIT(7) +#define UMC_CMD_RX_PAUSE_IGNORE BIT(8) +#define UMC_CMD_TX_ADDR_INS BIT(9) +#define UMC_CMD_HD_EN BIT(10) +#define UMC_CMD_SW_RESET BIT(13) +#define UMC_CMD_LCL_LOOP_EN BIT(15) +#define UMC_CMD_AUTO_CONFIG 
BIT(22) +#define UMC_CMD_CNTL_FRM_EN BIT(23) +#define UMC_CMD_NO_LEN_CHK BIT(24) +#define UMC_CMD_RMT_LOOP_EN BIT(25) +#define UMC_CMD_PRBL_EN BIT(27) +#define UMC_CMD_TX_PAUSE_IGNORE BIT(28) +#define UMC_CMD_TX_RX_EN BIT(29) +#define UMC_CMD_RUNT_FILTER_DIS BIT(30) +#define UMC_MAC0 0x0c +#define UMC_MAC1 0x10 +#define UMC_FRM_LEN 0x14 +#define UMC_EEE_CTRL 0x64 +#define EN_LPI_RX_PAUSE BIT(0) +#define EN_LPI_TX_PFC BIT(1) +#define EN_LPI_TX_PAUSE BIT(2) +#define EEE_EN BIT(3) +#define RX_FIFO_CHECK BIT(4) +#define EEE_TX_CLK_DIS BIT(5) +#define DIS_EEE_10M BIT(6) +#define LP_IDLE_PREDICTION_MODE BIT(7) +#define UMC_EEE_LPI_TIMER 0x68 +#define UMC_PAUSE_CNTRL 0x330 +#define UMC_TX_FLUSH 0x334 +#define UMC_GR64 0x400 +#define UMC_GR127 0x404 +#define UMC_GR255 0x408 +#define UMC_GR511 0x40c +#define UMC_GR1023 0x410 +#define UMC_GR1518 0x414 +#define UMC_GRMGV 0x418 +#define UMC_GR2047 0x41c +#define UMC_GR4095 0x420 +#define UMC_GR9216 0x424 +#define UMC_GRPKT 0x428 +#define UMC_GRBYT 0x42c +#define UMC_GRMCA 0x430 +#define UMC_GRBCA 0x434 +#define UMC_GRFCS 0x438 +#define UMC_GRXCF 0x43c +#define UMC_GRXPF 0x440 +#define UMC_GRXUO 0x444 +#define UMC_GRALN 0x448 +#define UMC_GRFLR 0x44c +#define UMC_GRCDE 0x450 +#define UMC_GRFCR 0x454 +#define UMC_GROVR 0x458 +#define UMC_GRJBR 0x45c +#define UMC_GRMTUE 0x460 +#define UMC_GRPOK 0x464 +#define UMC_GRUC 0x468 +#define UMC_GRPPP 0x46c +#define UMC_GRMCRC 0x470 +#define UMC_TR64 0x480 +#define UMC_TR127 0x484 +#define UMC_TR255 0x488 +#define UMC_TR511 0x48c +#define UMC_TR1023 0x490 +#define UMC_TR1518 0x494 +#define UMC_TRMGV 0x498 +#define UMC_TR2047 0x49c +#define UMC_TR4095 0x4a0 +#define UMC_TR9216 0x4a4 +#define UMC_GTPKT 0x4a8 +#define UMC_GTMCA 0x4ac +#define UMC_GTBCA 0x4b0 +#define UMC_GTXPF 0x4b4 +#define UMC_GTXCF 0x4b8 +#define UMC_GTFCS 0x4bc +#define UMC_GTOVR 0x4c0 +#define UMC_GTDRF 0x4c4 +#define UMC_GTEDF 0x4c8 +#define UMC_GTSCL 0x4cc +#define UMC_GTMCL 0x4d0 +#define UMC_GTLCL 0x4d4 +#define UMC_GTXCL 0x4d8 +#define UMC_GTFRG 0x4dc +#define UMC_GTNCL 0x4e0 +#define UMC_GTJBR 0x4e4 +#define UMC_GTBYT 0x4e8 +#define UMC_GTPOK 0x4ec +#define UMC_GTUC 0x4f0 +#define UMC_RRPKT 0x500 +#define UMC_RRUND 0x504 +#define UMC_RRFRG 0x508 +#define UMC_RRBYT 0x50c +#define UMC_MIB_CNTRL 0x580 +#define UMC_MIB_CNTRL_RX_CNT_RST BIT(0) +#define UMC_MIB_CNTRL_RUNT_CNT_RST BIT(1) +#define UMC_MIB_CNTRL_TX_CNT_RST BIT(2) +#define UMC_RX_MAX_PKT_SZ 0x608 +#define UMC_MPD_CTRL 0x620 +#define UMC_MPD_CTRL_MPD_EN BIT(0) +#define UMC_MPD_CTRL_PSW_EN BIT(27) +#define UMC_PSW_MS 0x624 +#define UMC_PSW_LS 0x628 + +#define UMAC2FB_OFFSET_2_1 0x9f044 +#define UMAC2FB_OFFSET 0x9f03c +#define UMAC2FB_CFG 0x0 +#define UMAC2FB_CFG_OPUT_EN BIT(0) +#define UMAC2FB_CFG_VLAN_EN BIT(1) +#define UMAC2FB_CFG_SNAP_EN BIT(2) +#define UMAC2FB_CFG_BCM_TG_EN BIT(3) +#define UMAC2FB_CFG_IPUT_EN BIT(4) +#define UMAC2FB_CFG_CHID_SHIFT 8 +#define UMAC2FB_CFG_OK_SEND_SHIFT 24 +#define UMAC2FB_CFG_DEFAULT_EN \ + (UMAC2FB_CFG_OPUT_EN | UMAC2FB_CFG_VLAN_EN \ + | UMAC2FB_CFG_SNAP_EN | UMAC2FB_CFG_IPUT_EN) + +#define RGMII_OFFSET(intf) \ + ((((intf)->port) * 0x100) + 0xd000) +#define RGMII_EPHY_CNTRL 0x00 +#define RGMII_EPHY_CFG_IDDQ_BIAS BIT(0) +#define RGMII_EPHY_CFG_EXT_PWRDOWN BIT(1) +#define RGMII_EPHY_CFG_FORCE_DLL_EN BIT(2) +#define RGMII_EPHY_CFG_IDDQ_GLOBAL BIT(3) +#define RGMII_EPHY_CK25_DIS BIT(4) +#define RGMII_EPHY_RESET BIT(7) +#define RGMII_OOB_CNTRL 0x0c +#define RGMII_LINK BIT(4) +#define RGMII_OOB_DIS BIT(5) +#define RGMII_MODE_EN BIT(6) +#define 
RGMII_ID_MODE_DIS BIT(16) + +#define RGMII_PORT_CNTRL 0x60 +#define RGMII_PORT_MODE_EPHY 0 +#define RGMII_PORT_MODE_GPHY 1 +#define RGMII_PORT_MODE_EXT_EPHY 2 +#define RGMII_PORT_MODE_EXT_GPHY 3 +#define RGMII_PORT_MODE_EXT_RVMII 4 +#define RGMII_PORT_MODE_MASK GENMASK(2, 0) + +#define RGMII_SYS_LED_CNTRL 0x74 +#define RGMII_SYS_LED_CNTRL_LINK_OVRD BIT(15) + +#define TX_SPB_DMA_OFFSET(intf) \ + ((((intf)->channel) * 0x30) + 0x48180) +#define TX_SPB_DMA_READ 0x00 +#define TX_SPB_DMA_BASE 0x08 +#define TX_SPB_DMA_END 0x10 +#define TX_SPB_DMA_VALID 0x18 +#define TX_SPB_DMA_FIFO_CTRL 0x20 +#define TX_SPB_DMA_FIFO_FLUSH BIT(0) +#define TX_SPB_DMA_FIFO_STATUS 0x24 + +#define TX_SPB_CTRL_OFFSET(intf) \ + ((((intf)->channel) * 0x68) + 0x49340) +#define TX_SPB_CTRL_ENABLE 0x0 +#define TX_SPB_CTRL_ENABLE_EN BIT(0) +#define TX_SPB_CTRL_XF_CTRL2 0x20 +#define TX_SPB_CTRL_XF_BID_SHIFT 16 + +#define TX_SPB_TOP_OFFSET(intf) \ + ((((intf)->channel) * 0x1c) + 0x4a0e0) +#define TX_SPB_TOP_BLKOUT 0x0 +#define TX_SPB_TOP_SPRE_BW_CTRL 0x4 + +#define TX_EPKT_C_OFFSET(intf) \ + ((((intf)->channel) * 0x120) + 0x40900) +#define TX_EPKT_C_CFG_MISC 0x0 +#define TX_EPKT_C_CFG_MISC_EN BIT(0) +#define TX_EPKT_C_CFG_MISC_PT BIT(1) +#define TX_EPKT_C_CFG_MISC_PS_SHIFT 14 +#define TX_EPKT_C_CFG_MISC_FD_SHIFT 20 + +#define TX_PAUSE_CTRL_OFFSET(intf) \ + ((((intf)->channel * 0xc) + 0x49a20)) +#define TX_PAUSE_MAP_VECTOR 0x8 + +#define RX_EDPKT_DMA_OFFSET(intf) \ + ((((intf)->channel) * 0x38) + 0x9ca00) +#define RX_EDPKT_DMA_WRITE 0x00 +#define RX_EDPKT_DMA_READ 0x08 +#define RX_EDPKT_DMA_BASE 0x10 +#define RX_EDPKT_DMA_END 0x18 +#define RX_EDPKT_DMA_VALID 0x20 +#define RX_EDPKT_DMA_FULLNESS 0x28 +#define RX_EDPKT_DMA_MIN_THRES 0x2c +#define RX_EDPKT_DMA_CH_XONOFF 0x30 + +#define RX_EDPKT_CFG_OFFSET(intf) \ + ((((intf)->channel) * 0x70) + 0x9c600) +#define RX_EDPKT_CFG_CFG0 0x0 +#define RX_EDPKT_CFG_CFG0_DBUF_SHIFT 9 +#define RX_EDPKT_CFG_CFG0_RBUF 0x0 +#define RX_EDPKT_CFG_CFG0_RBUF_4K 0x1 +#define RX_EDPKT_CFG_CFG0_BUF_4K 0x2 +/* EFRM STUFF, 0 = no byte stuff, 1 = two byte stuff */ +#define RX_EDPKT_CFG_CFG0_EFRM_STUF BIT(11) +#define RX_EDPKT_CFG_CFG0_BALN_SHIFT 12 +#define RX_EDPKT_CFG_CFG0_NO_ALN 0 +#define RX_EDPKT_CFG_CFG0_4_ALN 2 +#define RX_EDPKT_CFG_CFG0_64_ALN 6 +#define RX_EDPKT_RING_BUFFER_WRITE 0x38 +#define RX_EDPKT_RING_BUFFER_READ 0x40 +#define RX_EDPKT_RING_BUFFER_BASE 0x48 +#define RX_EDPKT_RING_BUFFER_END 0x50 +#define RX_EDPKT_RING_BUFFER_VALID 0x58 +#define RX_EDPKT_CFG_ENABLE 0x6c +#define RX_EDPKT_CFG_ENABLE_EN BIT(0) + +#define RX_SPB_DMA_OFFSET(intf) \ + ((((intf)->channel) * 0x30) + 0xa0000) +#define RX_SPB_DMA_READ 0x00 +#define RX_SPB_DMA_BASE 0x08 +#define RX_SPB_DMA_END 0x10 +#define RX_SPB_DMA_VALID 0x18 +#define RX_SPB_DMA_FIFO_CTRL 0x20 +#define RX_SPB_DMA_FIFO_FLUSH BIT(0) +#define RX_SPB_DMA_FIFO_STATUS 0x24 + +#define RX_SPB_CTRL_OFFSET(intf) \ + ((((intf)->channel - 6) * 0x68) + 0xa1000) +#define RX_SPB_CTRL_ENABLE 0x00 +#define RX_SPB_CTRL_ENABLE_EN BIT(0) + +#define RX_PAUSE_CTRL_OFFSET(intf) \ + ((((intf)->channel - 6) * 0x4) + 0xa1138) +#define RX_PAUSE_MAP_VECTOR 0x00 + +#define RX_SPB_TOP_CTRL_OFFSET(intf) \ + ((((intf)->channel - 6) * 0x14) + 0xa2000) +#define RX_SPB_TOP_BLKOUT 0x00 + +#define NUM_4K_BUFFERS 32 +#define RING_BUFFER_SIZE (PAGE_SIZE * NUM_4K_BUFFERS) + +#define DESC_RING_COUNT (64 * NUM_4K_BUFFERS) +#define DESC_SIZE 16 +#define DESC_RING_SIZE (DESC_RING_COUNT * DESC_SIZE) + +#endif diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c 
b/drivers/net/ethernet/broadcom/bcm4908_enet.c index 33d86683af50..3e7c8671cd11 100644 --- a/drivers/net/ethernet/broadcom/bcm4908_enet.c +++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c @@ -768,7 +768,7 @@ err_dma_free: return err; } -static int bcm4908_enet_remove(struct platform_device *pdev) +static void bcm4908_enet_remove(struct platform_device *pdev) { struct bcm4908_enet *enet = platform_get_drvdata(pdev); @@ -776,8 +776,6 @@ static int bcm4908_enet_remove(struct platform_device *pdev) netif_napi_del(&enet->rx_ring.napi); netif_napi_del(&enet->tx_ring.napi); bcm4908_enet_dma_free(enet); - - return 0; } static const struct of_device_id bcm4908_enet_of_match[] = { @@ -791,7 +789,7 @@ static struct platform_driver bcm4908_enet_driver = { .of_match_table = bcm4908_enet_of_match, }, .probe = bcm4908_enet_probe, - .remove = bcm4908_enet_remove, + .remove_new = bcm4908_enet_remove, }; module_platform_driver(bcm4908_enet_driver); diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 2cf96892e565..3196c4dea076 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1902,7 +1902,7 @@ out: /* * exit func, stops hardware and unregisters netdevice */ -static int bcm_enet_remove(struct platform_device *pdev) +static void bcm_enet_remove(struct platform_device *pdev) { struct bcm_enet_priv *priv; struct net_device *dev; @@ -1932,15 +1932,13 @@ static int bcm_enet_remove(struct platform_device *pdev) clk_disable_unprepare(priv->mac_clk); free_netdev(dev); - return 0; } static struct platform_driver bcm63xx_enet_driver = { .probe = bcm_enet_probe, - .remove = bcm_enet_remove, + .remove_new = bcm_enet_remove, .driver = { .name = "bcm63xx_enet", - .owner = THIS_MODULE, }, }; @@ -2532,8 +2530,8 @@ static int bcm_enetsw_get_sset_count(struct net_device *netdev, static void bcm_enetsw_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { - strncpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver)); - strncpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info)); + strscpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver)); + strscpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info)); } static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev, @@ -2740,7 +2738,7 @@ out: /* exit func, stops hardware and unregisters netdevice */ -static int bcm_enetsw_remove(struct platform_device *pdev) +static void bcm_enetsw_remove(struct platform_device *pdev) { struct bcm_enet_priv *priv; struct net_device *dev; @@ -2753,15 +2751,13 @@ static int bcm_enetsw_remove(struct platform_device *pdev) clk_disable_unprepare(priv->mac_clk); free_netdev(dev); - return 0; } static struct platform_driver bcm63xx_enetsw_driver = { .probe = bcm_enetsw_probe, - .remove = bcm_enetsw_remove, + .remove_new = bcm_enetsw_remove, .driver = { .name = "bcm63xx_enetsw", - .owner = THIS_MODULE, }, }; @@ -2791,7 +2787,6 @@ struct platform_driver bcm63xx_enet_shared_driver = { .probe = bcm_enet_shared_probe, .driver = { .name = "bcm63xx_enet_shared", - .owner = THIS_MODULE, }, }; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index bf1611cce974..c9faa8540859 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2430,7 +2430,7 @@ static int bcm_sysport_netdevice_event(struct notifier_block *nb, if (dev->netdev_ops != &bcm_sysport_netdev_ops) return NOTIFY_DONE; 
- if (!dsa_slave_dev_check(info->upper_dev)) + if (!dsa_user_dev_check(info->upper_dev)) return NOTIFY_DONE; if (info->linking) @@ -2648,7 +2648,7 @@ err_free_netdev: return ret; } -static int bcm_sysport_remove(struct platform_device *pdev) +static void bcm_sysport_remove(struct platform_device *pdev) { struct net_device *dev = dev_get_drvdata(&pdev->dev); struct bcm_sysport_priv *priv = netdev_priv(dev); @@ -2663,8 +2663,6 @@ static int bcm_sysport_remove(struct platform_device *pdev) of_phy_deregister_fixed_link(dn); free_netdev(dev); dev_set_drvdata(&pdev->dev, NULL); - - return 0; } static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv) @@ -2901,7 +2899,7 @@ static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops, static struct platform_driver bcm_sysport_driver = { .probe = bcm_sysport_probe, - .remove = bcm_sysport_remove, + .remove_new = bcm_sysport_remove, .driver = { .name = "brcm-systemport", .of_match_table = bcm_sysport_of_match, diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index b4381cd41979..0b21fd5bd457 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -246,13 +246,11 @@ static int bgmac_probe(struct platform_device *pdev) return bgmac_enet_probe(bgmac); } -static int bgmac_remove(struct platform_device *pdev) +static void bgmac_remove(struct platform_device *pdev) { struct bgmac *bgmac = platform_get_drvdata(pdev); bgmac_enet_remove(bgmac); - - return 0; } #ifdef CONFIG_PM @@ -296,7 +294,7 @@ static struct platform_driver bgmac_enet_driver = { .pm = BGMAC_PM_OPS }, .probe = bgmac_probe, - .remove = bgmac_remove, + .remove_new = bgmac_remove, }; module_platform_driver(bgmac_enet_driver); diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 52ee3751187a..448a1b90de5e 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1450,7 +1450,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac) phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); if (IS_ERR(phy_dev)) { dev_err(bgmac->dev, "Failed to register fixed PHY device\n"); - return -ENODEV; + return PTR_ERR(phy_dev); } err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 542c69822649..8e04552d2216 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -890,7 +890,7 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp, (struct eth_classify_rules_ramrod_data *)(raw->rdata); int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd; union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; - bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; + bool add = cmd == BNX2X_VLAN_MAC_ADD; unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac; @@ -1075,7 +1075,7 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp, int rule_cnt = rule_idx + 1; union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; - bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? 
true : false; + bool add = cmd == BNX2X_VLAN_MAC_ADD; u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan; /* Reset the ramrod data buffer for the first rule */ @@ -1125,7 +1125,7 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, int rule_cnt = rule_idx + 1; union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; - bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; + bool add = cmd == BNX2X_VLAN_MAC_ADD; u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan; u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac; u16 inner_mac; diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile index 2bc2b707d6ee..ba6c239d52fa 100644 --- a/drivers/net/ethernet/broadcom/bnxt/Makefile +++ b/drivers/net/ethernet/broadcom/bnxt/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_BNXT) += bnxt_en.o bnxt_en-y := bnxt.o bnxt_hwrm.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o bnxt_coredump.o bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o bnxt_en-$(CONFIG_DEBUG_FS) += bnxt_debugfs.o +bnxt_en-$(CONFIG_BNXT_HWMON) += bnxt_hwmon.o diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 1eb490c48c52..579eebb6fc56 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -52,9 +52,7 @@ #include <linux/cpu_rmap.h> #include <linux/cpumask.h> #include <net/pkt_cls.h> -#include <linux/hwmon.h> -#include <linux/hwmon-sysfs.h> -#include <net/page_pool.h> +#include <net/page_pool/helpers.h> #include <linux/align.h> #include <net/netdev_queues.h> @@ -71,6 +69,7 @@ #include "bnxt_tc.h" #include "bnxt_devlink.h" #include "bnxt_debugfs.h" +#include "bnxt_hwmon.h" #define BNXT_TX_TIMEOUT (5 * HZ) #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \ @@ -293,6 +292,60 @@ static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) BNXT_DB_CQ(db, idx); } +static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay) +{ + if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))) + return; + + if (BNXT_PF(bp)) + queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay); + else + schedule_delayed_work(&bp->fw_reset_task, delay); +} + +static void __bnxt_queue_sp_work(struct bnxt *bp) +{ + if (BNXT_PF(bp)) + queue_work(bnxt_pf_wq, &bp->sp_task); + else + schedule_work(&bp->sp_task); +} + +static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event) +{ + set_bit(event, &bp->sp_event); + __bnxt_queue_sp_work(bp); +} + +static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) +{ + if (!rxr->bnapi->in_reset) { + rxr->bnapi->in_reset = true; + if (bp->flags & BNXT_FLAG_CHIP_P5) + set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); + else + set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event); + __bnxt_queue_sp_work(bp); + } + rxr->rx_next_cons = 0xffff; +} + +void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + int idx) +{ + struct bnxt_napi *bnapi = txr->bnapi; + + if (bnapi->tx_fault) + return; + + netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_pkts:%d cons:%u prod:%u i:%d)", + txr->txq_index, bnapi->tx_pkts, + txr->tx_cons, txr->tx_prod, idx); + WARN_ON_ONCE(1); + bnapi->tx_fault = 1; + bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT); +} + const u16 bnxt_lhint_arr[] = { TX_BD_FLAGS_LHINT_512_AND_SMALLER, TX_BD_FLAGS_LHINT_512_TO_1023, @@ -653,6 +706,11 @@ static void bnxt_tx_int(struct bnxt 
*bp, struct bnxt_napi *bnapi, int budget) skb = tx_buf->skb; tx_buf->skb = NULL; + if (unlikely(!skb)) { + bnxt_sched_reset_txr(bp, txr, i); + return; + } + tx_bytes += skb->len; if (tx_buf->is_push) { @@ -686,7 +744,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) next_tx_int: cons = NEXT_TX(cons); - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); } bnapi->tx_pkts = 0; @@ -702,7 +760,6 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, unsigned int *offset, gfp_t gfp) { - struct device *dev = &bp->pdev->dev; struct page *page; if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { @@ -715,12 +772,7 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, if (!page) return NULL; - *mapping = dma_map_page_attrs(dev, page, *offset, BNXT_RX_PAGE_SIZE, - bp->rx_dir, DMA_ATTR_WEAK_ORDERING); - if (dma_mapping_error(dev, *mapping)) { - page_pool_recycle_direct(rxr->page_pool, page); - return NULL; - } + *mapping = page_pool_get_dma_addr(page) + *offset; return page; } @@ -818,48 +870,15 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp, struct rx_bd *rxbd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; struct bnxt_sw_rx_agg_bd *rx_agg_buf; - struct pci_dev *pdev = bp->pdev; struct page *page; dma_addr_t mapping; u16 sw_prod = rxr->rx_sw_agg_prod; unsigned int offset = 0; - if (BNXT_RX_PAGE_MODE(bp)) { - page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp); - - if (!page) - return -ENOMEM; + page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp); - } else { - if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { - page = rxr->rx_page; - if (!page) { - page = alloc_page(gfp); - if (!page) - return -ENOMEM; - rxr->rx_page = page; - rxr->rx_page_offset = 0; - } - offset = rxr->rx_page_offset; - rxr->rx_page_offset += BNXT_RX_PAGE_SIZE; - if (rxr->rx_page_offset == PAGE_SIZE) - rxr->rx_page = NULL; - else - get_page(page); - } else { - page = alloc_page(gfp); - if (!page) - return -ENOMEM; - } - - mapping = dma_map_page_attrs(&pdev->dev, page, offset, - BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE, - DMA_ATTR_WEAK_ORDERING); - if (dma_mapping_error(&pdev->dev, mapping)) { - __free_page(page); - return -EIO; - } - } + if (!page) + return -ENOMEM; if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); @@ -972,9 +991,9 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp, return NULL; } dma_addr -= bp->rx_dma_offset; - dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, - bp->rx_dir, DMA_ATTR_WEAK_ORDERING); - skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE); + dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, + bp->rx_dir); + skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE); if (!skb) { page_pool_recycle_direct(rxr->page_pool, page); return NULL; @@ -1006,8 +1025,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, return NULL; } dma_addr -= bp->rx_dma_offset; - dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, - bp->rx_dir, DMA_ATTR_WEAK_ORDERING); + dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, + bp->rx_dir); if (unlikely(!payload)) payload = eth_get_headlen(bp->dev, data_ptr, len); @@ -1049,7 +1068,7 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, return NULL; } - skb = build_skb(data, bp->rx_buf_size); + skb = napi_build_skb(data, bp->rx_buf_size); dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, bp->rx_dir, DMA_ATTR_WEAK_ORDERING); 
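The __bnxt_alloc_rx_page() hunk above can drop its dma_map_page_attrs()/dma_mapping_error() handling because buffer DMA is handed over to the page pool: once the pool is created with PP_FLAG_DMA_MAP (see the bnxt_alloc_rx_page_pool() hunk later in this patch), pages arrive pre-mapped and the driver merely reads the address back. A minimal sketch of that contract; the demo_* names and parameters are invented for illustration:

#include <net/page_pool/helpers.h>

/* Pool created with PP_FLAG_DMA_MAP: the allocation returns a page the
 * pool has already DMA-mapped, so no dma_map_page_attrs() or
 * dma_mapping_error() handling is needed here.
 */
static struct page *demo_alloc_rx(struct page_pool *pool, dma_addr_t *mapping)
{
	struct page *page = page_pool_dev_alloc_pages(pool);

	if (!page)
		return NULL;
	*mapping = page_pool_get_dma_addr(page);
	return page;
}

/* Teardown likewise returns pages to the pool; the pool unmaps them when
 * they finally leave it, which is why the later ring-free hunks delete
 * the explicit dma_unmap_page_attrs()/__free_page() pairs.
 */
static void demo_free_rx(struct page_pool *pool, struct page *page)
{
	page_pool_recycle_direct(pool, page);
}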
if (!skb) { @@ -1123,9 +1142,8 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp, return 0; } - dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, - bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); + dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, + bp->rx_dir); total_frag_len += frag_len; prod = NEXT_RX_AGG(prod); @@ -1145,6 +1163,7 @@ static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp, total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx, agg_bufs, tpa, NULL); if (!total_frag_len) { + skb_mark_for_recycle(skb); dev_kfree_skb(skb); return NULL; } @@ -1244,38 +1263,6 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, return 0; } -static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay) -{ - if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))) - return; - - if (BNXT_PF(bp)) - queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay); - else - schedule_delayed_work(&bp->fw_reset_task, delay); -} - -static void bnxt_queue_sp_work(struct bnxt *bp) -{ - if (BNXT_PF(bp)) - queue_work(bnxt_pf_wq, &bp->sp_task); - else - schedule_work(&bp->sp_task); -} - -static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) -{ - if (!rxr->bnapi->in_reset) { - rxr->bnapi->in_reset = true; - if (bp->flags & BNXT_FLAG_CHIP_P5) - set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); - else - set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); - } - rxr->rx_next_cons = 0xffff; -} - static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) { struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; @@ -1330,7 +1317,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n", cons, rxr->rx_next_cons, TPA_START_ERROR_CODE(tpa_start1)); - bnxt_sched_reset(bp, rxr); + bnxt_sched_reset_rxr(bp, rxr); return; } /* Store cfa_code in tpa_info to use in tpa_end @@ -1689,7 +1676,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, tpa_info->data_ptr = new_data + bp->rx_offset; tpa_info->mapping = new_mapping; - skb = build_skb(data, bp->rx_buf_size); + skb = napi_build_skb(data, bp->rx_buf_size); dma_unmap_single_attrs(&bp->pdev->dev, mapping, bp->rx_buf_use_size, bp->rx_dir, DMA_ATTR_WEAK_ORDERING); @@ -1761,6 +1748,8 @@ static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, struct sk_buff *skb) { + skb_mark_for_recycle(skb); + if (skb->dev != bp->dev) { /* this packet belongs to a vf-rep */ bnxt_vf_rep_rx(bp, skb); @@ -1770,6 +1759,21 @@ static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, napi_gro_receive(&bnapi->napi, skb); } +static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags, + struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts) +{ + u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp); + + if (BNXT_PTP_RX_TS_VALID(flags)) + goto ts_valid; + if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags)) + return false; + +ts_valid: + *cmpl_ts = ts; + return true; +} + /* returns the following: * 1 - 1 packet successfully received * 0 - successful TPA_START, packet not completed yet @@ -1795,6 +1799,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, struct sk_buff *skb; struct xdp_buff xdp; u32 flags, misc; + u32 cmpl_ts; void *data; int rc = 0; @@ -1854,7 +1859,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if (rxr->rx_next_cons != 0xffff) 
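Two recycling conversions repeat through the RX hunks here: build_skb() becomes napi_build_skb(), which draws the skb head from the per-CPU NAPI cache (valid because these paths run in softirq context), and the DMA unmap becomes a sync-for-CPU, since the page stays owned and mapped by the pool. Condensed into one hypothetical helper, with all demo_* names and parameters illustrative:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>

static struct sk_buff *demo_rx_build(struct device *dmadev,
				     struct page_pool *pool,
				     struct page *page, void *data,
				     dma_addr_t dma, unsigned int buf_size)
{
	struct sk_buff *skb;

	/* buffer is still pool-mapped: sync it, don't unmap it */
	dma_sync_single_for_cpu(dmadev, dma, buf_size, DMA_FROM_DEVICE);
	skb = napi_build_skb(data, buf_size);
	if (!skb) {
		page_pool_recycle_direct(pool, page);
		return NULL;
	}
	/* on consume/kfree the page returns to the pool rather than the
	 * page allocator, matching the skb_mark_for_recycle() calls added
	 * in bnxt_deliver_skb() and the agg-pages error path
	 */
	skb_mark_for_recycle(skb);
	return skb;
}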
netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", cons, rxr->rx_next_cons); - bnxt_sched_reset(bp, rxr); + bnxt_sched_reset_rxr(bp, rxr); if (rc1) return rc1; goto next_rx_no_prod_no_len; @@ -1892,7 +1897,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) { netdev_warn_once(bp->dev, "RX buffer error %x\n", rx_err); - bnxt_sched_reset(bp, rxr); + bnxt_sched_reset_rxr(bp, rxr); } } goto next_rx_no_len; @@ -2017,10 +2022,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } } - if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) == - RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) { + if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) { if (bp->flags & BNXT_FLAG_CHIP_P5) { - u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp); u64 ns, ts; if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) { @@ -2141,7 +2144,68 @@ static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id) return INVALID_HW_RING_ID; } -static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) +static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info) +{ + if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4) + return link_info->force_pam4_link_speed; + return link_info->force_link_speed; +} + +static void bnxt_set_force_speed(struct bnxt_link_info *link_info) +{ + link_info->req_link_speed = link_info->force_link_speed; + link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; + if (link_info->force_pam4_link_speed) { + link_info->req_link_speed = link_info->force_pam4_link_speed; + link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; + } +} + +static void bnxt_set_auto_speed(struct bnxt_link_info *link_info) +{ + link_info->advertising = link_info->auto_link_speeds; + link_info->advertising_pam4 = link_info->auto_pam4_link_speeds; +} + +static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info) +{ + if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ && + link_info->req_link_speed != link_info->force_link_speed) + return true; + if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 && + link_info->req_link_speed != link_info->force_pam4_link_speed) + return true; + return false; +} + +static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info) +{ + if (link_info->advertising != link_info->auto_link_speeds || + link_info->advertising_pam4 != link_info->auto_pam4_link_speeds) + return true; + return false; +} + +#define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \ + ((data2) & \ + ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK) + +#define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \ + (((data2) & \ + ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\ + ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT) + +#define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \ + ((data1) & \ + ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK) + +#define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \ + (((data1) & \ + ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\ + ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING) + +/* Return true if the workqueue has to be scheduled */ +static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) { u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1); @@ -2156,11 +2220,53 @@ static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD: 
netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n"); break; + case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: { + u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1); + char *threshold_type; + bool notify = false; + char *dir_str; + + switch (type) { + case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN: + threshold_type = "warning"; + break; + case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL: + threshold_type = "critical"; + break; + case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL: + threshold_type = "fatal"; + break; + case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN: + threshold_type = "shutdown"; + break; + default: + netdev_err(bp->dev, "Unknown Thermal threshold type event\n"); + return false; + } + if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) { + dir_str = "above"; + notify = true; + } else { + dir_str = "below"; + } + netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n", + dir_str, threshold_type); + netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n", + BNXT_EVENT_THERMAL_CURRENT_TEMP(data2), + BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2)); + if (notify) { + bp->thermal_threshold_type = type; + set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event); + return true; + } + return false; + } default: netdev_err(bp->dev, "FW reported unknown error type %u\n", err_type); break; } + return false; } #define BNXT_GET_EVENT_PORT(data) \ @@ -2206,7 +2312,7 @@ static int bnxt_async_event_process(struct bnxt *bp, /* print unsupported speed warning in forced speed mode only */ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) && (data1 & 0x20000)) { - u16 fw_speed = link_info->force_link_speed; + u16 fw_speed = bnxt_get_force_speed(link_info); u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); if (speed != SPEED_UNKNOWN) @@ -2339,7 +2445,7 @@ static int bnxt_async_event_process(struct bnxt *bp, goto async_event_process_exit; } rxr = bp->bnapi[grp_idx]->rx_ring; - bnxt_sched_reset(bp, rxr); + bnxt_sched_reset_rxr(bp, rxr); goto async_event_process_exit; } case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: { @@ -2361,7 +2467,8 @@ static int bnxt_async_event_process(struct bnxt *bp, goto async_event_process_exit; } case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: { - bnxt_event_error_report(bp, data1, data2); + if (bnxt_event_error_report(bp, data1, data2)) + break; goto async_event_process_exit; } case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: { @@ -2394,7 +2501,7 @@ static int bnxt_async_event_process(struct bnxt *bp, default: goto async_event_process_exit; } - bnxt_queue_sp_work(bp); + __bnxt_queue_sp_work(bp); async_event_process_exit: return 0; } @@ -2423,8 +2530,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) } set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); - set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); + bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT); break; case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: @@ -2582,7 +2688,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) { - if (bnapi->tx_pkts) + if (bnapi->tx_pkts && !bnapi->tx_fault) bnapi->tx_int(bp, bnapi, budget); if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) { @@ -2626,6 +2732,7 @@ static int bnxt_poll_nitroa0(struct 
napi_struct *napi, int budget) struct rx_cmp_ext *rxcmp1; u32 cp_cons, tmp_raw_cons; u32 raw_cons = cpr->cp_raw_cons; + bool flush_xdp = false; u32 rx_pkts = 0; u8 event = 0; @@ -2660,6 +2767,8 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) rx_pkts++; else if (rc == -EBUSY) /* partial completion */ break; + if (event & BNXT_REDIRECT_EVENT) + flush_xdp = true; } else if (unlikely(TX_CMP_TYPE(txcmp) == CMPL_BASE_TYPE_HWRM_DONE)) { bnxt_hwrm_handler(bp, txcmp); @@ -2679,6 +2788,8 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) if (event & BNXT_AGG_EVENT) bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + if (flush_xdp) + xdp_do_flush(); if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { napi_complete_done(napi, rx_pkts); @@ -2952,10 +3063,6 @@ skip_rx_tpa_free: rx_buf->data = NULL; if (BNXT_RX_PAGE_MODE(bp)) { - mapping -= bp->rx_dma_offset; - dma_unmap_page_attrs(&pdev->dev, mapping, - BNXT_RX_PAGE_SIZE, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); page_pool_recycle_direct(rxr->page_pool, data); } else { dma_unmap_single_attrs(&pdev->dev, mapping, @@ -2976,30 +3083,13 @@ skip_rx_buf_free: if (!page) continue; - if (BNXT_RX_PAGE_MODE(bp)) { - dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, - BNXT_RX_PAGE_SIZE, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); - rx_agg_buf->page = NULL; - __clear_bit(i, rxr->rx_agg_bmap); - - page_pool_recycle_direct(rxr->page_pool, page); - } else { - dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, - BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE, - DMA_ATTR_WEAK_ORDERING); - rx_agg_buf->page = NULL; - __clear_bit(i, rxr->rx_agg_bmap); + rx_agg_buf->page = NULL; + __clear_bit(i, rxr->rx_agg_bmap); - __free_page(page); - } + page_pool_recycle_direct(rxr->page_pool, page); } skip_rx_agg_free: - if (rxr->rx_page) { - __free_page(rxr->rx_page); - rxr->rx_page = NULL; - } map = rxr->rx_tpa_idx_map; if (map) memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); @@ -3218,13 +3308,15 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp, { struct page_pool_params pp = { 0 }; - pp.pool_size = bp->rx_ring_size; + pp.pool_size = bp->rx_agg_ring_size; + if (BNXT_RX_PAGE_MODE(bp)) + pp.pool_size += bp->rx_ring_size; pp.nid = dev_to_node(&bp->pdev->dev); pp.napi = &rxr->bnapi->napi; pp.dev = &bp->pdev->dev; - pp.dma_dir = DMA_BIDIRECTIONAL; - if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) - pp.flags |= PP_FLAG_PAGE_FRAG; + pp.dma_dir = bp->rx_dir; + pp.max_len = PAGE_SIZE; + pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; rxr->page_pool = page_pool_create(&pp); if (IS_ERR(rxr->page_pool)) { @@ -5834,7 +5926,7 @@ static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) if (BNXT_PF(bp)) { struct hwrm_func_cfg_input *req; - rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); if (rc) return rc; @@ -6245,7 +6337,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, struct hwrm_func_cfg_input *req; u32 enables = 0; - if (hwrm_req_init(bp, req, HWRM_FUNC_CFG)) + if (bnxt_hwrm_func_cfg_short_req_init(bp, &req)) return NULL; req->fid = cpu_to_le16(0xffff); @@ -8590,7 +8682,7 @@ static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) else return -EINVAL; - rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); if (rc) return rc; @@ -8608,7 +8700,7 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) return 0; - rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + rc = 
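For reference, the bnxt_alloc_rx_page_pool() hunk just above is what licenses the earlier map/unmap deletions: PP_FLAG_DMA_MAP moves mapping into the pool, PP_FLAG_DMA_SYNC_DEV has the pool sync recycled pages for device reuse (bounded by max_len), and dma_dir now matches the RX direction instead of DMA_BIDIRECTIONAL. A standalone sketch with placeholder sizing; demo_create_pool() and its parameters are invented:

#include <linux/dma-mapping.h>
#include <net/page_pool/helpers.h>

static struct page_pool *demo_create_pool(struct device *dev,
					  struct napi_struct *napi,
					  unsigned int ring_entries)
{
	struct page_pool_params pp = {
		.flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = ring_entries,	/* one page per ring slot */
		.nid       = dev_to_node(dev),
		.dev       = dev,
		.napi      = napi,		/* enables direct recycling */
		.dma_dir   = DMA_FROM_DEVICE,	/* DMA_BIDIRECTIONAL for XDP_TX */
		.max_len   = PAGE_SIZE,		/* worst-case sync length */
	};

	return page_pool_create(&pp);		/* ERR_PTR() on failure */
}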
bnxt_hwrm_func_cfg_short_req_init(bp, &req); if (rc) return rc; @@ -9422,10 +9514,16 @@ static void bnxt_disable_napi(struct bnxt *bp) return; for (i = 0; i < bp->cp_nr_rings; i++) { - struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr; - napi_disable(&bp->bnapi[i]->napi); - if (bp->bnapi[i]->rx_ring) + cpr = &bnapi->cp_ring; + if (bnapi->tx_fault) + cpr->sw_stats.tx.tx_resets++; + if (bnapi->in_reset) + cpr->sw_stats.rx.rx_resets++; + napi_disable(&bnapi->napi); + if (bnapi->rx_ring) cancel_work_sync(&cpr->dim.work); } } @@ -9439,9 +9537,9 @@ static void bnxt_enable_napi(struct bnxt *bp) struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr; + bnapi->tx_fault = 0; + cpr = &bnapi->cp_ring; - if (bnapi->in_reset) - cpr->sw_stats.rx.rx_resets++; bnapi->in_reset = false; bnapi->tx_pkts = 0; @@ -9646,13 +9744,31 @@ static bool bnxt_support_dropped(u16 advertising, u16 supported) return ((supported | diff) != supported); } +static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info) +{ + /* Check if any advertised speeds are no longer supported. The caller + * holds the link_lock mutex, so we can modify link_info settings. + */ + if (bnxt_support_dropped(link_info->advertising, + link_info->support_auto_speeds)) { + link_info->advertising = link_info->support_auto_speeds; + return true; + } + if (bnxt_support_dropped(link_info->advertising_pam4, + link_info->support_pam4_auto_speeds)) { + link_info->advertising_pam4 = link_info->support_pam4_auto_speeds; + return true; + } + return false; +} + int bnxt_update_link(struct bnxt *bp, bool chng_link_state) { struct bnxt_link_info *link_info = &bp->link_info; struct hwrm_port_phy_qcfg_output *resp; struct hwrm_port_phy_qcfg_input *req; u8 link_state = link_info->link_state; - bool support_changed = false; + bool support_changed; int rc; rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG); @@ -9766,19 +9882,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state) if (!BNXT_PHY_CFG_ABLE(bp)) return 0; - /* Check if any advertised speeds are no longer supported. The caller - * holds the link_lock mutex, so we can modify link_info settings. 
- */ - if (bnxt_support_dropped(link_info->advertising, - link_info->support_auto_speeds)) { - link_info->advertising = link_info->support_auto_speeds; - support_changed = true; - } - if (bnxt_support_dropped(link_info->advertising_pam4, - link_info->support_pam4_auto_speeds)) { - link_info->advertising_pam4 = link_info->support_pam4_auto_speeds; - support_changed = true; - } + support_changed = bnxt_support_speed_dropped(link_info); if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED)) bnxt_hwrm_set_link_setting(bp, true, false); return 0; @@ -10268,79 +10372,6 @@ static void bnxt_get_wol_settings(struct bnxt *bp) } while (handle && handle != 0xffff); } -#ifdef CONFIG_BNXT_HWMON -static ssize_t bnxt_show_temp(struct device *dev, - struct device_attribute *devattr, char *buf) -{ - struct hwrm_temp_monitor_query_output *resp; - struct hwrm_temp_monitor_query_input *req; - struct bnxt *bp = dev_get_drvdata(dev); - u32 len = 0; - int rc; - - rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY); - if (rc) - return rc; - resp = hwrm_req_hold(bp, req); - rc = hwrm_req_send(bp, req); - if (!rc) - len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */ - hwrm_req_drop(bp, req); - if (rc) - return rc; - return len; -} -static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0); - -static struct attribute *bnxt_attrs[] = { - &sensor_dev_attr_temp1_input.dev_attr.attr, - NULL -}; -ATTRIBUTE_GROUPS(bnxt); - -static void bnxt_hwmon_close(struct bnxt *bp) -{ - if (bp->hwmon_dev) { - hwmon_device_unregister(bp->hwmon_dev); - bp->hwmon_dev = NULL; - } -} - -static void bnxt_hwmon_open(struct bnxt *bp) -{ - struct hwrm_temp_monitor_query_input *req; - struct pci_dev *pdev = bp->pdev; - int rc; - - rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY); - if (!rc) - rc = hwrm_req_send_silent(bp, req); - if (rc == -EACCES || rc == -EOPNOTSUPP) { - bnxt_hwmon_close(bp); - return; - } - - if (bp->hwmon_dev) - return; - - bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, - DRV_MODULE_NAME, bp, - bnxt_groups); - if (IS_ERR(bp->hwmon_dev)) { - bp->hwmon_dev = NULL; - dev_warn(&pdev->dev, "Cannot register hwmon device\n"); - } -} -#else -static void bnxt_hwmon_close(struct bnxt *bp) -{ -} - -static void bnxt_hwmon_open(struct bnxt *bp) -{ -} -#endif - static bool bnxt_eee_config_ok(struct bnxt *bp) { struct ethtool_eee *eee = &bp->eee; @@ -10392,19 +10423,14 @@ static int bnxt_update_phy_setting(struct bnxt *bp) if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { if (BNXT_AUTO_MODE(link_info->auto_mode)) update_link = true; - if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ && - link_info->req_link_speed != link_info->force_link_speed) - update_link = true; - else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 && - link_info->req_link_speed != link_info->force_pam4_link_speed) + if (bnxt_force_speed_updated(link_info)) update_link = true; if (link_info->req_duplex != link_info->duplex_setting) update_link = true; } else { if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) update_link = true; - if (link_info->advertising != link_info->auto_link_speeds || - link_info->advertising_pam4 != link_info->auto_pam4_link_speeds) + if (bnxt_auto_speed_updated(link_info)) update_link = true; } @@ -10669,7 +10695,6 @@ static int bnxt_open(struct net_device *dev) bnxt_reenable_sriov(bp); } } - bnxt_hwmon_open(bp); } return rc; @@ -10710,8 +10735,10 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bnxt_free_skbs(bp); /* Save ring stats before shutdown */ - 
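The sysfs-attribute hwmon code deleted above is not lost: the Makefile hunk near the top of this patch adds bnxt_hwmon.o, and later hunks route registration through bnxt_hwmon_init()/bnxt_hwmon_uninit() instead of open-coding it in open/close. As a hedged sketch (not the actual bnxt_hwmon.c contents), one plausible shape for such a file is the hwmon_device_register_with_info() idiom; every demo_* name and the hardcoded reading are illustrative:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/hwmon.h>

static umode_t demo_hwmon_is_visible(const void *drvdata,
				     enum hwmon_sensor_types type,
				     u32 attr, int channel)
{
	return type == hwmon_temp && attr == hwmon_temp_input ? 0444 : 0;
}

static int demo_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
			   u32 attr, int channel, long *val)
{
	*val = 45000;	/* millidegrees C; a real driver queries firmware */
	return 0;
}

static const struct hwmon_ops demo_hwmon_ops = {
	.is_visible = demo_hwmon_is_visible,
	.read = demo_hwmon_read,
};

static const struct hwmon_channel_info * const demo_hwmon_info[] = {
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
	NULL
};

static const struct hwmon_chip_info demo_hwmon_chip = {
	.ops = &demo_hwmon_ops,
	.info = demo_hwmon_info,
};

static int demo_hwmon_init(struct device *parent, void *drvdata)
{
	struct device *hdev;

	hdev = hwmon_device_register_with_info(parent, "demo_nic", drvdata,
					       &demo_hwmon_chip, NULL);
	return PTR_ERR_OR_ZERO(hdev);
}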
if (bp->bnapi && irq_re_init) + if (bp->bnapi && irq_re_init) { bnxt_get_ring_stats(bp, &bp->net_stats_prev); + bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev); + } if (irq_re_init) { bnxt_free_irq(bp); bnxt_del_napi(bp); @@ -10719,10 +10746,8 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bnxt_free_mem(bp, irq_re_init); } -int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) +void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) { - int rc = 0; - if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { /* If we get here, it means firmware reset is in progress * while we are trying to close. We can safely proceed with @@ -10737,22 +10762,24 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) #ifdef CONFIG_BNXT_SRIOV if (bp->sriov_cfg) { + int rc; + rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, !bp->sriov_cfg, BNXT_SRIOV_CFG_WAIT_TMO); - if (rc) - netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); + if (!rc) + netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n"); + else if (rc < 0) + netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n"); } #endif __bnxt_close_nic(bp, irq_re_init, link_re_init); - return rc; } static int bnxt_close(struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); - bnxt_hwmon_close(bp); bnxt_close_nic(bp, true, true); bnxt_hwrm_shutdown_link(bp); bnxt_hwrm_if_change(bp, false); @@ -10960,6 +10987,35 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) clear_bit(BNXT_STATE_READ_STATS, &bp->state); } +static void bnxt_get_one_ring_err_stats(struct bnxt *bp, + struct bnxt_total_ring_err_stats *stats, + struct bnxt_cp_ring_info *cpr) +{ + struct bnxt_sw_stats *sw_stats = &cpr->sw_stats; + u64 *hw_stats = cpr->stats.sw_stats; + + stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors; + stats->rx_total_resets += sw_stats->rx.rx_resets; + stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors; + stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards; + stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards; + stats->rx_total_ring_discards += + BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts); + stats->tx_total_resets += sw_stats->tx.tx_resets; + stats->tx_total_ring_discards += + BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts); + stats->total_missed_irqs += sw_stats->cmn.missed_irqs; +} + +void bnxt_get_ring_err_stats(struct bnxt *bp, + struct bnxt_total_ring_err_stats *stats) +{ + int i; + + for (i = 0; i < bp->cp_nr_rings; i++) + bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring); +} + static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) { struct net_device *dev = bp->dev; @@ -11048,8 +11104,7 @@ static void bnxt_set_rx_mode(struct net_device *dev) if (mask != vnic->rx_mask || uc_update || mc_update) { vnic->rx_mask = mask; - set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); + bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); } } @@ -11614,8 +11669,7 @@ static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue) struct bnxt *bp = netdev_priv(dev); netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); - set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); + bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT); } static void bnxt_fw_health_check(struct bnxt *bp) @@ -11652,8 +11706,7 @@ static void 
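The SR-IOV hunk above fixes a real inversion rather than just restyling: wait_event_interruptible_timeout() returns 0 only on timeout, a negative errno if a signal arrived, and the remaining jiffies (positive) when the condition became true, so the old "if (rc)" test warned precisely on success. The convention reduced to a sketch; demo_wait_done(), wq and done are invented names:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

static int demo_wait_done(wait_queue_head_t *wq, bool *done)
{
	long rc = wait_event_interruptible_timeout(*wq, *done, 10 * HZ);

	if (!rc)		/* timed out, condition still false */
		return -ETIMEDOUT;
	if (rc < 0)		/* interrupted by a signal */
		return rc;	/* -ERESTARTSYS */
	return 0;		/* condition true; rc was jiffies left */
}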
bnxt_fw_health_check(struct bnxt *bp) return; fw_reset: - set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); + bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT); } static void bnxt_timer(struct timer_list *t) @@ -11670,21 +11723,15 @@ static void bnxt_timer(struct timer_list *t) if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) bnxt_fw_health_check(bp); - if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) { - set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); - } + if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) + bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT); - if (bnxt_tc_flower_enabled(bp)) { - set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); - } + if (bnxt_tc_flower_enabled(bp)) + bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT); #ifdef CONFIG_RFS_ACCEL - if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) { - set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); - } + if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) + bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); #endif /*CONFIG_RFS_ACCEL*/ if (bp->link_info.phy_retry) { @@ -11692,21 +11739,17 @@ static void bnxt_timer(struct timer_list *t) bp->link_info.phy_retry = false; netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n"); } else { - set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); + bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT); } } - if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) { - set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); - } + if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) + bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev && - netif_carrier_ok(dev)) { - set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); - } + netif_carrier_ok(dev)) + bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT); + bnxt_restart_timer: mod_timer(&bp->timer, jiffies + bp->current_interval); } @@ -12006,16 +12049,9 @@ static void bnxt_init_ethtool_link_settings(struct bnxt *bp) } else { link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; } - link_info->advertising = link_info->auto_link_speeds; - link_info->advertising_pam4 = link_info->auto_pam4_link_speeds; + bnxt_set_auto_speed(link_info); } else { - link_info->req_link_speed = link_info->force_link_speed; - link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; - if (link_info->force_pam4_link_speed) { - link_info->req_link_speed = - link_info->force_pam4_link_speed; - link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; - } + bnxt_set_force_speed(link_info); link_info->req_duplex = link_info->duplex_setting; } if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) @@ -12109,6 +12145,9 @@ static void bnxt_sp_task(struct work_struct *work) if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event)) bnxt_fw_echo_reply(bp); + if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event)) + bnxt_hwmon_notify_event(bp); + /* These functions below will clear BNXT_STATE_IN_SP_TASK. They * must be the last functions to be called before exiting. 
*/ @@ -12237,6 +12276,20 @@ static void bnxt_init_dflt_coal(struct bnxt *bp) bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; } +/* FW that pre-reserves 1 VNIC per function */ +static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp) +{ + u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp); + + if (!(bp->flags & BNXT_FLAG_CHIP_P5) && + (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18))) + return true; + if ((bp->flags & BNXT_FLAG_CHIP_P5) && + (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172))) + return true; + return false; +} + static int bnxt_fw_init_one_p1(struct bnxt *bp) { int rc; @@ -12293,6 +12346,9 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp) if (rc) return -ENODEV; + if (bnxt_fw_pre_resv_vnics(bp)) + bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS; + bnxt_hwrm_func_qcfg(bp); bnxt_hwrm_vnic_qcaps(bp); bnxt_hwrm_port_led_qcaps(bp); @@ -12300,6 +12356,7 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp) if (bp->fw_cap & BNXT_FW_CAP_PTP) __bnxt_hwrm_ptp_qcfg(bp); bnxt_dcb_init(bp); + bnxt_hwmon_init(bp); return 0; } @@ -12985,8 +13042,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, bp->ntp_fltr_count++; spin_unlock_bh(&bp->ntp_fltr_lock); - set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); + bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); return new_fltr->sw_id; @@ -13118,9 +13174,6 @@ static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, if (nla_type(attr) != IFLA_BRIDGE_MODE) continue; - if (nla_len(attr) < sizeof(mode)) - return -EINVAL; - mode = nla_get_u16(attr); if (mode == bp->br_mode) break; @@ -13209,6 +13262,7 @@ static void bnxt_remove_one(struct pci_dev *pdev) bnxt_clear_int_mode(bp); bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_resources(bp); + bnxt_hwmon_uninit(bp); bnxt_ethtool_free(bp); bnxt_dcb_free(bp); kfree(bp->ptp_cfg); @@ -13805,6 +13859,7 @@ init_err_dl: init_err_pci_clean: bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_resources(bp); + bnxt_hwmon_uninit(bp); bnxt_ethtool_free(bp); bnxt_ptp_clear(bp); kfree(bp->ptp_cfg); @@ -13901,6 +13956,8 @@ static int bnxt_resume(struct device *device) if (rc) goto resume_exit; + bnxt_clear_reservations(bp, true); + if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) { rc = -ENODEV; goto resume_exit; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index bb95c3dc5270..a7d7b09ea162 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -161,7 +161,7 @@ struct rx_cmp { #define RX_CMP_FLAGS_ERROR (1 << 6) #define RX_CMP_FLAGS_PLACEMENT (7 << 7) #define RX_CMP_FLAGS_RSS_VALID (1 << 10) - #define RX_CMP_FLAGS_UNUSED (1 << 11) + #define RX_CMP_FLAGS_PKT_METADATA_PRESENT (1 << 11) #define RX_CMP_FLAGS_ITYPES_SHIFT 12 #define RX_CMP_FLAGS_ITYPES_MASK 0xf000 #define RX_CMP_FLAGS_ITYPE_UNKNOWN (0 << 12) @@ -188,6 +188,12 @@ struct rx_cmp { __le32 rx_cmp_rss_hash; }; +#define BNXT_PTP_RX_TS_VALID(flags) \ + (((flags) & RX_CMP_FLAGS_ITYPES_MASK) == RX_CMP_FLAGS_ITYPE_PTP_W_TS) + +#define BNXT_ALL_RX_TS_VALID(flags) \ + !((flags) & RX_CMP_FLAGS_PKT_METADATA_PRESENT) + #define RX_CMP_HASH_VALID(rxcmp) \ ((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID)) @@ -919,9 +925,6 @@ struct bnxt_rx_ring_info { unsigned long *rx_agg_bmap; u16 rx_agg_bmap_size; - struct page *rx_page; - unsigned int rx_page_offset; - dma_addr_t rx_desc_mapping[MAX_RX_PAGES]; dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES]; @@ -942,15 +945,32 @@ struct 
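bnxt_fw_pre_resv_vnics() above keys a capability off (major, build) pairs pulled from the packed firmware version code; the matching BNXT_FW_BLD accessor is added to bnxt.h just below. The arithmetic, shown with the same 16-bit fields as the existing BNXT_FW_VER_CODE definition (demo_ver_code() is an invented name):

#include <linux/types.h>

/* maj.min.bld.rsv, each 16 bits wide, packed into a u64 */
static inline u64 demo_ver_code(u16 maj, u16 min, u16 bld, u16 rsv)
{
	return (u64)maj << 48 | (u64)min << 32 | (u64)bld << 16 | rsv;
}

/* e.g. firmware 218.0.18.0 packs to 0x00da000000120000, so
 * maj = code >> 48 = 218 and bld = (code >> 16) & 0xffff = 18,
 * the oldest non-P5 build the new pre-reserved-VNICs check accepts.
 */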
bnxt_rx_sw_stats { u64 rx_netpoll_discards; }; +struct bnxt_tx_sw_stats { + u64 tx_resets; +}; + struct bnxt_cmn_sw_stats { u64 missed_irqs; }; struct bnxt_sw_stats { struct bnxt_rx_sw_stats rx; + struct bnxt_tx_sw_stats tx; struct bnxt_cmn_sw_stats cmn; }; +struct bnxt_total_ring_err_stats { + u64 rx_total_l4_csum_errors; + u64 rx_total_resets; + u64 rx_total_buf_errors; + u64 rx_total_oom_discards; + u64 rx_total_netpoll_discards; + u64 rx_total_ring_discards; + u64 tx_total_resets; + u64 tx_total_ring_discards; + u64 total_missed_irqs; +}; + struct bnxt_stats_mem { u64 *sw_stats; u64 *hw_masks; @@ -1008,6 +1028,7 @@ struct bnxt_napi { int budget); int tx_pkts; u8 events; + u8 tx_fault:1; u32 flags; #define BNXT_NAPI_FLAG_XDP 0x1 @@ -1280,6 +1301,7 @@ struct bnxt_link_info { u8 req_signal_mode; #define BNXT_SIG_MODE_NRZ PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ #define BNXT_SIG_MODE_PAM4 PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 +#define BNXT_SIG_MODE_MAX (PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST + 1) u8 req_duplex; u8 req_flow_ctrl; u16 req_link_speed; @@ -1998,6 +2020,9 @@ struct bnxt { #define BNXT_FW_CAP_RING_MONITOR BIT_ULL(30) #define BNXT_FW_CAP_DBG_QCAPS BIT_ULL(31) #define BNXT_FW_CAP_PTP BIT_ULL(32) + #define BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED BIT_ULL(33) + #define BNXT_FW_CAP_DFLT_VLAN_TPID_PCP BIT_ULL(34) + #define BNXT_FW_CAP_PRE_RESV_VNICS BIT_ULL(35) u32 fw_dbg_cap; @@ -2020,6 +2045,8 @@ struct bnxt { u8 pri2cos_idx[8]; u8 pri2cos_valid; + struct bnxt_total_ring_err_stats ring_err_stats_prev; + u16 hwrm_max_req_len; u16 hwrm_max_ext_req_len; unsigned int hwrm_cmd_timeout; @@ -2036,6 +2063,7 @@ struct bnxt { #define BNXT_FW_VER_CODE(maj, min, bld, rsv) \ ((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv)) #define BNXT_FW_MAJ(bp) ((bp)->fw_ver_code >> 48) +#define BNXT_FW_BLD(bp) (((bp)->fw_ver_code >> 16) & 0xffff) u16 vxlan_fw_dst_port_id; u16 nge_fw_dst_port_id; @@ -2073,6 +2101,7 @@ struct bnxt { #define BNXT_FW_RESET_NOTIFY_SP_EVENT 18 #define BNXT_FW_EXCEPTION_SP_EVENT 19 #define BNXT_LINK_CFG_CHANGE_SP_EVENT 21 +#define BNXT_THERMAL_THRESHOLD_SP_EVENT 22 #define BNXT_FW_ECHO_REQUEST_SP_EVENT 23 struct delayed_work fw_reset_task; @@ -2168,7 +2197,14 @@ struct bnxt { struct bnxt_tc_info *tc_info; struct list_head tc_indr_block_list; struct dentry *debugfs_pdev; +#ifdef CONFIG_BNXT_HWMON struct device *hwmon_dev; + u8 warn_thresh_temp; + u8 crit_thresh_temp; + u8 fatal_thresh_temp; + u8 shutdown_thresh_temp; +#endif + u32 thermal_threshold_type; enum board_idx board_idx; }; @@ -2329,6 +2365,8 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num); int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init); void bnxt_tx_disable(struct bnxt *bp); void bnxt_tx_enable(struct bnxt *bp); +void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + int idx); void bnxt_report_link(struct bnxt *bp); int bnxt_update_link(struct bnxt *bp, bool chng_link_state); int bnxt_hwrm_set_pause(struct bnxt *); @@ -2343,7 +2381,9 @@ int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_half_open_nic(struct bnxt *bp); void bnxt_half_close_nic(struct bnxt *bp); void bnxt_reenable_sriov(struct bnxt *bp); -int bnxt_close_nic(struct bnxt *, bool, bool); +void bnxt_close_nic(struct bnxt *, bool, bool); +void bnxt_get_ring_err_stats(struct bnxt *bp, + struct bnxt_total_ring_err_stats *stats); int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, u32 *reg_buf); void bnxt_fw_exception(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c 
b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index caab3d626a2a..63e067038385 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -98,7 +98,6 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, { struct hwrm_queue_cos2bw_cfg_input *req; struct bnxt_cos2bw_cfg cos2bw; - void *data; int rc, i; rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_CFG); @@ -129,11 +128,15 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, cpu_to_le32((ets->tc_tx_bw[i] * 100) | BW_VALUE_UNIT_PERCENT1_100); } - data = &req->unused_0 + qidx * (sizeof(cos2bw) - 4); - memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4); if (qidx == 0) { req->queue_id0 = cos2bw.queue_id; - req->unused_0 = 0; + req->queue_id0_min_bw = cos2bw.min_bw; + req->queue_id0_max_bw = cos2bw.max_bw; + req->queue_id0_tsa_assign = cos2bw.tsa; + req->queue_id0_pri_lvl = cos2bw.pri_lvl; + req->queue_id0_bw_weight = cos2bw.bw_weight; + } else { + memcpy(&req->cfg[i - 1], &cos2bw.cfg, sizeof(cos2bw.cfg)); } } return hwrm_req_send(bp, req); @@ -144,7 +147,6 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) struct hwrm_queue_cos2bw_qcfg_output *resp; struct hwrm_queue_cos2bw_qcfg_input *req; struct bnxt_cos2bw_cfg cos2bw; - void *data; int rc, i; rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_QCFG); @@ -158,13 +160,19 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) return rc; } - data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id); - for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw.cfg)) { + for (i = 0; i < bp->max_tc; i++) { int tc; - memcpy(&cos2bw.cfg, data, sizeof(cos2bw.cfg)); - if (i == 0) + if (i == 0) { cos2bw.queue_id = resp->queue_id0; + cos2bw.min_bw = resp->queue_id0_min_bw; + cos2bw.max_bw = resp->queue_id0_max_bw; + cos2bw.tsa = resp->queue_id0_tsa_assign; + cos2bw.pri_lvl = resp->queue_id0_pri_lvl; + cos2bw.bw_weight = resp->queue_id0_bw_weight; + } else { + memcpy(&cos2bw.cfg, &resp->cfg[i - 1], sizeof(cos2bw.cfg)); + } tc = bnxt_queue_to_tc(bp, cos2bw.queue_id); if (tc < 0) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h index 716742522161..5b2a6f678244 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h @@ -27,11 +27,12 @@ struct bnxt_cos2bw_cfg { u8 queue_id; __le32 min_bw; __le32 max_bw; -#define BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) u8 tsa; u8 pri_lvl; u8 bw_weight; ); +/* for min_bw / max_bw */ +#define BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) u8 unused; }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 8b3e7697390f..89809f1b129c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -62,7 +62,7 @@ static int bnxt_hwrm_remote_dev_reset_set(struct bnxt *bp, bool remote_reset) if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) return -EOPNOTSUPP; - rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); if (rc) return rc; @@ -104,20 +104,21 @@ static int bnxt_fw_diagnose(struct devlink_health_reporter *reporter, struct bnxt *bp = devlink_health_reporter_priv(reporter); struct bnxt_fw_health *h = bp->fw_health; u32 fw_status, fw_resets; - int rc; - if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) - return devlink_fmsg_string_pair_put(fmsg, 
"Status", "recovering"); + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { + devlink_fmsg_string_pair_put(fmsg, "Status", "recovering"); + return 0; + } - if (!h->status_reliable) - return devlink_fmsg_string_pair_put(fmsg, "Status", "unknown"); + if (!h->status_reliable) { + devlink_fmsg_string_pair_put(fmsg, "Status", "unknown"); + return 0; + } mutex_lock(&h->lock); fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); if (BNXT_FW_IS_BOOTING(fw_status)) { - rc = devlink_fmsg_string_pair_put(fmsg, "Status", "initializing"); - if (rc) - goto unlock; + devlink_fmsg_string_pair_put(fmsg, "Status", "initializing"); } else if (h->severity || fw_status != BNXT_FW_STATUS_HEALTHY) { if (!h->severity) { h->severity = SEVERITY_FATAL; @@ -126,58 +127,35 @@ static int bnxt_fw_diagnose(struct devlink_health_reporter *reporter, devlink_health_report(h->fw_reporter, "FW error diagnosed", h); } - rc = devlink_fmsg_string_pair_put(fmsg, "Status", "error"); - if (rc) - goto unlock; - rc = devlink_fmsg_u32_pair_put(fmsg, "Syndrome", fw_status); - if (rc) - goto unlock; + devlink_fmsg_string_pair_put(fmsg, "Status", "error"); + devlink_fmsg_u32_pair_put(fmsg, "Syndrome", fw_status); } else { - rc = devlink_fmsg_string_pair_put(fmsg, "Status", "healthy"); - if (rc) - goto unlock; + devlink_fmsg_string_pair_put(fmsg, "Status", "healthy"); } - rc = devlink_fmsg_string_pair_put(fmsg, "Severity", - bnxt_health_severity_str(h->severity)); - if (rc) - goto unlock; + devlink_fmsg_string_pair_put(fmsg, "Severity", + bnxt_health_severity_str(h->severity)); if (h->severity) { - rc = devlink_fmsg_string_pair_put(fmsg, "Remedy", - bnxt_health_remedy_str(h->remedy)); - if (rc) - goto unlock; - if (h->remedy == REMEDY_DEVLINK_RECOVER) { - rc = devlink_fmsg_string_pair_put(fmsg, "Impact", - "traffic+ntuple_cfg"); - if (rc) - goto unlock; - } + devlink_fmsg_string_pair_put(fmsg, "Remedy", + bnxt_health_remedy_str(h->remedy)); + if (h->remedy == REMEDY_DEVLINK_RECOVER) + devlink_fmsg_string_pair_put(fmsg, "Impact", + "traffic+ntuple_cfg"); } -unlock: mutex_unlock(&h->lock); - if (rc || !h->resets_reliable) - return rc; + if (!h->resets_reliable) + return 0; fw_resets = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); - rc = devlink_fmsg_u32_pair_put(fmsg, "Resets", fw_resets); - if (rc) - return rc; - rc = devlink_fmsg_u32_pair_put(fmsg, "Arrests", h->arrests); - if (rc) - return rc; - rc = devlink_fmsg_u32_pair_put(fmsg, "Survivals", h->survivals); - if (rc) - return rc; - rc = devlink_fmsg_u32_pair_put(fmsg, "Discoveries", h->discoveries); - if (rc) - return rc; - rc = devlink_fmsg_u32_pair_put(fmsg, "Fatalities", h->fatalities); - if (rc) - return rc; - return devlink_fmsg_u32_pair_put(fmsg, "Diagnoses", h->diagnoses); + devlink_fmsg_u32_pair_put(fmsg, "Resets", fw_resets); + devlink_fmsg_u32_pair_put(fmsg, "Arrests", h->arrests); + devlink_fmsg_u32_pair_put(fmsg, "Survivals", h->survivals); + devlink_fmsg_u32_pair_put(fmsg, "Discoveries", h->discoveries); + devlink_fmsg_u32_pair_put(fmsg, "Fatalities", h->fatalities); + devlink_fmsg_u32_pair_put(fmsg, "Diagnoses", h->diagnoses); + return 0; } static int bnxt_fw_dump(struct devlink_health_reporter *reporter, @@ -203,19 +181,12 @@ static int bnxt_fw_dump(struct devlink_health_reporter *reporter, rc = bnxt_get_coredump(bp, BNXT_DUMP_LIVE, data, &dump_len); if (!rc) { - rc = devlink_fmsg_pair_nest_start(fmsg, "core"); - if (rc) - goto exit; - rc = devlink_fmsg_binary_pair_put(fmsg, "data", data, dump_len); - if (rc) - goto exit; - rc = devlink_fmsg_u32_pair_put(fmsg, 
"size", dump_len); - if (rc) - goto exit; - rc = devlink_fmsg_pair_nest_end(fmsg); + devlink_fmsg_pair_nest_start(fmsg, "core"); + devlink_fmsg_binary_pair_put(fmsg, "data", data, dump_len); + devlink_fmsg_u32_pair_put(fmsg, "size", dump_len); + devlink_fmsg_pair_nest_end(fmsg); } -exit: vfree(data); return rc; } @@ -478,15 +449,8 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change, return -ENODEV; } bnxt_ulp_stop(bp); - if (netif_running(bp->dev)) { - rc = bnxt_close_nic(bp, true, true); - if (rc) { - NL_SET_ERR_MSG_MOD(extack, "Failed to close"); - dev_close(bp->dev); - rtnl_unlock(); - break; - } - } + if (netif_running(bp->dev)) + bnxt_close_nic(bp, true, true); bnxt_vf_reps_free(bp); rc = bnxt_hwrm_func_drv_unrgtr(bp); if (rc) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 8fd5071d8b09..5f67a7f94e7d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -8,6 +8,7 @@ * the Free Software Foundation. */ +#include <linux/bitops.h> #include <linux/ctype.h> #include <linux/stringify.h> #include <linux/ethtool.h> @@ -164,9 +165,8 @@ static int bnxt_set_coalesce(struct net_device *dev, reset_coalesce: if (test_bit(BNXT_STATE_OPEN, &bp->state)) { if (update_stats) { - rc = bnxt_close_nic(bp, true, false); - if (!rc) - rc = bnxt_open_nic(bp, true, false); + bnxt_close_nic(bp, true, false); + rc = bnxt_open_nic(bp, true, false); } else { rc = bnxt_hwrm_set_coal(bp); } @@ -339,13 +339,16 @@ enum { RX_NETPOLL_DISCARDS, }; -static struct { - u64 counter; - char string[ETH_GSTRING_LEN]; -} bnxt_sw_func_stats[] = { - {0, "rx_total_discard_pkts"}, - {0, "tx_total_discard_pkts"}, - {0, "rx_total_netpoll_discards"}, +static const char *const bnxt_ring_err_stats_arr[] = { + "rx_total_l4_csum_errors", + "rx_total_resets", + "rx_total_buf_errors", + "rx_total_oom_discards", + "rx_total_netpoll_discards", + "rx_total_ring_discards", + "tx_total_resets", + "tx_total_ring_discards", + "total_missed_irqs", }; #define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str) @@ -495,7 +498,7 @@ static const struct { BNXT_TX_STATS_PRI_ENTRIES(tx_packets), }; -#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats) +#define BNXT_NUM_RING_ERR_STATS ARRAY_SIZE(bnxt_ring_err_stats_arr) #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) #define BNXT_NUM_STATS_PRI \ (ARRAY_SIZE(bnxt_rx_bytes_pri_arr) + \ @@ -531,15 +534,20 @@ static int bnxt_get_num_ring_stats(struct bnxt *bp) static int bnxt_get_num_stats(struct bnxt *bp) { int num_stats = bnxt_get_num_ring_stats(bp); + int len; - num_stats += BNXT_NUM_SW_FUNC_STATS; + num_stats += BNXT_NUM_RING_ERR_STATS; if (bp->flags & BNXT_FLAG_PORT_STATS) num_stats += BNXT_NUM_PORT_STATS; if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { - num_stats += bp->fw_rx_stats_ext_size + - bp->fw_tx_stats_ext_size; + len = min_t(int, bp->fw_rx_stats_ext_size, + ARRAY_SIZE(bnxt_port_stats_ext_arr)); + num_stats += len; + len = min_t(int, bp->fw_tx_stats_ext_size, + ARRAY_SIZE(bnxt_tx_port_stats_ext_arr)); + num_stats += len; if (bp->pri2cos_valid) num_stats += BNXT_NUM_STATS_PRI; } @@ -583,18 +591,17 @@ static bool is_tx_ring(struct bnxt *bp, int ring_num) static void bnxt_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *buf) { - u32 i, j = 0; + struct bnxt_total_ring_err_stats ring_err_stats = {0}; struct bnxt *bp = netdev_priv(dev); + u64 *curr, *prev; u32 tpa_stats; + u32 i, j = 0; if 
(!bp->bnapi) { - j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS; + j += bnxt_get_num_ring_stats(bp); goto skip_ring_stats; } - for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) - bnxt_sw_func_stats[i].counter = 0; - tpa_stats = bnxt_get_num_tpa_ring_stats(bp); for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; @@ -631,19 +638,16 @@ skip_tpa_ring_stats: sw = (u64 *)&cpr->sw_stats.cmn; for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++) buf[j] = sw[k]; - - bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter += - BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts); - bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter += - BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts); - bnxt_sw_func_stats[RX_NETPOLL_DISCARDS].counter += - cpr->sw_stats.rx.rx_netpoll_discards; } - for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++) - buf[j] = bnxt_sw_func_stats[i].counter; + bnxt_get_ring_err_stats(bp, &ring_err_stats); skip_ring_stats: + curr = &ring_err_stats.rx_total_l4_csum_errors; + prev = &bp->ring_err_stats_prev.rx_total_l4_csum_errors; + for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++, j++, curr++, prev++) + buf[j] = *curr + *prev; + if (bp->flags & BNXT_FLAG_PORT_STATS) { u64 *port_stats = bp->port_stats.sw_stats; @@ -653,12 +657,17 @@ skip_ring_stats: if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats; u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats; + u32 len; - for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) { + len = min_t(u32, bp->fw_rx_stats_ext_size, + ARRAY_SIZE(bnxt_port_stats_ext_arr)); + for (i = 0; i < len; i++, j++) { buf[j] = *(rx_port_stats_ext + bnxt_port_stats_ext_arr[i].offset); } - for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) { + len = min_t(u32, bp->fw_tx_stats_ext_size, + ARRAY_SIZE(bnxt_tx_port_stats_ext_arr)); + for (i = 0; i < len; i++, j++) { buf[j] = *(tx_port_stats_ext + bnxt_tx_port_stats_ext_arr[i].offset); } @@ -745,8 +754,8 @@ skip_tpa_stats: buf += ETH_GSTRING_LEN; } } - for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) { - strcpy(buf, bnxt_sw_func_stats[i].string); + for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++) { + strscpy(buf, bnxt_ring_err_stats_arr[i], ETH_GSTRING_LEN); buf += ETH_GSTRING_LEN; } @@ -757,11 +766,17 @@ skip_tpa_stats: } } if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { - for (i = 0; i < bp->fw_rx_stats_ext_size; i++) { + u32 len; + + len = min_t(u32, bp->fw_rx_stats_ext_size, + ARRAY_SIZE(bnxt_port_stats_ext_arr)); + for (i = 0; i < len; i++) { strcpy(buf, bnxt_port_stats_ext_arr[i].string); buf += ETH_GSTRING_LEN; } - for (i = 0; i < bp->fw_tx_stats_ext_size; i++) { + len = min_t(u32, bp->fw_tx_stats_ext_size, + ARRAY_SIZE(bnxt_tx_port_stats_ext_arr)); + for (i = 0; i < len; i++) { strcpy(buf, bnxt_tx_port_stats_ext_arr[i].string); buf += ETH_GSTRING_LEN; @@ -956,12 +971,7 @@ static int bnxt_set_channels(struct net_device *dev, * before PF unload */ } - rc = bnxt_close_nic(bp, true, false); - if (rc) { - netdev_err(bp->dev, "Set channel failure rc :%x\n", - rc); - return rc; - } + bnxt_close_nic(bp, true, false); } if (sh) { @@ -1507,94 +1517,388 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause) return speed_mask; } -#define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\ -{ \ - if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB) \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - 100baseT_Full); \ - if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB) \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - 1000baseT_Full); \ - if 
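The ethtool hunks above pair two hardening idioms: firmware-reported extended-stats counts get clamped with min_t() so indexing can never run past the driver's own name tables, and the unbounded strcpy() of stat names becomes strscpy(), which guarantees NUL-termination within ETH_GSTRING_LEN. A toy version, with demo_* names invented and fw_count standing in for the untrusted device-supplied size:

#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/string.h>

static const char *const demo_stat_names[] = { "rx_ok", "rx_err" };

static char *demo_fill_strings(char *buf, u32 fw_count)
{
	u32 i, len = min_t(u32, fw_count, ARRAY_SIZE(demo_stat_names));

	for (i = 0; i < len; i++) {
		strscpy(buf, demo_stat_names[i], ETH_GSTRING_LEN);
		buf += ETH_GSTRING_LEN;
	}
	return buf;
}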
((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB) \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - 10000baseT_Full); \ - if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB) \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - 25000baseCR_Full); \ - if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB) \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - 40000baseCR4_Full);\ - if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB) \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - 50000baseCR2_Full);\ - if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100GB) \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - 100000baseCR4_Full);\ - if ((fw_pause) & BNXT_LINK_PAUSE_RX) { \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - Pause); \ - if (!((fw_pause) & BNXT_LINK_PAUSE_TX)) \ - ethtool_link_ksettings_add_link_mode( \ - lk_ksettings, name, Asym_Pause);\ - } else if ((fw_pause) & BNXT_LINK_PAUSE_TX) { \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - Asym_Pause); \ - } \ +enum bnxt_media_type { + BNXT_MEDIA_UNKNOWN = 0, + BNXT_MEDIA_TP, + BNXT_MEDIA_CR, + BNXT_MEDIA_SR, + BNXT_MEDIA_LR_ER_FR, + BNXT_MEDIA_KR, + BNXT_MEDIA_KX, + BNXT_MEDIA_X, + __BNXT_MEDIA_END, +}; + +static const enum bnxt_media_type bnxt_phy_types[] = { + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4] = BNXT_MEDIA_KR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2] = BNXT_MEDIA_KR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX] = BNXT_MEDIA_KX, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR] = BNXT_MEDIA_KR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASET] = BNXT_MEDIA_TP, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE] = BNXT_MEDIA_TP, + [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET] = BNXT_MEDIA_TP, + [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX] = BNXT_MEDIA_X, + [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX] = BNXT_MEDIA_X, + [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2] = 
BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2] = BNXT_MEDIA_LR_ER_FR, +}; + +static enum bnxt_media_type +bnxt_get_media(struct bnxt_link_info *link_info) +{ + switch (link_info->media_type) { + case PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP: + return BNXT_MEDIA_TP; + case PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC: + return BNXT_MEDIA_CR; + default: + if (link_info->phy_type < ARRAY_SIZE(bnxt_phy_types)) + return bnxt_phy_types[link_info->phy_type]; + return BNXT_MEDIA_UNKNOWN; + } } -#define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name) \ -{ \ - if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 100baseT_Full) || \ - ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 100baseT_Half)) \ - (fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB; \ - if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 1000baseT_Full) || \ - ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 1000baseT_Half)) \ - (fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB; \ - if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 10000baseT_Full)) \ - (fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB; \ - if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 25000baseCR_Full)) \ - (fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB; \ - if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 40000baseCR4_Full)) \ - (fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB; \ - if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 50000baseCR2_Full)) \ - (fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB; \ - if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 100000baseCR4_Full)) \ - (fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB; \ +enum bnxt_link_speed_indices { + BNXT_LINK_SPEED_UNKNOWN = 0, + BNXT_LINK_SPEED_100MB_IDX, + BNXT_LINK_SPEED_1GB_IDX, + BNXT_LINK_SPEED_10GB_IDX, + BNXT_LINK_SPEED_25GB_IDX, + BNXT_LINK_SPEED_40GB_IDX, + BNXT_LINK_SPEED_50GB_IDX, + BNXT_LINK_SPEED_100GB_IDX, + BNXT_LINK_SPEED_200GB_IDX, + __BNXT_LINK_SPEED_END +}; + +static enum bnxt_link_speed_indices bnxt_fw_speed_idx(u16 speed) +{ + switch (speed) { + case BNXT_LINK_SPEED_100MB: return BNXT_LINK_SPEED_100MB_IDX; + case BNXT_LINK_SPEED_1GB: return BNXT_LINK_SPEED_1GB_IDX; + case BNXT_LINK_SPEED_10GB: return BNXT_LINK_SPEED_10GB_IDX; + case BNXT_LINK_SPEED_25GB: return BNXT_LINK_SPEED_25GB_IDX; + case BNXT_LINK_SPEED_40GB: return BNXT_LINK_SPEED_40GB_IDX; + case BNXT_LINK_SPEED_50GB: return BNXT_LINK_SPEED_50GB_IDX; + case BNXT_LINK_SPEED_100GB: return BNXT_LINK_SPEED_100GB_IDX; + case BNXT_LINK_SPEED_200GB: return BNXT_LINK_SPEED_200GB_IDX; + default: return BNXT_LINK_SPEED_UNKNOWN; + } } -#define BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, name) \ -{ \ - if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_50GB) \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - 50000baseCR_Full); \ - if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_100GB) \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - 100000baseCR2_Full);\ - if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_200GB) \ - ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ - 200000baseCR4_Full);\ +static const enum ethtool_link_mode_bit_indices +bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = { + [BNXT_LINK_SPEED_100MB_IDX] = { + { + [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_100baseT_Full_BIT, + }, + }, + [BNXT_LINK_SPEED_1GB_IDX] = { + { + [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + /* historically baseT, but DAC is more correctly baseX */ + [BNXT_MEDIA_CR] = 
ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + [BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + [BNXT_MEDIA_X] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + }, + }, + [BNXT_LINK_SPEED_10GB_IDX] = { + { + [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + [BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, + }, + }, + [BNXT_LINK_SPEED_25GB_IDX] = { + { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + }, + }, + [BNXT_LINK_SPEED_40GB_IDX] = { + { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + }, + }, + [BNXT_LINK_SPEED_50GB_IDX] = { + [BNXT_SIG_MODE_NRZ] = { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + }, + [BNXT_SIG_MODE_PAM4] = { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, + [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, + }, + }, + [BNXT_LINK_SPEED_100GB_IDX] = { + [BNXT_SIG_MODE_NRZ] = { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + }, + [BNXT_SIG_MODE_PAM4] = { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, + [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, + }, + }, + [BNXT_LINK_SPEED_200GB_IDX] = { + [BNXT_SIG_MODE_PAM4] = { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, + [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, + }, + }, +}; + +#define BNXT_LINK_MODE_UNKNOWN -1 + +static enum ethtool_link_mode_bit_indices +bnxt_get_link_mode(struct bnxt_link_info *link_info) +{ + enum ethtool_link_mode_bit_indices link_mode; + enum bnxt_link_speed_indices speed; + enum bnxt_media_type media; + u8 sig_mode; + + if (link_info->phy_link_status != BNXT_LINK_LINK) + return BNXT_LINK_MODE_UNKNOWN; + + media = bnxt_get_media(link_info); + if (BNXT_AUTO_MODE(link_info->auto_mode)) { + speed = bnxt_fw_speed_idx(link_info->link_speed); + sig_mode = link_info->active_fec_sig_mode & + PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK; + } else { + speed = bnxt_fw_speed_idx(link_info->req_link_speed); + sig_mode = link_info->req_signal_mode; + } + if (sig_mode >= BNXT_SIG_MODE_MAX) + return BNXT_LINK_MODE_UNKNOWN; + + /* Note ETHTOOL_LINK_MODE_10baseT_Half_BIT == 0 is a legal Linux + * link mode, but since no such devices exist, the zeroes in the + * 
map can be conveniently used to represent unknown link modes. + */ + link_mode = bnxt_link_modes[speed][sig_mode][media]; + if (!link_mode) + return BNXT_LINK_MODE_UNKNOWN; + + switch (link_mode) { + case ETHTOOL_LINK_MODE_100baseT_Full_BIT: + if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL) + link_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT; + break; + case ETHTOOL_LINK_MODE_1000baseT_Full_BIT: + if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL) + link_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT; + break; + default: + break; + } + + return link_mode; } -#define BNXT_ETHTOOL_TO_FW_PAM4_SPDS(fw_speeds, lk_ksettings, name) \ -{ \ - if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 50000baseCR_Full)) \ - (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_50GB; \ - if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 100000baseCR2_Full)) \ - (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_100GB; \ - if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ - 200000baseCR4_Full)) \ - (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_200GB; \ +static void bnxt_get_ethtool_modes(struct bnxt_link_info *link_info, + struct ethtool_link_ksettings *lk_ksettings) +{ + struct bnxt *bp = container_of(link_info, struct bnxt, link_info); + + if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) { + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + lk_ksettings->link_modes.supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + lk_ksettings->link_modes.supported); + } + + if (link_info->support_auto_speeds || link_info->support_pam4_auto_speeds) + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + lk_ksettings->link_modes.supported); + + if (~link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) + return; + + if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX) + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + lk_ksettings->link_modes.advertising); + if (hweight8(link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == 1) + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + lk_ksettings->link_modes.advertising); + if (link_info->lp_pause & BNXT_LINK_PAUSE_RX) + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + lk_ksettings->link_modes.lp_advertising); + if (hweight8(link_info->lp_pause & BNXT_LINK_PAUSE_BOTH) == 1) + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + lk_ksettings->link_modes.lp_advertising); +} + +static const u16 bnxt_nrz_speed_masks[] = { + [BNXT_LINK_SPEED_100MB_IDX] = BNXT_LINK_SPEED_MSK_100MB, + [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEED_MSK_1GB, + [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEED_MSK_10GB, + [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEED_MSK_25GB, + [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEED_MSK_40GB, + [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEED_MSK_50GB, + [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEED_MSK_100GB, + [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */ +}; + +static const u16 bnxt_pam4_speed_masks[] = { + [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_50GB, + [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_100GB, + [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_200GB, +}; + +static enum bnxt_link_speed_indices +bnxt_encoding_speed_idx(u8 sig_mode, u16 speed_msk) +{ + const u16 *speeds; + int idx, len; + + switch (sig_mode) { + case BNXT_SIG_MODE_NRZ: + speeds = bnxt_nrz_speed_masks; + len = ARRAY_SIZE(bnxt_nrz_speed_masks); + break; + case BNXT_SIG_MODE_PAM4: + speeds = bnxt_pam4_speed_masks; + len = ARRAY_SIZE(bnxt_pam4_speed_masks); + break; + default: + return BNXT_LINK_SPEED_UNKNOWN; + } + + for (idx = 0; idx < 
len; idx++) { + if (speeds[idx] == speed_msk) + return idx; + } + + return BNXT_LINK_SPEED_UNKNOWN; +} + +#define BNXT_FW_SPEED_MSK_BITS 16 + +static void +__bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media, + u8 sig_mode, unsigned long *et_mask) +{ + enum ethtool_link_mode_bit_indices link_mode; + enum bnxt_link_speed_indices speed; + u8 bit; + + for_each_set_bit(bit, &fw_mask, BNXT_FW_SPEED_MSK_BITS) { + speed = bnxt_encoding_speed_idx(sig_mode, 1 << bit); + if (!speed) + continue; + + link_mode = bnxt_link_modes[speed][sig_mode][media]; + if (!link_mode) + continue; + + linkmode_set_bit(link_mode, et_mask); + } +} + +static void +bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media, + u8 sig_mode, unsigned long *et_mask) +{ + if (media) { + __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, et_mask); + return; + } + + /* list speeds for all media if unknown */ + for (media = 1; media < __BNXT_MEDIA_END; media++) + __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, et_mask); +} + +static void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds, + u16 speed_msk, const unsigned long *et_mask, + enum ethtool_link_mode_bit_indices mode) +{ + bool mode_desired = linkmode_test_bit(mode, et_mask); + + if (!mode) + return; + + /* enabled speeds for installed media should override */ + if (installed_media && mode_desired) { + *speeds |= speed_msk; + *delta |= speed_msk; + return; + } + + /* many to one mapping, only allow one change per fw_speed bit */ + if (!(*delta & speed_msk) && (mode_desired == !(*speeds & speed_msk))) { + *speeds ^= speed_msk; + *delta |= speed_msk; + } +} + +static void bnxt_set_ethtool_speeds(struct bnxt_link_info *link_info, + const unsigned long *et_mask) +{ + enum bnxt_media_type media = bnxt_get_media(link_info); + u32 delta_pam4 = 0; + u32 delta_nrz = 0; + int i, m; + + for (i = 1; i < __BNXT_LINK_SPEED_END; i++) { + /* accept any legal media from user */ + for (m = 1; m < __BNXT_MEDIA_END; m++) { + bnxt_update_speed(&delta_nrz, m == media, + &link_info->advertising, + bnxt_nrz_speed_masks[i], et_mask, + bnxt_link_modes[i][BNXT_SIG_MODE_NRZ][m]); + bnxt_update_speed(&delta_pam4, m == media, + &link_info->advertising_pam4, + bnxt_pam4_speed_masks[i], et_mask, + bnxt_link_modes[i][BNXT_SIG_MODE_PAM4][m]); + } + } } static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info, @@ -1618,36 +1922,6 @@ static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info, lk_ksettings->link_modes.advertising); } -static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info, - struct ethtool_link_ksettings *lk_ksettings) -{ - u16 fw_speeds = link_info->advertising; - u8 fw_pause = 0; - - if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) - fw_pause = link_info->auto_pause_setting; - - BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising); - fw_speeds = link_info->advertising_pam4; - BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, advertising); - bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings); -} - -static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info, - struct ethtool_link_ksettings *lk_ksettings) -{ - u16 fw_speeds = link_info->lp_auto_link_speeds; - u8 fw_pause = 0; - - if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) - fw_pause = link_info->lp_pause; - - BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, - lp_advertising); - fw_speeds = link_info->lp_auto_pam4_link_speeds; - 
BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, lp_advertising); -} - static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info, struct ethtool_link_ksettings *lk_ksettings) { @@ -1669,30 +1943,6 @@ static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info, lk_ksettings->link_modes.supported); } -static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info, - struct ethtool_link_ksettings *lk_ksettings) -{ - struct bnxt *bp = container_of(link_info, struct bnxt, link_info); - u16 fw_speeds = link_info->support_speeds; - - BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported); - fw_speeds = link_info->support_pam4_speeds; - BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, supported); - - if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) { - ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, - Pause); - ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, - Asym_Pause); - } - - if (link_info->support_auto_speeds || - link_info->support_pam4_auto_speeds) - ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, - Autoneg); - bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings); -} - u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) { switch (fw_link_speed) { @@ -1721,60 +1971,95 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) } } +static void bnxt_get_default_speeds(struct ethtool_link_ksettings *lk_ksettings, + struct bnxt_link_info *link_info) +{ + struct ethtool_link_settings *base = &lk_ksettings->base; + + if (link_info->link_state == BNXT_LINK_STATE_UP) { + base->speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); + base->duplex = DUPLEX_HALF; + if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) + base->duplex = DUPLEX_FULL; + } else if (!link_info->autoneg) { + base->speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed); + base->duplex = DUPLEX_HALF; + if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL) + base->duplex = DUPLEX_FULL; + } +} + static int bnxt_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *lk_ksettings) { - struct bnxt *bp = netdev_priv(dev); - struct bnxt_link_info *link_info = &bp->link_info; struct ethtool_link_settings *base = &lk_ksettings->base; - u32 ethtool_speed; + enum ethtool_link_mode_bit_indices link_mode; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_link_info *link_info; + enum bnxt_media_type media; + ethtool_link_ksettings_zero_link_mode(lk_ksettings, lp_advertising); + ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported); + base->duplex = DUPLEX_UNKNOWN; + base->speed = SPEED_UNKNOWN; + link_info = &bp->link_info; + mutex_lock(&bp->link_lock); - bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings); + bnxt_get_ethtool_modes(link_info, lk_ksettings); + media = bnxt_get_media(link_info); + bnxt_get_ethtool_speeds(link_info->support_speeds, + media, BNXT_SIG_MODE_NRZ, + lk_ksettings->link_modes.supported); + bnxt_get_ethtool_speeds(link_info->support_pam4_speeds, + media, BNXT_SIG_MODE_PAM4, + lk_ksettings->link_modes.supported); + bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings); + link_mode = bnxt_get_link_mode(link_info); + if (link_mode != BNXT_LINK_MODE_UNKNOWN) + ethtool_params_from_link_mode(lk_ksettings, link_mode); + else + bnxt_get_default_speeds(lk_ksettings, link_info); - ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); if (link_info->autoneg) { - bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings); - 
ethtool_link_ksettings_add_link_mode(lk_ksettings, - advertising, Autoneg); + bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings); + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + lk_ksettings->link_modes.advertising); base->autoneg = AUTONEG_ENABLE; - base->duplex = DUPLEX_UNKNOWN; + bnxt_get_ethtool_speeds(link_info->advertising, + media, BNXT_SIG_MODE_NRZ, + lk_ksettings->link_modes.advertising); + bnxt_get_ethtool_speeds(link_info->advertising_pam4, + media, BNXT_SIG_MODE_PAM4, + lk_ksettings->link_modes.advertising); if (link_info->phy_link_status == BNXT_LINK_LINK) { - bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings); - if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) - base->duplex = DUPLEX_FULL; - else - base->duplex = DUPLEX_HALF; + bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds, + media, BNXT_SIG_MODE_NRZ, + lk_ksettings->link_modes.lp_advertising); + bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds, + media, BNXT_SIG_MODE_PAM4, + lk_ksettings->link_modes.lp_advertising); } - ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); } else { base->autoneg = AUTONEG_DISABLE; - ethtool_speed = - bnxt_fw_to_ethtool_speed(link_info->req_link_speed); - base->duplex = DUPLEX_HALF; - if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL) - base->duplex = DUPLEX_FULL; } - base->speed = ethtool_speed; base->port = PORT_NONE; if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { base->port = PORT_TP; - ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, - TP); - ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising, - TP); + linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, + lk_ksettings->link_modes.supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, + lk_ksettings->link_modes.advertising); } else { - ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, - FIBRE); - ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising, - FIBRE); + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + lk_ksettings->link_modes.supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + lk_ksettings->link_modes.advertising); if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC) base->port = PORT_DA; - else if (link_info->media_type == - PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE) + else base->port = PORT_FIBRE; } base->phy_address = link_info->phy_addr; @@ -1783,13 +2068,15 @@ static int bnxt_get_link_ksettings(struct net_device *dev, return 0; } -static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed) +static int +bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes) { struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info = &bp->link_info; u16 support_pam4_spds = link_info->support_pam4_speeds; u16 support_spds = link_info->support_speeds; u8 sig_mode = BNXT_SIG_MODE_NRZ; + u32 lanes_needed = 1; u16 fw_speed = 0; switch (ethtool_speed) { @@ -1810,37 +2097,46 @@ static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed) fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB; break; case SPEED_20000: - if (support_spds & BNXT_LINK_SPEED_MSK_20GB) + if (support_spds & BNXT_LINK_SPEED_MSK_20GB) { fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB; + lanes_needed = 2; + } break; case SPEED_25000: if (support_spds & BNXT_LINK_SPEED_MSK_25GB) fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB; break; case SPEED_40000: - if (support_spds & BNXT_LINK_SPEED_MSK_40GB) + if (support_spds & BNXT_LINK_SPEED_MSK_40GB) { fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB; + 
lanes_needed = 4; + } break; case SPEED_50000: - if (support_spds & BNXT_LINK_SPEED_MSK_50GB) { + if ((support_spds & BNXT_LINK_SPEED_MSK_50GB) && lanes != 1) { fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB; + lanes_needed = 2; } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) { fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB; sig_mode = BNXT_SIG_MODE_PAM4; } break; case SPEED_100000: - if (support_spds & BNXT_LINK_SPEED_MSK_100GB) { + if ((support_spds & BNXT_LINK_SPEED_MSK_100GB) && + lanes != 2 && lanes != 1) { fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB; + lanes_needed = 4; } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) { fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB; sig_mode = BNXT_SIG_MODE_PAM4; + lanes_needed = 2; } break; case SPEED_200000: if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) { fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB; sig_mode = BNXT_SIG_MODE_PAM4; + lanes_needed = 4; } break; } @@ -1850,6 +2146,11 @@ static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed) return -EINVAL; } + if (lanes && lanes != lanes_needed) { + netdev_err(dev, "unsupported number of lanes for speed\n"); + return -EINVAL; + } + if (link_info->req_link_speed == fw_speed && link_info->req_signal_mode == sig_mode && link_info->autoneg == 0) @@ -1894,7 +2195,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev, struct bnxt_link_info *link_info = &bp->link_info; const struct ethtool_link_settings *base = &lk_ksettings->base; bool set_pause = false; - u32 speed; + u32 speed, lanes = 0; int rc = 0; if (!BNXT_PHY_CFG_ABLE(bp)) @@ -1902,12 +2203,8 @@ static int bnxt_set_link_ksettings(struct net_device *dev, mutex_lock(&bp->link_lock); if (base->autoneg == AUTONEG_ENABLE) { - link_info->advertising = 0; - link_info->advertising_pam4 = 0; - BNXT_ETHTOOL_TO_FW_SPDS(link_info->advertising, lk_ksettings, - advertising); - BNXT_ETHTOOL_TO_FW_PAM4_SPDS(link_info->advertising_pam4, - lk_ksettings, advertising); + bnxt_set_ethtool_speeds(link_info, + lk_ksettings->link_modes.advertising); link_info->autoneg |= BNXT_AUTONEG_SPEED; if (!link_info->advertising && !link_info->advertising_pam4) { link_info->advertising = link_info->support_auto_speeds; @@ -1935,7 +2232,8 @@ static int bnxt_set_link_ksettings(struct net_device *dev, goto set_setting_exit; } speed = base->speed; - rc = bnxt_force_link_speed(dev, speed); + lanes = lk_ksettings->lanes; + rc = bnxt_force_link_speed(dev, speed, lanes); if (rc) { if (rc == -EALREADY) rc = 0; @@ -3738,12 +4036,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, bnxt_run_fw_tests(bp, test_mask, &test_results); } else { bnxt_ulp_stop(bp); - rc = bnxt_close_nic(bp, true, false); - if (rc) { - etest->flags |= ETH_TEST_FL_FAILED; - bnxt_ulp_start(bp, rc); - return; - } + bnxt_close_nic(bp, true, false); bnxt_run_fw_tests(bp, test_mask, &test_results); buf[BNXT_MACLPBK_TEST_IDX] = 1; @@ -4141,6 +4434,7 @@ void bnxt_ethtool_free(struct bnxt *bp) } const struct ethtool_ops bnxt_ethtool_ops = { + .cap_link_lanes_supported = 1, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USECS_IRQ | diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index b31de4cf6534..d5fad5a3cdd1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -2,7 +2,7 @@ * * Copyright (c) 2014-2016 Broadcom Corporation 
* Copyright (c) 2014-2018 Broadcom Limited - * Copyright (c) 2018-2022 Broadcom Inc. + * Copyright (c) 2018-2023 Broadcom Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -191,6 +191,11 @@ struct cmd_nums { #define HWRM_QUEUE_VLANPRI2PRI_CFG 0x85UL #define HWRM_QUEUE_GLOBAL_CFG 0x86UL #define HWRM_QUEUE_GLOBAL_QCFG 0x87UL + #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG 0x88UL + #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG 0x89UL + #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG 0x8aUL + #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG 0x8bUL + #define HWRM_QUEUE_QCAPS 0x8cUL #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL #define HWRM_CFA_L2_FILTER_FREE 0x91UL #define HWRM_CFA_L2_FILTER_CFG 0x92UL @@ -315,6 +320,7 @@ struct cmd_nums { #define HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR 0x127UL #define HWRM_CFA_TLS_FILTER_ALLOC 0x128UL #define HWRM_CFA_TLS_FILTER_FREE 0x129UL + #define HWRM_CFA_RELEASE_AFM_FUNC 0x12aUL #define HWRM_ENGINE_CKV_STATUS 0x12eUL #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL @@ -383,6 +389,9 @@ struct cmd_nums { #define HWRM_FUNC_DBR_RECOVERY_COMPLETED 0x1aaUL #define HWRM_FUNC_SYNCE_CFG 0x1abUL #define HWRM_FUNC_SYNCE_QCFG 0x1acUL + #define HWRM_FUNC_KEY_CTX_FREE 0x1adUL + #define HWRM_FUNC_LAG_MODE_CFG 0x1aeUL + #define HWRM_FUNC_LAG_MODE_QCFG 0x1afUL #define HWRM_SELFTEST_QLIST 0x200UL #define HWRM_SELFTEST_EXEC 0x201UL #define HWRM_SELFTEST_IRQ 0x202UL @@ -408,10 +417,10 @@ struct cmd_nums { #define HWRM_MFG_SELFTEST_QLIST 0x216UL #define HWRM_MFG_SELFTEST_EXEC 0x217UL #define HWRM_STAT_GENERIC_QSTATS 0x218UL + #define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL #define HWRM_TF 0x2bcUL #define HWRM_TF_VERSION_GET 0x2bdUL #define HWRM_TF_SESSION_OPEN 0x2c6UL - #define HWRM_TF_SESSION_ATTACH 0x2c7UL #define HWRM_TF_SESSION_REGISTER 0x2c8UL #define HWRM_TF_SESSION_UNREGISTER 0x2c9UL #define HWRM_TF_SESSION_CLOSE 0x2caUL @@ -426,14 +435,6 @@ struct cmd_nums { #define HWRM_TF_TBL_TYPE_GET 0x2daUL #define HWRM_TF_TBL_TYPE_SET 0x2dbUL #define HWRM_TF_TBL_TYPE_BULK_GET 0x2dcUL - #define HWRM_TF_CTXT_MEM_ALLOC 0x2e2UL - #define HWRM_TF_CTXT_MEM_FREE 0x2e3UL - #define HWRM_TF_CTXT_MEM_RGTR 0x2e4UL - #define HWRM_TF_CTXT_MEM_UNRGTR 0x2e5UL - #define HWRM_TF_EXT_EM_QCAPS 0x2e6UL - #define HWRM_TF_EXT_EM_OP 0x2e7UL - #define HWRM_TF_EXT_EM_CFG 0x2e8UL - #define HWRM_TF_EXT_EM_QCFG 0x2e9UL #define HWRM_TF_EM_INSERT 0x2eaUL #define HWRM_TF_EM_DELETE 0x2ebUL #define HWRM_TF_EM_HASH_INSERT 0x2ecUL @@ -465,6 +466,14 @@ struct cmd_nums { #define HWRM_TFC_IDX_TBL_GET 0x390UL #define HWRM_TFC_IDX_TBL_FREE 0x391UL #define HWRM_TFC_GLOBAL_ID_ALLOC 0x392UL + #define HWRM_TFC_TCAM_SET 0x393UL + #define HWRM_TFC_TCAM_GET 0x394UL + #define HWRM_TFC_TCAM_ALLOC 0x395UL + #define HWRM_TFC_TCAM_ALLOC_SET 0x396UL + #define HWRM_TFC_TCAM_FREE 0x397UL + #define HWRM_TFC_IF_TBL_SET 0x398UL + #define HWRM_TFC_IF_TBL_GET 0x399UL + #define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL #define HWRM_SV 0x400UL #define HWRM_DBG_READ_DIRECT 0xff10UL #define HWRM_DBG_READ_INDIRECT 0xff11UL @@ -494,6 +503,8 @@ struct cmd_nums { #define HWRM_DBG_USEQ_RUN 0xff29UL #define HWRM_DBG_USEQ_DELIVERY_REQ 0xff2aUL #define HWRM_DBG_USEQ_RESP_HDR 0xff2bUL + #define HWRM_NVM_GET_VPD_FIELD_INFO 0xffeaUL + #define HWRM_NVM_SET_VPD_FIELD_INFO 0xffebUL #define HWRM_NVM_DEFRAG 0xffecUL #define HWRM_NVM_REQ_ARBITRATION 0xffedUL #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL @@ -540,6 +551,7 @@ struct ret_codes { #define 
HWRM_ERR_CODE_BUSY 0x10UL #define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL #define HWRM_ERR_CODE_PF_UNAVAILABLE 0x12UL + #define HWRM_ERR_CODE_ENTITY_NOT_PRESENT 0x13UL #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL @@ -571,8 +583,8 @@ struct hwrm_err_output { #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 10 #define HWRM_VERSION_UPDATE 2 -#define HWRM_VERSION_RSVD 118 -#define HWRM_VERSION_STR "1.10.2.118" +#define HWRM_VERSION_RSVD 171 +#define HWRM_VERSION_STR "1.10.2.171" /* hwrm_ver_get_input (size:192b/24B) */ struct hwrm_ver_get_input { @@ -761,51 +773,53 @@ struct hwrm_async_event_cmpl { #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL #define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT __le16 event_id; - #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL - #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL - #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL - #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL - #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL - #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL - #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL - #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL - #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL - #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL - #define ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG 0xaUL - #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL - #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL - #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL - #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL - #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL - #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL - #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL - #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL - #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL - #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL - #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL - #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL - #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL - #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL - #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL - #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL - #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL - #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL - #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL - #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL - #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL - #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL - #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL - #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL - #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL - #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL - #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL - #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL - #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL - #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL - #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x49UL - #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL - #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL - #define ASYNC_EVENT_CMPL_EVENT_ID_LAST 
ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL + #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL + #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL + #define ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG 0xaUL + #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL + #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL + #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL + #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL + #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL + #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL + #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL + #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL + #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL + #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL + #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL + #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL + #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL + #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL + #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL + #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL + #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL + #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL + #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL + #define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL + #define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL + #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4bUL + #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL + #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL + #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR __le32 event_data2; u8 opaque_v; #define ASYNC_EVENT_CMPL_V 0x1UL @@ -1011,6 +1025,7 @@ struct hwrm_async_event_cmpl_vf_cfg_change { #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TF_OWNERSHIP_RELEASE 0x20UL }; /* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */ @@ -1402,6 +1417,45 @@ struct hwrm_async_event_cmpl_error_report_doorbell_drop_threshold { #define 
ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_SFT 8 }; +/* hwrm_async_event_cmpl_error_report_thermal (size:128b/16B) */ +struct hwrm_async_event_cmpl_error_report_thermal { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT + __le32 event_data2; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK 0xff00UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT 8 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT 0x5UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK 0x700UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SFT 8 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN (0x0UL << 8) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL (0x1UL << 8) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL (0x2UL << 8) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN (0x3UL << 8) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR 0x800UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_DECREASING (0x0UL << 11) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING (0x1UL << 11) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING +}; + /* hwrm_func_reset_input (size:192b/24B) */ struct hwrm_func_reset_input { __le16 req_type; @@ -1502,7 +1556,7 @@ struct hwrm_func_vf_free_output { u8 valid; }; -/* hwrm_func_vf_cfg_input (size:448b/56B) */ +/* hwrm_func_vf_cfg_input (size:576b/72B) */ struct hwrm_func_vf_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -1510,20 +1564,22 @@ struct hwrm_func_vf_cfg_input { __le16 target_id; __le64 resp_addr; __le32 enables; - #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL - #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL - #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL - #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL 
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL - #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL - #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL - #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL - #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL - #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL - #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL - #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL - #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_KEY_CTXS 0x1000UL - #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_KEY_CTXS 0x2000UL + #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL + #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL + #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL + #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_TX_KEY_CTXS 0x1000UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_RX_KEY_CTXS 0x2000UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_TX_KEY_CTXS 0x4000UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_RX_KEY_CTXS 0x8000UL __le16 mtu; __le16 guest_vlan; __le16 async_event_cr; @@ -1547,8 +1603,12 @@ struct hwrm_func_vf_cfg_input { __le16 num_vnics; __le16 num_stat_ctxs; __le16 num_hw_ring_grps; - __le16 num_tx_key_ctxs; - __le16 num_rx_key_ctxs; + __le32 num_ktls_tx_key_ctxs; + __le32 num_ktls_rx_key_ctxs; + __le16 num_msix; + u8 unused[2]; + __le32 num_quic_tx_key_ctxs; + __le32 num_quic_rx_key_ctxs; }; /* hwrm_func_vf_cfg_output (size:128b/16B) */ @@ -1572,7 +1632,7 @@ struct hwrm_func_qcaps_input { u8 unused_0[6]; }; -/* hwrm_func_qcaps_output (size:768b/96B) */ +/* hwrm_func_qcaps_output (size:896b/112B) */ struct hwrm_func_qcaps_output { __le16 error_code; __le16 req_type; @@ -1686,6 +1746,11 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL #define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_HW_LAG_SUPPORTED 0x400UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_ON_CHIP_CTX_SUPPORTED 0x800UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_STEERING_TAG_SUPPORTED 0x1000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED 0x2000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED 0x4000UL __le16 tunnel_disable_flag; #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL @@ -1695,7 +1760,15 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP 0x20UL #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL + u8 key_xid_partition_cap; + #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_TKC 0x1UL + #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_RKC 0x2UL + #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_QUIC_TKC 0x4UL + #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_QUIC_RKC 0x8UL u8 unused_1; + u8 device_serial_number[8]; + __le16 ctxs_per_partition; + u8 unused_2[5]; u8 valid; }; @@ -1710,7 +1783,7 @@ struct hwrm_func_qcfg_input { u8 
unused_0[6]; }; -/* hwrm_func_qcfg_output (size:896b/112B) */ +/* hwrm_func_qcfg_output (size:1024b/128B) */ struct hwrm_func_qcfg_output { __le16 error_code; __le16 req_type; @@ -1870,19 +1943,24 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 __le16 host_mtu; - __le16 alloc_tx_key_ctxs; - __le16 alloc_rx_key_ctxs; + u8 unused_3[2]; + u8 unused_4[2]; u8 port_kdnet_mode; #define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL #define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL #define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED u8 kdnet_pcie_function; __le16 port_kdnet_fid; - u8 unused_3; + u8 unused_5[2]; + __le32 alloc_tx_key_ctxs; + __le32 alloc_rx_key_ctxs; + u8 lag_id; + u8 parif; + u8 unused_6[5]; u8 valid; }; -/* hwrm_func_cfg_input (size:960b/120B) */ +/* hwrm_func_cfg_input (size:1088b/136B) */ struct hwrm_func_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -2061,8 +2139,7 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 __be16 tpid; __le16 host_mtu; - __le16 num_tx_key_ctxs; - __le16 num_rx_key_ctxs; + u8 unused_0[4]; __le32 enables2; #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL @@ -2083,7 +2160,12 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_DB_PAGE_SIZE_2MB 0x9UL #define FUNC_CFG_REQ_DB_PAGE_SIZE_4MB 0xaUL #define FUNC_CFG_REQ_DB_PAGE_SIZE_LAST FUNC_CFG_REQ_DB_PAGE_SIZE_4MB - u8 unused_0[6]; + u8 unused_1[2]; + __le32 num_ktls_tx_key_ctxs; + __le32 num_ktls_rx_key_ctxs; + __le32 num_quic_tx_key_ctxs; + __le32 num_quic_rx_key_ctxs; + __le32 unused_2; }; /* hwrm_func_cfg_output (size:128b/16B) */ @@ -2390,7 +2472,11 @@ struct hwrm_func_drv_qver_input { __le64 resp_addr; __le32 reserved; __le16 fid; - u8 unused_0[2]; + u8 driver_type; + #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_L2 0x0UL + #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE 0x1UL + #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_LAST FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE + u8 unused_0; }; /* hwrm_func_drv_qver_output (size:256b/32B) */ @@ -2435,7 +2521,7 @@ struct hwrm_func_resource_qcaps_input { u8 unused_0[6]; }; -/* hwrm_func_resource_qcaps_output (size:512b/64B) */ +/* hwrm_func_resource_qcaps_output (size:704b/88B) */ struct hwrm_func_resource_qcaps_output { __le16 error_code; __le16 req_type; @@ -2467,15 +2553,20 @@ struct hwrm_func_resource_qcaps_output { __le16 max_tx_scheduler_inputs; __le16 flags; #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL - __le16 min_tx_key_ctxs; - __le16 max_tx_key_ctxs; - __le16 min_rx_key_ctxs; - __le16 max_rx_key_ctxs; - u8 unused_0[5]; + __le16 min_msix; + __le32 min_ktls_tx_key_ctxs; + __le32 max_ktls_tx_key_ctxs; + __le32 min_ktls_rx_key_ctxs; + __le32 max_ktls_rx_key_ctxs; + __le32 min_quic_tx_key_ctxs; + __le32 max_quic_tx_key_ctxs; + __le32 min_quic_rx_key_ctxs; + __le32 max_quic_rx_key_ctxs; + u8 unused_0[3]; u8 valid; }; -/* hwrm_func_vf_resource_cfg_input (size:512b/64B) */ +/* hwrm_func_vf_resource_cfg_input (size:704b/88B) */ struct hwrm_func_vf_resource_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -2502,14 +2593,18 @@ struct hwrm_func_vf_resource_cfg_input { __le16 max_hw_ring_grps; __le16 flags; #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL - __le16 min_tx_key_ctxs; - __le16 max_tx_key_ctxs; - 
__le16 min_rx_key_ctxs; - __le16 max_rx_key_ctxs; - u8 unused_0[2]; -}; - -/* hwrm_func_vf_resource_cfg_output (size:256b/32B) */ + __le16 min_msix; + __le32 min_ktls_tx_key_ctxs; + __le32 max_ktls_tx_key_ctxs; + __le32 min_ktls_rx_key_ctxs; + __le32 max_ktls_rx_key_ctxs; + __le32 min_quic_tx_key_ctxs; + __le32 max_quic_tx_key_ctxs; + __le32 min_quic_rx_key_ctxs; + __le32 max_quic_rx_key_ctxs; +}; + +/* hwrm_func_vf_resource_cfg_output (size:320b/40B) */ struct hwrm_func_vf_resource_cfg_output { __le16 error_code; __le16 req_type; @@ -2523,9 +2618,9 @@ struct hwrm_func_vf_resource_cfg_output { __le16 reserved_vnics; __le16 reserved_stat_ctx; __le16 reserved_hw_ring_grps; - __le16 reserved_tx_key_ctxs; - __le16 reserved_rx_key_ctxs; - u8 unused_0[3]; + __le32 reserved_tx_key_ctxs; + __le32 reserved_rx_key_ctxs; + u8 unused_0[7]; u8 valid; }; @@ -2592,7 +2687,8 @@ struct hwrm_func_backing_store_qcaps_output { __le16 rkc_entry_size; __le32 tkc_max_entries; __le32 rkc_max_entries; - u8 rsvd1[7]; + __le16 fast_qpmd_qp_num_entries; + u8 rsvd1[5]; u8 valid; }; @@ -2630,27 +2726,28 @@ struct hwrm_func_backing_store_cfg_input { #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL __le32 enables; - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TKC 0x80000UL - #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_RKC 0x100000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL + #define 
FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TKC 0x80000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_RKC 0x100000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD 0x200000UL u8 qpc_pg_size_qpc_lvl; #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0 @@ -3047,7 +3144,7 @@ struct hwrm_func_backing_store_cfg_input { #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8M (0x4UL << 4) #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G (0x5UL << 4) #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G - u8 rsvd[2]; + __le16 qp_num_fast_qpmd_entries; }; /* hwrm_func_backing_store_cfg_output (size:128b/16B) */ @@ -3477,6 +3574,8 @@ struct hwrm_func_backing_store_cfg_v2_input { #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID __le16 instance; @@ -3546,6 +3645,8 @@ struct hwrm_func_backing_store_qcfg_v2_input { #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID __le16 instance; @@ -3559,22 +3660,24 @@ struct hwrm_func_backing_store_qcfg_v2_output { __le16 seq_id; __le16 resp_len; __le16 type; - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TKC 0x13UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RKC 0x14UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_TKC 0x1aUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_RKC 0x1bUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL + #define 
FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TKC 0x13UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RKC 0x14UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_TKC 0x1aUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_RKC 0x1bUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID __le16 instance; __le32 flags; __le64 page_dir; @@ -3609,7 +3712,8 @@ struct hwrm_func_backing_store_qcfg_v2_output { struct qpc_split_entries { __le32 qp_num_l2_entries; __le32 qp_num_qp1_entries; - __le32 rsvd[2]; + __le32 qp_num_fast_qpmd_entries; + __le32 rsvd; }; /* srq_split_entries (size:128b/16B) */ @@ -3666,6 +3770,8 @@ struct hwrm_func_backing_store_qcaps_v2_input { #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC 0x1aUL #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC 0x1bUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID u8 rsvd[6]; @@ -3696,13 +3802,16 @@ struct hwrm_func_backing_store_qcaps_v2_output { #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_TKC 0x1aUL #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_RKC 0x1bUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID __le16 entry_size; __le32 flags; - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ROCE_QP_PSEUDO_STATIC_ALLOC 0x8UL __le32 instance_bit_map; u8 ctx_init_value; u8 ctx_init_offset; @@ -3712,7 +3821,13 @@ struct hwrm_func_backing_store_qcaps_v2_output { __le32 min_num_entries; __le16 next_valid_type; u8 subtype_valid_cnt; - u8 rsvd2; + u8 exact_cnt_bit_map; + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_0_EXACT 0x1UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_1_EXACT 0x2UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_2_EXACT 0x4UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_3_EXACT 0x8UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_SFT 4 __le32 split_entry_0; __le32 split_entry_1; __le32 split_entry_2; @@ -3721,6 +3836,60 @@ struct 
hwrm_func_backing_store_qcaps_v2_output { u8 valid; }; +/* hwrm_func_dbr_pacing_qcfg_input (size:128b/16B) */ +struct hwrm_func_dbr_pacing_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_func_dbr_pacing_qcfg_output (size:512b/64B) */ +struct hwrm_func_dbr_pacing_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; +#define FUNC_DBR_PACING_QCFG_RESP_FLAGS_DBR_NQ_EVENT_ENABLED 0x1UL + u8 unused_0[7]; + __le32 dbr_stat_db_fifo_reg; +#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK 0x3UL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_SFT 0 +#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_PCIE_CFG 0x0UL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC 0x1UL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR0 0x2UL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 0x3UL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_LAST \ + FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 +#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK 0xfffffffcUL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SFT 2 + __le32 dbr_stat_db_fifo_reg_watermark_mask; + u8 dbr_stat_db_fifo_reg_watermark_shift; + u8 unused_1[3]; + __le32 dbr_stat_db_fifo_reg_fifo_room_mask; + u8 dbr_stat_db_fifo_reg_fifo_room_shift; + u8 unused_2[3]; + __le32 dbr_throttling_aeq_arm_reg; +#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK 0x3UL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_SFT 0 +#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_PCIE_CFG 0x0UL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC 0x1UL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR0 0x2UL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 0x3UL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_LAST \ + FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 +#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_MASK 0xfffffffcUL +#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SFT 2 + u8 dbr_throttling_aeq_arm_reg_val; + u8 unused_3[7]; + __le32 primary_nq_id; + __le32 pacing_threshold; + u8 unused_4[7]; + u8 valid; +}; + /* hwrm_func_drv_if_change_input (size:192b/24B) */ struct hwrm_func_drv_if_change_input { __le16 req_type; @@ -4545,7 +4714,7 @@ struct tx_port_stats_ext { __le64 pfc_pri7_tx_transitions; }; -/* rx_port_stats_ext (size:3776b/472B) */ +/* rx_port_stats_ext (size:3904b/488B) */ struct rx_port_stats_ext { __le64 link_down_events; __le64 continuous_pause_events; @@ -4606,6 +4775,8 @@ struct rx_port_stats_ext { __le64 rx_discard_packets_cos7; __le64 rx_fec_corrected_blocks; __le64 rx_fec_uncorrectable_blocks; + __le64 rx_filter_miss; + __le64 rx_fec_symbol_err; }; /* hwrm_port_qstats_ext_input (size:320b/40B) */ @@ -5739,286 +5910,48 @@ struct hwrm_queue_cos2bw_qcfg_output { #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL u8 queue_id0_pri_lvl; u8 queue_id0_bw_weight; - u8 queue_id1; - __le32 queue_id1_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 
0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id1_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id1_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id1_pri_lvl; - u8 queue_id1_bw_weight; - u8 queue_id2; - __le32 queue_id2_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) 
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id2_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id2_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id2_pri_lvl; - u8 queue_id2_bw_weight; - u8 queue_id3; - __le32 queue_id3_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id3_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id3_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id3_pri_lvl; - u8 queue_id3_bw_weight; - u8 queue_id4; - __le32 queue_id4_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id4_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id4_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id4_pri_lvl; - u8 queue_id4_bw_weight; - u8 queue_id5; - __le32 queue_id5_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id5_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id5_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id5_pri_lvl; - u8 queue_id5_bw_weight; - u8 queue_id6; - __le32 queue_id6_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id6_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id6_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id6_pri_lvl; - u8 queue_id6_bw_weight; - u8 queue_id7; - __le32 queue_id7_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id7_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id7_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id7_pri_lvl; - u8 queue_id7_bw_weight; + struct { + u8 queue_id; + __le32 queue_id_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id_pri_lvl; + u8 queue_id_bw_weight; + } __packed cfg[7]; u8 unused_2[4]; u8 valid; }; @@ -6082,286 +6015,48 @@ struct hwrm_queue_cos2bw_cfg_input { #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL u8 queue_id0_pri_lvl; u8 queue_id0_bw_weight; - u8 queue_id1; - __le32 queue_id1_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id1_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id1_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id1_pri_lvl; - u8 queue_id1_bw_weight; - u8 queue_id2; - __le32 queue_id2_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id2_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id2_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id2_pri_lvl; - u8 queue_id2_bw_weight; - u8 queue_id3; - __le32 queue_id3_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id3_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id3_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id3_pri_lvl; - u8 queue_id3_bw_weight; - u8 queue_id4; - __le32 queue_id4_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id4_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id4_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id4_pri_lvl; - u8 queue_id4_bw_weight; - u8 queue_id5; - __le32 queue_id5_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL - 
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id5_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id5_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id5_pri_lvl; - u8 queue_id5_bw_weight; - u8 queue_id6; - __le32 queue_id6_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id6_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id6_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id6_pri_lvl; - u8 queue_id6_bw_weight; - u8 queue_id7; - __le32 queue_id7_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id7_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id7_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id7_pri_lvl; - u8 queue_id7_bw_weight; + struct { + u8 queue_id; + __le32 queue_id_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + 
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id_pri_lvl; + u8 queue_id_bw_weight; + } __packed cfg[7]; u8 unused_1[5]; }; @@ -6514,6 +6209,7 @@ struct hwrm_vnic_cfg_input { #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL + #define VNIC_CFG_REQ_FLAGS_PORTCOS_MAPPING_MODE 0x80UL __le32 enables; #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL @@ -6603,12 +6299,16 @@ struct hwrm_vnic_qcaps_output { #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP 0x800000UL #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP 0x1000000UL #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_TRUSTED_VF_CAP 0x2000000UL + #define VNIC_QCAPS_RESP_FLAGS_PORTCOS_MAPPING_MODE 0x4000000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED 0x8000000UL + #define VNIC_QCAPS_RESP_FLAGS_VNIC_RSS_HASH_MODE_CAP 0x10000000UL + #define VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP 0x20000000UL __le16 max_aggs_supported; u8 unused_1[5]; u8 valid; }; -/* hwrm_vnic_tpa_cfg_input (size:320b/40B) */ +/* hwrm_vnic_tpa_cfg_input (size:384b/48B) */ struct hwrm_vnic_tpa_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -6630,6 +6330,7 @@ struct hwrm_vnic_tpa_cfg_input { #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL + #define VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN 0x10UL __le16 vnic_id; __le16 max_agg_segs; #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL @@ -6649,6 +6350,25 @@ struct hwrm_vnic_tpa_cfg_input { u8 unused_0[2]; __le32 max_agg_timer; __le32 min_agg_len; + __le32 tnl_tpa_en_bitmap; + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN 0x1UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE 0x2UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_NVGRE 0x4UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE 0x8UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 0x10UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6 0x20UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR1 0x200UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR2 0x400UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR3 0x800UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL + u8 unused_1[4]; }; /* hwrm_vnic_tpa_cfg_output (size:128b/16B) */ @@ -6704,7 +6424,25 @@ 
struct hwrm_vnic_tpa_qcfg_output { #define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX __le32 max_agg_timer; __le32 min_agg_len; - u8 unused_0[7]; + __le32 tnl_tpa_en_bitmap; + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN 0x1UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GENEVE 0x2UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_NVGRE 0x4UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE 0x8UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV4 0x10UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV6 0x20UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR1 0x200UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR2 0x400UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR3 0x800UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL + u8 unused_0[3]; u8 valid; }; @@ -6739,8 +6477,9 @@ struct hwrm_vnic_rss_cfg_input { __le64 hash_key_tbl_addr; __le16 rss_ctx_idx; u8 flags; - #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL - #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL + #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL + #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL + #define VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT 0x4UL u8 ring_select_mode; #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ 0x0UL #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_XOR 0x1UL @@ -6902,14 +6641,15 @@ struct hwrm_ring_alloc_input { __le16 target_id; __le64 resp_addr; __le32 enables; - #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL - #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL - #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL - #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL - #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL - #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL - #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL - #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL + #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL + #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL + #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL + #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL + #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL + #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL + #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL + #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL + #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL u8 ring_type; #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL @@ -6963,7 +6703,7 @@ struct hwrm_ring_alloc_input { #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4 #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8 - __le16 unused_3; + __le16 steering_tag; __le32 reserved3; __le32 stat_ctx_id; __le32 reserved4; @@ -7339,6 +7079,7 @@ struct hwrm_cfa_l2_filter_alloc_input { #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define 
CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL u8 unused_4; @@ -7521,6 +7262,7 @@ struct hwrm_cfa_tunnel_filter_alloc_input { #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL u8 tunnel_flags; @@ -7655,7 +7397,8 @@ struct hwrm_cfa_encap_record_alloc_input { #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6 0xcUL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6 + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE 0x10UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE u8 unused_0[3]; __le32 encap_data[20]; }; @@ -7760,6 +7503,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input { #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL u8 pri_hint; @@ -7907,6 +7651,7 @@ struct hwrm_cfa_decap_filter_alloc_input { #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL u8 unused_0; @@ -8050,6 +7795,7 @@ struct hwrm_cfa_flow_alloc_input { #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL }; @@ -8475,8 +8221,11 @@ struct hwrm_tunnel_dst_port_query_input { #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI - u8 unused_0[7]; + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE + u8 tunnel_next_proto; + u8 unused_0[6]; }; /* hwrm_tunnel_dst_port_query_output (size:128b/16B) */ @@ -8516,10 +8265,12 @@ struct hwrm_tunnel_dst_port_alloc_input { #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL 
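Alongside the repeated VXLAN_GPE (0x10UL) additions to the CFA filter tunnel-type enums above, the tunnel_dst_port_{query,alloc,free} requests here gain a tunnel_next_proto byte and the new SRV6 (0xfUL) and VXLAN_GPE types, so a destination UDP port can now be keyed by next protocol as well as tunnel type. A hedged sketch of an allocation call built on the hwrm_req_* helpers visible elsewhere in this patch; the function name is illustrative, and bnxt.c keeps its own real allocator:

/* Illustrative only: request a VXLAN-GPE destination UDP port.
 * Assumes the hwrm_req_init()/hwrm_req_send() helpers from bnxt_hwrm.h.
 */
static int bnxt_alloc_vxlan_gpe_port(struct bnxt *bp, __be16 port)
{
	struct hwrm_tunnel_dst_port_alloc_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
	if (rc)
		return rc;

	req->tunnel_type = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
	req->tunnel_next_proto = 0;	/* left at default; semantics are firmware-defined */
	req->tunnel_dst_port_val = port;	/* field is __be16 on the wire */
	return hwrm_req_send(bp, req);
}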
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI - u8 unused_0; + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE + u8 tunnel_next_proto; __be16 tunnel_dst_port_val; - u8 unused_1[4]; + u8 unused_0[4]; }; /* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */ @@ -8563,10 +8314,12 @@ struct hwrm_tunnel_dst_port_free_input { #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI - u8 unused_0; + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE + u8 tunnel_next_proto; __le16 tunnel_dst_port_id; - u8 unused_1[4]; + u8 unused_0[4]; }; /* hwrm_tunnel_dst_port_free_output (size:128b/16B) */ @@ -8634,7 +8387,7 @@ struct ctx_hw_stats_ext { __le64 rx_tpa_events; }; -/* hwrm_stat_ctx_alloc_input (size:256b/32B) */ +/* hwrm_stat_ctx_alloc_input (size:320b/40B) */ struct hwrm_stat_ctx_alloc_input { __le16 req_type; __le16 cmpl_ring; @@ -8647,6 +8400,10 @@ struct hwrm_stat_ctx_alloc_input { #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL u8 unused_0; __le16 stats_dma_length; + __le16 flags; + #define STAT_CTX_ALLOC_REQ_FLAGS_STEERING_TAG_VALID 0x1UL + __le16 steering_tag; + __le32 unused_1; }; /* hwrm_stat_ctx_alloc_output (size:128b/16B) */ @@ -8854,7 +8611,7 @@ struct hwrm_stat_generic_qstats_output { u8 valid; }; -/* generic_sw_hw_stats (size:1216b/152B) */ +/* generic_sw_hw_stats (size:1408b/176B) */ struct generic_sw_hw_stats { __le64 pcie_statistics_tx_tlp; __le64 pcie_statistics_rx_tlp; @@ -8875,6 +8632,9 @@ struct generic_sw_hw_stats { __le64 cache_miss_count_cfcs; __le64 cache_miss_count_cfcc; __le64 cache_miss_count_cfcm; + __le64 hw_db_recov_dbs_dropped; + __le64 hw_db_recov_drops_serviced; + __le64 hw_db_recov_dbs_recovered; }; /* hwrm_fw_reset_input (size:192b/24B) */ @@ -9298,7 +9058,7 @@ struct hwrm_temp_monitor_query_input { __le64 resp_addr; }; -/* hwrm_temp_monitor_query_output (size:128b/16B) */ +/* hwrm_temp_monitor_query_output (size:192b/24B) */ struct hwrm_temp_monitor_query_output { __le16 error_code; __le16 req_type; @@ -9308,14 +9068,20 @@ struct hwrm_temp_monitor_query_output { u8 phy_temp; u8 om_temp; u8 flags; - #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL - #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL - #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL - #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL - #define TEMP_MONITOR_QUERY_RESP_FLAGS_EXT_TEMP_FIELDS_AVAILABLE 0x10UL + #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL + #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL + #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL + #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL + #define TEMP_MONITOR_QUERY_RESP_FLAGS_EXT_TEMP_FIELDS_AVAILABLE 0x10UL + #define TEMP_MONITOR_QUERY_RESP_FLAGS_THRESHOLD_VALUES_AVAILABLE 0x20UL u8 
temp2; u8 phy_temp2; u8 om_temp2; + u8 warn_threshold; + u8 critical_threshold; + u8 fatal_threshold; + u8 shutdown_threshold; + u8 unused_0[4]; u8 valid; }; @@ -9739,7 +9505,8 @@ struct hwrm_dbg_ring_info_get_output { __le32 producer_index; __le32 consumer_index; __le32 cag_vector_ctrl; - u8 unused_0[3]; + __le16 st_tag; + u8 unused_0; u8 valid; }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c new file mode 100644 index 000000000000..669d24ba0e87 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c @@ -0,0 +1,241 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2023 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include <linux/dev_printk.h> +#include <linux/errno.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/pci.h> + +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "bnxt_hwmon.h" + +void bnxt_hwmon_notify_event(struct bnxt *bp) +{ + u32 attr; + + if (!bp->hwmon_dev) + return; + + switch (bp->thermal_threshold_type) { + case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN: + attr = hwmon_temp_max_alarm; + break; + case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL: + attr = hwmon_temp_crit_alarm; + break; + case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL: + case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN: + attr = hwmon_temp_emergency_alarm; + break; + default: + return; + } + + hwmon_notify_event(&bp->pdev->dev, hwmon_temp, attr, 0); +} + +static int bnxt_hwrm_temp_query(struct bnxt *bp, u8 *temp) +{ + struct hwrm_temp_monitor_query_output *resp; + struct hwrm_temp_monitor_query_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY); + if (rc) + return rc; + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); + if (rc) + goto drop_req; + + if (temp) { + *temp = resp->temp; + } else if (resp->flags & + TEMP_MONITOR_QUERY_RESP_FLAGS_THRESHOLD_VALUES_AVAILABLE) { + bp->fw_cap |= BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED; + bp->warn_thresh_temp = resp->warn_threshold; + bp->crit_thresh_temp = resp->critical_threshold; + bp->fatal_thresh_temp = resp->fatal_threshold; + bp->shutdown_thresh_temp = resp->shutdown_threshold; + } +drop_req: + hwrm_req_drop(bp, req); + return rc; +} + +static umode_t bnxt_hwmon_is_visible(const void *_data, enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct bnxt *bp = _data; + + if (type != hwmon_temp) + return 0; + + switch (attr) { + case hwmon_temp_input: + return 0444; + case hwmon_temp_max: + case hwmon_temp_crit: + case hwmon_temp_emergency: + case hwmon_temp_max_alarm: + case hwmon_temp_crit_alarm: + case hwmon_temp_emergency_alarm: + if (!(bp->fw_cap & BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED)) + return 0; + return 0444; + default: + return 0; + } +} + +static int bnxt_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, + int channel, long *val) +{ + struct bnxt *bp = dev_get_drvdata(dev); + u8 temp = 0; + int rc; + + switch (attr) { + case hwmon_temp_input: + rc = bnxt_hwrm_temp_query(bp, &temp); + if (!rc) + *val = temp * 1000; + return rc; + case hwmon_temp_max: + *val = bp->warn_thresh_temp * 1000; + return 0; + case hwmon_temp_crit: + *val = 
bp->crit_thresh_temp * 1000; + return 0; + case hwmon_temp_emergency: + *val = bp->fatal_thresh_temp * 1000; + return 0; + case hwmon_temp_max_alarm: + rc = bnxt_hwrm_temp_query(bp, &temp); + if (!rc) + *val = temp >= bp->warn_thresh_temp; + return rc; + case hwmon_temp_crit_alarm: + rc = bnxt_hwrm_temp_query(bp, &temp); + if (!rc) + *val = temp >= bp->crit_thresh_temp; + return rc; + case hwmon_temp_emergency_alarm: + rc = bnxt_hwrm_temp_query(bp, &temp); + if (!rc) + *val = temp >= bp->fatal_thresh_temp; + return rc; + default: + return -EOPNOTSUPP; + } +} + +static const struct hwmon_channel_info *bnxt_hwmon_info[] = { + HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT | + HWMON_T_EMERGENCY | HWMON_T_MAX_ALARM | + HWMON_T_CRIT_ALARM | HWMON_T_EMERGENCY_ALARM), + NULL +}; + +static const struct hwmon_ops bnxt_hwmon_ops = { + .is_visible = bnxt_hwmon_is_visible, + .read = bnxt_hwmon_read, +}; + +static const struct hwmon_chip_info bnxt_hwmon_chip_info = { + .ops = &bnxt_hwmon_ops, + .info = bnxt_hwmon_info, +}; + +static ssize_t temp1_shutdown_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bnxt *bp = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%u\n", bp->shutdown_thresh_temp * 1000); +} + +static ssize_t temp1_shutdown_alarm_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bnxt *bp = dev_get_drvdata(dev); + u8 temp; + int rc; + + rc = bnxt_hwrm_temp_query(bp, &temp); + if (rc) + return -EIO; + + return sysfs_emit(buf, "%u\n", temp >= bp->shutdown_thresh_temp); +} + +static DEVICE_ATTR_RO(temp1_shutdown); +static DEVICE_ATTR_RO(temp1_shutdown_alarm); + +static struct attribute *bnxt_temp_extra_attrs[] = { + &dev_attr_temp1_shutdown.attr, + &dev_attr_temp1_shutdown_alarm.attr, + NULL, +}; + +static umode_t bnxt_temp_extra_attrs_visible(struct kobject *kobj, + struct attribute *attr, int index) +{ + struct device *dev = kobj_to_dev(kobj); + struct bnxt *bp = dev_get_drvdata(dev); + + /* Shutdown temperature setting in NVM is optional */ + if (!(bp->fw_cap & BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED) || + !bp->shutdown_thresh_temp) + return 0; + + return attr->mode; +} + +static const struct attribute_group bnxt_temp_extra_group = { + .attrs = bnxt_temp_extra_attrs, + .is_visible = bnxt_temp_extra_attrs_visible, +}; +__ATTRIBUTE_GROUPS(bnxt_temp_extra); + +void bnxt_hwmon_uninit(struct bnxt *bp) +{ + if (bp->hwmon_dev) { + hwmon_device_unregister(bp->hwmon_dev); + bp->hwmon_dev = NULL; + } +} + +void bnxt_hwmon_init(struct bnxt *bp) +{ + struct pci_dev *pdev = bp->pdev; + int rc; + + /* temp1_xxx is only sensor, ensure not registered if it will fail */ + rc = bnxt_hwrm_temp_query(bp, NULL); + if (rc == -EACCES || rc == -EOPNOTSUPP) { + bnxt_hwmon_uninit(bp); + return; + } + + if (bp->hwmon_dev) + return; + + bp->hwmon_dev = hwmon_device_register_with_info(&pdev->dev, + DRV_MODULE_NAME, bp, + &bnxt_hwmon_chip_info, + bnxt_temp_extra_groups); + if (IS_ERR(bp->hwmon_dev)) { + bp->hwmon_dev = NULL; + dev_warn(&pdev->dev, "Cannot register hwmon device\n"); + } +} diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.h new file mode 100644 index 000000000000..de54a562e06a --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.h @@ -0,0 +1,30 @@ +/* Broadcom NetXtreme-C/E network driver. 
+ * + * Copyright (c) 2023 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_HWMON_H +#define BNXT_HWMON_H + +#ifdef CONFIG_BNXT_HWMON +void bnxt_hwmon_notify_event(struct bnxt *bp); +void bnxt_hwmon_uninit(struct bnxt *bp); +void bnxt_hwmon_init(struct bnxt *bp); +#else +static inline void bnxt_hwmon_notify_event(struct bnxt *bp) +{ +} + +static inline void bnxt_hwmon_uninit(struct bnxt *bp) +{ +} + +static inline void bnxt_hwmon_init(struct bnxt *bp) +{ +} +#endif +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c index 132442f16fe6..1df3d56cc4b5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c @@ -485,6 +485,8 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) if (msg_len > BNXT_HWRM_MAX_REQ_LEN && msg_len > bp->hwrm_max_ext_req_len) { + netdev_warn(bp->dev, "oversized hwrm request, req_type 0x%x", + req_type); rc = -E2BIG; goto exit; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h index c98032e38188..15ca51b5d204 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h @@ -137,4 +137,18 @@ int hwrm_req_send_silent(struct bnxt *bp, void *req); int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len); void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t flags); void *hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma); + +/* Older devices can only support req length of 128. + * HWRM_FUNC_CFG requests which don't need fields starting at + * num_quic_tx_key_ctxs can use this helper to avoid getting -E2BIG. 
+ */ +static inline int +bnxt_hwrm_func_cfg_short_req_init(struct bnxt *bp, + struct hwrm_func_cfg_input **req) +{ + u32 req_len; + + req_len = min_t(u32, sizeof(**req), bp->hwrm_max_ext_req_len); + return __hwrm_req_init(bp, (void **)req, HWRM_FUNC_CFG, req_len); +} #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index f3886710e778..6e3da3362bd6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -521,9 +521,8 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp) if (netif_running(bp->dev)) { if (ptp->rx_filter == HWTSTAMP_FILTER_ALL) { - rc = bnxt_close_nic(bp, false, false); - if (!rc) - rc = bnxt_open_nic(bp, false, false); + bnxt_close_nic(bp, false, false); + rc = bnxt_open_nic(bp, false, false); } else { bnxt_ptp_cfg_tstamp_filters(bp); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index dde327f2c57e..c722b3b41730 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -95,7 +95,7 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) /*TODO: if the driver supports VLAN filter on guest VLAN, * the spoof check should also include vlan anti-spoofing */ - rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); if (!rc) { req->fid = cpu_to_le16(vf->fw_fid); req->flags = cpu_to_le32(func_flags); @@ -146,7 +146,7 @@ static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF)) return 0; - rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); if (rc) return rc; @@ -232,7 +232,7 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) } vf = &bp->pf.vf[vf_id]; - rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); if (rc) return rc; @@ -274,7 +274,7 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos, if (vlan_tag == vf->vlan) return 0; - rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); if (!rc) { req->fid = cpu_to_le16(vf->fw_fid); req->dflt_vlan = cpu_to_le16(vlan_tag); @@ -314,7 +314,7 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, } if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate) return 0; - rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); if (!rc) { req->fid = cpu_to_le16(vf->fw_fid); req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW | @@ -491,7 +491,7 @@ static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id) struct bnxt_vf_info *vf; int rc; - rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); if (rc) return rc; @@ -550,7 +550,6 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings; vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings; vf_vnics = hw_resc->max_vnics - bp->nr_vnics; - vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs; req->min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX); @@ -572,11 +571,20 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) vf_cp_rings /= num_vfs; vf_tx_rings /= num_vfs; vf_rx_rings /= num_vfs; - vf_vnics /= 
num_vfs; + if ((bp->fw_cap & BNXT_FW_CAP_PRE_RESV_VNICS) && + vf_vnics >= pf->max_vfs) { + /* Take into account that FW has pre-reserved 1 VNIC for + * each pf->max_vfs. + */ + vf_vnics = (vf_vnics - pf->max_vfs + num_vfs) / num_vfs; + } else { + vf_vnics /= num_vfs; + } vf_stat_ctx /= num_vfs; vf_ring_grps /= num_vfs; vf_rss /= num_vfs; + vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); req->min_cmpl_rings = cpu_to_le16(vf_cp_rings); req->min_tx_rings = cpu_to_le16(vf_tx_rings); req->min_rx_rings = cpu_to_le16(vf_rx_rings); @@ -645,7 +653,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) u32 mtu, i; int rc; - rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); if (rc) return rc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index d8afcf8d6b30..273c9ba48f09 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -373,9 +373,9 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, struct flow_dissector *dissector = rule->match.dissector; /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */ - if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 || - (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) { - netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x\n", + if ((dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL)) == 0 || + (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_BASIC)) == 0) { + netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%llx\n", dissector->used_keys); return -EOPNOTSUPP; } @@ -2075,6 +2075,7 @@ destroy_flow_table: rhashtable_destroy(&tc_info->flow_table); free_tc_info: kfree(tc_info); + bp->tc_info = NULL; return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 852eb449ccae..6ba2b9398633 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -345,7 +345,7 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp) edev->hw_ring_stats_size = bp->hw_ring_stats_size; edev->pf_port_id = bp->pf.port_id; edev->en_state = bp->state; - + edev->bar0 = bp->bar0; edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h index 80cbc4b6130a..6ff77f082e6c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h @@ -81,6 +81,7 @@ struct bnxt_en_dev { * mode only. Will be * updated in resume. 
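The VF VNIC split in the bnxt_hwrm_func_vf_resc_cfg() hunk above is easier to follow with concrete numbers. A standalone sketch of the same arithmetic; the values are made up for illustration, only the formula comes from the patch:

#include <stdio.h>

int main(void)
{
	unsigned int max_vfs = 64;	/* VFs the FW pre-reserved a VNIC for */
	unsigned int num_vfs = 16;	/* VFs actually being enabled */
	unsigned int vf_vnics = 80;	/* free VNICs available for VFs */

	/* Drop the VNICs held for the (max_vfs - num_vfs) VFs that stay
	 * disabled, then divide: (80 - 64 + 16) / 16 = 2 per VF, versus
	 * 80 / 16 = 5 if the pre-reservation were ignored.
	 */
	unsigned int per_vf = (vf_vnics - max_vfs + num_vfs) / num_vfs;

	printf("%u VNICs per VF\n", per_vf);
	return 0;
}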
*/ + void __iomem *bar0; }; static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index fb43232310b2..8cb9a99154aa 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -15,7 +15,7 @@ #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <linux/filter.h> -#include <net/page_pool.h> +#include <net/page_pool/helpers.h> #include "bnxt_hsi.h" #include "bnxt.h" #include "bnxt_xdp.h" @@ -59,7 +59,6 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, for (i = 0; i < num_frags ; i++) { skb_frag_t *frag = &sinfo->frags[i]; struct bnxt_sw_tx_bd *frag_tx_buf; - struct pci_dev *pdev = bp->pdev; dma_addr_t frag_mapping; int frag_len; @@ -73,16 +72,10 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; frag_len = skb_frag_size(frag); - frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0, - frag_len, DMA_TO_DEVICE); - - if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping))) - return NULL; - - dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping); - flags = frag_len << TX_BD_LEN_SHIFT; txbd->tx_bd_len_flags_type = cpu_to_le32(flags); + frag_mapping = page_pool_get_dma_addr(skb_frag_page(frag)) + + skb_frag_off(frag); txbd->tx_bd_haddr = cpu_to_le64(frag_mapping); len = frag_len; @@ -153,6 +146,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) tx_buf->action = 0; tx_buf->xdpf = NULL; } else if (tx_buf->action == XDP_TX) { + tx_buf->action = 0; rx_doorbell_needed = true; last_tx_cons = tx_cons; @@ -162,6 +156,9 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) tx_buf = &txr->tx_buf_ring[tx_cons]; page_pool_recycle_direct(rxr->page_pool, tx_buf->page); } + } else { + bnxt_sched_reset_txr(bp, txr, i); + return; } tx_cons = NEXT_TX(tx_cons); } diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 2b5761ad2f92..9282403d1bf6 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2077,12 +2077,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) spin_lock(&ring->lock); if (ring->free_bds <= (nr_frags + 1)) { - if (!netif_tx_queue_stopped(txq)) { + if (!netif_tx_queue_stopped(txq)) netif_tx_stop_queue(txq); - netdev_err(dev, - "%s: tx ring %d full when queue %d awake\n", - __func__, index, ring->queue); - } ret = NETDEV_TX_BUSY; goto out; } @@ -3251,23 +3247,6 @@ static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id) return IRQ_HANDLED; } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void bcmgenet_poll_controller(struct net_device *dev) -{ - struct bcmgenet_priv *priv = netdev_priv(dev); - - /* Invoke the main RX/TX interrupt handler */ - disable_irq(priv->irq0); - bcmgenet_isr0(priv->irq0, priv); - enable_irq(priv->irq0); - - /* And the interrupt handler for RX/TX priority queues */ - disable_irq(priv->irq1); - bcmgenet_isr1(priv->irq1, priv); - enable_irq(priv->irq1); -} -#endif - static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) { u32 reg; @@ -3724,9 +3703,6 @@ static const struct net_device_ops bcmgenet_netdev_ops = { .ndo_set_mac_address = bcmgenet_set_mac_addr, .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_features = bcmgenet_set_features, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = bcmgenet_poll_controller, -#endif 
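The bnxt_xdp.c hunk above stops DMA-mapping TX frags because page_pool pages allocated from a pool created with PP_FLAG_DMA_MAP stay mapped for the pool's lifetime. A minimal sketch of the address computation it relies on; the helper name is hypothetical, not a bnxt symbol:

#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>

static dma_addr_t my_frag_dma_addr(const skb_frag_t *frag)
{
	/* The pool recorded the page's bus address at allocation time;
	 * adding the frag offset yields a ready-to-use TX address.
	 */
	return page_pool_get_dma_addr(skb_frag_page(frag)) +
	       skb_frag_off(frag);
}

This is also why the error path in the old code (dma_mapping_error handling) could be deleted: there is no per-packet mapping left to fail.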
.ndo_get_stats = bcmgenet_get_stats, .ndo_change_carrier = bcmgenet_change_carrier, }; @@ -4168,7 +4144,7 @@ err: return err; } -static int bcmgenet_remove(struct platform_device *pdev) +static void bcmgenet_remove(struct platform_device *pdev) { struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev); @@ -4176,8 +4152,6 @@ static int bcmgenet_remove(struct platform_device *pdev) unregister_netdev(priv->dev); bcmgenet_mii_exit(priv->dev); free_netdev(priv->dev); - - return 0; } static void bcmgenet_shutdown(struct platform_device *pdev) @@ -4356,7 +4330,7 @@ MODULE_DEVICE_TABLE(acpi, genet_acpi_match); static struct platform_driver bcmgenet_driver = { .probe = bcmgenet_probe, - .remove = bcmgenet_remove, + .remove_new = bcmgenet_remove, .shutdown = bcmgenet_shutdown, .driver = { .name = "bcmgenet", diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index cc3afb605b1e..97ea76d443ab 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -619,7 +619,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); if (IS_ERR(phydev)) { dev_err(kdev, "failed to register fixed PHY device\n"); - return -ENODEV; + return PTR_ERR(phydev); } /* Make sure we initialize MoCA PHYs with a link down */ diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 3a6763c5e8b3..fcf8485f3446 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -2593,7 +2593,7 @@ out_out: return err; } -static int sbmac_remove(struct platform_device *pldev) +static void sbmac_remove(struct platform_device *pldev) { struct net_device *dev = platform_get_drvdata(pldev); struct sbmac_softc *sc = netdev_priv(dev); @@ -2604,13 +2604,11 @@ static int sbmac_remove(struct platform_device *pldev) mdiobus_free(sc->mii_bus); iounmap(sc->sbm_base); free_netdev(dev); - - return 0; } static struct platform_driver sbmac_driver = { .probe = sbmac_probe, - .remove = sbmac_remove, + .remove_new = sbmac_remove, .driver = { .name = sbmac_string, }, diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index cb2810f175cc..f52830dfb26a 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -1539,8 +1539,7 @@ static int tg3_mdio_init(struct tg3 *tp) return -ENOMEM; tp->mdio_bus->name = "tg3 mdio bus"; - snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", - (tp->pdev->bus->number << 8) | tp->pdev->devfn); + snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev)); tp->mdio_bus->priv = tp; tp->mdio_bus->parent = &tp->pdev->dev; tp->mdio_bus->read = &tg3_mdio_read; @@ -6315,6 +6314,46 @@ err_out: return -EOPNOTSUPP; } +static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock, + struct skb_shared_hwtstamps *timestamp) +{ + memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps)); + timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) + + tp->ptp_adjust); +} + +static void tg3_read_tx_tstamp(struct tg3 *tp, u64 *hwclock) +{ + *hwclock = tr32(TG3_TX_TSTAMP_LSB); + *hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32; +} + +static long tg3_ptp_ts_aux_work(struct ptp_clock_info *ptp) +{ + struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); + struct skb_shared_hwtstamps timestamp; + u64 hwclock; + + if (tp->ptp_txts_retrycnt > 2) + goto done; + + tg3_read_tx_tstamp(tp, &hwclock); + + if 
(hwclock != tp->pre_tx_ts) { + tg3_hwclock_to_timestamp(tp, hwclock, &timestamp); + skb_tstamp_tx(tp->tx_tstamp_skb, &timestamp); + goto done; + } + tp->ptp_txts_retrycnt++; + return HZ / 10; +done: + dev_consume_skb_any(tp->tx_tstamp_skb); + tp->tx_tstamp_skb = NULL; + tp->ptp_txts_retrycnt = 0; + tp->pre_tx_ts = 0; + return -1; +} + static const struct ptp_clock_info tg3_ptp_caps = { .owner = THIS_MODULE, .name = "tg3 clock", @@ -6326,19 +6365,12 @@ static const struct ptp_clock_info tg3_ptp_caps = { .pps = 0, .adjfine = tg3_ptp_adjfine, .adjtime = tg3_ptp_adjtime, + .do_aux_work = tg3_ptp_ts_aux_work, .gettimex64 = tg3_ptp_gettimex, .settime64 = tg3_ptp_settime, .enable = tg3_ptp_enable, }; -static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock, - struct skb_shared_hwtstamps *timestamp) -{ - memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps)); - timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) + - tp->ptp_adjust); -} - /* tp->lock must be held */ static void tg3_ptp_init(struct tg3 *tp) { @@ -6369,6 +6401,8 @@ static void tg3_ptp_fini(struct tg3 *tp) ptp_clock_unregister(tp->ptp_clock); tp->ptp_clock = NULL; tp->ptp_adjust = 0; + dev_consume_skb_any(tp->tx_tstamp_skb); + tp->tx_tstamp_skb = NULL; } static inline int tg3_irq_sync(struct tg3 *tp) @@ -6440,6 +6474,14 @@ static void tg3_dump_state(struct tg3 *tp) int i; u32 *regs; + /* If it is a PCI error, all registers will be 0xffff, + * we don't dump them out, just report the error and return + */ + if (tp->pdev->error_state != pci_channel_io_normal) { + netdev_err(tp->dev, "PCI channel ERROR!\n"); + return; + } + regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); if (!regs) return; @@ -6539,6 +6581,7 @@ static void tg3_tx(struct tg3_napi *tnapi) while (sw_idx != hw_idx) { struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; + bool complete_skb_later = false; struct sk_buff *skb = ri->skb; int i, tx_bug = 0; @@ -6549,12 +6592,17 @@ static void tg3_tx(struct tg3_napi *tnapi) if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) { struct skb_shared_hwtstamps timestamp; - u64 hwclock = tr32(TG3_TX_TSTAMP_LSB); - hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32; + u64 hwclock; - tg3_hwclock_to_timestamp(tp, hwclock, &timestamp); - - skb_tstamp_tx(skb, &timestamp); + tg3_read_tx_tstamp(tp, &hwclock); + if (hwclock != tp->pre_tx_ts) { + tg3_hwclock_to_timestamp(tp, hwclock, &timestamp); + skb_tstamp_tx(skb, &timestamp); + tp->pre_tx_ts = 0; + } else { + tp->tx_tstamp_skb = skb; + complete_skb_later = true; + } } dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), @@ -6592,7 +6640,10 @@ static void tg3_tx(struct tg3_napi *tnapi) pkts_compl++; bytes_compl += skb->len; - dev_consume_skb_any(skb); + if (!complete_skb_later) + dev_consume_skb_any(skb); + else + ptp_schedule_worker(tp->ptp_clock, 0); if (unlikely(tx_bug)) { tg3_tx_recover(tp); @@ -6604,9 +6655,9 @@ static void tg3_tx(struct tg3_napi *tnapi) tnapi->tx_cons = sw_idx; - /* Need to make the tx_cons update visible to tg3_start_xmit() + /* Need to make the tx_cons update visible to __tg3_start_xmit() * before checking for netif_queue_stopped(). Without the - * memory barrier, there is a small possibility that tg3_start_xmit() + * memory barrier, there is a small possibility that __tg3_start_xmit() * will miss it and cause the queue to be stopped forever. */ smp_mb(); @@ -6846,7 +6897,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) desc_idx, *post_ptr); drop_it_no_recycle: /* Other statistics kept track of by card.
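tg3 can latch only one outgoing timestamp at a time, so the hunks above park the skb and poll from the PTP aux worker, armed from the TX completion path with ptp_schedule_worker(tp->ptp_clock, 0). The load-bearing part is the do_aux_work contract: a non-negative return is the delay in jiffies before the next run, a negative return stops the worker. A condensed, hypothetical sketch of that pattern, not the driver's exact code:

#include <linux/jiffies.h>
#include <linux/ptp_clock_kernel.h>

struct my_dev {
	struct ptp_clock_info ptp_info;
	int retry_cnt;
};

/* Stand-in for "the NIC latched a fresh TX timestamp". */
static bool my_tstamp_ready(struct my_dev *md)
{
	return md->retry_cnt >= 2;
}

static long my_ts_aux_work(struct ptp_clock_info *ptp)
{
	struct my_dev *md = container_of(ptp, struct my_dev, ptp_info);

	if (my_tstamp_ready(md))
		return -1;	/* deliver the skb and stop polling */
	md->retry_cnt++;
	return HZ / 10;		/* try again in ~100 ms */
}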
*/ - tp->rx_dropped++; + tnapi->rx_dropped++; goto next_pkt; } @@ -7846,7 +7897,7 @@ static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb) return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3; } -static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); +static netdev_tx_t __tg3_start_xmit(struct sk_buff *, struct net_device *); /* Use GSO to workaround all TSO packets that meet HW bug conditions * indicated in tg3_tx_frag_set() @@ -7875,12 +7926,14 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi, segs = skb_gso_segment(skb, tp->dev->features & ~(NETIF_F_TSO | NETIF_F_TSO6)); - if (IS_ERR(segs) || !segs) + if (IS_ERR(segs) || !segs) { + tnapi->tx_dropped++; goto tg3_tso_bug_end; + } skb_list_walk_safe(segs, seg, next) { skb_mark_not_on_list(seg); - tg3_start_xmit(seg, tp->dev); + __tg3_start_xmit(seg, tp->dev); } tg3_tso_bug_end: @@ -7890,7 +7943,7 @@ tg3_tso_bug_end: } /* hard_start_xmit for all devices */ -static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t __tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); u32 len, entry, base_flags, mss, vlan = 0; @@ -8029,8 +8082,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) && tg3_flag(tp, TX_TSTAMP_EN)) { - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - base_flags |= TXD_FLAG_HWTSTAMP; + tg3_full_lock(tp, 0); + if (!tp->pre_tx_ts) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + base_flags |= TXD_FLAG_HWTSTAMP; + tg3_read_tx_tstamp(tp, &tp->pre_tx_ts); + } + tg3_full_unlock(tp); } len = skb_headlen(skb); @@ -8134,11 +8192,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) netif_tx_wake_queue(txq); } - if (!netdev_xmit_more() || netif_xmit_stopped(txq)) { - /* Packets are ready, update Tx producer idx on card. */ - tw32_tx_mbox(tnapi->prodmbox, entry); - } - return NETDEV_TX_OK; dma_error: @@ -8147,10 +8200,46 @@ dma_error: drop: dev_kfree_skb_any(skb); drop_nofree: - tp->tx_dropped++; + tnapi->tx_dropped++; return NETDEV_TX_OK; } +static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct netdev_queue *txq; + u16 skb_queue_mapping; + netdev_tx_t ret; + + skb_queue_mapping = skb_get_queue_mapping(skb); + txq = netdev_get_tx_queue(dev, skb_queue_mapping); + + ret = __tg3_start_xmit(skb, dev); + + /* Notify the hardware that packets are ready by updating the TX ring + * tail pointer. We respect netdev_xmit_more() thus avoiding poking + * the hardware for every packet. To guarantee forward progress the TX + * ring must be drained when it is full as indicated by + * netif_xmit_stopped(). This needs to happen even when the current + * skb was dropped or rejected with NETDEV_TX_BUSY. Otherwise packets + * queued by previous __tg3_start_xmit() calls might get stuck in + * the queue forever. + */ + if (!netdev_xmit_more() || netif_xmit_stopped(txq)) { + struct tg3_napi *tnapi; + struct tg3 *tp; + + tp = netdev_priv(dev); + tnapi = &tp->napi[skb_queue_mapping]; + + if (tg3_flag(tp, ENABLE_TSS)) + tnapi++; + + tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); + } + + return ret; +} + static void tg3_mac_loopback(struct tg3 *tp, bool enable) { if (enable) { @@ -9326,7 +9415,7 @@ static void __tg3_set_rx_mode(struct net_device *); /* tp->lock is held. 
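The new tg3_start_xmit() wrapper above exists purely for that doorbell rule: batch producer-index writes while the stack keeps feeding packets, but always ring when batching ends or the queue fills, even after a drop. The generic shape, with hypothetical helpers standing in for descriptor queueing and the mailbox write:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Both helpers are illustrative stand-ins, not tg3 symbols. */
static netdev_tx_t my_queue_descriptors(struct sk_buff *skb,
					struct net_device *dev);
static void my_ring_doorbell(struct net_device *dev);

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	netdev_tx_t ret = my_queue_descriptors(skb, dev);

	/* Ring even after a drop or NETDEV_TX_BUSY: descriptors queued
	 * by earlier calls in the batch still need the doorbell.
	 */
	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		my_ring_doorbell(dev);

	return ret;
}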
*/ static int tg3_halt(struct tg3 *tp, int kind, bool silent) { - int err; + int err, i; tg3_stop_fw(tp); @@ -9347,6 +9436,13 @@ static int tg3_halt(struct tg3 *tp, int kind, bool silent) /* And make sure the next sample is new data */ memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); + + for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) { + struct tg3_napi *tnapi = &tp->napi[i]; + + tnapi->rx_dropped = 0; + tnapi->tx_dropped = 0; + } } return err; @@ -11171,7 +11267,8 @@ static void tg3_reset_task(struct work_struct *work) rtnl_lock(); tg3_full_lock(tp, 0); - if (tp->pcierr_recovery || !netif_running(tp->dev)) { + if (tp->pcierr_recovery || !netif_running(tp->dev) || + tp->pdev->error_state != pci_channel_io_normal) { tg3_flag_clear(tp, RESET_TASK_PENDING); tg3_full_unlock(tp); rtnl_unlock(); @@ -11896,6 +11993,9 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats) { struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev; struct tg3_hw_stats *hw_stats = tp->hw_stats; + unsigned long rx_dropped; + unsigned long tx_dropped; + int i; stats->rx_packets = old_stats->rx_packets + get_stat64(&hw_stats->rx_ucast_packets) + @@ -11942,8 +12042,26 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats) stats->rx_missed_errors = old_stats->rx_missed_errors + get_stat64(&hw_stats->rx_discards); - stats->rx_dropped = tp->rx_dropped; - stats->tx_dropped = tp->tx_dropped; + /* Aggregate per-queue counters. The per-queue counters are updated + * by a single writer, race-free. The result computed by this loop + * might not be 100% accurate (counters can be updated in the middle of + * the loop) but the next tg3_get_nstats() will recompute the current + * value so it is acceptable. + * + * Note that these counters wrap around at 4G on 32bit machines. + */ + rx_dropped = (unsigned long)(old_stats->rx_dropped); + tx_dropped = (unsigned long)(old_stats->tx_dropped); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + rx_dropped += tnapi->rx_dropped; + tx_dropped += tnapi->tx_dropped; + } + + stats->rx_dropped = rx_dropped; + stats->tx_dropped = tx_dropped; } static int tg3_get_regs_len(struct net_device *dev) @@ -17006,7 +17124,7 @@ static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val) !tg3_flag(tp, PCI_EXPRESS)) goto out; -#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) +#if defined(CONFIG_PPC64) || defined(CONFIG_PARISC) goal = BOUNDARY_MULTI_CACHELINE; #else #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA) @@ -17681,7 +17799,7 @@ static int tg3_init_one(struct pci_dev *pdev, * device behind the EPB cannot support DMA addresses > 40-bit. * On 64-bit systems with IOMMU, use 40-bit dma_mask. * On 64-bit systems without IOMMU, use 64-bit dma_mask and - * do DMA address check in tg3_start_xmit(). + * do DMA address check in __tg3_start_xmit(). 
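The per-queue rx_dropped/tx_dropped scheme above trades a little read-side accuracy for lock-free updates: each counter has exactly one writer (its queue's NAPI context), and the stats path just sums a snapshot. A minimal sketch; struct and function names are illustrative:

#include <linux/netdevice.h>

struct my_queue {
	unsigned long rx_dropped;	/* written only by this queue's NAPI */
	unsigned long tx_dropped;
};

static void my_sum_dropped(const struct my_queue *q, int nqueues,
			   struct rtnl_link_stats64 *stats)
{
	int i;

	for (i = 0; i < nqueues; i++) {
		/* May observe a counter mid-update; the next stats read
		 * recomputes the totals, which the patch deems acceptable.
		 */
		stats->rx_dropped += q[i].rx_dropped;
		stats->tx_dropped += q[i].tx_dropped;
	}
}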
*/ if (tg3_flag(tp, IS_5788)) persist_dma_mask = dma_mask = DMA_BIT_MASK(32); @@ -17795,10 +17913,7 @@ static int tg3_init_one(struct pci_dev *pdev, tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; tnapi->int_mbox = intmbx; - if (i <= 4) - intmbx += 0x8; - else - intmbx += 0x4; + intmbx += 0x8; tnapi->consmbox = rcvmbx; tnapi->prodmbox = sndmbx; @@ -18082,7 +18197,8 @@ static void tg3_shutdown(struct pci_dev *pdev) if (netif_running(dev)) dev_close(dev); - tg3_power_down(tp); + if (system_state == SYSTEM_POWER_OFF) + tg3_power_down(tp); rtnl_unlock(); diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 1000c894064f..5016475e5005 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -3018,6 +3018,7 @@ struct tg3_napi { u16 *rx_rcb_prod_idx; struct tg3_rx_prodring_set prodring; struct tg3_rx_buffer_desc *rx_rcb; + unsigned long rx_dropped; u32 tx_prod ____cacheline_aligned; u32 tx_cons; @@ -3026,6 +3027,7 @@ struct tg3_napi { u32 prodmbox; struct tg3_tx_buffer_desc *tx_ring; struct tg3_tx_ring_info *tx_buffers; + unsigned long tx_dropped; dma_addr_t status_mapping; dma_addr_t rx_rcb_mapping; @@ -3190,6 +3192,7 @@ struct tg3 { struct ptp_clock_info ptp_info; struct ptp_clock *ptp_clock; s64 ptp_adjust; + u8 ptp_txts_retrycnt; /* begin "tx thread" cacheline section */ void (*write32_tx_mbox) (struct tg3 *, u32, @@ -3219,8 +3222,6 @@ struct tg3 { /* begin "everything else" cacheline(s) section */ - unsigned long rx_dropped; - unsigned long tx_dropped; struct rtnl_link_stats64 net_stats_prev; struct tg3_ethtool_stats estats_prev; @@ -3372,6 +3373,8 @@ struct tg3 { struct tg3_hw_stats *hw_stats; dma_addr_t stats_mapping; struct work_struct reset_task; + struct sk_buff *tx_tstamp_skb; + u64 pre_tx_ts; int nvram_lock_cnt; u32 nvram_size; diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index b07522ac3e74..9c80ab07a735 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -2839,7 +2839,7 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver) static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer) { - strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); + strscpy_pad(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); } static void diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index d6d90f9722a7..31191b520b58 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -1037,8 +1037,7 @@ bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb) static void bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx) { - struct bnad_tx_info *tx_info = - (struct bnad_tx_info *)tx->priv; + struct bnad_tx_info *tx_info = tx->priv; struct bna_tcb *tcb; u32 txq_id; int i; @@ -1056,7 +1055,7 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx) static void bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx) { - struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv; + struct bnad_tx_info *tx_info = tx->priv; struct bna_tcb *tcb; u32 txq_id; int i; @@ -1133,7 +1132,7 @@ bnad_tx_cleanup(struct delayed_work *work) static void bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx) { - struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv; + struct bnad_tx_info *tx_info = tx->priv; struct bna_tcb *tcb; int i; @@ -1149,7 +1148,7 @@ bnad_cb_tx_cleanup(struct bnad 
*bnad, struct bna_tx *tx) static void bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx) { - struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv; + struct bnad_rx_info *rx_info = rx->priv; struct bna_ccb *ccb; struct bnad_rx_ctrl *rx_ctrl; int i; @@ -1208,7 +1207,7 @@ bnad_rx_cleanup(void *work) static void bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx) { - struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv; + struct bnad_rx_info *rx_info = rx->priv; struct bna_ccb *ccb; struct bnad_rx_ctrl *rx_ctrl; int i; @@ -1231,7 +1230,7 @@ bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx) static void bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx) { - struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv; + struct bnad_rx_info *rx_info = rx->priv; struct bna_ccb *ccb; struct bna_rcb *rcb; struct bnad_rx_ctrl *rx_ctrl; diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h index 627a93ce38ab..10b1e534030e 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.h +++ b/drivers/net/ethernet/brocade/bna/bnad.h @@ -19,7 +19,6 @@ #include <linux/firmware.h> #include <linux/if_vlan.h> -/* Fix for IA64 */ #include <asm/checksum.h> #include <net/ip6_checksum.h> diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 82929ee76739..cebae0f418f2 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -26,7 +26,6 @@ #include <linux/platform_device.h> #include <linux/phylink.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/of_mdio.h> #include <linux/of_net.h> @@ -757,8 +756,6 @@ static void macb_mac_link_up(struct phylink_config *config, if (rx_pause) ctrl |= MACB_BIT(PAE); - macb_set_tx_clk(bp, speed); - /* Initialize rings & buffers as clearing MACB_BIT(TE) in link down * cleared the pipeline and control registers. */ @@ -778,6 +775,9 @@ static void macb_mac_link_up(struct phylink_config *config, spin_unlock_irqrestore(&bp->lock, flags); + if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) + macb_set_tx_clk(bp, speed); + /* Enable Rx and Tx; Enable PTP unicast */ ctrl = macb_readl(bp, NCR); if (gem_has_ptp(bp)) @@ -5156,7 +5156,7 @@ err_disable_clocks: return err; } -static int macb_remove(struct platform_device *pdev) +static void macb_remove(struct platform_device *pdev) { struct net_device *dev; struct macb *bp; @@ -5181,8 +5181,6 @@ static int macb_remove(struct platform_device *pdev) phylink_destroy(bp->phylink); free_netdev(dev); } - - return 0; } static int __maybe_unused macb_suspend(struct device *dev) @@ -5398,7 +5396,7 @@ static const struct dev_pm_ops macb_pm_ops = { static struct platform_driver macb_driver = { .probe = macb_probe, - .remove = macb_remove, + .remove_new = macb_remove, .driver = { .name = "macb", .of_match_table = of_match_ptr(macb_dt_ids), diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index f4f87dfa9687..5e97f1e4e38e 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -1820,7 +1820,7 @@ err_alloc: * changes the link status, releases the DMA descriptor rings, * unregisters the MDIO bus and unmaps the allocated memory. 
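The .remove to .remove_new conversions scattered through this section (bcmgenet, sb1250, macb, xgmac, octeon_mgmt, cs89x0, ep93xx, mac89x0) all follow one shape: the callback returns void because the platform core never acted on the old int return anyway. A minimal skeleton with hypothetical names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	return 0;	/* acquire resources here */
}

static void my_remove(struct platform_device *pdev)
{
	/* release resources; errors can no longer be returned, so any
	 * failure must be logged and swallowed here
	 */
}

static struct platform_driver my_driver = {
	.probe = my_probe,
	.remove_new = my_remove,
	.driver = {
		.name = "my-dev",
	},
};
module_platform_driver(my_driver);

MODULE_LICENSE("GPL");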
*/ -static int xgmac_remove(struct platform_device *pdev) +static void xgmac_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct xgmac_priv *priv = netdev_priv(ndev); @@ -1840,8 +1840,6 @@ static int xgmac_remove(struct platform_device *pdev) release_mem_region(res->start, resource_size(res)); free_netdev(ndev); - - return 0; } #ifdef CONFIG_PM_SLEEP @@ -1921,7 +1919,7 @@ static struct platform_driver xgmac_driver = { .pm = &xgmac_pm_ops, }, .probe = xgmac_probe, - .remove = xgmac_remove, + .remove_new = xgmac_remove, }; module_platform_driver(xgmac_driver); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 9d56181a301f..d3e07b6ed5e1 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -442,10 +442,11 @@ lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) oct = lio->oct_dev; memset(drvinfo, 0, sizeof(struct ethtool_drvinfo)); - strcpy(drvinfo->driver, "liquidio"); - strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, - ETHTOOL_FWVERS_LEN); - strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32); + strscpy(drvinfo->driver, "liquidio", sizeof(drvinfo->driver)); + strscpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, + sizeof(drvinfo->fw_version)); + strscpy(drvinfo->bus_info, pci_name(oct->pci_dev), + sizeof(drvinfo->bus_info)); } static void @@ -458,10 +459,11 @@ lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) oct = lio->oct_dev; memset(drvinfo, 0, sizeof(struct ethtool_drvinfo)); - strcpy(drvinfo->driver, "liquidio_vf"); - strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, - ETHTOOL_FWVERS_LEN); - strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32); + strscpy(drvinfo->driver, "liquidio_vf", sizeof(drvinfo->driver)); + strscpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, + sizeof(drvinfo->fw_version)); + strscpy(drvinfo->bus_info, pci_name(oct->pci_dev), + sizeof(drvinfo->bus_info)); } static int diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 100daadbea2a..34f02a8ec2ca 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1689,7 +1689,7 @@ static int load_firmware(struct octeon_device *oct) if (fw_type_is_auto()) { tmp_fw_type = LIO_FW_NAME_TYPE_NIC; - strncpy(fw_type, tmp_fw_type, sizeof(fw_type)); + strscpy_pad(fw_type, tmp_fw_type, sizeof(fw_type)); } else { tmp_fw_type = fw_type; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c index 600de587d7a9..aa6c0dfb6f1c 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c @@ -638,7 +638,8 @@ lio_vf_rep_netdev_event(struct notifier_block *nb, memset(&rep_cfg, 0, sizeof(rep_cfg)); rep_cfg.req_type = LIO_VF_REP_REQ_DEVNAME; rep_cfg.ifidx = vf_rep->ifidx; - strncpy(rep_cfg.rep_name.name, ndev->name, LIO_IF_NAME_SIZE); + strscpy(rep_cfg.rep_name.name, ndev->name, + sizeof(rep_cfg.rep_name.name)); ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, sizeof(rep_cfg), NULL, 0); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index 364f4f912dc2..6b6cb73482d7 100644 --- 
a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -1217,10 +1217,10 @@ int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf) goto core_drv_init_err; } - strncpy(app_name, + strscpy(app_name, get_oct_app_string( (u32)recv_pkt->rh.r_core_drv_init.app_mode), - sizeof(app_name) - 1); + sizeof(app_name)); oct->app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode; if (recv_pkt->rh.r_core_drv_init.app_mode == CVM_DRV_NIC_APP) { oct->fw_info.max_nic_ports = @@ -1257,9 +1257,10 @@ int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf) memcpy(cs, get_rbd( recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE, sizeof(*cs)); - strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME); - strncpy(oct->boardinfo.serial_number, cs->board_serial_number, - OCT_SERIAL_LEN); + strscpy(oct->boardinfo.name, cs->boardname, + sizeof(oct->boardinfo.name)); + strscpy(oct->boardinfo.serial_number, cs->board_serial_number, + sizeof(oct->boardinfo.serial_number)); octeon_swap_8B_data((u64 *)cs, (sizeof(*cs) >> 3)); diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index edde0b8fa49c..007d4b06819e 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -1521,7 +1521,7 @@ err: return result; } -static int octeon_mgmt_remove(struct platform_device *pdev) +static void octeon_mgmt_remove(struct platform_device *pdev) { struct net_device *netdev = platform_get_drvdata(pdev); struct octeon_mgmt *p = netdev_priv(netdev); @@ -1529,7 +1529,6 @@ static int octeon_mgmt_remove(struct platform_device *pdev) unregister_netdev(netdev); of_node_put(p->phy_np); free_netdev(netdev); - return 0; } static const struct of_device_id octeon_mgmt_match[] = { @@ -1546,7 +1545,7 @@ static struct platform_driver octeon_mgmt_driver = { .of_match_table = octeon_mgmt_match, }, .probe = octeon_mgmt_probe, - .remove = octeon_mgmt_remove, + .remove_new = octeon_mgmt_remove, }; module_platform_driver(octeon_mgmt_driver); diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 9b84c8d8d309..d117022d15d7 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -2126,7 +2126,7 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .set_link_ksettings = set_link_ksettings, }; -static int in_range(int val, int lo, int hi) +static int cxgb_in_range(int val, int lo, int hi) { return val < 0 || (val <= hi && val >= lo); } @@ -2162,19 +2162,19 @@ static int cxgb_siocdevprivate(struct net_device *dev, return -EINVAL; if (t.qset_idx >= SGE_QSETS) return -EINVAL; - if (!in_range(t.intr_lat, 0, M_NEWTIMER) || - !in_range(t.cong_thres, 0, 255) || - !in_range(t.txq_size[0], MIN_TXQ_ENTRIES, + if (!cxgb_in_range(t.intr_lat, 0, M_NEWTIMER) || + !cxgb_in_range(t.cong_thres, 0, 255) || + !cxgb_in_range(t.txq_size[0], MIN_TXQ_ENTRIES, MAX_TXQ_ENTRIES) || - !in_range(t.txq_size[1], MIN_TXQ_ENTRIES, + !cxgb_in_range(t.txq_size[1], MIN_TXQ_ENTRIES, MAX_TXQ_ENTRIES) || - !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES, + !cxgb_in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES, MAX_CTRL_TXQ_ENTRIES) || - !in_range(t.fl_size[0], MIN_FL_ENTRIES, + !cxgb_in_range(t.fl_size[0], MIN_FL_ENTRIES, MAX_RX_BUFFERS) || - !in_range(t.fl_size[1], MIN_FL_ENTRIES, + !cxgb_in_range(t.fl_size[1], MIN_FL_ENTRIES, MAX_RX_JUMBO_BUFFERS) || - 
!in_range(t.rspq_size, MIN_RSPQ_ENTRIES, + !cxgb_in_range(t.rspq_size, MIN_RSPQ_ENTRIES, MAX_RSPQ_ENTRIES)) return -EINVAL; diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h index ea75f275023f..646ca0bc25bd 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h @@ -76,7 +76,7 @@ struct l2t_data { atomic_t nfree; /* number of free entries */ rwlock_t lock; struct rcu_head rcu_head; /* to handle rcu cleanup */ - struct l2t_entry l2tab[]; + struct l2t_entry l2tab[] __counted_by(nentries); }; typedef void (*arp_failure_handler_func)(struct t3cdev * dev, diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 2e9a74fe0970..6268f96cb4aa 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -2501,14 +2501,6 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) return work_done; } -/* - * Returns true if the device is already scheduled for polling. - */ -static inline int napi_is_scheduled(struct napi_struct *napi) -{ - return test_bit(NAPI_STATE_SCHED, &napi->state); -} - /** * process_pure_responses - process pure responses from a response queue * @adap: the adapter @@ -2674,12 +2666,7 @@ static int rspq_check_napi(struct sge_qset *qs) { struct sge_rspq *q = &qs->rspq; - if (!napi_is_scheduled(&qs->napi) && - is_new_response(&q->desc[q->cidx], q)) { - napi_schedule(&qs->napi); - return 1; - } - return 0; + return is_new_response(&q->desc[q->cidx], q) && napi_schedule(&qs->napi); } /* diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h index 290c1058069a..847c7fc2bbd9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h @@ -29,7 +29,7 @@ struct clip_tbl { atomic_t nfree; struct list_head ce_free_head; void *cl_list; - struct list_head hash_list[]; + struct list_head hash_list[] __counted_by(clipt_size); }; enum { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index d3541159487d..72ac4a34424b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -313,15 +313,15 @@ static int cxgb4_validate_flow_match(struct net_device *dev, u16 ethtype_key = 0; if (dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_PORTS) | - BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | - BIT(FLOW_DISSECTOR_KEY_VLAN) | - BIT(FLOW_DISSECTOR_KEY_IP))) { - netdev_warn(dev, "Unsupported key used: 0x%x\n", + ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | + BIT_ULL(FLOW_DISSECTOR_KEY_IP))) { + netdev_warn(dev, "Unsupported key used: 0x%llx\n", dissector->used_keys); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h index f59dd4b2ae6f..9050568a034c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h @@ -331,6 +331,6 @@ struct cxgb4_link { 
struct cxgb4_tc_u32_table { unsigned int size; /* number of entries in table */ - struct cxgb4_link table[]; /* Jump table */ + struct cxgb4_link table[] __counted_by(size); /* Jump table */ }; #endif /* __CXGB4_TC_U32_PARSE_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index a10a6862a9a4..1e5f5b1a22a6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -59,7 +59,7 @@ struct l2t_data { rwlock_t lock; atomic_t nfree; /* number of free entries */ struct l2t_entry *rover; /* starting point for next allocation */ - struct l2t_entry l2tab[]; /* MUST BE LAST */ + struct l2t_entry l2tab[] __counted_by(l2t_size); /* MUST BE LAST */ }; static inline unsigned int vlan_prio(const struct l2t_entry *e) diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h index 5f8b871d79af..6b3c778815f0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.h +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h @@ -82,7 +82,7 @@ struct sched_class { struct sched_table { /* per port scheduling table */ u8 sched_size; - struct sched_class tab[]; + struct sched_class tab[] __counted_by(sched_size); }; static inline bool can_sched(struct net_device *dev) diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 98dd78551d89..b5ff2e1a9975 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -4261,7 +4261,7 @@ static void sge_rx_timer_cb(struct timer_list *t) if (fl_starving(adap, fl)) { rxq = container_of(fl, struct sge_eth_rxq, fl); - if (napi_reschedule(&rxq->rspq.napi)) + if (napi_schedule(&rxq->rspq.napi)) fl->starving++; else set_bit(id, s->starving_fl); diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.h b/drivers/net/ethernet/chelsio/cxgb4/smt.h index 541249d78914..109c1dff563a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/smt.h +++ b/drivers/net/ethernet/chelsio/cxgb4/smt.h @@ -66,7 +66,7 @@ struct smt_entry { struct smt_data { unsigned int smt_size; rwlock_t lock; - struct smt_entry smtab[]; + struct smt_entry smtab[] __counted_by(smt_size); }; struct smt_data *t4_init_smt(void); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 8d719f82854a..76de55306c4d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3816,6 +3816,8 @@ int t4_load_phy_fw(struct adapter *adap, int win, FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD)); ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, &param, &val, 30000); + if (ret) + return ret; /* If we have version number support, then check to see that the new * firmware got loaded properly.
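The __counted_by() annotations added to these cxgb3/cxgb4 tables tell the compiler (and FORTIFY_SOURCE/UBSAN bounds checks) which member holds the element count of the trailing flexible array. The one rule to respect is that the counter must be set before the array is indexed. A minimal sketch with a hypothetical table, not one of the cxgb structures:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_entry {
	u32 val;
};

struct my_table {
	unsigned int nentries;
	struct my_entry entries[] __counted_by(nentries);
};

static struct my_table *my_table_alloc(unsigned int n)
{
	struct my_table *t = kzalloc(struct_size(t, entries, n), GFP_KERNEL);

	if (t)
		t->nentries = n;	/* must precede entries[] accesses */
	return t;
}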
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 2d0cf76fb3c5..5b1d746e6563 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -2094,7 +2094,7 @@ static void sge_rx_timer_cb(struct timer_list *t) struct sge_eth_rxq *rxq; rxq = container_of(fl, struct sge_eth_rxq, fl); - if (napi_reschedule(&rxq->rspq.napi)) + if (napi_schedule(&rxq->rspq.napi)) fl->starving++; else set_bit(id, s->starving_fl); diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c index 3731c93f8f95..c7338ac6a5bb 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c @@ -39,7 +39,6 @@ #include <linux/kernel.h> #include <linux/module.h> -#include <linux/crypto.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/highmem.h> @@ -49,7 +48,6 @@ #include <net/esp.h> #include <net/xfrm.h> #include <crypto/aes.h> -#include <crypto/algapi.h> #include <crypto/hash.h> #include <crypto/sha1.h> #include <crypto/sha2.h> diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h index 1d110d2edd64..0d42e7d15714 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h @@ -4,7 +4,6 @@ #ifndef __CHCR_IPSEC_H__ #define __CHCR_IPSEC_H__ -#include <crypto/algapi.h> #include "t4_hw.h" #include "cxgb4.h" #include "t4_msg.h" diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c index bcdc7fc2f427..6482728794dd 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c @@ -361,9 +361,7 @@ static void chcr_ktls_dev_del(struct net_device *netdev, struct tls_context *tls_ctx, enum tls_offload_ctx_dir direction) { - struct chcr_ktls_ofld_ctx_tx *tx_ctx = - chcr_get_ktls_tx_context(tls_ctx); - struct chcr_ktls_info *tx_info = tx_ctx->chcr_info; + struct chcr_ktls_info *tx_info = chcr_get_ktls_tx_info(tls_ctx); struct ch_ktls_port_stats_debug *port_stats; struct chcr_ktls_uld_ctx *u_ctx; @@ -396,7 +394,7 @@ static void chcr_ktls_dev_del(struct net_device *netdev, port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id]; atomic64_inc(&port_stats->ktls_tx_connection_close); kvfree(tx_info); - tx_ctx->chcr_info = NULL; + chcr_set_ktls_tx_info(tls_ctx, NULL); /* release module refcount */ module_put(THIS_MODULE); } @@ -417,7 +415,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, { struct tls_context *tls_ctx = tls_get_ctx(sk); struct ch_ktls_port_stats_debug *port_stats; - struct chcr_ktls_ofld_ctx_tx *tx_ctx; struct chcr_ktls_uld_ctx *u_ctx; struct chcr_ktls_info *tx_info; struct dst_entry *dst; @@ -427,8 +424,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, u8 daaddr[16]; int ret = -1; - tx_ctx = chcr_get_ktls_tx_context(tls_ctx); - pi = netdev_priv(netdev); adap = pi->adapter; port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id]; @@ -440,7 +435,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, goto out; } - if (tx_ctx->chcr_info) + if (chcr_get_ktls_tx_info(tls_ctx)) goto out; if (u_ctx && 
u_ctx->detach) @@ -566,7 +561,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, goto free_tid; atomic64_inc(&port_stats->ktls_tx_ctx); - tx_ctx->chcr_info = tx_info; + chcr_set_ktls_tx_info(tls_ctx, tx_info); return 0; @@ -647,7 +642,7 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, { const struct cpl_act_open_rpl *p = (void *)input; struct chcr_ktls_info *tx_info = NULL; - struct chcr_ktls_ofld_ctx_tx *tx_ctx; + struct tls_offload_context_tx *tx_ctx; struct chcr_ktls_uld_ctx *u_ctx; unsigned int atid, tid, status; struct tls_context *tls_ctx; @@ -686,7 +681,7 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family); /* Adding tid */ tls_ctx = tls_get_ctx(tx_info->sk); - tx_ctx = chcr_get_ktls_tx_context(tls_ctx); + tx_ctx = tls_offload_ctx_tx(tls_ctx); u_ctx = adap->uld[CXGB4_ULD_KTLS].handle; if (u_ctx) { ret = xa_insert_bh(&u_ctx->tid_list, tid, tx_ctx, @@ -1924,7 +1919,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) { u32 tls_end_offset, tcp_seq, skb_data_len, skb_offset; struct ch_ktls_port_stats_debug *port_stats; - struct chcr_ktls_ofld_ctx_tx *tx_ctx; + struct tls_offload_context_tx *tx_ctx; struct ch_ktls_stats_debug *stats; struct tcphdr *th = tcp_hdr(skb); int data_len, qidx, ret = 0, mss; @@ -1944,6 +1939,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len; tls_ctx = tls_get_ctx(skb->sk); + tx_ctx = tls_offload_ctx_tx(tls_ctx); tls_netdev = rcu_dereference_bh(tls_ctx->netdev); /* Don't quit on NULL: if tls_device_down is running in parallel, * netdev might become NULL, even if tls_is_skb_tx_device_offloaded was @@ -1952,8 +1948,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(tls_netdev && tls_netdev != dev)) goto out; - tx_ctx = chcr_get_ktls_tx_context(tls_ctx); - tx_info = tx_ctx->chcr_info; + tx_info = chcr_get_ktls_tx_info(tls_ctx); if (unlikely(!tx_info)) goto out; @@ -1979,19 +1974,19 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) * we will send the complete record again. */ - spin_lock_irqsave(&tx_ctx->base.lock, flags); + spin_lock_irqsave(&tx_ctx->lock, flags); do { cxgb4_reclaim_completed_tx(adap, &q->q, true); /* fetch the tls record */ - record = tls_get_record(&tx_ctx->base, tcp_seq, + record = tls_get_record(tx_ctx, tcp_seq, &tx_info->record_no); /* By the time packet reached to us, ACK is received, and record * won't be found in that case, handle it gracefully. */ if (unlikely(!record)) { - spin_unlock_irqrestore(&tx_ctx->base.lock, flags); + spin_unlock_irqrestore(&tx_ctx->lock, flags); atomic64_inc(&port_stats->ktls_tx_drop_no_sync_data); goto out; } @@ -2015,7 +2010,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) tls_end_offset != record->len); if (ret) { - spin_unlock_irqrestore(&tx_ctx->base.lock, + spin_unlock_irqrestore(&tx_ctx->lock, flags); goto out; } @@ -2046,7 +2041,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) /* free the refcount taken earlier */ if (tls_end_offset < data_len) dev_kfree_skb_any(skb); - spin_unlock_irqrestore(&tx_ctx->base.lock, flags); + spin_unlock_irqrestore(&tx_ctx->lock, flags); goto out; } @@ -2082,7 +2077,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) /* if any failure, come out from the loop. 
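The chcr_ktls_xmit() changes above keep the same locking rule as before the refactor: tls_get_record() walks the offload context's list of not-yet-ACKed records and must run under tx_ctx->lock, which the driver holds across the whole retransmit loop. A condensed sketch of just the lookup step, with a hypothetical wrapper that releases the lock immediately:

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <net/tls.h>

static int my_find_record(struct tls_offload_context_tx *ctx, u32 tcp_seq,
			  u64 *record_no, struct tls_record_info **rec)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	*rec = tls_get_record(ctx, tcp_seq, record_no);
	spin_unlock_irqrestore(&ctx->lock, flags);

	/* NULL: the peer already ACKed this range and the record is gone */
	return *rec ? 0 : -ENOENT;
}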
*/ if (ret) { - spin_unlock_irqrestore(&tx_ctx->base.lock, flags); + spin_unlock_irqrestore(&tx_ctx->lock, flags); if (th->fin) dev_kfree_skb_any(skb); @@ -2097,7 +2092,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) } while (data_len > 0); - spin_unlock_irqrestore(&tx_ctx->base.lock, flags); + spin_unlock_irqrestore(&tx_ctx->lock, flags); atomic64_inc(&port_stats->ktls_tx_encrypted_packets); atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes); @@ -2185,17 +2180,17 @@ static void clear_conn_resources(struct chcr_ktls_info *tx_info) static void ch_ktls_reset_all_conn(struct chcr_ktls_uld_ctx *u_ctx) { struct ch_ktls_port_stats_debug *port_stats; - struct chcr_ktls_ofld_ctx_tx *tx_ctx; + struct tls_offload_context_tx *tx_ctx; struct chcr_ktls_info *tx_info; unsigned long index; xa_for_each(&u_ctx->tid_list, index, tx_ctx) { - tx_info = tx_ctx->chcr_info; + tx_info = __chcr_get_ktls_tx_info(tx_ctx); clear_conn_resources(tx_info); port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id]; atomic64_inc(&port_stats->ktls_tx_connection_close); kvfree(tx_info); - tx_ctx->chcr_info = NULL; + memset(tx_ctx->driver_state, 0, TLS_DRIVER_STATE_SIZE_TX); /* release module refcount */ module_put(THIS_MODULE); } diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h index 10572dc55365..dbbba92bf540 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h @@ -67,8 +67,7 @@ struct chcr_ktls_info { bool pending_close; }; -struct chcr_ktls_ofld_ctx_tx { - struct tls_offload_context_tx base; +struct chcr_ktls_ctx_tx { struct chcr_ktls_info *chcr_info; }; @@ -79,14 +78,33 @@ struct chcr_ktls_uld_ctx { bool detach; }; -static inline struct chcr_ktls_ofld_ctx_tx * -chcr_get_ktls_tx_context(struct tls_context *tls_ctx) +static inline struct chcr_ktls_info * +__chcr_get_ktls_tx_info(struct tls_offload_context_tx *octx) { - BUILD_BUG_ON(sizeof(struct chcr_ktls_ofld_ctx_tx) > - TLS_OFFLOAD_CONTEXT_SIZE_TX); - return container_of(tls_offload_ctx_tx(tls_ctx), - struct chcr_ktls_ofld_ctx_tx, - base); + struct chcr_ktls_ctx_tx *priv_ctx; + + BUILD_BUG_ON(sizeof(struct chcr_ktls_ctx_tx) > TLS_DRIVER_STATE_SIZE_TX); + priv_ctx = (struct chcr_ktls_ctx_tx *)octx->driver_state; + return priv_ctx->chcr_info; +} + +static inline struct chcr_ktls_info * +chcr_get_ktls_tx_info(struct tls_context *tls_ctx) +{ + struct chcr_ktls_ctx_tx *priv_ctx; + + BUILD_BUG_ON(sizeof(struct chcr_ktls_ctx_tx) > TLS_DRIVER_STATE_SIZE_TX); + priv_ctx = (struct chcr_ktls_ctx_tx *)__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX); + return priv_ctx->chcr_info; +} + +static inline void +chcr_set_ktls_tx_info(struct tls_context *tls_ctx, struct chcr_ktls_info *chcr_info) +{ + struct chcr_ktls_ctx_tx *priv_ctx; + + priv_ctx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX); + priv_ctx->chcr_info = chcr_info; } static inline int chcr_get_first_rx_qid(struct adapter *adap) diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h index 68562a82d036..7ff82b6778ba 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h @@ -7,7 +7,6 @@ #define __CHTLS_H__ #include <crypto/aes.h> -#include <crypto/algapi.h> #include <crypto/hash.h> #include <crypto/sha1.h> #include <crypto/sha2.h> @@ 
-22,6 +21,7 @@ #include <crypto/internal/hash.h> #include <linux/tls.h> #include <net/tls.h> +#include <net/tls_prot.h> #include <net/tls_toe.h> #include "t4fw_api.h" diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c index 7750702900fa..6f6525983130 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c @@ -2259,7 +2259,7 @@ static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb) if (tp->snd_una != snd_una) { tp->snd_una = snd_una; - tp->rcv_tstamp = tcp_time_stamp(tp); + tp->rcv_tstamp = tcp_jiffies32; if (tp->snd_una == tp->snd_nxt && !csk_flag_nochk(csk, CSK_TX_FAILOVER)) csk_reset_flag(csk, CSK_TX_WAIT_IDLE); diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c index 5fc64e47568a..d567e42e1760 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c @@ -911,7 +911,7 @@ static int csk_wait_memory(struct chtls_dev *cdev, struct sock *sk, long *timeo_p) { DEFINE_WAIT_FUNC(wait, woken_wake_function); - int err = 0; + int ret, err = 0; long current_timeo; long vm_wait = 0; bool noblock; @@ -942,10 +942,13 @@ static int csk_wait_memory(struct chtls_dev *cdev, set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); sk->sk_write_pending++; - sk_wait_event(sk, &current_timeo, sk->sk_err || - (sk->sk_shutdown & SEND_SHUTDOWN) || - (csk_mem_free(cdev, sk) && !vm_wait), &wait); + ret = sk_wait_event(sk, &current_timeo, sk->sk_err || + (sk->sk_shutdown & SEND_SHUTDOWN) || + (csk_mem_free(cdev, sk) && !vm_wait), + &wait); sk->sk_write_pending--; + if (ret < 0) + goto do_error; if (vm_wait) { vm_wait -= current_timeo; @@ -1348,6 +1351,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int copied = 0; int target; long timeo; + int ret; buffers_freed = 0; @@ -1423,7 +1427,11 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, if (copied >= target) break; chtls_cleanup_rbuf(sk, copied); - sk_wait_data(sk, &timeo, NULL); + ret = sk_wait_data(sk, &timeo, NULL); + if (ret < 0) { + copied = copied ? : ret; + goto unlock; + } continue; found_ok_skb: if (!skb->len) { @@ -1518,6 +1526,8 @@ skip_copy: if (buffers_freed) chtls_cleanup_rbuf(sk, copied); + +unlock: release_sock(sk); return copied; } @@ -1534,6 +1544,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg, int copied = 0; size_t avail; /* amount of available data in current skb */ long timeo; + int ret; lock_sock(sk); timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); @@ -1585,7 +1596,12 @@ static int peekmsg(struct sock *sk, struct msghdr *msg, release_sock(sk); lock_sock(sk); } else { - sk_wait_data(sk, &timeo, NULL); + ret = sk_wait_data(sk, &timeo, NULL); + if (ret < 0) { + /* here 'copied' is 0 due to previous checks */ + copied = ret; + break; + } } if (unlikely(peek_seq != tp->copied_seq)) { @@ -1656,6 +1672,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int copied = 0; long timeo; int target; /* Read at least this many bytes */ + int ret; buffers_freed = 0; @@ -1747,7 +1764,11 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, if (copied >= target) break; chtls_cleanup_rbuf(sk, copied); - sk_wait_data(sk, &timeo, NULL); + ret = sk_wait_data(sk, &timeo, NULL); + if (ret < 0) { + copied = copied ?
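The chtls_io.c hunks above all share one shape: sk_wait_data()/sk_wait_event() can now return a negative error (for instance when a signal interrupts the wait), and that error must not be swallowed. A sketch of the pattern, with demo_recv_wait() as an illustrative stand-in for the recvmsg call sites:

#include <net/sock.h>

/* Returns bytes copied so far, or the wait error if nothing was copied. */
static int demo_recv_wait(struct sock *sk, long *timeo, int copied)
{
        int ret = sk_wait_data(sk, timeo, NULL);

        if (ret < 0)
                return copied ? : ret;  /* keep partial progress if any */
        return copied;
}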
: ret; + goto unlock; + } continue; found_ok_skb: @@ -1816,6 +1837,7 @@ skip_copy: if (buffers_freed) chtls_cleanup_rbuf(sk, copied); +unlock: release_sock(sk); return copied; } diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index 276c32c3926a..0a21a10a791c 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -54,7 +54,6 @@ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/kernel.h> #include <linux/types.h> @@ -1855,9 +1854,8 @@ static int __init cs89x0_platform_probe(struct platform_device *pdev) return -ENOMEM; dev->irq = platform_get_irq(pdev, 0); - if (dev->irq <= 0) { - dev_warn(&dev->dev, "interrupt resource missing\n"); - err = -ENXIO; + if (dev->irq < 0) { + err = dev->irq; goto free; } @@ -1881,7 +1879,7 @@ free: return err; } -static int cs89x0_platform_remove(struct platform_device *pdev) +static void cs89x0_platform_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); @@ -1891,7 +1889,6 @@ static int cs89x0_platform_remove(struct platform_device *pdev) */ unregister_netdev(dev); free_netdev(dev); - return 0; } static const struct of_device_id __maybe_unused cs89x0_match[] = { @@ -1906,7 +1903,7 @@ static struct platform_driver cs89x0_driver = { .name = DRV_NAME, .of_match_table = of_match_ptr(cs89x0_match), }, - .remove = cs89x0_platform_remove, + .remove_new = cs89x0_platform_remove, }; module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe); diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index 8627ab19d470..1c2a540db13d 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c @@ -757,7 +757,7 @@ static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data) } -static int ep93xx_eth_remove(struct platform_device *pdev) +static void ep93xx_eth_remove(struct platform_device *pdev) { struct net_device *dev; struct ep93xx_priv *ep; @@ -765,7 +765,7 @@ static int ep93xx_eth_remove(struct platform_device *pdev) dev = platform_get_drvdata(pdev); if (dev == NULL) - return 0; + return; ep = netdev_priv(dev); @@ -782,8 +782,6 @@ static int ep93xx_eth_remove(struct platform_device *pdev) } free_netdev(dev); - - return 0; } static int ep93xx_eth_probe(struct platform_device *pdev) @@ -862,7 +860,7 @@ err_out: static struct platform_driver ep93xx_eth_driver = { .probe = ep93xx_eth_probe, - .remove = ep93xx_eth_remove, + .remove_new = ep93xx_eth_remove, .driver = { .name = "ep93xx-eth", }, diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c index 21a70b1f0ac5..887876f35f10 100644 --- a/drivers/net/ethernet/cirrus/mac89x0.c +++ b/drivers/net/ethernet/cirrus/mac89x0.c @@ -556,19 +556,18 @@ static int set_mac_address(struct net_device *dev, void *addr) MODULE_LICENSE("GPL"); -static int mac89x0_device_remove(struct platform_device *pdev) +static void mac89x0_device_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); unregister_netdev(dev); nubus_writew(0, dev->base_addr + ADD_PORT); free_netdev(dev); - return 0; } static struct platform_driver mac89x0_platform_driver = { .probe = mac89x0_device_probe, - .remove = mac89x0_device_remove, + .remove_new = mac89x0_device_remove, .driver = { .name = "mac89x0", }, diff --git a/drivers/net/ethernet/cortina/gemini.c 
b/drivers/net/ethernet/cortina/gemini.c index 5715b9ab2712..78287cfcbf63 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -432,8 +432,8 @@ static const struct gmac_max_framelen gmac_maxlens[] = { .val = CONFIG0_MAXLEN_1536, }, { - .max_l3_len = 1542, - .val = CONFIG0_MAXLEN_1542, + .max_l3_len = 1548, + .val = CONFIG0_MAXLEN_1548, }, { .max_l3_len = 9212, @@ -1145,6 +1145,7 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, dma_addr_t mapping; unsigned short mtu; void *buffer; + int ret; mtu = ETH_HLEN; mtu += netdev->mtu; @@ -1159,9 +1160,30 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, word3 |= mtu; } - if (skb->ip_summed != CHECKSUM_NONE) { + if (skb->len >= ETH_FRAME_LEN) { + /* Hardware offloaded checksumming isn't working on frames + * bigger than 1514 bytes. A hypothesis about this is that the + * checksum buffer is only 1518 bytes, so when the frames get + * bigger they get truncated, or the last few bytes get + * overwritten by the FCS. + * + * Just use software checksumming and bypass on bigger frames. + */ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + ret = skb_checksum_help(skb); + if (ret) + return ret; + } + word1 |= TSS_BYPASS_BIT; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { int tcp = 0; + /* We do not switch off the checksumming on non TCP/UDP + * frames: as is shown from tests, the checksumming engine + * is smart enough to see that a frame is not actually TCP + * or UDP and then just pass it through without any changes + * to the frame. + */ if (skb->protocol == htons(ETH_P_IP)) { word1 |= TSS_IP_CHKSUM_BIT; tcp = ip_hdr(skb)->protocol == IPPROTO_TCP; @@ -1978,15 +2000,6 @@ static int gmac_change_mtu(struct net_device *netdev, int new_mtu) return 0; } -static netdev_features_t gmac_fix_features(struct net_device *netdev, - netdev_features_t features) -{ - if (netdev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK) - features &= ~GMAC_OFFLOAD_FEATURES; - - return features; -} - static int gmac_set_features(struct net_device *netdev, netdev_features_t features) { @@ -2212,7 +2225,6 @@ static const struct net_device_ops gmac_351x_ops = { .ndo_set_mac_address = gmac_set_mac_address, .ndo_get_stats64 = gmac_get_stats64, .ndo_change_mtu = gmac_change_mtu, - .ndo_fix_features = gmac_fix_features, .ndo_set_features = gmac_set_features, }; @@ -2415,8 +2427,8 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) /* Interrupt */ irq = platform_get_irq(pdev, 0); - if (irq <= 0) - return irq ? 
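The gemini hunk above works around a checksum engine that corrupts frames longer than 1514 bytes: oversized frames are checksummed in software via skb_checksum_help() and flagged so the offload engine leaves them alone. The guard, sketched with a hypothetical bypass flag in place of the gemini-specific TSS_BYPASS_BIT:

#include <linux/bits.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>

#define DEMO_HW_CSUM_BYPASS     BIT(30) /* stand-in for the HW bypass flag */

static int demo_tx_prep_csum(struct sk_buff *skb, u32 *word1)
{
        if (skb->len >= ETH_FRAME_LEN) {
                /* resolve CHECKSUM_PARTIAL in software ... */
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        int ret = skb_checksum_help(skb);

                        if (ret)
                                return ret;
                }
                /* ... and tell the engine to pass the frame through */
                *word1 |= DEMO_HW_CSUM_BYPASS;
        }
        return 0;
}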
irq : -ENODEV; + if (irq < 0) + return irq; port->irq = irq; /* Clock the port */ @@ -2464,11 +2476,12 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) netdev->hw_features = GMAC_OFFLOAD_FEATURES; netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO; - /* We can handle jumbo frames up to 10236 bytes so, let's accept - * payloads of 10236 bytes minus VLAN and ethernet header + /* We can receive jumbo frames up to 10236 bytes but only + * transmit 2047 bytes so, let's accept payloads of 2047 + * bytes minus VLAN and ethernet header */ netdev->min_mtu = ETH_MIN_MTU; - netdev->max_mtu = 10236 - VLAN_ETH_HLEN; + netdev->max_mtu = MTU_SIZE_BIT_MASK - VLAN_ETH_HLEN; port->freeq_refill = 0; netif_napi_add(netdev, &port->napi, gmac_napi_poll); @@ -2518,13 +2531,11 @@ unprepare: return ret; } -static int gemini_ethernet_port_remove(struct platform_device *pdev) +static void gemini_ethernet_port_remove(struct platform_device *pdev) { struct gemini_ethernet_port *port = platform_get_drvdata(pdev); gemini_port_remove(port); - - return 0; } static const struct of_device_id gemini_ethernet_port_of_match[] = { @@ -2538,10 +2549,10 @@ MODULE_DEVICE_TABLE(of, gemini_ethernet_port_of_match); static struct platform_driver gemini_ethernet_port_driver = { .driver = { .name = "gemini-ethernet-port", - .of_match_table = of_match_ptr(gemini_ethernet_port_of_match), + .of_match_table = gemini_ethernet_port_of_match, }, .probe = gemini_ethernet_port_probe, - .remove = gemini_ethernet_port_remove, + .remove_new = gemini_ethernet_port_remove, }; static int gemini_ethernet_probe(struct platform_device *pdev) @@ -2583,14 +2594,12 @@ static int gemini_ethernet_probe(struct platform_device *pdev) return devm_of_platform_populate(dev); } -static int gemini_ethernet_remove(struct platform_device *pdev) +static void gemini_ethernet_remove(struct platform_device *pdev) { struct gemini_ethernet *geth = platform_get_drvdata(pdev); geth_cleanup_freeq(geth); geth->initialized = false; - - return 0; } static const struct of_device_id gemini_ethernet_of_match[] = { @@ -2604,10 +2613,10 @@ MODULE_DEVICE_TABLE(of, gemini_ethernet_of_match); static struct platform_driver gemini_ethernet_driver = { .driver = { .name = DRV_NAME, - .of_match_table = of_match_ptr(gemini_ethernet_of_match), + .of_match_table = gemini_ethernet_of_match, }, .probe = gemini_ethernet_probe, - .remove = gemini_ethernet_remove, + .remove_new = gemini_ethernet_remove, }; static int __init gemini_ethernet_module_init(void) diff --git a/drivers/net/ethernet/cortina/gemini.h b/drivers/net/ethernet/cortina/gemini.h index 9fdf77d5eb37..24bb989981f2 100644 --- a/drivers/net/ethernet/cortina/gemini.h +++ b/drivers/net/ethernet/cortina/gemini.h @@ -502,7 +502,7 @@ union gmac_txdesc_3 { #define SOF_BIT 0x80000000 #define EOF_BIT 0x40000000 #define EOFIE_BIT BIT(29) -#define MTU_SIZE_BIT_MASK 0x1fff +#define MTU_SIZE_BIT_MASK 0x7ff /* Max MTU 2047 bytes */ /* GMAC Tx Descriptor */ struct gmac_txdesc { @@ -787,7 +787,7 @@ union gmac_config0 { #define CONFIG0_MAXLEN_1536 0 #define CONFIG0_MAXLEN_1518 1 #define CONFIG0_MAXLEN_1522 2 -#define CONFIG0_MAXLEN_1542 3 +#define CONFIG0_MAXLEN_1548 3 #define CONFIG0_MAXLEN_9k 4 /* 9212 */ #define CONFIG0_MAXLEN_10k 5 /* 10236 */ #define CONFIG0_MAXLEN_1518__6 6 diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 05a89ab6766c..150cc94ae9f8 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1770,8 +1770,7 @@ static 
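Most conversions in this region are the tree-wide platform-driver switch from .remove to .remove_new: the callback returns void because the driver core ignored the returned int anyway, so errors must be logged rather than returned. The resulting shape, a sketch assuming a demo_probe() defined elsewhere:

#include <linux/netdevice.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev);    /* defined elsewhere */

static void demo_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);

        unregister_netdev(ndev);
        free_netdev(ndev);
        /* void return: failures here must be logged, not returned */
}

static struct platform_driver demo_driver = {
        .probe          = demo_probe,
        .remove_new     = demo_remove,
        .driver         = { .name = "demo-eth" },
};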
const struct dev_pm_ops dm9000_drv_pm_ops = { .resume = dm9000_drv_resume, }; -static int -dm9000_drv_remove(struct platform_device *pdev) +static void dm9000_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct board_info *dm = to_dm9000_board(ndev); @@ -1783,7 +1782,6 @@ dm9000_drv_remove(struct platform_device *pdev) regulator_disable(dm->power_supply); dev_dbg(&pdev->dev, "released and freed device\n"); - return 0; } #ifdef CONFIG_OF @@ -1801,7 +1799,7 @@ static struct platform_driver dm9000_driver = { .of_match_table = of_match_ptr(dm9000_of_matches), }, .probe = dm9000_probe, - .remove = dm9000_drv_remove, + .remove_new = dm9000_drv_remove, }; module_platform_driver(dm9000_driver); diff --git a/drivers/net/ethernet/davicom/dm9051.c b/drivers/net/ethernet/davicom/dm9051.c index 70728b2e5f18..bcfe52c11804 100644 --- a/drivers/net/ethernet/davicom/dm9051.c +++ b/drivers/net/ethernet/davicom/dm9051.c @@ -510,10 +510,7 @@ static int dm9051_map_init(struct spi_device *spi, struct board_info *db) regconfigdmbulk.lock_arg = db; db->regmap_dmbulk = devm_regmap_init_spi(db->spidev, &regconfigdmbulk); - if (IS_ERR(db->regmap_dmbulk)) - return PTR_ERR(db->regmap_dmbulk); - - return 0; + return PTR_ERR_OR_ZERO(db->regmap_dmbulk); } static int dm9051_map_chipid(struct board_info *db) @@ -1161,9 +1158,7 @@ static int dm9051_phy_connect(struct board_info *db) db->phydev = phy_connect(db->ndev, phy_id, dm9051_handle_link_change, PHY_INTERFACE_MODE_MII); - if (IS_ERR(db->phydev)) - return PTR_ERR_OR_ZERO(db->phydev); - return 0; + return PTR_ERR_OR_ZERO(db->phydev); } static int dm9051_probe(struct spi_device *spi) diff --git a/drivers/net/ethernet/dec/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h index 0ed598dc7569..bd786dfbc066 100644 --- a/drivers/net/ethernet/dec/tulip/tulip.h +++ b/drivers/net/ethernet/dec/tulip/tulip.h @@ -381,7 +381,7 @@ struct mediatable { unsigned has_reset:6; u32 csr15dir; u32 csr15val; /* 21143 NWay setting.
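The dm9051 hunks above are pure simplification: PTR_ERR_OR_ZERO() folds the usual "if (IS_ERR(p)) return PTR_ERR(p); return 0;" tail into one expression (and tidies the odd PTR_ERR_OR_ZERO-inside-IS_ERR in the PHY path). A sketch, with demo_priv as an illustrative private struct:

#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>

struct demo_priv {
        struct spi_device *spi;
        struct regmap_config cfg;
        struct regmap *regmap;
};

static int demo_map_init(struct demo_priv *db)
{
        db->regmap = devm_regmap_init_spi(db->spi, &db->cfg);
        /* negative errno extracted from the ERR_PTR, or 0 on success */
        return PTR_ERR_OR_ZERO(db->regmap);
}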
*/ - struct medialeaf mleaf[]; + struct medialeaf mleaf[] __counted_by(leafcount); }; diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index 151ca9573be9..2a18df3605f1 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -841,7 +841,7 @@ err_out_free_dev: return err; } -static int dnet_remove(struct platform_device *pdev) +static void dnet_remove(struct platform_device *pdev) { struct net_device *dev; @@ -859,13 +859,11 @@ static int dnet_remove(struct platform_device *pdev) free_irq(dev->irq, dev); free_netdev(dev); } - - return 0; } static struct platform_driver dnet_driver = { .probe = dnet_probe, - .remove = dnet_remove, + .remove_new = dnet_remove, .driver = { .name = "dnet", }, diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 0616b5fe241c..ad862ed7888a 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4986,9 +4986,6 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, if (nla_type(attr) != IFLA_BRIDGE_MODE) continue; - if (nla_len(attr) < sizeof(mode)) - return -EINVAL; - mode = nla_get_u16(attr); if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h index 11b29f56aaf9..f188fba021a6 100644 --- a/drivers/net/ethernet/engleder/tsnep.h +++ b/drivers/net/ethernet/engleder/tsnep.h @@ -14,6 +14,7 @@ #include <linux/net_tstamp.h> #include <linux/ptp_clock_kernel.h> #include <linux/miscdevice.h> +#include <net/xdp.h> #define TSNEP "tsnep" @@ -142,7 +143,7 @@ struct tsnep_rx { struct tsnep_queue { struct tsnep_adapter *adapter; - char name[IFNAMSIZ + 9]; + char name[IFNAMSIZ + 16]; struct tsnep_tx *tx; struct tsnep_rx *rx; diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c index 716815dad7d2..65ec1abc9442 100644 --- a/drivers/net/ethernet/engleder/tsnep_ethtool.c +++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c @@ -300,10 +300,8 @@ static void tsnep_ethtool_get_channels(struct net_device *netdev, { struct tsnep_adapter *adapter = netdev_priv(netdev); - ch->max_rx = adapter->num_rx_queues; - ch->max_tx = adapter->num_tx_queues; - ch->rx_count = adapter->num_rx_queues; - ch->tx_count = adapter->num_tx_queues; + ch->max_combined = adapter->num_queues; + ch->combined_count = adapter->num_queues; } static int tsnep_ethtool_get_ts_info(struct net_device *netdev, diff --git a/drivers/net/ethernet/engleder/tsnep_hw.h b/drivers/net/ethernet/engleder/tsnep_hw.h index 55e1caf193a6..64c97eb66f67 100644 --- a/drivers/net/ethernet/engleder/tsnep_hw.h +++ b/drivers/net/ethernet/engleder/tsnep_hw.h @@ -181,6 +181,8 @@ struct tsnep_gcl_operation { #define TSNEP_DESC_SIZE 256 #define TSNEP_DESC_SIZE_DATA_AFTER 2048 #define TSNEP_DESC_OFFSET 128 +#define TSNEP_DESC_SIZE_DATA_AFTER_INLINE (64 - sizeof(struct tsnep_tx_desc) + \ + sizeof_field(struct tsnep_tx_desc, tx)) #define TSNEP_DESC_OWNER_COUNTER_MASK 0xC0000000 #define TSNEP_DESC_OWNER_COUNTER_SHIFT 30 #define TSNEP_DESC_LENGTH_MASK 0x00003FFF diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index 84751bb303a6..df40c720e7b2 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -28,6 +28,7 @@ #include <linux/iopoll.h> #include <linux/bpf.h> #include <linux/bpf_trace.h> +#include 
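The tulip.h change at the start of this chunk annotates the mleaf[] flexible array with __counted_by(leafcount), tying the array to the member that holds its element count so FORTIFY_SOURCE and UBSAN bounds checking can police accesses; the counter must be assigned before the array is indexed. A minimal sketch:

#include <linux/compiler_attributes.h>
#include <linux/types.h>

struct demo_leaf {
        u32 val;
};

struct demo_table {
        u16 leafcount;                  /* number of valid entries below */
        struct demo_leaf leaf[] __counted_by(leafcount);
};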
<net/page_pool/helpers.h> #include <net/xdp_sock_drv.h> #define TSNEP_RX_OFFSET (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN) @@ -50,12 +51,22 @@ #define TSNEP_COALESCE_USECS_MAX ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \ ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1) -#define TSNEP_TX_TYPE_SKB BIT(0) -#define TSNEP_TX_TYPE_SKB_FRAG BIT(1) -#define TSNEP_TX_TYPE_XDP_TX BIT(2) -#define TSNEP_TX_TYPE_XDP_NDO BIT(3) -#define TSNEP_TX_TYPE_XDP (TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO) -#define TSNEP_TX_TYPE_XSK BIT(4) +/* mapping type */ +#define TSNEP_TX_TYPE_MAP BIT(0) +#define TSNEP_TX_TYPE_MAP_PAGE BIT(1) +#define TSNEP_TX_TYPE_INLINE BIT(2) +/* buffer type */ +#define TSNEP_TX_TYPE_SKB BIT(8) +#define TSNEP_TX_TYPE_SKB_MAP (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_MAP) +#define TSNEP_TX_TYPE_SKB_INLINE (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_INLINE) +#define TSNEP_TX_TYPE_SKB_FRAG BIT(9) +#define TSNEP_TX_TYPE_SKB_FRAG_MAP_PAGE (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_MAP_PAGE) +#define TSNEP_TX_TYPE_SKB_FRAG_INLINE (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_INLINE) +#define TSNEP_TX_TYPE_XDP_TX BIT(10) +#define TSNEP_TX_TYPE_XDP_NDO BIT(11) +#define TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE (TSNEP_TX_TYPE_XDP_NDO | TSNEP_TX_TYPE_MAP_PAGE) +#define TSNEP_TX_TYPE_XDP (TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO) +#define TSNEP_TX_TYPE_XSK BIT(12) #define TSNEP_XDP_TX BIT(0) #define TSNEP_XDP_REDIRECT BIT(1) @@ -86,8 +97,11 @@ static irqreturn_t tsnep_irq(int irq, void *arg) /* handle TX/RX queue 0 interrupt */ if ((active & adapter->queue[0].irq_mask) != 0) { - tsnep_disable_irq(adapter, adapter->queue[0].irq_mask); - napi_schedule(&adapter->queue[0].napi); + if (napi_schedule_prep(&adapter->queue[0].napi)) { + tsnep_disable_irq(adapter, adapter->queue[0].irq_mask); + /* schedule after masking to avoid races */ + __napi_schedule(&adapter->queue[0].napi); + } } return IRQ_HANDLED; @@ -98,8 +112,11 @@ static irqreturn_t tsnep_irq_txrx(int irq, void *arg) struct tsnep_queue *queue = arg; /* handle TX/RX queue interrupt */ - tsnep_disable_irq(queue->adapter, queue->irq_mask); - napi_schedule(&queue->napi); + if (napi_schedule_prep(&queue->napi)) { + tsnep_disable_irq(queue->adapter, queue->irq_mask); + /* schedule after masking to avoid races */ + __napi_schedule(&queue->napi); + } return IRQ_HANDLED; } @@ -409,6 +426,8 @@ static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length, entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG; entry->desc->more_properties = __cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK); + if (entry->type & TSNEP_TX_TYPE_INLINE) + entry->properties |= TSNEP_TX_DESC_DATA_AFTER_DESC_FLAG; /* descriptor properties shall be written last, because valid data is * signaled there @@ -426,39 +445,79 @@ static int tsnep_tx_desc_available(struct tsnep_tx *tx) return tx->read - tx->write - 1; } +static int tsnep_tx_map_frag(skb_frag_t *frag, struct tsnep_tx_entry *entry, + struct device *dmadev, dma_addr_t *dma) +{ + unsigned int len; + int mapped; + + len = skb_frag_size(frag); + if (likely(len > TSNEP_DESC_SIZE_DATA_AFTER_INLINE)) { + *dma = skb_frag_dma_map(dmadev, frag, 0, len, DMA_TO_DEVICE); + if (dma_mapping_error(dmadev, *dma)) + return -ENOMEM; + entry->type = TSNEP_TX_TYPE_SKB_FRAG_MAP_PAGE; + mapped = 1; + } else { + void *fragdata = skb_frag_address_safe(frag); + + if (likely(fragdata)) { + memcpy(&entry->desc->tx, fragdata, len); + } else { + struct page *page = skb_frag_page(frag); + + fragdata = kmap_local_page(page); + memcpy(&entry->desc->tx, 
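Earlier in this chunk, the tsnep interrupt handlers stop masking the device interrupt unconditionally: napi_schedule_prep() first claims the NAPI instance, the IRQ is masked only on success, and __napi_schedule() then queues the poll, closing a race with a concurrent poller. The pattern, with demo_mask_irq() as a hypothetical device-specific masking helper:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

static void demo_mask_irq(void);        /* hypothetical device masking */

static irqreturn_t demo_irq(int irq, void *arg)
{
        struct napi_struct *napi = arg;

        if (napi_schedule_prep(napi)) {
                demo_mask_irq();
                /* schedule after masking to avoid races */
                __napi_schedule(napi);
        }
        return IRQ_HANDLED;
}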
fragdata + skb_frag_off(frag), + len); + kunmap_local(fragdata); + } + entry->type = TSNEP_TX_TYPE_SKB_FRAG_INLINE; + mapped = 0; + } + + return mapped; +} + static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count) { struct device *dmadev = tx->adapter->dmadev; struct tsnep_tx_entry *entry; unsigned int len; - dma_addr_t dma; int map_len = 0; - int i; + dma_addr_t dma; + int i, mapped; for (i = 0; i < count; i++) { entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; if (!i) { len = skb_headlen(skb); - dma = dma_map_single(dmadev, skb->data, len, - DMA_TO_DEVICE); - - entry->type = TSNEP_TX_TYPE_SKB; + if (likely(len > TSNEP_DESC_SIZE_DATA_AFTER_INLINE)) { + dma = dma_map_single(dmadev, skb->data, len, + DMA_TO_DEVICE); + if (dma_mapping_error(dmadev, dma)) + return -ENOMEM; + entry->type = TSNEP_TX_TYPE_SKB_MAP; + mapped = 1; + } else { + memcpy(&entry->desc->tx, skb->data, len); + entry->type = TSNEP_TX_TYPE_SKB_INLINE; + mapped = 0; + } } else { - len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]); - dma = skb_frag_dma_map(dmadev, - &skb_shinfo(skb)->frags[i - 1], - 0, len, DMA_TO_DEVICE); + skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; - entry->type = TSNEP_TX_TYPE_SKB_FRAG; + len = skb_frag_size(frag); + mapped = tsnep_tx_map_frag(frag, entry, dmadev, &dma); + if (mapped < 0) + return mapped; } - if (dma_mapping_error(dmadev, dma)) - return -ENOMEM; entry->len = len; - dma_unmap_addr_set(entry, dma, dma); - - entry->desc->tx = __cpu_to_le64(dma); + if (likely(mapped)) { + dma_unmap_addr_set(entry, dma, dma); + entry->desc->tx = __cpu_to_le64(dma); + } map_len += len; } @@ -477,13 +536,12 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count) entry = &tx->entry[(index + i) & TSNEP_RING_MASK]; if (entry->len) { - if (entry->type & TSNEP_TX_TYPE_SKB) + if (entry->type & TSNEP_TX_TYPE_MAP) dma_unmap_single(dmadev, dma_unmap_addr(entry, dma), dma_unmap_len(entry, len), DMA_TO_DEVICE); - else if (entry->type & - (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_XDP_NDO)) + else if (entry->type & TSNEP_TX_TYPE_MAP_PAGE) dma_unmap_page(dmadev, dma_unmap_addr(entry, dma), dma_unmap_len(entry, len), @@ -579,7 +637,7 @@ static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx, if (dma_mapping_error(dmadev, dma)) return -ENOMEM; - entry->type = TSNEP_TX_TYPE_XDP_NDO; + entry->type = TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE; } else { page = unlikely(frag) ? skb_frag_page(frag) : virt_to_page(xdpf->data); @@ -1333,7 +1391,7 @@ static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi, skb = tsnep_build_skb(rx, page, length); if (skb) { - page_pool_release_page(rx->page_pool, page); + skb_mark_for_recycle(skb); rx->packets++; rx->bytes += length; @@ -1727,6 +1785,10 @@ static int tsnep_poll(struct napi_struct *napi, int budget) if (queue->tx) complete = tsnep_tx_poll(queue->tx, budget); + /* handle case where we are called by netpoll with a budget of 0 */ + if (unlikely(budget <= 0)) + return budget; + if (queue->rx) { done = queue->rx->xsk_pool ? 
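Two small fixes sit in this chunk besides the inline-TX work: received skbs are marked with skb_mark_for_recycle() so their pages flow back to the page pool, and tsnep_poll() now tolerates the zero budget netpoll uses to mean "clean TX only". The poll-side guard, sketched with illustrative demo_* helpers:

#include <linux/netdevice.h>

static void demo_tx_clean(struct napi_struct *napi);            /* stand-in */
static int demo_rx_poll(struct napi_struct *napi, int budget);  /* stand-in */

static int demo_poll(struct napi_struct *napi, int budget)
{
        int done;

        demo_tx_clean(napi);            /* TX completion is always allowed */

        if (unlikely(budget <= 0))      /* netpoll: no RX, no completion */
                return budget;

        done = demo_rx_poll(napi, budget);
        if (done < budget)
                napi_complete_done(napi, done);
        return done;
}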
tsnep_rx_poll_zc(queue->rx, napi, budget) : @@ -1768,14 +1830,14 @@ static int tsnep_request_irq(struct tsnep_queue *queue, bool first) dev = queue->adapter; } else { if (queue->tx && queue->rx) - sprintf(queue->name, "%s-txrx-%d", name, - queue->rx->queue_index); + snprintf(queue->name, sizeof(queue->name), "%s-txrx-%d", + name, queue->rx->queue_index); else if (queue->tx) - sprintf(queue->name, "%s-tx-%d", name, - queue->tx->queue_index); + snprintf(queue->name, sizeof(queue->name), "%s-tx-%d", + name, queue->tx->queue_index); else - sprintf(queue->name, "%s-rx-%d", name, - queue->rx->queue_index); + snprintf(queue->name, sizeof(queue->name), "%s-rx-%d", + name, queue->rx->queue_index); handler = tsnep_irq_txrx; dev = queue; } @@ -2576,7 +2638,7 @@ mdio_init_failed: return retval; } -static int tsnep_remove(struct platform_device *pdev) +static void tsnep_remove(struct platform_device *pdev) { struct tsnep_adapter *adapter = platform_get_drvdata(pdev); @@ -2592,8 +2654,6 @@ static int tsnep_remove(struct platform_device *pdev) mdiobus_unregister(adapter->mdiobus); tsnep_disable_irq(adapter, ECM_INT_ALL); - - return 0; } static const struct of_device_id tsnep_of_match[] = { @@ -2608,7 +2668,7 @@ static struct platform_driver tsnep_driver = { .of_match_table = tsnep_of_match, }, .probe = tsnep_probe, - .remove = tsnep_remove, + .remove_new = tsnep_remove, }; module_platform_driver(tsnep_driver); diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 95cbad198b4b..ad41c9019018 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1254,7 +1254,7 @@ out: * ethoc_remove - shutdown OpenCores ethernet MAC * @pdev: platform device */ -static int ethoc_remove(struct platform_device *pdev) +static void ethoc_remove(struct platform_device *pdev) { struct net_device *netdev = platform_get_drvdata(pdev); struct ethoc *priv = netdev_priv(netdev); @@ -1271,8 +1271,6 @@ static int ethoc_remove(struct platform_device *pdev) unregister_netdev(netdev); free_netdev(netdev); } - - return 0; } #ifdef CONFIG_PM @@ -1298,7 +1296,7 @@ MODULE_DEVICE_TABLE(of, ethoc_match); static struct platform_driver ethoc_driver = { .probe = ethoc_probe, - .remove = ethoc_remove, + .remove_new = ethoc_remove, .suspend = ethoc_suspend, .resume = ethoc_resume, .driver = { diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index f1eb660aaee2..4d7184d46824 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -6,10 +6,9 @@ #include <linux/module.h> #include <linux/etherdevice.h> #include <linux/interrupt.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> +#include <linux/mod_devicetable.h> #include <linux/of_net.h> -#include <linux/of_platform.h> +#include <linux/platform_device.h> #include "nps_enet.h" #define DRV_NAME "nps_mgt_enet" @@ -199,7 +198,7 @@ static int nps_enet_poll(struct napi_struct *napi, int budget) */ if (nps_enet_is_tx_pending(priv)) { nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0); - napi_reschedule(napi); + napi_schedule(napi); } } diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index a03879a27b04..fddfd1dd5070 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -177,16 +177,20 @@ static void ftgmac100_write_mac_addr(struct ftgmac100 *priv, const u8 *mac) iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR); } -static void ftgmac100_initial_mac(struct 
ftgmac100 *priv) +static int ftgmac100_initial_mac(struct ftgmac100 *priv) { u8 mac[ETH_ALEN]; unsigned int m; unsigned int l; + int err; - if (!device_get_ethdev_address(priv->dev, priv->netdev)) { + err = of_get_ethdev_address(priv->dev->of_node, priv->netdev); + if (err == -EPROBE_DEFER) + return err; + if (!err) { dev_info(priv->dev, "Read MAC address %pM from device tree\n", priv->netdev->dev_addr); - return; + return 0; } m = ioread32(priv->base + FTGMAC100_OFFSET_MAC_MADR); @@ -207,6 +211,8 @@ static void ftgmac100_initial_mac(struct ftgmac100 *priv) dev_info(priv->dev, "Generated random MAC address %pM\n", priv->netdev->dev_addr); } + + return 0; } static int ftgmac100_set_mac_addr(struct net_device *dev, void *p) @@ -1843,7 +1849,9 @@ static int ftgmac100_probe(struct platform_device *pdev) priv->aneg_pause = true; /* MAC address from chip or random one */ - ftgmac100_initial_mac(priv); + err = ftgmac100_initial_mac(priv); + if (err) + goto err_phy_connect; np = pdev->dev.of_node; if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") || @@ -2004,7 +2012,7 @@ err_alloc_etherdev: return err; } -static int ftgmac100_remove(struct platform_device *pdev) +static void ftgmac100_remove(struct platform_device *pdev) { struct net_device *netdev; struct ftgmac100 *priv; @@ -2032,7 +2040,6 @@ static int ftgmac100_remove(struct platform_device *pdev) netif_napi_del(&priv->napi); free_netdev(netdev); - return 0; } static const struct of_device_id ftgmac100_of_match[] = { @@ -2043,7 +2050,7 @@ MODULE_DEVICE_TABLE(of, ftgmac100_of_match); static struct platform_driver ftgmac100_driver = { .probe = ftgmac100_probe, - .remove = ftgmac100_remove, + .remove_new = ftgmac100_remove, .driver = { .name = DRV_NAME, .of_match_table = ftgmac100_of_match, diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index 139fe66f8bcd..003bc9a45c65 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -149,6 +149,40 @@ static void ftmac100_set_mac(struct ftmac100 *priv, const unsigned char *mac) iowrite32(laddr, priv->base + FTMAC100_OFFSET_MAC_LADR); } +static void ftmac100_setup_mc_ht(struct ftmac100 *priv) +{ + struct netdev_hw_addr *ha; + u64 maht = 0; /* Multicast Address Hash Table */ + + netdev_for_each_mc_addr(ha, priv->netdev) { + u32 hash = ether_crc(ETH_ALEN, ha->addr) >> 26; + + maht |= BIT_ULL(hash); + } + + iowrite32(lower_32_bits(maht), priv->base + FTMAC100_OFFSET_MAHT0); + iowrite32(upper_32_bits(maht), priv->base + FTMAC100_OFFSET_MAHT1); +} + +static void ftmac100_set_rx_bits(struct ftmac100 *priv, unsigned int *maccr) +{ + struct net_device *netdev = priv->netdev; + + /* Clear all */ + *maccr &= ~(FTMAC100_MACCR_RCV_ALL | FTMAC100_MACCR_RX_MULTIPKT | + FTMAC100_MACCR_HT_MULTI_EN); + + /* Set the requested bits */ + if (netdev->flags & IFF_PROMISC) + *maccr |= FTMAC100_MACCR_RCV_ALL; + if (netdev->flags & IFF_ALLMULTI) + *maccr |= FTMAC100_MACCR_RX_MULTIPKT; + else if (netdev_mc_count(netdev)) { + *maccr |= FTMAC100_MACCR_HT_MULTI_EN; + ftmac100_setup_mc_ht(priv); + } +} + #define MACCR_ENABLE_ALL (FTMAC100_MACCR_XMT_EN | \ FTMAC100_MACCR_RCV_EN | \ FTMAC100_MACCR_XDMA_EN | \ @@ -182,11 +216,7 @@ static int ftmac100_start_hw(struct ftmac100 *priv) if (netdev->mtu > ETH_DATA_LEN) maccr |= FTMAC100_MACCR_RX_FTL; - /* Add other bits as needed */ - if (netdev->flags & IFF_PROMISC) - maccr |= FTMAC100_MACCR_RCV_ALL; - if (netdev->flags & IFF_ALLMULTI) - maccr |= FTMAC100_MACCR_RX_MULTIPKT; + 
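The new ftmac100 receive-filter code above implements the classic 64-bin multicast hash: the top six bits of the Ethernet CRC of each multicast address select a bin, and the resulting 64-bit map is split across the two 32-bit MAHT registers, with IFF_PROMISC and IFF_ALLMULTI short-circuiting the filter entirely. The bin computation, as a sketch:

#include <linux/bits.h>
#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static u64 demo_mc_hash_table(struct net_device *netdev)
{
        struct netdev_hw_addr *ha;
        u64 maht = 0;

        netdev_for_each_mc_addr(ha, netdev) {
                u32 hash = ether_crc(ETH_ALEN, ha->addr) >> 26; /* 0..63 */

                maht |= BIT_ULL(hash);
        }
        return maht;    /* lower/upper 32 bits go to MAHT0/MAHT1 */
}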
ftmac100_set_rx_bits(priv, &maccr); iowrite32(maccr, priv->base + FTMAC100_OFFSET_MACCR); return 0; @@ -1067,6 +1097,15 @@ static int ftmac100_change_mtu(struct net_device *netdev, int mtu) return 0; } +static void ftmac100_set_rx_mode(struct net_device *netdev) +{ + struct ftmac100 *priv = netdev_priv(netdev); + unsigned int maccr = ioread32(priv->base + FTMAC100_OFFSET_MACCR); + + ftmac100_set_rx_bits(priv, &maccr); + iowrite32(maccr, priv->base + FTMAC100_OFFSET_MACCR); +} + static const struct net_device_ops ftmac100_netdev_ops = { .ndo_open = ftmac100_open, .ndo_stop = ftmac100_stop, @@ -1075,6 +1114,7 @@ static const struct net_device_ops ftmac100_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_eth_ioctl = ftmac100_do_ioctl, .ndo_change_mtu = ftmac100_change_mtu, + .ndo_set_rx_mode = ftmac100_set_rx_mode, }; /****************************************************************************** @@ -1179,7 +1219,7 @@ err_alloc_etherdev: return err; } -static int ftmac100_remove(struct platform_device *pdev) +static void ftmac100_remove(struct platform_device *pdev) { struct net_device *netdev; struct ftmac100 *priv; @@ -1194,7 +1234,6 @@ static int ftmac100_remove(struct platform_device *pdev) netif_napi_del(&priv->napi); free_netdev(netdev); - return 0; } static const struct of_device_id ftmac100_of_ids[] = { @@ -1204,7 +1243,7 @@ static const struct of_device_id ftmac100_of_ids[] = { static struct platform_driver ftmac100_driver = { .probe = ftmac100_probe, - .remove = ftmac100_remove, + .remove_new = ftmac100_remove, .driver = { .name = DRV_NAME, .of_match_table = ftmac100_of_ids diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 431f8917dc39..dcbc598b11c6 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -7,8 +7,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/of_platform.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/io.h> @@ -17,6 +17,7 @@ #include <linux/icmp.h> #include <linux/ip.h> #include <linux/ipv6.h> +#include <linux/platform_device.h> #include <linux/udp.h> #include <linux/tcp.h> #include <linux/net.h> @@ -3497,7 +3498,7 @@ free_netdev: return err; } -static int dpaa_remove(struct platform_device *pdev) +static void dpaa_remove(struct platform_device *pdev) { struct net_device *net_dev; struct dpaa_priv *priv; @@ -3516,6 +3517,9 @@ static int dpaa_remove(struct platform_device *pdev) phylink_destroy(priv->mac_dev->phylink); err = dpaa_fq_free(dev, &priv->dpaa_fq_list); + if (err) + dev_err(dev, "Failed to free FQs on remove (%pE)\n", + ERR_PTR(err)); qman_delete_cgr_safe(&priv->ingress_cgr); qman_release_cgrid(priv->ingress_cgr.cgrid); @@ -3527,8 +3531,6 @@ static int dpaa_remove(struct platform_device *pdev) dpaa_bps_free(priv); free_netdev(net_dev); - - return err; } static const struct platform_device_id dpaa_devtype[] = { @@ -3546,7 +3548,7 @@ static struct platform_driver dpaa_driver = { }, .id_table = dpaa_devtype, .probe = dpaa_eth_probe, - .remove = dpaa_remove + .remove_new = dpaa_remove }; static int __init dpaa_load(void) diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h index 35b8cea7f886..ac3c8ed57bbe 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h @@ -8,6 +8,7 @@ #include 
<linux/netdevice.h> #include <linux/refcount.h> +#include <net/xdp.h> #include <soc/fsl/qman.h> #include <soc/fsl/bman.h> diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 9c71cbbb13d8..5bd0b36d1feb 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -6,7 +6,9 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/string.h> +#include <linux/of.h> #include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/net_tstamp.h> #include <linux/fsl/ptp_qoriq.h> diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index a9676d0dece8..888509cf1f21 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -516,8 +516,6 @@ struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv, memcpy(skb->data, fd_vaddr + fd_offset, fd_length); - dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd)); - return skb; } @@ -589,6 +587,7 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, struct rtnl_link_stats64 *percpu_stats; struct dpaa2_eth_drv_stats *percpu_extras; struct device *dev = priv->net_dev->dev.parent; + bool recycle_rx_buf = false; void *buf_data; u32 xdp_act; @@ -618,6 +617,8 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, dma_unmap_page(dev, addr, priv->rx_buf_size, DMA_BIDIRECTIONAL); skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr); + } else { + recycle_rx_buf = true; } } else if (fd_format == dpaa2_fd_sg) { WARN_ON(priv->xdp_prog); @@ -637,6 +638,9 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, goto err_build_skb; dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb); + + if (recycle_rx_buf) + dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd)); return; err_build_skb: @@ -1073,14 +1077,12 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv, dma_addr_t addr; buffer_start = skb->data - dpaa2_eth_needed_headroom(skb); - - /* If there's enough room to align the FD address, do it. - * It will help hardware optimize accesses. 
- */ aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, DPAA2_ETH_TX_BUF_ALIGN); if (aligned_start >= skb->head) buffer_start = aligned_start; + else + return -ENOMEM; /* Store a backpointer to the skb at the beginning of the buffer * (in the private data area) such that we can release it @@ -4967,6 +4969,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) if (err) goto err_dl_port_add; + net_dev->needed_headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN; + err = register_netdev(net_dev); if (err < 0) { dev_err(dev, "register_netdev() failed\n"); @@ -5087,7 +5091,6 @@ MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table); static struct fsl_mc_driver dpaa2_eth_driver = { .driver = { .name = KBUILD_MODNAME, - .owner = THIS_MODULE, }, .probe = dpaa2_eth_probe, .remove = dpaa2_eth_remove, diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index d56d7a13262e..834cba8c3a41 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -12,6 +12,7 @@ #include <linux/fsl/mc.h> #include <linux/net_tstamp.h> #include <net/devlink.h> +#include <net/xdp.h> #include <soc/fsl/dpaa2-io.h> #include <soc/fsl/dpaa2-fd.h> @@ -739,7 +740,7 @@ static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options) static inline unsigned int dpaa2_eth_needed_headroom(struct sk_buff *skb) { - unsigned int headroom = DPAA2_ETH_SWA_SIZE; + unsigned int headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN; /* If we don't have an skb (e.g. XDP buffer), we only need space for * the software annotation area diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c index c39b866e2582..b6a534a3e0b1 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c @@ -17,14 +17,14 @@ static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls, struct dpsw_acl_fields *acl_h, *acl_m; if (dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_VLAN) | - BIT(FLOW_DISSECTOR_KEY_PORTS) | - BIT(FLOW_DISSECTOR_KEY_IP) | - BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) { + ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IP) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) { NL_SET_ERR_MSG_MOD(extack, "Unsupported keys used"); return -EOPNOTSUPP; @@ -139,7 +139,8 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block, err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle, filter_block->acl_id, acl_entry_cfg); - dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff), + dma_unmap_single(dev, acl_entry_cfg->key_iova, + DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE); if (err) { dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err); @@ -181,8 +182,8 @@ dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block, err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle, block->acl_id, acl_entry_cfg); - dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff), - DMA_TO_DEVICE); + dma_unmap_single(dev, acl_entry_cfg->key_iova, + 
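The dpaa2_eth_build_single_fd() hunk above stops treating alignment as best-effort: the frame-descriptor address is floored to DPAA2_ETH_TX_BUF_ALIGN, and if the floored pointer would fall before skb->head the function now returns -ENOMEM (the probe path grows needed_headroom so this should not trigger). The floor idiom, since PTR_ALIGN() itself rounds up, sketched generically:

#include <linux/align.h>
#include <linux/skbuff.h>

/* Step back one alignment unit, then round up: the result is aligned
 * and never above the input. NULL means the skb headroom is too small.
 */
static void *demo_align_down_in_headroom(struct sk_buff *skb, void *ptr,
                                         unsigned long align)
{
        void *aligned = PTR_ALIGN(ptr - align, align);

        if (aligned < (void *)skb->head)
                return NULL;    /* caller maps this to -ENOMEM */
        return aligned;
}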
DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE); if (err) { dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err); kfree(cmd_buff); @@ -539,9 +540,9 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls, int ret = -EOPNOTSUPP; if (dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_VLAN))) { + ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN))) { NL_SET_ERR_MSG_MOD(extack, "Mirroring is supported only per VLAN"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index 21cc4e52425a..e01a246124ac 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -1998,9 +1998,6 @@ static int dpaa2_switch_port_attr_set_event(struct net_device *netdev, return notifier_from_errno(err); } -static struct notifier_block dpaa2_switch_port_switchdev_nb; -static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb; - static int dpaa2_switch_port_bridge_join(struct net_device *netdev, struct net_device *upper_dev, struct netlink_ext_ack *extack) @@ -2043,9 +2040,7 @@ static int dpaa2_switch_port_bridge_join(struct net_device *netdev, goto err_egress_flood; err = switchdev_bridge_port_offload(netdev, netdev, NULL, - &dpaa2_switch_port_switchdev_nb, - &dpaa2_switch_port_switchdev_blocking_nb, - false, extack); + NULL, NULL, false, extack); if (err) goto err_switchdev_offload; @@ -2079,9 +2074,7 @@ static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, vo static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev) { - switchdev_bridge_port_unoffload(netdev, NULL, - &dpaa2_switch_port_switchdev_nb, - &dpaa2_switch_port_switchdev_blocking_nb); + switchdev_bridge_port_unoffload(netdev, NULL, NULL, NULL); } static int dpaa2_switch_port_bridge_leave(struct net_device *netdev) @@ -3457,7 +3450,6 @@ MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table); static struct fsl_mc_driver dpaa2_switch_drv = { .driver = { .name = KBUILD_MODNAME, - .owner = THIS_MODULE, }, .probe = dpaa2_switch_probe, .remove = dpaa2_switch_remove, diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 35461165de0d..cffbf27c4656 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -1655,7 +1655,7 @@ out: rx_ring->stats.bytes += rx_byte_cnt; if (xdp_redirect_frm_cnt) - xdp_do_flush_map(); + xdp_do_flush(); if (xdp_tx_frm_cnt) enetc_update_tx_ring_tail(tx_ring); @@ -2769,7 +2769,7 @@ static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog, if (priv->min_num_stack_tx_queues + num_xdp_tx_queues > priv->num_tx_rings) { NL_SET_ERR_MSG_FMT_MOD(extack, - "Reserving %d XDP TXQs does not leave a minimum of %d TXQs for network stack (total %d available)", + "Reserving %d XDP TXQs does not leave a minimum of %d for stack (total %d)", num_xdp_tx_queues, priv->min_num_stack_tx_queues, priv->num_tx_rings); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index 8577cf7699a0..a9c2ff22431c 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -11,6 +11,7 @@ #include <linux/if_vlan.h> #include <linux/phylink.h> #include <linux/dim.h> +#include <net/xdp.h> 
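The enetc hunk above is part of the xdp_do_flush_map() to xdp_do_flush() rename; the semantics are unchanged: after a NAPI poll has redirected any frames, the per-CPU redirect queues (devmap/cpumap/xskmap) are flushed once, not per packet. A sketch of the end-of-poll bookkeeping, with a hypothetical doorbell helper:

#include <linux/filter.h>

static void demo_ring_doorbell(void);   /* hypothetical TX tail update */

static void demo_poll_tail(unsigned int xdp_redirects, unsigned int xdp_tx)
{
        if (xdp_redirects)
                xdp_do_flush();         /* one flush per NAPI cycle */
        if (xdp_tx)
                demo_ring_doorbell();
}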
#include "enetc_hw.h" @@ -296,7 +297,7 @@ struct enetc_int_vector { char name[ENETC_INT_NAME_MAX]; struct enetc_bdr rx_ring; - struct enetc_bdr tx_ring[]; + struct enetc_bdr tx_ring[] __counted_by(count_tx_rings); } ____cacheline_aligned_in_smp; struct enetc_cls_rule { diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c index b307bef4dc29..d39617ab9306 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c @@ -18,8 +18,8 @@ */ #include <linux/io.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/of_device.h> #include <linux/pci.h> #include <linux/platform_device.h> #include "enetc.h" diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index e0a4cb7e3f50..c153dc083aff 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -1402,7 +1402,7 @@ static void enetc_fixup_clear_rss_rfs(struct pci_dev *pdev) return; si = enetc_psi_create(pdev); - if (si) + if (!IS_ERR(si)) enetc_psi_destroy(pdev); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF, diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c index 17c097cef7d4..5243fc031058 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c @@ -8,7 +8,7 @@ #include "enetc.h" int enetc_phc_index = -1; -EXPORT_SYMBOL(enetc_phc_index); +EXPORT_SYMBOL_GPL(enetc_phc_index); static struct ptp_clock_info enetc_ptp_caps = { .owner = THIS_MODULE, diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index 270cbd5e8684..b65da49dd926 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -443,7 +443,7 @@ struct enetc_psfp_gate { u32 num_entries; refcount_t refcount; struct hlist_node node; - struct action_gate_entry entries[]; + struct action_gate_entry entries[] __counted_by(num_entries); }; /* Only enable the green color frame now @@ -483,13 +483,13 @@ struct enetc_psfp { static struct actions_fwd enetc_act_fwd[] = { { BIT(FLOW_ACTION_GATE), - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS), + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS), FILTER_ACTION_TYPE_PSFP }, { BIT(FLOW_ACTION_POLICE) | BIT(FLOW_ACTION_GATE), - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS), + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS), FILTER_ACTION_TYPE_PSFP }, /* example for ACL actions */ @@ -1069,8 +1069,8 @@ revert_sid: return err; } -static struct actions_fwd *enetc_check_flow_actions(u64 acts, - unsigned int inputkeys) +static struct actions_fwd * +enetc_check_flow_actions(u64 acts, unsigned long long inputkeys) { int i; diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 63a053dea819..a8fbcada6b01 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -22,6 +22,7 @@ #include <linux/timecounter.h> #include <dt-bindings/firmware/imx/rsrc.h> #include <linux/firmware/imx/sci.h> +#include <net/xdp.h> #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ @@ -547,13 +548,11 @@ enum { enum fec_txbuf_type { FEC_TXBUF_T_SKB, FEC_TXBUF_T_XDP_NDO, + FEC_TXBUF_T_XDP_TX, }; struct fec_tx_buffer { - union { - struct 
sk_buff *skb; - struct xdp_frame *xdp; - }; + void *buf_p; enum fec_txbuf_type type; }; @@ -651,12 +650,9 @@ struct fec_enet_private { struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_caps; - unsigned long last_overflow_check; spinlock_t tmreg_lock; struct cyclecounter cc; struct timecounter tc; - int rx_hwtstamp_filter; - u32 base_incval; u32 cycle_speed; int hwts_rx_en; int hwts_tx_en; @@ -679,8 +675,6 @@ struct fec_enet_private { struct ethtool_eee eee; unsigned int clk_ref_rate; - u32 rx_copybreak; - /* ptp clock period in ns*/ unsigned int ptp_inc; @@ -703,9 +697,9 @@ struct fec_enet_private { void fec_ptp_init(struct platform_device *pdev, int irq_idx); void fec_ptp_stop(struct platform_device *pdev); void fec_ptp_start_cyclecounter(struct net_device *ndev); -void fec_ptp_disable_hwts(struct net_device *ndev); -int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); -int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr); +int fec_ptp_set(struct net_device *ndev, struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); +void fec_ptp_get(struct net_device *ndev, struct kernel_hwtstamp_config *config); /****************************************************************************/ #endif /* FEC_H */ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 66b5cbdb43b9..e08c7b572497 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -38,6 +38,7 @@ #include <linux/in.h> #include <linux/ip.h> #include <net/ip.h> +#include <net/page_pool/helpers.h> #include <net/selftests.h> #include <net/tso.h> #include <linux/tcp.h> @@ -51,11 +52,11 @@ #include <linux/clk.h> #include <linux/crc32.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/mdio.h> #include <linux/phy.h> #include <linux/fec.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/regulator/consumer.h> @@ -68,6 +69,7 @@ #include <soc/imx/cpuidle.h> #include <linux/filter.h> #include <linux/bpf.h> +#include <linux/bpf_trace.h> #include <asm/cacheflush.h> @@ -75,6 +77,9 @@ static void set_multicast_list(struct net_device *ndev); static void fec_enet_itr_coal_set(struct net_device *ndev); +static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep, + int cpu, struct xdp_buff *xdp, + u32 dma_sync_len); #define DRIVER_NAME "fec" @@ -182,65 +187,22 @@ static struct platform_device_id fec_devtype[] = { .name = DRIVER_NAME, .driver_data = 0, }, { - .name = "imx25-fec", - .driver_data = (kernel_ulong_t)&fec_imx25_info, - }, { - .name = "imx27-fec", - .driver_data = (kernel_ulong_t)&fec_imx27_info, - }, { - .name = "imx28-fec", - .driver_data = (kernel_ulong_t)&fec_imx28_info, - }, { - .name = "imx6q-fec", - .driver_data = (kernel_ulong_t)&fec_imx6q_info, - }, { - .name = "mvf600-fec", - .driver_data = (kernel_ulong_t)&fec_mvf600_info, - }, { - .name = "imx6sx-fec", - .driver_data = (kernel_ulong_t)&fec_imx6x_info, - }, { - .name = "imx6ul-fec", - .driver_data = (kernel_ulong_t)&fec_imx6ul_info, - }, { - .name = "imx8mq-fec", - .driver_data = (kernel_ulong_t)&fec_imx8mq_info, - }, { - .name = "imx8qm-fec", - .driver_data = (kernel_ulong_t)&fec_imx8qm_info, - }, { - .name = "s32v234-fec", - .driver_data = (kernel_ulong_t)&fec_s32v234_info, - }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(platform, fec_devtype); -enum imx_fec_type { - IMX25_FEC = 1, /* runs on i.mx25/50/53 */ - IMX27_FEC, /* runs on i.mx27/35/51 */ 
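The fec_main.c hunk above removes an indirection: instead of OF matches pointing into the platform_device_id table by enum index, each of_device_id now carries its fec_devinfo pointer directly. The usual retrieval on the probe side, sketched with illustrative demo names:

#include <linux/errno.h>
#include <linux/mod_devicetable.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/types.h>

struct demo_devinfo {
        u32 quirks;
};

static const struct demo_devinfo demo_v1_info = { .quirks = 0x1 };

static const struct of_device_id demo_dt_ids[] = {
        { .compatible = "vendor,demo-v1", .data = &demo_v1_info, },
        { /* sentinel */ }
};

static int demo_probe(struct platform_device *pdev)
{
        const struct demo_devinfo *info = of_device_get_match_data(&pdev->dev);

        return info ? 0 : -ENODEV;      /* variant data drives feature setup */
}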
- IMX28_FEC, - IMX6Q_FEC, - MVF600_FEC, - IMX6SX_FEC, - IMX6UL_FEC, - IMX8MQ_FEC, - IMX8QM_FEC, - S32V234_FEC, -}; - static const struct of_device_id fec_dt_ids[] = { - { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], }, - { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], }, - { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], }, - { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, - { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, - { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], }, - { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], }, - { .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], }, - { .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], }, - { .compatible = "fsl,s32v234-fec", .data = &fec_devtype[S32V234_FEC], }, + { .compatible = "fsl,imx25-fec", .data = &fec_imx25_info, }, + { .compatible = "fsl,imx27-fec", .data = &fec_imx27_info, }, + { .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, }, + { .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, }, + { .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, }, + { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6x_info, }, + { .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, }, + { .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, }, + { .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, }, + { .compatible = "fsl,s32v234-fec", .data = &fec_s32v234_info, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fec_dt_ids); @@ -325,8 +287,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #define FEC_WOL_FLAG_ENABLE (0x1 << 1) #define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2) -#define COPYBREAK_DEFAULT 256 - /* Max number of allowed TCP segments for software TSO */ #define FEC_MAX_TSO_SEGS 100 #define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) @@ -397,12 +357,76 @@ static void fec_dump(struct net_device *ndev) fec16_to_cpu(bdp->cbd_sc), fec32_to_cpu(bdp->cbd_bufaddr), fec16_to_cpu(bdp->cbd_datlen), - txq->tx_buf[index].skb); + txq->tx_buf[index].buf_p); bdp = fec_enet_get_nextdesc(bdp, &txq->bd); index++; } while (bdp != txq->bd.base); } +/* + * Coldfire does not support DMA coherent allocations, and has historically used + * a band-aid with a manual flush in fec_enet_rx_queue. 
+ */ +#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA) +static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, + gfp_t gfp) +{ + return dma_alloc_noncoherent(dev, size, handle, DMA_BIDIRECTIONAL, gfp); +} + +static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t handle) +{ + dma_free_noncoherent(dev, size, cpu_addr, handle, DMA_BIDIRECTIONAL); +} +#else /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */ +static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, + gfp_t gfp) +{ + return dma_alloc_coherent(dev, size, handle, gfp); +} + +static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t handle) +{ + dma_free_coherent(dev, size, cpu_addr, handle); +} +#endif /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */ + +struct fec_dma_devres { + size_t size; + void *vaddr; + dma_addr_t dma_handle; +}; + +static void fec_dmam_release(struct device *dev, void *res) +{ + struct fec_dma_devres *this = res; + + fec_dma_free(dev, this->size, this->vaddr, this->dma_handle); +} + +static void *fec_dmam_alloc(struct device *dev, size_t size, dma_addr_t *handle, + gfp_t gfp) +{ + struct fec_dma_devres *dr; + void *vaddr; + + dr = devres_alloc(fec_dmam_release, sizeof(*dr), gfp); + if (!dr) + return NULL; + vaddr = fec_dma_alloc(dev, size, handle, gfp); + if (!vaddr) { + devres_free(dr); + return NULL; + } + dr->vaddr = vaddr; + dr->dma_handle = *handle; + dr->size = size; + devres_add(dev, dr); + return vaddr; +} + static inline bool is_ipv4_pkt(struct sk_buff *skb) { return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4; @@ -654,7 +678,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, index = fec_enet_get_bd_index(last_bdp, &txq->bd); /* Save skb pointer */ - txq->tx_buf[index].skb = skb; + txq->tx_buf[index].buf_p = skb; /* Make sure the updates to rest of the descriptor are performed before * transferring ownership. 
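fec_dmam_alloc() above is a hand-rolled device-managed DMA allocation: the stock dmam_alloc_coherent() cannot be used because ColdFire needs the noncoherent variant, so the driver wraps its own alloc/free pair in devres and the memory is released automatically when the device is unbound. The general recipe, sketched over the coherent API:

#include <linux/device.h>
#include <linux/dma-mapping.h>

struct demo_dma_devres {
        size_t size;
        void *vaddr;
        dma_addr_t dma;
};

static void demo_dmam_release(struct device *dev, void *res)
{
        struct demo_dma_devres *dr = res;

        dma_free_coherent(dev, dr->size, dr->vaddr, dr->dma);
}

static void *demo_dmam_alloc(struct device *dev, size_t size,
                             dma_addr_t *dma, gfp_t gfp)
{
        struct demo_dma_devres *dr;
        void *vaddr;

        dr = devres_alloc(demo_dmam_release, sizeof(*dr), gfp);
        if (!dr)
                return NULL;
        vaddr = dma_alloc_coherent(dev, size, dma, gfp);
        if (!vaddr) {
                devres_free(dr);
                return NULL;
        }
        dr->size = size;
        dr->vaddr = vaddr;
        dr->dma = *dma;
        devres_add(dev, dr);    /* freed automatically on unbind */
        return vaddr;
}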
@@ -860,7 +884,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, } /* Save skb pointer */ - txq->tx_buf[index].skb = skb; + txq->tx_buf[index].buf_p = skb; skb_tx_timestamp(skb); txq->bd.cur = bdp; @@ -957,26 +981,27 @@ static void fec_enet_bd_init(struct net_device *dev) fec32_to_cpu(bdp->cbd_bufaddr), fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE); - if (txq->tx_buf[i].skb) { - dev_kfree_skb_any(txq->tx_buf[i].skb); - txq->tx_buf[i].skb = NULL; - } - } else { + if (txq->tx_buf[i].buf_p) + dev_kfree_skb_any(txq->tx_buf[i].buf_p); + } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) { if (bdp->cbd_bufaddr) dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr), fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE); - if (txq->tx_buf[i].xdp) { - xdp_return_frame(txq->tx_buf[i].xdp); - txq->tx_buf[i].xdp = NULL; - } + if (txq->tx_buf[i].buf_p) + xdp_return_frame(txq->tx_buf[i].buf_p); + } else { + struct page *page = txq->tx_buf[i].buf_p; - /* restore default tx buffer type: FEC_TXBUF_T_SKB */ - txq->tx_buf[i].type = FEC_TXBUF_T_SKB; + if (page) + page_pool_put_page(page->pp, page, 0, false); } + txq->tx_buf[i].buf_p = NULL; + /* restore default tx buffer type: FEC_TXBUF_T_SKB */ + txq->tx_buf[i].type = FEC_TXBUF_T_SKB; bdp->cbd_bufaddr = cpu_to_fec32(0); bdp = fec_enet_get_nextdesc(bdp, &txq->bd); } @@ -1383,6 +1408,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget) struct netdev_queue *nq; int index = 0; int entries_free; + struct page *page; + int frame_len; fep = netdev_priv(ndev); @@ -1404,8 +1431,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget) index = fec_enet_get_bd_index(bdp, &txq->bd); if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) { - skb = txq->tx_buf[index].skb; - txq->tx_buf[index].skb = NULL; + skb = txq->tx_buf[index].buf_p; if (bdp->cbd_bufaddr && !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) dma_unmap_single(&fep->pdev->dev, @@ -1424,17 +1450,24 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget) if (unlikely(!budget)) break; - xdpf = txq->tx_buf[index].xdp; - if (bdp->cbd_bufaddr) - dma_unmap_single(&fep->pdev->dev, - fec32_to_cpu(bdp->cbd_bufaddr), - fec16_to_cpu(bdp->cbd_datlen), - DMA_TO_DEVICE); + if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) { + xdpf = txq->tx_buf[index].buf_p; + if (bdp->cbd_bufaddr) + dma_unmap_single(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), + fec16_to_cpu(bdp->cbd_datlen), + DMA_TO_DEVICE); + } else { + page = txq->tx_buf[index].buf_p; + } + bdp->cbd_bufaddr = cpu_to_fec32(0); - if (!xdpf) { + if (unlikely(!txq->tx_buf[index].buf_p)) { txq->tx_buf[index].type = FEC_TXBUF_T_SKB; goto tx_buf_done; } + + frame_len = fec16_to_cpu(bdp->cbd_datlen); } /* Check for errors. 
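The fec TX path in this chunk replaces the skb/xdp_frame union with a single void *buf_p plus an explicit type tag, since XDP_TX adds a third owner (page-pool pages) to the ring; completion then dispatches on the tag. A condensed sketch of that dispatch:

#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>

enum demo_txbuf_type { DEMO_TXBUF_SKB, DEMO_TXBUF_XDP_NDO, DEMO_TXBUF_XDP_TX };

struct demo_tx_buffer {
        void *buf_p;                    /* skb, xdp_frame or page */
        enum demo_txbuf_type type;
};

static void demo_tx_buf_complete(struct demo_tx_buffer *b, int budget)
{
        switch (b->type) {
        case DEMO_TXBUF_SKB:
                napi_consume_skb(b->buf_p, budget);
                break;
        case DEMO_TXBUF_XDP_NDO:
                xdp_return_frame_rx_napi(b->buf_p);
                break;
        case DEMO_TXBUF_XDP_TX: {
                struct page *page = b->buf_p;

                /* dma_sync_size 0: XDP_TX already synced for device */
                page_pool_put_page(page->pp, page, 0, true);
                break;
        }
        }
        b->buf_p = NULL;
}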
*/ @@ -1458,7 +1491,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget) if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) ndev->stats.tx_bytes += skb->len; else - ndev->stats.tx_bytes += xdpf->len; + ndev->stats.tx_bytes += frame_len; } /* Deferred means some collisions occurred during transmit, @@ -1482,15 +1515,18 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget) } /* Free the sk buffer associated with this last transmit */ - dev_kfree_skb_any(skb); - } else { - xdp_return_frame(xdpf); - - txq->tx_buf[index].xdp = NULL; - /* restore default tx buffer type: FEC_TXBUF_T_SKB */ - txq->tx_buf[index].type = FEC_TXBUF_T_SKB; + napi_consume_skb(skb, budget); + } else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) { + xdp_return_frame_rx_napi(xdpf); + } else { /* recycle pages of XDP_TX frames */ + /* dma_sync_size = 0 because XDP_TX has already synced the DMA for_device */ + page_pool_put_page(page->pp, page, 0, true); } + txq->tx_buf[index].buf_p = NULL; + /* restore default tx buffer type: FEC_TXBUF_T_SKB */ + txq->tx_buf[index].type = FEC_TXBUF_T_SKB; + tx_buf_done: /* Make sure the update to bdp and tx_buf are performed * before dirty_tx @@ -1543,7 +1579,7 @@ static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq, static u32 fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog, - struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int index) + struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu) { unsigned int sync, len = xdp->data_end - xdp->data; u32 ret = FEC_ENET_XDP_PASS; @@ -1553,8 +1589,10 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog, act = bpf_prog_run_xdp(prog, xdp); - /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */ - sync = xdp->data_end - xdp->data_hard_start - FEC_ENET_XDP_HEADROOM; + /* Due to xdp_adjust_tail and xdp_adjust_head, the DMA sync for_device must + * cover the max length the CPU touched + */ + sync = xdp->data_end - xdp->data; sync = max(sync, len); switch (act) { @@ -1566,31 +1604,38 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog, case XDP_REDIRECT: rxq->stats[RX_XDP_REDIRECT]++; err = xdp_do_redirect(fep->netdev, xdp, prog); - if (!err) { - ret = FEC_ENET_XDP_REDIR; - } else { - ret = FEC_ENET_XDP_CONSUMED; - page = virt_to_head_page(xdp->data); - page_pool_put_page(rxq->page_pool, page, sync, true); + if (unlikely(err)) + goto xdp_err; + + ret = FEC_ENET_XDP_REDIR; + break; + + case XDP_TX: + rxq->stats[RX_XDP_TX]++; + err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync); + if (unlikely(err)) { + rxq->stats[RX_XDP_TX_ERRORS]++; + goto xdp_err; } + + ret = FEC_ENET_XDP_TX; break; default: bpf_warn_invalid_xdp_action(fep->netdev, prog, act); fallthrough; - case XDP_TX: - bpf_warn_invalid_xdp_action(fep->netdev, prog, act); - fallthrough; - case XDP_ABORTED: fallthrough; /* handle aborts by dropping packet */ case XDP_DROP: rxq->stats[RX_XDP_DROP]++; +xdp_err: ret = FEC_ENET_XDP_CONSUMED; page = virt_to_head_page(xdp->data); page_pool_put_page(rxq->page_pool, page, sync, true); + if (act != XDP_DROP) + trace_xdp_exception(fep->netdev, prog, act); break; } @@ -1621,6 +1666,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog); u32 ret, xdp_result = FEC_ENET_XDP_PASS; u32 data_start = FEC_ENET_XDP_HEADROOM; + int cpu = smp_processor_id(); struct xdp_buff xdp; struct page *page; u32 sub_len = 4; @@ -1635,7 +1681,11 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 
queue_id) } #endif -#ifdef CONFIG_M532x +#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA) + /* + * Hacky flush of all caches instead of using the DMA API for the TSO + * headers. + */ flush_cache_all(); #endif rxq = fep->rx_queue[queue_id]; @@ -1699,7 +1749,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) /* subtract 16bit shift and FCS */ xdp_prepare_buff(&xdp, page_address(page), data_start, pkt_len - sub_len, false); - ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, index); + ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu); xdp_result |= ret; if (ret != FEC_ENET_XDP_PASS) goto rx_processing_done; @@ -1807,7 +1857,7 @@ rx_processing_done: rxq->bd.cur = bdp; if (xdp_result & FEC_ENET_XDP_REDIR) - xdp_do_flush_map(); + xdp_do_flush(); return pkt_received; } @@ -2882,12 +2932,10 @@ static void fec_enet_get_strings(struct net_device *netdev, switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(fec_stats); i++) { - memcpy(data, fec_stats[i].name, ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; + ethtool_sprintf(&data, "%s", fec_stats[i].name); } for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) { - strncpy(data, fec_xdp_stat_strs[i], ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; + ethtool_sprintf(&data, "%s", fec_xdp_stat_strs[i]); } page_pool_ethtool_stats_get_strings(data); @@ -3059,44 +3107,6 @@ static int fec_enet_set_coalesce(struct net_device *ndev, return 0; } -static int fec_enet_get_tunable(struct net_device *netdev, - const struct ethtool_tunable *tuna, - void *data) -{ - struct fec_enet_private *fep = netdev_priv(netdev); - int ret = 0; - - switch (tuna->id) { - case ETHTOOL_RX_COPYBREAK: - *(u32 *)data = fep->rx_copybreak; - break; - default: - ret = -EINVAL; - break; - } - - return ret; -} - -static int fec_enet_set_tunable(struct net_device *netdev, - const struct ethtool_tunable *tuna, - const void *data) -{ - struct fec_enet_private *fep = netdev_priv(netdev); - int ret = 0; - - switch (tuna->id) { - case ETHTOOL_RX_COPYBREAK: - fep->rx_copybreak = *(u32 *)data; - break; - default: - ret = -EINVAL; - break; - } - - return ret; -} - /* LPI Sleep Ts count base on tx clk (clk_ref). * The lpi sleep cnt value = X us / (cycle_ns). 
*/ @@ -3234,8 +3244,6 @@ static const struct ethtool_ops fec_enet_ethtool_ops = { .get_sset_count = fec_enet_get_sset_count, #endif .get_ts_info = fec_enet_get_ts_info, - .get_tunable = fec_enet_get_tunable, - .set_tunable = fec_enet_set_tunable, .get_wol = fec_enet_get_wol, .set_wol = fec_enet_set_wol, .get_eee = fec_enet_get_eee, @@ -3245,38 +3253,10 @@ static const struct ethtool_ops fec_enet_ethtool_ops = { .self_test = net_selftest, }; -static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - struct phy_device *phydev = ndev->phydev; - - if (!netif_running(ndev)) - return -EINVAL; - - if (!phydev) - return -ENODEV; - - if (fep->bufdesc_ex) { - bool use_fec_hwts = !phy_has_hwtstamp(phydev); - - if (cmd == SIOCSHWTSTAMP) { - if (use_fec_hwts) - return fec_ptp_set(ndev, rq); - fec_ptp_disable_hwts(ndev); - } else if (cmd == SIOCGHWTSTAMP) { - if (use_fec_hwts) - return fec_ptp_get(ndev, rq); - } - } - - return phy_mii_ioctl(phydev, rq, cmd); -} - static void fec_enet_free_buffers(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); unsigned int i; - struct sk_buff *skb; struct fec_enet_priv_tx_q *txq; struct fec_enet_priv_rx_q *rxq; unsigned int q; @@ -3301,18 +3281,23 @@ static void fec_enet_free_buffers(struct net_device *ndev) kfree(txq->tx_bounce[i]); txq->tx_bounce[i] = NULL; + if (!txq->tx_buf[i].buf_p) { + txq->tx_buf[i].type = FEC_TXBUF_T_SKB; + continue; + } + if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) { - skb = txq->tx_buf[i].skb; - txq->tx_buf[i].skb = NULL; - dev_kfree_skb(skb); + dev_kfree_skb(txq->tx_buf[i].buf_p); + } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) { + xdp_return_frame(txq->tx_buf[i].buf_p); } else { - if (txq->tx_buf[i].xdp) { - xdp_return_frame(txq->tx_buf[i].xdp); - txq->tx_buf[i].xdp = NULL; - } + struct page *page = txq->tx_buf[i].buf_p; - txq->tx_buf[i].type = FEC_TXBUF_T_SKB; + page_pool_put_page(page->pp, page, 0, false); } + + txq->tx_buf[i].buf_p = NULL; + txq->tx_buf[i].type = FEC_TXBUF_T_SKB; } } } @@ -3326,10 +3311,9 @@ static void fec_enet_free_queue(struct net_device *ndev) for (i = 0; i < fep->num_tx_queues; i++) if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { txq = fep->tx_queue[i]; - dma_free_coherent(&fep->pdev->dev, - txq->bd.ring_size * TSO_HEADER_SIZE, - txq->tso_hdrs, - txq->tso_hdrs_dma); + fec_dma_free(&fep->pdev->dev, + txq->bd.ring_size * TSO_HEADER_SIZE, + txq->tso_hdrs, txq->tso_hdrs_dma); } for (i = 0; i < fep->num_rx_queues; i++) @@ -3359,10 +3343,9 @@ static int fec_enet_alloc_queue(struct net_device *ndev) txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS; - txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev, + txq->tso_hdrs = fec_dma_alloc(&fep->pdev->dev, txq->bd.ring_size * TSO_HEADER_SIZE, - &txq->tso_hdrs_dma, - GFP_KERNEL); + &txq->tso_hdrs_dma, GFP_KERNEL); if (!txq->tso_hdrs) { ret = -ENOMEM; goto alloc_failed; @@ -3748,31 +3731,26 @@ static int fec_set_features(struct net_device *netdev, return 0; } -static u16 fec_enet_get_raw_vlan_tci(struct sk_buff *skb) -{ - struct vlan_ethhdr *vhdr; - unsigned short vlan_TCI = 0; - - if (skb->protocol == htons(ETH_P_ALL)) { - vhdr = (struct vlan_ethhdr *)(skb->data); - vlan_TCI = ntohs(vhdr->h_vlan_TCI); - } - - return vlan_TCI; -} - static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb, struct net_device *sb_dev) { struct fec_enet_private *fep = netdev_priv(ndev); - u16 vlan_tag; + 
u16 vlan_tag = 0; if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) return netdev_pick_tx(ndev, skb, NULL); - vlan_tag = fec_enet_get_raw_vlan_tci(skb); - if (!vlan_tag) + /* The VLAN tag is present in the payload. */ + if (eth_type_vlan(skb->protocol)) { + struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb); + + vlan_tag = ntohs(vhdr->h_vlan_TCI); + /* The VLAN tag is present in the skb but not yet pushed into the payload. */ + } else if (skb_vlan_tag_present(skb)) { + vlan_tag = skb->vlan_tci; + } else { return vlan_tag; + } return fec_enet_vlan_pri_to_queue[vlan_tag >> 13]; } @@ -3835,12 +3813,14 @@ fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index) static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep, struct fec_enet_priv_tx_q *txq, - struct xdp_frame *frame) + void *frame, u32 dma_sync_len, + bool ndo_xmit) { unsigned int index, status, estatus; struct bufdesc *bdp; dma_addr_t dma_addr; int entries_free; + u16 frame_len; entries_free = fec_enet_get_free_txdesc_num(txq); if (entries_free < MAX_SKB_FRAGS + 1) { @@ -3855,17 +3835,37 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep, index = fec_enet_get_bd_index(bdp, &txq->bd); - dma_addr = dma_map_single(&fep->pdev->dev, frame->data, - frame->len, DMA_TO_DEVICE); - if (dma_mapping_error(&fep->pdev->dev, dma_addr)) - return -ENOMEM; + if (ndo_xmit) { + struct xdp_frame *xdpf = frame; + + dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data, + xdpf->len, DMA_TO_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, dma_addr)) + return -ENOMEM; + + frame_len = xdpf->len; + txq->tx_buf[index].buf_p = xdpf; + txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO; + } else { + struct xdp_buff *xdpb = frame; + struct page *page; + + page = virt_to_page(xdpb->data); + dma_addr = page_pool_get_dma_addr(page) + + (xdpb->data - xdpb->data_hard_start); + dma_sync_single_for_device(&fep->pdev->dev, dma_addr, + dma_sync_len, DMA_BIDIRECTIONAL); + frame_len = xdpb->data_end - xdpb->data; + txq->tx_buf[index].buf_p = page; + txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX; + } status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); if (fep->bufdesc_ex) estatus = BD_ENET_TX_INT; bdp->cbd_bufaddr = cpu_to_fec32(dma_addr); - bdp->cbd_datlen = cpu_to_fec16(frame->len); + bdp->cbd_datlen = cpu_to_fec16(frame_len); if (fep->bufdesc_ex) { struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; @@ -3877,9 +3877,6 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep, ebdp->cbd_esc = cpu_to_fec32(estatus); } - txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO; - txq->tx_buf[index].xdp = frame; - /* Make sure the updates to rest of the descriptor are performed before * transferring ownership. 
*/ @@ -3905,6 +3902,29 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep, return 0; } +static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep, + int cpu, struct xdp_buff *xdp, + u32 dma_sync_len) +{ + struct fec_enet_priv_tx_q *txq; + struct netdev_queue *nq; + int queue, ret; + + queue = fec_enet_xdp_get_tx_queue(fep, cpu); + txq = fep->tx_queue[queue]; + nq = netdev_get_tx_queue(fep->netdev, queue); + + __netif_tx_lock(nq, cpu); + + /* Avoid tx timeout as XDP shares the queue with kernel stack */ + txq_trans_cond_update(nq); + ret = fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false); + + __netif_tx_unlock(nq); + + return ret; +} + static int fec_enet_xdp_xmit(struct net_device *dev, int num_frames, struct xdp_frame **frames, @@ -3927,7 +3947,7 @@ static int fec_enet_xdp_xmit(struct net_device *dev, /* Avoid tx timeout as XDP shares the queue with kernel stack */ txq_trans_cond_update(nq); for (i = 0; i < num_frames; i++) { - if (fec_enet_txq_xmit_frame(fep, txq, frames[i]) < 0) + if (fec_enet_txq_xmit_frame(fep, txq, frames[i], 0, true) < 0) break; sent_frames++; } @@ -3937,6 +3957,37 @@ static int fec_enet_xdp_xmit(struct net_device *dev, return sent_frames; } +static int fec_hwtstamp_get(struct net_device *ndev, + struct kernel_hwtstamp_config *config) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + if (!netif_running(ndev)) + return -EINVAL; + + if (!fep->bufdesc_ex) + return -EOPNOTSUPP; + + fec_ptp_get(ndev, config); + + return 0; +} + +static int fec_hwtstamp_set(struct net_device *ndev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + if (!netif_running(ndev)) + return -EINVAL; + + if (!fep->bufdesc_ex) + return -EOPNOTSUPP; + + return fec_ptp_set(ndev, config, extack); +} + static const struct net_device_ops fec_netdev_ops = { .ndo_open = fec_enet_open, .ndo_stop = fec_enet_close, @@ -3946,13 +3997,15 @@ static const struct net_device_ops fec_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = fec_timeout, .ndo_set_mac_address = fec_set_mac_address, - .ndo_eth_ioctl = fec_enet_ioctl, + .ndo_eth_ioctl = phy_do_ioctl_running, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = fec_poll_controller, #endif .ndo_set_features = fec_set_features, .ndo_bpf = fec_enet_bpf, .ndo_xdp_xmit = fec_enet_xdp_xmit, + .ndo_hwtstamp_get = fec_hwtstamp_get, + .ndo_hwtstamp_set = fec_hwtstamp_set, }; static const unsigned short offset_des_active_rxq[] = { @@ -4006,8 +4059,8 @@ static int fec_enet_init(struct net_device *ndev) bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize; /* Allocate memory for buffer descriptors. */ - cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma, - GFP_KERNEL); + cbd_base = fec_dmam_alloc(&fep->pdev->dev, bd_size, &bd_dma, + GFP_KERNEL); if (!cbd_base) { ret = -ENOMEM; goto free_queue_mem; @@ -4018,9 +4071,6 @@ static int fec_enet_init(struct net_device *ndev) if (ret) goto free_queue_mem; - /* make sure MAC we just acquired is programmed into the hw */ - fec_set_mac_address(ndev, NULL); - /* Set receive and transmit descriptor base. 
*/ for (i = 0; i < fep->num_rx_queues; i++) { struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i]; @@ -4258,14 +4308,13 @@ fec_probe(struct platform_device *pdev) phy_interface_t interface; struct net_device *ndev; int i, irq, ret = 0; - const struct of_device_id *of_id; static int dev_id; struct device_node *np = pdev->dev.of_node, *phy_node; int num_tx_qs; int num_rx_qs; char irq_name[8]; int irq_cnt; - struct fec_devinfo *dev_info; + const struct fec_devinfo *dev_info; fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); @@ -4280,10 +4329,9 @@ fec_probe(struct platform_device *pdev) /* setup board info structure */ fep = netdev_priv(ndev); - of_id = of_match_device(fec_dt_ids, &pdev->dev); - if (of_id) - pdev->id_entry = of_id->data; - dev_info = (struct fec_devinfo *)pdev->id_entry->driver_data; + dev_info = device_get_match_data(&pdev->dev); + if (!dev_info) + dev_info = (const struct fec_devinfo *)pdev->id_entry->driver_data; if (dev_info) fep->quirks = dev_info->quirks; @@ -4486,7 +4534,6 @@ fec_probe(struct platform_device *pdev) if (fep->bufdesc_ex && fep->ptp_clock) netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); - fep->rx_copybreak = COPYBREAK_DEFAULT; INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); pm_runtime_mark_last_busy(&pdev->dev); @@ -4526,7 +4573,7 @@ failed_ioremap: return ret; } -static int +static void fec_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); @@ -4562,7 +4609,6 @@ fec_drv_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); free_netdev(ndev); - return 0; } static int __maybe_unused fec_suspend(struct device *dev) @@ -4718,7 +4764,7 @@ static struct platform_driver fec_driver = { }, .id_table = fec_devtype, .probe = fec_probe, - .remove = fec_drv_remove, + .remove_new = fec_drv_remove, }; module_platform_driver(fec_driver); diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index b88816b71ddf..ebae71ec26c6 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -29,12 +29,12 @@ #include <linux/crc32.h> #include <linux/hardirq.h> #include <linux/delay.h> +#include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_net.h> -#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -974,7 +974,7 @@ err_netdev: return rv; } -static int +static void mpc52xx_fec_remove(struct platform_device *op) { struct net_device *ndev; @@ -998,8 +998,6 @@ mpc52xx_fec_remove(struct platform_device *op) release_mem_region(ndev->base_addr, sizeof(struct mpc52xx_fec)); free_netdev(ndev); - - return 0; } #ifdef CONFIG_PM @@ -1042,7 +1040,7 @@ static struct platform_driver mpc52xx_fec_driver = { .of_match_table = mpc52xx_fec_match, }, .probe = mpc52xx_fec_probe, - .remove = mpc52xx_fec_remove, + .remove_new = mpc52xx_fec_remove, #ifdef CONFIG_PM .suspend = mpc52xx_fec_of_suspend, .resume = mpc52xx_fec_of_resume, diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c index 95f778cce98c..39689826cc8f 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c @@ -13,10 +13,11 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/phy.h> -#include <linux/of_platform.h> #include <linux/slab.h> 
+#include <linux/of.h> #include <linux/of_address.h> #include <linux/of_mdio.h> +#include <linux/platform_device.h> #include <asm/io.h> #include <asm/mpc52xx.h> #include "fec_mpc52xx.h" @@ -117,7 +118,7 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of) return err; } -static int mpc52xx_fec_mdio_remove(struct platform_device *of) +static void mpc52xx_fec_mdio_remove(struct platform_device *of) { struct mii_bus *bus = platform_get_drvdata(of); struct mpc52xx_fec_mdio_priv *priv = bus->priv; @@ -126,8 +127,6 @@ static int mpc52xx_fec_mdio_remove(struct platform_device *of) iounmap(priv->regs); kfree(priv); mdiobus_free(bus); - - return 0; } static const struct of_device_id mpc52xx_fec_mdio_match[] = { @@ -145,7 +144,7 @@ struct platform_driver mpc52xx_fec_mdio_driver = { .of_match_table = mpc52xx_fec_mdio_match, }, .probe = mpc52xx_fec_mdio_probe, - .remove = mpc52xx_fec_mdio_remove, + .remove_new = mpc52xx_fec_mdio_remove, }; /* let fec driver call it, since this has to be registered before it */ diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index ab86bb8562ef..181d9bfbee22 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -30,7 +30,6 @@ #include <linux/phy.h> #include <linux/fec.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/of_net.h> @@ -443,21 +442,21 @@ static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) */ static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { - struct fec_enet_private *adapter = + struct fec_enet_private *fep = container_of(ptp, struct fec_enet_private, ptp_caps); u64 ns; unsigned long flags; - mutex_lock(&adapter->ptp_clk_mutex); + mutex_lock(&fep->ptp_clk_mutex); /* Check the ptp clock */ - if (!adapter->ptp_clk_on) { - mutex_unlock(&adapter->ptp_clk_mutex); + if (!fep->ptp_clk_on) { + mutex_unlock(&fep->ptp_clk_mutex); return -EINVAL; } - spin_lock_irqsave(&adapter->tmreg_lock, flags); - ns = timecounter_read(&adapter->tc); - spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - mutex_unlock(&adapter->ptp_clk_mutex); + spin_lock_irqsave(&fep->tmreg_lock, flags); + ns = timecounter_read(&fep->tc); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); *ts = ns_to_timespec64(ns); @@ -606,28 +605,12 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp, } } -/** - * fec_ptp_disable_hwts - disable hardware time stamping - * @ndev: pointer to net_device - */ -void fec_ptp_disable_hwts(struct net_device *ndev) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - - fep->hwts_tx_en = 0; - fep->hwts_rx_en = 0; -} - -int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr) +int fec_ptp_set(struct net_device *ndev, struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct fec_enet_private *fep = netdev_priv(ndev); - struct hwtstamp_config config; - - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: fep->hwts_tx_en = 0; break; @@ -638,33 +621,28 @@ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr) return -ERANGE; } - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: fep->hwts_rx_en = 0; break; default: fep->hwts_rx_en = 1; - config.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; break; } - return copy_to_user(ifr->ifr_data, 
&config, sizeof(config)) ? - -EFAULT : 0; + return 0; } -int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr) +void fec_ptp_get(struct net_device *ndev, struct kernel_hwtstamp_config *config) { struct fec_enet_private *fep = netdev_priv(ndev); - struct hwtstamp_config config; - - config.flags = 0; - config.tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - config.rx_filter = (fep->hwts_rx_en ? - HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE); - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; + config->flags = 0; + config->tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + config->rx_filter = (fep->hwts_rx_en ? + HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE); } /* diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index 9d85fb136e34..d96028f01770 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -7,6 +7,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/fsl/guts.h> +#include <linux/platform_device.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/module.h> diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 3b75cc543be9..9ba15d3183d7 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -618,18 +618,17 @@ static int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en) return 0; } -static void memac_validate(struct phylink_config *config, - unsigned long *supported, - struct phylink_link_state *state) +static unsigned long memac_get_caps(struct phylink_config *config, + phy_interface_t interface) { struct fman_mac *memac = fman_config_to_mac(config)->fman_mac; unsigned long caps = config->mac_capabilities; - if (phy_interface_mode_is_rgmii(state->interface) && + if (phy_interface_mode_is_rgmii(interface) && memac->rgmii_no_half_duplex) caps &= ~(MAC_10HD | MAC_100HD); - phylink_validate_mask_caps(supported, state, caps); + return caps; } /** @@ -776,7 +775,7 @@ static void memac_link_down(struct phylink_config *config, unsigned int mode, } static const struct phylink_mac_ops memac_mac_ops = { - .validate = memac_validate, + .mac_get_caps = memac_get_caps, .mac_select_pcs = memac_select_pcs, .mac_prepare = memac_prepare, .mac_config = memac_mac_config, diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index ab90fe2bee5e..406e75e9e5ea 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -6,6 +6,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/io.h> +#include <linux/platform_device.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/interrupt.h> diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 43665806c590..9767586b4eb3 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -18,6 +18,7 @@ #include <linux/phylink.h> #include <linux/etherdevice.h> #include <linux/libfdt_env.h> +#include <linux/platform_device.h> #include "mac.h" #include "fman_mac.h" @@ -331,12 +332,11 @@ _return_of_node_put: return err; } -static int mac_remove(struct platform_device *pdev) +static void mac_remove(struct platform_device *pdev) { struct mac_device *mac_dev = platform_get_drvdata(pdev); 
platform_device_unregister(mac_dev->priv->eth_dev); - return 0; } static struct platform_driver mac_driver = { @@ -345,7 +345,7 @@ static struct platform_driver mac_driver = { .of_match_table = mac_match, }, .probe = mac_probe, - .remove = mac_remove, + .remove_new = mac_remove, }; builtin_platform_driver(mac_driver); diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h index ad06f8d7924b..fe747915cc73 100644 --- a/drivers/net/ethernet/freescale/fman/mac.h +++ b/drivers/net/ethernet/freescale/fman/mac.h @@ -68,10 +68,6 @@ struct dpaa_eth_data { extern const char *mac_driver_description; -int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx); - -void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, - bool *tx_pause); int fman_set_multi(struct net_device *net_dev, struct mac_device *mac_dev); #endif /* __MAC_H */ diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 8844a9a04fcf..cf392faa6105 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -35,10 +35,9 @@ #include <linux/fs.h> #include <linux/platform_device.h> #include <linux/phy.h> +#include <linux/property.h> #include <linux/of.h> #include <linux/of_mdio.h> -#include <linux/of_platform.h> -#include <linux/of_gpio.h> #include <linux/of_net.h> #include <linux/pgtable.h> @@ -318,14 +317,12 @@ fs_enet_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct fs_enet_private *fep; - const struct fs_platform_info *fpi; u32 int_events; u32 int_clr_events; int nr, napi_ok; int handled; fep = netdev_priv(dev); - fpi = fep->fpi; nr = 0; while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) { @@ -886,9 +883,9 @@ static const struct ethtool_ops fs_ethtool_ops = { /**************************************************************************************/ #ifdef CONFIG_FS_ENET_HAS_FEC -#define IS_FEC(match) ((match)->data == &fs_fec_ops) +#define IS_FEC(ops) ((ops) == &fs_fec_ops) #else -#define IS_FEC(match) 0 +#define IS_FEC(ops) 0 #endif static const struct net_device_ops fs_enet_netdev_ops = { @@ -905,10 +902,9 @@ static const struct net_device_ops fs_enet_netdev_ops = { #endif }; -static const struct of_device_id fs_enet_match[]; static int fs_enet_probe(struct platform_device *ofdev) { - const struct of_device_id *match; + const struct fs_ops *ops; struct net_device *ndev; struct fs_enet_private *fep; struct fs_platform_info *fpi; @@ -918,15 +914,15 @@ static int fs_enet_probe(struct platform_device *ofdev) const char *phy_connection_type; int privsize, len, ret = -ENODEV; - match = of_match_device(fs_enet_match, &ofdev->dev); - if (!match) + ops = device_get_match_data(&ofdev->dev); + if (!ops) return -EINVAL; fpi = kzalloc(sizeof(*fpi), GFP_KERNEL); if (!fpi) return -ENOMEM; - if (!IS_FEC(match)) { + if (!IS_FEC(ops)) { data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len); if (!data || len != 4) goto out_free_fpi; @@ -988,7 +984,7 @@ static int fs_enet_probe(struct platform_device *ofdev) fep->dev = &ofdev->dev; fep->ndev = ndev; fep->fpi = fpi; - fep->ops = match->data; + fep->ops = ops; ret = fep->ops->setup_data(ndev); if (ret) @@ -1051,7 +1047,7 @@ out_free_fpi: return ret; } -static int fs_enet_remove(struct platform_device *ofdev) +static void fs_enet_remove(struct platform_device *ofdev) { struct net_device *ndev = platform_get_drvdata(ofdev); struct fs_enet_private 
*fep = netdev_priv(ndev); @@ -1066,7 +1062,6 @@ static int fs_enet_remove(struct platform_device *ofdev) if (of_phy_is_fixed_link(ofdev->dev.of_node)) of_phy_deregister_fixed_link(ofdev->dev.of_node); free_netdev(ndev); - return 0; } static const struct of_device_id fs_enet_match[] = { @@ -1113,7 +1108,7 @@ static struct platform_driver fs_enet_driver = { .of_match_table = fs_enet_match, }, .probe = fs_enet_probe, - .remove = fs_enet_remove, + .remove_new = fs_enet_remove, }; #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h index cb419aef8d1b..21c07ac05225 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h @@ -2,6 +2,7 @@ #ifndef FS_ENET_H #define FS_ENET_H +#include <linux/clk.h> #include <linux/mii.h> #include <linux/netdevice.h> #include <linux/types.h> @@ -9,9 +10,6 @@ #include <linux/phy.h> #include <linux/dma-mapping.h> -#include <linux/fs_enet_pd.h> -#include <asm/fs_pd.h> - #ifdef CONFIG_CPM1 #include <asm/cpm1.h> #endif @@ -118,6 +116,23 @@ struct phy_info { #define ENET_RX_ALIGN 16 #define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE + ENET_RX_ALIGN - 1) +struct fs_platform_info { + /* device specific information */ + u32 cp_command; /* CPM page/sblock/mcn */ + + u32 dpram_offset; + + struct device_node *phy_node; + + int rx_ring, tx_ring; /* number of buffers on rx */ + int rx_copybreak; /* limit we copy small frames */ + int napi_weight; /* NAPI weight */ + + int use_rmii; /* use RMII mode */ + + struct clk *clk_per; /* 'per' clock for register access */ +}; + struct fs_enet_private { struct napi_struct napi; struct device *dev; /* pointer back to the device (must be initialized first) */ @@ -192,11 +207,6 @@ void fs_cleanup_bds(struct net_device *dev); #define PFX DRV_MODULE_NAME ": " /***************************************************************************/ - -int fs_enet_platform_init(void); -void fs_enet_platform_cleanup(void); - -/***************************************************************************/ /* buffer descriptor access macros */ /* access macros */ diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c index b47490be872c..e2ffac9eb2ad 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c @@ -32,13 +32,11 @@ #include <linux/platform_device.h> #include <linux/phy.h> #include <linux/of_address.h> -#include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/gfp.h> #include <linux/pgtable.h> #include <asm/immap_cpm2.h> -#include <asm/mpc8260.h> #include <asm/cpm2.h> #include <asm/irq.h> @@ -106,7 +104,7 @@ static int do_pd_setup(struct fs_enet_private *fep) goto out_ep; fep->fcc.mem = (void __iomem *)cpm2_immr; - fpi->dpram_offset = cpm_dpalloc(128, 32); + fpi->dpram_offset = cpm_muram_alloc(128, 32); if (IS_ERR_VALUE(fpi->dpram_offset)) { ret = fpi->dpram_offset; goto out_fcccp; @@ -548,7 +546,7 @@ static void tx_restart(struct net_device *dev) } /* Now update the TBPTR and dirty flag to the current buffer */ W32(ep, fen_genfcc.fcc_tbptr, - (uint) (((void *)recheck_bd - fep->ring_base) + + (uint)(((void __iomem *)recheck_bd - fep->ring_base) + fep->ring_mem_addr)); fep->dirty_tx = recheck_bd; diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index 61f4b6e50d29..cdc89d83cf07 100644 --- 
a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -32,7 +32,6 @@ #include <linux/fs.h> #include <linux/platform_device.h> #include <linux/of_address.h> -#include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/gfp.h> @@ -340,11 +339,7 @@ static void restart(struct net_device *dev) static void stop(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - const struct fs_platform_info *fpi = fep->fpi; struct fec __iomem *fecp = fep->fec.fecp; - - struct fec_info *feci = dev->phydev->mdio.bus->priv; - int i; if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0) @@ -364,16 +359,6 @@ static void stop(struct net_device *dev) FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN); fs_cleanup_bds(dev); - - /* shut down FEC1? that's where the mii bus is */ - if (fpi->has_phy) { - FS(fecp, r_cntrl, fpi->use_rmii ? - FEC_RCNTRL_RMII_MODE : - FEC_RCNTRL_MII_MODE); /* MII/RMII enable */ - FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); - FW(fecp, ievent, FEC_ENET_MII); - FW(fecp, mii_speed, feci->mii_speed); - } } static void napi_clear_event_fs(struct net_device *dev) diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c index 64300ac13e02..a64cb6270515 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c @@ -32,7 +32,6 @@ #include <linux/platform_device.h> #include <linux/of_address.h> #include <linux/of_irq.h> -#include <linux/of_platform.h> #include <asm/irq.h> #include <linux/uaccess.h> @@ -134,13 +133,13 @@ static int allocate_bd(struct net_device *dev) struct fs_enet_private *fep = netdev_priv(dev); const struct fs_platform_info *fpi = fep->fpi; - fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) * - sizeof(cbd_t), 8); + fep->ring_mem_addr = cpm_muram_alloc((fpi->tx_ring + fpi->rx_ring) * + sizeof(cbd_t), 8); if (IS_ERR_VALUE(fep->ring_mem_addr)) return -ENOMEM; fep->ring_base = (void __iomem __force*) - cpm_dpram_addr(fep->ring_mem_addr); + cpm_muram_addr(fep->ring_mem_addr); return 0; } @@ -150,7 +149,7 @@ static void free_bd(struct net_device *dev) struct fs_enet_private *fep = netdev_priv(dev); if (fep->ring_base) - cpm_dpfree(fep->ring_mem_addr); + cpm_muram_free(fep->ring_mem_addr); } static void cleanup_data(struct net_device *dev) diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index 21de56345503..f965a2329055 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -29,8 +29,8 @@ struct bb_info { struct mdiobb_ctrl ctrl; - __be32 __iomem *dir; - __be32 __iomem *dat; + u32 __iomem *dir; + u32 __iomem *dat; u32 mdio_msk; u32 mdc_msk; }; @@ -192,7 +192,7 @@ out: return ret; } -static int fs_enet_mdio_remove(struct platform_device *ofdev) +static void fs_enet_mdio_remove(struct platform_device *ofdev) { struct mii_bus *bus = platform_get_drvdata(ofdev); struct bb_info *bitbang = bus->priv; @@ -201,8 +201,6 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev) free_mdio_bitbang(bus); iounmap(bitbang->dir); kfree(bitbang); - - return 0; } static const struct of_device_id fs_enet_mdio_bb_match[] = { @@ -219,7 +217,7 @@ static struct platform_driver fs_enet_bb_mdio_driver = { .of_match_table = fs_enet_mdio_bb_match, }, .probe = fs_enet_mdio_probe, - .remove = fs_enet_mdio_remove, + .remove_new = fs_enet_mdio_remove, }; 
module_platform_driver(fs_enet_bb_mdio_driver); diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index 59a8f0bd0f5c..7bb69727952a 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -30,8 +30,10 @@ #include <linux/ethtool.h> #include <linux/bitops.h> #include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_platform.h> +#include <linux/of_mdio.h> #include <linux/pgtable.h> #include <asm/irq.h> @@ -95,20 +97,15 @@ static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, } -static const struct of_device_id fs_enet_mdio_fec_match[]; static int fs_enet_mdio_probe(struct platform_device *ofdev) { - const struct of_device_id *match; struct resource res; struct mii_bus *new_bus; struct fec_info *fec; int (*get_bus_freq)(struct device *); int ret = -ENOMEM, clock, speed; - match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev); - if (!match) - return -EINVAL; - get_bus_freq = match->data; + get_bus_freq = device_get_match_data(&ofdev->dev); new_bus = mdiobus_alloc(); if (!new_bus) @@ -187,7 +184,7 @@ out: return ret; } -static int fs_enet_mdio_remove(struct platform_device *ofdev) +static void fs_enet_mdio_remove(struct platform_device *ofdev) { struct mii_bus *bus = platform_get_drvdata(ofdev); struct fec_info *fec = bus->priv; @@ -196,8 +193,6 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev) iounmap(fec->fecp); kfree(fec); mdiobus_free(bus); - - return 0; } static const struct of_device_id fs_enet_mdio_fec_match[] = { @@ -220,7 +215,7 @@ static struct platform_driver fs_enet_fec_mdio_driver = { .of_match_table = fs_enet_mdio_fec_match, }, .probe = fs_enet_mdio_probe, - .remove = fs_enet_mdio_remove, + .remove_new = fs_enet_mdio_remove, }; module_platform_driver(fs_enet_fec_mdio_driver); diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 9d58d8334467..70dd982a5edc 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -12,15 +12,17 @@ */ #include <linux/kernel.h> +#include <linux/platform_device.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/mii.h> +#include <linux/of.h> #include <linux/of_address.h> #include <linux/of_mdio.h> -#include <linux/of_device.h> +#include <linux/property.h> #include <asm/io.h> #if IS_ENABLED(CONFIG_UCC_GETH) @@ -406,8 +408,6 @@ static void set_tbipa(const u32 tbipa_val, struct platform_device *pdev, static int fsl_pq_mdio_probe(struct platform_device *pdev) { - const struct of_device_id *id = - of_match_device(fsl_pq_mdio_match, &pdev->dev); const struct fsl_pq_mdio_data *data; struct device_node *np = pdev->dev.of_node; struct resource res; @@ -416,15 +416,12 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) struct mii_bus *new_bus; int err; - if (!id) { + data = device_get_match_data(&pdev->dev); + if (!data) { dev_err(&pdev->dev, "Failed to match device\n"); return -ENODEV; } - data = id->data; - - dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible); - new_bus = mdiobus_alloc_size(sizeof(*priv)); if (!new_bus) return -ENOMEM; @@ -511,7 +508,7 @@ error: } -static int fsl_pq_mdio_remove(struct platform_device *pdev) +static void fsl_pq_mdio_remove(struct platform_device *pdev) { 
struct device *device = &pdev->dev; struct mii_bus *bus = dev_get_drvdata(device); @@ -521,8 +518,6 @@ static int fsl_pq_mdio_remove(struct platform_device *pdev) iounmap(priv->map); mdiobus_free(bus); - - return 0; } static struct platform_driver fsl_pq_mdio_driver = { @@ -531,7 +526,7 @@ static struct platform_driver fsl_pq_mdio_driver = { .of_match_table = fsl_pq_mdio_match, }, .probe = fsl_pq_mdio_probe, - .remove = fsl_pq_mdio_remove, + .remove_new = fsl_pq_mdio_remove, }; module_platform_driver(fsl_pq_mdio_driver); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 38d5013c6fed..e3dfbd7a4236 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -60,6 +60,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> +#include <linux/platform_device.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/unistd.h> @@ -75,7 +76,6 @@ #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_mdio.h> -#include <linux/of_platform.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/udp.h> @@ -3364,7 +3364,7 @@ register_fail: return err; } -static int gfar_remove(struct platform_device *ofdev) +static void gfar_remove(struct platform_device *ofdev) { struct gfar_private *priv = platform_get_drvdata(ofdev); struct device_node *np = ofdev->dev.of_node; @@ -3381,8 +3381,6 @@ static int gfar_remove(struct platform_device *ofdev) gfar_free_rx_queues(priv); gfar_free_tx_queues(priv); free_gfar_dev(priv); - - return 0; } #ifdef CONFIG_PM @@ -3642,7 +3640,7 @@ static struct platform_driver gfar_driver = { .of_match_table = gfar_match, }, .probe = gfar_probe, - .remove = gfar_remove, + .remove_new = gfar_remove, }; module_platform_driver(gfar_driver); diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index b2b0d3c26fcc..7a15b9245698 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -38,7 +38,9 @@ #include <linux/phy.h> #include <linux/sort.h> #include <linux/if_vlan.h> +#include <linux/of.h> #include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/fsl/ptp_qoriq.h> #include "gianfar.h" diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 7a4cb4f07c32..ab421243a419 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -28,11 +28,12 @@ #include <linux/phy.h> #include <linux/phy_fixed.h> #include <linux/workqueue.h> +#include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_net.h> -#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/uaccess.h> #include <asm/irq.h> @@ -3753,7 +3754,7 @@ err_free_info: return err; } -static int ucc_geth_remove(struct platform_device* ofdev) +static void ucc_geth_remove(struct platform_device* ofdev) { struct net_device *dev = platform_get_drvdata(ofdev); struct ucc_geth_private *ugeth = netdev_priv(dev); @@ -3767,8 +3768,6 @@ static int ucc_geth_remove(struct platform_device* ofdev) of_node_put(ugeth->ug_info->phy_node); kfree(ugeth->ug_info); free_netdev(dev); - - return 0; } static const struct of_device_id ucc_geth_match[] = { @@ -3787,7 +3786,7 @@ static struct platform_driver ucc_geth_driver = { .of_match_table = ucc_geth_match, }, .probe = ucc_geth_probe, - 
.remove = ucc_geth_remove, + .remove_new = ucc_geth_remove, .suspend = ucc_geth_suspend, .resume = ucc_geth_resume, }; diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c index a13b4ba4d6e1..65dc07d0df0f 100644 --- a/drivers/net/ethernet/freescale/xgmac_mdio.c +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -19,10 +19,10 @@ #include <linux/kernel.h> #include <linux/mdio.h> #include <linux/module.h> -#include <linux/of_address.h> +#include <linux/of.h> #include <linux/of_mdio.h> -#include <linux/of_platform.h> #include <linux/phy.h> +#include <linux/platform_device.h> #include <linux/slab.h> /* Number of microseconds to wait for a register to respond */ diff --git a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h index 53b7e95213a8..5eec552a1f24 100644 --- a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h +++ b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h @@ -5,6 +5,7 @@ #include <linux/netdevice.h> #include <linux/u64_stats_sync.h> +#include <net/xdp.h> /* Tx descriptor size */ #define FUNETH_SQE_SIZE 64U diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h index 4b425bf71ede..0d1e681be250 100644 --- a/drivers/net/ethernet/google/gve/gve.h +++ b/drivers/net/ethernet/google/gve/gve.h @@ -11,6 +11,7 @@ #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/u64_stats_sync.h> +#include <net/xdp.h> #include "gve_desc.h" #include "gve_desc_dqo.h" @@ -51,6 +52,26 @@ #define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182 +#define DQO_QPL_DEFAULT_TX_PAGES 512 +#define DQO_QPL_DEFAULT_RX_PAGES 2048 + +/* Maximum TSO size supported on DQO */ +#define GVE_DQO_TX_MAX 0x3FFFF + +#define GVE_TX_BUF_SHIFT_DQO 11 + +/* 2K buffers for DQO-QPL */ +#define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO) +#define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO) +#define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO)) + +/* If the number of free/recyclable buffers is less than this threshold, the + * driver allocs and uses a non-qpl page on the receive path of DQO QPL to + * free up buffers. + * The value is set big enough to post at least 3 64K LRO packets to the NIC + * via 2K buffers. + */ +#define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96 + /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */ struct gve_rx_desc_queue { struct gve_rx_desc *desc_ring; /* the descriptor ring */ @@ -217,6 +238,15 @@ struct gve_rx_ring { * which cannot be reused yet. */ struct gve_index_list used_buf_states; + + /* qpl assigned to this queue */ + struct gve_queue_page_list *qpl; + + /* index into queue page list */ + u32 next_qpl_page_idx; + + /* track number of used buffers */ + u16 used_buf_states_cnt; } dqo; }; @@ -328,8 +358,14 @@ struct gve_tx_pending_packet_dqo { * All others correspond to `skb`'s frags and should be unmapped with * `dma_unmap_page`. */ - DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]); - DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]); + union { + struct { + DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]); + DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]); + }; + s16 tx_qpl_buf_ids[GVE_MAX_TX_BUFS_PER_PKT]; + }; + u16 num_bufs; /* Linked list index to next element in the list, or -1 if none */ @@ -384,6 +420,32 @@ struct gve_tx_ring { * set. 
*/ u32 last_re_idx; + + /* free running number of packet buf descriptors posted */ + u16 posted_packet_desc_cnt; + /* free running number of packet buf descriptors completed */ + u16 completed_packet_desc_cnt; + + /* QPL fields */ + struct { + /* Linked list of gve_tx_buf_dqo. Index into + * tx_qpl_buf_next, or -1 if empty. + * + * This is a consumer list owned by the TX path. When it + * runs out, the producer list is stolen from the + * completion handling path + * (dqo_compl.free_tx_qpl_buf_head). + */ + s16 free_tx_qpl_buf_head; + + /* Free running count of the number of QPL tx buffers + * allocated + */ + u32 alloc_tx_qpl_buf_cnt; + + /* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */ + u32 free_tx_qpl_buf_cnt; + }; } dqo_tx; }; @@ -427,6 +489,24 @@ struct gve_tx_ring { * reached a specified timeout. */ struct gve_index_list timed_out_completions; + + /* QPL fields */ + struct { + /* Linked list of gve_tx_buf_dqo. Index into + * tx_qpl_buf_next, or -1 if empty. + * + * This is the producer list, owned by the completion + * handling path. When the consumer list + * (dqo_tx.free_tx_qpl_buf_head) runs out, this list + * will be stolen. + */ + atomic_t free_tx_qpl_buf_head; + + /* Free running count of the number of tx buffers + * freed + */ + atomic_t free_tx_qpl_buf_cnt; + }; } dqo_compl; } ____cacheline_aligned; u64 pkt_done; /* free-running - total packets completed */ @@ -453,6 +533,21 @@ struct gve_tx_ring { s16 num_pending_packets; u32 complq_mask; /* complq size is complq_mask + 1 */ + + /* QPL fields */ + struct { + /* qpl assigned to this queue */ + struct gve_queue_page_list *qpl; + + /* Each QPL page is divided into TX bounce buffers + * of size GVE_TX_BUF_SIZE_DQO. tx_qpl_buf_next is + * an array to manage linked lists of TX buffers. 
+ * An entry j at index i implies that j'th buffer + * is next on the list after i + */ + s16 *tx_qpl_buf_next; + u32 num_tx_qpl_bufs; + }; } dqo; } ____cacheline_aligned; struct netdev_queue *netdev_txq; @@ -531,6 +626,7 @@ enum gve_queue_format { GVE_GQI_RDA_FORMAT = 0x1, GVE_GQI_QPL_FORMAT = 0x2, GVE_DQO_RDA_FORMAT = 0x3, + GVE_DQO_QPL_FORMAT = 0x4, }; struct gve_priv { @@ -550,7 +646,8 @@ struct gve_priv { u16 num_event_counters; u16 tx_desc_cnt; /* num desc per ring */ u16 rx_desc_cnt; /* num desc per ring */ - u16 tx_pages_per_qpl; /* tx buffer length */ + u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */ + u16 rx_pages_per_qpl; /* Suggested number of pages per qpl for RX queues by NIC */ u16 rx_data_slot_cnt; /* rx buffer length */ u64 max_registered_pages; u64 num_registered_pages; /* num pages registered with NIC */ @@ -808,11 +905,17 @@ static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx) return (priv->num_ntfy_blks / 2) + queue_idx; } +static inline bool gve_is_qpl(struct gve_priv *priv) +{ + return priv->queue_format == GVE_GQI_QPL_FORMAT || + priv->queue_format == GVE_DQO_QPL_FORMAT; +} + /* Returns the number of tx queue page lists */ static inline u32 gve_num_tx_qpls(struct gve_priv *priv) { - if (priv->queue_format != GVE_GQI_QPL_FORMAT) + if (!gve_is_qpl(priv)) return 0; return priv->tx_cfg.num_queues + priv->num_xdp_queues; @@ -832,7 +935,7 @@ static inline u32 gve_num_xdp_qpls(struct gve_priv *priv) */ static inline u32 gve_num_rx_qpls(struct gve_priv *priv) { - if (priv->queue_format != GVE_GQI_QPL_FORMAT) + if (!gve_is_qpl(priv)) return 0; return priv->rx_cfg.num_queues; diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index 252974202a3f..79db7a6d42bc 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -39,7 +39,8 @@ void gve_parse_device_option(struct gve_priv *priv, struct gve_device_option_gqi_rda **dev_op_gqi_rda, struct gve_device_option_gqi_qpl **dev_op_gqi_qpl, struct gve_device_option_dqo_rda **dev_op_dqo_rda, - struct gve_device_option_jumbo_frames **dev_op_jumbo_frames) + struct gve_device_option_jumbo_frames **dev_op_jumbo_frames, + struct gve_device_option_dqo_qpl **dev_op_dqo_qpl) { u32 req_feat_mask = be32_to_cpu(option->required_features_mask); u16 option_length = be16_to_cpu(option->option_length); @@ -112,6 +113,22 @@ void gve_parse_device_option(struct gve_priv *priv, } *dev_op_dqo_rda = (void *)(option + 1); break; + case GVE_DEV_OPT_ID_DQO_QPL: + if (option_length < sizeof(**dev_op_dqo_qpl) || + req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL) { + dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, + "DQO QPL", (int)sizeof(**dev_op_dqo_qpl), + GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL, + option_length, req_feat_mask); + break; + } + + if (option_length > sizeof(**dev_op_dqo_qpl)) { + dev_warn(&priv->pdev->dev, + GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO QPL"); + } + *dev_op_dqo_qpl = (void *)(option + 1); + break; case GVE_DEV_OPT_ID_JUMBO_FRAMES: if (option_length < sizeof(**dev_op_jumbo_frames) || req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) { @@ -146,7 +163,8 @@ gve_process_device_options(struct gve_priv *priv, struct gve_device_option_gqi_rda **dev_op_gqi_rda, struct gve_device_option_gqi_qpl **dev_op_gqi_qpl, struct gve_device_option_dqo_rda **dev_op_dqo_rda, - struct gve_device_option_jumbo_frames **dev_op_jumbo_frames) + struct gve_device_option_jumbo_frames 
**dev_op_jumbo_frames, + struct gve_device_option_dqo_qpl **dev_op_dqo_qpl) { const int num_options = be16_to_cpu(descriptor->num_device_options); struct gve_device_option *dev_opt; @@ -166,7 +184,8 @@ gve_process_device_options(struct gve_priv *priv, gve_parse_device_option(priv, descriptor, dev_opt, dev_op_gqi_rda, dev_op_gqi_qpl, - dev_op_dqo_rda, dev_op_jumbo_frames); + dev_op_dqo_rda, dev_op_jumbo_frames, + dev_op_dqo_qpl); dev_opt = next_opt; } @@ -505,12 +524,24 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index) cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id); } else { + u16 comp_ring_size; + u32 qpl_id = 0; + + if (priv->queue_format == GVE_DQO_RDA_FORMAT) { + qpl_id = GVE_RAW_ADDRESSING_QPL_ID; + comp_ring_size = + priv->options_dqo_rda.tx_comp_ring_entries; + } else { + qpl_id = tx->dqo.qpl->id; + comp_ring_size = priv->tx_desc_cnt; + } + cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id); cmd.create_tx_queue.tx_ring_size = cpu_to_be16(priv->tx_desc_cnt); cmd.create_tx_queue.tx_comp_ring_addr = cpu_to_be64(tx->complq_bus_dqo); cmd.create_tx_queue.tx_comp_ring_size = - cpu_to_be16(priv->options_dqo_rda.tx_comp_ring_entries); + cpu_to_be16(comp_ring_size); } return gve_adminq_issue_cmd(priv, &cmd); @@ -555,6 +586,18 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index) cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id); cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size); } else { + u16 rx_buff_ring_entries; + u32 qpl_id = 0; + + if (priv->queue_format == GVE_DQO_RDA_FORMAT) { + qpl_id = GVE_RAW_ADDRESSING_QPL_ID; + rx_buff_ring_entries = + priv->options_dqo_rda.rx_buff_ring_entries; + } else { + qpl_id = rx->dqo.qpl->id; + rx_buff_ring_entries = priv->rx_desc_cnt; + } + cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id); cmd.create_rx_queue.rx_ring_size = cpu_to_be16(priv->rx_desc_cnt); cmd.create_rx_queue.rx_desc_ring_addr = @@ -564,7 +607,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index) cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(priv->data_buffer_size_dqo); cmd.create_rx_queue.rx_buff_ring_size = - cpu_to_be16(priv->options_dqo_rda.rx_buff_ring_entries); + cpu_to_be16(rx_buff_ring_entries); cmd.create_rx_queue.enable_rsc = !!(priv->dev->features & NETIF_F_LRO); } @@ -675,9 +718,13 @@ gve_set_desc_cnt_dqo(struct gve_priv *priv, const struct gve_device_option_dqo_rda *dev_op_dqo_rda) { priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries); + priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries); + + if (priv->queue_format == GVE_DQO_QPL_FORMAT) + return 0; + priv->options_dqo_rda.tx_comp_ring_entries = be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries); - priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries); priv->options_dqo_rda.rx_buff_ring_entries = be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries); @@ -687,7 +734,9 @@ gve_set_desc_cnt_dqo(struct gve_priv *priv, static void gve_enable_supported_features(struct gve_priv *priv, u32 supported_features_mask, const struct gve_device_option_jumbo_frames - *dev_op_jumbo_frames) + *dev_op_jumbo_frames, + const struct gve_device_option_dqo_qpl + *dev_op_dqo_qpl) { /* Before control reaches this point, the page-size-capped max MTU from * the gve_device_descriptor field has already been stored in @@ -699,6 +748,18 @@ static void gve_enable_supported_features(struct gve_priv *priv, "JUMBO FRAMES device option enabled.\n"); priv->dev->max_mtu = 
be16_to_cpu(dev_op_jumbo_frames->max_mtu); } + + /* Override pages for qpl for DQO-QPL */ + if (dev_op_dqo_qpl) { + priv->tx_pages_per_qpl = + be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl); + priv->rx_pages_per_qpl = + be16_to_cpu(dev_op_dqo_qpl->rx_pages_per_qpl); + if (priv->tx_pages_per_qpl == 0) + priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES; + if (priv->rx_pages_per_qpl == 0) + priv->rx_pages_per_qpl = DQO_QPL_DEFAULT_RX_PAGES; + } } int gve_adminq_describe_device(struct gve_priv *priv) @@ -707,6 +768,7 @@ int gve_adminq_describe_device(struct gve_priv *priv) struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL; struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL; struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL; + struct gve_device_option_dqo_qpl *dev_op_dqo_qpl = NULL; struct gve_device_descriptor *descriptor; u32 supported_features_mask = 0; union gve_adminq_command cmd; @@ -733,13 +795,14 @@ int gve_adminq_describe_device(struct gve_priv *priv) err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda, &dev_op_gqi_qpl, &dev_op_dqo_rda, - &dev_op_jumbo_frames); + &dev_op_jumbo_frames, + &dev_op_dqo_qpl); if (err) goto free_device_descriptor; /* If the GQI_RAW_ADDRESSING option is not enabled and the queue format * is not set to GqiRda, choose the queue format in a priority order: - * DqoRda, GqiRda, GqiQpl. Use GqiQpl as default. + * DqoRda, DqoQpl, GqiRda, GqiQpl. Use GqiQpl as default. */ if (dev_op_dqo_rda) { priv->queue_format = GVE_DQO_RDA_FORMAT; @@ -747,7 +810,11 @@ int gve_adminq_describe_device(struct gve_priv *priv) "Driver is running with DQO RDA queue format.\n"); supported_features_mask = be32_to_cpu(dev_op_dqo_rda->supported_features_mask); - } else if (dev_op_gqi_rda) { + } else if (dev_op_dqo_qpl) { + priv->queue_format = GVE_DQO_QPL_FORMAT; + supported_features_mask = + be32_to_cpu(dev_op_dqo_qpl->supported_features_mask); + } else if (dev_op_gqi_rda) { priv->queue_format = GVE_GQI_RDA_FORMAT; dev_info(&priv->pdev->dev, "Driver is running with GQI RDA queue format.\n"); @@ -798,7 +865,7 @@ int gve_adminq_describe_device(struct gve_priv *priv) priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues); gve_enable_supported_features(priv, supported_features_mask, - dev_op_jumbo_frames); + dev_op_jumbo_frames, dev_op_dqo_qpl); free_device_descriptor: dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor, diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h index f894beb3deaf..38a22279e863 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.h +++ b/drivers/net/ethernet/google/gve/gve_adminq.h @@ -109,6 +109,14 @@ struct gve_device_option_dqo_rda { static_assert(sizeof(struct gve_device_option_dqo_rda) == 8); +struct gve_device_option_dqo_qpl { + __be32 supported_features_mask; + __be16 tx_pages_per_qpl; + __be16 rx_pages_per_qpl; +}; + +static_assert(sizeof(struct gve_device_option_dqo_qpl) == 8); + struct gve_device_option_jumbo_frames { __be32 supported_features_mask; __be16 max_mtu; @@ -130,6 +138,7 @@ enum gve_dev_opt_id { GVE_DEV_OPT_ID_GQI_RDA = 0x2, GVE_DEV_OPT_ID_GQI_QPL = 0x3, GVE_DEV_OPT_ID_DQO_RDA = 0x4, + GVE_DEV_OPT_ID_DQO_QPL = 0x7, GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8, }; @@ -139,6 +148,7 @@ enum gve_dev_opt_req_feat_mask { GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0, + GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0, }; enum gve_sup_feature_mask { diff --git 
a/drivers/net/ethernet/google/gve/gve_desc.h b/drivers/net/ethernet/google/gve/gve_desc.h index f4ae9e19b844..c2874cdcf40c 100644 --- a/drivers/net/ethernet/google/gve/gve_desc.h +++ b/drivers/net/ethernet/google/gve/gve_desc.h @@ -105,10 +105,10 @@ union gve_rx_data_slot { __be64 addr; }; -/* GVE Recive Packet Descriptor Seq No */ +/* GVE Receive Packet Descriptor Seq No */ #define GVE_SEQNO(x) (be16_to_cpu(x) & 0x7) -/* GVE Recive Packet Descriptor Flags */ +/* GVE Receive Packet Descriptor Flags */ #define GVE_RXFLG(x) cpu_to_be16(1 << (3 + (x))) #define GVE_RXF_FRAG GVE_RXFLG(3) /* IP Fragment */ #define GVE_RXF_IPV4 GVE_RXFLG(4) /* IPv4 */ diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index e6f1711d9be0..2d42e733837b 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -31,7 +31,6 @@ // Minimum amount of time between queue kicks in msec (10 seconds) #define MIN_TX_TIMEOUT_GAP (1000 * 10) -#define DQO_TX_MAX 0x3FFFF char gve_driver_name[] = "gve"; const char gve_version_str[] = GVE_VERSION; @@ -191,7 +190,7 @@ static int gve_alloc_stats_report(struct gve_priv *priv) rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) * priv->rx_cfg.num_queues; priv->stats_report_len = struct_size(priv->stats_report, stats, - tx_stats_num + rx_stats_num); + size_add(tx_stats_num, rx_stats_num)); priv->stats_report = dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len, &priv->stats_report_bus, GFP_KERNEL); @@ -255,10 +254,13 @@ static int gve_napi_poll(struct napi_struct *napi, int budget) if (block->tx) { if (block->tx->q_num < priv->tx_cfg.num_queues) reschedule |= gve_tx_poll(block, budget); - else + else if (budget) reschedule |= gve_xdp_poll(block, budget); } + if (!budget) + return 0; + if (block->rx) { work_done = gve_rx_poll(block, budget); reschedule |= work_done == budget; @@ -282,7 +284,7 @@ static int gve_napi_poll(struct napi_struct *napi, int budget) if (block->rx) reschedule |= gve_rx_work_pending(block->rx); - if (reschedule && napi_reschedule(napi)) + if (reschedule && napi_schedule(napi)) iowrite32be(GVE_IRQ_MASK, irq_doorbell); } return work_done; @@ -299,6 +301,9 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget) if (block->tx) reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true); + if (!budget) + return 0; + if (block->rx) { work_done = gve_rx_poll_dqo(block, budget); reschedule |= work_done == budget; @@ -494,7 +499,7 @@ static int gve_setup_device_resources(struct gve_priv *priv) goto abort_with_stats_report; } - if (priv->queue_format == GVE_DQO_RDA_FORMAT) { + if (!gve_is_gqi(priv)) { priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo), GFP_KERNEL); if (!priv->ptype_lut_dqo) { @@ -1083,11 +1088,12 @@ free_qpls: static int gve_alloc_qpls(struct gve_priv *priv) { int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues; + int page_count; int start_id; int i, j; int err; - if (priv->queue_format != GVE_GQI_QPL_FORMAT) + if (!gve_is_qpl(priv)) return 0; priv->qpls = kvcalloc(max_queues, sizeof(*priv->qpls), GFP_KERNEL); @@ -1095,17 +1101,25 @@ static int gve_alloc_qpls(struct gve_priv *priv) return -ENOMEM; start_id = gve_tx_start_qpl_id(priv); + page_count = priv->tx_pages_per_qpl; for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) { err = gve_alloc_queue_page_list(priv, i, - priv->tx_pages_per_qpl); + page_count); if (err) goto free_qpls; } start_id = gve_rx_start_qpl_id(priv); + + /* For GQI_QPL 
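(i.e. GQI with pre-registered queue page lists),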
number of pages allocated have 1:1 relationship with + * number of descriptors. For DQO, number of pages required are + * more than descriptors (because of out of order completions). + */ + page_count = priv->queue_format == GVE_GQI_QPL_FORMAT ? + priv->rx_data_slot_cnt : priv->rx_pages_per_qpl; for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) { err = gve_alloc_queue_page_list(priv, i, - priv->rx_data_slot_cnt); + page_count); if (err) goto free_qpls; } @@ -2051,7 +2065,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) /* Big TCP is only supported on DQ*/ if (!gve_is_gqi(priv)) - netif_set_tso_max_size(priv->dev, DQO_TX_MAX); + netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX); priv->num_registered_pages = 0; priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK; diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c index d1da7413dc4d..73655347902d 100644 --- a/drivers/net/ethernet/google/gve/gve_rx.c +++ b/drivers/net/ethernet/google/gve/gve_rx.c @@ -146,7 +146,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx) err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i], &rx->data.data_ring[i]); if (err) - goto alloc_err; + goto alloc_err_rda; } if (!rx->data.raw_addressing) { @@ -171,12 +171,26 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx) return slots; alloc_err_qpl: + /* Fully free the copy pool pages. */ while (j--) { page_ref_sub(rx->qpl_copy_pool[j].page, rx->qpl_copy_pool[j].pagecnt_bias - 1); put_page(rx->qpl_copy_pool[j].page); } -alloc_err: + + /* Do not fully free QPL pages - only remove the bias added in this + * function with gve_setup_rx_buffer. + */ + while (i--) + page_ref_sub(rx->data.page_info[i].page, + rx->data.page_info[i].pagecnt_bias - 1); + + gve_unassign_qpl(priv, rx->data.qpl->id); + rx->data.qpl = NULL; + + return err; + +alloc_err_rda: while (i--) gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i], @@ -993,10 +1007,6 @@ int gve_rx_poll(struct gve_notify_block *block, int budget) feat = block->napi.dev->features; - /* If budget is 0, do all the work */ - if (budget == 0) - budget = INT_MAX; - if (budget > 0) work_done = gve_clean_rx_done(rx, budget, feat); diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c index e57b73eb70f6..f281e42a7ef9 100644 --- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c @@ -22,11 +22,13 @@ static int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs) } static void gve_free_page_dqo(struct gve_priv *priv, - struct gve_rx_buf_state_dqo *bs) + struct gve_rx_buf_state_dqo *bs, + bool free_page) { page_ref_sub(bs->page_info.page, bs->page_info.pagecnt_bias - 1); - gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr, - DMA_FROM_DEVICE); + if (free_page) + gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr, + DMA_FROM_DEVICE); bs->page_info.page = NULL; } @@ -130,12 +132,20 @@ gve_get_recycled_buf_state(struct gve_rx_ring *rx) */ for (i = 0; i < 5; i++) { buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states); - if (gve_buf_ref_cnt(buf_state) == 0) + if (gve_buf_ref_cnt(buf_state) == 0) { + rx->dqo.used_buf_states_cnt--; return buf_state; + } gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state); } + /* For QPL, we cannot allocate any new buffers and must + * wait for the existing ones to be available. 
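+ * The used_buf_states_cnt counter kept alongside this list lets the + * RX path notice when nearly every buffer is parked here and fall + * back to copying into freshly allocated pages (gve_rx_copy_ondemand()).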
+ */ + if (rx->dqo.qpl) + return NULL; + /* If there are no free buf states discard an entry from * `used_buf_states` so it can be used. */ @@ -144,23 +154,39 @@ gve_get_recycled_buf_state(struct gve_rx_ring *rx) if (gve_buf_ref_cnt(buf_state) == 0) return buf_state; - gve_free_page_dqo(rx->gve, buf_state); + gve_free_page_dqo(rx->gve, buf_state, true); gve_free_buf_state(rx, buf_state); } return NULL; } -static int gve_alloc_page_dqo(struct gve_priv *priv, +static int gve_alloc_page_dqo(struct gve_rx_ring *rx, struct gve_rx_buf_state_dqo *buf_state) { - int err; + struct gve_priv *priv = rx->gve; + u32 idx; - err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page, - &buf_state->addr, DMA_FROM_DEVICE, GFP_ATOMIC); - if (err) - return err; + if (!rx->dqo.qpl) { + int err; + err = gve_alloc_page(priv, &priv->pdev->dev, + &buf_state->page_info.page, + &buf_state->addr, + DMA_FROM_DEVICE, GFP_ATOMIC); + if (err) + return err; + } else { + idx = rx->dqo.next_qpl_page_idx; + if (idx >= priv->rx_pages_per_qpl) { + net_err_ratelimited("%s: Out of QPL pages\n", + priv->dev->name); + return -ENOMEM; + } + buf_state->page_info.page = rx->dqo.qpl->pages[idx]; + buf_state->addr = rx->dqo.qpl->page_buses[idx]; + rx->dqo.next_qpl_page_idx++; + } buf_state->page_info.page_offset = 0; buf_state->page_info.page_address = page_address(buf_state->page_info.page); @@ -195,9 +221,13 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx) for (i = 0; i < rx->dqo.num_buf_states; i++) { struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i]; - + /* Only free page for RDA. QPL pages are freed in gve_main. */ if (bs->page_info.page) - gve_free_page_dqo(priv, bs); + gve_free_page_dqo(priv, bs, !rx->dqo.qpl); + } + if (rx->dqo.qpl) { + gve_unassign_qpl(priv, rx->dqo.qpl->id); + rx->dqo.qpl = NULL; } if (rx->dqo.bufq.desc_ring) { @@ -229,7 +259,8 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx) int i; const u32 buffer_queue_slots = - priv->options_dqo_rda.rx_buff_ring_entries; + priv->queue_format == GVE_DQO_RDA_FORMAT ? + priv->options_dqo_rda.rx_buff_ring_entries : priv->rx_desc_cnt; const u32 completion_queue_slots = priv->rx_desc_cnt; netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n"); @@ -243,7 +274,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx) rx->ctx.skb_head = NULL; rx->ctx.skb_tail = NULL; - rx->dqo.num_buf_states = min_t(s16, S16_MAX, buffer_queue_slots * 4); + rx->dqo.num_buf_states = priv->queue_format == GVE_DQO_RDA_FORMAT ? 
+ min_t(s16, S16_MAX, buffer_queue_slots * 4) : + priv->rx_pages_per_qpl; rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states, sizeof(rx->dqo.buf_states[0]), GFP_KERNEL); @@ -275,6 +308,13 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx) if (!rx->dqo.bufq.desc_ring) goto err; + if (priv->queue_format != GVE_DQO_RDA_FORMAT) { + rx->dqo.qpl = gve_assign_rx_qpl(priv, rx->q_num); + if (!rx->dqo.qpl) + goto err; + rx->dqo.next_qpl_page_idx = 0; + } + rx->q_resources = dma_alloc_coherent(hdev, sizeof(*rx->q_resources), &rx->q_resources_bus, GFP_KERNEL); if (!rx->q_resources) @@ -352,7 +392,7 @@ void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx) if (unlikely(!buf_state)) break; - if (unlikely(gve_alloc_page_dqo(priv, buf_state))) { + if (unlikely(gve_alloc_page_dqo(rx, buf_state))) { u64_stats_update_begin(&rx->statss); rx->rx_buf_alloc_fail++; u64_stats_update_end(&rx->statss); @@ -415,6 +455,7 @@ static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx, mark_used: gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state); + rx->dqo.used_buf_states_cnt++; } static void gve_rx_skb_csum(struct sk_buff *skb, @@ -475,6 +516,43 @@ static void gve_rx_free_skb(struct gve_rx_ring *rx) rx->ctx.skb_tail = NULL; } +static bool gve_rx_should_trigger_copy_ondemand(struct gve_rx_ring *rx) +{ + if (!rx->dqo.qpl) + return false; + if (rx->dqo.used_buf_states_cnt < + (rx->dqo.num_buf_states - + GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD)) + return false; + return true; +} + +static int gve_rx_copy_ondemand(struct gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state, + u16 buf_len) +{ + struct page *page = alloc_page(GFP_ATOMIC); + int num_frags; + + if (!page) + return -ENOMEM; + + memcpy(page_address(page), + buf_state->page_info.page_address + + buf_state->page_info.page_offset, + buf_len); + num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags; + skb_add_rx_frag(rx->ctx.skb_tail, num_frags, page, + 0, buf_len, PAGE_SIZE); + + u64_stats_update_begin(&rx->statss); + rx->rx_frag_alloc_cnt++; + u64_stats_update_end(&rx->statss); + /* Return unused buffer. */ + gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state); + return 0; +} + /* Chains multi skbs for single rx packet. * Returns 0 if buffer is appended, -1 otherwise. */ @@ -492,7 +570,10 @@ static int gve_rx_append_frags(struct napi_struct *napi, if (!skb) return -1; - skb_shinfo(rx->ctx.skb_tail)->frag_list = skb; + if (rx->ctx.skb_tail == rx->ctx.skb_head) + skb_shinfo(rx->ctx.skb_head)->frag_list = skb; + else + rx->ctx.skb_tail->next = skb; rx->ctx.skb_tail = skb; num_frags = 0; } @@ -502,12 +583,20 @@ static int gve_rx_append_frags(struct napi_struct *napi, rx->ctx.skb_head->truesize += priv->data_buffer_size_dqo; } + /* Trigger ondemand page allocation if we are running low on buffers */ + if (gve_rx_should_trigger_copy_ondemand(rx)) + return gve_rx_copy_ondemand(rx, buf_state, buf_len); + skb_add_rx_frag(rx->ctx.skb_tail, num_frags, buf_state->page_info.page, buf_state->page_info.page_offset, buf_len, priv->data_buffer_size_dqo); gve_dec_pagecnt_bias(&buf_state->page_info); + /* Advances buffer page-offset if page is partially used. + * Marks buffer as used if page is full. 
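+ * A used buffer is handed back to the NIC only after + * gve_buf_ref_cnt() shows the stack has dropped all references to it.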
+ */ + gve_try_recycle_buf(priv, rx, buf_state); return 0; } @@ -561,8 +650,6 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, priv)) != 0) { goto error; } - - gve_try_recycle_buf(priv, rx, buf_state); return 0; } @@ -588,6 +675,12 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, goto error; rx->ctx.skb_tail = rx->ctx.skb_head; + if (gve_rx_should_trigger_copy_ondemand(rx)) { + if (gve_rx_copy_ondemand(rx, buf_state, buf_len) < 0) + goto error; + return 0; + } + skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page, buf_state->page_info.page_offset, buf_len, priv->data_buffer_size_dqo); diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c index 6957a865cff3..9f6ffc4a54f0 100644 --- a/drivers/net/ethernet/google/gve/gve_tx.c +++ b/drivers/net/ethernet/google/gve/gve_tx.c @@ -925,10 +925,6 @@ bool gve_xdp_poll(struct gve_notify_block *block, int budget) bool repoll; u32 to_do; - /* If budget is 0, do all the work */ - if (budget == 0) - budget = INT_MAX; - /* Find out how much work there is to be done */ nic_done = gve_tx_load_event_counter(priv, tx); to_do = min_t(u32, (nic_done - tx->done), budget); diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c index 3c09e66ba1ab..1e19b834a613 100644 --- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c @@ -13,6 +13,89 @@ #include <linux/slab.h> #include <linux/skbuff.h> +/* Returns true if tx_bufs are available. */ +static bool gve_has_free_tx_qpl_bufs(struct gve_tx_ring *tx, int count) +{ + int num_avail; + + if (!tx->dqo.qpl) + return true; + + num_avail = tx->dqo.num_tx_qpl_bufs - + (tx->dqo_tx.alloc_tx_qpl_buf_cnt - + tx->dqo_tx.free_tx_qpl_buf_cnt); + + if (count <= num_avail) + return true; + + /* Update cached value from dqo_compl. */ + tx->dqo_tx.free_tx_qpl_buf_cnt = + atomic_read_acquire(&tx->dqo_compl.free_tx_qpl_buf_cnt); + + num_avail = tx->dqo.num_tx_qpl_bufs - + (tx->dqo_tx.alloc_tx_qpl_buf_cnt - + tx->dqo_tx.free_tx_qpl_buf_cnt); + + return count <= num_avail; +} + +static s16 +gve_alloc_tx_qpl_buf(struct gve_tx_ring *tx) +{ + s16 index; + + index = tx->dqo_tx.free_tx_qpl_buf_head; + + /* No TX buffers available, try to steal the list from the + * completion handler. + */ + if (unlikely(index == -1)) { + tx->dqo_tx.free_tx_qpl_buf_head = + atomic_xchg(&tx->dqo_compl.free_tx_qpl_buf_head, -1); + index = tx->dqo_tx.free_tx_qpl_buf_head; + + if (unlikely(index == -1)) + return index; + } + + /* Remove TX buf from free list */ + tx->dqo_tx.free_tx_qpl_buf_head = tx->dqo.tx_qpl_buf_next[index]; + + return index; +} + +static void +gve_free_tx_qpl_bufs(struct gve_tx_ring *tx, + struct gve_tx_pending_packet_dqo *pkt) +{ + s16 index; + int i; + + if (!pkt->num_bufs) + return; + + index = pkt->tx_qpl_buf_ids[0]; + /* Create a linked list of buffers to be added to the free list */ + for (i = 1; i < pkt->num_bufs; i++) { + tx->dqo.tx_qpl_buf_next[index] = pkt->tx_qpl_buf_ids[i]; + index = pkt->tx_qpl_buf_ids[i]; + } + + while (true) { + s16 old_head = atomic_read_acquire(&tx->dqo_compl.free_tx_qpl_buf_head); + + tx->dqo.tx_qpl_buf_next[index] = old_head; + if (atomic_cmpxchg(&tx->dqo_compl.free_tx_qpl_buf_head, + old_head, + pkt->tx_qpl_buf_ids[0]) == old_head) { + break; + } + } + + atomic_add(pkt->num_bufs, &tx->dqo_compl.free_tx_qpl_buf_cnt); + pkt->num_bufs = 0; +} + /* Returns true if a gve_tx_pending_packet_dqo object is available. 
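* The slot's index in tx->dqo.pending_packets doubles as the * completion tag carried by each descriptor, so no packet can be * posted without reserving one.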
*/ static bool gve_has_pending_packet(struct gve_tx_ring *tx) { @@ -136,9 +219,40 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, int idx) kvfree(tx->dqo.pending_packets); tx->dqo.pending_packets = NULL; + kvfree(tx->dqo.tx_qpl_buf_next); + tx->dqo.tx_qpl_buf_next = NULL; + + if (tx->dqo.qpl) { + gve_unassign_qpl(priv, tx->dqo.qpl->id); + tx->dqo.qpl = NULL; + } + netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx); } +static int gve_tx_qpl_buf_init(struct gve_tx_ring *tx) +{ + int num_tx_qpl_bufs = GVE_TX_BUFS_PER_PAGE_DQO * + tx->dqo.qpl->num_entries; + int i; + + tx->dqo.tx_qpl_buf_next = kvcalloc(num_tx_qpl_bufs, + sizeof(tx->dqo.tx_qpl_buf_next[0]), + GFP_KERNEL); + if (!tx->dqo.tx_qpl_buf_next) + return -ENOMEM; + + tx->dqo.num_tx_qpl_bufs = num_tx_qpl_bufs; + + /* Generate free TX buf list */ + for (i = 0; i < num_tx_qpl_bufs - 1; i++) + tx->dqo.tx_qpl_buf_next[i] = i + 1; + tx->dqo.tx_qpl_buf_next[num_tx_qpl_bufs - 1] = -1; + + atomic_set_release(&tx->dqo_compl.free_tx_qpl_buf_head, -1); + return 0; +} + static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx) { struct gve_tx_ring *tx = &priv->tx[idx]; @@ -155,7 +269,9 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx) /* Queue sizes must be a power of 2 */ tx->mask = priv->tx_desc_cnt - 1; - tx->dqo.complq_mask = priv->options_dqo_rda.tx_comp_ring_entries - 1; + tx->dqo.complq_mask = priv->queue_format == GVE_DQO_RDA_FORMAT ? + priv->options_dqo_rda.tx_comp_ring_entries - 1 : + tx->mask; /* The max number of pending packets determines the maximum number of * descriptors which maybe written to the completion queue. @@ -211,6 +327,15 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx) if (!tx->q_resources) goto err; + if (gve_is_qpl(priv)) { + tx->dqo.qpl = gve_assign_tx_qpl(priv, idx); + if (!tx->dqo.qpl) + goto err; + + if (gve_tx_qpl_buf_init(tx)) + goto err; + } + gve_tx_add_to_block(priv, idx); return 0; @@ -267,20 +392,27 @@ static u32 num_avail_tx_slots(const struct gve_tx_ring *tx) return tx->mask - num_used; } +static bool gve_has_avail_slots_tx_dqo(struct gve_tx_ring *tx, + int desc_count, int buf_count) +{ + return gve_has_pending_packet(tx) && + num_avail_tx_slots(tx) >= desc_count && + gve_has_free_tx_qpl_bufs(tx, buf_count); +} + /* Stops the queue if available descriptors is less than 'count'. * Return: 0 if stop is not required. */ -static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx, int count) +static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx, + int desc_count, int buf_count) { - if (likely(gve_has_pending_packet(tx) && - num_avail_tx_slots(tx) >= count)) + if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count))) return 0; /* Update cached TX head pointer */ tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head); - if (likely(gve_has_pending_packet(tx) && - num_avail_tx_slots(tx) >= count)) + if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count))) return 0; /* No space, so stop the queue */ @@ -295,8 +427,7 @@ static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx, int count) */ tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head); - if (likely(!gve_has_pending_packet(tx) || - num_avail_tx_slots(tx) < count)) + if (likely(!gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count))) return -EBUSY; netif_tx_start_queue(tx->netdev_txq); @@ -444,44 +575,16 @@ gve_tx_fill_general_ctx_desc(struct gve_tx_general_context_desc_dqo *desc, }; } -/* Returns 0 on success, or < 0 on error. 
- * - * Before this function is called, the caller must ensure - * gve_has_pending_packet(tx) returns true. - */ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, - struct sk_buff *skb) + struct sk_buff *skb, + struct gve_tx_pending_packet_dqo *pkt, + s16 completion_tag, + u32 *desc_idx, + bool is_gso) { const struct skb_shared_info *shinfo = skb_shinfo(skb); - const bool is_gso = skb_is_gso(skb); - u32 desc_idx = tx->dqo_tx.tail; - - struct gve_tx_pending_packet_dqo *pkt; - struct gve_tx_metadata_dqo metadata; - s16 completion_tag; int i; - pkt = gve_alloc_pending_packet(tx); - pkt->skb = skb; - pkt->num_bufs = 0; - completion_tag = pkt - tx->dqo.pending_packets; - - gve_extract_tx_metadata_dqo(skb, &metadata); - if (is_gso) { - int header_len = gve_prep_tso(skb); - - if (unlikely(header_len < 0)) - goto err; - - gve_tx_fill_tso_ctx_desc(&tx->dqo.tx_ring[desc_idx].tso_ctx, - skb, &metadata, header_len); - desc_idx = (desc_idx + 1) & tx->mask; - } - - gve_tx_fill_general_ctx_desc(&tx->dqo.tx_ring[desc_idx].general_ctx, - &metadata); - desc_idx = (desc_idx + 1) & tx->mask; - /* Note: HW requires that the size of a non-TSO packet be within the * range of [17, 9728]. * @@ -490,6 +593,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, * - Hypervisor won't allow MTU larger than 9216. */ + pkt->num_bufs = 0; /* Map the linear portion of skb */ { u32 len = skb_headlen(skb); @@ -503,7 +607,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr); ++pkt->num_bufs; - gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr, + gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr, completion_tag, /*eop=*/shinfo->nr_frags == 0, is_gso); } @@ -522,10 +626,139 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr); ++pkt->num_bufs; - gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr, + gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr, completion_tag, is_eop, is_gso); } + return 0; +err: + for (i = 0; i < pkt->num_bufs; i++) { + if (i == 0) { + dma_unmap_single(tx->dev, + dma_unmap_addr(pkt, dma[i]), + dma_unmap_len(pkt, len[i]), + DMA_TO_DEVICE); + } else { + dma_unmap_page(tx->dev, + dma_unmap_addr(pkt, dma[i]), + dma_unmap_len(pkt, len[i]), + DMA_TO_DEVICE); + } + } + pkt->num_bufs = 0; + return -1; +} + +/* Tx buffer i corresponds to + * qpl_page_id = i / GVE_TX_BUFS_PER_PAGE_DQO + * qpl_page_offset = (i % GVE_TX_BUFS_PER_PAGE_DQO) * GVE_TX_BUF_SIZE_DQO + */ +static void gve_tx_buf_get_addr(struct gve_tx_ring *tx, + s16 index, + void **va, dma_addr_t *dma_addr) +{ + int page_id = index >> (PAGE_SHIFT - GVE_TX_BUF_SHIFT_DQO); + int offset = (index & (GVE_TX_BUFS_PER_PAGE_DQO - 1)) << GVE_TX_BUF_SHIFT_DQO; + + *va = page_address(tx->dqo.qpl->pages[page_id]) + offset; + *dma_addr = tx->dqo.qpl->page_buses[page_id] + offset; +} + +static int gve_tx_add_skb_copy_dqo(struct gve_tx_ring *tx, + struct sk_buff *skb, + struct gve_tx_pending_packet_dqo *pkt, + s16 completion_tag, + u32 *desc_idx, + bool is_gso) +{ + u32 copy_offset = 0; + dma_addr_t dma_addr; + u32 copy_len; + s16 index; + void *va; + + /* Break the packet into buffer size chunks */ + pkt->num_bufs = 0; + while (copy_offset < skb->len) { + index = gve_alloc_tx_qpl_buf(tx); + if (unlikely(index == -1)) + goto err; + + gve_tx_buf_get_addr(tx, index, &va, &dma_addr); + copy_len = min_t(u32, GVE_TX_BUF_SIZE_DQO, + skb->len - copy_offset); + skb_copy_bits(skb, copy_offset, va, copy_len); + + 
copy_offset += copy_len; + dma_sync_single_for_device(tx->dev, dma_addr, + copy_len, DMA_TO_DEVICE); + gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, + copy_len, + dma_addr, + completion_tag, + copy_offset == skb->len, + is_gso); + + pkt->tx_qpl_buf_ids[pkt->num_bufs] = index; + ++tx->dqo_tx.alloc_tx_qpl_buf_cnt; + ++pkt->num_bufs; + } + + return 0; +err: + /* Should not be here if gve_has_free_tx_qpl_bufs() check is correct */ + gve_free_tx_qpl_bufs(tx, pkt); + return -ENOMEM; +} + +/* Returns 0 on success, or < 0 on error. + * + * Before this function is called, the caller must ensure + * gve_has_pending_packet(tx) returns true. + */ +static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx, + struct sk_buff *skb) +{ + const bool is_gso = skb_is_gso(skb); + u32 desc_idx = tx->dqo_tx.tail; + struct gve_tx_pending_packet_dqo *pkt; + struct gve_tx_metadata_dqo metadata; + s16 completion_tag; + + pkt = gve_alloc_pending_packet(tx); + pkt->skb = skb; + completion_tag = pkt - tx->dqo.pending_packets; + + gve_extract_tx_metadata_dqo(skb, &metadata); + if (is_gso) { + int header_len = gve_prep_tso(skb); + + if (unlikely(header_len < 0)) + goto err; + + gve_tx_fill_tso_ctx_desc(&tx->dqo.tx_ring[desc_idx].tso_ctx, + skb, &metadata, header_len); + desc_idx = (desc_idx + 1) & tx->mask; + } + + gve_tx_fill_general_ctx_desc(&tx->dqo.tx_ring[desc_idx].general_ctx, + &metadata); + desc_idx = (desc_idx + 1) & tx->mask; + + if (tx->dqo.qpl) { + if (gve_tx_add_skb_copy_dqo(tx, skb, pkt, + completion_tag, + &desc_idx, is_gso)) + goto err; + } else { + if (gve_tx_add_skb_no_copy_dqo(tx, skb, pkt, + completion_tag, + &desc_idx, is_gso)) + goto err; + } + + tx->dqo_tx.posted_packet_desc_cnt += pkt->num_bufs; + /* Commit the changes to our state */ tx->dqo_tx.tail = desc_idx; @@ -547,22 +780,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, return 0; err: - for (i = 0; i < pkt->num_bufs; i++) { - if (i == 0) { - dma_unmap_single(tx->dev, - dma_unmap_addr(pkt, dma[i]), - dma_unmap_len(pkt, len[i]), - DMA_TO_DEVICE); - } else { - dma_unmap_page(tx->dev, - dma_unmap_addr(pkt, dma[i]), - dma_unmap_len(pkt, len[i]), - DMA_TO_DEVICE); - } - } - pkt->skb = NULL; - pkt->num_bufs = 0; gve_free_pending_packet(tx, pkt); return -1; @@ -636,40 +854,56 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx, int num_buffer_descs; int total_num_descs; - if (skb_is_gso(skb)) { - /* If TSO doesn't meet HW requirements, attempt to linearize the - * packet. - */ - if (unlikely(!gve_can_send_tso(skb) && - skb_linearize(skb) < 0)) { - net_err_ratelimited("%s: Failed to transmit TSO packet\n", - priv->dev->name); - goto drop; - } - - if (unlikely(ipv6_hopopt_jumbo_remove(skb))) - goto drop; + if (tx->dqo.qpl) { + if (skb_is_gso(skb)) + if (unlikely(ipv6_hopopt_jumbo_remove(skb))) + goto drop; - num_buffer_descs = gve_num_buffer_descs_needed(skb); + /* We do not need to verify the number of buffers used per + * packet or per segment in case of TSO as with 2K size buffers + * none of the TX packet rules would be violated. + * + * gve_can_send_tso() checks that each TCP segment of gso_size is + * not distributed over more than 9 SKB frags.. + */ + num_buffer_descs = DIV_ROUND_UP(skb->len, GVE_TX_BUF_SIZE_DQO); } else { - num_buffer_descs = gve_num_buffer_descs_needed(skb); + if (skb_is_gso(skb)) { + /* If TSO doesn't meet HW requirements, attempt to linearize the + * packet. 
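+ * Linearizing copies every fragment into one contiguous buffer, + * trading a memcpy for a frag layout the hardware accepts.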
+ */ + if (unlikely(!gve_can_send_tso(skb) && + skb_linearize(skb) < 0)) { + net_err_ratelimited("%s: Failed to transmit TSO packet\n", + priv->dev->name); + goto drop; + } - if (unlikely(num_buffer_descs > GVE_TX_MAX_DATA_DESCS)) { - if (unlikely(skb_linearize(skb) < 0)) + if (unlikely(ipv6_hopopt_jumbo_remove(skb))) goto drop; - num_buffer_descs = 1; + num_buffer_descs = gve_num_buffer_descs_needed(skb); + } else { + num_buffer_descs = gve_num_buffer_descs_needed(skb); + + if (unlikely(num_buffer_descs > GVE_TX_MAX_DATA_DESCS)) { + if (unlikely(skb_linearize(skb) < 0)) + goto drop; + + num_buffer_descs = 1; + } } } /* Metadata + (optional TSO) + data descriptors. */ total_num_descs = 1 + skb_is_gso(skb) + num_buffer_descs; if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs + - GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP))) { + GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP, + num_buffer_descs))) { return -1; } - if (unlikely(gve_tx_add_skb_no_copy_dqo(tx, skb) < 0)) + if (unlikely(gve_tx_add_skb_dqo(tx, skb) < 0)) goto drop; netdev_tx_sent_queue(tx->netdev_txq, skb->len); @@ -817,7 +1051,11 @@ static void gve_handle_packet_completion(struct gve_priv *priv, return; } } - gve_unmap_packet(tx->dev, pending_packet); + tx->dqo_tx.completed_packet_desc_cnt += pending_packet->num_bufs; + if (tx->dqo.qpl) + gve_free_tx_qpl_bufs(tx, pending_packet); + else + gve_unmap_packet(tx->dev, pending_packet); *bytes += pending_packet->skb->len; (*pkts)++; @@ -875,12 +1113,16 @@ static void remove_miss_completions(struct gve_priv *priv, remove_from_list(tx, &tx->dqo_compl.miss_completions, pending_packet); - /* Unmap buffers and free skb but do not unallocate packet i.e. + /* Unmap/free TX buffers and free skb but do not unallocate packet i.e. * the completion tag is not freed to ensure that the driver * can take appropriate action if a corresponding valid * completion is received later. */ - gve_unmap_packet(tx->dev, pending_packet); + if (tx->dqo.qpl) + gve_free_tx_qpl_bufs(tx, pending_packet); + else + gve_unmap_packet(tx->dev, pending_packet); + /* This indicates the packet was dropped. 
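* The skb is freed, but the pending packet (and thus its completion * tag) stays reserved in case the device later reports a valid * completion for it.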
*/ dev_kfree_skb_any(pending_packet->skb); pending_packet->skb = NULL; diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 50c3f5d6611f..b91e7a06b97f 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -960,8 +960,8 @@ static int hip04_mac_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (irq <= 0) { - ret = -EINVAL; + if (irq < 0) { + ret = irq; goto init_fail; } @@ -1021,7 +1021,7 @@ init_fail: return ret; } -static int hip04_remove(struct platform_device *pdev) +static void hip04_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct hip04_priv *priv = netdev_priv(ndev); @@ -1035,8 +1035,6 @@ static int hip04_remove(struct platform_device *pdev) of_node_put(priv->phy_node); cancel_work_sync(&priv->tx_timeout_task); free_netdev(ndev); - - return 0; } static const struct of_device_id hip04_mac_match[] = { @@ -1048,7 +1046,7 @@ MODULE_DEVICE_TABLE(of, hip04_mac_match); static struct platform_driver hip04_mac_driver = { .probe = hip04_mac_probe, - .remove = hip04_remove, + .remove_new = hip04_remove, .driver = { .name = DRV_NAME, .of_match_table = hip04_mac_match, diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c index ce2571c16e43..2406263c9dd3 100644 --- a/drivers/net/ethernet/hisilicon/hisi_femac.c +++ b/drivers/net/ethernet/hisilicon/hisi_femac.c @@ -862,8 +862,8 @@ static int hisi_femac_drv_probe(struct platform_device *pdev) goto out_disconnect_phy; ndev->irq = platform_get_irq(pdev, 0); - if (ndev->irq <= 0) { - ret = -ENODEV; + if (ndev->irq < 0) { + ret = ndev->irq; goto out_disconnect_phy; } @@ -893,7 +893,7 @@ out_free_netdev: return ret; } -static int hisi_femac_drv_remove(struct platform_device *pdev) +static void hisi_femac_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct hisi_femac_priv *priv = netdev_priv(ndev); @@ -904,8 +904,6 @@ static int hisi_femac_drv_remove(struct platform_device *pdev) phy_disconnect(ndev->phydev); clk_disable_unprepare(priv->clk); free_netdev(ndev); - - return 0; } #ifdef CONFIG_PM @@ -961,7 +959,7 @@ static struct platform_driver hisi_femac_driver = { .of_match_table = hisi_femac_match, }, .probe = hisi_femac_drv_probe, - .remove = hisi_femac_drv_remove, + .remove_new = hisi_femac_drv_remove, #ifdef CONFIG_PM .suspend = hisi_femac_drv_suspend, .resume = hisi_femac_drv_resume, diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index f867e9531117..1a972b093a42 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -7,7 +7,8 @@ #include <linux/interrupt.h> #include <linux/etherdevice.h> #include <linux/platform_device.h> -#include <linux/of_device.h> +#include <linux/property.h> +#include <linux/of.h> #include <linux/of_net.h> #include <linux/of_mdio.h> #include <linux/reset.h> @@ -1094,7 +1095,6 @@ static int hix5hd2_dev_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = dev->of_node; - const struct of_device_id *of_id = NULL; struct net_device *ndev; struct hix5hd2_priv *priv; struct mii_bus *bus; @@ -1110,12 +1110,7 @@ static int hix5hd2_dev_probe(struct platform_device *pdev) priv->dev = dev; priv->netdev = ndev; - of_id = of_match_device(hix5hd2_of_match, dev); - if (!of_id) { - ret = -EINVAL; - goto 
out_free_netdev; - } - priv->hw_cap = (unsigned long)of_id->data; + priv->hw_cap = (unsigned long)device_get_match_data(dev); priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) { @@ -1206,9 +1201,8 @@ static int hix5hd2_dev_probe(struct platform_device *pdev) } ndev->irq = platform_get_irq(pdev, 0); - if (ndev->irq <= 0) { - netdev_err(ndev, "No irq resource\n"); - ret = -EINVAL; + if (ndev->irq < 0) { + ret = ndev->irq; goto out_phy_node; } @@ -1283,7 +1277,7 @@ out_free_netdev: return ret; } -static int hix5hd2_dev_remove(struct platform_device *pdev) +static void hix5hd2_dev_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct hix5hd2_priv *priv = netdev_priv(ndev); @@ -1299,8 +1293,6 @@ static int hix5hd2_dev_remove(struct platform_device *pdev) of_node_put(priv->phy_node); cancel_work_sync(&priv->tx_timeout_task); free_netdev(ndev); - - return 0; } static const struct of_device_id hix5hd2_of_match[] = { @@ -1320,7 +1312,7 @@ static struct platform_driver hix5hd2_dev_driver = { .of_match_table = hix5hd2_of_match, }, .probe = hix5hd2_dev_probe, - .remove = hix5hd2_dev_remove, + .remove_new = hix5hd2_dev_remove, }; module_platform_driver(hix5hd2_dev_driver); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 928d934cb21a..f75668c47935 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -66,6 +66,27 @@ static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb) } } +static u32 hns_mac_link_anti_shake(struct mac_driver *mac_ctrl_drv) +{ +#define HNS_MAC_LINK_WAIT_TIME 5 +#define HNS_MAC_LINK_WAIT_CNT 40 + + u32 link_status = 0; + int i; + + if (!mac_ctrl_drv->get_link_status) + return link_status; + + for (i = 0; i < HNS_MAC_LINK_WAIT_CNT; i++) { + msleep(HNS_MAC_LINK_WAIT_TIME); + mac_ctrl_drv->get_link_status(mac_ctrl_drv, &link_status); + if (!link_status) + break; + } + + return link_status; +} + void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status) { struct mac_driver *mac_ctrl_drv; @@ -83,6 +104,14 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status) &sfp_prsnt); if (!ret) *link_status = *link_status && sfp_prsnt; + + /* for FIBER port, it may have a fake link up. + * when the link status changes from down to up, we need to do + * anti-shake. the anti-shake time is base on tests. + * only FIBER port need to do this. 
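+ * With HNS_MAC_LINK_WAIT_TIME (5 ms) and HNS_MAC_LINK_WAIT_CNT (40), + * the anti-shake window is at most about 5 * 40 = 200 ms.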
+ */ + if (*link_status && !mac_cb->link) + *link_status = hns_mac_link_anti_shake(mac_ctrl_drv); } mac_cb->link = *link_status; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index e3bb05959ba9..edf0bcf76ac9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h @@ -422,7 +422,6 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param); int hns_mac_init(struct dsaf_device *dsaf_dev); -void mac_adjust_link(struct net_device *net_dev); bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex); void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status); int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index fcaf5132b865..1b67da1f6fa8 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -3007,7 +3007,7 @@ free_dev: * hns_dsaf_remove - remove dsaf dev * @pdev: dasf platform device */ -static int hns_dsaf_remove(struct platform_device *pdev) +static void hns_dsaf_remove(struct platform_device *pdev) { struct dsaf_device *dsaf_dev = dev_get_drvdata(&pdev->dev); @@ -3020,8 +3020,6 @@ static int hns_dsaf_remove(struct platform_device *pdev) hns_dsaf_free(dsaf_dev); hns_dsaf_free_dev(dsaf_dev); - - return 0; } static const struct of_device_id g_dsaf_match[] = { @@ -3033,7 +3031,7 @@ MODULE_DEVICE_TABLE(of, g_dsaf_match); static struct platform_driver g_dsaf_driver = { .probe = hns_dsaf_probe, - .remove = hns_dsaf_remove, + .remove_new = hns_dsaf_remove, .driver = { .name = DSAF_DRV_NAME, .of_match_table = g_dsaf_match, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index a7eb87da4e70..a08d1f0a5a16 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -9,9 +9,6 @@ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/platform_device.h> -#include <linux/of.h> -#include <linux/of_address.h> -#include <linux/of_platform.h> #include "hns_dsaf_ppe.h" diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h index 0f0e16f9afc0..7e00231c1acf 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h @@ -92,7 +92,7 @@ struct ppe_common_cb { u8 comm_index; /*ppe_common index*/ u32 ppe_num; - struct hns_ppe_cb ppe_cb[]; + struct hns_ppe_cb ppe_cb[] __counted_by(ppe_num); }; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index e2ff3ca198d1..93344563a259 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -11,10 +11,6 @@ #include <linux/etherdevice.h> #include <asm/cacheflush.h> #include <linux/platform_device.h> -#include <linux/of.h> -#include <linux/of_address.h> -#include <linux/of_platform.h> -#include <linux/of_irq.h> #include <linux/spinlock.h> #include "hns_dsaf_main.h" diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h index a9f805925699..c1e9b6997853 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h +++ 
b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h @@ -108,7 +108,7 @@ struct rcb_common_cb { u32 ring_num; u32 desc_num; /* desc num per queue*/ - struct ring_pair_cb ring_pair_cb[]; + struct ring_pair_cb ring_pair_cb[] __counted_by(ring_num); }; int hns_rcb_buf_size2type(u32 buf_size); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 7cf10d1e2b31..8a713eed4465 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -142,7 +142,8 @@ MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match); static void fill_desc(struct hnae_ring *ring, void *priv, int size, dma_addr_t dma, int frag_end, - int buf_num, enum hns_desc_type type, int mtu) + int buf_num, enum hns_desc_type type, int mtu, + bool is_gso) { struct hnae_desc *desc = &ring->desc[ring->next_to_use]; struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; @@ -275,6 +276,15 @@ static int hns_nic_maybe_stop_tso( return 0; } +static int hns_nic_maybe_stop_tx_v2(struct sk_buff **out_skb, int *bnum, + struct hnae_ring *ring) +{ + if (skb_is_gso(*out_skb)) + return hns_nic_maybe_stop_tso(out_skb, bnum, ring); + else + return hns_nic_maybe_stop_tx(out_skb, bnum, ring); +} + static void fill_tso_desc(struct hnae_ring *ring, void *priv, int size, dma_addr_t dma, int frag_end, int buf_num, enum hns_desc_type type, int mtu) @@ -300,6 +310,19 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv, mtu); } +static void fill_desc_v2(struct hnae_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + int buf_num, enum hns_desc_type type, int mtu, + bool is_gso) +{ + if (is_gso) + fill_tso_desc(ring, priv, size, dma, frag_end, buf_num, type, + mtu); + else + fill_v2_desc(ring, priv, size, dma, frag_end, buf_num, type, + mtu); +} + netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, struct sk_buff *skb, struct hns_nic_ring_data *ring_data) @@ -313,6 +336,7 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, int seg_num; dma_addr_t dma; int size, next_to_use; + bool is_gso; int i; switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) { @@ -339,8 +363,9 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, ring->stats.sw_err_cnt++; goto out_err_tx_ok; } + is_gso = skb_is_gso(skb); priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0, - buf_num, DESC_TYPE_SKB, ndev->mtu); + buf_num, DESC_TYPE_SKB, ndev->mtu, is_gso); /* fill the fragments */ for (i = 1; i < seg_num; i++) { @@ -354,7 +379,7 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, } priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma, seg_num - 1 == i ? 
1 : 0, buf_num, - DESC_TYPE_PAGE, ndev->mtu); + DESC_TYPE_PAGE, ndev->mtu, is_gso); } /*complete translate all packets*/ @@ -1776,15 +1801,6 @@ static int hns_nic_set_features(struct net_device *netdev, netdev_info(netdev, "enet v1 do not support tso!\n"); break; default: - if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { - priv->ops.fill_desc = fill_tso_desc; - priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso; - /* The chip only support 7*4096 */ - netif_set_tso_max_size(netdev, 7 * 4096); - } else { - priv->ops.fill_desc = fill_v2_desc; - priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; - } break; } netdev->features = features; @@ -2159,16 +2175,9 @@ static void hns_nic_set_priv_ops(struct net_device *netdev) priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; } else { priv->ops.get_rxd_bnum = get_v2rx_desc_bnum; - if ((netdev->features & NETIF_F_TSO) || - (netdev->features & NETIF_F_TSO6)) { - priv->ops.fill_desc = fill_tso_desc; - priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso; - /* This chip only support 7*4096 */ - netif_set_tso_max_size(netdev, 7 * 4096); - } else { - priv->ops.fill_desc = fill_v2_desc; - priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; - } + priv->ops.fill_desc = fill_desc_v2; + priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx_v2; + netif_set_tso_max_size(netdev, 7 * 4096); /* enable tso when init * control tso on/off through TSE bit in bd */ @@ -2384,7 +2393,7 @@ out_read_prop_fail: return ret; } -static int hns_nic_dev_remove(struct platform_device *pdev) +static void hns_nic_dev_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct hns_nic_priv *priv = netdev_priv(ndev); @@ -2413,7 +2422,6 @@ static int hns_nic_dev_remove(struct platform_device *pdev) of_node_put(to_of_node(priv->fwnode)); free_netdev(ndev); - return 0; } static const struct of_device_id hns_enet_of_match[] = { @@ -2431,7 +2439,7 @@ static struct platform_driver hns_nic_dev_driver = { .acpi_match_table = ACPI_PTR(hns_enet_acpi_match), }, .probe = hns_nic_dev_probe, - .remove = hns_nic_dev_remove, + .remove_new = hns_nic_dev_remove, }; module_platform_driver(hns_nic_dev_driver); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h index ffa9d6573f54..3f3ee032f631 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h @@ -44,7 +44,8 @@ struct hns_nic_ring_data { struct hns_nic_ops { void (*fill_desc)(struct hnae_ring *ring, void *priv, int size, dma_addr_t dma, int frag_end, - int buf_num, enum hns_desc_type type, int mtu); + int buf_num, enum hns_desc_type type, int mtu, + bool is_gso); int (*maybe_stop_tx)(struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring); void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum); diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile index 6efea4662858..e214bfaece1f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/Makefile @@ -17,11 +17,11 @@ hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o -hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o \ +hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o hns3vf/hclgevf_regs.o \ hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o obj-$(CONFIG_HNS3_HCLGE) += hclge.o -hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o 
\ +hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o hns3pf/hclge_regs.o \ hns3pf/hclge_mbx.o hns3pf/hclge_err.o hns3pf/hclge_debugfs.o hns3pf/hclge_ptp.o hns3pf/hclge_devlink.o \ hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 514a20bce4f4..d7e175a9cb49 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -103,6 +103,7 @@ enum HNAE3_DEV_CAP_BITS { HNAE3_DEV_SUPPORT_LANE_NUM_B, HNAE3_DEV_SUPPORT_WOL_B, HNAE3_DEV_SUPPORT_TM_FLUSH_B, + HNAE3_DEV_SUPPORT_VF_FAULT_B, }; #define hnae3_ae_dev_fd_supported(ae_dev) \ @@ -177,6 +178,9 @@ enum HNAE3_DEV_CAP_BITS { #define hnae3_ae_dev_tm_flush_supported(hdev) \ test_bit(HNAE3_DEV_SUPPORT_TM_FLUSH_B, (hdev)->ae_dev->caps) +#define hnae3_ae_dev_vf_fault_supported(ae_dev) \ + test_bit(HNAE3_DEV_SUPPORT_VF_FAULT_B, (ae_dev)->caps) + enum HNAE3_PF_CAP_BITS { HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0, }; @@ -271,6 +275,7 @@ enum hnae3_reset_type { HNAE3_GLOBAL_RESET, HNAE3_IMP_RESET, HNAE3_NONE_RESET, + HNAE3_VF_EXP_RESET, HNAE3_MAX_RESET, }; @@ -382,6 +387,7 @@ struct hnae3_dev_specs { u16 umv_size; u16 mc_mac_size; u32 mac_stats_num; + u8 tnl_num; }; struct hnae3_client_ops { @@ -813,6 +819,7 @@ struct hnae3_tc_info { u8 max_tc; /* Total number of TCs */ u8 num_tc; /* Total number of enabled TCs */ bool mqprio_active; + bool dcb_ets_active; }; #define HNAE3_MAX_DSCP 64 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c index dcecb23daac6..d92ad6082d8e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c @@ -157,6 +157,7 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = { {HCLGE_COMM_CAP_LANE_NUM_B, HNAE3_DEV_SUPPORT_LANE_NUM_B}, {HCLGE_COMM_CAP_WOL_B, HNAE3_DEV_SUPPORT_WOL_B}, {HCLGE_COMM_CAP_TM_FLUSH_B, HNAE3_DEV_SUPPORT_TM_FLUSH_B}, + {HCLGE_COMM_CAP_VF_FAULT_B, HNAE3_DEV_SUPPORT_VF_FAULT_B}, }; static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h index 2b7197ce0ae8..533c19d25e4f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h @@ -93,6 +93,7 @@ enum hclge_opcode_type { HCLGE_OPC_DFX_SSU_REG_2 = 0x004F, HCLGE_OPC_QUERY_DEV_SPECS = 0x0050, + HCLGE_OPC_GET_QUEUE_ERR_VF = 0x0067, /* MAC command */ HCLGE_OPC_CONFIG_MAC_MODE = 0x0301, @@ -348,6 +349,7 @@ enum HCLGE_COMM_CAP_BITS { HCLGE_COMM_CAP_GRO_B = 20, HCLGE_COMM_CAP_FD_B = 21, HCLGE_COMM_CAP_FEC_STATS_B = 25, + HCLGE_COMM_CAP_VF_FAULT_B = 26, HCLGE_COMM_CAP_LANE_NUM_B = 27, HCLGE_COMM_CAP_WOL_B = 28, HCLGE_COMM_CAP_TM_FLUSH_B = 31, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index f276b5ecb431..c083d1d10767 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -414,6 +414,9 @@ static struct hns3_dbg_cap_info hns3_dbg_cap[] = { }, { .name = "support tm flush", .cap_bit = HNAE3_DEV_SUPPORT_TM_FLUSH_B, + }, { + .name = "support vf fault detect", + .cap_bit = 
HNAE3_DEV_SUPPORT_VF_FAULT_B, } }; @@ -500,11 +503,14 @@ static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector, } sprintf(result[j++], "%d", i); - sprintf(result[j++], "%s", dim_state_str[dim->state]); + sprintf(result[j++], "%s", dim->state < ARRAY_SIZE(dim_state_str) ? + dim_state_str[dim->state] : "unknown"); sprintf(result[j++], "%u", dim->profile_ix); - sprintf(result[j++], "%s", dim_cqe_mode_str[dim->mode]); + sprintf(result[j++], "%s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ? + dim_cqe_mode_str[dim->mode] : "unknown"); sprintf(result[j++], "%s", - dim_tune_stat_str[dim->tune_state]); + dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ? + dim_tune_stat_str[dim->tune_state] : "unknown"); sprintf(result[j++], "%u", dim->steps_left); sprintf(result[j++], "%u", dim->steps_right); sprintf(result[j++], "%u", dim->tired); @@ -1045,6 +1051,7 @@ hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos) struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); struct hnae3_dev_specs *dev_specs = &ae_dev->dev_specs; struct hnae3_knic_private_info *kinfo = &h->kinfo; + struct net_device *dev = kinfo->netdev; *pos += scnprintf(buf + *pos, len - *pos, "dev_spec:\n"); *pos += scnprintf(buf + *pos, len - *pos, "MAC entry num: %u\n", @@ -1087,6 +1094,9 @@ hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos) dev_specs->mc_mac_size); *pos += scnprintf(buf + *pos, len - *pos, "MAC statistics number: %u\n", dev_specs->mac_stats_num); + *pos += scnprintf(buf + *pos, len - *pos, + "TX timeout threshold: %d seconds\n", + dev->watchdog_timeo / HZ); } static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len) @@ -1411,9 +1421,9 @@ int hns3_dbg_init(struct hnae3_handle *handle) return 0; out: - mutex_destroy(&handle->dbgfs_lock); debugfs_remove_recursive(handle->hnae3_dbgfs); handle->hnae3_dbgfs = NULL; + mutex_destroy(&handle->dbgfs_lock); return ret; } @@ -1421,6 +1431,9 @@ void hns3_dbg_uninit(struct hnae3_handle *handle) { u32 i; + debugfs_remove_recursive(handle->hnae3_dbgfs); + handle->hnae3_dbgfs = NULL; + for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) if (handle->dbgfs_buf[i]) { kvfree(handle->dbgfs_buf[i]); @@ -1428,8 +1441,6 @@ void hns3_dbg_uninit(struct hnae3_handle *handle) } mutex_destroy(&handle->dbgfs_lock); - debugfs_remove_recursive(handle->hnae3_dbgfs); - handle->hnae3_dbgfs = NULL; } void hns3_dbg_register_debugfs(const char *debugfs_dir_name) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index b7b51e56b030..b618797a7e8d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -18,6 +18,7 @@ #include <net/gre.h> #include <net/gro.h> #include <net/ip6_checksum.h> +#include <net/page_pool/helpers.h> #include <net/pkt_cls.h> #include <net/pkt_sched.h> #include <net/tcp.h> @@ -2102,8 +2103,12 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num, */ if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num && !ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) { + /* This smp_store_release() pairs with smp_load_acquire() in + * hns3_nic_reclaim_desc(). Ensure that the BD valid bit + * is updated.
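+ * The release ordering makes all BD writes above visible before the + * cleaning path can observe the new last_to_use value.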
+ */ + smp_store_release(&ring->last_to_use, ring->next_to_use); hns3_tx_push_bd(ring, num); - WRITE_ONCE(ring->last_to_use, ring->next_to_use); return; } @@ -2114,6 +2119,11 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num, return; } + /* This smp_store_release() pairs with smp_load_acquire() in + * hns3_nic_reclaim_desc(). Ensure that the BD valid bit is updated. + */ + smp_store_release(&ring->last_to_use, ring->next_to_use); + if (ring->tqp->mem_base) hns3_tx_mem_doorbell(ring); else @@ -2121,7 +2131,6 @@ ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG); ring->pending_buf = 0; - WRITE_ONCE(ring->last_to_use, ring->next_to_use); } static void hns3_tsyn(struct net_device *netdev, struct sk_buff *skb, @@ -3307,8 +3316,6 @@ static void hns3_set_default_feature(struct net_device *netdev) netdev->priv_flags |= IFF_UNICAST_FLT; - netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; - netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | @@ -3346,6 +3353,15 @@ static void hns3_set_default_feature(struct net_device *netdev) NETIF_F_HW_TC); netdev->hw_enc_features |= netdev->vlan_features | NETIF_F_TSO_MANGLEID; + + /* The device_version V3 hardware can't offload the checksum for IP in + * GRE packets, but can do it for NvGRE. So default to disable the + * checksum and GSO offload for GRE. + */ + if (ae_dev->dev_version > HNAE3_DEVICE_VERSION_V2) { + netdev->features &= ~NETIF_F_GSO_GRE; + netdev->features &= ~NETIF_F_GSO_GRE_CSUM; + } } static int hns3_alloc_buffer(struct hns3_enet_ring *ring, @@ -3562,9 +3578,8 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int *bytes, int *pkts, int budget) { - /* pair with ring->last_to_use update in hns3_tx_doorbell(), - * smp_store_release() is not used in hns3_tx_doorbell() because - * the doorbell operation already have the needed barrier operation. + /* This smp_load_acquire() pairs with smp_store_release() in + * hns3_tx_doorbell().
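+ * The acquire load below cannot observe the new last_to_use without + * also observing the descriptor writes that preceded the release store.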
*/ int ltu = smp_load_acquire(&ring->last_to_use); int ntc = ring->next_to_clean; @@ -4925,8 +4940,7 @@ static void hns3_put_ring_config(struct hns3_nic_priv *priv) static void hns3_alloc_page_pool(struct hns3_enet_ring *ring) { struct page_pool_params pp_params = { - .flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG | - PP_FLAG_DMA_SYNC_DEV, + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, .order = hns3_page_order(ring), .pool_size = ring->desc_num * hns3_buf_size(ring) / (PAGE_SIZE << hns3_page_order(ring)), @@ -5125,7 +5139,7 @@ static int hns3_init_mac_addr(struct net_device *netdev) struct hns3_nic_priv *priv = netdev_priv(netdev); char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hnae3_handle *h = priv->ae_handle; - u8 mac_addr_temp[ETH_ALEN]; + u8 mac_addr_temp[ETH_ALEN] = {0}; int ret = 0; if (h->ae_algo->ops->get_mac_addr) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 88af34bbee34..acd756b0c7c9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -6,7 +6,7 @@ #include <linux/dim.h> #include <linux/if_vlan.h> -#include <net/page_pool.h> +#include <net/page_pool/types.h> #include <asm/barrier.h> #include "hnae3.h" diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 407d30ee55d2..682239f33082 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -569,8 +569,8 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data) static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) { - struct hns3_nic_priv *nic_priv = (struct hns3_nic_priv *)handle->priv; struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hns3_nic_priv *nic_priv = handle->priv; struct hns3_enet_ring *ring; u8 *stat; int i, j; @@ -773,7 +773,9 @@ static int hns3_get_link_ksettings(struct net_device *netdev, hns3_get_ksettings(h, cmd); break; case HNAE3_MEDIA_TYPE_FIBER: - if (module_type == HNAE3_MODULE_TYPE_CR) + if (module_type == HNAE3_MODULE_TYPE_UNKNOWN) + cmd->base.port = PORT_OTHER; + else if (module_type == HNAE3_MODULE_TYPE_CR) cmd->base.port = PORT_DA; else cmd->base.port = PORT_FIBRE; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 91c173f40701..4d15eb73b972 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -826,7 +826,9 @@ struct hclge_dev_specs_1_cmd { u8 rsv0[2]; __le16 umv_size; __le16 mc_mac_size; - u8 rsv1[12]; + u8 rsv1[6]; + u8 tnl_num; + u8 rsv2[5]; }; /* mac speed type defined in firmware command */ @@ -886,8 +888,4 @@ struct hclge_query_wol_supported_cmd { struct hclge_hw; int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num); -enum hclge_comm_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw, - struct hclge_desc *desc); -enum hclge_comm_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw, - struct hclge_desc *desc); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index fad5a5ff3cda..b98301e205f7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -259,7 +259,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets 
*ets) int ret; if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) || - hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE) + h->kinfo.tc_info.mqprio_active) return -EINVAL; ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed); @@ -275,10 +275,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) } hclge_tm_schd_info_update(hdev, num_tc); - if (num_tc > 1) - hdev->flag |= HCLGE_FLAG_DCB_ENABLE; - else - hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE; + h->kinfo.tc_info.dcb_ets_active = num_tc > 1; ret = hclge_ieee_ets_to_tm_info(hdev, ets); if (ret) @@ -487,7 +484,7 @@ static u8 hclge_getdcbx(struct hnae3_handle *h) struct hclge_vport *vport = hclge_get_vport(h); struct hclge_dev *hdev = vport->back; - if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE) + if (h->kinfo.tc_info.mqprio_active) return 0; return hdev->dcbx_cap; @@ -611,7 +608,8 @@ static int hclge_setup_tc(struct hnae3_handle *h, if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) return -EBUSY; - if (hdev->flag & HCLGE_FLAG_DCB_ENABLE) + kinfo = &vport->nic.kinfo; + if (kinfo->tc_info.dcb_ets_active) return -EINVAL; ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt); @@ -625,7 +623,6 @@ static int hclge_setup_tc(struct hnae3_handle *h, if (ret) return ret; - kinfo = &vport->nic.kinfo; memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info)); hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt); kinfo->tc_info.mqprio_active = tc > 0; @@ -634,13 +631,6 @@ static int hclge_setup_tc(struct hnae3_handle *h, if (ret) goto err_out; - hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE; - - if (tc > 1) - hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE; - else - hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE; - return hclge_notify_init_up(hdev); err_out: diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 0fb2eaee3e8a..ff3f8f424ad9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -7,6 +7,7 @@ #include "hclge_debugfs.h" #include "hclge_err.h" #include "hclge_main.h" +#include "hclge_regs.h" #include "hclge_tm.h" #include "hnae3.h" @@ -1518,7 +1519,7 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x, struct hclge_desc desc[3]; int pos = 0; int ret, i; - u32 *req; + __le32 *req; hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true); desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); @@ -1543,22 +1544,22 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x, tcam_msg.loc); /* tcam_data0 ~ tcam_data1 */ - req = (u32 *)req1->tcam_data; + req = (__le32 *)req1->tcam_data; for (i = 0; i < 2; i++) pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos, - "%08x\n", *req++); + "%08x\n", le32_to_cpu(*req++)); /* tcam_data2 ~ tcam_data7 */ - req = (u32 *)req2->tcam_data; + req = (__le32 *)req2->tcam_data; for (i = 0; i < 6; i++) pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos, - "%08x\n", *req++); + "%08x\n", le32_to_cpu(*req++)); /* tcam_data8 ~ tcam_data12 */ - req = (u32 *)req3->tcam_data; + req = (__le32 *)req3->tcam_data; for (i = 0; i < 5; i++) pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos, - "%08x\n", *req++); + "%08x\n", le32_to_cpu(*req++)); return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 3f35227ef1fa..d63e114f93d0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ 
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -1301,10 +1301,12 @@ static const struct hclge_hw_type_id hclge_hw_type_id_st[] = { .msg = "tqp_int_ecc_error" }, { .type_id = PF_ABNORMAL_INT_ERROR, - .msg = "pf_abnormal_int_error" + .msg = "pf_abnormal_int_error", + .cause_by_vf = true }, { .type_id = MPF_ABNORMAL_INT_ERROR, - .msg = "mpf_abnormal_int_error" + .msg = "mpf_abnormal_int_error", + .cause_by_vf = true }, { .type_id = COMMON_ERROR, .msg = "common_error" @@ -2759,7 +2761,7 @@ void hclge_handle_occurred_error(struct hclge_dev *hdev) hclge_handle_error_info_log(ae_dev); } -static void +static bool hclge_handle_error_type_reg_log(struct device *dev, struct hclge_mod_err_info *mod_info, struct hclge_type_reg_err_info *type_reg_info) @@ -2770,6 +2772,7 @@ hclge_handle_error_type_reg_log(struct device *dev, u8 mod_id, total_module, type_id, total_type, i, is_ras; u8 index_module = MODULE_NONE; u8 index_type = NONE_ERROR; + bool cause_by_vf = false; mod_id = mod_info->mod_id; type_id = type_reg_info->type_id & HCLGE_ERR_TYPE_MASK; @@ -2788,6 +2791,7 @@ hclge_handle_error_type_reg_log(struct device *dev, for (i = 0; i < total_type; i++) { if (type_id == hclge_hw_type_id_st[i].type_id) { index_type = i; + cause_by_vf = hclge_hw_type_id_st[i].cause_by_vf; break; } } @@ -2805,6 +2809,8 @@ hclge_handle_error_type_reg_log(struct device *dev, dev_err(dev, "reg_value:\n"); for (i = 0; i < type_reg_info->reg_num; i++) dev_err(dev, "0x%08x\n", type_reg_info->hclge_reg[i]); + + return cause_by_vf; } static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev, @@ -2815,6 +2821,7 @@ static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev, struct device *dev = &hdev->pdev->dev; struct hclge_mod_err_info *mod_info; struct hclge_sum_err_info *sum_info; + bool cause_by_vf = false; u8 mod_num, err_num, i; u32 offset = 0; @@ -2843,12 +2850,16 @@ static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev, type_reg_info = (struct hclge_type_reg_err_info *) &buf[offset++]; - hclge_handle_error_type_reg_log(dev, mod_info, - type_reg_info); + if (hclge_handle_error_type_reg_log(dev, mod_info, + type_reg_info)) + cause_by_vf = true; offset += type_reg_info->reg_num; } } + + if (hnae3_ae_dev_vf_fault_supported(hdev->ae_dev) && cause_by_vf) + set_bit(HNAE3_VF_EXP_RESET, &ae_dev->hw_err_reset_req); } static int hclge_query_all_err_bd_num(struct hclge_dev *hdev, u32 *bd_num) @@ -2940,3 +2951,98 @@ err_desc: out: return ret; } + +static bool hclge_reset_vf_in_bitmap(struct hclge_dev *hdev, + unsigned long *bitmap) +{ + struct hclge_vport *vport; + bool exist_set = false; + int func_id; + int ret; + + func_id = find_first_bit(bitmap, HCLGE_VPORT_NUM); + if (func_id == PF_VPORT_ID) + return false; + + while (func_id != HCLGE_VPORT_NUM) { + vport = hclge_get_vf_vport(hdev, + func_id - HCLGE_VF_VPORT_START_NUM); + if (!vport) { + dev_err(&hdev->pdev->dev, "invalid func id(%d)\n", + func_id); + return false; + } + + dev_info(&hdev->pdev->dev, "do function %d recovery.", func_id); + + ret = hclge_reset_tqp(&vport->nic); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to reset tqp, ret = %d.", ret); + return false; + } + + ret = hclge_inform_vf_reset(vport, HNAE3_VF_FUNC_RESET); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to reset func %d, ret = %d.", + func_id, ret); + return false; + } + + exist_set = true; + clear_bit(func_id, bitmap); + func_id = find_first_bit(bitmap, HCLGE_VPORT_NUM); + } + + return exist_set; +} + +static void hclge_get_vf_fault_bitmap(struct 
hclge_desc *desc, + unsigned long *bitmap) +{ +#define HCLGE_FIR_FAULT_BYTES 24 +#define HCLGE_SEC_FAULT_BYTES 8 + + u8 *buff; + + BUILD_BUG_ON(HCLGE_FIR_FAULT_BYTES + HCLGE_SEC_FAULT_BYTES != + BITS_TO_BYTES(HCLGE_VPORT_NUM)); + + memcpy(bitmap, desc[0].data, HCLGE_FIR_FAULT_BYTES); + buff = (u8 *)bitmap + HCLGE_FIR_FAULT_BYTES; + memcpy(buff, desc[1].data, HCLGE_SEC_FAULT_BYTES); +} + +int hclge_handle_vf_queue_err_ras(struct hclge_dev *hdev) +{ + unsigned long vf_fault_bitmap[BITS_TO_LONGS(HCLGE_VPORT_NUM)]; + struct hclge_desc desc[2]; + bool cause_by_vf = false; + int ret; + + if (!test_and_clear_bit(HNAE3_VF_EXP_RESET, + &hdev->ae_dev->hw_err_reset_req) || + !hnae3_ae_dev_vf_fault_supported(hdev->ae_dev)) + return 0; + + hclge_comm_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_GET_QUEUE_ERR_VF, + true); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_comm_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_GET_QUEUE_ERR_VF, + true); + + ret = hclge_comm_cmd_send(&hdev->hw.hw, desc, 2); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get vf bitmap, ret = %d.\n", ret); + return ret; + } + hclge_get_vf_fault_bitmap(desc, vf_fault_bitmap); + + cause_by_vf = hclge_reset_vf_in_bitmap(hdev, vf_fault_bitmap); + if (cause_by_vf) + hdev->ae_dev->hw_err_reset_req = 0; + + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h index 86be6fb32990..68b738affa66 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h @@ -196,6 +196,7 @@ struct hclge_hw_module_id { struct hclge_hw_type_id { enum hclge_err_type_list type_id; const char *msg; + bool cause_by_vf; /* indicate the error may come from a VF exception */ }; struct hclge_sum_err_info { @@ -228,4 +229,5 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, unsigned long *reset_requests); int hclge_handle_error_info_log(struct hnae3_ae_dev *ae_dev); int hclge_handle_mac_tnl(struct hclge_dev *hdev); +int hclge_handle_vf_queue_err_ras(struct hclge_dev *hdev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index a940e35aef29..5ea9e59569ef 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -20,6 +20,7 @@ #include "hclge_main.h" #include "hclge_mbx.h" #include "hclge_mdio.h" +#include "hclge_regs.h" #include "hclge_tm.h" #include "hclge_err.h" #include "hnae3.h" @@ -40,20 +41,6 @@ #define HCLGE_PF_RESET_SYNC_TIME 20 #define HCLGE_PF_RESET_SYNC_CNT 1500 -/* Get DFX BD number offset */ -#define HCLGE_DFX_BIOS_BD_OFFSET 1 -#define HCLGE_DFX_SSU_0_BD_OFFSET 2 -#define HCLGE_DFX_SSU_1_BD_OFFSET 3 -#define HCLGE_DFX_IGU_BD_OFFSET 4 -#define HCLGE_DFX_RPU_0_BD_OFFSET 5 -#define HCLGE_DFX_RPU_1_BD_OFFSET 6 -#define HCLGE_DFX_NCSI_BD_OFFSET 7 -#define HCLGE_DFX_RTC_BD_OFFSET 8 -#define HCLGE_DFX_PPP_BD_OFFSET 9 -#define HCLGE_DFX_RCB_BD_OFFSET 10 -#define HCLGE_DFX_TQP_BD_OFFSET 11 -#define HCLGE_DFX_SSU_2_BD_OFFSET 12 - #define HCLGE_LINK_STATUS_MS 10 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps); @@ -74,6 +61,7 @@ static void hclge_sync_fd_table(struct hclge_dev *hdev); static void hclge_update_fec_stats(struct hclge_dev *hdev); static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret, int wait_cnt); +static int hclge_update_port_info(struct hclge_dev *hdev); static struct hnae3_ae_algo
ae_algo; @@ -94,62 +82,6 @@ static const struct pci_device_id ae_algo_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); -static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, - HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, - HCLGE_COMM_NIC_CSQ_DEPTH_REG, - HCLGE_COMM_NIC_CSQ_TAIL_REG, - HCLGE_COMM_NIC_CSQ_HEAD_REG, - HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, - HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, - HCLGE_COMM_NIC_CRQ_DEPTH_REG, - HCLGE_COMM_NIC_CRQ_TAIL_REG, - HCLGE_COMM_NIC_CRQ_HEAD_REG, - HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, - HCLGE_COMM_CMDQ_INTR_STS_REG, - HCLGE_COMM_CMDQ_INTR_EN_REG, - HCLGE_COMM_CMDQ_INTR_GEN_REG}; - -static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE, - HCLGE_PF_OTHER_INT_REG, - HCLGE_MISC_RESET_STS_REG, - HCLGE_MISC_VECTOR_INT_STS, - HCLGE_GLOBAL_RESET_REG, - HCLGE_FUN_RST_ING, - HCLGE_GRO_EN_REG}; - -static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG, - HCLGE_RING_RX_ADDR_H_REG, - HCLGE_RING_RX_BD_NUM_REG, - HCLGE_RING_RX_BD_LENGTH_REG, - HCLGE_RING_RX_MERGE_EN_REG, - HCLGE_RING_RX_TAIL_REG, - HCLGE_RING_RX_HEAD_REG, - HCLGE_RING_RX_FBD_NUM_REG, - HCLGE_RING_RX_OFFSET_REG, - HCLGE_RING_RX_FBD_OFFSET_REG, - HCLGE_RING_RX_STASH_REG, - HCLGE_RING_RX_BD_ERR_REG, - HCLGE_RING_TX_ADDR_L_REG, - HCLGE_RING_TX_ADDR_H_REG, - HCLGE_RING_TX_BD_NUM_REG, - HCLGE_RING_TX_PRIORITY_REG, - HCLGE_RING_TX_TC_REG, - HCLGE_RING_TX_MERGE_EN_REG, - HCLGE_RING_TX_TAIL_REG, - HCLGE_RING_TX_HEAD_REG, - HCLGE_RING_TX_FBD_NUM_REG, - HCLGE_RING_TX_OFFSET_REG, - HCLGE_RING_TX_EBD_NUM_REG, - HCLGE_RING_TX_EBD_OFFSET_REG, - HCLGE_RING_TX_BD_ERR_REG, - HCLGE_RING_EN_REG}; - -static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG, - HCLGE_TQP_INTR_GL0_REG, - HCLGE_TQP_INTR_GL1_REG, - HCLGE_TQP_INTR_GL2_REG, - HCLGE_TQP_INTR_RL_REG}; - static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { "External Loopback test", "App Loopback test", @@ -375,36 +307,6 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { }, }; -static const u32 hclge_dfx_bd_offset_list[] = { - HCLGE_DFX_BIOS_BD_OFFSET, - HCLGE_DFX_SSU_0_BD_OFFSET, - HCLGE_DFX_SSU_1_BD_OFFSET, - HCLGE_DFX_IGU_BD_OFFSET, - HCLGE_DFX_RPU_0_BD_OFFSET, - HCLGE_DFX_RPU_1_BD_OFFSET, - HCLGE_DFX_NCSI_BD_OFFSET, - HCLGE_DFX_RTC_BD_OFFSET, - HCLGE_DFX_PPP_BD_OFFSET, - HCLGE_DFX_RCB_BD_OFFSET, - HCLGE_DFX_TQP_BD_OFFSET, - HCLGE_DFX_SSU_2_BD_OFFSET -}; - -static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = { - HCLGE_OPC_DFX_BIOS_COMMON_REG, - HCLGE_OPC_DFX_SSU_REG_0, - HCLGE_OPC_DFX_SSU_REG_1, - HCLGE_OPC_DFX_IGU_EGU_REG, - HCLGE_OPC_DFX_RPU_REG_0, - HCLGE_OPC_DFX_RPU_REG_1, - HCLGE_OPC_DFX_NCSI_REG, - HCLGE_OPC_DFX_RTC_REG, - HCLGE_OPC_DFX_PPP_REG, - HCLGE_OPC_DFX_RCB_REG, - HCLGE_OPC_DFX_TQP_REG, - HCLGE_OPC_DFX_SSU_REG_2 -}; - static const struct key_info meta_data_key_info[] = { { PACKET_TYPE_ID, 6 }, { IP_FRAGEMENT, 1 }, @@ -980,8 +882,8 @@ static const struct hclge_speed_bit_map speed_bit_map[] = { {HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT}, {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT}, {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT}, - {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT}, - {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT}, + {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BITS}, + {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BITS}, {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT}, }; @@ -1038,100 +940,98 @@ static void hclge_update_fec_support(struct hclge_mac *mac) mac->supported); } +static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[8] = { + 
{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT}, + {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT}, + {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT}, + {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT}, + {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT}, + {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT}, + {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT}, + {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT}, +}; + +static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[6] = { + {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT}, + {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT}, + {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT}, + {HCLGE_SUPPORT_100G_R4_BIT, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT}, + {HCLGE_SUPPORT_100G_R2_BIT, + ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT}, + {HCLGE_SUPPORT_200G_BIT, + ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT}, +}; + +static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[8] = { + {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT}, + {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT}, + {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT}, + {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT}, + {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT}, + {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT}, + {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT}, + {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT}, +}; + +static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[9] = { + {HCLGE_SUPPORT_1G_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT}, + {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT}, + {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT}, + {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT}, + {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT}, + {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT}, + {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT}, + {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT}, + {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT}, +}; + static void hclge_convert_setting_sr(u16 speed_ability, unsigned long *link_mode) { - if (speed_ability & HCLGE_SUPPORT_10G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_25G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_40G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_50G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_100G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_200G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, - link_mode); + int i; + + for (i = 0; i < ARRAY_SIZE(hclge_sr_link_mode_bmap); i++) { + if (speed_ability & hclge_sr_link_mode_bmap[i].support_bit) + linkmode_set_bit(hclge_sr_link_mode_bmap[i].link_mode, + link_mode); + } } static void hclge_convert_setting_lr(u16 speed_ability, unsigned long *link_mode) { - if (speed_ability & 
HCLGE_SUPPORT_10G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_25G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_50G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_40G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_100G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_200G_BIT) - linkmode_set_bit( - ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, - link_mode); + int i; + + for (i = 0; i < ARRAY_SIZE(hclge_lr_link_mode_bmap); i++) { + if (speed_ability & hclge_lr_link_mode_bmap[i].support_bit) + linkmode_set_bit(hclge_lr_link_mode_bmap[i].link_mode, + link_mode); + } } static void hclge_convert_setting_cr(u16 speed_ability, unsigned long *link_mode) { - if (speed_ability & HCLGE_SUPPORT_10G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_25G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_40G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_50G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_100G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_200G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, - link_mode); + int i; + + for (i = 0; i < ARRAY_SIZE(hclge_cr_link_mode_bmap); i++) { + if (speed_ability & hclge_cr_link_mode_bmap[i].support_bit) + linkmode_set_bit(hclge_cr_link_mode_bmap[i].link_mode, + link_mode); + } } static void hclge_convert_setting_kr(u16 speed_ability, unsigned long *link_mode) { - if (speed_ability & HCLGE_SUPPORT_1G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_10G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_25G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_40G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_50G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_100G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_200G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, - link_mode); + int i; + + for (i = 0; i < ARRAY_SIZE(hclge_kr_link_mode_bmap); i++) { + if (speed_ability & hclge_kr_link_mode_bmap[i].support_bit) + linkmode_set_bit(hclge_kr_link_mode_bmap[i].link_mode, + link_mode); + } } static void hclge_convert_setting_fec(struct hclge_mac *mac) @@ -1257,10 +1157,10 @@ static u32 hclge_get_max_speed(u16 speed_ability) if (speed_ability & HCLGE_SUPPORT_200G_BIT) return HCLGE_MAC_SPEED_200G; - if (speed_ability & HCLGE_SUPPORT_100G_BIT) + if (speed_ability & HCLGE_SUPPORT_100G_BITS) return HCLGE_MAC_SPEED_100G; - if (speed_ability & HCLGE_SUPPORT_50G_BIT) + if (speed_ability & HCLGE_SUPPORT_50G_BITS) return HCLGE_MAC_SPEED_50G; if (speed_ability & 
HCLGE_SUPPORT_40G_BIT) @@ -1425,6 +1325,7 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev) ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME; ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM; ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; + ae_dev->dev_specs.tnl_num = 0; } static void hclge_parse_dev_specs(struct hclge_dev *hdev, @@ -1448,6 +1349,7 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev, ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size); ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size); + ae_dev->dev_specs.tnl_num = req1->tnl_num; } static void hclge_check_dev_specs(struct hclge_dev *hdev) @@ -3140,6 +3042,9 @@ static void hclge_update_link_status(struct hclge_dev *hdev) if (state != hdev->hw.mac.link) { hdev->hw.mac.link = state; + if (state == HCLGE_LINK_STATUS_UP) + hclge_update_port_info(hdev); + client->ops->link_status_change(handle, state); hclge_config_mac_tnl_int(hdev, state); if (rclient && rclient->ops->link_status_change) @@ -3521,7 +3426,7 @@ static int hclge_get_status(struct hnae3_handle *handle) return hdev->hw.mac.link; } -static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf) +struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf) { if (!pci_num_vf(hdev->pdev)) { dev_err(&hdev->pdev->dev, @@ -3661,9 +3566,14 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, u32 regclr) { +#define HCLGE_IMP_RESET_DELAY 5 + switch (event_type) { case HCLGE_VECTOR0_EVENT_PTP: case HCLGE_VECTOR0_EVENT_RST: + if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B)) + mdelay(HCLGE_IMP_RESET_DELAY); + hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); break; case HCLGE_VECTOR0_EVENT_MBX: @@ -4560,6 +4470,7 @@ static void hclge_handle_err_recovery(struct hclge_dev *hdev) if (hclge_find_error_source(hdev)) { hclge_handle_error_info_log(ae_dev); hclge_handle_mac_tnl(hdev); + hclge_handle_vf_queue_err_ras(hdev); } hclge_handle_err_reset_request(hdev); @@ -7317,14 +7228,14 @@ static int hclge_parse_cls_flower(struct hclge_dev *hdev, struct flow_dissector *dissector = flow->match.dissector; if (dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_VLAN) | - BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_PORTS))) { - dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n", + ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) { + dev_err(&hdev->pdev->dev, "unsupported key set: %#llx\n", dissector->used_keys); return -EOPNOTSUPP; } @@ -7445,6 +7356,12 @@ static int hclge_del_cls_flower(struct hnae3_handle *handle, ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location, NULL, false); if (ret) { + /* if the TCAM config fails, set the rule state to TO_DEL, + * so the rule will be deleted when the periodic + * task is scheduled.
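+ * (Editorial note: presumably the service task that syncs the FD + * table; the HCLGE_STATE_FD_TBL_CHANGED bit set below is what triggers + * that retry.)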
+ */ + hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL); + set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); spin_unlock_bh(&hdev->fd_rule_lock); return ret; } @@ -8921,7 +8838,7 @@ static void hclge_update_overflow_flags(struct hclge_vport *vport, if (mac_type == HCLGE_MAC_ADDR_UC) { if (is_all_added) vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE; - else + else if (hclge_is_umv_space_full(vport, true)) vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE; } else { if (is_all_added) @@ -10112,8 +10029,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, struct hclge_vport_vlan_cfg *vlan, *tmp; struct hclge_dev *hdev = vport->back; - mutex_lock(&hdev->vport_lock); - list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { if (vlan->vlan_id == vlan_id) { if (is_write_tbl && vlan->hd_tbl_status) @@ -10128,8 +10043,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, break; } } - - mutex_unlock(&hdev->vport_lock); } void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) @@ -10538,11 +10451,16 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, * handle mailbox. Just record the vlan id, and remove it after * reset finished. */ + mutex_lock(&hdev->vport_lock); if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) { set_bit(vlan_id, vport->vlan_del_fail_bmap); + mutex_unlock(&hdev->vport_lock); return -EBUSY; + } else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) { + clear_bit(vlan_id, vport->vlan_del_fail_bmap); } + mutex_unlock(&hdev->vport_lock); /* when port base vlan enabled, we use port base vlan as the vlan * filter entry. In this case, we don't update vlan filter table @@ -10557,17 +10475,22 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, } if (!ret) { - if (!is_kill) + if (!is_kill) { hclge_add_vport_vlan_table(vport, vlan_id, writen_to_tbl); - else if (is_kill && vlan_id != 0) + } else if (is_kill && vlan_id != 0) { + mutex_lock(&hdev->vport_lock); hclge_rm_vport_vlan_table(vport, vlan_id, false); + mutex_unlock(&hdev->vport_lock); + } } else if (is_kill) { /* when remove hw vlan filter failed, record the vlan id, * and try to remove it from hw later, to be consistence * with stack */ + mutex_lock(&hdev->vport_lock); set_bit(vlan_id, vport->vlan_del_fail_bmap); + mutex_unlock(&hdev->vport_lock); } hclge_set_vport_vlan_fltr_change(vport); @@ -10607,6 +10530,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev) int i, ret, sync_cnt = 0; u16 vlan_id; + mutex_lock(&hdev->vport_lock); /* start from vport 1 for PF is always alive */ for (i = 0; i < hdev->num_alloc_vport; i++) { struct hclge_vport *vport = &hdev->vport[i]; @@ -10617,21 +10541,26 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev) ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), vport->vport_id, vlan_id, true); - if (ret && ret != -EINVAL) + if (ret && ret != -EINVAL) { + mutex_unlock(&hdev->vport_lock); return; + } clear_bit(vlan_id, vport->vlan_del_fail_bmap); hclge_rm_vport_vlan_table(vport, vlan_id, false); hclge_set_vport_vlan_fltr_change(vport); sync_cnt++; - if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) + if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) { + mutex_unlock(&hdev->vport_lock); return; + } vlan_id = find_first_bit(vport->vlan_del_fail_bmap, VLAN_N_VID); } } + mutex_unlock(&hdev->vport_lock); hclge_sync_vlan_fltr_state(hdev); } @@ -11123,6 +11052,7 @@ static void 
hclge_get_mdix_mode(struct hnae3_handle *handle, static void hclge_info_show(struct hclge_dev *hdev) { + struct hnae3_handle *handle = &hdev->vport->nic; struct device *dev = &hdev->pdev->dev; dev_info(dev, "PF info begin:\n"); @@ -11139,9 +11069,9 @@ static void hclge_info_show(struct hclge_dev *hdev) dev_info(dev, "This is %s PF\n", hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main"); dev_info(dev, "DCB %s\n", - hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable"); + handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable"); dev_info(dev, "MQPRIO %s\n", - hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable"); + handle->kinfo.tc_info.mqprio_active ? "enable" : "disable"); dev_info(dev, "Default tx spare buffer size: %u\n", hdev->tx_spare_buf_size); @@ -11737,6 +11667,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) goto err_msi_irq_uninit; if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { + clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); if (hnae3_dev_phy_imp_supported(hdev)) ret = hclge_update_tp_port_info(hdev); else @@ -12383,463 +12314,6 @@ out: return ret; } -static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit, - u32 *regs_num_64_bit) -{ - struct hclge_desc desc; - u32 total_num; - int ret; - - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true); - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Query register number cmd failed, ret = %d.\n", ret); - return ret; - } - - *regs_num_32_bit = le32_to_cpu(desc.data[0]); - *regs_num_64_bit = le32_to_cpu(desc.data[1]); - - total_num = *regs_num_32_bit + *regs_num_64_bit; - if (!total_num) - return -EINVAL; - - return 0; -} - -static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, - void *data) -{ -#define HCLGE_32_BIT_REG_RTN_DATANUM 8 -#define HCLGE_32_BIT_DESC_NODATA_LEN 2 - - struct hclge_desc *desc; - u32 *reg_val = data; - __le32 *desc_data; - int nodata_num; - int cmd_num; - int i, k, n; - int ret; - - if (regs_num == 0) - return 0; - - nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN; - cmd_num = DIV_ROUND_UP(regs_num + nodata_num, - HCLGE_32_BIT_REG_RTN_DATANUM); - desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); - if (!desc) - return -ENOMEM; - - hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true); - ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); - if (ret) { - dev_err(&hdev->pdev->dev, - "Query 32 bit register cmd failed, ret = %d.\n", ret); - kfree(desc); - return ret; - } - - for (i = 0; i < cmd_num; i++) { - if (i == 0) { - desc_data = (__le32 *)(&desc[i].data[0]); - n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num; - } else { - desc_data = (__le32 *)(&desc[i]); - n = HCLGE_32_BIT_REG_RTN_DATANUM; - } - for (k = 0; k < n; k++) { - *reg_val++ = le32_to_cpu(*desc_data++); - - regs_num--; - if (!regs_num) - break; - } - } - - kfree(desc); - return 0; -} - -static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, - void *data) -{ -#define HCLGE_64_BIT_REG_RTN_DATANUM 4 -#define HCLGE_64_BIT_DESC_NODATA_LEN 1 - - struct hclge_desc *desc; - u64 *reg_val = data; - __le64 *desc_data; - int nodata_len; - int cmd_num; - int i, k, n; - int ret; - - if (regs_num == 0) - return 0; - - nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN; - cmd_num = DIV_ROUND_UP(regs_num + nodata_len, - HCLGE_64_BIT_REG_RTN_DATANUM); - desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); - if (!desc) - return -ENOMEM; - - hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, 
true); - ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); - if (ret) { - dev_err(&hdev->pdev->dev, - "Query 64 bit register cmd failed, ret = %d.\n", ret); - kfree(desc); - return ret; - } - - for (i = 0; i < cmd_num; i++) { - if (i == 0) { - desc_data = (__le64 *)(&desc[i].data[0]); - n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len; - } else { - desc_data = (__le64 *)(&desc[i]); - n = HCLGE_64_BIT_REG_RTN_DATANUM; - } - for (k = 0; k < n; k++) { - *reg_val++ = le64_to_cpu(*desc_data++); - - regs_num--; - if (!regs_num) - break; - } - } - - kfree(desc); - return 0; -} - -#define MAX_SEPARATE_NUM 4 -#define SEPARATOR_VALUE 0xFDFCFBFA -#define REG_NUM_PER_LINE 4 -#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) -#define REG_SEPARATOR_LINE 1 -#define REG_NUM_REMAIN_MASK 3 - -int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc) -{ - int i; - - /* initialize command BD except the last one */ - for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) { - hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, - true); - desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); - } - - /* initialize the last command BD */ - hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true); - - return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT); -} - -static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev, - int *bd_num_list, - u32 type_num) -{ - u32 entries_per_desc, desc_index, index, offset, i; - struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT]; - int ret; - - ret = hclge_query_bd_num_cmd_send(hdev, desc); - if (ret) { - dev_err(&hdev->pdev->dev, - "Get dfx bd num fail, status is %d.\n", ret); - return ret; - } - - entries_per_desc = ARRAY_SIZE(desc[0].data); - for (i = 0; i < type_num; i++) { - offset = hclge_dfx_bd_offset_list[i]; - index = offset % entries_per_desc; - desc_index = offset / entries_per_desc; - bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]); - } - - return ret; -} - -static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev, - struct hclge_desc *desc_src, int bd_num, - enum hclge_opcode_type cmd) -{ - struct hclge_desc *desc = desc_src; - int i, ret; - - hclge_cmd_setup_basic_desc(desc, cmd, true); - for (i = 0; i < bd_num - 1; i++) { - desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); - desc++; - hclge_cmd_setup_basic_desc(desc, cmd, true); - } - - desc = desc_src; - ret = hclge_cmd_send(&hdev->hw, desc, bd_num); - if (ret) - dev_err(&hdev->pdev->dev, - "Query dfx reg cmd(0x%x) send fail, status is %d.\n", - cmd, ret); - - return ret; -} - -static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num, - void *data) -{ - int entries_per_desc, reg_num, separator_num, desc_index, index, i; - struct hclge_desc *desc = desc_src; - u32 *reg = data; - - entries_per_desc = ARRAY_SIZE(desc->data); - reg_num = entries_per_desc * bd_num; - separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK); - for (i = 0; i < reg_num; i++) { - index = i % entries_per_desc; - desc_index = i / entries_per_desc; - *reg++ = le32_to_cpu(desc[desc_index].data[index]); - } - for (i = 0; i < separator_num; i++) - *reg++ = SEPARATOR_VALUE; - - return reg_num + separator_num; -} - -static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len) -{ - u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list); - int data_len_per_desc, bd_num, i; - int *bd_num_list; - u32 data_len; - int ret; - - bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL); - if (!bd_num_list) - return -ENOMEM; - - ret = 
hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num); - if (ret) { - dev_err(&hdev->pdev->dev, - "Get dfx reg bd num fail, status is %d.\n", ret); - goto out; - } - - data_len_per_desc = sizeof_field(struct hclge_desc, data); - *len = 0; - for (i = 0; i < dfx_reg_type_num; i++) { - bd_num = bd_num_list[i]; - data_len = data_len_per_desc * bd_num; - *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE; - } - -out: - kfree(bd_num_list); - return ret; -} - -static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data) -{ - u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list); - int bd_num, bd_num_max, buf_len, i; - struct hclge_desc *desc_src; - int *bd_num_list; - u32 *reg = data; - int ret; - - bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL); - if (!bd_num_list) - return -ENOMEM; - - ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num); - if (ret) { - dev_err(&hdev->pdev->dev, - "Get dfx reg bd num fail, status is %d.\n", ret); - goto out; - } - - bd_num_max = bd_num_list[0]; - for (i = 1; i < dfx_reg_type_num; i++) - bd_num_max = max_t(int, bd_num_max, bd_num_list[i]); - - buf_len = sizeof(*desc_src) * bd_num_max; - desc_src = kzalloc(buf_len, GFP_KERNEL); - if (!desc_src) { - ret = -ENOMEM; - goto out; - } - - for (i = 0; i < dfx_reg_type_num; i++) { - bd_num = bd_num_list[i]; - ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num, - hclge_dfx_reg_opcode_list[i]); - if (ret) { - dev_err(&hdev->pdev->dev, - "Get dfx reg fail, status is %d.\n", ret); - break; - } - - reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg); - } - - kfree(desc_src); -out: - kfree(bd_num_list); - return ret; -} - -static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data, - struct hnae3_knic_private_info *kinfo) -{ -#define HCLGE_RING_REG_OFFSET 0x200 -#define HCLGE_RING_INT_REG_OFFSET 0x4 - - int i, j, reg_num, separator_num; - int data_num_sum; - u32 *reg = data; - - /* fetching per-PF registers valus from PF PCIe register space */ - reg_num = ARRAY_SIZE(cmdq_reg_addr_list); - separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); - for (i = 0; i < reg_num; i++) - *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); - for (i = 0; i < separator_num; i++) - *reg++ = SEPARATOR_VALUE; - data_num_sum = reg_num + separator_num; - - reg_num = ARRAY_SIZE(common_reg_addr_list); - separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); - for (i = 0; i < reg_num; i++) - *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]); - for (i = 0; i < separator_num; i++) - *reg++ = SEPARATOR_VALUE; - data_num_sum += reg_num + separator_num; - - reg_num = ARRAY_SIZE(ring_reg_addr_list); - separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); - for (j = 0; j < kinfo->num_tqps; j++) { - for (i = 0; i < reg_num; i++) - *reg++ = hclge_read_dev(&hdev->hw, - ring_reg_addr_list[i] + - HCLGE_RING_REG_OFFSET * j); - for (i = 0; i < separator_num; i++) - *reg++ = SEPARATOR_VALUE; - } - data_num_sum += (reg_num + separator_num) * kinfo->num_tqps; - - reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list); - separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); - for (j = 0; j < hdev->num_msi_used - 1; j++) { - for (i = 0; i < reg_num; i++) - *reg++ = hclge_read_dev(&hdev->hw, - tqp_intr_reg_addr_list[i] + - HCLGE_RING_INT_REG_OFFSET * j); - for (i = 0; i < separator_num; i++) - *reg++ = SEPARATOR_VALUE; - } - data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1); - - return data_num_sum; -} - -static int 
hclge_get_regs_len(struct hnae3_handle *handle) -{ - int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - int regs_num_32_bit, regs_num_64_bit, dfx_regs_len; - int regs_lines_32_bit, regs_lines_64_bit; - int ret; - - ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit); - if (ret) { - dev_err(&hdev->pdev->dev, - "Get register number failed, ret = %d.\n", ret); - return ret; - } - - ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len); - if (ret) { - dev_err(&hdev->pdev->dev, - "Get dfx reg len failed, ret = %d.\n", ret); - return ret; - } - - cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + - REG_SEPARATOR_LINE; - common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + - REG_SEPARATOR_LINE; - ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + - REG_SEPARATOR_LINE; - tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + - REG_SEPARATOR_LINE; - regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE + - REG_SEPARATOR_LINE; - regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE + - REG_SEPARATOR_LINE; - - return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps + - tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit + - regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len; -} - -static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, - void *data) -{ - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - u32 regs_num_32_bit, regs_num_64_bit; - int i, reg_num, separator_num, ret; - u32 *reg = data; - - *version = hdev->fw_version; - - ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit); - if (ret) { - dev_err(&hdev->pdev->dev, - "Get register number failed, ret = %d.\n", ret); - return; - } - - reg += hclge_fetch_pf_reg(hdev, reg, kinfo); - - ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg); - if (ret) { - dev_err(&hdev->pdev->dev, - "Get 32 bit register failed, ret = %d.\n", ret); - return; - } - reg_num = regs_num_32_bit; - reg += reg_num; - separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); - for (i = 0; i < separator_num; i++) - *reg++ = SEPARATOR_VALUE; - - ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg); - if (ret) { - dev_err(&hdev->pdev->dev, - "Get 64 bit register failed, ret = %d.\n", ret); - return; - } - reg_num = regs_num_64_bit * 2; - reg += reg_num; - separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); - for (i = 0; i < separator_num; i++) - *reg++ = SEPARATOR_VALUE; - - ret = hclge_get_dfx_reg(hdev, reg); - if (ret) - dev_err(&hdev->pdev->dev, - "Get dfx register failed, ret = %d.\n", ret); -} - static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) { struct hclge_set_led_state_cmd *req; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 6a43d1515585..51979cf71262 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -185,15 +185,22 @@ enum HLCGE_PORT_TYPE { #define HCLGE_SUPPORT_1G_BIT BIT(0) #define HCLGE_SUPPORT_10G_BIT BIT(1) #define HCLGE_SUPPORT_25G_BIT BIT(2) -#define HCLGE_SUPPORT_50G_BIT BIT(3) -#define HCLGE_SUPPORT_100G_BIT BIT(4) +#define HCLGE_SUPPORT_50G_R2_BIT
BIT(3) +#define HCLGE_SUPPORT_100G_R4_BIT BIT(4) /* to be compatible with existing board */ #define HCLGE_SUPPORT_40G_BIT BIT(5) #define HCLGE_SUPPORT_100M_BIT BIT(6) #define HCLGE_SUPPORT_10M_BIT BIT(7) #define HCLGE_SUPPORT_200G_BIT BIT(8) +#define HCLGE_SUPPORT_50G_R1_BIT BIT(9) +#define HCLGE_SUPPORT_100G_R2_BIT BIT(10) + #define HCLGE_SUPPORT_GE \ (HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT) +#define HCLGE_SUPPORT_50G_BITS \ + (HCLGE_SUPPORT_50G_R2_BIT | HCLGE_SUPPORT_50G_R1_BIT) +#define HCLGE_SUPPORT_100G_BITS \ + (HCLGE_SUPPORT_100G_R4_BIT | HCLGE_SUPPORT_100G_R2_BIT) enum HCLGE_DEV_STATE { HCLGE_STATE_REINITING, @@ -919,8 +926,6 @@ struct hclge_dev { #define HCLGE_FLAG_MAIN BIT(0) #define HCLGE_FLAG_DCB_CAPABLE BIT(1) -#define HCLGE_FLAG_DCB_ENABLE BIT(2) -#define HCLGE_FLAG_MQPRIO_ENABLE BIT(3) u32 flag; u32 pkt_buf_size; /* Total pf buf size for tx/rx */ @@ -1078,6 +1083,11 @@ struct hclge_mac_speed_map { u32 speed_fw; /* speed defined in firmware */ }; +struct hclge_link_mode_bmap { + u16 support_bit; + enum ethtool_link_mode_bit_indices link_mode; +}; + int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc, bool en_mc_pmc, bool en_bc_pmc); int hclge_add_uc_addr_common(struct hclge_vport *vport, @@ -1142,13 +1152,12 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, u16 state, struct hclge_vlan_info *vlan_info); void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time); -int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, - struct hclge_desc *desc); void hclge_report_hw_error(struct hclge_dev *hdev, enum hnae3_hw_error_type type); -void hclge_inform_vf_promisc_info(struct hclge_vport *vport); int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len); int hclge_push_vf_link_status(struct hclge_vport *vport); int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en); int hclge_mac_update_stats(struct hclge_dev *hdev); +struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf); +int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 04ff9bf12185..4b0d07ca2505 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -124,7 +124,7 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, return status; } -static int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type) +int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type) { __le16 msg_data; u8 dest_vfid; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c new file mode 100644 index 000000000000..43c1c18fa81f --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c @@ -0,0 +1,668 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2023 Hisilicon Limited.
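+ +/* Summary sketch (editorial note, not part of the patch): this new file + * gathers the register-dump helpers previously open-coded in + * hclge_main.c. The dump it produces is framed as one hclge_reg_header + * (magic "hns3regs", is_vf = 0) followed by a sequence of hclge_reg_tlv + * records, each tagged with an hclge_reg_tag value and followed by that + * block's raw u32 register values. + */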
+ +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hclge_regs.h" +#include "hnae3.h" + +static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, + HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, + HCLGE_COMM_NIC_CSQ_DEPTH_REG, + HCLGE_COMM_NIC_CSQ_TAIL_REG, + HCLGE_COMM_NIC_CSQ_HEAD_REG, + HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, + HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, + HCLGE_COMM_NIC_CRQ_DEPTH_REG, + HCLGE_COMM_NIC_CRQ_TAIL_REG, + HCLGE_COMM_NIC_CRQ_HEAD_REG, + HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, + HCLGE_COMM_CMDQ_INTR_STS_REG, + HCLGE_COMM_CMDQ_INTR_EN_REG, + HCLGE_COMM_CMDQ_INTR_GEN_REG}; + +static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE, + HCLGE_PF_OTHER_INT_REG, + HCLGE_MISC_RESET_STS_REG, + HCLGE_MISC_VECTOR_INT_STS, + HCLGE_GLOBAL_RESET_REG, + HCLGE_FUN_RST_ING, + HCLGE_GRO_EN_REG}; + +static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG, + HCLGE_RING_RX_ADDR_H_REG, + HCLGE_RING_RX_BD_NUM_REG, + HCLGE_RING_RX_BD_LENGTH_REG, + HCLGE_RING_RX_MERGE_EN_REG, + HCLGE_RING_RX_TAIL_REG, + HCLGE_RING_RX_HEAD_REG, + HCLGE_RING_RX_FBD_NUM_REG, + HCLGE_RING_RX_OFFSET_REG, + HCLGE_RING_RX_FBD_OFFSET_REG, + HCLGE_RING_RX_STASH_REG, + HCLGE_RING_RX_BD_ERR_REG, + HCLGE_RING_TX_ADDR_L_REG, + HCLGE_RING_TX_ADDR_H_REG, + HCLGE_RING_TX_BD_NUM_REG, + HCLGE_RING_TX_PRIORITY_REG, + HCLGE_RING_TX_TC_REG, + HCLGE_RING_TX_MERGE_EN_REG, + HCLGE_RING_TX_TAIL_REG, + HCLGE_RING_TX_HEAD_REG, + HCLGE_RING_TX_FBD_NUM_REG, + HCLGE_RING_TX_OFFSET_REG, + HCLGE_RING_TX_EBD_NUM_REG, + HCLGE_RING_TX_EBD_OFFSET_REG, + HCLGE_RING_TX_BD_ERR_REG, + HCLGE_RING_EN_REG}; + +static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG, + HCLGE_TQP_INTR_GL0_REG, + HCLGE_TQP_INTR_GL1_REG, + HCLGE_TQP_INTR_GL2_REG, + HCLGE_TQP_INTR_RL_REG}; + +/* Get DFX BD number offset */ +#define HCLGE_DFX_BIOS_BD_OFFSET 1 +#define HCLGE_DFX_SSU_0_BD_OFFSET 2 +#define HCLGE_DFX_SSU_1_BD_OFFSET 3 +#define HCLGE_DFX_IGU_BD_OFFSET 4 +#define HCLGE_DFX_RPU_0_BD_OFFSET 5 +#define HCLGE_DFX_RPU_1_BD_OFFSET 6 +#define HCLGE_DFX_NCSI_BD_OFFSET 7 +#define HCLGE_DFX_RTC_BD_OFFSET 8 +#define HCLGE_DFX_PPP_BD_OFFSET 9 +#define HCLGE_DFX_RCB_BD_OFFSET 10 +#define HCLGE_DFX_TQP_BD_OFFSET 11 +#define HCLGE_DFX_SSU_2_BD_OFFSET 12 + +static const u32 hclge_dfx_bd_offset_list[] = { + HCLGE_DFX_BIOS_BD_OFFSET, + HCLGE_DFX_SSU_0_BD_OFFSET, + HCLGE_DFX_SSU_1_BD_OFFSET, + HCLGE_DFX_IGU_BD_OFFSET, + HCLGE_DFX_RPU_0_BD_OFFSET, + HCLGE_DFX_RPU_1_BD_OFFSET, + HCLGE_DFX_NCSI_BD_OFFSET, + HCLGE_DFX_RTC_BD_OFFSET, + HCLGE_DFX_PPP_BD_OFFSET, + HCLGE_DFX_RCB_BD_OFFSET, + HCLGE_DFX_TQP_BD_OFFSET, + HCLGE_DFX_SSU_2_BD_OFFSET +}; + +static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = { + HCLGE_OPC_DFX_BIOS_COMMON_REG, + HCLGE_OPC_DFX_SSU_REG_0, + HCLGE_OPC_DFX_SSU_REG_1, + HCLGE_OPC_DFX_IGU_EGU_REG, + HCLGE_OPC_DFX_RPU_REG_0, + HCLGE_OPC_DFX_RPU_REG_1, + HCLGE_OPC_DFX_NCSI_REG, + HCLGE_OPC_DFX_RTC_REG, + HCLGE_OPC_DFX_PPP_REG, + HCLGE_OPC_DFX_RCB_REG, + HCLGE_OPC_DFX_TQP_REG, + HCLGE_OPC_DFX_SSU_REG_2 +}; + +enum hclge_reg_tag { + HCLGE_REG_TAG_CMDQ = 0, + HCLGE_REG_TAG_COMMON, + HCLGE_REG_TAG_RING, + HCLGE_REG_TAG_TQP_INTR, + HCLGE_REG_TAG_QUERY_32_BIT, + HCLGE_REG_TAG_QUERY_64_BIT, + HCLGE_REG_TAG_DFX_BIOS_COMMON, + HCLGE_REG_TAG_DFX_SSU_0, + HCLGE_REG_TAG_DFX_SSU_1, + HCLGE_REG_TAG_DFX_IGU_EGU, + HCLGE_REG_TAG_DFX_RPU_0, + HCLGE_REG_TAG_DFX_RPU_1, + HCLGE_REG_TAG_DFX_NCSI, + HCLGE_REG_TAG_DFX_RTC, + HCLGE_REG_TAG_DFX_PPP, + HCLGE_REG_TAG_DFX_RCB, + HCLGE_REG_TAG_DFX_TQP, + HCLGE_REG_TAG_DFX_SSU_2, + 
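/* editorial note: emitted once per RPU tunnel by hclge_get_dfx_rpu_tnl_reg(), i.e. dev_specs.tnl_num TLVs */ +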
HCLGE_REG_TAG_RPU_TNL, +}; + +#pragma pack(4) +struct hclge_reg_tlv { + u16 tag; + u16 len; +}; + +struct hclge_reg_header { + u64 magic_number; + u8 is_vf; + u8 rsv[7]; +}; + +#pragma pack() + +#define HCLGE_REG_TLV_SIZE sizeof(struct hclge_reg_tlv) +#define HCLGE_REG_HEADER_SIZE sizeof(struct hclge_reg_header) +#define HCLGE_REG_TLV_SPACE (sizeof(struct hclge_reg_tlv) / sizeof(u32)) +#define HCLGE_REG_HEADER_SPACE (sizeof(struct hclge_reg_header) / sizeof(u32)) +#define HCLGE_REG_MAGIC_NUMBER 0x686e733372656773 /* meaning is hns3regs */ + +#define HCLGE_REG_RPU_TNL_ID_0 1 + +static u32 hclge_reg_get_header(void *data) +{ + struct hclge_reg_header *header = data; + + header->magic_number = HCLGE_REG_MAGIC_NUMBER; + header->is_vf = 0x0; + + return HCLGE_REG_HEADER_SPACE; +} + +static u32 hclge_reg_get_tlv(u32 tag, u32 regs_num, void *data) +{ + struct hclge_reg_tlv *tlv = data; + + tlv->tag = tag; + tlv->len = regs_num * sizeof(u32) + HCLGE_REG_TLV_SIZE; + + return HCLGE_REG_TLV_SPACE; +} + +static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, + void *data) +{ +#define HCLGE_32_BIT_REG_RTN_DATANUM 8 +#define HCLGE_32_BIT_DESC_NODATA_LEN 2 + + struct hclge_desc *desc; + u32 *reg_val = data; + __le32 *desc_data; + int nodata_num; + int cmd_num; + int i, k, n; + int ret; + + if (regs_num == 0) + return 0; + + nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN; + cmd_num = DIV_ROUND_UP(regs_num + nodata_num, + HCLGE_32_BIT_REG_RTN_DATANUM); + desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true); + ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); + if (ret) { + dev_err(&hdev->pdev->dev, + "Query 32 bit register cmd failed, ret = %d.\n", ret); + kfree(desc); + return ret; + } + + for (i = 0; i < cmd_num; i++) { + if (i == 0) { + desc_data = (__le32 *)(&desc[i].data[0]); + n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num; + } else { + desc_data = (__le32 *)(&desc[i]); + n = HCLGE_32_BIT_REG_RTN_DATANUM; + } + for (k = 0; k < n; k++) { + *reg_val++ = le32_to_cpu(*desc_data++); + + regs_num--; + if (!regs_num) + break; + } + } + + kfree(desc); + return 0; +} + +static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, + void *data) +{ +#define HCLGE_64_BIT_REG_RTN_DATANUM 4 +#define HCLGE_64_BIT_DESC_NODATA_LEN 1 + + struct hclge_desc *desc; + u64 *reg_val = data; + __le64 *desc_data; + int nodata_len; + int cmd_num; + int i, k, n; + int ret; + + if (regs_num == 0) + return 0; + + nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN; + cmd_num = DIV_ROUND_UP(regs_num + nodata_len, + HCLGE_64_BIT_REG_RTN_DATANUM); + desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true); + ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); + if (ret) { + dev_err(&hdev->pdev->dev, + "Query 64 bit register cmd failed, ret = %d.\n", ret); + kfree(desc); + return ret; + } + + for (i = 0; i < cmd_num; i++) { + if (i == 0) { + desc_data = (__le64 *)(&desc[i].data[0]); + n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len; + } else { + desc_data = (__le64 *)(&desc[i]); + n = HCLGE_64_BIT_REG_RTN_DATANUM; + } + for (k = 0; k < n; k++) { + *reg_val++ = le64_to_cpu(*desc_data++); + + regs_num--; + if (!regs_num) + break; + } + } + + kfree(desc); + return 0; +} + +int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc) +{ + int i; + + /* initialize command BD except the last 
one */ + for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) { + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, + true); + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + } + + /* initialize the last command BD */ + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true); + + return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT); +} + +static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev, + int *bd_num_list, + u32 type_num) +{ + u32 entries_per_desc, desc_index, index, offset, i; + struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT]; + int ret; + + ret = hclge_query_bd_num_cmd_send(hdev, desc); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get dfx bd num fail, status is %d.\n", ret); + return ret; + } + + entries_per_desc = ARRAY_SIZE(desc[0].data); + for (i = 0; i < type_num; i++) { + offset = hclge_dfx_bd_offset_list[i]; + index = offset % entries_per_desc; + desc_index = offset / entries_per_desc; + bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]); + } + + return ret; +} + +static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev, + struct hclge_desc *desc_src, int bd_num, + enum hclge_opcode_type cmd) +{ + struct hclge_desc *desc = desc_src; + int i, ret; + + hclge_cmd_setup_basic_desc(desc, cmd, true); + for (i = 0; i < bd_num - 1; i++) { + desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + desc++; + hclge_cmd_setup_basic_desc(desc, cmd, true); + } + + desc = desc_src; + ret = hclge_cmd_send(&hdev->hw, desc, bd_num); + if (ret) + dev_err(&hdev->pdev->dev, + "Query dfx reg cmd(0x%x) send fail, status is %d.\n", + cmd, ret); + + return ret; +} + +/* tnl_id = 0 means get sum of all tnl reg's value */ +static int hclge_dfx_reg_rpu_tnl_cmd_send(struct hclge_dev *hdev, u32 tnl_id, + struct hclge_desc *desc, int bd_num) +{ + int i, ret; + + for (i = 0; i < bd_num; i++) { + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_RPU_REG_0, + true); + if (i != bd_num - 1) + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + } + + desc[0].data[0] = cpu_to_le32(tnl_id); + ret = hclge_cmd_send(&hdev->hw, desc, bd_num); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to query dfx rpu tnl reg, ret = %d\n", + ret); + return ret; +} + +static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num, + void *data) +{ + int entries_per_desc, reg_num, desc_index, index, i; + struct hclge_desc *desc = desc_src; + u32 *reg = data; + + entries_per_desc = ARRAY_SIZE(desc->data); + reg_num = entries_per_desc * bd_num; + for (i = 0; i < reg_num; i++) { + index = i % entries_per_desc; + desc_index = i / entries_per_desc; + *reg++ = le32_to_cpu(desc[desc_index].data[index]); + } + + return reg_num; +} + +static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len) +{ + u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + int data_len_per_desc; + int *bd_num_list; + int ret; + u32 i; + + bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL); + if (!bd_num_list) + return -ENOMEM; + + ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get dfx reg bd num fail, status is %d.\n", ret); + goto out; + } + + data_len_per_desc = sizeof_field(struct hclge_desc, data); + *len = 0; + for (i = 0; i < dfx_reg_type_num; i++) + *len += bd_num_list[i] * data_len_per_desc + HCLGE_REG_TLV_SIZE; + + /** + * the num of dfx_rpu_0 is reused by each dfx_rpu_tnl + * HCLGE_DFX_BD_OFFSET is starting at 1, but the 
array subscript is + * starting at 0, so the offset needs '- 1'. + */ + *len += (bd_num_list[HCLGE_DFX_RPU_0_BD_OFFSET - 1] * data_len_per_desc + + HCLGE_REG_TLV_SIZE) * ae_dev->dev_specs.tnl_num; + +out: + kfree(bd_num_list); + return ret; +} + +static int hclge_get_dfx_rpu_tnl_reg(struct hclge_dev *hdev, u32 *reg, + struct hclge_desc *desc_src, + int bd_num) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + int ret = 0; + u8 i; + + for (i = HCLGE_REG_RPU_TNL_ID_0; i <= ae_dev->dev_specs.tnl_num; i++) { + ret = hclge_dfx_reg_rpu_tnl_cmd_send(hdev, i, desc_src, bd_num); + if (ret) + break; + + reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RPU_TNL, + ARRAY_SIZE(desc_src->data) * bd_num, + reg); + reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg); + } + + return ret; +} + +static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data) +{ + u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list); + int bd_num, bd_num_max, buf_len; + struct hclge_desc *desc_src; + int *bd_num_list; + u32 *reg = data; + int ret; + u32 i; + + bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL); + if (!bd_num_list) + return -ENOMEM; + + ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get dfx reg bd num fail, status is %d.\n", ret); + goto out; + } + + bd_num_max = bd_num_list[0]; + for (i = 1; i < dfx_reg_type_num; i++) + bd_num_max = max_t(int, bd_num_max, bd_num_list[i]); + + buf_len = sizeof(*desc_src) * bd_num_max; + desc_src = kzalloc(buf_len, GFP_KERNEL); + if (!desc_src) { + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < dfx_reg_type_num; i++) { + bd_num = bd_num_list[i]; + ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num, + hclge_dfx_reg_opcode_list[i]); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get dfx reg fail, status is %d.\n", ret); + goto free; + } + + reg += hclge_reg_get_tlv(HCLGE_REG_TAG_DFX_BIOS_COMMON + i, + ARRAY_SIZE(desc_src->data) * bd_num, + reg); + reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg); + } + + /** + * HCLGE_DFX_BD_OFFSET is starting at 1, but the array subscript is + * starting at 0, so the offset needs '- 1'.
+ */ + bd_num = bd_num_list[HCLGE_DFX_RPU_0_BD_OFFSET - 1]; + ret = hclge_get_dfx_rpu_tnl_reg(hdev, reg, desc_src, bd_num); + +free: + kfree(desc_src); +out: + kfree(bd_num_list); + return ret; +} + +static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data, + struct hnae3_knic_private_info *kinfo) +{ +#define HCLGE_RING_REG_OFFSET 0x200 +#define HCLGE_RING_INT_REG_OFFSET 0x4 + + int i, j, reg_num; + int data_num_sum; + u32 *reg = data; + + /* fetching per-PF register values from PF PCIe register space */ + reg_num = ARRAY_SIZE(cmdq_reg_addr_list); + reg += hclge_reg_get_tlv(HCLGE_REG_TAG_CMDQ, reg_num, reg); + for (i = 0; i < reg_num; i++) + *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); + data_num_sum = reg_num + HCLGE_REG_TLV_SPACE; + + reg_num = ARRAY_SIZE(common_reg_addr_list); + reg += hclge_reg_get_tlv(HCLGE_REG_TAG_COMMON, reg_num, reg); + for (i = 0; i < reg_num; i++) + *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]); + data_num_sum += reg_num + HCLGE_REG_TLV_SPACE; + + reg_num = ARRAY_SIZE(ring_reg_addr_list); + for (j = 0; j < kinfo->num_tqps; j++) { + reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RING, reg_num, reg); + for (i = 0; i < reg_num; i++) + *reg++ = hclge_read_dev(&hdev->hw, + ring_reg_addr_list[i] + + HCLGE_RING_REG_OFFSET * j); + } + data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) * kinfo->num_tqps; + + reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list); + for (j = 0; j < hdev->num_msi_used - 1; j++) { + reg += hclge_reg_get_tlv(HCLGE_REG_TAG_TQP_INTR, reg_num, reg); + for (i = 0; i < reg_num; i++) + *reg++ = hclge_read_dev(&hdev->hw, + tqp_intr_reg_addr_list[i] + + HCLGE_RING_INT_REG_OFFSET * j); + } + data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) * + (hdev->num_msi_used - 1); + + return data_num_sum; +} + +static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit, + u32 *regs_num_64_bit) +{ + struct hclge_desc desc; + u32 total_num; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Query register number cmd failed, ret = %d.\n", ret); + return ret; + } + + *regs_num_32_bit = le32_to_cpu(desc.data[0]); + *regs_num_64_bit = le32_to_cpu(desc.data[1]); + + total_num = *regs_num_32_bit + *regs_num_64_bit; + if (!total_num) + return -EINVAL; + + return 0; +} + +int hclge_get_regs_len(struct hnae3_handle *handle) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hclge_vport *vport = hclge_get_vport(handle); + int regs_num_32_bit, regs_num_64_bit, dfx_regs_len; + int cmdq_len, common_len, ring_len, tqp_intr_len; + int regs_len_32_bit, regs_len_64_bit; + struct hclge_dev *hdev = vport->back; + int ret; + + ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get register number failed, ret = %d.\n", ret); + return ret; + } + + ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get dfx reg len failed, ret = %d.\n", ret); + return ret; + } + + cmdq_len = HCLGE_REG_TLV_SIZE + sizeof(cmdq_reg_addr_list); + common_len = HCLGE_REG_TLV_SIZE + sizeof(common_reg_addr_list); + ring_len = HCLGE_REG_TLV_SIZE + sizeof(ring_reg_addr_list); + tqp_intr_len = HCLGE_REG_TLV_SIZE + sizeof(tqp_intr_reg_addr_list); + regs_len_32_bit = HCLGE_REG_TLV_SIZE + regs_num_32_bit * sizeof(u32); + regs_len_64_bit = HCLGE_REG_TLV_SIZE + regs_num_64_bit * sizeof(u64); + + /* return the total length of all register values */ + return
HCLGE_REG_HEADER_SIZE + cmdq_len + common_len + ring_len * + kinfo->num_tqps + tqp_intr_len * (hdev->num_msi_used - 1) + + regs_len_32_bit + regs_len_64_bit + dfx_regs_len; +} + +void hclge_get_regs(struct hnae3_handle *handle, u32 *version, + void *data) +{ +#define HCLGE_REG_64_BIT_SPACE_MULTIPLE 2 + + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 regs_num_32_bit, regs_num_64_bit; + u32 *reg = data; + int ret; + + *version = hdev->fw_version; + + ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get register number failed, ret = %d.\n", ret); + return; + } + + reg += hclge_reg_get_header(reg); + reg += hclge_fetch_pf_reg(hdev, reg, kinfo); + + reg += hclge_reg_get_tlv(HCLGE_REG_TAG_QUERY_32_BIT, + regs_num_32_bit, reg); + ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get 32 bit register failed, ret = %d.\n", ret); + return; + } + reg += regs_num_32_bit; + + reg += hclge_reg_get_tlv(HCLGE_REG_TAG_QUERY_64_BIT, + regs_num_64_bit * + HCLGE_REG_64_BIT_SPACE_MULTIPLE, reg); + ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get 64 bit register failed, ret = %d.\n", ret); + return; + } + reg += regs_num_64_bit * HCLGE_REG_64_BIT_SPACE_MULTIPLE; + + ret = hclge_get_dfx_reg(hdev, reg); + if (ret) + dev_err(&hdev->pdev->dev, + "Get dfx register failed, ret = %d.\n", ret); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.h new file mode 100644 index 000000000000..b6bc1ecb8054 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2023 Hisilicon Limited. 
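
The getregs path above frames every block of the dump as a tag/length/value record behind a fixed header, instead of padding blocks with magic separator words, so a consumer can skip tags it does not recognize. Below is a minimal user-space reader sketch, assuming the pack(4) header and TLV layout this series introduces (a 16-byte header carrying the "hns3regs" magic, then 4-byte TLVs whose len field counts the TLV header plus its u32 payload) and host byte order; parse_reg_dump() and its names are illustrative, not kernel API:

        #include <stddef.h>
        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        static void parse_reg_dump(const uint8_t *buf, size_t len)
        {
                uint64_t magic;
                size_t off = 16;        /* 8-byte magic, is_vf byte, 7 reserved */

                if (len < off)
                        return;
                memcpy(&magic, buf, sizeof(magic));
                if (magic != 0x686e733372656773ULL)     /* "hns3regs" */
                        return;

                while (off + 4 <= len) {
                        uint16_t tag, tlv_len;

                        memcpy(&tag, buf + off, sizeof(tag));
                        memcpy(&tlv_len, buf + off + 2, sizeof(tlv_len));
                        if (tlv_len < 4 || off + tlv_len > len)
                                break;  /* malformed record */

                        /* payload is (tlv_len - 4) bytes of u32 register values */
                        printf("tag %u: %u registers\n", tag, (tlv_len - 4u) / 4u);
                        off += tlv_len;
                }
        }

Because len covers the whole record, off += tlv_len lands on the next tag even for blocks the reader does not understand.
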
+ +#ifndef __HCLGE_REGS_H +#define __HCLGE_REGS_H +#include <linux/types.h> +#include "hclge_comm_cmd.h" + +struct hnae3_handle; +struct hclge_dev; + +int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, + struct hclge_desc *desc); +int hclge_get_regs_len(struct hnae3_handle *handle); +void hclge_get_regs(struct hnae3_handle *handle, u32 *version, + void *data); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 34f02ca8d1d2..0aa9beefd1c7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -6,6 +6,7 @@ #include <net/rtnetlink.h> #include "hclgevf_cmd.h" #include "hclgevf_main.h" +#include "hclgevf_regs.h" #include "hclge_mbx.h" #include "hnae3.h" #include "hclgevf_devlink.h" @@ -33,58 +34,6 @@ static const struct pci_device_id ae_algovf_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); -static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, - HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, - HCLGE_COMM_NIC_CSQ_DEPTH_REG, - HCLGE_COMM_NIC_CSQ_TAIL_REG, - HCLGE_COMM_NIC_CSQ_HEAD_REG, - HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, - HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, - HCLGE_COMM_NIC_CRQ_DEPTH_REG, - HCLGE_COMM_NIC_CRQ_TAIL_REG, - HCLGE_COMM_NIC_CRQ_HEAD_REG, - HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, - HCLGE_COMM_VECTOR0_CMDQ_STATE_REG, - HCLGE_COMM_CMDQ_INTR_EN_REG, - HCLGE_COMM_CMDQ_INTR_GEN_REG}; - -static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, - HCLGEVF_RST_ING, - HCLGEVF_GRO_EN_REG}; - -static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, - HCLGEVF_RING_RX_ADDR_H_REG, - HCLGEVF_RING_RX_BD_NUM_REG, - HCLGEVF_RING_RX_BD_LENGTH_REG, - HCLGEVF_RING_RX_MERGE_EN_REG, - HCLGEVF_RING_RX_TAIL_REG, - HCLGEVF_RING_RX_HEAD_REG, - HCLGEVF_RING_RX_FBD_NUM_REG, - HCLGEVF_RING_RX_OFFSET_REG, - HCLGEVF_RING_RX_FBD_OFFSET_REG, - HCLGEVF_RING_RX_STASH_REG, - HCLGEVF_RING_RX_BD_ERR_REG, - HCLGEVF_RING_TX_ADDR_L_REG, - HCLGEVF_RING_TX_ADDR_H_REG, - HCLGEVF_RING_TX_BD_NUM_REG, - HCLGEVF_RING_TX_PRIORITY_REG, - HCLGEVF_RING_TX_TC_REG, - HCLGEVF_RING_TX_MERGE_EN_REG, - HCLGEVF_RING_TX_TAIL_REG, - HCLGEVF_RING_TX_HEAD_REG, - HCLGEVF_RING_TX_FBD_NUM_REG, - HCLGEVF_RING_TX_OFFSET_REG, - HCLGEVF_RING_TX_EBD_NUM_REG, - HCLGEVF_RING_TX_EBD_OFFSET_REG, - HCLGEVF_RING_TX_BD_ERR_REG, - HCLGEVF_RING_EN_REG}; - -static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, - HCLGEVF_TQP_INTR_GL0_REG, - HCLGEVF_TQP_INTR_GL1_REG, - HCLGEVF_TQP_INTR_GL2_REG, - HCLGEVF_TQP_INTR_RL_REG}; - /* hclgevf_cmd_send - send command to command queue * @hw: pointer to the hw struct * @desc: prefilled descriptor for describing the command @@ -111,7 +60,7 @@ void hclgevf_arq_init(struct hclgevf_dev *hdev) spin_unlock(&cmdq->crq.lock); } -static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle) +struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle) { if (!handle->client) return container_of(handle, struct hclgevf_dev, nic); @@ -1257,6 +1206,8 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) { set_bit(vlan_id, hdev->vlan_del_fail_bmap); return -EBUSY; + } else if (!is_kill && test_bit(vlan_id, hdev->vlan_del_fail_bmap)) { + clear_bit(vlan_id, hdev->vlan_del_fail_bmap); } hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, @@ -1284,20 +1235,25 @@ static void hclgevf_sync_vlan_filter(struct 
hclgevf_dev *hdev) int ret, sync_cnt = 0; u16 vlan_id; + if (bitmap_empty(hdev->vlan_del_fail_bmap, VLAN_N_VID)) + return; + + rtnl_lock(); vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); while (vlan_id != VLAN_N_VID) { ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q), vlan_id, true); if (ret) - return; + break; clear_bit(vlan_id, hdev->vlan_del_fail_bmap); sync_cnt++; if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT) - return; + break; vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); } + rtnl_unlock(); } static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) @@ -1906,7 +1862,8 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) unsigned long delta = round_jiffies_relative(HZ); struct hnae3_handle *handle = &hdev->nic; - if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) + if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state) || + test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state)) return; if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { @@ -2024,8 +1981,18 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, return HCLGEVF_VECTOR0_EVENT_OTHER; } +static void hclgevf_reset_timer(struct timer_list *t) +{ + struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer); + + hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST); + hclgevf_reset_task_schedule(hdev); +} + static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) { +#define HCLGEVF_RESET_DELAY 5 + enum hclgevf_evt_cause event_cause; struct hclgevf_dev *hdev = data; u32 clearval; @@ -2037,7 +2004,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) switch (event_cause) { case HCLGEVF_VECTOR0_EVENT_RST: - hclgevf_reset_task_schedule(hdev); + mod_timer(&hdev->reset_timer, + jiffies + msecs_to_jiffies(HCLGEVF_RESET_DELAY)); break; case HCLGEVF_VECTOR0_EVENT_MBX: hclgevf_mbx_handler(hdev); @@ -2980,6 +2948,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) HCLGEVF_DRIVER_NAME); hclgevf_task_schedule(hdev, round_jiffies_relative(HZ)); + timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0); return 0; @@ -3258,72 +3227,6 @@ static void hclgevf_get_link_mode(struct hnae3_handle *handle, *advertising = hdev->hw.mac.advertising; } -#define MAX_SEPARATE_NUM 4 -#define SEPARATOR_VALUE 0xFDFCFBFA -#define REG_NUM_PER_LINE 4 -#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) - -static int hclgevf_get_regs_len(struct hnae3_handle *handle) -{ - int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - - cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; - common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; - ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; - tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; - - return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + - tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; -} - -static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, - void *data) -{ - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - int i, j, reg_um, separator_num; - u32 *reg = data; - - *version = hdev->fw_version; - - /* fetching per-VF registers values from VF PCIe register space */ - reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); - separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; - for (i = 0; i < reg_um; i++) - *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); - 
for (i = 0; i < separator_num; i++) - *reg++ = SEPARATOR_VALUE; - - reg_um = sizeof(common_reg_addr_list) / sizeof(u32); - separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; - for (i = 0; i < reg_um; i++) - *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); - for (i = 0; i < separator_num; i++) - *reg++ = SEPARATOR_VALUE; - - reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); - separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; - for (j = 0; j < hdev->num_tqps; j++) { - for (i = 0; i < reg_um; i++) - *reg++ = hclgevf_read_dev(&hdev->hw, - ring_reg_addr_list[i] + - HCLGEVF_TQP_REG_SIZE * j); - for (i = 0; i < separator_num; i++) - *reg++ = SEPARATOR_VALUE; - } - - reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); - separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; - for (j = 0; j < hdev->num_msi_used - 1; j++) { - for (i = 0; i < reg_um; i++) - *reg++ = hclgevf_read_dev(&hdev->hw, - tqp_intr_reg_addr_list[i] + - 4 * j); - for (i = 0; i < separator_num; i++) - *reg++ = SEPARATOR_VALUE; - } -} - void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, struct hclge_mbx_port_base_vlan *port_base_vlan) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 59ca6c794d6d..a73f2bf3a56a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -219,6 +219,7 @@ struct hclgevf_dev { enum hnae3_reset_type reset_level; unsigned long reset_pending; enum hnae3_reset_type reset_type; + struct timer_list reset_timer; #define HCLGEVF_RESET_REQUESTED 0 #define HCLGEVF_RESET_PENDING 1 @@ -294,4 +295,5 @@ void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev); void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev); void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, struct hclge_mbx_port_base_vlan *port_base_vlan); +struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index bbf7b14079de..85c2a634c8f9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -63,6 +63,9 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, i++; } + /* ensure additional_info will be seen after received_resp */ + smp_rmb(); + if (i >= HCLGEVF_MAX_TRY_TIMES) { dev_err(&hdev->pdev->dev, "VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n", @@ -178,6 +181,10 @@ static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev, resp->resp_status = hclgevf_resp_to_errno(resp_status); memcpy(resp->additional_info, req->msg.resp_data, HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8)); + + /* ensure additional_info will be seen before setting received_resp */ + smp_wmb(); + if (match_id) { /* If match_id is not zero, it means PF support match_id. * if the match_id is right, VF get the right response, or diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c new file mode 100644 index 000000000000..65b9dcd38137 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2023 Hisilicon Limited. 
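
The smp_wmb()/smp_rmb() pair added to hclgevf_mbx.c above implements the usual flag-and-payload publication contract: the response payload in additional_info must be globally visible before received_resp is set, and the polling side may read the payload only after it has observed the flag. A stand-alone C11 sketch of the same ordering, using release/acquire fences where the kernel code uses the smp_* barriers (struct and function names here are illustrative):

        #include <stdatomic.h>
        #include <stdint.h>

        struct resp {
                uint8_t additional_info[32];
                atomic_bool received_resp;
        };

        /* producer: publish the payload, then the flag */
        static void publish(struct resp *r, const uint8_t *data, int n)
        {
                for (int i = 0; i < n; i++)
                        r->additional_info[i] = data[i];
                /* counterpart of smp_wmb(): payload writes may not pass the flag */
                atomic_thread_fence(memory_order_release);
                atomic_store_explicit(&r->received_resp, true, memory_order_relaxed);
        }

        /* consumer: poll the flag, then read the payload */
        static int try_consume(struct resp *r, uint8_t *out, int n)
        {
                if (!atomic_load_explicit(&r->received_resp, memory_order_relaxed))
                        return 0;
                /* counterpart of smp_rmb(): payload reads may not pass the flag check */
                atomic_thread_fence(memory_order_acquire);
                for (int i = 0; i < n; i++)
                        out[i] = r->additional_info[i];
                return 1;
        }

Without the fences, the compiler or CPU is free to reorder the payload accesses around the flag, which is exactly the race the mailbox fix closes.
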
+ +#include "hclgevf_main.h" +#include "hclgevf_regs.h" +#include "hnae3.h" + +static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, + HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, + HCLGE_COMM_NIC_CSQ_DEPTH_REG, + HCLGE_COMM_NIC_CSQ_TAIL_REG, + HCLGE_COMM_NIC_CSQ_HEAD_REG, + HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, + HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, + HCLGE_COMM_NIC_CRQ_DEPTH_REG, + HCLGE_COMM_NIC_CRQ_TAIL_REG, + HCLGE_COMM_NIC_CRQ_HEAD_REG, + HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, + HCLGE_COMM_VECTOR0_CMDQ_STATE_REG, + HCLGE_COMM_CMDQ_INTR_EN_REG, + HCLGE_COMM_CMDQ_INTR_GEN_REG}; + +static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, + HCLGEVF_RST_ING, + HCLGEVF_GRO_EN_REG}; + +static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, + HCLGEVF_RING_RX_ADDR_H_REG, + HCLGEVF_RING_RX_BD_NUM_REG, + HCLGEVF_RING_RX_BD_LENGTH_REG, + HCLGEVF_RING_RX_MERGE_EN_REG, + HCLGEVF_RING_RX_TAIL_REG, + HCLGEVF_RING_RX_HEAD_REG, + HCLGEVF_RING_RX_FBD_NUM_REG, + HCLGEVF_RING_RX_OFFSET_REG, + HCLGEVF_RING_RX_FBD_OFFSET_REG, + HCLGEVF_RING_RX_STASH_REG, + HCLGEVF_RING_RX_BD_ERR_REG, + HCLGEVF_RING_TX_ADDR_L_REG, + HCLGEVF_RING_TX_ADDR_H_REG, + HCLGEVF_RING_TX_BD_NUM_REG, + HCLGEVF_RING_TX_PRIORITY_REG, + HCLGEVF_RING_TX_TC_REG, + HCLGEVF_RING_TX_MERGE_EN_REG, + HCLGEVF_RING_TX_TAIL_REG, + HCLGEVF_RING_TX_HEAD_REG, + HCLGEVF_RING_TX_FBD_NUM_REG, + HCLGEVF_RING_TX_OFFSET_REG, + HCLGEVF_RING_TX_EBD_NUM_REG, + HCLGEVF_RING_TX_EBD_OFFSET_REG, + HCLGEVF_RING_TX_BD_ERR_REG, + HCLGEVF_RING_EN_REG}; + +static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, + HCLGEVF_TQP_INTR_GL0_REG, + HCLGEVF_TQP_INTR_GL1_REG, + HCLGEVF_TQP_INTR_GL2_REG, + HCLGEVF_TQP_INTR_RL_REG}; + +enum hclgevf_reg_tag { + HCLGEVF_REG_TAG_CMDQ = 0, + HCLGEVF_REG_TAG_COMMON, + HCLGEVF_REG_TAG_RING, + HCLGEVF_REG_TAG_TQP_INTR, +}; + +#pragma pack(4) +struct hclgevf_reg_tlv { + u16 tag; + u16 len; +}; + +struct hclgevf_reg_header { + u64 magic_number; + u8 is_vf; + u8 rsv[7]; +}; + +#pragma pack() + +#define HCLGEVF_REG_TLV_SIZE sizeof(struct hclgevf_reg_tlv) +#define HCLGEVF_REG_HEADER_SIZE sizeof(struct hclgevf_reg_header) +#define HCLGEVF_REG_TLV_SPACE (sizeof(struct hclgevf_reg_tlv) / sizeof(u32)) +#define HCLGEVF_REG_HEADER_SPACE (sizeof(struct hclgevf_reg_header) / sizeof(u32)) +#define HCLGEVF_REG_MAGIC_NUMBER 0x686e733372656773 /* meaning is hns3regs */ + +static u32 hclgevf_reg_get_header(void *data) +{ + struct hclgevf_reg_header *header = data; + + header->magic_number = HCLGEVF_REG_MAGIC_NUMBER; + header->is_vf = 0x1; + + return HCLGEVF_REG_HEADER_SPACE; +} + +static u32 hclgevf_reg_get_tlv(u32 tag, u32 regs_num, void *data) +{ + struct hclgevf_reg_tlv *tlv = data; + + tlv->tag = tag; + tlv->len = regs_num * sizeof(u32) + HCLGEVF_REG_TLV_SIZE; + + return HCLGEVF_REG_TLV_SPACE; +} + +int hclgevf_get_regs_len(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int cmdq_len, common_len, ring_len, tqp_intr_len; + + cmdq_len = HCLGEVF_REG_TLV_SIZE + sizeof(cmdq_reg_addr_list); + common_len = HCLGEVF_REG_TLV_SIZE + sizeof(common_reg_addr_list); + ring_len = HCLGEVF_REG_TLV_SIZE + sizeof(ring_reg_addr_list); + tqp_intr_len = HCLGEVF_REG_TLV_SIZE + sizeof(tqp_intr_reg_addr_list); + + /* return the total length of all register values */ + return HCLGEVF_REG_HEADER_SIZE + cmdq_len + common_len + + tqp_intr_len * (hdev->num_msi_used - 1) + + ring_len * hdev->num_tqps; +} + +void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, + void *data) +{ 
+#define HCLGEVF_RING_REG_OFFSET 0x200 +#define HCLGEVF_RING_INT_REG_OFFSET 0x4 + + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int i, j, reg_um; + u32 *reg = data; + + *version = hdev->fw_version; + reg += hclgevf_reg_get_header(reg); + + /* fetching per-VF registers values from VF PCIe register space */ + reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); + reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_CMDQ, reg_um, reg); + for (i = 0; i < reg_um; i++) + *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); + + reg_um = sizeof(common_reg_addr_list) / sizeof(u32); + reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_COMMON, reg_um, reg); + for (i = 0; i < reg_um; i++) + *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); + + reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); + for (j = 0; j < hdev->num_tqps; j++) { + reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg); + for (i = 0; i < reg_um; i++) + *reg++ = hclgevf_read_dev(&hdev->hw, + ring_reg_addr_list[i] + + HCLGEVF_RING_REG_OFFSET * j); + } + + reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); + for (j = 0; j < hdev->num_msi_used - 1; j++) { + reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_TQP_INTR, reg_um, reg); + for (i = 0; i < reg_um; i++) + *reg++ = hclgevf_read_dev(&hdev->hw, + tqp_intr_reg_addr_list[i] + + HCLGEVF_RING_INT_REG_OFFSET * j); + } +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.h new file mode 100644 index 000000000000..77bdcf60a1af --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2023 Hisilicon Limited. */ + +#ifndef __HCLGEVF_REGS_H +#define __HCLGEVF_REGS_H +#include <linux/types.h> + +struct hnae3_handle; + +int hclgevf_get_regs_len(struct hnae3_handle *handle); +void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, + void *data); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index 9232caaf0bdc..ed73707176c1 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -217,7 +217,7 @@ static void hns_mdio_cmd_write(struct hns_mdio_device *mdio_dev, static int hns_mdio_write_c22(struct mii_bus *bus, int phy_id, int regnum, u16 data) { - struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv; + struct hns_mdio_device *mdio_dev = bus->priv; u16 reg = (u16)(regnum & 0xffff); u16 cmd_reg_cfg; int ret; @@ -259,7 +259,7 @@ static int hns_mdio_write_c22(struct mii_bus *bus, static int hns_mdio_write_c45(struct mii_bus *bus, int phy_id, int devad, int regnum, u16 data) { - struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv; + struct hns_mdio_device *mdio_dev = bus->priv; u16 reg = (u16)(regnum & 0xffff); u16 cmd_reg_cfg; int ret; @@ -312,7 +312,7 @@ static int hns_mdio_write_c45(struct mii_bus *bus, int phy_id, int devad, */ static int hns_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum) { - struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv; + struct hns_mdio_device *mdio_dev = bus->priv; u16 reg = (u16)(regnum & 0xffff); u16 reg_val; int ret; @@ -363,7 +363,7 @@ static int hns_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum) static int hns_mdio_read_c45(struct mii_bus *bus, int phy_id, int devad, int regnum) { - struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv; + struct hns_mdio_device 
*mdio_dev = bus->priv; u16 reg = (u16)(regnum & 0xffff); u16 reg_val; int ret; @@ -424,7 +424,7 @@ static int hns_mdio_read_c45(struct mii_bus *bus, int phy_id, int devad, */ static int hns_mdio_reset(struct mii_bus *bus) { - struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv; + struct hns_mdio_device *mdio_dev = bus->priv; const struct hns_mdio_sc_reg *sc_reg; int ret; @@ -610,7 +610,7 @@ static int hns_mdio_probe(struct platform_device *pdev) * * Return 0 on success, negative on failure */ -static int hns_mdio_remove(struct platform_device *pdev) +static void hns_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus; @@ -618,7 +618,6 @@ static int hns_mdio_remove(struct platform_device *pdev) mdiobus_unregister(bus); platform_set_drvdata(pdev, NULL); - return 0; } static const struct of_device_id hns_mdio_match[] = { @@ -636,7 +635,7 @@ MODULE_DEVICE_TABLE(acpi, hns_mdio_acpi_match); static struct platform_driver hns_mdio_driver = { .probe = hns_mdio_probe, - .remove = hns_mdio_remove, + .remove_new = hns_mdio_remove, .driver = { .name = MDIO_DRV_NAME, .of_match_table = hns_mdio_match, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c index 1749d26f4bef..03e42512a2d5 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c @@ -315,136 +315,76 @@ void hinic_devlink_unregister(struct hinic_devlink_priv *priv) devlink_unregister(devlink); } -static int chip_fault_show(struct devlink_fmsg *fmsg, - struct hinic_fault_event *event) +static void chip_fault_show(struct devlink_fmsg *fmsg, + struct hinic_fault_event *event) { const char * const level_str[FAULT_LEVEL_MAX + 1] = { "fatal", "reset", "flr", "general", "suggestion", "Unknown"}; u8 fault_level; - int err; fault_level = (event->event.chip.err_level < FAULT_LEVEL_MAX) ? 
event->event.chip.err_level : FAULT_LEVEL_MAX; - if (fault_level == FAULT_LEVEL_SERIOUS_FLR) { - err = devlink_fmsg_u32_pair_put(fmsg, "Function level err func_id", - (u32)event->event.chip.func_id); - if (err) - return err; - } - - err = devlink_fmsg_u8_pair_put(fmsg, "module_id", event->event.chip.node_id); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "err_type", (u32)event->event.chip.err_type); - if (err) - return err; - - err = devlink_fmsg_string_pair_put(fmsg, "err_level", level_str[fault_level]); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_addr", - event->event.chip.err_csr_addr); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_value", - event->event.chip.err_csr_value); - if (err) - return err; - - return 0; + if (fault_level == FAULT_LEVEL_SERIOUS_FLR) + devlink_fmsg_u32_pair_put(fmsg, "Function level err func_id", + (u32)event->event.chip.func_id); + devlink_fmsg_u8_pair_put(fmsg, "module_id", event->event.chip.node_id); + devlink_fmsg_u32_pair_put(fmsg, "err_type", (u32)event->event.chip.err_type); + devlink_fmsg_string_pair_put(fmsg, "err_level", level_str[fault_level]); + devlink_fmsg_u32_pair_put(fmsg, "err_csr_addr", + event->event.chip.err_csr_addr); + devlink_fmsg_u32_pair_put(fmsg, "err_csr_value", + event->event.chip.err_csr_value); } -static int fault_report_show(struct devlink_fmsg *fmsg, - struct hinic_fault_event *event) +static void fault_report_show(struct devlink_fmsg *fmsg, + struct hinic_fault_event *event) { const char * const type_str[FAULT_TYPE_MAX + 1] = { "chip", "ucode", "mem rd timeout", "mem wr timeout", "reg rd timeout", "reg wr timeout", "phy fault", "Unknown"}; u8 fault_type; - int err; fault_type = (event->type < FAULT_TYPE_MAX) ? 
event->type : FAULT_TYPE_MAX; - err = devlink_fmsg_string_pair_put(fmsg, "Fault type", type_str[fault_type]); - if (err) - return err; - - err = devlink_fmsg_binary_pair_put(fmsg, "Fault raw data", - event->event.val, sizeof(event->event.val)); - if (err) - return err; + devlink_fmsg_string_pair_put(fmsg, "Fault type", type_str[fault_type]); + devlink_fmsg_binary_pair_put(fmsg, "Fault raw data", event->event.val, + sizeof(event->event.val)); switch (event->type) { case FAULT_TYPE_CHIP: - err = chip_fault_show(fmsg, event); - if (err) - return err; + chip_fault_show(fmsg, event); break; case FAULT_TYPE_UCODE: - err = devlink_fmsg_u8_pair_put(fmsg, "Cause_id", event->event.ucode.cause_id); - if (err) - return err; - err = devlink_fmsg_u8_pair_put(fmsg, "core_id", event->event.ucode.core_id); - if (err) - return err; - err = devlink_fmsg_u8_pair_put(fmsg, "c_id", event->event.ucode.c_id); - if (err) - return err; - err = devlink_fmsg_u8_pair_put(fmsg, "epc", event->event.ucode.epc); - if (err) - return err; + devlink_fmsg_u8_pair_put(fmsg, "Cause_id", event->event.ucode.cause_id); + devlink_fmsg_u8_pair_put(fmsg, "core_id", event->event.ucode.core_id); + devlink_fmsg_u8_pair_put(fmsg, "c_id", event->event.ucode.c_id); + devlink_fmsg_u8_pair_put(fmsg, "epc", event->event.ucode.epc); break; case FAULT_TYPE_MEM_RD_TIMEOUT: case FAULT_TYPE_MEM_WR_TIMEOUT: - err = devlink_fmsg_u32_pair_put(fmsg, "Err_csr_ctrl", - event->event.mem_timeout.err_csr_ctrl); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_data", - event->event.mem_timeout.err_csr_data); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "ctrl_tab", - event->event.mem_timeout.ctrl_tab); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "mem_index", - event->event.mem_timeout.mem_index); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "Err_csr_ctrl", + event->event.mem_timeout.err_csr_ctrl); + devlink_fmsg_u32_pair_put(fmsg, "err_csr_data", + event->event.mem_timeout.err_csr_data); + devlink_fmsg_u32_pair_put(fmsg, "ctrl_tab", + event->event.mem_timeout.ctrl_tab); + devlink_fmsg_u32_pair_put(fmsg, "mem_index", + event->event.mem_timeout.mem_index); break; case FAULT_TYPE_REG_RD_TIMEOUT: case FAULT_TYPE_REG_WR_TIMEOUT: - err = devlink_fmsg_u32_pair_put(fmsg, "Err_csr", event->event.reg_timeout.err_csr); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "Err_csr", event->event.reg_timeout.err_csr); break; case FAULT_TYPE_PHY_FAULT: - err = devlink_fmsg_u8_pair_put(fmsg, "Op_type", event->event.phy_fault.op_type); - if (err) - return err; - err = devlink_fmsg_u8_pair_put(fmsg, "port_id", event->event.phy_fault.port_id); - if (err) - return err; - err = devlink_fmsg_u8_pair_put(fmsg, "dev_ad", event->event.phy_fault.dev_ad); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "csr_addr", event->event.phy_fault.csr_addr); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "op_data", event->event.phy_fault.op_data); - if (err) - return err; + devlink_fmsg_u8_pair_put(fmsg, "Op_type", event->event.phy_fault.op_type); + devlink_fmsg_u8_pair_put(fmsg, "port_id", event->event.phy_fault.port_id); + devlink_fmsg_u8_pair_put(fmsg, "dev_ad", event->event.phy_fault.dev_ad); + devlink_fmsg_u32_pair_put(fmsg, "csr_addr", event->event.phy_fault.csr_addr); + devlink_fmsg_u32_pair_put(fmsg, "op_data", event->event.phy_fault.op_data); break; default: break; } - - return 0; } static int hinic_hw_reporter_dump(struct devlink_health_reporter *reporter, @@ 
-452,75 +392,30 @@ static int hinic_hw_reporter_dump(struct devlink_health_reporter *reporter, struct netlink_ext_ack *extack) { if (priv_ctx) - return fault_report_show(fmsg, priv_ctx); + fault_report_show(fmsg, priv_ctx); return 0; } -static int mgmt_watchdog_report_show(struct devlink_fmsg *fmsg, - struct hinic_mgmt_watchdog_info *watchdog_info) +static void mgmt_watchdog_report_show(struct devlink_fmsg *fmsg, + struct hinic_mgmt_watchdog_info *winfo) { - int err; - - err = devlink_fmsg_u32_pair_put(fmsg, "Mgmt deadloop time_h", watchdog_info->curr_time_h); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "time_l", watchdog_info->curr_time_l); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "task_id", watchdog_info->task_id); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "sp", watchdog_info->sp); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "stack_current_used", watchdog_info->curr_used); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "peak_used", watchdog_info->peak_used); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "\n Overflow_flag", watchdog_info->is_overflow); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "stack_top", watchdog_info->stack_top); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "stack_bottom", watchdog_info->stack_bottom); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "mgmt_pc", watchdog_info->pc); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "lr", watchdog_info->lr); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "cpsr", watchdog_info->cpsr); - if (err) - return err; - - err = devlink_fmsg_binary_pair_put(fmsg, "Mgmt register info", - watchdog_info->reg, sizeof(watchdog_info->reg)); - if (err) - return err; - - err = devlink_fmsg_binary_pair_put(fmsg, "Mgmt dump stack(start from sp)", - watchdog_info->data, sizeof(watchdog_info->data)); - if (err) - return err; - - return 0; + devlink_fmsg_u32_pair_put(fmsg, "Mgmt deadloop time_h", winfo->curr_time_h); + devlink_fmsg_u32_pair_put(fmsg, "time_l", winfo->curr_time_l); + devlink_fmsg_u32_pair_put(fmsg, "task_id", winfo->task_id); + devlink_fmsg_u32_pair_put(fmsg, "sp", winfo->sp); + devlink_fmsg_u32_pair_put(fmsg, "stack_current_used", winfo->curr_used); + devlink_fmsg_u32_pair_put(fmsg, "peak_used", winfo->peak_used); + devlink_fmsg_u32_pair_put(fmsg, "\n Overflow_flag", winfo->is_overflow); + devlink_fmsg_u32_pair_put(fmsg, "stack_top", winfo->stack_top); + devlink_fmsg_u32_pair_put(fmsg, "stack_bottom", winfo->stack_bottom); + devlink_fmsg_u32_pair_put(fmsg, "mgmt_pc", winfo->pc); + devlink_fmsg_u32_pair_put(fmsg, "lr", winfo->lr); + devlink_fmsg_u32_pair_put(fmsg, "cpsr", winfo->cpsr); + devlink_fmsg_binary_pair_put(fmsg, "Mgmt register info", winfo->reg, + sizeof(winfo->reg)); + devlink_fmsg_binary_pair_put(fmsg, "Mgmt dump stack(start from sp)", + winfo->data, sizeof(winfo->data)); } static int hinic_fw_reporter_dump(struct devlink_health_reporter *reporter, @@ -528,7 +423,7 @@ static int hinic_fw_reporter_dump(struct devlink_health_reporter *reporter, struct netlink_ext_ack *extack) { if (priv_ctx) - return mgmt_watchdog_report_show(fmsg, priv_ctx); + mgmt_watchdog_report_show(fmsg, priv_ctx); return 0; } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c index 9406237c461e..f81a43d2cdfc 100644 --- 
a/drivers/net/ethernet/huawei/hinic/hinic_port.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c @@ -456,9 +456,6 @@ int hinic_set_vlan_fliter(struct hinic_dev *nic_dev, u32 en) u16 out_size = sizeof(vlan_filter); int err; - if (!hwdev) - return -EINVAL; - vlan_filter.func_idx = HINIC_HWIF_FUNC_IDX(hwif); vlan_filter.enable = en; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index ad47ac51a139..9b60966736db 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -861,7 +861,7 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq); struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_hwdev *hwdev = nic_dev->hwdev; - int err, irqname_len; + int err; txq->netdev = netdev; txq->sq = sq; @@ -882,15 +882,13 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, goto err_alloc_free_sges; } - irqname_len = snprintf(NULL, 0, "%s_txq%d", netdev->name, qp->q_id) + 1; - txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL); + txq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL, "%s_txq%d", + netdev->name, qp->q_id); if (!txq->irq_name) { err = -ENOMEM; goto err_alloc_irqname; } - sprintf(txq->irq_name, "%s_txq%d", netdev->name, qp->q_id); - err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING, CI_UPDATE_NO_COALESC); if (err) diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c index 54bb4d9a0d1e..813403c2628f 100644 --- a/drivers/net/ethernet/i825xx/sni_82596.c +++ b/drivers/net/ethernet/i825xx/sni_82596.c @@ -153,7 +153,7 @@ probe_failed_free_mpu: return retval; } -static int sni_82596_driver_remove(struct platform_device *pdev) +static void sni_82596_driver_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct i596_private *lp = netdev_priv(dev); @@ -164,12 +164,11 @@ static int sni_82596_driver_remove(struct platform_device *pdev) iounmap(lp->ca); iounmap(lp->mpu_port); free_netdev (dev); - return 0; } static struct platform_driver sni_82596_driver = { .probe = sni_82596_probe, - .remove = sni_82596_driver_remove, + .remove_new = sni_82596_driver_remove, .driver = { .name = sni_82596_string, }, diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index b4aff59b3eb4..1e29e5c9a2df 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -31,6 +31,7 @@ #include <linux/prefetch.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/platform_device.h> #include <net/ip.h> @@ -89,7 +90,7 @@ static struct ehea_bcmc_reg_array ehea_bcmc_regs; static int ehea_probe_adapter(struct platform_device *dev); -static int ehea_remove(struct platform_device *dev); +static void ehea_remove(struct platform_device *dev); static const struct of_device_id ehea_module_device_table[] = { { @@ -120,7 +121,7 @@ static struct platform_driver ehea_driver = { .of_match_table = ehea_device_table, }, .probe = ehea_probe_adapter, - .remove = ehea_remove, + .remove_new = ehea_remove, }; void ehea_dump(void *adr, int len, char *msg) @@ -899,7 +900,7 @@ static int ehea_poll(struct napi_struct *napi, int budget) if (!cqe && !cqe_skb) return rx; - if (!napi_reschedule(napi)) + if (!napi_schedule(napi)) return rx; cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); @@ -3470,7 +3471,7 @@ out: return ret; } 
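
The hns_mdio and sni_82596 hunks above, and the ehea/emac ones below, are part of the tree-wide move of platform drivers from .remove (returning int) to .remove_new (returning void): the driver core always ignored the returned error, so the void prototype only removes the temptation to report one. A minimal skeleton of the converted shape, with a hypothetical driver name:

        #include <linux/module.h>
        #include <linux/platform_device.h>

        static int demo_probe(struct platform_device *pdev)
        {
                /* acquire resources, register the device, etc. */
                return 0;
        }

        /* void return: nothing useful can be done with an error at remove
         * time, and the core discarded the old int return value anyway */
        static void demo_remove(struct platform_device *pdev)
        {
                /* release everything unconditionally */
        }

        static struct platform_driver demo_driver = {
                .probe          = demo_probe,
                .remove_new     = demo_remove,
                .driver = {
                        .name = "demo",
                },
        };
        module_platform_driver(demo_driver);
        MODULE_LICENSE("GPL");
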
-static int ehea_remove(struct platform_device *dev) +static void ehea_remove(struct platform_device *dev) { struct ehea_adapter *adapter = platform_get_drvdata(dev); int i; @@ -3491,8 +3492,6 @@ static int ehea_remove(struct platform_device *dev) list_del(&adapter->list); ehea_update_firmware_handles(); - - return 0; } static int check_module_parm(void) diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index c97095abd26a..e6e47b1842ea 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -38,6 +38,7 @@ #include <linux/of_irq.h> #include <linux/of_net.h> #include <linux/of_mdio.h> +#include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/slab.h> @@ -3252,7 +3253,7 @@ static int emac_probe(struct platform_device *ofdev) return err; } -static int emac_remove(struct platform_device *ofdev) +static void emac_remove(struct platform_device *ofdev) { struct emac_instance *dev = platform_get_drvdata(ofdev); @@ -3289,8 +3290,6 @@ static int emac_remove(struct platform_device *ofdev) irq_dispose_mapping(dev->emac_irq); free_netdev(dev->ndev); - - return 0; } /* XXX Features in here should be replaced by properties... */ @@ -3318,7 +3317,7 @@ static struct platform_driver emac_driver = { .of_match_table = emac_match, }, .probe = emac_probe, - .remove = emac_remove, + .remove_new = emac_remove, }; static void __init emac_make_bootlist(void) diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h index 89a1b0fea158..295516b07662 100644 --- a/drivers/net/ethernet/ibm/emac/core.h +++ b/drivers/net/ethernet/ibm/emac/core.h @@ -27,7 +27,6 @@ #include <linux/netdevice.h> #include <linux/dma-mapping.h> #include <linux/spinlock.h> -#include <linux/of_platform.h> #include <linux/slab.h> #include <asm/io.h> diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index ff5487bbebe3..2439f7e96e05 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -22,7 +22,9 @@ #include <linux/delay.h> #include <linux/slab.h> +#include <linux/of.h> #include <linux/of_irq.h> +#include <linux/platform_device.h> #include "core.h" #include <asm/dcr-regs.h> @@ -440,7 +442,7 @@ static int mal_poll(struct napi_struct *napi, int budget) if (unlikely(mc->ops->peek_rx(mc->dev) || test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) { MAL_DBG2(mal, "rotting packet" NL); - if (!napi_reschedule(napi)) + if (!napi_schedule(napi)) goto more_work; spin_lock_irqsave(&mal->lock, flags); @@ -709,7 +711,7 @@ static int mal_probe(struct platform_device *ofdev) return err; } -static int mal_remove(struct platform_device *ofdev) +static void mal_remove(struct platform_device *ofdev) { struct mal_instance *mal = platform_get_drvdata(ofdev); @@ -738,8 +740,6 @@ static int mal_remove(struct platform_device *ofdev) NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt, mal->bd_dma); kfree(mal); - - return 0; } static const struct of_device_id mal_platform_match[] = @@ -768,7 +768,7 @@ static struct platform_driver mal_of_driver = { .of_match_table = mal_platform_match, }, .probe = mal_probe, - .remove = mal_remove, + .remove_new = mal_remove, }; int __init mal_init(void) diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c index 50358cf00130..e1712fdc3c31 100644 --- a/drivers/net/ethernet/ibm/emac/rgmii.c +++ b/drivers/net/ethernet/ibm/emac/rgmii.c @@ -19,7 +19,9 @@ #include <linux/slab.h> #include <linux/kernel.h> #include 
<linux/ethtool.h> +#include <linux/of.h> #include <linux/of_address.h> +#include <linux/platform_device.h> #include <asm/io.h> #include "emac.h" @@ -271,7 +273,7 @@ static int rgmii_probe(struct platform_device *ofdev) return rc; } -static int rgmii_remove(struct platform_device *ofdev) +static void rgmii_remove(struct platform_device *ofdev) { struct rgmii_instance *dev = platform_get_drvdata(ofdev); @@ -279,8 +281,6 @@ static int rgmii_remove(struct platform_device *ofdev) iounmap(dev->base); kfree(dev); - - return 0; } static const struct of_device_id rgmii_match[] = @@ -300,7 +300,7 @@ static struct platform_driver rgmii_driver = { .of_match_table = rgmii_match, }, .probe = rgmii_probe, - .remove = rgmii_remove, + .remove_new = rgmii_remove, }; int __init rgmii_init(void) diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c index 008bbdaf1204..fa3488258ca2 100644 --- a/drivers/net/ethernet/ibm/emac/tah.c +++ b/drivers/net/ethernet/ibm/emac/tah.c @@ -14,7 +14,9 @@ * * Copyright (c) 2005 Eugene Surovegin <ebs@ebshome.net> */ +#include <linux/mod_devicetable.h> #include <linux/of_address.h> +#include <linux/platform_device.h> #include <asm/io.h> #include "emac.h" @@ -128,7 +130,7 @@ static int tah_probe(struct platform_device *ofdev) return rc; } -static int tah_remove(struct platform_device *ofdev) +static void tah_remove(struct platform_device *ofdev) { struct tah_instance *dev = platform_get_drvdata(ofdev); @@ -136,8 +138,6 @@ static int tah_remove(struct platform_device *ofdev) iounmap(dev->base); kfree(dev); - - return 0; } static const struct of_device_id tah_match[] = @@ -158,7 +158,7 @@ static struct platform_driver tah_driver = { .of_match_table = tah_match, }, .probe = tah_probe, - .remove = tah_remove, + .remove_new = tah_remove, }; int __init tah_init(void) diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c index 57a25c7a9e70..26e86cdee2f6 100644 --- a/drivers/net/ethernet/ibm/emac/zmii.c +++ b/drivers/net/ethernet/ibm/emac/zmii.c @@ -19,7 +19,9 @@ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/ethtool.h> +#include <linux/mod_devicetable.h> #include <linux/of_address.h> +#include <linux/platform_device.h> #include <asm/io.h> #include "emac.h" @@ -276,7 +278,7 @@ static int zmii_probe(struct platform_device *ofdev) return rc; } -static int zmii_remove(struct platform_device *ofdev) +static void zmii_remove(struct platform_device *ofdev) { struct zmii_instance *dev = platform_get_drvdata(ofdev); @@ -284,8 +286,6 @@ static int zmii_remove(struct platform_device *ofdev) iounmap(dev->base); kfree(dev); - - return 0; } static const struct of_device_id zmii_match[] = @@ -306,7 +306,7 @@ static struct platform_driver zmii_driver = { .of_match_table = zmii_match, }, .probe = zmii_probe, - .remove = zmii_remove, + .remove_new = zmii_remove, }; int __init zmii_init(void) diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 832a2ae01950..b5aef0b29efe 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1303,24 +1303,23 @@ static void ibmveth_rx_csum_helper(struct sk_buff *skb, * the user space for finding a flow. During this process, OVS computes * checksum on the first packet when CHECKSUM_PARTIAL flag is set. * - * So, re-compute TCP pseudo header checksum when configured for - * trunk mode. + * So, re-compute TCP pseudo header checksum. 
*/ + if (iph_proto == IPPROTO_TCP) { struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen); + if (tcph->check == 0x0000) { /* Recompute TCP pseudo header checksum */ - if (adapter->is_active_trunk) { - tcphdrlen = skb->len - iphlen; - if (skb_proto == ETH_P_IP) - tcph->check = - ~csum_tcpudp_magic(iph->saddr, - iph->daddr, tcphdrlen, iph_proto, 0); - else if (skb_proto == ETH_P_IPV6) - tcph->check = - ~csum_ipv6_magic(&iph6->saddr, - &iph6->daddr, tcphdrlen, iph_proto, 0); - } + tcphdrlen = skb->len - iphlen; + if (skb_proto == ETH_P_IP) + tcph->check = + ~csum_tcpudp_magic(iph->saddr, + iph->daddr, tcphdrlen, iph_proto, 0); + else if (skb_proto == ETH_P_IPV6) + tcph->check = + ~csum_ipv6_magic(&iph6->saddr, + &iph6->daddr, tcphdrlen, iph_proto, 0); /* Setup SKB fields for checksum offload */ skb_partial_csum_set(skb, iphlen, offsetof(struct tcphdr, check)); @@ -1433,7 +1432,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) BUG_ON(lpar_rc != H_SUCCESS); if (ibmveth_rxq_pending_buffer(adapter) && - napi_reschedule(napi)) { + napi_schedule(napi)) { lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); } diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index df76cdaddcfb..30c47b8470ad 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -194,9 +194,8 @@ static void ibmvnic_clean_affinity(struct ibmvnic_adapter *adapter) struct ibmvnic_sub_crq_queue **rxqs; struct ibmvnic_sub_crq_queue **txqs; int num_rxqs, num_txqs; - int rc, i; + int i; - rc = 0; rxqs = adapter->rx_scrq; txqs = adapter->tx_scrq; num_txqs = adapter->num_active_tx_scrqs; @@ -3520,7 +3519,7 @@ restart_poll: if (napi_complete_done(napi, frames_processed)) { enable_scrq_irq(adapter, rx_scrq); if (pending_scrq(adapter, rx_scrq)) { - if (napi_reschedule(napi)) { + if (napi_schedule(napi)) { disable_scrq_irq(adapter, rx_scrq); goto restart_poll; } @@ -5248,7 +5247,8 @@ static void handle_vpd_rsp(union ibmvnic_crq *crq, /* copy firmware version string from vpd into adapter */ if ((substr + 3 + fw_level_len) < (adapter->vpd->buff + adapter->vpd->len)) { - strncpy((char *)adapter->fw_version, substr + 3, fw_level_len); + strscpy(adapter->fw_version, substr + 3, + sizeof(adapter->fw_version)); } else { dev_info(dev, "FW substr extrapolated VPD buff\n"); } diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 9bc0a9519899..06ddd7147c7f 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -225,6 +225,7 @@ config I40E depends on PTP_1588_CLOCK_OPTIONAL depends on PCI select AUXILIARY_BUS + select NET_DEVLINK help This driver supports Intel(R) Ethernet Controller XL710 Family of devices. For more information on how to identify your adapter, go @@ -284,6 +285,7 @@ config ICE select DIMLIB select NET_DEVLINK select PLDMFW + select DPLL help This driver supports Intel(R) Ethernet Connection E800 Series of devices. For more information on how to identify your adapter, go @@ -355,5 +357,17 @@ config IGC To compile this driver as a module, choose M here. The module will be called igc. +config IDPF + tristate "Intel(R) Infrastructure Data Path Function Support" + depends on PCI_MSI + select DIMLIB + select PAGE_POOL + select PAGE_POOL_STATS + help + This driver supports Intel(R) Infrastructure Data Path Function + devices. + + To compile this driver as a module, choose M here. The module + will be called idpf. 
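
The ibmvnic hunk above and the e100/e1000/fm10k hunks that follow swap strncpy() for strscpy(): strncpy() does not guarantee NUL termination when the source fills the buffer (and zero-pads short copies), whereas strscpy() always terminates the destination and reports truncation. A user-space model of the contract these call sites rely on; strscpy() itself is a kernel string helper, and my_strscpy() here is only an illustration:

        #include <stddef.h>
        #include <string.h>

        #define E2BIG 7

        /* illustrative model of the kernel strscpy() contract: always
         * NUL-terminates (for size > 0) and returns -E2BIG on truncation */
        static long my_strscpy(char *dst, const char *src, size_t size)
        {
                size_t len;

                if (size == 0)
                        return -E2BIG;

                len = strnlen(src, size);
                if (len == size) {              /* source does not fit: truncate */
                        len = size - 1;
                        memcpy(dst, src, len);
                        dst[len] = '\0';
                        return -E2BIG;
                }
                memcpy(dst, src, len + 1);      /* copy includes the NUL */
                return (long)len;
        }
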
endif # NET_VENDOR_INTEL diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile index d80d04132073..dacb481ee5b1 100644 --- a/drivers/net/ethernet/intel/Makefile +++ b/drivers/net/ethernet/intel/Makefile @@ -15,3 +15,4 @@ obj-$(CONFIG_I40E) += i40e/ obj-$(CONFIG_IAVF) += iavf/ obj-$(CONFIG_FM10K) += fm10k/ obj-$(CONFIG_ICE) += ice/ +obj-$(CONFIG_IDPF) += idpf/ diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index d3fdc290937f..01f0f12035ca 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2841,7 +2841,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->netdev_ops = &e100_netdev_ops; netdev->ethtool_ops = &e100_ethtool_ops; netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; - strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); nic = netdev_priv(netdev); netif_napi_add_weight(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index 4817eb13ca6f..75f3fd1d8d6e 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h @@ -347,6 +347,5 @@ bool e1000_has_link(struct e1000_adapter *adapter); void e1000_power_up_phy(struct e1000_adapter *); void e1000_set_ethtool_ops(struct net_device *netdev); void e1000_check_options(struct e1000_adapter *adapter); -char *e1000_get_hw_dev_name(struct e1000_hw *hw); #endif /* _E1000_H_ */ diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h index b57a04954ccf..95cdd17134e5 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.h +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h @@ -343,7 +343,6 @@ struct e1000_host_mng_dhcp_cookie { }; #endif -bool e1000_check_mng_mode(struct e1000_hw *hw); s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data); s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw); s32 e1000_update_eeprom_checksum(struct e1000_hw *hw); @@ -352,7 +351,6 @@ s32 e1000_read_mac_addr(struct e1000_hw *hw); /* Filters (multicast, vlan, receive) */ u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr); -void e1000_mta_set(struct e1000_hw *hw, u32 hash_value); void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index); void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); @@ -361,7 +359,6 @@ s32 e1000_setup_led(struct e1000_hw *hw); s32 e1000_cleanup_led(struct e1000_hw *hw); s32 e1000_led_on(struct e1000_hw *hw); s32 e1000_led_off(struct e1000_hw *hw); -s32 e1000_blink_led_start(struct e1000_hw *hw); /* Adaptive IFS Functions */ diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index da6e303ad99b..1d1e93686af2 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -1014,7 +1014,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->watchdog_timeo = 5 * HZ; netif_napi_add(netdev, &adapter->napi, e1000_clean); - strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); adapter->bd_number = cards_found; diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 721f86fd5802..9835e6a90d56 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c 
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -917,6 +917,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: + case e1000_pch_nvp: mask |= BIT(18); break; default: @@ -1585,6 +1586,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter) case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: + case e1000_pch_nvp: fext_nvm11 = er32(FEXTNVM11); fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX; ew32(FEXTNVM11, fext_nvm11); diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 29f9fae35f42..1fef6bb5a5fb 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -122,6 +122,8 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_PTP_I219_V26 0x57B6 #define E1000_DEV_ID_PCH_PTP_I219_LM27 0x57B7 #define E1000_DEV_ID_PCH_PTP_I219_V27 0x57B8 +#define E1000_DEV_ID_PCH_NVL_I219_LM29 0x57B9 +#define E1000_DEV_ID_PCH_NVL_I219_V29 0x57BA #define E1000_REVISION_4 4 @@ -150,6 +152,7 @@ enum e1000_mac_type { e1000_pch_mtp, e1000_pch_lnp, e1000_pch_ptp, + e1000_pch_nvp, }; enum e1000_media_type { diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 0c7fd10312c8..39e9fc601bf5 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -323,6 +323,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: + case e1000_pch_nvp: if (e1000_phy_is_accessible_pchlan(hw)) break; @@ -470,6 +471,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: + case e1000_pch_nvp: /* In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again. 
*/ @@ -717,6 +719,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: + case e1000_pch_nvp: case e1000_pchlan: /* check management mode */ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; @@ -1685,6 +1688,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: + case e1000_pch_nvp: rc = e1000_init_phy_params_pchlan(hw); break; default: @@ -2142,6 +2146,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: + case e1000_pch_nvp: sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; break; default: @@ -3188,6 +3193,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: + case e1000_pch_nvp: bank1_offset = nvm->flash_bank_size; act_offset = E1000_ICH_NVM_SIG_WORD; @@ -4129,6 +4135,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: + case e1000_pch_nvp: word = NVM_COMPAT; valid_csum_mask = NVM_COMPAT_VALID_CSUM; break; diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h index 6ab261119801..563176fd436e 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.h +++ b/drivers/net/ethernet/intel/e1000e/mac.h @@ -29,8 +29,6 @@ s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw); s32 e1000e_setup_led_generic(struct e1000_hw *hw); s32 e1000e_setup_link_generic(struct e1000_hw *hw); -s32 e1000e_validate_mdi_setting_generic(struct e1000_hw *hw); -s32 e1000e_validate_mdi_setting_crossover_generic(struct e1000_hw *hw); void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw); void e1000_clear_vfta_generic(struct e1000_hw *hw); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 771a3c909c45..f536c856727c 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3545,6 +3545,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: + case e1000_pch_nvp: if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { /* Stable 24MHz frequency */ incperiod = INCPERIOD_24MHZ; @@ -4061,6 +4062,7 @@ void e1000e_reset(struct e1000_adapter *adapter) case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: + case e1000_pch_nvp: fc->refresh_time = 0xFFFF; fc->pause_time = 0xFFFF; @@ -7021,6 +7023,8 @@ static __maybe_unused int e1000e_pm_runtime_resume(struct device *dev) struct e1000_adapter *adapter = netdev_priv(netdev); int rc; + pdev->pme_poll = true; + rc = __e1000_resume(pdev); if (rc) return rc; @@ -7682,7 +7686,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE); - if (pci_dev_run_wake(pdev) && hw->mac.type != e1000_pch_cnp) + if (pci_dev_run_wake(pdev)) pm_runtime_put_noidle(&pdev->dev); return 0; @@ -7911,6 +7915,8 @@ static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_V26), board_pch_mtp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_LM27), board_pch_mtp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_V27), board_pch_mtp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_NVL_I219_LM29), board_pch_mtp }, + { PCI_VDEVICE(INTEL, 
E1000_DEV_ID_PCH_NVL_I219_V29), board_pch_mtp }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index def4566a916f..02d871bc112a 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -288,6 +288,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) case e1000_pch_mtp: case e1000_pch_lnp: case e1000_pch_ptp: + case e1000_pch_nvp: if ((hw->mac.type < e1000_pch_lpt) || (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { adapter->ptp_clock_info.max_adj = 24000000 - 1; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index d53369e30040..13a05604dcc0 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -448,10 +448,10 @@ static void fm10k_get_drvinfo(struct net_device *dev, { struct fm10k_intfc *interface = netdev_priv(dev); - strncpy(info->driver, fm10k_driver_name, - sizeof(info->driver) - 1); - strncpy(info->bus_info, pci_name(interface->pdev), - sizeof(info->bus_info) - 1); + strscpy(info->driver, fm10k_driver_name, + sizeof(info->driver)); + strscpy(info->bus_info, pci_name(interface->pdev), + sizeof(info->bus_info)); } static void fm10k_get_pauseparam(struct net_device *dev, diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index 2f21b3e89fd0..cad93f323bd5 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile +++ b/drivers/net/ethernet/intel/i40e/Makefile @@ -24,6 +24,7 @@ i40e-objs := i40e_main.o \ i40e_ddp.o \ i40e_client.o \ i40e_virtchnl_pf.o \ - i40e_xsk.o + i40e_xsk.o \ + i40e_devlink.o i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 6e310a539467..1bf424ac3145 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -4,47 +4,21 @@ #ifndef _I40E_H_ #define _I40E_H_ -#include <net/tcp.h> -#include <net/udp.h> -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/module.h> #include <linux/pci.h> -#include <linux/netdevice.h> -#include <linux/ioport.h> -#include <linux/iommu.h> -#include <linux/slab.h> -#include <linux/list.h> -#include <linux/hashtable.h> -#include <linux/string.h> -#include <linux/in.h> -#include <linux/ip.h> -#include <linux/sctp.h> -#include <linux/pkt_sched.h> -#include <linux/ipv6.h> -#include <net/checksum.h> -#include <net/ip6_checksum.h> -#include <linux/ethtool.h> -#include <linux/if_vlan.h> -#include <linux/if_macvlan.h> -#include <linux/if_bridge.h> -#include <linux/clocksource.h> -#include <linux/net_tstamp.h> #include <linux/ptp_clock_kernel.h> +#include <linux/types.h> +#include <linux/avf/virtchnl.h> +#include <linux/net/intel/i40e_client.h> +#include <net/devlink.h> #include <net/pkt_cls.h> -#include <net/pkt_sched.h> -#include <net/tc_act/tc_gact.h> -#include <net/tc_act/tc_mirred.h> #include <net/udp_tunnel.h> -#include <net/xdp_sock.h> -#include <linux/bitfield.h> -#include "i40e_type.h" +#include "i40e_dcb.h" +#include "i40e_debug.h" +#include "i40e_devlink.h" +#include "i40e_io.h" #include "i40e_prototype.h" -#include <linux/net/intel/i40e_client.h> -#include <linux/avf/virtchnl.h> -#include "i40e_virtchnl_pf.h" +#include "i40e_register.h" #include "i40e_txrx.h" -#include "i40e_dcb.h" /* Useful i40e defaults */ #define I40E_MAX_VEB 16 @@ -75,23 +49,19 @@ #define 
I40E_QUEUE_WAIT_RETRY_LIMIT 10 #define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16) -#define I40E_NVM_VERSION_LO_SHIFT 0 -#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) -#define I40E_NVM_VERSION_HI_SHIFT 12 -#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT) -#define I40E_OEM_VER_BUILD_MASK 0xffff -#define I40E_OEM_VER_PATCH_MASK 0xff -#define I40E_OEM_VER_BUILD_SHIFT 8 -#define I40E_OEM_VER_SHIFT 24 #define I40E_PHY_DEBUG_ALL \ (I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW | \ I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW) #define I40E_OEM_EETRACK_ID 0xffffffff -#define I40E_OEM_GEN_SHIFT 24 -#define I40E_OEM_SNAP_MASK 0x00ff0000 -#define I40E_OEM_SNAP_SHIFT 16 -#define I40E_OEM_RELEASE_MASK 0x0000ffff +#define I40E_NVM_VERSION_LO_MASK GENMASK(7, 0) +#define I40E_NVM_VERSION_HI_MASK GENMASK(15, 12) +#define I40E_OEM_VER_BUILD_MASK GENMASK(23, 8) +#define I40E_OEM_VER_PATCH_MASK GENMASK(7, 0) +#define I40E_OEM_VER_MASK GENMASK(31, 24) +#define I40E_OEM_GEN_MASK GENMASK(31, 24) +#define I40E_OEM_SNAP_MASK GENMASK(23, 16) +#define I40E_OEM_RELEASE_MASK GENMASK(15, 0) #define I40E_RX_DESC(R, i) \ (&(((union i40e_rx_desc *)((R)->desc))[i])) @@ -323,29 +293,6 @@ struct i40e_udp_port_config { u8 filter_index; }; -#define I40_DDP_FLASH_REGION 100 -#define I40E_PROFILE_INFO_SIZE 48 -#define I40E_MAX_PROFILE_NUM 16 -#define I40E_PROFILE_LIST_SIZE \ - (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4) -#define I40E_DDP_PROFILE_PATH "intel/i40e/ddp/" -#define I40E_DDP_PROFILE_NAME_MAX 64 - -int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size, - bool is_add); -int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash); - -struct i40e_ddp_profile_list { - u32 p_count; - struct i40e_profile_info p_info[]; -}; - -struct i40e_ddp_old_profile_list { - struct list_head list; - size_t old_ddp_size; - u8 old_ddp_buf[]; -}; - /* macros related to FLX_PIT */ #define I40E_FLEX_SET_FSIZE(fsize) (((fsize) << \ I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \ @@ -462,6 +409,7 @@ static inline const u8 *i40e_channel_mac(struct i40e_channel *ch) /* struct that defines the Ethernet device */ struct i40e_pf { struct pci_dev *pdev; + struct devlink_port devlink_port; struct i40e_hw hw; DECLARE_BITMAP(state, __I40E_STATE_SIZE__); struct msix_entry *msix_entries; @@ -580,7 +528,6 @@ struct i40e_pf { #define I40E_FLAG_DISABLE_FW_LLDP BIT(24) #define I40E_FLAG_RS_FEC BIT(25) #define I40E_FLAG_BASE_R_FEC BIT(26) -#define I40E_FLAG_VF_VLAN_PRUNING BIT(27) /* TOTAL_PORT_SHUTDOWN * Allows to physically disable the link on the NIC's port. 
 * If enabled, (after link down request from the OS)
@@ -603,6 +550,7 @@ struct i40e_pf {
	 * in abilities field of i40e_aq_set_phy_config structure
	 */
 #define I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED	BIT(27)
+#define I40E_FLAG_VF_VLAN_PRUNING		BIT(28)
 
 	struct i40e_client_instance *cinst;
 	bool stat_offsets_loaded;
@@ -1002,43 +950,104 @@ struct i40e_device {
 };
 
 /**
- * i40e_nvm_version_str - format the NVM version strings
+ * i40e_info_nvm_ver - format the NVM version string
  * @hw: ptr to the hardware info
+ * @buf: string buffer to store
+ * @len: buffer size
+ *
+ * Formats NVM version string as:
+ * <gen>.<snap>.<release> when eetrackid == I40E_OEM_EETRACK_ID
+ * <nvm_major>.<nvm_minor> otherwise
  **/
-static inline char *i40e_nvm_version_str(struct i40e_hw *hw)
+static inline void i40e_info_nvm_ver(struct i40e_hw *hw, char *buf, size_t len)
 {
-	static char buf[32];
-	u32 full_ver;
+	struct i40e_nvm_info *nvm = &hw->nvm;
 
-	full_ver = hw->nvm.oem_ver;
-
-	if (hw->nvm.eetrack == I40E_OEM_EETRACK_ID) {
+	if (nvm->eetrack == I40E_OEM_EETRACK_ID) {
+		u32 full_ver = nvm->oem_ver;
 		u8 gen, snap;
 		u16 release;
 
-		gen = (u8)(full_ver >> I40E_OEM_GEN_SHIFT);
-		snap = (u8)((full_ver & I40E_OEM_SNAP_MASK) >>
-			I40E_OEM_SNAP_SHIFT);
-		release = (u16)(full_ver & I40E_OEM_RELEASE_MASK);
-
-		snprintf(buf, sizeof(buf), "%x.%x.%x", gen, snap, release);
+		gen = FIELD_GET(I40E_OEM_GEN_MASK, full_ver);
+		snap = FIELD_GET(I40E_OEM_SNAP_MASK, full_ver);
+		release = FIELD_GET(I40E_OEM_RELEASE_MASK, full_ver);
+		snprintf(buf, len, "%x.%x.%x", gen, snap, release);
 	} else {
-		u8 ver, patch;
+		u8 major, minor;
+
+		major = FIELD_GET(I40E_NVM_VERSION_HI_MASK, nvm->version);
+		minor = FIELD_GET(I40E_NVM_VERSION_LO_MASK, nvm->version);
+		snprintf(buf, len, "%x.%02x", major, minor);
+	}
+}
+
+/**
+ * i40e_info_eetrack - format the EETrackID string
+ * @hw: ptr to the hardware info
+ * @buf: string buffer to store
+ * @len: buffer size
+ *
+ * Returns hexadecimally formatted EETrackID if it is
+ * different from I40E_OEM_EETRACK_ID, or an empty string.
+ **/
+static inline void i40e_info_eetrack(struct i40e_hw *hw, char *buf, size_t len)
+{
+	struct i40e_nvm_info *nvm = &hw->nvm;
+
+	buf[0] = '\0';
+	if (nvm->eetrack != I40E_OEM_EETRACK_ID)
+		snprintf(buf, len, "0x%08x", nvm->eetrack);
+}
+
+/**
+ * i40e_info_civd_ver - format the combo image version string
+ * @hw: ptr to the hardware info
+ * @buf: string buffer to store
+ * @len: buffer size
+ *
+ * Returns formatted combo image version if adapter's EETrackID is
+ * different from I40E_OEM_EETRACK_ID, or an empty string.
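
The mask hunks above replace open-coded shift/mask constant pairs with GENMASK() bit ranges and FIELD_GET() extraction, so each field is described once by its bit span and the shift is derived from the mask. A minimal standalone sketch of that idiom; the local macro copies (32-bit only) are simplified stand-ins for the kernel's <linux/bits.h>/<linux/bitfield.h> versions:

/*
 * Standalone sketch of the GENMASK()/FIELD_GET() idiom used in the
 * I40E_OEM_* hunks above. These simplified local copies exist only
 * so the example compiles on its own.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))
/* dividing by the mask's lowest set bit shifts the field into place */
#define FIELD_GET(mask, val)	(((val) & (mask)) / ((mask) & -(mask)))

#define OEM_GEN_MASK		GENMASK(31, 24)
#define OEM_SNAP_MASK		GENMASK(23, 16)
#define OEM_RELEASE_MASK	GENMASK(15, 0)

int main(void)
{
	uint32_t oem_ver = 0x01020304;	/* hypothetical NVM OEM word */

	printf("%x.%x.%x\n",
	       FIELD_GET(OEM_GEN_MASK, oem_ver),	/* 1     */
	       FIELD_GET(OEM_SNAP_MASK, oem_ver),	/* 2     */
	       FIELD_GET(OEM_RELEASE_MASK, oem_ver));	/* 0x304 */
	return 0;
}

The payoff is that the mask and its implied shift can never drift apart, which is exactly the bug class the old *_SHIFT/*_MASK pairs invited.
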
+ **/ +static inline void i40e_info_civd_ver(struct i40e_hw *hw, char *buf, size_t len) +{ + struct i40e_nvm_info *nvm = &hw->nvm; + + buf[0] = '\0'; + if (nvm->eetrack != I40E_OEM_EETRACK_ID) { + u32 full_ver = nvm->oem_ver; + u8 major, minor; u16 build; - ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT); - build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT) & - I40E_OEM_VER_BUILD_MASK); - patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK); - - snprintf(buf, sizeof(buf), - "%x.%02x 0x%x %d.%d.%d", - (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >> - I40E_NVM_VERSION_HI_SHIFT, - (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >> - I40E_NVM_VERSION_LO_SHIFT, - hw->nvm.eetrack, ver, build, patch); + major = FIELD_GET(I40E_OEM_VER_MASK, full_ver); + build = FIELD_GET(I40E_OEM_VER_BUILD_MASK, full_ver); + minor = FIELD_GET(I40E_OEM_VER_PATCH_MASK, full_ver); + snprintf(buf, len, "%d.%d.%d", major, build, minor); } +} + +/** + * i40e_nvm_version_str - format the NVM version strings + * @hw: ptr to the hardware info + * @buf: string buffer to store + * @len: buffer size + **/ +static inline char *i40e_nvm_version_str(struct i40e_hw *hw, char *buf, + size_t len) +{ + char ver[16] = " "; + + /* Get NVM version */ + i40e_info_nvm_ver(hw, buf, len); + + /* Append EETrackID if provided */ + i40e_info_eetrack(hw, &ver[1], sizeof(ver) - 1); + if (strlen(ver) > 1) + strlcat(buf, ver, len); + + /* Append combo image version if provided */ + i40e_info_civd_ver(hw, &ver[1], sizeof(ver) - 1); + if (strlen(ver) > 1) + strlcat(buf, ver, len); return buf; } @@ -1321,4 +1330,15 @@ static inline u32 i40e_is_tc_mqprio_enabled(struct i40e_pf *pf) return pf->flags & I40E_FLAG_TC_MQPRIO; } +/** + * i40e_hw_to_pf - get pf pointer from the hardware structure + * @hw: pointer to the device HW structure + **/ +static inline struct i40e_pf *i40e_hw_to_pf(struct i40e_hw *hw) +{ + return container_of(hw, struct i40e_pf, hw); +} + +struct device *i40e_hw_to_dev(struct i40e_hw *hw); + #endif /* _I40E_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 86fac8f959bb..9ce6e633cc2f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -1,10 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. 
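
The new i40e_hw_to_pf() helper above is the standard container_of() pattern: given a pointer to an embedded member, recover the enclosing structure. A self-contained sketch with stand-in types (not the driver's real ones):

/*
 * Minimal illustration of the container_of() arithmetic behind
 * i40e_hw_to_pf(). Only the pointer math is the point here.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hw { unsigned int debug_mask; };

struct pf {
	int id;
	struct hw hw;	/* embedded member, as in struct i40e_pf */
};

static struct pf *hw_to_pf(struct hw *hw)
{
	return container_of(hw, struct pf, hw);
}

int main(void)
{
	struct pf pf = { .id = 3 };

	/* recover the enclosing pf from a pointer to its hw member */
	printf("pf id = %d\n", hw_to_pf(&pf.hw)->id);
	return 0;
}
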
*/ -#include "i40e_status.h" -#include "i40e_type.h" +#include <linux/delay.h> +#include "i40e_alloc.h" #include "i40e_register.h" -#include "i40e_adminq.h" #include "i40e_prototype.h" static void i40e_resume_aq(struct i40e_hw *hw); @@ -52,7 +51,6 @@ static int i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) int ret_code; ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, - i40e_mem_atq_ring, (hw->aq.num_asq_entries * sizeof(struct i40e_aq_desc)), I40E_ADMINQ_DESC_ALIGNMENT); @@ -79,7 +77,6 @@ static int i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) int ret_code; ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, - i40e_mem_arq_ring, (hw->aq.num_arq_entries * sizeof(struct i40e_aq_desc)), I40E_ADMINQ_DESC_ALIGNMENT); @@ -137,7 +134,6 @@ static int i40e_alloc_arq_bufs(struct i40e_hw *hw) for (i = 0; i < hw->aq.num_arq_entries; i++) { bi = &hw->aq.arq.r.arq_bi[i]; ret_code = i40e_allocate_dma_mem(hw, bi, - i40e_mem_arq_buf, hw->aq.arq_buf_size, I40E_ADMINQ_DESC_ALIGNMENT); if (ret_code) @@ -199,7 +195,6 @@ static int i40e_alloc_asq_bufs(struct i40e_hw *hw) for (i = 0; i < hw->aq.num_asq_entries; i++) { bi = &hw->aq.asq.r.asq_bi[i]; ret_code = i40e_allocate_dma_mem(hw, bi, - i40e_mem_asq_buf, hw->aq.asq_buf_size, I40E_ADMINQ_DESC_ALIGNMENT); if (ret_code) @@ -284,7 +279,7 @@ static int i40e_config_asq_regs(struct i40e_hw *hw) /* Check one register to verify that config was applied */ reg = rd32(hw, hw->aq.asq.bal); if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa)) - ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + ret_code = -EIO; return ret_code; } @@ -316,7 +311,7 @@ static int i40e_config_arq_regs(struct i40e_hw *hw) /* Check one register to verify that config was applied */ reg = rd32(hw, hw->aq.arq.bal); if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa)) - ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + ret_code = -EIO; return ret_code; } @@ -340,14 +335,14 @@ static int i40e_init_asq(struct i40e_hw *hw) if (hw->aq.asq.count > 0) { /* queue already initialized */ - ret_code = I40E_ERR_NOT_READY; + ret_code = -EBUSY; goto init_adminq_exit; } /* verify input for valid configuration */ if ((hw->aq.num_asq_entries == 0) || (hw->aq.asq_buf_size == 0)) { - ret_code = I40E_ERR_CONFIG; + ret_code = -EIO; goto init_adminq_exit; } @@ -399,14 +394,14 @@ static int i40e_init_arq(struct i40e_hw *hw) if (hw->aq.arq.count > 0) { /* queue already initialized */ - ret_code = I40E_ERR_NOT_READY; + ret_code = -EBUSY; goto init_adminq_exit; } /* verify input for valid configuration */ if ((hw->aq.num_arq_entries == 0) || (hw->aq.arq_buf_size == 0)) { - ret_code = I40E_ERR_CONFIG; + ret_code = -EIO; goto init_adminq_exit; } @@ -452,7 +447,7 @@ static int i40e_shutdown_asq(struct i40e_hw *hw) mutex_lock(&hw->aq.asq_mutex); if (hw->aq.asq.count == 0) { - ret_code = I40E_ERR_NOT_READY; + ret_code = -EBUSY; goto shutdown_asq_out; } @@ -486,7 +481,7 @@ static int i40e_shutdown_arq(struct i40e_hw *hw) mutex_lock(&hw->aq.arq_mutex); if (hw->aq.arq.count == 0) { - ret_code = I40E_ERR_NOT_READY; + ret_code = -EBUSY; goto shutdown_arq_out; } @@ -594,7 +589,7 @@ int i40e_init_adminq(struct i40e_hw *hw) (hw->aq.num_asq_entries == 0) || (hw->aq.arq_buf_size == 0) || (hw->aq.asq_buf_size == 0)) { - ret_code = I40E_ERR_CONFIG; + ret_code = -EIO; goto init_adminq_exit; } @@ -626,13 +621,13 @@ int i40e_init_adminq(struct i40e_hw *hw) &hw->aq.api_maj_ver, &hw->aq.api_min_ver, NULL); - if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT) + if (ret_code != -EIO) break; retry++; msleep(100); i40e_resume_aq(hw); } while (retry < 10); - 
if (ret_code != I40E_SUCCESS) + if (ret_code != 0) goto init_adminq_free_arq; /* Some features were introduced in different FW API version @@ -672,7 +667,7 @@ int i40e_init_adminq(struct i40e_hw *hw) hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE; if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) { - ret_code = I40E_ERR_FIRMWARE_API_VERSION; + ret_code = -EIO; goto init_adminq_free_arq; } @@ -799,7 +794,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw, if (hw->aq.asq.count == 0) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: Admin queue not initialized.\n"); - status = I40E_ERR_QUEUE_EMPTY; + status = -EIO; goto asq_send_command_error; } @@ -809,7 +804,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw, if (val >= hw->aq.num_asq_entries) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: head overrun at %d\n", val); - status = I40E_ERR_ADMIN_QUEUE_FULL; + status = -ENOSPC; goto asq_send_command_error; } @@ -840,7 +835,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: Invalid buffer size: %d.\n", buff_size); - status = I40E_ERR_INVALID_SIZE; + status = -EINVAL; goto asq_send_command_error; } @@ -848,7 +843,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: Async flag not set along with postpone flag"); - status = I40E_ERR_PARAM; + status = -EINVAL; goto asq_send_command_error; } @@ -863,7 +858,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: Error queue is full.\n"); - status = I40E_ERR_ADMIN_QUEUE_FULL; + status = -ENOSPC; goto asq_send_command_error; } @@ -940,9 +935,9 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw, if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK) status = 0; else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY) - status = I40E_ERR_NOT_READY; + status = -EBUSY; else - status = I40E_ERR_ADMIN_QUEUE_ERROR; + status = -EIO; hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; } @@ -960,11 +955,11 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw, if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: AQ Critical error.\n"); - status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR; + status = -EIO; } else { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: Writeback timeout.\n"); - status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; + status = -EIO; } } @@ -1106,7 +1101,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw, if (hw->aq.arq.count == 0) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: Admin queue not initialized.\n"); - ret_code = I40E_ERR_QUEUE_EMPTY; + ret_code = -EIO; goto clean_arq_element_err; } @@ -1114,7 +1109,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw, ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK; if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ - ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; + ret_code = -EALREADY; goto clean_arq_element_out; } @@ -1126,7 +1121,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw, (enum i40e_admin_queue_err)le16_to_cpu(desc->retval); flags = le16_to_cpu(desc->flags); if (flags & I40E_AQ_FLAG_ERR) { - ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + ret_code = -EIO; i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: Event received with error 0x%X.\n", diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h index ee394aacef4d..80125bea80a2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ 
b/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -4,8 +4,8 @@ #ifndef _I40E_ADMINQ_H_ #define _I40E_ADMINQ_H_ -#include "i40e_osdep.h" -#include "i40e_status.h" +#include <linux/mutex.h> +#include "i40e_alloc.h" #include "i40e_adminq_cmd.h" #define I40E_ADMINQ_DESC(R, i) \ @@ -117,7 +117,7 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc) }; /* aq_rc is invalid if AQ timed out */ - if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT) + if (aq_ret == -EIO) return -EAGAIN; if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0])))) diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 3357d65a906b..18a1c3b6d72c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -4,6 +4,8 @@ #ifndef _I40E_ADMINQ_CMD_H_ #define _I40E_ADMINQ_CMD_H_ +#include <linux/bits.h> + /* This header file defines the i40e Admin Queue commands and is shared between * i40e Firmware and Software. * diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h index a6c9a9e343d1..e0dde326255d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h @@ -4,25 +4,25 @@ #ifndef _I40E_ALLOC_H_ #define _I40E_ALLOC_H_ +#include <linux/types.h> + struct i40e_hw; -/* Memory allocation types */ -enum i40e_memory_type { - i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */ - i40e_mem_asq_buf = 1, - i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */ - i40e_mem_arq_ring = 3, /* ARQ descriptor ring */ - i40e_mem_atq_ring = 4, /* ATQ descriptor ring */ - i40e_mem_pd = 5, /* Page Descriptor */ - i40e_mem_bp = 6, /* Backing Page - 4KB */ - i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */ - i40e_mem_reserved +/* memory allocation tracking */ +struct i40e_dma_mem { + void *va; + dma_addr_t pa; + u32 size; +}; + +struct i40e_virt_mem { + void *va; + u32 size; }; /* prototype for functions used for dynamic memory allocation */ int i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem, - enum i40e_memory_type type, u64 size, u32 alignment); int i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem); diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index 639c5a1ca853..306758428aef 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -6,7 +6,6 @@ #include <linux/net/intel/i40e_client.h> #include "i40e.h" -#include "i40e_prototype.h" static LIST_HEAD(i40e_devices); static DEFINE_MUTEX(i40e_device_mutex); diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index ed88e38d488b..d7e24d661724 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1,11 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2021 Intel Corporation. 
*/ -#include "i40e.h" -#include "i40e_type.h" -#include "i40e_adminq.h" -#include "i40e_prototype.h" #include <linux/avf/virtchnl.h> +#include <linux/delay.h> +#include <linux/etherdevice.h> +#include <linux/pci.h> +#include "i40e_adminq_cmd.h" +#include "i40e_devids.h" +#include "i40e_prototype.h" +#include "i40e_register.h" /** * i40e_set_mac_type - Sets MAC type @@ -56,7 +59,7 @@ int i40e_set_mac_type(struct i40e_hw *hw) break; } } else { - status = I40E_ERR_DEVICE_NOT_SUPPORTED; + status = -ENODEV; } hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n", @@ -660,7 +663,7 @@ int i40e_init_shared_code(struct i40e_hw *hw) case I40E_MAC_X722: break; default: - return I40E_ERR_DEVICE_NOT_SUPPORTED; + return -ENODEV; } hw->phy.get_link_info = true; @@ -780,7 +783,7 @@ int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr) if (flags & I40E_AQC_PORT_ADDR_VALID) ether_addr_copy(mac_addr, addrs.port_mac); else - status = I40E_ERR_INVALID_MAC_ADDR; + status = -EINVAL; return status; } @@ -818,62 +821,72 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable) } /** - * i40e_read_pba_string - Reads part number string from EEPROM + * i40e_get_pba_string - Reads part number string from EEPROM * @hw: pointer to hardware structure - * @pba_num: stores the part number string from the EEPROM - * @pba_num_size: part number string buffer length * - * Reads the part number string from the EEPROM. + * Reads the part number string from the EEPROM and stores it + * into newly allocated buffer and saves resulting pointer + * to i40e_hw->pba_id field. **/ -int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, - u32 pba_num_size) +void i40e_get_pba_string(struct i40e_hw *hw) { +#define I40E_NVM_PBA_FLAGS_BLK_PRESENT 0xFAFA u16 pba_word = 0; u16 pba_size = 0; u16 pba_ptr = 0; - int status = 0; - u16 i = 0; + int status; + char *ptr; + u16 i; status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word); - if (status || (pba_word != 0xFAFA)) { - hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n"); - return status; + if (status) { + hw_dbg(hw, "Failed to read PBA flags.\n"); + return; + } + if (pba_word != I40E_NVM_PBA_FLAGS_BLK_PRESENT) { + hw_dbg(hw, "PBA block is not present.\n"); + return; } status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr); if (status) { hw_dbg(hw, "Failed to read PBA Block pointer.\n"); - return status; + return; } status = i40e_read_nvm_word(hw, pba_ptr, &pba_size); if (status) { hw_dbg(hw, "Failed to read PBA Block size.\n"); - return status; + return; } /* Subtract one to get PBA word count (PBA Size word is included in - * total size) + * total size) and advance pointer to first PBA word. 
*/ pba_size--; - if (pba_num_size < (((u32)pba_size * 2) + 1)) { - hw_dbg(hw, "Buffer too small for PBA data.\n"); - return I40E_ERR_PARAM; + pba_ptr++; + if (!pba_size) { + hw_dbg(hw, "PBA ID is empty.\n"); + return; } + ptr = devm_kzalloc(i40e_hw_to_dev(hw), pba_size * 2 + 1, GFP_KERNEL); + if (!ptr) + return; + hw->pba_id = ptr; + for (i = 0; i < pba_size; i++) { - status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word); + status = i40e_read_nvm_word(hw, pba_ptr + i, &pba_word); if (status) { hw_dbg(hw, "Failed to read PBA Block word %d.\n", i); - return status; + devm_kfree(i40e_hw_to_dev(hw), hw->pba_id); + hw->pba_id = NULL; + return; } - pba_num[(i * 2)] = (pba_word >> 8) & 0xFF; - pba_num[(i * 2) + 1] = pba_word & 0xFF; + *ptr++ = (pba_word >> 8) & 0xFF; + *ptr++ = pba_word & 0xFF; } - pba_num[(pba_size * 2)] = '\0'; - - return status; } /** @@ -955,7 +968,7 @@ static int i40e_poll_globr(struct i40e_hw *hw, hw_dbg(hw, "Global reset failed.\n"); hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg); - return I40E_ERR_RESET_FAILED; + return -EIO; } #define I40E_PF_RESET_WAIT_COUNT_A0 200 @@ -995,7 +1008,7 @@ int i40e_pf_reset(struct i40e_hw *hw) } if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { hw_dbg(hw, "Global reset polling failed to complete.\n"); - return I40E_ERR_RESET_FAILED; + return -EIO; } /* Now Wait for the FW to be ready */ @@ -1014,7 +1027,7 @@ int i40e_pf_reset(struct i40e_hw *hw) I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) { hw_dbg(hw, "wait for FW Reset complete timedout\n"); hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg); - return I40E_ERR_RESET_FAILED; + return -EIO; } /* If there was a Global Reset in progress when we got here, @@ -1040,10 +1053,10 @@ int i40e_pf_reset(struct i40e_hw *hw) } if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { if (i40e_poll_globr(hw, grst_del)) - return I40E_ERR_RESET_FAILED; + return -EIO; } else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) { hw_dbg(hw, "PF reset polling failed to complete.\n"); - return I40E_ERR_RESET_FAILED; + return -EIO; } } @@ -1082,7 +1095,7 @@ void i40e_clear_hw(struct i40e_hw *hw) I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >> I40E_PFLAN_QALLOC_LASTQ_SHIFT; - if (val & I40E_PFLAN_QALLOC_VALID_MASK) + if (val & I40E_PFLAN_QALLOC_VALID_MASK && j >= base_queue) num_queues = (j - base_queue) + 1; else num_queues = 0; @@ -1092,7 +1105,7 @@ void i40e_clear_hw(struct i40e_hw *hw) I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT; j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >> I40E_PF_VT_PFALLOC_LASTVF_SHIFT; - if (val & I40E_PF_VT_PFALLOC_VALID_MASK) + if (val & I40E_PF_VT_PFALLOC_VALID_MASK && j >= i) num_vfs = (j - i) + 1; else num_vfs = 0; @@ -1318,7 +1331,7 @@ i40e_aq_get_phy_capabilities(struct i40e_hw *hw, int status; if (!abilities) - return I40E_ERR_PARAM; + return -EINVAL; do { i40e_fill_default_direct_cmd_desc(&desc, @@ -1341,12 +1354,12 @@ i40e_aq_get_phy_capabilities(struct i40e_hw *hw, switch (hw->aq.asq_last_status) { case I40E_AQ_RC_EIO: - status = I40E_ERR_UNKNOWN_PHY; + status = -EIO; break; case I40E_AQ_RC_EAGAIN: usleep_range(1000, 2000); total_delay++; - status = I40E_ERR_TIMEOUT; + status = -EIO; break; /* also covers I40E_AQ_RC_OK */ default: @@ -1396,7 +1409,7 @@ int i40e_aq_set_phy_config(struct i40e_hw *hw, int status; if (!config) - return I40E_ERR_PARAM; + return -EINVAL; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_phy_config); @@ -2312,7 +2325,7 @@ int i40e_aq_send_driver_version(struct i40e_hw *hw, u16 len; if (dv == NULL) - return I40E_ERR_PARAM; + return -EINVAL; 
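
The rewritten i40e_get_pba_string() above caches the part number in a devm_kzalloc()'d buffer hung off hw->pba_id instead of filling a caller-supplied buffer, which removes the size parameter and its validation. A small sketch of the word-to-bytes loop it performs; the PBA word values are made up for illustration:

/*
 * Each 16-bit word read from the EEPROM carries two characters of
 * the part number string, high byte first.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t words[] = { 0x4b32, 0x3334 };	/* hypothetical PBA words */
	char pba[sizeof(words) + 1];		/* two chars per word + NUL */
	char *p = pba;
	unsigned int i;

	for (i = 0; i < sizeof(words) / sizeof(words[0]); i++) {
		*p++ = (words[i] >> 8) & 0xFF;
		*p++ = words[i] & 0xFF;
	}
	*p = '\0';

	puts(pba);	/* prints "K234" */
	return 0;
}
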
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); @@ -2430,7 +2443,7 @@ int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, /* SEIDs need to either both be set or both be 0 for floating VEB */ if (!!uplink_seid != !!downlink_seid) - return I40E_ERR_PARAM; + return -EINVAL; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb); @@ -2485,7 +2498,7 @@ int i40e_aq_get_veb_parameters(struct i40e_hw *hw, int status; if (veb_seid == 0) - return I40E_ERR_PARAM; + return -EINVAL; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_veb_parameters); @@ -2575,7 +2588,7 @@ i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, u16 buf_size; if (count == 0 || !mv_list || !hw) - return I40E_ERR_PARAM; + return -EINVAL; buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid); @@ -2608,7 +2621,7 @@ i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid, u16 buf_size; if (count == 0 || !mv_list || !hw) - return I40E_ERR_PARAM; + return -EINVAL; buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid); @@ -2638,7 +2651,7 @@ i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, int status; if (count == 0 || !mv_list || !hw) - return I40E_ERR_PARAM; + return -EINVAL; buf_size = count * sizeof(*mv_list); @@ -2685,7 +2698,7 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid, u16 buf_size; if (count == 0 || !mv_list || !hw) - return I40E_ERR_PARAM; + return -EINVAL; buf_size = count * sizeof(*mv_list); @@ -2791,7 +2804,7 @@ int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { if (count == 0 || !mr_list) - return I40E_ERR_PARAM; + return -EINVAL; } return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, @@ -2827,7 +2840,7 @@ int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, * not matter. */ if (count == 0 || !mr_list) - return I40E_ERR_PARAM; + return -EINVAL; } return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, @@ -2892,7 +2905,7 @@ int i40e_aq_debug_read_register(struct i40e_hw *hw, int status; if (reg_val == NULL) - return I40E_ERR_PARAM; + return -EINVAL; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); @@ -3031,7 +3044,7 @@ int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, /* In offset the highest byte must be zeroed. */ if (offset & 0xFF000000) { - status = I40E_ERR_PARAM; + status = -EINVAL; goto i40e_aq_read_nvm_exit; } @@ -3076,7 +3089,7 @@ int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, /* In offset the highest byte must be zeroed. */ if (offset & 0xFF000000) { - status = I40E_ERR_PARAM; + status = -EINVAL; goto i40e_aq_erase_nvm_exit; } @@ -3368,7 +3381,7 @@ int i40e_aq_discover_capabilities(struct i40e_hw *hw, if (list_type_opc != i40e_aqc_opc_list_func_capabilities && list_type_opc != i40e_aqc_opc_list_dev_capabilities) { - status = I40E_ERR_PARAM; + status = -EINVAL; goto exit; } @@ -3416,7 +3429,7 @@ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, /* In offset the highest byte must be zeroed. 
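
The long run of conversions above and below retires the driver-private I40E_ERR_* codes in favor of standard negative errno values (-EINVAL, -EIO, -EBUSY, -ENOSPC, ...), so callers and generic tooling can reason about failures uniformly. A tiny sketch of the resulting calling convention; init_queue() is a made-up stand-in for the adminq init paths:

/*
 * With negative errno returns, generic helpers like strerror()
 * (or the kernel's %pe printk format) understand the code directly.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int init_queue(int already_up, int num_entries)
{
	if (already_up)
		return -EBUSY;	/* was I40E_ERR_NOT_READY */
	if (num_entries == 0)
		return -EIO;	/* was I40E_ERR_CONFIG */
	return 0;
}

int main(void)
{
	int ret = init_queue(0, 0);

	if (ret)
		printf("init failed: %s\n", strerror(-ret));
	return 0;
}
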
*/ if (offset & 0xFF000000) { - status = I40E_ERR_PARAM; + status = -EINVAL; goto i40e_aq_update_nvm_exit; } @@ -3473,7 +3486,7 @@ int i40e_aq_rearrange_nvm(struct i40e_hw *hw, I40E_AQ_NVM_REARRANGE_TO_STRUCT); if (!rearrange_nvm) { - status = I40E_ERR_PARAM; + status = -EINVAL; goto i40e_aq_rearrange_nvm_exit; } @@ -3510,7 +3523,7 @@ int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, int status; if (buff_size == 0 || !buff) - return I40E_ERR_PARAM; + return -EINVAL; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); /* Indirect Command */ @@ -3558,7 +3571,7 @@ i40e_aq_set_lldp_mib(struct i40e_hw *hw, cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw; if (buff_size == 0 || !buff) - return I40E_ERR_PARAM; + return -EINVAL; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_set_local_mib); @@ -3627,7 +3640,7 @@ i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) { i40e_debug(hw, I40E_DEBUG_ALL, "Restore LLDP not supported by current FW version.\n"); - return I40E_ERR_DEVICE_NOT_SUPPORTED; + return -ENODEV; } i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore); @@ -3729,7 +3742,7 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, int status; if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) - return I40E_ERR_DEVICE_NOT_SUPPORTED; + return -ENODEV; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_dcb_parameters); @@ -3760,7 +3773,7 @@ int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, int status; if (buff_size == 0 || !buff) - return I40E_ERR_PARAM; + return -EINVAL; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); @@ -3848,7 +3861,7 @@ int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, int status; if (seid == 0) - return I40E_ERR_PARAM; + return -EINVAL; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); @@ -3922,7 +3935,7 @@ static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, cmd_param_flag = false; break; default: - return I40E_ERR_PARAM; + return -EINVAL; } i40e_fill_default_direct_cmd_desc(&desc, opcode); @@ -4148,7 +4161,7 @@ i40e_validate_filter_settings(struct i40e_hw *hw, fcoe_filt_size <<= (u32)settings->fcoe_filt_num; break; default: - return I40E_ERR_PARAM; + return -EINVAL; } switch (settings->fcoe_cntx_num) { @@ -4160,7 +4173,7 @@ i40e_validate_filter_settings(struct i40e_hw *hw, fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; break; default: - return I40E_ERR_PARAM; + return -EINVAL; } /* Validate PE settings passed */ @@ -4178,7 +4191,7 @@ i40e_validate_filter_settings(struct i40e_hw *hw, case I40E_HASH_FILTER_SIZE_1M: break; default: - return I40E_ERR_PARAM; + return -EINVAL; } switch (settings->pe_cntx_num) { @@ -4194,7 +4207,7 @@ i40e_validate_filter_settings(struct i40e_hw *hw, case I40E_DMA_CNTX_SIZE_256K: break; default: - return I40E_ERR_PARAM; + return -EINVAL; } /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ @@ -4202,7 +4215,7 @@ i40e_validate_filter_settings(struct i40e_hw *hw, fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) - return I40E_ERR_INVALID_SIZE; + return -EINVAL; return 0; } @@ -4224,7 +4237,7 @@ int i40e_set_filter_control(struct i40e_hw *hw, u32 val; if (!settings) - return I40E_ERR_PARAM; + return -EINVAL; /* Validate the input settings */ ret = i40e_validate_filter_settings(hw, settings); @@ -4306,7 +4319,7 @@ int 
i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, int status; if (vsi_seid == 0) - return I40E_ERR_PARAM; + return -EINVAL; if (is_add) { i40e_fill_default_direct_cmd_desc(&desc, @@ -4381,7 +4394,7 @@ static int i40e_aq_alternate_read(struct i40e_hw *hw, int status; if (!reg_val0) - return I40E_ERR_PARAM; + return -EINVAL; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); cmd_resp->address0 = cpu_to_le32(reg_addr0); @@ -4517,7 +4530,7 @@ int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, int status; if (buff_size == 0 || !buff) - return I40E_ERR_PARAM; + return -EINVAL; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_dump_internals); @@ -4635,7 +4648,7 @@ int i40e_read_phy_register_clause22(struct i40e_hw *hw, u16 reg, u8 phy_addr, u16 *value) { u8 port_num = (u8)hw->func_caps.mdio_port_num; - int status = I40E_ERR_TIMEOUT; + int status = -EIO; u32 command = 0; u16 retry = 1000; @@ -4680,7 +4693,7 @@ int i40e_write_phy_register_clause22(struct i40e_hw *hw, u16 reg, u8 phy_addr, u16 value) { u8 port_num = (u8)hw->func_caps.mdio_port_num; - int status = I40E_ERR_TIMEOUT; + int status = -EIO; u32 command = 0; u16 retry = 1000; @@ -4721,7 +4734,7 @@ int i40e_read_phy_register_clause45(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 *value) { u8 port_num = hw->func_caps.mdio_port_num; - int status = I40E_ERR_TIMEOUT; + int status = -EIO; u32 command = 0; u16 retry = 1000; @@ -4755,7 +4768,7 @@ int i40e_read_phy_register_clause45(struct i40e_hw *hw, (I40E_MDIO_CLAUSE45_STCODE_MASK) | (I40E_GLGEN_MSCA_MDICMD_MASK) | (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); - status = I40E_ERR_TIMEOUT; + status = -EIO; retry = 1000; wr32(hw, I40E_GLGEN_MSCA(port_num), command); do { @@ -4795,7 +4808,7 @@ int i40e_write_phy_register_clause45(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 value) { u8 port_num = hw->func_caps.mdio_port_num; - int status = I40E_ERR_TIMEOUT; + int status = -EIO; u16 retry = 1000; u32 command = 0; @@ -4831,7 +4844,7 @@ int i40e_write_phy_register_clause45(struct i40e_hw *hw, (I40E_MDIO_CLAUSE45_STCODE_MASK) | (I40E_GLGEN_MSCA_MDICMD_MASK) | (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); - status = I40E_ERR_TIMEOUT; + status = -EIO; retry = 1000; wr32(hw, I40E_GLGEN_MSCA(port_num), command); do { @@ -4880,7 +4893,7 @@ int i40e_write_phy_register(struct i40e_hw *hw, phy_addr, value); break; default: - status = I40E_ERR_UNKNOWN_PHY; + status = -EIO; break; } @@ -4919,7 +4932,7 @@ int i40e_read_phy_register(struct i40e_hw *hw, phy_addr, value); break; default: - status = I40E_ERR_UNKNOWN_PHY; + status = -EIO; break; } @@ -5109,7 +5122,7 @@ int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, I40E_PHY_COM_REG_PAGE, true, I40E_PHY_LED_PROV_REG_1, ®_val_aq, NULL); - if (status == I40E_SUCCESS) + if (status == 0) *val = (u16)reg_val_aq; return status; } @@ -5204,7 +5217,7 @@ int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, int status; if (!reg_val) - return I40E_ERR_PARAM; + return -EINVAL; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read); @@ -5644,7 +5657,7 @@ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, if (track_id == I40E_DDP_TRACKID_INVALID) { i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n"); - return I40E_NOT_SUPPORTED; + return -EOPNOTSUPP; } dev_cnt = profile->device_table_count; @@ -5657,7 +5670,7 @@ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, if (dev_cnt && i == dev_cnt) { i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support 
DDP\n"); - return I40E_ERR_DEVICE_NOT_SUPPORTED; + return -ENODEV; } I40E_SECTION_TABLE(profile, sec_tbl); @@ -5672,14 +5685,14 @@ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, sec->section.type == SECTION_TYPE_RB_AQ) { i40e_debug(hw, I40E_DEBUG_PACKAGE, "Not a roll-back package\n"); - return I40E_NOT_SUPPORTED; + return -EOPNOTSUPP; } } else { if (sec->section.type == SECTION_TYPE_RB_AQ || sec->section.type == SECTION_TYPE_RB_MMIO) { i40e_debug(hw, I40E_DEBUG_PACKAGE, "Not an original package\n"); - return I40E_NOT_SUPPORTED; + return -EOPNOTSUPP; } } } diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 90638b67f8dc..68602fc375f6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -1,9 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2021 Intel Corporation. */ -#include "i40e_adminq.h" -#include "i40e_prototype.h" +#include "i40e_alloc.h" #include "i40e_dcb.h" +#include "i40e_prototype.h" /** * i40e_get_dcbx_status @@ -17,7 +17,7 @@ int i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status) u32 reg; if (!status) - return I40E_ERR_PARAM; + return -EINVAL; reg = rd32(hw, I40E_PRTDCB_GENS); *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >> @@ -508,7 +508,7 @@ int i40e_lldp_to_dcb_config(u8 *lldpmib, u16 type; if (!lldpmib || !dcbcfg) - return I40E_ERR_PARAM; + return -EINVAL; /* set to the start of LLDPDU */ lldpmib += ETH_HLEN; @@ -874,7 +874,7 @@ int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change) int ret = 0; if (!hw->func_caps.dcb) - return I40E_NOT_SUPPORTED; + return -EOPNOTSUPP; /* Read LLDP NVM area */ if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) { @@ -885,7 +885,7 @@ int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change) else if (hw->mac.type == I40E_MAC_X722) offset = I40E_LLDP_CURRENT_STATUS_X722_OFFSET; else - return I40E_NOT_SUPPORTED; + return -EOPNOTSUPP; ret = i40e_read_nvm_module_data(hw, I40E_SR_EMP_SR_SETTINGS_PTR, @@ -897,7 +897,7 @@ int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change) ret = i40e_read_lldp_cfg(hw, &lldp_cfg); } if (ret) - return I40E_ERR_NOT_READY; + return -EBUSY; /* Get the LLDP AdminStatus for the current port */ adminstatus = lldp_cfg.adminstatus >> (hw->port * 4); @@ -906,7 +906,7 @@ int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change) /* LLDP agent disabled */ if (!adminstatus) { hw->dcbx_status = I40E_DCBX_STATUS_DISABLED; - return I40E_ERR_NOT_READY; + return -EBUSY; } /* Get DCBX status */ @@ -922,7 +922,7 @@ int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change) if (ret) return ret; } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) { - return I40E_ERR_NOT_READY; + return -EBUSY; } /* Configure the LLDP MIB change event */ @@ -949,7 +949,7 @@ i40e_get_fw_lldp_status(struct i40e_hw *hw, int ret; if (!lldp_status) - return I40E_ERR_PARAM; + return -EINVAL; /* Allocate buffer for the LLDPDU */ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE); @@ -1299,7 +1299,7 @@ int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen, sizeof(tlv->typelength) + length); } while (tlvid < I40E_TLV_ID_END_OF_LLDPPDU); *miblen = offset; - return I40E_SUCCESS; + return 0; } /** @@ -1957,7 +1957,7 @@ int i40e_read_lldp_cfg(struct i40e_hw *hw, u32 mem; if (!lldp_cfg) - return I40E_ERR_PARAM; + return -EINVAL; ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (ret) diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c 
b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index 195421d863ab..077a95dad32c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -2,8 +2,8 @@ /* Copyright(c) 2013 - 2021 Intel Corporation. */ #ifdef CONFIG_I40E_DCB -#include "i40e.h" #include <net/dcbnl.h> +#include "i40e.h" #define I40E_DCBNL_STATUS_SUCCESS 0 #define I40E_DCBNL_STATUS_ERROR 1 diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c index 7e8183762fd9..cf25bfc5dc3f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c @@ -1,9 +1,27 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ +#include <linux/firmware.h> #include "i40e.h" -#include <linux/firmware.h> +#define I40_DDP_FLASH_REGION 100 +#define I40E_PROFILE_INFO_SIZE 48 +#define I40E_MAX_PROFILE_NUM 16 +#define I40E_PROFILE_LIST_SIZE \ + (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4) +#define I40E_DDP_PROFILE_PATH "intel/i40e/ddp/" +#define I40E_DDP_PROFILE_NAME_MAX 64 + +struct i40e_ddp_profile_list { + u32 p_count; + struct i40e_profile_info p_info[]; +}; + +struct i40e_ddp_old_profile_list { + struct list_head list; + size_t old_ddp_size; + u8 old_ddp_buf[]; +}; /** * i40e_ddp_profiles_eq - checks if DDP profiles are the equivalent @@ -220,7 +238,7 @@ static bool i40e_ddp_is_pkg_hdr_valid(struct net_device *netdev, netdev_err(netdev, "Invalid DDP profile - size is bigger than 4G"); return false; } - if (size < (sizeof(struct i40e_package_header) + + if (size < (sizeof(struct i40e_package_header) + sizeof(u32) + sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) { netdev_err(netdev, "Invalid DDP profile - size is too small."); return false; @@ -261,8 +279,8 @@ static bool i40e_ddp_is_pkg_hdr_valid(struct net_device *netdev, * Checks correctness and loads DDP profile to the NIC. The function is * also used for rolling back previously loaded profile. **/ -int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size, - bool is_add) +static int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size, + bool is_add) { u8 profile_info_sec[sizeof(struct i40e_profile_section_header) + sizeof(struct i40e_profile_info)]; @@ -281,7 +299,7 @@ int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size, if (!i40e_ddp_is_pkg_hdr_valid(netdev, pkg_hdr, size)) return -EINVAL; - if (size < (sizeof(struct i40e_package_header) + + if (size < (sizeof(struct i40e_package_header) + sizeof(u32) + sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) { netdev_err(netdev, "Invalid DDP recipe size."); return -EINVAL; @@ -344,7 +362,7 @@ int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size, if (is_add) { status = i40e_write_profile(&pf->hw, profile_hdr, track_id); if (status) { - if (status == I40E_ERR_DEVICE_NOT_SUPPORTED) { + if (status == -ENODEV) { netdev_err(netdev, "Profile is not supported by the device."); return -EPERM; @@ -438,10 +456,9 @@ int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash) char profile_name[sizeof(I40E_DDP_PROFILE_PATH) + I40E_DDP_PROFILE_NAME_MAX]; - profile_name[sizeof(profile_name) - 1] = 0; - strncpy(profile_name, I40E_DDP_PROFILE_PATH, - sizeof(profile_name) - 1); - strncat(profile_name, flash->data, I40E_DDP_PROFILE_NAME_MAX); + scnprintf(profile_name, sizeof(profile_name), "%s%s", + I40E_DDP_PROFILE_PATH, flash->data); + /* Load DDP recipe. 
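
This i40e_ddp.c hunk, like the earlier fm10k strncpy()-to-strscpy() change, moves string assembly onto interfaces that always NUL-terminate and never overrun the destination; here a strncpy()/strncat() pair collapses into one bounded scnprintf(). A userspace sketch of the same pattern using snprintf(), since strscpy()/scnprintf() are kernel APIs; the profile name is invented:

/*
 * One bounded, NUL-terminated write replaces a copy-then-append
 * sequence whose termination guarantees were easy to get wrong.
 */
#include <stdio.h>

#define PROFILE_PATH "intel/i40e/ddp/"

int main(void)
{
	char name[64];

	snprintf(name, sizeof(name), "%s%s", PROFILE_PATH, "recipe.pkg");
	puts(name);	/* intel/i40e/ddp/recipe.pkg */
	return 0;
}
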
*/ status = request_firmware(&ddp_config, profile_name, &netdev->dev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_debug.h b/drivers/net/ethernet/intel/i40e/i40e_debug.h new file mode 100644 index 000000000000..27ebc72d8bfe --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_debug.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2023 Intel Corporation. */ + +#ifndef _I40E_DEBUG_H_ +#define _I40E_DEBUG_H_ + +#include <linux/dev_printk.h> + +/* debug masks - set these bits in hw->debug_mask to control output */ +enum i40e_debug_mask { + I40E_DEBUG_INIT = 0x00000001, + I40E_DEBUG_RELEASE = 0x00000002, + + I40E_DEBUG_LINK = 0x00000010, + I40E_DEBUG_PHY = 0x00000020, + I40E_DEBUG_HMC = 0x00000040, + I40E_DEBUG_NVM = 0x00000080, + I40E_DEBUG_LAN = 0x00000100, + I40E_DEBUG_FLOW = 0x00000200, + I40E_DEBUG_DCB = 0x00000400, + I40E_DEBUG_DIAG = 0x00000800, + I40E_DEBUG_FD = 0x00001000, + I40E_DEBUG_PACKAGE = 0x00002000, + I40E_DEBUG_IWARP = 0x00F00000, + I40E_DEBUG_AQ_MESSAGE = 0x01000000, + I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, + I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, + I40E_DEBUG_AQ_COMMAND = 0x06000000, + I40E_DEBUG_AQ = 0x0F000000, + + I40E_DEBUG_USER = 0xF0000000, + + I40E_DEBUG_ALL = 0xFFFFFFFF +}; + +struct i40e_hw; +struct device *i40e_hw_to_dev(struct i40e_hw *hw); + +#define hw_dbg(hw, S, A...) dev_dbg(i40e_hw_to_dev(hw), S, ##A) + +#define i40e_debug(h, m, s, ...) \ +do { \ + if (((m) & (h)->debug_mask)) \ + dev_info(i40e_hw_to_dev(hw), s, ##__VA_ARGS__); \ +} while (0) + +#endif /* _I40E_DEBUG_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 62497f5565c5..999c9708def5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -5,8 +5,9 @@ #include <linux/fs.h> #include <linux/debugfs.h> - +#include <linux/if_bridge.h> #include "i40e.h" +#include "i40e_virtchnl_pf.h" static struct dentry *i40e_dbg_root; @@ -1309,7 +1310,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL); if (!ret) { dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n"); - } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) { + } else if (ret == -EIO) { dev_info(&pf->pdev->dev, "AQ command send failed Opcode %x AQ Error: %d\n", desc->opcode, pf->hw.aq.asq_last_status); @@ -1370,7 +1371,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, buffer_len, NULL); if (!ret) { dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n"); - } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) { + } else if (ret == -EIO) { dev_info(&pf->pdev->dev, "AQ command send failed Opcode %x AQ Error: %d\n", desc->opcode, pf->hw.aq.asq_last_status); diff --git a/drivers/net/ethernet/intel/i40e/i40e_devlink.c b/drivers/net/ethernet/intel/i40e/i40e_devlink.c new file mode 100644 index 000000000000..cc4e9e2addb7 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_devlink.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2023 Intel Corporation. 
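
The new i40e_debug.h above centralizes the driver's bitmask-gated logging: each subsystem owns a bit in hw->debug_mask, and i40e_debug() emits only when the caller's bit is set. A standalone sketch of that gating macro with stand-in names and bit values:

/*
 * A message is printed only when its subsystem bit is enabled in
 * the per-device mask; everything else compiles to a cheap test.
 */
#include <stdio.h>

#define DBG_NVM	0x00000080u
#define DBG_AQ	0x0f000000u

struct hw { unsigned int debug_mask; };

#define hw_debug(h, m, ...)			\
	do {					\
		if ((m) & (h)->debug_mask)	\
			printf(__VA_ARGS__);	\
	} while (0)

int main(void)
{
	struct hw hw = { .debug_mask = DBG_NVM };

	hw_debug(&hw, DBG_NVM, "nvm: checksum ok\n");	/* printed */
	hw_debug(&hw, DBG_AQ, "aq: descriptor sent\n");	/* suppressed */
	return 0;
}
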
*/ + +#include <net/devlink.h> +#include "i40e.h" +#include "i40e_devlink.h" + +static void i40e_info_get_dsn(struct i40e_pf *pf, char *buf, size_t len) +{ + u8 dsn[8]; + + put_unaligned_be64(pci_get_dsn(pf->pdev), dsn); + + snprintf(buf, len, "%8phD", dsn); +} + +static void i40e_info_fw_mgmt(struct i40e_hw *hw, char *buf, size_t len) +{ + struct i40e_adminq_info *aq = &hw->aq; + + snprintf(buf, len, "%u.%u", aq->fw_maj_ver, aq->fw_min_ver); +} + +static void i40e_info_fw_mgmt_build(struct i40e_hw *hw, char *buf, size_t len) +{ + struct i40e_adminq_info *aq = &hw->aq; + + snprintf(buf, len, "%05d", aq->fw_build); +} + +static void i40e_info_fw_api(struct i40e_hw *hw, char *buf, size_t len) +{ + struct i40e_adminq_info *aq = &hw->aq; + + snprintf(buf, len, "%u.%u", aq->api_maj_ver, aq->api_min_ver); +} + +static void i40e_info_pba(struct i40e_hw *hw, char *buf, size_t len) +{ + buf[0] = '\0'; + if (hw->pba_id) + strscpy(buf, hw->pba_id, len); +} + +enum i40e_devlink_version_type { + I40E_DL_VERSION_FIXED, + I40E_DL_VERSION_RUNNING, +}; + +static int i40e_devlink_info_put(struct devlink_info_req *req, + enum i40e_devlink_version_type type, + const char *key, const char *value) +{ + if (!strlen(value)) + return 0; + + switch (type) { + case I40E_DL_VERSION_FIXED: + return devlink_info_version_fixed_put(req, key, value); + case I40E_DL_VERSION_RUNNING: + return devlink_info_version_running_put(req, key, value); + } + return 0; +} + +static int i40e_devlink_info_get(struct devlink *dl, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct i40e_pf *pf = devlink_priv(dl); + struct i40e_hw *hw = &pf->hw; + char buf[32]; + int err; + + i40e_info_get_dsn(pf, buf, sizeof(buf)); + err = devlink_info_serial_number_put(req, buf); + if (err) + return err; + + i40e_info_fw_mgmt(hw, buf, sizeof(buf)); + err = i40e_devlink_info_put(req, I40E_DL_VERSION_RUNNING, + DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, buf); + if (err) + return err; + + i40e_info_fw_mgmt_build(hw, buf, sizeof(buf)); + err = i40e_devlink_info_put(req, I40E_DL_VERSION_RUNNING, + "fw.mgmt.build", buf); + if (err) + return err; + + i40e_info_fw_api(hw, buf, sizeof(buf)); + err = i40e_devlink_info_put(req, I40E_DL_VERSION_RUNNING, + DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API, + buf); + if (err) + return err; + + i40e_info_nvm_ver(hw, buf, sizeof(buf)); + err = i40e_devlink_info_put(req, I40E_DL_VERSION_RUNNING, + "fw.psid.api", buf); + if (err) + return err; + + i40e_info_eetrack(hw, buf, sizeof(buf)); + err = i40e_devlink_info_put(req, I40E_DL_VERSION_RUNNING, + DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, + buf); + if (err) + return err; + + i40e_info_civd_ver(hw, buf, sizeof(buf)); + err = i40e_devlink_info_put(req, I40E_DL_VERSION_RUNNING, + DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, buf); + if (err) + return err; + + i40e_info_pba(hw, buf, sizeof(buf)); + err = i40e_devlink_info_put(req, I40E_DL_VERSION_FIXED, + DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, buf); + + return err; +} + +static const struct devlink_ops i40e_devlink_ops = { + .info_get = i40e_devlink_info_get, +}; + +/** + * i40e_alloc_pf - Allocate devlink and return i40e_pf structure pointer + * @dev: the device to allocate for + * + * Allocate a devlink instance for this device and return the private + * area as the i40e_pf structure. 
+ **/ +struct i40e_pf *i40e_alloc_pf(struct device *dev) +{ + struct devlink *devlink; + + devlink = devlink_alloc(&i40e_devlink_ops, sizeof(struct i40e_pf), dev); + if (!devlink) + return NULL; + + return devlink_priv(devlink); +} + +/** + * i40e_free_pf - Free i40e_pf structure and associated devlink + * @pf: the PF structure + * + * Free i40e_pf structure and devlink allocated by devlink_alloc. + **/ +void i40e_free_pf(struct i40e_pf *pf) +{ + struct devlink *devlink = priv_to_devlink(pf); + + devlink_free(devlink); +} + +/** + * i40e_devlink_register - Register devlink interface for this PF + * @pf: the PF to register the devlink for. + * + * Register the devlink instance associated with this physical function. + **/ +void i40e_devlink_register(struct i40e_pf *pf) +{ + devlink_register(priv_to_devlink(pf)); +} + +/** + * i40e_devlink_unregister - Unregister devlink resources for this PF. + * @pf: the PF structure to cleanup + * + * Releases resources used by devlink and cleans up associated memory. + **/ +void i40e_devlink_unregister(struct i40e_pf *pf) +{ + devlink_unregister(priv_to_devlink(pf)); +} + +/** + * i40e_devlink_set_switch_id - Set unique switch id based on pci dsn + * @pf: the PF to create a devlink port for + * @ppid: struct with switch id information + */ +static void i40e_devlink_set_switch_id(struct i40e_pf *pf, + struct netdev_phys_item_id *ppid) +{ + u64 id = pci_get_dsn(pf->pdev); + + ppid->id_len = sizeof(id); + put_unaligned_be64(id, &ppid->id); +} + +/** + * i40e_devlink_create_port - Create a devlink port for this PF + * @pf: the PF to create a port for + * + * Create and register a devlink_port for this PF. Note that although each + * physical function is connected to a separate devlink instance, the port + * will still be numbered according to the physical function id. + * + * Return: zero on success or an error code on failure. + **/ +int i40e_devlink_create_port(struct i40e_pf *pf) +{ + struct devlink *devlink = priv_to_devlink(pf); + struct devlink_port_attrs attrs = {}; + struct device *dev = &pf->pdev->dev; + int err; + + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = pf->hw.pf_id; + i40e_devlink_set_switch_id(pf, &attrs.switch_id); + devlink_port_attrs_set(&pf->devlink_port, &attrs); + err = devlink_port_register(devlink, &pf->devlink_port, pf->hw.pf_id); + if (err) { + dev_err(dev, "devlink_port_register failed: %d\n", err); + return err; + } + + return 0; +} + +/** + * i40e_devlink_destroy_port - Destroy the devlink_port for this PF + * @pf: the PF to cleanup + * + * Unregisters the devlink_port structure associated with this PF. + **/ +void i40e_devlink_destroy_port(struct i40e_pf *pf) +{ + devlink_port_unregister(&pf->devlink_port); +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_devlink.h b/drivers/net/ethernet/intel/i40e/i40e_devlink.h new file mode 100644 index 000000000000..469fb3d2ee25 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_devlink.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2023, Intel Corporation. 
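
i40e_devlink_info_get() above formats every version into one scratch buffer and funnels it through i40e_devlink_info_put(), which drops empty strings and routes the rest to the fixed or running version table. A sketch of that dispatch shape, with printf() standing in for the devlink_info_version_fixed_put()/..._running_put() calls:

/*
 * Skip-if-empty plus fixed/running routing, modeled in userspace.
 * Keys and the firmware version below are illustrative only.
 */
#include <stdio.h>
#include <string.h>

enum version_type { VERSION_FIXED, VERSION_RUNNING };

static int info_put(enum version_type type, const char *key,
		    const char *value)
{
	if (!strlen(value))
		return 0;	/* absent on this adapter: report nothing */

	printf("%s %s=%s\n",
	       type == VERSION_FIXED ? "fixed" : "running", key, value);
	return 0;
}

int main(void)
{
	char buf[32];

	snprintf(buf, sizeof(buf), "%u.%u", 9, 130);	/* made-up fw version */
	info_put(VERSION_RUNNING, "fw.mgmt", buf);

	buf[0] = '\0';					/* e.g. no PBA ID in NVM */
	info_put(VERSION_FIXED, "board.id", buf);	/* prints nothing */
	return 0;
}
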
*/ + +#ifndef _I40E_DEVLINK_H_ +#define _I40E_DEVLINK_H_ + +#include <linux/device.h> + +struct i40e_pf; + +struct i40e_pf *i40e_alloc_pf(struct device *dev); +void i40e_free_pf(struct i40e_pf *pf); +void i40e_devlink_register(struct i40e_pf *pf); +void i40e_devlink_unregister(struct i40e_pf *pf); +int i40e_devlink_create_port(struct i40e_pf *pf); +void i40e_devlink_destroy_port(struct i40e_pf *pf); + +#endif /* _I40E_DEVLINK_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c index 97fe1787a8f4..b1ad7c4259b9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.c +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c @@ -28,7 +28,7 @@ static int i40e_diag_reg_pattern_test(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_DIAG, "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n", __func__, reg, pat, val); - return I40E_ERR_DIAG_TEST_FAILED; + return -EIO; } } @@ -38,7 +38,7 @@ static int i40e_diag_reg_pattern_test(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_DIAG, "%s: reg restore test failed - reg 0x%08x orig_val 0x%08x val 0x%08x\n", __func__, reg, orig_val, val); - return I40E_ERR_DIAG_TEST_FAILED; + return -EIO; } return 0; @@ -127,5 +127,5 @@ int i40e_diag_eeprom_test(struct i40e_hw *hw) BIT(I40E_SR_CONTROL_WORD_1_SHIFT))) return i40e_validate_nvm_checksum(hw, NULL); else - return I40E_ERR_DIAG_TEST_FAILED; + return -EIO; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h index c3ce5f35211f..ece3a6b9a5c6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.h +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h @@ -4,7 +4,10 @@ #ifndef _I40E_DIAG_H_ #define _I40E_DIAG_H_ -#include "i40e_type.h" +#include "i40e_adminq_cmd.h" + +/* forward-declare the HW struct for the compiler */ +struct i40e_hw; enum i40e_lb_mode { I40E_LB_MODE_NONE = 0x0, diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index afc4fa8c66af..fd7163128c4d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -3,9 +3,10 @@ /* ethtool support for i40e */ -#include "i40e.h" +#include "i40e_devids.h" #include "i40e_diag.h" #include "i40e_txrx_common.h" +#include "i40e_virtchnl_pf.h" /* ethtool statistics helpers */ @@ -245,6 +246,7 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = { I40E_NETDEV_STAT(rx_errors), I40E_NETDEV_STAT(tx_errors), I40E_NETDEV_STAT(rx_dropped), + I40E_NETDEV_STAT(rx_missed_errors), I40E_NETDEV_STAT(tx_dropped), I40E_NETDEV_STAT(collisions), I40E_NETDEV_STAT(rx_length_errors), @@ -321,7 +323,7 @@ static const struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("port.rx_broadcast", stats.eth.rx_broadcast), I40E_PF_STAT("port.tx_broadcast", stats.eth.tx_broadcast), I40E_PF_STAT("port.tx_errors", stats.eth.tx_errors), - I40E_PF_STAT("port.rx_dropped", stats.eth.rx_discards), + I40E_PF_STAT("port.rx_discards", stats.eth.rx_discards), I40E_PF_STAT("port.tx_dropped_link_down", stats.tx_dropped_link_down), I40E_PF_STAT("port.rx_crc_errors", stats.crc_errors), I40E_PF_STAT("port.illegal_bytes", stats.illegal_bytes), @@ -2004,8 +2006,8 @@ static void i40e_get_drvinfo(struct net_device *netdev, struct i40e_pf *pf = vsi->back; strscpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver)); - strscpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw), - sizeof(drvinfo->fw_version)); + i40e_nvm_version_str(&pf->hw, drvinfo->fw_version, + 
sizeof(drvinfo->fw_version)); strscpy(drvinfo->bus_info, pci_name(pf->pdev), sizeof(drvinfo->bus_info)); drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN; @@ -2512,11 +2514,13 @@ static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data) u8 *p = data; for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) - ethtool_sprintf(&p, i40e_gstrings_priv_flags[i].flag_string); + ethtool_sprintf(&p, "%s", + i40e_gstrings_priv_flags[i].flag_string); if (pf->hw.pf_id != 0) return; for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) - ethtool_sprintf(&p, i40e_gl_gstrings_priv_flags[i].flag_string); + ethtool_sprintf(&p, "%s", + i40e_gl_gstrings_priv_flags[i].flag_string); } static void i40e_get_strings(struct net_device *netdev, u32 stringset, @@ -5699,8 +5703,8 @@ static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata) struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; - int status = I40E_SUCCESS; __le16 eee_capability; + int status = 0; /* Deny parameters we don't support */ if (i40e_is_eee_param_supported(netdev, edata)) diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c index 46f7950a0049..1742624ca62e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ -#include "i40e.h" -#include "i40e_osdep.h" -#include "i40e_register.h" -#include "i40e_status.h" #include "i40e_alloc.h" +#include "i40e_debug.h" #include "i40e_hmc.h" #include "i40e_type.h" @@ -23,37 +20,33 @@ int i40e_add_sd_table_entry(struct i40e_hw *hw, enum i40e_sd_entry_type type, u64 direct_mode_sz) { - enum i40e_memory_type mem_type __attribute__((unused)); struct i40e_hmc_sd_entry *sd_entry; bool dma_mem_alloc_done = false; - int ret_code = I40E_SUCCESS; struct i40e_dma_mem mem; + int ret_code = 0; u64 alloc_len; if (NULL == hmc_info->sd_table.sd_entry) { - ret_code = I40E_ERR_BAD_PTR; + ret_code = -EINVAL; hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_entry\n"); goto exit; } if (sd_index >= hmc_info->sd_table.sd_cnt) { - ret_code = I40E_ERR_INVALID_SD_INDEX; + ret_code = -EINVAL; hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_index\n"); goto exit; } sd_entry = &hmc_info->sd_table.sd_entry[sd_index]; if (!sd_entry->valid) { - if (I40E_SD_TYPE_PAGED == type) { - mem_type = i40e_mem_pd; + if (type == I40E_SD_TYPE_PAGED) alloc_len = I40E_HMC_PAGED_BP_SIZE; - } else { - mem_type = i40e_mem_bp_jumbo; + else alloc_len = direct_mode_sz; - } /* allocate a 4K pd page or 2M backing page */ - ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len, + ret_code = i40e_allocate_dma_mem(hw, &mem, alloc_len, I40E_HMC_PD_BP_BUF_ALIGNMENT); if (ret_code) goto exit; @@ -121,7 +114,7 @@ int i40e_add_pd_table_entry(struct i40e_hw *hw, u64 *pd_addr; if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) { - ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX; + ret_code = -EINVAL; hw_dbg(hw, "i40e_add_pd_table_entry: bad pd_index\n"); goto exit; } @@ -141,7 +134,7 @@ int i40e_add_pd_table_entry(struct i40e_hw *hw, page = rsrc_pg; } else { /* allocate a 4K backing page */ - ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp, + ret_code = i40e_allocate_dma_mem(hw, page, I40E_HMC_PAGED_BP_SIZE, I40E_HMC_PD_BP_BUF_ALIGNMENT); if (ret_code) @@ -200,13 +193,13 @@ int i40e_remove_pd_bp(struct i40e_hw *hw, sd_idx = idx / I40E_HMC_PD_CNT_IN_SD; rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD; if 
(sd_idx >= hmc_info->sd_table.sd_cnt) { - ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX; + ret_code = -EINVAL; hw_dbg(hw, "i40e_remove_pd_bp: bad idx\n"); goto exit; } sd_entry = &hmc_info->sd_table.sd_entry[sd_idx]; if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) { - ret_code = I40E_ERR_INVALID_SD_TYPE; + ret_code = -EINVAL; hw_dbg(hw, "i40e_remove_pd_bp: wrong sd_entry type\n"); goto exit; } @@ -251,7 +244,7 @@ int i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, sd_entry = &hmc_info->sd_table.sd_entry[idx]; I40E_DEC_BP_REFCNT(&sd_entry->u.bp); if (sd_entry->u.bp.ref_cnt) { - ret_code = I40E_ERR_NOT_READY; + ret_code = -EBUSY; goto exit; } I40E_DEC_SD_REFCNT(&hmc_info->sd_table); @@ -276,7 +269,7 @@ int i40e_remove_sd_bp_new(struct i40e_hw *hw, struct i40e_hmc_sd_entry *sd_entry; if (!is_pf) - return I40E_NOT_SUPPORTED; + return -EOPNOTSUPP; /* get the entry and decrease its ref counter */ sd_entry = &hmc_info->sd_table.sd_entry[idx]; @@ -299,7 +292,7 @@ int i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, sd_entry = &hmc_info->sd_table.sd_entry[idx]; if (sd_entry->u.pd_table.ref_cnt) { - ret_code = I40E_ERR_NOT_READY; + ret_code = -EBUSY; goto exit; } @@ -325,7 +318,7 @@ int i40e_remove_pd_page_new(struct i40e_hw *hw, struct i40e_hmc_sd_entry *sd_entry; if (!is_pf) - return I40E_NOT_SUPPORTED; + return -EOPNOTSUPP; sd_entry = &hmc_info->sd_table.sd_entry[idx]; I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED); diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h index 9960da07a573..480e3a883cc7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h @@ -4,6 +4,10 @@ #ifndef _I40E_HMC_H_ #define _I40E_HMC_H_ +#include "i40e_alloc.h" +#include "i40e_io.h" +#include "i40e_register.h" + #define I40E_HMC_MAX_BP_COUNT 512 /* forward-declare the HW struct for the compiler */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_io.h b/drivers/net/ethernet/intel/i40e/i40e_io.h new file mode 100644 index 000000000000..2a2ed9a1d476 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_io.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2023 Intel Corporation. */ + +#ifndef _I40E_IO_H_ +#define _I40E_IO_H_ + +/* get readq/writeq support for 32 bit kernels, use the low-first version */ +#include <linux/io-64-nonatomic-lo-hi.h> + +#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) +#define rd32(a, reg) readl((a)->hw_addr + (reg)) + +#define rd64(a, reg) readq((a)->hw_addr + (reg)) +#define i40e_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT) + +#endif /* _I40E_IO_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index 40c101f286d1..beaaf5c309d5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -1,13 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. 
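The new i40e_io.h above carries only the raw register accessors. A short usage sketch (the register name is an illustrative pick, not taken from this hunk): MMIO writes over PCIe are posted, so the read in i40e_flush() is what guarantees a preceding write has actually reached the device:

	wr32(hw, I40E_PFINT_ICR0_ENA, 0);	/* e.g. mask an interrupt-enable register */
	i40e_flush(hw);				/* read-back forces the posted write out */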
*/ -#include "i40e.h" -#include "i40e_osdep.h" -#include "i40e_register.h" -#include "i40e_type.h" -#include "i40e_hmc.h" +#include "i40e_alloc.h" +#include "i40e_debug.h" #include "i40e_lan_hmc.h" -#include "i40e_prototype.h" +#include "i40e_type.h" /* lan specific interface functions */ @@ -111,7 +108,7 @@ int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, /* validate values requested by driver don't exceed HMC capacity */ if (txq_num > obj->max_cnt) { - ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + ret_code = -EINVAL; hw_dbg(hw, "i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", txq_num, obj->max_cnt, ret_code); goto init_lan_hmc_out; @@ -134,7 +131,7 @@ int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, /* validate values requested by driver don't exceed HMC capacity */ if (rxq_num > obj->max_cnt) { - ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + ret_code = -EINVAL; hw_dbg(hw, "i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", rxq_num, obj->max_cnt, ret_code); goto init_lan_hmc_out; @@ -157,7 +154,7 @@ int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, /* validate values requested by driver don't exceed HMC capacity */ if (fcoe_cntx_num > obj->max_cnt) { - ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + ret_code = -EINVAL; hw_dbg(hw, "i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", fcoe_cntx_num, obj->max_cnt, ret_code); goto init_lan_hmc_out; @@ -180,7 +177,7 @@ int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, /* validate values requested by driver don't exceed HMC capacity */ if (fcoe_filt_num > obj->max_cnt) { - ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + ret_code = -EINVAL; hw_dbg(hw, "i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n", fcoe_filt_num, obj->max_cnt, ret_code); goto init_lan_hmc_out; @@ -289,30 +286,30 @@ static int i40e_create_lan_hmc_object(struct i40e_hw *hw, u32 i, j; if (NULL == info) { - ret_code = I40E_ERR_BAD_PTR; + ret_code = -EINVAL; hw_dbg(hw, "i40e_create_lan_hmc_object: bad info ptr\n"); goto exit; } if (NULL == info->hmc_info) { - ret_code = I40E_ERR_BAD_PTR; + ret_code = -EINVAL; hw_dbg(hw, "i40e_create_lan_hmc_object: bad hmc_info ptr\n"); goto exit; } if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) { - ret_code = I40E_ERR_BAD_PTR; + ret_code = -EINVAL; hw_dbg(hw, "i40e_create_lan_hmc_object: bad signature\n"); goto exit; } if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) { - ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX; + ret_code = -EINVAL; hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n", ret_code); goto exit; } if ((info->start_idx + info->count) > info->hmc_info->hmc_obj[info->rsrc_type].cnt) { - ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + ret_code = -EINVAL; hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n", ret_code); goto exit; @@ -324,8 +321,8 @@ static int i40e_create_lan_hmc_object(struct i40e_hw *hw, &sd_idx, &sd_lmt); if (sd_idx >= info->hmc_info->sd_table.sd_cnt || sd_lmt > info->hmc_info->sd_table.sd_cnt) { - ret_code = I40E_ERR_INVALID_SD_INDEX; - goto exit; + ret_code = -EINVAL; + goto exit; } /* find pd index */ I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, @@ -393,7 +390,7 @@ static int i40e_create_lan_hmc_object(struct i40e_hw *hw, j, sd_entry->entry_type); break; default: - ret_code = I40E_ERR_INVALID_SD_TYPE; + ret_code = -EINVAL; goto exit; } } @@ -417,7 +414,7 @@ exit_sd_error: 
i40e_remove_sd_bp(hw, info->hmc_info, (j - 1)); break; default: - ret_code = I40E_ERR_INVALID_SD_TYPE; + ret_code = -EINVAL; break; } j--; @@ -474,7 +471,7 @@ try_type_paged: break; default: /* unsupported type */ - ret_code = I40E_ERR_INVALID_SD_TYPE; + ret_code = -EINVAL; hw_dbg(hw, "i40e_configure_lan_hmc: Unknown SD type: %d\n", ret_code); goto configure_lan_hmc_out; @@ -530,34 +527,34 @@ static int i40e_delete_lan_hmc_object(struct i40e_hw *hw, u32 i, j; if (NULL == info) { - ret_code = I40E_ERR_BAD_PTR; + ret_code = -EINVAL; hw_dbg(hw, "i40e_delete_hmc_object: bad info ptr\n"); goto exit; } if (NULL == info->hmc_info) { - ret_code = I40E_ERR_BAD_PTR; + ret_code = -EINVAL; hw_dbg(hw, "i40e_delete_hmc_object: bad info->hmc_info ptr\n"); goto exit; } if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) { - ret_code = I40E_ERR_BAD_PTR; + ret_code = -EINVAL; hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->signature\n"); goto exit; } if (NULL == info->hmc_info->sd_table.sd_entry) { - ret_code = I40E_ERR_BAD_PTR; + ret_code = -EINVAL; hw_dbg(hw, "i40e_delete_hmc_object: bad sd_entry\n"); goto exit; } if (NULL == info->hmc_info->hmc_obj) { - ret_code = I40E_ERR_BAD_PTR; + ret_code = -EINVAL; hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->hmc_obj\n"); goto exit; } if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) { - ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX; + ret_code = -EINVAL; hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n", ret_code); goto exit; @@ -565,7 +562,7 @@ static int i40e_delete_lan_hmc_object(struct i40e_hw *hw, if ((info->start_idx + info->count) > info->hmc_info->hmc_obj[info->rsrc_type].cnt) { - ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + ret_code = -EINVAL; hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n", ret_code); goto exit; @@ -599,7 +596,7 @@ static int i40e_delete_lan_hmc_object(struct i40e_hw *hw, &sd_idx, &sd_lmt); if (sd_idx >= info->hmc_info->sd_table.sd_cnt || sd_lmt > info->hmc_info->sd_table.sd_cnt) { - ret_code = I40E_ERR_INVALID_SD_INDEX; + ret_code = -EINVAL; goto exit; } @@ -987,29 +984,29 @@ int i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base, int ret_code = 0; if (NULL == hmc_info) { - ret_code = I40E_ERR_BAD_PTR; + ret_code = -EINVAL; hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info ptr\n"); goto exit; } if (NULL == hmc_info->hmc_obj) { - ret_code = I40E_ERR_BAD_PTR; + ret_code = -EINVAL; hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n"); goto exit; } if (NULL == object_base) { - ret_code = I40E_ERR_BAD_PTR; + ret_code = -EINVAL; hw_dbg(hw, "i40e_hmc_get_object_va: bad object_base ptr\n"); goto exit; } if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) { - ret_code = I40E_ERR_BAD_PTR; + ret_code = -EINVAL; hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->signature\n"); goto exit; } if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) { hw_dbg(hw, "i40e_hmc_get_object_va: returns error %d\n", ret_code); - ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX; + ret_code = -EINVAL; goto exit; } /* find sd index and limit */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h index 9f960404c2b3..305a276953b0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h @@ -4,6 +4,8 @@ #ifndef _I40E_LAN_HMC_H_ #define _I40E_LAN_HMC_H_ +#include "i40e_hmc.h" + /* forward-declare the HW struct for the compiler */ struct i40e_hw; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c 
b/drivers/net/ethernet/intel/i40e/i40e_main.c index a86bfa3bba74..1ab8dbe2d880 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1,19 +1,22 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2021 Intel Corporation. */ -#include <linux/etherdevice.h> -#include <linux/of_net.h> -#include <linux/pci.h> -#include <linux/bpf.h> #include <generated/utsrelease.h> #include <linux/crash_dump.h> +#include <linux/if_bridge.h> +#include <linux/if_macvlan.h> +#include <linux/module.h> +#include <net/pkt_cls.h> +#include <net/xdp_sock_drv.h> /* Local includes */ #include "i40e.h" +#include "i40e_devids.h" #include "i40e_diag.h" +#include "i40e_lan_hmc.h" +#include "i40e_virtchnl_pf.h" #include "i40e_xsk.h" -#include <net/udp_tunnel.h> -#include <net/xdp_sock_drv.h> + /* All i40e tracepoints are defined by the include below, which * must be included exactly once across the whole kernel with * CREATE_TRACE_POINTS defined @@ -120,16 +123,27 @@ static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f, } /** - * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code + * i40e_hw_to_dev - get device pointer from the hardware structure + * @hw: pointer to the device HW structure + **/ +struct device *i40e_hw_to_dev(struct i40e_hw *hw) +{ + struct i40e_pf *pf = i40e_hw_to_pf(hw); + + return &pf->pdev->dev; +} + +/** + * i40e_allocate_dma_mem - OS specific memory alloc for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to fill out * @size: size of memory requested * @alignment: what to align the allocation to **/ -int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem, - u64 size, u32 alignment) +int i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem, + u64 size, u32 alignment) { - struct i40e_pf *pf = (struct i40e_pf *)hw->back; + struct i40e_pf *pf = i40e_hw_to_pf(hw); mem->size = ALIGN(size, alignment); mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa, @@ -141,13 +155,13 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem, } /** - * i40e_free_dma_mem_d - OS specific memory free for shared code + * i40e_free_dma_mem - OS specific memory free for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to free **/ -int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) +int i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem) { - struct i40e_pf *pf = (struct i40e_pf *)hw->back; + struct i40e_pf *pf = i40e_hw_to_pf(hw); dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa); mem->va = NULL; @@ -158,13 +172,13 @@ int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) } /** - * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code + * i40e_allocate_virt_mem - OS specific memory alloc for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to fill out * @size: size of memory requested **/ -int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem, - u32 size) +int i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem, + u32 size) { mem->size = size; mem->va = kzalloc(size, GFP_KERNEL); @@ -176,11 +190,11 @@ int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem, } /** - * i40e_free_virt_mem_d - OS specific memory free for shared code + * i40e_free_virt_mem - OS specific memory free for shared code * @hw: pointer to the HW structure * @mem: ptr to mem struct to free **/ -int 
i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem) +int i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem) { /* it's ok to kfree a NULL pointer */ kfree(mem->va); @@ -489,6 +503,7 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, stats->tx_dropped = vsi_stats->tx_dropped; stats->rx_errors = vsi_stats->rx_errors; stats->rx_dropped = vsi_stats->rx_dropped; + stats->rx_missed_errors = vsi_stats->rx_missed_errors; stats->rx_crc_errors = vsi_stats->rx_crc_errors; stats->rx_length_errors = vsi_stats->rx_length_errors; } @@ -680,17 +695,13 @@ i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw, struct i40e_eth_stats *stat_offset, struct i40e_eth_stats *stat) { - u64 rx_rdpc, rx_rxerr; - i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded, - &stat_offset->rx_discards, &rx_rdpc); + &stat_offset->rx_discards, &stat->rx_discards); i40e_stat_update64(hw, I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)), I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)), offset_loaded, &stat_offset->rx_discards_other, - &rx_rxerr); - - stat->rx_discards = rx_rdpc + rx_rxerr; + &stat->rx_discards_other); } /** @@ -712,9 +723,6 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi) i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx), vsi->stat_offsets_loaded, &oes->tx_errors, &es->tx_errors); - i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), - vsi->stat_offsets_loaded, - &oes->rx_discards, &es->rx_discards); i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx), vsi->stat_offsets_loaded, &oes->rx_unknown_protocol, &es->rx_unknown_protocol); @@ -971,8 +979,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) ns->tx_errors = es->tx_errors; ons->multicast = oes->rx_multicast; ns->multicast = es->rx_multicast; - ons->rx_dropped = oes->rx_discards; - ns->rx_dropped = es->rx_discards; + ons->rx_dropped = oes->rx_discards_other; + ns->rx_dropped = es->rx_discards_other; + ons->rx_missed_errors = oes->rx_discards; + ns->rx_missed_errors = es->rx_discards; ons->tx_dropped = oes->tx_discards; ns->tx_dropped = es->tx_discards; @@ -3586,11 +3596,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) if (ring->xsk_pool) { ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool); - /* For AF_XDP ZC, we disallow packets to span on - * multiple buffers, thus letting us skip that - * handling in the fast-path. 
- */ - chain_len = 1; ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_XSK_BUFF_POOL, NULL); @@ -5715,7 +5720,7 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset) int ret; if (!vsi) - return I40E_ERR_PARAM; + return -EINVAL; pf = vsi->back; hw = &pf->hw; @@ -7159,7 +7164,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) */ if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) { dev_info(&pf->pdev->dev, "DCB is not supported.\n"); - err = I40E_NOT_SUPPORTED; + err = -EOPNOTSUPP; goto out; } if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) { @@ -7469,7 +7474,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up) if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) non_zero_phy_type = true; else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0) - return I40E_SUCCESS; + return 0; /* To force link we need to set bits for all supported PHY types, * but there are now more than 32, so we need to split the bitmap @@ -7520,7 +7525,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up) i40e_aq_set_link_restart_an(hw, is_up, NULL); - return I40E_SUCCESS; + return 0; } /** @@ -8367,7 +8372,7 @@ int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, }; if (filter->flags >= ARRAY_SIZE(flag_table)) - return I40E_ERR_CONFIG; + return -EIO; memset(&cld_filter, 0, sizeof(cld_filter)); @@ -8531,15 +8536,15 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, u8 field_flags = 0; if (dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_VLAN) | - BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_PORTS) | - BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { - dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n", + ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID))) { + dev_err(&pf->pdev->dev, "Unsupported key used: 0x%llx\n", dissector->used_keys); return -EOPNOTSUPP; } @@ -8581,7 +8586,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, } else { dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n", match.mask->dst); - return I40E_ERR_CONFIG; + return -EIO; } } @@ -8591,7 +8596,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, } else { dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n", match.mask->src); - return I40E_ERR_CONFIG; + return -EIO; } } ether_addr_copy(filter->dst_mac, match.key->dst); @@ -8609,7 +8614,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, } else { dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n", match.mask->vlan_id); - return I40E_ERR_CONFIG; + return -EIO; } } @@ -8633,7 +8638,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, } else { dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n", &match.mask->dst); - return I40E_ERR_CONFIG; + return -EIO; } } @@ -8643,13 +8648,13 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, } else { dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n", &match.mask->src); - return I40E_ERR_CONFIG; + return -EIO; } } if (field_flags & I40E_CLOUD_FIELD_TEN_ID) { dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n"); - return I40E_ERR_CONFIG; + return -EIO; } filter->dst_ipv4 = match.key->dst; filter->src_ipv4 = match.key->src; @@ 
-8667,7 +8672,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, ipv6_addr_loopback(&match.key->src)) { dev_err(&pf->pdev->dev, "Bad ipv6, addr is LOOPBACK\n"); - return I40E_ERR_CONFIG; + return -EIO; } if (!ipv6_addr_any(&match.mask->dst) || !ipv6_addr_any(&match.mask->src)) @@ -8689,7 +8694,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, } else { dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n", be16_to_cpu(match.mask->src)); - return I40E_ERR_CONFIG; + return -EIO; } } @@ -8699,7 +8704,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, } else { dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n", be16_to_cpu(match.mask->dst)); - return I40E_ERR_CONFIG; + return -EIO; } } @@ -9907,11 +9912,11 @@ static void i40e_link_event(struct i40e_pf *pf) status = i40e_get_link_status(&pf->hw, &new_link); /* On success, disable temp link polling */ - if (status == I40E_SUCCESS) { + if (status == 0) { clear_bit(__I40E_TEMP_LINK_POLLING, pf->state); } else { /* Enable link polling temporarily until i40e_get_link_status - * returns I40E_SUCCESS + * returns 0 */ set_bit(__I40E_TEMP_LINK_POLLING, pf->state); dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", @@ -10165,7 +10170,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) do { ret = i40e_clean_arq_element(hw, &event, &pending); - if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) + if (ret == -EALREADY) break; else if (ret) { dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); @@ -10793,7 +10798,9 @@ static void i40e_get_oem_version(struct i40e_hw *hw) &gen_snap); i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET, &release); - hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release; + hw->nvm.oem_ver = + FIELD_PREP(I40E_OEM_GEN_MASK | I40E_OEM_SNAP_MASK, gen_snap) | + FIELD_PREP(I40E_OEM_RELEASE_MASK, release); hw->nvm.eetrack = I40E_OEM_EETRACK_ID; } @@ -12575,7 +12582,7 @@ int i40e_commit_partition_bw_setting(struct i40e_pf *pf) dev_info(&pf->pdev->dev, "Commit BW only works on partition 1! This is partition %d", pf->hw.partition_id); - ret = I40E_NOT_SUPPORTED; + ret = -EOPNOTSUPP; goto bw_commit_out; } @@ -12657,10 +12664,10 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf) #define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1 #define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0) #define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4 - int read_status = I40E_SUCCESS; u16 sr_emp_sr_settings_ptr = 0; u16 features_enable = 0; u16 link_behavior = 0; + int read_status = 0; bool ret = false; read_status = i40e_read_nvm_word(&pf->hw, @@ -13823,6 +13830,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_XSK_ZEROCOPY | NETDEV_XDP_ACT_RX_SG; + netdev->xdp_zc_max_segs = I40E_MAX_BUFFER_TXD; } else { /* Relate the VSI_VMDQ name to the VSI_MAIN name. 
Note that we * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to @@ -14205,6 +14213,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi) } set_bit(__I40E_VSI_RELEASING, vsi->state); uplink_seid = vsi->uplink_seid; + if (vsi->type != I40E_VSI_SRIOV) { if (vsi->netdev_registered) { vsi->netdev_registered = false; @@ -14218,6 +14227,9 @@ int i40e_vsi_release(struct i40e_vsi *vsi) i40e_vsi_disable_irq(vsi); } + if (vsi->type == I40E_VSI_MAIN) + i40e_devlink_destroy_port(pf); + spin_lock_bh(&vsi->mac_filter_hash_lock); /* clear the sync flag on all filters */ @@ -14398,6 +14410,8 @@ err_rings: free_netdev(vsi->netdev); vsi->netdev = NULL; } + if (vsi->type == I40E_VSI_MAIN) + i40e_devlink_destroy_port(pf); i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); err_vsi: i40e_vsi_clear(vsi); @@ -14538,9 +14552,15 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, ret = i40e_netif_set_realnum_tx_rx_queues(vsi); if (ret) goto err_netdev; + if (vsi->type == I40E_VSI_MAIN) { + ret = i40e_devlink_create_port(pf); + if (ret) + goto err_netdev; + SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); + } ret = register_netdev(vsi->netdev); if (ret) - goto err_netdev; + goto err_dl_port; vsi->netdev_registered = true; netif_carrier_off(vsi->netdev); #ifdef CONFIG_I40E_DCB @@ -14583,6 +14603,9 @@ err_msix: free_netdev(vsi->netdev); vsi->netdev = NULL; } +err_dl_port: + if (vsi->type == I40E_VSI_MAIN) + i40e_devlink_destroy_port(pf); err_netdev: i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); err_vsi: @@ -15467,12 +15490,12 @@ static int i40e_pf_loop_reset(struct i40e_pf *pf) int ret; ret = i40e_pf_reset(hw); - while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) { + while (ret != 0 && time_before(jiffies, time_end)) { usleep_range(10000, 20000); ret = i40e_pf_reset(hw); } - if (ret == I40E_SUCCESS) + if (ret == 0) pf->pfr_count++; else dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret); @@ -15515,10 +15538,10 @@ static int i40e_handle_resets(struct i40e_pf *pf) const int pfr = i40e_pf_loop_reset(pf); const bool is_empr = i40e_check_fw_empr(pf); - if (is_empr || pfr != I40E_SUCCESS) + if (is_empr || pfr != 0) dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n"); - return is_empr ? I40E_ERR_RESET_FAILED : pfr; + return is_empr ? -EIO : pfr; } /** @@ -15613,7 +15636,7 @@ err_switch_setup: iounmap(hw->hw_addr); pci_release_mem_regions(pf->pdev); pci_disable_device(pf->pdev); - kfree(pf); + i40e_free_pf(pf); return err; } @@ -15627,10 +15650,10 @@ err_switch_setup: **/ static inline void i40e_set_subsystem_device_id(struct i40e_hw *hw) { - struct pci_dev *pdev = ((struct i40e_pf *)hw->back)->pdev; + struct i40e_pf *pf = i40e_hw_to_pf(hw); - hw->subsystem_device_id = pdev->subsystem_device ? - pdev->subsystem_device : + hw->subsystem_device_id = pf->pdev->subsystem_device ? + pf->pdev->subsystem_device : (ushort)(rd32(hw, I40E_PFPCI_SUBSYSID) & USHRT_MAX); } @@ -15655,6 +15678,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct i40e_hw *hw; static u16 pfs_found; u16 wol_nvm_bits; + char nvm_ver[32]; u16 link_status; #ifdef CONFIG_I40E_DCB int status; @@ -15690,7 +15714,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * the Admin Queue structures and then querying for the * device's current profile information. 
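A condensed sketch of the ordering the i40e_vsi_setup() hunk above establishes for the main VSI: the devlink port has to exist before register_netdev() so SET_NETDEV_DEVLINK_PORT() can tie netdev and port together, and it has to be unwound on any later failure. Labels are simplified relative to the patch:

	err = i40e_devlink_create_port(pf);
	if (err)
		goto err_netdev;
	SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);

	err = register_netdev(vsi->netdev);
	if (err)
		goto err_dl_port;	/* i40e_devlink_destroy_port() runs here */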
*/ - pf = kzalloc(sizeof(*pf), GFP_KERNEL); + pf = i40e_alloc_pf(&pdev->dev); if (!pf) { err = -ENOMEM; goto err_pf_alloc; @@ -15700,7 +15724,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) set_bit(__I40E_DOWN, pf->state); hw = &pf->hw; - hw->back = pf; pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0), I40E_MAX_CSR_SPACE); @@ -15811,7 +15834,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = i40e_init_adminq(hw); if (err) { - if (err == I40E_ERR_FIRMWARE_API_VERSION) + if (err == -EIO) dev_info(&pdev->dev, "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n", hw->aq.api_maj_ver, @@ -15825,13 +15848,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_pf_reset; } i40e_get_oem_version(hw); + i40e_get_pba_string(hw); /* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */ + i40e_nvm_version_str(hw, nvm_ver, sizeof(nvm_ver)); dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n", hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, - hw->aq.api_maj_ver, hw->aq.api_min_ver, - i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id, - hw->subsystem_vendor_id, hw->subsystem_device_id); + hw->aq.api_maj_ver, hw->aq.api_min_ver, nvm_ver, + hw->vendor_id, hw->device_id, hw->subsystem_vendor_id, + hw->subsystem_device_id); if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) @@ -16199,7 +16224,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT; if (val < MAX_FRAME_SIZE_DEFAULT) dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n", - i, val); + pf->hw.port, val); /* Add a filter to drop all Flow control frames from any VSI from being * transmitted. By doing so we stop a malicious VF from sending out @@ -16218,6 +16243,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* print a string summarizing features */ i40e_print_features(pf); + i40e_devlink_register(pf); + return 0; /* Unwind what we've done if something failed in the setup */ @@ -16238,7 +16265,7 @@ err_adminq_setup: err_pf_reset: iounmap(hw->hw_addr); err_ioremap: - kfree(pf); + i40e_free_pf(pf); err_pf_alloc: pci_release_mem_regions(pdev); err_pci_reg: @@ -16263,6 +16290,8 @@ static void i40e_remove(struct pci_dev *pdev) int ret_code; int i; + i40e_devlink_unregister(pf); + i40e_dbg_pf_exit(pf); i40e_ptp_stop(pf); @@ -16324,11 +16353,15 @@ static void i40e_remove(struct pci_dev *pdev) i40e_switch_branch_release(pf->veb[i]); } - /* Now we can shutdown the PF's VSI, just before we kill + /* Now we can shutdown the PF's VSIs, just before we kill * adminq and hmc. 
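The probe and remove hunks above pin down the devlink lifecycle rule this series follows: registration is the last step of a successful probe, once the device is fully initialized, and unregistration is the first step of remove, so user space never observes a half-built instance. Sketched with the surrounding setup and teardown elided:

	/* tail of i40e_probe(), after all other setup succeeded */
	i40e_devlink_register(pf);
	return 0;

	/* head of i40e_remove() */
	i40e_devlink_unregister(pf);
	/* ... VSI, adminq and HMC teardown; i40e_free_pf() releases the devlink */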
*/ - if (pf->vsi[pf->lan_vsi]) - i40e_vsi_release(pf->vsi[pf->lan_vsi]); + for (i = pf->num_alloc_vsi; i--;) + if (pf->vsi[i]) { + i40e_vsi_close(pf->vsi[i]); + i40e_vsi_release(pf->vsi[i]); + pf->vsi[i] = NULL; + } i40e_cloud_filter_exit(pf); @@ -16384,7 +16417,7 @@ unmap: kfree(pf->vsi); iounmap(hw->hw_addr); - kfree(pf); + i40e_free_pf(pf); pci_release_mem_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index f99c1f7fec40..77cdbfc19d47 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ +#include <linux/delay.h> +#include "i40e_alloc.h" #include "i40e_prototype.h" /** @@ -37,7 +39,7 @@ int i40e_init_nvm(struct i40e_hw *hw) nvm->blank_nvm_mode = false; } else { /* Blank programming mode */ nvm->blank_nvm_mode = true; - ret_code = I40E_ERR_NVM_BLANK_MODE; + ret_code = -EIO; i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n"); } @@ -111,8 +113,8 @@ i40e_i40e_acquire_nvm_exit: **/ void i40e_release_nvm(struct i40e_hw *hw) { - int ret_code = I40E_SUCCESS; u32 total_delay = 0; + int ret_code = 0; if (hw->nvm.blank_nvm_mode) return; @@ -122,7 +124,7 @@ void i40e_release_nvm(struct i40e_hw *hw) /* there are some rare cases when trying to release the resource * results in an admin Q timeout, so handle them correctly */ - while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) && + while ((ret_code == -EIO) && (total_delay < hw->aq.asq_cmd_timeout)) { usleep_range(1000, 2000); ret_code = i40e_aq_release_resource(hw, @@ -140,7 +142,7 @@ void i40e_release_nvm(struct i40e_hw *hw) **/ static int i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) { - int ret_code = I40E_ERR_TIMEOUT; + int ret_code = -EIO; u32 srctl, wait_cnt; /* Poll the I40E_GLNVM_SRCTL until the done bit is set */ @@ -152,7 +154,7 @@ static int i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) } udelay(5); } - if (ret_code == I40E_ERR_TIMEOUT) + if (ret_code == -EIO) i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set"); return ret_code; } @@ -168,14 +170,14 @@ static int i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, u16 *data) { - int ret_code = I40E_ERR_TIMEOUT; + int ret_code = -EIO; u32 sr_reg; if (offset >= hw->nvm.sr_size) { i40e_debug(hw, I40E_DEBUG_NVM, "NVM read error: offset %d beyond Shadow RAM limit %d\n", offset, hw->nvm.sr_size); - ret_code = I40E_ERR_PARAM; + ret_code = -EINVAL; goto read_nvm_exit; } @@ -222,7 +224,7 @@ static int i40e_read_nvm_aq(struct i40e_hw *hw, bool last_command) { struct i40e_asq_cmd_details cmd_details; - int ret_code = I40E_ERR_NVM; + int ret_code = -EIO; memset(&cmd_details, 0, sizeof(cmd_details)); cmd_details.wb_desc = &hw->nvm_wb_desc; @@ -267,7 +269,7 @@ static int i40e_read_nvm_aq(struct i40e_hw *hw, static int i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, u16 *data) { - int ret_code = I40E_ERR_TIMEOUT; + int ret_code = -EIO; ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true); *data = le16_to_cpu(*(__le16 *)data); @@ -348,7 +350,7 @@ int i40e_read_nvm_module_data(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_ALL, "Reading nvm word failed.Error code: %d.\n", status); - return I40E_ERR_NVM; + return -EIO; } } #define I40E_NVM_INVALID_PTR_VAL 0x7FFF @@ -358,7 +360,7 @@ int i40e_read_nvm_module_data(struct i40e_hw *hw, if (ptr_value == 
I40E_NVM_INVALID_PTR_VAL || ptr_value == I40E_NVM_INVALID_VAL) { i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n"); - return I40E_ERR_BAD_PTR; + return -EINVAL; } /* Check whether the module is in SR mapped area or outside */ @@ -367,7 +369,7 @@ int i40e_read_nvm_module_data(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_ALL, "Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n"); - return I40E_ERR_PARAM; + return -EINVAL; } else { /* Read from the Shadow RAM */ @@ -377,7 +379,7 @@ int i40e_read_nvm_module_data(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_ALL, "Reading nvm word failed.Error code: %d.\n", status); - return I40E_ERR_NVM; + return -EIO; } offset = ptr_value + module_offset + specific_ptr + @@ -549,7 +551,7 @@ static int i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, bool last_command) { struct i40e_asq_cmd_details cmd_details; - int ret_code = I40E_ERR_NVM; + int ret_code = -EIO; memset(&cmd_details, 0, sizeof(cmd_details)); cmd_details.wb_desc = &hw->nvm_wb_desc; @@ -614,7 +616,7 @@ static int i40e_calc_nvm_checksum(struct i40e_hw *hw, /* read pointer to VPD area */ ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module); if (ret_code) { - ret_code = I40E_ERR_NVM_CHECKSUM; + ret_code = -EIO; goto i40e_calc_nvm_checksum_exit; } @@ -622,7 +624,7 @@ static int i40e_calc_nvm_checksum(struct i40e_hw *hw, ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, &pcie_alt_module); if (ret_code) { - ret_code = I40E_ERR_NVM_CHECKSUM; + ret_code = -EIO; goto i40e_calc_nvm_checksum_exit; } @@ -636,7 +638,7 @@ static int i40e_calc_nvm_checksum(struct i40e_hw *hw, ret_code = __i40e_read_nvm_buffer(hw, i, &words, data); if (ret_code) { - ret_code = I40E_ERR_NVM_CHECKSUM; + ret_code = -EIO; goto i40e_calc_nvm_checksum_exit; } } @@ -724,7 +726,7 @@ int i40e_validate_nvm_checksum(struct i40e_hw *hw, * calculated checksum */ if (checksum_local != checksum_sr) - ret_code = I40E_ERR_NVM_CHECKSUM; + ret_code = -EIO; /* If the user cares, return the calculated checksum */ if (checksum) @@ -839,7 +841,7 @@ int i40e_nvmupd_command(struct i40e_hw *hw, if (upd_cmd == I40E_NVMUPD_STATUS) { if (!cmd->data_size) { *perrno = -EFAULT; - return I40E_ERR_BUF_TOO_SHORT; + return -EINVAL; } bytes[0] = hw->nvmupd_state; @@ -896,7 +898,7 @@ int i40e_nvmupd_command(struct i40e_hw *hw, break; } - status = I40E_ERR_NOT_READY; + status = -EBUSY; *perrno = -EBUSY; break; @@ -904,7 +906,7 @@ int i40e_nvmupd_command(struct i40e_hw *hw, /* invalid state, should never happen */ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: no such state %d\n", hw->nvmupd_state); - status = I40E_NOT_SUPPORTED; + status = -EOPNOTSUPP; *perrno = -ESRCH; break; } @@ -1045,7 +1047,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: bad cmd %s in init state\n", i40e_nvm_update_state_str[upd_cmd]); - status = I40E_ERR_NVM; + status = -EIO; *perrno = -ESRCH; break; } @@ -1087,7 +1089,7 @@ static int i40e_nvmupd_state_reading(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: bad cmd %s in reading state.\n", i40e_nvm_update_state_str[upd_cmd]); - status = I40E_NOT_SUPPORTED; + status = -EOPNOTSUPP; *perrno = -ESRCH; break; } @@ -1174,7 +1176,7 @@ retry: i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: bad cmd %s in writing state.\n", i40e_nvm_update_state_str[upd_cmd]); - status = I40E_NOT_SUPPORTED; + status = -EOPNOTSUPP; *perrno = -ESRCH; break; } @@ -1398,7 +1400,7 @@ static int i40e_nvmupd_exec_aq(struct i40e_hw *hw, "NVMUPD: not 
enough aq desc bytes for exec, size %d < %d\n", cmd->data_size, aq_desc_len); *perrno = -EINVAL; - return I40E_ERR_PARAM; + return -EINVAL; } aq_desc = (struct i40e_aq_desc *)bytes; @@ -1473,7 +1475,7 @@ static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n", __func__, cmd->offset, aq_total_len); *perrno = -EINVAL; - return I40E_ERR_PARAM; + return -EINVAL; } /* check copylength range */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h deleted file mode 100644 index 2bd4de03dafa..000000000000 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ /dev/null @@ -1,59 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_OSDEP_H_ -#define _I40E_OSDEP_H_ - -#include <linux/types.h> -#include <linux/if_ether.h> -#include <linux/if_vlan.h> -#include <linux/tcp.h> -#include <linux/pci.h> -#include <linux/highuid.h> - -/* get readq/writeq support for 32 bit kernels, use the low-first version */ -#include <linux/io-64-nonatomic-lo-hi.h> - -/* File to be the magic between shared code and - * actual OS primitives - */ - -#define hw_dbg(hw, S, A...) \ -do { \ - dev_dbg(&((struct i40e_pf *)hw->back)->pdev->dev, S, ##A); \ -} while (0) - -#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) -#define rd32(a, reg) readl((a)->hw_addr + (reg)) - -#define rd64(a, reg) readq((a)->hw_addr + (reg)) -#define i40e_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT) - -/* memory allocation tracking */ -struct i40e_dma_mem { - void *va; - dma_addr_t pa; - u32 size; -}; - -#define i40e_allocate_dma_mem(h, m, unused, s, a) \ - i40e_allocate_dma_mem_d(h, m, s, a) -#define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m) - -struct i40e_virt_mem { - void *va; - u32 size; -}; - -#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s) -#define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m) - -#define i40e_debug(h, m, s, ...) \ -do { \ - if (((m) & (h)->debug_mask)) \ - pr_info("i40e %02x:%02x.%x " s, \ - (h)->bus.bus_id, (h)->bus.device, \ - (h)->bus.func, ##__VA_ARGS__); \ -} while (0) - -#endif /* _I40E_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index fe845987d99a..001162042050 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -4,9 +4,10 @@ #ifndef _I40E_PROTOTYPE_H_ #define _I40E_PROTOTYPE_H_ -#include "i40e_type.h" -#include "i40e_alloc.h" +#include <linux/ethtool.h> #include <linux/avf/virtchnl.h> +#include "i40e_debug.h" +#include "i40e_type.h" /* Prototypes for shared code functions that are not in * the standard function pointer structures. 
These are @@ -18,7 +19,6 @@ /* adminq functions */ int i40e_init_adminq(struct i40e_hw *hw); void i40e_shutdown_adminq(struct i40e_hw *hw); -void i40e_adminq_init_ring_data(struct i40e_hw *hw); int i40e_clean_arq_element(struct i40e_hw *hw, struct i40e_arq_event_info *e, u16 *events_pending); @@ -51,7 +51,6 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw, void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, void *buffer, u16 buf_len); -void i40e_idle_aq(struct i40e_hw *hw); bool i40e_check_asq_alive(struct i40e_hw *hw); int i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); @@ -117,9 +116,6 @@ int i40e_aq_set_link_restart_an(struct i40e_hw *hw, int i40e_aq_get_link_info(struct i40e_hw *hw, bool enable_lse, struct i40e_link_status *link, struct i40e_asq_cmd_details *cmd_details); -int i40e_aq_set_local_advt_reg(struct i40e_hw *hw, - u64 advt_reg, - struct i40e_asq_cmd_details *cmd_details); int i40e_aq_send_driver_version(struct i40e_hw *hw, struct i40e_driver_version *dv, struct i40e_asq_cmd_details *cmd_details); @@ -269,9 +265,6 @@ int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); int i40e_aq_dcb_updated(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); -int i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw, - u16 seid, u16 credit, u8 max_bw, - struct i40e_asq_cmd_details *cmd_details); int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, struct i40e_asq_cmd_details *cmd_details); @@ -348,9 +341,7 @@ i40e_aq_configure_partition_bw(struct i40e_hw *hw, struct i40e_aqc_configure_partition_bw_data *bw_data, struct i40e_asq_cmd_details *cmd_details); int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr); -int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, - u32 pba_num_size); -int i40e_validate_mac_addr(u8 *mac_addr); +void i40e_get_pba_string(struct i40e_hw *hw); void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable); /* prototype for functions used for NVM access */ int i40e_init_nvm(struct i40e_hw *hw); @@ -425,14 +416,6 @@ i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed) /* prototype for functions used for SW locks */ /* i40e_common for VF drivers*/ -void i40e_vf_parse_hw_config(struct i40e_hw *hw, - struct virtchnl_vf_resource *msg); -int i40e_vf_reset(struct i40e_hw *hw); -int i40e_aq_send_msg_to_pf(struct i40e_hw *hw, - enum virtchnl_ops v_opcode, - int v_retval, - u8 *msg, u16 msglen, - struct i40e_asq_cmd_details *cmd_details); int i40e_set_filter_control(struct i40e_hw *hw, struct i40e_filter_control_settings *settings); int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, @@ -514,4 +497,8 @@ int i40e_add_pinfo_to_list(struct i40e_hw *hw, struct i40e_profile_segment *profile, u8 *profile_info_sec, u32 track_id); + +/* i40e_ddp */ +int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash); + #endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index c37abbb3cd06..20b77398f060 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -1,9 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. 
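A recurring header-slimming pattern in this series, also visible in the i40e_diag.h and i40e_lan_hmc.h hunks: a header that only handles struct i40e_hw through a pointer forward-declares it instead of pulling in i40e_type.h, which breaks include cycles and shrinks rebuild fan-out. Minimal form (the prototype is a made-up example):

	struct i40e_hw;					/* opaque at this point */
	int example_diag_test(struct i40e_hw *hw);	/* pointer-only use is fine */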
*/ -#include "i40e.h" #include <linux/ptp_classify.h> #include <linux/posix-clock.h> +#include "i40e.h" +#include "i40e_devids.h" /* The XL710 timesync is very much like Intel's 82599 design when it comes to * the fundamental clock design. However, the clock operations are much simpler @@ -1132,7 +1133,7 @@ int i40e_ptp_alloc_pins(struct i40e_pf *pf) if (!pf->ptp_pins) { dev_warn(&pf->pdev->dev, "Cannot allocate memory for PTP pins structure.\n"); - return -I40E_ERR_NO_MEMORY; + return -ENOMEM; } pf->ptp_pins->sdp3_2 = off; diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index 7339003aa17c..f6671ac79735 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -4,6 +4,9 @@ #ifndef _I40E_REGISTER_H_ #define _I40E_REGISTER_H_ +/* I40E_MASK is a macro used on 32 bit registers */ +#define I40E_MASK(mask, shift) ((u32)(mask) << (shift)) + #define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30 #define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT) #define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */ @@ -202,7 +205,9 @@ #define I40E_GLGEN_MSCA_DEVADD_SHIFT 16 #define I40E_GLGEN_MSCA_PHYADD_SHIFT 21 #define I40E_GLGEN_MSCA_OPCODE_SHIFT 26 +#define I40E_GLGEN_MSCA_OPCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_GLGEN_MSCA_STCODE_SHIFT 28 +#define I40E_GLGEN_MSCA_STCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_STCODE_SHIFT) #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30 #define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT) #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31 diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h deleted file mode 100644 index 4d2782e76038..000000000000 --- a/drivers/net/ethernet/intel/i40e/i40e_status.h +++ /dev/null @@ -1,43 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_STATUS_H_ -#define _I40E_STATUS_H_ - -/* Error Codes */ -enum i40e_status_code { - I40E_SUCCESS = 0, - I40E_ERR_NVM = -1, - I40E_ERR_NVM_CHECKSUM = -2, - I40E_ERR_CONFIG = -4, - I40E_ERR_PARAM = -5, - I40E_ERR_UNKNOWN_PHY = -7, - I40E_ERR_INVALID_MAC_ADDR = -10, - I40E_ERR_DEVICE_NOT_SUPPORTED = -11, - I40E_ERR_RESET_FAILED = -15, - I40E_ERR_NO_AVAILABLE_VSI = -17, - I40E_ERR_NO_MEMORY = -18, - I40E_ERR_BAD_PTR = -19, - I40E_ERR_INVALID_SIZE = -26, - I40E_ERR_QUEUE_EMPTY = -32, - I40E_ERR_TIMEOUT = -37, - I40E_ERR_INVALID_SD_INDEX = -45, - I40E_ERR_INVALID_PAGE_DESC_INDEX = -46, - I40E_ERR_INVALID_SD_TYPE = -47, - I40E_ERR_INVALID_HMC_OBJ_INDEX = -49, - I40E_ERR_INVALID_HMC_OBJ_COUNT = -50, - I40E_ERR_ADMIN_QUEUE_ERROR = -53, - I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54, - I40E_ERR_BUF_TOO_SHORT = -55, - I40E_ERR_ADMIN_QUEUE_FULL = -56, - I40E_ERR_ADMIN_QUEUE_NO_WORK = -57, - I40E_ERR_NVM_BLANK_MODE = -59, - I40E_ERR_NOT_IMPLEMENTED = -60, - I40E_ERR_DIAG_TEST_FAILED = -62, - I40E_ERR_NOT_READY = -63, - I40E_NOT_SUPPORTED = -64, - I40E_ERR_FIRMWARE_API_VERSION = -65, - I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66, -}; - -#endif /* _I40E_STATUS_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 8b8bf4880faa..dd410b15000f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1,14 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ -#include <linux/prefetch.h> #include <linux/bpf_trace.h> +#include <linux/prefetch.h> +#include <linux/sctp.h> #include <net/mpls.h> #include <net/xdp.h> -#include "i40e.h" -#include "i40e_trace.h" -#include "i40e_prototype.h" #include "i40e_txrx_common.h" +#include "i40e_trace.h" #include "i40e_xsk.h" #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) @@ -2284,8 +2283,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, * If the buffer is an EOP buffer, this function exits returning false, * otherwise return true indicating that this is in fact a non-EOP buffer. */ -static bool i40e_is_non_eop(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc) +bool i40e_is_non_eop(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc) { /* if we are the last buffer then there is nothing else to do */ #define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT) @@ -2405,7 +2404,7 @@ void i40e_update_rx_stats(struct i40e_ring *rx_ring, void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res) { if (xdp_res & I40E_XDP_REDIR) - xdp_do_flush_map(); + xdp_do_flush(); if (xdp_res & I40E_XDP_TX) { struct i40e_ring *xdp_ring = @@ -2544,7 +2543,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget, rx_buffer = i40e_rx_bi(rx_ring, ntp); i40e_inc_ntp(rx_ring); i40e_reuse_rx_page(rx_ring, rx_buffer); - cleaned_count++; + /* Update ntc and bump cleaned count if not in the + * middle of mb packet. + */ + if (rx_ring->next_to_clean == ntp) { + rx_ring->next_to_clean = + rx_ring->next_to_process; + cleaned_count++; + } continue; } @@ -2847,7 +2853,7 @@ tx_only: return budget; } - if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR) + if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR) q_vector->arm_wb_state = false; /* Exit the polling mode, but don't re-enable interrupts if stack might diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 8c3d24012c54..421fe5675584 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -5,6 +5,7 @@ #define _I40E_TXRX_H_ #include <net/xdp.h> +#include "i40e_type.h" /* Interrupt Throttling and Rate Limiting Goodies */ #define I40E_DEFAULT_IRQ_WORK 256 @@ -473,6 +474,8 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); bool __i40e_chk_linearize(struct sk_buff *skb); int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); +bool i40e_is_non_eop(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc); /** * i40e_get_head - Retrieve head from head writeback diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h index 8c5118c8baaf..e26807fd2123 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h @@ -4,6 +4,8 @@ #ifndef I40E_TXRX_COMMON_ #define I40E_TXRX_COMMON_ +#include "i40e.h" + int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring); void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw, u64 qword1); diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 388c3d36d96a..f95bc2a4a838 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -4,16 +4,9 @@ #ifndef _I40E_TYPE_H_ #define _I40E_TYPE_H_ -#include "i40e_status.h" -#include "i40e_osdep.h" -#include "i40e_register.h" 
+#include <uapi/linux/if_ether.h> #include "i40e_adminq.h" #include "i40e_hmc.h" -#include "i40e_lan_hmc.h" -#include "i40e_devids.h" - -/* I40E_MASK is a macro used on 32 bit registers */ -#define I40E_MASK(mask, shift) ((u32)(mask) << (shift)) #define I40E_MAX_VSI_QP 16 #define I40E_MAX_VF_VSI 4 @@ -44,48 +37,14 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); #define I40E_QTX_CTL_VM_QUEUE 0x1 #define I40E_QTX_CTL_PF_QUEUE 0x2 -/* debug masks - set these bits in hw->debug_mask to control output */ -enum i40e_debug_mask { - I40E_DEBUG_INIT = 0x00000001, - I40E_DEBUG_RELEASE = 0x00000002, - - I40E_DEBUG_LINK = 0x00000010, - I40E_DEBUG_PHY = 0x00000020, - I40E_DEBUG_HMC = 0x00000040, - I40E_DEBUG_NVM = 0x00000080, - I40E_DEBUG_LAN = 0x00000100, - I40E_DEBUG_FLOW = 0x00000200, - I40E_DEBUG_DCB = 0x00000400, - I40E_DEBUG_DIAG = 0x00000800, - I40E_DEBUG_FD = 0x00001000, - I40E_DEBUG_PACKAGE = 0x00002000, - I40E_DEBUG_IWARP = 0x00F00000, - I40E_DEBUG_AQ_MESSAGE = 0x01000000, - I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, - I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, - I40E_DEBUG_AQ_COMMAND = 0x06000000, - I40E_DEBUG_AQ = 0x0F000000, - - I40E_DEBUG_USER = 0xF0000000, - - I40E_DEBUG_ALL = 0xFFFFFFFF -}; - -#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_MASK(1, \ - I40E_GLGEN_MSCA_STCODE_SHIFT) -#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_MASK(1, \ - I40E_GLGEN_MSCA_OPCODE_SHIFT) -#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_MASK(2, \ - I40E_GLGEN_MSCA_OPCODE_SHIFT) - -#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_MASK(0, \ - I40E_GLGEN_MSCA_STCODE_SHIFT) -#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_MASK(0, \ - I40E_GLGEN_MSCA_OPCODE_SHIFT) -#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_MASK(1, \ - I40E_GLGEN_MSCA_OPCODE_SHIFT) -#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_MASK(3, \ - I40E_GLGEN_MSCA_OPCODE_SHIFT) +#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK(1) +#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_GLGEN_MSCA_OPCODE_MASK(1) +#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_GLGEN_MSCA_OPCODE_MASK(2) + +#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK(0) +#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_GLGEN_MSCA_OPCODE_MASK(0) +#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_GLGEN_MSCA_OPCODE_MASK(1) +#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_GLGEN_MSCA_OPCODE_MASK(3) #define I40E_PHY_COM_REG_PAGE 0x1E #define I40E_PHY_LED_LINK_MODE_MASK 0xF0 @@ -526,7 +485,6 @@ struct i40e_dcbx_config { /* Port hardware description */ struct i40e_hw { u8 __iomem *hw_addr; - void *back; /* subsystem structs */ struct i40e_phy_info phy; @@ -535,6 +493,9 @@ struct i40e_hw { struct i40e_nvm_info nvm; struct i40e_fc_info fc; + /* PBA ID */ + const char *pba_id; + /* pci info */ u16 device_id; u16 vendor_id; @@ -1456,7 +1417,7 @@ struct i40e_ddp_version { struct i40e_package_header { struct i40e_ddp_version version; u32 segment_count; - u32 segment_offset[1]; + u32 segment_offset[]; }; /* Generic segment header */ @@ -1487,12 +1448,12 @@ struct i40e_profile_segment { struct i40e_ddp_version version; char name[I40E_DDP_NAME_SIZE]; u32 device_table_count; - struct i40e_device_id_entry device_table[1]; + struct i40e_device_id_entry device_table[]; }; struct i40e_section_table { u32 section_count; - u32 section_offset[1]; + u32 section_offset[]; }; struct i40e_profile_section_header { @@ -1524,7 +1485,7 @@ struct i40e_profile_aq_section { u16 flags; u8 param[16]; u16 datalen; - u8 data[1]; + u8 data[]; }; struct 
i40e_profile_info { diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index be59ba3774e1..3f99eb198245 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2,6 +2,8 @@ /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e.h" +#include "i40e_lan_hmc.h" +#include "i40e_virtchnl_pf.h" /*********************notification routines***********************/ @@ -506,6 +508,7 @@ i40e_config_rdma_qvlist(struct i40e_vf *vf, struct virtchnl_rdma_qv_info *qv_info; u32 v_idx, i, reg_idx, reg; u32 next_q_idx, next_q_type; + size_t size; u32 msix_vf; int ret = 0; @@ -521,9 +524,9 @@ i40e_config_rdma_qvlist(struct i40e_vf *vf, } kfree(vf->qvlist_info); - vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info, - qvlist_info->num_vectors - 1), - GFP_KERNEL); + size = virtchnl_struct_size(vf->qvlist_info, qv_info, + qvlist_info->num_vectors); + vf->qvlist_info = kzalloc(size, GFP_KERNEL); if (!vf->qvlist_info) { ret = -ENOMEM; goto err_out; @@ -1346,14 +1349,14 @@ static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf, bool alluni) { struct i40e_pf *pf = vf->pf; - int aq_ret = I40E_SUCCESS; struct i40e_vsi *vsi; + int aq_ret = 0; u16 num_vlans; s16 *vl; vsi = i40e_find_vsi_from_id(pf, vsi_id); if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi) - return I40E_ERR_PARAM; + return -EINVAL; if (vf->port_vlan_id) { aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, @@ -1363,7 +1366,7 @@ static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf, i40e_get_vlan_list_sync(vsi, &num_vlans, &vl); if (!vl) - return I40E_ERR_NO_MEMORY; + return -ENOMEM; aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni, vl, num_vlans); @@ -2037,7 +2040,7 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg) if (VF_IS_V10(&vf->vf_ver)) info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, - I40E_SUCCESS, (u8 *)&info, + 0, (u8 *)&info, sizeof(struct virtchnl_version_info)); } @@ -2099,14 +2102,14 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) int ret; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err; } - len = struct_size(vfres, vsi_res, num_vsis); + len = virtchnl_struct_size(vfres, vsi_res, num_vsis); vfres = kzalloc(len, GFP_KERNEL); if (!vfres) { - aq_ret = I40E_ERR_NO_MEMORY; + aq_ret = -ENOMEM; len = 0; goto err; } @@ -2159,7 +2162,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) dev_err(&pf->pdev->dev, "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n", vf->vf_id); - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err; } vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; @@ -2227,7 +2230,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) int aq_ret = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err_out; } if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { @@ -2243,12 +2246,12 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) } if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err_out; } if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err_out; } @@ -2315,17 +2318,17 @@ static 
int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) int aq_ret = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } @@ -2333,7 +2336,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) for (i = 0; i < vf->num_tc; i++) num_qps_all += vf->ch[i].num_qps; if (num_qps_all != qci->num_queue_pairs) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } } @@ -2346,7 +2349,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) if (!vf->adq_enabled) { if (!i40e_vc_isvalid_queue_id(vf, vsi_id, qpi->txq.queue_id)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } @@ -2355,14 +2358,14 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) if (qpi->txq.vsi_id != qci->vsi_id || qpi->rxq.vsi_id != qci->vsi_id || qpi->rxq.queue_id != vsi_queue_id) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } } if (vf->adq_enabled) { if (idx >= ARRAY_SIZE(vf->ch)) { - aq_ret = I40E_ERR_NO_AVAILABLE_VSI; + aq_ret = -ENODEV; goto error_param; } vsi_id = vf->ch[idx].vsi_id; @@ -2372,7 +2375,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) &qpi->rxq) || i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id, &qpi->txq)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } @@ -2383,7 +2386,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) */ if (vf->adq_enabled) { if (idx >= ARRAY_SIZE(vf->ch)) { - aq_ret = I40E_ERR_NO_AVAILABLE_VSI; + aq_ret = -ENODEV; goto error_param; } if (j == (vf->ch[idx].num_qps - 1)) { @@ -2406,7 +2409,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) vsi->num_queue_pairs = vf->ch[i].num_qps; if (i40e_update_adq_vsi_queues(vsi, i)) { - aq_ret = I40E_ERR_CONFIG; + aq_ret = -EIO; goto error_param; } } @@ -2464,13 +2467,13 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg) int i; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } if (irqmap_info->num_vectors > vf->pf->hw.func_caps.num_msix_vectors_vf) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } @@ -2479,18 +2482,18 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg) /* validate msg params */ if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) || !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } vsi_id = map->vsi_id; if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } @@ -2579,29 +2582,29 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) int i; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } if (!i40e_vc_validate_vqs_bitmaps(vqs)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } /* Use the queue bit map sent by the VF */ if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], 
vqs->rx_queues, true)) { - aq_ret = I40E_ERR_TIMEOUT; + aq_ret = -EIO; goto error_param; } if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, true)) { - aq_ret = I40E_ERR_TIMEOUT; + aq_ret = -EIO; goto error_param; } @@ -2610,7 +2613,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) /* zero belongs to LAN VSI */ for (i = 1; i < vf->num_tc; i++) { if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx])) - aq_ret = I40E_ERR_TIMEOUT; + aq_ret = -EIO; } } @@ -2636,29 +2639,29 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) int aq_ret = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } if (!i40e_vc_validate_vqs_bitmaps(vqs)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } /* Use the queue bit map sent by the VF */ if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, false)) { - aq_ret = I40E_ERR_TIMEOUT; + aq_ret = -EIO; goto error_param; } if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, false)) { - aq_ret = I40E_ERR_TIMEOUT; + aq_ret = -EIO; goto error_param; } error_param: @@ -2790,18 +2793,18 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) memset(&stats, 0, sizeof(struct i40e_eth_stats)); if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } i40e_update_eth_stats(vsi); @@ -2862,7 +2865,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, is_zero_ether_addr(addr)) { dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", addr); - return I40E_ERR_INVALID_MAC_ADDR; + return -EINVAL; } /* If the host VMM administrator has set the VF MAC address @@ -2998,7 +3001,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { - ret = I40E_ERR_PARAM; + ret = -EINVAL; goto error_param; } @@ -3027,7 +3030,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) dev_err(&pf->pdev->dev, "Unable to add MAC filter %pM for VF %d\n", al->list[i].addr, vf->vf_id); - ret = I40E_ERR_PARAM; + ret = -EINVAL; spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_param; } @@ -3067,7 +3070,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { - ret = I40E_ERR_PARAM; + ret = -EINVAL; goto error_param; } @@ -3076,7 +3079,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) is_zero_ether_addr(al->list[i].addr)) { dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n", al->list[i].addr, vf->vf_id); - ret = I40E_ERR_INVALID_MAC_ADDR; + ret = -EINVAL; goto error_param; } if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr)) @@ -3088,7 +3091,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) /* delete addresses from the list */ for (i = 0; i < al->num_elements; i++) if (i40e_del_mac_filter(vsi, al->list[i].addr)) { - ret = I40E_ERR_INVALID_MAC_ADDR; + ret = -EINVAL; spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_param; } @@ 
-3149,13 +3152,13 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg) } if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } for (i = 0; i < vfl->num_elements; i++) { if (vfl->vlan_id[i] > I40E_MAX_VLANID) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; dev_err(&pf->pdev->dev, "invalid VF VLAN id %d\n", vfl->vlan_id[i]); goto error_param; @@ -3163,7 +3166,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg) } vsi = pf->vsi[vf->lan_vsi_idx]; if (vsi->info.pvid) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } @@ -3214,13 +3217,13 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } for (i = 0; i < vfl->num_elements; i++) { if (vfl->vlan_id[i] > I40E_MAX_VLANID) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } } @@ -3228,7 +3231,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) vsi = pf->vsi[vf->lan_vsi_idx]; if (vsi->info.pvid) { if (vfl->num_elements > 1 || vfl->vlan_id[0]) - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } @@ -3269,7 +3272,7 @@ static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } @@ -3298,13 +3301,13 @@ static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config) if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto error_param; } if (config) { if (i40e_config_rdma_qvlist(vf, qvlist_info)) - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; } else { i40e_release_rdma_qvlist(vf); } @@ -3335,7 +3338,7 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg) if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) || vrk->key_len != I40E_HKEY_ARRAY_SIZE) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err; } @@ -3366,13 +3369,13 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg) if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) || vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err; } for (i = 0; i < vrl->lut_entries; i++) if (vrl->lut[i] >= vf->num_queue_pairs) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err; } @@ -3399,14 +3402,14 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg) int len = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err; } len = sizeof(struct virtchnl_rss_hena); vrh = kzalloc(len, GFP_KERNEL); if (!vrh) { - aq_ret = I40E_ERR_NO_MEMORY; + aq_ret = -ENOMEM; len = 0; goto err; } @@ -3435,7 +3438,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg) int aq_ret = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err; } i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena); @@ -3460,7 +3463,7 @@ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg) int aq_ret = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { - aq_ret = 
I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err; } @@ -3486,7 +3489,7 @@ static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg) int aq_ret = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err; } @@ -3574,7 +3577,7 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf, dev_err(&pf->pdev->dev, "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n", vf->vf_id); - return I40E_ERR_CONFIG; + return -EIO; } } @@ -3627,9 +3630,9 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf, } } - return I40E_SUCCESS; + return 0; err: - return I40E_ERR_CONFIG; + return -EIO; } /** @@ -3713,7 +3716,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) int i, ret; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err; } @@ -3721,7 +3724,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) dev_info(&pf->pdev->dev, "VF %d: ADq not enabled, can't apply cloud filter\n", vf->vf_id); - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err; } @@ -3729,7 +3732,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) dev_info(&pf->pdev->dev, "VF %d: Invalid input, can't apply cloud filter\n", vf->vf_id); - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err; } @@ -3841,10 +3844,10 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; int aq_ret = 0; - int i, ret; + int i; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err_out; } @@ -3852,7 +3855,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) dev_info(&pf->pdev->dev, "VF %d: ADq is not enabled, can't apply cloud filter\n", vf->vf_id); - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err_out; } @@ -3860,13 +3863,15 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) dev_info(&pf->pdev->dev, "VF %d: Invalid input/s, can't apply cloud filter\n", vf->vf_id); - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err_out; } cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL); - if (!cfilter) - return -ENOMEM; + if (!cfilter) { + aq_ret = -ENOMEM; + goto err_out; + } /* parse destination mac address */ for (i = 0; i < ETH_ALEN; i++) @@ -3914,13 +3919,13 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) /* Adding cloud filter programmed as TC filter */ if (tcf.dst_port) - ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true); + aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true); else - ret = i40e_add_del_cloud_filter(vsi, cfilter, true); - if (ret) { + aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true); + if (aq_ret) { dev_err(&pf->pdev->dev, "VF %d: Failed to add cloud filter, err %pe aq_err %s\n", - vf->vf_id, ERR_PTR(ret), + vf->vf_id, ERR_PTR(aq_ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto err_free; } @@ -3953,7 +3958,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) u64 speed = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err; } @@ -3961,7 +3966,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) if (vf->spoofchk) { dev_err(&pf->pdev->dev, "Spoof check is ON, turn it OFF to enable ADq\n"); - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err; } @@ -3969,7 +3974,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) dev_err(&pf->pdev->dev, "VF 
%d attempting to enable ADq, but hasn't properly negotiated that capability\n", vf->vf_id); - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err; } @@ -3978,7 +3983,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) dev_err(&pf->pdev->dev, "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n", vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI); - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err; } @@ -3990,7 +3995,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n", vf->vf_id, i, tci->list[i].count, I40E_DEFAULT_QUEUES_PER_VF); - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err; } @@ -4001,7 +4006,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) dev_err(&pf->pdev->dev, "No queues left to allocate to VF %d\n", vf->vf_id); - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err; } else { /* we need to allocate max VF queues to enable ADq so as to @@ -4016,7 +4021,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) if (speed == SPEED_UNKNOWN) { dev_err(&pf->pdev->dev, "Cannot detect link speed\n"); - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err; } @@ -4029,7 +4034,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) "Invalid max tx rate %llu specified for VF %d.", tci->list[i].max_tx_rate, vf->vf_id); - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err; } else { vf->ch[i].max_tx_rate = @@ -4045,7 +4050,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) /* reset the VF in order to allocate resources */ i40e_vc_reset_vf(vf, true); - return I40E_SUCCESS; + return 0; /* send the response to the VF */ err: @@ -4064,7 +4069,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) int aq_ret = 0; if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; goto err; } @@ -4079,13 +4084,13 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) } else { dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n", vf->vf_id); - aq_ret = I40E_ERR_PARAM; + aq_ret = -EINVAL; } /* reset the VF in order to allocate resources */ i40e_vc_reset_vf(vf, true); - return I40E_SUCCESS; + return 0; err: return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS, @@ -4119,21 +4124,16 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, /* Check if VF is disabled. */ if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states)) - return I40E_ERR_PARAM; + return -EINVAL; /* perform basic checks on the msg */ ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); if (ret) { - i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM); + i40e_vc_send_resp_to_vf(vf, v_opcode, -EINVAL); dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", local_vf_id, v_opcode, msglen); - switch (ret) { - case VIRTCHNL_STATUS_ERR_PARAM: - return -EPERM; - default: - return -EINVAL; - } + return ret; } switch (v_opcode) { @@ -4226,7 +4226,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", v_opcode, local_vf_id); ret = i40e_vc_send_resp_to_vf(vf, v_opcode, - I40E_ERR_NOT_IMPLEMENTED); + -EOPNOTSUPP); break; } @@ -4305,6 +4305,38 @@ err_out: } /** + * i40e_check_vf_init_timeout + * @vf: the virtual function + * + * Check that the VF's initialization was successfully done and if not + * wait up to 300ms for its finish. 
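+ * (Note: the 300 ms bound corresponds to the loop below, which polls the I40E_VF_STATE_INIT bit up to 15 times with a 20 ms sleep between attempts.)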
+ * + * Returns true when VF is initialized, false on timeout + **/ +static bool i40e_check_vf_init_timeout(struct i40e_vf *vf) +{ + int i; + + /* When the VF is resetting wait until it is done. + * It can take up to 200 milliseconds, but wait for + * up to 300 milliseconds to be safe. + */ + for (i = 0; i < 15; i++) { + if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) + return true; + msleep(20); + } + + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { + dev_err(&vf->pf->pdev->dev, + "VF %d still in reset. Try again.\n", vf->vf_id); + return false; + } + + return true; +} + +/** * i40e_ndo_set_vf_mac * @netdev: network interface device structure * @vf_id: VF identifier @@ -4322,7 +4354,6 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) int ret = 0; struct hlist_node *h; int bkt; - u8 i; if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); @@ -4335,21 +4366,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) goto error_param; vf = &pf->vf[vf_id]; - - /* When the VF is resetting wait until it is done. - * It can take up to 200 milliseconds, - * but wait for up to 300 milliseconds to be safe. - * Acquire the VSI pointer only after the VF has been - * properly initialized. - */ - for (i = 0; i < 15; i++) { - if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) - break; - msleep(20); - } - if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { - dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", - vf_id); + if (!i40e_check_vf_init_timeout(vf)) { ret = -EAGAIN; goto error_param; } @@ -4451,22 +4468,18 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, } vf = &pf->vf[vf_id]; - vsi = pf->vsi[vf->lan_vsi_idx]; - if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { - dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", - vf_id); + if (!i40e_check_vf_init_timeout(vf)) { ret = -EAGAIN; goto error_pvid; } + vsi = pf->vsi[vf->lan_vsi_idx]; if (le16_to_cpu(vsi->info.pvid) == vlanprio) /* duplicate request, so just return success */ goto error_pvid; i40e_vlan_stripping_enable(vsi); - i40e_vc_reset_vf(vf, true); - /* During reset the VF got a new VSI, so refresh a pointer. */ - vsi = pf->vsi[vf->lan_vsi_idx]; + /* Locked once because multiple functions below iterate list */ spin_lock_bh(&vsi->mac_filter_hash_lock); @@ -4552,6 +4565,10 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, */ vf->port_vlan_id = le16_to_cpu(vsi->info.pvid); + i40e_vc_reset_vf(vf, true); + /* During reset the VF got a new VSI, so refresh a pointer. */ + vsi = pf->vsi[vf->lan_vsi_idx]; + ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni); if (ret) { dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n"); @@ -4601,13 +4618,11 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, } vf = &pf->vf[vf_id]; - vsi = pf->vsi[vf->lan_vsi_idx]; - if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { - dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", - vf_id); + if (!i40e_check_vf_init_timeout(vf)) { ret = -EAGAIN; goto error; } + vsi = pf->vsi[vf->lan_vsi_idx]; ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); if (ret) @@ -4774,9 +4789,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) } vf = &(pf->vf[vf_id]); - if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { - dev_err(&pf->pdev->dev, "VF %d still in reset. 
Try again.\n", - vf_id); + if (!i40e_check_vf_init_timeout(vf)) { ret = -EAGAIN; goto out; } @@ -4907,7 +4920,7 @@ int i40e_get_vf_stats(struct net_device *netdev, int vf_id, vf_stats->tx_bytes = stats->tx_bytes; vf_stats->broadcast = stats->rx_broadcast; vf_stats->multicast = stats->rx_multicast; - vf_stats->rx_dropped = stats->rx_discards; + vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other; vf_stats->tx_dropped = stats->tx_discards; return 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 895b8feb2567..2ee0f8a23248 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -4,7 +4,9 @@ #ifndef _I40E_VIRTCHNL_PF_H_ #define _I40E_VIRTCHNL_PF_H_ -#include "i40e.h" +#include <linux/avf/virtchnl.h> +#include <linux/netdevice.h> +#include "i40e_type.h" #define I40E_MAX_VLANID 4095 diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 05ec1181471e..e99fa854d17f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -2,11 +2,7 @@ /* Copyright(c) 2018 Intel Corporation. */ #include <linux/bpf_trace.h> -#include <linux/stringify.h> #include <net/xdp_sock_drv.h> -#include <net/xdp.h> - -#include "i40e.h" #include "i40e_txrx_common.h" #include "i40e_xsk.h" @@ -294,8 +290,14 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring, { unsigned int totalsize = xdp->data_end - xdp->data_meta; unsigned int metasize = xdp->data - xdp->data_meta; + struct skb_shared_info *sinfo = NULL; struct sk_buff *skb; + u32 nr_frags = 0; + if (unlikely(xdp_buff_has_frags(xdp))) { + sinfo = xdp_get_shared_info_from_buff(xdp); + nr_frags = sinfo->nr_frags; + } net_prefetch(xdp->data_meta); /* allocate a skb to store the frags */ @@ -312,6 +314,28 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring, __skb_pull(skb, metasize); } + if (likely(!xdp_buff_has_frags(xdp))) + goto out; + + for (int i = 0; i < nr_frags; i++) { + struct skb_shared_info *skinfo = skb_shinfo(skb); + skb_frag_t *frag = &sinfo->frags[i]; + struct page *page; + void *addr; + + page = dev_alloc_page(); + if (!page) { + dev_kfree_skb(skb); + return NULL; + } + addr = page_to_virt(page); + + memcpy(addr, skb_frag_page(frag), skb_frag_size(frag)); + + __skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++, + addr, 0, skb_frag_size(frag)); + } + out: xsk_buff_free(xdp); return skb; @@ -322,14 +346,13 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring, union i40e_rx_desc *rx_desc, unsigned int *rx_packets, unsigned int *rx_bytes, - unsigned int size, unsigned int xdp_res, bool *failure) { struct sk_buff *skb; *rx_packets = 1; - *rx_bytes = size; + *rx_bytes = xdp_get_buff_len(xdp_buff); if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX) return; @@ -363,7 +386,6 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring, return; } - *rx_bytes = skb->len; i40e_process_skb_fields(rx_ring, rx_desc, skb); napi_gro_receive(&rx_ring->q_vector->napi, skb); return; @@ -374,6 +396,31 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring, WARN_ON_ONCE(1); } +static int +i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first, + struct xdp_buff *xdp, const unsigned int size) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first); + + if (!xdp_buff_has_frags(first)) { + sinfo->nr_frags = 0; 
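+ /* also reset the byte accumulator before flagging the buff as fragmented */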
+ sinfo->xdp_frags_size = 0; + xdp_buff_set_frags_flag(first); + } + + if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) { + xsk_buff_free(first); + return -ENOMEM; + } + + __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, + virt_to_page(xdp->data_hard_start), 0, size); + sinfo->xdp_frags_size += size; + xsk_buff_add_frag(xdp); + + return 0; +} + /** * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring * @rx_ring: Rx ring @@ -384,12 +431,17 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring, int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 next_to_process = rx_ring->next_to_process; u16 next_to_clean = rx_ring->next_to_clean; - u16 count_mask = rx_ring->count - 1; unsigned int xdp_res, xdp_xmit = 0; + struct xdp_buff *first = NULL; + u32 count = rx_ring->count; struct bpf_prog *xdp_prog; + u32 entries_to_alloc; bool failure = false; - u16 cleaned_count; + + if (next_to_process != next_to_clean) + first = *i40e_rx_bi(rx_ring, next_to_clean); /* NB! xdp_prog will always be !NULL, due to the fact that * this path is enabled by setting an XDP program. @@ -404,7 +456,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) unsigned int size; u64 qword; - rx_desc = I40E_RX_DESC(rx_ring, next_to_clean); + rx_desc = I40E_RX_DESC(rx_ring, next_to_process); qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); /* This memory barrier is needed to keep us from reading @@ -417,9 +469,10 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) i40e_clean_programming_status(rx_ring, rx_desc->raw.qword[0], qword); - bi = *i40e_rx_bi(rx_ring, next_to_clean); + bi = *i40e_rx_bi(rx_ring, next_to_process); xsk_buff_free(bi); - next_to_clean = (next_to_clean + 1) & count_mask; + if (++next_to_process == count) + next_to_process = 0; continue; } @@ -428,26 +481,40 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) if (!size) break; - bi = *i40e_rx_bi(rx_ring, next_to_clean); + bi = *i40e_rx_bi(rx_ring, next_to_process); xsk_buff_set_size(bi, size); xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool); - xdp_res = i40e_run_xdp_zc(rx_ring, bi, xdp_prog); - i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets, - &rx_bytes, size, xdp_res, &failure); + if (!first) + first = bi; + else if (i40e_add_xsk_frag(rx_ring, first, bi, size)) + break; + + if (++next_to_process == count) + next_to_process = 0; + + if (i40e_is_non_eop(rx_ring, rx_desc)) + continue; + + xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog); + i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets, + &rx_bytes, xdp_res, &failure); + first->flags = 0; + next_to_clean = next_to_process; if (failure) break; total_rx_packets += rx_packets; total_rx_bytes += rx_bytes; xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR); - next_to_clean = (next_to_clean + 1) & count_mask; + first = NULL; } rx_ring->next_to_clean = next_to_clean; - cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask; + rx_ring->next_to_process = next_to_process; - if (cleaned_count >= I40E_RX_BUFFER_WRITE) - failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count); + entries_to_alloc = I40E_DESC_UNUSED(rx_ring); + if (entries_to_alloc >= I40E_RX_BUFFER_WRITE) + failure |= !i40e_alloc_rx_buffers_zc(rx_ring, entries_to_alloc); i40e_finalize_xdp_rx(rx_ring, xdp_xmit); i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); @@ -466,6 +533,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int 
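An aside on the index arithmetic in the zero-copy hunks above: the old code masked ring indices with count_mask = rx_ring->count - 1, which is equivalent to a modulo only when the ring size is a power of two; the reworked code wraps explicitly and so drops that assumption. A minimal sketch of the two idioms (illustrative, not driver code):

	/* old idiom: valid only for power-of-two ring sizes */
	ntc = (ntc + 1) & (count - 1);

	/* new idiom: explicit wrap, valid for any ring size */
	if (++ntc == count)
		ntc = 0;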
budget) static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc, unsigned int *total_bytes) { + u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(desc); struct i40e_tx_desc *tx_desc; dma_addr_t dma; @@ -474,8 +542,7 @@ static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc, tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++); tx_desc->buffer_addr = cpu_to_le64(dma); - tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC | I40E_TX_DESC_CMD_EOP, - 0, desc->len, 0); + tx_desc->cmd_type_offset_bsz = build_ctob(cmd, 0, desc->len, 0); *total_bytes += desc->len; } @@ -489,14 +556,14 @@ static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *des u32 i; loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) { + u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(&desc[i]); + dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr); xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len); tx_desc = I40E_TX_DESC(xdp_ring, ntu++); tx_desc->buffer_addr = cpu_to_le64(dma); - tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC | - I40E_TX_DESC_CMD_EOP, - 0, desc[i].len, 0); + tx_desc->cmd_type_offset_bsz = build_ctob(cmd, 0, desc[i].len, 0); *total_bytes += desc[i].len; } @@ -683,14 +750,16 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring) { - u16 count_mask = rx_ring->count - 1; u16 ntc = rx_ring->next_to_clean; u16 ntu = rx_ring->next_to_use; - for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) { + while (ntc != ntu) { struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc); xsk_buff_free(rx_bi); + ntc++; + if (ntc >= rx_ring->count) + ntc = 0; } } diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h index 821df248f8be..ef156fad52f2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h @@ -4,6 +4,8 @@ #ifndef _I40E_XSK_H_ #define _I40E_XSK_H_ +#include <linux/types.h> + /* This value should match the pragma in the loop_unrolled_for * macro. Why 4? It is strictly empirical. It seems to be a good * compromise between the advantage of having simultaneous outstanding @@ -20,7 +22,9 @@ #define loop_unrolled_for for #endif +struct i40e_ring; struct i40e_vsi; +struct net_device; struct xsk_buff_pool; int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair); diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile index 9c3e45c54d01..2d154a4e2fd7 100644 --- a/drivers/net/ethernet/intel/iavf/Makefile +++ b/drivers/net/ethernet/intel/iavf/Makefile @@ -13,4 +13,4 @@ obj-$(CONFIG_IAVF) += iavf.o iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \ iavf_adv_rss.o \ - iavf_txrx.o iavf_common.o iavf_adminq.o iavf_client.o + iavf_txrx.o iavf_common.o iavf_adminq.o diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 8cbdebc5b698..63b45c61cc4a 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -63,7 +63,6 @@ struct iavf_vsi { DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__); int base_vector; u16 qs_handle; - void *priv; /* client driver data reference. */ }; /* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ @@ -92,9 +91,9 @@ struct iavf_vsi { #define IAVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */ #define IAVF_MBPS_QUANTA 50 -#define IAVF_VIRTCHNL_VF_RESOURCE_SIZE (sizeof(struct virtchnl_vf_resource) + \ - (IAVF_MAX_VF_VSI * \ - sizeof(struct virtchnl_vsi_resource))) +#define IAVF_VIRTCHNL_VF_RESOURCE_SIZE \ + virtchnl_struct_size((struct virtchnl_vf_resource *)NULL, \ + vsi_res, IAVF_MAX_VF_VSI) /* MAX_MSIX_Q_VECTORS of these are allocated, * but we only use one per queue-specific vector. @@ -256,7 +255,6 @@ struct iavf_adapter { struct work_struct reset_task; struct work_struct adminq_task; struct work_struct finish_config; - struct delayed_work client_task; wait_queue_head_t down_waitqueue; wait_queue_head_t reset_waitqueue; wait_queue_head_t vc_waitqueue; @@ -265,7 +263,6 @@ struct iavf_adapter { int num_vlan_filters; struct list_head mac_filter_list; struct mutex crit_lock; - struct mutex client_lock; /* Lock to protect accesses to MAC and VLAN lists */ spinlock_t mac_vlan_list_lock; char misc_vector_name[IFNAMSIZ + 9]; @@ -282,10 +279,6 @@ struct iavf_adapter { u64 hw_csum_rx_error; u32 rx_desc_count; int num_msix_vectors; - int num_rdma_msix; - int rdma_base_vector; - u32 client_pending; - struct iavf_client_instance *cinst; struct msix_entry *msix_entries; u32 flags; @@ -294,17 +287,12 @@ struct iavf_adapter { #define IAVF_FLAG_RESET_PENDING BIT(4) #define IAVF_FLAG_RESET_NEEDED BIT(5) #define IAVF_FLAG_WB_ON_ITR_CAPABLE BIT(6) -#define IAVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(9) -#define IAVF_FLAG_CLIENT_NEEDS_OPEN BIT(10) -#define IAVF_FLAG_CLIENT_NEEDS_CLOSE BIT(11) -#define IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(12) -#define IAVF_FLAG_PROMISC_ON BIT(13) -#define IAVF_FLAG_ALLMULTI_ON BIT(14) #define IAVF_FLAG_LEGACY_RX BIT(15) #define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16) #define IAVF_FLAG_QUEUES_DISABLED BIT(17) #define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18) #define IAVF_FLAG_REINIT_MSIX_NEEDED BIT(20) +#define IAVF_FLAG_FDIR_ENABLED BIT(21) /* duplicates for common code */ #define IAVF_FLAG_DCB_ENABLED 0 /* flags for admin queue service task */ @@ -325,10 +313,7 @@ struct iavf_adapter { #define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12) #define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13) #define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14) -#define IAVF_FLAG_AQ_REQUEST_PROMISC BIT_ULL(15) -#define IAVF_FLAG_AQ_RELEASE_PROMISC BIT_ULL(16) -#define IAVF_FLAG_AQ_REQUEST_ALLMULTI BIT_ULL(17) -#define IAVF_FLAG_AQ_RELEASE_ALLMULTI BIT_ULL(18) +#define IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT_ULL(15) #define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT_ULL(19) #define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT_ULL(20) #define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT_ULL(21) @@ -365,6 +350,12 @@ struct iavf_adapter { (IAVF_EXTENDED_CAP_SEND_VLAN_V2 | \ IAVF_EXTENDED_CAP_RECV_VLAN_V2) + /* Lock to prevent possible clobbering of + * current_netdev_promisc_flags + */ + spinlock_t current_netdev_promisc_flags_lock; + netdev_features_t current_netdev_promisc_flags; + /* OS defined structs */ struct net_device *netdev; struct pci_dev *pdev; @@ -376,7 +367,6 @@ struct iavf_adapter { unsigned long crit_section; struct delayed_work watchdog_task; - bool netdev_registered; bool link_up; enum virtchnl_link_speed link_speed; /* This is only populated if the VIRTCHNL_VF_CAP_ADV_LINK_SPEED is set @@ -388,11 +378,6 @@ struct iavf_adapter { u32 link_speed_mbps; enum virtchnl_ops current_op; -#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? 
\ - (_a)->vf_res->vf_cap_flags & \ - VIRTCHNL_VF_OFFLOAD_RDMA : \ - 0) -#define CLIENT_ENABLED(_a) ((_a)->cinst) /* RSS by the PF should be preferred over RSS via other methods. */ #define RSS_PF(_a) ((_a)->vf_res->vf_cap_flags & \ VIRTCHNL_VF_OFFLOAD_RSS_PF) @@ -405,6 +390,8 @@ struct iavf_adapter { VIRTCHNL_VF_OFFLOAD_VLAN) #define VLAN_V2_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ VIRTCHNL_VF_OFFLOAD_VLAN_V2) +#define CRC_OFFLOAD_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_OFFLOAD_CRC) #define VLAN_V2_FILTERING_ALLOWED(_a) \ (VLAN_V2_ALLOWED((_a)) && \ ((_a)->vlan_v2_caps.filtering.filtering_support.outer || \ @@ -458,12 +445,6 @@ struct iavf_adapter { /* Ethtool Private Flags */ -/* lan device, used by client interface */ -struct iavf_device { - struct list_head list; - struct iavf_adapter *vf; -}; - /* needed by iavf_ethtool.c */ extern char iavf_driver_name[]; @@ -521,7 +502,7 @@ void iavf_down(struct iavf_adapter *adapter); int iavf_process_config(struct iavf_adapter *adapter); int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter); void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags); -void iavf_schedule_request_stats(struct iavf_adapter *adapter); +void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags); void iavf_schedule_finish_config(struct iavf_adapter *adapter); void iavf_reset(struct iavf_adapter *adapter); void iavf_set_ethtool_ops(struct net_device *netdev); @@ -551,7 +532,8 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter); void iavf_del_ether_addrs(struct iavf_adapter *adapter); void iavf_add_vlans(struct iavf_adapter *adapter); void iavf_del_vlans(struct iavf_adapter *adapter); -void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags); +void iavf_set_promiscuous(struct iavf_adapter *adapter); +bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter); void iavf_request_stats(struct iavf_adapter *adapter); int iavf_request_reset(struct iavf_adapter *adapter); void iavf_get_hena(struct iavf_adapter *adapter); @@ -566,11 +548,6 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, int iavf_config_rss(struct iavf_adapter *adapter); int iavf_lan_add_device(struct iavf_adapter *adapter); int iavf_lan_del_device(struct iavf_adapter *adapter); -void iavf_client_subtask(struct iavf_adapter *adapter); -void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len); -void iavf_notify_client_l2_params(struct iavf_vsi *vsi); -void iavf_notify_client_open(struct iavf_vsi *vsi); -void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset); void iavf_enable_channels(struct iavf_adapter *adapter); void iavf_disable_channels(struct iavf_adapter *adapter); void iavf_add_cloud_filter(struct iavf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.c b/drivers/net/ethernet/intel/iavf/iavf_client.c deleted file mode 100644 index 93c903c02c64..000000000000 --- a/drivers/net/ethernet/intel/iavf/iavf_client.c +++ /dev/null @@ -1,578 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ - -#include <linux/list.h> -#include <linux/errno.h> - -#include "iavf.h" -#include "iavf_prototype.h" -#include "iavf_client.h" - -static -const char iavf_client_interface_version_str[] = IAVF_CLIENT_VERSION_STR; -static struct iavf_client *vf_registered_client; -static LIST_HEAD(iavf_devices); -static DEFINE_MUTEX(iavf_device_mutex); - -static u32 iavf_client_virtchnl_send(struct iavf_info *ldev, - struct iavf_client *client, - u8 *msg, u16 len); - -static int iavf_client_setup_qvlist(struct iavf_info *ldev, - struct iavf_client *client, - struct iavf_qvlist_info *qvlist_info); - -static struct iavf_ops iavf_lan_ops = { - .virtchnl_send = iavf_client_virtchnl_send, - .setup_qvlist = iavf_client_setup_qvlist, -}; - -/** - * iavf_client_get_params - retrieve relevant client parameters - * @vsi: VSI with parameters - * @params: client param struct - **/ -static -void iavf_client_get_params(struct iavf_vsi *vsi, struct iavf_params *params) -{ - int i; - - memset(params, 0, sizeof(struct iavf_params)); - params->mtu = vsi->netdev->mtu; - params->link_up = vsi->back->link_up; - - for (i = 0; i < IAVF_MAX_USER_PRIORITY; i++) { - params->qos.prio_qos[i].tc = 0; - params->qos.prio_qos[i].qs_handle = vsi->qs_handle; - } -} - -/** - * iavf_notify_client_message - call the client message receive callback - * @vsi: the VSI associated with this client - * @msg: message buffer - * @len: length of message - * - * If there is a client to this VSI, call the client - **/ -void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len) -{ - struct iavf_client_instance *cinst; - - if (!vsi) - return; - - cinst = vsi->back->cinst; - if (!cinst || !cinst->client || !cinst->client->ops || - !cinst->client->ops->virtchnl_receive) { - dev_dbg(&vsi->back->pdev->dev, - "Cannot locate client instance virtchnl_receive function\n"); - return; - } - cinst->client->ops->virtchnl_receive(&cinst->lan_info, cinst->client, - msg, len); -} - -/** - * iavf_notify_client_l2_params - call the client notify callback - * @vsi: the VSI with l2 param changes - * - * If there is a client to this VSI, call the client - **/ -void iavf_notify_client_l2_params(struct iavf_vsi *vsi) -{ - struct iavf_client_instance *cinst; - struct iavf_params params; - - if (!vsi) - return; - - cinst = vsi->back->cinst; - - if (!cinst || !cinst->client || !cinst->client->ops || - !cinst->client->ops->l2_param_change) { - dev_dbg(&vsi->back->pdev->dev, - "Cannot locate client instance l2_param_change function\n"); - return; - } - iavf_client_get_params(vsi, &params); - cinst->lan_info.params = params; - cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client, - &params); -} - -/** - * iavf_notify_client_open - call the client open callback - * @vsi: the VSI with netdev opened - * - * If there is a client to this netdev, call the client with open - **/ -void iavf_notify_client_open(struct iavf_vsi *vsi) -{ - struct iavf_adapter *adapter = vsi->back; - struct iavf_client_instance *cinst = adapter->cinst; - int ret; - - if (!cinst || !cinst->client || !cinst->client->ops || - !cinst->client->ops->open) { - dev_dbg(&vsi->back->pdev->dev, - "Cannot locate client instance open function\n"); - return; - } - if (!(test_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state))) { - ret = cinst->client->ops->open(&cinst->lan_info, cinst->client); - if (!ret) - set_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state); - } -} - -/** - * iavf_client_release_qvlist - send a message to the PF to release rdma qv map - * @ldev: pointer to L2 context. 
- * - * Return 0 on success or < 0 on error - **/ -static int iavf_client_release_qvlist(struct iavf_info *ldev) -{ - struct iavf_adapter *adapter = ldev->vf; - enum iavf_status err; - - if (adapter->aq_required) - return -EAGAIN; - - err = iavf_aq_send_msg_to_pf(&adapter->hw, - VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP, - IAVF_SUCCESS, NULL, 0, NULL); - - if (err) - dev_err(&adapter->pdev->dev, - "Unable to send RDMA vector release message to PF, error %d, aq status %d\n", - err, adapter->hw.aq.asq_last_status); - - return err; -} - -/** - * iavf_notify_client_close - call the client close callback - * @vsi: the VSI with netdev closed - * @reset: true when close called due to reset pending - * - * If there is a client to this netdev, call the client with close - **/ -void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset) -{ - struct iavf_adapter *adapter = vsi->back; - struct iavf_client_instance *cinst = adapter->cinst; - - if (!cinst || !cinst->client || !cinst->client->ops || - !cinst->client->ops->close) { - dev_dbg(&vsi->back->pdev->dev, - "Cannot locate client instance close function\n"); - return; - } - cinst->client->ops->close(&cinst->lan_info, cinst->client, reset); - iavf_client_release_qvlist(&cinst->lan_info); - clear_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state); -} - -/** - * iavf_client_add_instance - add a client instance to the instance list - * @adapter: pointer to the board struct - * - * Returns cinst ptr on success, NULL on failure - **/ -static struct iavf_client_instance * -iavf_client_add_instance(struct iavf_adapter *adapter) -{ - struct iavf_client_instance *cinst = NULL; - struct iavf_vsi *vsi = &adapter->vsi; - struct netdev_hw_addr *mac = NULL; - struct iavf_params params; - - if (!vf_registered_client) - goto out; - - if (adapter->cinst) { - cinst = adapter->cinst; - goto out; - } - - cinst = kzalloc(sizeof(*cinst), GFP_KERNEL); - if (!cinst) - goto out; - - cinst->lan_info.vf = (void *)adapter; - cinst->lan_info.netdev = vsi->netdev; - cinst->lan_info.pcidev = adapter->pdev; - cinst->lan_info.fid = 0; - cinst->lan_info.ftype = IAVF_CLIENT_FTYPE_VF; - cinst->lan_info.hw_addr = adapter->hw.hw_addr; - cinst->lan_info.ops = &iavf_lan_ops; - cinst->lan_info.version.major = IAVF_CLIENT_VERSION_MAJOR; - cinst->lan_info.version.minor = IAVF_CLIENT_VERSION_MINOR; - cinst->lan_info.version.build = IAVF_CLIENT_VERSION_BUILD; - iavf_client_get_params(vsi, &params); - cinst->lan_info.params = params; - set_bit(__IAVF_CLIENT_INSTANCE_NONE, &cinst->state); - - cinst->lan_info.msix_count = adapter->num_rdma_msix; - cinst->lan_info.msix_entries = - &adapter->msix_entries[adapter->rdma_base_vector]; - - mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list, - struct netdev_hw_addr, list); - if (mac) - ether_addr_copy(cinst->lan_info.lanmac, mac->addr); - else - dev_err(&adapter->pdev->dev, "MAC address list is empty!\n"); - - cinst->client = vf_registered_client; - adapter->cinst = cinst; -out: - return cinst; -} - -/** - * iavf_client_del_instance - removes a client instance from the list - * @adapter: pointer to the board struct - * - **/ -static -void iavf_client_del_instance(struct iavf_adapter *adapter) -{ - kfree(adapter->cinst); - adapter->cinst = NULL; -} - -/** - * iavf_client_subtask - client maintenance work - * @adapter: board private structure - **/ -void iavf_client_subtask(struct iavf_adapter *adapter) -{ - struct iavf_client *client = vf_registered_client; - struct iavf_client_instance *cinst; - int ret = 0; - - if (adapter->state < __IAVF_DOWN) - 
return; - - /* first check client is registered */ - if (!client) - return; - - /* Add the client instance to the instance list */ - cinst = iavf_client_add_instance(adapter); - if (!cinst) - return; - - dev_info(&adapter->pdev->dev, "Added instance of Client %s\n", - client->name); - - if (!test_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state)) { - /* Send an Open request to the client */ - - if (client->ops && client->ops->open) - ret = client->ops->open(&cinst->lan_info, client); - if (!ret) - set_bit(__IAVF_CLIENT_INSTANCE_OPENED, - &cinst->state); - else - /* remove client instance */ - iavf_client_del_instance(adapter); - } -} - -/** - * iavf_lan_add_device - add a lan device struct to the list of lan devices - * @adapter: pointer to the board struct - * - * Returns 0 on success or none 0 on error - **/ -int iavf_lan_add_device(struct iavf_adapter *adapter) -{ - struct iavf_device *ldev; - int ret = 0; - - mutex_lock(&iavf_device_mutex); - list_for_each_entry(ldev, &iavf_devices, list) { - if (ldev->vf == adapter) { - ret = -EEXIST; - goto out; - } - } - ldev = kzalloc(sizeof(*ldev), GFP_KERNEL); - if (!ldev) { - ret = -ENOMEM; - goto out; - } - ldev->vf = adapter; - INIT_LIST_HEAD(&ldev->list); - list_add(&ldev->list, &iavf_devices); - dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n", - adapter->hw.bus.bus_id, adapter->hw.bus.device, - adapter->hw.bus.func); - - /* Since in some cases register may have happened before a device gets - * added, we can schedule a subtask to go initiate the clients. - */ - adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; - -out: - mutex_unlock(&iavf_device_mutex); - return ret; -} - -/** - * iavf_lan_del_device - removes a lan device from the device list - * @adapter: pointer to the board struct - * - * Returns 0 on success or non-0 on error - **/ -int iavf_lan_del_device(struct iavf_adapter *adapter) -{ - struct iavf_device *ldev, *tmp; - int ret = -ENODEV; - - mutex_lock(&iavf_device_mutex); - list_for_each_entry_safe(ldev, tmp, &iavf_devices, list) { - if (ldev->vf == adapter) { - dev_info(&adapter->pdev->dev, - "Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\n", - adapter->hw.bus.bus_id, adapter->hw.bus.device, - adapter->hw.bus.func); - list_del(&ldev->list); - kfree(ldev); - ret = 0; - break; - } - } - - mutex_unlock(&iavf_device_mutex); - return ret; -} - -/** - * iavf_client_release - release client specific resources - * @client: pointer to the registered client - * - **/ -static void iavf_client_release(struct iavf_client *client) -{ - struct iavf_client_instance *cinst; - struct iavf_device *ldev; - struct iavf_adapter *adapter; - - mutex_lock(&iavf_device_mutex); - list_for_each_entry(ldev, &iavf_devices, list) { - adapter = ldev->vf; - cinst = adapter->cinst; - if (!cinst) - continue; - if (test_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state)) { - if (client->ops && client->ops->close) - client->ops->close(&cinst->lan_info, client, - false); - iavf_client_release_qvlist(&cinst->lan_info); - clear_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state); - - dev_warn(&adapter->pdev->dev, - "Client %s instance closed\n", client->name); - } - /* delete the client instance */ - iavf_client_del_instance(adapter); - dev_info(&adapter->pdev->dev, "Deleted client instance of Client %s\n", - client->name); - } - mutex_unlock(&iavf_device_mutex); -} - -/** - * iavf_client_prepare - prepare client specific resources - * @client: pointer to the registered client - * - **/ -static void 
iavf_client_prepare(struct iavf_client *client) -{ - struct iavf_device *ldev; - struct iavf_adapter *adapter; - - mutex_lock(&iavf_device_mutex); - list_for_each_entry(ldev, &iavf_devices, list) { - adapter = ldev->vf; - /* Signal the watchdog to service the client */ - adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; - } - mutex_unlock(&iavf_device_mutex); -} - -/** - * iavf_client_virtchnl_send - send a message to the PF instance - * @ldev: pointer to L2 context. - * @client: Client pointer. - * @msg: pointer to message buffer - * @len: message length - * - * Return 0 on success or < 0 on error - **/ -static u32 iavf_client_virtchnl_send(struct iavf_info *ldev, - struct iavf_client *client, - u8 *msg, u16 len) -{ - struct iavf_adapter *adapter = ldev->vf; - enum iavf_status err; - - if (adapter->aq_required) - return -EAGAIN; - - err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_RDMA, - IAVF_SUCCESS, msg, len, NULL); - if (err) - dev_err(&adapter->pdev->dev, "Unable to send RDMA message to PF, error %d, aq status %d\n", - err, adapter->hw.aq.asq_last_status); - - return err; -} - -/** - * iavf_client_setup_qvlist - send a message to the PF to setup rdma qv map - * @ldev: pointer to L2 context. - * @client: Client pointer. - * @qvlist_info: queue and vector list - * - * Return 0 on success or < 0 on error - **/ -static int iavf_client_setup_qvlist(struct iavf_info *ldev, - struct iavf_client *client, - struct iavf_qvlist_info *qvlist_info) -{ - struct virtchnl_rdma_qvlist_info *v_qvlist_info; - struct iavf_adapter *adapter = ldev->vf; - struct iavf_qv_info *qv_info; - enum iavf_status err; - u32 v_idx, i; - size_t msg_size; - - if (adapter->aq_required) - return -EAGAIN; - - /* A quick check on whether the vectors belong to the client */ - for (i = 0; i < qvlist_info->num_vectors; i++) { - qv_info = &qvlist_info->qv_info[i]; - if (!qv_info) - continue; - v_idx = qv_info->v_idx; - if ((v_idx >= - (adapter->rdma_base_vector + adapter->num_rdma_msix)) || - (v_idx < adapter->rdma_base_vector)) - return -EINVAL; - } - - v_qvlist_info = (struct virtchnl_rdma_qvlist_info *)qvlist_info; - msg_size = struct_size(v_qvlist_info, qv_info, - v_qvlist_info->num_vectors - 1); - - adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP); - err = iavf_aq_send_msg_to_pf(&adapter->hw, - VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP, IAVF_SUCCESS, - (u8 *)v_qvlist_info, msg_size, NULL); - - if (err) { - dev_err(&adapter->pdev->dev, - "Unable to send RDMA vector config message to PF, error %d, aq status %d\n", - err, adapter->hw.aq.asq_last_status); - goto out; - } - - err = -EBUSY; - for (i = 0; i < 5; i++) { - msleep(100); - if (!(adapter->client_pending & - BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP))) { - err = 0; - break; - } - } -out: - return err; -} - -/** - * iavf_register_client - Register a iavf client driver with the L2 driver - * @client: pointer to the iavf_client struct - * - * Returns 0 on success or non-0 on error - **/ -int iavf_register_client(struct iavf_client *client) -{ - int ret = 0; - - if (!client) { - ret = -EIO; - goto out; - } - - if (strlen(client->name) == 0) { - pr_info("iavf: Failed to register client with no name\n"); - ret = -EIO; - goto out; - } - - if (vf_registered_client) { - pr_info("iavf: Client %s has already been registered!\n", - client->name); - ret = -EEXIST; - goto out; - } - - if ((client->version.major != IAVF_CLIENT_VERSION_MAJOR) || - (client->version.minor != IAVF_CLIENT_VERSION_MINOR)) { - pr_info("iavf: Failed to register client %s due to mismatched client 
interface version\n", - client->name); - pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n", - client->version.major, client->version.minor, - client->version.build, - iavf_client_interface_version_str); - ret = -EIO; - goto out; - } - - vf_registered_client = client; - - iavf_client_prepare(client); - - pr_info("iavf: Registered client %s with return code %d\n", - client->name, ret); -out: - return ret; -} -EXPORT_SYMBOL(iavf_register_client); - -/** - * iavf_unregister_client - Unregister a iavf client driver with the L2 driver - * @client: pointer to the iavf_client struct - * - * Returns 0 on success or non-0 on error - **/ -int iavf_unregister_client(struct iavf_client *client) -{ - int ret = 0; - - /* When a unregister request comes through we would have to send - * a close for each of the client instances that were opened. - * client_release function is called to handle this. - */ - iavf_client_release(client); - - if (vf_registered_client != client) { - pr_info("iavf: Client %s has not been registered\n", - client->name); - ret = -ENODEV; - goto out; - } - vf_registered_client = NULL; - pr_info("iavf: Unregistered client %s\n", client->name); -out: - return ret; -} -EXPORT_SYMBOL(iavf_unregister_client); diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.h b/drivers/net/ethernet/intel/iavf/iavf_client.h deleted file mode 100644 index c5d51d7dc7cc..000000000000 --- a/drivers/net/ethernet/intel/iavf/iavf_client.h +++ /dev/null @@ -1,169 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _IAVF_CLIENT_H_ -#define _IAVF_CLIENT_H_ - -#define IAVF_CLIENT_STR_LENGTH 10 - -/* Client interface version should be updated anytime there is a change in the - * existing APIs or data structures. - */ -#define IAVF_CLIENT_VERSION_MAJOR 0 -#define IAVF_CLIENT_VERSION_MINOR 01 -#define IAVF_CLIENT_VERSION_BUILD 00 -#define IAVF_CLIENT_VERSION_STR \ - __stringify(IAVF_CLIENT_VERSION_MAJOR) "." \ - __stringify(IAVF_CLIENT_VERSION_MINOR) "." \ - __stringify(IAVF_CLIENT_VERSION_BUILD) - -struct iavf_client_version { - u8 major; - u8 minor; - u8 build; - u8 rsvd; -}; - -enum iavf_client_state { - __IAVF_CLIENT_NULL, - __IAVF_CLIENT_REGISTERED -}; - -enum iavf_client_instance_state { - __IAVF_CLIENT_INSTANCE_NONE, - __IAVF_CLIENT_INSTANCE_OPENED, -}; - -struct iavf_ops; -struct iavf_client; - -/* HW does not define a type value for AEQ; only for RX/TX and CEQ. - * In order for us to keep the interface simple, SW will define a - * unique type value for AEQ. 
- */ -#define IAVF_QUEUE_TYPE_PE_AEQ 0x80 -#define IAVF_QUEUE_INVALID_IDX 0xFFFF - -struct iavf_qv_info { - u32 v_idx; /* msix_vector */ - u16 ceq_idx; - u16 aeq_idx; - u8 itr_idx; -}; - -struct iavf_qvlist_info { - u32 num_vectors; - struct iavf_qv_info qv_info[1]; -}; - -#define IAVF_CLIENT_MSIX_ALL 0xFFFFFFFF - -/* set of LAN parameters useful for clients managed by LAN */ - -/* Struct to hold per priority info */ -struct iavf_prio_qos_params { - u16 qs_handle; /* qs handle for prio */ - u8 tc; /* TC mapped to prio */ - u8 reserved; -}; - -#define IAVF_CLIENT_MAX_USER_PRIORITY 8 -/* Struct to hold Client QoS */ -struct iavf_qos_params { - struct iavf_prio_qos_params prio_qos[IAVF_CLIENT_MAX_USER_PRIORITY]; -}; - -struct iavf_params { - struct iavf_qos_params qos; - u16 mtu; - u16 link_up; /* boolean */ -}; - -/* Structure to hold LAN device info for a client device */ -struct iavf_info { - struct iavf_client_version version; - u8 lanmac[6]; - struct net_device *netdev; - struct pci_dev *pcidev; - u8 __iomem *hw_addr; - u8 fid; /* function id, PF id or VF id */ -#define IAVF_CLIENT_FTYPE_PF 0 -#define IAVF_CLIENT_FTYPE_VF 1 - u8 ftype; /* function type, PF or VF */ - void *vf; /* cast to iavf_adapter */ - - /* All L2 params that could change during the life span of the device - * and needs to be communicated to the client when they change - */ - struct iavf_params params; - struct iavf_ops *ops; - - u16 msix_count; /* number of msix vectors*/ - /* Array down below will be dynamically allocated based on msix_count */ - struct msix_entry *msix_entries; - u16 itr_index; /* Which ITR index the PE driver is suppose to use */ -}; - -struct iavf_ops { - /* setup_q_vector_list enables queues with a particular vector */ - int (*setup_qvlist)(struct iavf_info *ldev, struct iavf_client *client, - struct iavf_qvlist_info *qv_info); - - u32 (*virtchnl_send)(struct iavf_info *ldev, struct iavf_client *client, - u8 *msg, u16 len); - - /* If the PE Engine is unresponsive, RDMA driver can request a reset.*/ - void (*request_reset)(struct iavf_info *ldev, - struct iavf_client *client); -}; - -struct iavf_client_ops { - /* Should be called from register_client() or whenever the driver is - * ready to create a specific client instance. - */ - int (*open)(struct iavf_info *ldev, struct iavf_client *client); - - /* Should be closed when netdev is unavailable or when unregister - * call comes in. If the close happens due to a reset, set the reset - * bit to true. 
- */ - void (*close)(struct iavf_info *ldev, struct iavf_client *client, - bool reset); - - /* called when some l2 managed parameters changes - mss */ - void (*l2_param_change)(struct iavf_info *ldev, - struct iavf_client *client, - struct iavf_params *params); - - /* called when a message is received from the PF */ - int (*virtchnl_receive)(struct iavf_info *ldev, - struct iavf_client *client, - u8 *msg, u16 len); -}; - -/* Client device */ -struct iavf_client_instance { - struct list_head list; - struct iavf_info lan_info; - struct iavf_client *client; - unsigned long state; -}; - -struct iavf_client { - struct list_head list; /* list of registered clients */ - char name[IAVF_CLIENT_STR_LENGTH]; - struct iavf_client_version version; - unsigned long state; /* client state */ - atomic_t ref_cnt; /* Count of all the client devices of this kind */ - u32 flags; -#define IAVF_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0) -#define IAVF_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2) - u8 type; -#define IAVF_CLIENT_RDMA 0 - struct iavf_client_ops *ops; /* client ops provided by the client */ -}; - -/* used by clients */ -int iavf_register_client(struct iavf_client *client); -int iavf_unregister_client(struct iavf_client *client); -#endif /* _IAVF_CLIENT_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c index 1afd761d8052..8091e6feca01 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_common.c +++ b/drivers/net/ethernet/intel/iavf/iavf_common.c @@ -7,38 +7,6 @@ #include <linux/avf/virtchnl.h> /** - * iavf_set_mac_type - Sets MAC type - * @hw: pointer to the HW structure - * - * This function sets the mac type of the adapter based on the - * vendor ID and device ID stored in the hw structure. - **/ -enum iavf_status iavf_set_mac_type(struct iavf_hw *hw) -{ - enum iavf_status status = 0; - - if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { - switch (hw->device_id) { - case IAVF_DEV_ID_X722_VF: - hw->mac.type = IAVF_MAC_X722_VF; - break; - case IAVF_DEV_ID_VF: - case IAVF_DEV_ID_VF_HV: - case IAVF_DEV_ID_ADAPTIVE_VF: - hw->mac.type = IAVF_MAC_VF; - break; - default: - hw->mac.type = IAVF_MAC_GENERIC; - break; - } - } else { - status = IAVF_ERR_DEVICE_NOT_SUPPORTED; - } - - return status; -} - -/** * iavf_aq_str - convert AQ err code to a string * @hw: pointer to the HW structure * @aq_err: the AQ error code to convert diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index a34303ad057d..dc499fe7734e 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -362,7 +362,7 @@ static void iavf_get_ethtool_stats(struct net_device *netdev, unsigned int i; /* Explicitly request stats refresh */ - iavf_schedule_request_stats(adapter); + iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_REQUEST_STATS); iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats); @@ -395,11 +395,9 @@ static void iavf_get_priv_flag_strings(struct net_device *netdev, u8 *data) { unsigned int i; - for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) { - snprintf(data, ETH_GSTRING_LEN, "%s", - iavf_gstrings_priv_flags[i].flag_string); - data += ETH_GSTRING_LEN; - } + for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) + ethtool_sprintf(&data, "%s", + iavf_gstrings_priv_flags[i].flag_string); } /** @@ -829,18 +827,10 @@ static int __iavf_set_coalesce(struct net_device *netdev, struct iavf_adapter *adapter = netdev_priv(netdev); int i; - if (ec->rx_coalesce_usecs == 0) { - if 
(ec->use_adaptive_rx_coalesce) - netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n"); - } else if ((ec->rx_coalesce_usecs < IAVF_MIN_ITR) || - (ec->rx_coalesce_usecs > IAVF_MAX_ITR)) { + if (ec->rx_coalesce_usecs > IAVF_MAX_ITR) { netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); return -EINVAL; - } else if (ec->tx_coalesce_usecs == 0) { - if (ec->use_adaptive_tx_coalesce) - netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n"); - } else if ((ec->tx_coalesce_usecs < IAVF_MIN_ITR) || - (ec->tx_coalesce_usecs > IAVF_MAX_ITR)) { + } else if (ec->tx_coalesce_usecs > IAVF_MAX_ITR) { netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n"); return -EINVAL; } @@ -1071,7 +1061,7 @@ iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter, struct iavf_fdir_fltr *rule = NULL; int ret = 0; - if (!FDIR_FLTR_SUPPORT(adapter)) + if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED)) return -EOPNOTSUPP; spin_lock_bh(&adapter->fdir_fltr_lock); @@ -1213,7 +1203,7 @@ iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd, unsigned int cnt = 0; int val = 0; - if (!FDIR_FLTR_SUPPORT(adapter)) + if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED)) return -EOPNOTSUPP; cmd->data = IAVF_MAX_FDIR_FILTERS; @@ -1405,7 +1395,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx int count = 50; int err; - if (!FDIR_FLTR_SUPPORT(adapter)) + if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED)) return -EOPNOTSUPP; if (fsp->flow_type & FLOW_MAC_EXT) @@ -1446,12 +1436,16 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx spin_lock_bh(&adapter->fdir_fltr_lock); iavf_fdir_list_add_fltr(adapter, fltr); adapter->fdir_active_fltr++; - fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST; - adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; + if (adapter->link_up) { + fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; + } else { + fltr->state = IAVF_FDIR_FLTR_INACTIVE; + } spin_unlock_bh(&adapter->fdir_fltr_lock); - mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); - + if (adapter->link_up) + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); ret: if (err && fltr) kfree(fltr); @@ -1473,7 +1467,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx struct iavf_fdir_fltr *fltr = NULL; int err = 0; - if (!FDIR_FLTR_SUPPORT(adapter)) + if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED)) return -EOPNOTSUPP; spin_lock_bh(&adapter->fdir_fltr_lock); @@ -1482,6 +1476,11 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) { fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST; adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; + } else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) { + list_del(&fltr->list); + kfree(fltr); + adapter->fdir_active_fltr--; + fltr = NULL; } else { err = -EBUSY; } @@ -1790,7 +1789,7 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, ret = 0; break; case ETHTOOL_GRXCLSRLCNT: - if (!FDIR_FLTR_SUPPORT(adapter)) + if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED)) break; spin_lock_bh(&adapter->fdir_fltr_lock); cmd->rule_cnt = adapter->fdir_active_fltr; diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h index 9eb9f73f6adf..d31bd923ba8c 100644 --- 
a/drivers/net/ethernet/intel/iavf/iavf_fdir.h +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h @@ -6,12 +6,25 @@ struct iavf_adapter; -/* State of Flow Director filter */ +/* State of Flow Director filter + * + * *_REQUEST states are used to mark filter to be sent to PF driver to perform + * an action (either add or delete filter). *_PENDING states are an indication + * that request was sent to PF and the driver is waiting for response. + * + * Both DELETE and DISABLE states are being used to delete a filter in PF. + * The difference is that after a successful response filter in DEL_PENDING + * state is being deleted from VF driver as well and filter in DIS_PENDING state + * is being changed to INACTIVE state. + */ enum iavf_fdir_fltr_state_t { IAVF_FDIR_FLTR_ADD_REQUEST, /* User requests to add filter */ IAVF_FDIR_FLTR_ADD_PENDING, /* Filter pending add by the PF */ IAVF_FDIR_FLTR_DEL_REQUEST, /* User requests to delete filter */ IAVF_FDIR_FLTR_DEL_PENDING, /* Filter pending delete by the PF */ + IAVF_FDIR_FLTR_DIS_REQUEST, /* Filter scheduled to be disabled */ + IAVF_FDIR_FLTR_DIS_PENDING, /* Filter pending disable by the PF */ + IAVF_FDIR_FLTR_INACTIVE, /* Filter inactive on link down */ IAVF_FDIR_FLTR_ACTIVE, /* Filter is active */ }; diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 9610ca770349..e8d5b889addc 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -3,7 +3,6 @@ #include "iavf.h" #include "iavf_prototype.h" -#include "iavf_client.h" /* All iavf tracepoints are defined by the include below, which must * be included exactly once across the whole kernel with * CREATE_TRACE_POINTS defined @@ -278,27 +277,6 @@ void iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem) } /** - * iavf_lock_timeout - try to lock mutex but give up after timeout - * @lock: mutex that should be locked - * @msecs: timeout in msecs - * - * Returns 0 on success, negative on failure - **/ -static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs) -{ - unsigned int wait, delay = 10; - - for (wait = 0; wait < msecs; wait += delay) { - if (mutex_trylock(lock)) - return 0; - - msleep(delay); - } - - return -1; -} - -/** * iavf_schedule_reset - Set the flags and schedule a reset event * @adapter: board private structure * @flags: IAVF_FLAG_RESET_PENDING or IAVF_FLAG_RESET_NEEDED @@ -314,15 +292,13 @@ void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags) } /** - * iavf_schedule_request_stats - Set the flags and schedule statistics request + * iavf_schedule_aq_request - Set the flags and schedule aq request * @adapter: board private structure - * - * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly - * request and refresh ethtool stats + * @flags: requested aq flags **/ -void iavf_schedule_request_stats(struct iavf_adapter *adapter) +void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags) { - adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS; + adapter->aq_required |= flags; mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); } @@ -823,7 +799,7 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, list_add_tail(&f->list, &adapter->vlan_filter_list); f->state = IAVF_VLAN_ADD; adapter->num_vlan_filters++; - adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; + iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER); } clearout: @@ -845,7 +821,7 @@ static void iavf_del_vlan(struct iavf_adapter 
*adapter, struct iavf_vlan vlan) f = iavf_find_vlan(adapter, vlan); if (f) { f->state = IAVF_VLAN_REMOVE; - adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; + iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER); } spin_unlock_bh(&adapter->mac_vlan_list_lock); @@ -1189,6 +1165,16 @@ static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr) } /** + * iavf_promiscuous_mode_changed - check if promiscuous mode bits changed + * @adapter: device specific adapter + */ +bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter) +{ + return (adapter->current_netdev_promisc_flags ^ adapter->netdev->flags) & + (IFF_PROMISC | IFF_ALLMULTI); +} + +/** * iavf_set_rx_mode - NDO callback to set the netdev filters * @netdev: network interface device structure **/ @@ -1201,19 +1187,10 @@ static void iavf_set_rx_mode(struct net_device *netdev) __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync); spin_unlock_bh(&adapter->mac_vlan_list_lock); - if (netdev->flags & IFF_PROMISC && - !(adapter->flags & IAVF_FLAG_PROMISC_ON)) - adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC; - else if (!(netdev->flags & IFF_PROMISC) && - adapter->flags & IAVF_FLAG_PROMISC_ON) - adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC; - - if (netdev->flags & IFF_ALLMULTI && - !(adapter->flags & IAVF_FLAG_ALLMULTI_ON)) - adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI; - else if (!(netdev->flags & IFF_ALLMULTI) && - adapter->flags & IAVF_FLAG_ALLMULTI_ON) - adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI; + spin_lock_bh(&adapter->current_netdev_promisc_flags_lock); + if (iavf_promiscuous_mode_changed(adapter)) + adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE; + spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock); } /** @@ -1277,7 +1254,7 @@ static void iavf_configure(struct iavf_adapter *adapter) * iavf_up_complete - Finish the last steps of bringing up a connection * @adapter: board private structure * - * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. + * Expects to be called while holding crit_lock. 
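+ *
+ * A minimal sketch of a conforming caller (illustrative only; iavf_open(),
+ * for instance, holds crit_lock around this call):
+ *
+ *	mutex_lock(&adapter->crit_lock);
+ *	iavf_up_complete(adapter);
+ *	mutex_unlock(&adapter->crit_lock);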
**/ static void iavf_up_complete(struct iavf_adapter *adapter) { @@ -1287,8 +1264,6 @@ static void iavf_up_complete(struct iavf_adapter *adapter) iavf_napi_enable_all(adapter); adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES; - if (CLIENT_ENABLED(adapter)) - adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN; mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); } @@ -1357,18 +1332,20 @@ static void iavf_clear_cloud_filters(struct iavf_adapter *adapter) **/ static void iavf_clear_fdir_filters(struct iavf_adapter *adapter) { - struct iavf_fdir_fltr *fdir, *fdirtmp; + struct iavf_fdir_fltr *fdir; /* remove all Flow Director filters */ spin_lock_bh(&adapter->fdir_fltr_lock); - list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, - list) { + list_for_each_entry(fdir, &adapter->fdir_list_head, list) { if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) { - list_del(&fdir->list); - kfree(fdir); - adapter->fdir_active_fltr--; - } else { - fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; + /* Cancel a request, keep filter as inactive */ + fdir->state = IAVF_FDIR_FLTR_INACTIVE; + } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING || + fdir->state == IAVF_FDIR_FLTR_ACTIVE) { + /* Disable filters which are active or have a pending + * request to PF to be added + */ + fdir->state = IAVF_FDIR_FLTR_DIS_REQUEST; } } spin_unlock_bh(&adapter->fdir_fltr_lock); @@ -1401,7 +1378,7 @@ static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter) * iavf_down - Shutdown the connection processing * @adapter: board private structure * - * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. + * Expects to be called while holding crit_lock. **/ void iavf_down(struct iavf_adapter *adapter) { @@ -1421,7 +1398,10 @@ void iavf_down(struct iavf_adapter *adapter) iavf_clear_fdir_filters(adapter); iavf_clear_adv_rss_conf(adapter); - if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) { + if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) + return; + + if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) { /* cancel any current operation */ adapter->current_op = VIRTCHNL_OP_UNKNOWN; /* Schedule operations to close down the HW. 
Don't wait @@ -1438,9 +1418,9 @@ void iavf_down(struct iavf_adapter *adapter) adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; if (!list_empty(&adapter->adv_rss_list_head)) adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; - adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; } + adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); } @@ -1953,6 +1933,17 @@ err_alloc_queues: } /** + * iavf_free_interrupt_scheme - Undo what iavf_init_interrupt_scheme does + * @adapter: board private structure + **/ +static void iavf_free_interrupt_scheme(struct iavf_adapter *adapter) +{ + iavf_free_q_vectors(adapter); + iavf_reset_interrupt_capability(adapter); + iavf_free_queues(adapter); +} + +/** * iavf_free_rss - Free memory used by RSS structs * @adapter: board private structure **/ @@ -1980,11 +1971,9 @@ static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter, bool runni if (running) iavf_free_traffic_irqs(adapter); iavf_free_misc_irq(adapter); - iavf_reset_interrupt_capability(adapter); - iavf_free_q_vectors(adapter); - iavf_free_queues(adapter); + iavf_free_interrupt_scheme(adapter); - err = iavf_init_interrupt_scheme(adapter); + err = iavf_init_interrupt_scheme(adapter); if (err) goto err; @@ -2019,7 +2008,7 @@ static void iavf_finish_config(struct work_struct *work) mutex_lock(&adapter->crit_lock); if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) && - adapter->netdev_registered && + adapter->netdev->reg_state == NETREG_REGISTERED && !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) { netdev_update_features(adapter->netdev); adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES; @@ -2027,7 +2016,7 @@ static void iavf_finish_config(struct work_struct *work) switch (adapter->state) { case __IAVF_DOWN: - if (!adapter->netdev_registered) { + if (adapter->netdev->reg_state != NETREG_REGISTERED) { err = register_netdevice(adapter->netdev); if (err) { dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n", @@ -2041,7 +2030,6 @@ static void iavf_finish_config(struct work_struct *work) __IAVF_INIT_CONFIG_ADAPTER); goto out; } - adapter->netdev_registered = true; } /* Set the real number of queues when reset occurs while @@ -2163,19 +2151,8 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) return 0; } - if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) { - iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC | - FLAG_VF_MULTICAST_PROMISC); - return 0; - } - - if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) { - iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); - return 0; - } - if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) || - (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) { - iavf_set_promiscuous(adapter, 0); + if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) { + iavf_set_promiscuous(adapter); return 0; } @@ -2367,11 +2344,6 @@ static void iavf_startup(struct iavf_adapter *adapter) /* driver loaded, probe complete */ adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; adapter->flags &= ~IAVF_FLAG_RESET_PENDING; - status = iavf_set_mac_type(hw); - if (status) { - dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status); - goto err; - } ret = iavf_check_reset_complete(hw); if (ret) { @@ -2712,12 +2684,6 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter) adapter->link_up = false; netif_tx_stop_all_queues(netdev); - if (CLIENT_ALLOWED(adapter)) { - err = iavf_lan_add_device(adapter); - if (err) - dev_info(&pdev->dev, 
"Failed to add VF to client API service list: %d\n", - err); - } dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); if (netdev->features & NETIF_F_GRO) dev_info(&pdev->dev, "GRO is enabled\n"); @@ -2914,7 +2880,6 @@ static void iavf_watchdog_task(struct work_struct *work) return; } - schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); mutex_unlock(&adapter->crit_lock); restart_watchdog: if (adapter->state >= __IAVF_DOWN) @@ -2983,9 +2948,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter) spin_unlock_bh(&adapter->cloud_filter_list_lock); iavf_free_misc_irq(adapter); - iavf_reset_interrupt_capability(adapter); - iavf_free_q_vectors(adapter); - iavf_free_queues(adapter); + iavf_free_interrupt_scheme(adapter); memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); iavf_shutdown_adminq(&adapter->hw); adapter->flags &= ~IAVF_FLAG_RESET_PENDING; @@ -3027,16 +2990,6 @@ static void iavf_reset_task(struct work_struct *work) return; } - while (!mutex_trylock(&adapter->client_lock)) - usleep_range(500, 1000); - if (CLIENT_ENABLED(adapter)) { - adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | - IAVF_FLAG_CLIENT_NEEDS_CLOSE | - IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS | - IAVF_FLAG_SERVICE_CLIENT_REQUESTED); - cancel_delayed_work_sync(&adapter->client_task); - iavf_notify_client_close(&adapter->vsi, true); - } iavf_misc_irq_disable(adapter); if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { adapter->flags &= ~IAVF_FLAG_RESET_NEEDED; @@ -3080,7 +3033,6 @@ static void iavf_reset_task(struct work_struct *work) dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", reg_val); iavf_disable_vf(adapter); - mutex_unlock(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); return; /* Do not attempt to reinit. It's dead, Jim. */ } @@ -3219,7 +3171,6 @@ continue_reset: adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; wake_up(&adapter->reset_waitqueue); - mutex_unlock(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); return; @@ -3230,7 +3181,6 @@ reset_err: } iavf_disable_vf(adapter); - mutex_unlock(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); } @@ -3330,48 +3280,6 @@ out: } /** - * iavf_client_task - worker thread to perform client work - * @work: pointer to work_struct containing our data - * - * This task handles client interactions. Because client calls can be - * reentrant, we can't handle them in the watchdog. - **/ -static void iavf_client_task(struct work_struct *work) -{ - struct iavf_adapter *adapter = - container_of(work, struct iavf_adapter, client_task.work); - - /* If we can't get the client bit, just give up. We'll be rescheduled - * later. 
- */ - - if (!mutex_trylock(&adapter->client_lock)) - return; - - if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) { - iavf_client_subtask(adapter); - adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED; - goto out; - } - if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { - iavf_notify_client_l2_params(&adapter->vsi); - adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS; - goto out; - } - if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) { - iavf_notify_client_close(&adapter->vsi, false); - adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE; - goto out; - } - if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) { - iavf_notify_client_open(&adapter->vsi); - adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN; - } -out: - mutex_unlock(&adapter->client_lock); -} - -/** * iavf_free_all_tx_resources - Free Tx Resources for All Queues * @adapter: board private structure * @@ -3744,15 +3652,15 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, struct virtchnl_filter *vf = &filter->f; if (dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_VLAN) | - BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_PORTS) | - BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { - dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n", + ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID))) { + dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%llx\n", dissector->used_keys); return -EOPNOTSUPP; } @@ -4186,6 +4094,33 @@ static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, } /** + * iavf_restore_fdir_filters + * @adapter: board private structure + * + * Restore existing FDIR filters when VF netdev comes back up. 
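+ *
+ * The transitions below are roughly the inverse of those performed by
+ * iavf_clear_fdir_filters() on ifdown:
+ *
+ *	DIS_REQUEST           -> ACTIVE       (cancel the pending disable)
+ *	DIS_PENDING, INACTIVE -> ADD_REQUEST  (re-add via
+ *					       IAVF_FLAG_AQ_ADD_FDIR_FILTER)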
+ **/ +static void iavf_restore_fdir_filters(struct iavf_adapter *adapter) +{ + struct iavf_fdir_fltr *f; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry(f, &adapter->fdir_list_head, list) { + if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST) { + /* Cancel a request, keep filter as active */ + f->state = IAVF_FDIR_FLTR_ACTIVE; + } else if (f->state == IAVF_FDIR_FLTR_DIS_PENDING || + f->state == IAVF_FDIR_FLTR_INACTIVE) { + /* Add filters which are inactive or have a pending + * request to PF to be deleted + */ + f->state = IAVF_FDIR_FLTR_ADD_REQUEST; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); +} + +/** * iavf_open - Called when a network interface is made active * @netdev: network interface device structure * @@ -4252,8 +4187,9 @@ static int iavf_open(struct net_device *netdev) spin_unlock_bh(&adapter->mac_vlan_list_lock); - /* Restore VLAN filters that were removed with IFF_DOWN */ + /* Restore filters that were removed with IFF_DOWN */ iavf_restore_filters(adapter); + iavf_restore_fdir_filters(adapter); iavf_configure(adapter); @@ -4303,8 +4239,6 @@ static int iavf_close(struct net_device *netdev) } set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); - if (CLIENT_ENABLED(adapter)) - adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; /* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before * IAVF_FLAG_AQ_DISABLE_QUEUES because in such case there is rtnl * deadlock with adminq_task() until iavf_close timeouts. We must send @@ -4373,10 +4307,6 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu) netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); netdev->mtu = new_mtu; - if (CLIENT_ENABLED(adapter)) { - iavf_notify_client_l2_params(&adapter->vsi); - adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; - } if (netif_running(netdev)) { iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED); @@ -4390,6 +4320,49 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu) return ret; } +/** + * iavf_disable_fdir - disable Flow Director and clear existing filters + * @adapter: board private structure + **/ +static void iavf_disable_fdir(struct iavf_adapter *adapter) +{ + struct iavf_fdir_fltr *fdir, *fdirtmp; + bool del_filters = false; + + adapter->flags &= ~IAVF_FLAG_FDIR_ENABLED; + + /* remove all Flow Director filters */ + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, + list) { + if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST || + fdir->state == IAVF_FDIR_FLTR_INACTIVE) { + /* Delete filters not registered in PF */ + list_del(&fdir->list); + kfree(fdir); + adapter->fdir_active_fltr--; + } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING || + fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST || + fdir->state == IAVF_FDIR_FLTR_ACTIVE) { + /* Filters registered in PF, schedule their deletion */ + fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; + del_filters = true; + } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) { + /* Request to delete filter already sent to PF, change + * state to DEL_PENDING to delete filter after PF's + * response, not set as INACTIVE + */ + fdir->state = IAVF_FDIR_FLTR_DEL_PENDING; + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + if (del_filters) { + adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); + } +} + #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ NETIF_F_HW_VLAN_CTAG_TX | \ NETIF_F_HW_VLAN_STAG_RX | \ 
@@ -4411,6 +4384,16 @@ static int iavf_set_features(struct net_device *netdev, (features & NETIF_VLAN_OFFLOAD_FEATURES)) iavf_set_vlan_offload_features(adapter, netdev->features, features); + if (CRC_OFFLOAD_ALLOWED(adapter) && + ((netdev->features & NETIF_F_RXFCS) ^ (features & NETIF_F_RXFCS))) + iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED); + + if ((netdev->features & NETIF_F_NTUPLE) ^ (features & NETIF_F_NTUPLE)) { + if (features & NETIF_F_NTUPLE) + adapter->flags |= IAVF_FLAG_FDIR_ENABLED; + else + iavf_disable_fdir(adapter); + } return 0; } @@ -4532,6 +4515,9 @@ iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter) } } + if (CRC_OFFLOAD_ALLOWED(adapter)) + hw_features |= NETIF_F_RXFCS; + return hw_features; } @@ -4696,6 +4682,55 @@ iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter, } /** + * iavf_fix_strip_features - fix NETDEV CRC and VLAN strip features + * @adapter: board private structure + * @requested_features: stack requested NETDEV features + * + * Returns fixed-up features bits + **/ +static netdev_features_t +iavf_fix_strip_features(struct iavf_adapter *adapter, + netdev_features_t requested_features) +{ + struct net_device *netdev = adapter->netdev; + bool crc_offload_req, is_vlan_strip; + netdev_features_t vlan_strip; + int num_non_zero_vlan; + + crc_offload_req = CRC_OFFLOAD_ALLOWED(adapter) && + (requested_features & NETIF_F_RXFCS); + num_non_zero_vlan = iavf_get_num_vlans_added(adapter); + vlan_strip = (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX); + is_vlan_strip = requested_features & vlan_strip; + + if (!crc_offload_req) + return requested_features; + + if (!num_non_zero_vlan && (netdev->features & vlan_strip) && + !(netdev->features & NETIF_F_RXFCS) && is_vlan_strip) { + requested_features &= ~vlan_strip; + netdev_info(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n"); + return requested_features; + } + + if ((netdev->features & NETIF_F_RXFCS) && is_vlan_strip) { + requested_features &= ~vlan_strip; + if (!(netdev->features & vlan_strip)) + netdev_info(netdev, "To enable VLAN stripping, first need to enable FCS/CRC stripping"); + + return requested_features; + } + + if (num_non_zero_vlan && is_vlan_strip && + !(netdev->features & NETIF_F_RXFCS)) { + requested_features &= ~NETIF_F_RXFCS; + netdev_info(netdev, "To disable FCS/CRC stripping, first need to disable VLAN stripping"); + } + + return requested_features; +} + +/** * iavf_fix_features - fix up the netdev feature bits * @netdev: our net device * @features: desired feature bits @@ -4707,7 +4742,12 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); - return iavf_fix_netdev_vlan_features(adapter, features); + features = iavf_fix_netdev_vlan_features(adapter, features); + + if (!FDIR_FLTR_SUPPORT(adapter)) + features &= ~NETIF_F_NTUPLE; + + return iavf_fix_strip_features(adapter, features); } static const struct net_device_ops iavf_netdev_ops = { @@ -4744,7 +4784,7 @@ static int iavf_check_reset_complete(struct iavf_hw *hw) if ((rstat == VIRTCHNL_VFR_VFACTIVE) || (rstat == VIRTCHNL_VFR_COMPLETED)) return 0; - usleep_range(10, 20); + msleep(IAVF_RESET_WAIT_MS); } return -EBUSY; } @@ -4824,6 +4864,12 @@ int iavf_process_config(struct iavf_adapter *adapter) if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (FDIR_FLTR_SUPPORT(adapter)) { + netdev->hw_features |= NETIF_F_NTUPLE; + netdev->features |= 
NETIF_F_NTUPLE; + adapter->flags |= IAVF_FLAG_FDIR_ENABLED; + } + netdev->priv_flags |= IFF_UNICAST_FLT; /* Do not turn on offloads when they are requested to be turned off. @@ -4848,34 +4894,6 @@ int iavf_process_config(struct iavf_adapter *adapter) } /** - * iavf_shutdown - Shutdown the device in preparation for a reboot - * @pdev: pci device structure - **/ -static void iavf_shutdown(struct pci_dev *pdev) -{ - struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev); - struct net_device *netdev = adapter->netdev; - - netif_device_detach(netdev); - - if (netif_running(netdev)) - iavf_close(netdev); - - if (iavf_lock_timeout(&adapter->crit_lock, 5000)) - dev_warn(&adapter->pdev->dev, "%s: failed to acquire crit_lock\n", __func__); - /* Prevent the watchdog from running. */ - iavf_change_state(adapter, __IAVF_REMOVE); - adapter->aq_required = 0; - mutex_unlock(&adapter->crit_lock); - -#ifdef CONFIG_PM - pci_save_state(pdev); - -#endif - pci_disable_device(pdev); -} - -/** * iavf_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in iavf_pci_tbl @@ -4963,7 +4981,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * and destroy them only once in remove */ mutex_init(&adapter->crit_lock); - mutex_init(&adapter->client_lock); mutex_init(&hw->aq.asq_mutex); mutex_init(&hw->aq.arq_mutex); @@ -4971,6 +4988,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&adapter->cloud_filter_list_lock); spin_lock_init(&adapter->fdir_fltr_lock); spin_lock_init(&adapter->adv_rss_lock); + spin_lock_init(&adapter->current_netdev_promisc_flags_lock); INIT_LIST_HEAD(&adapter->mac_filter_list); INIT_LIST_HEAD(&adapter->vlan_filter_list); @@ -4982,9 +5000,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&adapter->adminq_task, iavf_adminq_task); INIT_WORK(&adapter->finish_config, iavf_finish_config); INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task); - INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task); - queue_delayed_work(adapter->wq, &adapter->watchdog_task, - msecs_to_jiffies(5 * (pdev->devfn & 0x07))); /* Setup the wait queue for indicating transition to down status */ init_waitqueue_head(&adapter->down_waitqueue); @@ -4995,6 +5010,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Setup the wait queue for indicating virtchannel events */ init_waitqueue_head(&adapter->vc_waitqueue); + queue_delayed_work(adapter->wq, &adapter->watchdog_task, + msecs_to_jiffies(5 * (pdev->devfn & 0x07))); + /* Initialization goes on in the work. Do not add more of it below. 
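 * (Note that the watchdog work is queued just above, only after the down,
 * reset and virtchnl waitqueues have been initialized, so the work item
 * never observes a partially constructed adapter.)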
*/ return 0; err_ioremap: @@ -5022,8 +5040,7 @@ static int __maybe_unused iavf_suspend(struct device *dev_d) netif_device_detach(netdev); - while (!mutex_trylock(&adapter->crit_lock)) - usleep_range(500, 1000); + mutex_lock(&adapter->crit_lock); if (netif_running(netdev)) { rtnl_lock(); @@ -5086,17 +5103,21 @@ static int __maybe_unused iavf_resume(struct device *dev_d) **/ static void iavf_remove(struct pci_dev *pdev) { - struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev); struct iavf_fdir_fltr *fdir, *fdirtmp; struct iavf_vlan_filter *vlf, *vlftmp; struct iavf_cloud_filter *cf, *cftmp; struct iavf_adv_rss *rss, *rsstmp; struct iavf_mac_filter *f, *ftmp; + struct iavf_adapter *adapter; struct net_device *netdev; struct iavf_hw *hw; - int err; - netdev = adapter->netdev; + /* Don't proceed with remove if netdev is already freed */ + netdev = pci_get_drvdata(pdev); + if (!netdev) + return; + + adapter = iavf_pdev_to_adapter(pdev); hw = &adapter->hw; if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) @@ -5125,19 +5146,8 @@ static void iavf_remove(struct pci_dev *pdev) cancel_delayed_work_sync(&adapter->watchdog_task); cancel_work_sync(&adapter->finish_config); - rtnl_lock(); - if (adapter->netdev_registered) { - unregister_netdevice(netdev); - adapter->netdev_registered = false; - } - rtnl_unlock(); - - if (CLIENT_ALLOWED(adapter)) { - err = iavf_lan_del_device(adapter); - if (err) - dev_warn(&pdev->dev, "Failed to delete client device: %d\n", - err); - } + if (netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(netdev); mutex_lock(&adapter->crit_lock); dev_info(&adapter->pdev->dev, "Removing device\n"); @@ -5156,7 +5166,6 @@ static void iavf_remove(struct pci_dev *pdev) cancel_work_sync(&adapter->reset_task); cancel_delayed_work_sync(&adapter->watchdog_task); cancel_work_sync(&adapter->adminq_task); - cancel_delayed_work_sync(&adapter->client_task); adapter->aq_required = 0; adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; @@ -5164,9 +5173,7 @@ static void iavf_remove(struct pci_dev *pdev) iavf_free_all_tx_resources(adapter); iavf_free_all_rx_resources(adapter); iavf_free_misc_irq(adapter); - - iavf_reset_interrupt_capability(adapter); - iavf_free_q_vectors(adapter); + iavf_free_interrupt_scheme(adapter); iavf_free_rss(adapter); @@ -5176,13 +5183,11 @@ static void iavf_remove(struct pci_dev *pdev) /* destroy the locks only once, here */ mutex_destroy(&hw->aq.arq_mutex); mutex_destroy(&hw->aq.asq_mutex); - mutex_destroy(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); mutex_destroy(&adapter->crit_lock); iounmap(hw->hw_addr); pci_release_regions(pdev); - iavf_free_queues(adapter); kfree(adapter->vf_res); spin_lock_bh(&adapter->mac_vlan_list_lock); /* If we got removed before an up/down sequence, we've got a filter @@ -5224,11 +5229,25 @@ static void iavf_remove(struct pci_dev *pdev) destroy_workqueue(adapter->wq); + pci_set_drvdata(pdev, NULL); + free_netdev(netdev); pci_disable_device(pdev); } +/** + * iavf_shutdown - Shutdown the device in preparation for a reboot + * @pdev: pci device structure + **/ +static void iavf_shutdown(struct pci_dev *pdev) +{ + iavf_remove(pdev); + + if (system_state == SYSTEM_POWER_OFF) + pci_set_power_state(pdev, PCI_D3hot); +} + static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume); static struct pci_driver iavf_driver = { diff --git a/drivers/net/ethernet/intel/iavf/iavf_prototype.h b/drivers/net/ethernet/intel/iavf/iavf_prototype.h index 940cb4203fbe..4a48e6171405 100644 --- 
a/drivers/net/ethernet/intel/iavf/iavf_prototype.h +++ b/drivers/net/ethernet/intel/iavf/iavf_prototype.h @@ -45,8 +45,6 @@ enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid, enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid, struct iavf_aqc_get_set_rss_key_data *key); -enum iavf_status iavf_set_mac_type(struct iavf_hw *hw); - extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[]; static inline struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 8c5f6096b002..d64c4997136b 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -7,8 +7,8 @@ #include "iavf_trace.h" #include "iavf_prototype.h" -static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, - u32 td_tag) +static __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, + u32 td_tag) { return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA | ((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) | @@ -370,8 +370,8 @@ static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi, q_vector->arm_wb_state = true; } -static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector, - struct iavf_ring_container *rc) +static bool iavf_container_is_rx(struct iavf_q_vector *q_vector, + struct iavf_ring_container *rc) { return &q_vector->rx == rc; } @@ -806,7 +806,7 @@ err: * @rx_ring: ring to bump * @val: new head index **/ -static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val) +static void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val) { rx_ring->next_to_use = val; @@ -828,7 +828,7 @@ static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val) * * Returns the offset value for ring into the data buffer. */ -static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring) +static unsigned int iavf_rx_offset(struct iavf_ring *rx_ring) { return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0; } @@ -977,9 +977,9 @@ no_buffers: * @skb: skb currently being received and modified * @rx_desc: the receive descriptor **/ -static inline void iavf_rx_checksum(struct iavf_vsi *vsi, - struct sk_buff *skb, - union iavf_rx_desc *rx_desc) +static void iavf_rx_checksum(struct iavf_vsi *vsi, + struct sk_buff *skb, + union iavf_rx_desc *rx_desc) { struct iavf_rx_ptype_decoded decoded; u32 rx_error, rx_status; @@ -1061,7 +1061,7 @@ checksum_fail: * * Returns a hash type to be used by skb_set_hash **/ -static inline int iavf_ptype_to_htype(u8 ptype) +static int iavf_ptype_to_htype(u8 ptype) { struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); @@ -1085,10 +1085,10 @@ static inline int iavf_ptype_to_htype(u8 ptype) * @skb: skb currently being received and modified * @rx_ptype: Rx packet type **/ -static inline void iavf_rx_hash(struct iavf_ring *ring, - union iavf_rx_desc *rx_desc, - struct sk_buff *skb, - u8 rx_ptype) +static void iavf_rx_hash(struct iavf_ring *ring, + union iavf_rx_desc *rx_desc, + struct sk_buff *skb, + u8 rx_ptype) { u32 hash; const __le64 rss_mask = @@ -1115,10 +1115,10 @@ static inline void iavf_rx_hash(struct iavf_ring *ring, * order to populate the hash, checksum, VLAN, protocol, and * other fields within the skb. 
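 *
 * Called from the Rx clean-up path once per completed frame, after the
 * skb has been built and before it is handed up the stack.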
**/ -static inline -void iavf_process_skb_fields(struct iavf_ring *rx_ring, - union iavf_rx_desc *rx_desc, struct sk_buff *skb, - u8 rx_ptype) +static void +iavf_process_skb_fields(struct iavf_ring *rx_ring, + union iavf_rx_desc *rx_desc, struct sk_buff *skb, + u8 rx_ptype) { iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype); @@ -1662,8 +1662,8 @@ static inline u32 iavf_buildreg_itr(const int type, u16 itr) * @q_vector: q_vector for which itr is being updated and interrupt enabled * **/ -static inline void iavf_update_enable_itr(struct iavf_vsi *vsi, - struct iavf_q_vector *q_vector) +static void iavf_update_enable_itr(struct iavf_vsi *vsi, + struct iavf_q_vector *q_vector) { struct iavf_hw *hw = &vsi->back->hw; u32 intval; @@ -2275,9 +2275,9 @@ int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size) * @td_cmd: the command field in the descriptor * @td_offset: offset for checksum or crc **/ -static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, - struct iavf_tx_buffer *first, u32 tx_flags, - const u8 hdr_len, u32 td_cmd, u32 td_offset) +static void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, + struct iavf_tx_buffer *first, u32 tx_flags, + const u8 hdr_len, u32 td_cmd, u32 td_offset) { unsigned int data_len = skb->data_len; unsigned int size = skb_headlen(skb); diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h index 7e6ee32d19b6..10ba36602c0c 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h @@ -15,7 +15,6 @@ */ #define IAVF_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ #define IAVF_ITR_MASK 0x1FFE /* mask for ITR register value */ -#define IAVF_MIN_ITR 2 /* reg uses 2 usec resolution */ #define IAVF_ITR_100K 10 /* all values below must be even */ #define IAVF_ITR_50K 20 #define IAVF_ITR_20K 50 diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h index 9f1f523807c4..2b6a207fa441 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_type.h +++ b/drivers/net/ethernet/intel/iavf/iavf_type.h @@ -69,15 +69,6 @@ enum iavf_debug_mask { * the Firmware and AdminQ are intended to insulate the driver from most of the * future changes, but these structures will also do part of the job. 
*/ -enum iavf_mac_type { - IAVF_MAC_UNKNOWN = 0, - IAVF_MAC_XL710, - IAVF_MAC_VF, - IAVF_MAC_X722, - IAVF_MAC_X722_VF, - IAVF_MAC_GENERIC, -}; - enum iavf_vsi_type { IAVF_VSI_MAIN = 0, IAVF_VSI_VMDQ1 = 1, @@ -110,11 +101,8 @@ struct iavf_hw_capabilities { }; struct iavf_mac_info { - enum iavf_mac_type type; u8 addr[ETH_ALEN]; u8 perm_addr[ETH_ALEN]; - u8 san_addr[ETH_ALEN]; - u16 max_fcoeq; }; /* PCI bus types */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index be3c007ce90a..2d9366be0ec5 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -3,7 +3,6 @@ #include "iavf.h" #include "iavf_prototype.h" -#include "iavf_client.h" /** * iavf_send_pf_msg @@ -142,6 +141,7 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter) VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 | VIRTCHNL_VF_OFFLOAD_ENCAP | VIRTCHNL_VF_OFFLOAD_VLAN_V2 | + VIRTCHNL_VF_OFFLOAD_CRC | VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | VIRTCHNL_VF_OFFLOAD_ADQ | @@ -215,8 +215,7 @@ int iavf_get_vf_config(struct iavf_adapter *adapter) u16 len; int err; - len = sizeof(struct virtchnl_vf_resource) + - IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource); + len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE; event.buf_len = len; event.msg_buf = kzalloc(len, GFP_KERNEL); if (!event.msg_buf) @@ -284,7 +283,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter) return; } adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES; - len = struct_size(vqci, qpair, pairs); + len = virtchnl_struct_size(vqci, qpair, pairs); vqci = kzalloc(len, GFP_KERNEL); if (!vqci) return; @@ -313,6 +312,9 @@ void iavf_configure_queues(struct iavf_adapter *adapter) vqpi->rxq.databuffer_size = ALIGN(adapter->rx_rings[i].rx_buf_len, BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT)); + if (CRC_OFFLOAD_ALLOWED(adapter)) + vqpi->rxq.crc_disable = !!(adapter->netdev->features & + NETIF_F_RXFCS); vqpi++; } @@ -397,7 +399,7 @@ void iavf_map_queues(struct iavf_adapter *adapter) q_vectors = adapter->num_msix_vectors - NONQ_VECS; - len = struct_size(vimi, vecmap, adapter->num_msix_vectors); + len = virtchnl_struct_size(vimi, vecmap, adapter->num_msix_vectors); vimi = kzalloc(len, GFP_KERNEL); if (!vimi) return; @@ -476,13 +478,11 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter) } adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR; - len = struct_size(veal, list, count); + len = virtchnl_struct_size(veal, list, count); if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n"); - count = (IAVF_MAX_AQ_BUF_SIZE - - sizeof(struct virtchnl_ether_addr_list)) / - sizeof(struct virtchnl_ether_addr); - len = struct_size(veal, list, count); + while (len > IAVF_MAX_AQ_BUF_SIZE) + len = virtchnl_struct_size(veal, list, --count); more = true; } @@ -547,13 +547,11 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter) } adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR; - len = struct_size(veal, list, count); + len = virtchnl_struct_size(veal, list, count); if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n"); - count = (IAVF_MAX_AQ_BUF_SIZE - - sizeof(struct virtchnl_ether_addr_list)) / - sizeof(struct virtchnl_ether_addr); - len = struct_size(veal, list, count); + while (len > IAVF_MAX_AQ_BUF_SIZE) + len = virtchnl_struct_size(veal, list, --count); more = true; } veal = kzalloc(len, GFP_ATOMIC); @@ -687,12 +685,12 @@ void iavf_add_vlans(struct 
iavf_adapter *adapter) adapter->current_op = VIRTCHNL_OP_ADD_VLAN; - len = sizeof(*vvfl) + (count * sizeof(u16)); + len = virtchnl_struct_size(vvfl, vlan_id, count); if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); - count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) / - sizeof(u16); - len = sizeof(*vvfl) + (count * sizeof(u16)); + while (len > IAVF_MAX_AQ_BUF_SIZE) + len = virtchnl_struct_size(vvfl, vlan_id, + --count); more = true; } vvfl = kzalloc(len, GFP_ATOMIC); @@ -732,15 +730,12 @@ void iavf_add_vlans(struct iavf_adapter *adapter) more = true; } - len = sizeof(*vvfl_v2) + ((count - 1) * - sizeof(struct virtchnl_vlan_filter)); + len = virtchnl_struct_size(vvfl_v2, filters, count); if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); - count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl_v2)) / - sizeof(struct virtchnl_vlan_filter); - len = sizeof(*vvfl_v2) + - ((count - 1) * - sizeof(struct virtchnl_vlan_filter)); + while (len > IAVF_MAX_AQ_BUF_SIZE) + len = virtchnl_struct_size(vvfl_v2, filters, + --count); more = true; } @@ -838,12 +833,12 @@ void iavf_del_vlans(struct iavf_adapter *adapter) adapter->current_op = VIRTCHNL_OP_DEL_VLAN; - len = sizeof(*vvfl) + (count * sizeof(u16)); + len = virtchnl_struct_size(vvfl, vlan_id, count); if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); - count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) / - sizeof(u16); - len = sizeof(*vvfl) + (count * sizeof(u16)); + while (len > IAVF_MAX_AQ_BUF_SIZE) + len = virtchnl_struct_size(vvfl, vlan_id, + --count); more = true; } vvfl = kzalloc(len, GFP_ATOMIC); @@ -884,16 +879,12 @@ void iavf_del_vlans(struct iavf_adapter *adapter) adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2; - len = sizeof(*vvfl_v2) + - ((count - 1) * sizeof(struct virtchnl_vlan_filter)); + len = virtchnl_struct_size(vvfl_v2, filters, count); if (len > IAVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); - count = (IAVF_MAX_AQ_BUF_SIZE - - sizeof(*vvfl_v2)) / - sizeof(struct virtchnl_vlan_filter); - len = sizeof(*vvfl_v2) + - ((count - 1) * - sizeof(struct virtchnl_vlan_filter)); + while (len > IAVF_MAX_AQ_BUF_SIZE) + len = virtchnl_struct_size(vvfl_v2, filters, + --count); more = true; } @@ -948,14 +939,14 @@ void iavf_del_vlans(struct iavf_adapter *adapter) /** * iavf_set_promiscuous * @adapter: adapter structure - * @flags: bitmask to control unicast/multicast promiscuous. * * Request that the PF enable promiscuous mode for our VSI. 
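 *
 * The flags sent to the PF are derived from netdev->flags; two flag bits
 * yield three reachable states:
 *
 *	!IFF_PROMISC &&  IFF_ALLMULTI  -> FLAG_VF_MULTICAST_PROMISC
 *	!IFF_PROMISC && !IFF_ALLMULTI  -> 0 (leave promiscuous mode)
 *	 IFF_PROMISC, any IFF_ALLMULTI -> FLAG_VF_UNICAST_PROMISC |
 *					  FLAG_VF_MULTICAST_PROMISC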
**/ -void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags) +void iavf_set_promiscuous(struct iavf_adapter *adapter) { + struct net_device *netdev = adapter->netdev; struct virtchnl_promisc_info vpi; - int promisc_all; + unsigned int flags; if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -964,36 +955,57 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags) return; } - promisc_all = FLAG_VF_UNICAST_PROMISC | - FLAG_VF_MULTICAST_PROMISC; - if ((flags & promisc_all) == promisc_all) { - adapter->flags |= IAVF_FLAG_PROMISC_ON; - adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC; - dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n"); - } + /* prevent changes to promiscuous flags */ + spin_lock_bh(&adapter->current_netdev_promisc_flags_lock); - if (flags & FLAG_VF_MULTICAST_PROMISC) { - adapter->flags |= IAVF_FLAG_ALLMULTI_ON; - adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI; - dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n", - adapter->netdev->name); + /* sanity check to prevent duplicate AQ calls */ + if (!iavf_promiscuous_mode_changed(adapter)) { + adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE; + dev_dbg(&adapter->pdev->dev, "No change in promiscuous mode\n"); + /* allow changes to promiscuous flags */ + spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock); + return; } - if (!flags) { - if (adapter->flags & IAVF_FLAG_PROMISC_ON) { - adapter->flags &= ~IAVF_FLAG_PROMISC_ON; - adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC; - dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); - } + /* there are 2 bits, but only 3 states */ + if (!(netdev->flags & IFF_PROMISC) && + netdev->flags & IFF_ALLMULTI) { + /* State 1 - only multicast promiscuous mode enabled + * - !IFF_PROMISC && IFF_ALLMULTI + */ + flags = FLAG_VF_MULTICAST_PROMISC; + adapter->current_netdev_promisc_flags |= IFF_ALLMULTI; + adapter->current_netdev_promisc_flags &= ~IFF_PROMISC; + dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n"); + } else if (!(netdev->flags & IFF_PROMISC) && + !(netdev->flags & IFF_ALLMULTI)) { + /* State 2 - unicast/multicast promiscuous mode disabled + * - !IFF_PROMISC && !IFF_ALLMULTI + */ + flags = 0; + adapter->current_netdev_promisc_flags &= + ~(IFF_PROMISC | IFF_ALLMULTI); + dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); + } else { + /* State 3 - unicast/multicast promiscuous mode enabled + * - IFF_PROMISC && IFF_ALLMULTI + * - IFF_PROMISC && !IFF_ALLMULTI + */ + flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC; + adapter->current_netdev_promisc_flags |= IFF_PROMISC; + if (netdev->flags & IFF_ALLMULTI) + adapter->current_netdev_promisc_flags |= IFF_ALLMULTI; + else + adapter->current_netdev_promisc_flags &= ~IFF_ALLMULTI; - if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) { - adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON; - adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI; - dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n", - adapter->netdev->name); - } + dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n"); } + adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE; + + /* allow changes to promiscuous flags */ + spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock); + adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; vpi.vsi_id = adapter->vsi_res->vsi_id; vpi.flags = flags; @@ -1085,8 +1097,7 @@ void iavf_set_rss_key(struct iavf_adapter 
*adapter) adapter->current_op); return; } - len = sizeof(struct virtchnl_rss_key) + - (adapter->rss_key_size * sizeof(u8)) - 1; + len = virtchnl_struct_size(vrk, key, adapter->rss_key_size); vrk = kzalloc(len, GFP_KERNEL); if (!vrk) return; @@ -1117,8 +1128,7 @@ void iavf_set_rss_lut(struct iavf_adapter *adapter) adapter->current_op); return; } - len = sizeof(struct virtchnl_rss_lut) + - (adapter->rss_lut_size * sizeof(u8)) - 1; + len = virtchnl_struct_size(vrl, lut, adapter->rss_lut_size); vrl = kzalloc(len, GFP_KERNEL); if (!vrl) return; @@ -1367,8 +1377,6 @@ void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid) VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2); } -#define IAVF_MAX_SPEED_STRLEN 13 - /** * iavf_print_link_message - print link up or down * @adapter: adapter structure @@ -1386,10 +1394,6 @@ static void iavf_print_link_message(struct iavf_adapter *adapter) return; } - speed = kzalloc(IAVF_MAX_SPEED_STRLEN, GFP_KERNEL); - if (!speed) - return; - if (ADV_LINK_SUPPORT(adapter)) { link_speed_mbps = adapter->link_speed_mbps; goto print_link_msg; @@ -1427,17 +1431,17 @@ static void iavf_print_link_message(struct iavf_adapter *adapter) print_link_msg: if (link_speed_mbps > SPEED_1000) { - if (link_speed_mbps == SPEED_2500) - snprintf(speed, IAVF_MAX_SPEED_STRLEN, "2.5 Gbps"); - else + if (link_speed_mbps == SPEED_2500) { + speed = kasprintf(GFP_KERNEL, "%s", "2.5 Gbps"); + } else { /* convert to Gbps inline */ - snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s", - link_speed_mbps / 1000, "Gbps"); + speed = kasprintf(GFP_KERNEL, "%d Gbps", + link_speed_mbps / 1000); + } } else if (link_speed_mbps == SPEED_UNKNOWN) { - snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps"); + speed = kasprintf(GFP_KERNEL, "%s", "Unknown Mbps"); } else { - snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s", - link_speed_mbps, "Mbps"); + speed = kasprintf(GFP_KERNEL, "%d Mbps", link_speed_mbps); } netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed); @@ -1499,7 +1503,7 @@ void iavf_enable_channels(struct iavf_adapter *adapter) return; } - len = struct_size(vti, list, adapter->num_tc - 1); + len = virtchnl_struct_size(vti, list, adapter->num_tc); vti = kzalloc(len, GFP_KERNEL); if (!vti) return; @@ -1731,8 +1735,8 @@ void iavf_add_fdir_filter(struct iavf_adapter *adapter) **/ void iavf_del_fdir_filter(struct iavf_adapter *adapter) { + struct virtchnl_fdir_del f = {}; struct iavf_fdir_fltr *fdir; - struct virtchnl_fdir_del f; bool process_fltr = false; int len; @@ -1749,11 +1753,16 @@ void iavf_del_fdir_filter(struct iavf_adapter *adapter) list_for_each_entry(fdir, &adapter->fdir_list_head, list) { if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) { process_fltr = true; - memset(&f, 0, len); f.vsi_id = fdir->vc_add_msg.vsi_id; f.flow_id = fdir->flow_id; fdir->state = IAVF_FDIR_FLTR_DEL_PENDING; break; + } else if (fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST) { + process_fltr = true; + f.vsi_id = fdir->vc_add_msg.vsi_id; + f.flow_id = fdir->flow_id; + fdir->state = IAVF_FDIR_FLTR_DIS_PENDING; + break; } } spin_unlock_bh(&adapter->fdir_fltr_lock); @@ -1898,6 +1907,48 @@ static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev, } /** + * iavf_activate_fdir_filters - Reactivate all FDIR filters after a reset + * @adapter: private adapter structure + * + * Called after a reset to re-add all FDIR filters and delete some of them + * if they were pending to be deleted. 
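+ *
+ * The transitions performed here are:
+ *
+ *	ADD_REQUEST, ADD_PENDING, ACTIVE -> ADD_REQUEST (replay to the PF)
+ *	DIS_REQUEST, DIS_PENDING         -> INACTIVE    (left for ifup to
+ *							 restore)
+ *	DEL_REQUEST, DEL_PENDING         -> freed; the PF's filter list is
+ *					    already cleared by the reset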
+ */ +static void iavf_activate_fdir_filters(struct iavf_adapter *adapter) +{ + struct iavf_fdir_fltr *f, *ftmp; + bool add_filters = false; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry_safe(f, ftmp, &adapter->fdir_list_head, list) { + if (f->state == IAVF_FDIR_FLTR_ADD_REQUEST || + f->state == IAVF_FDIR_FLTR_ADD_PENDING || + f->state == IAVF_FDIR_FLTR_ACTIVE) { + /* All filters and requests have been removed in PF, + * restore them + */ + f->state = IAVF_FDIR_FLTR_ADD_REQUEST; + add_filters = true; + } else if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST || + f->state == IAVF_FDIR_FLTR_DIS_PENDING) { + /* Link down state, leave filters as inactive */ + f->state = IAVF_FDIR_FLTR_INACTIVE; + } else if (f->state == IAVF_FDIR_FLTR_DEL_REQUEST || + f->state == IAVF_FDIR_FLTR_DEL_PENDING) { + /* Delete filters that were pending to be deleted, the + * list on PF is already cleared after a reset + */ + list_del(&f->list); + kfree(f); + adapter->fdir_active_fltr--; + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + if (add_filters) + adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; +} + +/** * iavf_virtchnl_completion * @adapter: adapter structure * @v_opcode: opcode sent by PF @@ -2074,7 +2125,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, spin_lock_bh(&adapter->fdir_fltr_lock); list_for_each_entry(fdir, &adapter->fdir_list_head, list) { - if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { + if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING || + fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) { fdir->state = IAVF_FDIR_FLTR_ACTIVE; dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n", iavf_stat_str(&adapter->hw, @@ -2175,9 +2227,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, } break; case VIRTCHNL_OP_GET_VF_RESOURCES: { - u16 len = sizeof(struct virtchnl_vf_resource) + - IAVF_MAX_VF_VSI * - sizeof(struct virtchnl_vsi_resource); + u16 len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE; + memcpy(adapter->vf_res, msg, min(msglen, len)); iavf_validate_num_queues(adapter); iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res); @@ -2211,6 +2262,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, spin_unlock_bh(&adapter->mac_vlan_list_lock); + iavf_activate_fdir_filters(adapter); + iavf_parse_vf_resource_msg(adapter); /* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the @@ -2305,19 +2358,6 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, if (v_opcode != adapter->current_op) return; break; - case VIRTCHNL_OP_RDMA: - /* Gobble zero-length replies from the PF. They indicate that - * a previous message was received OK, and the client doesn't - * care about that. 
- */ - if (msglen && CLIENT_ENABLED(adapter)) - iavf_notify_client_message(&adapter->vsi, msg, msglen); - break; - - case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP: - adapter->client_pending &= - ~(BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP)); - break; case VIRTCHNL_OP_GET_RSS_HENA_CAPS: { struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; @@ -2400,7 +2440,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head, list) { if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { - if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) { + if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS || + del_fltr->status == + VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) { dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n", fdir->loc); list_del(&fdir->list); @@ -2412,6 +2454,17 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, del_fltr->status); iavf_print_fdir_fltr(adapter, fdir); } + } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) { + if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS || + del_fltr->status == + VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) { + fdir->state = IAVF_FDIR_FLTR_INACTIVE; + } else { + fdir->state = IAVF_FDIR_FLTR_ACTIVE; + dev_info(&adapter->pdev->dev, "Failed to disable Flow Director filter with status: %d\n", + del_fltr->status); + iavf_print_fdir_fltr(adapter, fdir); + } } } spin_unlock_bh(&adapter->fdir_fltr_lock); diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 817977e3039d..0679907980f7 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -43,9 +43,9 @@ ice-$(CONFIG_PCI_IOV) += \ ice_vf_mbx.o \ ice_vf_vsi_vlan_ops.o \ ice_vf_lib.o -ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o +ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o ice_dpll.o ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o -ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o +ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o ice_eswitch_br.o ice-$(CONFIG_GNSS) += ice_gnss.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 4ba3d99439a0..351e0d36df44 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -76,6 +76,7 @@ #include "ice_vsi_vlan_ops.h" #include "ice_gnss.h" #include "ice_irq.h" +#include "ice_dpll.h" #define ICE_BAR0 0 #define ICE_REQ_DESC_MULTIPLE 32 @@ -195,11 +196,16 @@ #define ice_pf_to_dev(pf) (&((pf)->pdev->dev)) +#define ice_pf_src_tmr_owned(pf) ((pf)->hw.func_caps.ts_func_info.src_tmr_owned) + enum ice_feature { ICE_F_DSCP, - ICE_F_PTP_EXTTS, + ICE_F_PHY_RCLK, ICE_F_SMA_CTRL, + ICE_F_CGU, ICE_F_GNSS, + ICE_F_ROCE_LAG, + ICE_F_SRIOV_LAG, ICE_F_MAX }; @@ -370,6 +376,7 @@ struct ice_vsi { u16 rx_buf_len; struct ice_aqc_vsi_props info; /* VSI properties */ + struct ice_vsi_vlan_info vlan_info; /* vlan config to be restored */ /* VSI stats */ struct rtnl_link_stats64 net_stats; @@ -505,6 +512,7 @@ enum ice_pf_flags { ICE_FLAG_UNPLUG_AUX_DEV, ICE_FLAG_MTU_CHANGED, ICE_FLAG_GNSS, /* GNSS successfully initialized */ + ICE_FLAG_DPLL, /* SyncE/PTP dplls initialized */ ICE_PF_FLAGS_NBITS /* must be last */ }; @@ -517,6 +525,7 @@ enum ice_misc_thread_tasks { struct ice_switchdev_info { struct ice_vsi *control_vsi; struct ice_vsi *uplink_vsi; + struct ice_esw_br_offloads *br_offloads; bool is_running; }; @@ -545,6 +554,8 @@ struct ice_pf { * MSIX 
vectors allowed on this PF. */ u16 sriov_base_vector; + unsigned long *sriov_irq_bm; /* bitmap to track irq usage */ + u16 sriov_irq_size; /* size of the irq_bm bitmap */ u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */ @@ -567,6 +578,7 @@ struct ice_pf { struct mutex sw_mutex; /* lock for protecting VSI alloc flow */ struct mutex tc_mutex; /* lock to protect TC changes */ struct mutex adev_mutex; /* lock to protect aux device access */ + struct mutex lag_mutex; /* protect ice_lag struct in PF */ u32 msg_enable; struct ice_ptp ptp; struct gnss_serial *gnss_serial; @@ -626,6 +638,7 @@ struct ice_pf { struct ice_lag *lag; /* Link Aggregation information */ struct ice_switchdev_info switchdev; + struct ice_esw_br_port *br_port; #define ICE_INVALID_AGG_NODE_ID 0 #define ICE_PF_AGG_NODE_ID_START 1 @@ -634,8 +647,11 @@ struct ice_pf { #define ICE_VF_AGG_NODE_ID_START 65 #define ICE_MAX_VF_AGG_NODES 32 struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES]; + struct ice_dplls dplls; }; +extern struct workqueue_struct *ice_lag_wq; + struct ice_netdev_priv { struct ice_vsi *vsi; struct ice_repr *repr; @@ -660,6 +676,18 @@ static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv) } /** + * ice_ptp_pf_handles_tx_interrupt - Check if PF handles Tx interrupt + * @pf: Board private structure + * + * Return true if this PF should respond to the Tx timestamp interrupt + * indication in the miscellaneous OICR interrupt handler. + */ +static inline bool ice_ptp_pf_handles_tx_interrupt(struct ice_pf *pf) +{ + return pf->ptp.tx_interrupt_mode != ICE_PTP_TX_INTERRUPT_NONE; +} + +/** * ice_irq_dynamic_ena - Enable default interrupt generation settings * @hw: pointer to HW struct * @vsi: pointer to VSI struct, can be NULL @@ -853,7 +881,7 @@ static inline bool ice_is_adq_active(struct ice_pf *pf) return false; } -bool netif_is_ice(struct net_device *dev); +bool netif_is_ice(const struct net_device *dev); int ice_vsi_setup_tx_rings(struct ice_vsi *vsi); int ice_vsi_setup_rx_rings(struct ice_vsi *vsi); int ice_vsi_open_ctrl(struct ice_vsi *vsi); @@ -909,14 +937,32 @@ void ice_fdir_release_flows(struct ice_hw *hw); void ice_fdir_replay_flows(struct ice_hw *hw); void ice_fdir_replay_fltrs(struct ice_pf *pf); int ice_fdir_create_dflt_rules(struct ice_pf *pf); -int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout, - struct ice_rq_event_info *event); + +enum ice_aq_task_state { + ICE_AQ_TASK_NOT_PREPARED, + ICE_AQ_TASK_WAITING, + ICE_AQ_TASK_COMPLETE, + ICE_AQ_TASK_CANCELED, +}; + +struct ice_aq_task { + struct hlist_node entry; + struct ice_rq_event_info event; + enum ice_aq_task_state state; + u16 opcode; +}; + +void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task, + u16 opcode); +int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task, + unsigned long timeout); int ice_open(struct net_device *netdev); int ice_open_internal(struct net_device *netdev); int ice_stop(struct net_device *netdev); void ice_service_task_schedule(struct ice_pf *pf); int ice_load(struct ice_pf *pf); void ice_unload(struct ice_pf *pf); +void ice_adv_lnk_speed_maps_init(void); /** * ice_set_rdma_cap - enable RDMA support diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 63d3e1dcbba5..d7fdb7ba7268 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -120,6 +120,9 @@ struct ice_aqc_list_caps_elem { #define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 
0x0076 #define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077 #define ICE_AQC_CAPS_NVM_MGMT 0x0080 +#define ICE_AQC_CAPS_FW_LAG_SUPPORT 0x0092 +#define ICE_AQC_BIT_ROCEV2_LAG 0x01 +#define ICE_AQC_BIT_SRIOV_LAG 0x02 u8 major_ver; u8 minor_ver; @@ -232,6 +235,8 @@ struct ice_aqc_set_port_params { #define ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA BIT(2) __le16 bad_frame_vsi; __le16 swid; +#define ICE_AQC_PORT_SWID_VALID BIT(15) +#define ICE_AQC_PORT_SWID_M 0xFF u8 reserved[10]; }; @@ -241,10 +246,12 @@ struct ice_aqc_set_port_params { * Allocate Resources command (indirect 0x0208) * Free Resources command (indirect 0x0209) * Get Allocated Resource Descriptors Command (indirect 0x020A) + * Share Resource command (indirect 0x020B) */ #define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03 #define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04 #define ICE_AQC_RES_TYPE_RECIPE 0x05 +#define ICE_AQC_RES_TYPE_SWID 0x07 #define ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK 0x21 #define ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES 0x22 #define ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES 0x23 @@ -264,6 +271,7 @@ struct ice_aqc_set_port_params { /* Allocate Resources command (indirect 0x0208) * Free Resources command (indirect 0x0209) + * Share Resource command (indirect 0x020B) */ struct ice_aqc_alloc_free_res_cmd { __le16 num_entries; /* Number of Resource entries */ @@ -818,7 +826,11 @@ struct ice_aqc_txsched_move_grp_info_hdr { __le32 src_parent_teid; __le32 dest_parent_teid; __le16 num_elems; - __le16 reserved; + u8 mode; +#define ICE_AQC_MOVE_ELEM_MODE_SAME_PF 0x0 +#define ICE_AQC_MOVE_ELEM_MODE_GIVE_OWN 0x1 +#define ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN 0x2 + u8 reserved; }; struct ice_aqc_move_elem { @@ -1087,7 +1099,15 @@ struct ice_aqc_get_phy_caps { #define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2) #define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3) #define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4) -#define ICE_PHY_TYPE_HIGH_MAX_INDEX 4 +#define ICE_PHY_TYPE_HIGH_200G_CR4_PAM4 BIT_ULL(5) +#define ICE_PHY_TYPE_HIGH_200G_SR4 BIT_ULL(6) +#define ICE_PHY_TYPE_HIGH_200G_FR4 BIT_ULL(7) +#define ICE_PHY_TYPE_HIGH_200G_LR4 BIT_ULL(8) +#define ICE_PHY_TYPE_HIGH_200G_DR4 BIT_ULL(9) +#define ICE_PHY_TYPE_HIGH_200G_KR4_PAM4 BIT_ULL(10) +#define ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC BIT_ULL(11) +#define ICE_PHY_TYPE_HIGH_200G_AUI4 BIT_ULL(12) +#define ICE_PHY_TYPE_HIGH_MAX_INDEX 12 struct ice_aqc_get_phy_caps_data { __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ @@ -1307,11 +1327,41 @@ struct ice_aqc_get_link_status_data { #define ICE_AQ_LINK_SPEED_40GB BIT(8) #define ICE_AQ_LINK_SPEED_50GB BIT(9) #define ICE_AQ_LINK_SPEED_100GB BIT(10) +#define ICE_AQ_LINK_SPEED_200GB BIT(11) #define ICE_AQ_LINK_SPEED_UNKNOWN BIT(15) - __le32 reserved3; /* Aligns next field to 8-byte boundary */ - __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ - __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */ -}; + /* Aligns next field to 8-byte boundary */ + __le16 reserved3; + u8 ext_fec_status; + /* RS 272 FEC enabled */ +#define ICE_AQ_LINK_RS_272_FEC_EN BIT(0) + u8 reserved4; + /* Use values from ICE_PHY_TYPE_LOW_* */ + __le64 phy_type_low; + /* Use values from ICE_PHY_TYPE_HIGH_* */ + __le64 phy_type_high; +#define ICE_AQC_LS_DATA_SIZE_V1 \ + offsetofend(struct ice_aqc_get_link_status_data, phy_type_high) + /* Get link status v2 link partner data */ + __le64 lp_phy_type_low; + __le64 lp_phy_type_high; + u8 lp_fec_adv; +#define ICE_AQ_LINK_LP_10G_KR_FEC_CAP BIT(0) +#define ICE_AQ_LINK_LP_25G_KR_FEC_CAP BIT(1) +#define 
ICE_AQ_LINK_LP_RS_528_FEC_CAP BIT(2) +#define ICE_AQ_LINK_LP_50G_KR_272_FEC_CAP BIT(3) +#define ICE_AQ_LINK_LP_100G_KR_272_FEC_CAP BIT(4) +#define ICE_AQ_LINK_LP_200G_KR_272_FEC_CAP BIT(5) + u8 lp_fec_req; +#define ICE_AQ_LINK_LP_10G_KR_FEC_REQ BIT(0) +#define ICE_AQ_LINK_LP_25G_KR_FEC_REQ BIT(1) +#define ICE_AQ_LINK_LP_RS_528_FEC_REQ BIT(2) +#define ICE_AQ_LINK_LP_KR_272_FEC_REQ BIT(3) + u8 lp_flowcontrol; +#define ICE_AQ_LINK_LP_PAUSE_ADV BIT(0) +#define ICE_AQ_LINK_LP_ASM_DIR_ADV BIT(1) +#define ICE_AQC_LS_DATA_SIZE_V2 \ + offsetofend(struct ice_aqc_get_link_status_data, lp_flowcontrol) +} __packed; /* Set event mask command (direct 0x0613) */ struct ice_aqc_set_event_mask { @@ -1339,6 +1389,30 @@ struct ice_aqc_set_mac_lb { u8 reserved[15]; }; +/* Set PHY recovered clock output (direct 0x0630) */ +struct ice_aqc_set_phy_rec_clk_out { + u8 phy_output; + u8 port_num; +#define ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT 0xFF + u8 flags; +#define ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN BIT(0) + u8 rsvd; + __le32 freq; + u8 rsvd2[6]; + __le16 node_handle; +}; + +/* Get PHY recovered clock output (direct 0x0631) */ +struct ice_aqc_get_phy_rec_clk_out { + u8 phy_output; + u8 port_num; +#define ICE_AQC_GET_PHY_REC_CLK_OUT_CURR_PORT 0xFF + u8 flags; +#define ICE_AQC_GET_PHY_REC_CLK_OUT_OUT_EN BIT(0) + u8 rsvd[11]; + __le16 node_handle; +}; + struct ice_aqc_link_topo_params { u8 lport_num; u8 lport_num_valid; @@ -1355,6 +1429,9 @@ struct ice_aqc_link_topo_params { #define ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE 6 #define ICE_AQC_LINK_TOPO_NODE_TYPE_MEZZ 7 #define ICE_AQC_LINK_TOPO_NODE_TYPE_ID_EEPROM 8 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL 9 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX 10 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_GPS 11 #define ICE_AQC_LINK_TOPO_NODE_CTX_S 4 #define ICE_AQC_LINK_TOPO_NODE_CTX_M \ (0xF << ICE_AQC_LINK_TOPO_NODE_CTX_S) @@ -1391,7 +1468,13 @@ struct ice_aqc_link_topo_addr { struct ice_aqc_get_link_topo { struct ice_aqc_link_topo_addr addr; u8 node_part_num; -#define ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575 0x21 +#define ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575 0x21 +#define ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032 0x24 +#define ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384 0x25 +#define ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY 0x30 +#define ICE_AQC_GET_LINK_TOPO_NODE_NR_C827 0x31 +#define ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX 0x47 +#define ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS 0x48 u8 rsvd[9]; }; @@ -1781,11 +1864,10 @@ struct ice_aqc_lldp_filter_ctrl { u8 reserved2[12]; }; +#define ICE_AQC_RSS_VSI_VALID BIT(15) + /* Get/Set RSS key (indirect 0x0B04/0x0B02) */ struct ice_aqc_get_set_rss_key { -#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15) -#define ICE_AQC_GSET_RSS_KEY_VSI_ID_S 0 -#define ICE_AQC_GSET_RSS_KEY_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_KEY_VSI_ID_S) __le16 vsi_id; u8 reserved[6]; __le32 addr_high; @@ -1803,35 +1885,33 @@ struct ice_aqc_get_set_rss_keys { u8 extended_hash_key[ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE]; }; -/* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */ -struct ice_aqc_get_set_rss_lut { -#define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15) -#define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0 -#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S) - __le16 vsi_id; -#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S 0 -#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M \ - (0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) - -#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI 0 -#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF 1 -#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL 2 +enum 
ice_lut_type { + ICE_LUT_VSI = 0, + ICE_LUT_PF = 1, + ICE_LUT_GLOBAL = 2, +}; -#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S 2 -#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M \ - (0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) +enum ice_lut_size { + ICE_LUT_VSI_SIZE = 64, + ICE_LUT_GLOBAL_SIZE = 512, + ICE_LUT_PF_SIZE = 2048, +}; -#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 128 -#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG 0 -#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 512 -#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG 1 -#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K 2048 -#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG 2 +/* enum ice_aqc_lut_flags combines constants used to fill + * &ice_aqc_get_set_rss_lut::flags, which is an amalgamation of global LUT ID, + * LUT size and LUT type, the last of which needs neither shift nor mask. + */ +enum ice_aqc_lut_flags { + ICE_AQC_LUT_SIZE_SMALL = 0, /* size = 64 or 128 */ + ICE_AQC_LUT_SIZE_512 = BIT(2), + ICE_AQC_LUT_SIZE_2K = BIT(3), -#define ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S 4 -#define ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M \ - (0xF << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) + ICE_AQC_LUT_GLOBAL_IDX = GENMASK(7, 4), +}; +/* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */ +struct ice_aqc_get_set_rss_lut { + __le16 vsi_id; __le16 flags; __le32 reserved; __le32 addr_high; @@ -1923,6 +2003,42 @@ struct ice_aqc_dis_txq_item { __le16 q_id[]; } __packed; +/* Move/Reconfigure Tx queue (indirect 0x0C32) */ +struct ice_aqc_cfg_txqs { + u8 cmd_type; +#define ICE_AQC_Q_CFG_MOVE_NODE 0x1 +#define ICE_AQC_Q_CFG_TC_CHNG 0x2 +#define ICE_AQC_Q_CFG_MOVE_TC_CHNG 0x3 +#define ICE_AQC_Q_CFG_SUBSEQ_CALL BIT(2) +#define ICE_AQC_Q_CFG_FLUSH BIT(3) + u8 num_qs; + u8 port_num_chng; +#define ICE_AQC_Q_CFG_SRC_PRT_M 0x7 +#define ICE_AQC_Q_CFG_DST_PRT_S 3 +#define ICE_AQC_Q_CFG_DST_PRT_M (0x7 << ICE_AQC_Q_CFG_DST_PRT_S) + u8 time_out; +#define ICE_AQC_Q_CFG_TIMEOUT_S 2 +#define ICE_AQC_Q_CFG_TIMEOUT_M (0x1F << ICE_AQC_Q_CFG_TIMEOUT_S) + __le32 blocked_cgds; + __le32 addr_high; + __le32 addr_low; +}; + +/* Per Q struct for Move/Reconfigure Tx LAN Queues (indirect 0x0C32) */ +struct ice_aqc_cfg_txq_perq { + __le16 q_handle; + u8 tc; + u8 rsvd; + __le32 q_teid; +}; + +/* The buffer for Move/Reconfigure Tx LAN Queues (indirect 0x0C32) */ +struct ice_aqc_cfg_txqs_buf { + __le32 src_parent_teid; + __le32 dst_parent_teid; + struct ice_aqc_cfg_txq_perq queue_info[]; +}; + /* Add Tx RDMA Queue Set (indirect 0x0C33) */ struct ice_aqc_add_rdma_qset { u8 num_qset_grps; @@ -2079,6 +2195,193 @@ struct ice_aqc_get_pkg_info_resp { struct ice_aqc_get_pkg_info pkg_info[]; }; +/* Get CGU abilities command response data structure (indirect 0x0C61) */ +struct ice_aqc_get_cgu_abilities { + u8 num_inputs; + u8 num_outputs; + u8 pps_dpll_idx; + u8 eec_dpll_idx; + __le32 max_in_freq; + __le32 max_in_phase_adj; + __le32 max_out_freq; + __le32 max_out_phase_adj; + u8 cgu_part_num; + u8 rsvd[3]; +}; + +/* Set CGU input config (direct 0x0C62) */ +struct ice_aqc_set_cgu_input_config { + u8 input_idx; + u8 flags1; +#define ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_FREQ BIT(6) +#define ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_DELAY BIT(7) + u8 flags2; +#define ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN BIT(5) +#define ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN BIT(6) + u8 rsvd; + __le32 freq; + __le32 phase_delay; + u8 rsvd2[2]; + __le16 node_handle; +}; + +/* Get CGU input config response descriptor structure (direct 0x0C63) */ +struct ice_aqc_get_cgu_input_config { + u8 input_idx; + u8 status; +#define ICE_AQC_GET_CGU_IN_CFG_STATUS_LOS BIT(0)
+#define ICE_AQC_GET_CGU_IN_CFG_STATUS_SCM_FAIL BIT(1) +#define ICE_AQC_GET_CGU_IN_CFG_STATUS_CFM_FAIL BIT(2) +#define ICE_AQC_GET_CGU_IN_CFG_STATUS_GST_FAIL BIT(3) +#define ICE_AQC_GET_CGU_IN_CFG_STATUS_PFM_FAIL BIT(4) +#define ICE_AQC_GET_CGU_IN_CFG_STATUS_ESYNC_FAIL BIT(6) +#define ICE_AQC_GET_CGU_IN_CFG_STATUS_ESYNC_CAP BIT(7) + u8 type; +#define ICE_AQC_GET_CGU_IN_CFG_TYPE_READ_ONLY BIT(0) +#define ICE_AQC_GET_CGU_IN_CFG_TYPE_GPS BIT(4) +#define ICE_AQC_GET_CGU_IN_CFG_TYPE_EXTERNAL BIT(5) +#define ICE_AQC_GET_CGU_IN_CFG_TYPE_PHY BIT(6) + u8 flags1; +#define ICE_AQC_GET_CGU_IN_CFG_FLG1_PHASE_DELAY_SUPP BIT(0) +#define ICE_AQC_GET_CGU_IN_CFG_FLG1_1PPS_SUPP BIT(2) +#define ICE_AQC_GET_CGU_IN_CFG_FLG1_10MHZ_SUPP BIT(3) +#define ICE_AQC_GET_CGU_IN_CFG_FLG1_ANYFREQ BIT(7) + __le32 freq; + __le32 phase_delay; + u8 flags2; +#define ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN BIT(5) +#define ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN BIT(6) + u8 rsvd[1]; + __le16 node_handle; +}; + +/* Set CGU output config (direct 0x0C64) */ +struct ice_aqc_set_cgu_output_config { + u8 output_idx; + u8 flags; +#define ICE_AQC_SET_CGU_OUT_CFG_OUT_EN BIT(0) +#define ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN BIT(1) +#define ICE_AQC_SET_CGU_OUT_CFG_UPDATE_FREQ BIT(2) +#define ICE_AQC_SET_CGU_OUT_CFG_UPDATE_PHASE BIT(3) +#define ICE_AQC_SET_CGU_OUT_CFG_UPDATE_SRC_SEL BIT(4) + u8 src_sel; +#define ICE_AQC_SET_CGU_OUT_CFG_DPLL_SRC_SEL ICE_M(0x1F, 0) + u8 rsvd; + __le32 freq; + __le32 phase_delay; + u8 rsvd2[2]; + __le16 node_handle; +}; + +/* Get CGU output config (direct 0x0C65) */ +struct ice_aqc_get_cgu_output_config { + u8 output_idx; + u8 flags; +#define ICE_AQC_GET_CGU_OUT_CFG_OUT_EN BIT(0) +#define ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN BIT(1) +#define ICE_AQC_GET_CGU_OUT_CFG_ESYNC_ABILITY BIT(2) + u8 src_sel; +#define ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT 0 +#define ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL \ + ICE_M(0x1F, ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT) +#define ICE_AQC_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT 5 +#define ICE_AQC_GET_CGU_OUT_CFG_DPLL_MODE \ + ICE_M(0x7, ICE_AQC_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT) + u8 rsvd; + __le32 freq; + __le32 src_freq; + u8 rsvd2[2]; + __le16 node_handle; +}; + +/* Get CGU DPLL status (direct 0x0C66) */ +struct ice_aqc_get_cgu_dpll_status { + u8 dpll_num; + u8 ref_state; +#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_LOS BIT(0) +#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_SCM BIT(1) +#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_CFM BIT(2) +#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_GST BIT(3) +#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_PFM BIT(4) +#define ICE_AQC_GET_CGU_DPLL_STATUS_FAST_LOCK_EN BIT(5) +#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_ESYNC BIT(6) + u8 dpll_state; +#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_LOCK BIT(0) +#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_HO BIT(1) +#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_HO_READY BIT(2) +#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_FLHIT BIT(5) +#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_PSLHIT BIT(7) + u8 config; +#define ICE_AQC_GET_CGU_DPLL_CONFIG_CLK_REF_SEL ICE_M(0x1F, 0) +#define ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_SHIFT 5 +#define ICE_AQC_GET_CGU_DPLL_CONFIG_MODE \ + ICE_M(0x7, ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_SHIFT) +#define ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_FREERUN 0 +#define ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_AUTOMATIC \ + ICE_M(0x3, ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_SHIFT) + __le32 phase_offset_h; + __le32 phase_offset_l; + u8 eec_mode; +#define ICE_AQC_GET_CGU_DPLL_STATUS_EEC_MODE_1 0xA +#define ICE_AQC_GET_CGU_DPLL_STATUS_EEC_MODE_2 0xB +#define 
ICE_AQC_GET_CGU_DPLL_STATUS_EEC_MODE_UNKNOWN 0xF + u8 rsvd[1]; + __le16 node_handle; +}; + +/* Set CGU DPLL config (direct 0x0C67) */ +struct ice_aqc_set_cgu_dpll_config { + u8 dpll_num; + u8 ref_state; +#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_LOS BIT(0) +#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_SCM BIT(1) +#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_CFM BIT(2) +#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_GST BIT(3) +#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_PFM BIT(4) +#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_FLOCK_EN BIT(5) +#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_ESYNC BIT(6) + u8 rsvd; + u8 config; +#define ICE_AQC_SET_CGU_DPLL_CONFIG_CLK_REF_SEL ICE_M(0x1F, 0) +#define ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_SHIFT 5 +#define ICE_AQC_SET_CGU_DPLL_CONFIG_MODE \ + ICE_M(0x7, ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_SHIFT) +#define ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_FREERUN 0 +#define ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_AUTOMATIC \ + ICE_M(0x3, ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_SHIFT) + u8 rsvd2[8]; + u8 eec_mode; + u8 rsvd3[1]; + __le16 node_handle; +}; + +/* Set CGU reference priority (direct 0x0C68) */ +struct ice_aqc_set_cgu_ref_prio { + u8 dpll_num; + u8 ref_idx; + u8 ref_priority; + u8 rsvd[11]; + __le16 node_handle; +}; + +/* Get CGU reference priority (direct 0x0C69) */ +struct ice_aqc_get_cgu_ref_prio { + u8 dpll_num; + u8 ref_idx; + u8 ref_priority; /* Valid only in response */ + u8 rsvd[13]; +}; + +/* Get CGU info (direct 0x0C6A) */ +struct ice_aqc_get_cgu_info { + __le32 cgu_id; + __le32 cgu_cfg_ver; + __le32 cgu_fw_ver; + u8 node_part_num; + u8 dev_rev; + __le16 node_handle; +}; + /* Driver Shared Parameters (direct, 0x0C90) */ struct ice_aqc_driver_shared_params { u8 set_or_get_op; @@ -2093,16 +2396,6 @@ struct ice_aqc_driver_shared_params { __le32 addr_low; }; -enum ice_aqc_driver_params { - /* OS clock index for PTP timer Domain 0 */ - ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0 = 0, - /* OS clock index for PTP timer Domain 1 */ - ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1, - - /* Add new parameters above */ - ICE_AQC_DRIVER_PARAM_MAX = 16, -}; - /* Lan Queue Overflow Event (direct, 0x1001) */ struct ice_aqc_event_lan_overflow { __le32 prtdcb_ruptq; @@ -2148,6 +2441,8 @@ struct ice_aq_desc { struct ice_aqc_get_phy_caps get_phy; struct ice_aqc_set_phy_cfg set_phy; struct ice_aqc_restart_an restart_an; + struct ice_aqc_set_phy_rec_clk_out set_phy_rec_clk_out; + struct ice_aqc_get_phy_rec_clk_out get_phy_rec_clk_out; struct ice_aqc_gpio read_write_gpio; struct ice_aqc_sff_eeprom read_write_sff_param; struct ice_aqc_set_port_id_led set_port_id_led; @@ -2181,12 +2476,22 @@ struct ice_aq_desc { struct ice_aqc_neigh_dev_req neigh_dev; struct ice_aqc_add_txqs add_txqs; struct ice_aqc_dis_txqs dis_txqs; + struct ice_aqc_cfg_txqs cfg_txqs; struct ice_aqc_add_rdma_qset add_rdma_qset; struct ice_aqc_add_get_update_free_vsi vsi_cmd; struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res; struct ice_aqc_fw_logging fw_logging; struct ice_aqc_get_clear_fw_log get_clear_fw_log; struct ice_aqc_download_pkg download_pkg; + struct ice_aqc_set_cgu_input_config set_cgu_input_config; + struct ice_aqc_get_cgu_input_config get_cgu_input_config; + struct ice_aqc_set_cgu_output_config set_cgu_output_config; + struct ice_aqc_get_cgu_output_config get_cgu_output_config; + struct ice_aqc_get_cgu_dpll_status get_cgu_dpll_status; + struct ice_aqc_set_cgu_dpll_config set_cgu_dpll_config; + struct ice_aqc_set_cgu_ref_prio set_cgu_ref_prio; + struct ice_aqc_get_cgu_ref_prio get_cgu_ref_prio; + struct ice_aqc_get_cgu_info 
get_cgu_info; struct ice_aqc_driver_shared_params drv_shared_params; struct ice_aqc_set_mac_lb set_mac_lb; struct ice_aqc_alloc_free_res_cmd sw_res_ctrl; @@ -2263,6 +2568,7 @@ enum ice_adminq_opc { /* Alloc/Free/Get Resources */ ice_aqc_opc_alloc_res = 0x0208, ice_aqc_opc_free_res = 0x0209, + ice_aqc_opc_share_res = 0x020B, ice_aqc_opc_set_vlan_mode_parameters = 0x020C, ice_aqc_opc_get_vlan_mode_parameters = 0x020D, @@ -2310,6 +2616,8 @@ enum ice_adminq_opc { ice_aqc_opc_get_link_status = 0x0607, ice_aqc_opc_set_event_mask = 0x0613, ice_aqc_opc_set_mac_lb = 0x0620, + ice_aqc_opc_set_phy_rec_clk_out = 0x0630, + ice_aqc_opc_get_phy_rec_clk_out = 0x0631, ice_aqc_opc_get_link_topo = 0x06E0, ice_aqc_opc_read_i2c = 0x06E2, ice_aqc_opc_write_i2c = 0x06E3, @@ -2356,6 +2664,7 @@ enum ice_adminq_opc { /* Tx queue handling commands/events */ ice_aqc_opc_add_txqs = 0x0C30, ice_aqc_opc_dis_txqs = 0x0C31, + ice_aqc_opc_cfg_txqs = 0x0C32, ice_aqc_opc_add_rdma_qset = 0x0C33, /* package commands */ @@ -2364,6 +2673,18 @@ enum ice_adminq_opc { ice_aqc_opc_update_pkg = 0x0C42, ice_aqc_opc_get_pkg_info_list = 0x0C43, + /* 1588/SyncE commands/events */ + ice_aqc_opc_get_cgu_abilities = 0x0C61, + ice_aqc_opc_set_cgu_input_config = 0x0C62, + ice_aqc_opc_get_cgu_input_config = 0x0C63, + ice_aqc_opc_set_cgu_output_config = 0x0C64, + ice_aqc_opc_get_cgu_output_config = 0x0C65, + ice_aqc_opc_get_cgu_dpll_status = 0x0C66, + ice_aqc_opc_set_cgu_dpll_config = 0x0C67, + ice_aqc_opc_set_cgu_ref_prio = 0x0C68, + ice_aqc_opc_get_cgu_ref_prio = 0x0C69, + ice_aqc_opc_get_cgu_info = 0x0C6A, + ice_aqc_opc_driver_shared_params = 0x0C90, /* Standalone Commands/Events */ diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 074bf9403cd1..7fa43827a3f0 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -408,7 +408,6 @@ static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring) */ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) { - int chain_len = ICE_MAX_CHAINED_RX_BUFS; struct ice_vsi *vsi = ring->vsi; u32 rxdid = ICE_RXDID_FLEX_NIC; struct ice_rlan_ctx rlan_ctx; @@ -473,17 +472,11 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) */ rlan_ctx.showiv = 0; - /* For AF_XDP ZC, we disallow packets to span on - * multiple buffers, thus letting us skip that - * handling in the fast-path. - */ - if (ring->xsk_pool) - chain_len = 1; /* Max packet size for this queue - must not be set to a larger value * than 5 x DBUF */ rlan_ctx.rxmax = min_t(u32, vsi->max_frame, - chain_len * ring->rx_buf_len); + ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len); /* Rx queue threshold in units of 64 */ rlan_ctx.lrxqthresh = 1; diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index e16d4c83ed5f..9a6c25f98632 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1,12 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2018, Intel Corporation. */ +/* Copyright (c) 2018-2023, Intel Corporation. 
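The CGU parameter structs added to ice_adminq_cmd.h above all describe direct admin queue commands, which carry their parameters inline in the 16-byte params area of struct ice_aq_desc; that is why each layout pads itself out with rsvd fields to exactly 16 bytes before joining the descriptor union. A host-side sketch of that invariant for one of the new layouts (the struct is copied with fixed-width stand-ins for __le16/__le32, and the assert is illustrative rather than something the driver compiles):

#include <stdint.h>

/* Reduced copy of struct ice_aqc_get_cgu_info (0x0C6A). */
struct demo_get_cgu_info {
        uint32_t cgu_id;
        uint32_t cgu_cfg_ver;
        uint32_t cgu_fw_ver;
        uint8_t  node_part_num;
        uint8_t  dev_rev;
        uint16_t node_handle;
};

/* Direct commands have no external buffer; the whole request/response
 * must fit the descriptor's 16-byte parameter area.
 */
_Static_assert(sizeof(struct demo_get_cgu_info) == 16,
               "direct-command params must be exactly 16 bytes");

int main(void)
{
        return 0;
}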
 */ #include "ice_common.h" #include "ice_sched.h" #include "ice_adminq_cmd.h" #include "ice_flow.h" +#include "ice_ptp_hw.h" #define ICE_PF_RESET_WAIT_COUNT 300 +#define ICE_MAX_NETLIST_SIZE 10 static const char * const ice_link_mode_str_low[] = { [0] = "100BASE_TX", @@ -152,6 +154,12 @@ static int ice_set_mac_type(struct ice_hw *hw) case ICE_DEV_ID_E823L_SFP: hw->mac_type = ICE_MAC_GENERIC; break; + case ICE_DEV_ID_E830_BACKPLANE: + case ICE_DEV_ID_E830_QSFP56: + case ICE_DEV_ID_E830_SFP: + case ICE_DEV_ID_E830_SFP_DD: + hw->mac_type = ICE_MAC_E830; + break; default: hw->mac_type = ICE_MAC_UNKNOWN; break; @@ -435,6 +443,80 @@ ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type, } /** + * ice_aq_get_netlist_node + * @hw: pointer to the hw struct + * @cmd: get_link_topo AQ structure + * @node_part_number: output node part number if node found + * @node_handle: output node handle parameter if node found + * + * Get netlist node handle. + */ +int +ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd, + u8 *node_part_number, u16 *node_handle) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); + desc.params.get_link_topo = *cmd; + + if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL)) + return -EINTR; + + if (node_handle) + *node_handle = + le16_to_cpu(desc.params.get_link_topo.addr.handle); + if (node_part_number) + *node_part_number = desc.params.get_link_topo.node_part_num; + + return 0; +} + +/** + * ice_find_netlist_node + * @hw: pointer to the hw struct + * @node_type_ctx: type of netlist node to look for + * @node_part_number: node part number to look for + * @node_handle: output parameter if node found - optional + * + * Scan the netlist for a node handle of the given node type and part number. + * + * If node_handle is non-NULL it will be modified on function exit. It is only + * valid if the function returns zero, and should be ignored on any non-zero + * return value. + * + * Returns: 0 if the node is found, -ENOENT if no handle was found, and + * a negative error code on failure to access the AQ. + */ +static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, + u8 node_part_number, u16 *node_handle) +{ + u8 idx; + + for (idx = 0; idx < ICE_MAX_NETLIST_SIZE; idx++) { + struct ice_aqc_get_link_topo cmd = {}; + u8 rec_node_part_number; + int status; + + cmd.addr.topo_params.node_type_ctx = + FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, + node_type_ctx); + cmd.addr.topo_params.index = idx; + + status = ice_aq_get_netlist_node(hw, &cmd, + &rec_node_part_number, + node_handle); + if (status) + return status; + + if (rec_node_part_number == node_part_number) + return 0; + } + + return -ENOENT; +} + +/** * ice_is_media_cage_present * @pi: port information structure * @@ -570,6 +652,24 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) } /** + * ice_get_link_status_datalen + * @hw: pointer to the HW struct + * + * Returns the data length for the Get Link Status AQ command, which is larger + * for newer adapter families handled by the ice driver.
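ice_get_link_status_datalen() below keys the response length off offsetofend() markers placed inside the struct: V1 firmware responses are read only through phy_type_high, while E830 parts return the longer V2 layout that appends link-partner data. A standalone sketch of the technique with a simplified layout (the struct here is reduced from the real ice_aqc_get_link_status_data, and offsetofend() is reimplemented locally):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same semantics as the kernel's offsetofend(): offset of the first
 * byte past the named member.
 */
#define demo_offsetofend(type, member) \
        (offsetof(type, member) + sizeof(((type *)0)->member))

struct demo_link_status {
        uint64_t phy_type_low;
        uint64_t phy_type_high;
        /* end of the V1 response */
        uint64_t lp_phy_type_low;
        uint64_t lp_phy_type_high;
        uint8_t  lp_flowcontrol;
};

int main(void)
{
        printf("V1 len = %zu, V2 len = %zu\n",
               demo_offsetofend(struct demo_link_status, phy_type_high),
               demo_offsetofend(struct demo_link_status, lp_flowcontrol));
        return 0;
}

Versioning the buffer length this way lets one struct describe both wire formats: older firmware never sees a request for bytes it cannot fill, and the newer fields are simply left unread on V1 parts.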
+ */ +static u16 ice_get_link_status_datalen(struct ice_hw *hw) +{ + switch (hw->mac_type) { + case ICE_MAC_E830: + return ICE_AQC_LS_DATA_SIZE_V2; + case ICE_MAC_E810: + default: + return ICE_AQC_LS_DATA_SIZE_V1; + } +} + +/** * ice_aq_get_link_info * @pi: port information structure * @ena_lse: enable/disable LinkStatusEvent reporting @@ -607,8 +707,8 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, resp->cmd_flags = cpu_to_le16(cmd_flags); resp->lport_num = pi->lport; - status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd); - + status = ice_aq_send_cmd(hw, &desc, &link_data, + ice_get_link_status_datalen(hw), cd); if (status) return status; @@ -683,8 +783,7 @@ static void ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw, struct ice_aqc_set_mac_cfg *cmd) { - u16 fc_thres_val, tx_timer_val; - u32 val; + u32 val, fc_thres_m; /* We read back the transmit timer and FC threshold value of * LFC. Thus, we will use index = @@ -693,19 +792,32 @@ ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw, * Also, because we are operating on transmit timer and FC * threshold of LFC, we don't turn on any bit in tx_tmr_priority */ -#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX - - /* Retrieve the transmit timer */ - val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC)); - tx_timer_val = val & - PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M; - cmd->tx_tmr_value = cpu_to_le16(tx_timer_val); - - /* Retrieve the FC threshold */ - val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC)); - fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M; - - cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val); +#define E800_IDX_OF_LFC E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX +#define E800_REFRESH_TMR E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR + + if (hw->mac_type == ICE_MAC_E830) { + /* Retrieve the transmit timer */ + val = rd32(hw, E830_PRTMAC_CL01_PS_QNT); + cmd->tx_tmr_value = + le16_encode_bits(val, E830_PRTMAC_CL01_PS_QNT_CL0_M); + + /* Retrieve the fc threshold */ + val = rd32(hw, E830_PRTMAC_CL01_QNT_THR); + fc_thres_m = E830_PRTMAC_CL01_QNT_THR_CL0_M; + } else { + /* Retrieve the transmit timer */ + val = rd32(hw, + E800_PRTMAC_HSEC_CTL_TX_PS_QNT(E800_IDX_OF_LFC)); + cmd->tx_tmr_value = + le16_encode_bits(val, + E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M); + + /* Retrieve the fc threshold */ + val = rd32(hw, + E800_REFRESH_TMR(E800_IDX_OF_LFC)); + fc_thres_m = E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M; + } + cmd->fc_refresh_threshold = le16_encode_bits(val, fc_thres_m); } /** @@ -1999,37 +2111,31 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) /** * ice_aq_alloc_free_res - command to allocate/free resources * @hw: pointer to the HW struct - * @num_entries: number of resource entries in buffer * @buf: Indirect buffer to hold data parameters and response * @buf_size: size of buffer for indirect commands * @opc: pass in the command opcode - * @cd: pointer to command details structure or NULL * * Helper function to allocate/free resources using the admin queue commands */ -int -ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, - struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, - enum ice_adminq_opc opc, struct ice_sq_cd *cd) +int ice_aq_alloc_free_res(struct ice_hw *hw, + struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, + enum ice_adminq_opc opc) { struct ice_aqc_alloc_free_res_cmd *cmd; struct ice_aq_desc desc; cmd = &desc.params.sw_res_ctrl; - if (!buf) - return -EINVAL; - - if (buf_size < flex_array_size(buf, elem, 
num_entries)) + if (!buf || buf_size < flex_array_size(buf, elem, 1)) return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, opc); desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - cmd->num_entries = cpu_to_le16(num_entries); + cmd->num_entries = cpu_to_le16(1); - return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + return ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL); } /** @@ -2059,8 +2165,7 @@ ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) if (btm) buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM); - status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, - ice_aqc_opc_alloc_res, NULL); + status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res); if (status) goto ice_alloc_res_exit; @@ -2094,8 +2199,7 @@ int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) buf->res_type = cpu_to_le16(type); memcpy(buf->elem, res, sizeof(*buf->elem) * num); - status = ice_aq_alloc_free_res(hw, num, buf, buf_len, - ice_aqc_opc_free_res, NULL); + status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res); if (status) ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); @@ -2241,6 +2345,14 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, "%s: reset_restrict_support = %d\n", prefix, caps->reset_restrict_support); break; + case ICE_AQC_CAPS_FW_LAG_SUPPORT: + caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG); + ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n", + prefix, caps->roce_lag); + caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG); + ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n", + prefix, caps->sriov_lag); + break; default: /* Not one of the recognized common capabilities */ found = false; @@ -2388,16 +2500,21 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, static void ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p) { - u32 reg_val, val; + u32 reg_val, gsize, bsize; reg_val = rd32(hw, GLQF_FD_SIZE); - val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >> - GLQF_FD_SIZE_FD_GSIZE_S; - func_p->fd_fltr_guar = - ice_get_num_per_func(hw, val); - val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >> - GLQF_FD_SIZE_FD_BSIZE_S; - func_p->fd_fltr_best_effort = val; + switch (hw->mac_type) { + case ICE_MAC_E830: + gsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_GSIZE_M, reg_val); + bsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_BSIZE_M, reg_val); + break; + case ICE_MAC_E810: + default: + gsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_GSIZE_M, reg_val); + bsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_BSIZE_M, reg_val); + } + func_p->fd_fltr_guar = ice_get_num_per_func(hw, gsize); + func_p->fd_fltr_best_effort = bsize; ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n", func_p->fd_fltr_guar); @@ -2654,6 +2771,116 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, } /** + * ice_is_pf_c827 - check if pf contains c827 phy + * @hw: pointer to the hw struct + */ +bool ice_is_pf_c827(struct ice_hw *hw) +{ + struct ice_aqc_get_link_topo cmd = {}; + u8 node_part_number; + u16 node_handle; + int status; + + if (hw->mac_type != ICE_MAC_E810) + return false; + + if (hw->device_id != ICE_DEV_ID_E810C_QSFP) + return true; + + cmd.addr.topo_params.node_type_ctx = + FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) | + FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT); + cmd.addr.topo_params.index = 0; + + status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number, + &node_handle); + + if (status || node_part_number != 
ICE_AQC_GET_LINK_TOPO_NODE_NR_C827) + return false; + + if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE) + return true; + + return false; +} + +/** + * ice_is_phy_rclk_in_netlist + * @hw: pointer to the hw struct + * + * Check if the PHY Recovered Clock device is present in the netlist + */ +bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw) +{ + if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, + ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) && + ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, + ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL)) + return false; + + return true; +} + +/** + * ice_is_clock_mux_in_netlist + * @hw: pointer to the hw struct + * + * Check if the Clock Multiplexer device is present in the netlist + */ +bool ice_is_clock_mux_in_netlist(struct ice_hw *hw) +{ + if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX, + ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX, + NULL)) + return false; + + return true; +} + +/** + * ice_is_cgu_in_netlist - check for CGU presence + * @hw: pointer to the hw struct + * + * Check if the Clock Generation Unit (CGU) device is present in the netlist. + * Save the CGU part number in the hw structure for later use. + * Return: + * * true - cgu is present + * * false - cgu is not present + */ +bool ice_is_cgu_in_netlist(struct ice_hw *hw) +{ + if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, + ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032, + NULL)) { + hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032; + return true; + } else if (!ice_find_netlist_node(hw, + ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, + ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384, + NULL)) { + hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384; + return true; + } + + return false; +} + +/** + * ice_is_gps_in_netlist + * @hw: pointer to the hw struct + * + * Check if the GPS generic device is present in the netlist + */ +bool ice_is_gps_in_netlist(struct ice_hw *hw) +{ + if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS, + ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL)) + return false; + + return true; +} + +/** * ice_aq_list_caps - query function/device capabilities * @hw: pointer to the HW struct * @buf: a buffer to hold the capabilities @@ -3869,6 +4096,34 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, return status; } +static enum ice_lut_size ice_lut_type_to_size(enum ice_lut_type type) +{ + switch (type) { + case ICE_LUT_VSI: + return ICE_LUT_VSI_SIZE; + case ICE_LUT_GLOBAL: + return ICE_LUT_GLOBAL_SIZE; + case ICE_LUT_PF: + return ICE_LUT_PF_SIZE; + } + WARN_ONCE(1, "incorrect type passed"); + return ICE_LUT_VSI_SIZE; +} + +static enum ice_aqc_lut_flags ice_lut_size_to_flag(enum ice_lut_size size) +{ + switch (size) { + case ICE_LUT_VSI_SIZE: + return ICE_AQC_LUT_SIZE_SMALL; + case ICE_LUT_GLOBAL_SIZE: + return ICE_AQC_LUT_SIZE_512; + case ICE_LUT_PF_SIZE: + return ICE_AQC_LUT_SIZE_2K; + } + WARN_ONCE(1, "incorrect size passed"); + return 0; +} + /** * __ice_aq_get_set_rss_lut * @hw: pointer to the hardware structure @@ -3878,95 +4133,44 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, * Internal function to get (0x0B05) or set (0x0B03) RSS look up table */ static int -__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set) -{ - u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle; - struct ice_aqc_get_set_rss_lut *cmd_resp; +__ice_aq_get_set_rss_lut(struct 
ice_hw *hw, + struct ice_aq_get_set_rss_lut_params *params, bool set) +{ + u16 opcode, vsi_id, vsi_handle = params->vsi_handle, glob_lut_idx = 0; + enum ice_lut_type lut_type = params->lut_type; + struct ice_aqc_get_set_rss_lut *desc_params; + enum ice_aqc_lut_flags flags; + enum ice_lut_size lut_size; struct ice_aq_desc desc; - int status; - u8 *lut; - - if (!params) - return -EINVAL; + u8 *lut = params->lut; - vsi_handle = params->vsi_handle; - lut = params->lut; - if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) + if (!lut || !ice_is_vsi_valid(hw, vsi_handle)) return -EINVAL; - lut_size = params->lut_size; - lut_type = params->lut_type; - glob_lut_idx = params->global_lut_id; - vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); - - cmd_resp = &desc.params.get_set_rss_lut; + lut_size = ice_lut_type_to_size(lut_type); + if (lut_size > params->lut_size) + return -EINVAL; + else if (set && lut_size != params->lut_size) + return -EINVAL; - if (set) { - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut); + opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut; + ice_fill_dflt_direct_cmd_desc(&desc, opcode); + if (set) desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - } else { - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut); - } - cmd_resp->vsi_id = cpu_to_le16(((vsi_id << - ICE_AQC_GSET_RSS_LUT_VSI_ID_S) & - ICE_AQC_GSET_RSS_LUT_VSI_ID_M) | - ICE_AQC_GSET_RSS_LUT_VSI_VALID); - - switch (lut_type) { - case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI: - case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF: - case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL: - flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) & - ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M); - break; - default: - status = -EINVAL; - goto ice_aq_get_set_rss_lut_exit; - } + desc_params = &desc.params.get_set_rss_lut; + vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); + desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID); - if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) { - flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) & - ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M); + if (lut_type == ICE_LUT_GLOBAL) + glob_lut_idx = FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX, + params->global_lut_id); - if (!set) - goto ice_aq_get_set_rss_lut_send; - } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { - if (!set) - goto ice_aq_get_set_rss_lut_send; - } else { - goto ice_aq_get_set_rss_lut_send; - } + flags = lut_type | glob_lut_idx | ice_lut_size_to_flag(lut_size); + desc_params->flags = cpu_to_le16(flags); - /* LUT size is only valid for Global and PF table types */ - switch (lut_size) { - case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128: - break; - case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512: - flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG << - ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & - ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; - break; - case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K: - if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { - flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << - ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & - ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; - break; - } - fallthrough; - default: - status = -EINVAL; - goto ice_aq_get_set_rss_lut_exit; - } - -ice_aq_get_set_rss_lut_send: - cmd_resp->flags = cpu_to_le16(flags); - status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); - -ice_aq_get_set_rss_lut_exit: - return status; + return ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); } /** @@ -4008,12 +4212,10 @@ static int __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, struct ice_aqc_get_set_rss_keys *key, bool set) { - 
struct ice_aqc_get_set_rss_key *cmd_resp; + struct ice_aqc_get_set_rss_key *desc_params; u16 key_size = sizeof(*key); struct ice_aq_desc desc; - cmd_resp = &desc.params.get_set_rss_key; - if (set) { ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); @@ -4021,10 +4223,8 @@ __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); } - cmd_resp->vsi_id = cpu_to_le16(((vsi_id << - ICE_AQC_GSET_RSS_KEY_VSI_ID_S) & - ICE_AQC_GSET_RSS_KEY_VSI_ID_M) | - ICE_AQC_GSET_RSS_KEY_VSI_VALID); + desc_params = &desc.params.get_set_rss_key; + desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID); return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); } @@ -4222,6 +4422,53 @@ do_aq: } /** + * ice_aq_cfg_lan_txq + * @hw: pointer to the hardware structure + * @buf: buffer for command + * @buf_size: size of buffer in bytes + * @num_qs: number of queues being configured + * @oldport: origination lport + * @newport: destination lport + * @cd: pointer to command details structure or NULL + * + * Move/Configure LAN Tx queue (0x0C32) + * + * There is a better AQ command to use for moving nodes, so only coding + * this one for configuring the node. + */ +int +ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf, + u16 buf_size, u16 num_qs, u8 oldport, u8 newport, + struct ice_sq_cd *cd) +{ + struct ice_aqc_cfg_txqs *cmd; + struct ice_aq_desc desc; + int status; + + cmd = &desc.params.cfg_txqs; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + if (!buf) + return -EINVAL; + + cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG; + cmd->num_qs = num_qs; + cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M); + cmd->port_num_chng |= (newport << ICE_AQC_Q_CFG_DST_PRT_S) & + ICE_AQC_Q_CFG_DST_PRT_M; + cmd->time_out = (5 << ICE_AQC_Q_CFG_TIMEOUT_S) & + ICE_AQC_Q_CFG_TIMEOUT_M; + cmd->blocked_cgds = 0; + + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (status) + ice_debug(hw, ICE_DBG_SCHED, "Failed to reconfigure nodes %d\n", + hw->adminq.sq_last_status); + return status; +} + +/** * ice_aq_add_rdma_qsets * @hw: pointer to the hardware structure * @num_qset_grps: Number of RDMA Qset groups @@ -4644,11 +4891,11 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, enum ice_disq_rst_src rst_src, u16 vmvf_num, struct ice_sq_cd *cd) { - struct ice_aqc_dis_txq_item *qg_list; + DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1); + u16 i, buf_size = __struct_size(qg_list); struct ice_q_ctx *q_ctx; int status = -ENOENT; struct ice_hw *hw; - u16 i, buf_size; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) return -EIO; @@ -4666,11 +4913,6 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, return -EIO; } - buf_size = struct_size(qg_list, q_id, 1); - qg_list = kzalloc(buf_size, GFP_KERNEL); - if (!qg_list) - return -ENOMEM; - mutex_lock(&pi->sched_lock); for (i = 0; i < num_queues; i++) { @@ -4700,9 +4942,9 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, break; ice_free_sched_node(pi, node); q_ctx->q_handle = ICE_INVAL_Q_HANDLE; + q_ctx->q_teid = ICE_INVAL_TEID; } mutex_unlock(&pi->sched_lock); - kfree(qg_list); return status; } @@ -4871,10 +5113,10 @@ int ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, u16 *q_id) { - struct ice_aqc_dis_txq_item *qg_list; + DEFINE_FLEX(struct 
ice_aqc_dis_txq_item, qg_list, q_id, 1); + u16 qg_size = __struct_size(qg_list); struct ice_hw *hw; int status = 0; - u16 qg_size; int i; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) @@ -4882,11 +5124,6 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, hw = pi->hw; - qg_size = struct_size(qg_list, q_id, 1); - qg_list = kzalloc(qg_size, GFP_KERNEL); - if (!qg_list) - return -ENOMEM; - mutex_lock(&pi->sched_lock); for (i = 0; i < count; i++) { @@ -4911,7 +5148,395 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, } mutex_unlock(&pi->sched_lock); - kfree(qg_list); + return status; +} + +/** + * ice_aq_get_cgu_abilities - get cgu abilities + * @hw: pointer to the HW struct + * @abilities: CGU abilities + * + * Get CGU abilities (0x0C61) + * Return: 0 on success or negative value on failure. + */ +int +ice_aq_get_cgu_abilities(struct ice_hw *hw, + struct ice_aqc_get_cgu_abilities *abilities) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities); + return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL); +} + +/** + * ice_aq_set_input_pin_cfg - set input pin config + * @hw: pointer to the HW struct + * @input_idx: Input index + * @flags1: Input flags + * @flags2: Input flags + * @freq: Frequency in Hz + * @phase_delay: Delay in ps + * + * Set CGU input config (0x0C62) + * Return: 0 on success or negative value on failure. + */ +int +ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2, + u32 freq, s32 phase_delay) +{ + struct ice_aqc_set_cgu_input_config *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config); + cmd = &desc.params.set_cgu_input_config; + cmd->input_idx = input_idx; + cmd->flags1 = flags1; + cmd->flags2 = flags2; + cmd->freq = cpu_to_le32(freq); + cmd->phase_delay = cpu_to_le32(phase_delay); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_aq_get_input_pin_cfg - get input pin config + * @hw: pointer to the HW struct + * @input_idx: Input index + * @status: Pin status + * @type: Pin type + * @flags1: Input flags + * @flags2: Input flags + * @freq: Frequency in Hz + * @phase_delay: Delay in ps + * + * Get CGU input config (0x0C63) + * Return: 0 on success or negative value on failure. + */ +int +ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type, + u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay) +{ + struct ice_aqc_get_cgu_input_config *cmd; + struct ice_aq_desc desc; + int ret; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config); + cmd = &desc.params.get_cgu_input_config; + cmd->input_idx = input_idx; + + ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!ret) { + if (status) + *status = cmd->status; + if (type) + *type = cmd->type; + if (flags1) + *flags1 = cmd->flags1; + if (flags2) + *flags2 = cmd->flags2; + if (freq) + *freq = le32_to_cpu(cmd->freq); + if (phase_delay) + *phase_delay = le32_to_cpu(cmd->phase_delay); + } + + return ret; +} + +/** + * ice_aq_set_output_pin_cfg - set output pin config + * @hw: pointer to the HW struct + * @output_idx: Output index + * @flags: Output flags + * @src_sel: Index of DPLL block + * @freq: Output frequency + * @phase_delay: Output phase compensation + * + * Set CGU output config (0x0C64) + * Return: 0 on success or negative value on failure. 
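The ice_dis_vsi_txq() and ice_dis_vsi_rdma_qset() hunks above replace a kzalloc()/kfree() pair with DEFINE_FLEX(), which declares zeroed on-stack storage for a flexible-array struct with a compile-time element count, with __struct_size() recovering the total buffer length. A rough standalone imitation of the idea (DEMO_DEFINE_FLEX is a simplification of the kernel macro, which reaches the same shape through a union of a byte array and the typed object):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_dis_txq_item {
        uint32_t parent_teid;
        uint8_t  num_qs;
        uint8_t  rsvd;
        uint16_t q_id[];        /* flexible array member */
};

/* On-stack storage for the header plus COUNT trailing elements, and a
 * typed pointer into it. Placing a flexible-array struct inside a union
 * is a GNU C extension that the kernel relies on as well.
 */
#define DEMO_DEFINE_FLEX(TYPE, NAME, MEMBER, COUNT)                      \
        union {                                                          \
                uint8_t bytes[sizeof(TYPE) +                             \
                              sizeof(((TYPE *)0)->MEMBER[0]) * (COUNT)]; \
                TYPE obj;                                                \
        } NAME##_storage = { { 0 } };                                    \
        TYPE *NAME = &NAME##_storage.obj

int main(void)
{
        DEMO_DEFINE_FLEX(struct demo_dis_txq_item, qg_list, q_id, 1);
        size_t buf_size = sizeof(qg_list_storage); /* __struct_size() analogue */

        qg_list->num_qs = 1;
        qg_list->q_id[0] = 42;
        printf("on-stack buffer: %zu bytes, q_id[0] = %u\n",
               buf_size, qg_list->q_id[0]);
        return 0;
}

For a single-element buffer like this one, the stack variant removes an allocation failure path entirely, which is why the -ENOMEM branches disappear from both functions.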
+ */ +int +ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags, + u8 src_sel, u32 freq, s32 phase_delay) +{ + struct ice_aqc_set_cgu_output_config *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config); + cmd = &desc.params.set_cgu_output_config; + cmd->output_idx = output_idx; + cmd->flags = flags; + cmd->src_sel = src_sel; + cmd->freq = cpu_to_le32(freq); + cmd->phase_delay = cpu_to_le32(phase_delay); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_aq_get_output_pin_cfg - get output pin config + * @hw: pointer to the HW struct + * @output_idx: Output index + * @flags: Output flags + * @src_sel: Internal DPLL source + * @freq: Output frequency + * @src_freq: Source frequency + * + * Get CGU output config (0x0C65) + * Return: 0 on success or negative value on failure. + */ +int +ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags, + u8 *src_sel, u32 *freq, u32 *src_freq) +{ + struct ice_aqc_get_cgu_output_config *cmd; + struct ice_aq_desc desc; + int ret; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config); + cmd = &desc.params.get_cgu_output_config; + cmd->output_idx = output_idx; + + ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!ret) { + if (flags) + *flags = cmd->flags; + if (src_sel) + *src_sel = cmd->src_sel; + if (freq) + *freq = le32_to_cpu(cmd->freq); + if (src_freq) + *src_freq = le32_to_cpu(cmd->src_freq); + } + + return ret; +} + +/** + * ice_aq_get_cgu_dpll_status - get dpll status + * @hw: pointer to the HW struct + * @dpll_num: DPLL index + * @ref_state: Reference clock state + * @config: current DPLL config + * @dpll_state: current DPLL state + * @phase_offset: Phase offset in ns + * @eec_mode: EEC_mode + * + * Get CGU DPLL status (0x0C66) + * Return: 0 on success or negative value on failure. + */ +int +ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state, + u8 *dpll_state, u8 *config, s64 *phase_offset, + u8 *eec_mode) +{ + struct ice_aqc_get_cgu_dpll_status *cmd; + const s64 nsec_per_psec = 1000LL; + struct ice_aq_desc desc; + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status); + cmd = &desc.params.get_cgu_dpll_status; + cmd->dpll_num = dpll_num; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status) { + *ref_state = cmd->ref_state; + *dpll_state = cmd->dpll_state; + *config = cmd->config; + *phase_offset = le32_to_cpu(cmd->phase_offset_h); + *phase_offset <<= 32; + *phase_offset += le32_to_cpu(cmd->phase_offset_l); + *phase_offset = div64_s64(sign_extend64(*phase_offset, 47), + nsec_per_psec); + *eec_mode = cmd->eec_mode; + } + + return status; +} + +/** + * ice_aq_set_cgu_dpll_config - set dpll config + * @hw: pointer to the HW struct + * @dpll_num: DPLL index + * @ref_state: Reference clock state + * @config: DPLL config + * @eec_mode: EEC mode + * + * Set CGU DPLL config (0x0C67) + * Return: 0 on success or negative value on failure. 
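ice_aq_get_cgu_dpll_status() above reassembles the DPLL phase offset from two 32-bit words, sign-extends it as a 48-bit quantity, and divides by 1000 to convert picoseconds to the nanoseconds its caller expects. The same arithmetic as a standalone sketch (demo_sign_extend64 mirrors the kernel's sign_extend64; the raw example words are invented):

#include <stdint.h>
#include <stdio.h>

/* Sign-extend a value whose sign bit sits at bit position "index",
 * matching the kernel's sign_extend64() (an arithmetic right shift of
 * a negative signed value is assumed, as the kernel assumes too).
 */
static int64_t demo_sign_extend64(uint64_t value, int index)
{
        int shift = 63 - index;

        return (int64_t)(value << shift) >> shift;
}

int main(void)
{
        uint32_t phase_offset_h = 0x0000FFFFu;  /* example raw words */
        uint32_t phase_offset_l = 0xFFFFF000u;
        int64_t ps = ((int64_t)phase_offset_h << 32) | phase_offset_l;
        int64_t ns = demo_sign_extend64(ps, 47) / 1000; /* ps -> ns */

        /* 0xFFFFFFFFF000 as a 48-bit value is -4096 ps, i.e. -4 ns */
        printf("phase offset: %lld ns\n", (long long)ns);
        return 0;
}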
+ */ +int +ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state, + u8 config, u8 eec_mode) +{ + struct ice_aqc_set_cgu_dpll_config *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config); + cmd = &desc.params.set_cgu_dpll_config; + cmd->dpll_num = dpll_num; + cmd->ref_state = ref_state; + cmd->config = config; + cmd->eec_mode = eec_mode; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_aq_set_cgu_ref_prio - set input reference priority + * @hw: pointer to the HW struct + * @dpll_num: DPLL index + * @ref_idx: Reference pin index + * @ref_priority: Reference input priority + * + * Set CGU reference priority (0x0C68) + * Return: 0 on success or negative value on failure. + */ +int +ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, + u8 ref_priority) +{ + struct ice_aqc_set_cgu_ref_prio *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio); + cmd = &desc.params.set_cgu_ref_prio; + cmd->dpll_num = dpll_num; + cmd->ref_idx = ref_idx; + cmd->ref_priority = ref_priority; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_aq_get_cgu_ref_prio - get input reference priority + * @hw: pointer to the HW struct + * @dpll_num: DPLL index + * @ref_idx: Reference pin index + * @ref_prio: Reference input priority + * + * Get CGU reference priority (0x0C69) + * Return: 0 on success or negative value on failure. + */ +int +ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, + u8 *ref_prio) +{ + struct ice_aqc_get_cgu_ref_prio *cmd; + struct ice_aq_desc desc; + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio); + cmd = &desc.params.get_cgu_ref_prio; + cmd->dpll_num = dpll_num; + cmd->ref_idx = ref_idx; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status) + *ref_prio = cmd->ref_priority; + + return status; +} + +/** + * ice_aq_get_cgu_info - get cgu info + * @hw: pointer to the HW struct + * @cgu_id: CGU ID + * @cgu_cfg_ver: CGU config version + * @cgu_fw_ver: CGU firmware version + * + * Get CGU info (0x0C6A) + * Return: 0 on success or negative value on failure. + */ +int +ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver, + u32 *cgu_fw_ver) +{ + struct ice_aqc_get_cgu_info *cmd; + struct ice_aq_desc desc; + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info); + cmd = &desc.params.get_cgu_info; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status) { + *cgu_id = le32_to_cpu(cmd->cgu_id); + *cgu_cfg_ver = le32_to_cpu(cmd->cgu_cfg_ver); + *cgu_fw_ver = le32_to_cpu(cmd->cgu_fw_ver); + } + + return status; +} + +/** + * ice_aq_set_phy_rec_clk_out - set RCLK phy out + * @hw: pointer to the HW struct + * @phy_output: PHY reference clock output pin + * @enable: GPIO state to be applied + * @freq: PHY output frequency + * + * Set phy recovered clock as reference (0x0630) + * Return: 0 on success or negative value on failure. 
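Every wrapper above follows the same direct-command shape: take the typed view of the descriptor's parameter area, fill in the request fields, send with no external buffer, and copy results back out of the same bytes on success; optional outputs are written only when the caller passed a non-NULL pointer. A toy illustration of that in-place request/response aliasing (structs and values are invented stand-ins; the real struct ice_aq_desc unions many more command layouts):

#include <stdint.h>
#include <stdio.h>

/* Reduced copy of struct ice_aqc_get_cgu_ref_prio (0x0C69). */
struct demo_get_cgu_ref_prio {
        uint8_t dpll_num;
        uint8_t ref_idx;
        uint8_t ref_priority;   /* valid only in the response */
        uint8_t rsvd[13];
};

/* Toy descriptor: header fields, then a params area shared by all
 * direct commands through a union.
 */
struct demo_aq_desc {
        uint16_t flags;
        uint16_t opcode;
        union {
                uint8_t raw[16];
                struct demo_get_cgu_ref_prio get_cgu_ref_prio;
        } params;
};

int main(void)
{
        struct demo_aq_desc desc = { .opcode = 0x0C69 };
        struct demo_get_cgu_ref_prio *cmd = &desc.params.get_cgu_ref_prio;

        cmd->dpll_num = 0;
        cmd->ref_idx = 1;
        /* ...descriptor sent here; firmware rewrites the params in place... */
        desc.params.raw[2] = 3;         /* pretend response byte */
        printf("ref %u has priority %u\n", cmd->ref_idx, cmd->ref_priority);
        return 0;
}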
+ */ +int +ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable, + u32 *freq) +{ + struct ice_aqc_set_phy_rec_clk_out *cmd; + struct ice_aq_desc desc; + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out); + cmd = &desc.params.set_phy_rec_clk_out; + cmd->phy_output = phy_output; + cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT; + cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN; + cmd->freq = cpu_to_le32(*freq); + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status) + *freq = le32_to_cpu(cmd->freq); + + return status; +} + +/** + * ice_aq_get_phy_rec_clk_out - get phy recovered signal info + * @hw: pointer to the HW struct + * @phy_output: PHY reference clock output pin + * @port_num: Port number + * @flags: PHY flags + * @node_handle: PHY node handle + * + * Get PHY recovered clock output info (0x0631) + * Return: 0 on success or negative value on failure. + */ +int +ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num, + u8 *flags, u16 *node_handle) +{ + struct ice_aqc_get_phy_rec_clk_out *cmd; + struct ice_aq_desc desc; + int status; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out); + cmd = &desc.params.get_phy_rec_clk_out; + cmd->phy_output = *phy_output; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status) { + *phy_output = cmd->phy_output; + if (port_num) + *port_num = cmd->port_num; + if (flags) + *flags = cmd->flags; + if (node_handle) + *node_handle = le16_to_cpu(cmd->node_handle); + } + return status; } @@ -5185,81 +5810,6 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, } /** - * ice_aq_set_driver_param - Set driver parameter to share via firmware - * @hw: pointer to the HW struct - * @idx: parameter index to set - * @value: the value to set the parameter to - * @cd: pointer to command details structure or NULL - * - * Set the value of one of the software defined parameters. All PFs connected - * to this device can read the value using ice_aq_get_driver_param. - * - * Note that firmware provides no synchronization or locking, and will not - * save the parameter value during a device reset. It is expected that - * a single PF will write the parameter value, while all other PFs will only - * read it. - */ -int -ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, - u32 value, struct ice_sq_cd *cd) -{ - struct ice_aqc_driver_shared_params *cmd; - struct ice_aq_desc desc; - - if (idx >= ICE_AQC_DRIVER_PARAM_MAX) - return -EIO; - - cmd = &desc.params.drv_shared_params; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params); - - cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET; - cmd->param_indx = idx; - cmd->param_val = cpu_to_le32(value); - - return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); -} - -/** - * ice_aq_get_driver_param - Get driver parameter shared via firmware - * @hw: pointer to the HW struct - * @idx: parameter index to set - * @value: storage to return the shared parameter - * @cd: pointer to command details structure or NULL - * - * Get the value of one of the software defined parameters. - * - * Note that firmware provides no synchronization or locking. It is expected - * that only a single PF will write a given parameter.
- */ -int -ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, - u32 *value, struct ice_sq_cd *cd) -{ - struct ice_aqc_driver_shared_params *cmd; - struct ice_aq_desc desc; - int status; - - if (idx >= ICE_AQC_DRIVER_PARAM_MAX) - return -EIO; - - cmd = &desc.params.drv_shared_params; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params); - - cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET; - cmd->param_indx = idx; - - status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); - if (status) - return status; - - *value = le32_to_cpu(cmd->param_val); - - return 0; -} - -/** * ice_aq_set_gpio * @hw: pointer to the hw struct * @gpio_ctrl_handle: GPIO controller node handle @@ -5560,6 +6110,7 @@ static const u32 ice_aq_to_link_speed[] = { SPEED_40000, SPEED_50000, SPEED_100000, /* BIT(10) */ + SPEED_200000, }; /** diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 81961a7d6598..31fdcac33986 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -38,10 +38,9 @@ int ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res); int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res); -int -ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, - struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, - enum ice_adminq_opc opc, struct ice_sq_cd *cd); +int ice_aq_alloc_free_res(struct ice_hw *hw, + struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, + enum ice_adminq_opc opc); bool ice_is_sbq_supported(struct ice_hw *hw); struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw); int @@ -93,6 +92,14 @@ int ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *caps, struct ice_sq_cd *cd); +bool ice_is_pf_c827(struct ice_hw *hw); +bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw); +bool ice_is_clock_mux_in_netlist(struct ice_hw *hw); +bool ice_is_cgu_in_netlist(struct ice_hw *hw); +bool ice_is_gps_in_netlist(struct ice_hw *hw); +int +ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd, + u8 *node_part_number, u16 *node_handle); int ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, enum ice_adminq_opc opc, struct ice_sq_cd *cd); @@ -186,12 +193,54 @@ int ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, struct ice_sq_cd *cd); +int +ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf, + u16 buf_size, u16 num_qs, u8 oldport, u8 newport, + struct ice_sq_cd *cd); int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); void ice_replay_post(struct ice_hw *hw); void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf); struct ice_q_ctx * ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle); int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in); +int +ice_aq_get_cgu_abilities(struct ice_hw *hw, + struct ice_aqc_get_cgu_abilities *abilities); +int +ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2, + u32 freq, s32 phase_delay); +int +ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type, + u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay); +int +ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags, + u8 src_sel, u32 freq, s32 phase_delay); +int +ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 
output_idx, u8 *flags, + u8 *src_sel, u32 *freq, u32 *src_freq); +int +ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state, + u8 *dpll_state, u8 *config, s64 *phase_offset, + u8 *eec_mode); +int +ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state, + u8 config, u8 eec_mode); +int +ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, + u8 ref_priority); +int +ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, + u8 *ref_prio); +int +ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver, + u32 *cgu_fw_ver); + +int +ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable, + u32 *freq); +int +ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num, + u8 *flags, u16 *node_handle); void ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); @@ -204,12 +253,6 @@ int ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf); int -ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, - u32 value, struct ice_sq_cd *cd); -int -ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, - u32 *value, struct ice_sq_cd *cd); -int ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, struct ice_sq_cd *cd); int diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c index 3eb01731e496..e1fbc6de452d 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c @@ -70,6 +70,11 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets) !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) return -EINVAL; + if (pf->lag && pf->lag->bonded) { + netdev_err(netdev, "DCB changes not allowed when in a bond\n"); + return -EINVAL; + } + new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg; mutex_lock(&pf->tc_mutex); @@ -170,6 +175,11 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode) if (mode == pf->dcbx_cap) return ICE_DCB_NO_HW_CHG; + if (pf->lag && pf->lag->bonded) { + netdev_err(netdev, "DCB changes not allowed when in a bond\n"); + return ICE_DCB_NO_HW_CHG; + } + qos_cfg = &pf->hw.port_info->qos_cfg; /* DSCP configuration is not DCBx negotiated */ @@ -261,6 +271,11 @@ static int ice_dcbnl_setpfc(struct net_device *netdev, struct ieee_pfc *pfc) !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) return -EINVAL; + if (pf->lag && pf->lag->bonded) { + netdev_err(netdev, "DCB changes not allowed when in a bond\n"); + return -EINVAL; + } + mutex_lock(&pf->tc_mutex); new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg; @@ -323,6 +338,11 @@ static void ice_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 set) if (prio >= ICE_MAX_USER_PRIORITY) return; + if (pf->lag && pf->lag->bonded) { + netdev_err(netdev, "DCB changes not allowed when in a bond\n"); + return; + } + new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg; new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc; @@ -379,6 +399,11 @@ static u8 ice_dcbnl_setstate(struct net_device *netdev, u8 state) !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) return ICE_DCB_NO_HW_CHG; + if (pf->lag && pf->lag->bonded) { + netdev_err(netdev, "DCB changes not allowed when in a bond\n"); + return ICE_DCB_NO_HW_CHG; + } + /* Nothing to do */ if (!!state == test_bit(ICE_FLAG_DCB_ENA, pf->flags)) return ICE_DCB_NO_HW_CHG; @@ -451,6 +476,11 @@ ice_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, if 
(tc >= ICE_MAX_TRAFFIC_CLASS) return; + if (pf->lag && pf->lag->bonded) { + netdev_err(netdev, "DCB changes not allowed when in a bond\n"); + return; + } + new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg; /* prio_type, bwg_id and bw_pct per UP are not supported */ @@ -505,6 +535,11 @@ ice_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 bw_pct) if (pgid >= ICE_MAX_TRAFFIC_CLASS) return; + if (pf->lag && pf->lag->bonded) { + netdev_err(netdev, "DCB changes not allowed when in a bond\n"); + return; + } + new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg; new_cfg->etscfg.tcbwtable[pgid] = bw_pct; @@ -725,6 +760,11 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app) return -EINVAL; } + if (pf->lag && pf->lag->bonded) { + netdev_err(netdev, "DCB changes not allowed when in a bond\n"); + return -EINVAL; + } + max_tc = pf->hw.func_caps.common_cap.maxtc; if (app->priority >= max_tc) { netdev_err(netdev, "TC %d out of range, max TC %d\n", @@ -836,6 +876,11 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app) return -EINVAL; } + if (pf->lag && pf->lag->bonded) { + netdev_err(netdev, "DCB changes not allowed when in a bond\n"); + return -EINVAL; + } + mutex_lock(&pf->tc_mutex); old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; @@ -937,6 +982,11 @@ static u8 ice_dcbnl_cee_set_all(struct net_device *netdev) !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) return ICE_DCB_NO_HW_CHG; + if (pf->lag && pf->lag->bonded) { + netdev_err(netdev, "DCB changes not allowed when in a bond\n"); + return ICE_DCB_NO_HW_CHG; + } + new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg; mutex_lock(&pf->tc_mutex); diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c index d71ed210f9c4..8b7504a9df31 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.c +++ b/drivers/net/ethernet/intel/ice/ice_ddp.c @@ -30,7 +30,7 @@ static const struct ice_tunnel_type_scan tnls[] = { * Verifies various attributes of the package file, including length, format * version, and the requirement of at least one segment. */ -enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) +static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) { u32 seg_count; u32 i; @@ -118,7 +118,7 @@ static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) * * This helper function validates a buffer's header. 
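+ * Returns the typed buffer header on success, or NULL when the header's
+ * section count or data end offset lies outside the limits allowed for
+ * package buffers.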
*/ -struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) +static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) { struct ice_buf_hdr *hdr; u16 section_count; @@ -1153,23 +1153,168 @@ static void ice_release_global_cfg_lock(struct ice_hw *hw) } /** - * ice_dwnld_cfg_bufs + * ice_aq_download_pkg + * @hw: pointer to the hardware structure + * @pkg_buf: the package buffer to transfer + * @buf_size: the size of the package buffer + * @last_buf: last buffer indicator + * @error_offset: returns error offset + * @error_info: returns error information + * @cd: pointer to command details structure or NULL + * + * Download Package (0x0C40) + */ +static int +ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, bool last_buf, u32 *error_offset, + u32 *error_info, struct ice_sq_cd *cd) +{ + struct ice_aqc_download_pkg *cmd; + struct ice_aq_desc desc; + int status; + + if (error_offset) + *error_offset = 0; + if (error_info) + *error_info = 0; + + cmd = &desc.params.download_pkg; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + if (last_buf) + cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; + + status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); + if (status == -EIO) { + /* Read error from buffer only when the FW returned an error */ + struct ice_aqc_download_pkg_resp *resp; + + resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; + if (error_offset) + *error_offset = le32_to_cpu(resp->error_offset); + if (error_info) + *error_info = le32_to_cpu(resp->error_info); + } + + return status; +} + +/** + * ice_get_pkg_seg_by_idx + * @pkg_hdr: pointer to the package header to be searched + * @idx: index of segment + */ +static struct ice_generic_seg_hdr * +ice_get_pkg_seg_by_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx) +{ + if (idx < le32_to_cpu(pkg_hdr->seg_count)) + return (struct ice_generic_seg_hdr *) + ((u8 *)pkg_hdr + + le32_to_cpu(pkg_hdr->seg_offset[idx])); + + return NULL; +} + +/** + * ice_is_signing_seg_at_idx - determine if segment is a signing segment + * @pkg_hdr: pointer to package header + * @idx: segment index + */ +static bool ice_is_signing_seg_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx) +{ + struct ice_generic_seg_hdr *seg; + + seg = ice_get_pkg_seg_by_idx(pkg_hdr, idx); + if (!seg) + return false; + + return le32_to_cpu(seg->seg_type) == SEGMENT_TYPE_SIGNING; +} + +/** + * ice_is_signing_seg_type_at_idx + * @pkg_hdr: pointer to package header + * @idx: segment index + * @seg_id: segment id that is expected + * @sign_type: signing type + * + * Determine if a segment is a signing segment of the correct type + */ +static bool +ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx, + u32 seg_id, u32 sign_type) +{ + struct ice_sign_seg *seg; + + if (!ice_is_signing_seg_at_idx(pkg_hdr, idx)) + return false; + + seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx); + + if (seg && le32_to_cpu(seg->seg_id) == seg_id && + le32_to_cpu(seg->sign_type) == sign_type) + return true; + + return false; +} + +/** + * ice_is_buffer_metadata - determine if package buffer is a metadata buffer + * @buf: pointer to buffer header + */ +static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf) +{ + if (le32_to_cpu(buf->section_entry[0].type) & ICE_METADATA_BUF) + return true; + + return false; +} + +/** + * ice_is_last_download_buffer + * @buf: pointer to current buffer header + * @idx: index of the buffer in the current sequence + * @count: the buffer count in the current 
sequence + * + * Note: a buffer counts as the last one to download either when it is the + * final buffer of the sequence, or when the buffer that follows it is a + * metadata buffer. + */ +static bool +ice_is_last_download_buffer(struct ice_buf_hdr *buf, u32 idx, u32 count) +{ + struct ice_buf *next_buf; + + if ((idx + 1) == count) + return true; + + /* A set metadata flag in the next buffer will signal that the current + * buffer will be the last buffer downloaded + */ + next_buf = ((struct ice_buf *)buf) + 1; + + return ice_is_buffer_metadata((struct ice_buf_hdr *)next_buf); +} + +/** + * ice_dwnld_cfg_bufs_no_lock + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @start: buffer index of first buffer to download + * @count: the number of buffers to download + * @indicate_last: if true, then set last buffer flag on last buffer download + * - * Obtains global config lock and downloads the package configuration buffers - * to the firmware. Metadata buffers are skipped, and the first metadata buffer - * found indicates that the rest of the buffers are all metadata buffers. + * Downloads package configuration buffers to the firmware. Metadata buffers + * are skipped, and the first metadata buffer found indicates that the rest + * of the buffers are all metadata buffers. */ -static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw, - struct ice_buf *bufs, u32 count) +static enum ice_ddp_state +ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start, + u32 count, bool indicate_last) { enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; struct ice_buf_hdr *bh; enum ice_aq_err err; u32 offset, info, i; - int status; if (!bufs || !count) return ICE_DDP_PKG_ERR; @@ -1178,43 +1323,25 @@ static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw, * then there are no buffers to be downloaded, and the operation is * considered a success.
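+ * (The metadata bit is the ICE_METADATA_BUF flag in the first section
+ * entry's type word; see ice_is_buffer_metadata().)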
*/ - bh = (struct ice_buf_hdr *)bufs; + bh = (struct ice_buf_hdr *)(bufs + start); if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) return ICE_DDP_PKG_SUCCESS; - status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); - if (status) { - if (status == -EALREADY) - return ICE_DDP_PKG_ALREADY_LOADED; - return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status); - } - for (i = 0; i < count; i++) { - bool last = ((i + 1) == count); - - if (!last) { - /* check next buffer for metadata flag */ - bh = (struct ice_buf_hdr *)(bufs + i + 1); + bool last = false; + int status; - /* A set metadata flag in the next buffer will signal - * that the current buffer will be the last buffer - * downloaded - */ - if (le16_to_cpu(bh->section_count)) - if (le32_to_cpu(bh->section_entry[0].type) & - ICE_METADATA_BUF) - last = true; - } + bh = (struct ice_buf_hdr *)(bufs + start + i); - bh = (struct ice_buf_hdr *)(bufs + i); + if (indicate_last) + last = ice_is_last_download_buffer(bh, i, count); status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, &offset, &info, NULL); /* Save AQ status from download package */ if (status) { - ice_debug(hw, ICE_DBG_PKG, - "Pkg download failed: err %d off %d inf %d\n", + ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n", status, offset, info); err = hw->adminq.sq_last_status; state = ice_map_aq_err_to_ddp_state(err); @@ -1225,50 +1352,231 @@ static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw, break; } - if (!status) { - status = ice_set_vlan_mode(hw); - if (status) - ice_debug(hw, ICE_DBG_PKG, - "Failed to set VLAN mode: err %d\n", status); + return state; +} + +/** + * ice_download_pkg_sig_seg - download a signature segment + * @hw: pointer to the hardware structure + * @seg: pointer to signature segment + */ +static enum ice_ddp_state +ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg) +{ + return ice_dwnld_cfg_bufs_no_lock(hw, seg->buf_tbl.buf_array, 0, + le32_to_cpu(seg->buf_tbl.buf_count), + false); +} + +/** + * ice_download_pkg_config_seg - download a config segment + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to package header + * @idx: segment index + * @start: starting buffer + * @count: buffer count + * + * Note: idx must reference a ICE segment + */ +static enum ice_ddp_state +ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, + u32 idx, u32 start, u32 count) +{ + struct ice_buf_table *bufs; + struct ice_seg *seg; + u32 buf_count; + + seg = (struct ice_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx); + if (!seg) + return ICE_DDP_PKG_ERR; + + bufs = ice_find_buf_table(seg); + buf_count = le32_to_cpu(bufs->buf_count); + + if (start >= buf_count || start + count > buf_count) + return ICE_DDP_PKG_ERR; + + return ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count, + true); +} + +/** + * ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to package header + * @idx: segment index (must be a signature segment) + * + * Note: idx must reference a signature segment + */ +static enum ice_ddp_state +ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, + u32 idx) +{ + enum ice_ddp_state state; + struct ice_sign_seg *seg; + u32 conf_idx; + u32 start; + u32 count; + + seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx); + if (!seg) { + state = ICE_DDP_PKG_ERR; + goto exit; } + conf_idx = le32_to_cpu(seg->signed_seg_idx); + start = 
le32_to_cpu(seg->signed_buf_start); + count = le32_to_cpu(seg->signed_buf_count); + + state = ice_download_pkg_sig_seg(hw, seg); + if (state) + goto exit; + + state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start, + count); + +exit: + return state; +} + +/** + * ice_match_signing_seg - determine if a matching signing segment exists + * @pkg_hdr: pointer to package header + * @seg_id: segment id that is expected + * @sign_type: signing type + */ +static bool +ice_match_signing_seg(struct ice_pkg_hdr *pkg_hdr, u32 seg_id, u32 sign_type) +{ + u32 i; + + for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { + if (ice_is_signing_seg_type_at_idx(pkg_hdr, i, seg_id, + sign_type)) + return true; + } + + return false; +} + +/** + * ice_post_dwnld_pkg_actions - perform post download package actions + * @hw: pointer to the hardware structure + */ +static enum ice_ddp_state +ice_post_dwnld_pkg_actions(struct ice_hw *hw) +{ + int status; + + status = ice_set_vlan_mode(hw); + if (status) { + ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n", + status); + return ICE_DDP_PKG_ERR; + } + + return ICE_DDP_PKG_SUCCESS; +} + +/** + * ice_download_pkg_with_sig_seg + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to package header + * + * Handles the download of a complete package. + */ +static enum ice_ddp_state +ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) +{ + enum ice_aq_err aq_err = hw->adminq.sq_last_status; + enum ice_ddp_state state = ICE_DDP_PKG_ERR; + int status; + u32 i; + + ice_debug(hw, ICE_DBG_INIT, "Segment ID %d\n", hw->pkg_seg_id); + ice_debug(hw, ICE_DBG_INIT, "Signature type %d\n", hw->pkg_sign_type); + + status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); + if (status) { + if (status == -EALREADY) + state = ICE_DDP_PKG_ALREADY_LOADED; + else + state = ice_map_aq_err_to_ddp_state(aq_err); + return state; + } + + for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { + if (!ice_is_signing_seg_type_at_idx(pkg_hdr, i, hw->pkg_seg_id, + hw->pkg_sign_type)) + continue; + + state = ice_dwnld_sign_and_cfg_segs(hw, pkg_hdr, i); + if (state) + break; + } + + if (!state) + state = ice_post_dwnld_pkg_actions(hw); + ice_release_global_cfg_lock(hw); return state; } /** - * ice_aq_get_pkg_info_list + * ice_dwnld_cfg_bufs * @hw: pointer to the hardware structure - * @pkg_info: the buffer which will receive the information list - * @buf_size: the size of the pkg_info information buffer - * @cd: pointer to command details structure or NULL + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array * - * Get Package Info List (0x0C43) + * Obtains global config lock and downloads the package configuration buffers + * to the firmware. */ -static int ice_aq_get_pkg_info_list(struct ice_hw *hw, - struct ice_aqc_get_pkg_info_resp *pkg_info, - u16 buf_size, struct ice_sq_cd *cd) +static enum ice_ddp_state +ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) { - struct ice_aq_desc desc; + enum ice_ddp_state state; + struct ice_buf_hdr *bh; + int status; - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); + if (!bufs || !count) + return ICE_DDP_PKG_ERR; - return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); + /* If the first buffer's first section has its metadata bit set + * then there are no buffers to be downloaded, and the operation is + * considered a success. 
+ */ + bh = (struct ice_buf_hdr *)bufs; + if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) + return ICE_DDP_PKG_SUCCESS; + + status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); + if (status) { + if (status == -EALREADY) + return ICE_DDP_PKG_ALREADY_LOADED; + return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status); + } + + state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true); + if (!state) + state = ice_post_dwnld_pkg_actions(hw); + + ice_release_global_cfg_lock(hw); + + return state; } /** - * ice_download_pkg + * ice_download_pkg_without_sig_seg * @hw: pointer to the hardware structure * @ice_seg: pointer to the segment of the package to be downloaded * - * Handles the download of a complete package. + * Handles the download of a complete package without signature segment. */ -static enum ice_ddp_state ice_download_pkg(struct ice_hw *hw, - struct ice_seg *ice_seg) +static enum ice_ddp_state +ice_download_pkg_without_sig_seg(struct ice_hw *hw, struct ice_seg *ice_seg) { struct ice_buf_table *ice_buf_tbl; - int status; ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n", ice_seg->hdr.seg_format_ver.major, @@ -1285,79 +1593,52 @@ static enum ice_ddp_state ice_download_pkg(struct ice_hw *hw, ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", le32_to_cpu(ice_buf_tbl->buf_count)); - status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, - le32_to_cpu(ice_buf_tbl->buf_count)); - - ice_post_pkg_dwnld_vlan_mode_cfg(hw); - - return status; + return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, + le32_to_cpu(ice_buf_tbl->buf_count)); } /** - * ice_aq_download_pkg + * ice_download_pkg * @hw: pointer to the hardware structure - * @pkg_buf: the package buffer to transfer - * @buf_size: the size of the package buffer - * @last_buf: last buffer indicator - * @error_offset: returns error offset - * @error_info: returns error information - * @cd: pointer to command details structure or NULL + * @pkg_hdr: pointer to package header + * @ice_seg: pointer to the segment of the package to be downloaded * - * Download Package (0x0C40) + * Handles the download of a complete package. 
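+ *
+ * The signing-segment path is taken when hw->pkg_has_signing_seg is set
+ * (determined in ice_init_pkg_info()); otherwise the legacy path downloads
+ * the ICE configuration segment directly. VLAN mode configuration is
+ * applied after either path completes.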
*/ -int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, - u16 buf_size, bool last_buf, u32 *error_offset, - u32 *error_info, struct ice_sq_cd *cd) +static enum ice_ddp_state +ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, + struct ice_seg *ice_seg) { - struct ice_aqc_download_pkg *cmd; - struct ice_aq_desc desc; - int status; - - if (error_offset) - *error_offset = 0; - if (error_info) - *error_info = 0; - - cmd = &desc.params.download_pkg; - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - - if (last_buf) - cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; + enum ice_ddp_state state; - status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); - if (status == -EIO) { - /* Read error from buffer only when the FW returned an error */ - struct ice_aqc_download_pkg_resp *resp; + if (hw->pkg_has_signing_seg) + state = ice_download_pkg_with_sig_seg(hw, pkg_hdr); + else + state = ice_download_pkg_without_sig_seg(hw, ice_seg); - resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; - if (error_offset) - *error_offset = le32_to_cpu(resp->error_offset); - if (error_info) - *error_info = le32_to_cpu(resp->error_info); - } + ice_post_pkg_dwnld_vlan_mode_cfg(hw); - return status; + return state; } /** - * ice_aq_upload_section + * ice_aq_get_pkg_info_list * @hw: pointer to the hardware structure - * @pkg_buf: the package buffer which will receive the section - * @buf_size: the size of the package buffer + * @pkg_info: the buffer which will receive the information list + * @buf_size: the size of the pkg_info information buffer * @cd: pointer to command details structure or NULL * - * Upload Section (0x0C41) + * Get Package Info List (0x0C43) */ -int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, - u16 buf_size, struct ice_sq_cd *cd) +static int ice_aq_get_pkg_info_list(struct ice_hw *hw, + struct ice_aqc_get_pkg_info_resp *pkg_info, + u16 buf_size, struct ice_sq_cd *cd) { struct ice_aq_desc desc; - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section); - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); - return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); + return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); } /** @@ -1408,6 +1689,26 @@ static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, } /** + * ice_aq_upload_section + * @hw: pointer to the hardware structure + * @pkg_buf: the package buffer which will receive the section + * @buf_size: the size of the package buffer + * @cd: pointer to command details structure or NULL + * + * Upload Section (0x0C41) + */ +int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); +} + +/** * ice_update_pkg_no_lock * @hw: pointer to the hardware structure * @bufs: pointer to an array of buffers @@ -1470,8 +1771,9 @@ int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) * success it returns a pointer to the segment header, otherwise it will * return NULL. 
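+ * Segments are located through the seg_offset[] table in the package
+ * header, in the same way as ice_get_pkg_seg_by_idx() above.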
*/ -struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, - struct ice_pkg_hdr *pkg_hdr) +static struct ice_generic_seg_hdr * +ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, + struct ice_pkg_hdr *pkg_hdr) { u32 i; @@ -1496,6 +1798,73 @@ struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, } /** + * ice_has_signing_seg - determine if package has a signing segment + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to the driver's package hdr + */ +static bool ice_has_signing_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) +{ + struct ice_generic_seg_hdr *seg_hdr; + + seg_hdr = (struct ice_generic_seg_hdr *) + ice_find_seg_in_pkg(hw, SEGMENT_TYPE_SIGNING, pkg_hdr); + + return seg_hdr ? true : false; +} + +/** + * ice_get_pkg_segment_id - get correct package segment id, based on device + * @mac_type: MAC type of the device + */ +static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type) +{ + u32 seg_id; + + switch (mac_type) { + case ICE_MAC_E830: + seg_id = SEGMENT_TYPE_ICE_E830; + break; + case ICE_MAC_GENERIC: + default: + seg_id = SEGMENT_TYPE_ICE_E810; + break; + } + + return seg_id; +} + +/** + * ice_get_pkg_sign_type - get package segment sign type, based on device + * @mac_type: MAC type of the device + */ +static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type) +{ + u32 sign_type; + + switch (mac_type) { + case ICE_MAC_E830: + sign_type = SEGMENT_SIGN_TYPE_RSA3K_SBB; + break; + case ICE_MAC_GENERIC: + default: + sign_type = SEGMENT_SIGN_TYPE_RSA2K; + break; + } + + return sign_type; +} + +/** + * ice_get_signing_req - get correct package requirements, based on device + * @hw: pointer to the hardware structure + */ +static void ice_get_signing_req(struct ice_hw *hw) +{ + hw->pkg_seg_id = ice_get_pkg_segment_id(hw->mac_type); + hw->pkg_sign_type = ice_get_pkg_sign_type(hw->mac_type); +} + +/** * ice_init_pkg_info * @hw: pointer to the hardware structure * @pkg_hdr: pointer to the driver's package hdr @@ -1510,7 +1879,14 @@ static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw, if (!pkg_hdr) return ICE_DDP_PKG_ERR; - seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); + hw->pkg_has_signing_seg = ice_has_signing_seg(hw, pkg_hdr); + ice_get_signing_req(hw); + + ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n", + hw->pkg_seg_id); + + seg_hdr = (struct ice_generic_seg_hdr *) + ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr); if (seg_hdr) { struct ice_meta_sect *meta; struct ice_pkg_enum state; @@ -1558,21 +1934,14 @@ static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw, */ static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw) { - enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; - struct ice_aqc_get_pkg_info_resp *pkg_info; - u16 size; + DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg_info, pkg_info, + ICE_PKG_CNT); + u16 size = __struct_size(pkg_info); u32 i; - size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT); - pkg_info = kzalloc(size, GFP_KERNEL); - if (!pkg_info) + if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) return ICE_DDP_PKG_ERR; - if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) { - state = ICE_DDP_PKG_ERR; - goto init_pkg_free_alloc; - } - for (i = 0; i < le32_to_cpu(pkg_info->count); i++) { #define ICE_PKG_FLAG_COUNT 4 char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 }; @@ -1602,10 +1971,7 @@ static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw) pkg_info->pkg_info[i].name, flags); } -init_pkg_free_alloc: - kfree(pkg_info); - - 
return state; + return ICE_DDP_PKG_SUCCESS; } /** @@ -1620,9 +1986,10 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, struct ice_seg **seg) { - struct ice_aqc_get_pkg_info_resp *pkg; + DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg, pkg_info, + ICE_PKG_CNT); + u16 size = __struct_size(pkg); enum ice_ddp_state state; - u16 size; u32 i; /* Check package version compatibility */ @@ -1633,7 +2000,7 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw, } /* find ICE segment in given package */ - *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, + *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id, ospkg); if (!*seg) { ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); @@ -1641,15 +2008,8 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw, } /* Check if FW is compatible with the OS package */ - size = struct_size(pkg, pkg_info, ICE_PKG_CNT); - pkg = kzalloc(size, GFP_KERNEL); - if (!pkg) - return ICE_DDP_PKG_ERR; - - if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) { - state = ICE_DDP_PKG_LOAD_ERROR; - goto fw_ddp_compat_free_alloc; - } + if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) + return ICE_DDP_PKG_LOAD_ERROR; for (i = 0; i < le32_to_cpu(pkg->count); i++) { /* loop till we find the NVM package */ @@ -1666,8 +2026,7 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw, /* done processing NVM package so break */ break; } -fw_ddp_compat_free_alloc: - kfree(pkg); + return state; } @@ -1807,6 +2166,11 @@ enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) if (state) return state; + /* must be a matching segment */ + if (hw->pkg_has_signing_seg && + !ice_match_signing_seg(pkg, hw->pkg_seg_id, hw->pkg_sign_type)) + return ICE_DDP_PKG_ERR; + /* before downloading the package, check package version for * compatibility with driver */ @@ -1816,7 +2180,7 @@ enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) /* initialize package hints and then download package */ ice_init_pkg_hints(hw, seg); - state = ice_download_pkg(hw, seg); + state = ice_download_pkg(hw, pkg, seg); if (state == ICE_DDP_PKG_ALREADY_LOADED) { ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n"); diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h index 41acfe26df1c..ff66c2ffb1a2 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.h +++ b/drivers/net/ethernet/intel/ice/ice_ddp.h @@ -98,10 +98,21 @@ struct ice_pkg_hdr { __le32 seg_offset[]; }; +/* Package signing algorithm types */ +#define SEGMENT_SIGN_TYPE_INVALID 0x00000000 +#define SEGMENT_SIGN_TYPE_RSA2K 0x00000001 +#define SEGMENT_SIGN_TYPE_RSA3K 0x00000002 +#define SEGMENT_SIGN_TYPE_RSA3K_SBB 0x00000003 /* Secure Boot Block */ +#define SEGMENT_SIGN_TYPE_RSA3K_E825 0x00000005 + /* generic segment */ struct ice_generic_seg_hdr { -#define SEGMENT_TYPE_METADATA 0x00000001 -#define SEGMENT_TYPE_ICE 0x00000010 +#define SEGMENT_TYPE_INVALID 0x00000000 +#define SEGMENT_TYPE_METADATA 0x00000001 +#define SEGMENT_TYPE_ICE_E810 0x00000010 +#define SEGMENT_TYPE_SIGNING 0x00001001 +#define SEGMENT_TYPE_ICE_RUN_TIME_CFG 0x00000020 +#define SEGMENT_TYPE_ICE_E830 0x00000017 __le32 seg_type; struct ice_pkg_ver seg_format_ver; __le32 seg_size; @@ -163,6 +174,18 @@ struct ice_global_metadata_seg { #define ICE_MIN_S_SZ 1 #define ICE_MAX_S_SZ 4084 +struct ice_sign_seg { + struct ice_generic_seg_hdr hdr; + __le32 seg_id; + __le32 sign_type; + __le32 signed_seg_idx; + 
__le32 signed_buf_start; + __le32 signed_buf_count; +#define ICE_SIGN_SEG_RESERVED_COUNT 44 + u8 reserved[ICE_SIGN_SEG_RESERVED_COUNT]; + struct ice_buf_table buf_tbl; +}; + /* section information */ struct ice_section_entry { __le32 type; @@ -416,21 +439,13 @@ struct ice_pkg_enum { void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset); }; -int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, - u16 buf_size, bool last_buf, u32 *error_offset, - u32 *error_info, struct ice_sq_cd *cd); int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, struct ice_sq_cd *cd); void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size); -enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len); - struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw); -struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, - struct ice_pkg_hdr *pkg_hdr); - int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count); int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count); @@ -439,6 +454,4 @@ u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld); void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, u32 sect_type); -struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf); - #endif diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h index 6d560d1c74a4..a2d384dbfc76 100644 --- a/drivers/net/ethernet/intel/ice/ice_devids.h +++ b/drivers/net/ethernet/intel/ice/ice_devids.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2018, Intel Corporation. */ +/* Copyright (c) 2018-2023, Intel Corporation. */ #ifndef _ICE_DEVIDS_H_ #define _ICE_DEVIDS_H_ @@ -16,6 +16,14 @@ #define ICE_DEV_ID_E823L_1GBE 0x124F /* Intel(R) Ethernet Connection E823-L for QSFP */ #define ICE_DEV_ID_E823L_QSFP 0x151D +/* Intel(R) Ethernet Controller E830-C for backplane */ +#define ICE_DEV_ID_E830_BACKPLANE 0x12D1 +/* Intel(R) Ethernet Controller E830-C for QSFP */ +#define ICE_DEV_ID_E830_QSFP56 0x12D2 +/* Intel(R) Ethernet Controller E830-C for SFP */ +#define ICE_DEV_ID_E830_SFP 0x12D3 +/* Intel(R) Ethernet Controller E830-C for SFP-DD */ +#define ICE_DEV_ID_E830_SFP_DD 0x12D4 /* Intel(R) Ethernet Controller E810-C for backplane */ #define ICE_DEV_ID_E810C_BACKPLANE 0x1591 /* Intel(R) Ethernet Controller E810-C for QSFP */ diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c new file mode 100644 index 000000000000..86b180cb32a0 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_dpll.c @@ -0,0 +1,2117 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2022, Intel Corporation. 
*/ + +#include "ice.h" +#include "ice_lib.h" +#include "ice_trace.h" +#include <linux/dpll.h> + +#define ICE_CGU_STATE_ACQ_ERR_THRESHOLD 50 +#define ICE_DPLL_PIN_IDX_INVALID 0xff +#define ICE_DPLL_RCLK_NUM_PER_PF 1 + +/** + * enum ice_dpll_pin_type - enumerate ice pin types: + * @ICE_DPLL_PIN_INVALID: invalid pin type + * @ICE_DPLL_PIN_TYPE_INPUT: input pin + * @ICE_DPLL_PIN_TYPE_OUTPUT: output pin + * @ICE_DPLL_PIN_TYPE_RCLK_INPUT: recovery clock input pin + */ +enum ice_dpll_pin_type { + ICE_DPLL_PIN_INVALID, + ICE_DPLL_PIN_TYPE_INPUT, + ICE_DPLL_PIN_TYPE_OUTPUT, + ICE_DPLL_PIN_TYPE_RCLK_INPUT, +}; + +static const char * const pin_type_name[] = { + [ICE_DPLL_PIN_TYPE_INPUT] = "input", + [ICE_DPLL_PIN_TYPE_OUTPUT] = "output", + [ICE_DPLL_PIN_TYPE_RCLK_INPUT] = "rclk-input", +}; + +/** + * ice_dpll_pin_freq_set - set pin's frequency + * @pf: private board structure + * @pin: pointer to a pin + * @pin_type: type of pin being configured + * @freq: frequency to be set + * @extack: error reporting + * + * Set requested frequency on a pin. + * + * Context: Called under pf->dplls.lock + * Return: + * * 0 - success + * * negative - error on AQ or wrong pin type given + */ +static int +ice_dpll_pin_freq_set(struct ice_pf *pf, struct ice_dpll_pin *pin, + enum ice_dpll_pin_type pin_type, const u32 freq, + struct netlink_ext_ack *extack) +{ + u8 flags; + int ret; + + switch (pin_type) { + case ICE_DPLL_PIN_TYPE_INPUT: + flags = ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_FREQ; + ret = ice_aq_set_input_pin_cfg(&pf->hw, pin->idx, flags, + pin->flags[0], freq, 0); + break; + case ICE_DPLL_PIN_TYPE_OUTPUT: + flags = ICE_AQC_SET_CGU_OUT_CFG_UPDATE_FREQ; + ret = ice_aq_set_output_pin_cfg(&pf->hw, pin->idx, flags, + 0, freq, 0); + break; + default: + return -EINVAL; + } + if (ret) { + NL_SET_ERR_MSG_FMT(extack, + "err:%d %s failed to set pin freq:%u on pin:%u\n", + ret, + ice_aq_str(pf->hw.adminq.sq_last_status), + freq, pin->idx); + return ret; + } + pin->freq = freq; + + return 0; +} + +/** + * ice_dpll_frequency_set - wrapper for pin callback for set frequency + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: pointer to dpll + * @dpll_priv: private data pointer passed on dpll registration + * @frequency: frequency to be set + * @extack: error reporting + * @pin_type: type of pin being configured + * + * Wraps internal set frequency command on a pin. + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error pin not found or couldn't set in hw + */ +static int +ice_dpll_frequency_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + const u32 frequency, + struct netlink_ext_ack *extack, + enum ice_dpll_pin_type pin_type) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + int ret; + + mutex_lock(&pf->dplls.lock); + ret = ice_dpll_pin_freq_set(pf, p, pin_type, frequency, extack); + mutex_unlock(&pf->dplls.lock); + + return ret; +} + +/** + * ice_dpll_input_frequency_set - input pin callback for set frequency + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: pointer to dpll + * @dpll_priv: private data pointer passed on dpll registration + * @frequency: frequency to be set + * @extack: error reporting + * + * Wraps internal set frequency command on a pin. 
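+ *
+ * Note: the dpll subsystem hands @frequency down as a u64, while the CGU
+ * admin queue interface takes a u32; the value is narrowed when passed to
+ * ice_dpll_frequency_set().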
+ * + * Context: Calls a function which acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error pin not found or couldn't set in hw + */ +static int +ice_dpll_input_frequency_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u64 frequency, struct netlink_ext_ack *extack) +{ + return ice_dpll_frequency_set(pin, pin_priv, dpll, dpll_priv, frequency, + extack, ICE_DPLL_PIN_TYPE_INPUT); +} + +/** + * ice_dpll_output_frequency_set - output pin callback for set frequency + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: pointer to dpll + * @dpll_priv: private data pointer passed on dpll registration + * @frequency: frequency to be set + * @extack: error reporting + * + * Wraps internal set frequency command on a pin. + * + * Context: Calls a function which acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error pin not found or couldn't set in hw + */ +static int +ice_dpll_output_frequency_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u64 frequency, struct netlink_ext_ack *extack) +{ + return ice_dpll_frequency_set(pin, pin_priv, dpll, dpll_priv, frequency, + extack, ICE_DPLL_PIN_TYPE_OUTPUT); +} + +/** + * ice_dpll_frequency_get - wrapper for pin callback for get frequency + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: pointer to dpll + * @dpll_priv: private data pointer passed on dpll registration + * @frequency: on success holds pin's frequency + * @extack: error reporting + * @pin_type: type of pin being configured + * + * Wraps internal get frequency command of a pin. + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error pin not found or couldn't get from hw + */ +static int +ice_dpll_frequency_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u64 *frequency, struct netlink_ext_ack *extack, + enum ice_dpll_pin_type pin_type) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + + mutex_lock(&pf->dplls.lock); + *frequency = p->freq; + mutex_unlock(&pf->dplls.lock); + + return 0; +} + +/** + * ice_dpll_input_frequency_get - input pin callback for get frequency + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: pointer to dpll + * @dpll_priv: private data pointer passed on dpll registration + * @frequency: on success holds pin's frequency + * @extack: error reporting + * + * Wraps internal get frequency command of an input pin.
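+ *
+ * The value is served from the driver's cached pin->freq (kept current by
+ * ice_dpll_pin_freq_set() and ice_dpll_pin_state_update()) rather than
+ * re-read from firmware on every call.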
+ * + * Context: Calls a function which acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error pin not found or couldn't get from hw + */ +static int +ice_dpll_input_frequency_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u64 *frequency, struct netlink_ext_ack *extack) +{ + return ice_dpll_frequency_get(pin, pin_priv, dpll, dpll_priv, frequency, + extack, ICE_DPLL_PIN_TYPE_INPUT); +} + +/** + * ice_dpll_output_frequency_get - output pin callback for get frequency + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: pointer to dpll + * @dpll_priv: private data pointer passed on dpll registration + * @frequency: on success holds pin's frequency + * @extack: error reporting + * + * Wraps internal get frequency command of a pin. + * + * Context: Calls a function which acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error pin not found or couldn't get from hw + */ +static int +ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u64 *frequency, struct netlink_ext_ack *extack) +{ + return ice_dpll_frequency_get(pin, pin_priv, dpll, dpll_priv, frequency, + extack, ICE_DPLL_PIN_TYPE_OUTPUT); +} + +/** + * ice_dpll_pin_enable - enable a pin on dplls + * @hw: board private hw structure + * @pin: pointer to a pin + * @pin_type: type of pin being enabled + * @extack: error reporting + * + * Enable a pin on both dplls. Store current state in pin->flags. + * + * Context: Called under pf->dplls.lock + * Return: + * * 0 - OK + * * negative - error + */ +static int +ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin, + enum ice_dpll_pin_type pin_type, + struct netlink_ext_ack *extack) +{ + u8 flags = 0; + int ret; + + switch (pin_type) { + case ICE_DPLL_PIN_TYPE_INPUT: + if (pin->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN) + flags |= ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN; + flags |= ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN; + ret = ice_aq_set_input_pin_cfg(hw, pin->idx, 0, flags, 0, 0); + break; + case ICE_DPLL_PIN_TYPE_OUTPUT: + if (pin->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN) + flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN; + flags |= ICE_AQC_SET_CGU_OUT_CFG_OUT_EN; + ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, 0, 0, 0); + break; + default: + return -EINVAL; + } + if (ret) + NL_SET_ERR_MSG_FMT(extack, + "err:%d %s failed to enable %s pin:%u\n", + ret, ice_aq_str(hw->adminq.sq_last_status), + pin_type_name[pin_type], pin->idx); + + return ret; +} + +/** + * ice_dpll_pin_disable - disable a pin on dplls + * @hw: board private hw structure + * @pin: pointer to a pin + * @pin_type: type of pin being disabled + * @extack: error reporting + * + * Disable a pin on both dplls. Store current state in pin->flags. 
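+ * The embedded sync (ESYNC) enable bit previously read back into
+ * pin->flags[0] is carried into the disable command, so a later enable
+ * restores the prior embedded sync configuration.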
+ * + * Context: Called under pf->dplls.lock + * Return: + * * 0 - OK + * * negative - error + */ +static int +ice_dpll_pin_disable(struct ice_hw *hw, struct ice_dpll_pin *pin, + enum ice_dpll_pin_type pin_type, + struct netlink_ext_ack *extack) +{ + u8 flags = 0; + int ret; + + switch (pin_type) { + case ICE_DPLL_PIN_TYPE_INPUT: + if (pin->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN) + flags |= ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN; + ret = ice_aq_set_input_pin_cfg(hw, pin->idx, 0, flags, 0, 0); + break; + case ICE_DPLL_PIN_TYPE_OUTPUT: + if (pin->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN) + flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN; + ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, 0, 0, 0); + break; + default: + return -EINVAL; + } + if (ret) + NL_SET_ERR_MSG_FMT(extack, + "err:%d %s failed to disable %s pin:%u\n", + ret, ice_aq_str(hw->adminq.sq_last_status), + pin_type_name[pin_type], pin->idx); + + return ret; +} + +/** + * ice_dpll_pin_state_update - update pin's state + * @pf: private board struct + * @pin: structure with pin attributes to be updated + * @pin_type: type of pin being updated + * @extack: error reporting + * + * Determine the pin's current state and frequency, then update the struct + * holding the pin info. For input pins the state is tracked separately for + * each dpll; for rclk pins it is tracked separately for each parent. + * + * Context: Called under pf->dplls.lock + * Return: + * * 0 - OK + * * negative - error + */ +static int +ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin, + enum ice_dpll_pin_type pin_type, + struct netlink_ext_ack *extack) +{ + u8 parent, port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT; + int ret; + + switch (pin_type) { + case ICE_DPLL_PIN_TYPE_INPUT: + ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, NULL, NULL, + NULL, &pin->flags[0], + &pin->freq, NULL); + if (ret) + goto err; + if (ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN & pin->flags[0]) { + if (pin->pin) { + pin->state[pf->dplls.eec.dpll_idx] = + pin->pin == pf->dplls.eec.active_input ? + DPLL_PIN_STATE_CONNECTED : + DPLL_PIN_STATE_SELECTABLE; + pin->state[pf->dplls.pps.dpll_idx] = + pin->pin == pf->dplls.pps.active_input ?
+ DPLL_PIN_STATE_CONNECTED : + DPLL_PIN_STATE_SELECTABLE; + } else { + pin->state[pf->dplls.eec.dpll_idx] = + DPLL_PIN_STATE_SELECTABLE; + pin->state[pf->dplls.pps.dpll_idx] = + DPLL_PIN_STATE_SELECTABLE; + } + } else { + pin->state[pf->dplls.eec.dpll_idx] = + DPLL_PIN_STATE_DISCONNECTED; + pin->state[pf->dplls.pps.dpll_idx] = + DPLL_PIN_STATE_DISCONNECTED; + } + break; + case ICE_DPLL_PIN_TYPE_OUTPUT: + ret = ice_aq_get_output_pin_cfg(&pf->hw, pin->idx, + &pin->flags[0], NULL, + &pin->freq, NULL); + if (ret) + goto err; + if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) + pin->state[0] = DPLL_PIN_STATE_CONNECTED; + else + pin->state[0] = DPLL_PIN_STATE_DISCONNECTED; + break; + case ICE_DPLL_PIN_TYPE_RCLK_INPUT: + for (parent = 0; parent < pf->dplls.rclk.num_parents; + parent++) { + u8 p = parent; + + ret = ice_aq_get_phy_rec_clk_out(&pf->hw, &p, + &port_num, + &pin->flags[parent], + NULL); + if (ret) + goto err; + if (ICE_AQC_GET_PHY_REC_CLK_OUT_OUT_EN & + pin->flags[parent]) + pin->state[parent] = DPLL_PIN_STATE_CONNECTED; + else + pin->state[parent] = + DPLL_PIN_STATE_DISCONNECTED; + } + break; + default: + return -EINVAL; + } + + return 0; +err: + if (extack) + NL_SET_ERR_MSG_FMT(extack, + "err:%d %s failed to update %s pin:%u\n", + ret, + ice_aq_str(pf->hw.adminq.sq_last_status), + pin_type_name[pin_type], pin->idx); + else + dev_err_ratelimited(ice_pf_to_dev(pf), + "err:%d %s failed to update %s pin:%u\n", + ret, + ice_aq_str(pf->hw.adminq.sq_last_status), + pin_type_name[pin_type], pin->idx); + return ret; +} + +/** + * ice_dpll_hw_input_prio_set - set input priority value in hardware + * @pf: board private structure + * @dpll: ice dpll pointer + * @pin: ice pin pointer + * @prio: priority value being set on a dpll + * @extack: error reporting + * + * Internal wrapper for setting the priority in the hardware. + * + * Context: Called under pf->dplls.lock + * Return: + * * 0 - success + * * negative - failure + */ +static int +ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll, + struct ice_dpll_pin *pin, const u32 prio, + struct netlink_ext_ack *extack) +{ + int ret; + + ret = ice_aq_set_cgu_ref_prio(&pf->hw, dpll->dpll_idx, pin->idx, + (u8)prio); + if (ret) + NL_SET_ERR_MSG_FMT(extack, + "err:%d %s failed to set pin prio:%u on pin:%u\n", + ret, + ice_aq_str(pf->hw.adminq.sq_last_status), + prio, pin->idx); + else + dpll->input_prio[pin->idx] = prio; + + return ret; +} + +/** + * ice_dpll_lock_status_get - get dpll lock status callback + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @status: on success holds dpll's lock status + * @extack: error reporting + * + * Dpll subsystem callback, provides dpll's lock status. + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - failure + */ +static int +ice_dpll_lock_status_get(const struct dpll_device *dpll, void *dpll_priv, + enum dpll_lock_status *status, + struct netlink_ext_ack *extack) +{ + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + + mutex_lock(&pf->dplls.lock); + *status = d->dpll_state; + mutex_unlock(&pf->dplls.lock); + + return 0; +} + +/** + * ice_dpll_mode_supported - check if dpll's working mode is supported + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @mode: mode to be checked for support + * @extack: error reporting + * + * Dpll subsystem callback. Provides information if working mode is supported + * by dpll. 
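+ * Only DPLL_MODE_AUTOMATIC is reported as supported.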
+ * + * Return: + * * true - mode is supported + * * false - mode is not supported + */ +static bool ice_dpll_mode_supported(const struct dpll_device *dpll, + void *dpll_priv, + enum dpll_mode mode, + struct netlink_ext_ack *extack) +{ + if (mode == DPLL_MODE_AUTOMATIC) + return true; + + return false; +} + +/** + * ice_dpll_mode_get - get dpll's working mode + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @mode: on success holds current working mode of dpll + * @extack: error reporting + * + * Dpll subsystem callback. Provides working mode of dpll. + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - failure + */ +static int ice_dpll_mode_get(const struct dpll_device *dpll, void *dpll_priv, + enum dpll_mode *mode, + struct netlink_ext_ack *extack) +{ + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + + mutex_lock(&pf->dplls.lock); + *mode = d->mode; + mutex_unlock(&pf->dplls.lock); + + return 0; +} + +/** + * ice_dpll_pin_state_set - set pin's state on dpll + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @enable: if pin shall be enabled + * @extack: error reporting + * @pin_type: type of a pin + * + * Set the state of a pin. + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - OK or no change required + * * negative - error + */ +static int +ice_dpll_pin_state_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + bool enable, struct netlink_ext_ack *extack, + enum ice_dpll_pin_type pin_type) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + int ret; + + mutex_lock(&pf->dplls.lock); + if (enable) + ret = ice_dpll_pin_enable(&pf->hw, p, pin_type, extack); + else + ret = ice_dpll_pin_disable(&pf->hw, p, pin_type, extack); + if (!ret) + ret = ice_dpll_pin_state_update(pf, p, pin_type, extack); + mutex_unlock(&pf->dplls.lock); + + return ret; +} + +/** + * ice_dpll_output_state_set - enable/disable output pin on dpll device + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: dpll being configured + * @dpll_priv: private data pointer passed on dpll registration + * @state: state of pin to be set + * @extack: error reporting + * + * Dpll subsystem callback. Sets the given state on an output pin. + * + * Context: Calls a function which acquires pf->dplls.lock + * Return: + * * 0 - successfully set state + * * negative - failed to set state + */ +static int +ice_dpll_output_state_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + enum dpll_pin_state state, + struct netlink_ext_ack *extack) +{ + bool enable = state == DPLL_PIN_STATE_CONNECTED; + + return ice_dpll_pin_state_set(pin, pin_priv, dpll, dpll_priv, enable, + extack, ICE_DPLL_PIN_TYPE_OUTPUT); +} + +/** + * ice_dpll_input_state_set - enable/disable input pin on dpll device + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: dpll being configured + * @dpll_priv: private data pointer passed on dpll registration + * @state: state of pin to be set + * @extack: error reporting + * + * Dpll subsystem callback. Sets the given state on an input pin.
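+ * With the dpll running in automatic mode, an input pin is enabled by
+ * requesting DPLL_PIN_STATE_SELECTABLE; the dpll itself then picks the
+ * active input. Any other requested state disables the pin.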
+ * + * Context: Calls a function which acquires pf->dplls.lock + * Return: + * * 0 - successfully set state + * * negative - failed to set state + */ +static int +ice_dpll_input_state_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + enum dpll_pin_state state, + struct netlink_ext_ack *extack) +{ + bool enable = state == DPLL_PIN_STATE_SELECTABLE; + + return ice_dpll_pin_state_set(pin, pin_priv, dpll, dpll_priv, enable, + extack, ICE_DPLL_PIN_TYPE_INPUT); +} + +/** + * ice_dpll_pin_state_get - get pin's state on dpll + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @state: on success holds state of the pin + * @extack: error reporting + * @pin_type: type of questioned pin + * + * Determine pin state and set it in the state pointer. + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - failed to get state + */ +static int +ice_dpll_pin_state_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + enum dpll_pin_state *state, + struct netlink_ext_ack *extack, + enum ice_dpll_pin_type pin_type) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + int ret; + + mutex_lock(&pf->dplls.lock); + ret = ice_dpll_pin_state_update(pf, p, pin_type, extack); + if (ret) + goto unlock; + if (pin_type == ICE_DPLL_PIN_TYPE_INPUT) + *state = p->state[d->dpll_idx]; + else if (pin_type == ICE_DPLL_PIN_TYPE_OUTPUT) + *state = p->state[0]; + ret = 0; +unlock: + mutex_unlock(&pf->dplls.lock); + + return ret; +} + +/** + * ice_dpll_output_state_get - get output pin state on dpll device + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @state: on success holds state of the pin + * @extack: error reporting + * + * Dpll subsystem callback. Check state of a pin. + * + * Context: Calls a function which acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - failed to get state + */ +static int +ice_dpll_output_state_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + enum dpll_pin_state *state, + struct netlink_ext_ack *extack) +{ + return ice_dpll_pin_state_get(pin, pin_priv, dpll, dpll_priv, state, + extack, ICE_DPLL_PIN_TYPE_OUTPUT); +} + +/** + * ice_dpll_input_state_get - get input pin state on dpll device + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @state: on success holds state of the pin + * @extack: error reporting + * + * Dpll subsystem callback. Check state of an input pin.
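+ * + * Input pin state is tracked per dpll, so under pf->dplls.lock this + * effectively reduces to (sketch of the shared helper above): + * + * *state = p->state[d->dpll_idx];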
+ * + * Context: Calls a function which acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - failed to get state + */ +static int +ice_dpll_input_state_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + enum dpll_pin_state *state, + struct netlink_ext_ack *extack) +{ + return ice_dpll_pin_state_get(pin, pin_priv, dpll, dpll_priv, state, + extack, ICE_DPLL_PIN_TYPE_INPUT); +} + +/** + * ice_dpll_input_prio_get - get dpll's input prio + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @prio: on success - returns input priority on dpll + * @extack: error reporting + * + * Dpll subsystem callback. Handler for getting priority of an input pin. + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - failure + */ +static int +ice_dpll_input_prio_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u32 *prio, struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + + mutex_lock(&pf->dplls.lock); + *prio = d->input_prio[p->idx]; + mutex_unlock(&pf->dplls.lock); + + return 0; +} + +/** + * ice_dpll_input_prio_set - set dpll input prio + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @prio: input priority to be set on dpll + * @extack: error reporting + * + * Dpll subsystem callback. Handler for setting priority of an input pin. + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - failure + */ +static int +ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u32 prio, struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + int ret; + + mutex_lock(&pf->dplls.lock); + ret = ice_dpll_hw_input_prio_set(pf, d, p, prio, extack); + mutex_unlock(&pf->dplls.lock); + + return ret; +} + +/** + * ice_dpll_input_direction - callback for getting input pin direction + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @direction: holds input pin direction + * @extack: error reporting + * + * Dpll subsystem callback. Handler for getting direction of an input pin. + * + * Return: + * * 0 - success + */ +static int +ice_dpll_input_direction(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + enum dpll_pin_direction *direction, + struct netlink_ext_ack *extack) +{ + *direction = DPLL_PIN_DIRECTION_INPUT; + + return 0; +} + +/** + * ice_dpll_output_direction - callback for getting output pin direction + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @direction: holds output pin direction + * @extack: error reporting + * + * Dpll subsystem callback. Handler for getting direction of an output pin.
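+ * + * Pin directions are fixed on this hardware, so both direction callbacks + * just report a constant, e.g. for outputs (sketch): + * + * *direction = DPLL_PIN_DIRECTION_OUTPUT;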
+ * + * Return: + * * 0 - success + */ +static int +ice_dpll_output_direction(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + enum dpll_pin_direction *direction, + struct netlink_ext_ack *extack) +{ + *direction = DPLL_PIN_DIRECTION_OUTPUT; + + return 0; +} + +/** + * ice_dpll_pin_phase_adjust_get - callback for getting pin phase adjust value + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @phase_adjust: on success holds pin phase_adjust value + * @extack: error reporting + * + * Dpll subsystem callback. Handler for getting phase adjust value of a pin. + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_pin_phase_adjust_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + s32 *phase_adjust, + struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_pf *pf = p->pf; + + mutex_lock(&pf->dplls.lock); + *phase_adjust = p->phase_adjust; + mutex_unlock(&pf->dplls.lock); + + return 0; +} + +/** + * ice_dpll_pin_phase_adjust_set - helper for setting a pin phase adjust value + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @phase_adjust: phase_adjust to be set + * @extack: error reporting + * @type: type of a pin + * + * Helper for dpll subsystem callback. Handler for setting phase adjust value + * of a pin. + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + s32 phase_adjust, + struct netlink_ext_ack *extack, + enum ice_dpll_pin_type type) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + u8 flag, flags_en = 0; + int ret; + + mutex_lock(&pf->dplls.lock); + switch (type) { + case ICE_DPLL_PIN_TYPE_INPUT: + flag = ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_DELAY; + if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN) + flags_en |= ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN; + if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN) + flags_en |= ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN; + ret = ice_aq_set_input_pin_cfg(&pf->hw, p->idx, flag, flags_en, + 0, phase_adjust); + break; + case ICE_DPLL_PIN_TYPE_OUTPUT: + flag = ICE_AQC_SET_CGU_OUT_CFG_UPDATE_PHASE; + if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_OUT_EN) + flag |= ICE_AQC_SET_CGU_OUT_CFG_OUT_EN; + if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN) + flag |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN; + ret = ice_aq_set_output_pin_cfg(&pf->hw, p->idx, flag, 0, 0, + phase_adjust); + break; + default: + ret = -EINVAL; + } + if (!ret) + p->phase_adjust = phase_adjust; + mutex_unlock(&pf->dplls.lock); + if (ret) + NL_SET_ERR_MSG_FMT(extack, + "err:%d %s failed to set pin phase_adjust:%d for pin:%u on dpll:%u\n", + ret, + ice_aq_str(pf->hw.adminq.sq_last_status), + phase_adjust, p->idx, d->dpll_idx); + + return ret; +} + +/** + * ice_dpll_input_phase_adjust_set - callback for setting input pin phase adjust + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer
passed on dpll registration + * @phase_adjust: phase_adjust to be set + * @extack: error reporting + * + * Dpll subsystem callback. Wraps a handler for setting phase adjust on input + * pin. + * + * Context: Calls a function which acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_input_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + s32 phase_adjust, + struct netlink_ext_ack *extack) +{ + return ice_dpll_pin_phase_adjust_set(pin, pin_priv, dpll, dpll_priv, + phase_adjust, extack, + ICE_DPLL_PIN_TYPE_INPUT); +} + +/** + * ice_dpll_output_phase_adjust_set - callback for setting output pin phase adjust + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @phase_adjust: phase_adjust to be set + * @extack: error reporting + * + * Dpll subsystem callback. Wraps a handler for setting phase adjust on output + * pin. + * + * Context: Calls a function which acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_output_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + s32 phase_adjust, + struct netlink_ext_ack *extack) +{ + return ice_dpll_pin_phase_adjust_set(pin, pin_priv, dpll, dpll_priv, + phase_adjust, extack, + ICE_DPLL_PIN_TYPE_OUTPUT); +} + +#define ICE_DPLL_PHASE_OFFSET_DIVIDER 100 +#define ICE_DPLL_PHASE_OFFSET_FACTOR \ + (DPLL_PHASE_OFFSET_DIVIDER / ICE_DPLL_PHASE_OFFSET_DIVIDER) +/** + * ice_dpll_phase_offset_get - callback for getting dpll phase shift value + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @phase_offset: on success holds pin phase_offset value + * @extack: error reporting + * + * Dpll subsystem callback. Handler for getting phase shift value between + * dpll's input and output.
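+ * + * The firmware value is scaled with ICE_DPLL_PHASE_OFFSET_DIVIDER (100), + * while the dpll subsystem expects scaling by DPLL_PHASE_OFFSET_DIVIDER, so + * the raw reading is rescaled by the quotient of the two (sketch): + * + * *phase_offset = d->phase_offset * ICE_DPLL_PHASE_OFFSET_FACTOR;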
+ * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_phase_offset_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + s64 *phase_offset, struct netlink_ext_ack *extack) +{ + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + + mutex_lock(&pf->dplls.lock); + if (d->active_input == pin) + *phase_offset = d->phase_offset * ICE_DPLL_PHASE_OFFSET_FACTOR; + else + *phase_offset = 0; + mutex_unlock(&pf->dplls.lock); + + return 0; +} + +/** + * ice_dpll_rclk_state_on_pin_set - set a state on rclk pin + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @parent_pin: pin parent pointer + * @parent_pin_priv: parent private data pointer passed on pin registration + * @state: state to be set on pin + * @extack: error reporting + * + * Dpll subsystem callback, sets the state of a rclk pin on a parent pin + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - failure + */ +static int +ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_pin *parent_pin, + void *parent_pin_priv, + enum dpll_pin_state state, + struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv, *parent = parent_pin_priv; + bool enable = state == DPLL_PIN_STATE_CONNECTED; + struct ice_pf *pf = p->pf; + int ret = -EINVAL; + u32 hw_idx; + + mutex_lock(&pf->dplls.lock); + hw_idx = parent->idx - pf->dplls.base_rclk_idx; + if (hw_idx >= pf->dplls.num_inputs) + goto unlock; + + if ((enable && p->state[hw_idx] == DPLL_PIN_STATE_CONNECTED) || + (!enable && p->state[hw_idx] == DPLL_PIN_STATE_DISCONNECTED)) { + NL_SET_ERR_MSG_FMT(extack, + "pin:%u state:%u on parent:%u already set", + p->idx, state, parent->idx); + goto unlock; + } + ret = ice_aq_set_phy_rec_clk_out(&pf->hw, hw_idx, enable, + &p->freq); + if (ret) + NL_SET_ERR_MSG_FMT(extack, + "err:%d %s failed to set pin state:%u for pin:%u on parent:%u\n", + ret, + ice_aq_str(pf->hw.adminq.sq_last_status), + state, p->idx, parent->idx); +unlock: + mutex_unlock(&pf->dplls.lock); + + return ret; +} + +/** + * ice_dpll_rclk_state_on_pin_get - get a state of rclk pin + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @parent_pin: pin parent pointer + * @parent_pin_priv: pin parent priv data pointer passed on pin registration + * @state: on success holds pin state on parent pin + * @extack: error reporting + * + * Dpll subsystem callback, gets the state of a recovered clock pin.
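+ * + * The parent pin is first translated to a hardware index; a sketch of the + * bounds check shared with the set callback: + * + * hw_idx = parent->idx - pf->dplls.base_rclk_idx; + * if (hw_idx >= pf->dplls.num_inputs) + * return -EINVAL;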
+ * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - failure + */ +static int +ice_dpll_rclk_state_on_pin_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_pin *parent_pin, + void *parent_pin_priv, + enum dpll_pin_state *state, + struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv, *parent = parent_pin_priv; + struct ice_pf *pf = p->pf; + int ret = -EINVAL; + u32 hw_idx; + + mutex_lock(&pf->dplls.lock); + hw_idx = parent->idx - pf->dplls.base_rclk_idx; + if (hw_idx >= pf->dplls.num_inputs) + goto unlock; + + ret = ice_dpll_pin_state_update(pf, p, ICE_DPLL_PIN_TYPE_RCLK_INPUT, + extack); + if (ret) + goto unlock; + + *state = p->state[hw_idx]; + ret = 0; +unlock: + mutex_unlock(&pf->dplls.lock); + + return ret; +} + +static const struct dpll_pin_ops ice_dpll_rclk_ops = { + .state_on_pin_set = ice_dpll_rclk_state_on_pin_set, + .state_on_pin_get = ice_dpll_rclk_state_on_pin_get, + .direction_get = ice_dpll_input_direction, +}; + +static const struct dpll_pin_ops ice_dpll_input_ops = { + .frequency_get = ice_dpll_input_frequency_get, + .frequency_set = ice_dpll_input_frequency_set, + .state_on_dpll_get = ice_dpll_input_state_get, + .state_on_dpll_set = ice_dpll_input_state_set, + .prio_get = ice_dpll_input_prio_get, + .prio_set = ice_dpll_input_prio_set, + .direction_get = ice_dpll_input_direction, + .phase_adjust_get = ice_dpll_pin_phase_adjust_get, + .phase_adjust_set = ice_dpll_input_phase_adjust_set, + .phase_offset_get = ice_dpll_phase_offset_get, +}; + +static const struct dpll_pin_ops ice_dpll_output_ops = { + .frequency_get = ice_dpll_output_frequency_get, + .frequency_set = ice_dpll_output_frequency_set, + .state_on_dpll_get = ice_dpll_output_state_get, + .state_on_dpll_set = ice_dpll_output_state_set, + .direction_get = ice_dpll_output_direction, + .phase_adjust_get = ice_dpll_pin_phase_adjust_get, + .phase_adjust_set = ice_dpll_output_phase_adjust_set, +}; + +static const struct dpll_device_ops ice_dpll_ops = { + .lock_status_get = ice_dpll_lock_status_get, + .mode_supported = ice_dpll_mode_supported, + .mode_get = ice_dpll_mode_get, +}; + +/** + * ice_generate_clock_id - generates unique clock_id for registering dpll. + * @pf: board private structure + * + * Generates unique (per board) clock_id for allocation and search of dpll + * devices in Linux dpll subsystem. + * + * Return: generated clock id for the board + */ +static u64 ice_generate_clock_id(struct ice_pf *pf) +{ + return pci_get_dsn(pf->pdev); +} + +/** + * ice_dpll_notify_changes - notify dpll subsystem about changes + * @d: pointer to dpll + * + * Once a change is detected, an appropriate event is submitted to the dpll subsystem.
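+ * + * Three kinds of changes are folded in here: lock state changes go out via + * dpll_device_change_ntf(), while active input and phase offset changes go + * out via dpll_pin_change_ntf(); a pin is notified at most once per call.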
+ */ +static void ice_dpll_notify_changes(struct ice_dpll *d) +{ + bool pin_notified = false; + + if (d->prev_dpll_state != d->dpll_state) { + d->prev_dpll_state = d->dpll_state; + dpll_device_change_ntf(d->dpll); + } + if (d->prev_input != d->active_input) { + if (d->prev_input) + dpll_pin_change_ntf(d->prev_input); + d->prev_input = d->active_input; + if (d->active_input) { + dpll_pin_change_ntf(d->active_input); + pin_notified = true; + } + } + if (d->prev_phase_offset != d->phase_offset) { + d->prev_phase_offset = d->phase_offset; + if (!pin_notified && d->active_input) + dpll_pin_change_ntf(d->active_input); + } +} + +/** + * ice_dpll_update_state - update dpll state + * @pf: pf private structure + * @d: pointer to queried dpll device + * @init: if function called on initialization of ice dpll + * + * Poll current state of dpll from hw and update ice_dpll struct. + * + * Context: Called by kworker under pf->dplls.lock + * Return: + * * 0 - success + * * negative - AQ failure + */ +static int +ice_dpll_update_state(struct ice_pf *pf, struct ice_dpll *d, bool init) +{ + struct ice_dpll_pin *p = NULL; + int ret; + + ret = ice_get_cgu_state(&pf->hw, d->dpll_idx, d->prev_dpll_state, + &d->input_idx, &d->ref_state, &d->eec_mode, + &d->phase_offset, &d->dpll_state); + + dev_dbg(ice_pf_to_dev(pf), + "update dpll=%d, prev_src_idx:%u, src_idx:%u, state:%d, prev:%d mode:%d\n", + d->dpll_idx, d->prev_input_idx, d->input_idx, + d->dpll_state, d->prev_dpll_state, d->mode); + if (ret) { + dev_err(ice_pf_to_dev(pf), + "update dpll=%d state failed, ret=%d %s\n", + d->dpll_idx, ret, + ice_aq_str(pf->hw.adminq.sq_last_status)); + return ret; + } + if (init) { + if (d->dpll_state == DPLL_LOCK_STATUS_LOCKED || + d->dpll_state == DPLL_LOCK_STATUS_LOCKED_HO_ACQ) + d->active_input = pf->dplls.inputs[d->input_idx].pin; + p = &pf->dplls.inputs[d->input_idx]; + return ice_dpll_pin_state_update(pf, p, + ICE_DPLL_PIN_TYPE_INPUT, NULL); + } + if (d->dpll_state == DPLL_LOCK_STATUS_HOLDOVER || + d->dpll_state == DPLL_LOCK_STATUS_UNLOCKED) { + d->active_input = NULL; + if (d->input_idx != ICE_DPLL_PIN_IDX_INVALID) + p = &pf->dplls.inputs[d->input_idx]; + d->prev_input_idx = ICE_DPLL_PIN_IDX_INVALID; + d->input_idx = ICE_DPLL_PIN_IDX_INVALID; + if (!p) + return 0; + ret = ice_dpll_pin_state_update(pf, p, + ICE_DPLL_PIN_TYPE_INPUT, NULL); + } else if (d->input_idx != d->prev_input_idx) { + if (d->prev_input_idx != ICE_DPLL_PIN_IDX_INVALID) { + p = &pf->dplls.inputs[d->prev_input_idx]; + ice_dpll_pin_state_update(pf, p, + ICE_DPLL_PIN_TYPE_INPUT, + NULL); + } + if (d->input_idx != ICE_DPLL_PIN_IDX_INVALID) { + p = &pf->dplls.inputs[d->input_idx]; + d->active_input = p->pin; + ice_dpll_pin_state_update(pf, p, + ICE_DPLL_PIN_TYPE_INPUT, + NULL); + } + d->prev_input_idx = d->input_idx; + } + + return ret; +} + +/** + * ice_dpll_periodic_work - DPLLs periodic worker + * @work: pointer to kthread_work structure + * + * DPLLs periodic worker is responsible for polling state of dpll. 
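+ * Normally it rearms itself every 500 ms; after a failed update it retries + * after 10 ms, and it stops rescheduling itself altogether once more than + * ICE_CGU_STATE_ACQ_ERR_THRESHOLD errors have accumulated.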
+ * Context: Holds pf->dplls.lock + */ +static void ice_dpll_periodic_work(struct kthread_work *work) +{ + struct ice_dplls *d = container_of(work, struct ice_dplls, work.work); + struct ice_pf *pf = container_of(d, struct ice_pf, dplls); + struct ice_dpll *de = &pf->dplls.eec; + struct ice_dpll *dp = &pf->dplls.pps; + int ret; + + mutex_lock(&pf->dplls.lock); + ret = ice_dpll_update_state(pf, de, false); + if (!ret) + ret = ice_dpll_update_state(pf, dp, false); + if (ret) { + d->cgu_state_acq_err_num++; + /* stop rescheduling this worker */ + if (d->cgu_state_acq_err_num > + ICE_CGU_STATE_ACQ_ERR_THRESHOLD) { + dev_err(ice_pf_to_dev(pf), + "EEC/PPS DPLLs periodic work disabled\n"); + mutex_unlock(&pf->dplls.lock); + return; + } + } + mutex_unlock(&pf->dplls.lock); + ice_dpll_notify_changes(de); + ice_dpll_notify_changes(dp); + + /* Run twice a second or reschedule if update failed */ + kthread_queue_delayed_work(d->kworker, &d->work, + ret ? msecs_to_jiffies(10) : + msecs_to_jiffies(500)); +} + +/** + * ice_dpll_release_pins - release pin resources from dpll subsystem + * @pins: pointer to pins array + * @count: number of pins + * + * Release resources of given pins array in the dpll subsystem. + */ +static void ice_dpll_release_pins(struct ice_dpll_pin *pins, int count) +{ + int i; + + for (i = 0; i < count; i++) + dpll_pin_put(pins[i].pin); +} + +/** + * ice_dpll_get_pins - get pins from dpll subsystem + * @pf: board private structure + * @pins: pointer to pins array + * @start_idx: get starts from this pin idx value + * @count: number of pins + * @clock_id: clock_id of dpll device + * + * Get (allocate) pins in the dpll subsystem and store them in the pin field + * of the given pins array. + * + * Return: + * * 0 - success + * * negative - allocation failure reason + */ +static int +ice_dpll_get_pins(struct ice_pf *pf, struct ice_dpll_pin *pins, + int start_idx, int count, u64 clock_id) +{ + int i, ret; + + for (i = 0; i < count; i++) { + pins[i].pin = dpll_pin_get(clock_id, i + start_idx, THIS_MODULE, + &pins[i].prop); + if (IS_ERR(pins[i].pin)) { + ret = PTR_ERR(pins[i].pin); + goto release_pins; + } + } + + return 0; + +release_pins: + while (--i >= 0) + dpll_pin_put(pins[i].pin); + return ret; +} + +/** + * ice_dpll_unregister_pins - unregister pins from a dpll + * @dpll: dpll device pointer + * @pins: pointer to pins array + * @ops: callback ops registered with the pins + * @count: number of pins + * + * Unregister the pins of a given array from the given dpll device registered + * in the dpll subsystem. + */ +static void +ice_dpll_unregister_pins(struct dpll_device *dpll, struct ice_dpll_pin *pins, + const struct dpll_pin_ops *ops, int count) +{ + int i; + + for (i = 0; i < count; i++) + dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]); +} + +/** + * ice_dpll_register_pins - register pins with a dpll + * @dpll: dpll pointer to register pins with + * @pins: pointer to pins array + * @ops: callback ops registered with the pins + * @count: number of pins + * + * Register the pins of a given array with the given dpll in the dpll subsystem.
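+ * + * On a mid-array failure the already registered pins are unregistered in + * reverse order before returning, so either the whole array is registered or + * none of it is.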
+ * + * Return: + * * 0 - success + * * negative - registration failure reason + */ +static int +ice_dpll_register_pins(struct dpll_device *dpll, struct ice_dpll_pin *pins, + const struct dpll_pin_ops *ops, int count) +{ + int ret, i; + + for (i = 0; i < count; i++) { + ret = dpll_pin_register(dpll, pins[i].pin, ops, &pins[i]); + if (ret) + goto unregister_pins; + } + + return 0; + +unregister_pins: + while (--i >= 0) + dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]); + return ret; +} + +/** + * ice_dpll_deinit_direct_pins - deinitialize direct pins + * @cgu: if cgu is present and controlled by this NIC + * @pins: pointer to pins array + * @count: number of pins + * @ops: callback ops registered with the pins + * @first: dpll device pointer + * @second: dpll device pointer + * + * If the cgu is owned, unregister pins from the given dplls. + * Release pin resources to the dpll subsystem. + */ +static void +ice_dpll_deinit_direct_pins(bool cgu, struct ice_dpll_pin *pins, int count, + const struct dpll_pin_ops *ops, + struct dpll_device *first, + struct dpll_device *second) +{ + if (cgu) { + ice_dpll_unregister_pins(first, pins, ops, count); + ice_dpll_unregister_pins(second, pins, ops, count); + } + ice_dpll_release_pins(pins, count); +} + +/** + * ice_dpll_init_direct_pins - initialize direct pins + * @pf: board private structure + * @cgu: if cgu is present and controlled by this NIC + * @pins: pointer to pins array + * @start_idx: on which index shall allocation start in dpll subsystem + * @count: number of pins + * @ops: callback ops registered with the pins + * @first: dpll device pointer + * @second: dpll device pointer + * + * Allocate directly connected pins of a given array in dpll subsystem. + * If the cgu is owned, register the allocated pins with the given dplls. + * + * Return: + * * 0 - success + * * negative - registration failure reason + */ +static int +ice_dpll_init_direct_pins(struct ice_pf *pf, bool cgu, + struct ice_dpll_pin *pins, int start_idx, int count, + const struct dpll_pin_ops *ops, + struct dpll_device *first, struct dpll_device *second) +{ + int ret; + + ret = ice_dpll_get_pins(pf, pins, start_idx, count, pf->dplls.clock_id); + if (ret) + return ret; + if (cgu) { + ret = ice_dpll_register_pins(first, pins, ops, count); + if (ret) + goto release_pins; + ret = ice_dpll_register_pins(second, pins, ops, count); + if (ret) + goto unregister_first; + } + + return 0; + +unregister_first: + ice_dpll_unregister_pins(first, pins, ops, count); +release_pins: + ice_dpll_release_pins(pins, count); + return ret; +} + +/** + * ice_dpll_deinit_rclk_pin - release rclk pin resources + * @pf: board private structure + * + * Deregister rclk pin from parent pins and release resources in dpll subsystem.
+ */ +static void ice_dpll_deinit_rclk_pin(struct ice_pf *pf) +{ + struct ice_dpll_pin *rclk = &pf->dplls.rclk; + struct ice_vsi *vsi = ice_get_main_vsi(pf); + struct dpll_pin *parent; + int i; + + for (i = 0; i < rclk->num_parents; i++) { + parent = pf->dplls.inputs[rclk->parent_idx[i]].pin; + if (!parent) + continue; + dpll_pin_on_pin_unregister(parent, rclk->pin, + &ice_dpll_rclk_ops, rclk); + } + if (WARN_ON_ONCE(!vsi || !vsi->netdev)) + return; + netdev_dpll_pin_clear(vsi->netdev); + dpll_pin_put(rclk->pin); +} + +/** + * ice_dpll_init_rclk_pins - initialize recovered clock pin + * @pf: board private structure + * @pin: pin to register + * @start_idx: on which index shall allocation start in dpll subsystem + * @ops: callback ops registered with the pins + * + * Allocate resource for recovered clock pin in dpll subsystem. Register the + * pin with the parents it has in the info. Register pin with the pf's main vsi + * netdev. + * + * Return: + * * 0 - success + * * negative - registration failure reason + */ +static int +ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin, + int start_idx, const struct dpll_pin_ops *ops) +{ + struct ice_vsi *vsi = ice_get_main_vsi(pf); + struct dpll_pin *parent; + int ret, i; + + ret = ice_dpll_get_pins(pf, pin, start_idx, ICE_DPLL_RCLK_NUM_PER_PF, + pf->dplls.clock_id); + if (ret) + return ret; + for (i = 0; i < pf->dplls.rclk.num_parents; i++) { + parent = pf->dplls.inputs[pf->dplls.rclk.parent_idx[i]].pin; + if (!parent) { + ret = -ENODEV; + goto unregister_pins; + } + ret = dpll_pin_on_pin_register(parent, pf->dplls.rclk.pin, + ops, &pf->dplls.rclk); + if (ret) + goto unregister_pins; + } + if (WARN_ON((!vsi || !vsi->netdev))) + return -EINVAL; + netdev_dpll_pin_set(vsi->netdev, pf->dplls.rclk.pin); + + return 0; + +unregister_pins: + while (i) { + parent = pf->dplls.inputs[pf->dplls.rclk.parent_idx[--i]].pin; + dpll_pin_on_pin_unregister(parent, pf->dplls.rclk.pin, + &ice_dpll_rclk_ops, &pf->dplls.rclk); + } + ice_dpll_release_pins(pin, ICE_DPLL_RCLK_NUM_PER_PF); + return ret; +} + +/** + * ice_dpll_deinit_pins - deinitialize direct pins + * @pf: board private structure + * @cgu: if cgu is controlled by this pf + * + * If the cgu is owned, unregister directly connected pins from the dplls. + * Release resources of directly connected pins from the dpll subsystem. + */ +static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu) +{ + struct ice_dpll_pin *outputs = pf->dplls.outputs; + struct ice_dpll_pin *inputs = pf->dplls.inputs; + int num_outputs = pf->dplls.num_outputs; + int num_inputs = pf->dplls.num_inputs; + struct ice_dplls *d = &pf->dplls; + struct ice_dpll *de = &d->eec; + struct ice_dpll *dp = &d->pps; + + ice_dpll_deinit_rclk_pin(pf); + if (cgu) { + ice_dpll_unregister_pins(dp->dpll, inputs, &ice_dpll_input_ops, + num_inputs); + ice_dpll_unregister_pins(de->dpll, inputs, &ice_dpll_input_ops, + num_inputs); + } + ice_dpll_release_pins(inputs, num_inputs); + if (cgu) { + ice_dpll_unregister_pins(dp->dpll, outputs, + &ice_dpll_output_ops, num_outputs); + ice_dpll_unregister_pins(de->dpll, outputs, + &ice_dpll_output_ops, num_outputs); + ice_dpll_release_pins(outputs, num_outputs); + } +} + +/** + * ice_dpll_init_pins - init pins and register them with the dplls + * @pf: board private structure + * @cgu: if cgu is present and controlled by this NIC + * + * Initialize the pf's directly connected pins within the pf's dplls in the + * Linux dpll subsystem.
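+ * + * Pin indexes handed to the dpll subsystem are laid out sequentially per + * board (sketch, derived from the function body below): + * + * inputs: 0 ... num_inputs - 1 + * outputs: num_inputs ... num_inputs + num_outputs - 1 + * rclk: num_inputs + num_outputs + pf_id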
+ * + * Return: + * * 0 - success + * * negative - initialization failure reason + */ +static int ice_dpll_init_pins(struct ice_pf *pf, bool cgu) +{ + u32 rclk_idx; + int ret; + + ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.inputs, 0, + pf->dplls.num_inputs, + &ice_dpll_input_ops, + pf->dplls.eec.dpll, pf->dplls.pps.dpll); + if (ret) + return ret; + if (cgu) { + ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.outputs, + pf->dplls.num_inputs, + pf->dplls.num_outputs, + &ice_dpll_output_ops, + pf->dplls.eec.dpll, + pf->dplls.pps.dpll); + if (ret) + goto deinit_inputs; + } + rclk_idx = pf->dplls.num_inputs + pf->dplls.num_outputs + pf->hw.pf_id; + ret = ice_dpll_init_rclk_pins(pf, &pf->dplls.rclk, rclk_idx, + &ice_dpll_rclk_ops); + if (ret) + goto deinit_outputs; + + return 0; +deinit_outputs: + ice_dpll_deinit_direct_pins(cgu, pf->dplls.outputs, + pf->dplls.num_outputs, + &ice_dpll_output_ops, pf->dplls.pps.dpll, + pf->dplls.eec.dpll); +deinit_inputs: + ice_dpll_deinit_direct_pins(cgu, pf->dplls.inputs, pf->dplls.num_inputs, + &ice_dpll_input_ops, pf->dplls.pps.dpll, + pf->dplls.eec.dpll); + return ret; +} + +/** + * ice_dpll_deinit_dpll - deinitialize dpll device + * @pf: board private structure + * @d: pointer to ice_dpll + * @cgu: if cgu is present and controlled by this NIC + * + * If the cgu is owned, unregister the dpll from the dpll subsystem. + * Release resources of dpll device from dpll subsystem. + */ +static void +ice_dpll_deinit_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu) +{ + if (cgu) + dpll_device_unregister(d->dpll, &ice_dpll_ops, d); + dpll_device_put(d->dpll); +} + +/** + * ice_dpll_init_dpll - initialize dpll device in dpll subsystem + * @pf: board private structure + * @d: dpll to be initialized + * @cgu: if cgu is present and controlled by this NIC + * @type: type of dpll being initialized + * + * Allocate a dpll instance for this board in the dpll subsystem; if the cgu + * is controlled by this NIC, register the dpll with the callback ops. + * + * Return: + * * 0 - success + * * negative - initialization failure reason + */ +static int +ice_dpll_init_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu, + enum dpll_type type) +{ + u64 clock_id = pf->dplls.clock_id; + int ret; + + d->dpll = dpll_device_get(clock_id, d->dpll_idx, THIS_MODULE); + if (IS_ERR(d->dpll)) { + ret = PTR_ERR(d->dpll); + dev_err(ice_pf_to_dev(pf), + "dpll_device_get failed (%p) err=%d\n", d, ret); + return ret; + } + d->pf = pf; + if (cgu) { + ice_dpll_update_state(pf, d, true); + ret = dpll_device_register(d->dpll, type, &ice_dpll_ops, d); + if (ret) { + dpll_device_put(d->dpll); + return ret; + } + } + + return 0; +} + +/** + * ice_dpll_deinit_worker - deinitialize dpll kworker + * @pf: board private structure + * + * Stop the dpll's kworker and release its resources. + */ +static void ice_dpll_deinit_worker(struct ice_pf *pf) +{ + struct ice_dplls *d = &pf->dplls; + + kthread_cancel_delayed_work_sync(&d->work); + kthread_destroy_worker(d->kworker); +} + +/** + * ice_dpll_init_worker - Initialize DPLLs periodic worker + * @pf: board private structure + * + * Create and start DPLLs periodic worker. + * + * Context: Shall be called after pf->dplls.lock is initialized.
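+ * The work item is queued with no delay, so the first state poll happens + * right after initialization; subsequent runs are self-scheduled by + * ice_dpll_periodic_work().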
+ * Return: + * * 0 - success + * * negative - create worker failure + */ +static int ice_dpll_init_worker(struct ice_pf *pf) +{ + struct ice_dplls *d = &pf->dplls; + struct kthread_worker *kworker; + + kthread_init_delayed_work(&d->work, ice_dpll_periodic_work); + kworker = kthread_create_worker(0, "ice-dplls-%s", + dev_name(ice_pf_to_dev(pf))); + if (IS_ERR(kworker)) + return PTR_ERR(kworker); + d->kworker = kworker; + d->cgu_state_acq_err_num = 0; + kthread_queue_delayed_work(d->kworker, &d->work, 0); + + return 0; +} + +/** + * ice_dpll_init_info_direct_pins - initializes direct pins info + * @pf: board private structure + * @pin_type: type of pins being initialized + * + * Init information for directly connected pins, cache them in pf's pins + * structures. + * + * Return: + * * 0 - success + * * negative - init failure reason + */ +static int +ice_dpll_init_info_direct_pins(struct ice_pf *pf, + enum ice_dpll_pin_type pin_type) +{ + struct ice_dpll *de = &pf->dplls.eec, *dp = &pf->dplls.pps; + int num_pins, i, ret = -EINVAL; + struct ice_hw *hw = &pf->hw; + struct ice_dpll_pin *pins; + unsigned long caps; + u8 freq_supp_num; + bool input; + + switch (pin_type) { + case ICE_DPLL_PIN_TYPE_INPUT: + pins = pf->dplls.inputs; + num_pins = pf->dplls.num_inputs; + input = true; + break; + case ICE_DPLL_PIN_TYPE_OUTPUT: + pins = pf->dplls.outputs; + num_pins = pf->dplls.num_outputs; + input = false; + break; + default: + return -EINVAL; + } + + for (i = 0; i < num_pins; i++) { + caps = 0; + pins[i].idx = i; + pins[i].prop.board_label = ice_cgu_get_pin_name(hw, i, input); + pins[i].prop.type = ice_cgu_get_pin_type(hw, i, input); + if (input) { + ret = ice_aq_get_cgu_ref_prio(hw, de->dpll_idx, i, + &de->input_prio[i]); + if (ret) + return ret; + ret = ice_aq_get_cgu_ref_prio(hw, dp->dpll_idx, i, + &dp->input_prio[i]); + if (ret) + return ret; + caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE | + DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE); + pins[i].prop.phase_range.min = + pf->dplls.input_phase_adj_max; + pins[i].prop.phase_range.max = + -pf->dplls.input_phase_adj_max; + } else { + pins[i].prop.phase_range.min = + pf->dplls.output_phase_adj_max; + pins[i].prop.phase_range.max = + -pf->dplls.output_phase_adj_max; + ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps); + if (ret) + return ret; + } + pins[i].prop.capabilities = caps; + ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL); + if (ret) + return ret; + pins[i].prop.freq_supported = + ice_cgu_get_pin_freq_supp(hw, i, input, &freq_supp_num); + pins[i].prop.freq_supported_num = freq_supp_num; + pins[i].pf = pf; + } + + return ret; +} + +/** + * ice_dpll_init_info_rclk_pin - initializes rclk pin information + * @pf: board private structure + * + * Init information for rclk pin, cache them in pf->dplls.rclk. + * + * Return: + * * 0 - success + * * negative - init failure reason + */ +static int ice_dpll_init_info_rclk_pin(struct ice_pf *pf) +{ + struct ice_dpll_pin *pin = &pf->dplls.rclk; + + pin->prop.type = DPLL_PIN_TYPE_SYNCE_ETH_PORT; + pin->prop.capabilities |= DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE; + pin->pf = pf; + + return ice_dpll_pin_state_update(pf, pin, + ICE_DPLL_PIN_TYPE_RCLK_INPUT, NULL); +} + +/** + * ice_dpll_init_pins_info - init pins info wrapper + * @pf: board private structure + * @pin_type: type of pins being initialized + * + * Wraps functions for pin initialization. 
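+ * + * Dispatch sketch: ICE_DPLL_PIN_TYPE_INPUT and ICE_DPLL_PIN_TYPE_OUTPUT are + * handled by ice_dpll_init_info_direct_pins(), ICE_DPLL_PIN_TYPE_RCLK_INPUT + * by ice_dpll_init_info_rclk_pin(); any other type yields -EINVAL.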
+ * + * Return: + * * 0 - success + * * negative - init failure reason + */ +static int +ice_dpll_init_pins_info(struct ice_pf *pf, enum ice_dpll_pin_type pin_type) +{ + switch (pin_type) { + case ICE_DPLL_PIN_TYPE_INPUT: + case ICE_DPLL_PIN_TYPE_OUTPUT: + return ice_dpll_init_info_direct_pins(pf, pin_type); + case ICE_DPLL_PIN_TYPE_RCLK_INPUT: + return ice_dpll_init_info_rclk_pin(pf); + default: + return -EINVAL; + } +} + +/** + * ice_dpll_deinit_info - release memory allocated for pins info + * @pf: board private structure + * + * Release memory allocated for pins by ice_dpll_init_info function. + */ +static void ice_dpll_deinit_info(struct ice_pf *pf) +{ + kfree(pf->dplls.inputs); + kfree(pf->dplls.outputs); + kfree(pf->dplls.eec.input_prio); + kfree(pf->dplls.pps.input_prio); +} + +/** + * ice_dpll_init_info - prepare pf's dpll information structure + * @pf: board private structure + * @cgu: if cgu is present and controlled by this NIC + * + * Acquire (from HW) and set basic dpll information (on pf->dplls struct). + * + * Return: + * * 0 - success + * * negative - init failure reason + */ +static int ice_dpll_init_info(struct ice_pf *pf, bool cgu) +{ + struct ice_aqc_get_cgu_abilities abilities; + struct ice_dpll *de = &pf->dplls.eec; + struct ice_dpll *dp = &pf->dplls.pps; + struct ice_dplls *d = &pf->dplls; + struct ice_hw *hw = &pf->hw; + int ret, alloc_size, i; + + d->clock_id = ice_generate_clock_id(pf); + ret = ice_aq_get_cgu_abilities(hw, &abilities); + if (ret) { + dev_err(ice_pf_to_dev(pf), + "err:%d %s failed to read cgu abilities\n", + ret, ice_aq_str(hw->adminq.sq_last_status)); + return ret; + } + + de->dpll_idx = abilities.eec_dpll_idx; + dp->dpll_idx = abilities.pps_dpll_idx; + d->num_inputs = abilities.num_inputs; + d->num_outputs = abilities.num_outputs; + d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj); + d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj); + + alloc_size = sizeof(*d->inputs) * d->num_inputs; + d->inputs = kzalloc(alloc_size, GFP_KERNEL); + if (!d->inputs) + return -ENOMEM; + + alloc_size = sizeof(*de->input_prio) * d->num_inputs; + de->input_prio = kzalloc(alloc_size, GFP_KERNEL); + if (!de->input_prio) + return -ENOMEM; + + dp->input_prio = kzalloc(alloc_size, GFP_KERNEL); + if (!dp->input_prio) + return -ENOMEM; + + ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_INPUT); + if (ret) + goto deinit_info; + + if (cgu) { + alloc_size = sizeof(*d->outputs) * d->num_outputs; + d->outputs = kzalloc(alloc_size, GFP_KERNEL); + if (!d->outputs) { + ret = -ENOMEM; + goto deinit_info; + } + + ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_OUTPUT); + if (ret) + goto deinit_info; + } + + ret = ice_get_cgu_rclk_pin_info(&pf->hw, &d->base_rclk_idx, + &pf->dplls.rclk.num_parents); + if (ret) + return ret; + for (i = 0; i < pf->dplls.rclk.num_parents; i++) + pf->dplls.rclk.parent_idx[i] = d->base_rclk_idx + i; + ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_RCLK_INPUT); + if (ret) + return ret; + de->mode = DPLL_MODE_AUTOMATIC; + dp->mode = DPLL_MODE_AUTOMATIC; + + dev_dbg(ice_pf_to_dev(pf), + "%s - success, inputs:%u, outputs:%u rclk-parents:%u\n", + __func__, d->num_inputs, d->num_outputs, d->rclk.num_parents); + + return 0; + +deinit_info: + dev_err(ice_pf_to_dev(pf), + "%s - fail: d->inputs:%p, de->input_prio:%p, dp->input_prio:%p, d->outputs:%p\n", + __func__, d->inputs, de->input_prio, + dp->input_prio, d->outputs); + ice_dpll_deinit_info(pf); + return ret; +} + +/** + * ice_dpll_deinit - Disable the driver/HW 
support for the dpll subsystem and + * the dpll device. + * @pf: board private structure + * + * Handles the cleanup work required after dpll initialization: unregisters + * the dplls and pins, and frees all resources used for + * handling them. + * + * Context: Destroys pf->dplls.lock mutex. Call only if ICE_FLAG_DPLL was set. + */ +void ice_dpll_deinit(struct ice_pf *pf) +{ + bool cgu = ice_is_feature_supported(pf, ICE_F_CGU); + + clear_bit(ICE_FLAG_DPLL, pf->flags); + if (cgu) + ice_dpll_deinit_worker(pf); + + ice_dpll_deinit_pins(pf, cgu); + ice_dpll_deinit_dpll(pf, &pf->dplls.pps, cgu); + ice_dpll_deinit_dpll(pf, &pf->dplls.eec, cgu); + ice_dpll_deinit_info(pf); + mutex_destroy(&pf->dplls.lock); +} + +/** + * ice_dpll_init - initialize support for dpll subsystem + * @pf: board private structure + * + * Set up the device dplls, register them and the connected pins within the + * Linux dpll subsystem. Allow userspace to obtain the state of the DPLLs and + * to handle DPLL configuration requests. + * + * Context: Initializes pf->dplls.lock mutex. + */ +void ice_dpll_init(struct ice_pf *pf) +{ + bool cgu = ice_is_feature_supported(pf, ICE_F_CGU); + struct ice_dplls *d = &pf->dplls; + int err = 0; + + err = ice_dpll_init_info(pf, cgu); + if (err) + goto err_exit; + err = ice_dpll_init_dpll(pf, &pf->dplls.eec, cgu, DPLL_TYPE_EEC); + if (err) + goto deinit_info; + err = ice_dpll_init_dpll(pf, &pf->dplls.pps, cgu, DPLL_TYPE_PPS); + if (err) + goto deinit_eec; + err = ice_dpll_init_pins(pf, cgu); + if (err) + goto deinit_pps; + mutex_init(&d->lock); + if (cgu) { + err = ice_dpll_init_worker(pf); + if (err) + goto deinit_pins; + } + set_bit(ICE_FLAG_DPLL, pf->flags); + + return; + +deinit_pins: + ice_dpll_deinit_pins(pf, cgu); +deinit_pps: + ice_dpll_deinit_dpll(pf, &pf->dplls.pps, cgu); +deinit_eec: + ice_dpll_deinit_dpll(pf, &pf->dplls.eec, cgu); +deinit_info: + ice_dpll_deinit_info(pf); +err_exit: + mutex_destroy(&d->lock); + dev_warn(ice_pf_to_dev(pf), "DPLLs init failure err:%d\n", err); +} diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.h b/drivers/net/ethernet/intel/ice/ice_dpll.h new file mode 100644 index 000000000000..93172e93995b --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_dpll.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2022, Intel Corporation.
*/ + +#ifndef _ICE_DPLL_H_ +#define _ICE_DPLL_H_ + +#include "ice.h" + +#define ICE_DPLL_RCLK_NUM_MAX 4 + +/** struct ice_dpll_pin - store info about pins + * @pin: dpll pin structure + * @pf: pointer to pf, which has registered the dpll_pin + * @idx: ice pin private idx + * @num_parents: holds number of parent pins + * @parent_idx: holds indexes of parent pins + * @flags: pin flags returned from HW + * @state: state of a pin + * @prop: pin properties + * @freq: current frequency of a pin + * @phase_adjust: current phase adjust value + */ +struct ice_dpll_pin { + struct dpll_pin *pin; + struct ice_pf *pf; + u8 idx; + u8 num_parents; + u8 parent_idx[ICE_DPLL_RCLK_NUM_MAX]; + u8 flags[ICE_DPLL_RCLK_NUM_MAX]; + u8 state[ICE_DPLL_RCLK_NUM_MAX]; + struct dpll_pin_properties prop; + u32 freq; + s32 phase_adjust; +}; + +/** struct ice_dpll - store info required for DPLL control + * @dpll: pointer to dpll dev + * @pf: pointer to pf, which has registered the dpll_device + * @dpll_idx: index of dpll on the NIC + * @input_idx: currently selected input index + * @prev_input_idx: previously selected input index + * @ref_state: state of dpll reference signals + * @eec_mode: eec_mode dpll is configured for + * @phase_offset: phase offset of active pin vs dpll signal + * @prev_phase_offset: previous phase offset of active pin vs dpll signal + * @input_prio: priorities of each input + * @dpll_state: current dpll sync state + * @prev_dpll_state: last dpll sync state + * @mode: current dpll working mode + * @active_input: pointer to active input pin + * @prev_input: pointer to previous active input pin + */ +struct ice_dpll { + struct dpll_device *dpll; + struct ice_pf *pf; + u8 dpll_idx; + u8 input_idx; + u8 prev_input_idx; + u8 ref_state; + u8 eec_mode; + s64 phase_offset; + s64 prev_phase_offset; + u8 *input_prio; + enum dpll_lock_status dpll_state; + enum dpll_lock_status prev_dpll_state; + enum dpll_mode mode; + struct dpll_pin *active_input; + struct dpll_pin *prev_input; +}; + +/** struct ice_dplls - store info required for CCU (clock controlling unit) + * @kworker: periodic worker + * @work: periodic work + * @lock: locks access to configuration of a dpll + * @eec: pointer to EEC dpll dev + * @pps: pointer to PPS dpll dev + * @inputs: input pins pointer + * @outputs: output pins pointer + * @rclk: recovered pins pointer + * @num_inputs: number of input pins available on dpll + * @num_outputs: number of output pins available on dpll + * @cgu_state_acq_err_num: number of errors returned during periodic work + * @base_rclk_idx: idx of first pin used for clock recovery pins + * @clock_id: clock_id of dplls + * @input_phase_adj_max: max phase adjust value for input pins + * @output_phase_adj_max: max phase adjust value for output pins + */ +struct ice_dplls { + struct kthread_worker *kworker; + struct kthread_delayed_work work; + struct mutex lock; + struct ice_dpll eec; + struct ice_dpll pps; + struct ice_dpll_pin *inputs; + struct ice_dpll_pin *outputs; + struct ice_dpll_pin rclk; + u8 num_inputs; + u8 num_outputs; + int cgu_state_acq_err_num; + u8 base_rclk_idx; + u64 clock_id; + s32 input_phase_adj_max; + s32 output_phase_adj_max; +}; + +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +void ice_dpll_init(struct ice_pf *pf); +void ice_dpll_deinit(struct ice_pf *pf); +#else +static inline void ice_dpll_init(struct ice_pf *pf) { } +static inline void ice_dpll_deinit(struct ice_pf *pf) { } +#endif + +#endif diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index 8f232c41a89e..a655d499abfa 100644 ---
a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -4,6 +4,7 @@ #include "ice.h" #include "ice_lib.h" #include "ice_eswitch.h" +#include "ice_eswitch_br.h" #include "ice_fltr.h" #include "ice_repr.h" #include "ice_devlink.h" @@ -83,10 +84,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf) struct ice_vsi_vlan_ops *vlan_ops; bool rule_added = false; - vlan_ops = ice_get_compat_vsi_vlan_ops(ctrl_vsi); - if (vlan_ops->dis_stripping(ctrl_vsi)) - return -ENODEV; - ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx); netif_addr_lock_bh(uplink_netdev); @@ -103,17 +100,28 @@ static int ice_eswitch_setup_env(struct ice_pf *pf) rule_added = true; } + vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi); + if (vlan_ops->dis_rx_filtering(uplink_vsi)) + goto err_dis_rx; + if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override)) goto err_override_uplink; if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override)) goto err_override_control; + if (ice_vsi_update_local_lb(uplink_vsi, true)) + goto err_override_local_lb; + return 0; +err_override_local_lb: + ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override); err_override_control: ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); err_override_uplink: + vlan_ops->ena_rx_filtering(uplink_vsi); +err_dis_rx: if (rule_added) ice_clear_dflt_vsi(uplink_vsi); err_def_rx: @@ -306,6 +314,9 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi) repr->src_vsi = vsi; repr->dst->u.port_info.port_id = vsi->vsi_num; + if (repr->br_port) + repr->br_port->vsi = vsi; + ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof); if (ret) { ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI); @@ -331,6 +342,9 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) np = netdev_priv(netdev); vsi = np->vsi; + if (!vsi || !ice_is_switchdev_running(vsi->back)) + return NETDEV_TX_BUSY; + if (ice_is_reset_in_progress(vsi->back->state) || test_bit(ICE_VF_DIS, vsi->back->state)) return NETDEV_TX_BUSY; @@ -378,9 +392,14 @@ static void ice_eswitch_release_env(struct ice_pf *pf) { struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi; struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + struct ice_vsi_vlan_ops *vlan_ops; + vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi); + + ice_vsi_update_local_lb(uplink_vsi, false); ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override); ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); + vlan_ops->ena_rx_filtering(uplink_vsi); ice_clear_dflt_vsi(uplink_vsi); ice_fltr_add_mac_and_broadcast(uplink_vsi, uplink_vsi->port_info->mac.perm_addr, @@ -455,16 +474,24 @@ static void ice_eswitch_napi_disable(struct ice_pf *pf) */ static int ice_eswitch_enable_switchdev(struct ice_pf *pf) { - struct ice_vsi *ctrl_vsi; + struct ice_vsi *ctrl_vsi, *uplink_vsi; + + uplink_vsi = ice_get_main_vsi(pf); + if (!uplink_vsi) + return -ENODEV; + + if (netif_is_any_bridge_port(uplink_vsi->netdev)) { + dev_err(ice_pf_to_dev(pf), + "Uplink port cannot be a bridge port\n"); + return -EINVAL; + } pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info); if (!pf->switchdev.control_vsi) return -ENODEV; ctrl_vsi = pf->switchdev.control_vsi; - pf->switchdev.uplink_vsi = ice_get_main_vsi(pf); - if (!pf->switchdev.uplink_vsi) - goto err_vsi; + pf->switchdev.uplink_vsi = uplink_vsi; if (ice_eswitch_setup_env(pf)) goto err_vsi; @@ -480,10 +507,15 @@ static int 
ice_eswitch_enable_switchdev(struct ice_pf *pf) if (ice_vsi_open(ctrl_vsi)) goto err_setup_reprs; + if (ice_eswitch_br_offloads_init(pf)) + goto err_br_offloads; + ice_eswitch_napi_enable(pf); return 0; +err_br_offloads: + ice_vsi_close(ctrl_vsi); err_setup_reprs: ice_repr_rem_from_all_vfs(pf); err_repr_add: @@ -502,8 +534,8 @@ static void ice_eswitch_disable_switchdev(struct ice_pf *pf) struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; ice_eswitch_napi_disable(pf); + ice_eswitch_br_offloads_deinit(pf); ice_eswitch_release_env(pf); - ice_rem_adv_rule_for_vsi(&pf->hw, ctrl_vsi->idx); ice_eswitch_release_reprs(pf, ctrl_vsi); ice_vsi_release(ctrl_vsi); ice_repr_rem_from_all_vfs(pf); diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c new file mode 100644 index 000000000000..6ae0269bdf73 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c @@ -0,0 +1,1346 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023, Intel Corporation. */ + +#include "ice.h" +#include "ice_eswitch_br.h" +#include "ice_repr.h" +#include "ice_switch.h" +#include "ice_vlan.h" +#include "ice_vf_vsi_vlan_ops.h" +#include "ice_trace.h" + +#define ICE_ESW_BRIDGE_UPDATE_INTERVAL msecs_to_jiffies(1000) + +static const struct rhashtable_params ice_fdb_ht_params = { + .key_offset = offsetof(struct ice_esw_br_fdb_entry, data), + .key_len = sizeof(struct ice_esw_br_fdb_data), + .head_offset = offsetof(struct ice_esw_br_fdb_entry, ht_node), + .automatic_shrinking = true, +}; + +static bool ice_eswitch_br_is_dev_valid(const struct net_device *dev) +{ + /* Accept only PF netdev, PRs and LAG */ + return ice_is_port_repr_netdev(dev) || netif_is_ice(dev) || + netif_is_lag_master(dev); +} + +static struct net_device * +ice_eswitch_br_get_uplink_from_lag(struct net_device *lag_dev) +{ + struct net_device *lower; + struct list_head *iter; + + netdev_for_each_lower_dev(lag_dev, lower, iter) { + if (netif_is_ice(lower)) + return lower; + } + + return NULL; +} + +static struct ice_esw_br_port * +ice_eswitch_br_netdev_to_port(struct net_device *dev) +{ + if (ice_is_port_repr_netdev(dev)) { + struct ice_repr *repr = ice_netdev_to_repr(dev); + + return repr->br_port; + } else if (netif_is_ice(dev) || netif_is_lag_master(dev)) { + struct net_device *ice_dev; + struct ice_pf *pf; + + if (netif_is_lag_master(dev)) + ice_dev = ice_eswitch_br_get_uplink_from_lag(dev); + else + ice_dev = dev; + + if (!ice_dev) + return NULL; + + pf = ice_netdev_to_pf(ice_dev); + + return pf->br_port; + } + + return NULL; +} + +static void +ice_eswitch_br_ingress_rule_setup(struct ice_adv_rule_info *rule_info, + u8 pf_id, u16 vf_vsi_idx) +{ + rule_info->sw_act.vsi_handle = vf_vsi_idx; + rule_info->sw_act.flag |= ICE_FLTR_RX; + rule_info->sw_act.src = pf_id; + rule_info->priority = 2; +} + +static void +ice_eswitch_br_egress_rule_setup(struct ice_adv_rule_info *rule_info, + u16 pf_vsi_idx) +{ + rule_info->sw_act.vsi_handle = pf_vsi_idx; + rule_info->sw_act.flag |= ICE_FLTR_TX; + rule_info->flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE; + rule_info->flags_info.act_valid = true; + rule_info->priority = 2; +} + +static int +ice_eswitch_br_rule_delete(struct ice_hw *hw, struct ice_rule_query_data *rule) +{ + int err; + + if (!rule) + return -EINVAL; + + err = ice_rem_adv_rule_by_id(hw, rule); + kfree(rule); + + return err; +} + +static u16 +ice_eswitch_br_get_lkups_cnt(u16 vid) +{ + return ice_eswitch_br_is_vid_valid(vid) ? 
2 : 1; +} + +static void +ice_eswitch_br_add_vlan_lkup(struct ice_adv_lkup_elem *list, u16 vid) +{ + if (ice_eswitch_br_is_vid_valid(vid)) { + list[1].type = ICE_VLAN_OFOS; + list[1].h_u.vlan_hdr.vlan = cpu_to_be16(vid & VLAN_VID_MASK); + list[1].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF); + } +} + +static struct ice_rule_query_data * +ice_eswitch_br_fwd_rule_create(struct ice_hw *hw, int vsi_idx, int port_type, + const unsigned char *mac, u16 vid) +{ + struct ice_adv_rule_info rule_info = { 0 }; + struct ice_rule_query_data *rule; + struct ice_adv_lkup_elem *list; + u16 lkups_cnt; + int err; + + lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid); + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return ERR_PTR(-ENOMEM); + + list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); + if (!list) { + err = -ENOMEM; + goto err_list_alloc; + } + + switch (port_type) { + case ICE_ESWITCH_BR_UPLINK_PORT: + ice_eswitch_br_egress_rule_setup(&rule_info, vsi_idx); + break; + case ICE_ESWITCH_BR_VF_REPR_PORT: + ice_eswitch_br_ingress_rule_setup(&rule_info, hw->pf_id, + vsi_idx); + break; + default: + err = -EINVAL; + goto err_add_rule; + } + + list[0].type = ICE_MAC_OFOS; + ether_addr_copy(list[0].h_u.eth_hdr.dst_addr, mac); + eth_broadcast_addr(list[0].m_u.eth_hdr.dst_addr); + + ice_eswitch_br_add_vlan_lkup(list, vid); + + rule_info.need_pass_l2 = true; + + rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI; + + err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule); + if (err) + goto err_add_rule; + + kfree(list); + + return rule; + +err_add_rule: + kfree(list); +err_list_alloc: + kfree(rule); + + return ERR_PTR(err); +} + +static struct ice_rule_query_data * +ice_eswitch_br_guard_rule_create(struct ice_hw *hw, u16 vsi_idx, + const unsigned char *mac, u16 vid) +{ + struct ice_adv_rule_info rule_info = { 0 }; + struct ice_rule_query_data *rule; + struct ice_adv_lkup_elem *list; + int err = -ENOMEM; + u16 lkups_cnt; + + lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid); + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + goto err_exit; + + list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); + if (!list) + goto err_list_alloc; + + list[0].type = ICE_MAC_OFOS; + ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac); + eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr); + + ice_eswitch_br_add_vlan_lkup(list, vid); + + rule_info.allow_pass_l2 = true; + rule_info.sw_act.vsi_handle = vsi_idx; + rule_info.sw_act.fltr_act = ICE_NOP; + rule_info.priority = 2; + + err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule); + if (err) + goto err_add_rule; + + kfree(list); + + return rule; + +err_add_rule: + kfree(list); +err_list_alloc: + kfree(rule); +err_exit: + return ERR_PTR(err); +} + +static struct ice_esw_br_flow * +ice_eswitch_br_flow_create(struct device *dev, struct ice_hw *hw, int vsi_idx, + int port_type, const unsigned char *mac, u16 vid) +{ + struct ice_rule_query_data *fwd_rule, *guard_rule; + struct ice_esw_br_flow *flow; + int err; + + flow = kzalloc(sizeof(*flow), GFP_KERNEL); + if (!flow) + return ERR_PTR(-ENOMEM); + + fwd_rule = ice_eswitch_br_fwd_rule_create(hw, vsi_idx, port_type, mac, + vid); + err = PTR_ERR_OR_ZERO(fwd_rule); + if (err) { + dev_err(dev, "Failed to create eswitch bridge %sgress forward rule, err: %d\n", + port_type == ICE_ESWITCH_BR_UPLINK_PORT ? 
"e" : "in", + err); + goto err_fwd_rule; + } + + guard_rule = ice_eswitch_br_guard_rule_create(hw, vsi_idx, mac, vid); + err = PTR_ERR_OR_ZERO(guard_rule); + if (err) { + dev_err(dev, "Failed to create eswitch bridge %sgress guard rule, err: %d\n", + port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in", + err); + goto err_guard_rule; + } + + flow->fwd_rule = fwd_rule; + flow->guard_rule = guard_rule; + + return flow; + +err_guard_rule: + ice_eswitch_br_rule_delete(hw, fwd_rule); +err_fwd_rule: + kfree(flow); + + return ERR_PTR(err); +} + +static struct ice_esw_br_fdb_entry * +ice_eswitch_br_fdb_find(struct ice_esw_br *bridge, const unsigned char *mac, + u16 vid) +{ + struct ice_esw_br_fdb_data data = { + .vid = vid, + }; + + ether_addr_copy(data.addr, mac); + return rhashtable_lookup_fast(&bridge->fdb_ht, &data, + ice_fdb_ht_params); +} + +static void +ice_eswitch_br_flow_delete(struct ice_pf *pf, struct ice_esw_br_flow *flow) +{ + struct device *dev = ice_pf_to_dev(pf); + int err; + + err = ice_eswitch_br_rule_delete(&pf->hw, flow->fwd_rule); + if (err) + dev_err(dev, "Failed to delete FDB forward rule, err: %d\n", + err); + + err = ice_eswitch_br_rule_delete(&pf->hw, flow->guard_rule); + if (err) + dev_err(dev, "Failed to delete FDB guard rule, err: %d\n", + err); + + kfree(flow); +} + +static struct ice_esw_br_vlan * +ice_esw_br_port_vlan_lookup(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid) +{ + struct ice_pf *pf = bridge->br_offloads->pf; + struct device *dev = ice_pf_to_dev(pf); + struct ice_esw_br_port *port; + struct ice_esw_br_vlan *vlan; + + port = xa_load(&bridge->ports, vsi_idx); + if (!port) { + dev_info(dev, "Bridge port lookup failed (vsi=%u)\n", vsi_idx); + return ERR_PTR(-EINVAL); + } + + vlan = xa_load(&port->vlans, vid); + if (!vlan) { + dev_info(dev, "Bridge port vlan metadata lookup failed (vsi=%u)\n", + vsi_idx); + return ERR_PTR(-EINVAL); + } + + return vlan; +} + +static void +ice_eswitch_br_fdb_entry_delete(struct ice_esw_br *bridge, + struct ice_esw_br_fdb_entry *fdb_entry) +{ + struct ice_pf *pf = bridge->br_offloads->pf; + + rhashtable_remove_fast(&bridge->fdb_ht, &fdb_entry->ht_node, + ice_fdb_ht_params); + list_del(&fdb_entry->list); + + ice_eswitch_br_flow_delete(pf, fdb_entry->flow); + + kfree(fdb_entry); +} + +static void +ice_eswitch_br_fdb_offload_notify(struct net_device *dev, + const unsigned char *mac, u16 vid, + unsigned long val) +{ + struct switchdev_notifier_fdb_info fdb_info = { + .addr = mac, + .vid = vid, + .offloaded = true, + }; + + call_switchdev_notifiers(val, dev, &fdb_info.info, NULL); +} + +static void +ice_eswitch_br_fdb_entry_notify_and_cleanup(struct ice_esw_br *bridge, + struct ice_esw_br_fdb_entry *entry) +{ + if (!(entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER)) + ice_eswitch_br_fdb_offload_notify(entry->dev, entry->data.addr, + entry->data.vid, + SWITCHDEV_FDB_DEL_TO_BRIDGE); + ice_eswitch_br_fdb_entry_delete(bridge, entry); +} + +static void +ice_eswitch_br_fdb_entry_find_and_delete(struct ice_esw_br *bridge, + const unsigned char *mac, u16 vid) +{ + struct ice_pf *pf = bridge->br_offloads->pf; + struct ice_esw_br_fdb_entry *fdb_entry; + struct device *dev = ice_pf_to_dev(pf); + + fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid); + if (!fdb_entry) { + dev_err(dev, "FDB entry with mac: %pM and vid: %u not found\n", + mac, vid); + return; + } + + trace_ice_eswitch_br_fdb_entry_find_and_delete(fdb_entry); + ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry); +} + +static void 
+ice_eswitch_br_fdb_entry_create(struct net_device *netdev, + struct ice_esw_br_port *br_port, + bool added_by_user, + const unsigned char *mac, u16 vid) +{ + struct ice_esw_br *bridge = br_port->bridge; + struct ice_pf *pf = bridge->br_offloads->pf; + struct device *dev = ice_pf_to_dev(pf); + struct ice_esw_br_fdb_entry *fdb_entry; + struct ice_esw_br_flow *flow; + struct ice_esw_br_vlan *vlan; + struct ice_hw *hw = &pf->hw; + unsigned long event; + int err; + + /* untagged filtering is not yet supported */ + if (!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING) && vid) + return; + + if ((bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING)) { + vlan = ice_esw_br_port_vlan_lookup(bridge, br_port->vsi_idx, + vid); + if (IS_ERR(vlan)) { + dev_err(dev, "Failed to find vlan lookup, err: %ld\n", + PTR_ERR(vlan)); + return; + } + } + + fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid); + if (fdb_entry) + ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry); + + fdb_entry = kzalloc(sizeof(*fdb_entry), GFP_KERNEL); + if (!fdb_entry) { + err = -ENOMEM; + goto err_exit; + } + + flow = ice_eswitch_br_flow_create(dev, hw, br_port->vsi_idx, + br_port->type, mac, vid); + if (IS_ERR(flow)) { + err = PTR_ERR(flow); + goto err_add_flow; + } + + ether_addr_copy(fdb_entry->data.addr, mac); + fdb_entry->data.vid = vid; + fdb_entry->br_port = br_port; + fdb_entry->flow = flow; + fdb_entry->dev = netdev; + fdb_entry->last_use = jiffies; + event = SWITCHDEV_FDB_ADD_TO_BRIDGE; + + if (added_by_user) { + fdb_entry->flags |= ICE_ESWITCH_BR_FDB_ADDED_BY_USER; + event = SWITCHDEV_FDB_OFFLOADED; + } + + err = rhashtable_insert_fast(&bridge->fdb_ht, &fdb_entry->ht_node, + ice_fdb_ht_params); + if (err) + goto err_fdb_insert; + + list_add(&fdb_entry->list, &bridge->fdb_list); + trace_ice_eswitch_br_fdb_entry_create(fdb_entry); + + ice_eswitch_br_fdb_offload_notify(netdev, mac, vid, event); + + return; + +err_fdb_insert: + ice_eswitch_br_flow_delete(pf, flow); +err_add_flow: + kfree(fdb_entry); +err_exit: + dev_err(dev, "Failed to create fdb entry, err: %d\n", err); +} + +static void +ice_eswitch_br_fdb_work_dealloc(struct ice_esw_br_fdb_work *fdb_work) +{ + kfree(fdb_work->fdb_info.addr); + kfree(fdb_work); +} + +static void +ice_eswitch_br_fdb_event_work(struct work_struct *work) +{ + struct ice_esw_br_fdb_work *fdb_work = ice_work_to_fdb_work(work); + bool added_by_user = fdb_work->fdb_info.added_by_user; + const unsigned char *mac = fdb_work->fdb_info.addr; + u16 vid = fdb_work->fdb_info.vid; + struct ice_esw_br_port *br_port; + + rtnl_lock(); + + br_port = ice_eswitch_br_netdev_to_port(fdb_work->dev); + if (!br_port) + goto err_exit; + + switch (fdb_work->event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + ice_eswitch_br_fdb_entry_create(fdb_work->dev, br_port, + added_by_user, mac, vid); + break; + case SWITCHDEV_FDB_DEL_TO_DEVICE: + ice_eswitch_br_fdb_entry_find_and_delete(br_port->bridge, + mac, vid); + break; + default: + goto err_exit; + } + +err_exit: + rtnl_unlock(); + dev_put(fdb_work->dev); + ice_eswitch_br_fdb_work_dealloc(fdb_work); +} + +static struct ice_esw_br_fdb_work * +ice_eswitch_br_fdb_work_alloc(struct switchdev_notifier_fdb_info *fdb_info, + struct net_device *dev, + unsigned long event) +{ + struct ice_esw_br_fdb_work *work; + unsigned char *mac; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return ERR_PTR(-ENOMEM); + + INIT_WORK(&work->work, ice_eswitch_br_fdb_event_work); + memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info)); + + mac = kzalloc(ETH_ALEN, GFP_ATOMIC); + 
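+ /* the notifier payload is only valid for the duration of the atomic
+ * notifier call, so deep-copy the MAC for the deferred work;
+ * GFP_ATOMIC matches that calling context
+ */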
if (!mac) { + kfree(work); + return ERR_PTR(-ENOMEM); + } + + ether_addr_copy(mac, fdb_info->addr); + work->fdb_info.addr = mac; + work->event = event; + work->dev = dev; + + return work; +} + +static int +ice_eswitch_br_switchdev_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + struct switchdev_notifier_fdb_info *fdb_info; + struct switchdev_notifier_info *info = ptr; + struct ice_esw_br_offloads *br_offloads; + struct ice_esw_br_fdb_work *work; + struct netlink_ext_ack *extack; + struct net_device *upper; + + br_offloads = ice_nb_to_br_offloads(nb, switchdev_nb); + extack = switchdev_notifier_info_to_extack(ptr); + + upper = netdev_master_upper_dev_get_rcu(dev); + if (!upper) + return NOTIFY_DONE; + + if (!netif_is_bridge_master(upper)) + return NOTIFY_DONE; + + if (!ice_eswitch_br_is_dev_valid(dev)) + return NOTIFY_DONE; + + if (!ice_eswitch_br_netdev_to_port(dev)) + return NOTIFY_DONE; + + switch (event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + case SWITCHDEV_FDB_DEL_TO_DEVICE: + fdb_info = container_of(info, typeof(*fdb_info), info); + + work = ice_eswitch_br_fdb_work_alloc(fdb_info, dev, event); + if (IS_ERR(work)) { + NL_SET_ERR_MSG_MOD(extack, "Failed to init switchdev fdb work"); + return notifier_from_errno(PTR_ERR(work)); + } + dev_hold(dev); + + queue_work(br_offloads->wq, &work->work); + break; + default: + break; + } + return NOTIFY_DONE; +} + +static void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge) +{ + struct ice_esw_br_fdb_entry *entry, *tmp; + + list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) + ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry); +} + +static void +ice_eswitch_br_vlan_filtering_set(struct ice_esw_br *bridge, bool enable) +{ + if (enable == !!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING)) + return; + + ice_eswitch_br_fdb_flush(bridge); + if (enable) + bridge->flags |= ICE_ESWITCH_BR_VLAN_FILTERING; + else + bridge->flags &= ~ICE_ESWITCH_BR_VLAN_FILTERING; +} + +static void +ice_eswitch_br_clear_pvid(struct ice_esw_br_port *port) +{ + struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, port->pvid, 0); + struct ice_vsi_vlan_ops *vlan_ops; + + vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi); + + vlan_ops->del_vlan(port->vsi, &port_vlan); + vlan_ops->clear_port_vlan(port->vsi); + + ice_vf_vsi_disable_port_vlan(port->vsi); + + port->pvid = 0; +} + +static void +ice_eswitch_br_vlan_cleanup(struct ice_esw_br_port *port, + struct ice_esw_br_vlan *vlan) +{ + struct ice_esw_br_fdb_entry *fdb_entry, *tmp; + struct ice_esw_br *bridge = port->bridge; + + trace_ice_eswitch_br_vlan_cleanup(vlan); + + list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) { + if (vlan->vid == fdb_entry->data.vid) + ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry); + } + + xa_erase(&port->vlans, vlan->vid); + if (port->pvid == vlan->vid) + ice_eswitch_br_clear_pvid(port); + kfree(vlan); +} + +static void ice_eswitch_br_port_vlans_flush(struct ice_esw_br_port *port) +{ + struct ice_esw_br_vlan *vlan; + unsigned long index; + + xa_for_each(&port->vlans, index, vlan) + ice_eswitch_br_vlan_cleanup(port, vlan); +} + +static int +ice_eswitch_br_set_pvid(struct ice_esw_br_port *port, + struct ice_esw_br_vlan *vlan) +{ + struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, vlan->vid, 0); + struct device *dev = ice_pf_to_dev(port->vsi->back); + struct ice_vsi_vlan_ops *vlan_ops; + int err; + + if (port->pvid == vlan->vid || vlan->vid == 1) + return 0; + + /* Setting port 
vlan on uplink isn't supported by hw */ + if (port->type == ICE_ESWITCH_BR_UPLINK_PORT) + return -EOPNOTSUPP; + + if (port->pvid) { + dev_info(dev, + "Port VLAN (vsi=%u, vid=%u) already exists on the port, remove it before adding new one\n", + port->vsi_idx, port->pvid); + return -EEXIST; + } + + ice_vf_vsi_enable_port_vlan(port->vsi); + + vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi); + err = vlan_ops->set_port_vlan(port->vsi, &port_vlan); + if (err) + return err; + + err = vlan_ops->add_vlan(port->vsi, &port_vlan); + if (err) + return err; + + ice_eswitch_br_port_vlans_flush(port); + port->pvid = vlan->vid; + + return 0; +} + +static struct ice_esw_br_vlan * +ice_eswitch_br_vlan_create(u16 vid, u16 flags, struct ice_esw_br_port *port) +{ + struct device *dev = ice_pf_to_dev(port->vsi->back); + struct ice_esw_br_vlan *vlan; + int err; + + vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); + if (!vlan) + return ERR_PTR(-ENOMEM); + + vlan->vid = vid; + vlan->flags = flags; + if ((flags & BRIDGE_VLAN_INFO_PVID) && + (flags & BRIDGE_VLAN_INFO_UNTAGGED)) { + err = ice_eswitch_br_set_pvid(port, vlan); + if (err) + goto err_set_pvid; + } else if ((flags & BRIDGE_VLAN_INFO_PVID) || + (flags & BRIDGE_VLAN_INFO_UNTAGGED)) { + dev_info(dev, "VLAN push and pop are supported only simultaneously\n"); + err = -EOPNOTSUPP; + goto err_set_pvid; + } + + err = xa_insert(&port->vlans, vlan->vid, vlan, GFP_KERNEL); + if (err) + goto err_insert; + + trace_ice_eswitch_br_vlan_create(vlan); + + return vlan; + +err_insert: + if (port->pvid) + ice_eswitch_br_clear_pvid(port); +err_set_pvid: + kfree(vlan); + return ERR_PTR(err); +} + +static int +ice_eswitch_br_port_vlan_add(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid, + u16 flags, struct netlink_ext_ack *extack) +{ + struct ice_esw_br_port *port; + struct ice_esw_br_vlan *vlan; + + port = xa_load(&bridge->ports, vsi_idx); + if (!port) + return -EINVAL; + + if (port->pvid) { + dev_info(ice_pf_to_dev(port->vsi->back), + "Port VLAN (vsi=%u, vid=%d) exists on the port, remove it to add trunk VLANs\n", + port->vsi_idx, port->pvid); + return -EEXIST; + } + + vlan = xa_load(&port->vlans, vid); + if (vlan) { + if (vlan->flags == flags) + return 0; + + ice_eswitch_br_vlan_cleanup(port, vlan); + } + + vlan = ice_eswitch_br_vlan_create(vid, flags, port); + if (IS_ERR(vlan)) { + NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to create VLAN entry, vid: %u, vsi: %u", + vid, vsi_idx); + return PTR_ERR(vlan); + } + + return 0; +} + +static void +ice_eswitch_br_port_vlan_del(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid) +{ + struct ice_esw_br_port *port; + struct ice_esw_br_vlan *vlan; + + port = xa_load(&bridge->ports, vsi_idx); + if (!port) + return; + + vlan = xa_load(&port->vlans, vid); + if (!vlan) + return; + + ice_eswitch_br_vlan_cleanup(port, vlan); +} + +static int +ice_eswitch_br_port_obj_add(struct net_device *netdev, const void *ctx, + const struct switchdev_obj *obj, + struct netlink_ext_ack *extack) +{ + struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev); + struct switchdev_obj_port_vlan *vlan; + int err; + + if (!br_port) + return -EINVAL; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); + err = ice_eswitch_br_port_vlan_add(br_port->bridge, + br_port->vsi_idx, vlan->vid, + vlan->flags, extack); + return err; + default: + return -EOPNOTSUPP; + } +} + +static int +ice_eswitch_br_port_obj_del(struct net_device *netdev, const void *ctx, + const struct switchdev_obj *obj) +{ + struct ice_esw_br_port *br_port 
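+ /* only VLAN port objects are offloaded; deleting one tears down the
+ * per-port VLAN state and any FDB entries for that VID via
+ * ice_eswitch_br_port_vlan_del()
+ */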
= ice_eswitch_br_netdev_to_port(netdev); + struct switchdev_obj_port_vlan *vlan; + + if (!br_port) + return -EINVAL; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); + ice_eswitch_br_port_vlan_del(br_port->bridge, br_port->vsi_idx, + vlan->vid); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int +ice_eswitch_br_port_obj_attr_set(struct net_device *netdev, const void *ctx, + const struct switchdev_attr *attr, + struct netlink_ext_ack *extack) +{ + struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev); + + if (!br_port) + return -EINVAL; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: + ice_eswitch_br_vlan_filtering_set(br_port->bridge, + attr->u.vlan_filtering); + return 0; + case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: + br_port->bridge->ageing_time = + clock_t_to_jiffies(attr->u.ageing_time); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int +ice_eswitch_br_event_blocking(struct notifier_block *nb, unsigned long event, + void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err; + + switch (event) { + case SWITCHDEV_PORT_OBJ_ADD: + err = switchdev_handle_port_obj_add(dev, ptr, + ice_eswitch_br_is_dev_valid, + ice_eswitch_br_port_obj_add); + break; + case SWITCHDEV_PORT_OBJ_DEL: + err = switchdev_handle_port_obj_del(dev, ptr, + ice_eswitch_br_is_dev_valid, + ice_eswitch_br_port_obj_del); + break; + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + ice_eswitch_br_is_dev_valid, + ice_eswitch_br_port_obj_attr_set); + break; + default: + err = 0; + } + + return notifier_from_errno(err); +} + +static void +ice_eswitch_br_port_deinit(struct ice_esw_br *bridge, + struct ice_esw_br_port *br_port) +{ + struct ice_esw_br_fdb_entry *fdb_entry, *tmp; + struct ice_vsi *vsi = br_port->vsi; + + list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) { + if (br_port == fdb_entry->br_port) + ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry); + } + + if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) + vsi->back->br_port = NULL; + else if (vsi->vf && vsi->vf->repr) + vsi->vf->repr->br_port = NULL; + + xa_erase(&bridge->ports, br_port->vsi_idx); + ice_eswitch_br_port_vlans_flush(br_port); + kfree(br_port); +} + +static struct ice_esw_br_port * +ice_eswitch_br_port_init(struct ice_esw_br *bridge) +{ + struct ice_esw_br_port *br_port; + + br_port = kzalloc(sizeof(*br_port), GFP_KERNEL); + if (!br_port) + return ERR_PTR(-ENOMEM); + + xa_init(&br_port->vlans); + + br_port->bridge = bridge; + + return br_port; +} + +static int +ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge, + struct ice_repr *repr) +{ + struct ice_esw_br_port *br_port; + int err; + + br_port = ice_eswitch_br_port_init(bridge); + if (IS_ERR(br_port)) + return PTR_ERR(br_port); + + br_port->vsi = repr->src_vsi; + br_port->vsi_idx = br_port->vsi->idx; + br_port->type = ICE_ESWITCH_BR_VF_REPR_PORT; + repr->br_port = br_port; + + err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL); + if (err) { + ice_eswitch_br_port_deinit(bridge, br_port); + return err; + } + + return 0; +} + +static int +ice_eswitch_br_uplink_port_init(struct ice_esw_br *bridge, struct ice_pf *pf) +{ + struct ice_vsi *vsi = pf->switchdev.uplink_vsi; + struct ice_esw_br_port *br_port; + int err; + + br_port = ice_eswitch_br_port_init(bridge); + if (IS_ERR(br_port)) + return PTR_ERR(br_port); + + br_port->vsi = vsi; + br_port->vsi_idx = br_port->vsi->idx; 
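+ /* record the uplink port on the PF (pf->br_port), the counterpart of
+ * repr->br_port on the VF representor path, then index the port by
+ * its VSI in the bridge's xarray
+ */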
+ br_port->type = ICE_ESWITCH_BR_UPLINK_PORT; + pf->br_port = br_port; + + err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL); + if (err) { + ice_eswitch_br_port_deinit(bridge, br_port); + return err; + } + + return 0; +} + +static void +ice_eswitch_br_ports_flush(struct ice_esw_br *bridge) +{ + struct ice_esw_br_port *port; + unsigned long i; + + xa_for_each(&bridge->ports, i, port) + ice_eswitch_br_port_deinit(bridge, port); +} + +static void +ice_eswitch_br_deinit(struct ice_esw_br_offloads *br_offloads, + struct ice_esw_br *bridge) +{ + if (!bridge) + return; + + /* Cleanup all the ports that were added asynchronously + * through NETDEV_CHANGEUPPER event. + */ + ice_eswitch_br_ports_flush(bridge); + WARN_ON(!xa_empty(&bridge->ports)); + xa_destroy(&bridge->ports); + rhashtable_destroy(&bridge->fdb_ht); + + br_offloads->bridge = NULL; + kfree(bridge); +} + +static struct ice_esw_br * +ice_eswitch_br_init(struct ice_esw_br_offloads *br_offloads, int ifindex) +{ + struct ice_esw_br *bridge; + int err; + + bridge = kzalloc(sizeof(*bridge), GFP_KERNEL); + if (!bridge) + return ERR_PTR(-ENOMEM); + + err = rhashtable_init(&bridge->fdb_ht, &ice_fdb_ht_params); + if (err) { + kfree(bridge); + return ERR_PTR(err); + } + + INIT_LIST_HEAD(&bridge->fdb_list); + bridge->br_offloads = br_offloads; + bridge->ifindex = ifindex; + bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME); + xa_init(&bridge->ports); + br_offloads->bridge = bridge; + + return bridge; +} + +static struct ice_esw_br * +ice_eswitch_br_get(struct ice_esw_br_offloads *br_offloads, int ifindex, + struct netlink_ext_ack *extack) +{ + struct ice_esw_br *bridge = br_offloads->bridge; + + if (bridge) { + if (bridge->ifindex != ifindex) { + NL_SET_ERR_MSG_MOD(extack, + "Only one bridge is supported per eswitch"); + return ERR_PTR(-EOPNOTSUPP); + } + return bridge; + } + + /* Create the bridge if it doesn't exist yet */ + bridge = ice_eswitch_br_init(br_offloads, ifindex); + if (IS_ERR(bridge)) + NL_SET_ERR_MSG_MOD(extack, "Failed to init the bridge"); + + return bridge; +} + +static void +ice_eswitch_br_verify_deinit(struct ice_esw_br_offloads *br_offloads, + struct ice_esw_br *bridge) +{ + /* Remove the bridge if it exists and there are no ports left */ + if (!bridge || !xa_empty(&bridge->ports)) + return; + + ice_eswitch_br_deinit(br_offloads, bridge); +} + +static int +ice_eswitch_br_port_unlink(struct ice_esw_br_offloads *br_offloads, + struct net_device *dev, int ifindex, + struct netlink_ext_ack *extack) +{ + struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(dev); + struct ice_esw_br *bridge; + + if (!br_port) { + NL_SET_ERR_MSG_MOD(extack, + "Port representor is not attached to any bridge"); + return -EINVAL; + } + + if (br_port->bridge->ifindex != ifindex) { + NL_SET_ERR_MSG_MOD(extack, + "Port representor is attached to another bridge"); + return -EINVAL; + } + + bridge = br_port->bridge; + + trace_ice_eswitch_br_port_unlink(br_port); + ice_eswitch_br_port_deinit(br_port->bridge, br_port); + ice_eswitch_br_verify_deinit(br_offloads, bridge); + + return 0; +} + +static int +ice_eswitch_br_port_link(struct ice_esw_br_offloads *br_offloads, + struct net_device *dev, int ifindex, + struct netlink_ext_ack *extack) +{ + struct ice_esw_br *bridge; + int err; + + if (ice_eswitch_br_netdev_to_port(dev)) { + NL_SET_ERR_MSG_MOD(extack, + "Port is already attached to the bridge"); + return -EINVAL; + } + + bridge = ice_eswitch_br_get(br_offloads, ifindex, extack); + if (IS_ERR(bridge)) + return 
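+ /* bridge lookup/creation failed: a second bridge per eswitch is
+ * rejected with -EOPNOTSUPP, and creation can fail on allocation
+ */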
PTR_ERR(bridge); + + if (ice_is_port_repr_netdev(dev)) { + struct ice_repr *repr = ice_netdev_to_repr(dev); + + err = ice_eswitch_br_vf_repr_port_init(bridge, repr); + trace_ice_eswitch_br_port_link(repr->br_port); + } else { + struct net_device *ice_dev; + struct ice_pf *pf; + + if (netif_is_lag_master(dev)) + ice_dev = ice_eswitch_br_get_uplink_from_lag(dev); + else + ice_dev = dev; + + if (!ice_dev) + return 0; + + pf = ice_netdev_to_pf(ice_dev); + + err = ice_eswitch_br_uplink_port_init(bridge, pf); + trace_ice_eswitch_br_port_link(pf->br_port); + } + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to init bridge port"); + goto err_port_init; + } + + return 0; + +err_port_init: + ice_eswitch_br_verify_deinit(br_offloads, bridge); + return err; +} + +static int +ice_eswitch_br_port_changeupper(struct notifier_block *nb, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct netdev_notifier_changeupper_info *info = ptr; + struct ice_esw_br_offloads *br_offloads; + struct netlink_ext_ack *extack; + struct net_device *upper; + + br_offloads = ice_nb_to_br_offloads(nb, netdev_nb); + + if (!ice_eswitch_br_is_dev_valid(dev)) + return 0; + + upper = info->upper_dev; + if (!netif_is_bridge_master(upper)) + return 0; + + extack = netdev_notifier_info_to_extack(&info->info); + + if (info->linking) + return ice_eswitch_br_port_link(br_offloads, dev, + upper->ifindex, extack); + else + return ice_eswitch_br_port_unlink(br_offloads, dev, + upper->ifindex, extack); +} + +static int +ice_eswitch_br_port_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + int err = 0; + + switch (event) { + case NETDEV_CHANGEUPPER: + err = ice_eswitch_br_port_changeupper(nb, ptr); + break; + } + + return notifier_from_errno(err); +} + +static void +ice_eswitch_br_offloads_dealloc(struct ice_pf *pf) +{ + struct ice_esw_br_offloads *br_offloads = pf->switchdev.br_offloads; + + ASSERT_RTNL(); + + if (!br_offloads) + return; + + ice_eswitch_br_deinit(br_offloads, br_offloads->bridge); + + pf->switchdev.br_offloads = NULL; + kfree(br_offloads); +} + +static struct ice_esw_br_offloads * +ice_eswitch_br_offloads_alloc(struct ice_pf *pf) +{ + struct ice_esw_br_offloads *br_offloads; + + ASSERT_RTNL(); + + if (pf->switchdev.br_offloads) + return ERR_PTR(-EEXIST); + + br_offloads = kzalloc(sizeof(*br_offloads), GFP_KERNEL); + if (!br_offloads) + return ERR_PTR(-ENOMEM); + + pf->switchdev.br_offloads = br_offloads; + br_offloads->pf = pf; + + return br_offloads; +} + +void +ice_eswitch_br_offloads_deinit(struct ice_pf *pf) +{ + struct ice_esw_br_offloads *br_offloads; + + br_offloads = pf->switchdev.br_offloads; + if (!br_offloads) + return; + + cancel_delayed_work_sync(&br_offloads->update_work); + unregister_netdevice_notifier(&br_offloads->netdev_nb); + unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk); + unregister_switchdev_notifier(&br_offloads->switchdev_nb); + destroy_workqueue(br_offloads->wq); + /* Although the notifier blocks were unregistered just above, + * so no new events will arrive, some events might still be + * in progress. Hold the rtnl lock and wait for them + * to finish. 
+ */ + rtnl_lock(); + ice_eswitch_br_offloads_dealloc(pf); + rtnl_unlock(); +} + +static void ice_eswitch_br_update(struct ice_esw_br_offloads *br_offloads) +{ + struct ice_esw_br *bridge = br_offloads->bridge; + struct ice_esw_br_fdb_entry *entry, *tmp; + + if (!bridge) + return; + + rtnl_lock(); + list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) { + if (entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER) + continue; + + if (time_is_after_eq_jiffies(entry->last_use + + bridge->ageing_time)) + continue; + + ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry); + } + rtnl_unlock(); +} + +static void ice_eswitch_br_update_work(struct work_struct *work) +{ + struct ice_esw_br_offloads *br_offloads; + + br_offloads = ice_work_to_br_offloads(work); + + ice_eswitch_br_update(br_offloads); + + queue_delayed_work(br_offloads->wq, &br_offloads->update_work, + ICE_ESW_BRIDGE_UPDATE_INTERVAL); +} + +int +ice_eswitch_br_offloads_init(struct ice_pf *pf) +{ + struct ice_esw_br_offloads *br_offloads; + struct device *dev = ice_pf_to_dev(pf); + int err; + + rtnl_lock(); + br_offloads = ice_eswitch_br_offloads_alloc(pf); + rtnl_unlock(); + if (IS_ERR(br_offloads)) { + dev_err(dev, "Failed to init eswitch bridge\n"); + return PTR_ERR(br_offloads); + } + + br_offloads->wq = alloc_ordered_workqueue("ice_bridge_wq", 0); + if (!br_offloads->wq) { + err = -ENOMEM; + dev_err(dev, "Failed to allocate bridge workqueue\n"); + goto err_alloc_wq; + } + + br_offloads->switchdev_nb.notifier_call = + ice_eswitch_br_switchdev_event; + err = register_switchdev_notifier(&br_offloads->switchdev_nb); + if (err) { + dev_err(dev, + "Failed to register switchdev notifier\n"); + goto err_reg_switchdev_nb; + } + + br_offloads->switchdev_blk.notifier_call = + ice_eswitch_br_event_blocking; + err = register_switchdev_blocking_notifier(&br_offloads->switchdev_blk); + if (err) { + dev_err(dev, + "Failed to register bridge blocking switchdev notifier\n"); + goto err_reg_switchdev_blk; + } + + br_offloads->netdev_nb.notifier_call = ice_eswitch_br_port_event; + err = register_netdevice_notifier(&br_offloads->netdev_nb); + if (err) { + dev_err(dev, + "Failed to register bridge port event notifier\n"); + goto err_reg_netdev_nb; + } + + INIT_DELAYED_WORK(&br_offloads->update_work, + ice_eswitch_br_update_work); + queue_delayed_work(br_offloads->wq, &br_offloads->update_work, + ICE_ESW_BRIDGE_UPDATE_INTERVAL); + + return 0; + +err_reg_netdev_nb: + unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk); +err_reg_switchdev_blk: + unregister_switchdev_notifier(&br_offloads->switchdev_nb); +err_reg_switchdev_nb: + destroy_workqueue(br_offloads->wq); +err_alloc_wq: + rtnl_lock(); + ice_eswitch_br_offloads_dealloc(pf); + rtnl_unlock(); + + return err; +} diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h new file mode 100644 index 000000000000..85a8fadb2928 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2023, Intel Corporation. 
*/ + +#ifndef _ICE_ESWITCH_BR_H_ +#define _ICE_ESWITCH_BR_H_ + +#include <linux/rhashtable.h> +#include <linux/workqueue.h> + +struct ice_esw_br_fdb_data { + unsigned char addr[ETH_ALEN]; + u16 vid; +}; + +struct ice_esw_br_flow { + struct ice_rule_query_data *fwd_rule; + struct ice_rule_query_data *guard_rule; +}; + +enum { + ICE_ESWITCH_BR_FDB_ADDED_BY_USER = BIT(0), +}; + +struct ice_esw_br_fdb_entry { + struct ice_esw_br_fdb_data data; + struct rhash_head ht_node; + struct list_head list; + + int flags; + + struct net_device *dev; + struct ice_esw_br_port *br_port; + struct ice_esw_br_flow *flow; + + unsigned long last_use; +}; + +enum ice_esw_br_port_type { + ICE_ESWITCH_BR_UPLINK_PORT = 0, + ICE_ESWITCH_BR_VF_REPR_PORT = 1, +}; + +struct ice_esw_br_port { + struct ice_esw_br *bridge; + struct ice_vsi *vsi; + enum ice_esw_br_port_type type; + u16 vsi_idx; + u16 pvid; + struct xarray vlans; +}; + +enum { + ICE_ESWITCH_BR_VLAN_FILTERING = BIT(0), +}; + +struct ice_esw_br { + struct ice_esw_br_offloads *br_offloads; + struct xarray ports; + + struct rhashtable fdb_ht; + struct list_head fdb_list; + + int ifindex; + u32 flags; + unsigned long ageing_time; +}; + +struct ice_esw_br_offloads { + struct ice_pf *pf; + struct ice_esw_br *bridge; + struct notifier_block netdev_nb; + struct notifier_block switchdev_blk; + struct notifier_block switchdev_nb; + + struct workqueue_struct *wq; + struct delayed_work update_work; +}; + +struct ice_esw_br_fdb_work { + struct work_struct work; + struct switchdev_notifier_fdb_info fdb_info; + struct net_device *dev; + unsigned long event; +}; + +struct ice_esw_br_vlan { + u16 vid; + u16 flags; +}; + +#define ice_nb_to_br_offloads(nb, nb_name) \ + container_of(nb, \ + struct ice_esw_br_offloads, \ + nb_name) + +#define ice_work_to_br_offloads(w) \ + container_of(w, \ + struct ice_esw_br_offloads, \ + update_work.work) + +#define ice_work_to_fdb_work(w) \ + container_of(w, \ + struct ice_esw_br_fdb_work, \ + work) + +static inline bool ice_eswitch_br_is_vid_valid(u16 vid) +{ + /* In trunk VLAN mode, for untagged traffic the bridge sends requests + * to offload VLAN 1 with pvid and untagged flags set. Since these + * flags are not supported, add a MAC filter instead. 
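+ * VID 0 and the default PVID 1 therefore fall back to the plain
+ * MAC filter.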
+ */ + return vid > 1; +} + +void +ice_eswitch_br_offloads_deinit(struct ice_pf *pf); +int +ice_eswitch_br_offloads_init(struct ice_pf *pf); + +#endif /* _ICE_ESWITCH_BR_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index ad4d4702129f..bde9bc74f928 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -345,6 +345,88 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = { #define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags) +static const u32 ice_adv_lnk_speed_100[] __initconst = { + ETHTOOL_LINK_MODE_100baseT_Full_BIT, +}; + +static const u32 ice_adv_lnk_speed_1000[] __initconst = { + ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, +}; + +static const u32 ice_adv_lnk_speed_2500[] __initconst = { + ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + ETHTOOL_LINK_MODE_2500baseX_Full_BIT, +}; + +static const u32 ice_adv_lnk_speed_5000[] __initconst = { + ETHTOOL_LINK_MODE_5000baseT_Full_BIT, +}; + +static const u32 ice_adv_lnk_speed_10000[] __initconst = { + ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, +}; + +static const u32 ice_adv_lnk_speed_25000[] __initconst = { + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, +}; + +static const u32 ice_adv_lnk_speed_40000[] __initconst = { + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, +}; + +static const u32 ice_adv_lnk_speed_50000[] __initconst = { + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, +}; + +static const u32 ice_adv_lnk_speed_100000[] __initconst = { + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, +}; + +static const u32 ice_adv_lnk_speed_200000[] __initconst = { + ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, +}; + +static struct ethtool_forced_speed_map ice_adv_lnk_speed_maps[] __ro_after_init = { + ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 100), + ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 1000), + ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 2500), + ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 5000), + ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 10000), + ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 25000), + ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 40000), + ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 50000), + ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 100000), + ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 200000), +}; + +void __init ice_adv_lnk_speed_maps_init(void) +{ + ethtool_forced_speed_maps_init(ice_adv_lnk_speed_maps, + ARRAY_SIZE(ice_adv_lnk_speed_maps)); +} + static void __ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo, struct ice_vsi *vsi) @@ -1060,7 
+1142,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data, switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ICE_VSI_STATS_LEN; i++) - ethtool_sprintf(&p, + ethtool_sprintf(&p, "%s", ice_gstrings_vsi_stats[i].stat_string); if (ice_is_port_repr_netdev(netdev)) @@ -1080,7 +1162,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data, return; for (i = 0; i < ICE_PF_STATS_LEN; i++) - ethtool_sprintf(&p, + ethtool_sprintf(&p, "%s", ice_gstrings_pf_stats[i].stat_string); for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { @@ -1097,7 +1179,8 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data, break; case ETH_SS_PRIV_FLAGS: for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) - ethtool_sprintf(&p, ice_gstrings_priv_flags[i].name); + ethtool_sprintf(&p, "%s", + ice_gstrings_priv_flags[i].name); break; default: break; @@ -1638,6 +1721,15 @@ ice_get_ethtool_stats(struct net_device *netdev, ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \ ICE_PHY_TYPE_HIGH_100G_AUI2) +#define ICE_PHY_TYPE_HIGH_MASK_200G (ICE_PHY_TYPE_HIGH_200G_CR4_PAM4 | \ + ICE_PHY_TYPE_HIGH_200G_SR4 | \ + ICE_PHY_TYPE_HIGH_200G_FR4 | \ + ICE_PHY_TYPE_HIGH_200G_LR4 | \ + ICE_PHY_TYPE_HIGH_200G_DR4 | \ + ICE_PHY_TYPE_HIGH_200G_KR4_PAM4 | \ + ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC | \ + ICE_PHY_TYPE_HIGH_200G_AUI4) + /** * ice_mask_min_supported_speeds * @hw: pointer to the HW structure @@ -1652,8 +1744,9 @@ ice_mask_min_supported_speeds(struct ice_hw *hw, u64 phy_types_high, u64 *phy_types_low) { /* if QSFP connection with 100G speed, minimum supported speed is 25G */ - if (*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G || - phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G) + if ((*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G) || + (phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G) || + (phy_types_high & ICE_PHY_TYPE_HIGH_MASK_200G)) *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_25G; else if (!ice_is_100m_speed_supported(hw)) *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_1G; @@ -1757,14 +1850,14 @@ ice_phy_type_to_ethtool(struct net_device *netdev, linkmode_zero(ks->link_modes.supported); linkmode_zero(ks->link_modes.advertising); - for (i = 0; i < BITS_PER_TYPE(u64); i++) { + for (i = 0; i < ARRAY_SIZE(phy_type_low_lkup); i++) { if (phy_types_low & BIT_ULL(i)) ice_linkmode_set_bit(&phy_type_low_lkup[i], ks, req_speeds, advert_phy_type_lo, i); } - for (i = 0; i < BITS_PER_TYPE(u64); i++) { + for (i = 0; i < ARRAY_SIZE(phy_type_high_lkup); i++) { if (phy_types_high & BIT_ULL(i)) ice_linkmode_set_bit(&phy_type_high_lkup[i], ks, req_speeds, advert_phy_type_hi, @@ -1796,6 +1889,9 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks, ice_phy_type_to_ethtool(netdev, ks); switch (link_info->link_speed) { + case ICE_AQ_LINK_SPEED_200GB: + ks->base.speed = SPEED_200000; + break; case ICE_AQ_LINK_SPEED_100GB: ks->base.speed = SPEED_100000; break; @@ -2008,79 +2104,69 @@ done: } /** + * ice_speed_to_aq_link - Get AQ link speed by Ethtool forced speed + * @speed: ethtool forced speed + */ +static u16 ice_speed_to_aq_link(int speed) +{ + int aq_speed; + + switch (speed) { + case SPEED_10: + aq_speed = ICE_AQ_LINK_SPEED_10MB; + break; + case SPEED_100: + aq_speed = ICE_AQ_LINK_SPEED_100MB; + break; + case SPEED_1000: + aq_speed = ICE_AQ_LINK_SPEED_1000MB; + break; + case SPEED_2500: + aq_speed = ICE_AQ_LINK_SPEED_2500MB; + break; + case SPEED_5000: + aq_speed = ICE_AQ_LINK_SPEED_5GB; + break; + case SPEED_10000: + aq_speed = ICE_AQ_LINK_SPEED_10GB; + break; + case SPEED_20000: + aq_speed = 
ICE_AQ_LINK_SPEED_20GB; + break; + case SPEED_25000: + aq_speed = ICE_AQ_LINK_SPEED_25GB; + break; + case SPEED_40000: + aq_speed = ICE_AQ_LINK_SPEED_40GB; + break; + case SPEED_50000: + aq_speed = ICE_AQ_LINK_SPEED_50GB; + break; + case SPEED_100000: + aq_speed = ICE_AQ_LINK_SPEED_100GB; + break; + default: + aq_speed = ICE_AQ_LINK_SPEED_UNKNOWN; + break; + } + return aq_speed; +} + +/** * ice_ksettings_find_adv_link_speed - Find advertising link speed * @ks: ethtool ksettings */ static u16 ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks) { + const struct ethtool_forced_speed_map *map; u16 adv_link_speed = 0; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, - 100baseT_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_100MB; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, - 1000baseX_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 1000baseT_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 1000baseKX_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, - 2500baseT_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 2500baseX_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, - 5000baseT_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_5GB; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, - 10000baseT_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 10000baseKR_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 10000baseSR_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 10000baseLR_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_10GB; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, - 25000baseCR_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 25000baseSR_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 25000baseKR_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_25GB; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, - 40000baseCR4_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 40000baseSR4_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 40000baseLR4_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 40000baseKR4_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_40GB; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, - 50000baseCR2_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 50000baseKR2_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 50000baseSR2_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_50GB; - if (ethtool_link_ksettings_test_link_mode(ks, advertising, - 100000baseCR4_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 100000baseSR4_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 100000baseLR4_ER4_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 100000baseKR4_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 100000baseCR2_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 100000baseSR2_Full) || - ethtool_link_ksettings_test_link_mode(ks, advertising, - 100000baseKR2_Full)) - adv_link_speed |= ICE_AQ_LINK_SPEED_100GB; + for (u32 i = 0; i < ARRAY_SIZE(ice_adv_lnk_speed_maps); i++) { + map = ice_adv_lnk_speed_maps + i; + if (linkmode_intersects(ks->link_modes.advertising, map->caps)) + adv_link_speed |= ice_speed_to_aq_link(map->speed); + } return 
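+ /* the forced-speed maps make this table-driven: each entry pairs an
+ * ethtool SPEED_* value with the link modes that advertise it, so
+ * e.g. a forced "ethtool -s <ifname> speed 200000" would advertise
+ * only the 200G modes here and resolve to ICE_AQ_LINK_SPEED_200GB
+ */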
adv_link_speed; } @@ -3285,7 +3371,7 @@ ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - info->phc_index = ice_get_ptp_clock_index(pf); + info->phc_index = ice_ptp_clock_index(pf); info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h index b403ee79cd5e..b88e3da06f13 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.h +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h @@ -100,6 +100,14 @@ phy_type_high_lkup[] = { [2] = ICE_PHY_TYPE(100GB, 100000baseCR2_Full), [3] = ICE_PHY_TYPE(100GB, 100000baseSR2_Full), [4] = ICE_PHY_TYPE(100GB, 100000baseCR2_Full), + [5] = ICE_PHY_TYPE(200GB, 200000baseCR4_Full), + [6] = ICE_PHY_TYPE(200GB, 200000baseSR4_Full), + [7] = ICE_PHY_TYPE(200GB, 200000baseLR4_ER4_FR4_Full), + [8] = ICE_PHY_TYPE(200GB, 200000baseLR4_ER4_FR4_Full), + [9] = ICE_PHY_TYPE(200GB, 200000baseDR4_Full), + [10] = ICE_PHY_TYPE(200GB, 200000baseKR4_Full), + [11] = ICE_PHY_TYPE(200GB, 200000baseSR4_Full), + [12] = ICE_PHY_TYPE(200GB, 200000baseCR4_Full), }; #endif /* !_ICE_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index 8c6e13f87b7d..d151e5bacfec 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2018-2020, Intel Corporation. */ +/* Copyright (C) 2018-2023, Intel Corporation. */ /* flow director ethtool support for ice */ @@ -540,16 +540,24 @@ static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi) /* total guaranteed filters assigned to this VSI */ num_guar = vsi->num_gfltr; - /* minus the guaranteed filters programed by this VSI */ - num_guar -= (rd32(hw, VSIQF_FD_CNT(vsi_num)) & - VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S; - /* total global best effort filters */ num_be = hw->func_caps.fd_fltr_best_effort; - /* minus the global best effort filters programmed */ - num_be -= (rd32(hw, GLQF_FD_CNT) & GLQF_FD_CNT_FD_BCNT_M) >> - GLQF_FD_CNT_FD_BCNT_S; + /* Subtract the number of programmed filters from the global values */ + switch (hw->mac_type) { + case ICE_MAC_E830: + num_guar -= FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, + rd32(hw, VSIQF_FD_CNT(vsi_num))); + num_be -= FIELD_GET(E830_GLQF_FD_CNT_FD_BCNT_M, + rd32(hw, GLQF_FD_CNT)); + break; + case ICE_MAC_E810: + default: + num_guar -= FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, + rd32(hw, VSIQF_FD_CNT(vsi_num))); + num_be -= FIELD_GET(E800_GLQF_FD_CNT_FD_BCNT_M, + rd32(hw, GLQF_FD_CNT)); + } return num_guar + num_be; } diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c index 85cca572c22a..fb8b925aaf8b 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.c +++ b/drivers/net/ethernet/intel/ice/ice_flow.c @@ -1318,7 +1318,6 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk, list_del(&entry->l_entry); - devm_kfree(ice_hw_to_dev(hw), entry->entry); devm_kfree(ice_hw_to_dev(hw), entry); return 0; @@ -1645,10 +1644,8 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, *entry_h = ICE_FLOW_ENTRY_HNDL(e); out: - if (status && e) { - devm_kfree(ice_hw_to_dev(hw), e->entry); + if (status) devm_kfree(ice_hw_to_dev(hw), e); - } return status; } diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h 
b/drivers/net/ethernet/intel/ice/ice_flow.h index b465d27d9b80..96923ef0a5a8 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.h +++ b/drivers/net/ethernet/intel/ice/ice_flow.h @@ -350,11 +350,8 @@ struct ice_flow_entry { u64 id; struct ice_flow_prof *prof; - /* Flow entry's content */ - void *entry; enum ice_flow_priority priority; u16 vsi_handle; - u16 entry_sz; }; #define ICE_FLOW_ENTRY_HNDL(e) ((u64)(uintptr_t)e) diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c index 3dc5662d62a6..319a2d6fe26c 100644 --- a/drivers/net/ethernet/intel/ice/ice_fw_update.c +++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c @@ -293,16 +293,17 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, { u16 completion_module, completion_retval; struct device *dev = ice_pf_to_dev(pf); - struct ice_rq_event_info event; + struct ice_aq_task task = {}; struct ice_hw *hw = &pf->hw; + struct ice_aq_desc *desc; u32 completion_offset; int err; - memset(&event, 0, sizeof(event)); - dev_dbg(dev, "Writing block of %u bytes for module 0x%02x at offset %u\n", block_size, module, offset); + ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_write); + err = ice_aq_update_nvm(hw, module, offset, block_size, block, last_cmd, 0, NULL); if (err) { @@ -319,7 +320,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, * is conservative and is intended to prevent failure to update when * firmware is slow to respond. */ - err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, 15 * HZ, &event); + err = ice_aq_wait_for_event(pf, &task, 15 * HZ); if (err) { dev_err(dev, "Timed out while trying to flash module 0x%02x with block of size %u at offset %u, err %d\n", module, block_size, offset, err); @@ -327,11 +328,12 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, return -EIO; } - completion_module = le16_to_cpu(event.desc.params.nvm.module_typeid); - completion_retval = le16_to_cpu(event.desc.retval); + desc = &task.event.desc; + completion_module = le16_to_cpu(desc->params.nvm.module_typeid); + completion_retval = le16_to_cpu(desc->retval); - completion_offset = le16_to_cpu(event.desc.params.nvm.offset_low); - completion_offset |= event.desc.params.nvm.offset_high << 16; + completion_offset = le16_to_cpu(desc->params.nvm.offset_low); + completion_offset |= desc->params.nvm.offset_high << 16; if (completion_module != module) { dev_err(dev, "Unexpected module_typeid in write completion: got 0x%x, expected 0x%x\n", @@ -363,8 +365,8 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, */ if (reset_level && last_cmd && module == ICE_SR_1ST_NVM_BANK_PTR) { if (hw->dev_caps.common_cap.pcie_reset_avoidance) { - *reset_level = (event.desc.params.nvm.cmd_flags & - ICE_AQC_NVM_RESET_LVL_M); + *reset_level = desc->params.nvm.cmd_flags & + ICE_AQC_NVM_RESET_LVL_M; dev_dbg(dev, "Firmware reported required reset level as %u\n", *reset_level); } else { @@ -479,19 +481,20 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component, { u16 completion_module, completion_retval; struct device *dev = ice_pf_to_dev(pf); - struct ice_rq_event_info event; + struct ice_aq_task task = {}; struct ice_hw *hw = &pf->hw; + struct ice_aq_desc *desc; struct devlink *devlink; int err; dev_dbg(dev, "Beginning erase of flash component '%s', module 0x%02x\n", component, module); - memset(&event, 0, sizeof(event)); - devlink = priv_to_devlink(pf); devlink_flash_update_timeout_notify(devlink, "Erasing", component, 
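+ /* ice_aq_prep_for_event() registers the wait task before the AQ
+ * command is sent, so the completion event cannot be missed between
+ * send and ice_aq_wait_for_event(); this replaces the old
+ * ice_rq_event_info scratch buffer
+ */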
ICE_FW_ERASE_TIMEOUT); + ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_erase); + err = ice_aq_erase_nvm(hw, module, NULL); if (err) { dev_err(dev, "Failed to erase %s (module 0x%02x), err %d aq_err %s\n", @@ -502,7 +505,7 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component, goto out_notify_devlink; } - err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_erase, ICE_FW_ERASE_TIMEOUT * HZ, &event); + err = ice_aq_wait_for_event(pf, &task, ICE_FW_ERASE_TIMEOUT * HZ); if (err) { dev_err(dev, "Timed out waiting for firmware to respond with erase completion for %s (module 0x%02x), err %d\n", component, module, err); @@ -510,8 +513,9 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component, goto out_notify_devlink; } - completion_module = le16_to_cpu(event.desc.params.nvm.module_typeid); - completion_retval = le16_to_cpu(event.desc.retval); + desc = &task.event.desc; + completion_module = le16_to_cpu(desc->params.nvm.module_typeid); + completion_retval = le16_to_cpu(desc->retval); if (completion_module != module) { dev_err(dev, "Unexpected module_typeid in erase completion for %s: got 0x%x, expected 0x%x\n", @@ -560,13 +564,13 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags, u8 *emp_reset_available, struct netlink_ext_ack *extack) { struct device *dev = ice_pf_to_dev(pf); - struct ice_rq_event_info event; + struct ice_aq_task task = {}; struct ice_hw *hw = &pf->hw; u16 completion_retval; u8 response_flags; int err; - memset(&event, 0, sizeof(event)); + ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_write_activate); err = ice_nvm_write_activate(hw, activate_flags, &response_flags); if (err) { @@ -592,8 +596,7 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags, } } - err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write_activate, 30 * HZ, - &event); + err = ice_aq_wait_for_event(pf, &task, 30 * HZ); if (err) { dev_err(dev, "Timed out waiting for firmware to switch active flash banks, err %d\n", err); @@ -601,7 +604,7 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags, return err; } - completion_retval = le16_to_cpu(event.desc.retval); + completion_retval = le16_to_cpu(task.event.desc.retval); if (completion_retval) { dev_err(dev, "Firmware failed to switch active flash banks aq_err %s\n", ice_aq_str((enum ice_aq_err)completion_retval)); diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c index 75c9de675f20..c8ea1af51ad3 100644 --- a/drivers/net/ethernet/intel/ice/ice_gnss.c +++ b/drivers/net/ethernet/intel/ice/ice_gnss.c @@ -389,6 +389,9 @@ bool ice_gnss_is_gps_present(struct ice_hw *hw) if (!hw->func_caps.ts_func_info.src_tmr_owned) return false; + if (!ice_is_gps_in_netlist(hw)) + return false; + #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) if (ice_is_e810t(hw)) { int err; diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index a92dc9a16035..86936b758ade 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2018, Intel Corporation. */ +/* Copyright (c) 2018-2023, Intel Corporation. 
*/ /* Machine-generated file */ @@ -231,6 +231,7 @@ #define PFINT_SB_CTL 0x0016B600 #define PFINT_SB_CTL_MSIX_INDX_M ICE_M(0x7FF, 0) #define PFINT_SB_CTL_CAUSE_ENA_M BIT(30) +#define PFINT_TSYN_MSK 0x0016C980 #define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4)) #define QINT_RQCTL_MSIX_INDX_S 0 #define QINT_RQCTL_MSIX_INDX_M ICE_M(0x7FF, 0) @@ -284,11 +285,11 @@ #define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16) #define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4)) #define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0) -#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E36E0 + ((_i) * 32)) -#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8 -#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M ICE_M(0xFFFF, 0) -#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3800 + ((_i) * 32)) -#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M ICE_M(0xFFFF, 0) +#define E800_PRTMAC_HSEC_CTL_TX_PS_QNT(_i) (0x001E36E0 + ((_i) * 32)) +#define E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX 8 +#define E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M GENMASK(15, 0) +#define E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR(_i) (0x001E3800 + ((_i) * 32)) +#define E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M GENMASK(15, 0) #define GL_MDCK_TX_TDPU 0x00049348 #define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1) #define GL_MDET_RX 0x00294C00 @@ -311,7 +312,11 @@ #define GL_MDET_TX_PQM_MAL_TYPE_S 26 #define GL_MDET_TX_PQM_MAL_TYPE_M ICE_M(0x1F, 26) #define GL_MDET_TX_PQM_VALID_M BIT(31) -#define GL_MDET_TX_TCLAN 0x000FC068 +#define GL_MDET_TX_TCLAN_BY_MAC(hw) \ + ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MDET_TX_TCLAN : \ + E800_GL_MDET_TX_TCLAN) +#define E800_GL_MDET_TX_TCLAN 0x000FC068 +#define E830_GL_MDET_TX_TCLAN 0x000FCCC0 #define GL_MDET_TX_TCLAN_QNUM_S 0 #define GL_MDET_TX_TCLAN_QNUM_M ICE_M(0x7FFF, 0) #define GL_MDET_TX_TCLAN_VF_NUM_S 15 @@ -325,7 +330,11 @@ #define PF_MDET_RX_VALID_M BIT(0) #define PF_MDET_TX_PQM 0x002D2C80 #define PF_MDET_TX_PQM_VALID_M BIT(0) -#define PF_MDET_TX_TCLAN 0x000FC000 +#define PF_MDET_TX_TCLAN_BY_MAC(hw) \ + ((hw)->mac_type == ICE_MAC_E830 ? 
E830_PF_MDET_TX_TCLAN : \ + E800_PF_MDET_TX_TCLAN) +#define E800_PF_MDET_TX_TCLAN 0x000FC000 +#define E830_PF_MDET_TX_TCLAN 0x000FCC00 #define PF_MDET_TX_TCLAN_VALID_M BIT(0) #define VP_MDET_RX(_VF) (0x00294400 + ((_VF) * 4)) #define VP_MDET_RX_VALID_M BIT(0) @@ -335,6 +344,10 @@ #define VP_MDET_TX_TCLAN_VALID_M BIT(0) #define VP_MDET_TX_TDPU(_VF) (0x00040000 + ((_VF) * 4)) #define VP_MDET_TX_TDPU_VALID_M BIT(0) +#define E800_GL_MNG_FWSM_FW_MODES_M GENMASK(2, 0) +#define E830_GL_MNG_FWSM_FW_MODES_M GENMASK(1, 0) +#define GL_MNG_FWSM 0x000B6134 +#define GL_MNG_FWSM_FW_LOADING_M BIT(30) #define GLNVM_FLA 0x000B6108 #define GLNVM_FLA_LOCKED_M BIT(6) #define GLNVM_GENS 0x000B6100 @@ -361,13 +374,18 @@ #define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30 #define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30) #define GLQF_FD_CNT 0x00460018 +#define E800_GLQF_FD_CNT_FD_GCNT_M GENMASK(14, 0) +#define E830_GLQF_FD_CNT_FD_GCNT_M GENMASK(15, 0) #define GLQF_FD_CNT_FD_BCNT_S 16 -#define GLQF_FD_CNT_FD_BCNT_M ICE_M(0x7FFF, 16) +#define E800_GLQF_FD_CNT_FD_BCNT_M GENMASK(30, 16) +#define E830_GLQF_FD_CNT_FD_BCNT_M GENMASK(31, 16) #define GLQF_FD_SIZE 0x00460010 #define GLQF_FD_SIZE_FD_GSIZE_S 0 -#define GLQF_FD_SIZE_FD_GSIZE_M ICE_M(0x7FFF, 0) +#define E800_GLQF_FD_SIZE_FD_GSIZE_M GENMASK(14, 0) +#define E830_GLQF_FD_SIZE_FD_GSIZE_M GENMASK(15, 0) #define GLQF_FD_SIZE_FD_BSIZE_S 16 -#define GLQF_FD_SIZE_FD_BSIZE_M ICE_M(0x7FFF, 16) +#define E800_GLQF_FD_SIZE_FD_BSIZE_M GENMASK(30, 16) +#define E830_GLQF_FD_SIZE_FD_BSIZE_M GENMASK(31, 16) #define GLQF_FDINSET(_i, _j) (0x00412000 + ((_i) * 4 + (_j) * 512)) #define GLQF_FDMASK(_i) (0x00410800 + ((_i) * 4)) #define GLQF_FDMASK_MAX_INDEX 31 @@ -386,6 +404,10 @@ #define GLQF_HMASK_SEL(_i) (0x00410000 + ((_i) * 4)) #define GLQF_HMASK_SEL_MAX_INDEX 127 #define GLQF_HMASK_SEL_MASK_SEL_S 0 +#define E800_PFQF_FD_CNT_FD_GCNT_M GENMASK(14, 0) +#define E830_PFQF_FD_CNT_FD_GCNT_M GENMASK(15, 0) +#define E800_PFQF_FD_CNT_FD_BCNT_M GENMASK(30, 16) +#define E830_PFQF_FD_CNT_FD_BCNT_M GENMASK(31, 16) #define PFQF_FD_ENA 0x0043A000 #define PFQF_FD_ENA_FD_ENA_M BIT(0) #define PFQF_FD_SIZE 0x00460100 @@ -476,6 +498,7 @@ #define GLTSYN_SYNC_DLAY 0x00088818 #define GLTSYN_TGT_H_0(_i) (0x00088930 + ((_i) * 4)) #define GLTSYN_TGT_L_0(_i) (0x00088928 + ((_i) * 4)) +#define GLTSYN_TIME_0(_i) (0x000888C8 + ((_i) * 4)) #define GLTSYN_TIME_H(_i) (0x000888D8 + ((_i) * 4)) #define GLTSYN_TIME_L(_i) (0x000888D0 + ((_i) * 4)) #define PFHH_SEM 0x000A4200 /* Reset Source: PFR */ @@ -484,12 +507,13 @@ #define PFTSYN_SEM_BUSY_M BIT(0) #define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4)) #define VSIQF_FD_CNT_FD_GCNT_S 0 -#define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0) +#define E800_VSIQF_FD_CNT_FD_GCNT_M GENMASK(13, 0) +#define E830_VSIQF_FD_CNT_FD_GCNT_M GENMASK(15, 0) #define VSIQF_FD_CNT_FD_BCNT_S 16 -#define VSIQF_FD_CNT_FD_BCNT_M ICE_M(0x3FFF, 16) +#define E800_VSIQF_FD_CNT_FD_BCNT_M GENMASK(29, 16) +#define E830_VSIQF_FD_CNT_FD_BCNT_M GENMASK(31, 16) #define VSIQF_FD_SIZE(_VSI) (0x00462000 + ((_VSI) * 4)) #define VSIQF_HKEY_MAX_INDEX 12 -#define VSIQF_HLUT_MAX_INDEX 15 #define PFPM_APM 0x000B8080 #define PFPM_APM_APME_M BIT(0) #define PFPM_WUFC 0x0009DC00 @@ -499,6 +523,10 @@ #define PFPM_WUS_MAG_M BIT(1) #define PFPM_WUS_MNG_M BIT(3) #define PFPM_WUS_FW_RST_WK_M BIT(31) +#define E830_PRTMAC_CL01_PS_QNT 0x001E32A0 +#define E830_PRTMAC_CL01_PS_QNT_CL0_M GENMASK(15, 0) +#define E830_PRTMAC_CL01_QNT_THR 0x001E3320 +#define E830_PRTMAC_CL01_QNT_THR_CL0_M GENMASK(15, 0) #define VFINT_DYN_CTLN(_i) 
(0x00003800 + ((_i) * 4)) #define VFINT_DYN_CTLN_CLEARPBA_M BIT(1) diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index 5a7753bda324..b47cd43ae871 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -4,8 +4,27 @@ /* Link Aggregation code */ #include "ice.h" +#include "ice_lib.h" #include "ice_lag.h" +#define ICE_LAG_RES_SHARED BIT(14) +#define ICE_LAG_RES_VALID BIT(15) + +#define LACP_TRAIN_PKT_LEN 16 +static const u8 lacp_train_pkt[LACP_TRAIN_PKT_LEN] = { 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + 0x88, 0x09, 0, 0 }; + +#define ICE_RECIPE_LEN 64 +static const u8 ice_dflt_vsi_rcp[ICE_RECIPE_LEN] = { + 0x05, 0, 0, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0x85, 0, 0x01, 0, 0, 0, 0xff, 0xff, 0x08, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0x30 }; +static const u8 ice_lport_rcp[ICE_RECIPE_LEN] = { + 0x05, 0, 0, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0x85, 0, 0x16, 0, 0, 0, 0xff, 0xff, 0x07, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0x30 }; + /** * ice_lag_set_primary - set PF LAG state as Primary * @lag: LAG info struct @@ -47,16 +66,257 @@ static void ice_lag_set_backup(struct ice_lag *lag) } /** + * netif_is_same_ice - determine if netdev is on the same ice NIC as local PF + * @pf: local PF struct + * @netdev: netdev we are evaluating + */ +static bool netif_is_same_ice(struct ice_pf *pf, struct net_device *netdev) +{ + struct ice_netdev_priv *np; + struct ice_pf *test_pf; + struct ice_vsi *vsi; + + if (!netif_is_ice(netdev)) + return false; + + np = netdev_priv(netdev); + if (!np) + return false; + + vsi = np->vsi; + if (!vsi) + return false; + + test_pf = vsi->back; + if (!test_pf) + return false; + + if (pf->pdev->bus != test_pf->pdev->bus || + pf->pdev->slot != test_pf->pdev->slot) + return false; + + return true; +} + +/** + * ice_netdev_to_lag - return pointer to associated lag struct from netdev + * @netdev: pointer to net_device struct to query + */ +static struct ice_lag *ice_netdev_to_lag(struct net_device *netdev) +{ + struct ice_netdev_priv *np; + struct ice_vsi *vsi; + + if (!netif_is_ice(netdev)) + return NULL; + + np = netdev_priv(netdev); + if (!np) + return NULL; + + vsi = np->vsi; + if (!vsi) + return NULL; + + return vsi->back->lag; +} + +/** + * ice_lag_find_hw_by_lport - return an hw struct from bond members lport + * @lag: lag struct + * @lport: lport value to search for + */ +static struct ice_hw * +ice_lag_find_hw_by_lport(struct ice_lag *lag, u8 lport) +{ + struct ice_lag_netdev_list *entry; + struct net_device *tmp_netdev; + struct ice_netdev_priv *np; + struct ice_hw *hw; + + list_for_each_entry(entry, lag->netdev_head, node) { + tmp_netdev = entry->netdev; + if (!tmp_netdev || !netif_is_ice(tmp_netdev)) + continue; + + np = netdev_priv(tmp_netdev); + if (!np || !np->vsi) + continue; + + hw = &np->vsi->back->hw; + if (hw->port_info->lport == lport) + return hw; + } + + return NULL; +} + +/** + * ice_lag_find_primary - returns pointer to primary interfaces lag struct + * @lag: local interfaces lag struct + */ +static struct ice_lag *ice_lag_find_primary(struct ice_lag *lag) +{ + struct ice_lag *primary_lag = NULL; + struct list_head *tmp; + + list_for_each(tmp, lag->netdev_head) { + struct ice_lag_netdev_list *entry; + struct ice_lag *tmp_lag; + + entry = list_entry(tmp, struct ice_lag_netdev_list, node); + tmp_lag = ice_netdev_to_lag(entry->netdev); + if (tmp_lag && tmp_lag->primary) { + primary_lag = tmp_lag; + break; + } + } + + return primary_lag; +} + +/** + 
* ice_lag_cfg_fltr - Add/Remove rule for LAG + * @lag: lag struct for local interface + * @act: rule action + * @recipe_id: recipe id for the new rule + * @rule_idx: pointer to rule index + * @add: boolean on whether we are adding filters + */ +static int +ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx, + bool add) +{ + struct ice_sw_rule_lkup_rx_tx *s_rule; + u16 s_rule_sz, vsi_num; + struct ice_hw *hw; + u8 *eth_hdr; + u32 opc; + int err; + + hw = &lag->pf->hw; + vsi_num = ice_get_hw_vsi_num(hw, 0); + + s_rule_sz = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule); + s_rule = kzalloc(s_rule_sz, GFP_KERNEL); + if (!s_rule) { + dev_err(ice_pf_to_dev(lag->pf), "error allocating rule for LAG\n"); + return -ENOMEM; + } + + if (add) { + eth_hdr = s_rule->hdr_data; + ice_fill_eth_hdr(eth_hdr); + + act |= (vsi_num << ICE_SINGLE_ACT_VSI_ID_S) & + ICE_SINGLE_ACT_VSI_ID_M; + + s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); + s_rule->recipe_id = cpu_to_le16(recipe_id); + s_rule->src = cpu_to_le16(hw->port_info->lport); + s_rule->act = cpu_to_le32(act); + s_rule->hdr_len = cpu_to_le16(DUMMY_ETH_HDR_LEN); + opc = ice_aqc_opc_add_sw_rules; + } else { + s_rule->index = cpu_to_le16(*rule_idx); + opc = ice_aqc_opc_remove_sw_rules; + } + + err = ice_aq_sw_rules(&lag->pf->hw, s_rule, s_rule_sz, 1, opc, NULL); + if (err) + goto dflt_fltr_free; + + if (add) + *rule_idx = le16_to_cpu(s_rule->index); + else + *rule_idx = 0; + +dflt_fltr_free: + kfree(s_rule); + return err; +} + +/** + * ice_lag_cfg_dflt_fltr - Add/Remove default VSI rule for LAG + * @lag: lag struct for local interface + * @add: boolean on whether to add filter + */ +static int +ice_lag_cfg_dflt_fltr(struct ice_lag *lag, bool add) +{ + u32 act = ICE_SINGLE_ACT_VSI_FORWARDING | + ICE_SINGLE_ACT_VALID_BIT | ICE_SINGLE_ACT_LAN_ENABLE; + + return ice_lag_cfg_fltr(lag, act, lag->pf_recipe, + &lag->pf_rule_id, add); +} + +/** + * ice_lag_cfg_drop_fltr - Add/Remove lport drop rule + * @lag: lag struct for local interface + * @add: boolean on whether to add filter + */ +static int +ice_lag_cfg_drop_fltr(struct ice_lag *lag, bool add) +{ + u32 act = ICE_SINGLE_ACT_VSI_FORWARDING | + ICE_SINGLE_ACT_VALID_BIT | + ICE_SINGLE_ACT_DROP; + + return ice_lag_cfg_fltr(lag, act, lag->lport_recipe, + &lag->lport_rule_idx, add); +} + +/** + * ice_lag_cfg_pf_fltrs - set filters up for new active port + * @lag: local interfaces lag struct + * @ptr: opaque data containing notifier event + */ +static void +ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr) +{ + struct netdev_notifier_bonding_info *info; + struct netdev_bonding_info *bonding_info; + struct net_device *event_netdev; + struct device *dev; + + event_netdev = netdev_notifier_info_to_dev(ptr); + /* not for this netdev */ + if (event_netdev != lag->netdev) + return; + + info = (struct netdev_notifier_bonding_info *)ptr; + bonding_info = &info->bonding_info; + dev = ice_pf_to_dev(lag->pf); + + /* interface not active - remove old default VSI rule */ + if (bonding_info->slave.state && lag->pf_rule_id) { + if (ice_lag_cfg_dflt_fltr(lag, false)) + dev_err(dev, "Error removing old default VSI filter\n"); + if (ice_lag_cfg_drop_fltr(lag, true)) + dev_err(dev, "Error adding new drop filter\n"); + return; + } + + /* interface becoming active - add new default VSI rule */ + if (!bonding_info->slave.state && !lag->pf_rule_id) { + if (ice_lag_cfg_dflt_fltr(lag, true)) + dev_err(dev, "Error adding new default VSI filter\n"); + if (lag->lport_rule_idx && ice_lag_cfg_drop_fltr(lag, false)) 
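+ /* the drop rule installed while the port was standby is no
+ * longer needed once the port is active again
+ */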
+ dev_err(dev, "Error removing old drop filter\n"); + } +} + +/** * ice_display_lag_info - print LAG info * @lag: LAG info struct */ static void ice_display_lag_info(struct ice_lag *lag) { - const char *name, *peer, *upper, *role, *bonded, *primary; + const char *name, *upper, *role, *bonded, *primary; struct device *dev = &lag->pf->pdev->dev; name = lag->netdev ? netdev_name(lag->netdev) : "unset"; - peer = lag->peer_netdev ? netdev_name(lag->peer_netdev) : "unset"; upper = lag->upper_netdev ? netdev_name(lag->upper_netdev) : "unset"; primary = lag->primary ? "TRUE" : "FALSE"; bonded = lag->bonded ? "BONDED" : "UNBONDED"; @@ -78,8 +338,445 @@ static void ice_display_lag_info(struct ice_lag *lag) role = "ERROR"; } - dev_dbg(dev, "%s %s, peer:%s, upper:%s, role:%s, primary:%s\n", name, - bonded, peer, upper, role, primary); + dev_dbg(dev, "%s %s, upper:%s, role:%s, primary:%s\n", name, bonded, + upper, role, primary); +} + +/** + * ice_lag_qbuf_recfg - generate a buffer of queues for a reconfigure command + * @hw: HW struct that contains the queue contexts + * @qbuf: pointer to buffer to populate + * @vsi_num: index of the VSI in PF space + * @numq: number of queues to search for + * @tc: traffic class that contains the queues + * + * function returns the number of valid queues in buffer + */ +static u16 +ice_lag_qbuf_recfg(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *qbuf, + u16 vsi_num, u16 numq, u8 tc) +{ + struct ice_q_ctx *q_ctx; + u16 qid, count = 0; + struct ice_pf *pf; + int i; + + pf = hw->back; + for (i = 0; i < numq; i++) { + q_ctx = ice_get_lan_q_ctx(hw, vsi_num, tc, i); + if (!q_ctx) { + dev_dbg(ice_hw_to_dev(hw), "%s queue %d NO Q CONTEXT\n", + __func__, i); + continue; + } + if (q_ctx->q_teid == ICE_INVAL_TEID) { + dev_dbg(ice_hw_to_dev(hw), "%s queue %d INVAL TEID\n", + __func__, i); + continue; + } + if (q_ctx->q_handle == ICE_INVAL_Q_HANDLE) { + dev_dbg(ice_hw_to_dev(hw), "%s queue %d INVAL Q HANDLE\n", + __func__, i); + continue; + } + + qid = pf->vsi[vsi_num]->txq_map[q_ctx->q_handle]; + qbuf->queue_info[count].q_handle = cpu_to_le16(qid); + qbuf->queue_info[count].tc = tc; + qbuf->queue_info[count].q_teid = cpu_to_le32(q_ctx->q_teid); + count++; + } + + return count; +} + +/** + * ice_lag_get_sched_parent - locate or create a sched node parent + * @hw: HW struct for getting parent in + * @tc: traffic class on parent/node + */ +static struct ice_sched_node * +ice_lag_get_sched_parent(struct ice_hw *hw, u8 tc) +{ + struct ice_sched_node *tc_node, *aggnode, *parent = NULL; + u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; + struct ice_port_info *pi = hw->port_info; + struct device *dev; + u8 aggl, vsil; + int n; + + dev = ice_hw_to_dev(hw); + + tc_node = ice_sched_get_tc_node(pi, tc); + if (!tc_node) { + dev_warn(dev, "Failure to find TC node for LAG move\n"); + return parent; + } + + aggnode = ice_sched_get_agg_node(pi, tc_node, ICE_DFLT_AGG_ID); + if (!aggnode) { + dev_warn(dev, "Failure to find aggregate node for LAG move\n"); + return parent; + } + + aggl = ice_sched_get_agg_layer(hw); + vsil = ice_sched_get_vsi_layer(hw); + + for (n = aggl + 1; n < vsil; n++) + num_nodes[n] = 1; + + for (n = 0; n < aggnode->num_children; n++) { + parent = ice_sched_get_free_vsi_parent(hw, aggnode->children[n], + num_nodes); + if (parent) + return parent; + } + + /* if free parent not found - add one */ + parent = aggnode; + for (n = aggl + 1; n < vsil; n++) { + u16 num_nodes_added; + u32 first_teid; + int err; + + err = ice_sched_add_nodes_to_layer(pi, tc_node, parent, n, + 
num_nodes[n], &first_teid, + &num_nodes_added); + if (err || num_nodes[n] != num_nodes_added) + return NULL; + + if (num_nodes_added) + parent = ice_sched_find_node_by_teid(tc_node, + first_teid); + else + parent = parent->children[0]; + if (!parent) { + dev_warn(dev, "Failure to add new parent for LAG move\n"); + return parent; + } + } + + return parent; +} + +/** + * ice_lag_move_vf_node_tc - move scheduling nodes for one VF on one TC + * @lag: lag info struct + * @oldport: lport of previous nodes location + * @newport: lport of destination nodes location + * @vsi_num: array index of VSI in PF space + * @tc: traffic class to move + */ +static void +ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport, + u16 vsi_num, u8 tc) +{ + DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1); + struct device *dev = ice_pf_to_dev(lag->pf); + u16 numq, valq, num_moved, qbuf_size; + u16 buf_size = __struct_size(buf); + struct ice_aqc_cfg_txqs_buf *qbuf; + struct ice_sched_node *n_prt; + struct ice_hw *new_hw = NULL; + __le32 teid, parent_teid; + struct ice_vsi_ctx *ctx; + u32 tmp_teid; + + ctx = ice_get_vsi_ctx(&lag->pf->hw, vsi_num); + if (!ctx) { + dev_warn(dev, "Unable to locate VSI context for LAG failover\n"); + return; + } + + /* check to see if this VF is enabled on this TC */ + if (!ctx->sched.vsi_node[tc]) + return; + + /* locate HW struct for destination port */ + new_hw = ice_lag_find_hw_by_lport(lag, newport); + if (!new_hw) { + dev_warn(dev, "Unable to locate HW struct for LAG node destination\n"); + return; + } + + numq = ctx->num_lan_q_entries[tc]; + teid = ctx->sched.vsi_node[tc]->info.node_teid; + tmp_teid = le32_to_cpu(teid); + parent_teid = ctx->sched.vsi_node[tc]->info.parent_teid; + /* if no teid assigned or numq == 0, then this TC is not active */ + if (!tmp_teid || !numq) + return; + + /* suspend VSI subtree for Traffic Class "tc" on + * this VF's VSI + */ + if (ice_sched_suspend_resume_elems(&lag->pf->hw, 1, &tmp_teid, true)) + dev_dbg(dev, "Problem suspending traffic for LAG node move\n"); + + /* reconfigure all VF's queues on this Traffic Class + * to new port + */ + qbuf_size = struct_size(qbuf, queue_info, numq); + qbuf = kzalloc(qbuf_size, GFP_KERNEL); + if (!qbuf) { + dev_warn(dev, "Failure allocating memory for VF queue recfg buffer\n"); + goto resume_traffic; + } + + /* add the per queue info for the reconfigure command buffer */ + valq = ice_lag_qbuf_recfg(&lag->pf->hw, qbuf, vsi_num, numq, tc); + if (!valq) { + dev_dbg(dev, "No valid queues found for LAG failover\n"); + goto qbuf_none; + } + + if (ice_aq_cfg_lan_txq(&lag->pf->hw, qbuf, qbuf_size, valq, oldport, + newport, NULL)) { + dev_warn(dev, "Failure to configure queues for LAG failover\n"); + goto qbuf_err; + } + +qbuf_none: + kfree(qbuf); + + /* find new parent in destination port's tree for VF VSI node on this + * Traffic Class + */ + n_prt = ice_lag_get_sched_parent(new_hw, tc); + if (!n_prt) + goto resume_traffic; + + /* Move VF's VSI node for this TC to newport's scheduler tree */ + buf->hdr.src_parent_teid = parent_teid; + buf->hdr.dest_parent_teid = n_prt->info.node_teid; + buf->hdr.num_elems = cpu_to_le16(1); + buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN; + buf->teid[0] = teid; + + if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved)) + dev_warn(dev, "Failure to move VF nodes for failover\n"); + else + ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]); + + goto resume_traffic; + +qbuf_err: + kfree(qbuf); + +resume_traffic: + /* restart traffic for VSI node */ + if
(ice_sched_suspend_resume_elems(&lag->pf->hw, 1, &tmp_teid, false)) + dev_dbg(dev, "Problem restarting traffic for LAG node move\n"); +} + +/** + * ice_lag_build_netdev_list - populate the lag struct's netdev list + * @lag: local lag struct + * @ndlist: pointer to netdev list to populate + */ +static void ice_lag_build_netdev_list(struct ice_lag *lag, + struct ice_lag_netdev_list *ndlist) +{ + struct ice_lag_netdev_list *nl; + struct net_device *tmp_nd; + + INIT_LIST_HEAD(&ndlist->node); + rcu_read_lock(); + for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) { + nl = kzalloc(sizeof(*nl), GFP_ATOMIC); + if (!nl) + break; + + nl->netdev = tmp_nd; + list_add(&nl->node, &ndlist->node); + } + rcu_read_unlock(); + lag->netdev_head = &ndlist->node; +} + +/** + * ice_lag_destroy_netdev_list - free lag struct's netdev list + * @lag: pointer to local lag struct + * @ndlist: pointer to lag struct netdev list + */ +static void ice_lag_destroy_netdev_list(struct ice_lag *lag, + struct ice_lag_netdev_list *ndlist) +{ + struct ice_lag_netdev_list *entry, *n; + + rcu_read_lock(); + list_for_each_entry_safe(entry, n, &ndlist->node, node) { + list_del(&entry->node); + kfree(entry); + } + rcu_read_unlock(); + lag->netdev_head = NULL; +} + +/** + * ice_lag_move_single_vf_nodes - Move Tx scheduling nodes for single VF + * @lag: primary interface LAG struct + * @oldport: lport of previous interface + * @newport: lport of destination interface + * @vsi_num: SW index of VF's VSI + */ +static void +ice_lag_move_single_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport, + u16 vsi_num) +{ + u8 tc; + + ice_for_each_traffic_class(tc) + ice_lag_move_vf_node_tc(lag, oldport, newport, vsi_num, tc); +} + +/** + * ice_lag_move_new_vf_nodes - Move Tx scheduling nodes for a VF if required + * @vf: the VF to move Tx nodes for + * + * Called just after configuring new VF queues. Check whether the VF Tx + * scheduling nodes need to be updated to fail over to the active port. If so, + * move them now. 
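 + *
 + * A minimal sketch of the intended calling order (not part of this
 + * patch; ice_vf_cfg_queues() is a hypothetical caller name used only
 + * for illustration):
 + *
 + *	err = ice_vf_cfg_queues(vf);		// configure the VF's new queues
 + *	if (!err)
 + *		ice_lag_move_new_vf_nodes(vf);	// then chase the active port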
+ */ +void ice_lag_move_new_vf_nodes(struct ice_vf *vf) +{ + struct ice_lag_netdev_list ndlist; + u8 pri_port, act_port; + struct ice_lag *lag; + struct ice_vsi *vsi; + struct ice_pf *pf; + + vsi = ice_get_vf_vsi(vf); + + if (WARN_ON(!vsi)) + return; + + if (WARN_ON(vsi->type != ICE_VSI_VF)) + return; + + pf = vf->pf; + lag = pf->lag; + + mutex_lock(&pf->lag_mutex); + if (!lag->bonded) + goto new_vf_unlock; + + pri_port = pf->hw.port_info->lport; + act_port = lag->active_port; + + if (lag->upper_netdev) + ice_lag_build_netdev_list(lag, &ndlist); + + if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) && + lag->bonded && lag->primary && pri_port != act_port && + !list_empty(lag->netdev_head)) + ice_lag_move_single_vf_nodes(lag, pri_port, act_port, vsi->idx); + + ice_lag_destroy_netdev_list(lag, &ndlist); + +new_vf_unlock: + mutex_unlock(&pf->lag_mutex); +} + +/** + * ice_lag_move_vf_nodes - move Tx scheduling nodes for all VFs to new port + * @lag: lag info struct + * @oldport: lport of previous interface + * @newport: lport of destination interface + */ +static void ice_lag_move_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport) +{ + struct ice_pf *pf; + int i; + + if (!lag->primary) + return; + + pf = lag->pf; + ice_for_each_vsi(pf, i) + if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF || + pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL)) + ice_lag_move_single_vf_nodes(lag, oldport, newport, i); +} + +/** + * ice_lag_move_vf_nodes_cfg - move vf nodes outside LAG netdev event context + * @lag: local lag struct + * @src_prt: lport value for source port + * @dst_prt: lport value for destination port + * + * This function is used to move nodes during an out-of-netdev-event situation, + * primarily when the driver needs to reconfigure or recreate resources. + * + * Must be called while holding the lag_mutex to avoid lag events from + * processing while out-of-sync moves are happening. Also, paired moves, + * such as used in a reset flow, should both be called under the same mutex + * lock to avoid changes between start of reset and end of reset. 
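 + *
 + * A hedged sketch of the paired reset-flow usage described above (not
 + * part of this patch; act_prt and pri_prt are illustrative locals):
 + *
 + *	mutex_lock(&pf->lag_mutex);
 + *	ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);	// start of reset
 + *	// ... perform the reset work ...
 + *	ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);	// end of reset
 + *	mutex_unlock(&pf->lag_mutex);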
+ */ +void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt) +{ + struct ice_lag_netdev_list ndlist; + + ice_lag_build_netdev_list(lag, &ndlist); + ice_lag_move_vf_nodes(lag, src_prt, dst_prt); + ice_lag_destroy_netdev_list(lag, &ndlist); +} + +#define ICE_LAG_SRIOV_CP_RECIPE 10 +#define ICE_LAG_SRIOV_TRAIN_PKT_LEN 16 + +/** + * ice_lag_cfg_cp_fltr - configure filter for control packets + * @lag: local interface's lag struct + * @add: add or remove rule + */ +static void +ice_lag_cfg_cp_fltr(struct ice_lag *lag, bool add) +{ + struct ice_sw_rule_lkup_rx_tx *s_rule = NULL; + struct ice_vsi *vsi; + u16 buf_len, opc; + + vsi = lag->pf->vsi[0]; + + buf_len = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, + ICE_LAG_SRIOV_TRAIN_PKT_LEN); + s_rule = kzalloc(buf_len, GFP_KERNEL); + if (!s_rule) { + netdev_warn(lag->netdev, "-ENOMEM error configuring CP filter\n"); + return; + } + + if (add) { + s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); + s_rule->recipe_id = cpu_to_le16(ICE_LAG_SRIOV_CP_RECIPE); + s_rule->src = cpu_to_le16(vsi->port_info->lport); + s_rule->act = cpu_to_le32(ICE_FWD_TO_VSI | + ICE_SINGLE_ACT_LAN_ENABLE | + ICE_SINGLE_ACT_VALID_BIT | + ((vsi->vsi_num << + ICE_SINGLE_ACT_VSI_ID_S) & + ICE_SINGLE_ACT_VSI_ID_M)); + s_rule->hdr_len = cpu_to_le16(ICE_LAG_SRIOV_TRAIN_PKT_LEN); + memcpy(s_rule->hdr_data, lacp_train_pkt, LACP_TRAIN_PKT_LEN); + opc = ice_aqc_opc_add_sw_rules; + } else { + opc = ice_aqc_opc_remove_sw_rules; + s_rule->index = cpu_to_le16(lag->cp_rule_idx); + } + if (ice_aq_sw_rules(&lag->pf->hw, s_rule, buf_len, 1, opc, NULL)) { + netdev_warn(lag->netdev, "Error %s CP rule for fail-over\n", + add ? "ADDING" : "REMOVING"); + goto cp_free; + } + + if (add) + lag->cp_rule_idx = le16_to_cpu(s_rule->index); + else + lag->cp_rule_idx = 0; + +cp_free: + kfree(s_rule); } /** @@ -124,117 +821,422 @@ lag_out: } /** + * ice_lag_reclaim_vf_tc - move scheduling nodes back to primary interface + * @lag: primary interface lag struct + * @src_hw: HW struct current node location + * @vsi_num: VSI index in PF space + * @tc: traffic class to move + */ +static void +ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num, + u8 tc) +{ + DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1); + struct device *dev = ice_pf_to_dev(lag->pf); + u16 numq, valq, num_moved, qbuf_size; + u16 buf_size = __struct_size(buf); + struct ice_aqc_cfg_txqs_buf *qbuf; + struct ice_sched_node *n_prt; + __le32 teid, parent_teid; + struct ice_vsi_ctx *ctx; + struct ice_hw *hw; + u32 tmp_teid; + + hw = &lag->pf->hw; + ctx = ice_get_vsi_ctx(hw, vsi_num); + if (!ctx) { + dev_warn(dev, "Unable to locate VSI context for LAG reclaim\n"); + return; + } + + /* check to see if this VF is enabled on this TC */ + if (!ctx->sched.vsi_node[tc]) + return; + + numq = ctx->num_lan_q_entries[tc]; + teid = ctx->sched.vsi_node[tc]->info.node_teid; + tmp_teid = le32_to_cpu(teid); + parent_teid = ctx->sched.vsi_node[tc]->info.parent_teid; + + /* if !teid or !numq, then this TC is not active */ + if (!tmp_teid || !numq) + return; + + /* suspend traffic */ + if (ice_sched_suspend_resume_elems(hw, 1, &tmp_teid, true)) + dev_dbg(dev, "Problem suspending traffic for LAG node move\n"); + + /* reconfig queues for new port */ + qbuf_size = struct_size(qbuf, queue_info, numq); + qbuf = kzalloc(qbuf_size, GFP_KERNEL); + if (!qbuf) { + dev_warn(dev, "Failure allocating memory for VF queue recfg buffer\n"); + goto resume_reclaim; + } + + /* add the per queue info for the reconfigure command buffer */ + valq 
= ice_lag_qbuf_recfg(hw, qbuf, vsi_num, numq, tc); + if (!valq) { + dev_dbg(dev, "No valid queues found for LAG reclaim\n"); + goto reclaim_none; + } + + if (ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, valq, + src_hw->port_info->lport, hw->port_info->lport, + NULL)) { + dev_warn(dev, "Failure to configure queues for LAG failover\n"); + goto reclaim_qerr; + } + +reclaim_none: + kfree(qbuf); + + /* find parent in primary tree */ + n_prt = ice_lag_get_sched_parent(hw, tc); + if (!n_prt) + goto resume_reclaim; + + /* Move node to new parent */ + buf->hdr.src_parent_teid = parent_teid; + buf->hdr.dest_parent_teid = n_prt->info.node_teid; + buf->hdr.num_elems = cpu_to_le16(1); + buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN; + buf->teid[0] = teid; + + if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved)) + dev_warn(dev, "Failure to move VF nodes for LAG reclaim\n"); + else + ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]); + + goto resume_reclaim; + +reclaim_qerr: + kfree(qbuf); + +resume_reclaim: + /* restart traffic */ + if (ice_sched_suspend_resume_elems(hw, 1, &tmp_teid, false)) + dev_warn(dev, "Problem restarting traffic for LAG node reclaim\n"); +} + +/** + * ice_lag_reclaim_vf_nodes - primary reclaims VF nodes when an interface leaves the bond + * @lag: primary interface lag struct + * @src_hw: HW struct for current node location + */ +static void +ice_lag_reclaim_vf_nodes(struct ice_lag *lag, struct ice_hw *src_hw) +{ + struct ice_pf *pf; + int i, tc; + + if (!lag->primary || !src_hw) + return; + + pf = lag->pf; + ice_for_each_vsi(pf, i) + if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF || + pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL)) + ice_for_each_traffic_class(tc) + ice_lag_reclaim_vf_tc(lag, src_hw, i, tc); +} + +/** * ice_lag_link - handle LAG link event * @lag: LAG info struct - * @info: info from the netdev notifier */ -static void -ice_lag_link(struct ice_lag *lag, struct netdev_notifier_changeupper_info *info) +static void ice_lag_link(struct ice_lag *lag) { - struct net_device *netdev_tmp, *upper = info->upper_dev; struct ice_pf *pf = lag->pf; - int peers = 0; if (lag->bonded) dev_warn(ice_pf_to_dev(pf), "%s Already part of a bond\n", netdev_name(lag->netdev)); - rcu_read_lock(); - for_each_netdev_in_bond_rcu(upper, netdev_tmp) - peers++; - rcu_read_unlock(); - - if (lag->upper_netdev != upper) { - dev_hold(upper); - lag->upper_netdev = upper; - } - - ice_clear_rdma_cap(pf); - lag->bonded = true; lag->role = ICE_LAG_UNSET; - - /* if this is the first element in an LAG mark as primary */ - lag->primary = !!(peers == 1); + netdev_info(lag->netdev, "Shared SR-IOV resources in bond are active\n"); } /** * ice_lag_unlink - handle unlink event * @lag: LAG info struct - * @info: info from netdev notification */ -static void -ice_lag_unlink(struct ice_lag *lag, - struct netdev_notifier_changeupper_info *info) +static void ice_lag_unlink(struct ice_lag *lag) { - struct net_device *netdev_tmp, *upper = info->upper_dev; + u8 pri_port, act_port, loc_port; struct ice_pf *pf = lag->pf; - bool found = false; if (!lag->bonded) { netdev_dbg(lag->netdev, "bonding unlink event on non-LAG netdev\n"); return; } - /* determine if we are in the new LAG config or not */ - rcu_read_lock(); - for_each_netdev_in_bond_rcu(upper, netdev_tmp) { - if (netdev_tmp == lag->netdev) { - found = true; - break; + if (lag->primary) { + act_port = lag->active_port; + pri_port = lag->pf->hw.port_info->lport; + if (act_port != pri_port && act_port != ICE_LAG_INVALID_PORT) + ice_lag_move_vf_nodes(lag,
act_port, pri_port); + lag->primary = false; + lag->active_port = ICE_LAG_INVALID_PORT; + } else { + struct ice_lag *primary_lag; + + primary_lag = ice_lag_find_primary(lag); + if (primary_lag) { + act_port = primary_lag->active_port; + pri_port = primary_lag->pf->hw.port_info->lport; + loc_port = pf->hw.port_info->lport; + if (act_port == loc_port && + act_port != ICE_LAG_INVALID_PORT) { + ice_lag_reclaim_vf_nodes(primary_lag, + &lag->pf->hw); + primary_lag->active_port = ICE_LAG_INVALID_PORT; + } + } + } + + lag->bonded = false; + lag->role = ICE_LAG_NONE; + lag->upper_netdev = NULL; +} + +/** + * ice_lag_link_unlink - helper function to call lag_link/unlink + * @lag: lag info struct + * @ptr: opaque pointer data + */ +static void ice_lag_link_unlink(struct ice_lag *lag, void *ptr) +{ + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + struct netdev_notifier_changeupper_info *info = ptr; + + if (netdev != lag->netdev) + return; + + if (info->linking) + ice_lag_link(lag); + else + ice_lag_unlink(lag); +} + +/** + * ice_lag_set_swid - set the SWID on secondary interface + * @primary_swid: primary interface's SWID + * @local_lag: local interface's LAG struct + * @link: Is this a linking activity + * + * If link is false, then primary_swid should be expected to not be valid. + * This function should never be called in interrupt context. + */ +static void +ice_lag_set_swid(u16 primary_swid, struct ice_lag *local_lag, + bool link) +{ + struct ice_aqc_alloc_free_res_elem *buf; + struct ice_aqc_set_port_params *cmd; + struct ice_aq_desc desc; + u16 buf_len, swid; + int status, i; + + buf_len = struct_size(buf, elem, 1); + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) { + dev_err(ice_pf_to_dev(local_lag->pf), "-ENOMEM error setting SWID\n"); + return; + } + + buf->num_elems = cpu_to_le16(1); + buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_SWID); + /* if unlinking need to free the shared resource */ + if (!link && local_lag->bond_swid) { + buf->elem[0].e.sw_resp = cpu_to_le16(local_lag->bond_swid); + status = ice_aq_alloc_free_res(&local_lag->pf->hw, buf, + buf_len, ice_aqc_opc_free_res); + if (status) + dev_err(ice_pf_to_dev(local_lag->pf), "Error freeing SWID during LAG unlink\n"); + local_lag->bond_swid = 0; + } + + if (link) { + buf->res_type |= cpu_to_le16(ICE_LAG_RES_SHARED | + ICE_LAG_RES_VALID); + /* store the primary's SWID in case it leaves bond first */ + local_lag->bond_swid = primary_swid; + buf->elem[0].e.sw_resp = cpu_to_le16(local_lag->bond_swid); + } else { + buf->elem[0].e.sw_resp = + cpu_to_le16(local_lag->pf->hw.port_info->sw_id); + } + + status = ice_aq_alloc_free_res(&local_lag->pf->hw, buf, buf_len, + ice_aqc_opc_alloc_res); + if (status) + dev_err(ice_pf_to_dev(local_lag->pf), "Error subscribing to SWID 0x%04X\n", + local_lag->bond_swid); + + kfree(buf); + + /* Configure port param SWID to correct value */ + if (link) + swid = primary_swid; + else + swid = local_lag->pf->hw.port_info->sw_id; + + cmd = &desc.params.set_port_params; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params); + + cmd->swid = cpu_to_le16(ICE_AQC_PORT_SWID_VALID | swid); + /* If this is happening in reset context, it is possible that the + * primary interface has not finished setting its SWID to SHARED + * yet.
Allow retries to account for this timing issue between + * interfaces. + */ + for (i = 0; i < ICE_LAG_RESET_RETRIES; i++) { + status = ice_aq_send_cmd(&local_lag->pf->hw, &desc, NULL, 0, + NULL); + if (!status) + break; + + usleep_range(1000, 2000); + } + + if (status) + dev_err(ice_pf_to_dev(local_lag->pf), "Error setting SWID in port params %d\n", + status); } /** - * ice_lag_unregister - handle netdev unregister events - * @lag: LAG info struct - * @netdev: netdev reporting the event + * ice_lag_primary_swid - set/clear the SHARED attrib of primary's SWID + * @lag: primary interface's lag struct + * @link: is this a linking activity + * + * Implement setting primary SWID as shared using 0x020B */ -static void ice_lag_unregister(struct ice_lag *lag, struct net_device *netdev) +static void ice_lag_primary_swid(struct ice_lag *lag, bool link) { - struct ice_pf *pf = lag->pf; + struct ice_hw *hw; + u16 swid; - /* check to see if this event is for this netdev - * check that we are in an aggregate - */ - if (netdev != lag->netdev || !lag->bonded) + hw = &lag->pf->hw; + swid = hw->port_info->sw_id; + + if (ice_share_res(hw, ICE_AQC_RES_TYPE_SWID, link, swid)) + dev_warn(ice_pf_to_dev(lag->pf), "Failure to set primary interface shared status\n"); +} + +/** + * ice_lag_add_prune_list - Adds event_pf's VSI to primary's prune list + * @lag: lag info struct + * @event_pf: PF struct for VSI we are adding to primary's prune list + */ +static void ice_lag_add_prune_list(struct ice_lag *lag, struct ice_pf *event_pf) +{ + u16 num_vsi, rule_buf_sz, vsi_list_id, event_vsi_num, prim_vsi_idx; + struct ice_sw_rule_vsi_list *s_rule = NULL; + struct device *dev; + + num_vsi = 1; + + dev = ice_pf_to_dev(lag->pf); + event_vsi_num = event_pf->vsi[0]->vsi_num; + prim_vsi_idx = lag->pf->vsi[0]->idx; + + if (!ice_find_vsi_list_entry(&lag->pf->hw, ICE_SW_LKUP_VLAN, + prim_vsi_idx, &vsi_list_id)) { + dev_warn(dev, "Could not locate prune list when setting up SRIOV LAG\n"); return; + } - if (lag->upper_netdev) { - dev_put(lag->upper_netdev); - lag->upper_netdev = NULL; - ice_set_rdma_cap(pf); + rule_buf_sz = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi); + s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); + if (!s_rule) { + dev_warn(dev, "Error allocating space for prune list when configuring SRIOV LAG\n"); + return; } - /* perform some cleanup in case we come back */ - lag->bonded = false; - lag->role = ICE_LAG_NONE; + + s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_PRUNE_LIST_SET); + s_rule->index = cpu_to_le16(vsi_list_id); + s_rule->number_vsi = cpu_to_le16(num_vsi); + s_rule->vsi[0] = cpu_to_le16(event_vsi_num); + + if (ice_aq_sw_rules(&event_pf->hw, s_rule, rule_buf_sz, 1, + ice_aqc_opc_update_sw_rules, NULL)) + dev_warn(dev, "Error adding VSI prune list\n"); + kfree(s_rule); +} + +/** + * ice_lag_del_prune_list - Remove secondary's vsi from primary's prune list + * @lag: primary interface's ice_lag struct + * @event_pf: PF struct for unlinking interface + */ +static void ice_lag_del_prune_list(struct ice_lag *lag, struct ice_pf *event_pf) +{ + u16 num_vsi, vsi_num, vsi_idx, rule_buf_sz, vsi_list_id; + struct ice_sw_rule_vsi_list *s_rule = NULL; + struct device *dev; + + num_vsi = 1; + + dev = ice_pf_to_dev(lag->pf); + vsi_num = event_pf->vsi[0]->vsi_num; + vsi_idx = lag->pf->vsi[0]->idx; + + if (!ice_find_vsi_list_entry(&lag->pf->hw, ICE_SW_LKUP_VLAN, + vsi_idx, &vsi_list_id)) { + dev_warn(dev, "Could not locate prune list when unwinding SRIOV LAG\n"); + return; + } + + rule_buf_sz = 
(u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi); + s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); + if (!s_rule) { + dev_warn(dev, "Error allocating prune list when unwinding SRIOV LAG\n"); + return; + } + + s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR); + s_rule->index = cpu_to_le16(vsi_list_id); + s_rule->number_vsi = cpu_to_le16(num_vsi); + s_rule->vsi[0] = cpu_to_le16(vsi_num); + + if (ice_aq_sw_rules(&event_pf->hw, (struct ice_aqc_sw_rules *)s_rule, + rule_buf_sz, 1, ice_aqc_opc_update_sw_rules, NULL)) + dev_warn(dev, "Error clearing VSI prune list\n"); + + kfree(s_rule); +} + +/** + * ice_lag_init_feature_support_flag - Check for NVM support for LAG + * @pf: PF struct + */ +static void ice_lag_init_feature_support_flag(struct ice_pf *pf) +{ + struct ice_hw_common_caps *caps; + + caps = &pf->hw.dev_caps.common_cap; + if (caps->roce_lag) + ice_set_feature_support(pf, ICE_F_ROCE_LAG); + else + ice_clear_feature_support(pf, ICE_F_ROCE_LAG); + + if (caps->sriov_lag) + ice_set_feature_support(pf, ICE_F_SRIOV_LAG); + else + ice_clear_feature_support(pf, ICE_F_SRIOV_LAG); } /** * ice_lag_changeupper_event - handle LAG changeupper event * @lag: LAG info struct * @ptr: opaque pointer data - * - * ptr is to be cast into netdev_notifier_changeupper_info */ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr) { struct netdev_notifier_changeupper_info *info; + struct ice_lag *primary_lag; struct net_device *netdev; info = ptr; @@ -244,44 +1246,437 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr) if (netdev != lag->netdev) return; - if (!info->upper_dev) { - netdev_dbg(netdev, "changeupper rcvd, but no upper defined\n"); + primary_lag = ice_lag_find_primary(lag); + if (info->linking) { + lag->upper_netdev = info->upper_dev; + /* If there is not already a primary interface in the LAG, + * then mark this one as primary. + */ + if (!primary_lag) { + lag->primary = true; + /* Configure primary's SWID to be shared */ + ice_lag_primary_swid(lag, true); + primary_lag = lag; + } else { + u16 swid; + + swid = primary_lag->pf->hw.port_info->sw_id; + ice_lag_set_swid(swid, lag, true); + ice_lag_add_prune_list(primary_lag, lag->pf); + ice_lag_cfg_drop_fltr(lag, true); + } + /* add filter for primary control packets */ + ice_lag_cfg_cp_fltr(lag, true); + } else { + if (!primary_lag && lag->primary) + primary_lag = lag; + + if (!lag->primary) { + ice_lag_set_swid(0, lag, false); + } else { + if (primary_lag && lag->primary) { + ice_lag_primary_swid(lag, false); + ice_lag_del_prune_list(primary_lag, lag->pf); + } + } + /* remove filter for control packets */ + ice_lag_cfg_cp_fltr(lag, false); + } +} + +/** + * ice_lag_monitor_link - monitor interfaces entering/leaving the aggregate + * @lag: lag info struct + * @ptr: opaque data containing notifier event + * + * This function only operates after a primary has been set. 
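 + *
 + * Comment-form sketch of the reclaim performed when a port leaves the
 + * aggregate (this mirrors the handler body below; shown here only for
 + * orientation):
 + *
 + *	active_hw = ice_lag_find_hw_by_lport(lag, lag->active_port);
 + *	ice_lag_reclaim_vf_nodes(lag, active_hw);	// nodes back to primary
 + *	lag->active_port = ICE_LAG_INVALID_PORT;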
+ */ +static void ice_lag_monitor_link(struct ice_lag *lag, void *ptr) +{ + struct netdev_notifier_changeupper_info *info; + struct ice_hw *prim_hw, *active_hw; + struct net_device *event_netdev; + struct ice_pf *pf; + u8 prim_port; + + if (!lag->primary) + return; + + event_netdev = netdev_notifier_info_to_dev(ptr); + if (!netif_is_same_ice(lag->pf, event_netdev)) + return; + + pf = lag->pf; + prim_hw = &pf->hw; + prim_port = prim_hw->port_info->lport; + + info = (struct netdev_notifier_changeupper_info *)ptr; + if (info->upper_dev != lag->upper_netdev) + return; + + if (!info->linking) { + /* Since there are only two interfaces allowed in SRIOV+LAG, if + * one port is leaving, then nodes need to be on primary + * interface. + */ + if (prim_port != lag->active_port && + lag->active_port != ICE_LAG_INVALID_PORT) { + active_hw = ice_lag_find_hw_by_lport(lag, + lag->active_port); + ice_lag_reclaim_vf_nodes(lag, active_hw); + lag->active_port = ICE_LAG_INVALID_PORT; + } } +} + +/** + * ice_lag_monitor_active - main PF keeps track of which port is active + * @lag: lag info struct + * @ptr: opaque data containing notifier event + * + * This function is for the primary PF to monitor changes in which port is + * active and handle changes for SRIOV VF functionality. + */ +static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr) +{ + struct net_device *event_netdev, *event_upper; + struct netdev_notifier_bonding_info *info; + struct netdev_bonding_info *bonding_info; + struct ice_netdev_priv *event_np; + struct ice_pf *pf, *event_pf; + u8 prim_port, event_port; + + if (!lag->primary) + return; - pf = lag->pf; + if (!pf) + return; - netdev_dbg(netdev, "bonding %s\n", info->linking ? "LINK" : "UNLINK"); + event_netdev = netdev_notifier_info_to_dev(ptr); + rcu_read_lock(); + event_upper = netdev_master_upper_dev_get_rcu(event_netdev); + rcu_read_unlock(); + if (!netif_is_ice(event_netdev) || event_upper != lag->upper_netdev) return; + + event_np = netdev_priv(event_netdev); + event_pf = event_np->vsi->back; + event_port = event_pf->hw.port_info->lport; + prim_port = pf->hw.port_info->lport; + + info = (struct netdev_notifier_bonding_info *)ptr; + bonding_info = &info->bonding_info; + + if (!bonding_info->slave.state) { + /* if no port is currently active, then nodes and filters exist + * on primary port, check if we need to move them + */ + if (lag->active_port == ICE_LAG_INVALID_PORT) { + if (event_port != prim_port) + ice_lag_move_vf_nodes(lag, prim_port, + event_port); + lag->active_port = event_port; + return; + } + + /* active port is already set and is current event port */ + if (lag->active_port == event_port) + return; + /* new active port */ + ice_lag_move_vf_nodes(lag, lag->active_port, event_port); + lag->active_port = event_port; + } else { + /* port not set as currently active (e.g.
new active port + * has already claimed the nodes and filters) + */ + if (lag->active_port != event_port) + return; + /* This is the case when neither port is active (both link down). + * Link down on the bond - set active port to invalid and move + * nodes and filters back to primary if not already there + */ + if (event_port != prim_port) + ice_lag_move_vf_nodes(lag, event_port, prim_port); + lag->active_port = ICE_LAG_INVALID_PORT; + } +} - if (info->linking) - ice_lag_link(lag, info); - else - ice_lag_unlink(lag, info); +/** + * ice_lag_chk_comp - evaluate bonded interface for feature support + * @lag: lag info struct + * @ptr: opaque data for netdev event info + */ +static bool +ice_lag_chk_comp(struct ice_lag *lag, void *ptr) +{ + struct net_device *event_netdev, *event_upper; + struct netdev_notifier_bonding_info *info; + struct netdev_bonding_info *bonding_info; + struct list_head *tmp; + struct device *dev; + int count = 0; - if (!lag->primary) + return true; - ice_display_lag_info(lag); + if (!lag->primary) + return true; + + event_netdev = netdev_notifier_info_to_dev(ptr); + rcu_read_lock(); + event_upper = netdev_master_upper_dev_get_rcu(event_netdev); + rcu_read_unlock(); + if (event_upper != lag->upper_netdev) + return true; + + dev = ice_pf_to_dev(lag->pf); + + /* only supporting switchdev mode for SRIOV VF LAG. + * primary interface has to be in switchdev mode + */ + if (!ice_is_switchdev_running(lag->pf)) { + dev_info(dev, "Primary interface not in switchdev mode - VF LAG disabled\n"); + return false; + } + + info = (struct netdev_notifier_bonding_info *)ptr; + bonding_info = &info->bonding_info; + lag->bond_mode = bonding_info->master.bond_mode; + if (lag->bond_mode != BOND_MODE_ACTIVEBACKUP) { + dev_info(dev, "Bond Mode not ACTIVE-BACKUP - VF LAG disabled\n"); + return false; + } + + list_for_each(tmp, lag->netdev_head) { + struct ice_dcbx_cfg *dcb_cfg, *peer_dcb_cfg; + struct ice_lag_netdev_list *entry; + struct ice_netdev_priv *peer_np; + struct net_device *peer_netdev; + struct ice_vsi *vsi, *peer_vsi; + struct ice_pf *peer_pf; + + entry = list_entry(tmp, struct ice_lag_netdev_list, node); + peer_netdev = entry->netdev; + if (!netif_is_ice(peer_netdev)) { + dev_info(dev, "Found %s non-ice netdev in LAG - VF LAG disabled\n", + netdev_name(peer_netdev)); + return false; + } + + count++; + if (count > 2) { + dev_info(dev, "Found more than two netdevs in LAG - VF LAG disabled\n"); + return false; + } + + peer_np = netdev_priv(peer_netdev); + vsi = ice_get_main_vsi(lag->pf); + peer_vsi = peer_np->vsi; + if (lag->pf->pdev->bus != peer_vsi->back->pdev->bus || + lag->pf->pdev->slot != peer_vsi->back->pdev->slot) { + dev_info(dev, "Found %s on different device in LAG - VF LAG disabled\n", + netdev_name(peer_netdev)); + return false; + } + + dcb_cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg; + peer_dcb_cfg = &peer_vsi->port_info->qos_cfg.local_dcbx_cfg; + if (memcmp(dcb_cfg, peer_dcb_cfg, + sizeof(struct ice_dcbx_cfg))) { + dev_info(dev, "Found %s with different DCB in LAG - VF LAG disabled\n", + netdev_name(peer_netdev)); + return false; + } + + peer_pf = peer_vsi->back; + if (test_bit(ICE_FLAG_FW_LLDP_AGENT, peer_pf->flags)) { + dev_warn(dev, "Found %s with FW LLDP agent active - VF LAG disabled\n", + netdev_name(peer_netdev)); + return false; + } + } + + return true; } /** - * ice_lag_changelower_event - handle LAG changelower event + * ice_lag_unregister - handle netdev unregister events * @lag: LAG info struct - * @ptr: opaque data pointer + * @event_netdev: netdev struct for target of notifier event + */
+static void +ice_lag_unregister(struct ice_lag *lag, struct net_device *event_netdev) +{ + struct ice_netdev_priv *np; + struct ice_pf *event_pf; + struct ice_lag *p_lag; + + p_lag = ice_lag_find_primary(lag); + np = netdev_priv(event_netdev); + event_pf = np->vsi->back; + + if (p_lag) { + if (p_lag->active_port != p_lag->pf->hw.port_info->lport && + p_lag->active_port != ICE_LAG_INVALID_PORT) { + struct ice_hw *active_hw; + + active_hw = ice_lag_find_hw_by_lport(lag, + p_lag->active_port); + if (active_hw) + ice_lag_reclaim_vf_nodes(p_lag, active_hw); + lag->active_port = ICE_LAG_INVALID_PORT; + } + } + + /* primary processing for primary */ + if (lag->primary && lag->netdev == event_netdev) + ice_lag_primary_swid(lag, false); + + /* primary processing for secondary */ + if (lag->primary && lag->netdev != event_netdev) + ice_lag_del_prune_list(lag, event_pf); + + /* secondary processing for secondary */ + if (!lag->primary && lag->netdev == event_netdev) + ice_lag_set_swid(0, lag, false); +} + +/** + * ice_lag_monitor_rdma - set and clear rdma functionality + * @lag: pointer to lag struct + * @ptr: opaque data for netdev event info + */ +static void +ice_lag_monitor_rdma(struct ice_lag *lag, void *ptr) +{ + struct netdev_notifier_changeupper_info *info; + struct net_device *netdev; + + info = ptr; + netdev = netdev_notifier_info_to_dev(ptr); + + if (netdev != lag->netdev) + return; + + if (info->linking) + ice_clear_rdma_cap(lag->pf); + else + ice_set_rdma_cap(lag->pf); +} + +/** + * ice_lag_chk_disabled_bond - monitor interfaces entering/leaving disabled bond + * @lag: lag info struct + * @ptr: opaque data containing event * - * ptr to be cast to netdev_notifier_changelowerstate_info + * as interfaces enter a bond - determine if the bond is currently + * SRIOV LAG compliant and flag if not. As interfaces leave the + * bond, reset their compliant status. 
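 + *
 + * The feature flag cleared here is what gates later SRIOV LAG handling;
 + * a sketch of the guard the event handlers below apply:
 + *
 + *	if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
 + *		return;		// aggregate was flagged non-compliant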
*/ -static void ice_lag_changelower_event(struct ice_lag *lag, void *ptr) +static void ice_lag_chk_disabled_bond(struct ice_lag *lag, void *ptr) { struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + struct netdev_notifier_changeupper_info *info = ptr; + struct ice_lag *prim_lag; if (netdev != lag->netdev) return; - netdev_dbg(netdev, "bonding info\n"); + if (info->linking) { + prim_lag = ice_lag_find_primary(lag); + if (prim_lag && + !ice_is_feature_supported(prim_lag->pf, ICE_F_SRIOV_LAG)) { + ice_clear_feature_support(lag->pf, ICE_F_SRIOV_LAG); + netdev_info(netdev, "Interface added to non-compliant SRIOV LAG aggregate\n"); + } + } else { + ice_lag_init_feature_support_flag(lag->pf); + } +} + +/** + * ice_lag_disable_sriov_bond - set members of bond as not supporting SRIOV LAG + * @lag: primary interfaces lag struct + */ +static void ice_lag_disable_sriov_bond(struct ice_lag *lag) +{ + struct ice_netdev_priv *np; + struct ice_pf *pf; + + np = netdev_priv(lag->netdev); + pf = np->vsi->back; + ice_clear_feature_support(pf, ICE_F_SRIOV_LAG); +} + +/** + * ice_lag_process_event - process a task assigned to the lag_wq + * @work: pointer to work_struct + */ +static void ice_lag_process_event(struct work_struct *work) +{ + struct netdev_notifier_changeupper_info *info; + struct ice_lag_work *lag_work; + struct net_device *netdev; + struct list_head *tmp, *n; + struct ice_pf *pf; + + lag_work = container_of(work, struct ice_lag_work, lag_task); + pf = lag_work->lag->pf; + + mutex_lock(&pf->lag_mutex); + lag_work->lag->netdev_head = &lag_work->netdev_list.node; + + switch (lag_work->event) { + case NETDEV_CHANGEUPPER: + info = &lag_work->info.changeupper_info; + ice_lag_chk_disabled_bond(lag_work->lag, info); + if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) { + ice_lag_monitor_link(lag_work->lag, info); + ice_lag_changeupper_event(lag_work->lag, info); + ice_lag_link_unlink(lag_work->lag, info); + } + ice_lag_monitor_rdma(lag_work->lag, info); + break; + case NETDEV_BONDING_INFO: + if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) { + if (!ice_lag_chk_comp(lag_work->lag, + &lag_work->info.bonding_info)) { + netdev = lag_work->info.bonding_info.info.dev; + ice_lag_disable_sriov_bond(lag_work->lag); + ice_lag_unregister(lag_work->lag, netdev); + goto lag_cleanup; + } + ice_lag_monitor_active(lag_work->lag, + &lag_work->info.bonding_info); + ice_lag_cfg_pf_fltrs(lag_work->lag, + &lag_work->info.bonding_info); + } + ice_lag_info_event(lag_work->lag, &lag_work->info.bonding_info); + break; + case NETDEV_UNREGISTER: + if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) { + netdev = lag_work->info.bonding_info.info.dev; + if ((netdev == lag_work->lag->netdev || + lag_work->lag->primary) && lag_work->lag->bonded) + ice_lag_unregister(lag_work->lag, netdev); + } + break; + default: + break; + } + +lag_cleanup: + /* cleanup resources allocated for this work item */ + list_for_each_safe(tmp, n, &lag_work->netdev_list.node) { + struct ice_lag_netdev_list *entry; + + entry = list_entry(tmp, struct ice_lag_netdev_list, node); + list_del(&entry->node); + kfree(entry); + } + lag_work->lag->netdev_head = NULL; + + mutex_unlock(&pf->lag_mutex); - if (!netif_is_lag_port(netdev)) - netdev_dbg(netdev, "CHANGELOWER rcvd, but netdev not in LAG. 
Bail\n"); + kfree(lag_work); } /** @@ -295,34 +1690,79 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event, void *ptr) { struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + struct net_device *upper_netdev; + struct ice_lag_work *lag_work; struct ice_lag *lag; - lag = container_of(notif_blk, struct ice_lag, notif_block); + if (!netif_is_ice(netdev)) + return NOTIFY_DONE; + + if (event != NETDEV_CHANGEUPPER && event != NETDEV_BONDING_INFO && + event != NETDEV_UNREGISTER) + return NOTIFY_DONE; + + if (!(netdev->priv_flags & IFF_BONDING)) + return NOTIFY_DONE; + lag = container_of(notif_blk, struct ice_lag, notif_block); if (!lag->netdev) return NOTIFY_DONE; - /* Check that the netdev is in the working namespace */ if (!net_eq(dev_net(netdev), &init_net)) return NOTIFY_DONE; + /* This memory will be freed at the end of ice_lag_process_event */ + lag_work = kzalloc(sizeof(*lag_work), GFP_KERNEL); + if (!lag_work) + return -ENOMEM; + + lag_work->event_netdev = netdev; + lag_work->lag = lag; + lag_work->event = event; + if (event == NETDEV_CHANGEUPPER) { + struct netdev_notifier_changeupper_info *info; + + info = ptr; + upper_netdev = info->upper_dev; + } else { + upper_netdev = netdev_master_upper_dev_get(netdev); + } + + INIT_LIST_HEAD(&lag_work->netdev_list.node); + if (upper_netdev) { + struct ice_lag_netdev_list *nd_list; + struct net_device *tmp_nd; + + rcu_read_lock(); + for_each_netdev_in_bond_rcu(upper_netdev, tmp_nd) { + nd_list = kzalloc(sizeof(*nd_list), GFP_ATOMIC); + if (!nd_list) + break; + + nd_list->netdev = tmp_nd; + list_add(&nd_list->node, &lag_work->netdev_list.node); + } + rcu_read_unlock(); + } + switch (event) { case NETDEV_CHANGEUPPER: - ice_lag_changeupper_event(lag, ptr); - break; - case NETDEV_CHANGELOWERSTATE: - ice_lag_changelower_event(lag, ptr); + lag_work->info.changeupper_info = + *((struct netdev_notifier_changeupper_info *)ptr); break; case NETDEV_BONDING_INFO: - ice_lag_info_event(lag, ptr); - break; - case NETDEV_UNREGISTER: - ice_lag_unregister(lag, netdev); + lag_work->info.bonding_info = + *((struct netdev_notifier_bonding_info *)ptr); break; default: + lag_work->info.notifier_info = + *((struct netdev_notifier_info *)ptr); break; } + INIT_WORK(&lag_work->lag_task, ice_lag_process_event); + queue_work(ice_lag_wq, &lag_work->lag_task); + return NOTIFY_DONE; } @@ -366,6 +1806,166 @@ static void ice_unregister_lag_handler(struct ice_lag *lag) } /** + * ice_create_lag_recipe + * @hw: pointer to HW struct + * @rid: pointer to u16 to pass back recipe index + * @base_recipe: recipe to base the new recipe on + * @prio: priority for new recipe + * + * function returns 0 on error + */ +static int ice_create_lag_recipe(struct ice_hw *hw, u16 *rid, + const u8 *base_recipe, u8 prio) +{ + struct ice_aqc_recipe_data_elem *new_rcp; + int err; + + err = ice_alloc_recipe(hw, rid); + if (err) + return err; + + new_rcp = kzalloc(ICE_RECIPE_LEN * ICE_MAX_NUM_RECIPES, GFP_KERNEL); + if (!new_rcp) + return -ENOMEM; + + memcpy(new_rcp, base_recipe, ICE_RECIPE_LEN); + new_rcp->content.act_ctrl_fwd_priority = prio; + new_rcp->content.rid = *rid | ICE_AQ_RECIPE_ID_IS_ROOT; + new_rcp->recipe_indx = *rid; + bitmap_zero((unsigned long *)new_rcp->recipe_bitmap, + ICE_MAX_NUM_RECIPES); + set_bit(*rid, (unsigned long *)new_rcp->recipe_bitmap); + + err = ice_aq_add_recipe(hw, new_rcp, 1, NULL); + if (err) + *rid = 0; + + kfree(new_rcp); + return err; +} + +/** + * ice_lag_move_vf_nodes_tc_sync - move a VF's nodes for a tc during reset + * @lag: 
primary interface's lag struct + * @dest_hw: HW struct for destination's interface + * @vsi_num: VSI index in PF space + * @tc: traffic class to move + */ +static void +ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw, + u16 vsi_num, u8 tc) +{ + DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1); + struct device *dev = ice_pf_to_dev(lag->pf); + u16 numq, valq, num_moved, qbuf_size; + u16 buf_size = __struct_size(buf); + struct ice_aqc_cfg_txqs_buf *qbuf; + struct ice_sched_node *n_prt; + __le32 teid, parent_teid; + struct ice_vsi_ctx *ctx; + struct ice_hw *hw; + u32 tmp_teid; + + hw = &lag->pf->hw; + ctx = ice_get_vsi_ctx(hw, vsi_num); + if (!ctx) { + dev_warn(dev, "LAG rebuild failed after reset due to VSI Context failure\n"); + return; + } + + if (!ctx->sched.vsi_node[tc]) + return; + + numq = ctx->num_lan_q_entries[tc]; + teid = ctx->sched.vsi_node[tc]->info.node_teid; + tmp_teid = le32_to_cpu(teid); + parent_teid = ctx->sched.vsi_node[tc]->info.parent_teid; + + if (!tmp_teid || !numq) + return; + + if (ice_sched_suspend_resume_elems(hw, 1, &tmp_teid, true)) + dev_dbg(dev, "Problem suspending traffic during reset rebuild\n"); + + /* reconfig queues for new port */ + qbuf_size = struct_size(qbuf, queue_info, numq); + qbuf = kzalloc(qbuf_size, GFP_KERNEL); + if (!qbuf) { + dev_warn(dev, "Failure allocating VF queue recfg buffer for reset rebuild\n"); + goto resume_sync; + } + + /* add the per queue info for the reconfigure command buffer */ + valq = ice_lag_qbuf_recfg(hw, qbuf, vsi_num, numq, tc); + if (!valq) { + dev_warn(dev, "Failure to reconfig queues for LAG reset rebuild\n"); + goto sync_none; + } + + if (ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, valq, hw->port_info->lport, + dest_hw->port_info->lport, NULL)) { + dev_warn(dev, "Failure to configure queues for LAG reset rebuild\n"); + goto sync_qerr; + } + +sync_none: + kfree(qbuf); + + /* find parent in destination tree */ + n_prt = ice_lag_get_sched_parent(dest_hw, tc); + if (!n_prt) + goto resume_sync; + + /* Move node to new parent */ + buf->hdr.src_parent_teid = parent_teid; + buf->hdr.dest_parent_teid = n_prt->info.node_teid; + buf->hdr.num_elems = cpu_to_le16(1); + buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN; + buf->teid[0] = teid; + + if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved)) + dev_warn(dev, "Failure to move VF nodes for LAG reset rebuild\n"); + else + ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]); + + goto resume_sync; + +sync_qerr: + kfree(qbuf); + +resume_sync: + if (ice_sched_suspend_resume_elems(hw, 1, &tmp_teid, false)) + dev_warn(dev, "Problem restarting traffic for LAG node reset rebuild\n"); +} + +/** + * ice_lag_move_vf_nodes_sync - move vf nodes to active interface + * @lag: primary interface's lag struct + * @dest_hw: HW struct for the currently active interface + * + * This function is used in a reset context, outside of event handling, + * to move the VF nodes to the secondary interface when that interface + * is the active interface during a reset rebuild. + */ +static void +ice_lag_move_vf_nodes_sync(struct ice_lag *lag, struct ice_hw *dest_hw) +{ + struct ice_pf *pf; + int i, tc; + + if (!lag->primary || !dest_hw) + return; + + pf = lag->pf; + ice_for_each_vsi(pf, i) + if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF || + pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL)) + ice_for_each_traffic_class(tc) + ice_lag_move_vf_nodes_tc_sync(lag, dest_hw, i, + tc); +} + +/** * ice_init_lag - initialize support for LAG * @pf: PF struct * @@ -377,7 +1977,12 @@ int
ice_init_lag(struct ice_pf *pf) struct device *dev = ice_pf_to_dev(pf); struct ice_lag *lag; struct ice_vsi *vsi; - int err; + u64 recipe_bits = 0; + int n, err; + + ice_lag_init_feature_support_flag(pf); + if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) + return 0; pf->lag = kzalloc(sizeof(*lag), GFP_KERNEL); if (!pf->lag) @@ -394,8 +1999,8 @@ int ice_init_lag(struct ice_pf *pf) lag->pf = pf; lag->netdev = vsi->netdev; lag->role = ICE_LAG_NONE; + lag->active_port = ICE_LAG_INVALID_PORT; lag->bonded = false; - lag->peer_netdev = NULL; lag->upper_netdev = NULL; lag->notif_block.notifier_call = NULL; @@ -405,11 +2010,39 @@ int ice_init_lag(struct ice_pf *pf) goto lag_error; } + err = ice_create_lag_recipe(&pf->hw, &lag->pf_recipe, + ice_dflt_vsi_rcp, 1); + if (err) + goto lag_error; + + err = ice_create_lag_recipe(&pf->hw, &lag->lport_recipe, + ice_lport_rcp, 3); + if (err) + goto free_rcp_res; + + /* associate recipes to profiles */ + for (n = 0; n < ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER; n++) { + err = ice_aq_get_recipe_to_profile(&pf->hw, n, + (u8 *)&recipe_bits, NULL); + if (err) + continue; + + if (recipe_bits & BIT(ICE_SW_LKUP_DFLT)) { + recipe_bits |= BIT(lag->pf_recipe) | + BIT(lag->lport_recipe); + ice_aq_map_recipe_to_profile(&pf->hw, n, + (u8 *)&recipe_bits, NULL); + } + } + ice_display_lag_info(lag); dev_dbg(dev, "INIT LAG complete\n"); return 0; +free_rcp_res: + ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1, + &pf->lag->pf_recipe); lag_error: kfree(lag); pf->lag = NULL; @@ -435,11 +2068,107 @@ void ice_deinit_lag(struct ice_pf *pf) if (lag->pf) ice_unregister_lag_handler(lag); - dev_put(lag->upper_netdev); + flush_workqueue(ice_lag_wq); - dev_put(lag->peer_netdev); + ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1, + &pf->lag->pf_recipe); + ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1, + &pf->lag->lport_recipe); kfree(lag); pf->lag = NULL; } + +/** + * ice_lag_rebuild - rebuild lag resources after reset + * @pf: pointer to local pf struct + * + * PF resets are promoted to CORER resets when the interface is in an + * aggregate. This means that we need to rebuild the PF resources for the + * interface. Since this will happen outside the normal event processing, + * we need to acquire the lag lock. + * + * This function will also evaluate the VF resources if this is the primary + * interface.
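 + *
 + * Sketch of the expected call context (the surrounding rebuild caller is
 + * an assumption for illustration, not part of this hunk):
 + *
 + *	// in the PF rebuild path, after a promoted CORER reset:
 + *	ice_lag_rebuild(pf);	// restore SWID, prune list and filters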
+ */ +void ice_lag_rebuild(struct ice_pf *pf) +{ + struct ice_lag_netdev_list ndlist; + struct ice_lag *lag, *prim_lag; + u8 act_port, loc_port; + + if (!pf->lag || !pf->lag->bonded) + return; + + mutex_lock(&pf->lag_mutex); + + lag = pf->lag; + if (lag->primary) { + prim_lag = lag; + } else { + ice_lag_build_netdev_list(lag, &ndlist); + prim_lag = ice_lag_find_primary(lag); + } + + if (!prim_lag) { + dev_dbg(ice_pf_to_dev(pf), "No primary interface in aggregate, can't rebuild\n"); + goto lag_rebuild_out; + } + + act_port = prim_lag->active_port; + loc_port = lag->pf->hw.port_info->lport; + + /* configure SWID for this port */ + if (lag->primary) { + ice_lag_primary_swid(lag, true); + } else { + ice_lag_set_swid(prim_lag->pf->hw.port_info->sw_id, lag, true); + ice_lag_add_prune_list(prim_lag, pf); + if (act_port == loc_port) + ice_lag_move_vf_nodes_sync(prim_lag, &pf->hw); + } + + ice_lag_cfg_cp_fltr(lag, true); + + if (lag->pf_rule_id) + if (ice_lag_cfg_dflt_fltr(lag, true)) + dev_err(ice_pf_to_dev(pf), "Error adding default VSI rule in rebuild\n"); + + ice_clear_rdma_cap(pf); +lag_rebuild_out: + ice_lag_destroy_netdev_list(lag, &ndlist); + mutex_unlock(&pf->lag_mutex); +} + +/** + * ice_lag_is_switchdev_running + * @pf: pointer to PF structure + * + * Check if switchdev is running on any of the interfaces connected to lag. + */ +bool ice_lag_is_switchdev_running(struct ice_pf *pf) +{ + struct ice_lag *lag = pf->lag; + struct net_device *tmp_nd; + + if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) || !lag) + return false; + + rcu_read_lock(); + for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) { + struct ice_netdev_priv *priv = netdev_priv(tmp_nd); + + if (!netif_is_ice(tmp_nd) || !priv || !priv->vsi || + !priv->vsi->back) + continue; + + if (ice_is_switchdev_running(priv->vsi->back)) { + rcu_read_unlock(); + return true; + } + } + rcu_read_unlock(); + + return false; +} diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h index 2c373676c42f..ede833dfa658 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.h +++ b/drivers/net/ethernet/intel/ice/ice_lag.h @@ -14,20 +14,56 @@ enum ice_lag_role { ICE_LAG_UNSET }; +#define ICE_LAG_INVALID_PORT 0xFF + +#define ICE_LAG_RESET_RETRIES 5 + struct ice_pf; +struct ice_vf; + +struct ice_lag_netdev_list { + struct list_head node; + struct net_device *netdev; +}; /* LAG info struct */ struct ice_lag { struct ice_pf *pf; /* backlink to PF struct */ struct net_device *netdev; /* this PF's netdev */ - struct net_device *peer_netdev; struct net_device *upper_netdev; /* upper bonding netdev */ + struct list_head *netdev_head; struct notifier_block notif_block; + s32 bond_mode; + u16 bond_swid; /* swid for primary interface */ + u8 active_port; /* lport value for the current active port */ u8 bonded:1; /* currently bonded */ u8 primary:1; /* this is primary */ + u16 pf_recipe; + u16 lport_recipe; + u16 pf_rule_id; + u16 cp_rule_idx; + u16 lport_rule_idx; u8 role; }; +/* LAG workqueue struct */ +struct ice_lag_work { + struct work_struct lag_task; + struct ice_lag_netdev_list netdev_list; + struct ice_lag *lag; + unsigned long event; + struct net_device *event_netdev; + union { + struct netdev_notifier_changeupper_info changeupper_info; + struct netdev_notifier_bonding_info bonding_info; + struct netdev_notifier_info notifier_info; + } info; +}; + +void ice_lag_move_new_vf_nodes(struct ice_vf *vf); int ice_init_lag(struct ice_pf *pf); void ice_deinit_lag(struct ice_pf *pf); +void ice_lag_rebuild(struct ice_pf 
*pf); +bool ice_lag_is_switchdev_running(struct ice_pf *pf); +void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt); #endif /* _ICE_LAG_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 0054d7e64ec3..1bad6e17f9be 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -229,7 +229,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi) * of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the * original vector count */ - vsi->num_q_vectors = pf->vfs.num_msix_per - ICE_NONQ_VECS_VF; + vsi->num_q_vectors = vf->num_msix - ICE_NONQ_VECS_VF; break; case ICE_VSI_CTRL: vsi->alloc_txq = 1; @@ -907,6 +907,7 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi) { struct ice_hw_common_caps *cap; struct ice_pf *pf = vsi->back; + u16 max_rss_size; if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { vsi->rss_size = 1; @@ -914,32 +915,31 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi) } cap = &pf->hw.func_caps.common_cap; + max_rss_size = BIT(cap->rss_table_entry_width); switch (vsi->type) { case ICE_VSI_CHNL: case ICE_VSI_PF: /* PF VSI will inherit RSS instance of PF */ vsi->rss_table_size = (u16)cap->rss_table_size; if (vsi->type == ICE_VSI_CHNL) - vsi->rss_size = min_t(u16, vsi->num_rxq, - BIT(cap->rss_table_entry_width)); + vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size); else vsi->rss_size = min_t(u16, num_online_cpus(), - BIT(cap->rss_table_entry_width)); - vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; + max_rss_size); + vsi->rss_lut_type = ICE_LUT_PF; break; case ICE_VSI_SWITCHDEV_CTRL: - vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE; - vsi->rss_size = min_t(u16, num_online_cpus(), - BIT(cap->rss_table_entry_width)); - vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI; + vsi->rss_table_size = ICE_LUT_VSI_SIZE; + vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size); + vsi->rss_lut_type = ICE_LUT_VSI; break; case ICE_VSI_VF: /* VF VSI will get a small RSS table. * For VSI_LUT, LUT size should be set to 64 bytes. */ - vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE; + vsi->rss_table_size = ICE_LUT_VSI_SIZE; vsi->rss_size = ICE_MAX_RSS_QS_PER_VF; - vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI; + vsi->rss_lut_type = ICE_LUT_VSI; break; case ICE_VSI_LB: break; @@ -1201,8 +1201,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | - ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) & - ICE_AQ_VSI_Q_OPT_RSS_HASH_M); + (hash_type & ICE_AQ_VSI_Q_OPT_RSS_HASH_M); } static void @@ -1228,6 +1227,17 @@ ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) } /** + * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not + * @vsi: VSI to check whether or not VLAN pruning is enabled. + * + * returns true if Rx VLAN pruning is enabled and false otherwise. 
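 + *
 + * Illustrative use only (the helper becomes static to this file in this
 + * change, so any callers live in ice_lib.c):
 + *
 + *	if (ice_vsi_is_vlan_pruning_ena(vsi))
 + *		;	// Rx VLAN pruning is enabled for this VSI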
+ */ +static bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi) +{ + return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; +} + +/** * ice_vsi_init - Create and initialize a VSI * @vsi: the VSI being configured * @vsi_flags: VSI configuration flags @@ -1685,6 +1695,27 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) } /** + * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length + * @vsi: VSI + */ +static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi) +{ + if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) { + vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX; + vsi->rx_buf_len = ICE_RXBUF_1664; +#if (PAGE_SIZE < 8192) + } else if (!ICE_2K_TOO_SMALL_WITH_PADDING && + (vsi->netdev->mtu <= ETH_DATA_LEN)) { + vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN; + vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN; +#endif + } else { + vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; + vsi->rx_buf_len = ICE_RXBUF_3072; + } +} + +/** * ice_pf_state_is_nominal - checks the PF for nominal state * @pf: pointer to PF to check * @@ -1759,27 +1790,6 @@ void ice_update_eth_stats(struct ice_vsi *vsi) } /** - * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length - * @vsi: VSI - */ -void ice_vsi_cfg_frame_size(struct ice_vsi *vsi) -{ - if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) { - vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX; - vsi->rx_buf_len = ICE_RXBUF_1664; -#if (PAGE_SIZE < 8192) - } else if (!ICE_2K_TOO_SMALL_WITH_PADDING && - (vsi->netdev->mtu <= ETH_DATA_LEN)) { - vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN; - vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN; -#endif - } else { - vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; - vsi->rx_buf_len = ICE_RXBUF_3072; - } -} - -/** * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register * @hw: HW pointer * @pf_q: index of the Rx queue in the PF's queue space @@ -1821,21 +1831,14 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx) int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx) { - struct ice_aqc_add_tx_qgrp *qg_buf; - int err; + DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1); if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx]) return -EINVAL; - qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL); - if (!qg_buf) - return -ENOMEM; - qg_buf->num_txqs = 1; - err = ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf); - kfree(qg_buf); - return err; + return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf); } /** @@ -1877,24 +1880,18 @@ setup_rings: static int ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count) { - struct ice_aqc_add_tx_qgrp *qg_buf; - u16 q_idx = 0; + DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1); int err = 0; - - qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL); - if (!qg_buf) - return -ENOMEM; + u16 q_idx; qg_buf->num_txqs = 1; for (q_idx = 0; q_idx < count; q_idx++) { err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf); if (err) - goto err_cfg_txqs; + break; } -err_cfg_txqs: - kfree(qg_buf); return err; } @@ -2185,20 +2182,6 @@ bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi) return false; } -/** - * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not - * @vsi: VSI to check whether or not VLAN pruning is enabled. - * - * returns true if Rx VLAN pruning is enabled and false otherwise. 
- */ -bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi) -{ - if (!vsi) - return false; - - return (vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA); -} - static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) { if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) { @@ -2388,6 +2371,9 @@ static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi) } else { max_txqs[i] = vsi->alloc_txq; } + + if (vsi->type == ICE_VSI_PF) + max_txqs[i] += vsi->num_xdp_txq; } dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); @@ -2637,10 +2623,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi) if (vsi->type == ICE_VSI_VF && vsi->agg_node && vsi->agg_node->valid) vsi->agg_node->num_vsis--; - if (vsi->agg_node) { - vsi->agg_node->valid = false; - vsi->agg_node->agg_id = 0; - } } /** @@ -2944,21 +2926,6 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi) } /** - * ice_napi_del - Remove NAPI handler for the VSI - * @vsi: VSI for which NAPI handler is to be removed - */ -void ice_napi_del(struct ice_vsi *vsi) -{ - int v_idx; - - if (!vsi->netdev) - return; - - ice_for_each_q_vector(vsi, v_idx) - netif_napi_del(&vsi->q_vectors[v_idx]->napi); -} - -/** * ice_vsi_release - Delete a VSI and free its resources * @vsi: the VSI being removed * @@ -3593,6 +3560,12 @@ int ice_set_dflt_vsi(struct ice_vsi *vsi) dev = ice_pf_to_dev(vsi->back); + if (ice_lag_is_switchdev_running(vsi->back)) { + dev_dbg(dev, "VSI %d passed is a part of LAG containing interfaces in switchdev mode, nothing to do\n", + vsi->vsi_num); + return 0; + } + /* the VSI passed in is already the default VSI */ if (ice_is_vsi_dflt_vsi(vsi)) { dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n", @@ -3970,7 +3943,7 @@ bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f) * @pf: pointer to the struct ice_pf instance * @f: feature enum to set */ -static void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f) +void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f) { if (f < 0 || f >= ICE_F_MAX) return; @@ -4003,13 +3976,21 @@ void ice_init_feature_support(struct ice_pf *pf) case ICE_DEV_ID_E810C_BACKPLANE: case ICE_DEV_ID_E810C_QSFP: case ICE_DEV_ID_E810C_SFP: + case ICE_DEV_ID_E810_XXV_BACKPLANE: + case ICE_DEV_ID_E810_XXV_QSFP: + case ICE_DEV_ID_E810_XXV_SFP: ice_set_feature_support(pf, ICE_F_DSCP); - ice_set_feature_support(pf, ICE_F_PTP_EXTTS); - if (ice_is_e810t(&pf->hw)) { + if (ice_is_phy_rclk_in_netlist(&pf->hw)) + ice_set_feature_support(pf, ICE_F_PHY_RCLK); + /* If we don't own the timer - don't enable other caps */ + if (!ice_pf_src_tmr_owned(pf)) + break; + if (ice_is_cgu_in_netlist(&pf->hw)) + ice_set_feature_support(pf, ICE_F_CGU); + if (ice_is_clock_mux_in_netlist(&pf->hw)) ice_set_feature_support(pf, ICE_F_SMA_CTRL); - if (ice_gnss_is_gps_present(&pf->hw)) - ice_set_feature_support(pf, ICE_F_GNSS); - } + if (ice_gnss_is_gps_present(&pf->hw)) + ice_set_feature_support(pf, ICE_F_GNSS); break; default: break; @@ -4076,3 +4057,28 @@ void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx) { ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; } + +/** + * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit + * @vsi: pointer to VSI structure + * @set: set or unset the bit + */ +int +ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set) +{ + struct ice_vsi_ctx ctx = { + .info = vsi->info, + }; + + ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); + if (set) + ctx.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_LOCAL_LB; + 
else + ctx.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_LOCAL_LB; + + if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL)) + return -ENODEV; + + vsi->info = ctx.info; + return 0; +} diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index e985766e6bb5..f24f5d1e6f9c 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -76,8 +76,6 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi); int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi); -bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi); - void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create); int ice_set_link(struct ice_vsi *vsi, bool ena); @@ -93,8 +91,6 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc); struct ice_vsi * ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params); -void ice_napi_del(struct ice_vsi *vsi); - int ice_vsi_release(struct ice_vsi *vsi); void ice_vsi_close(struct ice_vsi *vsi); @@ -130,7 +126,6 @@ void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes); void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes); -void ice_vsi_cfg_frame_size(struct ice_vsi *vsi); void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl); void ice_write_itr(struct ice_ring_container *rc, u16 itr); void ice_set_q_vector_intrl(struct ice_q_vector *q_vector); @@ -157,11 +152,13 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx); void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx); void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx); +int ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set); int ice_vsi_add_vlan_zero(struct ice_vsi *vsi); int ice_vsi_del_vlan_zero(struct ice_vsi *vsi); bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi); u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi); bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f); +void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f); void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f); void ice_init_feature_support(struct ice_pf *pf); bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index b40dfe6ae321..fb9c93f37e84 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1,11 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2018, Intel Corporation. */ +/* Copyright (c) 2018-2023, Intel Corporation. 
*/ /* Intel(R) Ethernet Connection E800 Series Linux Driver */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <generated/utsrelease.h> +#include <linux/crash_dump.h> #include "ice.h" #include "ice_base.h" #include "ice_lib.h" @@ -64,6 +65,7 @@ struct device *ice_hw_to_dev(struct ice_hw *hw) } static struct workqueue_struct *ice_wq; +struct workqueue_struct *ice_lag_wq; static const struct net_device_ops ice_netdev_safe_mode_ops; static const struct net_device_ops ice_netdev_ops; @@ -80,7 +82,7 @@ ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *data, void (*cleanup)(struct flow_block_cb *block_cb)); -bool netif_is_ice(struct net_device *dev) +bool netif_is_ice(const struct net_device *dev) { return dev && (dev->netdev_ops == &ice_netdev_ops); } @@ -635,6 +637,11 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) dev_dbg(dev, "reset_type 0x%x requested\n", reset_type); + if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) { + dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n"); + reset_type = ICE_RESET_CORER; + } + ice_prepare_for_reset(pf, reset_type); /* trigger the reset */ @@ -718,8 +725,13 @@ static void ice_reset_subtask(struct ice_pf *pf) } /* No pending resets to finish processing. Check for new resets */ - if (test_bit(ICE_PFR_REQ, pf->state)) + if (test_bit(ICE_PFR_REQ, pf->state)) { reset_type = ICE_RESET_PFR; + if (pf->lag && pf->lag->bonded) { + dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n"); + reset_type = ICE_RESET_CORER; + } + } if (test_bit(ICE_CORER_REQ, pf->state)) reset_type = ICE_RESET_CORER; if (test_bit(ICE_GLOBR_REQ, pf->state)) @@ -1239,64 +1251,63 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) return status; } -enum ice_aq_task_state { - ICE_AQ_TASK_WAITING = 0, - ICE_AQ_TASK_COMPLETE, - ICE_AQ_TASK_CANCELED, -}; - -struct ice_aq_task { - struct hlist_node entry; +/** + * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware + * @pf: pointer to the PF private structure + * @task: intermediate helper storage and identifier for waiting + * @opcode: the opcode to wait for + * + * Prepares to wait for a specific AdminQ completion event on the ARQ for + * a given PF. The actual wait is done by a later call to + * ice_aq_wait_for_event(). + * + * The calls are separated so the caller can register for the event before + * sending the command, which mitigates a race between registering and the + * FW responding. + * + * To obtain only the descriptor contents, pass a task->event with a null + * msg_buf. If the complete data buffer is desired, allocate the + * task->event.msg_buf with enough space ahead of time. + */ +void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task, + u16 opcode) +{ + INIT_HLIST_NODE(&task->entry); + task->opcode = opcode; + task->state = ICE_AQ_TASK_WAITING; - u16 opcode; - struct ice_rq_event_info *event; - enum ice_aq_task_state state; -}; + spin_lock_bh(&pf->aq_wait_lock); + hlist_add_head(&task->entry, &pf->aq_wait_list); + spin_unlock_bh(&pf->aq_wait_lock); +} /** * ice_aq_wait_for_event - Wait for an AdminQ event from firmware * @pf: pointer to the PF private structure - * @opcode: the opcode to wait for + * @task: pointer prepared by ice_aq_prep_for_event() * @timeout: how long to wait, in jiffies - * @event: storage for the event info * * Waits for a specific AdminQ completion event on the ARQ for a given PF.
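Splitting ice_aq_prep_for_event() from ice_aq_wait_for_event() fixes an ordering problem the old single-call API could not: the task is now on pf->aq_wait_list before the command goes out, so a fast firmware response cannot arrive ahead of the registration. A hedged caller sketch; ice_aq_send_cmd() here is only a placeholder for whichever AdminQ command the caller is actually waiting on:

struct ice_aq_task task = {};
int err;

/* 1. Register interest first, closing the registration/response race */
ice_aq_prep_for_event(pf, &task, opcode);

/* 2. Then send the command (placeholder call) */
err = ice_aq_send_cmd(&pf->hw, &desc, NULL, 0, NULL);

/* 3. Then sleep until ice_aq_check_events() marks the task complete;
 *    the wait path unlinks the task from pf->aq_wait_list. */
if (!err)
	err = ice_aq_wait_for_event(pf, &task, HZ);
/* On a send error a real caller must still ensure the task is unlinked,
 * e.g. by letting ice_aq_wait_for_event() run and time out. */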
The * current thread will be put to sleep until the specified event occurs or * until the given timeout is reached. * - * To obtain only the descriptor contents, pass an event without an allocated - * msg_buf. If the complete data buffer is desired, allocate the - * event->msg_buf with enough space ahead of time. - * * Returns: zero on success, or a negative error code on failure. */ -int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout, - struct ice_rq_event_info *event) +int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task, + unsigned long timeout) { + enum ice_aq_task_state *state = &task->state; struct device *dev = ice_pf_to_dev(pf); - struct ice_aq_task *task; - unsigned long start; + unsigned long start = jiffies; long ret; int err; - task = kzalloc(sizeof(*task), GFP_KERNEL); - if (!task) - return -ENOMEM; - - INIT_HLIST_NODE(&task->entry); - task->opcode = opcode; - task->event = event; - task->state = ICE_AQ_TASK_WAITING; - - spin_lock_bh(&pf->aq_wait_lock); - hlist_add_head(&task->entry, &pf->aq_wait_list); - spin_unlock_bh(&pf->aq_wait_lock); - - start = jiffies; - - ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state, + ret = wait_event_interruptible_timeout(pf->aq_wait_queue, + *state != ICE_AQ_TASK_WAITING, timeout); - switch (task->state) { + switch (*state) { + case ICE_AQ_TASK_NOT_PREPARED: + WARN(1, "call to %s without ice_aq_prep_for_event()", __func__); + err = -EINVAL; + break; case ICE_AQ_TASK_WAITING: err = ret < 0 ? ret : -ETIMEDOUT; break; @@ -1307,7 +1318,7 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout, err = ret < 0 ? ret : 0; break; default: - WARN(1, "Unexpected AdminQ wait task state %u", task->state); + WARN(1, "Unexpected AdminQ wait task state %u", *state); err = -EINVAL; break; } @@ -1315,12 +1326,11 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout, dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n", jiffies_to_msecs(jiffies - start), jiffies_to_msecs(timeout), - opcode); + task->opcode); spin_lock_bh(&pf->aq_wait_lock); hlist_del(&task->entry); spin_unlock_bh(&pf->aq_wait_lock); - kfree(task); return err; } @@ -1346,23 +1356,26 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout, static void ice_aq_check_events(struct ice_pf *pf, u16 opcode, struct ice_rq_event_info *event) { + struct ice_rq_event_info *task_ev; struct ice_aq_task *task; bool found = false; spin_lock_bh(&pf->aq_wait_lock); hlist_for_each_entry(task, &pf->aq_wait_list, entry) { - if (task->state || task->opcode != opcode) + if (task->state != ICE_AQ_TASK_WAITING) + continue; + if (task->opcode != opcode) continue; - memcpy(&task->event->desc, &event->desc, sizeof(event->desc)); - task->event->msg_len = event->msg_len; + task_ev = &task->event; + memcpy(&task_ev->desc, &event->desc, sizeof(event->desc)); + task_ev->msg_len = event->msg_len; /* Only copy the data buffer if a destination was set */ - if (task->event->msg_buf && - task->event->buf_len > event->buf_len) { - memcpy(task->event->msg_buf, event->msg_buf, + if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) { + memcpy(task_ev->msg_buf, event->msg_buf, event->buf_len); - task->event->buf_len = event->buf_len; + task_ev->buf_len = event->buf_len; } task->state = ICE_AQ_TASK_COMPLETE; @@ -1746,7 +1759,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) wr32(hw, GL_MDET_TX_PQM, 0xffffffff); } - reg = rd32(hw, GL_MDET_TX_TCLAN); + 
reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw)); if (reg & GL_MDET_TX_TCLAN_VALID_M) { u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> GL_MDET_TX_TCLAN_PF_NUM_S; @@ -1760,7 +1773,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", event, queue, pf_num, vf_num); - wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff); + wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX); } reg = rd32(hw, GL_MDET_RX); @@ -1788,9 +1801,9 @@ static void ice_handle_mdd_event(struct ice_pf *pf) dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n"); } - reg = rd32(hw, PF_MDET_TX_TCLAN); + reg = rd32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw)); if (reg & PF_MDET_TX_TCLAN_VALID_M) { - wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF); + wr32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw), 0xffff); if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n"); } @@ -3137,7 +3150,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) if (oicr & PFINT_OICR_TSYN_TX_M) { ena_mask &= ~PFINT_OICR_TSYN_TX_M; - if (!hw->reset_ongoing) + if (!hw->reset_ongoing && ice_ptp_pf_handles_tx_interrupt(pf)) set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread); } @@ -3147,7 +3160,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) ena_mask &= ~PFINT_OICR_TSYN_EVNT_M; - if (hw->func_caps.ts_func_info.src_tmr_owned) { + if (ice_pf_src_tmr_owned(pf)) { /* Save EVENTs from GLTSYN register */ pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M | @@ -3392,6 +3405,7 @@ static void ice_set_ops(struct ice_vsi *vsi) netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_XSK_ZEROCOPY | NETDEV_XDP_ACT_RX_SG; + netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD; } /** @@ -3794,6 +3808,7 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf) static void ice_deinit_pf(struct ice_pf *pf) { ice_service_task_stop(pf); + mutex_destroy(&pf->lag_mutex); mutex_destroy(&pf->adev_mutex); mutex_destroy(&pf->sw_mutex); mutex_destroy(&pf->tc_mutex); @@ -3856,7 +3871,8 @@ static void ice_set_pf_caps(struct ice_pf *pf) } clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); - if (func_caps->common_cap.ieee_1588) + if (func_caps->common_cap.ieee_1588 && + !(pf->hw.mac_type == ICE_MAC_E830)) set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); pf->max_pf_txqs = func_caps->common_cap.num_txq; @@ -3874,6 +3890,7 @@ static int ice_init_pf(struct ice_pf *pf) mutex_init(&pf->sw_mutex); mutex_init(&pf->tc_mutex); mutex_init(&pf->adev_mutex); + mutex_init(&pf->lag_mutex); INIT_HLIST_HEAD(&pf->aq_wait_list); spin_lock_init(&pf->aq_wait_lock); @@ -4506,6 +4523,31 @@ static void ice_deinit_eth(struct ice_pf *pf) ice_decfg_netdev(vsi); } +/** + * ice_wait_for_fw - wait for full FW readiness + * @hw: pointer to the hardware structure + * @timeout: milliseconds that can elapse before timing out + */ +static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout) +{ + int fw_loading; + u32 elapsed = 0; + + while (elapsed <= timeout) { + fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M; + + /* firmware was not yet loaded, we have to wait more */ + if (fw_loading) { + elapsed += 100; + msleep(100); + continue; + } + return 0; + } + + return -ETIMEDOUT; +} + static int ice_init_dev(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); @@ -4518,6 +4560,18 @@ static int ice_init_dev(struct ice_pf *pf) return err; } + /* Some cards require longer initialization times + * due to necessity of loading FW from an 
external source. + * This can take even half a minute. + */ + if (ice_is_pf_c827(hw)) { + err = ice_wait_for_fw(hw, 30000); + if (err) { + dev_err(dev, "ice_wait_for_fw timed out"); + return err; + } + } + ice_init_feature_support(pf); ice_request_fw(pf); @@ -4613,6 +4667,10 @@ static void ice_init_features(struct ice_pf *pf) if (ice_is_feature_supported(pf, ICE_F_GNSS)) ice_gnss_init(pf); + if (ice_is_feature_supported(pf, ICE_F_CGU) || + ice_is_feature_supported(pf, ICE_F_PHY_RCLK)) + ice_dpll_init(pf); + /* Note: Flow director init failure is non-fatal to load */ if (ice_init_fdir(pf)) dev_err(dev, "could not initialize flow director\n"); @@ -4631,6 +4689,9 @@ static void ice_init_features(struct ice_pf *pf) static void ice_deinit_features(struct ice_pf *pf) { + if (ice_is_safe_mode(pf)) + return; + ice_deinit_lag(pf); if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)) ice_cfg_lldp_mib_change(&pf->hw, false); @@ -4639,6 +4700,8 @@ static void ice_deinit_features(struct ice_pf *pf) ice_gnss_exit(pf); if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) ice_ptp_release(pf); + if (test_bit(ICE_FLAG_DPLL, pf->flags)) + ice_dpll_deinit(pf); } static void ice_init_wakeup(struct ice_pf *pf) @@ -4962,6 +5025,20 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) return -EINVAL; } + /* when under a kdump kernel initiate a reset before enabling the + * device in order to clear out any pending DMA transactions. These + * transactions can cause some systems to machine check when doing + * the pcim_enable_device() below. + */ + if (is_kdump_kernel()) { + pci_save_state(pdev); + pci_clear_master(pdev); + err = pcie_flr(pdev); + if (err) + return err; + pci_restore_state(pdev); + } + /* this driver uses devres, see * Documentation/driver-api/driver-model/devres.rst */ @@ -5465,7 +5542,7 @@ static void ice_pci_err_resume(struct pci_dev *pdev) return; } - ice_restore_all_vfs_msi_state(pdev); + ice_restore_all_vfs_msi_state(pf); ice_do_reset(pf, ICE_RESET_PFR); ice_service_task_restart(pf); @@ -5508,34 +5585,38 @@ static void ice_pci_err_reset_done(struct pci_dev *pdev) * Class, Class Mask, private data (not used) } */ static const struct pci_device_id ice_pci_tbl[] = { - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 }, - { PCI_VDEVICE(INTEL, 
ICE_DEV_ID_E823L_QSFP), 0 }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_BACKPLANE) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_QSFP56) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP_DD) }, /* required last entry */ - { 0, } + {} }; MODULE_DEVICE_TABLE(pci, ice_pci_tbl); @@ -5559,6 +5640,8 @@ static struct pci_driver ice_driver = { #endif /* CONFIG_PM */ .shutdown = ice_shutdown, .sriov_configure = ice_sriov_configure, + .sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix, + .sriov_set_msix_vec_count = ice_sriov_set_msix_vec_count, .err_handler = &ice_pci_err_handler }; @@ -5570,23 +5653,37 @@ static struct pci_driver ice_driver = { */ static int __init ice_module_init(void) { - int status; + int status = -ENOMEM; pr_info("%s\n", ice_driver_string); pr_info("%s\n", ice_copyright); + ice_adv_lnk_speed_maps_init(); + ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME); if (!ice_wq) { pr_err("Failed to create workqueue\n"); - return -ENOMEM; + return status; + } + + ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0); + if (!ice_lag_wq) { + pr_err("Failed to create LAG workqueue\n"); + goto err_dest_wq; } status = pci_register_driver(&ice_driver); if (status) { pr_err("failed to register PCI driver, err %d\n", status); - destroy_workqueue(ice_wq); + goto err_dest_lag_wq; } + return 0; + +err_dest_lag_wq: + destroy_workqueue(ice_lag_wq); +err_dest_wq: + destroy_workqueue(ice_wq); return status; } module_init(ice_module_init); @@ -5601,6 +5698,7 @@ static void __exit ice_module_exit(void) { pci_unregister_driver(&ice_driver); destroy_workqueue(ice_wq); + destroy_workqueue(ice_lag_wq); pr_info("module unloaded\n"); } module_exit(ice_module_exit); @@ -5703,7 +5801,7 @@ static void ice_set_rx_mode(struct net_device *netdev) struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; - if (!vsi) + if (!vsi || ice_is_switchdev_running(vsi->back)) return; /* Set the flags to synchronize filters @@ -6255,7 +6353,7 @@ static void ice_tx_dim_work(struct work_struct *work) u16 itr; dim = container_of(work, struct dim, work); - rc = (struct ice_ring_container *)dim->priv; + rc 
= dim->priv; WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile)); @@ -6275,7 +6373,7 @@ static void ice_rx_dim_work(struct work_struct *work) u16 itr; dim = container_of(work, struct dim, work); - rc = (struct ice_ring_container *)dim->priv; + rc = dim->priv; WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile)); @@ -7303,10 +7401,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) goto err_vsi_rebuild; } - /* configure PTP timestamping after VSI rebuild */ - if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) - ice_ptp_cfg_timestamp(pf, false); - err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL); if (err) { dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err); @@ -7356,6 +7450,11 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) clear_bit(ICE_RESET_FAILED, pf->state); ice_plug_aux_dev(pf); + if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) + ice_lag_rebuild(pf); + + /* Restore timestamp mode settings after VSI rebuild */ + ice_ptp_restore_timestamp_mode(pf); return; err_vsi_rebuild: diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h index 6a9364761165..f6f27361c3cf 100644 --- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h +++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h @@ -287,6 +287,7 @@ struct ice_nvgre_hdr { * M = EVLAN (0x8100) - Outer L2 header has EVLAN (ethernet type 0x8100) * N = EVLAN (0x9100) - Outer L2 header has EVLAN (ethernet type 0x9100) */ +#define ICE_PKT_FROM_NETWORK BIT(3) #define ICE_PKT_VLAN_STAG BIT(12) #define ICE_PKT_VLAN_ITAG BIT(13) #define ICE_PKT_VLAN_EVLAN (BIT(14) | BIT(15)) @@ -392,10 +393,10 @@ enum ice_hw_metadata_offset { }; enum ice_pkt_flags { - ICE_PKT_FLAGS_VLAN = 0, - ICE_PKT_FLAGS_TUNNEL = 1, - ICE_PKT_FLAGS_TCP = 2, - ICE_PKT_FLAGS_ERROR = 3, + ICE_PKT_FLAGS_MDID20 = 0, + ICE_PKT_FLAGS_MDID21 = 1, + ICE_PKT_FLAGS_MDID22 = 2, + ICE_PKT_FLAGS_MDID23 = 3, }; struct ice_hw_metadata { diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 81d96a40d5a7..71f405f8a6fe 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -39,8 +39,8 @@ ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins) /* initialize with defaults */ for (i = 0; i < NUM_PTP_PINS_E810T; i++) { - snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name), - "%s", ice_pin_desc_e810t[i].name); + strscpy(ptp_pins[i].name, ice_pin_desc_e810t[i].name, + sizeof(ptp_pins[i].name)); ptp_pins[i].index = ice_pin_desc_e810t[i].index; ptp_pins[i].func = ice_pin_desc_e810t[i].func; ptp_pins[i].chan = ice_pin_desc_e810t[i].chan; @@ -256,36 +256,42 @@ ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin, } /** - * ice_set_tx_tstamp - Enable or disable Tx timestamping - * @pf: The PF pointer to search in - * @on: bool value for whether timestamps are enabled or disabled + * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device + * @pf: Board private structure + * + * Program the device to respond appropriately to the Tx timestamp interrupt + * cause. 
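The tx_interrupt_mode handling in ice_ptp_cfg_tx_interrupt() boils down to a small decision table: ALL unmasks every quad and turns the OICR cause on, NONE masks all quads and turns it off, and SELF leaves the quad mask alone and keys the enable off the stored hwtstamp tx_type. A compilable model of just the enable decision; the register writes themselves stay in the driver:

#include <stdbool.h>
#include <stdio.h>

enum tx_irq_mode { TX_IRQ_NONE, TX_IRQ_SELF, TX_IRQ_ALL };

/* Mirrors the 'enable' computation in ice_ptp_cfg_tx_interrupt() */
static bool tsyn_tx_irq_enable(enum tx_irq_mode mode, bool hwtstamp_tx_on)
{
	switch (mode) {
	case TX_IRQ_ALL:
		return true;		/* clock owner reacts for all quads */
	case TX_IRQ_NONE:
		return false;		/* another PF handles the timestamps */
	case TX_IRQ_SELF:
	default:
		return hwtstamp_tx_on;	/* follow the user's hwtstamp config */
	}
}

int main(void)
{
	printf("ALL:%d NONE:%d SELF(on):%d SELF(off):%d\n",
	       tsyn_tx_irq_enable(TX_IRQ_ALL, false),
	       tsyn_tx_irq_enable(TX_IRQ_NONE, true),
	       tsyn_tx_irq_enable(TX_IRQ_SELF, true),
	       tsyn_tx_irq_enable(TX_IRQ_SELF, false));
	return 0;
}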
*/ -static void ice_set_tx_tstamp(struct ice_pf *pf, bool on) +static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf) { - struct ice_vsi *vsi; + struct ice_hw *hw = &pf->hw; + bool enable; u32 val; - u16 i; - - vsi = ice_get_main_vsi(pf); - if (!vsi) - return; - /* Set the timestamp enable flag for all the Tx rings */ - ice_for_each_txq(vsi, i) { - if (!vsi->tx_rings[i]) - continue; - vsi->tx_rings[i]->ptp_tx = on; + switch (pf->ptp.tx_interrupt_mode) { + case ICE_PTP_TX_INTERRUPT_ALL: + /* React to interrupts across all quads. */ + wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f); + enable = true; + break; + case ICE_PTP_TX_INTERRUPT_NONE: + /* Do not react to interrupts on any quad. */ + wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0); + enable = false; + break; + case ICE_PTP_TX_INTERRUPT_SELF: + default: + enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON; + break; } /* Configure the Tx timestamp interrupt */ - val = rd32(&pf->hw, PFINT_OICR_ENA); - if (on) + val = rd32(hw, PFINT_OICR_ENA); + if (enable) val |= PFINT_OICR_TSYN_TX_M; else val &= ~PFINT_OICR_TSYN_TX_M; - wr32(&pf->hw, PFINT_OICR_ENA, val); - - pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + wr32(hw, PFINT_OICR_ENA, val); } /** @@ -299,7 +305,7 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on) u16 i; vsi = ice_get_main_vsi(pf); - if (!vsi) + if (!vsi || !vsi->rx_rings) return; /* Set the timestamp flag for all the Rx rings */ @@ -308,148 +314,50 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on) continue; vsi->rx_rings[i]->ptp_rx = on; } - - pf->ptp.tstamp_config.rx_filter = on ? HWTSTAMP_FILTER_ALL : - HWTSTAMP_FILTER_NONE; } /** - * ice_ptp_cfg_timestamp - Configure timestamp for init/deinit + * ice_ptp_disable_timestamp_mode - Disable current timestamp mode * @pf: Board private structure - * @ena: bool value to enable or disable time stamp - * - * This function will configure timestamping during PTP initialization - * and deinitialization - */ -void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) -{ - ice_set_tx_tstamp(pf, ena); - ice_set_rx_tstamp(pf, ena); -} - -/** - * ice_get_ptp_clock_index - Get the PTP clock index - * @pf: the PF pointer - * - * Determine the clock index of the PTP clock associated with this device. If - * this is the PF controlling the clock, just use the local access to the - * clock device pointer. - * - * Otherwise, read from the driver shared parameters to determine the clock - * index value. - * - * Returns: the index of the PTP clock associated with this device, or -1 if - * there is no associated clock. - */ -int ice_get_ptp_clock_index(struct ice_pf *pf) -{ - struct device *dev = ice_pf_to_dev(pf); - enum ice_aqc_driver_params param_idx; - struct ice_hw *hw = &pf->hw; - u8 tmr_idx; - u32 value; - int err; - - /* Use the ptp_clock structure if we're the main PF */ - if (pf->ptp.clock) - return ptp_clock_index(pf->ptp.clock); - - tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; - if (!tmr_idx) - param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0; - else - param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1; - - err = ice_aq_get_driver_param(hw, param_idx, &value, NULL); - if (err) { - dev_err(dev, "Failed to read PTP clock index parameter, err %d aq_err %s\n", - err, ice_aq_str(hw->adminq.sq_last_status)); - return -1; - } - - /* The PTP clock index is an integer, and will be between 0 and - * INT_MAX. 
The highest bit of the driver shared parameter is used to - * indicate whether or not the currently stored clock index is valid. - */ - if (!(value & PTP_SHARED_CLK_IDX_VALID)) - return -1; - - return value & ~PTP_SHARED_CLK_IDX_VALID; -} - -/** - * ice_set_ptp_clock_index - Set the PTP clock index - * @pf: the PF pointer - * - * Set the PTP clock index for this device into the shared driver parameters, - * so that other PFs associated with this device can read it. * - * If the PF is unable to store the clock index, it will log an error, but - * will continue operating PTP. + * Called during preparation for reset to temporarily disable timestamping on + * the device. Called during remove to disable timestamping while cleaning up + * driver resources. */ -static void ice_set_ptp_clock_index(struct ice_pf *pf) +static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf) { - struct device *dev = ice_pf_to_dev(pf); - enum ice_aqc_driver_params param_idx; struct ice_hw *hw = &pf->hw; - u8 tmr_idx; - u32 value; - int err; - - if (!pf->ptp.clock) - return; - - tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; - if (!tmr_idx) - param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0; - else - param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1; + u32 val; - value = (u32)ptp_clock_index(pf->ptp.clock); - if (value > INT_MAX) { - dev_err(dev, "PTP Clock index is too large to store\n"); - return; - } - value |= PTP_SHARED_CLK_IDX_VALID; + val = rd32(hw, PFINT_OICR_ENA); + val &= ~PFINT_OICR_TSYN_TX_M; + wr32(hw, PFINT_OICR_ENA, val); - err = ice_aq_set_driver_param(hw, param_idx, value, NULL); - if (err) { - dev_err(dev, "Failed to set PTP clock index parameter, err %d aq_err %s\n", - err, ice_aq_str(hw->adminq.sq_last_status)); - } + ice_set_rx_tstamp(pf, false); } /** - * ice_clear_ptp_clock_index - Clear the PTP clock index - * @pf: the PF pointer + * ice_ptp_restore_timestamp_mode - Restore timestamp configuration + * @pf: Board private structure * - * Clear the PTP clock index for this device. Must be called when - * unregistering the PTP clock, in order to ensure other PFs stop reporting - * a clock object that no longer exists. + * Called at the end of rebuild to restore timestamp configuration after + * a device reset. */ -static void ice_clear_ptp_clock_index(struct ice_pf *pf) +void ice_ptp_restore_timestamp_mode(struct ice_pf *pf) { - struct device *dev = ice_pf_to_dev(pf); - enum ice_aqc_driver_params param_idx; struct ice_hw *hw = &pf->hw; - u8 tmr_idx; - int err; + bool enable_rx; - /* Do not clear the index if we don't own the timer */ - if (!hw->func_caps.ts_func_info.src_tmr_owned) - return; + ice_ptp_cfg_tx_interrupt(pf); - tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; - if (!tmr_idx) - param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0; - else - param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1; + enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL; + ice_set_rx_tstamp(pf, enable_rx); - err = ice_aq_set_driver_param(hw, param_idx, 0, NULL); - if (err) { - dev_dbg(dev, "Failed to clear PTP clock index parameter, err %d aq_err %s\n", - err, ice_aq_str(hw->adminq.sq_last_status)); - } + /* Trigger an immediate software interrupt to ensure that timestamps + * which occurred during reset are handled now. 
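ice_ptp_restore_timestamp_mode() is a compact restore-after-reset recipe: reprogram the interrupt configuration, re-derive Rx timestamping from the saved hwtstamp filter rather than a separate flag, then write the Tx cause bit to PFINT_OICR so work that accrued during the reset window is handled immediately. A small model of the re-derivation step; the enum names are stand-ins for the HWTSTAMP_* UAPI constants:

#include <stdbool.h>
#include <stdio.h>

enum { TX_OFF, TX_ON };			/* stand-in for HWTSTAMP_TX_* */
enum { FILTER_NONE, FILTER_ALL };	/* stand-in for HWTSTAMP_FILTER_* */

struct tstamp_config { int tx_type; int rx_filter; };

/* Everything is re-derived from the one saved config, so no second
 * copy of timestamping state can drift across a device reset. */
static void restore_timestamp_mode(const struct tstamp_config *cfg)
{
	bool enable_tx = cfg->tx_type == TX_ON;
	bool enable_rx = cfg->rx_filter == FILTER_ALL;

	printf("tx irq %s, rx tstamp %s, then self-kick the Tx cause bit\n",
	       enable_tx ? "on" : "off", enable_rx ? "on" : "off");
}

int main(void)
{
	struct tstamp_config cfg = { TX_ON, FILTER_ALL };

	restore_timestamp_mode(&cfg);
	return 0;
}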
+ */ + wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); + ice_flush(hw); } /** @@ -674,9 +582,6 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx) int err; u8 idx; - if (!tx->init) - return; - ptp_port = container_of(tx, struct ice_ptp_port, tx); pf = ptp_port_to_pf(ptp_port); hw = &pf->hw; @@ -775,6 +680,39 @@ skip_ts_read: } /** + * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device + * @pf: Board private structure + */ +static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf) +{ + struct ice_ptp_port *port; + unsigned int i; + + mutex_lock(&pf->ptp.ports_owner.lock); + list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member) { + struct ice_ptp_tx *tx = &port->tx; + + if (!tx || !tx->init) + continue; + + ice_ptp_process_tx_tstamp(tx); + } + mutex_unlock(&pf->ptp.ports_owner.lock); + + for (i = 0; i < ICE_MAX_QUAD; i++) { + u64 tstamp_ready; + int err; + + /* Read the Tx ready status first */ + err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); + if (err || tstamp_ready) + return ICE_TX_TSTAMP_WORK_PENDING; + } + + return ICE_TX_TSTAMP_WORK_DONE; +} + +/** * ice_ptp_tx_tstamp - Process Tx timestamps for this function. * @tx: Tx tracking structure to initialize * @@ -1366,6 +1304,7 @@ out_unlock: void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) { struct ice_ptp_port *ptp_port; + struct ice_hw *hw = &pf->hw; if (!test_bit(ICE_FLAG_PTP, pf->flags)) return; @@ -1380,11 +1319,16 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) /* Update cached link status for this port immediately */ ptp_port->link_up = linkup; - /* E810 devices do not need to reconfigure the PHY */ - if (ice_is_e810(&pf->hw)) + switch (hw->phy_model) { + case ICE_PHY_E810: + /* Do not reconfigure E810 PHY */ return; - - ice_ptp_port_phy_restart(ptp_port); + case ICE_PHY_E822: + ice_ptp_port_phy_restart(ptp_port); + return; + default: + dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__); + } } /** @@ -1441,6 +1385,24 @@ static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf) } /** + * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping + * @pf: Board private structure + */ +static void ice_ptp_restart_all_phy(struct ice_pf *pf) +{ + struct list_head *entry; + + list_for_each(entry, &pf->ptp.ports_owner.ports) { + struct ice_ptp_port *port = list_entry(entry, + struct ice_ptp_port, + list_member); + + if (port->link_up) + ice_ptp_port_phy_restart(port); + } +} + +/** * ice_ptp_adjfine - Adjust clock increment rate * @info: the driver's PTP info structure * @scaled_ppm: Parts per million with 16-bit fractional field @@ -1877,9 +1839,9 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) /* Reenable periodic outputs */ ice_ptp_enable_all_clkout(pf); - /* Recalibrate and re-enable timestamp block */ - if (pf->ptp.port.link_up) - ice_ptp_port_phy_restart(&pf->ptp.port); + /* Recalibrate and re-enable timestamp blocks for E822/E823 */ + if (hw->phy_model == ICE_PHY_E822) + ice_ptp_restart_all_phy(pf); exit: if (err) { dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err); @@ -1976,21 +1938,32 @@ ice_ptp_get_syncdevicetime(ktime_t *device, u32 hh_lock, hh_art_ctl; int i; - /* Get the HW lock */ - hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); +#define MAX_HH_HW_LOCK_TRIES 5 +#define MAX_HH_CTL_LOCK_TRIES 100 + + for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) { + /* Get the HW lock */ + hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * 
hw->pf_id)); + if (hh_lock & PFHH_SEM_BUSY_M) { + usleep_range(10000, 15000); + continue; + } + break; + } if (hh_lock & PFHH_SEM_BUSY_M) { dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n"); - return -EFAULT; + return -EBUSY; } + /* Program cmd to master timer */ + ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME); + /* Start the ART and device clock sync sequence */ hh_art_ctl = rd32(hw, GLHH_ART_CTL); hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M; wr32(hw, GLHH_ART_CTL, hh_art_ctl); -#define MAX_HH_LOCK_TRIES 100 - - for (i = 0; i < MAX_HH_LOCK_TRIES; i++) { + for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) { /* Wait for sync to complete */ hh_art_ctl = rd32(hw, GLHH_ART_CTL); if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) { @@ -2014,19 +1987,23 @@ ice_ptp_get_syncdevicetime(ktime_t *device, break; } } + + /* Clear the master timer */ + ice_ptp_src_cmd(hw, ICE_PTP_NOP); + /* Release HW lock */ hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); hh_lock = hh_lock & ~PFHH_SEM_BUSY_M; wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock); - if (i == MAX_HH_LOCK_TRIES) + if (i == MAX_HH_CTL_LOCK_TRIES) return -ETIMEDOUT; return 0; } /** - * ice_ptp_getcrosststamp_e822 - Capture a device cross timestamp + * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp * @info: the driver's PTP info structure * @cts: The memory to fill the cross timestamp info * @@ -2034,14 +2011,14 @@ ice_ptp_get_syncdevicetime(ktime_t *device, * clock. Fill the cross timestamp information and report it back to the * caller. * - * This is only valid for E822 devices which have support for generating the - * cross timestamp via PCIe PTM. + * This is only valid for E822 and E823 devices which have support for + * generating the cross timestamp via PCIe PTM. * * In order to correctly correlate the ART timestamp back to the TSC time, the * CPU must have X86_FEATURE_TSC_KNOWN_FREQ. 
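The ice_ptp_get_syncdevicetime() changes above turn a single semaphore read into a bounded retry (five attempts with a 10-15 ms sleep) and return -EBUSY instead of -EFAULT when the hardware lock stays held; the sync-completion poll gets its own, separately named bound. The shape of the acquire loop as a standalone helper, with a stubbed busy() predicate standing in for the PFHH_SEM register read:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Stub: pretends the lock frees on the third poll */
static bool busy(void)
{
	static int calls;

	return ++calls < 3;
}

static int acquire_with_retries(int tries)
{
	int i;

	for (i = 0; i < tries; i++) {
		if (!busy())
			return 0;	/* got the lock */
		usleep(10000);		/* back off ~10 ms, as above */
	}
	return -EBUSY;			/* busy is not a fault */
}

int main(void)
{
	printf("acquire: %d\n", acquire_with_retries(5));
	return 0;
}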
*/ static int -ice_ptp_getcrosststamp_e822(struct ptp_clock_info *info, +ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info, struct system_device_crosststamp *cts) { struct ice_pf *pf = ptp_info_to_pf(info); @@ -2081,10 +2058,10 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) { switch (config->tx_type) { case HWTSTAMP_TX_OFF: - ice_set_tx_tstamp(pf, false); + pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF; break; case HWTSTAMP_TX_ON: - ice_set_tx_tstamp(pf, true); + pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON; break; default: return -ERANGE; @@ -2092,7 +2069,7 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: - ice_set_rx_tstamp(pf, false); + pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: @@ -2108,12 +2085,15 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: case HWTSTAMP_FILTER_ALL: - ice_set_rx_tstamp(pf, true); + pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE; } + /* Immediately update the device timestamping mode */ + ice_ptp_restore_timestamp_mode(pf); + return 0; } @@ -2246,18 +2226,20 @@ ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) static void ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info) { - info->n_per_out = N_PER_OUT_E810; - - if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS)) - info->n_ext_ts = N_EXT_TS_E810; - if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { info->n_ext_ts = N_EXT_TS_E810; + info->n_per_out = N_PER_OUT_E810T; info->n_pins = NUM_PTP_PINS_E810T; info->verify = ice_verify_pin_e810t; /* Complete setup of the SMA pins */ ice_ptp_setup_sma_pins_e810t(pf, info); + } else if (ice_is_e810t(&pf->hw)) { + info->n_ext_ts = N_EXT_TS_NO_SMA_E810T; + info->n_per_out = N_PER_OUT_NO_SMA_E810T; + } else { + info->n_per_out = N_PER_OUT_E810; + info->n_ext_ts = N_EXT_TS_E810; } } @@ -2275,22 +2257,22 @@ ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info) } /** - * ice_ptp_set_funcs_e822 - Set specialized functions for E822 support + * ice_ptp_set_funcs_e82x - Set specialized functions for E82x support * @pf: Board private structure * @info: PTP info to fill * - * Assign functions to the PTP capabiltiies structure for E822 devices. + * Assign functions to the PTP capabilities structure for E82x devices. * Functions which operate across all device families should be set directly - * in ice_ptp_set_caps. Only add functions here which are distinct for E822 + * in ice_ptp_set_caps. Only add functions here which are distinct for E82x * devices.
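The ice_ptp_set_timestamp_mode() hunk above stops poking hardware from inside the request parser: each case now only records the validated value into pf->ptp.tstamp_config, and one ice_ptp_restore_timestamp_mode() call applies the whole thing, so the ioctl path and the post-reset path share a single apply routine. A minimal model of that record-then-apply split:

#include <errno.h>
#include <stdio.h>

enum { TX_OFF, TX_ON };
enum { FILTER_NONE, FILTER_ALL };

struct config { int tx_type; int rx_filter; };

static struct config saved;	/* the one authoritative copy */

static void apply(void)
{
	printf("apply: tx=%d rx=%d\n", saved.tx_type, saved.rx_filter);
}

/* Validate first, record, then apply once at the end */
static int set_timestamp_mode(const struct config *req)
{
	if (req->tx_type != TX_OFF && req->tx_type != TX_ON)
		return -ERANGE;
	if (req->rx_filter != FILTER_NONE && req->rx_filter != FILTER_ALL)
		return -ERANGE;

	saved = *req;
	apply();	/* same path a post-reset restore would take */
	return 0;
}

int main(void)
{
	struct config req = { TX_ON, FILTER_ALL };

	return set_timestamp_mode(&req) ? 1 : 0;
}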
*/ static void -ice_ptp_set_funcs_e822(struct ice_pf *pf, struct ptp_clock_info *info) +ice_ptp_set_funcs_e82x(struct ice_pf *pf, struct ptp_clock_info *info) { #ifdef CONFIG_ICE_HWTS if (boot_cpu_has(X86_FEATURE_ART) && boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) - info->getcrosststamp = ice_ptp_getcrosststamp_e822; + info->getcrosststamp = ice_ptp_getcrosststamp_e82x; #endif /* CONFIG_ICE_HWTS */ } @@ -2324,6 +2306,8 @@ ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info) static void ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info) { + ice_ptp_set_funcs_e82x(pf, info); + info->enable = ice_ptp_gpio_enable_e823; ice_ptp_setup_pins_e823(pf, info); } @@ -2351,7 +2335,7 @@ static void ice_ptp_set_caps(struct ice_pf *pf) else if (ice_is_e823(&pf->hw)) ice_ptp_set_funcs_e823(pf, info); else - ice_ptp_set_funcs_e822(pf, info); + ice_ptp_set_funcs_e82x(pf, info); } /** @@ -2366,7 +2350,6 @@ static void ice_ptp_set_caps(struct ice_pf *pf) static long ice_ptp_create_clock(struct ice_pf *pf) { struct ptp_clock_info *info; - struct ptp_clock *clock; struct device *dev; /* No need to create a clock device if we already have one */ @@ -2379,11 +2362,11 @@ static long ice_ptp_create_clock(struct ice_pf *pf) dev = ice_pf_to_dev(pf); /* Attempt to register the clock before enabling the hardware. */ - clock = ptp_clock_register(info, dev); - if (IS_ERR(clock)) - return PTR_ERR(clock); - - pf->ptp.clock = clock; + pf->ptp.clock = ptp_clock_register(info, dev); + if (IS_ERR(pf->ptp.clock)) { + dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device"); + return PTR_ERR(pf->ptp.clock); + } return 0; } @@ -2440,7 +2423,21 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) */ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf) { - return ice_ptp_tx_tstamp(&pf->ptp.port.tx); + switch (pf->ptp.tx_interrupt_mode) { + case ICE_PTP_TX_INTERRUPT_NONE: + /* This device has the clock owner handle timestamps for it */ + return ICE_TX_TSTAMP_WORK_DONE; + case ICE_PTP_TX_INTERRUPT_SELF: + /* This device handles its own timestamps */ + return ice_ptp_tx_tstamp(&pf->ptp.port.tx); + case ICE_PTP_TX_INTERRUPT_ALL: + /* This device handles timestamps for all ports */ + return ice_ptp_tx_tstamp_owner(pf); + default: + WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n", + pf->ptp.tx_interrupt_mode); + return ICE_TX_TSTAMP_WORK_DONE; + } } static void ice_ptp_periodic_work(struct kthread_work *work) @@ -2474,7 +2471,7 @@ void ice_ptp_reset(struct ice_pf *pf) if (test_bit(ICE_PFR_REQ, pf->state)) goto pfr; - if (!hw->func_caps.ts_func_info.src_tmr_owned) + if (!ice_pf_src_tmr_owned(pf)) goto reset_ts; err = ice_ptp_init_phc(hw); @@ -2550,6 +2547,209 @@ err: } /** + * ice_ptp_aux_dev_to_aux_pf - Get auxiliary PF handle for the auxiliary device + * @aux_dev: auxiliary device to get the auxiliary PF for + */ +static struct ice_pf * +ice_ptp_aux_dev_to_aux_pf(struct auxiliary_device *aux_dev) +{ + struct ice_ptp_port *aux_port; + struct ice_ptp *aux_ptp; + + aux_port = container_of(aux_dev, struct ice_ptp_port, aux_dev); + aux_ptp = container_of(aux_port, struct ice_ptp, port); + + return container_of(aux_ptp, struct ice_pf, ptp); +} + +/** + * ice_ptp_aux_dev_to_owner_pf - Get PF handle for the auxiliary device + * @aux_dev: auxiliary device to get the PF for + */ +static struct ice_pf * +ice_ptp_aux_dev_to_owner_pf(struct auxiliary_device *aux_dev) +{ + struct ice_ptp_port_owner *ports_owner; + struct auxiliary_driver *aux_drv; + struct ice_ptp 
*owner_ptp; + + if (!aux_dev->dev.driver) + return NULL; + + aux_drv = to_auxiliary_drv(aux_dev->dev.driver); + ports_owner = container_of(aux_drv, struct ice_ptp_port_owner, + aux_driver); + owner_ptp = container_of(ports_owner, struct ice_ptp, ports_owner); + return container_of(owner_ptp, struct ice_pf, ptp); +} + +/** + * ice_ptp_auxbus_probe - Probe auxiliary devices + * @aux_dev: PF's auxiliary device + * @id: Auxiliary device ID + */ +static int ice_ptp_auxbus_probe(struct auxiliary_device *aux_dev, + const struct auxiliary_device_id *id) +{ + struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev); + struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev); + + if (WARN_ON(!owner_pf)) + return -ENODEV; + + INIT_LIST_HEAD(&aux_pf->ptp.port.list_member); + mutex_lock(&owner_pf->ptp.ports_owner.lock); + list_add(&aux_pf->ptp.port.list_member, + &owner_pf->ptp.ports_owner.ports); + mutex_unlock(&owner_pf->ptp.ports_owner.lock); + + return 0; +} + +/** + * ice_ptp_auxbus_remove - Remove auxiliary devices from the bus + * @aux_dev: PF's auxiliary device + */ +static void ice_ptp_auxbus_remove(struct auxiliary_device *aux_dev) +{ + struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev); + struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev); + + mutex_lock(&owner_pf->ptp.ports_owner.lock); + list_del(&aux_pf->ptp.port.list_member); + mutex_unlock(&owner_pf->ptp.ports_owner.lock); +} + +/** + * ice_ptp_auxbus_shutdown + * @aux_dev: PF's auxiliary device + */ +static void ice_ptp_auxbus_shutdown(struct auxiliary_device *aux_dev) +{ + /* Doing nothing here, but handle to auxbus driver must be satisfied */ +} + +/** + * ice_ptp_auxbus_suspend + * @aux_dev: PF's auxiliary device + * @state: power management state indicator + */ +static int +ice_ptp_auxbus_suspend(struct auxiliary_device *aux_dev, pm_message_t state) +{ + /* Doing nothing here, but handle to auxbus driver must be satisfied */ + return 0; +} + +/** + * ice_ptp_auxbus_resume + * @aux_dev: PF's auxiliary device + */ +static int ice_ptp_auxbus_resume(struct auxiliary_device *aux_dev) +{ + /* Doing nothing here, but handle to auxbus driver must be satisfied */ + return 0; +} + +/** + * ice_ptp_auxbus_create_id_table - Create auxiliary device ID table + * @pf: Board private structure + * @name: auxiliary bus driver name + */ +static struct auxiliary_device_id * +ice_ptp_auxbus_create_id_table(struct ice_pf *pf, const char *name) +{ + struct auxiliary_device_id *ids; + + /* Second id left empty to terminate the array */ + ids = devm_kcalloc(ice_pf_to_dev(pf), 2, + sizeof(struct auxiliary_device_id), GFP_KERNEL); + if (!ids) + return NULL; + + snprintf(ids[0].name, sizeof(ids[0].name), "ice.%s", name); + + return ids; +} + +/** + * ice_ptp_register_auxbus_driver - Register PTP auxiliary bus driver + * @pf: Board private structure + */ +static int ice_ptp_register_auxbus_driver(struct ice_pf *pf) +{ + struct auxiliary_driver *aux_driver; + struct ice_ptp *ptp; + struct device *dev; + char *name; + int err; + + ptp = &pf->ptp; + dev = ice_pf_to_dev(pf); + aux_driver = &ptp->ports_owner.aux_driver; + INIT_LIST_HEAD(&ptp->ports_owner.ports); + mutex_init(&ptp->ports_owner.lock); + name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u", + pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn), + ice_get_ptp_src_clock_index(&pf->hw)); + + aux_driver->name = name; + aux_driver->shutdown = ice_ptp_auxbus_shutdown; + aux_driver->suspend = ice_ptp_auxbus_suspend; + aux_driver->remove = ice_ptp_auxbus_remove; + 
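/* The auxiliary bus matches devices to drivers purely by name: the
 * id_table entry becomes "ice.ptp_aux_dev_<bus>_<slot>_clk<n>", and
 * ice_ptp_create_auxbus_device() gives each port's aux_dev that same
 * name, so every port sharing one source clock binds to the clock
 * owner's driver. The owner's probe then threads the port onto its list:
 *
 *	mutex_lock(&owner_pf->ptp.ports_owner.lock);
 *	list_add(&aux_pf->ptp.port.list_member,
 *		 &owner_pf->ptp.ports_owner.ports);
 *	mutex_unlock(&owner_pf->ptp.ports_owner.lock);
 *
 * That list is what ice_ptp_tx_tstamp_owner() walks when a single PF
 * services Tx timestamps for the whole device. */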
aux_driver->resume = ice_ptp_auxbus_resume; + aux_driver->probe = ice_ptp_auxbus_probe; + aux_driver->id_table = ice_ptp_auxbus_create_id_table(pf, name); + if (!aux_driver->id_table) + return -ENOMEM; + + err = auxiliary_driver_register(aux_driver); + if (err) { + devm_kfree(dev, aux_driver->id_table); + dev_err(dev, "Failed registering aux_driver, name <%s>\n", + name); + } + + return err; +} + +/** + * ice_ptp_unregister_auxbus_driver - Unregister PTP auxiliary bus driver + * @pf: Board private structure + */ +static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf) +{ + struct auxiliary_driver *aux_driver = &pf->ptp.ports_owner.aux_driver; + + auxiliary_driver_unregister(aux_driver); + devm_kfree(ice_pf_to_dev(pf), aux_driver->id_table); + + mutex_destroy(&pf->ptp.ports_owner.lock); +} + +/** + * ice_ptp_clock_index - Get the PTP clock index for this device + * @pf: Board private structure + * + * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock + * is associated. + */ +int ice_ptp_clock_index(struct ice_pf *pf) +{ + struct auxiliary_device *aux_dev; + struct ice_pf *owner_pf; + struct ptp_clock *clock; + + aux_dev = &pf->ptp.port.aux_dev; + owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev); + if (!owner_pf) + return -1; + clock = owner_pf->ptp.clock; + + return clock ? ptp_clock_index(clock) : -1; +} + +/** * ice_ptp_prepare_for_reset - Prepare PTP for reset * @pf: Board private structure */ @@ -2561,7 +2761,7 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf) clear_bit(ICE_FLAG_PTP, pf->flags); /* Disable timestamping for both Tx and Rx */ - ice_ptp_cfg_timestamp(pf, false); + ice_ptp_disable_timestamp_mode(pf); kthread_cancel_delayed_work_sync(&ptp->work); @@ -2639,11 +2839,15 @@ static int ice_ptp_init_owner(struct ice_pf *pf) if (err) goto err_clk; - /* Store the PTP clock index for other PFs */ - ice_set_ptp_clock_index(pf); + err = ice_ptp_register_auxbus_driver(pf); + if (err) { + dev_err(ice_pf_to_dev(pf), "Failed to register PTP auxbus driver"); + goto err_aux; + } return 0; - +err_aux: + ptp_clock_unregister(pf->ptp.clock); err_clk: pf->ptp.clock = NULL; err_exit: @@ -2685,14 +2889,117 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp) */ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) { + struct ice_hw *hw = &pf->hw; + mutex_init(&ptp_port->ps_lock); - if (ice_is_e810(&pf->hw)) + switch (hw->phy_model) { + case ICE_PHY_E810: return ice_ptp_init_tx_e810(pf, &ptp_port->tx); + case ICE_PHY_E822: + kthread_init_delayed_work(&ptp_port->ov_work, + ice_ptp_wait_for_offsets); + + return ice_ptp_init_tx_e822(pf, &ptp_port->tx, + ptp_port->port_num); + default: + return -ENODEV; + } +} - kthread_init_delayed_work(&ptp_port->ov_work, - ice_ptp_wait_for_offsets); - return ice_ptp_init_tx_e822(pf, &ptp_port->tx, ptp_port->port_num); +/** + * ice_ptp_release_auxbus_device + * @dev: device that utilizes the auxbus + */ +static void ice_ptp_release_auxbus_device(struct device *dev) +{ + /* Doing nothing here, but handle to auxbus device must be satisfied */ +} + +/** + * ice_ptp_create_auxbus_device - Create PTP auxiliary bus device + * @pf: Board private structure + */ +static int ice_ptp_create_auxbus_device(struct ice_pf *pf) +{ + struct auxiliary_device *aux_dev; + struct ice_ptp *ptp; + struct device *dev; + char *name; + int err; + u32 id; + + ptp = &pf->ptp; + id = ptp->port.port_num; + dev = ice_pf_to_dev(pf); + + aux_dev = &ptp->port.aux_dev; + + name = devm_kasprintf(dev, GFP_KERNEL,
"ptp_aux_dev_%u_%u_clk%u", + pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn), + ice_get_ptp_src_clock_index(&pf->hw)); + + aux_dev->name = name; + aux_dev->id = id; + aux_dev->dev.release = ice_ptp_release_auxbus_device; + aux_dev->dev.parent = dev; + + err = auxiliary_device_init(aux_dev); + if (err) + goto aux_err; + + err = auxiliary_device_add(aux_dev); + if (err) { + auxiliary_device_uninit(aux_dev); + goto aux_err; + } + + return 0; +aux_err: + dev_err(dev, "Failed to create PTP auxiliary bus device <%s>\n", name); + devm_kfree(dev, name); + return err; +} + +/** + * ice_ptp_remove_auxbus_device - Remove PTP auxiliary bus device + * @pf: Board private structure + */ +static void ice_ptp_remove_auxbus_device(struct ice_pf *pf) +{ + struct auxiliary_device *aux_dev = &pf->ptp.port.aux_dev; + + auxiliary_device_delete(aux_dev); + auxiliary_device_uninit(aux_dev); + + memset(aux_dev, 0, sizeof(*aux_dev)); +} + +/** + * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode + * @pf: Board private structure + * + * Initialize the Tx timestamp interrupt mode for this device. For most device + * types, each PF processes the interrupt and manages its own timestamps. For + * E822-based devices, only the clock owner processes the timestamps. Other + * PFs disable the interrupt and do not process their own timestamps. + */ +static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf) +{ + switch (pf->hw.phy_model) { + case ICE_PHY_E822: + /* E822 based PHY has the clock owner process the interrupt + * for all ports. + */ + if (ice_pf_src_tmr_owned(pf)) + pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL; + else + pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE; + break; + default: + /* other PHY types handle their own Tx interrupt */ + pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF; + } } /** @@ -2713,10 +3020,14 @@ void ice_ptp_init(struct ice_pf *pf) struct ice_hw *hw = &pf->hw; int err; + ice_ptp_init_phy_model(hw); + + ice_ptp_init_tx_interrupt_mode(pf); + /* If this function owns the clock hardware, it must allocate and * configure the PTP clock device to represent it. 
*/ - if (hw->func_caps.ts_func_info.src_tmr_owned) { + if (ice_pf_src_tmr_owned(pf)) { err = ice_ptp_init_owner(pf); if (err) goto err; @@ -2730,11 +3041,18 @@ void ice_ptp_init(struct ice_pf *pf) /* Start the PHY timestamping block */ ice_ptp_reset_phy_timestamping(pf); + /* Configure initial Tx interrupt settings */ + ice_ptp_cfg_tx_interrupt(pf); + set_bit(ICE_FLAG_PTP, pf->flags); err = ice_ptp_init_work(pf, ptp); if (err) goto err; + err = ice_ptp_create_auxbus_device(pf); + if (err) + goto err; + dev_info(ice_pf_to_dev(pf), "PTP init successful\n"); return; @@ -2761,7 +3079,9 @@ void ice_ptp_release(struct ice_pf *pf) return; /* Disable timestamping for both Tx and Rx */ - ice_ptp_cfg_timestamp(pf, false); + ice_ptp_disable_timestamp_mode(pf); + + ice_ptp_remove_auxbus_device(pf); ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); @@ -2782,9 +3102,10 @@ void ice_ptp_release(struct ice_pf *pf) /* Disable periodic outputs */ ice_ptp_disable_all_clkout(pf); - ice_clear_ptp_clock_index(pf); ptp_clock_unregister(pf->ptp.clock); pf->ptp.clock = NULL; + ice_ptp_unregister_auxbus_driver(pf); + dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n"); } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index 995a57019ba7..06a330867fc9 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -157,7 +157,9 @@ struct ice_ptp_tx { * ready for PTP functionality. It is used to track the port initialization * and determine when the port's PHY offset is valid. * + * @list_member: list member structure of auxiliary device * @tx: Tx timestamp tracking for this port + * @aux_dev: auxiliary device associated with this port * @ov_work: delayed work task for tracking when PHY offset is valid * @ps_lock: mutex used to protect the overall PTP PHY start procedure * @link_up: indicates whether the link is up @@ -165,7 +167,9 @@ struct ice_ptp_tx { * @port_num: the port number this structure represents */ struct ice_ptp_port { + struct list_head list_member; struct ice_ptp_tx tx; + struct auxiliary_device aux_dev; struct kthread_delayed_work ov_work; struct mutex ps_lock; /* protects overall PTP PHY start procedure */ bool link_up; @@ -173,11 +177,35 @@ struct ice_ptp_port { u8 port_num; }; +enum ice_ptp_tx_interrupt { + ICE_PTP_TX_INTERRUPT_NONE = 0, + ICE_PTP_TX_INTERRUPT_SELF, + ICE_PTP_TX_INTERRUPT_ALL, +}; + +/** + * struct ice_ptp_port_owner - data used to handle the PTP clock owner info + * + * This structure contains data necessary for the PTP clock owner to correctly + * handle the timestamping feature for all attached ports. 
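The ports list documented here is what ice_ptp_tx_tstamp_owner() consumes: the owner takes the lock, lets each initialized port tracker drain its timestamps, then polls every quad's tstamp_ready state to decide between done and pending. A compilable miniature of that walk; the port/quad counts and ready flags are invented for the example:

#include <stdbool.h>
#include <stdio.h>

#define NPORTS 4
#define NQUADS 5

struct port { bool init; int pending; };

static void process_port(struct port *p)
{
	if (p->init)
		p->pending = 0;	/* drain this port's timestamps */
}

/* Owner-side walk: process every attached port, then report whether
 * any quad still has outstanding work. */
static bool tstamp_work_pending(struct port ports[], const int quads[])
{
	int i;

	for (i = 0; i < NPORTS; i++)
		process_port(&ports[i]);

	for (i = 0; i < NQUADS; i++)
		if (quads[i])	/* models the tstamp_ready bitmap */
			return true;
	return false;
}

int main(void)
{
	struct port ports[NPORTS] = { { true, 3 }, { false, 1 } };
	int quads[NQUADS] = { 0, 0, 1, 0, 0 };

	printf("pending=%d\n", tstamp_work_pending(ports, quads));
	return 0;
}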
+ * + * @aux_driver: the structure carrying the auxiliary driver information + * @ports: list of ports handled by this port owner + * @lock: protect access to ports list + */ +struct ice_ptp_port_owner { + struct auxiliary_driver aux_driver; + struct list_head ports; + struct mutex lock; +}; + #define GLTSYN_TGT_H_IDX_MAX 4 /** * struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK + * @tx_interrupt_mode: the TX interrupt mode for the PTP clock * @port: data for the PHY port initialization procedure + * @ports_owner: data for the auxiliary driver owner * @work: delayed work function for periodic tasks * @cached_phc_time: a cached copy of the PHC time for timestamp extension * @cached_phc_jiffies: jiffies when cached_phc_time was last updated @@ -197,7 +225,9 @@ struct ice_ptp_port { * @late_cached_phc_updates: number of times cached PHC update is late */ struct ice_ptp { + enum ice_ptp_tx_interrupt tx_interrupt_mode; struct ice_ptp_port port; + struct ice_ptp_port_owner ports_owner; struct kthread_delayed_work work; u64 cached_phc_time; unsigned long cached_phc_jiffies; @@ -258,11 +288,11 @@ struct ice_ptp { #define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4)) #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +int ice_ptp_clock_index(struct ice_pf *pf); struct ice_pf; int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr); int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr); -void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena); -int ice_get_ptp_clock_index(struct ice_pf *pf); +void ice_ptp_restore_timestamp_mode(struct ice_pf *pf); void ice_ptp_extts_event(struct ice_pf *pf); s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb); @@ -287,12 +317,7 @@ static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr) return -EOPNOTSUPP; } -static inline void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) { } -static inline int ice_get_ptp_clock_index(struct ice_pf *pf) -{ - return -1; -} - +static inline void ice_ptp_restore_timestamp_mode(struct ice_pf *pf) { } static inline void ice_ptp_extts_event(struct ice_pf *pf) { } static inline s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) @@ -314,5 +339,10 @@ static inline void ice_ptp_release(struct ice_pf *pf) { } static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) { } + +static inline int ice_ptp_clock_index(struct ice_pf *pf) +{ + return -1; +} #endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ #endif /* _ICE_PTP_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index a38614d21ea8..a00b55e14aac 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -7,6 +7,132 @@ #include "ice_ptp_consts.h" #include "ice_cgu_regs.h" +static struct dpll_pin_frequency ice_cgu_pin_freq_common[] = { + DPLL_PIN_FREQUENCY_1PPS, + DPLL_PIN_FREQUENCY_10MHZ, +}; + +static struct dpll_pin_frequency ice_cgu_pin_freq_1_hz[] = { + DPLL_PIN_FREQUENCY_1PPS, +}; + +static struct dpll_pin_frequency ice_cgu_pin_freq_10_mhz[] = { + DPLL_PIN_FREQUENCY_10MHZ, +}; + +static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_inputs[] = { + { "CVL-SDP22", ZL_REF0P, DPLL_PIN_TYPE_INT_OSCILLATOR, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "CVL-SDP20", ZL_REF0N, DPLL_PIN_TYPE_INT_OSCILLATOR, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "C827_0-RCLKA", ZL_REF1P, DPLL_PIN_TYPE_MUX, 0, }, + { "C827_0-RCLKB", ZL_REF1N,
DPLL_PIN_TYPE_MUX, 0, }, + { "SMA1", ZL_REF3P, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "SMA2/U.FL2", ZL_REF3N, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS, + ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, + { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0, }, +}; + +static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_inputs[] = { + { "CVL-SDP22", ZL_REF0P, DPLL_PIN_TYPE_INT_OSCILLATOR, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "CVL-SDP20", ZL_REF0N, DPLL_PIN_TYPE_INT_OSCILLATOR, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "C827_0-RCLKA", ZL_REF1P, DPLL_PIN_TYPE_MUX, }, + { "C827_0-RCLKB", ZL_REF1N, DPLL_PIN_TYPE_MUX, }, + { "C827_1-RCLKA", ZL_REF2P, DPLL_PIN_TYPE_MUX, }, + { "C827_1-RCLKB", ZL_REF2N, DPLL_PIN_TYPE_MUX, }, + { "SMA1", ZL_REF3P, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "SMA2/U.FL2", ZL_REF3N, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS, + ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, + { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, }, +}; + +static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_outputs[] = { + { "REF-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "REF-SMA2/U.FL2", ZL_OUT1, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, }, + { "MAC-CLK", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, }, + { "CVL-SDP21", ZL_OUT4, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, + { "CVL-SDP23", ZL_OUT5, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, +}; + +static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_outputs[] = { + { "REF-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "REF-SMA2/U.FL2", ZL_OUT1, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 }, + { "PHY2-CLK", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 }, + { "MAC-CLK", ZL_OUT4, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 }, + { "CVL-SDP21", ZL_OUT5, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, + { "CVL-SDP23", ZL_OUT6, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, +}; + +static const struct ice_cgu_pin_desc ice_e823_si_cgu_inputs[] = { + { "NONE", SI_REF0P, 0, 0 }, + { "NONE", SI_REF0N, 0, 0 }, + { "SYNCE0_DP", SI_REF1P, DPLL_PIN_TYPE_MUX, 0 }, + { "SYNCE0_DN", SI_REF1N, DPLL_PIN_TYPE_MUX, 0 }, + { "EXT_CLK_SYNC", SI_REF2P, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "NONE", SI_REF2N, 0, 0 }, + { "EXT_PPS_OUT", SI_REF3, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "INT_PPS_OUT", SI_REF4, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, +}; + +static const struct ice_cgu_pin_desc ice_e823_si_cgu_outputs[] = { + { "1588-TIME_SYNC", SI_OUT0, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "PHY-CLK", SI_OUT1, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 }, + { "10MHZ-SMA2", SI_OUT2, DPLL_PIN_TYPE_EXT, + 
ARRAY_SIZE(ice_cgu_pin_freq_10_mhz), ice_cgu_pin_freq_10_mhz }, + { "PPS-SMA1", SI_OUT3, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, +}; + +static const struct ice_cgu_pin_desc ice_e823_zl_cgu_inputs[] = { + { "NONE", ZL_REF0P, 0, 0 }, + { "INT_PPS_OUT", ZL_REF0N, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, + { "SYNCE0_DP", ZL_REF1P, DPLL_PIN_TYPE_MUX, 0 }, + { "SYNCE0_DN", ZL_REF1N, DPLL_PIN_TYPE_MUX, 0 }, + { "NONE", ZL_REF2P, 0, 0 }, + { "NONE", ZL_REF2N, 0, 0 }, + { "EXT_CLK_SYNC", ZL_REF3P, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "NONE", ZL_REF3N, 0, 0 }, + { "EXT_PPS_OUT", ZL_REF4P, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, + { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0 }, +}; + +static const struct ice_cgu_pin_desc ice_e823_zl_cgu_outputs[] = { + { "PPS-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, + { "10MHZ-SMA2", ZL_OUT1, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_10_mhz), ice_cgu_pin_freq_10_mhz }, + { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 }, + { "1588-TIME_REF", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 }, + { "CPK-TIME_SYNC", ZL_OUT4, DPLL_PIN_TYPE_EXT, + ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, + { "NONE", ZL_OUT5, 0, 0 }, +}; + /* Low level functions for interacting with and managing the device clock used * for the Precision Time Protocol. * @@ -107,7 +233,7 @@ static u64 ice_ptp_read_src_incval(struct ice_hw *hw) * * Prepare the source timer for an upcoming timer sync command. */ -static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) +void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) { u32 cmd_val; u8 tmr_idx; @@ -116,21 +242,23 @@ static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) cmd_val = tmr_idx << SEL_CPK_SRC; switch (cmd) { - case INIT_TIME: + case ICE_PTP_INIT_TIME: cmd_val |= GLTSYN_CMD_INIT_TIME; break; - case INIT_INCVAL: + case ICE_PTP_INIT_INCVAL: cmd_val |= GLTSYN_CMD_INIT_INCVAL; break; - case ADJ_TIME: + case ICE_PTP_ADJ_TIME: cmd_val |= GLTSYN_CMD_ADJ_TIME; break; - case ADJ_TIME_AT_TIME: + case ICE_PTP_ADJ_TIME_AT_TIME: cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME; break; - case READ_TIME: + case ICE_PTP_READ_TIME: cmd_val |= GLTSYN_CMD_READ_TIME; break; + case ICE_PTP_NOP: + break; } wr32(hw, GLTSYN_CMD, cmd_val); @@ -166,9 +294,9 @@ ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset) { int phy_port, phy, quadtype; - phy_port = port % ICE_PORTS_PER_PHY; - phy = port / ICE_PORTS_PER_PHY; - quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_NUM_QUAD_TYPE; + phy_port = port % ICE_PORTS_PER_PHY_E822; + phy = port / ICE_PORTS_PER_PHY_E822; + quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E822; if (quadtype == 0) { msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port); @@ -293,7 +421,7 @@ static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr) * * Read a PHY register for the given port over the device sideband queue. */ -int +static int ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val) { struct ice_sbq_msg_input msg = {0}; @@ -370,7 +498,7 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val) * * Write a PHY register for the given port over the device sideband queue. 
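To make the port-to-quad mapping in ice_fill_phy_msg_e822() above concrete, the same arithmetic can be run as a standalone C demo. The constants (4 ports per quad, 2 quads per PHY, 8 ports per PHY) are taken from this patch's E822 definitions and are assumptions of the sketch:

#include <stdio.h>

int main(void)
{
	for (unsigned int port = 0; port < 8; port++) {
		unsigned int phy_port = port % 8;	/* index within the PHY */
		unsigned int quadtype = (port / 4) % 2;	/* Q_0 or Q_1 bank */

		printf("port %u -> phy_port %u, quad bank Q_%u\n",
		       port, phy_port, quadtype);
	}
	return 0;
}

Ports 0-3 land in the Q_0 register bank and ports 4-7 in Q_1, which is exactly the quadtype test used when picking Q_0_BASE versus Q_1_BASE.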
*/ -int +static int ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val) { struct ice_sbq_msg_input msg = {0}; @@ -493,20 +621,25 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) * Fill a message buffer for accessing a register in a quad shared between * multiple PHYs. */ -static void +static int ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) { u32 addr; + if (quad >= ICE_MAX_QUAD) + return -EINVAL; + msg->dest_dev = rmn_0; - if ((quad % ICE_NUM_QUAD_TYPE) == 0) + if ((quad % ICE_QUADS_PER_PHY_E822) == 0) addr = Q_0_BASE + offset; else addr = Q_1_BASE + offset; msg->msg_addr_low = lower_16_bits(addr); msg->msg_addr_high = upper_16_bits(addr); + + return 0; } /** @@ -525,10 +658,10 @@ ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val) struct ice_sbq_msg_input msg = {0}; int err; - if (quad >= ICE_MAX_QUAD) - return -EINVAL; + err = ice_fill_quad_msg_e822(&msg, quad, offset); + if (err) + return err; - ice_fill_quad_msg_e822(&msg, quad, offset); msg.opcode = ice_sbq_msg_rd; err = ice_sbq_rw_reg(hw, &msg); @@ -559,10 +692,10 @@ ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val) struct ice_sbq_msg_input msg = {0}; int err; - if (quad >= ICE_MAX_QUAD) - return -EINVAL; + err = ice_fill_quad_msg_e822(&msg, quad, offset); + if (err) + return err; - ice_fill_quad_msg_e822(&msg, quad, offset); msg.opcode = ice_sbq_msg_wr; msg.data = val; @@ -626,29 +759,32 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) * @quad: the quad to read from * @idx: the timestamp index to reset * - * Clear a timestamp, resetting its valid bit, from the PHY quad block that is - * shared between the internal PHYs on the E822 devices. + * Read the timestamp out of the quad to clear its timestamp status bit from + * the PHY quad block that is shared between the internal PHYs of the E822 + * devices. + * + * Note that unlike E810, software cannot directly write to the quad memory + * bank registers. E822 relies on the ice_get_phy_tx_tstamp_ready() function + * to determine which timestamps are valid. Reading a timestamp auto-clears + * the valid bit. + * + * To directly clear the contents of the timestamp block entirely, discarding + * all timestamp data at once, software should instead use + * ice_ptp_reset_ts_memory_quad_e822(). + * + * This function should only be called on an idx whose bit is set according to + * ice_get_phy_tx_tstamp_ready(). */ static int ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx) { - u16 lo_addr, hi_addr; + u64 unused_tstamp; int err; - lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx); - hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx); - - err = ice_write_quad_reg_e822(hw, quad, lo_addr, 0); + err = ice_read_phy_tstamp_e822(hw, quad, idx, &unused_tstamp); if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n", - err); - return err; - } - - err = ice_write_quad_reg_e822(hw, quad, hi_addr, 0); - if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n", - err); + ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for quad %u, idx %u, err %d\n", + quad, idx, err); return err; } @@ -1023,7 +1159,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw) * @time: Time to initialize the PHY port clocks to * * Program the PHY port registers with a new initial time value. 
The port - * clock will be initialized once the driver issues an INIT_TIME sync + * clock will be initialized once the driver issues an ICE_PTP_INIT_TIME sync * command. The time value is the upper 32 bits of the PHY timer, usually in * units of nominal nanoseconds. */ @@ -1072,14 +1208,14 @@ exit_err: * * Program the port for an atomic adjustment by writing the Tx and Rx timer * registers. The atomic adjustment won't be completed until the driver issues - * an ADJ_TIME command. + * an ICE_PTP_ADJ_TIME command. * * Note that time is not in units of nanoseconds. It is in clock time * including the lower sub-nanosecond portion of the port timer. * * Negative adjustments are supported using 2s complement arithmetic. */ -int +static int ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time) { u32 l_time, u_time; @@ -1125,7 +1261,7 @@ exit_err: * * Prepare the PHY ports for an atomic time adjustment by programming the PHY * Tx and Rx port registers. The actual adjustment is completed by issuing an - * ADJ_TIME or ADJ_TIME_AT_TIME sync command. + * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command. */ static int ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj) @@ -1160,7 +1296,7 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj) * * Prepare each of the PHY ports for a new increment value by programming the * port's TIMETUS registers. The new increment value will be updated after - * issuing an INIT_INCVAL command. + * issuing an ICE_PTP_INIT_INCVAL command. */ static int ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval) @@ -1226,18 +1362,18 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts) } /** - * ice_ptp_one_port_cmd - Prepare a single PHY port for a timer command + * ice_ptp_write_port_cmd_e822 - Prepare a single PHY port for a timer command * @hw: pointer to HW struct * @port: Port to which cmd has to be sent * @cmd: Command to be sent to the port * * Prepare the requested port for an upcoming timer sync command. * - * Note there is no equivalent of this operation on E810, as that device - * always handles all external PHYs internally. + * Do not use this function directly. If you want to configure exactly one + * port, use ice_ptp_one_port_cmd() instead. 
*/ static int -ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd) +ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd) { u32 cmd_val, val; u8 tmr_idx; @@ -1246,21 +1382,23 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd) tmr_idx = ice_get_ptp_src_clock_index(hw); cmd_val = tmr_idx << SEL_PHY_SRC; switch (cmd) { - case INIT_TIME: + case ICE_PTP_INIT_TIME: cmd_val |= PHY_CMD_INIT_TIME; break; - case INIT_INCVAL: + case ICE_PTP_INIT_INCVAL: cmd_val |= PHY_CMD_INIT_INCVAL; break; - case ADJ_TIME: + case ICE_PTP_ADJ_TIME: cmd_val |= PHY_CMD_ADJ_TIME; break; - case READ_TIME: + case ICE_PTP_READ_TIME: cmd_val |= PHY_CMD_READ_TIME; break; - case ADJ_TIME_AT_TIME: + case ICE_PTP_ADJ_TIME_AT_TIME: cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME; break; + case ICE_PTP_NOP: + break; } /* Tx case */ @@ -1307,6 +1445,39 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd) } /** + * ice_ptp_one_port_cmd - Prepare one port for a timer command + * @hw: pointer to the HW struct + * @configured_port: the port to configure with configured_cmd + * @configured_cmd: timer command to prepare on the configured_port + * + * Prepare the configured_port for the configured_cmd, and prepare all other + * ports for ICE_PTP_NOP. This causes the configured_port to execute the + * desired command while all other ports perform no operation. + */ +static int +ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port, + enum ice_ptp_tmr_cmd configured_cmd) +{ + u8 port; + + for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { + enum ice_ptp_tmr_cmd cmd; + int err; + + if (port == configured_port) + cmd = configured_cmd; + else + cmd = ICE_PTP_NOP; + + err = ice_ptp_write_port_cmd_e822(hw, port, cmd); + if (err) + return err; + } + + return 0; +} + +/** * ice_ptp_port_cmd_e822 - Prepare all ports for a timer command + * @hw: pointer to the HW struct * @cmd: timer command to prepare @@ -1322,7 +1493,7 @@ ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { int err; - err = ice_ptp_one_port_cmd(hw, port, cmd); + err = ice_ptp_write_port_cmd_e822(hw, port, cmd); if (err) return err; } @@ -2159,8 +2330,8 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) * @phy_time: on return, the 64bit PHY timer value * @phc_time: on return, the lower 64bits of PHC time * - * Issue a READ_TIME timer command to simultaneously capture the PHY and PHC - * timer values. + * Issue an ICE_PTP_READ_TIME timer command to simultaneously capture the PHY + * and PHC timer values. 
*/ static int ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time, @@ -2173,15 +2344,15 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time, tmr_idx = ice_get_ptp_src_clock_index(hw); - /* Prepare the PHC timer for a READ_TIME capture command */ - ice_ptp_src_cmd(hw, READ_TIME); + /* Prepare the PHC timer for an ICE_PTP_READ_TIME capture command */ + ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME); - /* Prepare the PHY timer for a READ_TIME capture command */ - err = ice_ptp_one_port_cmd(hw, port, READ_TIME); + /* Prepare the PHY timer for an ICE_PTP_READ_TIME capture command */ + err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_READ_TIME); if (err) return err; - /* Issue the sync to start the READ_TIME capture */ + /* Issue the sync to start the ICE_PTP_READ_TIME capture */ ice_ptp_exec_tmr_cmd(hw); /* Read the captured PHC time from the shadow time registers */ @@ -2215,10 +2386,11 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time, * @port: the PHY port to synchronize * * Perform an adjustment to ensure that the PHY and PHC timers are in sync. - * This is done by issuing a READ_TIME command which triggers a simultaneous - * read of the PHY timer and PHC timer. Then we use the difference to - * calculate an appropriate 2s complement addition to add to the PHY timer in - * order to ensure it reads the same value as the primary PHC timer. + * This is done by issuing an ICE_PTP_READ_TIME command which triggers a + * simultaneous read of the PHY timer and PHC timer. Then we use the + * difference to calculate an appropriate 2s complement addition to add + * to the PHY timer in order to ensure it reads the same value as the + * primary PHC timer. */ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port) { @@ -2248,10 +2420,13 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port) if (err) goto err_unlock; - err = ice_ptp_one_port_cmd(hw, port, ADJ_TIME); + err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_ADJ_TIME); if (err) goto err_unlock; + /* Do not perform any action on the main timer */ + ice_ptp_src_cmd(hw, ICE_PTP_NOP); + /* Issue the sync to activate the time adjustment */ ice_ptp_exec_tmr_cmd(hw); @@ -2368,10 +2543,13 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) if (err) return err; - err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL); + err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL); if (err) return err; + /* Do not perform any action on the main timer */ + ice_ptp_src_cmd(hw, ICE_PTP_NOP); + ice_ptp_exec_tmr_cmd(hw); err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val); @@ -2393,7 +2571,7 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port) if (err) return err; - err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL); + err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL); if (err) return err; @@ -2642,28 +2820,39 @@ ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp) * @lport: the lport to read from * @idx: the timestamp index to reset * - * Clear a timestamp, resetting its valid bit, from the timestamp block of the - * external PHY on the E810 device. + * Read the timestamp and then forcibly overwrite its value to clear the valid + * bit from the timestamp block of the external PHY on the E810 device. + * + * This function should only be called on an idx whose bit is set according to + * ice_get_phy_tx_tstamp_ready(). 
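Both clear implementations now share one caller-visible contract: consult the ready bitmap first, then touch only the indices whose bits are set. A hedged sketch of that consumption loop, built solely from functions declared in this patch (the skb completion step is elided, and example_drain_tx_tstamps is a name invented for illustration):

static void example_drain_tx_tstamps(struct ice_hw *hw, u8 block)
{
	u64 tstamp_ready, tstamp;
	u8 idx;

	if (ice_get_phy_tx_tstamp_ready(hw, block, &tstamp_ready))
		return;

	for (idx = 0; idx < 64; idx++) {
		if (!(tstamp_ready & BIT_ULL(idx)))
			continue;
		if (ice_read_phy_tstamp(hw, block, idx, &tstamp))
			continue;
		/* extend 'tstamp' with the cached PHC time and complete
		 * the matching skb here
		 */
	}
}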
*/ static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx) { u32 lo_addr, hi_addr; + u64 unused_tstamp; int err; + err = ice_read_phy_tstamp_e810(hw, lport, idx, &unused_tstamp); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for lport %u, idx %u, err %d\n", + lport, idx, err); + return err; + } + lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx); hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx); err = ice_write_phy_reg_e810(hw, lo_addr, 0); if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n", - err); + ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register for lport %u, idx %u, err %d\n", + lport, idx, err); return err; } err = ice_write_phy_reg_e810(hw, hi_addr, 0); if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n", - err); + ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register for lport %u, idx %u, err %d\n", + lport, idx, err); return err; } @@ -2714,7 +2903,7 @@ static int ice_ptp_init_phc_e810(struct ice_hw *hw) * * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation setting the * initial clock time. The time will not actually be programmed until the - * driver issues an INIT_TIME command. + * driver issues an ICE_PTP_INIT_TIME command. * * The time value is the upper 32 bits of the PHY timer, usually in units of * nominal nanoseconds. @@ -2749,7 +2938,7 @@ static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time) * * Prepare the PHY port for an atomic adjustment by programming the PHY * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual adjustment - * is completed by issuing an ADJ_TIME sync command. + * is completed by issuing an ICE_PTP_ADJ_TIME sync command. * * The adjustment value only contains the portion used for the upper 32bits of * the PHY timer, usually in units of nominal nanoseconds. Negative @@ -2789,7 +2978,7 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj) * * Prepare the PHY port for a new increment value by programming the PHY * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual change is - * completed by issuing an INIT_INCVAL command. + * completed by issuing an ICE_PTP_INIT_INCVAL command. */ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval) { @@ -2832,21 +3021,23 @@ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) int err; switch (cmd) { - case INIT_TIME: + case ICE_PTP_INIT_TIME: cmd_val = GLTSYN_CMD_INIT_TIME; break; - case INIT_INCVAL: + case ICE_PTP_INIT_INCVAL: cmd_val = GLTSYN_CMD_INIT_INCVAL; break; - case ADJ_TIME: + case ICE_PTP_ADJ_TIME: cmd_val = GLTSYN_CMD_ADJ_TIME; break; - case READ_TIME: + case ICE_PTP_READ_TIME: cmd_val = GLTSYN_CMD_READ_TIME; break; - case ADJ_TIME_AT_TIME: + case ICE_PTP_ADJ_TIME_AT_TIME: cmd_val = GLTSYN_CMD_ADJ_INIT_TIME; break; + case ICE_PTP_NOP: + return 0; } /* Read, modify, write */ @@ -2869,6 +3060,185 @@ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) return 0; } +/** + * ice_get_phy_tx_tstamp_ready_e810 - Read Tx memory status register + * @hw: pointer to the HW struct + * @port: the PHY port to read + * @tstamp_ready: contents of the Tx memory status register + * + * E810 devices do not use a Tx memory status register. Instead simply + * indicate that all timestamps are currently ready. 
+ */ +static int +ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready) +{ + *tstamp_ready = 0xFFFFFFFFFFFFFFFF; + return 0; +} + +/* E810T SMA functions + * + * The following functions operate specifically on E810T hardware and are used + * to access the extended GPIOs available. + */ + +/** + * ice_get_pca9575_handle + * @hw: pointer to the hw struct + * @pca9575_handle: GPIO controller's handle + * + * Find and return the GPIO controller's handle in the netlist. + * When found, the value will be cached in the hw structure and following calls + * will return the cached value + */ +static int +ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle) +{ + struct ice_aqc_get_link_topo *cmd; + struct ice_aq_desc desc; + int status; + u8 idx; + + /* If handle was read previously return cached value */ + if (hw->io_expander_handle) { + *pca9575_handle = hw->io_expander_handle; + return 0; + } + + /* If handle was not detected read it from the netlist */ + cmd = &desc.params.get_link_topo; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); + + /* Set node type to GPIO controller */ + cmd->addr.topo_params.node_type_ctx = + (ICE_AQC_LINK_TOPO_NODE_TYPE_M & + ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL); + +#define SW_PCA9575_SFP_TOPO_IDX 2 +#define SW_PCA9575_QSFP_TOPO_IDX 1 + + /* Check if the SW IO expander controlling SMA exists in the netlist. */ + if (hw->device_id == ICE_DEV_ID_E810C_SFP) + idx = SW_PCA9575_SFP_TOPO_IDX; + else if (hw->device_id == ICE_DEV_ID_E810C_QSFP) + idx = SW_PCA9575_QSFP_TOPO_IDX; + else + return -EOPNOTSUPP; + + cmd->addr.topo_params.index = idx; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (status) + return -EOPNOTSUPP; + + /* Verify if we found the right IO expander type */ + if (desc.params.get_link_topo.node_part_num != + ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575) + return -EOPNOTSUPP; + + /* If present save the handle and return it */ + hw->io_expander_handle = + le16_to_cpu(desc.params.get_link_topo.addr.handle); + *pca9575_handle = hw->io_expander_handle; + + return 0; +} + +/** + * ice_read_sma_ctrl_e810t + * @hw: pointer to the hw struct + * @data: pointer to data to be read from the GPIO controller + * + * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the + * PCA9575 expander, so only bits 3-7 in data are valid. + */ +int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data) +{ + int status; + u16 handle; + u8 i; + + status = ice_get_pca9575_handle(hw, &handle); + if (status) + return status; + + *data = 0; + + for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) { + bool pin; + + status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET, + &pin, NULL); + if (status) + break; + *data |= (u8)(!pin) << i; + } + + return status; +} + +/** + * ice_write_sma_ctrl_e810t + * @hw: pointer to the hw struct + * @data: data to be written to the GPIO controller + * + * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1 + * of the PCA9575 expander, so only bits 3-7 in data are valid. 
+ */ +int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data) +{ + int status; + u16 handle; + u8 i; + + status = ice_get_pca9575_handle(hw, &handle); + if (status) + return status; + + for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) { + bool pin; + + pin = !(data & (1 << i)); + status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET, + pin, NULL); + if (status) + break; + } + + return status; +} + +/** + * ice_read_pca9575_reg_e810t + * @hw: pointer to the hw struct + * @offset: GPIO controller register offset + * @data: pointer to data to be read from the GPIO controller + * + * Read the register from the GPIO controller + */ +int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data) +{ + struct ice_aqc_link_topo_addr link_topo; + __le16 addr; + u16 handle; + int err; + + memset(&link_topo, 0, sizeof(link_topo)); + + err = ice_get_pca9575_handle(hw, &handle); + if (err) + return err; + + link_topo.handle = cpu_to_le16(handle); + link_topo.topo_params.node_type_ctx = + FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, + ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED); + + addr = cpu_to_le16((u16)offset); + + return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL); +} + /* Device agnostic functions * * The following functions implement shared behavior common to both E822 and @@ -2926,6 +3296,21 @@ void ice_ptp_unlock(struct ice_hw *hw) } /** + * ice_ptp_init_phy_model - Initialize hw->phy_model based on device type + * @hw: pointer to the HW structure + * + * Determine the PHY model for the device, and initialize hw->phy_model + * for use by other functions. + */ +void ice_ptp_init_phy_model(struct ice_hw *hw) +{ + if (ice_is_e810(hw)) + hw->phy_model = ICE_PHY_E810; + else + hw->phy_model = ICE_PHY_E822; +} + +/** * ice_ptp_tmr_cmd - Prepare and trigger a timer sync command * @hw: pointer to HW struct * @cmd: the command to issue @@ -2943,10 +3328,17 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) ice_ptp_src_cmd(hw, cmd); /* Next, prepare the ports */ - if (ice_is_e810(hw)) + switch (hw->phy_model) { + case ICE_PHY_E810: err = ice_ptp_port_cmd_e810(hw, cmd); - else + break; + case ICE_PHY_E822: err = ice_ptp_port_cmd_e822(hw, cmd); + break; + default: + err = -EOPNOTSUPP; + } + if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n", cmd, err); @@ -2988,14 +3380,21 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time) /* PHY timers */ /* Fill Rx and Tx ports and send msg to PHY */ - if (ice_is_e810(hw)) + switch (hw->phy_model) { + case ICE_PHY_E810: err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF); - else + break; + case ICE_PHY_E822: err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF); + break; + default: + err = -EOPNOTSUPP; + } + if (err) return err; - return ice_ptp_tmr_cmd(hw, INIT_TIME); + return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_TIME); } /** @@ -3008,8 +3407,8 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time) * * 1) Write the increment value to the source timer shadow registers * 2) Write the increment value to the PHY timer shadow registers - * 3) Issue an INIT_INCVAL timer command to synchronously switch both the - * source and port timers to the new increment value at the next clock + * 3) Issue an ICE_PTP_INIT_INCVAL timer command to synchronously switch both + * the source and port timers to the new increment value at the next clock * cycle. 
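Since both SMA helpers above work on the same bits 3-7 view of the expander, toggling a single SMA control line is a plain read-modify-write. A sketch under that assumption (the helper name is hypothetical):

static int example_set_sma_bit(struct ice_hw *hw, u8 bit)
{
	u8 data;
	int err;

	if (bit < ICE_SMA_MIN_BIT_E810T || bit > ICE_SMA_MAX_BIT_E810T)
		return -EINVAL;

	err = ice_read_sma_ctrl_e810t(hw, &data);
	if (err)
		return err;

	data |= BIT(bit);	/* pin inversion is handled inside the helpers */
	return ice_write_sma_ctrl_e810t(hw, data);
}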
*/ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval) @@ -3023,14 +3422,21 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval) wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval)); wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval)); - if (ice_is_e810(hw)) + switch (hw->phy_model) { + case ICE_PHY_E810: err = ice_ptp_prep_phy_incval_e810(hw, incval); - else + break; + case ICE_PHY_E822: err = ice_ptp_prep_phy_incval_e822(hw, incval); + break; + default: + err = -EOPNOTSUPP; + } + if (err) return err; - return ice_ptp_tmr_cmd(hw, INIT_INCVAL); + return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_INCVAL); } /** @@ -3064,8 +3470,8 @@ int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval) * * 1) Write the adjustment to the source timer shadow registers * 2) Write the adjustment to the PHY timer shadow registers - * 3) Issue an ADJ_TIME timer command to synchronously apply the adjustment to - * both the source and port timers at the next clock cycle. + * 3) Issue an ICE_PTP_ADJ_TIME timer command to synchronously apply the + * adjustment to both the source and port timers at the next clock cycle. */ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) { @@ -3075,21 +3481,28 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; /* Write the desired clock adjustment into the GLTSYN_SHADJ register. - * For an ADJ_TIME command, this set of registers represents the value - * to add to the clock time. It supports subtraction by interpreting - * the value as a 2's complement integer. + * For an ICE_PTP_ADJ_TIME command, this set of registers represents + * the value to add to the clock time. It supports subtraction by + * interpreting the value as a 2's complement integer. */ wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0); wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj); - if (ice_is_e810(hw)) + switch (hw->phy_model) { + case ICE_PHY_E810: err = ice_ptp_prep_phy_adj_e810(hw, adj); - else + break; + case ICE_PHY_E822: err = ice_ptp_prep_phy_adj_e822(hw, adj); + break; + default: + err = -EOPNOTSUPP; + } + if (err) return err; - return ice_ptp_tmr_cmd(hw, ADJ_TIME); + return ice_ptp_tmr_cmd(hw, ICE_PTP_ADJ_TIME); } /** @@ -3105,10 +3518,14 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) */ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) { - if (ice_is_e810(hw)) + switch (hw->phy_model) { + case ICE_PHY_E810: return ice_read_phy_tstamp_e810(hw, block, idx, tstamp); - else + case ICE_PHY_E822: return ice_read_phy_tstamp_e822(hw, block, idx, tstamp); + default: + return -EOPNOTSUPP; + } } /** @@ -3117,267 +3534,484 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) * @block: the block to read from * @idx: the timestamp index to reset * - * Clear a timestamp, resetting its valid bit, from the timestamp block. For - * E822 devices, the block is the quad to clear from. For E810 devices, the - * block is the logical port to clear from. + * Clear a timestamp from the timestamp block, discarding its value without + * returning it. This resets the memory status bit for the timestamp index + * allowing it to be reused for another timestamp in the future. + * + * For E822 devices, the block number is the PHY quad to clear from. For E810 + * devices, the block number is the logical port to clear from. + * + * This function must only be called on a timestamp index whose valid bit is + * set according to ice_get_phy_tx_tstamp_ready(). 
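The increment value is what sets the clock's rate, so a frequency adjustment (the .adjfine path) boils down to scaling a nominal incval and handing it to ice_ptp_write_incval_locked(). A sketch under stated assumptions: adjust_by_scaled_ppm() is the generic helper from <linux/ptp_clock_kernel.h>, and base_incval stands in for the device's nominal increment constant, which the real driver derives from its TIME_REF tables rather than taking as a parameter:

#include <linux/ptp_clock_kernel.h>

static int example_adjfine(struct ice_hw *hw, u64 base_incval, long scaled_ppm)
{
	u64 incval = adjust_by_scaled_ppm(base_incval, scaled_ppm);

	return ice_ptp_write_incval_locked(hw, incval);
}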
*/ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx) { - if (ice_is_e810(hw)) + switch (hw->phy_model) { + case ICE_PHY_E810: return ice_clear_phy_tstamp_e810(hw, block, idx); - else + case ICE_PHY_E822: return ice_clear_phy_tstamp_e822(hw, block, idx); + default: + return -EOPNOTSUPP; + } } /** - * ice_get_phy_tx_tstamp_ready_e810 - Read Tx memory status register - * @hw: pointer to the HW struct - * @port: the PHY port to read - * @tstamp_ready: contents of the Tx memory status register - * - * E810 devices do not use a Tx memory status register. Instead simply - * indicate that all timestamps are currently ready. - */ -static int -ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready) -{ - *tstamp_ready = 0xFFFFFFFFFFFFFFFF; - return 0; -} - -/* E810T SMA functions - * - * The following functions operate specifically on E810T hardware and are used - * to access the extended GPIOs available. - */ - -/** - * ice_get_pca9575_handle + * ice_get_pf_c827_idx - find and return the C827 index for the current pf * @hw: pointer to the hw struct - * @pca9575_handle: GPIO controller's handle - * - * Find and return the GPIO controller's handle in the netlist. - * When found - the value will be cached in the hw structure and following calls - * will return cached value + * @idx: index of the found C827 PHY + * Return: + * * 0 - success + * * negative - failure */ -static int -ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle) +static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx) { - struct ice_aqc_get_link_topo *cmd; - struct ice_aq_desc desc; + struct ice_aqc_get_link_topo cmd; + u8 node_part_number; + u16 node_handle; int status; - u8 idx; + u8 ctx; - /* If handle was read previously return cached value */ - if (hw->io_expander_handle) { - *pca9575_handle = hw->io_expander_handle; + if (hw->mac_type != ICE_MAC_E810) + return -ENODEV; + + if (hw->device_id != ICE_DEV_ID_E810C_QSFP) { + *idx = C827_0; return 0; } - /* If handle was not detected read it from the netlist */ - cmd = &desc.params.get_link_topo; - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); + memset(&cmd, 0, sizeof(cmd)); - /* Set node type to GPIO controller */ - cmd->addr.topo_params.node_type_ctx = - (ICE_AQC_LINK_TOPO_NODE_TYPE_M & - ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL); + ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_PHY << ICE_AQC_LINK_TOPO_NODE_TYPE_S; + ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S; + cmd.addr.topo_params.node_type_ctx = ctx; -#define SW_PCA9575_SFP_TOPO_IDX 2 -#define SW_PCA9575_QSFP_TOPO_IDX 1 + status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number, + &node_handle); + if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827) + return -ENOENT; - /* Check if the SW IO expander controlling SMA exists in the netlist. 
*/ - if (hw->device_id == ICE_DEV_ID_E810C_SFP) - idx = SW_PCA9575_SFP_TOPO_IDX; - else if (hw->device_id == ICE_DEV_ID_E810C_QSFP) - idx = SW_PCA9575_QSFP_TOPO_IDX; + if (node_handle == E810C_QSFP_C827_0_HANDLE) + *idx = C827_0; + else if (node_handle == E810C_QSFP_C827_1_HANDLE) + *idx = C827_1; else - return -EOPNOTSUPP; + return -EIO; - cmd->addr.topo_params.index = idx; + return 0; +} - status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); - if (status) - return -EOPNOTSUPP; +/** + * ice_ptp_reset_ts_memory - Reset timestamp memory for all blocks + * @hw: pointer to the HW struct + */ +void ice_ptp_reset_ts_memory(struct ice_hw *hw) +{ + switch (hw->phy_model) { + case ICE_PHY_E822: + ice_ptp_reset_ts_memory_e822(hw); + break; + case ICE_PHY_E810: + default: + return; + } +} - /* Verify if we found the right IO expander type */ - if (desc.params.get_link_topo.node_part_num != - ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575) - return -EOPNOTSUPP; +/** + * ice_ptp_init_phc - Initialize PTP hardware clock + * @hw: pointer to the HW struct + * + * Perform the steps required to initialize the PTP hardware clock. + */ +int ice_ptp_init_phc(struct ice_hw *hw) +{ + u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned; - /* If present save the handle and return it */ - hw->io_expander_handle = - le16_to_cpu(desc.params.get_link_topo.addr.handle); - *pca9575_handle = hw->io_expander_handle; + /* Enable source clocks */ + wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M); - return 0; + /* Clear event err indications for auxiliary pins */ + (void)rd32(hw, GLTSYN_STAT(src_idx)); + + switch (hw->phy_model) { + case ICE_PHY_E810: + return ice_ptp_init_phc_e810(hw); + case ICE_PHY_E822: + return ice_ptp_init_phc_e822(hw); + default: + return -EOPNOTSUPP; + } } /** - * ice_read_sma_ctrl_e810t + * ice_get_phy_tx_tstamp_ready - Read PHY Tx memory status indication + * @hw: pointer to the HW struct + * @block: the timestamp block to check + * @tstamp_ready: storage for the PHY Tx memory status information + * + * Check the PHY for Tx timestamp memory status. This reports a 64 bit value + * which indicates which timestamps in the block may be captured. A set bit + * means the timestamp can be read. An unset bit means the timestamp is not + * ready and software should avoid reading the register. + */ +int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready) +{ + switch (hw->phy_model) { + case ICE_PHY_E810: + return ice_get_phy_tx_tstamp_ready_e810(hw, block, + tstamp_ready); + case ICE_PHY_E822: + return ice_get_phy_tx_tstamp_ready_e822(hw, block, + tstamp_ready); + break; + default: + return -EOPNOTSUPP; + } +} + +/** + * ice_cgu_get_pin_desc_e823 - get pin description array * @hw: pointer to the hw struct - * @data: pointer to data to be read from the GPIO controller + * @input: if request is done against input or output pin + * @size: number of inputs/outputs * - * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the - * PCA9575 expander, so only bits 3-7 in data are valid. + * Return: pointer to pin description array associated to given hw. 
*/ -int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data) +static const struct ice_cgu_pin_desc * +ice_cgu_get_pin_desc_e823(struct ice_hw *hw, bool input, int *size) { - int status; - u16 handle; - u8 i; + static const struct ice_cgu_pin_desc *t; - status = ice_get_pca9575_handle(hw, &handle); - if (status) - return status; + if (hw->cgu_part_number == + ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032) { + if (input) { + t = ice_e823_zl_cgu_inputs; + *size = ARRAY_SIZE(ice_e823_zl_cgu_inputs); + } else { + t = ice_e823_zl_cgu_outputs; + *size = ARRAY_SIZE(ice_e823_zl_cgu_outputs); + } + } else if (hw->cgu_part_number == + ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384) { + if (input) { + t = ice_e823_si_cgu_inputs; + *size = ARRAY_SIZE(ice_e823_si_cgu_inputs); + } else { + t = ice_e823_si_cgu_outputs; + *size = ARRAY_SIZE(ice_e823_si_cgu_outputs); + } + } else { + t = NULL; + *size = 0; + } - *data = 0; + return t; +} - for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) { - bool pin; +/** + * ice_cgu_get_pin_desc - get pin description array + * @hw: pointer to the hw struct + * @input: if request is done against input or output pins + * @size: size of array returned by function + * + * Return: pointer to pin description array associated to given hw. + */ +static const struct ice_cgu_pin_desc * +ice_cgu_get_pin_desc(struct ice_hw *hw, bool input, int *size) +{ + const struct ice_cgu_pin_desc *t = NULL; - status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET, - &pin, NULL); - if (status) - break; - *data |= (u8)(!pin) << i; + switch (hw->device_id) { + case ICE_DEV_ID_E810C_SFP: + if (input) { + t = ice_e810t_sfp_cgu_inputs; + *size = ARRAY_SIZE(ice_e810t_sfp_cgu_inputs); + } else { + t = ice_e810t_sfp_cgu_outputs; + *size = ARRAY_SIZE(ice_e810t_sfp_cgu_outputs); + } + break; + case ICE_DEV_ID_E810C_QSFP: + if (input) { + t = ice_e810t_qsfp_cgu_inputs; + *size = ARRAY_SIZE(ice_e810t_qsfp_cgu_inputs); + } else { + t = ice_e810t_qsfp_cgu_outputs; + *size = ARRAY_SIZE(ice_e810t_qsfp_cgu_outputs); + } + break; + case ICE_DEV_ID_E823L_10G_BASE_T: + case ICE_DEV_ID_E823L_1GBE: + case ICE_DEV_ID_E823L_BACKPLANE: + case ICE_DEV_ID_E823L_QSFP: + case ICE_DEV_ID_E823L_SFP: + case ICE_DEV_ID_E823C_10G_BASE_T: + case ICE_DEV_ID_E823C_BACKPLANE: + case ICE_DEV_ID_E823C_QSFP: + case ICE_DEV_ID_E823C_SFP: + case ICE_DEV_ID_E823C_SGMII: + t = ice_cgu_get_pin_desc_e823(hw, input, size); + break; + default: + break; } - return status; + return t; } /** - * ice_write_sma_ctrl_e810t + * ice_cgu_get_pin_type - get pin's type * @hw: pointer to the hw struct - * @data: data to be written to the GPIO controller + * @pin: pin index + * @input: if request is done against input or output pin * - * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1 - * of the PCA9575 expander, so only bits 3-7 in data are valid. + * Return: type of a pin. 
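Taken together, the lookup helpers give everything needed to describe one CGU pin, roughly what a dpll-subsystem registration path would collect. A sketch (the function name and the debug print are illustrative assumptions):

static int example_describe_input_pin(struct ice_hw *hw, u8 pin)
{
	const char *name = ice_cgu_get_pin_name(hw, pin, true);
	enum dpll_pin_type type = ice_cgu_get_pin_type(hw, pin, true);
	u8 freq_num;

	if (!name)
		return -ENOENT;	/* no table for this device, or pin out of range */

	ice_cgu_get_pin_freq_supp(hw, pin, true, &freq_num);
	ice_debug(hw, ICE_DBG_PTP, "pin %u: %s, type %d, %u supported freqs\n",
		  pin, name, type, freq_num);
	return 0;
}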
*/ -int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data) +enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input) { - int status; - u16 handle; - u8 i; + const struct ice_cgu_pin_desc *t; + int t_size; - status = ice_get_pca9575_handle(hw, &handle); - if (status) - return status; + t = ice_cgu_get_pin_desc(hw, input, &t_size); - for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) { - bool pin; + if (!t) + return 0; - pin = !(data & (1 << i)); - status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET, - pin, NULL); - if (status) - break; - } + if (pin >= t_size) + return 0; - return status; + return t[pin].type; } /** - * ice_read_pca9575_reg_e810t + * ice_cgu_get_pin_freq_supp - get pin's supported frequency * @hw: pointer to the hw struct - * @offset: GPIO controller register offset - * @data: pointer to data to be read from the GPIO controller + * @pin: pin index + * @input: if request is done against input or output pin + * @num: output number of supported frequencies * - * Read the register from the GPIO controller + * Get frequency supported number and array of supported frequencies. + * + * Return: array of supported frequencies for given pin. */ -int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data) +struct dpll_pin_frequency * +ice_cgu_get_pin_freq_supp(struct ice_hw *hw, u8 pin, bool input, u8 *num) { - struct ice_aqc_link_topo_addr link_topo; - __le16 addr; - u16 handle; - int err; + const struct ice_cgu_pin_desc *t; + int t_size; - memset(&link_topo, 0, sizeof(link_topo)); - - err = ice_get_pca9575_handle(hw, &handle); - if (err) - return err; + *num = 0; + t = ice_cgu_get_pin_desc(hw, input, &t_size); + if (!t) + return NULL; + if (pin >= t_size) + return NULL; + *num = t[pin].freq_supp_num; - link_topo.handle = cpu_to_le16(handle); - link_topo.topo_params.node_type_ctx = - FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, - ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED); - - addr = cpu_to_le16((u16)offset); - - return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL); + return t[pin].freq_supp; } /** - * ice_is_pca9575_present + * ice_cgu_get_pin_name - get pin's name * @hw: pointer to the hw struct + * @pin: pin index + * @input: if request is done against input or output pin * - * Check if the SW IO expander is present in the netlist + * Return: + * * null terminated char array with name + * * NULL in case of failure */ -bool ice_is_pca9575_present(struct ice_hw *hw) +const char *ice_cgu_get_pin_name(struct ice_hw *hw, u8 pin, bool input) { - u16 handle = 0; - int status; + const struct ice_cgu_pin_desc *t; + int t_size; - if (!ice_is_e810t(hw)) - return false; + t = ice_cgu_get_pin_desc(hw, input, &t_size); - status = ice_get_pca9575_handle(hw, &handle); + if (!t) + return NULL; - return !status && handle; + if (pin >= t_size) + return NULL; + + return t[pin].name; } /** - * ice_ptp_reset_ts_memory - Reset timestamp memory for all blocks - * @hw: pointer to the HW struct - */ -void ice_ptp_reset_ts_memory(struct ice_hw *hw) -{ - if (ice_is_e810(hw)) - return; + * ice_get_cgu_state - get the state of the DPLL + * @hw: pointer to the hw struct + * @dpll_idx: Index of internal DPLL unit + * @last_dpll_state: last known state of DPLL + * @pin: pointer to a buffer for returning currently active pin + * @ref_state: reference clock state + * @eec_mode: eec mode of the DPLL + * @phase_offset: pointer to a buffer for returning phase offset + * @dpll_state: state of the DPLL (output) + * + * This function will read the state of the 
DPLL (dpll_idx). Non-null + * 'pin', 'ref_state', 'eec_mode' and 'phase_offset' parameters are used to + * retrieve currently active pin, state, mode and phase_offset respectively. + * + * Return: state of the DPLL + */ +int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx, + enum dpll_lock_status last_dpll_state, u8 *pin, + u8 *ref_state, u8 *eec_mode, s64 *phase_offset, + enum dpll_lock_status *dpll_state) +{ + u8 hw_ref_state, hw_dpll_state, hw_eec_mode, hw_config; + s64 hw_phase_offset; + int status; + + status = ice_aq_get_cgu_dpll_status(hw, dpll_idx, &hw_ref_state, + &hw_dpll_state, &hw_config, + &hw_phase_offset, &hw_eec_mode); + if (status) + return status; + + if (pin) + /* current ref pin in dpll_state_refsel_status_X register */ + *pin = hw_config & ICE_AQC_GET_CGU_DPLL_CONFIG_CLK_REF_SEL; + if (phase_offset) + *phase_offset = hw_phase_offset; + if (ref_state) + *ref_state = hw_ref_state; + if (eec_mode) + *eec_mode = hw_eec_mode; + if (!dpll_state) + return 0; - ice_ptp_reset_ts_memory_e822(hw); + /* According to ZL DPLL documentation, once the state reaches LOCKED_HO_ACQ + * it never returns to FREERUN. This aligns with the ITU-T G.781 + * Recommendation. We cannot report HOLDOVER as HO memory is cleared + * while switching to another reference. + * Only when the previous state was either "LOCKED without + * HO_ACQ" or "HOLDOVER" do we actually fall back to FREERUN. + */ + if (hw_dpll_state & ICE_AQC_GET_CGU_DPLL_STATUS_STATE_LOCK) { + if (hw_dpll_state & ICE_AQC_GET_CGU_DPLL_STATUS_STATE_HO_READY) + *dpll_state = DPLL_LOCK_STATUS_LOCKED_HO_ACQ; + else + *dpll_state = DPLL_LOCK_STATUS_LOCKED; + } else if (last_dpll_state == DPLL_LOCK_STATUS_LOCKED_HO_ACQ || + last_dpll_state == DPLL_LOCK_STATUS_HOLDOVER) { + *dpll_state = DPLL_LOCK_STATUS_HOLDOVER; + } else { + *dpll_state = DPLL_LOCK_STATUS_UNLOCKED; + } + + return 0; } /** - * ice_ptp_init_phc - Initialize PTP hardware clock - * @hw: pointer to the HW struct + * ice_get_cgu_rclk_pin_info - get info on available recovered clock pins + * @hw: pointer to the hw struct + * @base_idx: returns index of first recovered clock pin on device + * @pin_num: returns number of recovered clock pins available on device * - * Perform the steps required to initialize the PTP hardware clock. + * Based on the hw, provide the caller with info about the recovered clock + * pins available on the board. 
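Note the feedback loop this creates for callers: the holdover inference above depends on the previously reported state, so monitoring code must hand the last returned value back in as last_dpll_state on the next call. A minimal polling sketch (the function name and surrounding context are assumptions):

static void example_poll_dpll(struct ice_hw *hw, u8 dpll_idx,
			      enum dpll_lock_status *last_state)
{
	enum dpll_lock_status state;
	s64 phase_offset;
	u8 pin, ref_state, eec_mode;

	if (ice_get_cgu_state(hw, dpll_idx, *last_state, &pin, &ref_state,
			      &eec_mode, &phase_offset, &state))
		return;

	*last_state = state;	/* feed back into the next poll */
}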
+ * + * Return: + * * 0 - success, information is valid + * * negative - failure, information is not valid */ -int ice_ptp_init_phc(struct ice_hw *hw) +int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num) { - u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned; + u8 phy_idx; + int ret; - /* Enable source clocks */ - wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M); + switch (hw->device_id) { + case ICE_DEV_ID_E810C_SFP: + case ICE_DEV_ID_E810C_QSFP: - /* Clear event err indications for auxiliary pins */ - (void)rd32(hw, GLTSYN_STAT(src_idx)); + ret = ice_get_pf_c827_idx(hw, &phy_idx); + if (ret) + return ret; + *base_idx = E810T_CGU_INPUT_C827(phy_idx, ICE_RCLKA_PIN); + *pin_num = ICE_E810_RCLK_PINS_NUM; + ret = 0; + break; + case ICE_DEV_ID_E823L_10G_BASE_T: + case ICE_DEV_ID_E823L_1GBE: + case ICE_DEV_ID_E823L_BACKPLANE: + case ICE_DEV_ID_E823L_QSFP: + case ICE_DEV_ID_E823L_SFP: + case ICE_DEV_ID_E823C_10G_BASE_T: + case ICE_DEV_ID_E823C_BACKPLANE: + case ICE_DEV_ID_E823C_QSFP: + case ICE_DEV_ID_E823C_SFP: + case ICE_DEV_ID_E823C_SGMII: + *pin_num = ICE_E822_RCLK_PINS_NUM; + ret = 0; + if (hw->cgu_part_number == + ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032) + *base_idx = ZL_REF1P; + else if (hw->cgu_part_number == + ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384) + *base_idx = SI_REF1P; + else + ret = -ENODEV; - if (ice_is_e810(hw)) - return ice_ptp_init_phc_e810(hw); - else - return ice_ptp_init_phc_e822(hw); + break; + default: + ret = -ENODEV; + break; + } + + return ret; } /** - * ice_get_phy_tx_tstamp_ready - Read PHY Tx memory status indication - * @hw: pointer to the HW struct - * @block: the timestamp block to check - * @tstamp_ready: storage for the PHY Tx memory status information + * ice_cgu_get_output_pin_state_caps - get output pin state capabilities + * @hw: pointer to the hw struct + * @pin_id: id of a pin + * @caps: capabilities to modify * - * Check the PHY for Tx timestamp memory status. This reports a 64 bit value - * which indicates which timestamps in the block may be captured. A set bit - * means the timestamp can be read. An unset bit means the timestamp is not - * ready and software should avoid reading the register. 
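The base_idx values returned by ice_get_cgu_rclk_pin_info() above come from the E810T_CGU_INPUT_C827() macro defined further down in ice_ptp_hw.h; a worked instance of that arithmetic, with the constants read off the enums in this patch:

/* E810T_CGU_INPUT_C827(C827_1, ICE_RCLKB_PIN)
 *   = 1 * ICE_E810_RCLK_PINS_NUM + 1 + ZL_REF1P
 *   = 1 * 2 + 1 + 2 = 5 = ZL_REF2N
 *
 * i.e. PHY 1's RCLKB line recovers its clock into the CGU's ZL_REF2N input.
 */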
+ * Return: + * * 0 - success, state capabilities were modified + * * negative - failure, capabilities were not modified */ -int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready) +int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, + unsigned long *caps) { - if (ice_is_e810(hw)) - return ice_get_phy_tx_tstamp_ready_e810(hw, block, - tstamp_ready); + bool can_change = true; + + switch (hw->device_id) { + case ICE_DEV_ID_E810C_SFP: + if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3) + can_change = false; + break; + case ICE_DEV_ID_E810C_QSFP: + if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3 || pin_id == ZL_OUT4) + can_change = false; + break; + case ICE_DEV_ID_E823L_10G_BASE_T: + case ICE_DEV_ID_E823L_1GBE: + case ICE_DEV_ID_E823L_BACKPLANE: + case ICE_DEV_ID_E823L_QSFP: + case ICE_DEV_ID_E823L_SFP: + case ICE_DEV_ID_E823C_10G_BASE_T: + case ICE_DEV_ID_E823C_BACKPLANE: + case ICE_DEV_ID_E823C_QSFP: + case ICE_DEV_ID_E823C_SFP: + case ICE_DEV_ID_E823C_SGMII: + if (hw->cgu_part_number == + ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032 && + pin_id == ZL_OUT2) + can_change = false; + else if (hw->cgu_part_number == + ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384 && + pin_id == SI_OUT1) + can_change = false; + break; + default: + return -EINVAL; + } + if (can_change) + *caps |= DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE; else - return ice_get_phy_tx_tstamp_ready_e822(hw, block, - tstamp_ready); + *caps &= ~DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE; + + return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h index 3b68cb91bd81..cf76701566c7 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h @@ -3,13 +3,15 @@ #ifndef _ICE_PTP_HW_H_ #define _ICE_PTP_HW_H_ +#include <linux/dpll.h> enum ice_ptp_tmr_cmd { - INIT_TIME, - INIT_INCVAL, - ADJ_TIME, - ADJ_TIME_AT_TIME, - READ_TIME + ICE_PTP_INIT_TIME, + ICE_PTP_INIT_INCVAL, + ICE_PTP_ADJ_TIME, + ICE_PTP_ADJ_TIME_AT_TIME, + ICE_PTP_READ_TIME, + ICE_PTP_NOP, }; enum ice_ptp_serdes { @@ -109,9 +111,83 @@ struct ice_cgu_pll_params_e822 { u32 post_pll_div; }; +#define E810C_QSFP_C827_0_HANDLE 2 +#define E810C_QSFP_C827_1_HANDLE 3 +enum ice_e810_c827_idx { + C827_0, + C827_1 +}; + +enum ice_phy_rclk_pins { + ICE_RCLKA_PIN = 0, /* SCL pin */ + ICE_RCLKB_PIN, /* SDA pin */ +}; + +#define ICE_E810_RCLK_PINS_NUM (ICE_RCLKB_PIN + 1) +#define ICE_E822_RCLK_PINS_NUM (ICE_RCLKA_PIN + 1) +#define E810T_CGU_INPUT_C827(_phy, _pin) ((_phy) * ICE_E810_RCLK_PINS_NUM + \ + (_pin) + ZL_REF1P) + +enum ice_zl_cgu_in_pins { + ZL_REF0P = 0, + ZL_REF0N, + ZL_REF1P, + ZL_REF1N, + ZL_REF2P, + ZL_REF2N, + ZL_REF3P, + ZL_REF3N, + ZL_REF4P, + ZL_REF4N, + NUM_ZL_CGU_INPUT_PINS +}; + +enum ice_zl_cgu_out_pins { + ZL_OUT0 = 0, + ZL_OUT1, + ZL_OUT2, + ZL_OUT3, + ZL_OUT4, + ZL_OUT5, + ZL_OUT6, + NUM_ZL_CGU_OUTPUT_PINS +}; + +enum ice_si_cgu_in_pins { + SI_REF0P = 0, + SI_REF0N, + SI_REF1P, + SI_REF1N, + SI_REF2P, + SI_REF2N, + SI_REF3, + SI_REF4, + NUM_SI_CGU_INPUT_PINS +}; + +enum ice_si_cgu_out_pins { + SI_OUT0 = 0, + SI_OUT1, + SI_OUT2, + SI_OUT3, + SI_OUT4, + NUM_SI_CGU_OUTPUT_PINS +}; + +struct ice_cgu_pin_desc { + char *name; + u8 index; + enum dpll_pin_type type; + u32 freq_supp_num; + struct dpll_pin_frequency *freq_supp; +}; + extern const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ]; +#define E810C_QSFP_C827_0_HANDLE 2 +#define E810C_QSFP_C827_1_HANDLE 3 + /* Table of constants related to possible TIME_REF sources */ extern 
const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ]; @@ -127,6 +203,7 @@ extern const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD]; u8 ice_get_ptp_src_clock_index(struct ice_hw *hw); bool ice_ptp_lock(struct ice_hw *hw); void ice_ptp_unlock(struct ice_hw *hw); +void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd); int ice_ptp_init_time(struct ice_hw *hw, u64 time); int ice_ptp_write_incval(struct ice_hw *hw, u64 incval); int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval); @@ -138,11 +215,8 @@ int ice_ptp_init_phc(struct ice_hw *hw); int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready); /* E822 family functions */ -int ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val); -int ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val); int ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val); int ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val); -int ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time); void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad); /** @@ -197,6 +271,19 @@ int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data); int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data); int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data); bool ice_is_pca9575_present(struct ice_hw *hw); +enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input); +struct dpll_pin_frequency * +ice_cgu_get_pin_freq_supp(struct ice_hw *hw, u8 pin, bool input, u8 *num); +const char *ice_cgu_get_pin_name(struct ice_hw *hw, u8 pin, bool input); +int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx, + enum dpll_lock_status last_dpll_state, u8 *pin, + u8 *ref_state, u8 *eec_mode, s64 *phase_offset, + enum dpll_lock_status *dpll_state); +int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num); + +void ice_ptp_init_phy_model(struct ice_hw *hw); +int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, + unsigned long *caps); #define PFTSYN_SEM_BYTES 4 diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c index e30e12321abd..c686ac0935eb 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.c +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -254,7 +254,7 @@ static const struct net_device_ops ice_repr_netdev_ops = { * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev * @netdev: pointer to netdev */ -bool ice_is_port_repr_netdev(struct net_device *netdev) +bool ice_is_port_repr_netdev(const struct net_device *netdev) { return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops); } diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h index 9c2a6f496b3b..e1ee2d2c1d2d 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.h +++ b/drivers/net/ethernet/intel/ice/ice_repr.h @@ -12,6 +12,7 @@ struct ice_repr { struct ice_q_vector *q_vector; struct net_device *netdev; struct metadata_dst *dst; + struct ice_esw_br_port *br_port; #ifdef CONFIG_ICE_SWITCHDEV /* info about slow path rule */ struct ice_rule_query_data sp_rule; @@ -27,5 +28,5 @@ void ice_repr_stop_tx_queues(struct ice_repr *repr); void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi); struct ice_repr *ice_netdev_to_repr(struct net_device *netdev); -bool ice_is_port_repr_netdev(struct net_device *netdev); +bool ice_is_port_repr_netdev(const struct net_device 
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index b664d60fd037..2f4a621254e8 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -229,29 +229,22 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req, * ice_sched_remove_elems - remove nodes from HW * @hw: pointer to the HW struct * @parent: pointer to the parent node - * @num_nodes: number of nodes - * @node_teids: array of node teids to be deleted + * @node_teid: node teid to be deleted * * This function removes nodes from HW */ static int ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, - u16 num_nodes, u32 *node_teids) + u32 node_teid) { - struct ice_aqc_delete_elem *buf; - u16 i, num_groups_removed = 0; - u16 buf_size; + DEFINE_FLEX(struct ice_aqc_delete_elem, buf, teid, 1); + u16 buf_size = __struct_size(buf); + u16 num_groups_removed = 0; int status; - buf_size = struct_size(buf, teid, num_nodes); - buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); - if (!buf) - return -ENOMEM; - buf->hdr.parent_teid = parent->info.node_teid; - buf->hdr.num_elems = cpu_to_le16(num_nodes); - for (i = 0; i < num_nodes; i++) - buf->teid[i] = cpu_to_le32(node_teids[i]); + buf->hdr.num_elems = cpu_to_le16(1); + buf->teid[0] = cpu_to_le32(node_teid); status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size, &num_groups_removed, NULL); @@ -259,7 +252,6 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n", hw->adminq.sq_last_status); - devm_kfree(ice_hw_to_dev(hw), buf); return status; } @@ -326,7 +318,7 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) { u32 teid = le32_to_cpu(node->info.node_teid); - ice_sched_remove_elems(hw, node->parent, 1, &teid); + ice_sched_remove_elems(hw, node->parent, teid); } parent = node->parent; /* root has no parent */ @@ -437,24 +429,20 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req, } /** - * ice_aq_move_sched_elems - move scheduler elements + * ice_aq_move_sched_elems - move scheduler element (just 1 group) * @hw: pointer to the HW struct - * @grps_req: number of groups to move * @buf: pointer to buffer * @buf_size: buffer size in bytes * @grps_movd: returns total number of groups moved - * @cd: pointer to command details structure or NULL * * Move scheduling elements (0x0408) */ -static int -ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req, - struct ice_aqc_move_elem *buf, u16 buf_size, - u16 *grps_movd, struct ice_sq_cd *cd) +int +ice_aq_move_sched_elems(struct ice_hw *hw, struct ice_aqc_move_elem *buf, + u16 buf_size, u16 *grps_movd) { return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems, - grps_req, (void *)buf, buf_size, - grps_movd, cd); + 1, buf, buf_size, grps_movd, NULL); } /** @@ -526,7 +514,7 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size, * * This function suspends or resumes HW nodes */ -static int +int ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, bool suspend) { @@ -569,18 +557,24 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) { struct ice_vsi_ctx *vsi_ctx; struct ice_q_ctx *q_ctx; + u16 idx; vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi_ctx) return -EINVAL; /* allocate LAN queue contexts */ if (!vsi_ctx->lan_q_ctx[tc]) { - vsi_ctx->lan_q_ctx[tc] =
devm_kcalloc(ice_hw_to_dev(hw), - new_numqs, - sizeof(*q_ctx), - GFP_KERNEL); - if (!vsi_ctx->lan_q_ctx[tc]) + q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs, + sizeof(*q_ctx), GFP_KERNEL); + if (!q_ctx) return -ENOMEM; + + for (idx = 0; idx < new_numqs; idx++) { + q_ctx[idx].q_handle = ICE_INVAL_Q_HANDLE; + q_ctx[idx].q_teid = ICE_INVAL_TEID; + } + + vsi_ctx->lan_q_ctx[tc] = q_ctx; vsi_ctx->num_lan_q_entries[tc] = new_numqs; return 0; } @@ -592,9 +586,16 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) sizeof(*q_ctx), GFP_KERNEL); if (!q_ctx) return -ENOMEM; + memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc], prev_num * sizeof(*q_ctx)); devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]); + + for (idx = prev_num; idx < new_numqs; idx++) { + q_ctx[idx].q_handle = ICE_INVAL_Q_HANDLE; + q_ctx[idx].q_teid = ICE_INVAL_TEID; + } + vsi_ctx->lan_q_ctx[tc] = q_ctx; vsi_ctx->num_lan_q_entries[tc] = new_numqs; } @@ -1044,7 +1045,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, * * This function adds nodes to a given layer. */ -static int +int ice_sched_add_nodes_to_layer(struct ice_port_info *pi, struct ice_sched_node *tc_node, struct ice_sched_node *parent, u8 layer, @@ -1119,7 +1120,7 @@ static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw) * * This function returns the current VSI layer number */ -static u8 ice_sched_get_vsi_layer(struct ice_hw *hw) +u8 ice_sched_get_vsi_layer(struct ice_hw *hw) { /* Num Layers VSI layer * 9 6 @@ -1142,7 +1143,7 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw) * * This function returns the current aggregator layer number */ -static u8 ice_sched_get_agg_layer(struct ice_hw *hw) +u8 ice_sched_get_agg_layer(struct ice_hw *hw) { /* Num Layers aggregator layer * 9 4 @@ -1180,7 +1181,7 @@ static void ice_rm_dflt_leaf_node(struct ice_port_info *pi) int status; /* remove the default leaf node */ - status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid); + status = ice_sched_remove_elems(pi->hw, node->parent, teid); if (!status) ice_free_sched_node(pi, node); } @@ -1577,7 +1578,7 @@ ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node, * This function retrieves an aggregator node for a given aggregator ID from * a given TC branch */ -static struct ice_sched_node * +struct ice_sched_node * ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node, u32 agg_id) { @@ -2139,7 +2140,7 @@ ice_get_agg_info(struct ice_hw *hw, u32 agg_id) * This function walks through the aggregator subtree to find a free parent * node */ -static struct ice_sched_node * +struct ice_sched_node * ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node, u16 *num_nodes) { @@ -2219,12 +2220,12 @@ int ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent, u16 num_items, u32 *list) { - struct ice_aqc_move_elem *buf; + DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1); + u16 buf_len = __struct_size(buf); struct ice_sched_node *node; u16 i, grps_movd = 0; struct ice_hw *hw; int status = 0; - u16 buf_len; hw = pi->hw; @@ -2236,35 +2237,27 @@ ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent, hw->max_children[parent->tx_sched_layer]) return -ENOSPC; - buf_len = struct_size(buf, teid, 1); - buf = kzalloc(buf_len, GFP_KERNEL); - if (!buf) - return -ENOMEM; - for (i = 0; i < num_items; i++) { node = ice_sched_find_node_by_teid(pi->root, list[i]); if (!node) { status = -EINVAL; - goto move_err_exit; + break; } buf->hdr.src_parent_teid =
node->info.parent_teid; buf->hdr.dest_parent_teid = parent->info.node_teid; buf->teid[0] = node->info.node_teid; buf->hdr.num_elems = cpu_to_le16(1); - status = ice_aq_move_sched_elems(hw, 1, buf, buf_len, - &grps_movd, NULL); + status = ice_aq_move_sched_elems(hw, buf, buf_len, &grps_movd); if (status && grps_movd != 1) { status = -EIO; - goto move_err_exit; + break; } /* update the SW DB */ ice_sched_update_parent(parent, node); } -move_err_exit: - kfree(buf); return status; } @@ -3958,7 +3951,7 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id, * This function sets BW limit of VSI or Aggregator scheduling node * based on TC information from passed in argument BW. */ -int +static int ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id, enum ice_agg_type agg_type, u8 tc, enum ice_rl_type rl_type, u32 bw) diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h index 9c100747445a..1aef05ea5a57 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.h +++ b/drivers/net/ethernet/intel/ice/ice_sched.h @@ -141,13 +141,28 @@ ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, int ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type); -int -ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id, - enum ice_agg_type agg_type, u8 tc, - enum ice_rl_type rl_type, u32 bw); int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes); +int +ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, + bool suspend); +struct ice_sched_node * +ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node, + u32 agg_id); +u8 ice_sched_get_agg_layer(struct ice_hw *hw); +u8 ice_sched_get_vsi_layer(struct ice_hw *hw); +struct ice_sched_node * +ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node, + u16 *num_nodes); +int +ice_sched_add_nodes_to_layer(struct ice_port_info *pi, + struct ice_sched_node *tc_node, + struct ice_sched_node *parent, u8 layer, + u16 num_nodes, u32 *first_node_teid, + u16 *num_nodes_added); void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw); void ice_sched_replay_agg(struct ice_hw *hw); +int ice_aq_move_sched_elems(struct ice_hw *hw, struct ice_aqc_move_elem *buf, + u16 buf_size, u16 *grps_movd); int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle); int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx); #endif /* _ICE_SCHED_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index 31314e7540f8..e1494f24f661 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -64,7 +64,7 @@ static void ice_free_vf_res(struct ice_vf *vf) vf->num_mac = 0; } - last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1; + last_vector_idx = vf->first_vector_idx + vf->num_msix - 1; /* clear VF MDD event information */ memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events)); @@ -102,7 +102,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf) wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0); first = vf->first_vector_idx; - last = first + pf->vfs.num_msix_per - 1; + last = first + vf->num_msix - 1; for (v = first; v <= last; v++) { u32 reg; @@ -138,6 +138,8 @@ static int ice_sriov_free_msix_res(struct ice_pf *pf) if (!pf) return -EINVAL; + bitmap_free(pf->sriov_irq_bm); + pf->sriov_irq_size = 0; pf->sriov_base_vector = 0; return 0; @@ -244,22 +246,6 @@ 
static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf) return vsi; } -/** - * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space - * @pf: pointer to PF structure - * @vf: pointer to VF that the first MSIX vector index is being calculated for - * - * This returns the first MSIX vector index in PF space that is used by this VF. - * This index is used when accessing PF relative registers such as - * GLINT_VECT2FUNC and GLINT_DYN_CTL. - * This will always be the OICR index in the AVF driver so any functionality - * using vf->first_vector_idx for queue configuration will have to increment by - * 1 to avoid meddling with the OICR index. - */ -static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf) -{ - return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per; -} /** * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware @@ -280,12 +266,12 @@ static void ice_ena_vf_msix_mappings(struct ice_vf *vf) hw = &pf->hw; pf_based_first_msix = vf->first_vector_idx; - pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1; + pf_based_last_msix = (pf_based_first_msix + vf->num_msix) - 1; device_based_first_msix = pf_based_first_msix + pf->hw.func_caps.common_cap.msix_vector_first_id; device_based_last_msix = - (device_based_first_msix + pf->vfs.num_msix_per) - 1; + (device_based_first_msix + vf->num_msix) - 1; device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id; reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) & @@ -388,16 +374,11 @@ static void ice_ena_vf_mappings(struct ice_vf *vf) */ int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector) { - struct ice_pf *pf; - if (!vf || !q_vector) return -EINVAL; - pf = vf->pf; - /* always add one to account for the OICR being the first MSIX */ - return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id + - q_vector->v_idx + 1; + return vf->first_vector_idx + q_vector->v_idx + 1; } /** @@ -527,6 +508,52 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) }
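The allocator added below hands out SR-IOV vectors from the top of the global MSI-X index space: bit 0 of sriov_irq_bm stands for the last global irq index, bit 1 for the one before it, and so on. A self-contained sketch of that index conversion (toy helpers with made-up parameters, mirroring ice_sriov_get_irqs()/ice_sriov_free_irqs() shown next):

#include <linux/bitmap.h>
#include <linux/errno.h>

/* Toy model of the reversed bitmap: position res in the bitmap maps to
 * global irq index size - res - needed for a block of needed vectors.
 */
static int demo_get_irqs(unsigned long *bm, u16 size, u16 base, u16 needed)
{
	int res = bitmap_find_next_zero_area(bm, size, 0, needed, 0);
	int index = size - res - needed;	/* bitmap pos -> global index */

	if (res >= size || index < base)
		return -ENOENT;

	bitmap_set(bm, res, needed);
	return index;	/* first global irq index of the VF's block */
}

/* Freeing inverts the conversion: global index -> bitmap position. */
static void demo_free_irqs(unsigned long *bm, u16 size, int first, u16 count)
{
	bitmap_clear(bm, size - first - count, count);
}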
/** + * ice_sriov_get_irqs - get irqs for SR-IOV use case + * @pf: pointer to PF structure + * @needed: number of irqs to get + * + * This returns the first MSI-X vector index in PF space that is used by this + * VF. This index is used when accessing PF relative registers such as + * GLINT_VECT2FUNC and GLINT_DYN_CTL. + * This will always be the OICR index in the AVF driver, so any functionality + * using vf->first_vector_idx for queue configuration will have to increment by + * 1 to avoid meddling with the OICR index. + * + * Only SRIOV specific vectors are tracked in sriov_irq_bm. SRIOV vectors are + * allocated from the end of the global irq index: the first bit in + * sriov_irq_bm corresponds to the last irq index, and so on, which makes it + * simple to extend the SRIOV region. The vectors are always located from + * sriov_base_vector to the last irq index, and sriov_base_vector itself can + * move when the region grows or shrinks. + */ +static int ice_sriov_get_irqs(struct ice_pf *pf, u16 needed) +{ + int res = bitmap_find_next_zero_area(pf->sriov_irq_bm, + pf->sriov_irq_size, 0, needed, 0); + /* conversion from number in bitmap to global irq index */ + int index = pf->sriov_irq_size - res - needed; + + if (res >= pf->sriov_irq_size || index < pf->sriov_base_vector) + return -ENOENT; + + bitmap_set(pf->sriov_irq_bm, res, needed); + return index; +} + +/** + * ice_sriov_free_irqs - free irqs used by the VF + * @pf: pointer to PF structure + * @vf: pointer to VF structure + */ +static void ice_sriov_free_irqs(struct ice_pf *pf, struct ice_vf *vf) +{ + /* Move back from first vector index to first index in bitmap */ + int bm_i = pf->sriov_irq_size - vf->first_vector_idx - vf->num_msix; + + bitmap_clear(pf->sriov_irq_bm, bm_i, vf->num_msix); + vf->first_vector_idx = 0; +} + +/** * ice_init_vf_vsi_res - initialize/setup VF VSI resources * @vf: VF to initialize/setup the VSI for * @@ -539,7 +566,9 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf) struct ice_vsi *vsi; int err; - vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf); + vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix); + if (vf->first_vector_idx < 0) + return -ENOMEM; vsi = ice_vf_vsi_setup(vf); if (!vsi) @@ -789,14 +818,19 @@ static const struct ice_vf_ops ice_sriov_vf_ops = { */ static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs) { + struct pci_dev *pdev = pf->pdev; struct ice_vfs *vfs = &pf->vfs; + struct pci_dev *vfdev = NULL; struct ice_vf *vf; - u16 vf_id; - int err; + u16 vf_pdev_id; + int err, pos; lockdep_assert_held(&vfs->table_lock); - for (vf_id = 0; vf_id < num_vfs; vf_id++) { + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_pdev_id); + + for (u16 vf_id = 0; vf_id < num_vfs; vf_id++) { vf = kzalloc(sizeof(*vf), GFP_KERNEL); if (!vf) { err = -ENOMEM; @@ -812,11 +846,28 @@ static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs) ice_initialize_vf_entry(vf); + do { + vfdev = pci_get_device(pdev->vendor, vf_pdev_id, vfdev); + } while (vfdev && vfdev->physfn != pdev); + vf->vfdev = vfdev; vf->vf_sw_id = pf->first_sw; + pci_dev_get(vfdev); + + /* set default number of MSI-X */ + vf->num_msix = pf->vfs.num_msix_per; + vf->num_vf_qs = pf->vfs.num_qps_per; + ice_vc_set_default_allowlist(vf); + hash_add_rcu(vfs->table, &vf->entry, vf_id); } + /* Decrement of refcount done by pci_get_device() inside the loop does + * not touch the last iteration's vfdev, so it has to be done manually + * to balance pci_dev_get() added within the loop. + */ + pci_dev_put(vfdev); + return 0; err_free_entries: @@ -831,10 +882,16 @@ err_free_entries: */ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) { + int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; int ret; + pf->sriov_irq_bm = bitmap_zalloc(total_vectors, GFP_KERNEL); + if (!pf->sriov_irq_bm) + return -ENOMEM; + pf->sriov_irq_size = total_vectors; + /* Disable global interrupt 0 so we don't try to handle the VFLR.
*/ wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index), ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); @@ -893,6 +950,7 @@ err_unroll_intr: /* rearm interrupts here */ ice_irq_dynamic_ena(hw, NULL, NULL); clear_bit(ICE_OICR_INTR_DIS, pf->state); + bitmap_free(pf->sriov_irq_bm); return ret; } @@ -957,6 +1015,175 @@ static int ice_check_sriov_allowed(struct ice_pf *pf) } /** + * ice_sriov_get_vf_total_msix - return number of MSI-X used by VFs + * @pdev: pointer to pci_dev struct + * + * The function is called via sysfs ops + */ +u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev) +{ + struct ice_pf *pf = pci_get_drvdata(pdev); + + return pf->sriov_irq_size - ice_get_max_used_msix_vector(pf); +} + +static int ice_sriov_move_base_vector(struct ice_pf *pf, int move) +{ + if (pf->sriov_base_vector - move < ice_get_max_used_msix_vector(pf)) + return -ENOMEM; + + pf->sriov_base_vector -= move; + return 0; +} + +static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id) +{ + u16 vf_ids[ICE_MAX_SRIOV_VFS]; + struct ice_vf *tmp_vf; + int to_remap = 0, bkt; + + /* For better irq usage, try to remap irqs of VFs + * that aren't running yet + */ + ice_for_each_vf(pf, bkt, tmp_vf) { + /* skip VF which is changing the number of MSI-X */ + if (restricted_id == tmp_vf->vf_id || + test_bit(ICE_VF_STATE_ACTIVE, tmp_vf->vf_states)) + continue; + + ice_dis_vf_mappings(tmp_vf); + ice_sriov_free_irqs(pf, tmp_vf); + + vf_ids[to_remap] = tmp_vf->vf_id; + to_remap += 1; + } + + for (int i = 0; i < to_remap; i++) { + tmp_vf = ice_get_vf_by_id(pf, vf_ids[i]); + if (!tmp_vf) + continue; + + tmp_vf->first_vector_idx = + ice_sriov_get_irqs(pf, tmp_vf->num_msix); + /* there is no need to rebuild VSI as we are only changing the + * vector indexes, not the amount of MSI-X or queues + */ + ice_ena_vf_mappings(tmp_vf); + ice_put_vf(tmp_vf); + } +} +
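The two exported helpers here, ice_sriov_get_vf_total_msix() and ice_sriov_set_msix_vec_count(), are PCI-core callbacks that the core surfaces to userspace as the sriov_vf_total_msix and sriov_vf_msix_count sysfs attributes. A sketch of the expected wiring; the actual hookup sits in the driver's pci_driver definition (presumably ice_main.c, not part of these hunks), and everything except the callback fields is elided:

#include <linux/pci.h>

/* Abbreviated driver struct; the callback fields are the ones declared
 * in struct pci_driver in <linux/pci.h>, the handlers are declared in
 * ice_sriov.h below.
 */
static struct pci_driver demo_ice_driver = {
	.name			  = KBUILD_MODNAME,
	/* .id_table, .probe, .remove, ... */
	.sriov_configure	  = ice_sriov_configure,
	.sriov_set_msix_vec_count = ice_sriov_set_msix_vec_count,
	.sriov_get_vf_total_msix  = ice_sriov_get_vf_total_msix,
};

With that in place, writing N to /sys/bus/pci/devices/<VF>/sriov_vf_msix_count for a VF with no driver bound ends up in the function documented next.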
+/** + * ice_sriov_set_msix_vec_count + * @vf_dev: pointer to pci_dev struct of VF device + * @msix_vec_count: new value for MSI-X amount on this VF + * + * Set requested MSI-X, queues and registers for @vf_dev. + * + * First do some sanity checks, e.g. whether there are any VFs and whether the + * new value is valid. Then disable the old mapping (MSI-X and queue + * registers), change MSI-X and queues, rebuild the VSI and enable the new + * mapping. + * + * If possible (no driver bound to the VF), also try to remap the other VFs to + * linearize irq register usage. + */ +int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count) +{ + struct pci_dev *pdev = pci_physfn(vf_dev); + struct ice_pf *pf = pci_get_drvdata(pdev); + u16 prev_msix, prev_queues, queues; + bool needs_rebuild = false; + struct ice_vf *vf; + int id; + + if (!ice_get_num_vfs(pf)) + return -ENOENT; + + if (!msix_vec_count) + return 0; + + queues = msix_vec_count; + /* add 1 MSI-X for OICR */ + msix_vec_count += 1; + + if (queues > min(ice_get_avail_txq_count(pf), + ice_get_avail_rxq_count(pf))) + return -EINVAL; + + if (msix_vec_count < ICE_MIN_INTR_PER_VF) + return -EINVAL; + + /* Translate the PCI VF function number to a function_id */ + for (id = 0; id < pci_num_vf(pdev); id++) { + if (vf_dev->devfn == pci_iov_virtfn_devfn(pdev, id)) + break; + } + + if (id == pci_num_vf(pdev)) + return -ENOENT; + + vf = ice_get_vf_by_id(pf, id); + + if (!vf) + return -ENOENT; + + prev_msix = vf->num_msix; + prev_queues = vf->num_vf_qs; + + if (ice_sriov_move_base_vector(pf, msix_vec_count - prev_msix)) { + ice_put_vf(vf); + return -ENOSPC; + } + + ice_dis_vf_mappings(vf); + ice_sriov_free_irqs(pf, vf); + + /* Remap all VFs besides the one that is now being configured */ + ice_sriov_remap_vectors(pf, vf->vf_id); + + vf->num_msix = msix_vec_count; + vf->num_vf_qs = queues; + vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix); + if (vf->first_vector_idx < 0) + goto unroll; + + ice_vf_vsi_release(vf); + if (vf->vf_ops->create_vsi(vf)) { + /* Try to rebuild with previous values */ + needs_rebuild = true; + goto unroll; + } + + dev_info(ice_pf_to_dev(pf), + "Changing VF %d resources to %d vectors and %d queues\n", + vf->vf_id, vf->num_msix, vf->num_vf_qs); + + ice_ena_vf_mappings(vf); + ice_put_vf(vf); + + return 0; + +unroll: + dev_info(ice_pf_to_dev(pf), + "Can't set %d vectors on VF %d, falling back to %d\n", + vf->num_msix, vf->vf_id, prev_msix); + + vf->num_msix = prev_msix; + vf->num_vf_qs = prev_queues; + vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix); + if (vf->first_vector_idx < 0) + return -EINVAL; + + if (needs_rebuild) + vf->vf_ops->create_vsi(vf); + + ice_ena_vf_mappings(vf); + ice_put_vf(vf); + + return -EINVAL; +} + +/** * ice_sriov_configure - Enable or change number of VFs via sysfs * @pdev: pointer to a pci_dev structure * @num_vfs: number of VFs to allocate or 0 to free VFs @@ -1709,31 +1936,16 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf) /** * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR - * @pdev: pointer to a pci_dev structure + * @pf: pointer to the PF structure * * Called when recovering from a PF FLR to restore interrupt capability to * the VFs.
*/ -void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) +void ice_restore_all_vfs_msi_state(struct ice_pf *pf) { - u16 vf_id; - int pos; - - if (!pci_num_vf(pdev)) - return; + struct ice_vf *vf; + u32 bkt; - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); - if (pos) { - struct pci_dev *vfdev; - - pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, - &vf_id); - vfdev = pci_get_device(pdev->vendor, vf_id, NULL); - while (vfdev) { - if (vfdev->is_virtfn && vfdev->physfn == pdev) - pci_restore_msi_state(vfdev); - vfdev = pci_get_device(pdev->vendor, vf_id, - vfdev); - } - } + ice_for_each_vf(pf, bkt, vf) + pci_restore_msi_state(vf->vfdev); } diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h index 346cb2666f3a..8488df38b586 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.h +++ b/drivers/net/ethernet/intel/ice/ice_sriov.h @@ -33,7 +33,7 @@ int ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi); void ice_free_vfs(struct ice_pf *pf); -void ice_restore_all_vfs_msi_state(struct pci_dev *pdev); +void ice_restore_all_vfs_msi_state(struct ice_pf *pf); int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, @@ -60,6 +60,8 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf); void ice_print_vf_rx_mdd_event(struct ice_vf *vf); bool ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto); +u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev); +int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count); #else /* CONFIG_PCI_IOV */ static inline void ice_process_vflr_event(struct ice_pf *pf) { } static inline void ice_free_vfs(struct ice_pf *pf) { } @@ -67,7 +69,7 @@ static inline void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { } static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { } static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { } -static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { } +static inline void ice_restore_all_vfs_msi_state(struct ice_pf *pf) { } static inline int ice_sriov_configure(struct pci_dev __always_unused *pdev, @@ -142,5 +144,16 @@ ice_get_vf_stats(struct net_device __always_unused *netdev, { return -EOPNOTSUPP; } + +static inline u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev) +{ + return 0; +} + +static inline int +ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count) +{ + return -EOPNOTSUPP; +} #endif /* CONFIG_PCI_IOV */ #endif /* _ICE_SRIOV_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 6db4ca7978cb..ee19f3aa3d19 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -20,12 +20,11 @@ * byte 0 = 0x2: to identify it as locally administered DA MAC * byte 6 = 0x2: to identify it as locally administered SA MAC * byte 12 = 0x81 & byte 13 = 0x00: - * In case of VLAN filter first two bytes defines ether type (0x8100) - * and remaining two bytes are placeholder for programming a given VLAN ID - * In case of Ether type filter it is treated as header without VLAN tag - * and byte 12 and 13 is used to program a given Ether type instead + * In case of VLAN filter the first two bytes define the ether type (0x8100) + * and the remaining two bytes are placeholders for programming a given VLAN ID + * In case of Ether type filter it is treated as a header without VLAN tag + * and bytes 12 and 13 are used to program a
given Ether type instead */ -#define DUMMY_ETH_HDR_LEN 16 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0, 0x2, 0, 0, 0, 0, 0, 0x81, 0, 0, 0}; @@ -1369,14 +1368,6 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = { ICE_PKT_PROFILE(tcp, 0), }; -#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l)) -#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s) \ - ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN) -#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s) \ - ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0) -#define ICE_SW_RULE_LG_ACT_SIZE(s, n) struct_size((s), act, (n)) -#define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n)) - /* this is a recipe to profile association bitmap */ static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES], ICE_MAX_NUM_PROFILES); @@ -1821,15 +1812,11 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type, enum ice_adminq_opc opc) { - struct ice_aqc_alloc_free_res_elem *sw_buf; + DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1); + u16 buf_len = __struct_size(sw_buf); struct ice_aqc_res_elem *vsi_ele; - u16 buf_len; int status; - buf_len = struct_size(sw_buf, elem, 1); - sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); - if (!sw_buf) - return -ENOMEM; sw_buf->num_elems = cpu_to_le16(1); if (lkup_type == ICE_SW_LKUP_MAC || @@ -1841,28 +1828,30 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id, lkup_type == ICE_SW_LKUP_DFLT) { sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP); } else if (lkup_type == ICE_SW_LKUP_VLAN) { - sw_buf->res_type = - cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE); + if (opc == ice_aqc_opc_alloc_res) + sw_buf->res_type = + cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE | + ICE_AQC_RES_TYPE_FLAG_SHARED); + else + sw_buf->res_type = + cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE); } else { - status = -EINVAL; - goto ice_aq_alloc_free_vsi_list_exit; + return -EINVAL; } if (opc == ice_aqc_opc_free_res) sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id); - status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL); + status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, opc); if (status) - goto ice_aq_alloc_free_vsi_list_exit; + return status; if (opc == ice_aqc_opc_alloc_res) { vsi_ele = &sw_buf->elem[0]; *vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp); } -ice_aq_alloc_free_vsi_list_exit: - devm_kfree(ice_hw_to_dev(hw), sw_buf); - return status; + return 0; } /** @@ -1910,7 +1899,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, * * Add(0x0290) */ -static int +int ice_aq_add_recipe(struct ice_hw *hw, struct ice_aqc_recipe_data_elem *s_recipe_list, u16 num_recipes, struct ice_sq_cd *cd) @@ -1947,7 +1936,7 @@ ice_aq_add_recipe(struct ice_hw *hw, * The caller must supply enough space in s_recipe_list to hold all possible * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES. 
*/ -static int +int ice_aq_get_recipe(struct ice_hw *hw, struct ice_aqc_recipe_data_elem *s_recipe_list, u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd) @@ -2040,7 +2029,7 @@ error_out: * @cd: pointer to command details structure or NULL * Recipe to profile association (0x0291) */ -static int +int ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, struct ice_sq_cd *cd) { @@ -2066,7 +2055,7 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, * @cd: pointer to command details structure or NULL * Associate profile ID with given recipe (0x0293) */ -static int +int ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, struct ice_sq_cd *cd) { @@ -2090,26 +2079,20 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, * @hw: pointer to the hardware structure * @rid: recipe ID returned as response to AQ call */ -static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid) +int ice_alloc_recipe(struct ice_hw *hw, u16 *rid) { - struct ice_aqc_alloc_free_res_elem *sw_buf; - u16 buf_len; + DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1); + u16 buf_len = __struct_size(sw_buf); int status; - buf_len = struct_size(sw_buf, elem, 1); - sw_buf = kzalloc(buf_len, GFP_KERNEL); - if (!sw_buf) - return -ENOMEM; - sw_buf->num_elems = cpu_to_le16(1); sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE << ICE_AQC_RES_TYPE_S) | ICE_AQC_RES_TYPE_FLAG_SHARED); - status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, - ice_aqc_opc_alloc_res, NULL); + status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, + ice_aqc_opc_alloc_res); if (!status) *rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp); - kfree(sw_buf); return status; } @@ -2272,6 +2255,10 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, /* Propagate some data to the recipe database */ recps[idx].is_root = !!is_root; recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority; + recps[idx].need_pass_l2 = root_bufs.content.act_ctrl & + ICE_AQ_RECIPE_ACT_NEED_PASS_L2; + recps[idx].allow_pass_l2 = root_bufs.content.act_ctrl & + ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2; bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS); if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) { recps[idx].chain_idx = root_bufs.content.result_indx & @@ -2460,6 +2447,15 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi) } /** + * ice_fill_eth_hdr - helper to copy dummy_eth_hdr into supplied buffer + * @eth_hdr: pointer to buffer to populate + */ +void ice_fill_eth_hdr(u8 *eth_hdr) +{ + memcpy(eth_hdr, dummy_eth_header, DUMMY_ETH_HDR_LEN); +} + +/** * ice_fill_sw_rule - Helper function to fill switch rule structure * @hw: pointer to the hardware structure * @f_info: entry containing packet forwarding information @@ -3118,7 +3114,7 @@ ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info) * handle element. This can be extended further to search VSI list with more * than 1 vsi_count. Returns pointer to VSI list entry if found. 
*/ -static struct ice_vsi_list_map_info * +struct ice_vsi_list_map_info * ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle, u16 *vsi_list_id) { @@ -3129,7 +3125,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle, list_head = &sw->recp_list[recp_id].filt_rules; list_for_each_entry(list_itr, list_head, list_entry) { - if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) { + if (list_itr->vsi_list_info) { map_info = list_itr->vsi_list_info; if (test_bit(vsi_handle, map_info->vsi_map)) { *vsi_list_id = map_info->vsi_list_id; @@ -3400,54 +3396,6 @@ exit: } /** - * ice_mac_fltr_exist - does this MAC filter exist for given VSI - * @hw: pointer to the hardware structure - * @mac: MAC address to be checked (for MAC filter) - * @vsi_handle: check MAC filter for this VSI - */ -bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle) -{ - struct ice_fltr_mgmt_list_entry *entry; - struct list_head *rule_head; - struct ice_switch_info *sw; - struct mutex *rule_lock; /* Lock to protect filter rule list */ - u16 hw_vsi_id; - - if (!ice_is_vsi_valid(hw, vsi_handle)) - return false; - - hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); - sw = hw->switch_info; - rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; - if (!rule_head) - return false; - - rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; - mutex_lock(rule_lock); - list_for_each_entry(entry, rule_head, list_entry) { - struct ice_fltr_info *f_info = &entry->fltr_info; - u8 *mac_addr = &f_info->l_data.mac.mac_addr[0]; - - if (is_zero_ether_addr(mac_addr)) - continue; - - if (f_info->flag != ICE_FLTR_TX || - f_info->src_id != ICE_SRC_ID_VSI || - f_info->lkup_type != ICE_SW_LKUP_MAC || - f_info->fltr_act != ICE_FWD_TO_VSI || - hw_vsi_id != f_info->fwd_id.hw_vsi_id) - continue; - - if (ether_addr_equal(mac, mac_addr)) { - mutex_unlock(rule_lock); - return true; - } - } - mutex_unlock(rule_lock); - return false; -} - -/** * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI * @hw: pointer to the hardware structure * @vlan_id: VLAN ID @@ -4473,29 +4421,19 @@ int ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 *counter_id) { - struct ice_aqc_alloc_free_res_elem *buf; - u16 buf_len; + DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1); + u16 buf_len = __struct_size(buf); int status; - /* Allocate resource */ - buf_len = struct_size(buf, elem, 1); - buf = kzalloc(buf_len, GFP_KERNEL); - if (!buf) - return -ENOMEM; - buf->num_elems = cpu_to_le16(num_items); buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & ICE_AQC_RES_TYPE_M) | alloc_shared); - status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, - ice_aqc_opc_alloc_res, NULL); + status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res); if (status) - goto exit; + return status; *counter_id = le16_to_cpu(buf->elem[0].e.sw_resp); - -exit: - kfree(buf); return status; } @@ -4511,27 +4449,19 @@ int ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 counter_id) { - struct ice_aqc_alloc_free_res_elem *buf; - u16 buf_len; + DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1); + u16 buf_len = __struct_size(buf); int status; - /* Free resource */ - buf_len = struct_size(buf, elem, 1); - buf = kzalloc(buf_len, GFP_KERNEL); - if (!buf) - return -ENOMEM; - buf->num_elems = cpu_to_le16(num_items); buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & ICE_AQC_RES_TYPE_M) | alloc_shared); buf->elem[0].e.sw_resp = 
cpu_to_le16(counter_id); - status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, - ice_aqc_opc_free_res, NULL); + status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res); if (status) ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n"); - kfree(buf); return status; } @@ -4540,6 +4470,39 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, .offs = {__VA_ARGS__}, \ } +/** + * ice_share_res - set a resource as shared or dedicated + * @hw: hw struct of original owner of resource + * @type: resource type + * @shared: is the resource being set to shared + * @res_id: resource id (descriptor) + */ +int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id) +{ + DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1); + u16 buf_len = __struct_size(buf); + int status; + + buf->num_elems = cpu_to_le16(1); + if (shared) + buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & + ICE_AQC_RES_TYPE_M) | + ICE_AQC_RES_TYPE_FLAG_SHARED); + else + buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & + ICE_AQC_RES_TYPE_M) & + ~ICE_AQC_RES_TYPE_FLAG_SHARED); + + buf->elem[0].e.sw_resp = cpu_to_le16(res_id); + status = ice_aq_alloc_free_res(hw, buf, buf_len, + ice_aqc_opc_share_res); + if (status) + ice_debug(hw, ICE_DBG_SW, "Could not set resource type %u id %u to %s\n", + type, res_id, shared ? "SHARED" : "DEDICATED"); + + return status; +} + /* This is mapping table entry that maps every word within a given protocol * structure to the real byte offset as per the specification of that * protocol header. @@ -4613,13 +4576,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { * ice_find_recp - find a recipe * @hw: pointer to the hardware structure * @lkup_exts: extension sequence to match - * @tun_type: type of recipe tunnel + * @rinfo: information regarding the rule e.g. priority and action info * * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found. */ static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, - enum ice_sw_tunnel_type tun_type) + const struct ice_adv_rule_info *rinfo) { bool refresh_required = true; struct ice_sw_recipe *recp; @@ -4680,9 +4643,12 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, } /* If for "i"th recipe the found was never set to false * then it means we found our match - * Also tun type of recipe needs to be checked + * Also tun type and *_pass_l2 of recipe needs to be + * checked */ - if (found && recp[i].tun_type == tun_type) + if (found && recp[i].tun_type == rinfo->tun_type && + recp[i].need_pass_l2 == rinfo->need_pass_l2 && + recp[i].allow_pass_l2 == rinfo->allow_pass_l2) return i; /* Return the recipe ID */ } } @@ -4952,6 +4918,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, unsigned long *profiles) { DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS); + struct ice_aqc_recipe_content *content; struct ice_aqc_recipe_data_elem *tmp; struct ice_aqc_recipe_data_elem *buf; struct ice_recp_grp_entry *entry; @@ -5012,6 +4979,8 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, if (status) goto err_unroll; + content = &buf[recps].content; + /* Clear the result index of the located recipe, as this will be * updated, if needed, later in the recipe creation process. */ @@ -5022,26 +4991,24 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, /* if the recipe is a non-root recipe RID should be programmed * as 0 for the rules to be applied correctly. 
*/ - buf[recps].content.rid = 0; - memset(&buf[recps].content.lkup_indx, 0, - sizeof(buf[recps].content.lkup_indx)); + content->rid = 0; + memset(&content->lkup_indx, 0, + sizeof(content->lkup_indx)); /* All recipes use look-up index 0 to match switch ID. */ - buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; - buf[recps].content.mask[0] = - cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); + content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; + content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask * to be 0 */ for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { - buf[recps].content.lkup_indx[i] = 0x80; - buf[recps].content.mask[i] = 0; + content->lkup_indx[i] = 0x80; + content->mask[i] = 0; } for (i = 0; i < entry->r_group.n_val_pairs; i++) { - buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i]; - buf[recps].content.mask[i + 1] = - cpu_to_le16(entry->fv_mask[i]); + content->lkup_indx[i + 1] = entry->fv_idx[i]; + content->mask[i + 1] = cpu_to_le16(entry->fv_mask[i]); } if (rm->n_grp_count > 1) { @@ -5055,7 +5022,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, } entry->chain_idx = chain_idx; - buf[recps].content.result_indx = + content->result_indx = ICE_AQ_RECIPE_RESULT_EN | ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) & ICE_AQ_RECIPE_RESULT_DATA_M); @@ -5069,7 +5036,13 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, ICE_MAX_NUM_RECIPES); set_bit(buf[recps].recipe_indx, (unsigned long *)buf[recps].recipe_bitmap); - buf[recps].content.act_ctrl_fwd_priority = rm->priority; + content->act_ctrl_fwd_priority = rm->priority; + + if (rm->need_pass_l2) + content->act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2; + + if (rm->allow_pass_l2) + content->act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2; recps++; } @@ -5107,9 +5080,11 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, if (status) goto err_unroll; + content = &buf[recps].content; + buf[recps].recipe_indx = (u8)rid; - buf[recps].content.rid = (u8)rid; - buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT; + content->rid = (u8)rid; + content->rid |= ICE_AQ_RECIPE_ID_IS_ROOT; /* the new entry created should also be part of rg_list to * make sure we have complete recipe */ @@ -5121,16 +5096,13 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, goto err_unroll; } last_chain_entry->rid = rid; - memset(&buf[recps].content.lkup_indx, 0, - sizeof(buf[recps].content.lkup_indx)); + memset(&content->lkup_indx, 0, sizeof(content->lkup_indx)); /* All recipes use look-up index 0 to match switch ID. 
*/ - buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; - buf[recps].content.mask[0] = - cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); + content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; + content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { - buf[recps].content.lkup_indx[i] = - ICE_AQ_RECIPE_LKUP_IGNORE; - buf[recps].content.mask[i] = 0; + content->lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE; + content->mask[i] = 0; } i = 1; @@ -5142,8 +5114,8 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND; list_for_each_entry(entry, &rm->rg_list, l_entry) { last_chain_entry->fv_idx[i] = entry->chain_idx; - buf[recps].content.lkup_indx[i] = entry->chain_idx; - buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF); + content->lkup_indx[i] = entry->chain_idx; + content->mask[i++] = cpu_to_le16(0xFFFF); set_bit(entry->rid, rm->r_bitmap); } list_add(&last_chain_entry->l_entry, &rm->rg_list); @@ -5155,7 +5127,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, status = -EINVAL; goto err_unroll; } - buf[recps].content.act_ctrl_fwd_priority = rm->priority; + content->act_ctrl_fwd_priority = rm->priority; recps++; rm->root_rid = (u8)rid; @@ -5220,6 +5192,8 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority; recp->n_grp_count = rm->n_grp_count; recp->tun_type = rm->tun_type; + recp->need_pass_l2 = rm->need_pass_l2; + recp->allow_pass_l2 = rm->allow_pass_l2; recp->recp_created = true; } rm->root_buf = buf; @@ -5388,6 +5362,9 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, /* set the recipe priority if specified */ rm->priority = (u8)rinfo->priority; + rm->need_pass_l2 = rinfo->need_pass_l2; + rm->allow_pass_l2 = rinfo->allow_pass_l2; + /* Find offsets from the field vector. Pick the first one for all the * recipes. 
*/ @@ -5403,7 +5380,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, } /* Look for a recipe which matches our requested fv / mask list */ - *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type); + *rid = ice_find_recp(hw, lkup_exts, rinfo); if (*rid < ICE_MAX_NUM_RECIPES) /* Success if found a recipe that match the existing criteria */ goto err_unroll; @@ -5839,7 +5816,9 @@ static bool ice_rules_equal(const struct ice_adv_rule_info *first, return first->sw_act.flag == second->sw_act.flag && first->tun_type == second->tun_type && first->vlan_type == second->vlan_type && - first->src_vsi == second->src_vsi; + first->src_vsi == second->src_vsi && + first->need_pass_l2 == second->need_pass_l2 && + first->allow_pass_l2 == second->allow_pass_l2; } /** @@ -5994,14 +5973,21 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw, void ice_rule_add_tunnel_metadata(struct ice_adv_lkup_elem *lkup) { lkup->type = ICE_HW_METADATA; - lkup->m_u.metadata.flags[ICE_PKT_FLAGS_TUNNEL] = + lkup->m_u.metadata.flags[ICE_PKT_FLAGS_MDID21] |= cpu_to_be16(ICE_PKT_TUNNEL_MASK); } +void ice_rule_add_direction_metadata(struct ice_adv_lkup_elem *lkup) +{ + lkup->type = ICE_HW_METADATA; + lkup->m_u.metadata.flags[ICE_PKT_FLAGS_MDID20] |= + cpu_to_be16(ICE_PKT_FROM_NETWORK); +} + void ice_rule_add_vlan_metadata(struct ice_adv_lkup_elem *lkup) { lkup->type = ICE_HW_METADATA; - lkup->m_u.metadata.flags[ICE_PKT_FLAGS_VLAN] = + lkup->m_u.metadata.flags[ICE_PKT_FLAGS_MDID20] |= cpu_to_be16(ICE_PKT_VLAN_MASK); } @@ -6078,7 +6064,8 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI || rinfo->sw_act.fltr_act == ICE_FWD_TO_Q || rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP || - rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) { + rinfo->sw_act.fltr_act == ICE_DROP_PACKET || + rinfo->sw_act.fltr_act == ICE_NOP)) { status = -EIO; goto free_pkt_profile; } @@ -6089,7 +6076,8 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, goto free_pkt_profile; } - if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) + if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI || + rinfo->sw_act.fltr_act == ICE_NOP) rinfo->sw_act.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); @@ -6159,6 +6147,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP | ICE_SINGLE_ACT_VALID_BIT; break; + case ICE_NOP: + act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, + rinfo->sw_act.fwd_id.hw_vsi_id); + act &= ~ICE_SINGLE_ACT_VALID_BIT; + break; default: status = -EIO; goto err_ice_add_adv_rule; @@ -6439,7 +6432,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, return -EIO; } - rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type); + rid = ice_find_recp(hw, &lkup_exts, rinfo); /* If did not find a recipe that match the existing criteria */ if (rid == ICE_MAX_NUM_RECIPES) return -EINVAL; @@ -6533,59 +6526,6 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw, } /** - * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a - * given VSI handle - * @hw: pointer to the hardware structure - * @vsi_handle: VSI handle for which we are supposed to remove all the rules. - * - * This function is used to remove all the rules for a given VSI and as soon - * as removing a rule fails, it will return immediately with the error code, - * else it will return success. 
- */ -int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle) -{ - struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry; - struct ice_vsi_list_map_info *map_info; - struct ice_adv_rule_info rinfo; - struct list_head *list_head; - struct ice_switch_info *sw; - int status; - u8 rid; - - sw = hw->switch_info; - for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) { - if (!sw->recp_list[rid].recp_created) - continue; - if (!sw->recp_list[rid].adv_rule) - continue; - - list_head = &sw->recp_list[rid].filt_rules; - list_for_each_entry_safe(list_itr, tmp_entry, list_head, - list_entry) { - rinfo = list_itr->rule_info; - - if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) { - map_info = list_itr->vsi_list_info; - if (!map_info) - continue; - - if (!test_bit(vsi_handle, map_info->vsi_map)) - continue; - } else if (rinfo.sw_act.vsi_handle != vsi_handle) { - continue; - } - - rinfo.sw_act.vsi_handle = vsi_handle; - status = ice_rem_adv_rule(hw, list_itr->lkups, - list_itr->lkups_cnt, &rinfo); - if (status) - return status; - } - } - return 0; -} - -/** * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI * @hw: pointer to the hardware structure * @vsi_handle: driver VSI handle diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index c84b56fe84a5..db7e501b7e0a 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -22,6 +22,16 @@ #define ICE_PROFID_IPV6_GTPU_TEID 46 #define ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER 70 +#define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n)) +#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l)) +#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s) \ + ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN) +#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s) \ + ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0) +#define ICE_SW_RULE_LG_ACT_SIZE(s, n) struct_size((s), act, (n)) + +#define DUMMY_ETH_HDR_LEN 16 + /* VSI context structure for add/get/update/free operations */ struct ice_vsi_ctx { u16 vsi_num; @@ -191,6 +201,8 @@ struct ice_adv_rule_info { u16 vlan_type; u16 fltr_rule_id; u32 priority; + u16 need_pass_l2:1; + u16 allow_pass_l2:1; u16 src_vsi; struct ice_sw_act_ctrl sw_act; struct ice_adv_rule_flags_info flags_info; @@ -254,6 +266,9 @@ struct ice_sw_recipe { */ u8 priority; + u8 need_pass_l2:1; + u8 allow_pass_l2:1; + struct list_head rg_list; /* AQ buffer associated with this recipe */ @@ -340,9 +355,11 @@ ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, int ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 counter_id); +int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id); /* Switch/bridge related commands */ void ice_rule_add_tunnel_metadata(struct ice_adv_lkup_elem *lkup); +void ice_rule_add_direction_metadata(struct ice_adv_lkup_elem *lkup); void ice_rule_add_vlan_metadata(struct ice_adv_lkup_elem *lkup); void ice_rule_add_src_vsi_metadata(struct ice_adv_lkup_elem *lkup); int @@ -354,7 +371,6 @@ int ice_add_vlan(struct ice_hw *hw, struct list_head *m_list); int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list); int ice_add_mac(struct ice_hw *hw, struct list_head *m_lst); int ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst); -bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle); bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle); int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list); int 
ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list); @@ -379,7 +395,6 @@ int ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, bool rm_vlan_promisc); -int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle); int ice_rem_adv_rule_by_id(struct ice_hw *hw, struct ice_rule_query_data *remove_entry); @@ -389,6 +404,7 @@ u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle); int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle); void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw); +void ice_fill_eth_hdr(u8 *eth_hdr); int ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, @@ -397,4 +413,21 @@ int ice_update_recipe_lkup_idx(struct ice_hw *hw, struct ice_update_recipe_lkup_idx_params *params); void ice_change_proto_id_to_dvm(void); +struct ice_vsi_list_map_info * +ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle, + u16 *vsi_list_id); +int ice_alloc_recipe(struct ice_hw *hw, u16 *rid); +int ice_aq_get_recipe(struct ice_hw *hw, + struct ice_aqc_recipe_data_elem *s_recipe_list, + u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd); +int ice_aq_add_recipe(struct ice_hw *hw, + struct ice_aqc_recipe_data_elem *s_recipe_list, + u16 num_recipes, struct ice_sq_cd *cd); +int +ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, + struct ice_sq_cd *cd); +int +ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, + struct ice_sq_cd *cd); + #endif /* _ICE_SWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index 4a34ef5f58d3..dd03cb69ad26 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -7,6 +7,8 @@ #include "ice_lib.h" #include "ice_protocol_type.h" +#define ICE_TC_METADATA_LKUP_IDX 0 + /** * ice_tc_count_lkups - determine lookup count for switch filter * @flags: TC-flower flags @@ -19,7 +21,13 @@ static int ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers, struct ice_tc_flower_fltr *fltr) { - int lkups_cnt = 0; + int lkups_cnt = 1; /* 0th lookup is metadata */ + + /* Always add metadata as the 0th lookup. Included elements: + * - Direction flag (always present) + * - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified) + * - Tunnel flag (present if tunnel) + */ if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) lkups_cnt++; @@ -54,10 +62,6 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers, if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO)) lkups_cnt++; - /* is VLAN TPID specified */ - if (flags & ICE_TC_FLWR_FIELD_VLAN_TPID) - lkups_cnt++; - /* is CVLAN specified? 
*/ if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO)) lkups_cnt++; @@ -84,10 +88,6 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers, ICE_TC_FLWR_FIELD_SRC_L4_PORT)) lkups_cnt++; - /* matching for tunneled packets in metadata */ - if (fltr->tunnel_type != TNL_LAST) - lkups_cnt++; - return lkups_cnt; } @@ -176,10 +176,9 @@ static u16 ice_check_supported_vlan_tpid(u16 vlan_tpid) static int ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr, - struct ice_adv_lkup_elem *list) + struct ice_adv_lkup_elem *list, int i) { struct ice_tc_flower_lyr_2_4_hdrs *hdr = &fltr->outer_headers; - int i = 0; if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) { u32 tenant_id; @@ -329,8 +328,7 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr, } /* always fill matching on tunneled packets in metadata */ - ice_rule_add_tunnel_metadata(&list[i]); - i++; + ice_rule_add_tunnel_metadata(&list[ICE_TC_METADATA_LKUP_IDX]); return i; } @@ -358,13 +356,16 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers; bool inner = false; u16 vlan_tpid = 0; - int i = 0; + int i = 1; /* 0th lookup is metadata */ rule_info->vlan_type = vlan_tpid; + /* Always add direction metadata */ + ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]); + rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type); if (tc_fltr->tunnel_type != TNL_LAST) { - i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list); + i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i); headers = &tc_fltr->inner_headers; inner = true; @@ -431,8 +432,7 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags, rule_info->vlan_type = ice_check_supported_vlan_tpid(vlan_tpid); - ice_rule_add_vlan_metadata(&list[i]); - i++; + ice_rule_add_vlan_metadata(&list[ICE_TC_METADATA_LKUP_IDX]); } if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO)) { @@ -630,32 +630,83 @@ bool ice_is_tunnel_supported(struct net_device *dev) return ice_tc_tun_get_type(dev) != TNL_LAST; } -static int -ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr, - struct flow_action_entry *act) +static bool ice_tc_is_dev_uplink(struct net_device *dev) +{ + return netif_is_ice(dev) || ice_is_tunnel_supported(dev); +} + +static int ice_tc_setup_redirect_action(struct net_device *filter_dev, + struct ice_tc_flower_fltr *fltr, + struct net_device *target_dev) { struct ice_repr *repr; + fltr->action.fltr_act = ICE_FWD_TO_VSI; + + if (ice_is_port_repr_netdev(filter_dev) && + ice_is_port_repr_netdev(target_dev)) { + repr = ice_netdev_to_repr(target_dev); + + fltr->dest_vsi = repr->src_vsi; + fltr->direction = ICE_ESWITCH_FLTR_EGRESS; + } else if (ice_is_port_repr_netdev(filter_dev) && + ice_tc_is_dev_uplink(target_dev)) { + repr = ice_netdev_to_repr(filter_dev); + + fltr->dest_vsi = repr->src_vsi->back->switchdev.uplink_vsi; + fltr->direction = ICE_ESWITCH_FLTR_EGRESS; + } else if (ice_tc_is_dev_uplink(filter_dev) && + ice_is_port_repr_netdev(target_dev)) { + repr = ice_netdev_to_repr(target_dev); + + fltr->dest_vsi = repr->src_vsi; + fltr->direction = ICE_ESWITCH_FLTR_INGRESS; + } else { + NL_SET_ERR_MSG_MOD(fltr->extack, + "Unsupported netdevice in switchdev mode"); + return -EINVAL; + } + + return 0; +} + +static int +ice_tc_setup_drop_action(struct net_device *filter_dev, + struct ice_tc_flower_fltr *fltr) +{ + fltr->action.fltr_act = ICE_DROP_PACKET; + + if (ice_is_port_repr_netdev(filter_dev)) { + fltr->direction = ICE_ESWITCH_FLTR_EGRESS; 
+ } else if (ice_tc_is_dev_uplink(filter_dev)) { + fltr->direction = ICE_ESWITCH_FLTR_INGRESS; + } else { + NL_SET_ERR_MSG_MOD(fltr->extack, + "Unsupported netdevice in switchdev mode"); + return -EINVAL; + } + + return 0; +} + +static int ice_eswitch_tc_parse_action(struct net_device *filter_dev, + struct ice_tc_flower_fltr *fltr, + struct flow_action_entry *act) +{ + int err; + switch (act->id) { case FLOW_ACTION_DROP: - fltr->action.fltr_act = ICE_DROP_PACKET; + err = ice_tc_setup_drop_action(filter_dev, fltr); + if (err) + return err; + break; case FLOW_ACTION_REDIRECT: - fltr->action.fltr_act = ICE_FWD_TO_VSI; - - if (ice_is_port_repr_netdev(act->dev)) { - repr = ice_netdev_to_repr(act->dev); - - fltr->dest_vsi = repr->src_vsi; - fltr->direction = ICE_ESWITCH_FLTR_INGRESS; - } else if (netif_is_ice(act->dev) || - ice_is_tunnel_supported(act->dev)) { - fltr->direction = ICE_ESWITCH_FLTR_EGRESS; - } else { - NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode"); - return -EINVAL; - } + err = ice_tc_setup_redirect_action(filter_dev, fltr, act->dev); + if (err) + return err; break; @@ -696,10 +747,6 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) goto exit; } - /* egress traffic is always redirect to uplink */ - if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS) - fltr->dest_vsi = vsi->back->switchdev.uplink_vsi; - rule_info.sw_act.fltr_act = fltr->action.fltr_act; if (fltr->action.fltr_act != ICE_DROP_PACKET) rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx; @@ -713,13 +760,21 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) rule_info.flags_info.act_valid = true; if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) { + /* Uplink to VF */ rule_info.sw_act.flag |= ICE_FLTR_RX; rule_info.sw_act.src = hw->pf_id; rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE; - } else { + } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS && + fltr->dest_vsi == vsi->back->switchdev.uplink_vsi) { + /* VF to Uplink */ rule_info.sw_act.flag |= ICE_FLTR_TX; rule_info.sw_act.src = vsi->idx; rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE; + } else { + /* VF to VF */ + rule_info.sw_act.flag |= ICE_FLTR_TX; + rule_info.sw_act.src = vsi->idx; + rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE; } /* specify the cookie as filter_rule_id */ @@ -1343,24 +1398,24 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, dissector = rule->match.dissector; if (dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_VLAN) | - BIT(FLOW_DISSECTOR_KEY_CVLAN) | - BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | - BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | - BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | - BIT(FLOW_DISSECTOR_KEY_IP) | - BIT(FLOW_DISSECTOR_KEY_ENC_IP) | - BIT(FLOW_DISSECTOR_KEY_PORTS) | - BIT(FLOW_DISSECTOR_KEY_PPPOE) | - BIT(FLOW_DISSECTOR_KEY_L2TPV3))) { + ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | + BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) | + 
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IP) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) | + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_PPPOE) | + BIT_ULL(FLOW_DISSECTOR_KEY_L2TPV3))) { NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used"); return -EOPNOTSUPP; } @@ -1382,10 +1437,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, */ headers = &fltr->inner_headers; } else if (dissector->used_keys & - (BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | - BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))) { + (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS))) { NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel"); return -EOPNOTSUPP; } else { @@ -1745,16 +1800,17 @@ ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr, /** * ice_parse_tc_flower_actions - Parse the actions for a TC filter + * @filter_dev: Pointer to device on which filter is being added * @vsi: Pointer to VSI * @cls_flower: Pointer to TC flower offload structure * @fltr: Pointer to TC flower filter structure * * Parse the actions for a TC filter */ -static int -ice_parse_tc_flower_actions(struct ice_vsi *vsi, - struct flow_cls_offload *cls_flower, - struct ice_tc_flower_fltr *fltr) +static int ice_parse_tc_flower_actions(struct net_device *filter_dev, + struct ice_vsi *vsi, + struct flow_cls_offload *cls_flower, + struct ice_tc_flower_fltr *fltr) { struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower); struct flow_action *flow_action = &rule->action; @@ -1769,7 +1825,7 @@ ice_parse_tc_flower_actions(struct ice_vsi *vsi, flow_action_for_each(i, act, flow_action) { if (ice_is_eswitch_mode_switchdev(vsi->back)) - err = ice_eswitch_tc_parse_action(fltr, act); + err = ice_eswitch_tc_parse_action(filter_dev, fltr, act); else err = ice_tc_parse_action(vsi, fltr, act); if (err) @@ -1856,7 +1912,7 @@ ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi, if (err < 0) goto err; - err = ice_parse_tc_flower_actions(vsi, f, fltr); + err = ice_parse_tc_flower_actions(netdev, vsi, f, fltr); if (err < 0) goto err; diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h index ae98d5a8ff60..b2f5c9fe0149 100644 --- a/drivers/net/ethernet/intel/ice/ice_trace.h +++ b/drivers/net/ethernet/intel/ice/ice_trace.h @@ -21,6 +21,7 @@ #define _ICE_TRACE_H_ #include <linux/tracepoint.h> +#include "ice_eswitch_br.h" /* ice_trace() macro enables shared code to refer to trace points * like: @@ -240,6 +241,95 @@ DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_req); DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_done); DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_complete); +DECLARE_EVENT_CLASS(ice_esw_br_fdb_template, + TP_PROTO(struct ice_esw_br_fdb_entry *fdb), + TP_ARGS(fdb), + TP_STRUCT__entry(__array(char, dev_name, IFNAMSIZ) + __array(unsigned char, addr, ETH_ALEN) + __field(u16, vid) + __field(int, flags)), + TP_fast_assign(strscpy(__entry->dev_name, + netdev_name(fdb->dev), + IFNAMSIZ); + memcpy(__entry->addr, fdb->data.addr, ETH_ALEN); + __entry->vid = fdb->data.vid; + __entry->flags = fdb->flags;), + TP_printk("net_device=%s addr=%pM vid=%u flags=%x", + 
__entry->dev_name, + __entry->addr, + __entry->vid, + __entry->flags) +); + +DEFINE_EVENT(ice_esw_br_fdb_template, + ice_eswitch_br_fdb_entry_create, + TP_PROTO(struct ice_esw_br_fdb_entry *fdb), + TP_ARGS(fdb) +); + +DEFINE_EVENT(ice_esw_br_fdb_template, + ice_eswitch_br_fdb_entry_find_and_delete, + TP_PROTO(struct ice_esw_br_fdb_entry *fdb), + TP_ARGS(fdb) +); + +DECLARE_EVENT_CLASS(ice_esw_br_vlan_template, + TP_PROTO(struct ice_esw_br_vlan *vlan), + TP_ARGS(vlan), + TP_STRUCT__entry(__field(u16, vid) + __field(u16, flags)), + TP_fast_assign(__entry->vid = vlan->vid; + __entry->flags = vlan->flags;), + TP_printk("vid=%u flags=%x", + __entry->vid, + __entry->flags) +); + +DEFINE_EVENT(ice_esw_br_vlan_template, + ice_eswitch_br_vlan_create, + TP_PROTO(struct ice_esw_br_vlan *vlan), + TP_ARGS(vlan) +); + +DEFINE_EVENT(ice_esw_br_vlan_template, + ice_eswitch_br_vlan_cleanup, + TP_PROTO(struct ice_esw_br_vlan *vlan), + TP_ARGS(vlan) +); + +#define ICE_ESW_BR_PORT_NAME_L 16 + +DECLARE_EVENT_CLASS(ice_esw_br_port_template, + TP_PROTO(struct ice_esw_br_port *port), + TP_ARGS(port), + TP_STRUCT__entry(__field(u16, vport_num) + __array(char, port_type, ICE_ESW_BR_PORT_NAME_L)), + TP_fast_assign(__entry->vport_num = port->vsi_idx; + if (port->type == ICE_ESWITCH_BR_UPLINK_PORT) + strscpy(__entry->port_type, + "Uplink", + ICE_ESW_BR_PORT_NAME_L); + else + strscpy(__entry->port_type, + "VF Representor", + ICE_ESW_BR_PORT_NAME_L);), + TP_printk("vport_num=%u port type=%s", + __entry->vport_num, + __entry->port_type) +); + +DEFINE_EVENT(ice_esw_br_port_template, + ice_eswitch_br_port_link, + TP_PROTO(struct ice_esw_br_port *port), + TP_ARGS(port) +); + +DEFINE_EVENT(ice_esw_br_port_template, + ice_eswitch_br_port_unlink, + TP_PROTO(struct ice_esw_br_port *port), + TP_ARGS(port) +); + /* End tracepoints */ #endif /* _ICE_TRACE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 52d0a126eb61..9e97ea863068 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -2306,9 +2306,6 @@ ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb, if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) return; - if (!tx_ring->ptp_tx) - return; - /* Tx timestamps cannot be sampled when doing TSO */ if (first->tx_flags & ICE_TX_FLAGS_TSO) return; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 166413fc33f4..daf7b9dbb143 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -380,7 +380,6 @@ struct ice_tx_ring { #define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2) u8 flags; u8 dcb_tc; /* Traffic class of ring */ - u8 ptp_tx; } ____cacheline_internodealigned_in_smp; static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring) diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index c8322fb6f2b3..7e06373e14d9 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -450,7 +450,7 @@ void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[first_idx]; if (xdp_res & ICE_XDP_REDIR) - xdp_do_flush_map(); + xdp_do_flush(); if (xdp_res & ICE_XDP_TX) { if (static_branch_unlikely(&ice_xdp_locking_key)) diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index a09556e57803..a18ca0ff879f 
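The ice_trace.h additions stamp six bridge tracepoints out of three shared templates. A condensed sketch of the DECLARE_EVENT_CLASS/DEFINE_EVENT pairing, with hypothetical event names; it assumes the usual tracepoint plumbing (TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE at the bottom of the header and a single .c file defining CREATE_TRACE_POINTS before including it):

#undef TRACE_SYSTEM
#define TRACE_SYSTEM my_drv

#if !defined(_MY_DRV_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _MY_DRV_TRACE_H_

#include <linux/tracepoint.h>

/* One class describes the record layout and formatting once... */
DECLARE_EVENT_CLASS(my_vlan_template,
                    TP_PROTO(u16 vid, u16 flags),
                    TP_ARGS(vid, flags),
                    TP_STRUCT__entry(__field(u16, vid)
                                     __field(u16, flags)),
                    TP_fast_assign(__entry->vid = vid;
                                   __entry->flags = flags;),
                    TP_printk("vid=%u flags=%x", __entry->vid, __entry->flags)
);

/* ...and each DEFINE_EVENT stamps out one tracepoint sharing it. */
DEFINE_EVENT(my_vlan_template, my_vlan_create,
             TP_PROTO(u16 vid, u16 flags),
             TP_ARGS(vid, flags)
);

DEFINE_EVENT(my_vlan_template, my_vlan_cleanup,
             TP_PROTO(u16 vid, u16 flags),
             TP_ARGS(vid, flags)
);

#endif /* _MY_DRV_TRACE_H_ */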
100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2018, Intel Corporation. */ +/* Copyright (c) 2018-2023, Intel Corporation. */ #ifndef _ICE_TYPE_H_ #define _ICE_TYPE_H_ @@ -129,6 +129,7 @@ enum ice_set_fc_aq_failures { enum ice_mac_type { ICE_MAC_UNKNOWN = 0, ICE_MAC_E810, + ICE_MAC_E830, ICE_MAC_GENERIC, }; @@ -277,6 +278,8 @@ struct ice_hw_common_caps { u8 dcb; u8 ieee_1588; u8 rdma; + u8 roce_lag; + u8 sriov_lag; bool nvm_update_pending_nvm; bool nvm_update_pending_orom; @@ -820,6 +823,13 @@ struct ice_mbx_data { u16 async_watermark_val; }; +/* PHY model */ +enum ice_phy_model { + ICE_PHY_UNSUP = -1, + ICE_PHY_E810 = 1, + ICE_PHY_E822, +}; + /* Port hardware description */ struct ice_hw { u8 __iomem *hw_addr; @@ -841,6 +851,7 @@ struct ice_hw { u8 revision_id; u8 pf_id; /* device profile info */ + enum ice_phy_model phy_model; u16 max_burst_size; /* driver sets this value */ @@ -899,17 +910,20 @@ struct ice_hw { /* INTRL granularity in 1 us */ u8 intrl_gran; -#define ICE_PHY_PER_NAC 1 -#define ICE_MAX_QUAD 2 -#define ICE_NUM_QUAD_TYPE 2 -#define ICE_PORTS_PER_QUAD 4 -#define ICE_PHY_0_LAST_QUAD 1 -#define ICE_PORTS_PER_PHY 8 -#define ICE_NUM_EXTERNAL_PORTS ICE_PORTS_PER_PHY +#define ICE_PHY_PER_NAC_E822 1 +#define ICE_MAX_QUAD 2 +#define ICE_QUADS_PER_PHY_E822 2 +#define ICE_PORTS_PER_PHY_E822 8 +#define ICE_PORTS_PER_QUAD 4 +#define ICE_PORTS_PER_PHY_E810 4 +#define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD) /* Active package version (currently active) */ struct ice_pkg_ver active_pkg_ver; + u32 pkg_seg_id; + u32 pkg_sign_type; u32 active_track_id; + u8 pkg_has_signing_seg:1; u8 active_pkg_name[ICE_PKG_NAME_SIZE]; u8 active_pkg_in_nvm; @@ -963,6 +977,7 @@ struct ice_hw { DECLARE_BITMAP(hw_ptype, ICE_FLOW_PTYPE_MAX); u8 dvm_ena; u16 io_expander_handle; + u8 cgu_part_number; }; /* Statistics collected by each port, VSI, VEB, and S-channel */ @@ -1033,14 +1048,15 @@ enum ice_sw_fwd_act_type { ICE_FWD_TO_Q, ICE_FWD_TO_QGRP, ICE_DROP_PACKET, + ICE_NOP, ICE_INVAL_ACT }; struct ice_aq_get_set_rss_lut_params { - u16 vsi_handle; /* software VSI handle */ - u16 lut_size; /* size of the LUT buffer */ - u8 lut_type; /* type of the LUT (i.e. VSI, PF, Global) */ u8 *lut; /* input RSS LUT for set and output RSS LUT for get */ + enum ice_lut_size lut_size; /* size of the LUT buffer */ + enum ice_lut_type lut_type; /* type of the LUT (i.e. 
VSI, PF, Global) */ + u16 vsi_handle; /* software VSI handle */ u8 global_lut_id; /* only valid when lut_type is global */ }; @@ -1142,9 +1158,6 @@ struct ice_aq_get_set_rss_lut_params { #define ICE_SR_WORDS_IN_1KB 512 -/* Hash redirection LUT for VSI - maximum array size */ -#define ICE_VSIQF_HLUT_ARRAY_SIZE ((VSIQF_HLUT_MAX_INDEX + 1) * 4) - /* AQ API version for LLDP_FILTER_CONTROL */ #define ICE_FW_API_LLDP_FLTR_MAJ 1 #define ICE_FW_API_LLDP_FLTR_MIN 7 diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index ea3310be8354..b7ae09952156 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -56,6 +56,8 @@ static void ice_release_vf(struct kref *ref) { struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt); + pci_dev_put(vf->vfdev); + vf->vf_ops->free(vf); } @@ -304,6 +306,237 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf) } /** + * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN + * @vf: VF to add VLAN filters for + * @vsi: Pointer to VSI + * + * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver + * always re-adds either a VLAN 0 or port VLAN based filter after reset. + */ +static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi) +{ + struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); + struct device *dev = ice_pf_to_dev(vf->pf); + int err; + + if (ice_vf_is_port_vlan_ena(vf)) { + err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info); + if (err) { + dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n", + vf->vf_id, err); + return err; + } + + err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info); + } else { + err = ice_vsi_add_vlan_zero(vsi); + } + + if (err) { + dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n", + ice_vf_is_port_vlan_ena(vf) ? + ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err); + return err; + } + + err = vlan_ops->ena_rx_filtering(vsi); + if (err) + dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n", + vf->vf_id, vsi->idx, err); + + return 0; +} + +/** + * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration + * @vf: VF to re-apply the configuration for + * + * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver + * needs to re-apply the host configured Tx rate limiting configuration.
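ice_vf_rebuild_host_vlan_cfg() above distinguishes hard failures (a VLAN filter that cannot be re-added aborts the rebuild) from soft ones (a failed Rx-filtering enable only warns). A compilable condensation of that split; the helpers are stand-ins, not driver API:

#include <stdio.h>
#include <stdbool.h>

static int add_port_vlan(void) { return 0; }
static int add_vlan_zero(void) { return 0; }
static int ena_rx_filtering(void) { return -1; /* pretend soft failure */ }

static int rebuild_vlan_cfg(bool port_vlan_ena)
{
        /* exactly one of the two filters is re-added after reset */
        int err = port_vlan_ena ? add_port_vlan() : add_vlan_zero();

        if (err)
                return err;              /* hard failure: abort rebuild */

        if (ena_rx_filtering())
                fprintf(stderr, "warn: Rx VLAN filtering not enabled\n");
        return 0;                        /* soft failure: keep going */
}

int main(void)
{
        return rebuild_vlan_cfg(true);
}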
+ */ +static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + int err; + + if (WARN_ON(!vsi)) + return -EINVAL; + + if (vf->min_tx_rate) { + err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000); + if (err) { + dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n", + vf->min_tx_rate, vf->vf_id, err); + return err; + } + } + + if (vf->max_tx_rate) { + err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000); + if (err) { + dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n", + vf->max_tx_rate, vf->vf_id, err); + return err; + } + } + + return 0; +} + +/** + * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value + * @vf: VF to configure trust setting for + */ +static void ice_vf_set_host_trust_cfg(struct ice_vf *vf) +{ + assign_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps, vf->trusted); +} + +/** + * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA + * @vf: VF to add MAC filters for + * + * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver + * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset. + */ +static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + u8 broadcast[ETH_ALEN]; + int status; + + if (WARN_ON(!vsi)) + return -EINVAL; + + if (ice_is_eswitch_mode_switchdev(vf->pf)) + return 0; + + eth_broadcast_addr(broadcast); + status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); + if (status) { + dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n", + vf->vf_id, status); + return status; + } + + vf->num_mac++; + + if (is_valid_ether_addr(vf->hw_lan_addr)) { + status = ice_fltr_add_mac(vsi, vf->hw_lan_addr, + ICE_FWD_TO_VSI); + if (status) { + dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n", + &vf->hw_lan_addr[0], vf->vf_id, + status); + return status; + } + vf->num_mac++; + + ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr); + } + + return 0; +} + +/** + * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config + * @vsi: Pointer to VSI + * + * This function moves VSI into corresponding scheduler aggregator node + * based on cached value of "aggregator node info" per VSI + */ +static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + struct device *dev; + int status; + + if (!vsi->agg_node) + return; + + dev = ice_pf_to_dev(pf); + if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) { + dev_dbg(dev, + "agg_id %u already has reached max_num_vsis %u\n", + vsi->agg_node->agg_id, vsi->agg_node->num_vsis); + return; + } + + status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id, + vsi->idx, vsi->tc_cfg.ena_tc); + if (status) + dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node", + vsi->idx, vsi->agg_node->agg_id); + else + vsi->agg_node->num_vsis++; +} + +/** + * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset + * @vf: VF to rebuild host configuration on + */ +static void ice_vf_rebuild_host_cfg(struct ice_vf *vf) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + + if (WARN_ON(!vsi)) + return; + + ice_vf_set_host_trust_cfg(vf); + + if (ice_vf_rebuild_host_mac_cfg(vf)) + dev_err(dev, "failed to rebuild default MAC configuration for VF 
%d\n", + vf->vf_id); + + if (ice_vf_rebuild_host_vlan_cfg(vf, vsi)) + dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n", + vf->vf_id); + + if (ice_vf_rebuild_host_tx_rate_cfg(vf)) + dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n", + vf->vf_id); + + if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk)) + dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n", + vf->vf_id); + + /* rebuild aggregator node config for main VF VSI */ + ice_vf_rebuild_aggregator_node_cfg(vsi); +} + +/** + * ice_set_vf_state_qs_dis - Set VF queues state to disabled + * @vf: pointer to the VF structure + */ +static void ice_set_vf_state_qs_dis(struct ice_vf *vf) +{ + /* Clear Rx/Tx enabled queues flag */ + bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF); + bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF); + clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); +} + +/** + * ice_vf_set_initialized - VF is ready for VIRTCHNL communication + * @vf: VF to set in initialized state + * + * After this function the VF will be ready to receive/handle the + * VIRTCHNL_OP_GET_VF_RESOURCES message + */ +static void ice_vf_set_initialized(struct ice_vf *vf) +{ + ice_set_vf_state_qs_dis(vf); + clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); + clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); + clear_bit(ICE_VF_STATE_DIS, vf->vf_states); + set_bit(ICE_VF_STATE_INIT, vf->vf_states); + memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps)); +} + +/** * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild * @vf: the VF being reset * @@ -596,12 +829,16 @@ static void ice_notify_vf_reset(struct ice_vf *vf) int ice_reset_vf(struct ice_vf *vf, u32 flags) { struct ice_pf *pf = vf->pf; + struct ice_lag *lag; struct ice_vsi *vsi; + u8 act_prt, pri_prt; struct device *dev; int err = 0; bool rsd; dev = ice_pf_to_dev(pf); + act_prt = ICE_LAG_INVALID_PORT; + pri_prt = pf->hw.port_info->lport; if (flags & ICE_VF_RESET_NOTIFY) ice_notify_vf_reset(vf); @@ -612,6 +849,17 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) return 0; } + lag = pf->lag; + mutex_lock(&pf->lag_mutex); + if (lag && lag->bonded && lag->primary) { + act_prt = lag->active_port; + if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT && + lag->upper_netdev) + ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt); + else + act_prt = ICE_LAG_INVALID_PORT; + } + if (flags & ICE_VF_RESET_LOCK) mutex_lock(&vf->cfg_lock); else @@ -704,19 +952,12 @@ out_unlock: if (flags & ICE_VF_RESET_LOCK) mutex_unlock(&vf->cfg_lock); - return err; -} + if (lag && lag->bonded && lag->primary && + act_prt != ICE_LAG_INVALID_PORT) + ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt); + mutex_unlock(&pf->lag_mutex); -/** - * ice_set_vf_state_qs_dis - Set VF queues state to disabled - * @vf: pointer to the VF structure - */ -static void ice_set_vf_state_qs_dis(struct ice_vf *vf) -{ - /* Clear Rx/Tx enabled queues flag */ - bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF); - bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF); - clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); + return err; } /** @@ -960,211 +1201,6 @@ bool ice_is_vf_link_up(struct ice_vf *vf) } /** - * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value - * @vf: VF to configure trust setting for - */ -static void ice_vf_set_host_trust_cfg(struct ice_vf *vf) -{ - if (vf->trusted) - set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); - else - clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); -} - -/** - * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's 
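ice_reset_vf() now brackets its work with a LAG-aware save/do/restore sequence: under pf->lag_mutex, VF scheduler nodes are moved from the active bonded port back to the primary, the reset runs, and the nodes are moved back on exit. A simplified, compilable sketch of that bracketing (all names are hypothetical):

#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

#define INVALID_PORT 0xFFu

static pthread_mutex_t lag_mutex = PTHREAD_MUTEX_INITIALIZER;

static void move_vf_nodes(unsigned int from, unsigned int to)
{
        printf("move VF nodes: port %u -> port %u\n", from, to);
}

static void reset_vf(bool bonded, unsigned int active, unsigned int primary)
{
        unsigned int saved = INVALID_PORT;

        pthread_mutex_lock(&lag_mutex);
        if (bonded && active != primary && active != INVALID_PORT) {
                saved = active;                 /* remember active port */
                move_vf_nodes(active, primary); /* park nodes on primary */
        }

        printf("...reset work runs against the primary port...\n");

        if (saved != INVALID_PORT)
                move_vf_nodes(primary, saved);  /* restore on the way out */
        pthread_mutex_unlock(&lag_mutex);
}

int main(void)
{
        reset_vf(true, 2, 0);
        return 0;
}

The same bracketing appears again around ice_vc_cfg_qs_msg() further down, including on its error path.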
perm_addr/LAA - * @vf: VF to add MAC filters for - * - * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver - * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset. - */ -static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) -{ - struct device *dev = ice_pf_to_dev(vf->pf); - struct ice_vsi *vsi = ice_get_vf_vsi(vf); - u8 broadcast[ETH_ALEN]; - int status; - - if (WARN_ON(!vsi)) - return -EINVAL; - - if (ice_is_eswitch_mode_switchdev(vf->pf)) - return 0; - - eth_broadcast_addr(broadcast); - status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); - if (status) { - dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n", - vf->vf_id, status); - return status; - } - - vf->num_mac++; - - if (is_valid_ether_addr(vf->hw_lan_addr)) { - status = ice_fltr_add_mac(vsi, vf->hw_lan_addr, - ICE_FWD_TO_VSI); - if (status) { - dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n", - &vf->hw_lan_addr[0], vf->vf_id, - status); - return status; - } - vf->num_mac++; - - ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr); - } - - return 0; -} - -/** - * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN - * @vf: VF to add MAC filters for - * @vsi: Pointer to VSI - * - * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver - * always re-adds either a VLAN 0 or port VLAN based filter after reset. - */ -static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi) -{ - struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); - struct device *dev = ice_pf_to_dev(vf->pf); - int err; - - if (ice_vf_is_port_vlan_ena(vf)) { - err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info); - if (err) { - dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n", - vf->vf_id, err); - return err; - } - - err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info); - } else { - err = ice_vsi_add_vlan_zero(vsi); - } - - if (err) { - dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n", - ice_vf_is_port_vlan_ena(vf) ? - ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err); - return err; - } - - err = vlan_ops->ena_rx_filtering(vsi); - if (err) - dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n", - vf->vf_id, vsi->idx, err); - - return 0; -} - -/** - * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration - * @vf: VF to re-apply the configuration for - * - * Called after a VF VSI has been re-added/rebuild during reset. The PF driver - * needs to re-apply the host configured Tx rate limiting configuration. 
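The trust-flag rebuild above collapses the removed if/set_bit/else/clear_bit sequence into a single assign_bit() call. A userspace stand-in demonstrating the equivalence (the kernel helper operates atomically on a bitmap; this sketch does not):

#include <stdio.h>

/* Userspace approximation of the kernel's assign_bit(): set or clear
 * bit nr according to value. */
static void assign_bit(unsigned int nr, unsigned long *addr, int value)
{
        if (value)
                *addr |= 1UL << nr;
        else
                *addr &= ~(1UL << nr);
}

int main(void)
{
        unsigned long caps = 0;

        assign_bit(3, &caps, 1);
        printf("caps=%#lx\n", caps);   /* 0x8 */
        assign_bit(3, &caps, 0);
        printf("caps=%#lx\n", caps);   /* 0x0 */
        return 0;
}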
- */ -static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf) -{ - struct device *dev = ice_pf_to_dev(vf->pf); - struct ice_vsi *vsi = ice_get_vf_vsi(vf); - int err; - - if (WARN_ON(!vsi)) - return -EINVAL; - - if (vf->min_tx_rate) { - err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000); - if (err) { - dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n", - vf->min_tx_rate, vf->vf_id, err); - return err; - } - } - - if (vf->max_tx_rate) { - err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000); - if (err) { - dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n", - vf->max_tx_rate, vf->vf_id, err); - return err; - } - } - - return 0; -} - -/** - * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config - * @vsi: Pointer to VSI - * - * This function moves VSI into corresponding scheduler aggregator node - * based on cached value of "aggregator node info" per VSI - */ -static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - struct device *dev; - int status; - - if (!vsi->agg_node) - return; - - dev = ice_pf_to_dev(pf); - if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) { - dev_dbg(dev, - "agg_id %u already has reached max_num_vsis %u\n", - vsi->agg_node->agg_id, vsi->agg_node->num_vsis); - return; - } - - status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id, - vsi->idx, vsi->tc_cfg.ena_tc); - if (status) - dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node", - vsi->idx, vsi->agg_node->agg_id); - else - vsi->agg_node->num_vsis++; -} - -/** - * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset - * @vf: VF to rebuild host configuration on - */ -void ice_vf_rebuild_host_cfg(struct ice_vf *vf) -{ - struct device *dev = ice_pf_to_dev(vf->pf); - struct ice_vsi *vsi = ice_get_vf_vsi(vf); - - if (WARN_ON(!vsi)) - return; - - ice_vf_set_host_trust_cfg(vf); - - if (ice_vf_rebuild_host_mac_cfg(vf)) - dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n", - vf->vf_id); - - if (ice_vf_rebuild_host_vlan_cfg(vf, vsi)) - dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n", - vf->vf_id); - - if (ice_vf_rebuild_host_tx_rate_cfg(vf)) - dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n", - vf->vf_id); - - if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk)) - dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n", - vf->vf_id); - - /* rebuild aggregator node config for main VF VSI */ - ice_vf_rebuild_aggregator_node_cfg(vsi); -} - -/** * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access * @vf: VF that control VSI is being invalidated on */ @@ -1293,23 +1329,6 @@ void ice_vf_vsi_release(struct ice_vf *vf) } /** - * ice_vf_set_initialized - VF is ready for VIRTCHNL communication - * @vf: VF to set in initialized state - * - * After this function the VF will be ready to receive/handle the - * VIRTCHNL_OP_GET_VF_RESOURCES message - */ -void ice_vf_set_initialized(struct ice_vf *vf) -{ - ice_set_vf_state_qs_dis(vf); - clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); - clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); - clear_bit(ICE_VF_STATE_DIS, vf->vf_states); - set_bit(ICE_VF_STATE_INIT, vf->vf_states); - memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps)); -} - -/** * ice_get_vf_ctrl_vsi - Get first VF control VSI pointer * @pf: the PF private structure * @vsi: pointer to the VSI diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h 
b/drivers/net/ethernet/intel/ice/ice_vf_lib.h index 48fea6fa0362..93c774f2f437 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h @@ -72,7 +72,7 @@ struct ice_vfs { struct mutex table_lock; /* Lock for protecting the hash table */ u16 num_supported; /* max supported VFs on this PF */ u16 num_qps_per; /* number of queue pairs per VF */ - u16 num_msix_per; /* number of MSI-X vectors per VF */ + u16 num_msix_per; /* default MSI-X vectors per VF */ unsigned long last_printed_mdd_jiffies; /* MDD message rate limit */ }; @@ -82,7 +82,7 @@ struct ice_vf { struct rcu_head rcu; struct kref refcnt; struct ice_pf *pf; - + struct pci_dev *vfdev; /* Used during virtchnl message handling and NDO ops against the VF * that will trigger a VFR */ @@ -123,6 +123,9 @@ struct ice_vf { u8 num_req_qs; /* num of queue pairs requested by VF */ u16 num_mac; u16 num_vf_qs; /* num of queue configured per VF */ + u8 vlan_strip_ena; /* Outer and Inner VLAN strip enable */ +#define ICE_INNER_VLAN_STRIP_ENA BIT(0) +#define ICE_OUTER_VLAN_STRIP_ENA BIT(1) struct ice_mdd_vf_events mdd_rx_events; struct ice_mdd_vf_events mdd_tx_events; DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX); @@ -133,6 +136,8 @@ struct ice_vf { /* devlink port data */ struct devlink_port devlink_port; + + u16 num_msix; /* num of MSI-X configured on this VF */ }; /* Flags for controlling behavior of ice_reset_vf */ diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h index 6f3293b793b5..0c7e77c0a09f 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h @@ -32,13 +32,11 @@ int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable); bool ice_is_vf_trusted(struct ice_vf *vf); bool ice_vf_has_no_qs_ena(struct ice_vf *vf); bool ice_is_vf_link_up(struct ice_vf *vf); -void ice_vf_rebuild_host_cfg(struct ice_vf *vf); void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf); void ice_vf_ctrl_vsi_release(struct ice_vf *vf); struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf); int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi); void ice_vf_invalidate_vsi(struct ice_vf *vf); void ice_vf_vsi_release(struct ice_vf *vf); -void ice_vf_set_initialized(struct ice_vf *vf); #endif /* _ICE_VF_LIB_PRIVATE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c index b1ffb81893d4..80dc4bcdd3a4 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c @@ -21,6 +21,105 @@ noop_vlan(struct ice_vsi __always_unused *vsi) return 0; } +static void ice_port_vlan_on(struct ice_vsi *vsi) +{ + struct ice_vsi_vlan_ops *vlan_ops; + struct ice_pf *pf = vsi->back; + + if (ice_is_dvm_ena(&pf->hw)) { + vlan_ops = &vsi->outer_vlan_ops; + + /* setup outer VLAN ops */ + vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan; + vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan; + + /* setup inner VLAN ops */ + vlan_ops = &vsi->inner_vlan_ops; + vlan_ops->add_vlan = noop_vlan_arg; + vlan_ops->del_vlan = noop_vlan_arg; + vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping; + vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping; + vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion; + vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion; + } else { + vlan_ops = &vsi->inner_vlan_ops; + + vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan; + 
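ice_port_vlan_on() here, together with ice_port_vlan_off() just below, replaces the old nested branching by installing different function pointers behind one vlan_ops table, so callers never test the port-VLAN mode themselves. A minimal sketch of that ops-swap pattern with hypothetical ops and helpers:

#include <stdio.h>

struct vlan_ops {
        int (*ena_rx_filtering)(void);
        int (*dis_rx_filtering)(void);
};

static int real_ena(void) { puts("rx filtering enabled");  return 0; }
static int real_dis(void) { puts("rx filtering disabled"); return 0; }
static int noop(void)     { return 0; }

static void port_vlan_on(struct vlan_ops *ops)
{
        ops->ena_rx_filtering = real_ena;
        ops->dis_rx_filtering = noop;   /* filtering must stay on */
}

static void port_vlan_off(struct vlan_ops *ops)
{
        ops->ena_rx_filtering = real_ena;
        ops->dis_rx_filtering = real_dis;
}

int main(void)
{
        struct vlan_ops ops;

        port_vlan_on(&ops);
        ops.dis_rx_filtering();         /* silently ignored */
        port_vlan_off(&ops);
        ops.dis_rx_filtering();         /* actually disables */
        return 0;
}

The noop installed for dis_rx_filtering mirrors the driver's rule that Rx VLAN filtering cannot be turned off while a port VLAN is assigned.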
vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan; + } + + /* all Rx traffic should be in the domain of the assigned port VLAN, + * so prevent disabling Rx VLAN filtering + */ + vlan_ops->dis_rx_filtering = noop_vlan; + + vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering; +} + +static void ice_port_vlan_off(struct ice_vsi *vsi) +{ + struct ice_vsi_vlan_ops *vlan_ops; + struct ice_pf *pf = vsi->back; + + /* setup inner VLAN ops */ + vlan_ops = &vsi->inner_vlan_ops; + + vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping; + vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping; + vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion; + vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion; + + if (ice_is_dvm_ena(&pf->hw)) { + vlan_ops = &vsi->outer_vlan_ops; + + vlan_ops->del_vlan = ice_vsi_del_vlan; + vlan_ops->ena_stripping = ice_vsi_ena_outer_stripping; + vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping; + vlan_ops->ena_insertion = ice_vsi_ena_outer_insertion; + vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion; + } else { + vlan_ops->del_vlan = ice_vsi_del_vlan; + } + + vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering; + + if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags)) + vlan_ops->ena_rx_filtering = noop_vlan; + else + vlan_ops->ena_rx_filtering = + ice_vsi_ena_rx_vlan_filtering; +} + +/** + * ice_vf_vsi_enable_port_vlan - Set VSI VLAN ops to support port VLAN + * @vsi: VF's VSI being configured + * + * The function won't create port VLAN, it only allows to create port VLAN + * using VLAN ops on the VF VSI. + */ +void ice_vf_vsi_enable_port_vlan(struct ice_vsi *vsi) +{ + if (WARN_ON_ONCE(!vsi->vf)) + return; + + ice_port_vlan_on(vsi); +} + +/** + * ice_vf_vsi_disable_port_vlan - Clear VSI support for creating port VLAN + * @vsi: VF's VSI being configured + * + * The function should be called after removing port VLAN on VSI + * (using VLAN ops) + */ +void ice_vf_vsi_disable_port_vlan(struct ice_vsi *vsi) +{ + if (WARN_ON_ONCE(!vsi->vf)) + return; + + ice_port_vlan_off(vsi); +} + /** * ice_vf_vsi_init_vlan_ops - Initialize default VSI VLAN ops for VF VSI * @vsi: VF's VSI being configured @@ -39,91 +138,17 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) if (WARN_ON(!vf)) return; - if (ice_is_dvm_ena(&pf->hw)) { - vlan_ops = &vsi->outer_vlan_ops; + if (ice_vf_is_port_vlan_ena(vf)) + ice_port_vlan_on(vsi); + else + ice_port_vlan_off(vsi); - /* outer VLAN ops regardless of port VLAN config */ - vlan_ops->add_vlan = ice_vsi_add_vlan; - vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering; - vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering; - - if (ice_vf_is_port_vlan_ena(vf)) { - /* setup outer VLAN ops */ - vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan; - /* all Rx traffic should be in the domain of the - * assigned port VLAN, so prevent disabling Rx VLAN - * filtering - */ - vlan_ops->dis_rx_filtering = noop_vlan; - vlan_ops->ena_rx_filtering = - ice_vsi_ena_rx_vlan_filtering; - - /* setup inner VLAN ops */ - vlan_ops = &vsi->inner_vlan_ops; - vlan_ops->add_vlan = noop_vlan_arg; - vlan_ops->del_vlan = noop_vlan_arg; - vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping; - vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping; - vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion; - vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion; - } else { - vlan_ops->dis_rx_filtering = - ice_vsi_dis_rx_vlan_filtering; - - if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags)) - vlan_ops->ena_rx_filtering = noop_vlan; - else - 
vlan_ops->ena_rx_filtering = - ice_vsi_ena_rx_vlan_filtering; - - vlan_ops->del_vlan = ice_vsi_del_vlan; - vlan_ops->ena_stripping = ice_vsi_ena_outer_stripping; - vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping; - vlan_ops->ena_insertion = ice_vsi_ena_outer_insertion; - vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion; - - /* setup inner VLAN ops */ - vlan_ops = &vsi->inner_vlan_ops; - - vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping; - vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping; - vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion; - vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion; - } - } else { - vlan_ops = &vsi->inner_vlan_ops; + vlan_ops = ice_is_dvm_ena(&pf->hw) ? + &vsi->outer_vlan_ops : &vsi->inner_vlan_ops; - /* inner VLAN ops regardless of port VLAN config */ - vlan_ops->add_vlan = ice_vsi_add_vlan; - vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering; - vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering; - vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering; - - if (ice_vf_is_port_vlan_ena(vf)) { - vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan; - vlan_ops->ena_rx_filtering = - ice_vsi_ena_rx_vlan_filtering; - /* all Rx traffic should be in the domain of the - * assigned port VLAN, so prevent disabling Rx VLAN - * filtering - */ - vlan_ops->dis_rx_filtering = noop_vlan; - } else { - vlan_ops->dis_rx_filtering = - ice_vsi_dis_rx_vlan_filtering; - if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags)) - vlan_ops->ena_rx_filtering = noop_vlan; - else - vlan_ops->ena_rx_filtering = - ice_vsi_ena_rx_vlan_filtering; - - vlan_ops->del_vlan = ice_vsi_del_vlan; - vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping; - vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping; - vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion; - vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion; - } - } + vlan_ops->add_vlan = ice_vsi_add_vlan; + vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering; + vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h index 875a4e615f39..df8aa09df3e3 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h @@ -13,7 +13,11 @@ void ice_vf_vsi_cfg_svm_legacy_vlan_mode(struct ice_vsi *vsi); #ifdef CONFIG_PCI_IOV void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi); +void ice_vf_vsi_enable_port_vlan(struct ice_vsi *vsi); +void ice_vf_vsi_disable_port_vlan(struct ice_vsi *vsi); #else static inline void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) { } +static inline void ice_vf_vsi_enable_port_vlan(struct ice_vsi *vsi) { } +static inline void ice_vf_vsi_disable_port_vlan(struct ice_vsi *vsi) { } #endif /* CONFIG_PCI_IOV */ #endif /* _ICE_PF_VSI_VLAN_OPS_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index dcf628b1fccd..1c7b4ded948b 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -428,7 +428,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) goto err; } - len = sizeof(struct virtchnl_vf_resource); + len = virtchnl_struct_size(vfres, vsi_res, 0); vfres = kzalloc(len, GFP_KERNEL); if (!vfres) { @@ -486,6 +486,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES) vfres->vf_cap_flags |= 
VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_CRC; + if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED; @@ -498,9 +501,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) vfres->num_vsis = 1; /* Tx and Rx queue are equal for VF */ vfres->num_queue_pairs = vsi->num_txq; - vfres->max_vectors = vf->pf->vfs.num_msix_per; + vfres->max_vectors = vf->num_msix; vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE; - vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE; + vfres->rss_lut_size = ICE_LUT_VSI_SIZE; vfres->max_mtu = ice_vc_get_max_frame_size(vf); vfres->vsi_res[0].vsi_id = vf->lan_vsi_num; @@ -962,7 +965,7 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg) goto error_param; } - if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) { + if (vrl->lut_entries != ICE_LUT_VSI_SIZE) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -978,7 +981,7 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg) goto error_param; } - if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE)) + if (ice_set_rss_lut(vsi, vrl->lut, ICE_LUT_VSI_SIZE)) v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; error_param: return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret, @@ -1520,7 +1523,6 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) u16 num_q_vectors_mapped, vsi_id, vector_id; struct virtchnl_irq_map_info *irqmap_info; struct virtchnl_vector_map *map; - struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; int i; @@ -1532,7 +1534,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) * there is actually at least a single VF queue vector mapped */ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || - pf->vfs.num_msix_per < num_q_vectors_mapped || + vf->num_msix < num_q_vectors_mapped || !num_q_vectors_mapped) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -1554,7 +1556,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) /* vector_id is always 0-based for each VF, and can never be * larger than or equal to the max allowed interrupts per VF */ - if (!(vector_id < pf->vfs.num_msix_per) || + if (!(vector_id < vf->num_msix) || !ice_vc_isvalid_vsi_id(vf, vsi_id) || (!vector_id && (map->rxq_map || map->txq_map))) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -1600,9 +1602,24 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) (struct virtchnl_vsi_queue_config_info *)msg; struct virtchnl_queue_pair_info *qpi; struct ice_pf *pf = vf->pf; + struct ice_lag *lag; struct ice_vsi *vsi; + u8 act_prt, pri_prt; int i = -1, q_idx; + lag = pf->lag; + mutex_lock(&pf->lag_mutex); + act_prt = ICE_LAG_INVALID_PORT; + pri_prt = pf->hw.port_info->lport; + if (lag && lag->bonded && lag->primary) { + act_prt = lag->active_port; + if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT && + lag->upper_netdev) + ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt); + else + act_prt = ICE_LAG_INVALID_PORT; + } + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) goto error_param; @@ -1621,6 +1638,15 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) } for (i = 0; i < qci->num_queue_pairs; i++) { + if (!qci->qpair[i].rxq.crc_disable) + continue; + + if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) || + vf->vlan_strip_ena) + goto error_param; + } + + for (i = 0; i < qci->num_queue_pairs; i++) { qpi = &qci->qpair[i]; if (qpi->txq.vsi_id != qci->vsi_id || qpi->rxq.vsi_id != qci->vsi_id || @@ -1666,6 
+1692,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr; vsi->rx_rings[i]->count = qpi->rxq.ring_len; + if (qpi->rxq.crc_disable) + vsi->rx_rings[q_idx]->flags |= + ICE_RX_FLAGS_CRC_STRIP_DIS; + else + vsi->rx_rings[q_idx]->flags &= + ~ICE_RX_FLAGS_CRC_STRIP_DIS; + if (qpi->rxq.databuffer_size != 0 && (qpi->rxq.databuffer_size > ((16 * 1024) - 128) || qpi->rxq.databuffer_size < 1024)) @@ -1710,6 +1743,11 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) } } + if (lag && lag->bonded && lag->primary && + act_prt != ICE_LAG_INVALID_PORT) + ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt); + mutex_unlock(&pf->lag_mutex); + /* send the response to the VF */ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, VIRTCHNL_STATUS_SUCCESS, NULL, 0); @@ -1724,6 +1762,13 @@ error_param: vf->vf_id, i); } + if (lag && lag->bonded && lag->primary && + act_prt != ICE_LAG_INVALID_PORT) + ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt); + mutex_unlock(&pf->lag_mutex); + + ice_lag_move_new_vf_nodes(vf); + /* send the response to the VF */ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, VIRTCHNL_STATUS_ERR_PARAM, NULL, 0); @@ -2409,6 +2454,21 @@ static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg) } /** + * ice_vsi_is_rxq_crc_strip_dis - check if Rx queue CRC strip is disabled or not + * @vsi: pointer to the VF VSI info + */ +static bool ice_vsi_is_rxq_crc_strip_dis(struct ice_vsi *vsi) +{ + unsigned int i; + + ice_for_each_alloc_rxq(vsi, i) + if (vsi->rx_rings[i]->flags & ICE_RX_FLAGS_CRC_STRIP_DIS) + return true; + + return false; +} + +/** * ice_vc_ena_vlan_stripping * @vf: pointer to the VF info * @@ -2437,6 +2497,8 @@ static int ice_vc_ena_vlan_stripping(struct ice_vf *vf) if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q)) v_ret = VIRTCHNL_STATUS_ERR_PARAM; + else + vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA; error_param: return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, @@ -2472,6 +2534,8 @@ static int ice_vc_dis_vlan_stripping(struct ice_vf *vf) if (vsi->inner_vlan_ops.dis_stripping(vsi)) v_ret = VIRTCHNL_STATUS_ERR_PARAM; + else + vf->vlan_strip_ena &= ~ICE_INNER_VLAN_STRIP_ENA; error_param: return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, @@ -2615,12 +2679,14 @@ static int ice_vc_query_rxdid(struct ice_vf *vf) goto err; } - /* Read flexiflag registers to determine whether the - * corresponding RXDID is configured and supported or not. - * Since Legacy 16byte descriptor format is not supported, - * start from Legacy 32byte descriptor. + /* RXDIDs supported by DDP package can be read from the register + * to get the supported RXDID bitmap. But the legacy 32byte RXDID + * is not listed in DDP package, add it in the bitmap manually. + * Legacy 16byte descriptor is not supported. 
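The get-resources hunk above sizes the virtchnl reply with virtchnl_struct_size(vfres, vsi_res, 0) instead of sizeof(struct virtchnl_vf_resource), so the trailing flexible array is accounted for. A compilable sketch with a simplified STRUCT_SIZE macro (hypothetical; the kernel's struct_size() additionally guards against arithmetic overflow):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical message with a trailing flexible array. */
struct vf_resource {
        unsigned short num_vsis;
        struct { unsigned short vsi_id; } vsi_res[];
};

/* sizeof() does not evaluate its operand, so using the pointer here
 * before allocation is fine. */
#define STRUCT_SIZE(obj, member, n) \
        (sizeof(*(obj)) + sizeof((obj)->member[0]) * (n))

int main(void)
{
        struct vf_resource *res;
        size_t len = STRUCT_SIZE(res, vsi_res, 1); /* header + 1 element */

        res = calloc(1, len);
        if (!res)
                return 1;
        res->num_vsis = 1;
        res->vsi_res[0].vsi_id = 5;
        printf("allocated %zu bytes\n", len);
        free(res);
        return 0;
}

A bare sizeof would have under-allocated by one vsi_res element, which is exactly the class of bug the helper removes.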
*/ - for (i = ICE_RXDID_LEGACY_1; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) { + rxdid->supported_rxdids |= BIT(ICE_RXDID_LEGACY_1); + + for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) { regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0)); if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) @@ -2647,6 +2713,8 @@ static int ice_vf_init_vlan_stripping(struct ice_vf *vf) { struct ice_vsi *vsi = ice_get_vf_vsi(vf); + vf->vlan_strip_ena = 0; + if (!vsi) return -EINVAL; @@ -2656,10 +2724,16 @@ static int ice_vf_init_vlan_stripping(struct ice_vf *vf) if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw)) return 0; - if (ice_vf_vlan_offload_ena(vf->driver_caps)) - return vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q); - else - return vsi->inner_vlan_ops.dis_stripping(vsi); + if (ice_vf_vlan_offload_ena(vf->driver_caps)) { + int err; + + err = vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q); + if (!err) + vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA; + return err; + } + + return vsi->inner_vlan_ops.dis_stripping(vsi); } static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf) @@ -3433,6 +3507,11 @@ static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg) goto out; } + if (ice_vsi_is_rxq_crc_strip_dis(vsi)) { + v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; + goto out; + } + ethertype_setting = strip_msg->outer_ethertype_setting; if (ethertype_setting) { if (ice_vc_ena_vlan_offload(vsi, @@ -3453,6 +3532,8 @@ static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg) * enabled, is extracted in L2TAG1. */ ice_vsi_update_l2tsel(vsi, l2tsel); + + vf->vlan_strip_ena |= ICE_OUTER_VLAN_STRIP_ENA; } } @@ -3464,6 +3545,9 @@ static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg) goto out; } + if (ethertype_setting) + vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA; + out: return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2, v_ret, NULL, 0); @@ -3525,6 +3609,8 @@ static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg) * in L2TAG1. */ ice_vsi_update_l2tsel(vsi, l2tsel); + + vf->vlan_strip_ena &= ~ICE_OUTER_VLAN_STRIP_ENA; } } @@ -3534,6 +3620,9 @@ static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg) goto out; } + if (ethertype_setting) + vf->vlan_strip_ena &= ~ICE_INNER_VLAN_STRIP_ENA; + out: return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2, v_ret, NULL, 0); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index daa6a1e894cf..24b23b7ef04a 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2021, Intel Corporation. */ +/* Copyright (C) 2021-2023, Intel Corporation. 
*/ #include "ice.h" #include "ice_base.h" @@ -1422,8 +1422,8 @@ ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi, */ static void ice_vf_fdir_dump_info(struct ice_vf *vf) { + u32 fd_size, fd_cnt, fd_size_g, fd_cnt_g, fd_size_b, fd_cnt_b; struct ice_vsi *vf_vsi; - u32 fd_size, fd_cnt; struct device *dev; struct ice_pf *pf; struct ice_hw *hw; @@ -1442,12 +1442,25 @@ static void ice_vf_fdir_dump_info(struct ice_vf *vf) fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num)); fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num)); - dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x\n", - vf->vf_id, - (fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S, - (fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S, - (fd_cnt & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S, - (fd_cnt & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S); + switch (hw->mac_type) { + case ICE_MAC_E830: + fd_size_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_size); + fd_size_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_size); + fd_cnt_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt); + fd_cnt_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt); + break; + case ICE_MAC_E810: + default: + fd_size_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_size); + fd_size_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_size); + fd_cnt_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt); + fd_cnt_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt); + } + + dev_dbg(dev, "VF %d: Size in the FD table: guaranteed:0x%x, best effort:0x%x\n", + vf->vf_id, fd_size_g, fd_size_b); + dev_dbg(dev, "VF %d: Filter counter in the FD table: guaranteed:0x%x, best effort:0x%x\n", + vf->vf_id, fd_cnt_g, fd_cnt_b); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c index 5b4a0abb4607..76266e709a39 100644 --- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c @@ -202,6 +202,24 @@ int ice_vsi_dis_inner_insertion(struct ice_vsi *vsi) return ice_vsi_manage_vlan_insertion(vsi); } +static void +ice_save_vlan_info(struct ice_aqc_vsi_props *info, + struct ice_vsi_vlan_info *vlan) +{ + vlan->sw_flags2 = info->sw_flags2; + vlan->inner_vlan_flags = info->inner_vlan_flags; + vlan->outer_vlan_flags = info->outer_vlan_flags; +} + +static void +ice_restore_vlan_info(struct ice_aqc_vsi_props *info, + struct ice_vsi_vlan_info *vlan) +{ + info->sw_flags2 = vlan->sw_flags2; + info->inner_vlan_flags = vlan->inner_vlan_flags; + info->outer_vlan_flags = vlan->outer_vlan_flags; +} + /** * __ice_vsi_set_inner_port_vlan - set port VLAN VSI context settings to enable a port VLAN * @vsi: the VSI to update @@ -218,6 +236,7 @@ static int __ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, u16 pvid_info) if (!ctxt) return -ENOMEM; + ice_save_vlan_info(&vsi->info, &vsi->vlan_info); ctxt->info = vsi->info; info = &ctxt->info; info->inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED | @@ -259,6 +278,33 @@ int ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan) return __ice_vsi_set_inner_port_vlan(vsi, port_vlan_info); } +int ice_vsi_clear_inner_port_vlan(struct ice_vsi *vsi) +{ + struct ice_hw *hw = &vsi->back->hw; + struct ice_aqc_vsi_props *info; + struct ice_vsi_ctx *ctxt; + int ret; + + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); + if (!ctxt) + return -ENOMEM; + + ice_restore_vlan_info(&vsi->info, &vsi->vlan_info); + vsi->info.port_based_inner_vlan = 0; + ctxt->info = vsi->info; + info = 
&ctxt->info; + info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | + ICE_AQ_VSI_PROP_SW_VALID); + + ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (ret) + dev_err(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n", + ret, ice_aq_str(hw->adminq.sq_last_status)); + + kfree(ctxt); + return ret; +} + /** * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI * @vsi: VSI to enable or disable VLAN pruning on @@ -647,6 +693,7 @@ __ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid) if (!ctxt) return -ENOMEM; + ice_save_vlan_info(&vsi->info, &vsi->vlan_info); ctxt->info = vsi->info; ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; @@ -689,9 +736,6 @@ __ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid) * used if DVM is supported. Also, this function should never be called directly * as it should be part of ice_vsi_vlan_ops if it's needed. * - * This function does not support clearing the port VLAN as there is currently - * no use case for this. - * * Use the ice_vlan structure passed in to set this VSI in a port VLAN. */ int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan) @@ -705,3 +749,37 @@ int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan) return __ice_vsi_set_outer_port_vlan(vsi, port_vlan_info, vlan->tpid); } + +/** + * ice_vsi_clear_outer_port_vlan - clear outer port vlan + * @vsi: VSI to configure + * + * The function is restoring previously set vlan config (saved in + * vsi->vlan_info). Setting happens in port vlan configuration. + */ +int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi) +{ + struct ice_hw *hw = &vsi->back->hw; + struct ice_vsi_ctx *ctxt; + int err; + + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); + if (!ctxt) + return -ENOMEM; + + ice_restore_vlan_info(&vsi->info, &vsi->vlan_info); + vsi->info.port_based_outer_vlan = 0; + ctxt->info = vsi->info; + + ctxt->info.valid_sections = + cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID | + ICE_AQ_VSI_PROP_SW_VALID); + + err = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (err) + dev_err(ice_pf_to_dev(vsi->back), "update VSI for clearing outer port based VLAN failed, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); + + kfree(ctxt); + return err; +} diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h index f459909490ec..f0d84d11bd5b 100644 --- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h @@ -7,6 +7,12 @@ #include <linux/types.h> #include "ice_vlan.h" +struct ice_vsi_vlan_info { + u8 sw_flags2; + u8 inner_vlan_flags; + u8 outer_vlan_flags; +}; + struct ice_vsi; int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan); @@ -17,6 +23,7 @@ int ice_vsi_dis_inner_stripping(struct ice_vsi *vsi); int ice_vsi_ena_inner_insertion(struct ice_vsi *vsi, u16 tpid); int ice_vsi_dis_inner_insertion(struct ice_vsi *vsi); int ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan); +int ice_vsi_clear_inner_port_vlan(struct ice_vsi *vsi); int ice_vsi_ena_rx_vlan_filtering(struct ice_vsi *vsi); int ice_vsi_dis_rx_vlan_filtering(struct ice_vsi *vsi); @@ -28,5 +35,6 @@ int ice_vsi_dis_outer_stripping(struct ice_vsi *vsi); int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid); int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi); int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan); +int 
ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi); #endif /* _ICE_VSI_VLAN_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h index 5b47568f6256..b2d2330dedcb 100644 --- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h +++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h @@ -21,6 +21,7 @@ struct ice_vsi_vlan_ops { int (*ena_tx_filtering)(struct ice_vsi *vsi); int (*dis_tx_filtering)(struct ice_vsi *vsi); int (*set_port_vlan)(struct ice_vsi *vsi, struct ice_vlan *vlan); + int (*clear_port_vlan)(struct ice_vsi *vsi); }; void ice_vsi_init_vlan_ops(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index a7fe2b4ce655..99954508184f 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -217,21 +217,16 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) */ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) { - struct ice_aqc_add_tx_qgrp *qg_buf; + DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1); + u16 size = __struct_size(qg_buf); struct ice_q_vector *q_vector; struct ice_tx_ring *tx_ring; struct ice_rx_ring *rx_ring; - u16 size; int err; if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq) return -EINVAL; - size = struct_size(qg_buf, txqs, 1); - qg_buf = kzalloc(size, GFP_KERNEL); - if (!qg_buf) - return -ENOMEM; - qg_buf->num_txqs = 1; tx_ring = vsi->tx_rings[q_idx]; @@ -240,7 +235,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf); if (err) - goto free_buf; + return err; if (ice_is_xdp_ena_vsi(vsi)) { struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx]; @@ -249,29 +244,28 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) qg_buf->num_txqs = 1; err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf); if (err) - goto free_buf; + return err; ice_set_ring_xdp(xdp_ring); ice_tx_xsk_pool(vsi, q_idx); } err = ice_vsi_cfg_rxq(rx_ring); if (err) - goto free_buf; + return err; ice_qvec_cfg_msix(vsi, q_vector); err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true); if (err) - goto free_buf; + return err; clear_bit(ICE_CFG_BUSY, vsi->state); ice_qvec_toggle_napi(vsi, q_vector, true); ice_qvec_ena_irq(vsi, q_vector); netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); -free_buf: - kfree(qg_buf); - return err; + + return 0; } /** @@ -546,19 +540,6 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count) } /** - * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring - * @rx_ring: Rx ring - */ -static void ice_bump_ntc(struct ice_rx_ring *rx_ring) -{ - int ntc = rx_ring->next_to_clean + 1; - - ntc = (ntc < rx_ring->count) ? 
ntc : 0; - rx_ring->next_to_clean = ntc; - prefetch(ICE_RX_DESC(rx_ring, ntc)); -} - -/** * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer * @rx_ring: Rx ring * @xdp: Pointer to XDP buffer @@ -572,8 +553,14 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) { unsigned int totalsize = xdp->data_end - xdp->data_meta; unsigned int metasize = xdp->data - xdp->data_meta; + struct skb_shared_info *sinfo = NULL; struct sk_buff *skb; + u32 nr_frags = 0; + if (unlikely(xdp_buff_has_frags(xdp))) { + sinfo = xdp_get_shared_info_from_buff(xdp); + nr_frags = sinfo->nr_frags; + } net_prefetch(xdp->data_meta); skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize, @@ -589,6 +576,29 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) __skb_pull(skb, metasize); } + if (likely(!xdp_buff_has_frags(xdp))) + goto out; + + for (int i = 0; i < nr_frags; i++) { + struct skb_shared_info *skinfo = skb_shinfo(skb); + skb_frag_t *frag = &sinfo->frags[i]; + struct page *page; + void *addr; + + page = dev_alloc_page(); + if (!page) { + dev_kfree_skb(skb); + return NULL; + } + addr = page_to_virt(page); + + memcpy(addr, skb_frag_page(frag), skb_frag_size(frag)); + + __skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++, + addr, 0, skb_frag_size(frag)); + } + +out: xsk_buff_free(xdp); return skb; } @@ -597,7 +607,7 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ * @xdp_ring: XDP Tx ring */ -static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring) +static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring) { u16 ntc = xdp_ring->next_to_clean; struct ice_tx_desc *tx_desc; @@ -619,7 +629,7 @@ static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring) } if (!completed_frames) - return; + return 0; if (likely(!xdp_ring->xdp_tx_active)) { xsk_frames = completed_frames; @@ -649,6 +659,8 @@ skip: xdp_ring->next_to_clean -= cnt; if (xsk_frames) xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames); + + return completed_frames; } /** @@ -666,37 +678,72 @@ skip: static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring) { + struct skb_shared_info *sinfo = NULL; u32 size = xdp->data_end - xdp->data; u32 ntu = xdp_ring->next_to_use; struct ice_tx_desc *tx_desc; struct ice_tx_buf *tx_buf; - dma_addr_t dma; - - if (ICE_DESC_UNUSED(xdp_ring) < ICE_RING_QUARTER(xdp_ring)) { - ice_clean_xdp_irq_zc(xdp_ring); - if (!ICE_DESC_UNUSED(xdp_ring)) { - xdp_ring->ring_stats->tx_stats.tx_busy++; - return ICE_XDP_CONSUMED; - } + struct xdp_buff *head; + u32 nr_frags = 0; + u32 free_space; + u32 frag = 0; + + free_space = ICE_DESC_UNUSED(xdp_ring); + if (free_space < ICE_RING_QUARTER(xdp_ring)) + free_space += ice_clean_xdp_irq_zc(xdp_ring); + + if (unlikely(!free_space)) + goto busy; + + if (unlikely(xdp_buff_has_frags(xdp))) { + sinfo = xdp_get_shared_info_from_buff(xdp); + nr_frags = sinfo->nr_frags; + if (free_space < nr_frags + 1) + goto busy; } - dma = xsk_buff_xdp_get_dma(xdp); - xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size); - - tx_buf = &xdp_ring->tx_buf[ntu]; - tx_buf->xdp = xdp; - tx_buf->type = ICE_TX_BUF_XSK_TX; tx_desc = ICE_TX_DESC(xdp_ring, ntu); - tx_desc->buf_addr = cpu_to_le64(dma); - tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, - 0, size, 0); - xdp_ring->xdp_tx_active++; + tx_buf = &xdp_ring->tx_buf[ntu]; + head = xdp; + + for (;;) { + dma_addr_t dma; + + dma = xsk_buff_xdp_get_dma(xdp); + 
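The ice_qp_ena() rework earlier in ice_xsk.c swaps a kzalloc'd one-element flexible-array struct for an on-stack DEFINE_FLEX buffer, deleting the free_buf error path entirely. A userspace approximation of that storage trick; the macro and types here are hypothetical, not the kernel's:

#include <stdio.h>

struct tx_qgrp {
        unsigned char num_txqs;
        unsigned short txqs[];
};

/* Carve a struct plus one flexible-array element out of stack storage,
 * loosely mirroring the kernel's DEFINE_FLEX. */
#define DEFINE_FLEX_1(type, name, member)                               \
        union {                                                         \
                unsigned char bytes[sizeof(type) +                      \
                                    sizeof(((type *)0)->member[0])];    \
                type obj;                                               \
        } name##_storage = { { 0 } };                                   \
        type *name = &name##_storage.obj

int main(void)
{
        DEFINE_FLEX_1(struct tx_qgrp, qg_buf, txqs);

        qg_buf->num_txqs = 1;
        qg_buf->txqs[0] = 42;
        printf("size=%zu txq=%u\n", sizeof(qg_buf_storage), qg_buf->txqs[0]);
        return 0;       /* nothing to kfree, no error-path cleanup */
}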
xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size); + + tx_buf->xdp = xdp; + tx_buf->type = ICE_TX_BUF_XSK_TX; + tx_desc->buf_addr = cpu_to_le64(dma); + tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0); + /* account for each xdp_buff from xsk_buff_pool */ + xdp_ring->xdp_tx_active++; + + if (++ntu == xdp_ring->count) + ntu = 0; + + if (frag == nr_frags) + break; + + tx_desc = ICE_TX_DESC(xdp_ring, ntu); + tx_buf = &xdp_ring->tx_buf[ntu]; + + xdp = xsk_buff_get_frag(head); + size = skb_frag_size(&sinfo->frags[frag]); + frag++; + } - if (++ntu == xdp_ring->count) - ntu = 0; xdp_ring->next_to_use = ntu; + /* update last descriptor from a frame with EOP */ + tx_desc->cmd_type_offset_bsz |= + cpu_to_le64(ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S); return ICE_XDP_TX; + +busy: + xdp_ring->ring_stats->tx_stats.tx_busy++; + + return ICE_XDP_CONSUMED; } /** @@ -752,6 +799,34 @@ out_failure: return result; } +static int +ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first, + struct xdp_buff *xdp, const unsigned int size) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first); + + if (!size) + return 0; + + if (!xdp_buff_has_frags(first)) { + sinfo->nr_frags = 0; + sinfo->xdp_frags_size = 0; + xdp_buff_set_frags_flag(first); + } + + if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) { + xsk_buff_free(first); + return -ENOMEM; + } + + __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, + virt_to_page(xdp->data_hard_start), 0, size); + sinfo->xdp_frags_size += size; + xsk_buff_add_frag(xdp); + + return 0; +} + /** * ice_clean_rx_irq_zc - consumes packets from the hardware ring * @rx_ring: AF_XDP Rx ring @@ -762,9 +837,14 @@ out_failure: int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; + struct xsk_buff_pool *xsk_pool = rx_ring->xsk_pool; + u32 ntc = rx_ring->next_to_clean; + u32 ntu = rx_ring->next_to_use; + struct xdp_buff *first = NULL; struct ice_tx_ring *xdp_ring; unsigned int xdp_xmit = 0; struct bpf_prog *xdp_prog; + u32 cnt = rx_ring->count; bool failure = false; int entries_to_alloc; @@ -774,6 +854,9 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) xdp_prog = READ_ONCE(rx_ring->xdp_prog); xdp_ring = rx_ring->xdp_ring; + if (ntc != rx_ring->first_desc) + first = *ice_xdp_buf(rx_ring, rx_ring->first_desc); + while (likely(total_rx_packets < (unsigned int)budget)) { union ice_32b_rx_flex_desc *rx_desc; unsigned int size, xdp_res = 0; @@ -783,7 +866,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) u16 vlan_tag = 0; u16 rx_ptype; - rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); + rx_desc = ICE_RX_DESC(rx_ring, ntc); stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S); if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits)) @@ -795,51 +878,61 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) */ dma_rmb(); - if (unlikely(rx_ring->next_to_clean == rx_ring->next_to_use)) + if (unlikely(ntc == ntu)) break; - xdp = *ice_xdp_buf(rx_ring, rx_ring->next_to_clean); + xdp = *ice_xdp_buf(rx_ring, ntc); size = le16_to_cpu(rx_desc->wb.pkt_len) & ICE_RX_FLX_DESC_PKT_LEN_M; - if (!size) { - xdp->data = NULL; - xdp->data_end = NULL; - xdp->data_hard_start = NULL; - xdp->data_meta = NULL; - goto construct_skb; - } xsk_buff_set_size(xdp, size); - xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool); + xsk_buff_dma_sync_for_cpu(xdp, xsk_pool); - xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring); + if (!first) { 
+ first = xdp; + xdp_buff_clear_frags_flag(first); + } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) { + break; + } + + if (++ntc == cnt) + ntc = 0; + + if (ice_is_non_eop(rx_ring, rx_desc)) + continue; + + xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring); if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) { xdp_xmit |= xdp_res; } else if (xdp_res == ICE_XDP_EXIT) { failure = true; + first = NULL; + rx_ring->first_desc = ntc; break; } else if (xdp_res == ICE_XDP_CONSUMED) { - xsk_buff_free(xdp); + xsk_buff_free(first); } else if (xdp_res == ICE_XDP_PASS) { goto construct_skb; } - total_rx_bytes += size; + total_rx_bytes += xdp_get_buff_len(first); total_rx_packets++; - ice_bump_ntc(rx_ring); + first = NULL; + rx_ring->first_desc = ntc; continue; construct_skb: /* XDP_PASS path */ - skb = ice_construct_skb_zc(rx_ring, xdp); + skb = ice_construct_skb_zc(rx_ring, first); if (!skb) { rx_ring->ring_stats->rx_stats.alloc_buf_failed++; break; } - ice_bump_ntc(rx_ring); + first = NULL; + rx_ring->first_desc = ntc; if (eth_skb_pad(skb)) { skb = NULL; @@ -858,18 +951,22 @@ construct_skb: ice_receive_skb(rx_ring, skb, vlan_tag); } - entries_to_alloc = ICE_DESC_UNUSED(rx_ring); + rx_ring->next_to_clean = ntc; + entries_to_alloc = ICE_RX_DESC_UNUSED(rx_ring); if (entries_to_alloc > ICE_RING_QUARTER(rx_ring)) failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc); ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0); ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes); - if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) { - if (failure || rx_ring->next_to_clean == rx_ring->next_to_use) - xsk_set_rx_need_wakeup(rx_ring->xsk_pool); + if (xsk_uses_need_wakeup(xsk_pool)) { + /* ntu could have changed when allocating entries above, so + * use rx_ring value instead of stack based one + */ + if (failure || ntc == rx_ring->next_to_use) + xsk_set_rx_need_wakeup(xsk_pool); else - xsk_clear_rx_need_wakeup(rx_ring->xsk_pool); + xsk_clear_rx_need_wakeup(xsk_pool); return (int)total_rx_packets; } @@ -894,7 +991,7 @@ static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc, tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++); tx_desc->buf_addr = cpu_to_le64(dma); - tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, + tx_desc->cmd_type_offset_bsz = ice_build_ctob(xsk_is_eop_desc(desc), 0, desc->len, 0); *total_bytes += desc->len; @@ -921,7 +1018,7 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de tx_desc = ICE_TX_DESC(xdp_ring, ntu++); tx_desc->buf_addr = cpu_to_le64(dma); - tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, + tx_desc->cmd_type_offset_bsz = ice_build_ctob(xsk_is_eop_desc(&descs[i]), 0, descs[i].len, 0); *total_bytes += descs[i].len; diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile new file mode 100644 index 000000000000..6844ead2f3ac --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/Makefile @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 2023 Intel Corporation + +# Makefile for Intel(R) Infrastructure Data Path Function Linux Driver + +obj-$(CONFIG_IDPF) += idpf.o + +idpf-y := \ + idpf_controlq.o \ + idpf_controlq_setup.o \ + idpf_dev.o \ + idpf_ethtool.o \ + idpf_lib.o \ + idpf_main.o \ + idpf_singleq_txrx.o \ + idpf_txrx.o \ + idpf_virtchnl.o \ + idpf_vf_dev.o diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h new file mode 100644 index 000000000000..bee73353b56a --- 
/dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -0,0 +1,968 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023 Intel Corporation */ + +#ifndef _IDPF_H_ +#define _IDPF_H_ + +/* Forward declaration */ +struct idpf_adapter; +struct idpf_vport; +struct idpf_vport_max_q; + +#include <net/pkt_sched.h> +#include <linux/aer.h> +#include <linux/etherdevice.h> +#include <linux/pci.h> +#include <linux/bitfield.h> +#include <linux/sctp.h> +#include <linux/ethtool.h> +#include <net/gro.h> +#include <linux/dim.h> + +#include "virtchnl2.h" +#include "idpf_lan_txrx.h" +#include "idpf_txrx.h" +#include "idpf_controlq.h" + +#define GETMAXVAL(num_bits) GENMASK((num_bits) - 1, 0) + +#define IDPF_NO_FREE_SLOT 0xffff + +/* Default Mailbox settings */ +#define IDPF_NUM_FILTERS_PER_MSG 20 +#define IDPF_NUM_DFLT_MBX_Q 2 /* includes both TX and RX */ +#define IDPF_DFLT_MBX_Q_LEN 64 +#define IDPF_DFLT_MBX_ID -1 +/* maximum number of times to try before resetting mailbox */ +#define IDPF_MB_MAX_ERR 20 +#define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz) \ + ((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz)) +#define IDPF_WAIT_FOR_EVENT_TIMEO_MIN 2000 +#define IDPF_WAIT_FOR_EVENT_TIMEO 60000 + +#define IDPF_MAX_WAIT 500 + +/* available message levels */ +#define IDPF_AVAIL_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + +#define IDPF_DIM_PROFILE_SLOTS 5 + +#define IDPF_VIRTCHNL_VERSION_MAJOR VIRTCHNL2_VERSION_MAJOR_2 +#define IDPF_VIRTCHNL_VERSION_MINOR VIRTCHNL2_VERSION_MINOR_0 + +/** + * struct idpf_mac_filter + * @list: list member field + * @macaddr: MAC address + * @remove: filter should be removed (virtchnl) + * @add: filter should be added (virtchnl) + */ +struct idpf_mac_filter { + struct list_head list; + u8 macaddr[ETH_ALEN]; + bool remove; + bool add; +}; + +/** + * enum idpf_state - State machine to handle bring up + * @__IDPF_STARTUP: Start the state machine + * @__IDPF_VER_CHECK: Negotiate virtchnl version + * @__IDPF_GET_CAPS: Negotiate capabilities + * @__IDPF_INIT_SW: Init based on given capabilities + * @__IDPF_STATE_LAST: Must be last, used to determine size + */ +enum idpf_state { + __IDPF_STARTUP, + __IDPF_VER_CHECK, + __IDPF_GET_CAPS, + __IDPF_INIT_SW, + __IDPF_STATE_LAST, +}; + +/** + * enum idpf_flags - Hard reset causes. + * @IDPF_HR_FUNC_RESET: Hard reset when TxRx timeout + * @IDPF_HR_DRV_LOAD: Set on driver load for a clean HW + * @IDPF_HR_RESET_IN_PROG: Reset in progress + * @IDPF_REMOVE_IN_PROG: Driver remove in progress + * @IDPF_MB_INTR_MODE: Mailbox in interrupt mode + * @IDPF_FLAGS_NBITS: Must be last + */ +enum idpf_flags { + IDPF_HR_FUNC_RESET, + IDPF_HR_DRV_LOAD, + IDPF_HR_RESET_IN_PROG, + IDPF_REMOVE_IN_PROG, + IDPF_MB_INTR_MODE, + IDPF_FLAGS_NBITS, +}; + +/** + * enum idpf_cap_field - Offsets into capabilities struct for specific caps + * @IDPF_BASE_CAPS: generic base capabilities + * @IDPF_CSUM_CAPS: checksum offload capabilities + * @IDPF_SEG_CAPS: segmentation offload capabilities + * @IDPF_RSS_CAPS: RSS offload capabilities + * @IDPF_HSPLIT_CAPS: Header split capabilities + * @IDPF_RSC_CAPS: RSC offload capabilities + * @IDPF_OTHER_CAPS: miscellaneous offloads + * + * Used when checking for a specific capability flag since different capability + * sets are not mutually exclusive numerically, the caller must specify which + * type of capability they are checking for. 
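+ *
+ * Illustrative use (an editor's sketch, not part of the original patch;
+ * the idpf_is_cap_ena() wrapper is defined further down in this header):
+ *
+ *	rss_ok = idpf_is_cap_ena(adapter, IDPF_RSS_CAPS,
+ *				 VIRTCHNL2_CAP_RSS_IPV4_TCP);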
+ */
+enum idpf_cap_field {
+	IDPF_BASE_CAPS = -1,
+	IDPF_CSUM_CAPS = offsetof(struct virtchnl2_get_capabilities,
+				  csum_caps),
+	IDPF_SEG_CAPS = offsetof(struct virtchnl2_get_capabilities,
+				 seg_caps),
+	IDPF_RSS_CAPS = offsetof(struct virtchnl2_get_capabilities,
+				 rss_caps),
+	IDPF_HSPLIT_CAPS = offsetof(struct virtchnl2_get_capabilities,
+				    hsplit_caps),
+	IDPF_RSC_CAPS = offsetof(struct virtchnl2_get_capabilities,
+				 rsc_caps),
+	IDPF_OTHER_CAPS = offsetof(struct virtchnl2_get_capabilities,
+				   other_caps),
+};
+
+/**
+ * enum idpf_vport_state - Current vport state
+ * @__IDPF_VPORT_DOWN: Vport is down
+ * @__IDPF_VPORT_UP: Vport is up
+ * @__IDPF_VPORT_STATE_LAST: Must be last, number of states
+ */
+enum idpf_vport_state {
+	__IDPF_VPORT_DOWN,
+	__IDPF_VPORT_UP,
+	__IDPF_VPORT_STATE_LAST,
+};
+
+/**
+ * struct idpf_netdev_priv - Struct to store vport back pointer
+ * @adapter: Adapter back pointer
+ * @vport: Vport back pointer
+ * @vport_id: Vport identifier
+ * @vport_idx: Relative vport index
+ * @state: See enum idpf_vport_state
+ * @netstats: Packet and byte stats
+ * @stats_lock: Lock to protect stats update
+ */
+struct idpf_netdev_priv {
+	struct idpf_adapter *adapter;
+	struct idpf_vport *vport;
+	u32 vport_id;
+	u16 vport_idx;
+	enum idpf_vport_state state;
+	struct rtnl_link_stats64 netstats;
+	spinlock_t stats_lock;
+};
+
+/**
+ * struct idpf_reset_reg - Reset register offsets/masks
+ * @rstat: Reset status register
+ * @rstat_m: Reset status mask
+ */
+struct idpf_reset_reg {
+	void __iomem *rstat;
+	u32 rstat_m;
+};
+
+/**
+ * struct idpf_vport_max_q - Queue limits
+ * @max_rxq: Maximum number of RX queues supported
+ * @max_txq: Maximum number of TX queues supported
+ * @max_bufq: In splitq, maximum number of buffer queues supported
+ * @max_complq: In splitq, maximum number of completion queues supported
+ */
+struct idpf_vport_max_q {
+	u16 max_rxq;
+	u16 max_txq;
+	u16 max_bufq;
+	u16 max_complq;
+};
+
+/**
+ * struct idpf_reg_ops - Device specific register operation function pointers
+ * @ctlq_reg_init: Mailbox control queue register initialization
+ * @intr_reg_init: Traffic interrupt register initialization
+ * @mb_intr_reg_init: Mailbox interrupt register initialization
+ * @reset_reg_init: Reset register initialization
+ * @trigger_reset: Trigger a reset to occur
+ */
+struct idpf_reg_ops {
+	void (*ctlq_reg_init)(struct idpf_ctlq_create_info *cq);
+	int (*intr_reg_init)(struct idpf_vport *vport);
+	void (*mb_intr_reg_init)(struct idpf_adapter *adapter);
+	void (*reset_reg_init)(struct idpf_adapter *adapter);
+	void (*trigger_reset)(struct idpf_adapter *adapter,
+			      enum idpf_flags trig_cause);
+};
+
+/**
+ * struct idpf_dev_ops - Device specific operations
+ * @reg_ops: Register operations
+ */
+struct idpf_dev_ops {
+	struct idpf_reg_ops reg_ops;
+};
+
+/* These macros allow us to generate an enum and a matching char * array of
+ * stringified enums that are always in sync. Checkpatch issues a bogus warning
+ * about this being a complex macro; but it's wrong, these are never used as a
+ * statement and instead only used to define the enum and array.
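+ *
+ * Illustrative expansion (abbreviated):
+ *
+ *	IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_ENUM)
+ *		-> IDPF_VC_CREATE_VPORT, IDPF_VC_CREATE_VPORT_ERR, ...
+ *	IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_STRING)
+ *		-> "IDPF_VC_CREATE_VPORT", "IDPF_VC_CREATE_VPORT_ERR", ...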
+ */ +#define IDPF_FOREACH_VPORT_VC_STATE(STATE) \ + STATE(IDPF_VC_CREATE_VPORT) \ + STATE(IDPF_VC_CREATE_VPORT_ERR) \ + STATE(IDPF_VC_ENA_VPORT) \ + STATE(IDPF_VC_ENA_VPORT_ERR) \ + STATE(IDPF_VC_DIS_VPORT) \ + STATE(IDPF_VC_DIS_VPORT_ERR) \ + STATE(IDPF_VC_DESTROY_VPORT) \ + STATE(IDPF_VC_DESTROY_VPORT_ERR) \ + STATE(IDPF_VC_CONFIG_TXQ) \ + STATE(IDPF_VC_CONFIG_TXQ_ERR) \ + STATE(IDPF_VC_CONFIG_RXQ) \ + STATE(IDPF_VC_CONFIG_RXQ_ERR) \ + STATE(IDPF_VC_ENA_QUEUES) \ + STATE(IDPF_VC_ENA_QUEUES_ERR) \ + STATE(IDPF_VC_DIS_QUEUES) \ + STATE(IDPF_VC_DIS_QUEUES_ERR) \ + STATE(IDPF_VC_MAP_IRQ) \ + STATE(IDPF_VC_MAP_IRQ_ERR) \ + STATE(IDPF_VC_UNMAP_IRQ) \ + STATE(IDPF_VC_UNMAP_IRQ_ERR) \ + STATE(IDPF_VC_ADD_QUEUES) \ + STATE(IDPF_VC_ADD_QUEUES_ERR) \ + STATE(IDPF_VC_DEL_QUEUES) \ + STATE(IDPF_VC_DEL_QUEUES_ERR) \ + STATE(IDPF_VC_ALLOC_VECTORS) \ + STATE(IDPF_VC_ALLOC_VECTORS_ERR) \ + STATE(IDPF_VC_DEALLOC_VECTORS) \ + STATE(IDPF_VC_DEALLOC_VECTORS_ERR) \ + STATE(IDPF_VC_SET_SRIOV_VFS) \ + STATE(IDPF_VC_SET_SRIOV_VFS_ERR) \ + STATE(IDPF_VC_GET_RSS_LUT) \ + STATE(IDPF_VC_GET_RSS_LUT_ERR) \ + STATE(IDPF_VC_SET_RSS_LUT) \ + STATE(IDPF_VC_SET_RSS_LUT_ERR) \ + STATE(IDPF_VC_GET_RSS_KEY) \ + STATE(IDPF_VC_GET_RSS_KEY_ERR) \ + STATE(IDPF_VC_SET_RSS_KEY) \ + STATE(IDPF_VC_SET_RSS_KEY_ERR) \ + STATE(IDPF_VC_GET_STATS) \ + STATE(IDPF_VC_GET_STATS_ERR) \ + STATE(IDPF_VC_ADD_MAC_ADDR) \ + STATE(IDPF_VC_ADD_MAC_ADDR_ERR) \ + STATE(IDPF_VC_DEL_MAC_ADDR) \ + STATE(IDPF_VC_DEL_MAC_ADDR_ERR) \ + STATE(IDPF_VC_GET_PTYPE_INFO) \ + STATE(IDPF_VC_GET_PTYPE_INFO_ERR) \ + STATE(IDPF_VC_LOOPBACK_STATE) \ + STATE(IDPF_VC_LOOPBACK_STATE_ERR) \ + STATE(IDPF_VC_NBITS) + +#define IDPF_GEN_ENUM(ENUM) ENUM, +#define IDPF_GEN_STRING(STRING) #STRING, + +enum idpf_vport_vc_state { + IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_ENUM) +}; + +extern const char * const idpf_vport_vc_state_str[]; + +/** + * enum idpf_vport_reset_cause - Vport soft reset causes + * @IDPF_SR_Q_CHANGE: Soft reset queue change + * @IDPF_SR_Q_DESC_CHANGE: Soft reset descriptor change + * @IDPF_SR_MTU_CHANGE: Soft reset MTU change + * @IDPF_SR_RSC_CHANGE: Soft reset RSC change + */ +enum idpf_vport_reset_cause { + IDPF_SR_Q_CHANGE, + IDPF_SR_Q_DESC_CHANGE, + IDPF_SR_MTU_CHANGE, + IDPF_SR_RSC_CHANGE, +}; + +/** + * enum idpf_vport_flags - Vport flags + * @IDPF_VPORT_DEL_QUEUES: To send delete queues message + * @IDPF_VPORT_SW_MARKER: Indicate TX pipe drain software marker packets + * processing is done + * @IDPF_VPORT_FLAGS_NBITS: Must be last + */ +enum idpf_vport_flags { + IDPF_VPORT_DEL_QUEUES, + IDPF_VPORT_SW_MARKER, + IDPF_VPORT_FLAGS_NBITS, +}; + +struct idpf_port_stats { + struct u64_stats_sync stats_sync; + u64_stats_t rx_hw_csum_err; + u64_stats_t rx_hsplit; + u64_stats_t rx_hsplit_hbo; + u64_stats_t rx_bad_descs; + u64_stats_t tx_linearize; + u64_stats_t tx_busy; + u64_stats_t tx_drops; + u64_stats_t tx_dma_map_errs; + struct virtchnl2_vport_stats vport_stats; +}; + +/** + * struct idpf_vport - Handle for netdevices and queue resources + * @num_txq: Number of allocated TX queues + * @num_complq: Number of allocated completion queues + * @txq_desc_count: TX queue descriptor count + * @complq_desc_count: Completion queue descriptor count + * @compln_clean_budget: Work budget for completion clean + * @num_txq_grp: Number of TX queue groups + * @txq_grps: Array of TX queue groups + * @txq_model: Split queue or single queue queuing model + * @txqs: Used only in hotpath to get to the right queue very fast + * @crc_enable: Enable CRC insertion offload + * @num_rxq: 
Number of allocated RX queues + * @num_bufq: Number of allocated buffer queues + * @rxq_desc_count: RX queue descriptor count. *MUST* have enough descriptors + * to complete all buffer descriptors for all buffer queues in + * the worst case. + * @num_bufqs_per_qgrp: Buffer queues per RX queue in a given grouping + * @bufq_desc_count: Buffer queue descriptor count + * @bufq_size: Size of buffers in ring (e.g. 2K, 4K, etc) + * @num_rxq_grp: Number of RX queues in a group + * @rxq_grps: Total number of RX groups. Number of groups * number of RX per + * group will yield total number of RX queues. + * @rxq_model: Splitq queue or single queue queuing model + * @rx_ptype_lkup: Lookup table for ptypes on RX + * @adapter: back pointer to associated adapter + * @netdev: Associated net_device. Each vport should have one and only one + * associated netdev. + * @flags: See enum idpf_vport_flags + * @vport_type: Default SRIOV, SIOV, etc. + * @vport_id: Device given vport identifier + * @idx: Software index in adapter vports struct + * @default_vport: Use this vport if one isn't specified + * @base_rxd: True if the driver should use base descriptors instead of flex + * @num_q_vectors: Number of IRQ vectors allocated + * @q_vectors: Array of queue vectors + * @q_vector_idxs: Starting index of queue vectors + * @max_mtu: device given max possible MTU + * @default_mac_addr: device will give a default MAC to use + * @rx_itr_profile: RX profiles for Dynamic Interrupt Moderation + * @tx_itr_profile: TX profiles for Dynamic Interrupt Moderation + * @port_stats: per port csum, header split, and other offload stats + * @link_up: True if link is up + * @link_speed_mbps: Link speed in mbps + * @vc_msg: Virtchnl message buffer + * @vc_state: Virtchnl message state + * @vchnl_wq: Wait queue for virtchnl messages + * @sw_marker_wq: workqueue for marker packets + * @vc_buf_lock: Lock to protect virtchnl buffer + */ +struct idpf_vport { + u16 num_txq; + u16 num_complq; + u32 txq_desc_count; + u32 complq_desc_count; + u32 compln_clean_budget; + u16 num_txq_grp; + struct idpf_txq_group *txq_grps; + u32 txq_model; + struct idpf_queue **txqs; + bool crc_enable; + + u16 num_rxq; + u16 num_bufq; + u32 rxq_desc_count; + u8 num_bufqs_per_qgrp; + u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP]; + u32 bufq_size[IDPF_MAX_BUFQS_PER_RXQ_GRP]; + u16 num_rxq_grp; + struct idpf_rxq_group *rxq_grps; + u32 rxq_model; + struct idpf_rx_ptype_decoded rx_ptype_lkup[IDPF_RX_MAX_PTYPE]; + + struct idpf_adapter *adapter; + struct net_device *netdev; + DECLARE_BITMAP(flags, IDPF_VPORT_FLAGS_NBITS); + u16 vport_type; + u32 vport_id; + u16 idx; + bool default_vport; + bool base_rxd; + + u16 num_q_vectors; + struct idpf_q_vector *q_vectors; + u16 *q_vector_idxs; + u16 max_mtu; + u8 default_mac_addr[ETH_ALEN]; + u16 rx_itr_profile[IDPF_DIM_PROFILE_SLOTS]; + u16 tx_itr_profile[IDPF_DIM_PROFILE_SLOTS]; + struct idpf_port_stats port_stats; + + bool link_up; + u32 link_speed_mbps; + + char vc_msg[IDPF_CTLQ_MAX_BUF_LEN]; + DECLARE_BITMAP(vc_state, IDPF_VC_NBITS); + + wait_queue_head_t vchnl_wq; + wait_queue_head_t sw_marker_wq; + struct mutex vc_buf_lock; +}; + +/** + * enum idpf_user_flags + * @__IDPF_PROMISC_UC: Unicast promiscuous mode + * @__IDPF_PROMISC_MC: Multicast promiscuous mode + * @__IDPF_USER_FLAGS_NBITS: Must be last + */ +enum idpf_user_flags { + __IDPF_PROMISC_UC = 32, + __IDPF_PROMISC_MC, + + __IDPF_USER_FLAGS_NBITS, +}; + +/** + * struct idpf_rss_data - Associated RSS data + * @rss_key_size: Size of RSS hash key + * @rss_key: RSS hash 
key + * @rss_lut_size: Size of RSS lookup table + * @rss_lut: RSS lookup table + * @cached_lut: Used to restore previously init RSS lut + */ +struct idpf_rss_data { + u16 rss_key_size; + u8 *rss_key; + u16 rss_lut_size; + u32 *rss_lut; + u32 *cached_lut; +}; + +/** + * struct idpf_vport_user_config_data - User defined configuration values for + * each vport. + * @rss_data: See struct idpf_rss_data + * @num_req_tx_qs: Number of user requested TX queues through ethtool + * @num_req_rx_qs: Number of user requested RX queues through ethtool + * @num_req_txq_desc: Number of user requested TX queue descriptors through + * ethtool + * @num_req_rxq_desc: Number of user requested RX queue descriptors through + * ethtool + * @user_flags: User toggled config flags + * @mac_filter_list: List of MAC filters + * + * Used to restore configuration after a reset as the vport will get wiped. + */ +struct idpf_vport_user_config_data { + struct idpf_rss_data rss_data; + u16 num_req_tx_qs; + u16 num_req_rx_qs; + u32 num_req_txq_desc; + u32 num_req_rxq_desc; + DECLARE_BITMAP(user_flags, __IDPF_USER_FLAGS_NBITS); + struct list_head mac_filter_list; +}; + +/** + * enum idpf_vport_config_flags - Vport config flags + * @IDPF_VPORT_REG_NETDEV: Register netdev + * @IDPF_VPORT_UP_REQUESTED: Set if interface up is requested on core reset + * @IDPF_VPORT_ADD_MAC_REQ: Asynchronous add ether address in flight + * @IDPF_VPORT_DEL_MAC_REQ: Asynchronous delete ether address in flight + * @IDPF_VPORT_CONFIG_FLAGS_NBITS: Must be last + */ +enum idpf_vport_config_flags { + IDPF_VPORT_REG_NETDEV, + IDPF_VPORT_UP_REQUESTED, + IDPF_VPORT_ADD_MAC_REQ, + IDPF_VPORT_DEL_MAC_REQ, + IDPF_VPORT_CONFIG_FLAGS_NBITS, +}; + +/** + * struct idpf_avail_queue_info + * @avail_rxq: Available RX queues + * @avail_txq: Available TX queues + * @avail_bufq: Available buffer queues + * @avail_complq: Available completion queues + * + * Maintain total queues available after allocating max queues to each vport. + */ +struct idpf_avail_queue_info { + u16 avail_rxq; + u16 avail_txq; + u16 avail_bufq; + u16 avail_complq; +}; + +/** + * struct idpf_vector_info - Utility structure to pass function arguments as a + * structure + * @num_req_vecs: Vectors required based on the number of queues updated by the + * user via ethtool + * @num_curr_vecs: Current number of vectors, must be >= @num_req_vecs + * @index: Relative starting index for vectors + * @default_vport: Vectors are for default vport + */ +struct idpf_vector_info { + u16 num_req_vecs; + u16 num_curr_vecs; + u16 index; + bool default_vport; +}; + +/** + * struct idpf_vector_lifo - Stack to maintain vector indexes used for vector + * distribution algorithm + * @top: Points to stack top i.e. next available vector index + * @base: Always points to start of the free pool + * @size: Total size of the vector stack + * @vec_idx: Array to store all the vector indexes + * + * Vector stack maintains all the relative vector indexes at the *adapter* + * level. This stack is divided into 2 parts, first one is called as 'default + * pool' and other one is called 'free pool'. Vector distribution algorithm + * gives priority to default vports in a way that at least IDPF_MIN_Q_VEC + * vectors are allocated per default vport and the relative vector indexes for + * those are maintained in default pool. Free pool contains all the unallocated + * vector indexes which can be allocated on-demand basis. Mailbox vector index + * is maintained in the default pool of the stack. 
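+ *
+ * Rough layout (editor's sketch of the description above):
+ *
+ *	vec_idx[]:  | default pool | free pool |
+ *	            0              base        size
+ *	top tracks the next vector index available for allocation.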
+ */
+struct idpf_vector_lifo {
+	u16 top;
+	u16 base;
+	u16 size;
+	u16 *vec_idx;
+};
+
+/**
+ * struct idpf_vport_config - Vport configuration data
+ * @user_config: see struct idpf_vport_user_config_data
+ * @max_q: Maximum possible queues
+ * @req_qs_chunks: Queue chunk data for requested queues
+ * @mac_filter_list_lock: Lock to protect mac filters
+ * @flags: See enum idpf_vport_config_flags
+ */
+struct idpf_vport_config {
+	struct idpf_vport_user_config_data user_config;
+	struct idpf_vport_max_q max_q;
+	void *req_qs_chunks;
+	spinlock_t mac_filter_list_lock;
+	DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
+};
+
+/**
+ * struct idpf_adapter - Device data struct generated on probe
+ * @pdev: PCI device struct given on probe
+ * @virt_ver_maj: Virtchnl version major
+ * @virt_ver_min: Virtchnl version minor
+ * @msg_enable: Debug message level enabled
+ * @mb_wait_count: Number of times mailbox initialization has been attempted
+ * @state: Init state machine
+ * @flags: See enum idpf_flags
+ * @reset_reg: See struct idpf_reset_reg
+ * @hw: Device access data
+ * @num_req_msix: Requested number of MSIX vectors
+ * @num_avail_msix: Available number of MSIX vectors
+ * @num_msix_entries: Number of entries in MSIX table
+ * @msix_entries: MSIX table
+ * @req_vec_chunks: Requested vector chunk data
+ * @mb_vector: Mailbox vector data
+ * @vector_stack: Stack to store the msix vector indexes
+ * @irq_mb_handler: Handler for hard interrupt for mailbox
+ * @tx_timeout_count: Number of TX timeouts that have occurred
+ * @avail_queues: Device given queue limits
+ * @vports: Array to store vports created by the driver
+ * @netdevs: Associated Vport netdevs
+ * @vport_params_reqd: Vport params requested
+ * @vport_params_recvd: Vport params received
+ * @vport_ids: Array of device given vport identifiers
+ * @vport_config: Vport config parameters
+ * @max_vports: Maximum vports that can be allocated
+ * @num_alloc_vports: Current number of vports allocated
+ * @next_vport: Next free slot in pf->vport[] - 0-based!
+ * @init_task: Initialization task
+ * @init_wq: Workqueue for initialization task
+ * @serv_task: Periodically recurring maintenance task
+ * @serv_wq: Workqueue for service task
+ * @mbx_task: Task to handle mailbox interrupts
+ * @mbx_wq: Workqueue for mailbox responses
+ * @vc_event_task: Task to handle out of band virtchnl event notifications
+ * @vc_event_wq: Workqueue for virtchnl events
+ * @stats_task: Periodic statistics retrieval task
+ * @stats_wq: Workqueue for statistics task
+ * @caps: Negotiated capabilities with device
+ * @vchnl_wq: Wait queue for virtchnl messages
+ * @vc_state: Virtchnl message state
+ * @vc_msg: Virtchnl message buffer
+ * @dev_ops: See idpf_dev_ops
+ * @num_vfs: Number of allocated VFs through sysfs.
PF does not directly talk + * to VFs but is used to initialize them + * @crc_enable: Enable CRC insertion offload + * @req_tx_splitq: TX split or single queue model to request + * @req_rx_splitq: RX split or single queue model to request + * @vport_ctrl_lock: Lock to protect the vport control flow + * @vector_lock: Lock to protect vector distribution + * @queue_lock: Lock to protect queue distribution + * @vc_buf_lock: Lock to protect virtchnl buffer + */ +struct idpf_adapter { + struct pci_dev *pdev; + u32 virt_ver_maj; + u32 virt_ver_min; + + u32 msg_enable; + u32 mb_wait_count; + enum idpf_state state; + DECLARE_BITMAP(flags, IDPF_FLAGS_NBITS); + struct idpf_reset_reg reset_reg; + struct idpf_hw hw; + u16 num_req_msix; + u16 num_avail_msix; + u16 num_msix_entries; + struct msix_entry *msix_entries; + struct virtchnl2_alloc_vectors *req_vec_chunks; + struct idpf_q_vector mb_vector; + struct idpf_vector_lifo vector_stack; + irqreturn_t (*irq_mb_handler)(int irq, void *data); + + u32 tx_timeout_count; + struct idpf_avail_queue_info avail_queues; + struct idpf_vport **vports; + struct net_device **netdevs; + struct virtchnl2_create_vport **vport_params_reqd; + struct virtchnl2_create_vport **vport_params_recvd; + u32 *vport_ids; + + struct idpf_vport_config **vport_config; + u16 max_vports; + u16 num_alloc_vports; + u16 next_vport; + + struct delayed_work init_task; + struct workqueue_struct *init_wq; + struct delayed_work serv_task; + struct workqueue_struct *serv_wq; + struct delayed_work mbx_task; + struct workqueue_struct *mbx_wq; + struct delayed_work vc_event_task; + struct workqueue_struct *vc_event_wq; + struct delayed_work stats_task; + struct workqueue_struct *stats_wq; + struct virtchnl2_get_capabilities caps; + + wait_queue_head_t vchnl_wq; + DECLARE_BITMAP(vc_state, IDPF_VC_NBITS); + char vc_msg[IDPF_CTLQ_MAX_BUF_LEN]; + struct idpf_dev_ops dev_ops; + int num_vfs; + bool crc_enable; + bool req_tx_splitq; + bool req_rx_splitq; + + struct mutex vport_ctrl_lock; + struct mutex vector_lock; + struct mutex queue_lock; + struct mutex vc_buf_lock; +}; + +/** + * idpf_is_queue_model_split - check if queue model is split + * @q_model: queue model single or split + * + * Returns true if queue model is split else false + */ +static inline int idpf_is_queue_model_split(u16 q_model) +{ + return q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT; +} + +#define idpf_is_cap_ena(adapter, field, flag) \ + idpf_is_capability_ena(adapter, false, field, flag) +#define idpf_is_cap_ena_all(adapter, field, flag) \ + idpf_is_capability_ena(adapter, true, field, flag) + +bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all, + enum idpf_cap_field field, u64 flag); + +#define IDPF_CAP_RSS (\ + VIRTCHNL2_CAP_RSS_IPV4_TCP |\ + VIRTCHNL2_CAP_RSS_IPV4_TCP |\ + VIRTCHNL2_CAP_RSS_IPV4_UDP |\ + VIRTCHNL2_CAP_RSS_IPV4_SCTP |\ + VIRTCHNL2_CAP_RSS_IPV4_OTHER |\ + VIRTCHNL2_CAP_RSS_IPV6_TCP |\ + VIRTCHNL2_CAP_RSS_IPV6_TCP |\ + VIRTCHNL2_CAP_RSS_IPV6_UDP |\ + VIRTCHNL2_CAP_RSS_IPV6_SCTP |\ + VIRTCHNL2_CAP_RSS_IPV6_OTHER) + +#define IDPF_CAP_RSC (\ + VIRTCHNL2_CAP_RSC_IPV4_TCP |\ + VIRTCHNL2_CAP_RSC_IPV6_TCP) + +#define IDPF_CAP_HSPLIT (\ + VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |\ + VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6) + +#define IDPF_CAP_RX_CSUM_L4V4 (\ + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |\ + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP) + +#define IDPF_CAP_RX_CSUM_L4V6 (\ + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\ + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP) + +#define IDPF_CAP_RX_CSUM (\ + VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |\ + 
VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |\ + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP |\ + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\ + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP) + +#define IDPF_CAP_SCTP_CSUM (\ + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |\ + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP |\ + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |\ + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP) + +#define IDPF_CAP_TUNNEL_TX_CSUM (\ + VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |\ + VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL) + +/** + * idpf_get_reserved_vecs - Get reserved vectors + * @adapter: private data struct + */ +static inline u16 idpf_get_reserved_vecs(struct idpf_adapter *adapter) +{ + return le16_to_cpu(adapter->caps.num_allocated_vectors); +} + +/** + * idpf_get_default_vports - Get default number of vports + * @adapter: private data struct + */ +static inline u16 idpf_get_default_vports(struct idpf_adapter *adapter) +{ + return le16_to_cpu(adapter->caps.default_num_vports); +} + +/** + * idpf_get_max_vports - Get max number of vports + * @adapter: private data struct + */ +static inline u16 idpf_get_max_vports(struct idpf_adapter *adapter) +{ + return le16_to_cpu(adapter->caps.max_vports); +} + +/** + * idpf_get_max_tx_bufs - Get max scatter-gather buffers supported by the device + * @adapter: private data struct + */ +static inline unsigned int idpf_get_max_tx_bufs(struct idpf_adapter *adapter) +{ + return adapter->caps.max_sg_bufs_per_tx_pkt; +} + +/** + * idpf_get_min_tx_pkt_len - Get min packet length supported by the device + * @adapter: private data struct + */ +static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter) +{ + u8 pkt_len = adapter->caps.min_sso_packet_len; + + return pkt_len ? pkt_len : IDPF_TX_MIN_PKT_LEN; +} + +/** + * idpf_get_reg_addr - Get BAR0 register address + * @adapter: private data struct + * @reg_offset: register offset value + * + * Based on the register offset, return the actual BAR0 register address + */ +static inline void __iomem *idpf_get_reg_addr(struct idpf_adapter *adapter, + resource_size_t reg_offset) +{ + return (void __iomem *)(adapter->hw.hw_addr + reg_offset); +} + +/** + * idpf_is_reset_detected - check if we were reset at some point + * @adapter: driver specific private structure + * + * Returns true if we are either in reset currently or were previously reset. 
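+ *
+ * Mechanism, in short: hardware clears the mailbox receive queue length
+ * register on reset, so a zero length field (or a missing ARQ) is read as
+ * evidence that a reset has happened.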
+ */ +static inline bool idpf_is_reset_detected(struct idpf_adapter *adapter) +{ + if (!adapter->hw.arq) + return true; + + return !(readl(idpf_get_reg_addr(adapter, adapter->hw.arq->reg.len)) & + adapter->hw.arq->reg.len_mask); +} + +/** + * idpf_is_reset_in_prog - check if reset is in progress + * @adapter: driver specific private structure + * + * Returns true if hard reset is in progress, false otherwise + */ +static inline bool idpf_is_reset_in_prog(struct idpf_adapter *adapter) +{ + return (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags) || + test_bit(IDPF_HR_FUNC_RESET, adapter->flags) || + test_bit(IDPF_HR_DRV_LOAD, adapter->flags)); +} + +/** + * idpf_netdev_to_vport - get a vport handle from a netdev + * @netdev: network interface device structure + */ +static inline struct idpf_vport *idpf_netdev_to_vport(struct net_device *netdev) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + + return np->vport; +} + +/** + * idpf_netdev_to_adapter - Get adapter handle from a netdev + * @netdev: Network interface device structure + */ +static inline struct idpf_adapter *idpf_netdev_to_adapter(struct net_device *netdev) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + + return np->adapter; +} + +/** + * idpf_is_feature_ena - Determine if a particular feature is enabled + * @vport: Vport to check + * @feature: Netdev flag to check + * + * Returns true or false if a particular feature is enabled. + */ +static inline bool idpf_is_feature_ena(const struct idpf_vport *vport, + netdev_features_t feature) +{ + return vport->netdev->features & feature; +} + +/** + * idpf_get_max_tx_hdr_size -- get the size of tx header + * @adapter: Driver specific private structure + */ +static inline u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter) +{ + return le16_to_cpu(adapter->caps.max_tx_hdr_size); +} + +/** + * idpf_vport_ctrl_lock - Acquire the vport control lock + * @netdev: Network interface device structure + * + * This lock should be used by non-datapath code to protect against vport + * destruction. 
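+ *
+ * Illustrative pattern (a sketch, not taken from this patch):
+ *
+ *	idpf_vport_ctrl_lock(netdev);
+ *	vport = idpf_netdev_to_vport(netdev);
+ *	... reconfigure the vport without racing its teardown ...
+ *	idpf_vport_ctrl_unlock(netdev);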
+ */ +static inline void idpf_vport_ctrl_lock(struct net_device *netdev) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + + mutex_lock(&np->adapter->vport_ctrl_lock); +} + +/** + * idpf_vport_ctrl_unlock - Release the vport control lock + * @netdev: Network interface device structure + */ +static inline void idpf_vport_ctrl_unlock(struct net_device *netdev) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + + mutex_unlock(&np->adapter->vport_ctrl_lock); +} + +void idpf_statistics_task(struct work_struct *work); +void idpf_init_task(struct work_struct *work); +void idpf_service_task(struct work_struct *work); +void idpf_mbx_task(struct work_struct *work); +void idpf_vc_event_task(struct work_struct *work); +void idpf_dev_ops_init(struct idpf_adapter *adapter); +void idpf_vf_dev_ops_init(struct idpf_adapter *adapter); +int idpf_vport_adjust_qs(struct idpf_vport *vport); +int idpf_init_dflt_mbx(struct idpf_adapter *adapter); +void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter); +int idpf_vc_core_init(struct idpf_adapter *adapter); +void idpf_vc_core_deinit(struct idpf_adapter *adapter); +int idpf_intr_req(struct idpf_adapter *adapter); +void idpf_intr_rel(struct idpf_adapter *adapter); +int idpf_get_reg_intr_vecs(struct idpf_vport *vport, + struct idpf_vec_regs *reg_vals); +u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter); +int idpf_send_delete_queues_msg(struct idpf_vport *vport); +int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q, + u16 num_complq, u16 num_rx_q, u16 num_rx_bufq); +int idpf_initiate_soft_reset(struct idpf_vport *vport, + enum idpf_vport_reset_cause reset_cause); +int idpf_send_enable_vport_msg(struct idpf_vport *vport); +int idpf_send_disable_vport_msg(struct idpf_vport *vport); +int idpf_send_destroy_vport_msg(struct idpf_vport *vport); +int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport); +int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport); +int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get); +int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get); +int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter); +int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors); +void idpf_deinit_task(struct idpf_adapter *adapter); +int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter, + u16 *q_vector_idxs, + struct idpf_vector_info *vec_info); +int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport); +int idpf_send_get_stats_msg(struct idpf_vport *vport); +int idpf_get_vec_ids(struct idpf_adapter *adapter, + u16 *vecids, int num_vecids, + struct virtchnl2_vector_chunks *chunks); +int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op, + void *msg, int msg_size); +int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op, + u16 msg_size, u8 *msg); +void idpf_set_ethtool_ops(struct net_device *netdev); +int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter, + struct idpf_vport_max_q *max_q); +void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter, + struct idpf_vport_max_q *max_q); +int idpf_add_del_mac_filters(struct idpf_vport *vport, + struct idpf_netdev_priv *np, + bool add, bool async); +int idpf_set_promiscuous(struct idpf_adapter *adapter, + struct idpf_vport_user_config_data *config_data, + u32 vport_id); +int idpf_send_disable_queues_msg(struct idpf_vport *vport); +void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q); +u32 idpf_get_vport_id(struct idpf_vport *vport); +int 
idpf_vport_queue_ids_init(struct idpf_vport *vport); +int idpf_queue_reg_init(struct idpf_vport *vport); +int idpf_send_config_queues_msg(struct idpf_vport *vport); +int idpf_send_enable_queues_msg(struct idpf_vport *vport); +int idpf_send_create_vport_msg(struct idpf_adapter *adapter, + struct idpf_vport_max_q *max_q); +int idpf_check_supported_desc_ids(struct idpf_vport *vport); +void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector, + u16 itr, bool tx); +int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map); +int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs); +int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs); + +#endif /* !_IDPF_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c new file mode 100644 index 000000000000..c7f43d2fcd13 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c @@ -0,0 +1,621 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2023 Intel Corporation */ + +#include "idpf_controlq.h" + +/** + * idpf_ctlq_setup_regs - initialize control queue registers + * @cq: pointer to the specific control queue + * @q_create_info: structs containing info for each queue to be initialized + */ +static void idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq, + struct idpf_ctlq_create_info *q_create_info) +{ + /* set control queue registers in our local struct */ + cq->reg.head = q_create_info->reg.head; + cq->reg.tail = q_create_info->reg.tail; + cq->reg.len = q_create_info->reg.len; + cq->reg.bah = q_create_info->reg.bah; + cq->reg.bal = q_create_info->reg.bal; + cq->reg.len_mask = q_create_info->reg.len_mask; + cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask; + cq->reg.head_mask = q_create_info->reg.head_mask; +} + +/** + * idpf_ctlq_init_regs - Initialize control queue registers + * @hw: pointer to hw struct + * @cq: pointer to the specific Control queue + * @is_rxq: true if receive control queue, false otherwise + * + * Initialize registers. The caller is expected to have already initialized the + * descriptor ring memory and buffer memory + */ +static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, + bool is_rxq) +{ + /* Update tail to post pre-allocated buffers for rx queues */ + if (is_rxq) + wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1)); + + /* For non-Mailbox control queues only TAIL need to be set */ + if (cq->q_id != -1) + return; + + /* Clear Head for both send or receive */ + wr32(hw, cq->reg.head, 0); + + /* set starting point */ + wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa)); + wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa)); + wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask)); +} + +/** + * idpf_ctlq_init_rxq_bufs - populate receive queue descriptors with buf + * @cq: pointer to the specific Control queue + * + * Record the address of the receive queue DMA buffers in the descriptors. + * The buffers must have been previously allocated. 
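+ *
+ * Each populated descriptor is flagged IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD
+ * to tell firmware a DMA buffer is attached; ring slots with no buffer are
+ * simply skipped.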
+ */
+static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
+{
+	int i;
+
+	for (i = 0; i < cq->ring_size; i++) {
+		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
+		struct idpf_dma_mem *bi = cq->bi.rx_buff[i];
+
+		/* No buffer to post to descriptor, continue */
+		if (!bi)
+			continue;
+
+		desc->flags =
+			cpu_to_le16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+		desc->opcode = 0;
+		desc->datalen = cpu_to_le16(bi->size);
+		desc->ret_val = 0;
+		desc->v_opcode_dtype = 0;
+		desc->v_retval = 0;
+		desc->params.indirect.addr_high =
+			cpu_to_le32(upper_32_bits(bi->pa));
+		desc->params.indirect.addr_low =
+			cpu_to_le32(lower_32_bits(bi->pa));
+		desc->params.indirect.param0 = 0;
+		desc->params.indirect.sw_cookie = 0;
+		desc->params.indirect.v_flags = 0;
+	}
+}
+
+/**
+ * idpf_ctlq_shutdown - shutdown the CQ
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ *
+ * The main shutdown routine for any control queue
+ */
+static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	mutex_lock(&cq->cq_lock);
+
+	/* free ring buffers and the ring itself */
+	idpf_ctlq_dealloc_ring_res(hw, cq);
+
+	/* Set ring_size to 0 to indicate uninitialized queue */
+	cq->ring_size = 0;
+
+	mutex_unlock(&cq->cq_lock);
+	mutex_destroy(&cq->cq_lock);
+}
+
+/**
+ * idpf_ctlq_add - add one control queue
+ * @hw: pointer to hardware struct
+ * @qinfo: info for queue to be created
+ * @cq_out: (output) double pointer to control queue to be created
+ *
+ * Allocate and initialize a control queue and add it to the control queue list.
+ * The cq parameter will be allocated/initialized and passed back to the caller
+ * if no errors occur.
+ *
+ * Note: idpf_ctlq_init must be called prior to any calls to idpf_ctlq_add
+ */
+int idpf_ctlq_add(struct idpf_hw *hw,
+		  struct idpf_ctlq_create_info *qinfo,
+		  struct idpf_ctlq_info **cq_out)
+{
+	struct idpf_ctlq_info *cq;
+	bool is_rxq = false;
+	int err;
+
+	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+	if (!cq)
+		return -ENOMEM;
+
+	cq->cq_type = qinfo->type;
+	cq->q_id = qinfo->id;
+	cq->buf_size = qinfo->buf_size;
+	cq->ring_size = qinfo->len;
+
+	cq->next_to_use = 0;
+	cq->next_to_clean = 0;
+	cq->next_to_post = cq->ring_size - 1;
+
+	switch (qinfo->type) {
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+		is_rxq = true;
+		fallthrough;
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+		err = idpf_ctlq_alloc_ring_res(hw, cq);
+		break;
+	default:
+		err = -EBADR;
+		break;
+	}
+
+	if (err)
+		goto init_free_q;
+
+	if (is_rxq) {
+		idpf_ctlq_init_rxq_bufs(cq);
+	} else {
+		/* Allocate the array of msg pointers for TX queues */
+		cq->bi.tx_msg = kcalloc(qinfo->len,
+					sizeof(struct idpf_ctlq_msg *),
+					GFP_KERNEL);
+		if (!cq->bi.tx_msg) {
+			err = -ENOMEM;
+			goto init_dealloc_q_mem;
+		}
+	}
+
+	idpf_ctlq_setup_regs(cq, qinfo);
+
+	idpf_ctlq_init_regs(hw, cq, is_rxq);
+
+	mutex_init(&cq->cq_lock);
+
+	list_add(&cq->cq_list, &hw->cq_list_head);
+
+	*cq_out = cq;
+
+	return 0;
+
+init_dealloc_q_mem:
+	/* free ring buffers and the ring itself */
+	idpf_ctlq_dealloc_ring_res(hw, cq);
+init_free_q:
+	kfree(cq);
+
+	return err;
+}
+
+/**
+ * idpf_ctlq_remove - deallocate and remove specified control queue
+ * @hw: pointer to hardware struct
+ * @cq: pointer to control queue to be removed
+ */
+void idpf_ctlq_remove(struct idpf_hw *hw,
+		      struct idpf_ctlq_info *cq)
+{
+	list_del(&cq->cq_list);
+	idpf_ctlq_shutdown(hw, cq);
+	kfree(cq);
+}
+
+/**
+ * idpf_ctlq_init - main initialization routine for all control queues
+ * @hw: pointer to hardware struct
+ * @num_q: number of
queues to initialize + * @q_info: array of structs containing info for each queue to be initialized + * + * This initializes any number and any type of control queues. This is an all + * or nothing routine; if one fails, all previously allocated queues will be + * destroyed. This must be called prior to using the individual add/remove + * APIs. + */ +int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q, + struct idpf_ctlq_create_info *q_info) +{ + struct idpf_ctlq_info *cq, *tmp; + int err; + int i; + + INIT_LIST_HEAD(&hw->cq_list_head); + + for (i = 0; i < num_q; i++) { + struct idpf_ctlq_create_info *qinfo = q_info + i; + + err = idpf_ctlq_add(hw, qinfo, &cq); + if (err) + goto init_destroy_qs; + } + + return 0; + +init_destroy_qs: + list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list) + idpf_ctlq_remove(hw, cq); + + return err; +} + +/** + * idpf_ctlq_deinit - destroy all control queues + * @hw: pointer to hw struct + */ +void idpf_ctlq_deinit(struct idpf_hw *hw) +{ + struct idpf_ctlq_info *cq, *tmp; + + list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list) + idpf_ctlq_remove(hw, cq); +} + +/** + * idpf_ctlq_send - send command to Control Queue (CTQ) + * @hw: pointer to hw struct + * @cq: handle to control queue struct to send on + * @num_q_msg: number of messages to send on control queue + * @q_msg: pointer to array of queue messages to be sent + * + * The caller is expected to allocate DMAable buffers and pass them to the + * send routine via the q_msg struct / control queue specific data struct. + * The control queue will hold a reference to each send message until + * the completion for that message has been cleaned. + */ +int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq, + u16 num_q_msg, struct idpf_ctlq_msg q_msg[]) +{ + struct idpf_ctlq_desc *desc; + int num_desc_avail; + int err = 0; + int i; + + mutex_lock(&cq->cq_lock); + + /* Ensure there are enough descriptors to send all messages */ + num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq); + if (num_desc_avail == 0 || num_desc_avail < num_q_msg) { + err = -ENOSPC; + goto err_unlock; + } + + for (i = 0; i < num_q_msg; i++) { + struct idpf_ctlq_msg *msg = &q_msg[i]; + + desc = IDPF_CTLQ_DESC(cq, cq->next_to_use); + + desc->opcode = cpu_to_le16(msg->opcode); + desc->pfid_vfid = cpu_to_le16(msg->func_id); + + desc->v_opcode_dtype = cpu_to_le32(msg->cookie.mbx.chnl_opcode); + desc->v_retval = cpu_to_le32(msg->cookie.mbx.chnl_retval); + + desc->flags = cpu_to_le16((msg->host_id & IDPF_HOST_ID_MASK) << + IDPF_CTLQ_FLAG_HOST_ID_S); + if (msg->data_len) { + struct idpf_dma_mem *buff = msg->ctx.indirect.payload; + + desc->datalen |= cpu_to_le16(msg->data_len); + desc->flags |= cpu_to_le16(IDPF_CTLQ_FLAG_BUF); + desc->flags |= cpu_to_le16(IDPF_CTLQ_FLAG_RD); + + /* Update the address values in the desc with the pa + * value for respective buffer + */ + desc->params.indirect.addr_high = + cpu_to_le32(upper_32_bits(buff->pa)); + desc->params.indirect.addr_low = + cpu_to_le32(lower_32_bits(buff->pa)); + + memcpy(&desc->params, msg->ctx.indirect.context, + IDPF_INDIRECT_CTX_SIZE); + } else { + memcpy(&desc->params, msg->ctx.direct, + IDPF_DIRECT_CTX_SIZE); + } + + /* Store buffer info */ + cq->bi.tx_msg[cq->next_to_use] = msg; + + (cq->next_to_use)++; + if (cq->next_to_use == cq->ring_size) + cq->next_to_use = 0; + } + + /* Force memory write to complete before letting hardware + * know that there are new descriptors to fetch. 
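+	 * dma_wmb() orders the descriptor stores above against the tail
+	 * doorbell write below, so the device cannot observe the new tail
+	 * value before the descriptors themselves are visible.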
+ */ + dma_wmb(); + + wr32(hw, cq->reg.tail, cq->next_to_use); + +err_unlock: + mutex_unlock(&cq->cq_lock); + + return err; +} + +/** + * idpf_ctlq_clean_sq - reclaim send descriptors on HW write back for the + * requested queue + * @cq: pointer to the specific Control queue + * @clean_count: (input|output) number of descriptors to clean as input, and + * number of descriptors actually cleaned as output + * @msg_status: (output) pointer to msg pointer array to be populated; needs + * to be allocated by caller + * + * Returns an array of message pointers associated with the cleaned + * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned + * descriptors. The status will be returned for each; any messages that failed + * to send will have a non-zero status. The caller is expected to free original + * ctlq_msgs and free or reuse the DMA buffers. + */ +int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count, + struct idpf_ctlq_msg *msg_status[]) +{ + struct idpf_ctlq_desc *desc; + u16 i, num_to_clean; + u16 ntc, desc_err; + + if (*clean_count == 0) + return 0; + if (*clean_count > cq->ring_size) + return -EBADR; + + mutex_lock(&cq->cq_lock); + + ntc = cq->next_to_clean; + + num_to_clean = *clean_count; + + for (i = 0; i < num_to_clean; i++) { + /* Fetch next descriptor and check if marked as done */ + desc = IDPF_CTLQ_DESC(cq, ntc); + if (!(le16_to_cpu(desc->flags) & IDPF_CTLQ_FLAG_DD)) + break; + + /* strip off FW internal code */ + desc_err = le16_to_cpu(desc->ret_val) & 0xff; + + msg_status[i] = cq->bi.tx_msg[ntc]; + msg_status[i]->status = desc_err; + + cq->bi.tx_msg[ntc] = NULL; + + /* Zero out any stale data */ + memset(desc, 0, sizeof(*desc)); + + ntc++; + if (ntc == cq->ring_size) + ntc = 0; + } + + cq->next_to_clean = ntc; + + mutex_unlock(&cq->cq_lock); + + /* Return number of descriptors actually cleaned */ + *clean_count = i; + + return 0; +} + +/** + * idpf_ctlq_post_rx_buffs - post buffers to descriptor ring + * @hw: pointer to hw struct + * @cq: pointer to control queue handle + * @buff_count: (input|output) input is number of buffers caller is trying to + * return; output is number of buffers that were not posted + * @buffs: array of pointers to dma mem structs to be given to hardware + * + * Caller uses this function to return DMA buffers to the descriptor ring after + * consuming them; buff_count will be the number of buffers. + * + * Note: this function needs to be called after a receive call even + * if there are no DMA buffers to be returned, i.e. 
buff_count = 0,
+ * buffs = NULL to support direct commands
+ */
+int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			    u16 *buff_count, struct idpf_dma_mem **buffs)
+{
+	struct idpf_ctlq_desc *desc;
+	u16 ntp = cq->next_to_post;
+	bool buffs_avail = false;
+	u16 tbp = ntp + 1;
+	int i = 0;
+
+	if (*buff_count > cq->ring_size)
+		return -EBADR;
+
+	if (*buff_count > 0)
+		buffs_avail = true;
+
+	mutex_lock(&cq->cq_lock);
+
+	if (tbp >= cq->ring_size)
+		tbp = 0;
+
+	if (tbp == cq->next_to_clean)
+		/* Nothing to do */
+		goto post_buffs_out;
+
+	/* Post buffers for as many as provided or up until the last one used */
+	while (ntp != cq->next_to_clean) {
+		desc = IDPF_CTLQ_DESC(cq, ntp);
+
+		if (cq->bi.rx_buff[ntp])
+			goto fill_desc;
+		if (!buffs_avail) {
+			/* If the caller hasn't given us any buffers or
+			 * there are none left, search the ring itself
+			 * for an available buffer to move to this
+			 * entry starting at the next entry in the ring
+			 */
+			tbp = ntp + 1;
+
+			/* Wrap ring if necessary */
+			if (tbp >= cq->ring_size)
+				tbp = 0;
+
+			while (tbp != cq->next_to_clean) {
+				if (cq->bi.rx_buff[tbp]) {
+					cq->bi.rx_buff[ntp] =
+						cq->bi.rx_buff[tbp];
+					cq->bi.rx_buff[tbp] = NULL;
+
+					/* Found a buffer, no need to
+					 * search anymore
+					 */
+					break;
+				}
+
+				/* Wrap ring if necessary */
+				tbp++;
+				if (tbp >= cq->ring_size)
+					tbp = 0;
+			}
+
+			if (tbp == cq->next_to_clean)
+				goto post_buffs_out;
+		} else {
+			/* Give back pointer to DMA buffer */
+			cq->bi.rx_buff[ntp] = buffs[i];
+			i++;
+
+			if (i >= *buff_count)
+				buffs_avail = false;
+		}
+
+fill_desc:
+		desc->flags =
+			cpu_to_le16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+
+		/* Post buffers to descriptor */
+		desc->datalen = cpu_to_le16(cq->bi.rx_buff[ntp]->size);
+		desc->params.indirect.addr_high =
+			cpu_to_le32(upper_32_bits(cq->bi.rx_buff[ntp]->pa));
+		desc->params.indirect.addr_low =
+			cpu_to_le32(lower_32_bits(cq->bi.rx_buff[ntp]->pa));
+
+		ntp++;
+		if (ntp == cq->ring_size)
+			ntp = 0;
+	}
+
+post_buffs_out:
+	/* Only update tail if buffers were actually posted */
+	if (cq->next_to_post != ntp) {
+		if (ntp)
+			/* Update next_to_post to ntp - 1 since current ntp
+			 * will not have a buffer
+			 */
+			cq->next_to_post = ntp - 1;
+		else
+			/* Wrap to end of ring since current ntp is 0 */
+			cq->next_to_post = cq->ring_size - 1;
+
+		wr32(hw, cq->reg.tail, cq->next_to_post);
+	}
+
+	mutex_unlock(&cq->cq_lock);
+
+	/* return the number of buffers that were not posted */
+	*buff_count = *buff_count - i;
+
+	return 0;
+}
+
+/**
+ * idpf_ctlq_recv - receive control queue message callback
+ * @cq: pointer to control queue handle to receive on
+ * @num_q_msg: (input|output) input number of messages that should be received;
+ *             output number of messages actually received
+ * @q_msg: (output) array of received control queue messages on this q;
+ *         needs to be pre-allocated by caller for as many messages as requested
+ *
+ * Called by interrupt handler or polling mechanism.
Caller is expected + * to free buffers + */ +int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, + struct idpf_ctlq_msg *q_msg) +{ + u16 num_to_clean, ntc, flags; + struct idpf_ctlq_desc *desc; + int err = 0; + u16 i; + + if (*num_q_msg == 0) + return 0; + else if (*num_q_msg > cq->ring_size) + return -EBADR; + + /* take the lock before we start messing with the ring */ + mutex_lock(&cq->cq_lock); + + ntc = cq->next_to_clean; + + num_to_clean = *num_q_msg; + + for (i = 0; i < num_to_clean; i++) { + /* Fetch next descriptor and check if marked as done */ + desc = IDPF_CTLQ_DESC(cq, ntc); + flags = le16_to_cpu(desc->flags); + + if (!(flags & IDPF_CTLQ_FLAG_DD)) + break; + + q_msg[i].vmvf_type = (flags & + (IDPF_CTLQ_FLAG_FTYPE_VM | + IDPF_CTLQ_FLAG_FTYPE_PF)) >> + IDPF_CTLQ_FLAG_FTYPE_S; + + if (flags & IDPF_CTLQ_FLAG_ERR) + err = -EBADMSG; + + q_msg[i].cookie.mbx.chnl_opcode = + le32_to_cpu(desc->v_opcode_dtype); + q_msg[i].cookie.mbx.chnl_retval = + le32_to_cpu(desc->v_retval); + + q_msg[i].opcode = le16_to_cpu(desc->opcode); + q_msg[i].data_len = le16_to_cpu(desc->datalen); + q_msg[i].status = le16_to_cpu(desc->ret_val); + + if (desc->datalen) { + memcpy(q_msg[i].ctx.indirect.context, + &desc->params.indirect, IDPF_INDIRECT_CTX_SIZE); + + /* Assign pointer to dma buffer to ctlq_msg array + * to be given to upper layer + */ + q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc]; + + /* Zero out pointer to DMA buffer info; + * will be repopulated by post buffers API + */ + cq->bi.rx_buff[ntc] = NULL; + } else { + memcpy(q_msg[i].ctx.direct, desc->params.raw, + IDPF_DIRECT_CTX_SIZE); + } + + /* Zero out stale data in descriptor */ + memset(desc, 0, sizeof(struct idpf_ctlq_desc)); + + ntc++; + if (ntc == cq->ring_size) + ntc = 0; + } + + cq->next_to_clean = ntc; + + mutex_unlock(&cq->cq_lock); + + *num_q_msg = i; + if (*num_q_msg == 0) + err = -ENOMSG; + + return err; +} diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.h b/drivers/net/ethernet/intel/idpf/idpf_controlq.h new file mode 100644 index 000000000000..c1aba09e9856 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023 Intel Corporation */ + +#ifndef _IDPF_CONTROLQ_H_ +#define _IDPF_CONTROLQ_H_ + +#include <linux/slab.h> + +#include "idpf_controlq_api.h" + +/* Maximum buffer length for all control queue types */ +#define IDPF_CTLQ_MAX_BUF_LEN 4096 + +#define IDPF_CTLQ_DESC(R, i) \ + (&(((struct idpf_ctlq_desc *)((R)->desc_ring.va))[i])) + +#define IDPF_CTLQ_DESC_UNUSED(R) \ + ((u16)((((R)->next_to_clean > (R)->next_to_use) ? 
0 : (R)->ring_size) + \ + (R)->next_to_clean - (R)->next_to_use - 1)) + +/* Control Queue default settings */ +#define IDPF_CTRL_SQ_CMD_TIMEOUT 250 /* msecs */ + +struct idpf_ctlq_desc { + /* Control queue descriptor flags */ + __le16 flags; + /* Control queue message opcode */ + __le16 opcode; + __le16 datalen; /* 0 for direct commands */ + union { + __le16 ret_val; + __le16 pfid_vfid; +#define IDPF_CTLQ_DESC_VF_ID_S 0 +#define IDPF_CTLQ_DESC_VF_ID_M (0x7FF << IDPF_CTLQ_DESC_VF_ID_S) +#define IDPF_CTLQ_DESC_PF_ID_S 11 +#define IDPF_CTLQ_DESC_PF_ID_M (0x1F << IDPF_CTLQ_DESC_PF_ID_S) + }; + + /* Virtchnl message opcode and virtchnl descriptor type + * v_opcode=[27:0], v_dtype=[31:28] + */ + __le32 v_opcode_dtype; + /* Virtchnl return value */ + __le32 v_retval; + union { + struct { + __le32 param0; + __le32 param1; + __le32 param2; + __le32 param3; + } direct; + struct { + __le32 param0; + __le16 sw_cookie; + /* Virtchnl flags */ + __le16 v_flags; + __le32 addr_high; + __le32 addr_low; + } indirect; + u8 raw[16]; + } params; +}; + +/* Flags sub-structure + * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | + * |DD |CMP|ERR| * RSV * |FTYPE | *RSV* |RD |VFC|BUF| HOST_ID | + */ +/* command flags and offsets */ +#define IDPF_CTLQ_FLAG_DD_S 0 +#define IDPF_CTLQ_FLAG_CMP_S 1 +#define IDPF_CTLQ_FLAG_ERR_S 2 +#define IDPF_CTLQ_FLAG_FTYPE_S 6 +#define IDPF_CTLQ_FLAG_RD_S 10 +#define IDPF_CTLQ_FLAG_VFC_S 11 +#define IDPF_CTLQ_FLAG_BUF_S 12 +#define IDPF_CTLQ_FLAG_HOST_ID_S 13 + +#define IDPF_CTLQ_FLAG_DD BIT(IDPF_CTLQ_FLAG_DD_S) /* 0x1 */ +#define IDPF_CTLQ_FLAG_CMP BIT(IDPF_CTLQ_FLAG_CMP_S) /* 0x2 */ +#define IDPF_CTLQ_FLAG_ERR BIT(IDPF_CTLQ_FLAG_ERR_S) /* 0x4 */ +#define IDPF_CTLQ_FLAG_FTYPE_VM BIT(IDPF_CTLQ_FLAG_FTYPE_S) /* 0x40 */ +#define IDPF_CTLQ_FLAG_FTYPE_PF BIT(IDPF_CTLQ_FLAG_FTYPE_S + 1) /* 0x80 */ +#define IDPF_CTLQ_FLAG_RD BIT(IDPF_CTLQ_FLAG_RD_S) /* 0x400 */ +#define IDPF_CTLQ_FLAG_VFC BIT(IDPF_CTLQ_FLAG_VFC_S) /* 0x800 */ +#define IDPF_CTLQ_FLAG_BUF BIT(IDPF_CTLQ_FLAG_BUF_S) /* 0x1000 */ + +/* Host ID is a special field that has 3b and not a 1b flag */ +#define IDPF_CTLQ_FLAG_HOST_ID_M MAKE_MASK(0x7000UL, IDPF_CTLQ_FLAG_HOST_ID_S) + +struct idpf_mbxq_desc { + u8 pad[8]; /* CTLQ flags/opcode/len/retval fields */ + u32 chnl_opcode; /* avoid confusion with desc->opcode */ + u32 chnl_retval; /* ditto for desc->retval */ + u32 pf_vf_id; /* used by CP when sending to PF */ +}; + +/* Define the driver hardware struct to replace other control structs as needed + * Align to ctlq_hw_info + */ +struct idpf_hw { + void __iomem *hw_addr; + resource_size_t hw_addr_len; + + struct idpf_adapter *back; + + /* control queue - send and receive */ + struct idpf_ctlq_info *asq; + struct idpf_ctlq_info *arq; + + /* pci info */ + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + bool adapter_stopped; + + struct list_head cq_list_head; +}; + +int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, + struct idpf_ctlq_info *cq); + +void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq); + +/* prototype for functions used for dynamic memory allocation */ +void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, + u64 size); +void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem); +#endif /* _IDPF_CONTROLQ_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h new file mode 100644 index 000000000000..8dee098bbfb0 --- /dev/null +++ 
b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023 Intel Corporation */ + +#ifndef _IDPF_CONTROLQ_API_H_ +#define _IDPF_CONTROLQ_API_H_ + +#include "idpf_mem.h" + +struct idpf_hw; + +/* Used for queue init, response and events */ +enum idpf_ctlq_type { + IDPF_CTLQ_TYPE_MAILBOX_TX = 0, + IDPF_CTLQ_TYPE_MAILBOX_RX = 1, + IDPF_CTLQ_TYPE_CONFIG_TX = 2, + IDPF_CTLQ_TYPE_CONFIG_RX = 3, + IDPF_CTLQ_TYPE_EVENT_RX = 4, + IDPF_CTLQ_TYPE_RDMA_TX = 5, + IDPF_CTLQ_TYPE_RDMA_RX = 6, + IDPF_CTLQ_TYPE_RDMA_COMPL = 7 +}; + +/* Generic Control Queue Structures */ +struct idpf_ctlq_reg { + /* used for queue tracking */ + u32 head; + u32 tail; + /* Below applies only to default mb (if present) */ + u32 len; + u32 bah; + u32 bal; + u32 len_mask; + u32 len_ena_mask; + u32 head_mask; +}; + +/* Generic queue msg structure */ +struct idpf_ctlq_msg { + u8 vmvf_type; /* represents the source of the message on recv */ +#define IDPF_VMVF_TYPE_VF 0 +#define IDPF_VMVF_TYPE_VM 1 +#define IDPF_VMVF_TYPE_PF 2 + u8 host_id; + /* 3b field used only when sending a message to CP - to be used in + * combination with target func_id to route the message + */ +#define IDPF_HOST_ID_MASK 0x7 + + u16 opcode; + u16 data_len; /* data_len = 0 when no payload is attached */ + union { + u16 func_id; /* when sending a message */ + u16 status; /* when receiving a message */ + }; + union { + struct { + u32 chnl_opcode; + u32 chnl_retval; + } mbx; + } cookie; + union { +#define IDPF_DIRECT_CTX_SIZE 16 +#define IDPF_INDIRECT_CTX_SIZE 8 + /* 16 bytes of context can be provided or 8 bytes of context + * plus the address of a DMA buffer + */ + u8 direct[IDPF_DIRECT_CTX_SIZE]; + struct { + u8 context[IDPF_INDIRECT_CTX_SIZE]; + struct idpf_dma_mem *payload; + } indirect; + } ctx; +}; + +/* Generic queue info structures */ +/* MB, CONFIG and EVENT q do not have extended info */ +struct idpf_ctlq_create_info { + enum idpf_ctlq_type type; + int id; /* absolute queue offset passed as input + * -1 for default mailbox if present + */ + u16 len; /* Queue length passed as input */ + u16 buf_size; /* buffer size passed as input */ + u64 base_address; /* output, HPA of the Queue start */ + struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */ + + int ext_info_size; + void *ext_info; /* Specific to q type */ +}; + +/* Control Queue information */ +struct idpf_ctlq_info { + struct list_head cq_list; + + enum idpf_ctlq_type cq_type; + int q_id; + struct mutex cq_lock; /* control queue lock */ + /* used for interrupt processing */ + u16 next_to_use; + u16 next_to_clean; + u16 next_to_post; /* starting descriptor to post buffers + * to after receive + */ + + struct idpf_dma_mem desc_ring; /* descriptor ring memory + * idpf_dma_mem is defined in idpf_mem.h + */ + union { + struct idpf_dma_mem **rx_buff; + struct idpf_ctlq_msg **tx_msg; + } bi; + + u16 buf_size; /* queue buffer size */ + u16 ring_size; /* Number of descriptors */ + struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */ +}; + +/** + * enum idpf_mbx_opc - PF/VF mailbox commands + * @idpf_mbq_opc_send_msg_to_cp: used by PF or VF to send a message to its CP + */ +enum idpf_mbx_opc { + idpf_mbq_opc_send_msg_to_cp = 0x0801, +}; + +/* API supported for control queue management */ +/* Will init all required queues, including the default mailbox. "q_info" is + * an array of create_info structs equal to the number of control queues to be + * created.
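+ *
+ * For illustration (the queue length of 64 is a hypothetical value; real
+ * lengths and buffer sizes come from the device configuration), bringing up
+ * the default mailbox pair might look like:
+ *
+ *    struct idpf_ctlq_create_info q_info[2] = {
+ *        { .type = IDPF_CTLQ_TYPE_MAILBOX_TX, .id = -1,
+ *          .len = 64, .buf_size = IDPF_CTLQ_MAX_BUF_LEN },
+ *        { .type = IDPF_CTLQ_TYPE_MAILBOX_RX, .id = -1,
+ *          .len = 64, .buf_size = IDPF_CTLQ_MAX_BUF_LEN },
+ *    };
+ *    int err = idpf_ctlq_init(hw, 2, q_info);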
+ */ +int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q, + struct idpf_ctlq_create_info *q_info); + +/* Allocate and initialize a single control queue, which will be added to the + * control queue list; returns a handle to the created control queue + */ +int idpf_ctlq_add(struct idpf_hw *hw, + struct idpf_ctlq_create_info *qinfo, + struct idpf_ctlq_info **cq); + +/* Deinitialize and deallocate a single control queue */ +void idpf_ctlq_remove(struct idpf_hw *hw, + struct idpf_ctlq_info *cq); + +/* Sends messages to HW and will also free the buffer */ +int idpf_ctlq_send(struct idpf_hw *hw, + struct idpf_ctlq_info *cq, + u16 num_q_msg, + struct idpf_ctlq_msg q_msg[]); + +/* Receives messages; called from the interrupt handler or from polling + * initiated by the app/process. The caller is expected to free the buffers + */ +int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, + struct idpf_ctlq_msg *q_msg); + +/* Reclaims send descriptors on HW write back */ +int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count, + struct idpf_ctlq_msg *msg_status[]); + +/* Indicate RX buffers are done being processed */ +int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, + struct idpf_ctlq_info *cq, + u16 *buff_count, + struct idpf_dma_mem **buffs); + +/* Will destroy all queues, including the default mailbox */ +void idpf_ctlq_deinit(struct idpf_hw *hw); + +#endif /* _IDPF_CONTROLQ_API_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_setup.c b/drivers/net/ethernet/intel/idpf/idpf_controlq_setup.c new file mode 100644 index 000000000000..a942a6385d06 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_setup.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2023 Intel Corporation */ + +#include "idpf_controlq.h" + +/** + * idpf_ctlq_alloc_desc_ring - Allocate Control Queue (CQ) rings + * @hw: pointer to hw struct + * @cq: pointer to the specific Control queue + */ +static int idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw, + struct idpf_ctlq_info *cq) +{ + size_t size = cq->ring_size * sizeof(struct idpf_ctlq_desc); + + cq->desc_ring.va = idpf_alloc_dma_mem(hw, &cq->desc_ring, size); + if (!cq->desc_ring.va) + return -ENOMEM; + + return 0; +} + +/** + * idpf_ctlq_alloc_bufs - Allocate Control Queue (CQ) buffers + * @hw: pointer to hw struct + * @cq: pointer to the specific Control queue + * + * Allocate the buffer head for all control queues, and if it's a receive + * queue, allocate DMA buffers + */ +static int idpf_ctlq_alloc_bufs(struct idpf_hw *hw, + struct idpf_ctlq_info *cq) +{ + int i; + + /* Do not allocate DMA buffers for transmit queues */ + if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX) + return 0; + + /* We'll be allocating the buffer info memory first, then we can + * allocate the mapped buffers for the event processing + */ + cq->bi.rx_buff = kcalloc(cq->ring_size, sizeof(struct idpf_dma_mem *), + GFP_KERNEL); + if (!cq->bi.rx_buff) + return -ENOMEM; + + /* allocate the mapped buffers (except for the last one) */ + for (i = 0; i < cq->ring_size - 1; i++) { + struct idpf_dma_mem *bi; + int num = 1; /* number of idpf_dma_mem to be allocated */ + + cq->bi.rx_buff[i] = kcalloc(num, sizeof(struct idpf_dma_mem), + GFP_KERNEL); + if (!cq->bi.rx_buff[i]) + goto unwind_alloc_cq_bufs; + + bi = cq->bi.rx_buff[i]; + + bi->va = idpf_alloc_dma_mem(hw, bi, cq->buf_size); + if (!bi->va) { + /* unwind will not free the failed entry */ + kfree(cq->bi.rx_buff[i]); + goto unwind_alloc_cq_bufs; + } + } + + return 0; + +unwind_alloc_cq_bufs: + /* don't try to free the
one that failed... */ + i--; + for (; i >= 0; i--) { + idpf_free_dma_mem(hw, cq->bi.rx_buff[i]); + kfree(cq->bi.rx_buff[i]); + } + kfree(cq->bi.rx_buff); + + return -ENOMEM; +} + +/** + * idpf_ctlq_free_desc_ring - Free Control Queue (CQ) rings + * @hw: pointer to hw struct + * @cq: pointer to the specific Control queue + * + * This assumes the posted send buffers have already been cleaned + * and de-allocated + */ +static void idpf_ctlq_free_desc_ring(struct idpf_hw *hw, + struct idpf_ctlq_info *cq) +{ + idpf_free_dma_mem(hw, &cq->desc_ring); +} + +/** + * idpf_ctlq_free_bufs - Free CQ buffer info elements + * @hw: pointer to hw struct + * @cq: pointer to the specific Control queue + * + * Free the DMA buffers for RX queues, and DMA buffer header for both RX and TX + * queues. The upper layers are expected to manage freeing of TX DMA buffers + */ +static void idpf_ctlq_free_bufs(struct idpf_hw *hw, struct idpf_ctlq_info *cq) +{ + void *bi; + + if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX) { + int i; + + /* free DMA buffers for rx queues */ + for (i = 0; i < cq->ring_size; i++) { + if (cq->bi.rx_buff[i]) { + idpf_free_dma_mem(hw, cq->bi.rx_buff[i]); + kfree(cq->bi.rx_buff[i]); + } + } + + bi = (void *)cq->bi.rx_buff; + } else { + bi = (void *)cq->bi.tx_msg; + } + + /* free the buffer header */ + kfree(bi); +} + +/** + * idpf_ctlq_dealloc_ring_res - Free memory allocated for control queue + * @hw: pointer to hw struct + * @cq: pointer to the specific Control queue + * + * Free the memory used by the ring, buffers and other related structures + */ +void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq) +{ + /* free ring buffers and the ring itself */ + idpf_ctlq_free_bufs(hw, cq); + idpf_ctlq_free_desc_ring(hw, cq); +} + +/** + * idpf_ctlq_alloc_ring_res - allocate memory for descriptor ring and bufs + * @hw: pointer to hw struct + * @cq: pointer to control queue struct + * + * Do *NOT* hold cq_lock when calling this as the memory allocation routines + * called are not atomic-context safe + */ +int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq) +{ + int err; + + /* allocate the ring memory */ + err = idpf_ctlq_alloc_desc_ring(hw, cq); + if (err) + return err; + + /* allocate buffers in the rings */ + err = idpf_ctlq_alloc_bufs(hw, cq); + if (err) + goto idpf_init_cq_free_ring; + + /* success!
*/ + return 0; + +idpf_init_cq_free_ring: + idpf_free_dma_mem(hw, &cq->desc_ring); + + return err; +} diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c new file mode 100644 index 000000000000..34ad1ac46b78 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2023 Intel Corporation */ + +#include "idpf.h" +#include "idpf_lan_pf_regs.h" + +#define IDPF_PF_ITR_IDX_SPACING 0x4 + +/** + * idpf_ctlq_reg_init - initialize default mailbox registers + * @cq: pointer to the array of control queues to create + */ +static void idpf_ctlq_reg_init(struct idpf_ctlq_create_info *cq) +{ + int i; + + for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) { + struct idpf_ctlq_create_info *ccq = cq + i; + + switch (ccq->type) { + case IDPF_CTLQ_TYPE_MAILBOX_TX: + /* set head and tail registers in our local struct */ + ccq->reg.head = PF_FW_ATQH; + ccq->reg.tail = PF_FW_ATQT; + ccq->reg.len = PF_FW_ATQLEN; + ccq->reg.bah = PF_FW_ATQBAH; + ccq->reg.bal = PF_FW_ATQBAL; + ccq->reg.len_mask = PF_FW_ATQLEN_ATQLEN_M; + ccq->reg.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M; + ccq->reg.head_mask = PF_FW_ATQH_ATQH_M; + break; + case IDPF_CTLQ_TYPE_MAILBOX_RX: + /* set head and tail registers in our local struct */ + ccq->reg.head = PF_FW_ARQH; + ccq->reg.tail = PF_FW_ARQT; + ccq->reg.len = PF_FW_ARQLEN; + ccq->reg.bah = PF_FW_ARQBAH; + ccq->reg.bal = PF_FW_ARQBAL; + ccq->reg.len_mask = PF_FW_ARQLEN_ARQLEN_M; + ccq->reg.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M; + ccq->reg.head_mask = PF_FW_ARQH_ARQH_M; + break; + default: + break; + } + } +} + +/** + * idpf_mb_intr_reg_init - Initialize mailbox interrupt register + * @adapter: adapter structure + */ +static void idpf_mb_intr_reg_init(struct idpf_adapter *adapter) +{ + struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg; + u32 dyn_ctl = le32_to_cpu(adapter->caps.mailbox_dyn_ctl); + + intr->dyn_ctl = idpf_get_reg_addr(adapter, dyn_ctl); + intr->dyn_ctl_intena_m = PF_GLINT_DYN_CTL_INTENA_M; + intr->dyn_ctl_itridx_m = PF_GLINT_DYN_CTL_ITR_INDX_M; + intr->icr_ena = idpf_get_reg_addr(adapter, PF_INT_DIR_OICR_ENA); + intr->icr_ena_ctlq_m = PF_INT_DIR_OICR_ENA_M; +} + +/** + * idpf_intr_reg_init - Initialize interrupt registers + * @vport: virtual port structure + */ +static int idpf_intr_reg_init(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + int num_vecs = vport->num_q_vectors; + struct idpf_vec_regs *reg_vals; + int num_regs, i, err = 0; + u32 rx_itr, tx_itr; + u16 total_vecs; + + total_vecs = idpf_get_reserved_vecs(vport->adapter); + reg_vals = kcalloc(total_vecs, sizeof(struct idpf_vec_regs), + GFP_KERNEL); + if (!reg_vals) + return -ENOMEM; + + num_regs = idpf_get_reg_intr_vecs(vport, reg_vals); + if (num_regs < num_vecs) { + err = -EINVAL; + goto free_reg_vals; + } + + for (i = 0; i < num_vecs; i++) { + struct idpf_q_vector *q_vector = &vport->q_vectors[i]; + u16 vec_id = vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC; + struct idpf_intr_reg *intr = &q_vector->intr_reg; + u32 spacing; + + intr->dyn_ctl = idpf_get_reg_addr(adapter, + reg_vals[vec_id].dyn_ctl_reg); + intr->dyn_ctl_intena_m = PF_GLINT_DYN_CTL_INTENA_M; + intr->dyn_ctl_itridx_s = PF_GLINT_DYN_CTL_ITR_INDX_S; + intr->dyn_ctl_intrvl_s = PF_GLINT_DYN_CTL_INTERVAL_S; + + spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing, + IDPF_PF_ITR_IDX_SPACING); + rx_itr = PF_GLINT_ITR_ADDR(VIRTCHNL2_ITR_IDX_0, + reg_vals[vec_id].itrn_reg, + spacing); + tx_itr =
PF_GLINT_ITR_ADDR(VIRTCHNL2_ITR_IDX_1, + reg_vals[vec_id].itrn_reg, + spacing); + intr->rx_itr = idpf_get_reg_addr(adapter, rx_itr); + intr->tx_itr = idpf_get_reg_addr(adapter, tx_itr); + } + +free_reg_vals: + kfree(reg_vals); + + return err; +} + +/** + * idpf_reset_reg_init - Initialize reset registers + * @adapter: Driver specific private structure + */ +static void idpf_reset_reg_init(struct idpf_adapter *adapter) +{ + adapter->reset_reg.rstat = idpf_get_reg_addr(adapter, PFGEN_RSTAT); + adapter->reset_reg.rstat_m = PFGEN_RSTAT_PFR_STATE_M; +} + +/** + * idpf_trigger_reset - trigger reset + * @adapter: Driver specific private structure + * @trig_cause: Reason to trigger a reset + */ +static void idpf_trigger_reset(struct idpf_adapter *adapter, + enum idpf_flags __always_unused trig_cause) +{ + u32 reset_reg; + + reset_reg = readl(idpf_get_reg_addr(adapter, PFGEN_CTRL)); + writel(reset_reg | PFGEN_CTRL_PFSWR, + idpf_get_reg_addr(adapter, PFGEN_CTRL)); +} + +/** + * idpf_reg_ops_init - Initialize register API function pointers + * @adapter: Driver specific private structure + */ +static void idpf_reg_ops_init(struct idpf_adapter *adapter) +{ + adapter->dev_ops.reg_ops.ctlq_reg_init = idpf_ctlq_reg_init; + adapter->dev_ops.reg_ops.intr_reg_init = idpf_intr_reg_init; + adapter->dev_ops.reg_ops.mb_intr_reg_init = idpf_mb_intr_reg_init; + adapter->dev_ops.reg_ops.reset_reg_init = idpf_reset_reg_init; + adapter->dev_ops.reg_ops.trigger_reset = idpf_trigger_reset; +} + +/** + * idpf_dev_ops_init - Initialize device API function pointers + * @adapter: Driver specific private structure + */ +void idpf_dev_ops_init(struct idpf_adapter *adapter) +{ + idpf_reg_ops_init(adapter); +} diff --git a/drivers/net/ethernet/intel/idpf/idpf_devids.h b/drivers/net/ethernet/intel/idpf/idpf_devids.h new file mode 100644 index 000000000000..5154a52ae61c --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_devids.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023 Intel Corporation */ + +#ifndef _IDPF_DEVIDS_H_ +#define _IDPF_DEVIDS_H_ + +#define IDPF_DEV_ID_PF 0x1452 +#define IDPF_DEV_ID_VF 0x145C + +#endif /* _IDPF_DEVIDS_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c new file mode 100644 index 000000000000..52ea38669f85 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c @@ -0,0 +1,1369 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2023 Intel Corporation */ + +#include "idpf.h" + +/** + * idpf_get_rxnfc - command to get RX flow classification rules + * @netdev: network interface device structure + * @cmd: ethtool rxnfc command + * @rule_locs: pointer to store rule locations + * + * Returns 0 if the command is supported. + */ +static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, + u32 __always_unused *rule_locs) +{ + struct idpf_vport *vport; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = vport->num_rxq; + idpf_vport_ctrl_unlock(netdev); + + return 0; + default: + break; + } + + idpf_vport_ctrl_unlock(netdev); + + return -EOPNOTSUPP; +} + +/** + * idpf_get_rxfh_key_size - get the RSS hash key size + * @netdev: network interface device structure + * + * Returns the key size on success, error value on failure.
+ */ +static u32 idpf_get_rxfh_key_size(struct net_device *netdev) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_user_config_data *user_config; + + if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) + return -EOPNOTSUPP; + + user_config = &np->adapter->vport_config[np->vport_idx]->user_config; + + return user_config->rss_data.rss_key_size; +} + +/** + * idpf_get_rxfh_indir_size - get the rx flow hash indirection table size + * @netdev: network interface device structure + * + * Returns the table size on success, error value on failure. + */ +static u32 idpf_get_rxfh_indir_size(struct net_device *netdev) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_user_config_data *user_config; + + if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) + return -EOPNOTSUPP; + + user_config = &np->adapter->vport_config[np->vport_idx]->user_config; + + return user_config->rss_data.rss_lut_size; +} + +/** + * idpf_get_rxfh - get the rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * @hfunc: hash function in use + * + * Reads the indirection table and hash key from the driver's cached copy. + * Returns 0 on success, negative on failure. + */ +static int idpf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_rss_data *rss_data; + struct idpf_adapter *adapter; + int err = 0; + u16 i; + + idpf_vport_ctrl_lock(netdev); + + adapter = np->adapter; + + if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) { + err = -EOPNOTSUPP; + goto unlock_mutex; + } + + rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data; + if (np->state != __IDPF_VPORT_UP) + goto unlock_mutex; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (key) + memcpy(key, rss_data->rss_key, rss_data->rss_key_size); + + if (indir) { + for (i = 0; i < rss_data->rss_lut_size; i++) + indir[i] = rss_data->rss_lut[i]; + } + +unlock_mutex: + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +/** + * idpf_set_rxfh - set the rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * @hfunc: hash function to use + * + * Returns -EINVAL if the table specifies an invalid queue id, otherwise + * returns 0 after programming the table.
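+ *
+ * From userspace this path is typically exercised with ethtool, e.g.
+ *    ethtool -X eth0 equal 8
+ * (assuming an interface named eth0), which spreads the indirection table
+ * evenly across the first 8 queues.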
+ */ +static int idpf_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_rss_data *rss_data; + struct idpf_adapter *adapter; + struct idpf_vport *vport; + int err = 0; + u16 lut; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + adapter = vport->adapter; + + if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) { + err = -EOPNOTSUPP; + goto unlock_mutex; + } + + rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; + if (np->state != __IDPF_VPORT_UP) + goto unlock_mutex; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) { + err = -EOPNOTSUPP; + goto unlock_mutex; + } + + if (key) + memcpy(rss_data->rss_key, key, rss_data->rss_key_size); + + if (indir) { + for (lut = 0; lut < rss_data->rss_lut_size; lut++) + rss_data->rss_lut[lut] = indir[lut]; + } + + err = idpf_config_rss(vport); + +unlock_mutex: + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +/** + * idpf_get_channels: get the number of channels supported by the device + * @netdev: network interface device structure + * @ch: channel information structure + * + * Report maximum of TX and RX. Report one extra channel to match our MailBox + * Queue. + */ +static void idpf_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_config *vport_config; + u16 num_txq, num_rxq; + u16 combined; + + vport_config = np->adapter->vport_config[np->vport_idx]; + + num_txq = vport_config->user_config.num_req_tx_qs; + num_rxq = vport_config->user_config.num_req_rx_qs; + + combined = min(num_txq, num_rxq); + + /* Report maximum channels */ + ch->max_combined = min_t(u16, vport_config->max_q.max_txq, + vport_config->max_q.max_rxq); + ch->max_rx = vport_config->max_q.max_rxq; + ch->max_tx = vport_config->max_q.max_txq; + + ch->max_other = IDPF_MAX_MBXQ; + ch->other_count = IDPF_MAX_MBXQ; + + ch->combined_count = combined; + ch->rx_count = num_rxq - combined; + ch->tx_count = num_txq - combined; +} + +/** + * idpf_set_channels: set the new channel count + * @netdev: network interface device structure + * @ch: channel information structure + * + * Negotiate a new number of channels with CP. Returns 0 on success, negative + * on failure. 
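+ *
+ * A worked example of the combined/rx/tx accounting used here and in
+ * idpf_get_channels(): with num_txq = 8 and num_rxq = 4, combined =
+ * min(8, 4) = 4, so ethtool sees combined_count = 4, tx_count = 8 - 4 = 4
+ * and rx_count = 4 - 4 = 0.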
+ */ +static int idpf_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct idpf_vport_config *vport_config; + u16 combined, num_txq, num_rxq; + unsigned int num_req_tx_q; + unsigned int num_req_rx_q; + struct idpf_vport *vport; + struct device *dev; + int err = 0; + u16 idx; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + idx = vport->idx; + vport_config = vport->adapter->vport_config[idx]; + + num_txq = vport_config->user_config.num_req_tx_qs; + num_rxq = vport_config->user_config.num_req_rx_qs; + + combined = min(num_txq, num_rxq); + + /* these checks are for cases where user didn't specify a particular + * value on cmd line but we get non-zero value anyway via + * get_channels(); look at ethtool.c in ethtool repository (the user + * space part), particularly, do_schannels() routine + */ + if (ch->combined_count == combined) + ch->combined_count = 0; + if (ch->combined_count && ch->rx_count == num_rxq - combined) + ch->rx_count = 0; + if (ch->combined_count && ch->tx_count == num_txq - combined) + ch->tx_count = 0; + + num_req_tx_q = ch->combined_count + ch->tx_count; + num_req_rx_q = ch->combined_count + ch->rx_count; + + dev = &vport->adapter->pdev->dev; + /* It's possible to specify number of queues that exceeds max. + * Stack checks max combined_count and max [tx|rx]_count but not the + * max combined_count + [tx|rx]_count. These checks should catch that. + */ + if (num_req_tx_q > vport_config->max_q.max_txq) { + dev_info(dev, "Maximum TX queues is %d\n", + vport_config->max_q.max_txq); + err = -EINVAL; + goto unlock_mutex; + } + if (num_req_rx_q > vport_config->max_q.max_rxq) { + dev_info(dev, "Maximum RX queues is %d\n", + vport_config->max_q.max_rxq); + err = -EINVAL; + goto unlock_mutex; + } + + if (num_req_tx_q == num_txq && num_req_rx_q == num_rxq) + goto unlock_mutex; + + vport_config->user_config.num_req_tx_qs = num_req_tx_q; + vport_config->user_config.num_req_rx_qs = num_req_rx_q; + + err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE); + if (err) { + /* roll back queue change */ + vport_config->user_config.num_req_tx_qs = num_txq; + vport_config->user_config.num_req_rx_qs = num_rxq; + } + +unlock_mutex: + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +/** + * idpf_get_ringparam - Get ring parameters + * @netdev: network interface device structure + * @ring: ethtool ringparam structure + * @kring: unused + * @ext_ack: unused + * + * Returns current ring parameters. TX and RX rings are reported separately, + * but the number of rings is not reported. + */ +static void idpf_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kring, + struct netlink_ext_ack *ext_ack) +{ + struct idpf_vport *vport; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + ring->rx_max_pending = IDPF_MAX_RXQ_DESC; + ring->tx_max_pending = IDPF_MAX_TXQ_DESC; + ring->rx_pending = vport->rxq_desc_count; + ring->tx_pending = vport->txq_desc_count; + + idpf_vport_ctrl_unlock(netdev); +} + +/** + * idpf_set_ringparam - Set ring parameters + * @netdev: network interface device structure + * @ring: ethtool ringparam structure + * @kring: unused + * @ext_ack: unused + * + * Sets ring parameters. TX and RX rings are controlled separately, but the + * number of rings is not specified, so all rings get the same settings. 
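+ *
+ * Requested counts are aligned up to the descriptor multiples; e.g. assuming
+ * a Tx multiple of 32 (an illustrative value for IDPF_REQ_DESC_MULTIPLE), a
+ * request for tx_pending = 500 is rounded up to 512 and reported back via
+ * netdev_info().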
+ */ +static int idpf_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kring, + struct netlink_ext_ack *ext_ack) +{ + struct idpf_vport_user_config_data *config_data; + u32 new_rx_count, new_tx_count; + struct idpf_vport *vport; + int i, err = 0; + u16 idx; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + idx = vport->idx; + + if (ring->tx_pending < IDPF_MIN_TXQ_DESC) { + netdev_err(netdev, "Descriptors requested (Tx: %u) is less than min supported (%u)\n", + ring->tx_pending, + IDPF_MIN_TXQ_DESC); + err = -EINVAL; + goto unlock_mutex; + } + + if (ring->rx_pending < IDPF_MIN_RXQ_DESC) { + netdev_err(netdev, "Descriptors requested (Rx: %u) is less than min supported (%u)\n", + ring->rx_pending, + IDPF_MIN_RXQ_DESC); + err = -EINVAL; + goto unlock_mutex; + } + + new_rx_count = ALIGN(ring->rx_pending, IDPF_REQ_RXQ_DESC_MULTIPLE); + if (new_rx_count != ring->rx_pending) + netdev_info(netdev, "Requested Rx descriptor count rounded up to %u\n", + new_rx_count); + + new_tx_count = ALIGN(ring->tx_pending, IDPF_REQ_DESC_MULTIPLE); + if (new_tx_count != ring->tx_pending) + netdev_info(netdev, "Requested Tx descriptor count rounded up to %u\n", + new_tx_count); + + if (new_tx_count == vport->txq_desc_count && + new_rx_count == vport->rxq_desc_count) + goto unlock_mutex; + + config_data = &vport->adapter->vport_config[idx]->user_config; + config_data->num_req_txq_desc = new_tx_count; + config_data->num_req_rxq_desc = new_rx_count; + + /* Since we adjusted the RX completion queue count, the RX buffer queue + * descriptor count needs to be adjusted as well + */ + for (i = 0; i < vport->num_bufqs_per_qgrp; i++) + vport->bufq_desc_count[i] = + IDPF_RX_BUFQ_DESC_COUNT(new_rx_count, + vport->num_bufqs_per_qgrp); + + err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_DESC_CHANGE); + +unlock_mutex: + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +/** + * struct idpf_stats - definition for an ethtool statistic + * @stat_string: statistic name to display in ethtool -S output + * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64) + * @stat_offset: offsetof() the stat from a base pointer + * + * This structure defines a statistic to be added to the ethtool stats buffer. + * It defines a statistic as offset from a common base pointer. Stats should + * be defined in constant arrays using the IDPF_STAT macro, with every element + * of the array using the same _type for calculating the sizeof_stat and + * stat_offset. + * + * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or + * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from + * the idpf_add_ethtool_stat() helper function. + * + * The @stat_string is interpreted as a format string, allowing formatted + * values to be inserted while looping over multiple structures for a given + * statistics array. Thus, every statistic string in an array should have the + * same type and number of format specifiers, to be formatted by variadic + * arguments to the idpf_add_stat_string() helper function. + */ +struct idpf_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +/* Helper macro to define an idpf_stat structure with proper size and type. + * Use this when defining constant statistics arrays. Note that @_type expects + * only a type name and is used multiple times. 
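+ *
+ * For example, IDPF_STAT(struct idpf_queue, "pkts", q_stats.tx.packets)
+ * expands to an initializer with .stat_string = "pkts",
+ * .sizeof_stat = sizeof_field(struct idpf_queue, q_stats.tx.packets) and
+ * .stat_offset = offsetof(struct idpf_queue, q_stats.tx.packets).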
+ */ +#define IDPF_STAT(_type, _name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(_type, _stat), \ + .stat_offset = offsetof(_type, _stat) \ +} + +/* Helper macro for defining some statistics related to queues */ +#define IDPF_QUEUE_STAT(_name, _stat) \ + IDPF_STAT(struct idpf_queue, _name, _stat) + +/* Stats associated with a Tx queue */ +static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = { + IDPF_QUEUE_STAT("pkts", q_stats.tx.packets), + IDPF_QUEUE_STAT("bytes", q_stats.tx.bytes), + IDPF_QUEUE_STAT("lso_pkts", q_stats.tx.lso_pkts), +}; + +/* Stats associated with an Rx queue */ +static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = { + IDPF_QUEUE_STAT("pkts", q_stats.rx.packets), + IDPF_QUEUE_STAT("bytes", q_stats.rx.bytes), + IDPF_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rx.rsc_pkts), +}; + +#define IDPF_TX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_tx_queue_stats) +#define IDPF_RX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_rx_queue_stats) + +#define IDPF_PORT_STAT(_name, _stat) \ + IDPF_STAT(struct idpf_vport, _name, _stat) + +static const struct idpf_stats idpf_gstrings_port_stats[] = { + IDPF_PORT_STAT("rx-csum_errors", port_stats.rx_hw_csum_err), + IDPF_PORT_STAT("rx-hsplit", port_stats.rx_hsplit), + IDPF_PORT_STAT("rx-hsplit_hbo", port_stats.rx_hsplit_hbo), + IDPF_PORT_STAT("rx-bad_descs", port_stats.rx_bad_descs), + IDPF_PORT_STAT("tx-skb_drops", port_stats.tx_drops), + IDPF_PORT_STAT("tx-dma_map_errs", port_stats.tx_dma_map_errs), + IDPF_PORT_STAT("tx-linearized_pkts", port_stats.tx_linearize), + IDPF_PORT_STAT("tx-busy_events", port_stats.tx_busy), + IDPF_PORT_STAT("rx-unicast_pkts", port_stats.vport_stats.rx_unicast), + IDPF_PORT_STAT("rx-multicast_pkts", port_stats.vport_stats.rx_multicast), + IDPF_PORT_STAT("rx-broadcast_pkts", port_stats.vport_stats.rx_broadcast), + IDPF_PORT_STAT("rx-unknown_protocol", port_stats.vport_stats.rx_unknown_protocol), + IDPF_PORT_STAT("tx-unicast_pkts", port_stats.vport_stats.tx_unicast), + IDPF_PORT_STAT("tx-multicast_pkts", port_stats.vport_stats.tx_multicast), + IDPF_PORT_STAT("tx-broadcast_pkts", port_stats.vport_stats.tx_broadcast), +}; + +#define IDPF_PORT_STATS_LEN ARRAY_SIZE(idpf_gstrings_port_stats) + +/** + * __idpf_add_qstat_strings - copy stat strings into ethtool buffer + * @p: ethtool supplied buffer + * @stats: stat definitions array + * @size: size of the stats array + * @type: stat type + * @idx: stat index + * + * Format and copy the strings described by stats into the buffer pointed at + * by p. + */ +static void __idpf_add_qstat_strings(u8 **p, const struct idpf_stats *stats, + const unsigned int size, const char *type, + unsigned int idx) +{ + unsigned int i; + + for (i = 0; i < size; i++) + ethtool_sprintf(p, "%s_q-%u_%s", + type, idx, stats[i].stat_string); +} + +/** + * idpf_add_qstat_strings - Copy queue stat strings into ethtool buffer + * @p: ethtool supplied buffer + * @stats: stat definitions array + * @type: stat type + * @idx: stat idx + * + * Format and copy the strings described by the const static stats value into + * the buffer pointed at by p. + * + * The parameter @stats is evaluated twice, so parameters with side effects + * should be avoided. Additionally, stats must be an array such that + * ARRAY_SIZE can be called on it. 
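+ *
+ * With the arrays above, idpf_add_qstat_strings(&p,
+ * idpf_gstrings_tx_queue_stats, "tx", 0) emits "tx_q-0_pkts", "tx_q-0_bytes"
+ * and "tx_q-0_lso_pkts", following the "%s_q-%u_%s" format used by
+ * __idpf_add_qstat_strings().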
+ */ +#define idpf_add_qstat_strings(p, stats, type, idx) \ + __idpf_add_qstat_strings(p, stats, ARRAY_SIZE(stats), type, idx) + +/** + * idpf_add_stat_strings - Copy port stat strings into ethtool buffer + * @p: ethtool buffer + * @stats: struct to copy from + * @size: size of stats array to copy from + */ +static void idpf_add_stat_strings(u8 **p, const struct idpf_stats *stats, + const unsigned int size) +{ + unsigned int i; + + for (i = 0; i < size; i++) + ethtool_sprintf(p, "%s", stats[i].stat_string); +} + +/** + * idpf_get_stat_strings - Get stat strings + * @netdev: network interface device structure + * @data: buffer for string data + * + * Builds the statistics string table + */ +static void idpf_get_stat_strings(struct net_device *netdev, u8 *data) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_config *vport_config; + unsigned int i; + + idpf_add_stat_strings(&data, idpf_gstrings_port_stats, + IDPF_PORT_STATS_LEN); + + vport_config = np->adapter->vport_config[np->vport_idx]; + /* It's critical that we always report a constant number of strings and + * that the strings are reported in the same order regardless of how + * many queues are actually in use. + */ + for (i = 0; i < vport_config->max_q.max_txq; i++) + idpf_add_qstat_strings(&data, idpf_gstrings_tx_queue_stats, + "tx", i); + + for (i = 0; i < vport_config->max_q.max_rxq; i++) + idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats, + "rx", i); + + page_pool_ethtool_stats_get_strings(data); +} + +/** + * idpf_get_strings - Get string set + * @netdev: network interface device structure + * @sset: id of string set + * @data: buffer for string data + * + * Builds string tables for various string sets + */ +static void idpf_get_strings(struct net_device *netdev, u32 sset, u8 *data) +{ + switch (sset) { + case ETH_SS_STATS: + idpf_get_stat_strings(netdev, data); + break; + default: + break; + } +} + +/** + * idpf_get_sset_count - Get length of string set + * @netdev: network interface device structure + * @sset: id of string set + * + * Reports size of various string tables. + */ +static int idpf_get_sset_count(struct net_device *netdev, int sset) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_config *vport_config; + u16 max_txq, max_rxq; + unsigned int size; + + if (sset != ETH_SS_STATS) + return -EINVAL; + + vport_config = np->adapter->vport_config[np->vport_idx]; + /* This size reported back here *must* be constant throughout the + * lifecycle of the netdevice, i.e. we must report the maximum length + * even for queues that don't technically exist. This is due to the + * fact that this userspace API uses three separate ioctl calls to get + * stats data but has no way to communicate back to userspace when that + * size has changed, which can typically happen as a result of changing + * number of queues. If the number/order of stats change in the middle + * of this call chain it will lead to userspace crashing/accessing bad + * data through buffer under/overflow. 
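+ *
+ * Worked example: a hypothetical vport with max_txq = 16 and max_rxq = 16
+ * reports IDPF_PORT_STATS_LEN + 16 * IDPF_TX_QUEUE_STATS_LEN +
+ * 16 * IDPF_RX_QUEUE_STATS_LEN = 15 + 48 + 48 = 111 entries, plus whatever
+ * page_pool_ethtool_stats_get_count() adds.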
+ */ + max_txq = vport_config->max_q.max_txq; + max_rxq = vport_config->max_q.max_rxq; + + size = IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) + + (IDPF_RX_QUEUE_STATS_LEN * max_rxq); + size += page_pool_ethtool_stats_get_count(); + + return size; +} + +/** + * idpf_add_one_ethtool_stat - copy the stat into the supplied buffer + * @data: location to store the stat value + * @pstat: old stat pointer to copy from + * @stat: the stat definition + * + * Copies the stat data defined by the pointer and stat structure pair into + * the memory supplied as data. If the pointer is null, data will be zero'd. + */ +static void idpf_add_one_ethtool_stat(u64 *data, void *pstat, + const struct idpf_stats *stat) +{ + char *p; + + if (!pstat) { + /* Ensure that the ethtool data buffer is zero'd for any stats + * which don't have a valid pointer. + */ + *data = 0; + return; + } + + p = (char *)pstat + stat->stat_offset; + switch (stat->sizeof_stat) { + case sizeof(u64): + *data = *((u64 *)p); + break; + case sizeof(u32): + *data = *((u32 *)p); + break; + case sizeof(u16): + *data = *((u16 *)p); + break; + case sizeof(u8): + *data = *((u8 *)p); + break; + default: + WARN_ONCE(1, "unexpected stat size for %s", + stat->stat_string); + *data = 0; + } +} + +/** + * idpf_add_queue_stats - copy queue statistics into supplied buffer + * @data: ethtool stats buffer + * @q: the queue to copy + * + * Queue statistics must be copied while protected by u64_stats_fetch_begin, + * so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats + * are defined in idpf_gstrings_queue_stats. If the queue pointer is null, + * zero out the queue stat values and update the data pointer. Otherwise + * safely copy the stats from the queue into the supplied buffer and update + * the data pointer when finished. + * + * This function expects to be called while under rcu_read_lock(). + */ +static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q) +{ + const struct idpf_stats *stats; + unsigned int start; + unsigned int size; + unsigned int i; + + if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) { + size = IDPF_RX_QUEUE_STATS_LEN; + stats = idpf_gstrings_rx_queue_stats; + } else { + size = IDPF_TX_QUEUE_STATS_LEN; + stats = idpf_gstrings_tx_queue_stats; + } + + /* To avoid invalid statistics values, ensure that we keep retrying + * the copy until we get a consistent value according to + * u64_stats_fetch_retry. + */ + do { + start = u64_stats_fetch_begin(&q->stats_sync); + for (i = 0; i < size; i++) + idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]); + } while (u64_stats_fetch_retry(&q->stats_sync, start)); + + /* Once we successfully copy the stats in, update the data pointer */ + *data += size; +} + +/** + * idpf_add_empty_queue_stats - Add stats for a non-existent queue + * @data: pointer to data buffer + * @qtype: type of data queue + * + * We must report a constant length of stats back to userspace regardless of + * how many queues are actually in use because stats collection happens over + * three separate ioctls and there's no way to notify userspace the size + * changed between those calls. This adds empty stats to the data buffer since + * we don't have a real queue to refer to for this stats slot.
+ */ +static void idpf_add_empty_queue_stats(u64 **data, u16 qtype) +{ + unsigned int i; + int stats_len; + + if (qtype == VIRTCHNL2_QUEUE_TYPE_RX) + stats_len = IDPF_RX_QUEUE_STATS_LEN; + else + stats_len = IDPF_TX_QUEUE_STATS_LEN; + + for (i = 0; i < stats_len; i++) + (*data)[i] = 0; + *data += stats_len; +} + +/** + * idpf_add_port_stats - Copy port stats into ethtool buffer + * @vport: virtual port struct + * @data: ethtool buffer to copy into + */ +static void idpf_add_port_stats(struct idpf_vport *vport, u64 **data) +{ + unsigned int size = IDPF_PORT_STATS_LEN; + unsigned int start; + unsigned int i; + + do { + start = u64_stats_fetch_begin(&vport->port_stats.stats_sync); + for (i = 0; i < size; i++) + idpf_add_one_ethtool_stat(&(*data)[i], vport, + &idpf_gstrings_port_stats[i]); + } while (u64_stats_fetch_retry(&vport->port_stats.stats_sync, start)); + + *data += size; +} + +/** + * idpf_collect_queue_stats - accumulate various per queue stats + * into port level stats + * @vport: pointer to vport struct + **/ +static void idpf_collect_queue_stats(struct idpf_vport *vport) +{ + struct idpf_port_stats *pstats = &vport->port_stats; + int i, j; + + /* zero out port stats since they're actually tracked in per + * queue stats; this is only for reporting + */ + u64_stats_update_begin(&pstats->stats_sync); + u64_stats_set(&pstats->rx_hw_csum_err, 0); + u64_stats_set(&pstats->rx_hsplit, 0); + u64_stats_set(&pstats->rx_hsplit_hbo, 0); + u64_stats_set(&pstats->rx_bad_descs, 0); + u64_stats_set(&pstats->tx_linearize, 0); + u64_stats_set(&pstats->tx_busy, 0); + u64_stats_set(&pstats->tx_drops, 0); + u64_stats_set(&pstats->tx_dma_map_errs, 0); + u64_stats_update_end(&pstats->stats_sync); + + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i]; + u16 num_rxq; + + if (idpf_is_queue_model_split(vport->rxq_model)) + num_rxq = rxq_grp->splitq.num_rxq_sets; + else + num_rxq = rxq_grp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++) { + u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs; + struct idpf_rx_queue_stats *stats; + struct idpf_queue *rxq; + unsigned int start; + + if (idpf_is_queue_model_split(vport->rxq_model)) + rxq = &rxq_grp->splitq.rxq_sets[j]->rxq; + else + rxq = rxq_grp->singleq.rxqs[j]; + + if (!rxq) + continue; + + do { + start = u64_stats_fetch_begin(&rxq->stats_sync); + + stats = &rxq->q_stats.rx; + hw_csum_err = u64_stats_read(&stats->hw_csum_err); + hsplit = u64_stats_read(&stats->hsplit_pkts); + hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf); + bad_descs = u64_stats_read(&stats->bad_descs); + } while (u64_stats_fetch_retry(&rxq->stats_sync, start)); + + u64_stats_update_begin(&pstats->stats_sync); + u64_stats_add(&pstats->rx_hw_csum_err, hw_csum_err); + u64_stats_add(&pstats->rx_hsplit, hsplit); + u64_stats_add(&pstats->rx_hsplit_hbo, hsplit_hbo); + u64_stats_add(&pstats->rx_bad_descs, bad_descs); + u64_stats_update_end(&pstats->stats_sync); + } + } + + for (i = 0; i < vport->num_txq_grp; i++) { + struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; + + for (j = 0; j < txq_grp->num_txq; j++) { + u64 linearize, qbusy, skb_drops, dma_map_errs; + struct idpf_queue *txq = txq_grp->txqs[j]; + struct idpf_tx_queue_stats *stats; + unsigned int start; + + if (!txq) + continue; + + do { + start = u64_stats_fetch_begin(&txq->stats_sync); + + stats = &txq->q_stats.tx; + linearize = u64_stats_read(&stats->linearize); + qbusy = u64_stats_read(&stats->q_busy); + skb_drops = u64_stats_read(&stats->skb_drops); + dma_map_errs = 
u64_stats_read(&stats->dma_map_errs); + } while (u64_stats_fetch_retry(&txq->stats_sync, start)); + + u64_stats_update_begin(&pstats->stats_sync); + u64_stats_add(&pstats->tx_linearize, linearize); + u64_stats_add(&pstats->tx_busy, qbusy); + u64_stats_add(&pstats->tx_drops, skb_drops); + u64_stats_add(&pstats->tx_dma_map_errs, dma_map_errs); + u64_stats_update_end(&pstats->stats_sync); + } + } +} + +/** + * idpf_get_ethtool_stats - report device statistics + * @netdev: network interface device structure + * @stats: ethtool statistics structure + * @data: pointer to data buffer + * + * All statistics are added to the data buffer as an array of u64. + */ +static void idpf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_config *vport_config; + struct page_pool_stats pp_stats = { }; + struct idpf_vport *vport; + unsigned int total = 0; + unsigned int i, j; + bool is_splitq; + u16 qtype; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + if (np->state != __IDPF_VPORT_UP) { + idpf_vport_ctrl_unlock(netdev); + + return; + } + + rcu_read_lock(); + + idpf_collect_queue_stats(vport); + idpf_add_port_stats(vport, &data); + + for (i = 0; i < vport->num_txq_grp; i++) { + struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; + + qtype = VIRTCHNL2_QUEUE_TYPE_TX; + + for (j = 0; j < txq_grp->num_txq; j++, total++) { + struct idpf_queue *txq = txq_grp->txqs[j]; + + if (!txq) + idpf_add_empty_queue_stats(&data, qtype); + else + idpf_add_queue_stats(&data, txq); + } + } + + vport_config = vport->adapter->vport_config[vport->idx]; + /* It is critical we provide a constant number of stats back to + * userspace regardless of how many queues are actually in use because + * there is no way to inform userspace the size has changed between + * ioctl calls. This will fill in any missing stats with zero. 
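+ *
+ * E.g. if only 4 of a hypothetical max_q.max_txq = 16 Tx queues exist,
+ * the remaining 12 slots are zero-filled by idpf_add_empty_queue_stats()
+ * so the buffer layout never changes.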
+ */ + for (; total < vport_config->max_q.max_txq; total++) + idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_TX); + total = 0; + + is_splitq = idpf_is_queue_model_split(vport->rxq_model); + + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i]; + u16 num_rxq; + + qtype = VIRTCHNL2_QUEUE_TYPE_RX; + + if (is_splitq) + num_rxq = rxq_grp->splitq.num_rxq_sets; + else + num_rxq = rxq_grp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++, total++) { + struct idpf_queue *rxq; + + if (is_splitq) + rxq = &rxq_grp->splitq.rxq_sets[j]->rxq; + else + rxq = rxq_grp->singleq.rxqs[j]; + if (!rxq) + idpf_add_empty_queue_stats(&data, qtype); + else + idpf_add_queue_stats(&data, rxq); + + /* In splitq mode, don't get page pool stats here since + * the pools are attached to the buffer queues + */ + if (is_splitq) + continue; + + if (rxq) + page_pool_get_stats(rxq->pp, &pp_stats); + } + } + + for (i = 0; i < vport->num_rxq_grp; i++) { + for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + struct idpf_queue *rxbufq = + &vport->rxq_grps[i].splitq.bufq_sets[j].bufq; + + page_pool_get_stats(rxbufq->pp, &pp_stats); + } + } + + for (; total < vport_config->max_q.max_rxq; total++) + idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX); + + page_pool_ethtool_stats_get(data, &pp_stats); + + rcu_read_unlock(); + + idpf_vport_ctrl_unlock(netdev); +} + +/** + * idpf_find_rxq - find rxq from q index + * @vport: virtual port associated with queue + * @q_num: q index used to find queue + * + * returns pointer to rx queue + */ +static struct idpf_queue *idpf_find_rxq(struct idpf_vport *vport, int q_num) +{ + int q_grp, q_idx; + + if (!idpf_is_queue_model_split(vport->rxq_model)) + return vport->rxq_grps->singleq.rxqs[q_num]; + + q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP; + q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP; + + return &vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq; +} + +/** + * idpf_find_txq - find txq from q index + * @vport: virtual port associated with queue + * @q_num: q index used to find queue + * + * returns pointer to tx queue + */ +static struct idpf_queue *idpf_find_txq(struct idpf_vport *vport, int q_num) +{ + int q_grp; + + if (!idpf_is_queue_model_split(vport->txq_model)) + return vport->txqs[q_num]; + + q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP; + + return vport->txq_grps[q_grp].complq; +} + +/** + * __idpf_get_q_coalesce - get ITR values for specific queue + * @ec: ethtool structure to fill with driver's coalesce settings + * @q: Rx or Tx queue + */ +static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec, + struct idpf_queue *q) +{ + if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) { + ec->use_adaptive_rx_coalesce = + IDPF_ITR_IS_DYNAMIC(q->q_vector->rx_intr_mode); + ec->rx_coalesce_usecs = q->q_vector->rx_itr_value; + } else { + ec->use_adaptive_tx_coalesce = + IDPF_ITR_IS_DYNAMIC(q->q_vector->tx_intr_mode); + ec->tx_coalesce_usecs = q->q_vector->tx_itr_value; + } +} + +/** + * idpf_get_q_coalesce - get ITR values for specific queue + * @netdev: pointer to the netdev associated with this query + * @ec: coalesce settings to be filled + * @q_num: queue number/index to retrieve ITR/INTRL (coalesce) settings for + * + * Return 0 on success, and negative on failure + */ +static int idpf_get_q_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + u32 q_num) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport *vport; + int err = 0; + + idpf_vport_ctrl_lock(netdev); + vport =
idpf_netdev_to_vport(netdev); + + if (np->state != __IDPF_VPORT_UP) + goto unlock_mutex; + + if (q_num >= vport->num_rxq && q_num >= vport->num_txq) { + err = -EINVAL; + goto unlock_mutex; + } + + if (q_num < vport->num_rxq) + __idpf_get_q_coalesce(ec, idpf_find_rxq(vport, q_num)); + + if (q_num < vport->num_txq) + __idpf_get_q_coalesce(ec, idpf_find_txq(vport, q_num)); + +unlock_mutex: + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +/** + * idpf_get_coalesce - get ITR values as requested by user + * @netdev: pointer to the netdev associated with this query + * @ec: coalesce settings to be filled + * @kec: unused + * @extack: unused + * + * Return 0 on success, and negative on failure + */ +static int idpf_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kec, + struct netlink_ext_ack *extack) +{ + /* Return coalesce based on queue number zero */ + return idpf_get_q_coalesce(netdev, ec, 0); +} + +/** + * idpf_get_per_q_coalesce - get ITR values as requested by user + * @netdev: pointer to the netdev associated with this query + * @q_num: queue for which the ITR values have to be retrieved + * @ec: coalesce settings to be filled + * + * Return 0 on success, and negative on failure + */ +static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num, + struct ethtool_coalesce *ec) +{ + return idpf_get_q_coalesce(netdev, ec, q_num); +} + +/** + * __idpf_set_q_coalesce - set ITR values for specific queue + * @ec: ethtool structure from user to update ITR settings + * @q: queue for which the ITR values have to be set + * @is_rxq: is queue type rx + * + * Returns 0 on success, negative otherwise. + */ +static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec, + struct idpf_queue *q, bool is_rxq) +{ + u32 use_adaptive_coalesce, coalesce_usecs; + struct idpf_q_vector *qv = q->q_vector; + bool is_dim_ena = false; + u16 itr_val; + + if (is_rxq) { + is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode); + use_adaptive_coalesce = ec->use_adaptive_rx_coalesce; + coalesce_usecs = ec->rx_coalesce_usecs; + itr_val = qv->rx_itr_value; + } else { + is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode); + use_adaptive_coalesce = ec->use_adaptive_tx_coalesce; + coalesce_usecs = ec->tx_coalesce_usecs; + itr_val = qv->tx_itr_value; + } + if (coalesce_usecs != itr_val && use_adaptive_coalesce) { + netdev_err(q->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n"); + + return -EINVAL; + } + + if (is_dim_ena && use_adaptive_coalesce) + return 0; + + if (coalesce_usecs > IDPF_ITR_MAX) { + netdev_err(q->vport->netdev, + "Invalid value, %d-usecs range is 0-%d\n", + coalesce_usecs, IDPF_ITR_MAX); + + return -EINVAL; + } + + if (coalesce_usecs % 2) { + coalesce_usecs--; + netdev_info(q->vport->netdev, + "HW only supports even ITR values, ITR rounded to %d\n", + coalesce_usecs); + } + + if (is_rxq) { + qv->rx_itr_value = coalesce_usecs; + if (use_adaptive_coalesce) { + qv->rx_intr_mode = IDPF_ITR_DYNAMIC; + } else { + qv->rx_intr_mode = !IDPF_ITR_DYNAMIC; + idpf_vport_intr_write_itr(qv, qv->rx_itr_value, + false); + } + } else { + qv->tx_itr_value = coalesce_usecs; + if (use_adaptive_coalesce) { + qv->tx_intr_mode = IDPF_ITR_DYNAMIC; + } else { + qv->tx_intr_mode = !IDPF_ITR_DYNAMIC; + idpf_vport_intr_write_itr(qv, qv->tx_itr_value, true); + } + } + + /* Update of static/dynamic ITR will be taken care of when the + * interrupt is fired + */ + return 0; +} + +/** + * idpf_set_q_coalesce - set ITR values for specific queue + * @vport: vport associated with
the queue that needs updating + * @ec: coalesce settings to program the device with + * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index + * @is_rxq: is queue type rx + * + * Return 0 on success, and negative on failure + */ +static int idpf_set_q_coalesce(struct idpf_vport *vport, + struct ethtool_coalesce *ec, + int q_num, bool is_rxq) +{ + struct idpf_queue *q; + + q = is_rxq ? idpf_find_rxq(vport, q_num) : idpf_find_txq(vport, q_num); + + if (q && __idpf_set_q_coalesce(ec, q, is_rxq)) + return -EINVAL; + + return 0; +} + +/** + * idpf_set_coalesce - set ITR values as requested by user + * @netdev: pointer to the netdev associated with this query + * @ec: coalesce settings to program the device with + * @kec: unused + * @extack: unused + * + * Return 0 on success, and negative on failure + */ +static int idpf_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kec, + struct netlink_ext_ack *extack) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport *vport; + int i, err = 0; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + if (np->state != __IDPF_VPORT_UP) + goto unlock_mutex; + + for (i = 0; i < vport->num_txq; i++) { + err = idpf_set_q_coalesce(vport, ec, i, false); + if (err) + goto unlock_mutex; + } + + for (i = 0; i < vport->num_rxq; i++) { + err = idpf_set_q_coalesce(vport, ec, i, true); + if (err) + goto unlock_mutex; + } + +unlock_mutex: + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +/** + * idpf_set_per_q_coalesce - set ITR values as requested by user + * @netdev: pointer to the netdev associated with this query + * @q_num: queue for which the ITR values have to be set + * @ec: coalesce settings to program the device with + * + * Return 0 on success, and negative on failure + */ +static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num, + struct ethtool_coalesce *ec) +{ + struct idpf_vport *vport; + int err; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + err = idpf_set_q_coalesce(vport, ec, q_num, false); + if (err) { + idpf_vport_ctrl_unlock(netdev); + + return err; + } + + err = idpf_set_q_coalesce(vport, ec, q_num, true); + + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +/** + * idpf_get_msglevel - Get debug message level + * @netdev: network interface device structure + * + * Returns current debug message level. + */ +static u32 idpf_get_msglevel(struct net_device *netdev) +{ + struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev); + + return adapter->msg_enable; +} + +/** + * idpf_set_msglevel - Set debug message level + * @netdev: network interface device structure + * @data: message level + * + * Set current debug message level. Higher values cause the driver to + * be noisier. + */ +static void idpf_set_msglevel(struct net_device *netdev, u32 data) +{ + struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev); + + adapter->msg_enable = data; +} + +/** + * idpf_get_link_ksettings - Get Link Speed and Duplex settings + * @netdev: network interface device structure + * @cmd: ethtool command + * + * Reports speed/duplex settings.
+ **/ +static int idpf_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct idpf_vport *vport; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + cmd->base.autoneg = AUTONEG_DISABLE; + cmd->base.port = PORT_NONE; + if (vport->link_up) { + cmd->base.duplex = DUPLEX_FULL; + cmd->base.speed = vport->link_speed_mbps; + } else { + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; + } + + idpf_vport_ctrl_unlock(netdev); + + return 0; +} + +static const struct ethtool_ops idpf_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_USE_ADAPTIVE, + .get_msglevel = idpf_get_msglevel, + .set_msglevel = idpf_set_msglevel, + .get_link = ethtool_op_get_link, + .get_coalesce = idpf_get_coalesce, + .set_coalesce = idpf_set_coalesce, + .get_per_queue_coalesce = idpf_get_per_q_coalesce, + .set_per_queue_coalesce = idpf_set_per_q_coalesce, + .get_ethtool_stats = idpf_get_ethtool_stats, + .get_strings = idpf_get_strings, + .get_sset_count = idpf_get_sset_count, + .get_channels = idpf_get_channels, + .get_rxnfc = idpf_get_rxnfc, + .get_rxfh_key_size = idpf_get_rxfh_key_size, + .get_rxfh_indir_size = idpf_get_rxfh_indir_size, + .get_rxfh = idpf_get_rxfh, + .set_rxfh = idpf_set_rxfh, + .set_channels = idpf_set_channels, + .get_ringparam = idpf_get_ringparam, + .set_ringparam = idpf_set_ringparam, + .get_link_ksettings = idpf_get_link_ksettings, +}; + +/** + * idpf_set_ethtool_ops - Initialize ethtool ops struct + * @netdev: network interface device structure + * + * Sets ethtool ops struct in our netdev so that ethtool can call + * our functions. + */ +void idpf_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &idpf_ethtool_ops; +} diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h new file mode 100644 index 000000000000..24edb8a6ec2e --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023 Intel Corporation */ + +#ifndef _IDPF_LAN_PF_REGS_H_ +#define _IDPF_LAN_PF_REGS_H_ + +/* Receive queues */ +#define PF_QRX_BASE 0x00000000 +#define PF_QRX_TAIL(_QRX) (PF_QRX_BASE + (((_QRX) * 0x1000))) +#define PF_QRX_BUFFQ_BASE 0x03000000 +#define PF_QRX_BUFFQ_TAIL(_QRX) (PF_QRX_BUFFQ_BASE + (((_QRX) * 0x1000))) + +/* Transmit queues */ +#define PF_QTX_BASE 0x05000000 +#define PF_QTX_COMM_DBELL(_DBQM) (PF_QTX_BASE + ((_DBQM) * 0x1000)) + +/* Control(PF Mailbox) Queue */ +#define PF_FW_BASE 0x08400000 + +#define PF_FW_ARQBAL (PF_FW_BASE) +#define PF_FW_ARQBAH (PF_FW_BASE + 0x4) +#define PF_FW_ARQLEN (PF_FW_BASE + 0x8) +#define PF_FW_ARQLEN_ARQLEN_S 0 +#define PF_FW_ARQLEN_ARQLEN_M GENMASK(12, 0) +#define PF_FW_ARQLEN_ARQVFE_S 28 +#define PF_FW_ARQLEN_ARQVFE_M BIT(PF_FW_ARQLEN_ARQVFE_S) +#define PF_FW_ARQLEN_ARQOVFL_S 29 +#define PF_FW_ARQLEN_ARQOVFL_M BIT(PF_FW_ARQLEN_ARQOVFL_S) +#define PF_FW_ARQLEN_ARQCRIT_S 30 +#define PF_FW_ARQLEN_ARQCRIT_M BIT(PF_FW_ARQLEN_ARQCRIT_S) +#define PF_FW_ARQLEN_ARQENABLE_S 31 +#define PF_FW_ARQLEN_ARQENABLE_M BIT(PF_FW_ARQLEN_ARQENABLE_S) +#define PF_FW_ARQH (PF_FW_BASE + 0xC) +#define PF_FW_ARQH_ARQH_S 0 +#define PF_FW_ARQH_ARQH_M GENMASK(12, 0) +#define PF_FW_ARQT (PF_FW_BASE + 0x10) + +#define PF_FW_ATQBAL (PF_FW_BASE + 0x14) +#define PF_FW_ATQBAH (PF_FW_BASE + 0x18) +#define PF_FW_ATQLEN (PF_FW_BASE + 0x1C) +#define 
PF_FW_ATQLEN_ATQLEN_S 0 +#define PF_FW_ATQLEN_ATQLEN_M GENMASK(9, 0) +#define PF_FW_ATQLEN_ATQVFE_S 28 +#define PF_FW_ATQLEN_ATQVFE_M BIT(PF_FW_ATQLEN_ATQVFE_S) +#define PF_FW_ATQLEN_ATQOVFL_S 29 +#define PF_FW_ATQLEN_ATQOVFL_M BIT(PF_FW_ATQLEN_ATQOVFL_S) +#define PF_FW_ATQLEN_ATQCRIT_S 30 +#define PF_FW_ATQLEN_ATQCRIT_M BIT(PF_FW_ATQLEN_ATQCRIT_S) +#define PF_FW_ATQLEN_ATQENABLE_S 31 +#define PF_FW_ATQLEN_ATQENABLE_M BIT(PF_FW_ATQLEN_ATQENABLE_S) +#define PF_FW_ATQH (PF_FW_BASE + 0x20) +#define PF_FW_ATQH_ATQH_S 0 +#define PF_FW_ATQH_ATQH_M GENMASK(9, 0) +#define PF_FW_ATQT (PF_FW_BASE + 0x24) + +/* Interrupts */ +#define PF_GLINT_BASE 0x08900000 +#define PF_GLINT_DYN_CTL(_INT) (PF_GLINT_BASE + ((_INT) * 0x1000)) +#define PF_GLINT_DYN_CTL_INTENA_S 0 +#define PF_GLINT_DYN_CTL_INTENA_M BIT(PF_GLINT_DYN_CTL_INTENA_S) +#define PF_GLINT_DYN_CTL_CLEARPBA_S 1 +#define PF_GLINT_DYN_CTL_CLEARPBA_M BIT(PF_GLINT_DYN_CTL_CLEARPBA_S) +#define PF_GLINT_DYN_CTL_SWINT_TRIG_S 2 +#define PF_GLINT_DYN_CTL_SWINT_TRIG_M BIT(PF_GLINT_DYN_CTL_SWINT_TRIG_S) +#define PF_GLINT_DYN_CTL_ITR_INDX_S 3 +#define PF_GLINT_DYN_CTL_ITR_INDX_M GENMASK(4, 3) +#define PF_GLINT_DYN_CTL_INTERVAL_S 5 +#define PF_GLINT_DYN_CTL_INTERVAL_M BIT(PF_GLINT_DYN_CTL_INTERVAL_S) +#define PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_S 24 +#define PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_S) +#define PF_GLINT_DYN_CTL_SW_ITR_INDX_S 25 +#define PF_GLINT_DYN_CTL_SW_ITR_INDX_M BIT(PF_GLINT_DYN_CTL_SW_ITR_INDX_S) +#define PF_GLINT_DYN_CTL_WB_ON_ITR_S 30 +#define PF_GLINT_DYN_CTL_WB_ON_ITR_M BIT(PF_GLINT_DYN_CTL_WB_ON_ITR_S) +#define PF_GLINT_DYN_CTL_INTENA_MSK_S 31 +#define PF_GLINT_DYN_CTL_INTENA_MSK_M BIT(PF_GLINT_DYN_CTL_INTENA_MSK_S) +/* _ITR is ITR index, _INT is interrupt index, _itrn_indx_spacing is + * spacing b/w itrn registers of the same vector. 
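+ * E.g. PF_GLINT_ITR(1, 3) below expands to + * PF_GLINT_BASE + (1 + 1) * 4 + 3 * 0x1000 = 0x08903008.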
+ */ +#define PF_GLINT_ITR_ADDR(_ITR, _reg_start, _itrn_indx_spacing) \ + ((_reg_start) + ((_ITR) * (_itrn_indx_spacing))) +/* For PF, itrn_indx_spacing is 4 and itrn_reg_spacing is 0x1000 */ +#define PF_GLINT_ITR(_ITR, _INT) \ + (PF_GLINT_BASE + (((_ITR) + 1) * 4) + ((_INT) * 0x1000)) +#define PF_GLINT_ITR_MAX_INDEX 2 +#define PF_GLINT_ITR_INTERVAL_S 0 +#define PF_GLINT_ITR_INTERVAL_M GENMASK(11, 0) + +/* Generic registers */ +#define PF_INT_DIR_OICR_ENA 0x08406000 +#define PF_INT_DIR_OICR_ENA_S 0 +#define PF_INT_DIR_OICR_ENA_M GENMASK(31, 0) +#define PF_INT_DIR_OICR 0x08406004 +#define PF_INT_DIR_OICR_TSYN_EVNT 0 +#define PF_INT_DIR_OICR_PHY_TS_0 BIT(1) +#define PF_INT_DIR_OICR_PHY_TS_1 BIT(2) +#define PF_INT_DIR_OICR_CAUSE 0x08406008 +#define PF_INT_DIR_OICR_CAUSE_CAUSE_S 0 +#define PF_INT_DIR_OICR_CAUSE_CAUSE_M GENMASK(31, 0) +#define PF_INT_PBA_CLEAR 0x0840600C + +#define PF_FUNC_RID 0x08406010 +#define PF_FUNC_RID_FUNCTION_NUMBER_S 0 +#define PF_FUNC_RID_FUNCTION_NUMBER_M GENMASK(2, 0) +#define PF_FUNC_RID_DEVICE_NUMBER_S 3 +#define PF_FUNC_RID_DEVICE_NUMBER_M GENMASK(7, 3) +#define PF_FUNC_RID_BUS_NUMBER_S 8 +#define PF_FUNC_RID_BUS_NUMBER_M GENMASK(15, 8) + +/* Reset registers */ +#define PFGEN_RTRIG 0x08407000 +#define PFGEN_RTRIG_CORER_S 0 +#define PFGEN_RTRIG_CORER_M BIT(0) +#define PFGEN_RTRIG_LINKR_S 1 +#define PFGEN_RTRIG_LINKR_M BIT(1) +#define PFGEN_RTRIG_IMCR_S 2 +#define PFGEN_RTRIG_IMCR_M BIT(2) +#define PFGEN_RSTAT 0x08407008 /* PFR Status */ +#define PFGEN_RSTAT_PFR_STATE_S 0 +#define PFGEN_RSTAT_PFR_STATE_M GENMASK(1, 0) +#define PFGEN_CTRL 0x0840700C +#define PFGEN_CTRL_PFSWR BIT(0) + +#endif diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h new file mode 100644 index 000000000000..a5752dcab888 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h @@ -0,0 +1,293 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023 Intel Corporation */ + +#ifndef _IDPF_LAN_TXRX_H_ +#define _IDPF_LAN_TXRX_H_ + +enum idpf_rss_hash { + IDPF_HASH_INVALID = 0, + /* Values 1 - 28 are reserved for future use */ + IDPF_HASH_NONF_UNICAST_IPV4_UDP = 29, + IDPF_HASH_NONF_MULTICAST_IPV4_UDP, + IDPF_HASH_NONF_IPV4_UDP, + IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK, + IDPF_HASH_NONF_IPV4_TCP, + IDPF_HASH_NONF_IPV4_SCTP, + IDPF_HASH_NONF_IPV4_OTHER, + IDPF_HASH_FRAG_IPV4, + /* Values 37-38 are reserved */ + IDPF_HASH_NONF_UNICAST_IPV6_UDP = 39, + IDPF_HASH_NONF_MULTICAST_IPV6_UDP, + IDPF_HASH_NONF_IPV6_UDP, + IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK, + IDPF_HASH_NONF_IPV6_TCP, + IDPF_HASH_NONF_IPV6_SCTP, + IDPF_HASH_NONF_IPV6_OTHER, + IDPF_HASH_FRAG_IPV6, + IDPF_HASH_NONF_RSVD47, + IDPF_HASH_NONF_FCOE_OX, + IDPF_HASH_NONF_FCOE_RX, + IDPF_HASH_NONF_FCOE_OTHER, + /* Values 51-62 are reserved */ + IDPF_HASH_L2_PAYLOAD = 63, + + IDPF_HASH_MAX +}; + +/* Supported RSS offloads */ +#define IDPF_DEFAULT_RSS_HASH \ + (BIT_ULL(IDPF_HASH_NONF_IPV4_UDP) | \ + BIT_ULL(IDPF_HASH_NONF_IPV4_SCTP) | \ + BIT_ULL(IDPF_HASH_NONF_IPV4_TCP) | \ + BIT_ULL(IDPF_HASH_NONF_IPV4_OTHER) | \ + BIT_ULL(IDPF_HASH_FRAG_IPV4) | \ + BIT_ULL(IDPF_HASH_NONF_IPV6_UDP) | \ + BIT_ULL(IDPF_HASH_NONF_IPV6_TCP) | \ + BIT_ULL(IDPF_HASH_NONF_IPV6_SCTP) | \ + BIT_ULL(IDPF_HASH_NONF_IPV6_OTHER) | \ + BIT_ULL(IDPF_HASH_FRAG_IPV6) | \ + BIT_ULL(IDPF_HASH_L2_PAYLOAD)) + +#define IDPF_DEFAULT_RSS_HASH_EXPANDED (IDPF_DEFAULT_RSS_HASH | \ + BIT_ULL(IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK) | \ + BIT_ULL(IDPF_HASH_NONF_UNICAST_IPV4_UDP) | \ + 
BIT_ULL(IDPF_HASH_NONF_MULTICAST_IPV4_UDP) | \ + BIT_ULL(IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK) | \ + BIT_ULL(IDPF_HASH_NONF_UNICAST_IPV6_UDP) | \ + BIT_ULL(IDPF_HASH_NONF_MULTICAST_IPV6_UDP)) + +/* For idpf_splitq_base_tx_compl_desc */ +#define IDPF_TXD_COMPLQ_GEN_S 15 +#define IDPF_TXD_COMPLQ_GEN_M BIT_ULL(IDPF_TXD_COMPLQ_GEN_S) +#define IDPF_TXD_COMPLQ_COMPL_TYPE_S 11 +#define IDPF_TXD_COMPLQ_COMPL_TYPE_M GENMASK_ULL(13, 11) +#define IDPF_TXD_COMPLQ_QID_S 0 +#define IDPF_TXD_COMPLQ_QID_M GENMASK_ULL(9, 0) + +/* For base mode TX descriptors */ + +#define IDPF_TXD_CTX_QW0_TUNN_L4T_CS_S 23 +#define IDPF_TXD_CTX_QW0_TUNN_L4T_CS_M BIT_ULL(IDPF_TXD_CTX_QW0_TUNN_L4T_CS_S) +#define IDPF_TXD_CTX_QW0_TUNN_DECTTL_S 19 +#define IDPF_TXD_CTX_QW0_TUNN_DECTTL_M \ + (0xFULL << IDPF_TXD_CTX_QW0_TUNN_DECTTL_S) +#define IDPF_TXD_CTX_QW0_TUNN_NATLEN_S 12 +#define IDPF_TXD_CTX_QW0_TUNN_NATLEN_M \ + (0X7FULL << IDPF_TXD_CTX_QW0_TUNN_NATLEN_S) +#define IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_S 11 +#define IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_M \ + BIT_ULL(IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_S) +#define IDPF_TXD_CTX_EIP_NOINC_IPID_CONST \ + IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_M +#define IDPF_TXD_CTX_QW0_TUNN_NATT_S 9 +#define IDPF_TXD_CTX_QW0_TUNN_NATT_M (0x3ULL << IDPF_TXD_CTX_QW0_TUNN_NATT_S) +#define IDPF_TXD_CTX_UDP_TUNNELING BIT_ULL(IDPF_TXD_CTX_QW0_TUNN_NATT_S) +#define IDPF_TXD_CTX_GRE_TUNNELING (0x2ULL << IDPF_TXD_CTX_QW0_TUNN_NATT_S) +#define IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_S 2 +#define IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_M \ + (0x3FULL << IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_S) +#define IDPF_TXD_CTX_QW0_TUNN_EXT_IP_S 0 +#define IDPF_TXD_CTX_QW0_TUNN_EXT_IP_M \ + (0x3ULL << IDPF_TXD_CTX_QW0_TUNN_EXT_IP_S) + +#define IDPF_TXD_CTX_QW1_MSS_S 50 +#define IDPF_TXD_CTX_QW1_MSS_M GENMASK_ULL(63, 50) +#define IDPF_TXD_CTX_QW1_TSO_LEN_S 30 +#define IDPF_TXD_CTX_QW1_TSO_LEN_M GENMASK_ULL(47, 30) +#define IDPF_TXD_CTX_QW1_CMD_S 4 +#define IDPF_TXD_CTX_QW1_CMD_M GENMASK_ULL(15, 4) +#define IDPF_TXD_CTX_QW1_DTYPE_S 0 +#define IDPF_TXD_CTX_QW1_DTYPE_M GENMASK_ULL(3, 0) +#define IDPF_TXD_QW1_L2TAG1_S 48 +#define IDPF_TXD_QW1_L2TAG1_M GENMASK_ULL(63, 48) +#define IDPF_TXD_QW1_TX_BUF_SZ_S 34 +#define IDPF_TXD_QW1_TX_BUF_SZ_M GENMASK_ULL(47, 34) +#define IDPF_TXD_QW1_OFFSET_S 16 +#define IDPF_TXD_QW1_OFFSET_M GENMASK_ULL(33, 16) +#define IDPF_TXD_QW1_CMD_S 4 +#define IDPF_TXD_QW1_CMD_M GENMASK_ULL(15, 4) +#define IDPF_TXD_QW1_DTYPE_S 0 +#define IDPF_TXD_QW1_DTYPE_M GENMASK_ULL(3, 0) + +/* TX Completion Descriptor Completion Types */ +#define IDPF_TXD_COMPLT_ITR_FLUSH 0 +/* Descriptor completion type 1 is reserved */ +#define IDPF_TXD_COMPLT_RS 2 +/* Descriptor completion type 3 is reserved */ +#define IDPF_TXD_COMPLT_RE 4 +#define IDPF_TXD_COMPLT_SW_MARKER 5 + +enum idpf_tx_desc_dtype_value { + IDPF_TX_DESC_DTYPE_DATA = 0, + IDPF_TX_DESC_DTYPE_CTX = 1, + /* DTYPE 2 is reserved + * DTYPE 3 is free for future use + * DTYPE 4 is reserved + */ + IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX = 5, + /* DTYPE 6 is reserved */ + IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2 = 7, + /* DTYPE 8, 9 are free for future use + * DTYPE 10 is reserved + * DTYPE 11 is free for future use + */ + IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE = 12, + /* DTYPE 13, 14 are free for future use */ + + /* DESC_DONE - HW has completed write-back of descriptor */ + IDPF_TX_DESC_DTYPE_DESC_DONE = 15, +}; + +enum idpf_tx_ctx_desc_cmd_bits { + IDPF_TX_CTX_DESC_TSO = 0x01, + IDPF_TX_CTX_DESC_TSYN = 0x02, + IDPF_TX_CTX_DESC_IL2TAG2 = 0x04, + IDPF_TX_CTX_DESC_RSVD = 0x08, + IDPF_TX_CTX_DESC_SWTCH_NOTAG = 0x00, + 
IDPF_TX_CTX_DESC_SWTCH_UPLINK = 0x10, + IDPF_TX_CTX_DESC_SWTCH_LOCAL = 0x20, + IDPF_TX_CTX_DESC_SWTCH_VSI = 0x30, + IDPF_TX_CTX_DESC_FILT_AU_EN = 0x40, + IDPF_TX_CTX_DESC_FILT_AU_EVICT = 0x80, + IDPF_TX_CTX_DESC_RSVD1 = 0xF00 +}; + +enum idpf_tx_desc_len_fields { + /* Note: These are predefined bit offsets */ + IDPF_TX_DESC_LEN_MACLEN_S = 0, /* 7 BITS */ + IDPF_TX_DESC_LEN_IPLEN_S = 7, /* 7 BITS */ + IDPF_TX_DESC_LEN_L4_LEN_S = 14 /* 4 BITS */ +}; + +enum idpf_tx_base_desc_cmd_bits { + IDPF_TX_DESC_CMD_EOP = BIT(0), + IDPF_TX_DESC_CMD_RS = BIT(1), + /* only on VFs else RSVD */ + IDPF_TX_DESC_CMD_ICRC = BIT(2), + IDPF_TX_DESC_CMD_IL2TAG1 = BIT(3), + IDPF_TX_DESC_CMD_RSVD1 = BIT(4), + IDPF_TX_DESC_CMD_IIPT_IPV6 = BIT(5), + IDPF_TX_DESC_CMD_IIPT_IPV4 = BIT(6), + IDPF_TX_DESC_CMD_IIPT_IPV4_CSUM = GENMASK(6, 5), + IDPF_TX_DESC_CMD_RSVD2 = BIT(7), + IDPF_TX_DESC_CMD_L4T_EOFT_TCP = BIT(8), + IDPF_TX_DESC_CMD_L4T_EOFT_SCTP = BIT(9), + IDPF_TX_DESC_CMD_L4T_EOFT_UDP = GENMASK(9, 8), + IDPF_TX_DESC_CMD_RSVD3 = BIT(10), + IDPF_TX_DESC_CMD_RSVD4 = BIT(11), +}; + +/* Transmit descriptors */ +/* splitq tx buf, singleq tx buf and singleq compl desc */ +struct idpf_base_tx_desc { + __le64 buf_addr; /* Address of descriptor's data buf */ + __le64 qw1; /* type_cmd_offset_bsz_l2tag1 */ +}; /* read used with buffer queues */ + +struct idpf_splitq_tx_compl_desc { + /* qid=[10:0] comptype=[13:11] rsvd=[14] gen=[15] */ + __le16 qid_comptype_gen; + union { + __le16 q_head; /* Queue head */ + __le16 compl_tag; /* Completion tag */ + } q_head_compl_tag; + u8 ts[3]; + u8 rsvd; /* Reserved */ +}; /* writeback used with completion queues */ + +/* Context descriptors */ +struct idpf_base_tx_ctx_desc { + struct { + __le32 tunneling_params; + __le16 l2tag2; + __le16 rsvd1; + } qw0; + __le64 qw1; /* type_cmd_tlen_mss/rt_hint */ +}; + +/* Common cmd field defines for all desc except Flex Flow Scheduler (0x0C) */ +enum idpf_tx_flex_desc_cmd_bits { + IDPF_TX_FLEX_DESC_CMD_EOP = BIT(0), + IDPF_TX_FLEX_DESC_CMD_RS = BIT(1), + IDPF_TX_FLEX_DESC_CMD_RE = BIT(2), + IDPF_TX_FLEX_DESC_CMD_IL2TAG1 = BIT(3), + IDPF_TX_FLEX_DESC_CMD_DUMMY = BIT(4), + IDPF_TX_FLEX_DESC_CMD_CS_EN = BIT(5), + IDPF_TX_FLEX_DESC_CMD_FILT_AU_EN = BIT(6), + IDPF_TX_FLEX_DESC_CMD_FILT_AU_EVICT = BIT(7), +}; + +struct idpf_flex_tx_desc { + __le64 buf_addr; /* Packet buffer address */ + struct { +#define IDPF_FLEX_TXD_QW1_DTYPE_S 0 +#define IDPF_FLEX_TXD_QW1_DTYPE_M GENMASK(4, 0) +#define IDPF_FLEX_TXD_QW1_CMD_S 5 +#define IDPF_FLEX_TXD_QW1_CMD_M GENMASK(15, 5) + __le16 cmd_dtype; + /* DTYPE=IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2 (0x07) */ + struct { + __le16 l2tag1; + __le16 l2tag2; + } l2tags; + __le16 buf_size; + } qw1; +}; + +struct idpf_flex_tx_sched_desc { + __le64 buf_addr; /* Packet buffer address */ + + /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE_16B (0x0C) */ + struct { + u8 cmd_dtype; +#define IDPF_TXD_FLEX_FLOW_DTYPE_M GENMASK(4, 0) +#define IDPF_TXD_FLEX_FLOW_CMD_EOP BIT(5) +#define IDPF_TXD_FLEX_FLOW_CMD_CS_EN BIT(6) +#define IDPF_TXD_FLEX_FLOW_CMD_RE BIT(7) + + /* [23:23] Horizon Overflow bit, [22:0] timestamp */ + u8 ts[3]; +#define IDPF_TXD_FLOW_SCH_HORIZON_OVERFLOW_M BIT(7) + + __le16 compl_tag; + __le16 rxr_bufsize; +#define IDPF_TXD_FLEX_FLOW_RXR BIT(14) +#define IDPF_TXD_FLEX_FLOW_BUFSIZE_M GENMASK(13, 0) + } qw1; +}; + +/* Common cmd fields for all flex context descriptors + * Note: these defines already account for the 5 bit dtype in the cmd_dtype + * field + */ +enum idpf_tx_flex_ctx_desc_cmd_bits { + IDPF_TX_FLEX_CTX_DESC_CMD_TSO = 
BIT(5), + IDPF_TX_FLEX_CTX_DESC_CMD_TSYN_EN = BIT(6), + IDPF_TX_FLEX_CTX_DESC_CMD_L2TAG2 = BIT(7), + IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_UPLNK = BIT(9), + IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_LOCAL = BIT(10), + IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_TARGETVSI = GENMASK(10, 9), +}; + +/* Standard flex descriptor TSO context quad word */ +struct idpf_flex_tx_tso_ctx_qw { + __le32 flex_tlen; +#define IDPF_TXD_FLEX_CTX_TLEN_M GENMASK(17, 0) +#define IDPF_TXD_FLEX_TSO_CTX_FLEX_S 24 + __le16 mss_rt; +#define IDPF_TXD_FLEX_CTX_MSS_RT_M GENMASK(13, 0) + u8 hdr_len; + u8 flex; +}; + +struct idpf_flex_tx_ctx_desc { + /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX (0x05) */ + struct { + struct idpf_flex_tx_tso_ctx_qw qw0; + struct { + __le16 cmd_dtype; + u8 flex[6]; + } qw1; + } tso; +}; +#endif /* _IDPF_LAN_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_vf_regs.h b/drivers/net/ethernet/intel/idpf/idpf_lan_vf_regs.h new file mode 100644 index 000000000000..3d73b6c76863 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_lan_vf_regs.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023 Intel Corporation */ + +#ifndef _IDPF_LAN_VF_REGS_H_ +#define _IDPF_LAN_VF_REGS_H_ + +/* Reset */ +#define VFGEN_RSTAT 0x00008800 +#define VFGEN_RSTAT_VFR_STATE_S 0 +#define VFGEN_RSTAT_VFR_STATE_M GENMASK(1, 0) + +/* Control(VF Mailbox) Queue */ +#define VF_BASE 0x00006000 + +#define VF_ATQBAL (VF_BASE + 0x1C00) +#define VF_ATQBAH (VF_BASE + 0x1800) +#define VF_ATQLEN (VF_BASE + 0x0800) +#define VF_ATQLEN_ATQLEN_S 0 +#define VF_ATQLEN_ATQLEN_M GENMASK(9, 0) +#define VF_ATQLEN_ATQVFE_S 28 +#define VF_ATQLEN_ATQVFE_M BIT(VF_ATQLEN_ATQVFE_S) +#define VF_ATQLEN_ATQOVFL_S 29 +#define VF_ATQLEN_ATQOVFL_M BIT(VF_ATQLEN_ATQOVFL_S) +#define VF_ATQLEN_ATQCRIT_S 30 +#define VF_ATQLEN_ATQCRIT_M BIT(VF_ATQLEN_ATQCRIT_S) +#define VF_ATQLEN_ATQENABLE_S 31 +#define VF_ATQLEN_ATQENABLE_M BIT(VF_ATQLEN_ATQENABLE_S) +#define VF_ATQH (VF_BASE + 0x0400) +#define VF_ATQH_ATQH_S 0 +#define VF_ATQH_ATQH_M GENMASK(9, 0) +#define VF_ATQT (VF_BASE + 0x2400) + +#define VF_ARQBAL (VF_BASE + 0x0C00) +#define VF_ARQBAH (VF_BASE) +#define VF_ARQLEN (VF_BASE + 0x2000) +#define VF_ARQLEN_ARQLEN_S 0 +#define VF_ARQLEN_ARQLEN_M GENMASK(9, 0) +#define VF_ARQLEN_ARQVFE_S 28 +#define VF_ARQLEN_ARQVFE_M BIT(VF_ARQLEN_ARQVFE_S) +#define VF_ARQLEN_ARQOVFL_S 29 +#define VF_ARQLEN_ARQOVFL_M BIT(VF_ARQLEN_ARQOVFL_S) +#define VF_ARQLEN_ARQCRIT_S 30 +#define VF_ARQLEN_ARQCRIT_M BIT(VF_ARQLEN_ARQCRIT_S) +#define VF_ARQLEN_ARQENABLE_S 31 +#define VF_ARQLEN_ARQENABLE_M BIT(VF_ARQLEN_ARQENABLE_S) +#define VF_ARQH (VF_BASE + 0x1400) +#define VF_ARQH_ARQH_S 0 +#define VF_ARQH_ARQH_M GENMASK(12, 0) +#define VF_ARQT (VF_BASE + 0x1000) + +/* Transmit queues */ +#define VF_QTX_TAIL_BASE 0x00000000 +#define VF_QTX_TAIL(_QTX) (VF_QTX_TAIL_BASE + (_QTX) * 0x4) +#define VF_QTX_TAIL_EXT_BASE 0x00040000 +#define VF_QTX_TAIL_EXT(_QTX) (VF_QTX_TAIL_EXT_BASE + ((_QTX) * 4)) + +/* Receive queues */ +#define VF_QRX_TAIL_BASE 0x00002000 +#define VF_QRX_TAIL(_QRX) (VF_QRX_TAIL_BASE + ((_QRX) * 4)) +#define VF_QRX_TAIL_EXT_BASE 0x00050000 +#define VF_QRX_TAIL_EXT(_QRX) (VF_QRX_TAIL_EXT_BASE + ((_QRX) * 4)) +#define VF_QRXB_TAIL_BASE 0x00060000 +#define VF_QRXB_TAIL(_QRX) (VF_QRXB_TAIL_BASE + ((_QRX) * 4)) + +/* Interrupts */ +#define VF_INT_DYN_CTL0 0x00005C00 +#define VF_INT_DYN_CTL0_INTENA_S 0 +#define VF_INT_DYN_CTL0_INTENA_M BIT(VF_INT_DYN_CTL0_INTENA_S) +#define VF_INT_DYN_CTL0_ITR_INDX_S 3 +#define VF_INT_DYN_CTL0_ITR_INDX_M GENMASK(4, 3) 
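+/* Per-vector dynamic control registers: one 4-byte register per MSI-X vector, + * as the (_INT) * 4 stride below shows. + */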
+#define VF_INT_DYN_CTLN(_INT) (0x00003800 + ((_INT) * 4)) +#define VF_INT_DYN_CTLN_EXT(_INT) (0x00070000 + ((_INT) * 4)) +#define VF_INT_DYN_CTLN_INTENA_S 0 +#define VF_INT_DYN_CTLN_INTENA_M BIT(VF_INT_DYN_CTLN_INTENA_S) +#define VF_INT_DYN_CTLN_CLEARPBA_S 1 +#define VF_INT_DYN_CTLN_CLEARPBA_M BIT(VF_INT_DYN_CTLN_CLEARPBA_S) +#define VF_INT_DYN_CTLN_SWINT_TRIG_S 2 +#define VF_INT_DYN_CTLN_SWINT_TRIG_M BIT(VF_INT_DYN_CTLN_SWINT_TRIG_S) +#define VF_INT_DYN_CTLN_ITR_INDX_S 3 +#define VF_INT_DYN_CTLN_ITR_INDX_M GENMASK(4, 3) +#define VF_INT_DYN_CTLN_INTERVAL_S 5 +#define VF_INT_DYN_CTLN_INTERVAL_M BIT(VF_INT_DYN_CTLN_INTERVAL_S) +#define VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_S 24 +#define VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_M BIT(VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_S) +#define VF_INT_DYN_CTLN_SW_ITR_INDX_S 25 +#define VF_INT_DYN_CTLN_SW_ITR_INDX_M BIT(VF_INT_DYN_CTLN_SW_ITR_INDX_S) +#define VF_INT_DYN_CTLN_WB_ON_ITR_S 30 +#define VF_INT_DYN_CTLN_WB_ON_ITR_M BIT(VF_INT_DYN_CTLN_WB_ON_ITR_S) +#define VF_INT_DYN_CTLN_INTENA_MSK_S 31 +#define VF_INT_DYN_CTLN_INTENA_MSK_M BIT(VF_INT_DYN_CTLN_INTENA_MSK_S) +/* _ITR is ITR index, _INT is interrupt index, _itrn_indx_spacing is spacing + * b/w itrn registers of the same vector + */ +#define VF_INT_ITR0(_ITR) (0x00004C00 + ((_ITR) * 4)) +#define VF_INT_ITRN_ADDR(_ITR, _reg_start, _itrn_indx_spacing) \ + ((_reg_start) + ((_ITR) * (_itrn_indx_spacing))) +/* For VF with 16 vector support, itrn_reg_spacing is 0x4, itrn_indx_spacing + * is 0x40 and base register offset is 0x00002800 + */ +#define VF_INT_ITRN(_INT, _ITR) \ + (0x00002800 + ((_INT) * 4) + ((_ITR) * 0x40)) +/* For VF with 64 vector support, itrn_reg_spacing is 0x4, itrn_indx_spacing + * is 0x100 and base register offset is 0x00002C00 + */ +#define VF_INT_ITRN_64(_INT, _ITR) \ + (0x00002C00 + ((_INT) * 4) + ((_ITR) * 0x100)) +/* For VF with 2k vector support, itrn_reg_spacing is 0x4, itrn_indx_spacing + * is 0x2000 and base register offset is 0x00072000 + */ +#define VF_INT_ITRN_2K(_INT, _ITR) \ + (0x00072000 + ((_INT) * 4) + ((_ITR) * 0x2000)) +#define VF_INT_ITRN_MAX_INDEX 2 +#define VF_INT_ITRN_INTERVAL_S 0 +#define VF_INT_ITRN_INTERVAL_M GENMASK(11, 0) +#define VF_INT_PBA_CLEAR 0x00008900 + +#define VF_INT_ICR0_ENA1 0x00005000 +#define VF_INT_ICR0_ENA1_ADMINQ_S 30 +#define VF_INT_ICR0_ENA1_ADMINQ_M BIT(VF_INT_ICR0_ENA1_ADMINQ_S) +#define VF_INT_ICR0_ENA1_RSVD_S 31 +#define VF_INT_ICR01 0x00004800 +#define VF_QF_HENA(_i) (0x0000C400 + ((_i) * 4)) +#define VF_QF_HENA_MAX_INDX 1 +#define VF_QF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) +#define VF_QF_HKEY_MAX_INDX 12 +#define VF_QF_HLUT(_i) (0x0000D000 + ((_i) * 4)) +#define VF_QF_HLUT_MAX_INDX 15 +#endif diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c new file mode 100644 index 000000000000..19809b0ddcd9 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -0,0 +1,2379 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2023 Intel Corporation */ + +#include "idpf.h" + +static const struct net_device_ops idpf_netdev_ops_splitq; +static const struct net_device_ops idpf_netdev_ops_singleq; + +const char * const idpf_vport_vc_state_str[] = { + IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_STRING) +}; + +/** + * idpf_init_vector_stack - Fill the MSIX vector stack with vector index + * @adapter: private data struct + * + * Return 0 on success, error on failure + */ +static int idpf_init_vector_stack(struct idpf_adapter *adapter) +{ + struct idpf_vector_lifo *stack; + u16 min_vec; + u32 i; + + 
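+ /* Indexes below min_vec are permanently claimed by the mailbox and the + * default vports; only the entries above them form the on-demand free + * pool. + */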
mutex_lock(&adapter->vector_lock); + min_vec = adapter->num_msix_entries - adapter->num_avail_msix; + stack = &adapter->vector_stack; + stack->size = adapter->num_msix_entries; + /* set the base and top to point at start of the 'free pool' to + * distribute the unused vectors on-demand basis + */ + stack->base = min_vec; + stack->top = min_vec; + + stack->vec_idx = kcalloc(stack->size, sizeof(u16), GFP_KERNEL); + if (!stack->vec_idx) { + mutex_unlock(&adapter->vector_lock); + + return -ENOMEM; + } + + for (i = 0; i < stack->size; i++) + stack->vec_idx[i] = i; + + mutex_unlock(&adapter->vector_lock); + + return 0; +} + +/** + * idpf_deinit_vector_stack - zero out the MSIX vector stack + * @adapter: private data struct + */ +static void idpf_deinit_vector_stack(struct idpf_adapter *adapter) +{ + struct idpf_vector_lifo *stack; + + mutex_lock(&adapter->vector_lock); + stack = &adapter->vector_stack; + kfree(stack->vec_idx); + stack->vec_idx = NULL; + mutex_unlock(&adapter->vector_lock); +} + +/** + * idpf_mb_intr_rel_irq - Free the IRQ association with the OS + * @adapter: adapter structure + * + * This will also disable interrupt mode and queue up mailbox task. Mailbox + * task will reschedule itself if not in interrupt mode. + */ +static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter) +{ + clear_bit(IDPF_MB_INTR_MODE, adapter->flags); + free_irq(adapter->msix_entries[0].vector, adapter); + queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); +} + +/** + * idpf_intr_rel - Release interrupt capabilities and free memory + * @adapter: adapter to disable interrupts on + */ +void idpf_intr_rel(struct idpf_adapter *adapter) +{ + int err; + + if (!adapter->msix_entries) + return; + + idpf_mb_intr_rel_irq(adapter); + pci_free_irq_vectors(adapter->pdev); + + err = idpf_send_dealloc_vectors_msg(adapter); + if (err) + dev_err(&adapter->pdev->dev, + "Failed to deallocate vectors: %d\n", err); + + idpf_deinit_vector_stack(adapter); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; +} + +/** + * idpf_mb_intr_clean - Interrupt handler for the mailbox + * @irq: interrupt number + * @data: pointer to the adapter structure + */ +static irqreturn_t idpf_mb_intr_clean(int __always_unused irq, void *data) +{ + struct idpf_adapter *adapter = (struct idpf_adapter *)data; + + queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); + + return IRQ_HANDLED; +} + +/** + * idpf_mb_irq_enable - Enable MSIX interrupt for the mailbox + * @adapter: adapter to get the hardware address for register write + */ +static void idpf_mb_irq_enable(struct idpf_adapter *adapter) +{ + struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg; + u32 val; + + val = intr->dyn_ctl_intena_m | intr->dyn_ctl_itridx_m; + writel(val, intr->dyn_ctl); + writel(intr->icr_ena_ctlq_m, intr->icr_ena); +} + +/** + * idpf_mb_intr_req_irq - Request irq for the mailbox interrupt + * @adapter: adapter structure to pass to the mailbox irq handler + */ +static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter) +{ + struct idpf_q_vector *mb_vector = &adapter->mb_vector; + int irq_num, mb_vidx = 0, err; + + irq_num = adapter->msix_entries[mb_vidx].vector; + mb_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d", + dev_driver_string(&adapter->pdev->dev), + "Mailbox", mb_vidx); + err = request_irq(irq_num, adapter->irq_mb_handler, 0, + mb_vector->name, adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "IRQ request for mailbox failed, error: %d\n", err); + + return err; + } + + set_bit(IDPF_MB_INTR_MODE, adapter->flags); + 
+ return 0; +} + +/** + * idpf_set_mb_vec_id - Set vector index for mailbox + * @adapter: adapter structure to access the vector chunks + * + * The first vector id in the requested vector chunks from the CP is for + * the mailbox. + */ +static void idpf_set_mb_vec_id(struct idpf_adapter *adapter) +{ + if (adapter->req_vec_chunks) + adapter->mb_vector.v_idx = + le16_to_cpu(adapter->caps.mailbox_vector_id); + else + adapter->mb_vector.v_idx = 0; +} + +/** + * idpf_mb_intr_init - Initialize the mailbox interrupt + * @adapter: adapter structure to store the mailbox vector + */ +static int idpf_mb_intr_init(struct idpf_adapter *adapter) +{ + adapter->dev_ops.reg_ops.mb_intr_reg_init(adapter); + adapter->irq_mb_handler = idpf_mb_intr_clean; + + return idpf_mb_intr_req_irq(adapter); +} + +/** + * idpf_vector_lifo_push - push MSIX vector index onto stack + * @adapter: private data struct + * @vec_idx: vector index to store + */ +static int idpf_vector_lifo_push(struct idpf_adapter *adapter, u16 vec_idx) +{ + struct idpf_vector_lifo *stack = &adapter->vector_stack; + + lockdep_assert_held(&adapter->vector_lock); + + if (stack->top == stack->base) { + dev_err(&adapter->pdev->dev, "Exceeded the vector stack limit: %d\n", + stack->top); + return -EINVAL; + } + + stack->vec_idx[--stack->top] = vec_idx; + + return 0; +} + +/** + * idpf_vector_lifo_pop - pop MSIX vector index from stack + * @adapter: private data struct + */ +static int idpf_vector_lifo_pop(struct idpf_adapter *adapter) +{ + struct idpf_vector_lifo *stack = &adapter->vector_stack; + + lockdep_assert_held(&adapter->vector_lock); + + if (stack->top == stack->size) { + dev_err(&adapter->pdev->dev, "No interrupt vectors are available to distribute!\n"); + + return -EINVAL; + } + + return stack->vec_idx[stack->top++]; +} + +/** + * idpf_vector_stash - Store the vector indexes onto the stack + * @adapter: private data struct + * @q_vector_idxs: vector index array + * @vec_info: info related to the number of vectors + * + * This function is a no-op if there are no vector indexes to be stashed + */ +static void idpf_vector_stash(struct idpf_adapter *adapter, u16 *q_vector_idxs, + struct idpf_vector_info *vec_info) +{ + int i, base = 0; + u16 vec_idx; + + lockdep_assert_held(&adapter->vector_lock); + + if (!vec_info->num_curr_vecs) + return; + + /* For default vports, no need to stash vectors allocated from the + * default pool onto the stack + */ + if (vec_info->default_vport) + base = IDPF_MIN_Q_VEC; + + for (i = vec_info->num_curr_vecs - 1; i >= base; i--) { + vec_idx = q_vector_idxs[i]; + idpf_vector_lifo_push(adapter, vec_idx); + adapter->num_avail_msix++; + } +} + +/** + * idpf_req_rel_vector_indexes - Request or release MSIX vector indexes + * @adapter: driver specific private structure + * @q_vector_idxs: vector index array + * @vec_info: info related to the number of vectors + * + * This is the core function to distribute the MSIX vectors acquired from the + * OS. It expects the caller to pass the number of vectors required and + * also previously allocated. First, it stashes previously allocated vector + * indexes onto the stack and then figures out if it can allocate requested + * vectors. It can wait on acquiring the mutex lock. If the caller passes 0 as + * requested vectors, then this function just stashes the already allocated + * vectors and returns 0.
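+ * E.g. a non-default vport resizing from 16 vectors to 8 passes + * num_curr_vecs = 16 and num_req_vecs = 8: all 16 old indexes are pushed + * onto the stack and 8 of them are then popped back off.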
+ * + * Returns actual number of vectors allocated on success, error value on failure. + * If 0 is returned, it implies the stack has no vectors to allocate, which is also + * a failure case for the caller + */ +int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter, + u16 *q_vector_idxs, + struct idpf_vector_info *vec_info) +{ + u16 num_req_vecs, num_alloc_vecs = 0, max_vecs; + struct idpf_vector_lifo *stack; + int i, j, vecid; + + mutex_lock(&adapter->vector_lock); + stack = &adapter->vector_stack; + num_req_vecs = vec_info->num_req_vecs; + + /* Stash interrupt vector indexes onto the stack if required */ + idpf_vector_stash(adapter, q_vector_idxs, vec_info); + + if (!num_req_vecs) + goto rel_lock; + + if (vec_info->default_vport) { + /* As IDPF_MIN_Q_VEC per default vport is put aside in the + * default pool of the stack, use them for default vports + */ + j = vec_info->index * IDPF_MIN_Q_VEC + IDPF_MBX_Q_VEC; + for (i = 0; i < IDPF_MIN_Q_VEC; i++) { + q_vector_idxs[num_alloc_vecs++] = stack->vec_idx[j++]; + num_req_vecs--; + } + } + + /* Check if the stack has enough vectors to allocate */ + max_vecs = min(adapter->num_avail_msix, num_req_vecs); + + for (j = 0; j < max_vecs; j++) { + vecid = idpf_vector_lifo_pop(adapter); + q_vector_idxs[num_alloc_vecs++] = vecid; + } + adapter->num_avail_msix -= max_vecs; + +rel_lock: + mutex_unlock(&adapter->vector_lock); + + return num_alloc_vecs; +} + +/** + * idpf_intr_req - Request interrupt capabilities + * @adapter: adapter to enable interrupts on + * + * Returns 0 on success, negative on failure + */ +int idpf_intr_req(struct idpf_adapter *adapter) +{ + u16 default_vports = idpf_get_default_vports(adapter); + int num_q_vecs, total_vecs, num_vec_ids; + int min_vectors, v_actual, err; + unsigned int vector; + u16 *vecids; + + total_vecs = idpf_get_reserved_vecs(adapter); + num_q_vecs = total_vecs - IDPF_MBX_Q_VEC; + + err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs); + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to allocate %d vectors: %d\n", num_q_vecs, err); + + return -EAGAIN; + } + + min_vectors = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports; + v_actual = pci_alloc_irq_vectors(adapter->pdev, min_vectors, + total_vecs, PCI_IRQ_MSIX); + if (v_actual < min_vectors) { + dev_err(&adapter->pdev->dev, "Failed to allocate MSIX vectors: %d\n", + v_actual); + err = -EAGAIN; + goto send_dealloc_vecs; + } + + adapter->msix_entries = kcalloc(v_actual, sizeof(struct msix_entry), + GFP_KERNEL); + + if (!adapter->msix_entries) { + err = -ENOMEM; + goto free_irq; + } + + idpf_set_mb_vec_id(adapter); + + vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL); + if (!vecids) { + err = -ENOMEM; + goto free_msix; + } + + if (adapter->req_vec_chunks) { + struct virtchnl2_vector_chunks *vchunks; + struct virtchnl2_alloc_vectors *ac; + + ac = adapter->req_vec_chunks; + vchunks = &ac->vchunks; + + num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs, + vchunks); + if (num_vec_ids < v_actual) { + err = -EINVAL; + goto free_vecids; + } + } else { + int i; + + for (i = 0; i < v_actual; i++) + vecids[i] = i; + } + + for (vector = 0; vector < v_actual; vector++) { + adapter->msix_entries[vector].entry = vecids[vector]; + adapter->msix_entries[vector].vector = + pci_irq_vector(adapter->pdev, vector); + } + + adapter->num_req_msix = total_vecs; + adapter->num_msix_entries = v_actual; + /* 'num_avail_msix' is used to distribute excess vectors to the vports + * after considering the minimum vectors required per each default + * vport + */ +
adapter->num_avail_msix = v_actual - min_vectors; + + /* Fill MSIX vector lifo stack with vector indexes */ + err = idpf_init_vector_stack(adapter); + if (err) + goto free_vecids; + + err = idpf_mb_intr_init(adapter); + if (err) + goto deinit_vec_stack; + idpf_mb_irq_enable(adapter); + kfree(vecids); + + return 0; + +deinit_vec_stack: + idpf_deinit_vector_stack(adapter); +free_vecids: + kfree(vecids); +free_msix: + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; +free_irq: + pci_free_irq_vectors(adapter->pdev); +send_dealloc_vecs: + idpf_send_dealloc_vectors_msg(adapter); + + return err; +} + +/** + * idpf_find_mac_filter - Search filter list for specific mac filter + * @vconfig: Vport config structure + * @macaddr: The MAC address + * + * Returns ptr to the filter object or NULL. Must be called while holding the + * mac_filter_list_lock. + **/ +static struct idpf_mac_filter *idpf_find_mac_filter(struct idpf_vport_config *vconfig, + const u8 *macaddr) +{ + struct idpf_mac_filter *f; + + if (!macaddr) + return NULL; + + list_for_each_entry(f, &vconfig->user_config.mac_filter_list, list) { + if (ether_addr_equal(macaddr, f->macaddr)) + return f; + } + + return NULL; +} + +/** + * __idpf_del_mac_filter - Delete a MAC filter from the filter list + * @vport_config: Vport config structure + * @macaddr: The MAC address + * + * Returns 0 on success, error value on failure + **/ +static int __idpf_del_mac_filter(struct idpf_vport_config *vport_config, + const u8 *macaddr) +{ + struct idpf_mac_filter *f; + + spin_lock_bh(&vport_config->mac_filter_list_lock); + f = idpf_find_mac_filter(vport_config, macaddr); + if (f) { + list_del(&f->list); + kfree(f); + } + spin_unlock_bh(&vport_config->mac_filter_list_lock); + + return 0; +} + +/** + * idpf_del_mac_filter - Delete a MAC filter from the filter list + * @vport: Main vport structure + * @np: Netdev private structure + * @macaddr: The MAC address + * @async: Don't wait for return message + * + * Removes filter from list and if interface is up, tells hardware about the + * removed filter. + **/ +static int idpf_del_mac_filter(struct idpf_vport *vport, + struct idpf_netdev_priv *np, + const u8 *macaddr, bool async) +{ + struct idpf_vport_config *vport_config; + struct idpf_mac_filter *f; + + vport_config = np->adapter->vport_config[np->vport_idx]; + + spin_lock_bh(&vport_config->mac_filter_list_lock); + f = idpf_find_mac_filter(vport_config, macaddr); + if (f) { + f->remove = true; + } else { + spin_unlock_bh(&vport_config->mac_filter_list_lock); + + return -EINVAL; + } + spin_unlock_bh(&vport_config->mac_filter_list_lock); + + if (np->state == __IDPF_VPORT_UP) { + int err; + + err = idpf_add_del_mac_filters(vport, np, false, async); + if (err) + return err; + } + + return __idpf_del_mac_filter(vport_config, macaddr); +} + +/** + * __idpf_add_mac_filter - Add mac filter helper function + * @vport_config: Vport config structure + * @macaddr: Address to add + * + * Takes mac_filter_list_lock spinlock to add new filter to list. 
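+ * If a matching filter already exists, it is only cleared of any pending + * remove marking instead of being added twice.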
+ */ +static int __idpf_add_mac_filter(struct idpf_vport_config *vport_config, + const u8 *macaddr) +{ + struct idpf_mac_filter *f; + + spin_lock_bh(&vport_config->mac_filter_list_lock); + + f = idpf_find_mac_filter(vport_config, macaddr); + if (f) { + f->remove = false; + spin_unlock_bh(&vport_config->mac_filter_list_lock); + + return 0; + } + + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) { + spin_unlock_bh(&vport_config->mac_filter_list_lock); + + return -ENOMEM; + } + + ether_addr_copy(f->macaddr, macaddr); + list_add_tail(&f->list, &vport_config->user_config.mac_filter_list); + f->add = true; + + spin_unlock_bh(&vport_config->mac_filter_list_lock); + + return 0; +} + +/** + * idpf_add_mac_filter - Add a mac filter to the filter list + * @vport: Main vport structure + * @np: Netdev private structure + * @macaddr: The MAC address + * @async: Don't wait for return message + * + * Returns 0 on success or error on failure. If interface is up, we'll also + * send the virtchnl message to tell hardware about the filter. + **/ +static int idpf_add_mac_filter(struct idpf_vport *vport, + struct idpf_netdev_priv *np, + const u8 *macaddr, bool async) +{ + struct idpf_vport_config *vport_config; + int err; + + vport_config = np->adapter->vport_config[np->vport_idx]; + err = __idpf_add_mac_filter(vport_config, macaddr); + if (err) + return err; + + if (np->state == __IDPF_VPORT_UP) + err = idpf_add_del_mac_filters(vport, np, true, async); + + return err; +} + +/** + * idpf_del_all_mac_filters - Delete all MAC filters in list + * @vport: main vport struct + * + * Takes mac_filter_list_lock spinlock. Deletes all filters + */ +static void idpf_del_all_mac_filters(struct idpf_vport *vport) +{ + struct idpf_vport_config *vport_config; + struct idpf_mac_filter *f, *ftmp; + + vport_config = vport->adapter->vport_config[vport->idx]; + spin_lock_bh(&vport_config->mac_filter_list_lock); + + list_for_each_entry_safe(f, ftmp, &vport_config->user_config.mac_filter_list, + list) { + list_del(&f->list); + kfree(f); + } + + spin_unlock_bh(&vport_config->mac_filter_list_lock); +} + +/** + * idpf_restore_mac_filters - Re-add all MAC filters in list + * @vport: main vport struct + * + * Takes mac_filter_list_lock spinlock. Sets add field to true for filters to + * resync filters back to HW. + */ +static void idpf_restore_mac_filters(struct idpf_vport *vport) +{ + struct idpf_vport_config *vport_config; + struct idpf_mac_filter *f; + + vport_config = vport->adapter->vport_config[vport->idx]; + spin_lock_bh(&vport_config->mac_filter_list_lock); + + list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list) + f->add = true; + + spin_unlock_bh(&vport_config->mac_filter_list_lock); + + idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev), + true, false); +} + +/** + * idpf_remove_mac_filters - Remove all MAC filters in list + * @vport: main vport struct + * + * Takes mac_filter_list_lock spinlock. Sets remove field to true for filters + * to remove filters in HW. 
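+ * Unlike idpf_del_all_mac_filters(), the list entries themselves are kept + * so that idpf_restore_mac_filters() can resync them to HW later.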
+ */ +static void idpf_remove_mac_filters(struct idpf_vport *vport) +{ + struct idpf_vport_config *vport_config; + struct idpf_mac_filter *f; + + vport_config = vport->adapter->vport_config[vport->idx]; + spin_lock_bh(&vport_config->mac_filter_list_lock); + + list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list) + f->remove = true; + + spin_unlock_bh(&vport_config->mac_filter_list_lock); + + idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev), + false, false); +} + +/** + * idpf_deinit_mac_addr - deinitialize mac address for vport + * @vport: main vport structure + */ +static void idpf_deinit_mac_addr(struct idpf_vport *vport) +{ + struct idpf_vport_config *vport_config; + struct idpf_mac_filter *f; + + vport_config = vport->adapter->vport_config[vport->idx]; + + spin_lock_bh(&vport_config->mac_filter_list_lock); + + f = idpf_find_mac_filter(vport_config, vport->default_mac_addr); + if (f) { + list_del(&f->list); + kfree(f); + } + + spin_unlock_bh(&vport_config->mac_filter_list_lock); +} + +/** + * idpf_init_mac_addr - initialize mac address for vport + * @vport: main vport structure + * @netdev: pointer to netdev struct associated with this vport + */ +static int idpf_init_mac_addr(struct idpf_vport *vport, + struct net_device *netdev) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_adapter *adapter = vport->adapter; + int err; + + if (is_valid_ether_addr(vport->default_mac_addr)) { + eth_hw_addr_set(netdev, vport->default_mac_addr); + ether_addr_copy(netdev->perm_addr, vport->default_mac_addr); + + return idpf_add_mac_filter(vport, np, vport->default_mac_addr, + false); + } + + if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, + VIRTCHNL2_CAP_MACFILTER)) { + dev_err(&adapter->pdev->dev, + "MAC address is not provided and capability is not set\n"); + + return -EINVAL; + } + + eth_hw_addr_random(netdev); + err = idpf_add_mac_filter(vport, np, netdev->dev_addr, false); + if (err) + return err; + + dev_info(&adapter->pdev->dev, "Invalid MAC address %pM, using random %pM\n", + vport->default_mac_addr, netdev->dev_addr); + ether_addr_copy(vport->default_mac_addr, netdev->dev_addr); + + return 0; +} + +/** + * idpf_cfg_netdev - Allocate, configure and register a netdev + * @vport: main vport structure + * + * Returns 0 on success, negative value on failure. 
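+ * If a netdev was already allocated and registered for this vport index, + * e.g. across a soft reset, it is reused instead of being reallocated.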
+ */ +static int idpf_cfg_netdev(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct idpf_vport_config *vport_config; + netdev_features_t dflt_features; + netdev_features_t offloads = 0; + struct idpf_netdev_priv *np; + struct net_device *netdev; + u16 idx = vport->idx; + int err; + + vport_config = adapter->vport_config[idx]; + + /* It's possible we already have a netdev allocated and registered for + * this vport + */ + if (test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags)) { + netdev = adapter->netdevs[idx]; + np = netdev_priv(netdev); + np->vport = vport; + np->vport_idx = vport->idx; + np->vport_id = vport->vport_id; + vport->netdev = netdev; + + return idpf_init_mac_addr(vport, netdev); + } + + netdev = alloc_etherdev_mqs(sizeof(struct idpf_netdev_priv), + vport_config->max_q.max_txq, + vport_config->max_q.max_rxq); + if (!netdev) + return -ENOMEM; + + vport->netdev = netdev; + np = netdev_priv(netdev); + np->vport = vport; + np->adapter = adapter; + np->vport_idx = vport->idx; + np->vport_id = vport->vport_id; + + spin_lock_init(&np->stats_lock); + + err = idpf_init_mac_addr(vport, netdev); + if (err) { + free_netdev(vport->netdev); + vport->netdev = NULL; + + return err; + } + + /* assign netdev_ops */ + if (idpf_is_queue_model_split(vport->txq_model)) + netdev->netdev_ops = &idpf_netdev_ops_splitq; + else + netdev->netdev_ops = &idpf_netdev_ops_singleq; + + /* setup watchdog timeout value to be 5 seconds */ + netdev->watchdog_timeo = 5 * HZ; + + /* configure default MTU size */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = vport->max_mtu; + + dflt_features = NETIF_F_SG | + NETIF_F_HIGHDMA; + + if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) + dflt_features |= NETIF_F_RXHASH; + if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V4)) + dflt_features |= NETIF_F_IP_CSUM; + if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V6)) + dflt_features |= NETIF_F_IPV6_CSUM; + if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM)) + dflt_features |= NETIF_F_RXCSUM; + if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_SCTP_CSUM)) + dflt_features |= NETIF_F_SCTP_CRC; + + if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP)) + dflt_features |= NETIF_F_TSO; + if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP)) + dflt_features |= NETIF_F_TSO6; + if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS, + VIRTCHNL2_CAP_SEG_IPV4_UDP | + VIRTCHNL2_CAP_SEG_IPV6_UDP)) + dflt_features |= NETIF_F_GSO_UDP_L4; + if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC)) + offloads |= NETIF_F_GRO_HW; + /* advertise to stack only if offloads for encapsulated packets are + * supported + */ + if (idpf_is_cap_ena(vport->adapter, IDPF_SEG_CAPS, + VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL)) { + offloads |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_PARTIAL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_IPXIP4 | + NETIF_F_GSO_IPXIP6 | + 0; + + if (!idpf_is_cap_ena_all(vport->adapter, IDPF_CSUM_CAPS, + IDPF_CAP_TUNNEL_TX_CSUM)) + netdev->gso_partial_features |= + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; + offloads |= NETIF_F_TSO_MANGLEID; + } + if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK)) + offloads |= NETIF_F_LOOPBACK; + + netdev->features |= dflt_features; + netdev->hw_features |= dflt_features | offloads; + netdev->hw_enc_features |= dflt_features | offloads; +
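+ /* Install ethtool ops and parent device before the netdev is eventually + * registered in idpf_init_task() + */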
idpf_set_ethtool_ops(netdev); + SET_NETDEV_DEV(netdev, &adapter->pdev->dev); + + /* carrier off on init to avoid Tx hangs */ + netif_carrier_off(netdev); + + /* make sure transmit queues start off as stopped */ + netif_tx_stop_all_queues(netdev); + + /* The vport can be arbitrarily released so we need to also track + * netdevs in the adapter struct + */ + adapter->netdevs[idx] = netdev; + + return 0; +} + +/** + * idpf_get_free_slot - get the index of the next free (NULL) slot in the vports array + * @adapter: adapter in which to look for a free vport slot + */ +static int idpf_get_free_slot(struct idpf_adapter *adapter) +{ + unsigned int i; + + for (i = 0; i < adapter->max_vports; i++) { + if (!adapter->vports[i]) + return i; + } + + return IDPF_NO_FREE_SLOT; +} + +/** + * idpf_remove_features - Turn off feature configs + * @vport: virtual port structure + */ +static void idpf_remove_features(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + + if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER)) + idpf_remove_mac_filters(vport); +} + +/** + * idpf_vport_stop - Disable a vport + * @vport: vport to disable + */ +static void idpf_vport_stop(struct idpf_vport *vport) +{ + struct idpf_netdev_priv *np = netdev_priv(vport->netdev); + + if (np->state <= __IDPF_VPORT_DOWN) + return; + + netif_carrier_off(vport->netdev); + netif_tx_disable(vport->netdev); + + idpf_send_disable_vport_msg(vport); + idpf_send_disable_queues_msg(vport); + idpf_send_map_unmap_queue_vector_msg(vport, false); + /* Normally we ask for queues in create_vport, but if the number of + * initially requested queues has changed, for example via ethtool + * set channels, we do delete queues and then add the queues back + * instead of deleting and reallocating the vport. + */ + if (test_and_clear_bit(IDPF_VPORT_DEL_QUEUES, vport->flags)) + idpf_send_delete_queues_msg(vport); + + idpf_remove_features(vport); + + vport->link_up = false; + idpf_vport_intr_deinit(vport); + idpf_vport_intr_rel(vport); + idpf_vport_queues_rel(vport); + np->state = __IDPF_VPORT_DOWN; +} + +/** + * idpf_stop - Disables a network interface + * @netdev: network interface device structure + * + * The stop entry point is called when an interface is de-activated by the OS, + * and the netdevice enters the DOWN state. The hardware is still under the + * driver's control, but the netdev interface is disabled.
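+ * All queue, interrupt, and vector resources are released in + * idpf_vport_stop() under the vport control lock.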
+ * + * Returns success only - not allowed to fail + */ +static int idpf_stop(struct net_device *netdev) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport *vport; + + if (test_bit(IDPF_REMOVE_IN_PROG, np->adapter->flags)) + return 0; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + idpf_vport_stop(vport); + + idpf_vport_ctrl_unlock(netdev); + + return 0; +} + +/** + * idpf_decfg_netdev - Unregister the netdev + * @vport: vport whose netdev is to be unregistered + */ +static void idpf_decfg_netdev(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + + unregister_netdev(vport->netdev); + free_netdev(vport->netdev); + vport->netdev = NULL; + + adapter->netdevs[vport->idx] = NULL; +} + +/** + * idpf_vport_rel - Delete a vport and free its resources + * @vport: the vport being removed + */ +static void idpf_vport_rel(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct idpf_vport_config *vport_config; + struct idpf_vector_info vec_info; + struct idpf_rss_data *rss_data; + struct idpf_vport_max_q max_q; + u16 idx = vport->idx; + int i; + + vport_config = adapter->vport_config[vport->idx]; + idpf_deinit_rss(vport); + rss_data = &vport_config->user_config.rss_data; + kfree(rss_data->rss_key); + rss_data->rss_key = NULL; + + idpf_send_destroy_vport_msg(vport); + + /* Set all bits as we don't know which vc_state the vport vchnl_wq + * is waiting on, and wake up the virtchnl workqueue even if it is + * waiting for the response as we are going down + */ + for (i = 0; i < IDPF_VC_NBITS; i++) + set_bit(i, vport->vc_state); + wake_up(&vport->vchnl_wq); + + mutex_destroy(&vport->vc_buf_lock); + + /* Clear all the bits */ + for (i = 0; i < IDPF_VC_NBITS; i++) + clear_bit(i, vport->vc_state); + + /* Release all max queues allocated to the adapter's pool */ + max_q.max_rxq = vport_config->max_q.max_rxq; + max_q.max_txq = vport_config->max_q.max_txq; + max_q.max_bufq = vport_config->max_q.max_bufq; + max_q.max_complq = vport_config->max_q.max_complq; + idpf_vport_dealloc_max_qs(adapter, &max_q); + + /* Release all the allocated vectors on the stack */ + vec_info.num_req_vecs = 0; + vec_info.num_curr_vecs = vport->num_q_vectors; + vec_info.default_vport = vport->default_vport; + + idpf_req_rel_vector_indexes(adapter, vport->q_vector_idxs, &vec_info); + + kfree(vport->q_vector_idxs); + vport->q_vector_idxs = NULL; + + kfree(adapter->vport_params_recvd[idx]); + adapter->vport_params_recvd[idx] = NULL; + kfree(adapter->vport_params_reqd[idx]); + adapter->vport_params_reqd[idx] = NULL; + if (adapter->vport_config[idx]) { + kfree(adapter->vport_config[idx]->req_qs_chunks); + adapter->vport_config[idx]->req_qs_chunks = NULL; + } + kfree(vport); + adapter->num_alloc_vports--; +} + +/** + * idpf_vport_dealloc - cleanup and release a given vport + * @vport: pointer to idpf vport structure + * + * returns nothing + */ +static void idpf_vport_dealloc(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + unsigned int i = vport->idx; + + idpf_deinit_mac_addr(vport); + idpf_vport_stop(vport); + + if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags)) + idpf_decfg_netdev(vport); + if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) + idpf_del_all_mac_filters(vport); + + if (adapter->netdevs[i]) { + struct idpf_netdev_priv *np = netdev_priv(adapter->netdevs[i]); + + np->vport = NULL; + } + + idpf_vport_rel(vport); + + adapter->vports[i] = NULL; + adapter->next_vport =
idpf_get_free_slot(adapter); +} + +/** + * idpf_vport_alloc - Allocates the next available struct vport in the adapter + * @adapter: board private structure + * @max_q: vport max queue info + * + * returns a pointer to a vport on success, NULL on failure. + */ +static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter, + struct idpf_vport_max_q *max_q) +{ + struct idpf_rss_data *rss_data; + u16 idx = adapter->next_vport; + struct idpf_vport *vport; + u16 num_max_q; + + if (idx == IDPF_NO_FREE_SLOT) + return NULL; + + vport = kzalloc(sizeof(*vport), GFP_KERNEL); + if (!vport) + return vport; + + if (!adapter->vport_config[idx]) { + struct idpf_vport_config *vport_config; + + vport_config = kzalloc(sizeof(*vport_config), GFP_KERNEL); + if (!vport_config) { + kfree(vport); + + return NULL; + } + + adapter->vport_config[idx] = vport_config; + } + + vport->idx = idx; + vport->adapter = adapter; + vport->compln_clean_budget = IDPF_TX_COMPLQ_CLEAN_BUDGET; + vport->default_vport = adapter->num_alloc_vports < + idpf_get_default_vports(adapter); + + num_max_q = max(max_q->max_txq, max_q->max_rxq); + vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL); + if (!vport->q_vector_idxs) { + kfree(vport); + + return NULL; + } + idpf_vport_init(vport, max_q); + + /* This alloc is done separate from the LUT because it's not strictly + * dependent on how many queues we have. If we change number of queues + * and soft reset we'll need a new LUT but the key can remain the same + * for as long as the vport exists. + */ + rss_data = &adapter->vport_config[idx]->user_config.rss_data; + rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL); + if (!rss_data->rss_key) { + kfree(vport); + + return NULL; + } + /* Initialize default rss key */ + netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size); + + /* fill vport slot in the adapter struct */ + adapter->vports[idx] = vport; + adapter->vport_ids[idx] = idpf_get_vport_id(vport); + + adapter->num_alloc_vports++; + /* prepare adapter->next_vport for next use */ + adapter->next_vport = idpf_get_free_slot(adapter); + + return vport; +} + +/** + * idpf_get_stats64 - get statistics for network device structure + * @netdev: network interface device structure + * @stats: main device statistics structure + */ +static void idpf_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + + spin_lock_bh(&np->stats_lock); + *stats = np->netstats; + spin_unlock_bh(&np->stats_lock); +} + +/** + * idpf_statistics_task - Delayed task to get statistics over mailbox + * @work: work_struct handle to our data + */ +void idpf_statistics_task(struct work_struct *work) +{ + struct idpf_adapter *adapter; + int i; + + adapter = container_of(work, struct idpf_adapter, stats_task.work); + + for (i = 0; i < adapter->max_vports; i++) { + struct idpf_vport *vport = adapter->vports[i]; + + if (vport && !test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags)) + idpf_send_get_stats_msg(vport); + } + + queue_delayed_work(adapter->stats_wq, &adapter->stats_task, + msecs_to_jiffies(10000)); +} + +/** + * idpf_mbx_task - Delayed task to handle mailbox responses + * @work: work_struct handle + */ +void idpf_mbx_task(struct work_struct *work) +{ + struct idpf_adapter *adapter; + + adapter = container_of(work, struct idpf_adapter, mbx_task.work); + + if (test_bit(IDPF_MB_INTR_MODE, adapter->flags)) + idpf_mb_irq_enable(adapter); + else + queue_delayed_work(adapter->mbx_wq, 
&adapter->mbx_task, + msecs_to_jiffies(300)); + + idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_UNKNOWN, NULL, 0); +} + +/** + * idpf_service_task - Delayed task to detect and handle hardware resets + * @work: work_struct handle to our data + */ +void idpf_service_task(struct work_struct *work) +{ + struct idpf_adapter *adapter; + + adapter = container_of(work, struct idpf_adapter, serv_task.work); + + if (idpf_is_reset_detected(adapter) && + !idpf_is_reset_in_prog(adapter) && + !test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) { + dev_info(&adapter->pdev->dev, "HW reset detected\n"); + set_bit(IDPF_HR_FUNC_RESET, adapter->flags); + queue_delayed_work(adapter->vc_event_wq, + &adapter->vc_event_task, + msecs_to_jiffies(10)); + } + + queue_delayed_work(adapter->serv_wq, &adapter->serv_task, + msecs_to_jiffies(300)); +} + +/** + * idpf_restore_features - Restore feature configs + * @vport: virtual port structure + */ +static void idpf_restore_features(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + + if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER)) + idpf_restore_mac_filters(vport); +} + +/** + * idpf_set_real_num_queues - set number of queues for netdev + * @vport: virtual port structure + * + * Returns 0 on success, negative on failure. + */ +static int idpf_set_real_num_queues(struct idpf_vport *vport) +{ + int err; + + err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq); + if (err) + return err; + + return netif_set_real_num_tx_queues(vport->netdev, vport->num_txq); +} + +/** + * idpf_up_complete - Complete interface up sequence + * @vport: virtual port structure + * + * Returns 0 on success, negative on failure. + */ +static int idpf_up_complete(struct idpf_vport *vport) +{ + struct idpf_netdev_priv *np = netdev_priv(vport->netdev); + + if (vport->link_up && !netif_carrier_ok(vport->netdev)) { + netif_carrier_on(vport->netdev); + netif_tx_start_all_queues(vport->netdev); + } + + np->state = __IDPF_VPORT_UP; + + return 0; +} + +/** + * idpf_rx_init_buf_tail - Write initial buffer ring tail value + * @vport: virtual port struct + */ +static void idpf_rx_init_buf_tail(struct idpf_vport *vport) +{ + int i, j; + + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *grp = &vport->rxq_grps[i]; + + if (idpf_is_queue_model_split(vport->rxq_model)) { + for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + struct idpf_queue *q = + &grp->splitq.bufq_sets[j].bufq; + + writel(q->next_to_alloc, q->tail); + } + } else { + for (j = 0; j < grp->singleq.num_rxq; j++) { + struct idpf_queue *q = + grp->singleq.rxqs[j]; + + writel(q->next_to_alloc, q->tail); + } + } + } +} + +/** + * idpf_vport_open - Bring up a vport + * @vport: vport to bring up + * @alloc_res: allocate queue resources + */ +static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res) +{ + struct idpf_netdev_priv *np = netdev_priv(vport->netdev); + struct idpf_adapter *adapter = vport->adapter; + struct idpf_vport_config *vport_config; + int err; + + if (np->state != __IDPF_VPORT_DOWN) + return -EBUSY; + + /* we do not allow interface up just yet */ + netif_carrier_off(vport->netdev); + + if (alloc_res) { + err = idpf_vport_queues_alloc(vport); + if (err) + return err; + } + + err = idpf_vport_intr_alloc(vport); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n", + vport->vport_id, err); + goto queues_rel; + } + + err = idpf_vport_queue_ids_init(vport); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to initialize
queue ids for vport %u: %d\n", + vport->vport_id, err); + goto intr_rel; + } + + err = idpf_vport_intr_init(vport); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n", + vport->vport_id, err); + goto intr_rel; + } + + err = idpf_rx_bufs_init_all(vport); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n", + vport->vport_id, err); + goto intr_rel; + } + + err = idpf_queue_reg_init(vport); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n", + vport->vport_id, err); + goto intr_rel; + } + + idpf_rx_init_buf_tail(vport); + + err = idpf_send_config_queues_msg(vport); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n", + vport->vport_id, err); + goto intr_deinit; + } + + err = idpf_send_map_unmap_queue_vector_msg(vport, true); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n", + vport->vport_id, err); + goto intr_deinit; + } + + err = idpf_send_enable_queues_msg(vport); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to enable queues for vport %u: %d\n", + vport->vport_id, err); + goto unmap_queue_vectors; + } + + err = idpf_send_enable_vport_msg(vport); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to enable vport %u: %d\n", + vport->vport_id, err); + err = -EAGAIN; + goto disable_queues; + } + + idpf_restore_features(vport); + + vport_config = adapter->vport_config[vport->idx]; + if (vport_config->user_config.rss_data.rss_lut) + err = idpf_config_rss(vport); + else + err = idpf_init_rss(vport); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to initialize RSS for vport %u: %d\n", + vport->vport_id, err); + goto disable_vport; + } + + err = idpf_up_complete(vport); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to complete interface up for vport %u: %d\n", + vport->vport_id, err); + goto deinit_rss; + } + + return 0; + +deinit_rss: + idpf_deinit_rss(vport); +disable_vport: + idpf_send_disable_vport_msg(vport); +disable_queues: + idpf_send_disable_queues_msg(vport); +unmap_queue_vectors: + idpf_send_map_unmap_queue_vector_msg(vport, false); +intr_deinit: + idpf_vport_intr_deinit(vport); +intr_rel: + idpf_vport_intr_rel(vport); +queues_rel: + idpf_vport_queues_rel(vport); + + return err; +} + +/** + * idpf_init_task - Delayed initialization task + * @work: work_struct handle to our data + * + * Init task finishes up pending work started in probe. Due to the asynchronous + * nature in which the device communicates with hardware, we may have to wait + * several milliseconds to get a response. Instead of busy polling in probe, + * pulling it out into a delayed work task prevents us from bogging down the + * whole system waiting for a response from hardware. 
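+ * The task requeues itself until all default vports have been created; only + * then are the netdevs registered and the statistics task started.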
+ */ +void idpf_init_task(struct work_struct *work) +{ + struct idpf_vport_config *vport_config; + struct idpf_vport_max_q max_q; + struct idpf_adapter *adapter; + struct idpf_netdev_priv *np; + struct idpf_vport *vport; + u16 num_default_vports; + struct pci_dev *pdev; + bool default_vport; + int index, err; + + adapter = container_of(work, struct idpf_adapter, init_task.work); + + num_default_vports = idpf_get_default_vports(adapter); + if (adapter->num_alloc_vports < num_default_vports) + default_vport = true; + else + default_vport = false; + + err = idpf_vport_alloc_max_qs(adapter, &max_q); + if (err) + goto unwind_vports; + + err = idpf_send_create_vport_msg(adapter, &max_q); + if (err) { + idpf_vport_dealloc_max_qs(adapter, &max_q); + goto unwind_vports; + } + + pdev = adapter->pdev; + vport = idpf_vport_alloc(adapter, &max_q); + if (!vport) { + err = -EFAULT; + dev_err(&pdev->dev, "failed to allocate vport: %d\n", + err); + idpf_vport_dealloc_max_qs(adapter, &max_q); + goto unwind_vports; + } + + index = vport->idx; + vport_config = adapter->vport_config[index]; + + init_waitqueue_head(&vport->sw_marker_wq); + init_waitqueue_head(&vport->vchnl_wq); + + mutex_init(&vport->vc_buf_lock); + spin_lock_init(&vport_config->mac_filter_list_lock); + + INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list); + + err = idpf_check_supported_desc_ids(vport); + if (err) { + dev_err(&pdev->dev, "failed to get required descriptor ids\n"); + goto cfg_netdev_err; + } + + if (idpf_cfg_netdev(vport)) + goto cfg_netdev_err; + + err = idpf_send_get_rx_ptype_msg(vport); + if (err) + goto handle_err; + + /* Once state is put into DOWN, driver is ready for dev_open */ + np = netdev_priv(vport->netdev); + np->state = __IDPF_VPORT_DOWN; + if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags)) + idpf_vport_open(vport, true); + + /* Re-queue 'idpf_init_task' and return until all the default + * vports are created + */ + if (adapter->num_alloc_vports < num_default_vports) { + queue_delayed_work(adapter->init_wq, &adapter->init_task, + msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); + + return; + } + + for (index = 0; index < adapter->max_vports; index++) { + if (adapter->netdevs[index] && + !test_bit(IDPF_VPORT_REG_NETDEV, + adapter->vport_config[index]->flags)) { + register_netdev(adapter->netdevs[index]); + set_bit(IDPF_VPORT_REG_NETDEV, + adapter->vport_config[index]->flags); + } + } + + /* As all the required vports are created, clear the reset flag + * unconditionally here in case we were in reset and the link was down. 
+ */ + clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); + /* Start the statistics task now */ + queue_delayed_work(adapter->stats_wq, &adapter->stats_task, + msecs_to_jiffies(10 * (pdev->devfn & 0x07))); + + return; + +handle_err: + idpf_decfg_netdev(vport); +cfg_netdev_err: + idpf_vport_rel(vport); + adapter->vports[index] = NULL; +unwind_vports: + if (default_vport) { + for (index = 0; index < adapter->max_vports; index++) { + if (adapter->vports[index]) + idpf_vport_dealloc(adapter->vports[index]); + } + } + clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); +} + +/** + * idpf_sriov_ena - Enable or change number of VFs + * @adapter: private data struct + * @num_vfs: number of VFs to allocate + */ +static int idpf_sriov_ena(struct idpf_adapter *adapter, int num_vfs) +{ + struct device *dev = &adapter->pdev->dev; + int err; + + err = idpf_send_set_sriov_vfs_msg(adapter, num_vfs); + if (err) { + dev_err(dev, "Failed to allocate VFs: %d\n", err); + + return err; + } + + err = pci_enable_sriov(adapter->pdev, num_vfs); + if (err) { + idpf_send_set_sriov_vfs_msg(adapter, 0); + dev_err(dev, "Failed to enable SR-IOV: %d\n", err); + + return err; + } + + adapter->num_vfs = num_vfs; + + return num_vfs; +} + +/** + * idpf_sriov_configure - Configure the requested VFs + * @pdev: pointer to a pci_dev structure + * @num_vfs: number of vfs to allocate + * + * Enable or change the number of VFs. Called when the user updates the number + * of VFs in sysfs. + **/ +int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct idpf_adapter *adapter = pci_get_drvdata(pdev); + + if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_SRIOV)) { + dev_info(&pdev->dev, "SR-IOV is not supported on this device\n"); + + return -EOPNOTSUPP; + } + + if (num_vfs) + return idpf_sriov_ena(adapter, num_vfs); + + if (pci_vfs_assigned(pdev)) { + dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs\n"); + + return -EBUSY; + } + + pci_disable_sriov(adapter->pdev); + idpf_send_set_sriov_vfs_msg(adapter, 0); + adapter->num_vfs = 0; + + return 0; +} + +/** + * idpf_deinit_task - Device deinit routine + * @adapter: Driver specific private structure + * + * Extended remove logic which will be used for + * hard reset as well + */ +void idpf_deinit_task(struct idpf_adapter *adapter) +{ + unsigned int i; + + /* Wait until the init_task is done else this thread might release + * the resources first and the other thread might end up in a bad state + */ + cancel_delayed_work_sync(&adapter->init_task); + + if (!adapter->vports) + return; + + cancel_delayed_work_sync(&adapter->stats_task); + + for (i = 0; i < adapter->max_vports; i++) { + if (adapter->vports[i]) + idpf_vport_dealloc(adapter->vports[i]); + } +} + +/** + * idpf_check_reset_complete - check that reset is complete + * @hw: pointer to hw struct + * @reset_reg: struct with reset registers + * + * Returns 0 if device is ready to use, or -EBUSY if it's in reset. + **/ +static int idpf_check_reset_complete(struct idpf_hw *hw, + struct idpf_reset_reg *reset_reg) +{ + struct idpf_adapter *adapter = hw->back; + int i; + + for (i = 0; i < 2000; i++) { + u32 reg_val = readl(reset_reg->rstat); + + /* 0xFFFFFFFF might be read if other side hasn't cleared the + * register for us yet and 0xFFFFFFFF is not a valid value for + * the register, so treat that as invalid. 
+ */ + if (reg_val != 0xFFFFFFFF && (reg_val & reset_reg->rstat_m)) + return 0; + + usleep_range(5000, 10000); + } + + dev_warn(&adapter->pdev->dev, "Device reset timeout!\n"); + /* Clear the reset flag unconditionally here since the reset + * technically isn't in progress anymore from the driver's perspective + */ + clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); + + return -EBUSY; +} + +/** + * idpf_set_vport_state - Set the vport state to be after the reset + * @adapter: Driver specific private structure + */ +static void idpf_set_vport_state(struct idpf_adapter *adapter) +{ + u16 i; + + for (i = 0; i < adapter->max_vports; i++) { + struct idpf_netdev_priv *np; + + if (!adapter->netdevs[i]) + continue; + + np = netdev_priv(adapter->netdevs[i]); + if (np->state == __IDPF_VPORT_UP) + set_bit(IDPF_VPORT_UP_REQUESTED, + adapter->vport_config[i]->flags); + } +} + +/** + * idpf_init_hard_reset - Initiate a hardware reset + * @adapter: Driver specific private structure + * + * Deallocate the vports and all the resources associated with them and + * reallocate. Also reinitialize the mailbox. Return 0 on success, + * negative on failure. + */ +static int idpf_init_hard_reset(struct idpf_adapter *adapter) +{ + struct idpf_reg_ops *reg_ops = &adapter->dev_ops.reg_ops; + struct device *dev = &adapter->pdev->dev; + struct net_device *netdev; + int err; + u16 i; + + mutex_lock(&adapter->vport_ctrl_lock); + + dev_info(dev, "Device HW Reset initiated\n"); + + /* Avoid TX hangs on reset */ + for (i = 0; i < adapter->max_vports; i++) { + netdev = adapter->netdevs[i]; + if (!netdev) + continue; + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + } + + /* Prepare for reset */ + if (test_and_clear_bit(IDPF_HR_DRV_LOAD, adapter->flags)) { + reg_ops->trigger_reset(adapter, IDPF_HR_DRV_LOAD); + } else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) { + bool is_reset = idpf_is_reset_detected(adapter); + + idpf_set_vport_state(adapter); + idpf_vc_core_deinit(adapter); + if (!is_reset) + reg_ops->trigger_reset(adapter, IDPF_HR_FUNC_RESET); + idpf_deinit_dflt_mbx(adapter); + } else { + dev_err(dev, "Unhandled hard reset cause\n"); + err = -EBADRQC; + goto unlock_mutex; + } + + /* Wait for reset to complete */ + err = idpf_check_reset_complete(&adapter->hw, &adapter->reset_reg); + if (err) { + dev_err(dev, "The driver was unable to contact the device's firmware. Check that the FW is running. 
Driver state= 0x%x\n", + adapter->state); + goto unlock_mutex; + } + + /* Reset is complete and so start building the driver resources again */ + err = idpf_init_dflt_mbx(adapter); + if (err) { + dev_err(dev, "Failed to initialize default mailbox: %d\n", err); + goto unlock_mutex; + } + + /* Initialize the state machine, also allocate memory and request + * resources + */ + err = idpf_vc_core_init(adapter); + if (err) { + idpf_deinit_dflt_mbx(adapter); + goto unlock_mutex; + } + + /* Wait until all the vports are initialized to release the reset lock, + * else user space callbacks may access uninitialized vports + */ + while (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags)) + msleep(100); + +unlock_mutex: + mutex_unlock(&adapter->vport_ctrl_lock); + + return err; +} + +/** + * idpf_vc_event_task - Handle virtchannel event logic + * @work: work queue struct + */ +void idpf_vc_event_task(struct work_struct *work) +{ + struct idpf_adapter *adapter; + + adapter = container_of(work, struct idpf_adapter, vc_event_task.work); + + if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) + return; + + if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags) || + test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) { + set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); + idpf_init_hard_reset(adapter); + } +} + +/** + * idpf_initiate_soft_reset - Initiate a software reset + * @vport: virtual port data struct + * @reset_cause: reason for the soft reset + * + * Soft reset only reallocs vport queue resources. Returns 0 on success, + * negative on failure. + */ +int idpf_initiate_soft_reset(struct idpf_vport *vport, + enum idpf_vport_reset_cause reset_cause) +{ + struct idpf_netdev_priv *np = netdev_priv(vport->netdev); + enum idpf_vport_state current_state = np->state; + struct idpf_adapter *adapter = vport->adapter; + struct idpf_vport *new_vport; + int err, i; + + /* If the system is low on memory, we can end up in a bad state if we + * free all the memory for queue resources and try to allocate them + * again. Instead, we can pre-allocate the new resources before doing + * anything and bail if the alloc fails. + * + * Make a clone of the existing vport to mimic its current + * configuration, then modify the new structure with any requested + * changes. Once the allocation of the new resources is done, stop the + * existing vport and copy the configuration to the main vport. If an + * error occurred, the existing vport will be untouched. + * + */ + new_vport = kzalloc(sizeof(*vport), GFP_KERNEL); + if (!new_vport) + return -ENOMEM; + + /* This purposely avoids copying the end of the struct because it + * contains wait_queues and mutexes and other stuff we don't want to + * mess with. Nothing below should use those variables from new_vport + * and should instead always refer to them in vport if they need to. 
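+ *
+ * The copy below is bounded by offsetof(struct idpf_vport, vc_state):
+ * every field laid out before vc_state is cloned, while vc_state itself
+ * and everything after it is left alone. A rough sketch of the layout
+ * (member names besides vc_state are placeholders, see idpf.h for the
+ * real struct):
+ *
+ *	struct idpf_vport {
+ *		u32 vport_id;			<- copied
+ *		u16 num_txq, num_rxq;		<- copied, adjusted by reset
+ *		...				<- more config state, copied
+ *		vc_state[...];			<- copy stops here
+ *		wait_queue_head_t vchnl_wq;	<- never copied
+ *		struct mutex vc_buf_lock;	<- never copied
+ *	};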
+ */ + memcpy(new_vport, vport, offsetof(struct idpf_vport, vc_state)); + + /* Adjust resource parameters prior to reallocating resources */ + switch (reset_cause) { + case IDPF_SR_Q_CHANGE: + err = idpf_vport_adjust_qs(new_vport); + if (err) + goto free_vport; + break; + case IDPF_SR_Q_DESC_CHANGE: + /* Update queue parameters before allocating resources */ + idpf_vport_calc_num_q_desc(new_vport); + break; + case IDPF_SR_MTU_CHANGE: + case IDPF_SR_RSC_CHANGE: + break; + default: + dev_err(&adapter->pdev->dev, "Unhandled soft reset cause\n"); + err = -EINVAL; + goto free_vport; + } + + err = idpf_vport_queues_alloc(new_vport); + if (err) + goto free_vport; + if (current_state <= __IDPF_VPORT_DOWN) { + idpf_send_delete_queues_msg(vport); + } else { + set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags); + idpf_vport_stop(vport); + } + + idpf_deinit_rss(vport); + /* We're passing in vport here because we need its wait_queue + * to send a message and it should be getting all the vport + * config data out of the adapter but we need to be careful not + * to add code to add_queues to change the vport config within + * vport itself as it will be wiped with a memcpy later. + */ + err = idpf_send_add_queues_msg(vport, new_vport->num_txq, + new_vport->num_complq, + new_vport->num_rxq, + new_vport->num_bufq); + if (err) + goto err_reset; + + /* Same comment as above regarding avoiding copying the wait_queues and + * mutexes applies here. We do not want to mess with those if possible. + */ + memcpy(vport, new_vport, offsetof(struct idpf_vport, vc_state)); + + /* Since idpf_vport_queues_alloc was called with new_vport, the queue + * back pointers are currently pointing to the local new_vport. Reset + * the backpointers to the original vport here + */ + for (i = 0; i < vport->num_txq_grp; i++) { + struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + int j; + + tx_qgrp->vport = vport; + for (j = 0; j < tx_qgrp->num_txq; j++) + tx_qgrp->txqs[j]->vport = vport; + + if (idpf_is_queue_model_split(vport->txq_model)) + tx_qgrp->complq->vport = vport; + } + + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + struct idpf_queue *q; + u16 num_rxq; + int j; + + rx_qgrp->vport = vport; + for (j = 0; j < vport->num_bufqs_per_qgrp; j++) + rx_qgrp->splitq.bufq_sets[j].bufq.vport = vport; + + if (idpf_is_queue_model_split(vport->rxq_model)) + num_rxq = rx_qgrp->splitq.num_rxq_sets; + else + num_rxq = rx_qgrp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++) { + if (idpf_is_queue_model_split(vport->rxq_model)) + q = &rx_qgrp->splitq.rxq_sets[j]->rxq; + else + q = rx_qgrp->singleq.rxqs[j]; + q->vport = vport; + } + } + + if (reset_cause == IDPF_SR_Q_CHANGE) + idpf_vport_alloc_vec_indexes(vport); + + err = idpf_set_real_num_queues(vport); + if (err) + goto err_reset; + + if (current_state == __IDPF_VPORT_UP) + err = idpf_vport_open(vport, false); + + kfree(new_vport); + + return err; + +err_reset: + idpf_vport_queues_rel(new_vport); +free_vport: + kfree(new_vport); + + return err; +} + +/** + * idpf_addr_sync - Callback for dev_(mc|uc)_sync to add address + * @netdev: the netdevice + * @addr: address to add + * + * Called by __dev_(mc|uc)_sync when an address needs to be added. We call + * __dev_(uc|mc)_sync from .set_rx_mode. Kernel takes addr_list_lock spinlock + * meaning we cannot sleep in this context. Due to this, we have to add the + * filter and send the virtchnl message asynchronously without waiting for the + * response from the other side. 
We won't know whether or not the operation + * actually succeeded until we get the message back. Returns 0 on success, + * negative on failure. + */ +static int idpf_addr_sync(struct net_device *netdev, const u8 *addr) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + + return idpf_add_mac_filter(np->vport, np, addr, true); +} + +/** + * idpf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address + * @netdev: the netdevice + * @addr: address to remove + * + * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call + * __dev_(uc|mc)_sync from .set_rx_mode. Kernel takes addr_list_lock spinlock + * meaning we cannot sleep in this context. Due to this, we have to delete the + * filter and send the virtchnl message asynchronously without waiting for the + * return from the other side. We won't know whether or not the operation + * actually succeeded until we get the message back. Returns 0 on success, + * negative on failure. + */ +static int idpf_addr_unsync(struct net_device *netdev, const u8 *addr) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + + /* Under some circumstances, we might receive a request to delete + * our own device address from our uc list. Because we store the + * device address in the VSI's MAC filter list, we need to ignore + * such requests and not delete our device address from this list. + */ + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + + idpf_del_mac_filter(np->vport, np, addr, true); + + return 0; +} + +/** + * idpf_set_rx_mode - NDO callback to set the netdev filters + * @netdev: network interface device structure + * + * Stack takes addr_list_lock spinlock before calling our .set_rx_mode. We + * cannot sleep in this context. + */ +static void idpf_set_rx_mode(struct net_device *netdev) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_user_config_data *config_data; + struct idpf_adapter *adapter; + bool changed = false; + struct device *dev; + int err; + + adapter = np->adapter; + dev = &adapter->pdev->dev; + + if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER)) { + __dev_uc_sync(netdev, idpf_addr_sync, idpf_addr_unsync); + __dev_mc_sync(netdev, idpf_addr_sync, idpf_addr_unsync); + } + + if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PROMISC)) + return; + + config_data = &adapter->vport_config[np->vport_idx]->user_config; + /* IFF_PROMISC enables both unicast and multicast promiscuous, + * while IFF_ALLMULTI only enables multicast such that: + * + * promisc + allmulti = unicast | multicast + * promisc + !allmulti = unicast | multicast + * !promisc + allmulti = multicast + */ + if ((netdev->flags & IFF_PROMISC) && + !test_and_set_bit(__IDPF_PROMISC_UC, config_data->user_flags)) { + changed = true; + dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n"); + if (!test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags)) + dev_info(dev, "Entering multicast promiscuous mode\n"); + } + + if (!(netdev->flags & IFF_PROMISC) && + test_and_clear_bit(__IDPF_PROMISC_UC, config_data->user_flags)) { + changed = true; + dev_info(dev, "Leaving promiscuous mode\n"); + } + + if (netdev->flags & IFF_ALLMULTI && + !test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags)) { + changed = true; + dev_info(dev, "Entering multicast promiscuous mode\n"); + } + + if (!(netdev->flags & (IFF_ALLMULTI | IFF_PROMISC)) && + test_and_clear_bit(__IDPF_PROMISC_MC, config_data->user_flags)) { + changed = true; + dev_info(dev, "Leaving multicast promiscuous mode\n"); + } + + if 
(!changed) + return; + + err = idpf_set_promiscuous(adapter, config_data, np->vport_id); + if (err) + dev_err(dev, "Failed to set promiscuous mode: %d\n", err); +} + +/** + * idpf_vport_manage_rss_lut - disable/enable RSS + * @vport: the vport being changed + * + * In the event of disable request for RSS, this function will zero out RSS + * LUT, while in the event of enable request for RSS, it will reconfigure RSS + * LUT with the default LUT configuration. + */ +static int idpf_vport_manage_rss_lut(struct idpf_vport *vport) +{ + bool ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH); + struct idpf_rss_data *rss_data; + u16 idx = vport->idx; + int lut_size; + + rss_data = &vport->adapter->vport_config[idx]->user_config.rss_data; + lut_size = rss_data->rss_lut_size * sizeof(u32); + + if (ena) { + /* This will contain the default or user configured LUT */ + memcpy(rss_data->rss_lut, rss_data->cached_lut, lut_size); + } else { + /* Save a copy of the current LUT to be restored later if + * requested. + */ + memcpy(rss_data->cached_lut, rss_data->rss_lut, lut_size); + + /* Zero out the current LUT to disable */ + memset(rss_data->rss_lut, 0, lut_size); + } + + return idpf_config_rss(vport); +} + +/** + * idpf_set_features - set the netdev feature flags + * @netdev: ptr to the netdev being adjusted + * @features: the feature set that the stack is suggesting + */ +static int idpf_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct idpf_adapter *adapter; + struct idpf_vport *vport; + int err = 0; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + adapter = vport->adapter; + + if (idpf_is_reset_in_prog(adapter)) { + dev_err(&adapter->pdev->dev, "Device is resetting, changing netdev features temporarily unavailable.\n"); + err = -EBUSY; + goto unlock_mutex; + } + + if (changed & NETIF_F_RXHASH) { + netdev->features ^= NETIF_F_RXHASH; + err = idpf_vport_manage_rss_lut(vport); + if (err) + goto unlock_mutex; + } + + if (changed & NETIF_F_GRO_HW) { + netdev->features ^= NETIF_F_GRO_HW; + err = idpf_initiate_soft_reset(vport, IDPF_SR_RSC_CHANGE); + if (err) + goto unlock_mutex; + } + + if (changed & NETIF_F_LOOPBACK) { + netdev->features ^= NETIF_F_LOOPBACK; + err = idpf_send_ena_dis_loopback_msg(vport); + } + +unlock_mutex: + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +/** + * idpf_open - Called when a network interface becomes active + * @netdev: network interface device structure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the netdev watchdog is enabled, + * and the stack is notified that the interface is ready. 
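+ *
+ * Sketch of the resulting call flow (from, e.g., 'ip link set <dev> up'):
+ *
+ *	idpf_open()
+ *	  -> idpf_vport_open(vport, true)
+ *	       -> queue/interrupt allocation and virtchnl configuration
+ *	       -> idpf_up_complete()	(carrier on, Tx queues started)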
+ * + * Returns 0 on success, negative value on failure + */ +static int idpf_open(struct net_device *netdev) +{ + struct idpf_vport *vport; + int err; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + err = idpf_vport_open(vport, true); + + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +/** + * idpf_change_mtu - NDO callback to change the MTU + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int idpf_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct idpf_vport *vport; + int err; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + netdev->mtu = new_mtu; + + err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE); + + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +/** + * idpf_features_check - Validate packet conforms to limits + * @skb: skb buffer + * @netdev: This port's netdev + * @features: Offload features that the stack believes apply + */ +static netdev_features_t idpf_features_check(struct sk_buff *skb, + struct net_device *netdev, + netdev_features_t features) +{ + struct idpf_vport *vport = idpf_netdev_to_vport(netdev); + struct idpf_adapter *adapter = vport->adapter; + size_t len; + + /* No point in doing any of this if neither checksum nor GSO are + * being requested for this frame. We can rule out both by just + * checking for CHECKSUM_PARTIAL + */ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return features; + + /* We cannot support GSO if the MSS is going to be less than + * 88 bytes. If it is then we need to drop support for GSO. + */ + if (skb_is_gso(skb) && + (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS)) + features &= ~NETIF_F_GSO_MASK; + + /* Ensure MACLEN is <= 126 bytes (63 words) and not an odd size */ + len = skb_network_offset(skb); + if (unlikely(len & ~(126))) + goto unsupported; + + len = skb_network_header_len(skb); + if (unlikely(len > idpf_get_max_tx_hdr_size(adapter))) + goto unsupported; + + if (!skb->encapsulation) + return features; + + /* L4TUNLEN can support 127 words */ + len = skb_inner_network_header(skb) - skb_transport_header(skb); + if (unlikely(len & ~(127 * 2))) + goto unsupported; + + /* IPLEN can support at most 127 dwords */ + len = skb_inner_network_header_len(skb); + if (unlikely(len > idpf_get_max_tx_hdr_size(adapter))) + goto unsupported; + + /* No need to validate L4LEN as TCP is the only protocol with a + * flexible value and we support all possible values supported + * by TCP, which is at most 15 dwords + */ + + return features; + +unsupported: + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); +} + +/** + * idpf_set_mac - NDO callback to set port mac address + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int idpf_set_mac(struct net_device *netdev, void *p) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_config *vport_config; + struct sockaddr *addr = p; + struct idpf_vport *vport; + int err = 0; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + if (!idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, + VIRTCHNL2_CAP_MACFILTER)) { + dev_info(&vport->adapter->pdev->dev, "Setting MAC address is not supported\n"); + err = -EOPNOTSUPP; + goto unlock_mutex; + } + + if (!is_valid_ether_addr(addr->sa_data)) { + dev_info(&vport->adapter->pdev->dev, "Invalid MAC address: %pM\n", + 
addr->sa_data); + err = -EADDRNOTAVAIL; + goto unlock_mutex; + } + + if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) + goto unlock_mutex; + + vport_config = vport->adapter->vport_config[vport->idx]; + err = idpf_add_mac_filter(vport, np, addr->sa_data, false); + if (err) { + __idpf_del_mac_filter(vport_config, addr->sa_data); + goto unlock_mutex; + } + + if (is_valid_ether_addr(vport->default_mac_addr)) + idpf_del_mac_filter(vport, np, vport->default_mac_addr, false); + + ether_addr_copy(vport->default_mac_addr, addr->sa_data); + eth_hw_addr_set(netdev, addr->sa_data); + +unlock_mutex: + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +/** + * idpf_alloc_dma_mem - Allocate dma memory + * @hw: pointer to hw struct + * @mem: pointer to dma_mem struct + * @size: size of the memory to allocate + */ +void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size) +{ + struct idpf_adapter *adapter = hw->back; + size_t sz = ALIGN(size, 4096); + + mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz, + &mem->pa, GFP_KERNEL); + mem->size = sz; + + return mem->va; +} + +/** + * idpf_free_dma_mem - Free the allocated dma memory + * @hw: pointer to hw struct + * @mem: pointer to dma_mem struct + */ +void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem) +{ + struct idpf_adapter *adapter = hw->back; + + dma_free_coherent(&adapter->pdev->dev, mem->size, + mem->va, mem->pa); + mem->size = 0; + mem->va = NULL; + mem->pa = 0; +} + +static const struct net_device_ops idpf_netdev_ops_splitq = { + .ndo_open = idpf_open, + .ndo_stop = idpf_stop, + .ndo_start_xmit = idpf_tx_splitq_start, + .ndo_features_check = idpf_features_check, + .ndo_set_rx_mode = idpf_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = idpf_set_mac, + .ndo_change_mtu = idpf_change_mtu, + .ndo_get_stats64 = idpf_get_stats64, + .ndo_set_features = idpf_set_features, + .ndo_tx_timeout = idpf_tx_timeout, +}; + +static const struct net_device_ops idpf_netdev_ops_singleq = { + .ndo_open = idpf_open, + .ndo_stop = idpf_stop, + .ndo_start_xmit = idpf_tx_singleq_start, + .ndo_features_check = idpf_features_check, + .ndo_set_rx_mode = idpf_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = idpf_set_mac, + .ndo_change_mtu = idpf_change_mtu, + .ndo_get_stats64 = idpf_get_stats64, + .ndo_set_features = idpf_set_features, + .ndo_tx_timeout = idpf_tx_timeout, +}; diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c new file mode 100644 index 000000000000..e1febc74cefd --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_main.c @@ -0,0 +1,279 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2023 Intel Corporation */ + +#include "idpf.h" +#include "idpf_devids.h" + +#define DRV_SUMMARY "Intel(R) Infrastructure Data Path Function Linux Driver" + +MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_LICENSE("GPL"); + +/** + * idpf_remove - Device removal routine + * @pdev: PCI device information struct + */ +static void idpf_remove(struct pci_dev *pdev) +{ + struct idpf_adapter *adapter = pci_get_drvdata(pdev); + int i; + + set_bit(IDPF_REMOVE_IN_PROG, adapter->flags); + + /* Wait until vc_event_task is done to consider if any hard reset is + * in progress else we may go ahead and release the resources but the + * thread doing the hard reset might continue the init path and + * end up in bad state. 
+ */ + cancel_delayed_work_sync(&adapter->vc_event_task); + if (adapter->num_vfs) + idpf_sriov_configure(pdev, 0); + + idpf_vc_core_deinit(adapter); + /* Be a good citizen and leave the device clean on exit */ + adapter->dev_ops.reg_ops.trigger_reset(adapter, IDPF_HR_FUNC_RESET); + idpf_deinit_dflt_mbx(adapter); + + if (!adapter->netdevs) + goto destroy_wqs; + + /* There are some cases where it's possible to still have netdevs + * registered with the stack at this point, e.g. if the driver detected + * a HW reset and rmmod is called before it fully recovers. Unregister + * any stale netdevs here. + */ + for (i = 0; i < adapter->max_vports; i++) { + if (!adapter->netdevs[i]) + continue; + if (adapter->netdevs[i]->reg_state != NETREG_UNINITIALIZED) + unregister_netdev(adapter->netdevs[i]); + free_netdev(adapter->netdevs[i]); + adapter->netdevs[i] = NULL; + } + +destroy_wqs: + destroy_workqueue(adapter->init_wq); + destroy_workqueue(adapter->serv_wq); + destroy_workqueue(adapter->mbx_wq); + destroy_workqueue(adapter->stats_wq); + destroy_workqueue(adapter->vc_event_wq); + + for (i = 0; i < adapter->max_vports; i++) { + kfree(adapter->vport_config[i]); + adapter->vport_config[i] = NULL; + } + kfree(adapter->vport_config); + adapter->vport_config = NULL; + kfree(adapter->netdevs); + adapter->netdevs = NULL; + + mutex_destroy(&adapter->vport_ctrl_lock); + mutex_destroy(&adapter->vector_lock); + mutex_destroy(&adapter->queue_lock); + mutex_destroy(&adapter->vc_buf_lock); + + pci_set_drvdata(pdev, NULL); + kfree(adapter); +} + +/** + * idpf_shutdown - PCI callback for shutting down device + * @pdev: PCI device information struct + */ +static void idpf_shutdown(struct pci_dev *pdev) +{ + idpf_remove(pdev); + + if (system_state == SYSTEM_POWER_OFF) + pci_set_power_state(pdev, PCI_D3hot); +} + +/** + * idpf_cfg_hw - Initialize HW struct + * @adapter: adapter to setup hw struct for + * + * Returns 0 on success, negative on failure + */ +static int idpf_cfg_hw(struct idpf_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct idpf_hw *hw = &adapter->hw; + + hw->hw_addr = pcim_iomap_table(pdev)[0]; + if (!hw->hw_addr) { + pci_err(pdev, "failed to allocate PCI iomap table\n"); + + return -ENOMEM; + } + + hw->back = adapter; + + return 0; +} + +/** + * idpf_probe - Device initialization routine + * @pdev: PCI device information struct + * @ent: entry in idpf_pci_tbl + * + * Returns 0 on success, negative on failure + */ +static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct device *dev = &pdev->dev; + struct idpf_adapter *adapter; + int err; + + adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); + if (!adapter) + return -ENOMEM; + + adapter->req_tx_splitq = true; + adapter->req_rx_splitq = true; + + switch (ent->device) { + case IDPF_DEV_ID_PF: + idpf_dev_ops_init(adapter); + break; + case IDPF_DEV_ID_VF: + idpf_vf_dev_ops_init(adapter); + adapter->crc_enable = true; + break; + default: + err = -ENODEV; + dev_err(&pdev->dev, "Unexpected dev ID 0x%x in idpf probe\n", + ent->device); + goto err_free; + } + + adapter->pdev = pdev; + err = pcim_enable_device(pdev); + if (err) + goto err_free; + + err = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); + if (err) { + pci_err(pdev, "pcim_iomap_regions failed %pe\n", ERR_PTR(err)); + + goto err_free; + } + + /* set up for high or low dma */ + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (err) { + pci_err(pdev, "DMA configuration failed: %pe\n", ERR_PTR(err)); + + goto err_free; + } + + 
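+	/* Enable bus mastering so the device can issue DMA transactions,
+	 * and stash the adapter pointer in drvdata so the remove, shutdown
+	 * and SR-IOV callbacks can retrieve it with pci_get_drvdata().
+	 */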
pci_set_master(pdev); + pci_set_drvdata(pdev, adapter); + + adapter->init_wq = alloc_workqueue("%s-%s-init", 0, 0, + dev_driver_string(dev), + dev_name(dev)); + if (!adapter->init_wq) { + dev_err(dev, "Failed to allocate init workqueue\n"); + err = -ENOMEM; + goto err_free; + } + + adapter->serv_wq = alloc_workqueue("%s-%s-service", 0, 0, + dev_driver_string(dev), + dev_name(dev)); + if (!adapter->serv_wq) { + dev_err(dev, "Failed to allocate service workqueue\n"); + err = -ENOMEM; + goto err_serv_wq_alloc; + } + + adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", 0, 0, + dev_driver_string(dev), + dev_name(dev)); + if (!adapter->mbx_wq) { + dev_err(dev, "Failed to allocate mailbox workqueue\n"); + err = -ENOMEM; + goto err_mbx_wq_alloc; + } + + adapter->stats_wq = alloc_workqueue("%s-%s-stats", 0, 0, + dev_driver_string(dev), + dev_name(dev)); + if (!adapter->stats_wq) { + dev_err(dev, "Failed to allocate workqueue\n"); + err = -ENOMEM; + goto err_stats_wq_alloc; + } + + adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event", 0, 0, + dev_driver_string(dev), + dev_name(dev)); + if (!adapter->vc_event_wq) { + dev_err(dev, "Failed to allocate virtchnl event workqueue\n"); + err = -ENOMEM; + goto err_vc_event_wq_alloc; + } + + /* setup msglvl */ + adapter->msg_enable = netif_msg_init(-1, IDPF_AVAIL_NETIF_M); + + err = idpf_cfg_hw(adapter); + if (err) { + dev_err(dev, "Failed to configure HW structure for adapter: %d\n", + err); + goto err_cfg_hw; + } + + mutex_init(&adapter->vport_ctrl_lock); + mutex_init(&adapter->vector_lock); + mutex_init(&adapter->queue_lock); + mutex_init(&adapter->vc_buf_lock); + + init_waitqueue_head(&adapter->vchnl_wq); + + INIT_DELAYED_WORK(&adapter->init_task, idpf_init_task); + INIT_DELAYED_WORK(&adapter->serv_task, idpf_service_task); + INIT_DELAYED_WORK(&adapter->mbx_task, idpf_mbx_task); + INIT_DELAYED_WORK(&adapter->stats_task, idpf_statistics_task); + INIT_DELAYED_WORK(&adapter->vc_event_task, idpf_vc_event_task); + + adapter->dev_ops.reg_ops.reset_reg_init(adapter); + set_bit(IDPF_HR_DRV_LOAD, adapter->flags); + queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task, + msecs_to_jiffies(10 * (pdev->devfn & 0x07))); + + return 0; + +err_cfg_hw: + destroy_workqueue(adapter->vc_event_wq); +err_vc_event_wq_alloc: + destroy_workqueue(adapter->stats_wq); +err_stats_wq_alloc: + destroy_workqueue(adapter->mbx_wq); +err_mbx_wq_alloc: + destroy_workqueue(adapter->serv_wq); +err_serv_wq_alloc: + destroy_workqueue(adapter->init_wq); +err_free: + kfree(adapter); + return err; +} + +/* idpf_pci_tbl - PCI Dev idpf ID Table + */ +static const struct pci_device_id idpf_pci_tbl[] = { + { PCI_VDEVICE(INTEL, IDPF_DEV_ID_PF)}, + { PCI_VDEVICE(INTEL, IDPF_DEV_ID_VF)}, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(pci, idpf_pci_tbl); + +static struct pci_driver idpf_driver = { + .name = KBUILD_MODNAME, + .id_table = idpf_pci_tbl, + .probe = idpf_probe, + .sriov_configure = idpf_sriov_configure, + .remove = idpf_remove, + .shutdown = idpf_shutdown, +}; +module_pci_driver(idpf_driver); diff --git a/drivers/net/ethernet/intel/idpf/idpf_mem.h b/drivers/net/ethernet/intel/idpf/idpf_mem.h new file mode 100644 index 000000000000..b21a04fccf0f --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_mem.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023 Intel Corporation */ + +#ifndef _IDPF_MEM_H_ +#define _IDPF_MEM_H_ + +#include <linux/io.h> + +struct idpf_dma_mem { + void *va; + dma_addr_t pa; + size_t size; +}; + +#define wr32(a, reg, value) 
writel((value), ((a)->hw_addr + (reg))) +#define rd32(a, reg) readl((a)->hw_addr + (reg)) +#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) +#define rd64(a, reg) readq((a)->hw_addr + (reg)) + +#endif /* _IDPF_MEM_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c new file mode 100644 index 000000000000..81288a17da2a --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c @@ -0,0 +1,1183 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2023 Intel Corporation */ + +#include "idpf.h" + +/** + * idpf_tx_singleq_csum - Enable tx checksum offloads + * @skb: pointer to skb + * @off: pointer to struct that holds offload parameters + * + * Returns 0 or error (negative) if checksum offload cannot be executed, 1 + * otherwise. + */ +static int idpf_tx_singleq_csum(struct sk_buff *skb, + struct idpf_tx_offload_params *off) +{ + u32 l4_len, l3_len, l2_len; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + unsigned char *hdr; + } l4; + u32 offset, cmd = 0; + u8 l4_proto = 0; + __be16 frag_off; + bool is_tso; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* compute outer L2 header size */ + l2_len = ip.hdr - skb->data; + offset = FIELD_PREP(0x3F << IDPF_TX_DESC_LEN_MACLEN_S, l2_len / 2); + is_tso = !!(off->tx_flags & IDPF_TX_FLAGS_TSO); + if (skb->encapsulation) { + u32 tunnel = 0; + + /* define outer network header type */ + if (off->tx_flags & IDPF_TX_FLAGS_IPV4) { + /* The stack computes the IP header already, the only + * time we need the hardware to recompute it is in the + * case of TSO. + */ + tunnel |= is_tso ? 
+ IDPF_TX_CTX_EXT_IP_IPV4 : + IDPF_TX_CTX_EXT_IP_IPV4_NO_CSUM; + + l4_proto = ip.v4->protocol; + } else if (off->tx_flags & IDPF_TX_FLAGS_IPV6) { + tunnel |= IDPF_TX_CTX_EXT_IP_IPV6; + + l4_proto = ip.v6->nexthdr; + if (ipv6_ext_hdr(l4_proto)) + ipv6_skip_exthdr(skb, skb_network_offset(skb) + + sizeof(*ip.v6), + &l4_proto, &frag_off); + } + + /* define outer transport */ + switch (l4_proto) { + case IPPROTO_UDP: + tunnel |= IDPF_TXD_CTX_UDP_TUNNELING; + break; + case IPPROTO_GRE: + tunnel |= IDPF_TXD_CTX_GRE_TUNNELING; + break; + case IPPROTO_IPIP: + case IPPROTO_IPV6: + l4.hdr = skb_inner_network_header(skb); + break; + default: + if (is_tso) + return -1; + + skb_checksum_help(skb); + + return 0; + } + off->tx_flags |= IDPF_TX_FLAGS_TUNNEL; + + /* compute outer L3 header size */ + tunnel |= FIELD_PREP(IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_M, + (l4.hdr - ip.hdr) / 4); + + /* switch IP header pointer from outer to inner header */ + ip.hdr = skb_inner_network_header(skb); + + /* compute tunnel header size */ + tunnel |= FIELD_PREP(IDPF_TXD_CTX_QW0_TUNN_NATLEN_M, + (ip.hdr - l4.hdr) / 2); + + /* indicate if we need to offload outer UDP header */ + if (is_tso && + !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) + tunnel |= IDPF_TXD_CTX_QW0_TUNN_L4T_CS_M; + + /* record tunnel offload values */ + off->cd_tunneling |= tunnel; + + /* switch L4 header pointer from outer to inner */ + l4.hdr = skb_inner_transport_header(skb); + l4_proto = 0; + + /* reset type as we transition from outer to inner headers */ + off->tx_flags &= ~(IDPF_TX_FLAGS_IPV4 | IDPF_TX_FLAGS_IPV6); + if (ip.v4->version == 4) + off->tx_flags |= IDPF_TX_FLAGS_IPV4; + if (ip.v6->version == 6) + off->tx_flags |= IDPF_TX_FLAGS_IPV6; + } + + /* Enable IP checksum offloads */ + if (off->tx_flags & IDPF_TX_FLAGS_IPV4) { + l4_proto = ip.v4->protocol; + /* See comment above regarding need for HW to recompute IP + * header checksum in the case of TSO. 
+ */ + if (is_tso) + cmd |= IDPF_TX_DESC_CMD_IIPT_IPV4_CSUM; + else + cmd |= IDPF_TX_DESC_CMD_IIPT_IPV4; + + } else if (off->tx_flags & IDPF_TX_FLAGS_IPV6) { + cmd |= IDPF_TX_DESC_CMD_IIPT_IPV6; + l4_proto = ip.v6->nexthdr; + if (ipv6_ext_hdr(l4_proto)) + ipv6_skip_exthdr(skb, skb_network_offset(skb) + + sizeof(*ip.v6), &l4_proto, + &frag_off); + } else { + return -1; + } + + /* compute inner L3 header size */ + l3_len = l4.hdr - ip.hdr; + offset |= (l3_len / 4) << IDPF_TX_DESC_LEN_IPLEN_S; + + /* Enable L4 checksum offloads */ + switch (l4_proto) { + case IPPROTO_TCP: + /* enable checksum offloads */ + cmd |= IDPF_TX_DESC_CMD_L4T_EOFT_TCP; + l4_len = l4.tcp->doff; + break; + case IPPROTO_UDP: + /* enable UDP checksum offload */ + cmd |= IDPF_TX_DESC_CMD_L4T_EOFT_UDP; + l4_len = sizeof(struct udphdr) >> 2; + break; + case IPPROTO_SCTP: + /* enable SCTP checksum offload */ + cmd |= IDPF_TX_DESC_CMD_L4T_EOFT_SCTP; + l4_len = sizeof(struct sctphdr) >> 2; + break; + default: + if (is_tso) + return -1; + + skb_checksum_help(skb); + + return 0; + } + + offset |= l4_len << IDPF_TX_DESC_LEN_L4_LEN_S; + off->td_cmd |= cmd; + off->hdr_offsets |= offset; + + return 1; +} + +/** + * idpf_tx_singleq_map - Build the Tx base descriptor + * @tx_q: queue to send buffer on + * @first: first buffer info buffer to use + * @offloads: pointer to struct that holds offload parameters + * + * This function loops over the skb data pointed to by *first + * and gets a physical address for each memory location and programs + * it and the length into the transmit base mode descriptor. + */ +static void idpf_tx_singleq_map(struct idpf_queue *tx_q, + struct idpf_tx_buf *first, + struct idpf_tx_offload_params *offloads) +{ + u32 offsets = offloads->hdr_offsets; + struct idpf_tx_buf *tx_buf = first; + struct idpf_base_tx_desc *tx_desc; + struct sk_buff *skb = first->skb; + u64 td_cmd = offloads->td_cmd; + unsigned int data_len, size; + u16 i = tx_q->next_to_use; + struct netdev_queue *nq; + skb_frag_t *frag; + dma_addr_t dma; + u64 td_tag = 0; + + data_len = skb->data_len; + size = skb_headlen(skb); + + tx_desc = IDPF_BASE_TX_DESC(tx_q, i); + + dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE); + + /* write each descriptor with CRC bit */ + if (tx_q->vport->crc_enable) + td_cmd |= IDPF_TX_DESC_CMD_ICRC; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED; + + if (dma_mapping_error(tx_q->dev, dma)) + return idpf_tx_dma_map_error(tx_q, skb, first, i); + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buf, len, size); + dma_unmap_addr_set(tx_buf, dma, dma); + + /* align size to end of page */ + max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1); + tx_desc->buf_addr = cpu_to_le64(dma); + + /* account for data chunks larger than the hardware + * can handle + */ + while (unlikely(size > IDPF_TX_MAX_DESC_DATA)) { + tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, + offsets, + max_data, + td_tag); + tx_desc++; + i++; + + if (i == tx_q->desc_count) { + tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); + i = 0; + } + + dma += max_data; + size -= max_data; + + max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED; + tx_desc->buf_addr = cpu_to_le64(dma); + } + + if (!data_len) + break; + + tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets, + size, td_tag); + tx_desc++; + i++; + + if (i == tx_q->desc_count) { + tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); + i = 0; + } + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_q->dev, frag, 0, size, + 
DMA_TO_DEVICE); + + tx_buf = &tx_q->tx_buf[i]; + } + + skb_tx_timestamp(first->skb); + + /* write last descriptor with RS and EOP bits */ + td_cmd |= (u64)(IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS); + + tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets, + size, td_tag); + + IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + netdev_tx_sent_queue(nq, first->bytecount); + + idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more()); +} + +/** + * idpf_tx_singleq_get_ctx_desc - grab next desc and update buffer ring + * @txq: queue to put context descriptor on + * + * Since the TX buffer ring mimics the descriptor ring, update the tx buffer + * ring entry to reflect that this index is a context descriptor + */ +static struct idpf_base_tx_ctx_desc * +idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq) +{ + struct idpf_base_tx_ctx_desc *ctx_desc; + int ntu = txq->next_to_use; + + memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf)); + txq->tx_buf[ntu].ctx_entry = true; + + ctx_desc = IDPF_BASE_TX_CTX_DESC(txq, ntu); + + IDPF_SINGLEQ_BUMP_RING_IDX(txq, ntu); + txq->next_to_use = ntu; + + return ctx_desc; +} + +/** + * idpf_tx_singleq_build_ctx_desc - populate context descriptor + * @txq: queue to send buffer on + * @offload: offload parameter structure + **/ +static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq, + struct idpf_tx_offload_params *offload) +{ + struct idpf_base_tx_ctx_desc *desc = idpf_tx_singleq_get_ctx_desc(txq); + u64 qw1 = (u64)IDPF_TX_DESC_DTYPE_CTX; + + if (offload->tso_segs) { + qw1 |= IDPF_TX_CTX_DESC_TSO << IDPF_TXD_CTX_QW1_CMD_S; + qw1 |= ((u64)offload->tso_len << IDPF_TXD_CTX_QW1_TSO_LEN_S) & + IDPF_TXD_CTX_QW1_TSO_LEN_M; + qw1 |= ((u64)offload->mss << IDPF_TXD_CTX_QW1_MSS_S) & + IDPF_TXD_CTX_QW1_MSS_M; + + u64_stats_update_begin(&txq->stats_sync); + u64_stats_inc(&txq->q_stats.tx.lso_pkts); + u64_stats_update_end(&txq->stats_sync); + } + + desc->qw0.tunneling_params = cpu_to_le32(offload->cd_tunneling); + + desc->qw0.l2tag2 = 0; + desc->qw0.rsvd1 = 0; + desc->qw1 = cpu_to_le64(qw1); +} + +/** + * idpf_tx_singleq_frame - Sends buffer on Tx ring using base descriptors + * @skb: send buffer + * @tx_q: queue to send buffer on + * + * Returns NETDEV_TX_OK if sent, else an error code + */ +static netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, + struct idpf_queue *tx_q) +{ + struct idpf_tx_offload_params offload = { }; + struct idpf_tx_buf *first; + unsigned int count; + __be16 protocol; + int csum, tso; + + count = idpf_tx_desc_count_required(tx_q, skb); + if (unlikely(!count)) + return idpf_tx_drop_skb(tx_q, skb); + + if (idpf_tx_maybe_stop_common(tx_q, + count + IDPF_TX_DESCS_PER_CACHE_LINE + + IDPF_TX_DESCS_FOR_CTX)) { + idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); + + return NETDEV_TX_BUSY; + } + + protocol = vlan_get_protocol(skb); + if (protocol == htons(ETH_P_IP)) + offload.tx_flags |= IDPF_TX_FLAGS_IPV4; + else if (protocol == htons(ETH_P_IPV6)) + offload.tx_flags |= IDPF_TX_FLAGS_IPV6; + + tso = idpf_tso(skb, &offload); + if (tso < 0) + goto out_drop; + + csum = idpf_tx_singleq_csum(skb, &offload); + if (csum < 0) + goto out_drop; + + if (tso || offload.cd_tunneling) + idpf_tx_singleq_build_ctx_desc(tx_q, &offload); + + /* record the location of the first descriptor for this packet */ + first = &tx_q->tx_buf[tx_q->next_to_use]; + first->skb = skb; + + if (tso) { + first->gso_segs = 
offload.tso_segs; + first->bytecount = skb->len + ((first->gso_segs - 1) * offload.tso_hdr_len); + } else { + first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); + first->gso_segs = 1; + } + idpf_tx_singleq_map(tx_q, first, &offload); + + return NETDEV_TX_OK; + +out_drop: + return idpf_tx_drop_skb(tx_q, skb); +} + +/** + * idpf_tx_singleq_start - Selects the right Tx queue to send buffer + * @skb: send buffer + * @netdev: network interface device structure + * + * Returns NETDEV_TX_OK if sent, else an error code + */ +netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb, + struct net_device *netdev) +{ + struct idpf_vport *vport = idpf_netdev_to_vport(netdev); + struct idpf_queue *tx_q; + + tx_q = vport->txqs[skb_get_queue_mapping(skb)]; + + /* hardware can't handle really short frames, hardware padding works + * beyond this point + */ + if (skb_put_padto(skb, IDPF_TX_MIN_PKT_LEN)) { + idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); + + return NETDEV_TX_OK; + } + + return idpf_tx_singleq_frame(skb, tx_q); +} + +/** + * idpf_tx_singleq_clean - Reclaim resources from queue + * @tx_q: Tx queue to clean + * @napi_budget: Used to determine if we are in netpoll + * @cleaned: returns number of packets cleaned + * + */ +static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget, + int *cleaned) +{ + unsigned int budget = tx_q->vport->compln_clean_budget; + unsigned int total_bytes = 0, total_pkts = 0; + struct idpf_base_tx_desc *tx_desc; + s16 ntc = tx_q->next_to_clean; + struct idpf_netdev_priv *np; + struct idpf_tx_buf *tx_buf; + struct idpf_vport *vport; + struct netdev_queue *nq; + bool dont_wake; + + tx_desc = IDPF_BASE_TX_DESC(tx_q, ntc); + tx_buf = &tx_q->tx_buf[ntc]; + ntc -= tx_q->desc_count; + + do { + struct idpf_base_tx_desc *eop_desc; + + /* If this entry in the ring was used as a context descriptor, + * its corresponding entry in the buffer ring will indicate as + * such. We can skip this descriptor since there is no buffer + * to clean. + */ + if (tx_buf->ctx_entry) { + /* Clear this flag here to avoid stale flag values when + * this buffer is used for actual data in the future. + * There are cases where the tx_buf struct / the flags + * field will not be cleared before being reused. 
+ */ + tx_buf->ctx_entry = false; + goto fetch_next_txq_desc; + } + + /* if next_to_watch is not set then no work pending */ + eop_desc = (struct idpf_base_tx_desc *)tx_buf->next_to_watch; + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if the descriptor isn't done, no work yet to do */ + if (!(eop_desc->qw1 & + cpu_to_le64(IDPF_TX_DESC_DTYPE_DESC_DONE))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buf->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buf->bytecount; + total_pkts += tx_buf->gso_segs; + + napi_consume_skb(tx_buf->skb, napi_budget); + + /* unmap skb header data */ + dma_unmap_single(tx_q->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + + /* clear tx_buf data */ + tx_buf->skb = NULL; + dma_unmap_len_set(tx_buf, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buf++; + tx_desc++; + ntc++; + if (unlikely(!ntc)) { + ntc -= tx_q->desc_count; + tx_buf = tx_q->tx_buf; + tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buf, len)) { + dma_unmap_page(tx_q->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buf, len, 0); + } + } + + /* update budget only if we did something */ + budget--; + +fetch_next_txq_desc: + tx_buf++; + tx_desc++; + ntc++; + if (unlikely(!ntc)) { + ntc -= tx_q->desc_count; + tx_buf = tx_q->tx_buf; + tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); + } + } while (likely(budget)); + + ntc += tx_q->desc_count; + tx_q->next_to_clean = ntc; + + *cleaned += total_pkts; + + u64_stats_update_begin(&tx_q->stats_sync); + u64_stats_add(&tx_q->q_stats.tx.packets, total_pkts); + u64_stats_add(&tx_q->q_stats.tx.bytes, total_bytes); + u64_stats_update_end(&tx_q->stats_sync); + + vport = tx_q->vport; + np = netdev_priv(vport->netdev); + nq = netdev_get_tx_queue(vport->netdev, tx_q->idx); + + dont_wake = np->state != __IDPF_VPORT_UP || + !netif_carrier_ok(vport->netdev); + __netif_txq_completed_wake(nq, total_pkts, total_bytes, + IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH, + dont_wake); + + return !!budget; +} + +/** + * idpf_tx_singleq_clean_all - Clean all Tx queues + * @q_vec: queue vector + * @budget: Used to determine if we are in netpoll + * @cleaned: returns number of packets cleaned + * + * Returns false if clean is not complete else returns true + */ +static bool idpf_tx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget, + int *cleaned) +{ + u16 num_txq = q_vec->num_txq; + bool clean_complete = true; + int i, budget_per_q; + + budget_per_q = num_txq ? max(budget / num_txq, 1) : 0; + for (i = 0; i < num_txq; i++) { + struct idpf_queue *q; + + q = q_vec->tx[i]; + clean_complete &= idpf_tx_singleq_clean(q, budget_per_q, + cleaned); + } + + return clean_complete; +} + +/** + * idpf_rx_singleq_test_staterr - tests bits in Rx descriptor + * status and error fields + * @rx_desc: pointer to receive descriptor (in le64 format) + * @stat_err_bits: value to mask + * + * This function does some fast chicanery in order to return the + * value of the mask which is really only used for boolean tests. + * The status_error_ptype_len doesn't need to be shifted because it begins + * at offset zero. 
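+ *
+ * For example, the descriptor-done test that the Rx clean loop performs
+ * is simply:
+ *
+ *	if (idpf_rx_singleq_test_staterr(rx_desc,
+ *					 VIRTCHNL2_RX_BASE_DESC_STATUS_DD_M))
+ *		... descriptor has been written back by hardware ...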
+ */ +static bool idpf_rx_singleq_test_staterr(const union virtchnl2_rx_desc *rx_desc, + const u64 stat_err_bits) +{ + return !!(rx_desc->base_wb.qword1.status_error_ptype_len & + cpu_to_le64(stat_err_bits)); +} + +/** + * idpf_rx_singleq_is_non_eop - process handling of non-EOP buffers + * @rxq: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * @ntc: next to clean + */ +static bool idpf_rx_singleq_is_non_eop(struct idpf_queue *rxq, + union virtchnl2_rx_desc *rx_desc, + struct sk_buff *skb, u16 ntc) +{ + /* if we are the last buffer then there is nothing else to do */ + if (likely(idpf_rx_singleq_test_staterr(rx_desc, IDPF_RXD_EOF_SINGLEQ))) + return false; + + return true; +} + +/** + * idpf_rx_singleq_csum - Indicate in skb if checksum is good + * @rxq: Rx ring being processed + * @skb: skb currently being received and modified + * @csum_bits: checksum bits from descriptor + * @ptype: the packet type decoded by hardware + * + * skb->protocol must be set before this function is called + */ +static void idpf_rx_singleq_csum(struct idpf_queue *rxq, struct sk_buff *skb, + struct idpf_rx_csum_decoded *csum_bits, + u16 ptype) +{ + struct idpf_rx_ptype_decoded decoded; + bool ipv4, ipv6; + + /* check if Rx checksum is enabled */ + if (unlikely(!(rxq->vport->netdev->features & NETIF_F_RXCSUM))) + return; + + /* check if HW has decoded the packet and checksum */ + if (unlikely(!(csum_bits->l3l4p))) + return; + + decoded = rxq->vport->rx_ptype_lkup[ptype]; + if (unlikely(!(decoded.known && decoded.outer_ip))) + return; + + ipv4 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV4); + ipv6 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV6); + + /* Check if there were any checksum errors */ + if (unlikely(ipv4 && (csum_bits->ipe || csum_bits->eipe))) + goto checksum_fail; + + /* Device could not do any checksum offload for certain extension + * headers as indicated by setting IPV6EXADD bit + */ + if (unlikely(ipv6 && csum_bits->ipv6exadd)) + return; + + /* check for L4 errors and handle packets that were not able to be + * checksummed due to arrival speed + */ + if (unlikely(csum_bits->l4e)) + goto checksum_fail; + + if (unlikely(csum_bits->nat && csum_bits->eudpe)) + goto checksum_fail; + + /* Handle packets that were not able to be checksummed due to arrival + * speed, in this case the stack can compute the csum. + */ + if (unlikely(csum_bits->pprs)) + return; + + /* If there is an outer header present that might contain a checksum + * we need to bump the checksum level by 1 to reflect the fact that + * we are indicating we validated the inner checksum. 
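+ *
+ * For instance, a tunneled TCP packet whose inner checksum was verified
+ * leaves this function with (illustratively):
+ *
+ *	skb->csum_level = 1;			(inner-most of two levels)
+ *	skb->ip_summed = CHECKSUM_UNNECESSARY;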
+ */ + if (decoded.tunnel_type >= IDPF_RX_PTYPE_TUNNEL_IP_GRENAT) + skb->csum_level = 1; + + /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */ + switch (decoded.inner_prot) { + case IDPF_RX_PTYPE_INNER_PROT_ICMP: + case IDPF_RX_PTYPE_INNER_PROT_TCP: + case IDPF_RX_PTYPE_INNER_PROT_UDP: + case IDPF_RX_PTYPE_INNER_PROT_SCTP: + skb->ip_summed = CHECKSUM_UNNECESSARY; + return; + default: + return; + } + +checksum_fail: + u64_stats_update_begin(&rxq->stats_sync); + u64_stats_inc(&rxq->q_stats.rx.hw_csum_err); + u64_stats_update_end(&rxq->stats_sync); +} + +/** + * idpf_rx_singleq_base_csum - Indicate in skb if hw indicated a good cksum + * @rx_q: Rx completion queue + * @skb: skb currently being received and modified + * @rx_desc: the receive descriptor + * @ptype: Rx packet type + * + * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte + * descriptor writeback format. + **/ +static void idpf_rx_singleq_base_csum(struct idpf_queue *rx_q, + struct sk_buff *skb, + union virtchnl2_rx_desc *rx_desc, + u16 ptype) +{ + struct idpf_rx_csum_decoded csum_bits; + u32 rx_error, rx_status; + u64 qword; + + qword = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len); + + rx_status = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_M, qword); + rx_error = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M, qword); + + csum_bits.ipe = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_ERROR_IPE_M, rx_error); + csum_bits.eipe = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_ERROR_EIPE_M, + rx_error); + csum_bits.l4e = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_ERROR_L4E_M, rx_error); + csum_bits.pprs = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_ERROR_PPRS_M, + rx_error); + csum_bits.l3l4p = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_STATUS_L3L4P_M, + rx_status); + csum_bits.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_STATUS_IPV6EXADD_M, + rx_status); + csum_bits.nat = 0; + csum_bits.eudpe = 0; + + idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype); +} + +/** + * idpf_rx_singleq_flex_csum - Indicate in skb if hw indicated a good cksum + * @rx_q: Rx completion queue + * @skb: skb currently being received and modified + * @rx_desc: the receive descriptor + * @ptype: Rx packet type + * + * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible + * descriptor writeback format. 
+ **/ +static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q, + struct sk_buff *skb, + union virtchnl2_rx_desc *rx_desc, + u16 ptype) +{ + struct idpf_rx_csum_decoded csum_bits; + u16 rx_status0, rx_status1; + + rx_status0 = le16_to_cpu(rx_desc->flex_nic_wb.status_error0); + rx_status1 = le16_to_cpu(rx_desc->flex_nic_wb.status_error1); + + csum_bits.ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_M, + rx_status0); + csum_bits.eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_M, + rx_status0); + csum_bits.l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_M, + rx_status0); + csum_bits.eudpe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_M, + rx_status0); + csum_bits.l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_L3L4P_M, + rx_status0); + csum_bits.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_IPV6EXADD_M, + rx_status0); + csum_bits.nat = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS1_NAT_M, + rx_status1); + csum_bits.pprs = 0; + + idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype); +} + +/** + * idpf_rx_singleq_base_hash - set the hash value in the skb + * @rx_q: Rx completion queue + * @skb: skb currently being received and modified + * @rx_desc: specific descriptor + * @decoded: Decoded Rx packet type related fields + * + * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte + * descriptor writeback format. + **/ +static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q, + struct sk_buff *skb, + union virtchnl2_rx_desc *rx_desc, + struct idpf_rx_ptype_decoded *decoded) +{ + u64 mask, qw1; + + if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH))) + return; + + mask = VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH_M; + qw1 = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len); + + if (FIELD_GET(mask, qw1) == mask) { + u32 hash = le32_to_cpu(rx_desc->base_wb.qword0.hi_dword.rss); + + skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded)); + } +} + +/** + * idpf_rx_singleq_flex_hash - set the hash value in the skb + * @rx_q: Rx completion queue + * @skb: skb currently being received and modified + * @rx_desc: specific descriptor + * @decoded: Decoded Rx packet type related fields + * + * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible + * descriptor writeback format. + **/ +static void idpf_rx_singleq_flex_hash(struct idpf_queue *rx_q, + struct sk_buff *skb, + union virtchnl2_rx_desc *rx_desc, + struct idpf_rx_ptype_decoded *decoded) +{ + if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH))) + return; + + if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_M, + le16_to_cpu(rx_desc->flex_nic_wb.status_error0))) + skb_set_hash(skb, le32_to_cpu(rx_desc->flex_nic_wb.rss_hash), + idpf_ptype_to_htype(decoded)); +} + +/** + * idpf_rx_singleq_process_skb_fields - Populate skb header fields from Rx + * descriptor + * @rx_q: Rx ring being processed + * @skb: pointer to current skb being populated + * @rx_desc: descriptor for skb + * @ptype: packet type + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, protocol, and + * other fields within the skb. 
+ */ +static void idpf_rx_singleq_process_skb_fields(struct idpf_queue *rx_q, + struct sk_buff *skb, + union virtchnl2_rx_desc *rx_desc, + u16 ptype) +{ + struct idpf_rx_ptype_decoded decoded = + rx_q->vport->rx_ptype_lkup[ptype]; + + /* modifies the skb - consumes the enet header */ + skb->protocol = eth_type_trans(skb, rx_q->vport->netdev); + + /* Check if we're using base mode descriptor IDs */ + if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) { + idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, &decoded); + idpf_rx_singleq_base_csum(rx_q, skb, rx_desc, ptype); + } else { + idpf_rx_singleq_flex_hash(rx_q, skb, rx_desc, &decoded); + idpf_rx_singleq_flex_csum(rx_q, skb, rx_desc, ptype); + } +} + +/** + * idpf_rx_singleq_buf_hw_alloc_all - Replace used receive buffers + * @rx_q: queue for which the hw buffers are allocated + * @cleaned_count: number of buffers to replace + * + * Returns false if all allocations were successful, true if any fail + */ +bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q, + u16 cleaned_count) +{ + struct virtchnl2_singleq_rx_buf_desc *desc; + u16 nta = rx_q->next_to_alloc; + struct idpf_rx_buf *buf; + + if (!cleaned_count) + return false; + + desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, nta); + buf = &rx_q->rx_buf.buf[nta]; + + do { + dma_addr_t addr; + + addr = idpf_alloc_page(rx_q->pp, buf, rx_q->rx_buf_size); + if (unlikely(addr == DMA_MAPPING_ERROR)) + break; + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + desc->pkt_addr = cpu_to_le64(addr); + desc->hdr_addr = 0; + desc++; + + buf++; + nta++; + if (unlikely(nta == rx_q->desc_count)) { + desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, 0); + buf = rx_q->rx_buf.buf; + nta = 0; + } + + cleaned_count--; + } while (cleaned_count); + + if (rx_q->next_to_alloc != nta) { + idpf_rx_buf_hw_update(rx_q, nta); + rx_q->next_to_alloc = nta; + } + + return !!cleaned_count; +} + +/** + * idpf_rx_singleq_extract_base_fields - Extract fields from the Rx descriptor + * @rx_q: Rx descriptor queue + * @rx_desc: the descriptor to process + * @fields: storage for extracted values + * + * Decode the Rx descriptor and extract relevant information including the + * size and Rx packet type. + * + * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte + * descriptor writeback format. + */ +static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q, + union virtchnl2_rx_desc *rx_desc, + struct idpf_rx_extracted *fields) +{ + u64 qword; + + qword = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len); + + fields->size = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qword); + fields->rx_ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword); +} + +/** + * idpf_rx_singleq_extract_flex_fields - Extract fields from the Rx descriptor + * @rx_q: Rx descriptor queue + * @rx_desc: the descriptor to process + * @fields: storage for extracted values + * + * Decode the Rx descriptor and extract relevant information including the + * size and Rx packet type. + * + * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible + * descriptor writeback format. 
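+ *
+ * The remaining writeback fields (hash, checksum) are not touched here;
+ * they are decoded afterwards in idpf_rx_singleq_process_skb_fields().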
+ */ +static void idpf_rx_singleq_extract_flex_fields(struct idpf_queue *rx_q, + union virtchnl2_rx_desc *rx_desc, + struct idpf_rx_extracted *fields) +{ + fields->size = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M, + le16_to_cpu(rx_desc->flex_nic_wb.pkt_len)); + fields->rx_ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M, + le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0)); +} + +/** + * idpf_rx_singleq_extract_fields - Extract fields from the Rx descriptor + * @rx_q: Rx descriptor queue + * @rx_desc: the descriptor to process + * @fields: storage for extracted values + * + */ +static void idpf_rx_singleq_extract_fields(struct idpf_queue *rx_q, + union virtchnl2_rx_desc *rx_desc, + struct idpf_rx_extracted *fields) +{ + if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) + idpf_rx_singleq_extract_base_fields(rx_q, rx_desc, fields); + else + idpf_rx_singleq_extract_flex_fields(rx_q, rx_desc, fields); +} + +/** + * idpf_rx_singleq_clean - Reclaim resources after receive completes + * @rx_q: rx queue to clean + * @budget: Total limit on number of packets to process + * + * Returns true if there's any budget left (e.g. the clean is finished) + */ +static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_pkts = 0; + struct sk_buff *skb = rx_q->skb; + u16 ntc = rx_q->next_to_clean; + u16 cleaned_count = 0; + bool failure = false; + + /* Process Rx packets bounded by budget */ + while (likely(total_rx_pkts < (unsigned int)budget)) { + struct idpf_rx_extracted fields = { }; + union virtchnl2_rx_desc *rx_desc; + struct idpf_rx_buf *rx_buf; + + /* get the Rx desc from Rx queue based on 'next_to_clean' */ + rx_desc = IDPF_RX_DESC(rx_q, ntc); + + /* status_error_ptype_len will always be zero for unused + * descriptors because it's cleared in cleanup, and overlaps + * with hdr_addr which is always zero because packet split + * isn't used, if the hardware wrote DD then the length will be + * non-zero + */ +#define IDPF_RXD_DD VIRTCHNL2_RX_BASE_DESC_STATUS_DD_M + if (!idpf_rx_singleq_test_staterr(rx_desc, + IDPF_RXD_DD)) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc + */ + dma_rmb(); + + idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields); + + rx_buf = &rx_q->rx_buf.buf[ntc]; + if (!fields.size) { + idpf_rx_put_page(rx_buf); + goto skip_data; + } + + idpf_rx_sync_for_cpu(rx_buf, fields.size); + skb = rx_q->skb; + if (skb) + idpf_rx_add_frag(rx_buf, skb, fields.size); + else + skb = idpf_rx_construct_skb(rx_q, rx_buf, fields.size); + + /* exit if we failed to retrieve a buffer */ + if (!skb) + break; + +skip_data: + IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc); + + cleaned_count++; + + /* skip if it is non EOP desc */ + if (idpf_rx_singleq_is_non_eop(rx_q, rx_desc, skb, ntc)) + continue; + +#define IDPF_RXD_ERR_S FIELD_PREP(VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M, \ + VIRTCHNL2_RX_BASE_DESC_ERROR_RXE_M) + if (unlikely(idpf_rx_singleq_test_staterr(rx_desc, + IDPF_RXD_ERR_S))) { + dev_kfree_skb_any(skb); + skb = NULL; + continue; + } + + /* pad skb if needed (to make valid ethernet frame) */ + if (eth_skb_pad(skb)) { + skb = NULL; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* protocol */ + idpf_rx_singleq_process_skb_fields(rx_q, skb, + rx_desc, fields.rx_ptype); + + /* send completed skb up the stack */ + napi_gro_receive(&rx_q->q_vector->napi, skb); + skb = NULL; + + /* update budget accounting */ + total_rx_pkts++; + } + + 
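+	/* Stash any in-progress skb so the next poll invocation can keep
+	 * appending frags to a frame whose descriptors straddle the budget
+	 * boundary.
+	 */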
rx_q->skb = skb; + + rx_q->next_to_clean = ntc; + + if (cleaned_count) + failure = idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count); + + u64_stats_update_begin(&rx_q->stats_sync); + u64_stats_add(&rx_q->q_stats.rx.packets, total_rx_pkts); + u64_stats_add(&rx_q->q_stats.rx.bytes, total_rx_bytes); + u64_stats_update_end(&rx_q->stats_sync); + + /* guarantee a trip back through this routine if there was a failure */ + return failure ? budget : (int)total_rx_pkts; +} + +/** + * idpf_rx_singleq_clean_all - Clean all Rx queues + * @q_vec: queue vector + * @budget: Used to determine if we are in netpoll + * @cleaned: returns number of packets cleaned + * + * Returns false if clean is not complete else returns true + */ +static bool idpf_rx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget, + int *cleaned) +{ + u16 num_rxq = q_vec->num_rxq; + bool clean_complete = true; + int budget_per_q, i; + + /* We attempt to distribute budget to each Rx queue fairly, but don't + * allow the budget to go below 1 because that would exit polling early. + */ + budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0; + for (i = 0; i < num_rxq; i++) { + struct idpf_queue *rxq = q_vec->rx[i]; + int pkts_cleaned_per_q; + + pkts_cleaned_per_q = idpf_rx_singleq_clean(rxq, budget_per_q); + + /* if we clean as many as budgeted, we must not be done */ + if (pkts_cleaned_per_q >= budget_per_q) + clean_complete = false; + *cleaned += pkts_cleaned_per_q; + } + + return clean_complete; +} + +/** + * idpf_vport_singleq_napi_poll - NAPI handler + * @napi: struct from which you get q_vector + * @budget: budget provided by stack + */ +int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget) +{ + struct idpf_q_vector *q_vector = + container_of(napi, struct idpf_q_vector, napi); + bool clean_complete; + int work_done = 0; + + /* Handle case where we are called by netpoll with a budget of 0 */ + if (budget <= 0) { + idpf_tx_singleq_clean_all(q_vector, budget, &work_done); + + return budget; + } + + clean_complete = idpf_rx_singleq_clean_all(q_vector, budget, + &work_done); + clean_complete &= idpf_tx_singleq_clean_all(q_vector, budget, + &work_done); + + /* If work not completed, return budget and polling will return */ + if (!clean_complete) + return budget; + + work_done = min_t(int, work_done, budget - 1); + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) + idpf_vport_intr_update_itr_ena_irq(q_vector); + + return work_done; +} diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c new file mode 100644 index 000000000000..1f728a9004d9 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -0,0 +1,4294 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2023 Intel Corporation */ + +#include "idpf.h" + +/** + * idpf_buf_lifo_push - push a buffer pointer onto stack + * @stack: pointer to stack struct + * @buf: pointer to buf to push + * + * Returns 0 on success, negative on failure + **/ +static int idpf_buf_lifo_push(struct idpf_buf_lifo *stack, + struct idpf_tx_stash *buf) +{ + if (unlikely(stack->top == stack->size)) + return -ENOSPC; + + stack->bufs[stack->top++] = buf; + + return 0; +} + +/** + * idpf_buf_lifo_pop - pop a buffer pointer from stack + * @stack: pointer to stack struct + **/ +static struct idpf_tx_stash *idpf_buf_lifo_pop(struct idpf_buf_lifo *stack) +{ + if (unlikely(!stack->top)) + return NULL; + + return 
stack->bufs[--stack->top]; +} + +/** + * idpf_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * @txqueue: TX queue + */ +void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev); + + adapter->tx_timeout_count++; + + netdev_err(netdev, "Detected Tx timeout: Count %d, Queue %d\n", + adapter->tx_timeout_count, txqueue); + if (!idpf_is_reset_in_prog(adapter)) { + set_bit(IDPF_HR_FUNC_RESET, adapter->flags); + queue_delayed_work(adapter->vc_event_wq, + &adapter->vc_event_task, + msecs_to_jiffies(10)); + } +} + +/** + * idpf_tx_buf_rel - Release a Tx buffer + * @tx_q: the queue that owns the buffer + * @tx_buf: the buffer to free + */ +static void idpf_tx_buf_rel(struct idpf_queue *tx_q, struct idpf_tx_buf *tx_buf) +{ + if (tx_buf->skb) { + if (dma_unmap_len(tx_buf, len)) + dma_unmap_single(tx_q->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + dev_kfree_skb_any(tx_buf->skb); + } else if (dma_unmap_len(tx_buf, len)) { + dma_unmap_page(tx_q->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + } + + tx_buf->next_to_watch = NULL; + tx_buf->skb = NULL; + tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; + dma_unmap_len_set(tx_buf, len, 0); +} + +/** + * idpf_tx_buf_rel_all - Free any empty Tx buffers + * @txq: queue to be cleaned + */ +static void idpf_tx_buf_rel_all(struct idpf_queue *txq) +{ + u16 i; + + /* Buffers already cleared, nothing to do */ + if (!txq->tx_buf) + return; + + /* Free all the Tx buffer sk_buffs */ + for (i = 0; i < txq->desc_count; i++) + idpf_tx_buf_rel(txq, &txq->tx_buf[i]); + + kfree(txq->tx_buf); + txq->tx_buf = NULL; + + if (!txq->buf_stack.bufs) + return; + + for (i = 0; i < txq->buf_stack.size; i++) + kfree(txq->buf_stack.bufs[i]); + + kfree(txq->buf_stack.bufs); + txq->buf_stack.bufs = NULL; +} + +/** + * idpf_tx_desc_rel - Free Tx resources per queue + * @txq: Tx descriptor ring for a specific queue + * @bufq: buffer q or completion q + * + * Free all transmit software resources + */ +static void idpf_tx_desc_rel(struct idpf_queue *txq, bool bufq) +{ + if (bufq) + idpf_tx_buf_rel_all(txq); + + if (!txq->desc_ring) + return; + + dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma); + txq->desc_ring = NULL; + txq->next_to_alloc = 0; + txq->next_to_use = 0; + txq->next_to_clean = 0; +} + +/** + * idpf_tx_desc_rel_all - Free Tx Resources for All Queues + * @vport: virtual port structure + * + * Free all transmit software resources + */ +static void idpf_tx_desc_rel_all(struct idpf_vport *vport) +{ + int i, j; + + if (!vport->txq_grps) + return; + + for (i = 0; i < vport->num_txq_grp; i++) { + struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; + + for (j = 0; j < txq_grp->num_txq; j++) + idpf_tx_desc_rel(txq_grp->txqs[j], true); + + if (idpf_is_queue_model_split(vport->txq_model)) + idpf_tx_desc_rel(txq_grp->complq, false); + } +} + +/** + * idpf_tx_buf_alloc_all - Allocate memory for all buffer resources + * @tx_q: queue for which the buffers are allocated + * + * Returns 0 on success, negative on failure + */ +static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q) +{ + int buf_size; + int i; + + /* Allocate book keeping buffers only. 
Buffers to be supplied to HW + * are allocated by kernel network stack and received as part of skb + */ + buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count; + tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL); + if (!tx_q->tx_buf) + return -ENOMEM; + + /* Initialize tx_bufs with invalid completion tags */ + for (i = 0; i < tx_q->desc_count; i++) + tx_q->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; + + /* Initialize tx buf stack for out-of-order completions if + * flow scheduling offload is enabled + */ + tx_q->buf_stack.bufs = + kcalloc(tx_q->desc_count, sizeof(struct idpf_tx_stash *), + GFP_KERNEL); + if (!tx_q->buf_stack.bufs) + return -ENOMEM; + + tx_q->buf_stack.size = tx_q->desc_count; + tx_q->buf_stack.top = tx_q->desc_count; + + for (i = 0; i < tx_q->desc_count; i++) { + tx_q->buf_stack.bufs[i] = kzalloc(sizeof(*tx_q->buf_stack.bufs[i]), + GFP_KERNEL); + if (!tx_q->buf_stack.bufs[i]) + return -ENOMEM; + } + + return 0; +} + +/** + * idpf_tx_desc_alloc - Allocate the Tx descriptors + * @tx_q: the tx ring to set up + * @bufq: buffer or completion queue + * + * Returns 0 on success, negative on failure + */ +static int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq) +{ + struct device *dev = tx_q->dev; + u32 desc_sz; + int err; + + if (bufq) { + err = idpf_tx_buf_alloc_all(tx_q); + if (err) + goto err_alloc; + + desc_sz = sizeof(struct idpf_base_tx_desc); + } else { + desc_sz = sizeof(struct idpf_splitq_tx_compl_desc); + } + + tx_q->size = tx_q->desc_count * desc_sz; + + /* Allocate descriptors also round up to nearest 4K */ + tx_q->size = ALIGN(tx_q->size, 4096); + tx_q->desc_ring = dmam_alloc_coherent(dev, tx_q->size, &tx_q->dma, + GFP_KERNEL); + if (!tx_q->desc_ring) { + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", + tx_q->size); + err = -ENOMEM; + goto err_alloc; + } + + tx_q->next_to_alloc = 0; + tx_q->next_to_use = 0; + tx_q->next_to_clean = 0; + set_bit(__IDPF_Q_GEN_CHK, tx_q->flags); + + return 0; + +err_alloc: + idpf_tx_desc_rel(tx_q, bufq); + + return err; +} + +/** + * idpf_tx_desc_alloc_all - allocate all queues Tx resources + * @vport: virtual port private structure + * + * Returns 0 on success, negative on failure + */ +static int idpf_tx_desc_alloc_all(struct idpf_vport *vport) +{ + struct device *dev = &vport->adapter->pdev->dev; + int err = 0; + int i, j; + + /* Setup buffer queues. In single queue model buffer queues and + * completion queues will be same + */ + for (i = 0; i < vport->num_txq_grp; i++) { + for (j = 0; j < vport->txq_grps[i].num_txq; j++) { + struct idpf_queue *txq = vport->txq_grps[i].txqs[j]; + u8 gen_bits = 0; + u16 bufidx_mask; + + err = idpf_tx_desc_alloc(txq, true); + if (err) { + dev_err(dev, "Allocation for Tx Queue %u failed\n", + i); + goto err_out; + } + + if (!idpf_is_queue_model_split(vport->txq_model)) + continue; + + txq->compl_tag_cur_gen = 0; + + /* Determine the number of bits in the bufid + * mask and add one to get the start of the + * generation bits + */ + bufidx_mask = txq->desc_count - 1; + while (bufidx_mask >> 1) { + txq->compl_tag_gen_s++; + bufidx_mask = bufidx_mask >> 1; + } + txq->compl_tag_gen_s++; + + gen_bits = IDPF_TX_SPLITQ_COMPL_TAG_WIDTH - + txq->compl_tag_gen_s; + txq->compl_tag_gen_max = GETMAXVAL(gen_bits); + + /* Set bufid mask based on location of first + * gen bit; it cannot simply be the descriptor + * ring size-1 since we can have size values + * where not all of those bits are set. 
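+ * E.g. a 512-entry ring: bufidx_mask starts at 511, so compl_tag_gen_s
+ * ends up as 9 and compl_tag_bufid_m as 0x1FF; assuming a 16-bit
+ * completion tag, that leaves 7 generation bits (compl_tag_gen_max = 127).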
+ */ + txq->compl_tag_bufid_m = + GETMAXVAL(txq->compl_tag_gen_s); + } + + if (!idpf_is_queue_model_split(vport->txq_model)) + continue; + + /* Setup completion queues */ + err = idpf_tx_desc_alloc(vport->txq_grps[i].complq, false); + if (err) { + dev_err(dev, "Allocation for Tx Completion Queue %u failed\n", + i); + goto err_out; + } + } + +err_out: + if (err) + idpf_tx_desc_rel_all(vport); + + return err; +} + +/** + * idpf_rx_page_rel - Release an rx buffer page + * @rxq: the queue that owns the buffer + * @rx_buf: the buffer to free + */ +static void idpf_rx_page_rel(struct idpf_queue *rxq, struct idpf_rx_buf *rx_buf) +{ + if (unlikely(!rx_buf->page)) + return; + + page_pool_put_full_page(rxq->pp, rx_buf->page, false); + + rx_buf->page = NULL; + rx_buf->page_offset = 0; +} + +/** + * idpf_rx_hdr_buf_rel_all - Release header buffer memory + * @rxq: queue to use + */ +static void idpf_rx_hdr_buf_rel_all(struct idpf_queue *rxq) +{ + struct idpf_adapter *adapter = rxq->vport->adapter; + + dma_free_coherent(&adapter->pdev->dev, + rxq->desc_count * IDPF_HDR_BUF_SIZE, + rxq->rx_buf.hdr_buf_va, + rxq->rx_buf.hdr_buf_pa); + rxq->rx_buf.hdr_buf_va = NULL; +} + +/** + * idpf_rx_buf_rel_all - Free all Rx buffer resources for a queue + * @rxq: queue to be cleaned + */ +static void idpf_rx_buf_rel_all(struct idpf_queue *rxq) +{ + u16 i; + + /* queue already cleared, nothing to do */ + if (!rxq->rx_buf.buf) + return; + + /* Free all the bufs allocated and given to hw on Rx queue */ + for (i = 0; i < rxq->desc_count; i++) + idpf_rx_page_rel(rxq, &rxq->rx_buf.buf[i]); + + if (rxq->rx_hsplit_en) + idpf_rx_hdr_buf_rel_all(rxq); + + page_pool_destroy(rxq->pp); + rxq->pp = NULL; + + kfree(rxq->rx_buf.buf); + rxq->rx_buf.buf = NULL; +} + +/** + * idpf_rx_desc_rel - Free a specific Rx q resources + * @rxq: queue to clean the resources from + * @bufq: buffer q or completion q + * @q_model: single or split q model + * + * Free a specific rx queue resources + */ +static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model) +{ + if (!rxq) + return; + + if (!bufq && idpf_is_queue_model_split(q_model) && rxq->skb) { + dev_kfree_skb_any(rxq->skb); + rxq->skb = NULL; + } + + if (bufq || !idpf_is_queue_model_split(q_model)) + idpf_rx_buf_rel_all(rxq); + + rxq->next_to_alloc = 0; + rxq->next_to_clean = 0; + rxq->next_to_use = 0; + if (!rxq->desc_ring) + return; + + dmam_free_coherent(rxq->dev, rxq->size, rxq->desc_ring, rxq->dma); + rxq->desc_ring = NULL; +} + +/** + * idpf_rx_desc_rel_all - Free Rx Resources for All Queues + * @vport: virtual port structure + * + * Free all rx queues resources + */ +static void idpf_rx_desc_rel_all(struct idpf_vport *vport) +{ + struct idpf_rxq_group *rx_qgrp; + u16 num_rxq; + int i, j; + + if (!vport->rxq_grps) + return; + + for (i = 0; i < vport->num_rxq_grp; i++) { + rx_qgrp = &vport->rxq_grps[i]; + + if (!idpf_is_queue_model_split(vport->rxq_model)) { + for (j = 0; j < rx_qgrp->singleq.num_rxq; j++) + idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], + false, vport->rxq_model); + continue; + } + + num_rxq = rx_qgrp->splitq.num_rxq_sets; + for (j = 0; j < num_rxq; j++) + idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq, + false, vport->rxq_model); + + if (!rx_qgrp->splitq.bufq_sets) + continue; + + for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + struct idpf_bufq_set *bufq_set = + &rx_qgrp->splitq.bufq_sets[j]; + + idpf_rx_desc_rel(&bufq_set->bufq, true, + vport->rxq_model); + } + } +} + +/** + * idpf_rx_buf_hw_update - Store the new tail and head values + * @rxq: 
queue to bump + * @val: new head index + */ +void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val) +{ + rxq->next_to_use = val; + + if (unlikely(!rxq->tail)) + return; + + /* writel has an implicit memory barrier */ + writel(val, rxq->tail); +} + +/** + * idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers + * @rxq: ring to use + * + * Returns 0 on success, negative on failure. + */ +static int idpf_rx_hdr_buf_alloc_all(struct idpf_queue *rxq) +{ + struct idpf_adapter *adapter = rxq->vport->adapter; + + rxq->rx_buf.hdr_buf_va = + dma_alloc_coherent(&adapter->pdev->dev, + IDPF_HDR_BUF_SIZE * rxq->desc_count, + &rxq->rx_buf.hdr_buf_pa, + GFP_KERNEL); + if (!rxq->rx_buf.hdr_buf_va) + return -ENOMEM; + + return 0; +} + +/** + * idpf_rx_post_buf_refill - Post buffer id to refill queue + * @refillq: refill queue to post to + * @buf_id: buffer id to post + */ +static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id) +{ + u16 nta = refillq->next_to_alloc; + + /* store the buffer ID and the SW maintained GEN bit to the refillq */ + refillq->ring[nta] = + ((buf_id << IDPF_RX_BI_BUFID_S) & IDPF_RX_BI_BUFID_M) | + (!!(test_bit(__IDPF_Q_GEN_CHK, refillq->flags)) << + IDPF_RX_BI_GEN_S); + + if (unlikely(++nta == refillq->desc_count)) { + nta = 0; + change_bit(__IDPF_Q_GEN_CHK, refillq->flags); + } + refillq->next_to_alloc = nta; +} + +/** + * idpf_rx_post_buf_desc - Post buffer to bufq descriptor ring + * @bufq: buffer queue to post to + * @buf_id: buffer id to post + * + * Returns false if buffer could not be allocated, true otherwise. + */ +static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id) +{ + struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL; + u16 nta = bufq->next_to_alloc; + struct idpf_rx_buf *buf; + dma_addr_t addr; + + splitq_rx_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, nta); + buf = &bufq->rx_buf.buf[buf_id]; + + if (bufq->rx_hsplit_en) { + splitq_rx_desc->hdr_addr = + cpu_to_le64(bufq->rx_buf.hdr_buf_pa + + (u32)buf_id * IDPF_HDR_BUF_SIZE); + } + + addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size); + if (unlikely(addr == DMA_MAPPING_ERROR)) + return false; + + splitq_rx_desc->pkt_addr = cpu_to_le64(addr); + splitq_rx_desc->qword0.buf_id = cpu_to_le16(buf_id); + + nta++; + if (unlikely(nta == bufq->desc_count)) + nta = 0; + bufq->next_to_alloc = nta; + + return true; +} + +/** + * idpf_rx_post_init_bufs - Post initial buffers to bufq + * @bufq: buffer queue to post working set to + * @working_set: number of buffers to put in working set + * + * Returns true if @working_set bufs were posted successfully, false otherwise. 
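+ *
+ * The tail write that follows is aligned down to rx_buf_stride, since the
+ * hardware tail is only bumped in stride-sized increments.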
+ */ +static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set) +{ + int i; + + for (i = 0; i < working_set; i++) { + if (!idpf_rx_post_buf_desc(bufq, i)) + return false; + } + + idpf_rx_buf_hw_update(bufq, + bufq->next_to_alloc & ~(bufq->rx_buf_stride - 1)); + + return true; +} + +/** + * idpf_rx_create_page_pool - Create a page pool + * @rxbufq: RX queue to create page pool for + * + * Returns &page_pool on success, casted -errno on failure + */ +static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq) +{ + struct page_pool_params pp = { + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .order = 0, + .pool_size = rxbufq->desc_count, + .nid = NUMA_NO_NODE, + .dev = rxbufq->vport->netdev->dev.parent, + .max_len = PAGE_SIZE, + .dma_dir = DMA_FROM_DEVICE, + .offset = 0, + }; + + return page_pool_create(&pp); +} + +/** + * idpf_rx_buf_alloc_all - Allocate memory for all buffer resources + * @rxbufq: queue for which the buffers are allocated; equivalent to + * rxq when operating in singleq mode + * + * Returns 0 on success, negative on failure + */ +static int idpf_rx_buf_alloc_all(struct idpf_queue *rxbufq) +{ + int err = 0; + + /* Allocate book keeping buffers */ + rxbufq->rx_buf.buf = kcalloc(rxbufq->desc_count, + sizeof(struct idpf_rx_buf), GFP_KERNEL); + if (!rxbufq->rx_buf.buf) { + err = -ENOMEM; + goto rx_buf_alloc_all_out; + } + + if (rxbufq->rx_hsplit_en) { + err = idpf_rx_hdr_buf_alloc_all(rxbufq); + if (err) + goto rx_buf_alloc_all_out; + } + + /* Allocate buffers to be given to HW. */ + if (idpf_is_queue_model_split(rxbufq->vport->rxq_model)) { + int working_set = IDPF_RX_BUFQ_WORKING_SET(rxbufq); + + if (!idpf_rx_post_init_bufs(rxbufq, working_set)) + err = -ENOMEM; + } else { + if (idpf_rx_singleq_buf_hw_alloc_all(rxbufq, + rxbufq->desc_count - 1)) + err = -ENOMEM; + } + +rx_buf_alloc_all_out: + if (err) + idpf_rx_buf_rel_all(rxbufq); + + return err; +} + +/** + * idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW + * @rxbufq: RX queue to create page pool for + * + * Returns 0 on success, negative on failure + */ +static int idpf_rx_bufs_init(struct idpf_queue *rxbufq) +{ + struct page_pool *pool; + + pool = idpf_rx_create_page_pool(rxbufq); + if (IS_ERR(pool)) + return PTR_ERR(pool); + + rxbufq->pp = pool; + + return idpf_rx_buf_alloc_all(rxbufq); +} + +/** + * idpf_rx_bufs_init_all - Initialize all RX bufs + * @vport: virtual port struct + * + * Returns 0 on success, negative on failure + */ +int idpf_rx_bufs_init_all(struct idpf_vport *vport) +{ + struct idpf_rxq_group *rx_qgrp; + struct idpf_queue *q; + int i, j, err; + + for (i = 0; i < vport->num_rxq_grp; i++) { + rx_qgrp = &vport->rxq_grps[i]; + + /* Allocate bufs for the rxq itself in singleq */ + if (!idpf_is_queue_model_split(vport->rxq_model)) { + int num_rxq = rx_qgrp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++) { + q = rx_qgrp->singleq.rxqs[j]; + err = idpf_rx_bufs_init(q); + if (err) + return err; + } + + continue; + } + + /* Otherwise, allocate bufs for the buffer queues */ + for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + q = &rx_qgrp->splitq.bufq_sets[j].bufq; + err = idpf_rx_bufs_init(q); + if (err) + return err; + } + } + + return 0; +} + +/** + * idpf_rx_desc_alloc - Allocate queue Rx resources + * @rxq: Rx queue for which the resources are setup + * @bufq: buffer or completion queue + * @q_model: single or split queue model + * + * Returns 0 on success, negative on failure + */ +static int idpf_rx_desc_alloc(struct idpf_queue *rxq, 
bool bufq, s32 q_model) +{ + struct device *dev = rxq->dev; + + if (bufq) + rxq->size = rxq->desc_count * + sizeof(struct virtchnl2_splitq_rx_buf_desc); + else + rxq->size = rxq->desc_count * + sizeof(union virtchnl2_rx_desc); + + /* Allocate descriptors and also round up to nearest 4K */ + rxq->size = ALIGN(rxq->size, 4096); + rxq->desc_ring = dmam_alloc_coherent(dev, rxq->size, + &rxq->dma, GFP_KERNEL); + if (!rxq->desc_ring) { + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", + rxq->size); + return -ENOMEM; + } + + rxq->next_to_alloc = 0; + rxq->next_to_clean = 0; + rxq->next_to_use = 0; + set_bit(__IDPF_Q_GEN_CHK, rxq->flags); + + return 0; +} + +/** + * idpf_rx_desc_alloc_all - allocate all RX queues resources + * @vport: virtual port structure + * + * Returns 0 on success, negative on failure + */ +static int idpf_rx_desc_alloc_all(struct idpf_vport *vport) +{ + struct device *dev = &vport->adapter->pdev->dev; + struct idpf_rxq_group *rx_qgrp; + struct idpf_queue *q; + int i, j, err; + u16 num_rxq; + + for (i = 0; i < vport->num_rxq_grp; i++) { + rx_qgrp = &vport->rxq_grps[i]; + if (idpf_is_queue_model_split(vport->rxq_model)) + num_rxq = rx_qgrp->splitq.num_rxq_sets; + else + num_rxq = rx_qgrp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++) { + if (idpf_is_queue_model_split(vport->rxq_model)) + q = &rx_qgrp->splitq.rxq_sets[j]->rxq; + else + q = rx_qgrp->singleq.rxqs[j]; + err = idpf_rx_desc_alloc(q, false, vport->rxq_model); + if (err) { + dev_err(dev, "Memory allocation for Rx Queue %u failed\n", + i); + goto err_out; + } + } + + if (!idpf_is_queue_model_split(vport->rxq_model)) + continue; + + for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + q = &rx_qgrp->splitq.bufq_sets[j].bufq; + err = idpf_rx_desc_alloc(q, true, vport->rxq_model); + if (err) { + dev_err(dev, "Memory allocation for Rx Buffer Queue %u failed\n", + i); + goto err_out; + } + } + } + + return 0; + +err_out: + idpf_rx_desc_rel_all(vport); + + return err; +} + +/** + * idpf_txq_group_rel - Release all resources for txq groups + * @vport: vport to release txq groups on + */ +static void idpf_txq_group_rel(struct idpf_vport *vport) +{ + int i, j; + + if (!vport->txq_grps) + return; + + for (i = 0; i < vport->num_txq_grp; i++) { + struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; + + for (j = 0; j < txq_grp->num_txq; j++) { + kfree(txq_grp->txqs[j]); + txq_grp->txqs[j] = NULL; + } + kfree(txq_grp->complq); + txq_grp->complq = NULL; + } + kfree(vport->txq_grps); + vport->txq_grps = NULL; +} + +/** + * idpf_rxq_sw_queue_rel - Release software queue resources + * @rx_qgrp: rx queue group with software queues + */ +static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp) +{ + int i, j; + + for (i = 0; i < rx_qgrp->vport->num_bufqs_per_qgrp; i++) { + struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i]; + + for (j = 0; j < bufq_set->num_refillqs; j++) { + kfree(bufq_set->refillqs[j].ring); + bufq_set->refillqs[j].ring = NULL; + } + kfree(bufq_set->refillqs); + bufq_set->refillqs = NULL; + } +} + +/** + * idpf_rxq_group_rel - Release all resources for rxq groups + * @vport: vport to release rxq groups on + */ +static void idpf_rxq_group_rel(struct idpf_vport *vport) +{ + int i; + + if (!vport->rxq_grps) + return; + + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + u16 num_rxq; + int j; + + if (idpf_is_queue_model_split(vport->rxq_model)) { + num_rxq = rx_qgrp->splitq.num_rxq_sets; + for (j = 0; j < num_rxq; j++) 
{ + kfree(rx_qgrp->splitq.rxq_sets[j]); + rx_qgrp->splitq.rxq_sets[j] = NULL; + } + + idpf_rxq_sw_queue_rel(rx_qgrp); + kfree(rx_qgrp->splitq.bufq_sets); + rx_qgrp->splitq.bufq_sets = NULL; + } else { + num_rxq = rx_qgrp->singleq.num_rxq; + for (j = 0; j < num_rxq; j++) { + kfree(rx_qgrp->singleq.rxqs[j]); + rx_qgrp->singleq.rxqs[j] = NULL; + } + } + } + kfree(vport->rxq_grps); + vport->rxq_grps = NULL; +} + +/** + * idpf_vport_queue_grp_rel_all - Release all queue groups + * @vport: vport to release queue groups for + */ +static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport) +{ + idpf_txq_group_rel(vport); + idpf_rxq_group_rel(vport); +} + +/** + * idpf_vport_queues_rel - Free memory for all queues + * @vport: virtual port + * + * Free the memory allocated for queues associated with a vport + */ +void idpf_vport_queues_rel(struct idpf_vport *vport) +{ + idpf_tx_desc_rel_all(vport); + idpf_rx_desc_rel_all(vport); + idpf_vport_queue_grp_rel_all(vport); + + kfree(vport->txqs); + vport->txqs = NULL; +} + +/** + * idpf_vport_init_fast_path_txqs - Initialize fast path txq array + * @vport: vport to init txqs on + * + * We get a queue index from skb->queue_mapping and we need a fast way to + * dereference the queue from queue groups. This allows us to quickly pull a + * txq based on a queue index. + * + * Returns 0 on success, negative on failure + */ +static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport) +{ + int i, j, k = 0; + + vport->txqs = kcalloc(vport->num_txq, sizeof(struct idpf_queue *), + GFP_KERNEL); + + if (!vport->txqs) + return -ENOMEM; + + for (i = 0; i < vport->num_txq_grp; i++) { + struct idpf_txq_group *tx_grp = &vport->txq_grps[i]; + + for (j = 0; j < tx_grp->num_txq; j++, k++) { + vport->txqs[k] = tx_grp->txqs[j]; + vport->txqs[k]->idx = k; + } + } + + return 0; +} + +/** + * idpf_vport_init_num_qs - Initialize number of queues + * @vport: vport to initialize queues + * @vport_msg: data to be filled into vport + */ +void idpf_vport_init_num_qs(struct idpf_vport *vport, + struct virtchnl2_create_vport *vport_msg) +{ + struct idpf_vport_user_config_data *config_data; + u16 idx = vport->idx; + + config_data = &vport->adapter->vport_config[idx]->user_config; + vport->num_txq = le16_to_cpu(vport_msg->num_tx_q); + vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q); + /* number of txqs and rxqs in config data will be zeros only in the + * driver load path and we don't update them thereafter + */ + if (!config_data->num_req_tx_qs && !config_data->num_req_rx_qs) { + config_data->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q); + config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q); + } + + if (idpf_is_queue_model_split(vport->txq_model)) + vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq); + if (idpf_is_queue_model_split(vport->rxq_model)) + vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq); + + /* Adjust number of buffer queues per Rx queue group. 
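+ * Splitq groups get IDPF_MAX_BUFQS_PER_RXQ_GRP buffer queues each (the
+ * 4K and 2K pools set up below); singleq uses no separate buffer queues
+ * and takes its buffer size from bufq_size[0].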
*/ + if (!idpf_is_queue_model_split(vport->rxq_model)) { + vport->num_bufqs_per_qgrp = 0; + vport->bufq_size[0] = IDPF_RX_BUF_2048; + + return; + } + + vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP; + /* Bufq[0] default buffer size is 4K + * Bufq[1] default buffer size is 2K + */ + vport->bufq_size[0] = IDPF_RX_BUF_4096; + vport->bufq_size[1] = IDPF_RX_BUF_2048; +} + +/** + * idpf_vport_calc_num_q_desc - Calculate number of queue descriptors + * @vport: vport to calculate q descriptors for + */ +void idpf_vport_calc_num_q_desc(struct idpf_vport *vport) +{ + struct idpf_vport_user_config_data *config_data; + int num_bufqs = vport->num_bufqs_per_qgrp; + u32 num_req_txq_desc, num_req_rxq_desc; + u16 idx = vport->idx; + int i; + + config_data = &vport->adapter->vport_config[idx]->user_config; + num_req_txq_desc = config_data->num_req_txq_desc; + num_req_rxq_desc = config_data->num_req_rxq_desc; + + vport->complq_desc_count = 0; + if (num_req_txq_desc) { + vport->txq_desc_count = num_req_txq_desc; + if (idpf_is_queue_model_split(vport->txq_model)) { + vport->complq_desc_count = num_req_txq_desc; + if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC) + vport->complq_desc_count = + IDPF_MIN_TXQ_COMPLQ_DESC; + } + } else { + vport->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT; + if (idpf_is_queue_model_split(vport->txq_model)) + vport->complq_desc_count = + IDPF_DFLT_TX_COMPLQ_DESC_COUNT; + } + + if (num_req_rxq_desc) + vport->rxq_desc_count = num_req_rxq_desc; + else + vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT; + + for (i = 0; i < num_bufqs; i++) { + if (!vport->bufq_desc_count[i]) + vport->bufq_desc_count[i] = + IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count, + num_bufqs); + } +} + +/** + * idpf_vport_calc_total_qs - Calculate total number of queues + * @adapter: private data struct + * @vport_idx: vport idx to retrieve vport pointer + * @vport_msg: message to fill with data + * @max_q: vport max queue info + * + * Return 0 on success, error value on failure. + */ +int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx, + struct virtchnl2_create_vport *vport_msg, + struct idpf_vport_max_q *max_q) +{ + int dflt_splitq_txq_grps = 0, dflt_singleq_txqs = 0; + int dflt_splitq_rxq_grps = 0, dflt_singleq_rxqs = 0; + u16 num_req_tx_qs = 0, num_req_rx_qs = 0; + struct idpf_vport_config *vport_config; + u16 num_txq_grps, num_rxq_grps; + u32 num_qs; + + vport_config = adapter->vport_config[vport_idx]; + if (vport_config) { + num_req_tx_qs = vport_config->user_config.num_req_tx_qs; + num_req_rx_qs = vport_config->user_config.num_req_rx_qs; + } else { + int num_cpus; + + /* Restrict num of queues to cpus online as a default + * configuration to give best performance. User can always + * override to a max number of queues via ethtool. + */ + num_cpus = num_online_cpus(); + + dflt_splitq_txq_grps = min_t(int, max_q->max_txq, num_cpus); + dflt_singleq_txqs = min_t(int, max_q->max_txq, num_cpus); + dflt_splitq_rxq_grps = min_t(int, max_q->max_rxq, num_cpus); + dflt_singleq_rxqs = min_t(int, max_q->max_rxq, num_cpus); + } + + if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model))) { + num_txq_grps = num_req_tx_qs ? num_req_tx_qs : dflt_splitq_txq_grps; + vport_msg->num_tx_complq = cpu_to_le16(num_txq_grps * + IDPF_COMPLQ_PER_GROUP); + vport_msg->num_tx_q = cpu_to_le16(num_txq_grps * + IDPF_DFLT_SPLITQ_TXQ_PER_GROUP); + } else { + num_txq_grps = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS; + num_qs = num_txq_grps * (num_req_tx_qs ? 
num_req_tx_qs : + dflt_singleq_txqs); + vport_msg->num_tx_q = cpu_to_le16(num_qs); + vport_msg->num_tx_complq = 0; + } + if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->rxq_model))) { + num_rxq_grps = num_req_rx_qs ? num_req_rx_qs : dflt_splitq_rxq_grps; + vport_msg->num_rx_bufq = cpu_to_le16(num_rxq_grps * + IDPF_MAX_BUFQS_PER_RXQ_GRP); + vport_msg->num_rx_q = cpu_to_le16(num_rxq_grps * + IDPF_DFLT_SPLITQ_RXQ_PER_GROUP); + } else { + num_rxq_grps = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS; + num_qs = num_rxq_grps * (num_req_rx_qs ? num_req_rx_qs : + dflt_singleq_rxqs); + vport_msg->num_rx_q = cpu_to_le16(num_qs); + vport_msg->num_rx_bufq = 0; + } + + return 0; +} + +/** + * idpf_vport_calc_num_q_groups - Calculate number of queue groups + * @vport: vport to calculate q groups for + */ +void idpf_vport_calc_num_q_groups(struct idpf_vport *vport) +{ + if (idpf_is_queue_model_split(vport->txq_model)) + vport->num_txq_grp = vport->num_txq; + else + vport->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS; + + if (idpf_is_queue_model_split(vport->rxq_model)) + vport->num_rxq_grp = vport->num_rxq; + else + vport->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS; +} + +/** + * idpf_vport_calc_numq_per_grp - Calculate number of queues per group + * @vport: vport to calculate queues for + * @num_txq: return parameter for number of TX queues + * @num_rxq: return parameter for number of RX queues + */ +static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport, + u16 *num_txq, u16 *num_rxq) +{ + if (idpf_is_queue_model_split(vport->txq_model)) + *num_txq = IDPF_DFLT_SPLITQ_TXQ_PER_GROUP; + else + *num_txq = vport->num_txq; + + if (idpf_is_queue_model_split(vport->rxq_model)) + *num_rxq = IDPF_DFLT_SPLITQ_RXQ_PER_GROUP; + else + *num_rxq = vport->num_rxq; +} + +/** + * idpf_rxq_set_descids - set the descids supported by this queue + * @vport: virtual port data structure + * @q: rx queue for which descids are set + * + */ +static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q) +{ + if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { + q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M; + } else { + if (vport->base_rxd) + q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M; + else + q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M; + } +} + +/** + * idpf_txq_group_alloc - Allocate all txq group resources + * @vport: vport to allocate txq groups for + * @num_txq: number of txqs to allocate for each group + * + * Returns 0 on success, negative on failure + */ +static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) +{ + bool flow_sch_en; + int err, i; + + vport->txq_grps = kcalloc(vport->num_txq_grp, + sizeof(*vport->txq_grps), GFP_KERNEL); + if (!vport->txq_grps) + return -ENOMEM; + + flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, + VIRTCHNL2_CAP_SPLITQ_QSCHED); + + for (i = 0; i < vport->num_txq_grp; i++) { + struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + struct idpf_adapter *adapter = vport->adapter; + int j; + + tx_qgrp->vport = vport; + tx_qgrp->num_txq = num_txq; + + for (j = 0; j < tx_qgrp->num_txq; j++) { + tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]), + GFP_KERNEL); + if (!tx_qgrp->txqs[j]) { + err = -ENOMEM; + goto err_alloc; + } + } + + for (j = 0; j < tx_qgrp->num_txq; j++) { + struct idpf_queue *q = tx_qgrp->txqs[j]; + + q->dev = &adapter->pdev->dev; + q->desc_count = vport->txq_desc_count; + q->tx_max_bufs = idpf_get_max_tx_bufs(adapter); + q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter); + q->vport = vport; + q->txq_grp = tx_qgrp; + 
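+	/* sched_buf_hash holds buffers stashed for out-of-order completions
+	 * when flow scheduling is in use; see idpf_stash_flow_sch_buffers().
+	 */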
hash_init(q->sched_buf_hash); + + if (flow_sch_en) + set_bit(__IDPF_Q_FLOW_SCH_EN, q->flags); + } + + if (!idpf_is_queue_model_split(vport->txq_model)) + continue; + + tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP, + sizeof(*tx_qgrp->complq), + GFP_KERNEL); + if (!tx_qgrp->complq) { + err = -ENOMEM; + goto err_alloc; + } + + tx_qgrp->complq->dev = &adapter->pdev->dev; + tx_qgrp->complq->desc_count = vport->complq_desc_count; + tx_qgrp->complq->vport = vport; + tx_qgrp->complq->txq_grp = tx_qgrp; + + if (flow_sch_en) + __set_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags); + } + + return 0; + +err_alloc: + idpf_txq_group_rel(vport); + + return err; +} + +/** + * idpf_rxq_group_alloc - Allocate all rxq group resources + * @vport: vport to allocate rxq groups for + * @num_rxq: number of rxqs to allocate for each group + * + * Returns 0 on success, negative on failure + */ +static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) +{ + struct idpf_adapter *adapter = vport->adapter; + struct idpf_queue *q; + int i, k, err = 0; + + vport->rxq_grps = kcalloc(vport->num_rxq_grp, + sizeof(struct idpf_rxq_group), GFP_KERNEL); + if (!vport->rxq_grps) + return -ENOMEM; + + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + int j; + + rx_qgrp->vport = vport; + if (!idpf_is_queue_model_split(vport->rxq_model)) { + rx_qgrp->singleq.num_rxq = num_rxq; + for (j = 0; j < num_rxq; j++) { + rx_qgrp->singleq.rxqs[j] = + kzalloc(sizeof(*rx_qgrp->singleq.rxqs[j]), + GFP_KERNEL); + if (!rx_qgrp->singleq.rxqs[j]) { + err = -ENOMEM; + goto err_alloc; + } + } + goto skip_splitq_rx_init; + } + rx_qgrp->splitq.num_rxq_sets = num_rxq; + + for (j = 0; j < num_rxq; j++) { + rx_qgrp->splitq.rxq_sets[j] = + kzalloc(sizeof(struct idpf_rxq_set), + GFP_KERNEL); + if (!rx_qgrp->splitq.rxq_sets[j]) { + err = -ENOMEM; + goto err_alloc; + } + } + + rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp, + sizeof(struct idpf_bufq_set), + GFP_KERNEL); + if (!rx_qgrp->splitq.bufq_sets) { + err = -ENOMEM; + goto err_alloc; + } + + for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + struct idpf_bufq_set *bufq_set = + &rx_qgrp->splitq.bufq_sets[j]; + int swq_size = sizeof(struct idpf_sw_queue); + + q = &rx_qgrp->splitq.bufq_sets[j].bufq; + q->dev = &adapter->pdev->dev; + q->desc_count = vport->bufq_desc_count[j]; + q->vport = vport; + q->rxq_grp = rx_qgrp; + q->idx = j; + q->rx_buf_size = vport->bufq_size[j]; + q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; + q->rx_buf_stride = IDPF_RX_BUF_STRIDE; + if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS, + IDPF_CAP_HSPLIT) && + idpf_is_queue_model_split(vport->rxq_model)) { + q->rx_hsplit_en = true; + q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; + } + + bufq_set->num_refillqs = num_rxq; + bufq_set->refillqs = kcalloc(num_rxq, swq_size, + GFP_KERNEL); + if (!bufq_set->refillqs) { + err = -ENOMEM; + goto err_alloc; + } + for (k = 0; k < bufq_set->num_refillqs; k++) { + struct idpf_sw_queue *refillq = + &bufq_set->refillqs[k]; + + refillq->dev = &vport->adapter->pdev->dev; + refillq->desc_count = + vport->bufq_desc_count[j]; + set_bit(__IDPF_Q_GEN_CHK, refillq->flags); + set_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags); + refillq->ring = kcalloc(refillq->desc_count, + sizeof(u16), + GFP_KERNEL); + if (!refillq->ring) { + err = -ENOMEM; + goto err_alloc; + } + } + } + +skip_splitq_rx_init: + for (j = 0; j < num_rxq; j++) { + if (!idpf_is_queue_model_split(vport->rxq_model)) { + q = rx_qgrp->singleq.rxqs[j]; + goto setup_rxq; + } + 
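+	/* Splitq: wire each RXQ to a refill queue from each buffer queue
+	 * set so consumed buffer IDs can be handed back to the bufqs.
+	 */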
q = &rx_qgrp->splitq.rxq_sets[j]->rxq; + rx_qgrp->splitq.rxq_sets[j]->refillq0 = + &rx_qgrp->splitq.bufq_sets[0].refillqs[j]; + if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) + rx_qgrp->splitq.rxq_sets[j]->refillq1 = + &rx_qgrp->splitq.bufq_sets[1].refillqs[j]; + + if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS, + IDPF_CAP_HSPLIT) && + idpf_is_queue_model_split(vport->rxq_model)) { + q->rx_hsplit_en = true; + q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; + } + +setup_rxq: + q->dev = &adapter->pdev->dev; + q->desc_count = vport->rxq_desc_count; + q->vport = vport; + q->rxq_grp = rx_qgrp; + q->idx = (i * num_rxq) + j; + /* In splitq mode, RXQ buffer size should be + * set to that of the first buffer queue + * associated with this RXQ + */ + q->rx_buf_size = vport->bufq_size[0]; + q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; + q->rx_max_pkt_size = vport->netdev->mtu + + IDPF_PACKET_HDR_PAD; + idpf_rxq_set_descids(vport, q); + } + } + +err_alloc: + if (err) + idpf_rxq_group_rel(vport); + + return err; +} + +/** + * idpf_vport_queue_grp_alloc_all - Allocate all queue groups/resources + * @vport: vport with qgrps to allocate + * + * Returns 0 on success, negative on failure + */ +static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport) +{ + u16 num_txq, num_rxq; + int err; + + idpf_vport_calc_numq_per_grp(vport, &num_txq, &num_rxq); + + err = idpf_txq_group_alloc(vport, num_txq); + if (err) + goto err_out; + + err = idpf_rxq_group_alloc(vport, num_rxq); + if (err) + goto err_out; + + return 0; + +err_out: + idpf_vport_queue_grp_rel_all(vport); + + return err; +} + +/** + * idpf_vport_queues_alloc - Allocate memory for all queues + * @vport: virtual port + * + * Allocate memory for queues associated with a vport. Returns 0 on success, + * negative on failure. + */ +int idpf_vport_queues_alloc(struct idpf_vport *vport) +{ + int err; + + err = idpf_vport_queue_grp_alloc_all(vport); + if (err) + goto err_out; + + err = idpf_tx_desc_alloc_all(vport); + if (err) + goto err_out; + + err = idpf_rx_desc_alloc_all(vport); + if (err) + goto err_out; + + err = idpf_vport_init_fast_path_txqs(vport); + if (err) + goto err_out; + + return 0; + +err_out: + idpf_vport_queues_rel(vport); + + return err; +} + +/** + * idpf_tx_handle_sw_marker - Handle queue marker packet + * @tx_q: tx queue to handle software marker + */ +static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q) +{ + struct idpf_vport *vport = tx_q->vport; + int i; + + clear_bit(__IDPF_Q_SW_MARKER, tx_q->flags); + /* Hardware must write marker packets to all queues associated with + * completion queues. So check if all queues received marker packets + */ + for (i = 0; i < vport->num_txq; i++) + /* If we're still waiting on any other TXQ marker completions, + * just return now since we cannot wake up the marker_wq yet. 
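+	 * (The drain waiter is expected to be sleeping on vport->sw_marker_wq;
+	 * it is woken below once IDPF_VPORT_SW_MARKER is set.)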
+ */ + if (test_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags)) + return; + + /* Drain complete */ + set_bit(IDPF_VPORT_SW_MARKER, vport->flags); + wake_up(&vport->sw_marker_wq); +} + +/** + * idpf_tx_splitq_clean_hdr - Clean TX buffer resources for header portion of + * packet + * @tx_q: tx queue to clean buffer from + * @tx_buf: buffer to be cleaned + * @cleaned: pointer to stats struct to track cleaned packets/bytes + * @napi_budget: Used to determine if we are in netpoll + */ +static void idpf_tx_splitq_clean_hdr(struct idpf_queue *tx_q, + struct idpf_tx_buf *tx_buf, + struct idpf_cleaned_stats *cleaned, + int napi_budget) +{ + napi_consume_skb(tx_buf->skb, napi_budget); + + if (dma_unmap_len(tx_buf, len)) { + dma_unmap_single(tx_q->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + + dma_unmap_len_set(tx_buf, len, 0); + } + + /* clear tx_buf data */ + tx_buf->skb = NULL; + + cleaned->bytes += tx_buf->bytecount; + cleaned->packets += tx_buf->gso_segs; +} + +/** + * idpf_tx_clean_stashed_bufs - clean bufs that were stored for + * out of order completions + * @txq: queue to clean + * @compl_tag: completion tag of packet to clean (from completion descriptor) + * @cleaned: pointer to stats struct to track cleaned packets/bytes + * @budget: Used to determine if we are in netpoll + */ +static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag, + struct idpf_cleaned_stats *cleaned, + int budget) +{ + struct idpf_tx_stash *stash; + struct hlist_node *tmp_buf; + + /* Buffer completion */ + hash_for_each_possible_safe(txq->sched_buf_hash, stash, tmp_buf, + hlist, compl_tag) { + if (unlikely(stash->buf.compl_tag != (int)compl_tag)) + continue; + + if (stash->buf.skb) { + idpf_tx_splitq_clean_hdr(txq, &stash->buf, cleaned, + budget); + } else if (dma_unmap_len(&stash->buf, len)) { + dma_unmap_page(txq->dev, + dma_unmap_addr(&stash->buf, dma), + dma_unmap_len(&stash->buf, len), + DMA_TO_DEVICE); + dma_unmap_len_set(&stash->buf, len, 0); + } + + /* Push shadow buf back onto stack */ + idpf_buf_lifo_push(&txq->buf_stack, stash); + + hash_del(&stash->hlist); + } +} + +/** + * idpf_stash_flow_sch_buffers - store buffer parameters info to be freed at a + * later time (only relevant for flow scheduling mode) + * @txq: Tx queue to clean + * @tx_buf: buffer to store + */ +static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq, + struct idpf_tx_buf *tx_buf) +{ + struct idpf_tx_stash *stash; + + if (unlikely(!dma_unmap_addr(tx_buf, dma) && + !dma_unmap_len(tx_buf, len))) + return 0; + + stash = idpf_buf_lifo_pop(&txq->buf_stack); + if (unlikely(!stash)) { + net_err_ratelimited("%s: No out-of-order TX buffers left!\n", + txq->vport->netdev->name); + + return -ENOMEM; + } + + /* Store buffer params in shadow buffer */ + stash->buf.skb = tx_buf->skb; + stash->buf.bytecount = tx_buf->bytecount; + stash->buf.gso_segs = tx_buf->gso_segs; + dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma)); + dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len)); + stash->buf.compl_tag = tx_buf->compl_tag; + + /* Add buffer to buf_hash table to be freed later */ + hash_add(txq->sched_buf_hash, &stash->hlist, stash->buf.compl_tag); + + memset(tx_buf, 0, sizeof(struct idpf_tx_buf)); + + /* Reinitialize buf_id portion of tag */ + tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; + + return 0; +} + +#define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf) \ +do { \ + (ntc)++; \ + if (unlikely(!(ntc))) { \ + ntc -= (txq)->desc_count; \ + buf = 
(txq)->tx_buf; \ + desc = IDPF_FLEX_TX_DESC(txq, 0); \ + } else { \ + (buf)++; \ + (desc)++; \ + } \ +} while (0) + +/** + * idpf_tx_splitq_clean - Reclaim resources from buffer queue + * @tx_q: Tx queue to clean + * @end: queue index until which it should be cleaned + * @napi_budget: Used to determine if we are in netpoll + * @cleaned: pointer to stats struct to track cleaned packets/bytes + * @descs_only: true if queue is using flow-based scheduling and should + * not clean buffers at this time + * + * Cleans the queue descriptor ring. If the queue is using queue-based + * scheduling, the buffers will be cleaned as well. If the queue is using + * flow-based scheduling, only the descriptors are cleaned at this time. + * Separate packet completion events will be reported on the completion queue, + * and the buffers will be cleaned separately. The stats are not updated from + * this function when using flow-based scheduling. + */ +static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end, + int napi_budget, + struct idpf_cleaned_stats *cleaned, + bool descs_only) +{ + union idpf_tx_flex_desc *next_pending_desc = NULL; + union idpf_tx_flex_desc *tx_desc; + s16 ntc = tx_q->next_to_clean; + struct idpf_tx_buf *tx_buf; + + tx_desc = IDPF_FLEX_TX_DESC(tx_q, ntc); + next_pending_desc = IDPF_FLEX_TX_DESC(tx_q, end); + tx_buf = &tx_q->tx_buf[ntc]; + ntc -= tx_q->desc_count; + + while (tx_desc != next_pending_desc) { + union idpf_tx_flex_desc *eop_desc; + + /* If this entry in the ring was used as a context descriptor, + * its corresponding entry in the buffer ring will have an + * invalid completion tag since no buffer was used. We can + * skip this descriptor since there is no buffer to clean. + */ + if (unlikely(tx_buf->compl_tag == IDPF_SPLITQ_TX_INVAL_COMPL_TAG)) + goto fetch_next_txq_desc; + + eop_desc = (union idpf_tx_flex_desc *)tx_buf->next_to_watch; + + /* clear next_to_watch to prevent false hangs */ + tx_buf->next_to_watch = NULL; + + if (descs_only) { + if (idpf_stash_flow_sch_buffers(tx_q, tx_buf)) + goto tx_splitq_clean_out; + + while (tx_desc != eop_desc) { + idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, + tx_desc, tx_buf); + + if (dma_unmap_len(tx_buf, len)) { + if (idpf_stash_flow_sch_buffers(tx_q, + tx_buf)) + goto tx_splitq_clean_out; + } + } + } else { + idpf_tx_splitq_clean_hdr(tx_q, tx_buf, cleaned, + napi_budget); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, + tx_desc, tx_buf); + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buf, len)) { + dma_unmap_page(tx_q->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buf, len, 0); + } + } + } + +fetch_next_txq_desc: + idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf); + } + +tx_splitq_clean_out: + ntc += tx_q->desc_count; + tx_q->next_to_clean = ntc; +} + +#define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf) \ +do { \ + (buf)++; \ + (ntc)++; \ + if (unlikely((ntc) == (txq)->desc_count)) { \ + buf = (txq)->tx_buf; \ + ntc = 0; \ + } \ +} while (0) + +/** + * idpf_tx_clean_buf_ring - clean flow scheduling TX queue buffers + * @txq: queue to clean + * @compl_tag: completion tag of packet to clean (from completion descriptor) + * @cleaned: pointer to stats struct to track cleaned packets/bytes + * @budget: Used to determine if we are in netpoll + * + * Cleans all buffers associated with the input completion tag either from the + * TX buffer ring or from the hash table if the buffers were 
previously + * stashed. Returns true if any buffers for this completion tag were cleaned + * directly on the ring; false means they must be cleaned from the hash table. + */ +static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag, + struct idpf_cleaned_stats *cleaned, + int budget) +{ + u16 idx = compl_tag & txq->compl_tag_bufid_m; + struct idpf_tx_buf *tx_buf = NULL; + u16 ntc = txq->next_to_clean; + u16 num_descs_cleaned = 0; + u16 orig_idx = idx; + + tx_buf = &txq->tx_buf[idx]; + + while (tx_buf->compl_tag == (int)compl_tag) { + if (tx_buf->skb) { + idpf_tx_splitq_clean_hdr(txq, tx_buf, cleaned, budget); + } else if (dma_unmap_len(tx_buf, len)) { + dma_unmap_page(txq->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buf, len, 0); + } + + memset(tx_buf, 0, sizeof(struct idpf_tx_buf)); + tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; + + num_descs_cleaned++; + idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf); + } + + /* If we didn't clean anything on the ring for this completion, there's + * nothing more to do. + */ + if (unlikely(!num_descs_cleaned)) + return false; + + /* Otherwise, if we did clean a packet on the ring directly, it's safe + * to assume that the descriptors starting from the original + * next_to_clean up until the previously cleaned packet can be reused. + * Therefore, we will go back in the ring and stash any buffers still + * in the ring into the hash table to be cleaned later. + */ + tx_buf = &txq->tx_buf[ntc]; + while (tx_buf != &txq->tx_buf[orig_idx]) { + idpf_stash_flow_sch_buffers(txq, tx_buf); + idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf); + } + + /* Finally, update next_to_clean to reflect the work that was just done + * on the ring, if any. If the packet was only cleaned from the hash + * table, the ring will not be impacted, therefore we should not touch + * next_to_clean. The updated idx is used here. + */ + txq->next_to_clean = idx; + + return true; +} + +/** + * idpf_tx_handle_rs_completion - clean a single packet and all of its buffers + * whether on the buffer ring or in the hash table + * @txq: Tx ring to clean + * @desc: pointer to completion queue descriptor to extract completion + * information from + * @cleaned: pointer to stats struct to track cleaned packets/bytes + * @budget: Used to determine if we are in netpoll + * + * Cleaned bytes/packets are returned through @cleaned. + */ +static void idpf_tx_handle_rs_completion(struct idpf_queue *txq, + struct idpf_splitq_tx_compl_desc *desc, + struct idpf_cleaned_stats *cleaned, + int budget) +{ + u16 compl_tag; + + if (!test_bit(__IDPF_Q_FLOW_SCH_EN, txq->flags)) { + u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head); + + return idpf_tx_splitq_clean(txq, head, budget, cleaned, false); + } + + compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag); + + /* If we didn't clean anything on the ring, this packet must be + * in the hash table. Go clean it there. + */ + if (!idpf_tx_clean_buf_ring(txq, compl_tag, cleaned, budget)) + idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget); +} + +/** + * idpf_tx_clean_complq - Reclaim resources on completion queue + * @complq: Tx ring to clean + * @budget: Used to determine if we are in netpoll + * @cleaned: returns number of packets cleaned + * + * Returns true if there's any budget left (i.e. 
the clean is finished) + */ +static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget, + int *cleaned) +{ + struct idpf_splitq_tx_compl_desc *tx_desc; + struct idpf_vport *vport = complq->vport; + s16 ntc = complq->next_to_clean; + struct idpf_netdev_priv *np; + unsigned int complq_budget; + bool complq_ok = true; + int i; + + complq_budget = vport->compln_clean_budget; + tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, ntc); + ntc -= complq->desc_count; + + do { + struct idpf_cleaned_stats cleaned_stats = { }; + struct idpf_queue *tx_q; + int rel_tx_qid; + u16 hw_head; + u8 ctype; /* completion type */ + u16 gen; + + /* if the descriptor isn't done, no work yet to do */ + gen = (le16_to_cpu(tx_desc->qid_comptype_gen) & + IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S; + if (test_bit(__IDPF_Q_GEN_CHK, complq->flags) != gen) + break; + + /* Find necessary info of TX queue to clean buffers */ + rel_tx_qid = (le16_to_cpu(tx_desc->qid_comptype_gen) & + IDPF_TXD_COMPLQ_QID_M) >> IDPF_TXD_COMPLQ_QID_S; + if (rel_tx_qid >= complq->txq_grp->num_txq || + !complq->txq_grp->txqs[rel_tx_qid]) { + dev_err(&complq->vport->adapter->pdev->dev, + "TxQ not found\n"); + goto fetch_next_desc; + } + tx_q = complq->txq_grp->txqs[rel_tx_qid]; + + /* Determine completion type */ + ctype = (le16_to_cpu(tx_desc->qid_comptype_gen) & + IDPF_TXD_COMPLQ_COMPL_TYPE_M) >> + IDPF_TXD_COMPLQ_COMPL_TYPE_S; + switch (ctype) { + case IDPF_TXD_COMPLT_RE: + hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head); + + idpf_tx_splitq_clean(tx_q, hw_head, budget, + &cleaned_stats, true); + break; + case IDPF_TXD_COMPLT_RS: + idpf_tx_handle_rs_completion(tx_q, tx_desc, + &cleaned_stats, budget); + break; + case IDPF_TXD_COMPLT_SW_MARKER: + idpf_tx_handle_sw_marker(tx_q); + break; + default: + dev_err(&tx_q->vport->adapter->pdev->dev, + "Unknown TX completion type: %d\n", + ctype); + goto fetch_next_desc; + } + + u64_stats_update_begin(&tx_q->stats_sync); + u64_stats_add(&tx_q->q_stats.tx.packets, cleaned_stats.packets); + u64_stats_add(&tx_q->q_stats.tx.bytes, cleaned_stats.bytes); + tx_q->cleaned_pkts += cleaned_stats.packets; + tx_q->cleaned_bytes += cleaned_stats.bytes; + complq->num_completions++; + u64_stats_update_end(&tx_q->stats_sync); + +fetch_next_desc: + tx_desc++; + ntc++; + if (unlikely(!ntc)) { + ntc -= complq->desc_count; + tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, 0); + change_bit(__IDPF_Q_GEN_CHK, complq->flags); + } + + prefetch(tx_desc); + + /* update budget accounting */ + complq_budget--; + } while (likely(complq_budget)); + + /* Store the state of the complq to be used later in deciding if a + * TXQ can be started again + */ + if (unlikely(IDPF_TX_COMPLQ_PENDING(complq->txq_grp) > + IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq))) + complq_ok = false; + + np = netdev_priv(complq->vport->netdev); + for (i = 0; i < complq->txq_grp->num_txq; ++i) { + struct idpf_queue *tx_q = complq->txq_grp->txqs[i]; + struct netdev_queue *nq; + bool dont_wake; + + /* We didn't clean anything on this queue, move along */ + if (!tx_q->cleaned_bytes) + continue; + + *cleaned += tx_q->cleaned_pkts; + + /* Update BQL */ + nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + + dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) || + np->state != __IDPF_VPORT_UP || + !netif_carrier_ok(tx_q->vport->netdev); + /* Check if the TXQ needs to and can be restarted */ + __netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes, + IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH, + dont_wake); + + /* Reset cleaned stats 
for the next time this queue is + * cleaned + */ + tx_q->cleaned_bytes = 0; + tx_q->cleaned_pkts = 0; + } + + ntc += complq->desc_count; + complq->next_to_clean = ntc; + + return !!complq_budget; +} + +/** + * idpf_tx_splitq_build_ctb - populate command tag and size for queue + * based scheduling descriptors + * @desc: descriptor to populate + * @params: pointer to tx params struct + * @td_cmd: command to be filled in desc + * @size: size of buffer + */ +void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc, + struct idpf_tx_splitq_params *params, + u16 td_cmd, u16 size) +{ + desc->q.qw1.cmd_dtype = + cpu_to_le16(params->dtype & IDPF_FLEX_TXD_QW1_DTYPE_M); + desc->q.qw1.cmd_dtype |= + cpu_to_le16((td_cmd << IDPF_FLEX_TXD_QW1_CMD_S) & + IDPF_FLEX_TXD_QW1_CMD_M); + desc->q.qw1.buf_size = cpu_to_le16((u16)size); + desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag); +} + +/** + * idpf_tx_splitq_build_flow_desc - populate command tag and size for flow + * scheduling descriptors + * @desc: descriptor to populate + * @params: pointer to tx params struct + * @td_cmd: command to be filled in desc + * @size: size of buffer + */ +void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc, + struct idpf_tx_splitq_params *params, + u16 td_cmd, u16 size) +{ + desc->flow.qw1.cmd_dtype = (u16)params->dtype | td_cmd; + desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size); + desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag); +} + +/** + * idpf_tx_maybe_stop_common - 1st level check for common Tx stop conditions + * @tx_q: the queue to be checked + * @size: number of descriptors we want to assure is available + * + * Returns 0 if stop is not needed + */ +int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size) +{ + struct netdev_queue *nq; + + if (likely(IDPF_DESC_UNUSED(tx_q) >= size)) + return 0; + + u64_stats_update_begin(&tx_q->stats_sync); + u64_stats_inc(&tx_q->q_stats.tx.q_busy); + u64_stats_update_end(&tx_q->stats_sync); + + nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + + return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size); +} + +/** + * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions + * @tx_q: the queue to be checked + * @descs_needed: number of descriptors required for this packet + * + * Returns 0 if stop is not needed + */ +static int idpf_tx_maybe_stop_splitq(struct idpf_queue *tx_q, + unsigned int descs_needed) +{ + if (idpf_tx_maybe_stop_common(tx_q, descs_needed)) + goto splitq_stop; + + /* If there are too many outstanding completions expected on the + * completion queue, stop the TX queue to give the device some time to + * catch up + */ + if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) > + IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq))) + goto splitq_stop; + + /* Also check for available book keeping buffers; if we are low, stop + * the queue to wait for more completions + */ + if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q))) + goto splitq_stop; + + return 0; + +splitq_stop: + u64_stats_update_begin(&tx_q->stats_sync); + u64_stats_inc(&tx_q->q_stats.tx.q_busy); + u64_stats_update_end(&tx_q->stats_sync); + netif_stop_subqueue(tx_q->vport->netdev, tx_q->idx); + + return -EBUSY; +} + +/** + * idpf_tx_buf_hw_update - Store the new tail value + * @tx_q: queue to bump + * @val: new tail index + * @xmit_more: more skb's pending + * + * The naming here is special in that 'hw' signals that this function is about + * to do a register write to update our queue status. 
We know this can only + * mean tail here as HW should be owning head for TX. + */ +void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val, + bool xmit_more) +{ + struct netdev_queue *nq; + + nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + tx_q->next_to_use = val; + + idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + + /* notify HW of packet */ + if (netif_xmit_stopped(nq) || !xmit_more) + writel(val, tx_q->tail); +} + +/** + * idpf_tx_desc_count_required - calculate number of Tx descriptors needed + * @txq: queue to send buffer on + * @skb: send buffer + * + * Returns number of data descriptors needed for this skb. + */ +unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq, + struct sk_buff *skb) +{ + const struct skb_shared_info *shinfo; + unsigned int count = 0, i; + + count += !!skb_headlen(skb); + + if (!skb_is_nonlinear(skb)) + return count; + + shinfo = skb_shinfo(skb); + for (i = 0; i < shinfo->nr_frags; i++) { + unsigned int size; + + size = skb_frag_size(&shinfo->frags[i]); + + /* We only need to use the idpf_size_to_txd_count check if the + * fragment is going to span multiple descriptors, + * i.e. size >= 16K. + */ + if (size >= SZ_16K) + count += idpf_size_to_txd_count(size); + else + count++; + } + + if (idpf_chk_linearize(skb, txq->tx_max_bufs, count)) { + if (__skb_linearize(skb)) + return 0; + + count = idpf_size_to_txd_count(skb->len); + u64_stats_update_begin(&txq->stats_sync); + u64_stats_inc(&txq->q_stats.tx.linearize); + u64_stats_update_end(&txq->stats_sync); + } + + return count; +} + +/** + * idpf_tx_dma_map_error - handle TX DMA map errors + * @txq: queue to send buffer on + * @skb: send buffer + * @first: original first buffer info buffer for packet + * @idx: starting point on ring to unwind + */ +void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb, + struct idpf_tx_buf *first, u16 idx) +{ + u64_stats_update_begin(&txq->stats_sync); + u64_stats_inc(&txq->q_stats.tx.dma_map_errs); + u64_stats_update_end(&txq->stats_sync); + + /* clear dma mappings for failed tx_buf map */ + for (;;) { + struct idpf_tx_buf *tx_buf; + + tx_buf = &txq->tx_buf[idx]; + idpf_tx_buf_rel(txq, tx_buf); + if (tx_buf == first) + break; + if (idx == 0) + idx = txq->desc_count; + idx--; + } + + if (skb_is_gso(skb)) { + union idpf_tx_flex_desc *tx_desc; + + /* If we failed a DMA mapping for a TSO packet, we will have + * used one additional descriptor for a context + * descriptor. Reset that here. 
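[Editor's note] To make the unwind direction in idpf_tx_dma_map_error() concrete, here is a minimal userspace sketch, not part of the patch; DESC_COUNT and the indices are made up. It models walking backwards from the failing index, wrapping past 0 to the end of the ring, until the packet's first buffer is released.

	#include <stdio.h>

	#define DESC_COUNT 8	/* ring size (made up for illustration) */

	int main(void)
	{
		int first = 6;	/* first buffer of the failed packet */
		int idx = 1;	/* index where the DMA mapping failed */

		for (;;) {
			printf("release tx_buf[%d]\n", idx);
			if (idx == first)
				break;
			if (idx == 0)		/* wrap backwards past 0 */
				idx = DESC_COUNT;
			idx--;
		}
		return 0;
	}

This prints releases for indices 1, 0, 7, 6: the same order the loop above frees partially mapped buffers.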
+ */ + tx_desc = IDPF_FLEX_TX_DESC(txq, idx); + memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc)); + if (idx == 0) + idx = txq->desc_count; + idx--; + } + + /* Update tail in case netdev_xmit_more was previously true */ + idpf_tx_buf_hw_update(txq, idx, false); +} + +/** + * idpf_tx_splitq_bump_ntu - adjust NTU and generation + * @txq: the tx ring to wrap + * @ntu: ring index to bump + */ +static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_queue *txq, u16 ntu) +{ + ntu++; + + if (ntu == txq->desc_count) { + ntu = 0; + txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq); + } + + return ntu; +} + +/** + * idpf_tx_splitq_map - Build the Tx flex descriptor + * @tx_q: queue to send buffer on + * @params: pointer to splitq params struct + * @first: first buffer info buffer to use + * + * This function loops over the skb data pointed to by *first + * and gets a physical address for each memory location and programs + * it and the length into the transmit flex descriptor. + */ +static void idpf_tx_splitq_map(struct idpf_queue *tx_q, + struct idpf_tx_splitq_params *params, + struct idpf_tx_buf *first) +{ + union idpf_tx_flex_desc *tx_desc; + unsigned int data_len, size; + struct idpf_tx_buf *tx_buf; + u16 i = tx_q->next_to_use; + struct netdev_queue *nq; + struct sk_buff *skb; + skb_frag_t *frag; + u16 td_cmd = 0; + dma_addr_t dma; + + skb = first->skb; + + td_cmd = params->offload.td_cmd; + + data_len = skb->data_len; + size = skb_headlen(skb); + + tx_desc = IDPF_FLEX_TX_DESC(tx_q, i); + + dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buf = first; + + params->compl_tag = + (tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED; + + if (dma_mapping_error(tx_q->dev, dma)) + return idpf_tx_dma_map_error(tx_q, skb, first, i); + + tx_buf->compl_tag = params->compl_tag; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buf, len, size); + dma_unmap_addr_set(tx_buf, dma, dma); + + /* buf_addr is in same location for both desc types */ + tx_desc->q.buf_addr = cpu_to_le64(dma); + + /* The stack can send us fragments that are too large for a + * single descriptor i.e. frag size > 16K-1. We will need to + * split the fragment across multiple descriptors in this case. + * To adhere to HW alignment restrictions, the fragment needs + * to be split such that the first chunk ends on a 4K boundary + * and all subsequent chunks start on a 4K boundary. We still + * want to send as much data as possible though, so our + * intermediate descriptor chunk size will be 12K. + * + * For example, consider a 32K fragment mapped to DMA addr 2600. + * ------------------------------------------------------------ + * | frag_size = 32K | + * ------------------------------------------------------------ + * |2600 |16384 |28672 + * + * 3 descriptors will be used for this fragment. The HW expects + * the descriptors to contain the following: + * ------------------------------------------------------------ + * | size = 13784 | size = 12K | size = 6696 | + * | dma = 2600 | dma = 16384 | dma = 28672 | + * ------------------------------------------------------------ + * + * We need to first adjust the max_data for the first chunk so + * that it ends on a 4K boundary. By negating the value of the + * DMA address and taking only the low order bits, we're + * effectively calculating + * 4K - (DMA addr lower order bits) = + * bytes to next boundary. 
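[Editor's note] The 4K-boundary arithmetic above is easy to sanity-check in userspace. This sketch (not driver code; the macro values mirror IDPF_TX_MAX_READ_REQ_SIZE and IDPF_TX_MAX_DESC_DATA_ALIGNED but are restated here as assumptions) reproduces the worked example from the comment: a 32K fragment at DMA address 2600.

	#include <stdio.h>

	#define READ_REQ_SIZE	4096UL		/* models IDPF_TX_MAX_READ_REQ_SIZE */
	#define DATA_ALIGNED	(12 * 1024UL)	/* models IDPF_TX_MAX_DESC_DATA_ALIGNED */

	int main(void)
	{
		unsigned long dma = 2600, size = 32 * 1024;
		unsigned long max_data = DATA_ALIGNED;

		/* bytes to the next 4K boundary, folded into the first chunk */
		max_data += -dma & (READ_REQ_SIZE - 1);

		while (size > max_data) {
			printf("desc: dma=%lu size=%lu\n", dma, max_data);
			dma += max_data;
			size -= max_data;
			max_data = DATA_ALIGNED;	/* later chunks are plain 12K */
		}
		printf("desc: dma=%lu size=%lu\n", dma, size);
		return 0;
	}

Running it yields the 13784 / 12288 / 6696 split shown in the descriptor table above.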
+ * + * Add that to our base aligned max_data (12K) and we have + * our first chunk size. In the example above, + * 13784 = 12K + (4096-2600) + * + * After guaranteeing the first chunk ends on a 4K boundary, we + * will give the intermediate descriptors 12K chunks and + * whatever is left to the final descriptor. This ensures that + * all descriptors used for the remaining chunks of the + * fragment start on a 4K boundary and we use as few + * descriptors as possible. + */ + max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1); + while (unlikely(size > IDPF_TX_MAX_DESC_DATA)) { + idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, + max_data); + + tx_desc++; + i++; + + if (i == tx_q->desc_count) { + tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0); + i = 0; + tx_q->compl_tag_cur_gen = + IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q); + } + + /* Since this packet has a buffer that is going to span + * multiple descriptors, it's going to leave holes in + * to the TX buffer ring. To ensure these holes do not + * cause issues in the cleaning routines, we will clear + * them of any stale data and assign them the same + * completion tag as the current packet. Then when the + * packet is being cleaned, the cleaning routines will + * simply pass over these holes and finish cleaning the + * rest of the packet. + */ + memset(&tx_q->tx_buf[i], 0, sizeof(struct idpf_tx_buf)); + tx_q->tx_buf[i].compl_tag = params->compl_tag; + + /* Adjust the DMA offset and the remaining size of the + * fragment. On the first iteration of this loop, + * max_data will be >= 12K and <= 16K-1. On any + * subsequent iteration of this loop, max_data will + * always be 12K. + */ + dma += max_data; + size -= max_data; + + /* Reset max_data since remaining chunks will be 12K + * at most + */ + max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED; + + /* buf_addr is in same location for both desc types */ + tx_desc->q.buf_addr = cpu_to_le64(dma); + } + + if (!data_len) + break; + + idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size); + tx_desc++; + i++; + + if (i == tx_q->desc_count) { + tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0); + i = 0; + tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q); + } + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_q->dev, frag, 0, size, + DMA_TO_DEVICE); + + tx_buf = &tx_q->tx_buf[i]; + } + + /* record SW timestamp if HW timestamp is not available */ + skb_tx_timestamp(skb); + + /* write last descriptor with RS and EOP bits */ + td_cmd |= params->eop_cmd; + idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size); + i = idpf_tx_splitq_bump_ntu(tx_q, i); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + tx_q->txq_grp->num_completions_pending++; + + /* record bytecount for BQL */ + nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + netdev_tx_sent_queue(nq, first->bytecount); + + idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more()); +} + +/** + * idpf_tso - computes mss and TSO length to prepare for TSO + * @skb: pointer to skb + * @off: pointer to struct that holds offload parameters + * + * Returns error (negative) if TSO was requested but cannot be applied to the + * given skb, 0 if TSO does not apply to the given skb, or 1 otherwise. 
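[Editor's note] The completion tag built in idpf_tx_splitq_map() above packs a generation count above the starting ring index; the clean path later recovers the index with compl_tag_bufid_m. A minimal userspace model, with a made-up shift width (the real compl_tag_gen_s is configured per queue):

	#include <stdio.h>

	#define GEN_S	10			/* models txq->compl_tag_gen_s (assumed) */
	#define BUFID_M	((1u << GEN_S) - 1)	/* models txq->compl_tag_bufid_m */

	int main(void)
	{
		unsigned int gen = 3, ntu = 42;
		unsigned int compl_tag = (gen << GEN_S) | ntu;	/* pack, as on xmit */

		/* unpack, as the cleaning side does */
		printf("tag=0x%x buf_id=%u gen=%u\n",
		       compl_tag, compl_tag & BUFID_M, compl_tag >> GEN_S);
		return 0;
	}

The generation bits let stale tags from a previous trip around the ring be told apart from live ones.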
+ */ +int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off) +{ + const struct skb_shared_info *shinfo; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_start; + int err; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + shinfo = skb_shinfo(skb); + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; + } else if (ip.v6->version == 6) { + ip.v6->payload_len = 0; + } + + l4_start = skb_transport_offset(skb); + + /* remove payload length from checksum */ + paylen = skb->len - l4_start; + + switch (shinfo->gso_type & ~SKB_GSO_DODGY) { + case SKB_GSO_TCPV4: + case SKB_GSO_TCPV6: + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(paylen)); + off->tso_hdr_len = __tcp_hdrlen(l4.tcp) + l4_start; + break; + case SKB_GSO_UDP_L4: + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + /* compute length of segmentation header */ + off->tso_hdr_len = sizeof(struct udphdr) + l4_start; + l4.udp->len = htons(shinfo->gso_size + sizeof(struct udphdr)); + break; + default: + return -EINVAL; + } + + off->tso_len = skb->len - off->tso_hdr_len; + off->mss = shinfo->gso_size; + off->tso_segs = shinfo->gso_segs; + + off->tx_flags |= IDPF_TX_FLAGS_TSO; + + return 1; +} + +/** + * __idpf_chk_linearize - Check skb is not using too many buffers + * @skb: send buffer + * @max_bufs: maximum number of buffers + * + * For TSO we need to count the TSO header and segment payload separately. As + * such we need to check cases where we have max_bufs-1 fragments or more as we + * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1 + * for the segment payload in the first descriptor, and another max_buf-1 for + * the fragments. + */ +static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs) +{ + const struct skb_shared_info *shinfo = skb_shinfo(skb); + const skb_frag_t *frag, *stale; + int nr_frags, sum; + + /* no need to check if number of frags is less than max_bufs - 1 */ + nr_frags = shinfo->nr_frags; + if (nr_frags < (max_bufs - 1)) + return false; + + /* We need to walk through the list and validate that each group + * of max_bufs-2 fragments totals at least gso_size. + */ + nr_frags -= max_bufs - 2; + frag = &shinfo->frags[0]; + + /* Initialize size to the negative value of gso_size minus 1. We use + * this as the worst case scenario in which the frag ahead of us only + * provides one byte which is why we are limited to max_bufs-2 + * descriptors for a single transmit as the header and previous + * fragment are already consuming 2 descriptors. + */ + sum = 1 - shinfo->gso_size; + + /* Add size of frags 0 through 4 to create our initial sum */ + sum += skb_frag_size(frag++); + sum += skb_frag_size(frag++); + sum += skb_frag_size(frag++); + sum += skb_frag_size(frag++); + sum += skb_frag_size(frag++); + + /* Walk through fragments adding latest fragment, testing it, and + * then removing stale fragments from the sum. + */ + for (stale = &shinfo->frags[0];; stale++) { + int stale_size = skb_frag_size(stale); + + sum += skb_frag_size(frag++); + + /* The stale fragment may present us with a smaller + * descriptor than the actual fragment size. 
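[Editor's note] The sliding-window invariant behind __idpf_chk_linearize() can be stated directly: every window of max_bufs - 2 consecutive fragments must carry at least gso_size bytes, otherwise one segment plus its header could need more than max_bufs descriptors. This brute-force userspace sketch checks the same invariant in O(n*w); it deliberately omits the driver's O(n) stale-pointer bookkeeping and the adjustment for fragments larger than one descriptor, so it is a model, not a replacement.

	#include <stdbool.h>
	#include <stdio.h>

	static bool needs_linearize(const int *frag, int nr_frags,
				    int max_bufs, int gso_size)
	{
		int window = max_bufs - 2;
		int i, j, sum;

		if (nr_frags < max_bufs - 1)	/* trivially fits */
			return false;

		for (i = 0; i + window <= nr_frags; i++) {
			for (sum = 0, j = 0; j < window; j++)
				sum += frag[i + j];
			if (sum < gso_size)	/* window makes no progress */
				return true;
		}
		return false;
	}

	int main(void)
	{
		/* seven tiny fragments followed by one large one (made up) */
		int frags[] = { 1, 1, 1, 1, 1, 1, 1, 9000 };

		printf("%d\n", needs_linearize(frags, 8, 8, 1400)); /* prints 1 */
		return 0;
	}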
To account + * for that we need to remove all the data on the front and + * figure out what the remainder would be in the last + * descriptor associated with the fragment. + */ + if (stale_size > IDPF_TX_MAX_DESC_DATA) { + int align_pad = -(skb_frag_off(stale)) & + (IDPF_TX_MAX_READ_REQ_SIZE - 1); + + sum -= align_pad; + stale_size -= align_pad; + + do { + sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED; + stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED; + } while (stale_size > IDPF_TX_MAX_DESC_DATA); + } + + /* if sum is negative we failed to make sufficient progress */ + if (sum < 0) + return true; + + if (!nr_frags--) + break; + + sum -= stale_size; + } + + return false; +} + +/** + * idpf_chk_linearize - Check if skb exceeds max descriptors per packet + * @skb: send buffer + * @max_bufs: maximum scatter gather buffers for single packet + * @count: number of buffers this packet needs + * + * Make sure we don't exceed maximum scatter gather buffers for a single + * packet. We have to do some special checking around the boundary (max_bufs-1) + * if TSO is on since we need count the TSO header and payload separately. + * E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO + * header, 1 for segment payload, and then 7 for the fragments. + */ +bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, + unsigned int count) +{ + if (likely(count < max_bufs)) + return false; + if (skb_is_gso(skb)) + return __idpf_chk_linearize(skb, max_bufs); + + return count > max_bufs; +} + +/** + * idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring + * @txq: queue to put context descriptor on + * + * Since the TX buffer rings mimics the descriptor ring, update the tx buffer + * ring entry to reflect that this index is a context descriptor + */ +static struct idpf_flex_tx_ctx_desc * +idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq) +{ + struct idpf_flex_tx_ctx_desc *desc; + int i = txq->next_to_use; + + memset(&txq->tx_buf[i], 0, sizeof(struct idpf_tx_buf)); + txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; + + /* grab the next descriptor */ + desc = IDPF_FLEX_TX_CTX_DESC(txq, i); + txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i); + + return desc; +} + +/** + * idpf_tx_drop_skb - free the SKB and bump tail if necessary + * @tx_q: queue to send buffer on + * @skb: pointer to skb + */ +netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb) +{ + u64_stats_update_begin(&tx_q->stats_sync); + u64_stats_inc(&tx_q->q_stats.tx.skb_drops); + u64_stats_update_end(&tx_q->stats_sync); + + idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); + + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +/** + * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors + * @skb: send buffer + * @tx_q: queue to send buffer on + * + * Returns NETDEV_TX_OK if sent, else an error code + */ +static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, + struct idpf_queue *tx_q) +{ + struct idpf_tx_splitq_params tx_params = { }; + struct idpf_tx_buf *first; + unsigned int count; + int tso; + + count = idpf_tx_desc_count_required(tx_q, skb); + if (unlikely(!count)) + return idpf_tx_drop_skb(tx_q, skb); + + tso = idpf_tso(skb, &tx_params.offload); + if (unlikely(tso < 0)) + return idpf_tx_drop_skb(tx_q, skb); + + /* Check for splitq specific TX resources */ + count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso); + if (idpf_tx_maybe_stop_splitq(tx_q, count)) { + idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); + + return NETDEV_TX_BUSY; + } + + if (tso) { + 
/* If tso is needed, set up context desc */ + struct idpf_flex_tx_ctx_desc *ctx_desc = + idpf_tx_splitq_get_ctx_desc(tx_q); + + ctx_desc->tso.qw1.cmd_dtype = + cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX | + IDPF_TX_FLEX_CTX_DESC_CMD_TSO); + ctx_desc->tso.qw0.flex_tlen = + cpu_to_le32(tx_params.offload.tso_len & + IDPF_TXD_FLEX_CTX_TLEN_M); + ctx_desc->tso.qw0.mss_rt = + cpu_to_le16(tx_params.offload.mss & + IDPF_TXD_FLEX_CTX_MSS_RT_M); + ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len; + + u64_stats_update_begin(&tx_q->stats_sync); + u64_stats_inc(&tx_q->q_stats.tx.lso_pkts); + u64_stats_update_end(&tx_q->stats_sync); + } + + /* record the location of the first descriptor for this packet */ + first = &tx_q->tx_buf[tx_q->next_to_use]; + first->skb = skb; + + if (tso) { + first->gso_segs = tx_params.offload.tso_segs; + first->bytecount = skb->len + + ((first->gso_segs - 1) * tx_params.offload.tso_hdr_len); + } else { + first->gso_segs = 1; + first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); + } + + if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_q->flags)) { + tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE; + tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP; + /* Set the RE bit to catch any packets that may have not been + * stashed during RS completion cleaning. MIN_GAP is set to + * MIN_RING size to ensure it will be set at least once each + * time around the ring. + */ + if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) { + tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE; + tx_q->txq_grp->num_completions_pending++; + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) + tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN; + + } else { + tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2; + tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN; + } + + idpf_tx_splitq_map(tx_q, &tx_params, first); + + return NETDEV_TX_OK; +} + +/** + * idpf_tx_splitq_start - Selects the right Tx queue to send buffer + * @skb: send buffer + * @netdev: network interface device structure + * + * Returns NETDEV_TX_OK if sent, else an error code + */ +netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb, + struct net_device *netdev) +{ + struct idpf_vport *vport = idpf_netdev_to_vport(netdev); + struct idpf_queue *tx_q; + + if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) { + dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; + } + + tx_q = vport->txqs[skb_get_queue_mapping(skb)]; + + /* hardware can't handle really short frames, hardware padding works + * beyond this point + */ + if (skb_put_padto(skb, tx_q->tx_min_pkt_len)) { + idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); + + return NETDEV_TX_OK; + } + + return idpf_tx_splitq_frame(skb, tx_q); +} + +/** + * idpf_ptype_to_htype - get a hash type + * @decoded: Decoded Rx packet type related fields + * + * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by + * skb_set_hash based on PTYPE as parsed by HW Rx pipeline and is part of + * Rx desc. 
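[Editor's note] The RE (report event) cadence above guarantees at least one RE completion per trip around the ring. A small userspace model of that cadence, assuming one packet per descriptor and a made-up gap value (the real IDPF_TX_SPLITQ_RE_MIN_GAP is tied to the minimum ring size):

	#include <stdio.h>

	#define RING_SIZE	512
	#define RE_MIN_GAP	64	/* models IDPF_TX_SPLITQ_RE_MIN_GAP (assumed) */

	int main(void)
	{
		int ntu, re = 0;

		/* one full revolution of next_to_use */
		for (ntu = 0; ntu < RING_SIZE; ntu++)
			if (!(ntu % RE_MIN_GAP))
				re++;

		printf("RE completions per revolution: %d\n", re);	/* 8 */
		return 0;
	}

Each RE request also bumps num_completions_pending, matching the accounting in the code above.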
+ */ +enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded) +{ + if (!decoded->known) + return PKT_HASH_TYPE_NONE; + if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 && + decoded->inner_prot) + return PKT_HASH_TYPE_L4; + if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 && + decoded->outer_ip) + return PKT_HASH_TYPE_L3; + if (decoded->outer_ip == IDPF_RX_PTYPE_OUTER_L2) + return PKT_HASH_TYPE_L2; + + return PKT_HASH_TYPE_NONE; +} + +/** + * idpf_rx_hash - set the hash value in the skb + * @rxq: Rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being populated + * @rx_desc: Receive descriptor + * @decoded: Decoded Rx packet type related fields + */ +static void idpf_rx_hash(struct idpf_queue *rxq, struct sk_buff *skb, + struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, + struct idpf_rx_ptype_decoded *decoded) +{ + u32 hash; + + if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXHASH))) + return; + + hash = le16_to_cpu(rx_desc->hash1) | + (rx_desc->ff2_mirrid_hash2.hash2 << 16) | + (rx_desc->hash3 << 24); + + skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded)); +} + +/** + * idpf_rx_csum - Indicate in skb if checksum is good + * @rxq: Rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being populated + * @csum_bits: checksum fields extracted from the descriptor + * @decoded: Decoded Rx packet type related fields + * + * skb->protocol must be set before this function is called + */ +static void idpf_rx_csum(struct idpf_queue *rxq, struct sk_buff *skb, + struct idpf_rx_csum_decoded *csum_bits, + struct idpf_rx_ptype_decoded *decoded) +{ + bool ipv4, ipv6; + + /* check if Rx checksum is enabled */ + if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXCSUM))) + return; + + /* check if HW has decoded the packet and checksum */ + if (!(csum_bits->l3l4p)) + return; + + ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4); + ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6); + + if (ipv4 && (csum_bits->ipe || csum_bits->eipe)) + goto checksum_fail; + + if (ipv6 && csum_bits->ipv6exadd) + return; + + /* check for L4 errors and handle packets that were not able to be + * checksummed + */ + if (csum_bits->l4e) + goto checksum_fail; + + /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */ + switch (decoded->inner_prot) { + case IDPF_RX_PTYPE_INNER_PROT_ICMP: + case IDPF_RX_PTYPE_INNER_PROT_TCP: + case IDPF_RX_PTYPE_INNER_PROT_UDP: + if (!csum_bits->raw_csum_inv) { + u16 csum = csum_bits->raw_csum; + + skb->csum = csum_unfold((__force __sum16)~swab16(csum)); + skb->ip_summed = CHECKSUM_COMPLETE; + } else { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + break; + case IDPF_RX_PTYPE_INNER_PROT_SCTP: + skb->ip_summed = CHECKSUM_UNNECESSARY; + break; + default: + break; + } + + return; + +checksum_fail: + u64_stats_update_begin(&rxq->stats_sync); + u64_stats_inc(&rxq->q_stats.rx.hw_csum_err); + u64_stats_update_end(&rxq->stats_sync); +} + +/** + * idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor + * @rx_desc: receive descriptor + * @csum: structure to extract checksum fields + * + **/ +static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, + struct idpf_rx_csum_decoded *csum) +{ + u8 qword0, qword1; + + qword0 = rx_desc->status_err0_qw0; + qword1 = rx_desc->status_err0_qw1; + + csum->ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M, + qword1); + csum->eipe = 
FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M, + qword1); + csum->l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M, + qword1); + csum->l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M, + qword1); + csum->ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M, + qword0); + csum->raw_csum_inv = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M, + le16_to_cpu(rx_desc->ptype_err_fflags0)); + csum->raw_csum = le16_to_cpu(rx_desc->misc.raw_cs); +} + +/** + * idpf_rx_rsc - Set the RSC fields in the skb + * @rxq : Rx descriptor ring packet is being transacted on + * @skb : pointer to current skb being populated + * @rx_desc: Receive descriptor + * @decoded: Decoded Rx packet type related fields + * + * Return 0 on success and error code on failure + * + * Populate the skb fields with the total number of RSC segments, RSC payload + * length and packet type. + */ +static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb, + struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, + struct idpf_rx_ptype_decoded *decoded) +{ + u16 rsc_segments, rsc_seg_len; + bool ipv4, ipv6; + int len; + + if (unlikely(!decoded->outer_ip)) + return -EINVAL; + + rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen); + if (unlikely(!rsc_seg_len)) + return -EINVAL; + + ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4); + ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6); + + if (unlikely(!(ipv4 ^ ipv6))) + return -EINVAL; + + rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len); + if (unlikely(rsc_segments == 1)) + return 0; + + NAPI_GRO_CB(skb)->count = rsc_segments; + skb_shinfo(skb)->gso_size = rsc_seg_len; + + skb_reset_network_header(skb); + len = skb->len - skb_transport_offset(skb); + + if (ipv4) { + struct iphdr *ipv4h = ip_hdr(skb); + + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + + /* Reset and set transport header offset in skb */ + skb_set_transport_header(skb, sizeof(struct iphdr)); + + /* Compute the TCP pseudo header checksum*/ + tcp_hdr(skb)->check = + ~tcp_v4_check(len, ipv4h->saddr, ipv4h->daddr, 0); + } else { + struct ipv6hdr *ipv6h = ipv6_hdr(skb); + + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; + skb_set_transport_header(skb, sizeof(struct ipv6hdr)); + tcp_hdr(skb)->check = + ~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0); + } + + tcp_gro_complete(skb); + + u64_stats_update_begin(&rxq->stats_sync); + u64_stats_inc(&rxq->q_stats.rx.rsc_pkts); + u64_stats_update_end(&rxq->stats_sync); + + return 0; +} + +/** + * idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor + * @rxq: Rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being populated + * @rx_desc: Receive descriptor + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, protocol, and + * other fields within the skb. + */ +static int idpf_rx_process_skb_fields(struct idpf_queue *rxq, + struct sk_buff *skb, + struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc) +{ + struct idpf_rx_csum_decoded csum_bits = { }; + struct idpf_rx_ptype_decoded decoded; + u16 rx_ptype; + + rx_ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M, + le16_to_cpu(rx_desc->ptype_err_fflags0)); + + decoded = rxq->vport->rx_ptype_lkup[rx_ptype]; + /* If we don't know the ptype we can't do anything else with it. Just + * pass it up the stack as-is. 
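[Editor's note] In idpf_rx_rsc() below, the coalesced segment count is recovered arithmetically: the descriptor reports only the per-segment length, and DIV_ROUND_UP over the aggregate payload gives the count fed to GRO. A userspace sketch with made-up lengths:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int data_len = 20000;	/* aggregate RSC payload (made up) */
		unsigned int rsc_seg_len = 1448;	/* per-segment length from HW (made up) */
		unsigned int rsc_segments = DIV_ROUND_UP(data_len, rsc_seg_len);

		/* mirrors the NAPI_GRO_CB(skb)->count / gso_size assignments */
		printf("segments=%u gso_size=%u\n", rsc_segments, rsc_seg_len);
		return 0;
	}

A count of 1 means nothing was actually coalesced, which is why the function returns early in that case.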
+ */ + if (!decoded.known) + return 0; + + /* process RSS/hash */ + idpf_rx_hash(rxq, skb, rx_desc, &decoded); + + skb->protocol = eth_type_trans(skb, rxq->vport->netdev); + + if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M, + le16_to_cpu(rx_desc->hdrlen_flags))) + return idpf_rx_rsc(rxq, skb, rx_desc, &decoded); + + idpf_rx_splitq_extract_csum_bits(rx_desc, &csum_bits); + idpf_rx_csum(rxq, skb, &csum_bits, &decoded); + + return 0; +} + +/** + * idpf_rx_add_frag - Add contents of Rx buffer to sk_buff as a frag + * @rx_buf: buffer containing page to add + * @skb: sk_buff to place the data into + * @size: packet length from rx_desc + * + * This function will add the data contained in rx_buf->page to the skb. + * It will just attach the page as a frag to the skb. + * The function will then update the page offset. + */ +void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb, + unsigned int size) +{ + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, + rx_buf->page_offset, size, rx_buf->truesize); + + rx_buf->page = NULL; +} + +/** + * idpf_rx_construct_skb - Allocate skb and populate it + * @rxq: Rx descriptor queue + * @rx_buf: Rx buffer to pull data from + * @size: the length of the packet + * + * This function allocates an skb. It then populates it with the page + * data from the current receive descriptor, taking care to set up the + * skb correctly. + */ +struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq, + struct idpf_rx_buf *rx_buf, + unsigned int size) +{ + unsigned int headlen; + struct sk_buff *skb; + void *va; + + va = page_address(rx_buf->page) + rx_buf->page_offset; + + /* prefetch first cache line of first page */ + net_prefetch(va); + /* allocate a skb to store the frags */ + skb = __napi_alloc_skb(&rxq->q_vector->napi, IDPF_RX_HDR_SIZE, + GFP_ATOMIC); + if (unlikely(!skb)) { + idpf_rx_put_page(rx_buf); + + return NULL; + } + + skb_record_rx_queue(skb, rxq->idx); + skb_mark_for_recycle(skb); + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > IDPF_RX_HDR_SIZE) + headlen = eth_get_headlen(skb->dev, va, IDPF_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); + + /* if we exhaust the linear part then add what is left as a frag */ + size -= headlen; + if (!size) { + idpf_rx_put_page(rx_buf); + + return skb; + } + + skb_add_rx_frag(skb, 0, rx_buf->page, rx_buf->page_offset + headlen, + size, rx_buf->truesize); + + /* Since we're giving the page to the stack, clear our reference to it. + * We'll get a new one during buffer posting. + */ + rx_buf->page = NULL; + + return skb; +} + +/** + * idpf_rx_hdr_construct_skb - Allocate skb and populate it from header buffer + * @rxq: Rx descriptor queue + * @va: Rx buffer to pull data from + * @size: the length of the packet + * + * This function allocates an skb. It then populates it with the page data from + * the current receive descriptor, taking care to set up the skb correctly. + * This specifically uses a header buffer to start building the skb. 
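[Editor's note] The copy-break logic in idpf_rx_construct_skb() above copies at most IDPF_RX_HDR_SIZE bytes into the skb's linear area and attaches the rest of the page as a fragment. A minimal userspace model of the split (numbers assumed; it skips the eth_get_headlen() flow dissection the driver uses to stop the copy at the end of the protocol headers):

	#include <stdio.h>

	#define RX_HDR_SIZE	256	/* models IDPF_RX_HDR_SIZE (assumed) */

	int main(void)
	{
		unsigned int size = 1514;	/* packet length from the Rx descriptor */
		unsigned int headlen = size > RX_HDR_SIZE ? RX_HDR_SIZE : size;

		printf("linear copy: %u bytes, page frag: %u bytes\n",
		       headlen, size - headlen);
		return 0;
	}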
+ */ +static struct sk_buff *idpf_rx_hdr_construct_skb(struct idpf_queue *rxq, + const void *va, + unsigned int size) +{ + struct sk_buff *skb; + + /* allocate a skb to store the frags */ + skb = __napi_alloc_skb(&rxq->q_vector->napi, size, GFP_ATOMIC); + if (unlikely(!skb)) + return NULL; + + skb_record_rx_queue(skb, rxq->idx); + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* More than likely, a payload fragment, which will use a page from + * page_pool will be added to the SKB so mark it for recycle + * preemptively. And if not, it's inconsequential. + */ + skb_mark_for_recycle(skb); + + return skb; +} + +/** + * idpf_rx_splitq_test_staterr - tests bits in Rx descriptor + * status and error fields + * @stat_err_field: field from descriptor to test bits in + * @stat_err_bits: value to mask + * + */ +static bool idpf_rx_splitq_test_staterr(const u8 stat_err_field, + const u8 stat_err_bits) +{ + return !!(stat_err_field & stat_err_bits); +} + +/** + * idpf_rx_splitq_is_eop - process handling of EOP buffers + * @rx_desc: Rx descriptor for current buffer + * + * If the buffer is an EOP buffer, this function exits returning true, + * otherwise return false indicating that this is in fact a non-EOP buffer. + */ +static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc) +{ + /* if we are the last buffer then there is nothing else to do */ + return likely(idpf_rx_splitq_test_staterr(rx_desc->status_err0_qw1, + IDPF_RXD_EOF_SPLITQ)); +} + +/** + * idpf_rx_splitq_clean - Clean completed descriptors from Rx queue + * @rxq: Rx descriptor queue to retrieve receive buffer queue + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the system. 
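[Editor's note] Both the Tx completion-queue clean above and the Rx splitq clean below rely on a generation-bit handshake instead of an explicit head/tail exchange: software keeps an expected-generation flag and flips it every wrap, and a descriptor counts as done only while the generation bit hardware wrote matches. A userspace model of the control flow (ring contents made up; no memory barriers, which real code needs):

	#include <stdbool.h>
	#include <stdio.h>

	#define DESC_COUNT 4

	int main(void)
	{
		bool gen_chk = true;	/* models __IDPF_Q_GEN_CHK */
		bool ring_gen[DESC_COUNT] = { true, true, false, false };
		int ntc = 0, cleaned = 0;

		while (ring_gen[ntc] == gen_chk) {	/* descriptor owned by SW? */
			cleaned++;
			if (++ntc == DESC_COUNT) {
				ntc = 0;
				gen_chk = !gen_chk;	/* flip on wrap */
			}
		}
		printf("cleaned %d descriptors, next_to_clean=%d\n", cleaned, ntc);
		return 0;
	}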
+ * + * Returns amount of work completed + */ +static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) +{ + int total_rx_bytes = 0, total_rx_pkts = 0; + struct idpf_queue *rx_bufq = NULL; + struct sk_buff *skb = rxq->skb; + u16 ntc = rxq->next_to_clean; + + /* Process Rx packets bounded by budget */ + while (likely(total_rx_pkts < budget)) { + struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc; + struct idpf_sw_queue *refillq = NULL; + struct idpf_rxq_set *rxq_set = NULL; + struct idpf_rx_buf *rx_buf = NULL; + union virtchnl2_rx_desc *desc; + unsigned int pkt_len = 0; + unsigned int hdr_len = 0; + u16 gen_id, buf_id = 0; + /* Header buffer overflow only valid for header split */ + bool hbo = false; + int bufq_id; + u8 rxdid; + + /* get the Rx desc from Rx queue based on 'next_to_clean' */ + desc = IDPF_RX_DESC(rxq, ntc); + rx_desc = (struct virtchnl2_rx_flex_desc_adv_nic_3 *)desc; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc + */ + dma_rmb(); + + /* if the descriptor isn't done, no work yet to do */ + gen_id = le16_to_cpu(rx_desc->pktlen_gen_bufq_id); + gen_id = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M, gen_id); + + if (test_bit(__IDPF_Q_GEN_CHK, rxq->flags) != gen_id) + break; + + rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M, + rx_desc->rxdid_ucast); + if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) { + IDPF_RX_BUMP_NTC(rxq, ntc); + u64_stats_update_begin(&rxq->stats_sync); + u64_stats_inc(&rxq->q_stats.rx.bad_descs); + u64_stats_update_end(&rxq->stats_sync); + continue; + } + + pkt_len = le16_to_cpu(rx_desc->pktlen_gen_bufq_id); + pkt_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M, + pkt_len); + + hbo = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M, + rx_desc->status_err0_qw1); + + if (unlikely(hbo)) { + /* If a header buffer overflow, occurs, i.e. header is + * too large to fit in the header split buffer, HW will + * put the entire packet, including headers, in the + * data/payload buffer. 
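[Editor's note] The clean loop extracts every field (generation, packet length, bufq ID, HBO) from packed descriptor words with FIELD_GET(). A userspace sketch of that mask-based extraction; the mask values here are invented, and __builtin_ctz() (GCC/Clang) supplies the shift the kernel macro derives from the mask at compile time:

	#include <stdio.h>

	#define GEN_M		0x8000u	/* example: gen bit of pktlen_gen_bufq_id */
	#define LEN_PBUF_M	0x3fffu	/* example: packet length field */

	static unsigned int field_get(unsigned int mask, unsigned int val)
	{
		return (val & mask) >> __builtin_ctz(mask);
	}

	int main(void)
	{
		unsigned int qword = 0x8000u | 1514;	/* gen=1, pkt_len=1514 */

		printf("gen=%u pkt_len=%u\n",
		       field_get(GEN_M, qword), field_get(LEN_PBUF_M, qword));
		return 0;
	}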
+ */ + u64_stats_update_begin(&rxq->stats_sync); + u64_stats_inc(&rxq->q_stats.rx.hsplit_buf_ovf); + u64_stats_update_end(&rxq->stats_sync); + goto bypass_hsplit; + } + + hdr_len = le16_to_cpu(rx_desc->hdrlen_flags); + hdr_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M, + hdr_len); + +bypass_hsplit: + bufq_id = le16_to_cpu(rx_desc->pktlen_gen_bufq_id); + bufq_id = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M, + bufq_id); + + rxq_set = container_of(rxq, struct idpf_rxq_set, rxq); + if (!bufq_id) + refillq = rxq_set->refillq0; + else + refillq = rxq_set->refillq1; + + /* retrieve buffer from the rxq */ + rx_bufq = &rxq->rxq_grp->splitq.bufq_sets[bufq_id].bufq; + + buf_id = le16_to_cpu(rx_desc->buf_id); + + rx_buf = &rx_bufq->rx_buf.buf[buf_id]; + + if (hdr_len) { + const void *va = (u8 *)rx_bufq->rx_buf.hdr_buf_va + + (u32)buf_id * IDPF_HDR_BUF_SIZE; + + skb = idpf_rx_hdr_construct_skb(rxq, va, hdr_len); + u64_stats_update_begin(&rxq->stats_sync); + u64_stats_inc(&rxq->q_stats.rx.hsplit_pkts); + u64_stats_update_end(&rxq->stats_sync); + } + + if (pkt_len) { + idpf_rx_sync_for_cpu(rx_buf, pkt_len); + if (skb) + idpf_rx_add_frag(rx_buf, skb, pkt_len); + else + skb = idpf_rx_construct_skb(rxq, rx_buf, + pkt_len); + } else { + idpf_rx_put_page(rx_buf); + } + + /* exit if we failed to retrieve a buffer */ + if (!skb) + break; + + idpf_rx_post_buf_refill(refillq, buf_id); + + IDPF_RX_BUMP_NTC(rxq, ntc); + /* skip if it is non EOP desc */ + if (!idpf_rx_splitq_is_eop(rx_desc)) + continue; + + /* pad skb if needed (to make valid ethernet frame) */ + if (eth_skb_pad(skb)) { + skb = NULL; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* protocol */ + if (unlikely(idpf_rx_process_skb_fields(rxq, skb, rx_desc))) { + dev_kfree_skb_any(skb); + skb = NULL; + continue; + } + + /* send completed skb up the stack */ + napi_gro_receive(&rxq->q_vector->napi, skb); + skb = NULL; + + /* update budget accounting */ + total_rx_pkts++; + } + + rxq->next_to_clean = ntc; + + rxq->skb = skb; + u64_stats_update_begin(&rxq->stats_sync); + u64_stats_add(&rxq->q_stats.rx.packets, total_rx_pkts); + u64_stats_add(&rxq->q_stats.rx.bytes, total_rx_bytes); + u64_stats_update_end(&rxq->stats_sync); + + /* guarantee a trip back through this routine if there was a failure */ + return total_rx_pkts; +} + +/** + * idpf_rx_update_bufq_desc - Update buffer queue descriptor + * @bufq: Pointer to the buffer queue + * @refill_desc: SW Refill queue descriptor containing buffer ID + * @buf_desc: Buffer queue descriptor + * + * Return 0 on success and negative on failure. 
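[Editor's note] The header buffers referenced above live in one contiguous DMA block, so a buffer ID maps to its header address with a fixed stride, as in both the hdr_addr programming below and the va computation above. A trivial userspace model (base address and IDPF_HDR_BUF_SIZE value assumed):

	#include <stdio.h>

	#define HDR_BUF_SIZE 256ULL	/* models IDPF_HDR_BUF_SIZE (assumed) */

	int main(void)
	{
		unsigned long long hdr_buf_pa = 0x100000;	/* block base (made up) */
		unsigned int buf_id = 42;

		printf("hdr addr for buf %u: 0x%llx\n", buf_id,
		       hdr_buf_pa + (unsigned long long)buf_id * HDR_BUF_SIZE);
		return 0;
	}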
+ */ +static int idpf_rx_update_bufq_desc(struct idpf_queue *bufq, u16 refill_desc, + struct virtchnl2_splitq_rx_buf_desc *buf_desc) +{ + struct idpf_rx_buf *buf; + dma_addr_t addr; + u16 buf_id; + + buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc); + + buf = &bufq->rx_buf.buf[buf_id]; + + addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size); + if (unlikely(addr == DMA_MAPPING_ERROR)) + return -ENOMEM; + + buf_desc->pkt_addr = cpu_to_le64(addr); + buf_desc->qword0.buf_id = cpu_to_le16(buf_id); + + if (!bufq->rx_hsplit_en) + return 0; + + buf_desc->hdr_addr = cpu_to_le64(bufq->rx_buf.hdr_buf_pa + + (u32)buf_id * IDPF_HDR_BUF_SIZE); + + return 0; +} + +/** + * idpf_rx_clean_refillq - Clean refill queue buffers + * @bufq: buffer queue to post buffers back to + * @refillq: refill queue to clean + * + * This function takes care of the buffer refill management + */ +static void idpf_rx_clean_refillq(struct idpf_queue *bufq, + struct idpf_sw_queue *refillq) +{ + struct virtchnl2_splitq_rx_buf_desc *buf_desc; + u16 bufq_nta = bufq->next_to_alloc; + u16 ntc = refillq->next_to_clean; + int cleaned = 0; + u16 gen; + + buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, bufq_nta); + + /* make sure we stop at ring wrap in the unlikely case ring is full */ + while (likely(cleaned < refillq->desc_count)) { + u16 refill_desc = IDPF_SPLITQ_RX_BI_DESC(refillq, ntc); + bool failure; + + gen = FIELD_GET(IDPF_RX_BI_GEN_M, refill_desc); + if (test_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags) != gen) + break; + + failure = idpf_rx_update_bufq_desc(bufq, refill_desc, + buf_desc); + if (failure) + break; + + if (unlikely(++ntc == refillq->desc_count)) { + change_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags); + ntc = 0; + } + + if (unlikely(++bufq_nta == bufq->desc_count)) { + buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, 0); + bufq_nta = 0; + } else { + buf_desc++; + } + + cleaned++; + } + + if (!cleaned) + return; + + /* We want to limit how many transactions on the bus we trigger with + * tail writes so we only do it in strides. It's also important we + * align the write to a multiple of 8 as required by HW. + */ + if (((bufq->next_to_use <= bufq_nta ? 0 : bufq->desc_count) + + bufq_nta - bufq->next_to_use) >= IDPF_RX_BUF_POST_STRIDE) + idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq_nta, + IDPF_RX_BUF_POST_STRIDE)); + + /* update next to alloc since we have filled the ring */ + refillq->next_to_clean = ntc; + bufq->next_to_alloc = bufq_nta; +} + +/** + * idpf_rx_clean_refillq_all - Clean all refill queues + * @bufq: buffer queue with refill queues + * + * Iterates through all refill queues assigned to the buffer queue assigned to + * this vector. Returns true if clean is complete within budget, false + * otherwise. 
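[Editor's note] The strided tail-write test in idpf_rx_clean_refillq() above counts pending buffers with wrap handling and only bumps tail in stride multiples. A userspace model of the wrapped case (sizes made up; ALIGN_DOWN restated locally):

	#include <stdio.h>

	#define DESC_COUNT		512
	#define RX_BUF_POST_STRIDE	16	/* models IDPF_RX_BUF_POST_STRIDE (assumed) */
	#define ALIGN_DOWN(x, a)	((x) / (a) * (a))

	int main(void)
	{
		unsigned int next_to_use = 500, nta = 25;	/* nta wrapped past 0 */
		unsigned int pending = (next_to_use <= nta ? 0 : DESC_COUNT) +
				       nta - next_to_use;

		if (pending >= RX_BUF_POST_STRIDE)
			printf("write tail = %u (pending %u)\n",
			       ALIGN_DOWN(nta, RX_BUF_POST_STRIDE), pending);
		return 0;
	}

Here 37 buffers are pending, but tail only advances to 16, the last stride-aligned index, keeping the writes both batched and 8-aligned as the hardware requires.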
+ */ +static void idpf_rx_clean_refillq_all(struct idpf_queue *bufq) +{ + struct idpf_bufq_set *bufq_set; + int i; + + bufq_set = container_of(bufq, struct idpf_bufq_set, bufq); + for (i = 0; i < bufq_set->num_refillqs; i++) + idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]); +} + +/** + * idpf_vport_intr_clean_queues - MSIX mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a q_vector + * + */ +static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq, + void *data) +{ + struct idpf_q_vector *q_vector = (struct idpf_q_vector *)data; + + q_vector->total_events++; + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * idpf_vport_intr_napi_del_all - Unregister napi for all q_vectors in vport + * @vport: virtual port structure + * + */ +static void idpf_vport_intr_napi_del_all(struct idpf_vport *vport) +{ + u16 v_idx; + + for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) + netif_napi_del(&vport->q_vectors[v_idx].napi); +} + +/** + * idpf_vport_intr_napi_dis_all - Disable NAPI for all q_vectors in the vport + * @vport: main vport structure + */ +static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport) +{ + int v_idx; + + for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) + napi_disable(&vport->q_vectors[v_idx].napi); +} + +/** + * idpf_vport_intr_rel - Free memory allocated for interrupt vectors + * @vport: virtual port + * + * Free the memory allocated for interrupt vectors associated to a vport + */ +void idpf_vport_intr_rel(struct idpf_vport *vport) +{ + int i, j, v_idx; + + for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { + struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx]; + + kfree(q_vector->bufq); + q_vector->bufq = NULL; + kfree(q_vector->tx); + q_vector->tx = NULL; + kfree(q_vector->rx); + q_vector->rx = NULL; + } + + /* Clean up the mapping of queues to vectors */ + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + + if (idpf_is_queue_model_split(vport->rxq_model)) + for (j = 0; j < rx_qgrp->splitq.num_rxq_sets; j++) + rx_qgrp->splitq.rxq_sets[j]->rxq.q_vector = NULL; + else + for (j = 0; j < rx_qgrp->singleq.num_rxq; j++) + rx_qgrp->singleq.rxqs[j]->q_vector = NULL; + } + + if (idpf_is_queue_model_split(vport->txq_model)) + for (i = 0; i < vport->num_txq_grp; i++) + vport->txq_grps[i].complq->q_vector = NULL; + else + for (i = 0; i < vport->num_txq_grp; i++) + for (j = 0; j < vport->txq_grps[i].num_txq; j++) + vport->txq_grps[i].txqs[j]->q_vector = NULL; + + kfree(vport->q_vectors); + vport->q_vectors = NULL; +} + +/** + * idpf_vport_intr_rel_irq - Free the IRQ association with the OS + * @vport: main vport structure + */ +static void idpf_vport_intr_rel_irq(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + int vector; + + for (vector = 0; vector < vport->num_q_vectors; vector++) { + struct idpf_q_vector *q_vector = &vport->q_vectors[vector]; + int irq_num, vidx; + + /* free only the irqs that were actually requested */ + if (!q_vector) + continue; + + vidx = vport->q_vector_idxs[vector]; + irq_num = adapter->msix_entries[vidx].vector; + + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(irq_num, NULL); + free_irq(irq_num, q_vector); + } +} + +/** + * idpf_vport_intr_dis_irq_all - Disable all interrupt + * @vport: main vport structure + */ +static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport) +{ + struct idpf_q_vector *q_vector = vport->q_vectors; + int q_idx; + + for (q_idx 
= 0; q_idx < vport->num_q_vectors; q_idx++) + writel(0, q_vector[q_idx].intr_reg.dyn_ctl); +} + +/** + * idpf_vport_intr_buildreg_itr - Enable default interrupt generation settings + * @q_vector: pointer to q_vector + * @type: itr index + * @itr: itr value + */ +static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector, + const int type, u16 itr) +{ + u32 itr_val; + + itr &= IDPF_ITR_MASK; + /* Don't clear PBA because that can cause lost interrupts that + * came in while we were cleaning/polling + */ + itr_val = q_vector->intr_reg.dyn_ctl_intena_m | + (type << q_vector->intr_reg.dyn_ctl_itridx_s) | + (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1)); + + return itr_val; +} + +/** + * idpf_update_dim_sample - Update dim sample with packets and bytes + * @q_vector: the vector associated with the interrupt + * @dim_sample: dim sample to update + * @dim: dim instance structure + * @packets: total packets + * @bytes: total bytes + * + * Update the dim sample with the packets and bytes which are passed to this + * function. Set the dim state appropriately if the dim settings gets stale. + */ +static void idpf_update_dim_sample(struct idpf_q_vector *q_vector, + struct dim_sample *dim_sample, + struct dim *dim, u64 packets, u64 bytes) +{ + dim_update_sample(q_vector->total_events, packets, bytes, dim_sample); + dim_sample->comp_ctr = 0; + + /* if dim settings get stale, like when not updated for 1 second or + * longer, force it to start again. This addresses the frequent case + * of an idle queue being switched to by the scheduler. + */ + if (ktime_ms_delta(dim_sample->time, dim->start_sample.time) >= HZ) + dim->state = DIM_START_MEASURE; +} + +/** + * idpf_net_dim - Update net DIM algorithm + * @q_vector: the vector associated with the interrupt + * + * Create a DIM sample and notify net_dim() so that it can possibly decide + * a new ITR value based on incoming packets, bytes, and interrupts. + * + * This function is a no-op if the queue is not configured to dynamic ITR. 
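[Editor's note] idpf_net_dim() below reads the queue counters with the u64_stats_fetch_begin()/u64_stats_fetch_retry() pattern: snapshot a sequence count, copy the stats, and retry if a writer bumped the count in between. This userspace sketch shows only the control flow; the real primitives also encode writer-in-progress in odd sequence values and use memory barriers:

	#include <stdio.h>

	struct stats { unsigned int seq; unsigned long packets, bytes; };

	int main(void)
	{
		struct stats s = { .seq = 2, .packets = 1000, .bytes = 64000 };
		unsigned long packets, bytes;
		unsigned int start;

		do {
			start = s.seq;		/* u64_stats_fetch_begin() */
			packets = s.packets;	/* copy a consistent snapshot */
			bytes = s.bytes;
		} while (start != s.seq);	/* u64_stats_fetch_retry() */

		printf("packets=%lu bytes=%lu\n", packets, bytes);
		return 0;
	}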
+ */ +static void idpf_net_dim(struct idpf_q_vector *q_vector) +{ + struct dim_sample dim_sample = { }; + u64 packets, bytes; + u32 i; + + if (!IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode)) + goto check_rx_itr; + + for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) { + struct idpf_queue *txq = q_vector->tx[i]; + unsigned int start; + + do { + start = u64_stats_fetch_begin(&txq->stats_sync); + packets += u64_stats_read(&txq->q_stats.tx.packets); + bytes += u64_stats_read(&txq->q_stats.tx.bytes); + } while (u64_stats_fetch_retry(&txq->stats_sync, start)); + } + + idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim, + packets, bytes); + net_dim(&q_vector->tx_dim, dim_sample); + +check_rx_itr: + if (!IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode)) + return; + + for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) { + struct idpf_queue *rxq = q_vector->rx[i]; + unsigned int start; + + do { + start = u64_stats_fetch_begin(&rxq->stats_sync); + packets += u64_stats_read(&rxq->q_stats.rx.packets); + bytes += u64_stats_read(&rxq->q_stats.rx.bytes); + } while (u64_stats_fetch_retry(&rxq->stats_sync, start)); + } + + idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim, + packets, bytes); + net_dim(&q_vector->rx_dim, dim_sample); +} + +/** + * idpf_vport_intr_update_itr_ena_irq - Update itr and re-enable MSIX interrupt + * @q_vector: q_vector for which itr is being updated and interrupt enabled + * + * Update the net_dim() algorithm and re-enable the interrupt associated with + * this vector. + */ +void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector) +{ + u32 intval; + + /* net_dim() updates ITR out-of-band using a work item */ + idpf_net_dim(q_vector); + + intval = idpf_vport_intr_buildreg_itr(q_vector, + IDPF_NO_ITR_UPDATE_IDX, 0); + + writel(intval, q_vector->intr_reg.dyn_ctl); +} + +/** + * idpf_vport_intr_req_irq - get MSI-X vectors from the OS for the vport + * @vport: main vport structure + * @basename: name for the vector + */ +static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename) +{ + struct idpf_adapter *adapter = vport->adapter; + int vector, err, irq_num, vidx; + const char *vec_name; + + for (vector = 0; vector < vport->num_q_vectors; vector++) { + struct idpf_q_vector *q_vector = &vport->q_vectors[vector]; + + vidx = vport->q_vector_idxs[vector]; + irq_num = adapter->msix_entries[vidx].vector; + + if (q_vector->num_rxq && q_vector->num_txq) + vec_name = "TxRx"; + else if (q_vector->num_rxq) + vec_name = "Rx"; + else if (q_vector->num_txq) + vec_name = "Tx"; + else + continue; + + q_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d", + basename, vec_name, vidx); + + err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0, + q_vector->name, q_vector); + if (err) { + netdev_err(vport->netdev, + "Request_irq failed, error: %d\n", err); + goto free_q_irqs; + } + /* assign the mask for this irq */ + irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); + } + + return 0; + +free_q_irqs: + while (--vector >= 0) { + vidx = vport->q_vector_idxs[vector]; + irq_num = adapter->msix_entries[vidx].vector; + free_irq(irq_num, &vport->q_vectors[vector]); + } + + return err; +} + +/** + * idpf_vport_intr_write_itr - Write ITR value to the ITR register + * @q_vector: q_vector structure + * @itr: Interrupt throttling rate + * @tx: Tx or Rx ITR + */ +void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector, u16 itr, bool tx) +{ + struct idpf_intr_reg *intr_reg; + + if (tx && !q_vector->tx) + return; + else if (!tx 
&& !q_vector->rx) + return; + + intr_reg = &q_vector->intr_reg; + writel(ITR_REG_ALIGN(itr) >> IDPF_ITR_GRAN_S, + tx ? intr_reg->tx_itr : intr_reg->rx_itr); +} + +/** + * idpf_vport_intr_ena_irq_all - Enable IRQ for the given vport + * @vport: main vport structure + */ +static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport) +{ + bool dynamic; + int q_idx; + u16 itr; + + for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) { + struct idpf_q_vector *qv = &vport->q_vectors[q_idx]; + + /* Set the initial ITR values */ + if (qv->num_txq) { + dynamic = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode); + itr = vport->tx_itr_profile[qv->tx_dim.profile_ix]; + idpf_vport_intr_write_itr(qv, dynamic ? + itr : qv->tx_itr_value, + true); + } + + if (qv->num_rxq) { + dynamic = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode); + itr = vport->rx_itr_profile[qv->rx_dim.profile_ix]; + idpf_vport_intr_write_itr(qv, dynamic ? + itr : qv->rx_itr_value, + false); + } + + if (qv->num_txq || qv->num_rxq) + idpf_vport_intr_update_itr_ena_irq(qv); + } +} + +/** + * idpf_vport_intr_deinit - Release all vector associations for the vport + * @vport: main vport structure + */ +void idpf_vport_intr_deinit(struct idpf_vport *vport) +{ + idpf_vport_intr_napi_dis_all(vport); + idpf_vport_intr_napi_del_all(vport); + idpf_vport_intr_dis_irq_all(vport); + idpf_vport_intr_rel_irq(vport); +} + +/** + * idpf_tx_dim_work - Call back from the stack + * @work: work queue structure + */ +static void idpf_tx_dim_work(struct work_struct *work) +{ + struct idpf_q_vector *q_vector; + struct idpf_vport *vport; + struct dim *dim; + u16 itr; + + dim = container_of(work, struct dim, work); + q_vector = container_of(dim, struct idpf_q_vector, tx_dim); + vport = q_vector->vport; + + if (dim->profile_ix >= ARRAY_SIZE(vport->tx_itr_profile)) + dim->profile_ix = ARRAY_SIZE(vport->tx_itr_profile) - 1; + + /* look up the values in our local table */ + itr = vport->tx_itr_profile[dim->profile_ix]; + + idpf_vport_intr_write_itr(q_vector, itr, true); + + dim->state = DIM_START_MEASURE; +} + +/** + * idpf_rx_dim_work - Call back from the stack + * @work: work queue structure + */ +static void idpf_rx_dim_work(struct work_struct *work) +{ + struct idpf_q_vector *q_vector; + struct idpf_vport *vport; + struct dim *dim; + u16 itr; + + dim = container_of(work, struct dim, work); + q_vector = container_of(dim, struct idpf_q_vector, rx_dim); + vport = q_vector->vport; + + if (dim->profile_ix >= ARRAY_SIZE(vport->rx_itr_profile)) + dim->profile_ix = ARRAY_SIZE(vport->rx_itr_profile) - 1; + + /* look up the values in our local table */ + itr = vport->rx_itr_profile[dim->profile_ix]; + + idpf_vport_intr_write_itr(q_vector, itr, false); + + dim->state = DIM_START_MEASURE; +} + +/** + * idpf_init_dim - Set up dynamic interrupt moderation + * @qv: q_vector structure + */ +static void idpf_init_dim(struct idpf_q_vector *qv) +{ + INIT_WORK(&qv->tx_dim.work, idpf_tx_dim_work); + qv->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + qv->tx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX; + + INIT_WORK(&qv->rx_dim.work, idpf_rx_dim_work); + qv->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + qv->rx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX; +} + +/** + * idpf_vport_intr_napi_ena_all - Enable NAPI for all q_vectors in the vport + * @vport: main vport structure + */ +static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport) +{ + int q_idx; + + for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) { + struct idpf_q_vector *q_vector = &vport->q_vectors[q_idx]; + + 
idpf_init_dim(q_vector); + napi_enable(&q_vector->napi); + } +} + +/** + * idpf_tx_splitq_clean_all - Clean completion queues + * @q_vec: queue vector + * @budget: Used to determine if we are in netpoll + * @cleaned: returns number of packets cleaned + * + * Returns false if clean is not complete, true otherwise + */ +static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec, + int budget, int *cleaned) +{ + u16 num_txq = q_vec->num_txq; + bool clean_complete = true; + int i, budget_per_q; + + if (unlikely(!num_txq)) + return true; + + budget_per_q = DIV_ROUND_UP(budget, num_txq); + for (i = 0; i < num_txq; i++) + clean_complete &= idpf_tx_clean_complq(q_vec->tx[i], + budget_per_q, cleaned); + + return clean_complete; +} + +/** + * idpf_rx_splitq_clean_all - Clean completion queues + * @q_vec: queue vector + * @budget: Used to determine if we are in netpoll + * @cleaned: returns number of packets cleaned + * + * Returns false if clean is not complete, true otherwise + */ +static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget, + int *cleaned) +{ + u16 num_rxq = q_vec->num_rxq; + bool clean_complete = true; + int pkts_cleaned = 0; + int i, budget_per_q; + + /* We attempt to distribute budget to each Rx queue fairly, but don't + * allow the budget to go below 1 because that would exit polling early. + */ + budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0; + for (i = 0; i < num_rxq; i++) { + struct idpf_queue *rxq = q_vec->rx[i]; + int pkts_cleaned_per_q; + + pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q); + /* if we clean as many as budgeted, we must not be done */ + if (pkts_cleaned_per_q >= budget_per_q) + clean_complete = false; + pkts_cleaned += pkts_cleaned_per_q; + } + *cleaned = pkts_cleaned; + + for (i = 0; i < q_vec->num_bufq; i++) + idpf_rx_clean_refillq_all(q_vec->bufq[i]); + + return clean_complete; +} + +/** + * idpf_vport_splitq_napi_poll - NAPI handler + * @napi: struct from which you get q_vector + * @budget: budget provided by stack + */ +static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) +{ + struct idpf_q_vector *q_vector = + container_of(napi, struct idpf_q_vector, napi); + bool clean_complete; + int work_done = 0; + + /* Handle case where we are called by netpoll with a budget of 0 */ + if (unlikely(!budget)) { + idpf_tx_splitq_clean_all(q_vector, budget, &work_done); + + return 0; + } + + clean_complete = idpf_rx_splitq_clean_all(q_vector, budget, &work_done); + clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done); + + /* If work not completed, return budget and polling will continue */ + if (!clean_complete) + return budget; + + work_done = min_t(int, work_done, budget - 1); + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) + idpf_vport_intr_update_itr_ena_irq(q_vector); + + /* Switch to poll mode in the tear-down path after sending disable + * queues virtchnl message, as the interrupts will be disabled after + * that + */ + if (unlikely(q_vector->num_txq && test_bit(__IDPF_Q_POLL_MODE, + q_vector->tx[0]->flags))) + return budget; + else + return work_done; +} + +/** + * idpf_vport_intr_map_vector_to_qs - Map vectors to queues + * @vport: virtual port + * + * Mapping for vectors to queues + */ +static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport) +{ + u16 num_txq_grp = vport->num_txq_grp; + int i, j, qv_idx, bufq_vidx = 0; + struct idpf_rxq_group
*rx_qgrp; + struct idpf_txq_group *tx_qgrp; + struct idpf_queue *q, *bufq; + u16 q_index; + + for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) { + u16 num_rxq; + + rx_qgrp = &vport->rxq_grps[i]; + if (idpf_is_queue_model_split(vport->rxq_model)) + num_rxq = rx_qgrp->splitq.num_rxq_sets; + else + num_rxq = rx_qgrp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++) { + if (qv_idx >= vport->num_q_vectors) + qv_idx = 0; + + if (idpf_is_queue_model_split(vport->rxq_model)) + q = &rx_qgrp->splitq.rxq_sets[j]->rxq; + else + q = rx_qgrp->singleq.rxqs[j]; + q->q_vector = &vport->q_vectors[qv_idx]; + q_index = q->q_vector->num_rxq; + q->q_vector->rx[q_index] = q; + q->q_vector->num_rxq++; + qv_idx++; + } + + if (idpf_is_queue_model_split(vport->rxq_model)) { + for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + bufq = &rx_qgrp->splitq.bufq_sets[j].bufq; + bufq->q_vector = &vport->q_vectors[bufq_vidx]; + q_index = bufq->q_vector->num_bufq; + bufq->q_vector->bufq[q_index] = bufq; + bufq->q_vector->num_bufq++; + } + if (++bufq_vidx >= vport->num_q_vectors) + bufq_vidx = 0; + } + } + + for (i = 0, qv_idx = 0; i < num_txq_grp; i++) { + u16 num_txq; + + tx_qgrp = &vport->txq_grps[i]; + num_txq = tx_qgrp->num_txq; + + if (idpf_is_queue_model_split(vport->txq_model)) { + if (qv_idx >= vport->num_q_vectors) + qv_idx = 0; + + q = tx_qgrp->complq; + q->q_vector = &vport->q_vectors[qv_idx]; + q_index = q->q_vector->num_txq; + q->q_vector->tx[q_index] = q; + q->q_vector->num_txq++; + qv_idx++; + } else { + for (j = 0; j < num_txq; j++) { + if (qv_idx >= vport->num_q_vectors) + qv_idx = 0; + + q = tx_qgrp->txqs[j]; + q->q_vector = &vport->q_vectors[qv_idx]; + q_index = q->q_vector->num_txq; + q->q_vector->tx[q_index] = q; + q->q_vector->num_txq++; + + qv_idx++; + } + } + } +} + +/** + * idpf_vport_intr_init_vec_idx - Initialize the vector indexes + * @vport: virtual port + * + * Initialize vector indexes with values returned over mailbox + */ +static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_alloc_vectors *ac; + u16 *vecids, total_vecs; + int i; + + ac = adapter->req_vec_chunks; + if (!ac) { + for (i = 0; i < vport->num_q_vectors; i++) + vport->q_vectors[i].v_idx = vport->q_vector_idxs[i]; + + return 0; + } + + total_vecs = idpf_get_reserved_vecs(adapter); + vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL); + if (!vecids) + return -ENOMEM; + + idpf_get_vec_ids(adapter, vecids, total_vecs, &ac->vchunks); + + for (i = 0; i < vport->num_q_vectors; i++) + vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]]; + + kfree(vecids); + + return 0; +} + +/** + * idpf_vport_intr_napi_add_all - Register napi handler for all q_vectors + * @vport: virtual port structure + */ +static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport) +{ + int (*napi_poll)(struct napi_struct *napi, int budget); + u16 v_idx; + + if (idpf_is_queue_model_split(vport->txq_model)) + napi_poll = idpf_vport_splitq_napi_poll; + else + napi_poll = idpf_vport_singleq_napi_poll; + + for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { + struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx]; + + netif_napi_add(vport->netdev, &q_vector->napi, napi_poll); + + /* only set affinity_mask if the CPU is online */ + if (cpu_online(v_idx)) + cpumask_set_cpu(v_idx, &q_vector->affinity_mask); + } +} + +/** + * idpf_vport_intr_alloc - Allocate memory for interrupt vectors + * @vport: virtual port + * + * We allocate one q_vector per queue interrupt.
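+ * Each vector's queue arrays are sized for the worst case round-robin + * distribution, i.e. DIV_ROUND_UP(num_txq, num_q_vectors) TX queues and + * DIV_ROUND_UP(num_rxq, num_q_vectors) RX queues per vector.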
If allocation fails we + * return -ENOMEM. + */ +int idpf_vport_intr_alloc(struct idpf_vport *vport) +{ + u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector; + struct idpf_q_vector *q_vector; + int v_idx, err; + + vport->q_vectors = kcalloc(vport->num_q_vectors, + sizeof(struct idpf_q_vector), GFP_KERNEL); + if (!vport->q_vectors) + return -ENOMEM; + + txqs_per_vector = DIV_ROUND_UP(vport->num_txq, vport->num_q_vectors); + rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq, vport->num_q_vectors); + bufqs_per_vector = vport->num_bufqs_per_qgrp * + DIV_ROUND_UP(vport->num_rxq_grp, + vport->num_q_vectors); + + for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { + q_vector = &vport->q_vectors[v_idx]; + q_vector->vport = vport; + + q_vector->tx_itr_value = IDPF_ITR_TX_DEF; + q_vector->tx_intr_mode = IDPF_ITR_DYNAMIC; + q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1; + + q_vector->rx_itr_value = IDPF_ITR_RX_DEF; + q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC; + q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0; + + q_vector->tx = kcalloc(txqs_per_vector, + sizeof(struct idpf_queue *), + GFP_KERNEL); + if (!q_vector->tx) { + err = -ENOMEM; + goto error; + } + + q_vector->rx = kcalloc(rxqs_per_vector, + sizeof(struct idpf_queue *), + GFP_KERNEL); + if (!q_vector->rx) { + err = -ENOMEM; + goto error; + } + + if (!idpf_is_queue_model_split(vport->rxq_model)) + continue; + + q_vector->bufq = kcalloc(bufqs_per_vector, + sizeof(struct idpf_queue *), + GFP_KERNEL); + if (!q_vector->bufq) { + err = -ENOMEM; + goto error; + } + } + + return 0; + +error: + idpf_vport_intr_rel(vport); + + return err; +} + +/** + * idpf_vport_intr_init - Setup all vectors for the given vport + * @vport: virtual port + * + * Returns 0 on success or negative on failure + */ +int idpf_vport_intr_init(struct idpf_vport *vport) +{ + char *int_name; + int err; + + err = idpf_vport_intr_init_vec_idx(vport); + if (err) + return err; + + idpf_vport_intr_map_vector_to_qs(vport); + idpf_vport_intr_napi_add_all(vport); + idpf_vport_intr_napi_ena_all(vport); + + err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport); + if (err) + goto unroll_vectors_alloc; + + int_name = kasprintf(GFP_KERNEL, "%s-%s", + dev_driver_string(&vport->adapter->pdev->dev), + vport->netdev->name); + + err = idpf_vport_intr_req_irq(vport, int_name); + if (err) + goto unroll_vectors_alloc; + + idpf_vport_intr_ena_irq_all(vport); + + return 0; + +unroll_vectors_alloc: + idpf_vport_intr_napi_dis_all(vport); + idpf_vport_intr_napi_del_all(vport); + + return err; +} + +/** + * idpf_config_rss - Send virtchnl messages to configure RSS + * @vport: virtual port + * + * Return 0 on success, negative on failure + */ +int idpf_config_rss(struct idpf_vport *vport) +{ + int err; + + err = idpf_send_get_set_rss_key_msg(vport, false); + if (err) + return err; + + return idpf_send_get_set_rss_lut_msg(vport, false); +} + +/** + * idpf_fill_dflt_rss_lut - Fill the indirection table with the default values + * @vport: virtual port structure + */ +static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + u16 num_active_rxq = vport->num_rxq; + struct idpf_rss_data *rss_data; + int i; + + rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; + + for (i = 0; i < rss_data->rss_lut_size; i++) { + rss_data->rss_lut[i] = i % num_active_rxq; + rss_data->cached_lut[i] = rss_data->rss_lut[i]; + } +} + +/** + * idpf_init_rss - Allocate and initialize RSS resources + * @vport: virtual port + * + * Return 0 on success, negative on 
failure + */ +int idpf_init_rss(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct idpf_rss_data *rss_data; + u32 lut_size; + + rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; + + lut_size = rss_data->rss_lut_size * sizeof(u32); + rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL); + if (!rss_data->rss_lut) + return -ENOMEM; + + rss_data->cached_lut = kzalloc(lut_size, GFP_KERNEL); + if (!rss_data->cached_lut) { + kfree(rss_data->rss_lut); + rss_data->rss_lut = NULL; + + return -ENOMEM; + } + + /* Fill the default RSS lut values */ + idpf_fill_dflt_rss_lut(vport); + + return idpf_config_rss(vport); +} + +/** + * idpf_deinit_rss - Release RSS resources + * @vport: virtual port + */ +void idpf_deinit_rss(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct idpf_rss_data *rss_data; + + rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; + kfree(rss_data->cached_lut); + rss_data->cached_lut = NULL; + kfree(rss_data->rss_lut); + rss_data->rss_lut = NULL; +} diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h new file mode 100644 index 000000000000..df76493faa75 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -0,0 +1,1023 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023 Intel Corporation */ + +#ifndef _IDPF_TXRX_H_ +#define _IDPF_TXRX_H_ + +#include <net/page_pool/helpers.h> +#include <net/tcp.h> +#include <net/netdev_queues.h> + +#define IDPF_LARGE_MAX_Q 256 +#define IDPF_MAX_Q 16 +#define IDPF_MIN_Q 2 +/* Mailbox Queue */ +#define IDPF_MAX_MBXQ 1 + +#define IDPF_MIN_TXQ_DESC 64 +#define IDPF_MIN_RXQ_DESC 64 +#define IDPF_MIN_TXQ_COMPLQ_DESC 256 +#define IDPF_MAX_QIDS 256 + +/* Number of descriptors in a queue should be a multiple of 32. RX queue + * descriptors alone should be a multiple of IDPF_REQ_RXQ_DESC_MULTIPLE + * to achieve BufQ descriptors aligned to 32 + */ +#define IDPF_REQ_DESC_MULTIPLE 32 +#define IDPF_REQ_RXQ_DESC_MULTIPLE (IDPF_MAX_BUFQS_PER_RXQ_GRP * 32) +#define IDPF_MIN_TX_DESC_NEEDED (MAX_SKB_FRAGS + 6) +#define IDPF_TX_WAKE_THRESH ((u16)IDPF_MIN_TX_DESC_NEEDED * 2) + +#define IDPF_MAX_DESCS 8160 +#define IDPF_MAX_TXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_DESC_MULTIPLE) +#define IDPF_MAX_RXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_RXQ_DESC_MULTIPLE) +#define MIN_SUPPORT_TXDID (\ + VIRTCHNL2_TXDID_FLEX_FLOW_SCHED |\ + VIRTCHNL2_TXDID_FLEX_TSO_CTX) + +#define IDPF_DFLT_SINGLEQ_TX_Q_GROUPS 1 +#define IDPF_DFLT_SINGLEQ_RX_Q_GROUPS 1 +#define IDPF_DFLT_SINGLEQ_TXQ_PER_GROUP 4 +#define IDPF_DFLT_SINGLEQ_RXQ_PER_GROUP 4 + +#define IDPF_COMPLQ_PER_GROUP 1 +#define IDPF_SINGLE_BUFQ_PER_RXQ_GRP 1 +#define IDPF_MAX_BUFQS_PER_RXQ_GRP 2 +#define IDPF_BUFQ2_ENA 1 +#define IDPF_NUMQ_PER_CHUNK 1 + +#define IDPF_DFLT_SPLITQ_TXQ_PER_GROUP 1 +#define IDPF_DFLT_SPLITQ_RXQ_PER_GROUP 1 + +/* Default vector sharing */ +#define IDPF_MBX_Q_VEC 1 +#define IDPF_MIN_Q_VEC 1 + +#define IDPF_DFLT_TX_Q_DESC_COUNT 512 +#define IDPF_DFLT_TX_COMPLQ_DESC_COUNT 512 +#define IDPF_DFLT_RX_Q_DESC_COUNT 512 + +/* IMPORTANT: We absolutely _cannot_ have more buffers in the system than a + * given RX completion queue has descriptors. This includes _ALL_ buffer + * queues. E.g.: If you have two buffer queues of 512 descriptors and buffers, + * you have a total of 1024 buffers so your RX queue _must_ have at least that + * many descriptors. 
This macro divides a given number of RX descriptors by the + * number of buffer queues to calculate how many descriptors each buffer queue + * can have without overrunning the RX queue. + * + * If hardware is given more buffers than completion descriptors, then once it + * posts more than a ring's worth of descriptors before SW services an + * interrupt and overwrites the SW head, the gen bit in those descriptors will + * be wrong. Any overwritten descriptors' buffers are gone forever and SW has + * no reasonable way to tell that this has happened. From the SW perspective, + * when we finally get an interrupt, it looks like we're still waiting for a + * descriptor to be done, stalling forever. + */ +#define IDPF_RX_BUFQ_DESC_COUNT(RXD, NUM_BUFQ) ((RXD) / (NUM_BUFQ)) + +#define IDPF_RX_BUFQ_WORKING_SET(rxq) ((rxq)->desc_count - 1) + +#define IDPF_RX_BUMP_NTC(rxq, ntc) \ +do { \ + if (unlikely(++(ntc) == (rxq)->desc_count)) { \ + ntc = 0; \ + change_bit(__IDPF_Q_GEN_CHK, (rxq)->flags); \ + } \ +} while (0) + +#define IDPF_SINGLEQ_BUMP_RING_IDX(q, idx) \ +do { \ + if (unlikely(++(idx) == (q)->desc_count)) \ + idx = 0; \ +} while (0) + +#define IDPF_RX_HDR_SIZE 256 +#define IDPF_RX_BUF_2048 2048 +#define IDPF_RX_BUF_4096 4096 +#define IDPF_RX_BUF_STRIDE 32 +#define IDPF_RX_BUF_POST_STRIDE 16 +#define IDPF_LOW_WATERMARK 64 +/* Size of header buffer specifically for header split */ +#define IDPF_HDR_BUF_SIZE 256 +#define IDPF_PACKET_HDR_PAD \ + (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN * 2) +#define IDPF_TX_TSO_MIN_MSS 88 + +/* Minimum number of descriptors between 2 descriptors with the RE bit set; + * only relevant in flow scheduling mode + */ +#define IDPF_TX_SPLITQ_RE_MIN_GAP 64 + +#define IDPF_RX_BI_BUFID_S 0 +#define IDPF_RX_BI_BUFID_M GENMASK(14, 0) +#define IDPF_RX_BI_GEN_S 15 +#define IDPF_RX_BI_GEN_M BIT(IDPF_RX_BI_GEN_S) +#define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M +#define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M + +#define IDPF_SINGLEQ_RX_BUF_DESC(rxq, i) \ + (&(((struct virtchnl2_singleq_rx_buf_desc *)((rxq)->desc_ring))[i])) +#define IDPF_SPLITQ_RX_BUF_DESC(rxq, i) \ + (&(((struct virtchnl2_splitq_rx_buf_desc *)((rxq)->desc_ring))[i])) +#define IDPF_SPLITQ_RX_BI_DESC(rxq, i) ((((rxq)->ring))[i]) + +#define IDPF_BASE_TX_DESC(txq, i) \ + (&(((struct idpf_base_tx_desc *)((txq)->desc_ring))[i])) +#define IDPF_BASE_TX_CTX_DESC(txq, i) \ + (&(((struct idpf_base_tx_ctx_desc *)((txq)->desc_ring))[i])) +#define IDPF_SPLITQ_TX_COMPLQ_DESC(txcq, i) \ + (&(((struct idpf_splitq_tx_compl_desc *)((txcq)->desc_ring))[i])) + +#define IDPF_FLEX_TX_DESC(txq, i) \ + (&(((union idpf_tx_flex_desc *)((txq)->desc_ring))[i])) +#define IDPF_FLEX_TX_CTX_DESC(txq, i) \ + (&(((struct idpf_flex_tx_ctx_desc *)((txq)->desc_ring))[i])) + +#define IDPF_DESC_UNUSED(txq) \ + ((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \ + (txq)->next_to_clean - (txq)->next_to_use - 1) + +#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->buf_stack.top) +#define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \ + (txq)->desc_count >> 2) + +#define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq) ((txcq)->desc_count >> 1) +/* Determine the absolute number of completions pending, i.e. the number of + * completions that are expected to arrive on the TX completion queue. + */ +#define IDPF_TX_COMPLQ_PENDING(txq) \ + (((txq)->num_completions_pending >= (txq)->complq->num_completions ?
\ + 0 : U64_MAX) + \ + (txq)->num_completions_pending - (txq)->complq->num_completions) + +#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH 16 +#define IDPF_SPLITQ_TX_INVAL_COMPL_TAG -1 +/* Adjust the generation for the completion tag and wrap if necessary */ +#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \ + ((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \ + 0 : (txq)->compl_tag_cur_gen) + +#define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS) + +#define IDPF_TX_FLAGS_TSO BIT(0) +#define IDPF_TX_FLAGS_IPV4 BIT(1) +#define IDPF_TX_FLAGS_IPV6 BIT(2) +#define IDPF_TX_FLAGS_TUNNEL BIT(3) + +union idpf_tx_flex_desc { + struct idpf_flex_tx_desc q; /* queue based scheduling */ + struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */ +}; + +/** + * struct idpf_tx_buf + * @next_to_watch: Next descriptor to clean + * @skb: Pointer to the skb + * @dma: DMA address + * @len: DMA length + * @bytecount: Number of bytes + * @gso_segs: Number of GSO segments + * @compl_tag: Splitq only, unique identifier for a buffer. Used to compare + * with completion tag returned in buffer completion event. + * Because the completion tag is expected to be the same in all + * data descriptors for a given packet, and a single packet can + * span multiple buffers, we need this field to track all + * buffers associated with this completion tag independently of + * the buf_id. The tag consists of an N bit buf_id and M upper + * order "generation bits". See compl_tag_bufid_m and + * compl_tag_gen_s in struct idpf_queue. We'll use a value of -1 + * to indicate the tag is not valid. + * @ctx_entry: Singleq only. Used to indicate the corresponding entry + * in the descriptor ring was used for a context descriptor and + * this buffer entry should be skipped. + */ +struct idpf_tx_buf { + void *next_to_watch; + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + unsigned int bytecount; + unsigned short gso_segs; + + union { + int compl_tag; + + bool ctx_entry; + }; +}; + +struct idpf_tx_stash { + struct hlist_node hlist; + struct idpf_tx_buf buf; +}; + +/** + * struct idpf_buf_lifo - LIFO for managing OOO completions + * @top: Used to know how many buffers are left + * @size: Total size of LIFO + * @bufs: Backing array + */ +struct idpf_buf_lifo { + u16 top; + u16 size; + struct idpf_tx_stash **bufs; +}; + +/** + * struct idpf_tx_offload_params - Offload parameters for a given packet + * @tx_flags: Feature flags enabled for this packet + * @hdr_offsets: Offset parameter for single queue model + * @cd_tunneling: Type of tunneling enabled for single queue model + * @tso_len: Total length of payload to segment + * @mss: Segment size + * @tso_segs: Number of segments to be sent + * @tso_hdr_len: Length of headers to be duplicated + * @td_cmd: Command field to be inserted into descriptor + */ +struct idpf_tx_offload_params { + u32 tx_flags; + + u32 hdr_offsets; + u32 cd_tunneling; + + u32 tso_len; + u16 mss; + u16 tso_segs; + u16 tso_hdr_len; + + u16 td_cmd; +}; + +/** + * struct idpf_tx_splitq_params + * @dtype: General descriptor info + * @eop_cmd: Type of EOP + * @compl_tag: Associated tag for completion + * @td_tag: Descriptor tunneling tag + * @offload: Offload parameters + */ +struct idpf_tx_splitq_params { + enum idpf_tx_desc_dtype_value dtype; + u16 eop_cmd; + union { + u16 compl_tag; + u16 td_tag; + }; + + struct idpf_tx_offload_params offload; +}; + +enum idpf_tx_ctx_desc_eipt_offload { + IDPF_TX_CTX_EXT_IP_NONE = 0x0, + IDPF_TX_CTX_EXT_IP_IPV6 = 0x1, +
IDPF_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2, + IDPF_TX_CTX_EXT_IP_IPV4 = 0x3 +}; + +/* Checksum offload bits decoded from the receive descriptor. */ +struct idpf_rx_csum_decoded { + u32 l3l4p : 1; + u32 ipe : 1; + u32 eipe : 1; + u32 eudpe : 1; + u32 ipv6exadd : 1; + u32 l4e : 1; + u32 pprs : 1; + u32 nat : 1; + u32 raw_csum_inv : 1; + u32 raw_csum : 16; +}; + +struct idpf_rx_extracted { + unsigned int size; + u16 rx_ptype; +}; + +#define IDPF_TX_COMPLQ_CLEAN_BUDGET 256 +#define IDPF_TX_MIN_PKT_LEN 17 +#define IDPF_TX_DESCS_FOR_SKB_DATA_PTR 1 +#define IDPF_TX_DESCS_PER_CACHE_LINE (L1_CACHE_BYTES / \ + sizeof(struct idpf_flex_tx_desc)) +#define IDPF_TX_DESCS_FOR_CTX 1 +/* TX descriptors needed, worst case */ +#define IDPF_TX_DESC_NEEDED (MAX_SKB_FRAGS + IDPF_TX_DESCS_FOR_CTX + \ + IDPF_TX_DESCS_PER_CACHE_LINE + \ + IDPF_TX_DESCS_FOR_SKB_DATA_PTR) + +/* The size limit for a transmit buffer in a descriptor is (16K - 1). + * In order to align with the read requests we will align the value to + * the nearest 4K which represents our maximum read request size. + */ +#define IDPF_TX_MAX_READ_REQ_SIZE SZ_4K +#define IDPF_TX_MAX_DESC_DATA (SZ_16K - 1) +#define IDPF_TX_MAX_DESC_DATA_ALIGNED \ + ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE) + +#define IDPF_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) +#define IDPF_RX_DESC(rxq, i) \ + (&(((union virtchnl2_rx_desc *)((rxq)->desc_ring))[i])) + +struct idpf_rx_buf { + struct page *page; + unsigned int page_offset; + u16 truesize; +}; + +#define IDPF_RX_MAX_PTYPE_PROTO_IDS 32 +#define IDPF_RX_MAX_PTYPE_SZ (sizeof(struct virtchnl2_ptype) + \ + (sizeof(u16) * IDPF_RX_MAX_PTYPE_PROTO_IDS)) +#define IDPF_RX_PTYPE_HDR_SZ sizeof(struct virtchnl2_get_ptype_info) +#define IDPF_RX_MAX_PTYPES_PER_BUF \ + DIV_ROUND_DOWN_ULL((IDPF_CTLQ_MAX_BUF_LEN - IDPF_RX_PTYPE_HDR_SZ), \ + IDPF_RX_MAX_PTYPE_SZ) + +#define IDPF_GET_PTYPE_SIZE(p) struct_size((p), proto_id, (p)->proto_id_count) + +#define IDPF_TUN_IP_GRE (\ + IDPF_PTYPE_TUNNEL_IP |\ + IDPF_PTYPE_TUNNEL_IP_GRENAT) + +#define IDPF_TUN_IP_GRE_MAC (\ + IDPF_TUN_IP_GRE |\ + IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC) + +#define IDPF_RX_MAX_PTYPE 1024 +#define IDPF_RX_MAX_BASE_PTYPE 256 +#define IDPF_INVALID_PTYPE_ID 0xFFFF + +/* Packet type non-ip values */ +enum idpf_rx_ptype_l2 { + IDPF_RX_PTYPE_L2_RESERVED = 0, + IDPF_RX_PTYPE_L2_MAC_PAY2 = 1, + IDPF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, + IDPF_RX_PTYPE_L2_FIP_PAY2 = 3, + IDPF_RX_PTYPE_L2_OUI_PAY2 = 4, + IDPF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, + IDPF_RX_PTYPE_L2_LLDP_PAY2 = 6, + IDPF_RX_PTYPE_L2_ECP_PAY2 = 7, + IDPF_RX_PTYPE_L2_EVB_PAY2 = 8, + IDPF_RX_PTYPE_L2_QCN_PAY2 = 9, + IDPF_RX_PTYPE_L2_EAPOL_PAY2 = 10, + IDPF_RX_PTYPE_L2_ARP = 11, +}; + +enum idpf_rx_ptype_outer_ip { + IDPF_RX_PTYPE_OUTER_L2 = 0, + IDPF_RX_PTYPE_OUTER_IP = 1, +}; + +#define IDPF_RX_PTYPE_TO_IPV(ptype, ipv) \ + (((ptype)->outer_ip == IDPF_RX_PTYPE_OUTER_IP) && \ + ((ptype)->outer_ip_ver == (ipv))) + +enum idpf_rx_ptype_outer_ip_ver { + IDPF_RX_PTYPE_OUTER_NONE = 0, + IDPF_RX_PTYPE_OUTER_IPV4 = 1, + IDPF_RX_PTYPE_OUTER_IPV6 = 2, +}; + +enum idpf_rx_ptype_outer_fragmented { + IDPF_RX_PTYPE_NOT_FRAG = 0, + IDPF_RX_PTYPE_FRAG = 1, +}; + +enum idpf_rx_ptype_tunnel_type { + IDPF_RX_PTYPE_TUNNEL_NONE = 0, + IDPF_RX_PTYPE_TUNNEL_IP_IP = 1, + IDPF_RX_PTYPE_TUNNEL_IP_GRENAT = 2, + IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, + IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, +}; + +enum idpf_rx_ptype_tunnel_end_prot { + IDPF_RX_PTYPE_TUNNEL_END_NONE = 0, + IDPF_RX_PTYPE_TUNNEL_END_IPV4 = 1, + 
IDPF_RX_PTYPE_TUNNEL_END_IPV6 = 2, +}; + +enum idpf_rx_ptype_inner_prot { + IDPF_RX_PTYPE_INNER_PROT_NONE = 0, + IDPF_RX_PTYPE_INNER_PROT_UDP = 1, + IDPF_RX_PTYPE_INNER_PROT_TCP = 2, + IDPF_RX_PTYPE_INNER_PROT_SCTP = 3, + IDPF_RX_PTYPE_INNER_PROT_ICMP = 4, + IDPF_RX_PTYPE_INNER_PROT_TIMESYNC = 5, +}; + +enum idpf_rx_ptype_payload_layer { + IDPF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, + IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, + IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, + IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, +}; + +enum idpf_tunnel_state { + IDPF_PTYPE_TUNNEL_IP = BIT(0), + IDPF_PTYPE_TUNNEL_IP_GRENAT = BIT(1), + IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC = BIT(2), +}; + +struct idpf_ptype_state { + bool outer_ip; + bool outer_frag; + u8 tunnel_state; +}; + +struct idpf_rx_ptype_decoded { + u32 ptype:10; + u32 known:1; + u32 outer_ip:1; + u32 outer_ip_ver:2; + u32 outer_frag:1; + u32 tunnel_type:3; + u32 tunnel_end_prot:2; + u32 tunnel_end_frag:1; + u32 inner_prot:4; + u32 payload_layer:3; +}; + +/** + * enum idpf_queue_flags_t + * @__IDPF_Q_GEN_CHK: Queues operating in splitq mode use a generation bit to + * identify new descriptor writebacks on the ring. HW sets + * the gen bit to 1 on the first writeback of any given + * descriptor. After the ring wraps, HW sets the gen bit of + * those descriptors to 0, and continues flipping + * 0->1 or 1->0 on each ring wrap. SW maintains its own + * gen bit to know what value will indicate writebacks on + * the next pass around the ring. E.g. it is initialized + * to 1 and knows that reading a gen bit of 1 in any + * descriptor on the initial pass of the ring indicates a + * writeback. It also flips on every ring wrap. + * @__IDPF_RFLQ_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW bit + * and RFLQ_GEN is the SW bit. + * @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling + * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions + * @__IDPF_Q_POLL_MODE: Enable poll mode + * @__IDPF_Q_FLAGS_NBITS: Must be last + */ +enum idpf_queue_flags_t { + __IDPF_Q_GEN_CHK, + __IDPF_RFLQ_GEN_CHK, + __IDPF_Q_FLOW_SCH_EN, + __IDPF_Q_SW_MARKER, + __IDPF_Q_POLL_MODE, + + __IDPF_Q_FLAGS_NBITS, +}; + +/** + * struct idpf_vec_regs + * @dyn_ctl_reg: Dynamic control interrupt register offset + * @itrn_reg: Interrupt Throttling Rate register offset + * @itrn_index_spacing: Register spacing between ITR registers of the same + * vector + */ +struct idpf_vec_regs { + u32 dyn_ctl_reg; + u32 itrn_reg; + u32 itrn_index_spacing; +}; + +/** + * struct idpf_intr_reg + * @dyn_ctl: Dynamic control interrupt register + * @dyn_ctl_intena_m: Mask for dyn_ctl interrupt enable + * @dyn_ctl_itridx_s: Register bit offset for ITR index + * @dyn_ctl_itridx_m: Mask for ITR index + * @dyn_ctl_intrvl_s: Register bit offset for ITR interval + * @rx_itr: RX ITR register + * @tx_itr: TX ITR register + * @icr_ena: Interrupt cause register offset + * @icr_ena_ctlq_m: Mask for ICR + */ +struct idpf_intr_reg { + void __iomem *dyn_ctl; + u32 dyn_ctl_intena_m; + u32 dyn_ctl_itridx_s; + u32 dyn_ctl_itridx_m; + u32 dyn_ctl_intrvl_s; + void __iomem *rx_itr; + void __iomem *tx_itr; + void __iomem *icr_ena; + u32 icr_ena_ctlq_m; +}; + +/** + * struct idpf_q_vector + * @vport: Vport back pointer + * @affinity_mask: CPU affinity mask + * @napi: napi handler + * @v_idx: Vector index + * @intr_reg: See struct idpf_intr_reg + * @num_txq: Number of TX queues + * @tx: Array of TX queues to service + * @tx_dim: Data for TX net_dim algorithm + * @tx_itr_value: TX interrupt throttling rate + * @tx_intr_mode: Dynamic ITR or
not + * @tx_itr_idx: TX ITR index + * @num_rxq: Number of RX queues + * @rx: Array of RX queues to service + * @rx_dim: Data for RX net_dim algorithm + * @rx_itr_value: RX interrupt throttling rate + * @rx_intr_mode: Dynamic ITR or not + * @rx_itr_idx: RX ITR index + * @num_bufq: Number of buffer queues + * @bufq: Array of buffer queues to service + * @total_events: Number of interrupts processed + * @name: Queue vector name + */ +struct idpf_q_vector { + struct idpf_vport *vport; + cpumask_t affinity_mask; + struct napi_struct napi; + u16 v_idx; + struct idpf_intr_reg intr_reg; + + u16 num_txq; + struct idpf_queue **tx; + struct dim tx_dim; + u16 tx_itr_value; + bool tx_intr_mode; + u32 tx_itr_idx; + + u16 num_rxq; + struct idpf_queue **rx; + struct dim rx_dim; + u16 rx_itr_value; + bool rx_intr_mode; + u32 rx_itr_idx; + + u16 num_bufq; + struct idpf_queue **bufq; + + u16 total_events; + char *name; +}; + +struct idpf_rx_queue_stats { + u64_stats_t packets; + u64_stats_t bytes; + u64_stats_t rsc_pkts; + u64_stats_t hw_csum_err; + u64_stats_t hsplit_pkts; + u64_stats_t hsplit_buf_ovf; + u64_stats_t bad_descs; +}; + +struct idpf_tx_queue_stats { + u64_stats_t packets; + u64_stats_t bytes; + u64_stats_t lso_pkts; + u64_stats_t linearize; + u64_stats_t q_busy; + u64_stats_t skb_drops; + u64_stats_t dma_map_errs; +}; + +struct idpf_cleaned_stats { + u32 packets; + u32 bytes; +}; + +union idpf_queue_stats { + struct idpf_rx_queue_stats rx; + struct idpf_tx_queue_stats tx; +}; + +#define IDPF_ITR_DYNAMIC 1 +#define IDPF_ITR_MAX 0x1FE0 +#define IDPF_ITR_20K 0x0032 +#define IDPF_ITR_GRAN_S 1 /* Assume ITR granularity is 2us */ +#define IDPF_ITR_MASK 0x1FFE /* ITR register value alignment mask */ +#define ITR_REG_ALIGN(setting) ((setting) & IDPF_ITR_MASK) +#define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode) +#define IDPF_ITR_TX_DEF IDPF_ITR_20K +#define IDPF_ITR_RX_DEF IDPF_ITR_20K +/* Index used for 'No ITR' update in DYN_CTL register */ +#define IDPF_NO_ITR_UPDATE_IDX 3 +#define IDPF_ITR_IDX_SPACING(spacing, dflt) (spacing ? spacing : dflt) +#define IDPF_DIM_DEFAULT_PROFILE_IX 1 + +/** + * struct idpf_queue + * @dev: Device back pointer for DMA mapping + * @vport: Back pointer to associated vport + * @txq_grp: See struct idpf_txq_group + * @rxq_grp: See struct idpf_rxq_group + * @idx: For buffer queue, it is used as group id, either 0 or 1. On clean, + * buffer queue uses this index to determine which group of refill queues + * to clean. + * For TX queue, it is used as index to map between TX queue group and + * hot path TX pointers stored in vport. Used in both singleq/splitq. + * For RX queue, it is used to index to total RX queue across groups and + * used for skb reporting. + * @tail: Tail offset. Used for both queue models single and split. In splitq + * model relevant only for TX queue and RX queue. + * @tx_buf: See struct idpf_tx_buf + * @rx_buf: Struct with RX buffer related members + * @rx_buf.buf: See struct idpf_rx_buf + * @rx_buf.hdr_buf_pa: DMA handle + * @rx_buf.hdr_buf_va: Virtual address + * @pp: Page pool pointer + * @skb: Pointer to the skb + * @q_type: Queue type (TX, RX, TX completion, RX buffer) + * @q_id: Queue id + * @desc_count: Number of descriptors + * @next_to_use: Next descriptor to use. Relevant in both split & single txq + * and bufq. + * @next_to_clean: Next descriptor to clean. In split queue model, only + * relevant to TX completion queue and RX queue. + * @next_to_alloc: RX buffer to allocate at. Used only for RX. In splitq model + * only relevant to RX queue. 
+ * @flags: See enum idpf_queue_flags_t + * @q_stats: See union idpf_queue_stats + * @stats_sync: See struct u64_stats_sync + * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on + * the TX completion queue, it can be for any TXQ associated + * with that completion queue. This means we can clean up to + * N TXQs during a single call to clean the completion queue. + * cleaned_bytes|pkts tracks the clean stats per TXQ during + * that single call to clean the completion queue. By doing so, + * we can update BQL with aggregate cleaned stats for each TXQ + * only once at the end of the cleaning routine. + * @cleaned_pkts: Number of packets cleaned for the above said case + * @rx_hsplit_en: RX headsplit enable + * @rx_hbuf_size: Header buffer size + * @rx_buf_size: Buffer size + * @rx_max_pkt_size: RX max packet size + * @rx_buf_stride: RX buffer stride + * @rx_buffer_low_watermark: RX buffer low watermark + * @rxdids: Supported RX descriptor ids + * @q_vector: Backreference to associated vector + * @size: Length of descriptor ring in bytes + * @dma: Physical address of ring + * @desc_ring: Descriptor ring memory + * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather + * @tx_min_pkt_len: Min supported packet length + * @num_completions: Only relevant for TX completion queue. It tracks the + * number of completions received to compare against the + * number of completions pending, as accumulated by the + * TX queues. + * @buf_stack: Stack of empty buffers to store buffer info for out of order + * buffer completions. See struct idpf_buf_lifo. + * @compl_tag_bufid_m: Completion tag buffer id mask + * @compl_tag_gen_s: Completion tag generation bit + * The format of the completion tag will change based on the TXQ + * descriptor ring size so that we can maintain roughly the same level + * of "uniqueness" across all descriptor sizes. For example, if the + * TXQ descriptor ring size is 64 (the minimum size supported), the + * completion tag will be formatted as below: + * 15 6 5 0 + * -------------------------------- + * | GEN=0-1023 |IDX = 0-63| + * -------------------------------- + * + * This gives us 64*1024 = 65536 possible unique values. Similarly, if + * the TXQ descriptor ring size is 8160 (the maximum size supported), + * the completion tag will be formatted as below: + * 15 13 12 0 + * -------------------------------- + * |GEN | IDX = 0-8159 | + * -------------------------------- + * + * This gives us 8*8160 = 65280 possible unique values. 
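+ * Following the same scheme, the default ring size of 512 descriptors + * uses a 9 bit IDX and a 7 bit GEN, for 128 * 512 = 65536 unique values.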
+ * @compl_tag_cur_gen: Used to keep track of current completion tag generation + * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset + * @sched_buf_hash: Hash table to store buffers + */ +struct idpf_queue { + struct device *dev; + struct idpf_vport *vport; + union { + struct idpf_txq_group *txq_grp; + struct idpf_rxq_group *rxq_grp; + }; + u16 idx; + void __iomem *tail; + union { + struct idpf_tx_buf *tx_buf; + struct { + struct idpf_rx_buf *buf; + dma_addr_t hdr_buf_pa; + void *hdr_buf_va; + } rx_buf; + }; + struct page_pool *pp; + struct sk_buff *skb; + u16 q_type; + u32 q_id; + u16 desc_count; + + u16 next_to_use; + u16 next_to_clean; + u16 next_to_alloc; + DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS); + + union idpf_queue_stats q_stats; + struct u64_stats_sync stats_sync; + + u32 cleaned_bytes; + u16 cleaned_pkts; + + bool rx_hsplit_en; + u16 rx_hbuf_size; + u16 rx_buf_size; + u16 rx_max_pkt_size; + u16 rx_buf_stride; + u8 rx_buffer_low_watermark; + u64 rxdids; + struct idpf_q_vector *q_vector; + unsigned int size; + dma_addr_t dma; + void *desc_ring; + + u16 tx_max_bufs; + u8 tx_min_pkt_len; + + u32 num_completions; + + struct idpf_buf_lifo buf_stack; + + u16 compl_tag_bufid_m; + u16 compl_tag_gen_s; + + u16 compl_tag_cur_gen; + u16 compl_tag_gen_max; + + DECLARE_HASHTABLE(sched_buf_hash, 12); +} ____cacheline_internodealigned_in_smp; + +/** + * struct idpf_sw_queue + * @next_to_clean: Next descriptor to clean + * @next_to_alloc: Buffer to allocate at + * @flags: See enum idpf_queue_flags_t + * @ring: Pointer to the ring + * @desc_count: Descriptor count + * @dev: Device back pointer for DMA mapping + * + * Software queues are used in splitq mode to manage buffers between rxq + * producer and the bufq consumer. These are required in order to maintain a + * lockless buffer management system and are strictly software only constructs. + */ +struct idpf_sw_queue { + u16 next_to_clean; + u16 next_to_alloc; + DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS); + u16 *ring; + u16 desc_count; + struct device *dev; +} ____cacheline_internodealigned_in_smp; + +/** + * struct idpf_rxq_set + * @rxq: RX queue + * @refillq0: Pointer to refill queue 0 + * @refillq1: Pointer to refill queue 1 + * + * Splitq only. idpf_rxq_set associates an rxq with an array of refillqs. + * Each rxq needs a refillq to return used buffers back to the respective bufq. + * Bufqs then clean these refillqs for buffers to give to hardware. + */ +struct idpf_rxq_set { + struct idpf_queue rxq; + struct idpf_sw_queue *refillq0; + struct idpf_sw_queue *refillq1; +}; + +/** + * struct idpf_bufq_set + * @bufq: Buffer queue + * @num_refillqs: Number of refill queues. This is always equal to num_rxq_sets + * in idpf_rxq_group. + * @refillqs: Pointer to refill queues array. + * + * Splitq only. idpf_bufq_set associates a bufq with an array of refillqs. + * In this bufq_set, there will be one refillq for each rxq in this rxq_group. + * Used buffers received by rxqs will be put on refillqs which bufqs will + * clean to return new buffers back to hardware. + * + * Buffers needed by some number of rxqs associated in this rxq_group are + * managed by at most two bufqs (depending on performance configuration).
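+ * For example, a group configured with four rxq_sets and two bufq_sets + * has four refillqs in each bufq_set, one per rxq.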
+ */ +struct idpf_bufq_set { + struct idpf_queue bufq; + int num_refillqs; + struct idpf_sw_queue *refillqs; +}; + +/** + * struct idpf_rxq_group + * @vport: Vport back pointer + * @singleq: Struct with single queue related members + * @singleq.num_rxq: Number of RX queues associated + * @singleq.rxqs: Array of RX queue pointers + * @splitq: Struct with split queue related members + * @splitq.num_rxq_sets: Number of RX queue sets + * @splitq.rxq_sets: Array of RX queue sets + * @splitq.bufq_sets: Buffer queue set pointer + * + * In singleq mode, an rxq_group is simply an array of rxqs. In splitq, an + * rxq_group contains all the rxqs, bufqs and refillqs needed to + * manage buffers in splitq mode. + */ +struct idpf_rxq_group { + struct idpf_vport *vport; + + union { + struct { + u16 num_rxq; + struct idpf_queue *rxqs[IDPF_LARGE_MAX_Q]; + } singleq; + struct { + u16 num_rxq_sets; + struct idpf_rxq_set *rxq_sets[IDPF_LARGE_MAX_Q]; + struct idpf_bufq_set *bufq_sets; + } splitq; + }; +}; + +/** + * struct idpf_txq_group + * @vport: Vport back pointer + * @num_txq: Number of TX queues associated + * @txqs: Array of TX queue pointers + * @complq: Associated completion queue pointer, split queue only + * @num_completions_pending: Total number of completions pending for the + * completion queue, accumulated for all TX queues + * associated with that completion queue. + * + * Between singleq and splitq, a txq_group is largely the same except for the + * complq. In splitq a single complq is responsible for handling completions + * for some number of txqs associated in this txq_group. + */ +struct idpf_txq_group { + struct idpf_vport *vport; + + u16 num_txq; + struct idpf_queue *txqs[IDPF_LARGE_MAX_Q]; + + struct idpf_queue *complq; + + u32 num_completions_pending; +}; + +/** + * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag + * @size: transmit request size in bytes + * + * In the case where a large frag (>= 16K) needs to be split across multiple + * descriptors, we need to assume that we can have no more than 12K of data + * per descriptor due to hardware alignment restrictions (4K alignment).
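+ * For example, a 32K frag is sent as DIV_ROUND_UP(32768, 12288) = 3 + * descriptors.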
+ */ +static inline u32 idpf_size_to_txd_count(unsigned int size) +{ + return DIV_ROUND_UP(size, IDPF_TX_MAX_DESC_DATA_ALIGNED); +} + +/** + * idpf_tx_singleq_build_ctob - populate command tag offset and size + * @td_cmd: Command to be filled in desc + * @td_offset: Offset to be filled in desc + * @size: Size of the buffer + * @td_tag: td tag to be filled + * + * Returns the 64 bit value populated with the input parameters + */ +static inline __le64 idpf_tx_singleq_build_ctob(u64 td_cmd, u64 td_offset, + unsigned int size, u64 td_tag) +{ + return cpu_to_le64(IDPF_TX_DESC_DTYPE_DATA | + (td_cmd << IDPF_TXD_QW1_CMD_S) | + (td_offset << IDPF_TXD_QW1_OFFSET_S) | + ((u64)size << IDPF_TXD_QW1_TX_BUF_SZ_S) | + (td_tag << IDPF_TXD_QW1_L2TAG1_S)); +} + +void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc, + struct idpf_tx_splitq_params *params, + u16 td_cmd, u16 size); +void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc, + struct idpf_tx_splitq_params *params, + u16 td_cmd, u16 size); +/** + * idpf_tx_splitq_build_desc - determine which type of data descriptor to build + * @desc: descriptor to populate + * @params: pointer to tx params struct + * @td_cmd: command to be filled in desc + * @size: size of buffer + */ +static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc, + struct idpf_tx_splitq_params *params, + u16 td_cmd, u16 size) +{ + if (params->dtype == IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2) + idpf_tx_splitq_build_ctb(desc, params, td_cmd, size); + else + idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size); +} + +/** + * idpf_alloc_page - Allocate a new RX buffer from the page pool + * @pool: page_pool to allocate from + * @buf: metadata struct to populate with page info + * @buf_size: 2K or 4K + * + * Returns &dma_addr_t to be passed to HW for Rx, %DMA_MAPPING_ERROR otherwise. 
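+ * 2048 byte buffers are allocated as page fragments, so more than one such + * buffer can share a page where the pool layout permits; 4096 byte buffers + * always take a whole page of their own.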
+ */ +static inline dma_addr_t idpf_alloc_page(struct page_pool *pool, + struct idpf_rx_buf *buf, + unsigned int buf_size) +{ + if (buf_size == IDPF_RX_BUF_2048) + buf->page = page_pool_dev_alloc_frag(pool, &buf->page_offset, + buf_size); + else + buf->page = page_pool_dev_alloc_pages(pool); + + if (!buf->page) + return DMA_MAPPING_ERROR; + + buf->truesize = buf_size; + + return page_pool_get_dma_addr(buf->page) + buf->page_offset + + pool->p.offset; +} + +/** + * idpf_rx_put_page - Return RX buffer page to pool + * @rx_buf: RX buffer metadata struct + */ +static inline void idpf_rx_put_page(struct idpf_rx_buf *rx_buf) +{ + page_pool_put_page(rx_buf->page->pp, rx_buf->page, + rx_buf->truesize, true); + rx_buf->page = NULL; +} + +/** + * idpf_rx_sync_for_cpu - Synchronize DMA buffer + * @rx_buf: RX buffer metadata struct + * @len: frame length from descriptor + */ +static inline void idpf_rx_sync_for_cpu(struct idpf_rx_buf *rx_buf, u32 len) +{ + struct page *page = rx_buf->page; + struct page_pool *pp = page->pp; + + dma_sync_single_range_for_cpu(pp->p.dev, + page_pool_get_dma_addr(page), + rx_buf->page_offset + pp->p.offset, len, + page_pool_get_dma_dir(pp)); +} + +int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget); +void idpf_vport_init_num_qs(struct idpf_vport *vport, + struct virtchnl2_create_vport *vport_msg); +void idpf_vport_calc_num_q_desc(struct idpf_vport *vport); +int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_index, + struct virtchnl2_create_vport *vport_msg, + struct idpf_vport_max_q *max_q); +void idpf_vport_calc_num_q_groups(struct idpf_vport *vport); +int idpf_vport_queues_alloc(struct idpf_vport *vport); +void idpf_vport_queues_rel(struct idpf_vport *vport); +void idpf_vport_intr_rel(struct idpf_vport *vport); +int idpf_vport_intr_alloc(struct idpf_vport *vport); +void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector); +void idpf_vport_intr_deinit(struct idpf_vport *vport); +int idpf_vport_intr_init(struct idpf_vport *vport); +enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded); +int idpf_config_rss(struct idpf_vport *vport); +int idpf_init_rss(struct idpf_vport *vport); +void idpf_deinit_rss(struct idpf_vport *vport); +int idpf_rx_bufs_init_all(struct idpf_vport *vport); +void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb, + unsigned int size); +struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq, + struct idpf_rx_buf *rx_buf, + unsigned int size); +bool idpf_init_rx_buf_hw_alloc(struct idpf_queue *rxq, struct idpf_rx_buf *buf); +void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val); +void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val, + bool xmit_more); +unsigned int idpf_size_to_txd_count(unsigned int size); +netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb); +void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb, + struct idpf_tx_buf *first, u16 ring_idx); +unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq, + struct sk_buff *skb); +bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, + unsigned int count); +int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size); +void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue); +netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb, + struct net_device *netdev); +netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb, + struct net_device *netdev); +bool idpf_rx_singleq_buf_hw_alloc_all(struct 
idpf_queue *rxq, + u16 cleaned_count); +int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off); + +#endif /* !_IDPF_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c new file mode 100644 index 000000000000..8ade4e3a9fe1 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2023 Intel Corporation */ + +#include "idpf.h" +#include "idpf_lan_vf_regs.h" + +#define IDPF_VF_ITR_IDX_SPACING 0x40 + +/** + * idpf_vf_ctlq_reg_init - initialize default mailbox registers + * @cq: pointer to the array of create control queues + */ +static void idpf_vf_ctlq_reg_init(struct idpf_ctlq_create_info *cq) +{ + int i; + + for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) { + struct idpf_ctlq_create_info *ccq = cq + i; + + switch (ccq->type) { + case IDPF_CTLQ_TYPE_MAILBOX_TX: + /* set head and tail registers in our local struct */ + ccq->reg.head = VF_ATQH; + ccq->reg.tail = VF_ATQT; + ccq->reg.len = VF_ATQLEN; + ccq->reg.bah = VF_ATQBAH; + ccq->reg.bal = VF_ATQBAL; + ccq->reg.len_mask = VF_ATQLEN_ATQLEN_M; + ccq->reg.len_ena_mask = VF_ATQLEN_ATQENABLE_M; + ccq->reg.head_mask = VF_ATQH_ATQH_M; + break; + case IDPF_CTLQ_TYPE_MAILBOX_RX: + /* set head and tail registers in our local struct */ + ccq->reg.head = VF_ARQH; + ccq->reg.tail = VF_ARQT; + ccq->reg.len = VF_ARQLEN; + ccq->reg.bah = VF_ARQBAH; + ccq->reg.bal = VF_ARQBAL; + ccq->reg.len_mask = VF_ARQLEN_ARQLEN_M; + ccq->reg.len_ena_mask = VF_ARQLEN_ARQENABLE_M; + ccq->reg.head_mask = VF_ARQH_ARQH_M; + break; + default: + break; + } + } +} + +/** + * idpf_vf_mb_intr_reg_init - Initialize the mailbox register + * @adapter: adapter structure + */ +static void idpf_vf_mb_intr_reg_init(struct idpf_adapter *adapter) +{ + struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg; + u32 dyn_ctl = le32_to_cpu(adapter->caps.mailbox_dyn_ctl); + + intr->dyn_ctl = idpf_get_reg_addr(adapter, dyn_ctl); + intr->dyn_ctl_intena_m = VF_INT_DYN_CTL0_INTENA_M; + intr->dyn_ctl_itridx_m = VF_INT_DYN_CTL0_ITR_INDX_M; + intr->icr_ena = idpf_get_reg_addr(adapter, VF_INT_ICR0_ENA1); + intr->icr_ena_ctlq_m = VF_INT_ICR0_ENA1_ADMINQ_M; +} + +/** + * idpf_vf_intr_reg_init - Initialize interrupt registers + * @vport: virtual port structure + */ +static int idpf_vf_intr_reg_init(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + int num_vecs = vport->num_q_vectors; + struct idpf_vec_regs *reg_vals; + int num_regs, i, err = 0; + u32 rx_itr, tx_itr; + u16 total_vecs; + + total_vecs = idpf_get_reserved_vecs(vport->adapter); + reg_vals = kcalloc(total_vecs, sizeof(struct idpf_vec_regs), + GFP_KERNEL); + if (!reg_vals) + return -ENOMEM; + + num_regs = idpf_get_reg_intr_vecs(vport, reg_vals); + if (num_regs < num_vecs) { + err = -EINVAL; + goto free_reg_vals; + } + + for (i = 0; i < num_vecs; i++) { + struct idpf_q_vector *q_vector = &vport->q_vectors[i]; + u16 vec_id = vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC; + struct idpf_intr_reg *intr = &q_vector->intr_reg; + u32 spacing; + + intr->dyn_ctl = idpf_get_reg_addr(adapter, + reg_vals[vec_id].dyn_ctl_reg); + intr->dyn_ctl_intena_m = VF_INT_DYN_CTLN_INTENA_M; + intr->dyn_ctl_itridx_s = VF_INT_DYN_CTLN_ITR_INDX_S; + + spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing, + IDPF_VF_ITR_IDX_SPACING); + rx_itr = VF_INT_ITRN_ADDR(VIRTCHNL2_ITR_IDX_0, + reg_vals[vec_id].itrn_reg, + spacing); + tx_itr = VF_INT_ITRN_ADDR(VIRTCHNL2_ITR_IDX_1, + 
reg_vals[vec_id].itrn_reg, + spacing); + intr->rx_itr = idpf_get_reg_addr(adapter, rx_itr); + intr->tx_itr = idpf_get_reg_addr(adapter, tx_itr); + } + +free_reg_vals: + kfree(reg_vals); + + return err; +} + +/** + * idpf_vf_reset_reg_init - Initialize reset registers + * @adapter: Driver specific private structure + */ +static void idpf_vf_reset_reg_init(struct idpf_adapter *adapter) +{ + adapter->reset_reg.rstat = idpf_get_reg_addr(adapter, VFGEN_RSTAT); + adapter->reset_reg.rstat_m = VFGEN_RSTAT_VFR_STATE_M; +} + +/** + * idpf_vf_trigger_reset - trigger reset + * @adapter: Driver specific private structure + * @trig_cause: Reason to trigger a reset + */ +static void idpf_vf_trigger_reset(struct idpf_adapter *adapter, + enum idpf_flags trig_cause) +{ + /* Do not send VIRTCHNL2_OP_RESET_VF message on driver unload */ + if (trig_cause == IDPF_HR_FUNC_RESET && + !test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) + idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL); +} + +/** + * idpf_vf_reg_ops_init - Initialize register API function pointers + * @adapter: Driver specific private structure + */ +static void idpf_vf_reg_ops_init(struct idpf_adapter *adapter) +{ + adapter->dev_ops.reg_ops.ctlq_reg_init = idpf_vf_ctlq_reg_init; + adapter->dev_ops.reg_ops.intr_reg_init = idpf_vf_intr_reg_init; + adapter->dev_ops.reg_ops.mb_intr_reg_init = idpf_vf_mb_intr_reg_init; + adapter->dev_ops.reg_ops.reset_reg_init = idpf_vf_reset_reg_init; + adapter->dev_ops.reg_ops.trigger_reset = idpf_vf_trigger_reset; +} + +/** + * idpf_vf_dev_ops_init - Initialize device API function pointers + * @adapter: Driver specific private structure + */ +void idpf_vf_dev_ops_init(struct idpf_adapter *adapter) +{ + idpf_vf_reg_ops_init(adapter); +} diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c new file mode 100644 index 000000000000..2c1b051fdc0d --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -0,0 +1,3798 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2023 Intel Corporation */ + +#include "idpf.h" + +/** + * idpf_recv_event_msg - Receive virtchnl event message + * @vport: virtual port structure + * @ctlq_msg: message to copy from + * + * Receive virtchnl event message + */ +static void idpf_recv_event_msg(struct idpf_vport *vport, + struct idpf_ctlq_msg *ctlq_msg) +{ + struct idpf_netdev_priv *np = netdev_priv(vport->netdev); + struct virtchnl2_event *v2e; + bool link_status; + u32 event; + + v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va; + event = le32_to_cpu(v2e->event); + + switch (event) { + case VIRTCHNL2_EVENT_LINK_CHANGE: + vport->link_speed_mbps = le32_to_cpu(v2e->link_speed); + link_status = v2e->link_status; + + if (vport->link_up == link_status) + break; + + vport->link_up = link_status; + if (np->state == __IDPF_VPORT_UP) { + if (vport->link_up) { + netif_carrier_on(vport->netdev); + netif_tx_start_all_queues(vport->netdev); + } else { + netif_tx_stop_all_queues(vport->netdev); + netif_carrier_off(vport->netdev); + } + } + break; + default: + dev_err(&vport->adapter->pdev->dev, + "Unknown event %d from PF\n", event); + break; + } +} + +/** + * idpf_mb_clean - Reclaim the send mailbox queue entries + * @adapter: Driver specific private structure + * + * Reclaim the send mailbox queue entries to be used to send further messages + * + * Returns 0 on success, negative on failure + */ +static int idpf_mb_clean(struct idpf_adapter *adapter) +{ + u16 i, num_q_msg = IDPF_DFLT_MBX_Q_LEN; + struct 
idpf_ctlq_msg **q_msg; + struct idpf_dma_mem *dma_mem; + int err; + + q_msg = kcalloc(num_q_msg, sizeof(struct idpf_ctlq_msg *), GFP_ATOMIC); + if (!q_msg) + return -ENOMEM; + + err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg); + if (err) + goto err_kfree; + + for (i = 0; i < num_q_msg; i++) { + if (!q_msg[i]) + continue; + dma_mem = q_msg[i]->ctx.indirect.payload; + if (dma_mem) + dma_free_coherent(&adapter->pdev->dev, dma_mem->size, + dma_mem->va, dma_mem->pa); + kfree(q_msg[i]); + kfree(dma_mem); + } + +err_kfree: + kfree(q_msg); + + return err; +} + +/** + * idpf_send_mb_msg - Send message over mailbox + * @adapter: Driver specific private structure + * @op: virtchnl opcode + * @msg_size: size of the payload + * @msg: pointer to buffer holding the payload + * + * Prepares the control queue message and initiates the send API + * + * Returns 0 on success, negative on failure + */ +int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op, + u16 msg_size, u8 *msg) +{ + struct idpf_ctlq_msg *ctlq_msg; + struct idpf_dma_mem *dma_mem; + int err; + + /* If we are here and a reset is detected nothing much can be + * done. This thread should silently abort and is expected to + * be corrected by a new run, either by user or driver flows, + * after reset + */ + if (idpf_is_reset_detected(adapter)) + return 0; + + err = idpf_mb_clean(adapter); + if (err) + return err; + + ctlq_msg = kzalloc(sizeof(*ctlq_msg), GFP_ATOMIC); + if (!ctlq_msg) + return -ENOMEM; + + dma_mem = kzalloc(sizeof(*dma_mem), GFP_ATOMIC); + if (!dma_mem) { + err = -ENOMEM; + goto dma_mem_error; + } + + ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp; + ctlq_msg->func_id = 0; + ctlq_msg->data_len = msg_size; + ctlq_msg->cookie.mbx.chnl_opcode = op; + ctlq_msg->cookie.mbx.chnl_retval = 0; + dma_mem->size = IDPF_CTLQ_MAX_BUF_LEN; + dma_mem->va = dma_alloc_coherent(&adapter->pdev->dev, dma_mem->size, + &dma_mem->pa, GFP_ATOMIC); + if (!dma_mem->va) { + err = -ENOMEM; + goto dma_alloc_error; + } + memcpy(dma_mem->va, msg, msg_size); + ctlq_msg->ctx.indirect.payload = dma_mem; + + err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg); + if (err) + goto send_error; + + return 0; + +send_error: + dma_free_coherent(&adapter->pdev->dev, dma_mem->size, dma_mem->va, + dma_mem->pa); +dma_alloc_error: + kfree(dma_mem); +dma_mem_error: + kfree(ctlq_msg); + + return err; +} + +/** + * idpf_find_vport - Find vport pointer from control queue message + * @adapter: driver specific private structure + * @vport: address of vport pointer to copy the vport from adapter's vport list + * @ctlq_msg: control queue message + * + * Return 0 on success, error value on failure. This function also checks + * the opcodes that expect to receive a payload and returns an error value + * if it is missing.
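+ * Opcodes that are not tied to a vport (e.g. VIRTCHNL2_OP_VERSION or + * VIRTCHNL2_OP_ALLOC_VECTORS) skip the lookup and leave *vport untouched.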
+ */ +static int idpf_find_vport(struct idpf_adapter *adapter, + struct idpf_vport **vport, + struct idpf_ctlq_msg *ctlq_msg) +{ + bool no_op = false, vid_found = false; + int i, err = 0; + char *vc_msg; + u32 v_id; + + vc_msg = kcalloc(IDPF_CTLQ_MAX_BUF_LEN, sizeof(char), GFP_KERNEL); + if (!vc_msg) + return -ENOMEM; + + if (ctlq_msg->data_len) { + size_t payload_size = ctlq_msg->ctx.indirect.payload->size; + + if (!payload_size) { + dev_err(&adapter->pdev->dev, "Failed to receive payload buffer\n"); + kfree(vc_msg); + + return -EINVAL; + } + + memcpy(vc_msg, ctlq_msg->ctx.indirect.payload->va, + min_t(size_t, payload_size, IDPF_CTLQ_MAX_BUF_LEN)); + } + + switch (ctlq_msg->cookie.mbx.chnl_opcode) { + case VIRTCHNL2_OP_VERSION: + case VIRTCHNL2_OP_GET_CAPS: + case VIRTCHNL2_OP_CREATE_VPORT: + case VIRTCHNL2_OP_SET_SRIOV_VFS: + case VIRTCHNL2_OP_ALLOC_VECTORS: + case VIRTCHNL2_OP_DEALLOC_VECTORS: + case VIRTCHNL2_OP_GET_PTYPE_INFO: + goto free_vc_msg; + case VIRTCHNL2_OP_ENABLE_VPORT: + case VIRTCHNL2_OP_DISABLE_VPORT: + case VIRTCHNL2_OP_DESTROY_VPORT: + v_id = le32_to_cpu(((struct virtchnl2_vport *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_CONFIG_TX_QUEUES: + v_id = le32_to_cpu(((struct virtchnl2_config_tx_queues *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_CONFIG_RX_QUEUES: + v_id = le32_to_cpu(((struct virtchnl2_config_rx_queues *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_ENABLE_QUEUES: + case VIRTCHNL2_OP_DISABLE_QUEUES: + case VIRTCHNL2_OP_DEL_QUEUES: + v_id = le32_to_cpu(((struct virtchnl2_del_ena_dis_queues *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_ADD_QUEUES: + v_id = le32_to_cpu(((struct virtchnl2_add_queues *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_MAP_QUEUE_VECTOR: + case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR: + v_id = le32_to_cpu(((struct virtchnl2_queue_vector_maps *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_GET_STATS: + v_id = le32_to_cpu(((struct virtchnl2_vport_stats *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_GET_RSS_LUT: + case VIRTCHNL2_OP_SET_RSS_LUT: + v_id = le32_to_cpu(((struct virtchnl2_rss_lut *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_GET_RSS_KEY: + case VIRTCHNL2_OP_SET_RSS_KEY: + v_id = le32_to_cpu(((struct virtchnl2_rss_key *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_EVENT: + v_id = le32_to_cpu(((struct virtchnl2_event *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_LOOPBACK: + v_id = le32_to_cpu(((struct virtchnl2_loopback *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE: + v_id = le32_to_cpu(((struct virtchnl2_promisc_info *)vc_msg)->vport_id); + break; + case VIRTCHNL2_OP_ADD_MAC_ADDR: + case VIRTCHNL2_OP_DEL_MAC_ADDR: + v_id = le32_to_cpu(((struct virtchnl2_mac_addr_list *)vc_msg)->vport_id); + break; + default: + no_op = true; + break; + } + + if (no_op) + goto free_vc_msg; + + for (i = 0; i < idpf_get_max_vports(adapter); i++) { + if (adapter->vport_ids[i] == v_id) { + vid_found = true; + break; + } + } + + if (vid_found) + *vport = adapter->vports[i]; + else + err = -EINVAL; + +free_vc_msg: + kfree(vc_msg); + + return err; +} + +/** + * idpf_copy_data_to_vc_buf - Copy the virtchnl response data into the buffer. + * @adapter: driver specific private structure + * @vport: virtual port structure + * @ctlq_msg: msg to copy from + * @err_enum: err bit to set on error + * + * Copies the payload from ctlq_msg into virtchnl buffer. Returns 0 on success, + * negative on failure. 
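+ * The copy length is capped at IDPF_CTLQ_MAX_BUF_LEN so an oversized payload + * cannot overrun the preallocated virtchnl message buffer.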
+ */ +static int idpf_copy_data_to_vc_buf(struct idpf_adapter *adapter, + struct idpf_vport *vport, + struct idpf_ctlq_msg *ctlq_msg, + enum idpf_vport_vc_state err_enum) +{ + if (ctlq_msg->cookie.mbx.chnl_retval) { + if (vport) + set_bit(err_enum, vport->vc_state); + else + set_bit(err_enum, adapter->vc_state); + + return -EINVAL; + } + + if (vport) + memcpy(vport->vc_msg, ctlq_msg->ctx.indirect.payload->va, + min_t(int, ctlq_msg->ctx.indirect.payload->size, + IDPF_CTLQ_MAX_BUF_LEN)); + else + memcpy(adapter->vc_msg, ctlq_msg->ctx.indirect.payload->va, + min_t(int, ctlq_msg->ctx.indirect.payload->size, + IDPF_CTLQ_MAX_BUF_LEN)); + + return 0; +} + +/** + * idpf_recv_vchnl_op - helper function with common logic when handling the + * reception of VIRTCHNL OPs. + * @adapter: driver specific private structure + * @vport: virtual port structure + * @ctlq_msg: msg to copy from + * @state: state bit used on timeout check + * @err_state: err bit to set on error + */ +static void idpf_recv_vchnl_op(struct idpf_adapter *adapter, + struct idpf_vport *vport, + struct idpf_ctlq_msg *ctlq_msg, + enum idpf_vport_vc_state state, + enum idpf_vport_vc_state err_state) +{ + wait_queue_head_t *vchnl_wq; + int err; + + if (vport) + vchnl_wq = &vport->vchnl_wq; + else + vchnl_wq = &adapter->vchnl_wq; + + err = idpf_copy_data_to_vc_buf(adapter, vport, ctlq_msg, err_state); + if (wq_has_sleeper(vchnl_wq)) { + if (vport) + set_bit(state, vport->vc_state); + else + set_bit(state, adapter->vc_state); + + wake_up(vchnl_wq); + } else { + if (!err) { + dev_warn(&adapter->pdev->dev, "opcode %d received without waiting thread\n", + ctlq_msg->cookie.mbx.chnl_opcode); + } else { + /* Clear the errors since there is no sleeper to pass + * them on + */ + if (vport) + clear_bit(err_state, vport->vc_state); + else + clear_bit(err_state, adapter->vc_state); + } + } +} + +/** + * idpf_recv_mb_msg - Receive message over mailbox + * @adapter: Driver specific private structure + * @op: virtchannel operation code + * @msg: Received message holding buffer + * @msg_size: message size + * + * Receives a control queue message and posts the receive buffer. Returns 0 + * on success and negative on failure. + */ +int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op, + void *msg, int msg_size) +{ + struct idpf_vport *vport = NULL; + struct idpf_ctlq_msg ctlq_msg; + struct idpf_dma_mem *dma_mem; + bool work_done = false; + int num_retry = 2000; + u16 num_q_msg; + int err; + + while (1) { + struct idpf_vport_config *vport_config; + int payload_size = 0; + + /* Try to get one message */ + num_q_msg = 1; + dma_mem = NULL; + err = idpf_ctlq_recv(adapter->hw.arq, &num_q_msg, &ctlq_msg); + /* If no message then decide if we have to retry based on + * opcode + */ + if (err || !num_q_msg) { + /* Increasing num_retry to consider the delayed + * responses because of a large number of VFs' mailbox + * messages. If the mailbox message is received from + * the other side, we come out of the sleep cycle + * immediately; otherwise we wait for more time. + */ + if (!op || !num_retry--) + break; + if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) { + err = -EIO; + break; + } + msleep(20); + continue; + } + + /* If we are here a message is received. Check if we are looking + * for a specific message based on opcode.
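idpf_recv_vchnl_op() is the producer half of a completion handshake whose consumer is __idpf_wait_for_event() further down. Stripped of the vport/adapter split, the pattern looks like this (illustrative sketch only):

/* Producer side: publish the completion bit only when somebody is
 * actually sleeping on the queue, so the bit cannot go stale.
 */
static void example_complete(wait_queue_head_t *wq, unsigned long *state,
			     int bit)
{
	if (wq_has_sleeper(wq)) {
		set_bit(bit, state);
		wake_up(wq);
	}
}

/* Consumer side: test_and_clear_bit() consumes the completion
 * atomically, so one wakeup satisfies exactly one waiter.
 */
static int example_wait(wait_queue_head_t *wq, unsigned long *state, int bit)
{
	if (!wait_event_timeout(*wq, test_and_clear_bit(bit, state),
				msecs_to_jiffies(500)))
		return -ETIMEDOUT;

	return 0;
}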
If it is different + * ignore and post buffers + */ + if (op && ctlq_msg.cookie.mbx.chnl_opcode != op) + goto post_buffs; + + err = idpf_find_vport(adapter, &vport, &ctlq_msg); + if (err) + goto post_buffs; + + if (ctlq_msg.data_len) + payload_size = ctlq_msg.ctx.indirect.payload->size; + + /* All conditions are met. Either a message requested is + * received or we received a message to be processed + */ + switch (ctlq_msg.cookie.mbx.chnl_opcode) { + case VIRTCHNL2_OP_VERSION: + case VIRTCHNL2_OP_GET_CAPS: + if (ctlq_msg.cookie.mbx.chnl_retval) { + dev_err(&adapter->pdev->dev, "Failure initializing, vc op: %u retval: %u\n", + ctlq_msg.cookie.mbx.chnl_opcode, + ctlq_msg.cookie.mbx.chnl_retval); + err = -EBADMSG; + } else if (msg) { + memcpy(msg, ctlq_msg.ctx.indirect.payload->va, + min_t(int, payload_size, msg_size)); + } + work_done = true; + break; + case VIRTCHNL2_OP_CREATE_VPORT: + idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg, + IDPF_VC_CREATE_VPORT, + IDPF_VC_CREATE_VPORT_ERR); + break; + case VIRTCHNL2_OP_ENABLE_VPORT: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_ENA_VPORT, + IDPF_VC_ENA_VPORT_ERR); + break; + case VIRTCHNL2_OP_DISABLE_VPORT: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_DIS_VPORT, + IDPF_VC_DIS_VPORT_ERR); + break; + case VIRTCHNL2_OP_DESTROY_VPORT: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_DESTROY_VPORT, + IDPF_VC_DESTROY_VPORT_ERR); + break; + case VIRTCHNL2_OP_CONFIG_TX_QUEUES: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_CONFIG_TXQ, + IDPF_VC_CONFIG_TXQ_ERR); + break; + case VIRTCHNL2_OP_CONFIG_RX_QUEUES: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_CONFIG_RXQ, + IDPF_VC_CONFIG_RXQ_ERR); + break; + case VIRTCHNL2_OP_ENABLE_QUEUES: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_ENA_QUEUES, + IDPF_VC_ENA_QUEUES_ERR); + break; + case VIRTCHNL2_OP_DISABLE_QUEUES: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_DIS_QUEUES, + IDPF_VC_DIS_QUEUES_ERR); + break; + case VIRTCHNL2_OP_ADD_QUEUES: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_ADD_QUEUES, + IDPF_VC_ADD_QUEUES_ERR); + break; + case VIRTCHNL2_OP_DEL_QUEUES: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_DEL_QUEUES, + IDPF_VC_DEL_QUEUES_ERR); + break; + case VIRTCHNL2_OP_MAP_QUEUE_VECTOR: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_MAP_IRQ, + IDPF_VC_MAP_IRQ_ERR); + break; + case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_UNMAP_IRQ, + IDPF_VC_UNMAP_IRQ_ERR); + break; + case VIRTCHNL2_OP_GET_STATS: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_GET_STATS, + IDPF_VC_GET_STATS_ERR); + break; + case VIRTCHNL2_OP_GET_RSS_LUT: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_GET_RSS_LUT, + IDPF_VC_GET_RSS_LUT_ERR); + break; + case VIRTCHNL2_OP_SET_RSS_LUT: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_SET_RSS_LUT, + IDPF_VC_SET_RSS_LUT_ERR); + break; + case VIRTCHNL2_OP_GET_RSS_KEY: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_GET_RSS_KEY, + IDPF_VC_GET_RSS_KEY_ERR); + break; + case VIRTCHNL2_OP_SET_RSS_KEY: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_SET_RSS_KEY, + IDPF_VC_SET_RSS_KEY_ERR); + break; + case VIRTCHNL2_OP_SET_SRIOV_VFS: + idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg, + IDPF_VC_SET_SRIOV_VFS, + IDPF_VC_SET_SRIOV_VFS_ERR); + break; + case VIRTCHNL2_OP_ALLOC_VECTORS: + idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg, + IDPF_VC_ALLOC_VECTORS, + IDPF_VC_ALLOC_VECTORS_ERR); + break; + 
case VIRTCHNL2_OP_DEALLOC_VECTORS: + idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg, + IDPF_VC_DEALLOC_VECTORS, + IDPF_VC_DEALLOC_VECTORS_ERR); + break; + case VIRTCHNL2_OP_GET_PTYPE_INFO: + idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg, + IDPF_VC_GET_PTYPE_INFO, + IDPF_VC_GET_PTYPE_INFO_ERR); + break; + case VIRTCHNL2_OP_LOOPBACK: + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_LOOPBACK_STATE, + IDPF_VC_LOOPBACK_STATE_ERR); + break; + case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE: + /* This message can only be sent asynchronously. As + * such we'll have lost the context in which it was + * called and thus can only really report if it looks + * like an error occurred. Don't bother setting ERR bit + * or waking chnl_wq since no work queue will be waiting + * to read the message. + */ + if (ctlq_msg.cookie.mbx.chnl_retval) { + dev_err(&adapter->pdev->dev, "Failed to set promiscuous mode: %d\n", + ctlq_msg.cookie.mbx.chnl_retval); + } + break; + case VIRTCHNL2_OP_ADD_MAC_ADDR: + vport_config = adapter->vport_config[vport->idx]; + if (test_and_clear_bit(IDPF_VPORT_ADD_MAC_REQ, + vport_config->flags)) { + /* Message was sent asynchronously. We don't + * normally print errors here, instead + * prefer to handle errors in the function + * calling wait_for_event. However, if + * asynchronous, the context in which the + * message was sent is lost. We can't really do + * anything about it at this point, but we + * should at a minimum indicate that it looks + * like something went wrong. Also don't bother + * setting ERR bit or waking vchnl_wq since no + * one will be waiting to read the async + * message. + */ + if (ctlq_msg.cookie.mbx.chnl_retval) + dev_err(&adapter->pdev->dev, "Failed to add MAC address: %d\n", + ctlq_msg.cookie.mbx.chnl_retval); + break; + } + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_ADD_MAC_ADDR, + IDPF_VC_ADD_MAC_ADDR_ERR); + break; + case VIRTCHNL2_OP_DEL_MAC_ADDR: + vport_config = adapter->vport_config[vport->idx]; + if (test_and_clear_bit(IDPF_VPORT_DEL_MAC_REQ, + vport_config->flags)) { + /* Message was sent asynchronously like the + * VIRTCHNL2_OP_ADD_MAC_ADDR + */ + if (ctlq_msg.cookie.mbx.chnl_retval) + dev_err(&adapter->pdev->dev, "Failed to delete MAC address: %d\n", + ctlq_msg.cookie.mbx.chnl_retval); + break; + } + idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, + IDPF_VC_DEL_MAC_ADDR, + IDPF_VC_DEL_MAC_ADDR_ERR); + break; + case VIRTCHNL2_OP_EVENT: + idpf_recv_event_msg(vport, &ctlq_msg); + break; + default: + dev_warn(&adapter->pdev->dev, + "Unhandled virtchnl response %d\n", + ctlq_msg.cookie.mbx.chnl_opcode); + break; + } + +post_buffs: + if (ctlq_msg.data_len) + dma_mem = ctlq_msg.ctx.indirect.payload; + else + num_q_msg = 0; + + err = idpf_ctlq_post_rx_buffs(&adapter->hw, adapter->hw.arq, + &num_q_msg, &dma_mem); + /* If post failed clear the only buffer we supplied */ + if (err && dma_mem) + dma_free_coherent(&adapter->pdev->dev, dma_mem->size, + dma_mem->va, dma_mem->pa); + + /* Applies only if we are looking for a specific opcode */ + if (work_done) + break; + } + + return err; +} + +/** + * __idpf_wait_for_event - wrapper function for wait on virtchannel response + * @adapter: Driver private data structure + * @vport: virtual port structure + * @state: check on state upon timeout + * @err_check: check if this specific error bit is set + * @timeout: Max time to wait + * + * Checks if state is set upon expiry of timeout. Returns 0 on success, + * negative on failure.
+ */ +static int __idpf_wait_for_event(struct idpf_adapter *adapter, + struct idpf_vport *vport, + enum idpf_vport_vc_state state, + enum idpf_vport_vc_state err_check, + int timeout) +{ + int time_to_wait, num_waits; + wait_queue_head_t *vchnl_wq; + unsigned long *vc_state; + + time_to_wait = ((timeout <= IDPF_MAX_WAIT) ? timeout : IDPF_MAX_WAIT); + num_waits = ((timeout <= IDPF_MAX_WAIT) ? 1 : timeout / IDPF_MAX_WAIT); + + if (vport) { + vchnl_wq = &vport->vchnl_wq; + vc_state = vport->vc_state; + } else { + vchnl_wq = &adapter->vchnl_wq; + vc_state = adapter->vc_state; + } + + while (num_waits) { + int event; + + /* If we are here and a reset is detected, do not wait but + * return. Reset timing is out of the driver's control. So, + * while we are cleaning resources as part of a reset, if the + * underlying HW mailbox is gone, waiting on mailbox messages + * is not meaningful + */ + if (idpf_is_reset_detected(adapter)) + return 0; + + event = wait_event_timeout(*vchnl_wq, + test_and_clear_bit(state, vc_state), + msecs_to_jiffies(time_to_wait)); + if (event) { + if (test_and_clear_bit(err_check, vc_state)) { + dev_err(&adapter->pdev->dev, "VC response error %s\n", + idpf_vport_vc_state_str[err_check]); + + return -EINVAL; + } + + return 0; + } + num_waits--; + } + + /* Timeout occurred */ + dev_err(&adapter->pdev->dev, "VC timeout, state = %s\n", + idpf_vport_vc_state_str[state]); + + return -ETIMEDOUT; +} + +/** + * idpf_min_wait_for_event - wait for virtchannel response + * @adapter: Driver private data structure + * @vport: virtual port structure + * @state: check on state upon timeout + * @err_check: check if this specific error bit is set + * + * Returns 0 on success, negative on failure. + */ +static int idpf_min_wait_for_event(struct idpf_adapter *adapter, + struct idpf_vport *vport, + enum idpf_vport_vc_state state, + enum idpf_vport_vc_state err_check) +{ + return __idpf_wait_for_event(adapter, vport, state, err_check, + IDPF_WAIT_FOR_EVENT_TIMEO_MIN); +} + +/** + * idpf_wait_for_event - wait for virtchannel response + * @adapter: Driver private data structure + * @vport: virtual port structure + * @state: check on state upon timeout after 500ms + * @err_check: check if this specific error bit is set + * + * Returns 0 on success, negative on failure. + */ +static int idpf_wait_for_event(struct idpf_adapter *adapter, + struct idpf_vport *vport, + enum idpf_vport_vc_state state, + enum idpf_vport_vc_state err_check) +{ + /* Increasing the timeout in the __IDPF_INIT_SW flow to account for a large + * number of VFs' mailbox message responses. When a message is received + * on the mailbox, this thread is woken up by idpf_recv_mb_msg before + * the timeout expires. Only in the error case, i.e. if no message is + * received on the mailbox, do we wait for the complete timeout, which is + * less likely to happen. + */ + return __idpf_wait_for_event(adapter, vport, state, err_check, + IDPF_WAIT_FOR_EVENT_TIMEO); +} + +/** + * idpf_wait_for_marker_event - wait for software marker response + * @vport: virtual port data structure + * + * Returns 0 on success, negative on failure.
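The split into time_to_wait and num_waits above keeps reset detection responsive during long waits. A worked example, assuming for illustration that IDPF_MAX_WAIT is 500 ms (its actual value is defined elsewhere in the patch):

/* timeout = 60000 ms, IDPF_MAX_WAIT = 500 ms (assumed):
 *
 *   time_to_wait = 500                  one slice
 *   num_waits    = 60000 / 500 = 120    slices
 *
 * idpf_is_reset_detected() is re-checked before every slice, i.e.
 * every 500 ms, instead of once after a full uninterrupted minute.
 * A timeout at or below IDPF_MAX_WAIT degenerates to a single wait.
 */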
+ **/ +static int idpf_wait_for_marker_event(struct idpf_vport *vport) +{ + int event; + int i; + + for (i = 0; i < vport->num_txq; i++) + set_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags); + + event = wait_event_timeout(vport->sw_marker_wq, + test_and_clear_bit(IDPF_VPORT_SW_MARKER, + vport->flags), + msecs_to_jiffies(500)); + + for (i = 0; i < vport->num_txq; i++) + clear_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags); + + if (event) + return 0; + + dev_warn(&vport->adapter->pdev->dev, "Failed to receive marker packets\n"); + + return -ETIMEDOUT; +} + +/** + * idpf_send_ver_msg - send virtchnl version message + * @adapter: Driver specific private structure + * + * Send virtchnl version message. Returns 0 on success, negative on failure. + */ +static int idpf_send_ver_msg(struct idpf_adapter *adapter) +{ + struct virtchnl2_version_info vvi; + + if (adapter->virt_ver_maj) { + vvi.major = cpu_to_le32(adapter->virt_ver_maj); + vvi.minor = cpu_to_le32(adapter->virt_ver_min); + } else { + vvi.major = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MAJOR); + vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR); + } + + return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_VERSION, sizeof(vvi), + (u8 *)&vvi); +} + +/** + * idpf_recv_ver_msg - Receive virtchnl version message + * @adapter: Driver specific private structure + * + * Receive virtchnl version message. Returns 0 on success, -EAGAIN if we need + * to send version message again, otherwise negative on failure. + */ +static int idpf_recv_ver_msg(struct idpf_adapter *adapter) +{ + struct virtchnl2_version_info vvi; + u32 major, minor; + int err; + + err = idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_VERSION, &vvi, + sizeof(vvi)); + if (err) + return err; + + major = le32_to_cpu(vvi.major); + minor = le32_to_cpu(vvi.minor); + + if (major > IDPF_VIRTCHNL_VERSION_MAJOR) { + dev_warn(&adapter->pdev->dev, + "Virtchnl major version (%d) greater than supported\n", + major); + + return -EINVAL; + } + + if (major == IDPF_VIRTCHNL_VERSION_MAJOR && + minor > IDPF_VIRTCHNL_VERSION_MINOR) + dev_warn(&adapter->pdev->dev, + "Virtchnl minor version (%d) didn't match\n", minor); + + /* If we have a mismatch, resend version to update receiver on what + * version we will use. + */ + if (!adapter->virt_ver_maj && + major != IDPF_VIRTCHNL_VERSION_MAJOR && + minor != IDPF_VIRTCHNL_VERSION_MINOR) + err = -EAGAIN; + + adapter->virt_ver_maj = major; + adapter->virt_ver_min = minor; + + return err; +} + +/** + * idpf_send_get_caps_msg - Send virtchnl get capabilities message + * @adapter: Driver specific private structure + * + * Send virtchnl get capabilities message. Returns 0 on success, negative on + * failure.
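The -EAGAIN contract of idpf_recv_ver_msg() implies a retry loop on the caller's side. A hypothetical sketch of how the negotiation converges (the real driver drives this from its init path, which lies outside this hunk):

static int example_negotiate_version(struct idpf_adapter *adapter)
{
	int err;

	do {
		err = idpf_send_ver_msg(adapter);
		if (err)
			return err;

		/* -EAGAIN: the peer answered with a lower version,
		 * now cached in virt_ver_maj/min; resend so both
		 * sides agree on the lowered version.
		 */
		err = idpf_recv_ver_msg(adapter);
	} while (err == -EAGAIN);

	return err;
}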
+ */ +static int idpf_send_get_caps_msg(struct idpf_adapter *adapter) +{ + struct virtchnl2_get_capabilities caps = { }; + + caps.csum_caps = + cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 | + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP | + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP | + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP | + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP | + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP | + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP | + VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 | + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP | + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP | + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP | + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP | + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP | + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP | + VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL | + VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL | + VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL | + VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL | + VIRTCHNL2_CAP_RX_CSUM_GENERIC); + + caps.seg_caps = + cpu_to_le32(VIRTCHNL2_CAP_SEG_IPV4_TCP | + VIRTCHNL2_CAP_SEG_IPV4_UDP | + VIRTCHNL2_CAP_SEG_IPV4_SCTP | + VIRTCHNL2_CAP_SEG_IPV6_TCP | + VIRTCHNL2_CAP_SEG_IPV6_UDP | + VIRTCHNL2_CAP_SEG_IPV6_SCTP | + VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL); + + caps.rss_caps = + cpu_to_le64(VIRTCHNL2_CAP_RSS_IPV4_TCP | + VIRTCHNL2_CAP_RSS_IPV4_UDP | + VIRTCHNL2_CAP_RSS_IPV4_SCTP | + VIRTCHNL2_CAP_RSS_IPV4_OTHER | + VIRTCHNL2_CAP_RSS_IPV6_TCP | + VIRTCHNL2_CAP_RSS_IPV6_UDP | + VIRTCHNL2_CAP_RSS_IPV6_SCTP | + VIRTCHNL2_CAP_RSS_IPV6_OTHER); + + caps.hsplit_caps = + cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 | + VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6); + + caps.rsc_caps = + cpu_to_le32(VIRTCHNL2_CAP_RSC_IPV4_TCP | + VIRTCHNL2_CAP_RSC_IPV6_TCP); + + caps.other_caps = + cpu_to_le64(VIRTCHNL2_CAP_SRIOV | + VIRTCHNL2_CAP_MACFILTER | + VIRTCHNL2_CAP_SPLITQ_QSCHED | + VIRTCHNL2_CAP_PROMISC | + VIRTCHNL2_CAP_LOOPBACK); + + return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, sizeof(caps), + (u8 *)&caps); +} + +/** + * idpf_recv_get_caps_msg - Receive virtchnl get capabilities message + * @adapter: Driver specific private structure + * + * Receive virtchnl get capabilities message. Returns 0 on success, negative on + * failure. 
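Once the GET_CAPS exchange completes, the negotiated bits live in adapter->caps (filled in by the receive helper just below) and stay little-endian in memory. An illustrative test of a single capability bit (not part of the patch):

static bool example_has_rss_ipv4_tcp(struct idpf_adapter *adapter)
{
	/* convert at the point of use, as the rest of the file does */
	u64 rss_caps = le64_to_cpu(adapter->caps.rss_caps);

	return !!(rss_caps & VIRTCHNL2_CAP_RSS_IPV4_TCP);
}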
+ */ +static int idpf_recv_get_caps_msg(struct idpf_adapter *adapter) +{ + return idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, &adapter->caps, + sizeof(struct virtchnl2_get_capabilities)); +} + +/** + * idpf_vport_alloc_max_qs - Allocate max queues for a vport + * @adapter: Driver specific private structure + * @max_q: vport max queue structure + */ +int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter, + struct idpf_vport_max_q *max_q) +{ + struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues; + struct virtchnl2_get_capabilities *caps = &adapter->caps; + u16 default_vports = idpf_get_default_vports(adapter); + int max_rx_q, max_tx_q; + + mutex_lock(&adapter->queue_lock); + + max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports; + max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports; + if (adapter->num_alloc_vports < default_vports) { + max_q->max_rxq = min_t(u16, max_rx_q, IDPF_MAX_Q); + max_q->max_txq = min_t(u16, max_tx_q, IDPF_MAX_Q); + } else { + max_q->max_rxq = IDPF_MIN_Q; + max_q->max_txq = IDPF_MIN_Q; + } + max_q->max_bufq = max_q->max_rxq * IDPF_MAX_BUFQS_PER_RXQ_GRP; + max_q->max_complq = max_q->max_txq; + + if (avail_queues->avail_rxq < max_q->max_rxq || + avail_queues->avail_txq < max_q->max_txq || + avail_queues->avail_bufq < max_q->max_bufq || + avail_queues->avail_complq < max_q->max_complq) { + mutex_unlock(&adapter->queue_lock); + + return -EINVAL; + } + + avail_queues->avail_rxq -= max_q->max_rxq; + avail_queues->avail_txq -= max_q->max_txq; + avail_queues->avail_bufq -= max_q->max_bufq; + avail_queues->avail_complq -= max_q->max_complq; + + mutex_unlock(&adapter->queue_lock); + + return 0; +} + +/** + * idpf_vport_dealloc_max_qs - Deallocate max queues of a vport + * @adapter: Driver specific private structure + * @max_q: vport max queue structure + */ +void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter, + struct idpf_vport_max_q *max_q) +{ + struct idpf_avail_queue_info *avail_queues; + + mutex_lock(&adapter->queue_lock); + avail_queues = &adapter->avail_queues; + + avail_queues->avail_rxq += max_q->max_rxq; + avail_queues->avail_txq += max_q->max_txq; + avail_queues->avail_bufq += max_q->max_bufq; + avail_queues->avail_complq += max_q->max_complq; + + mutex_unlock(&adapter->queue_lock); +} + +/** + * idpf_init_avail_queues - Initialize available queues on the device + * @adapter: Driver specific private structure + */ +static void idpf_init_avail_queues(struct idpf_adapter *adapter) +{ + struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues; + struct virtchnl2_get_capabilities *caps = &adapter->caps; + + avail_queues->avail_rxq = le16_to_cpu(caps->max_rx_q); + avail_queues->avail_txq = le16_to_cpu(caps->max_tx_q); + avail_queues->avail_bufq = le16_to_cpu(caps->max_rx_bufq); + avail_queues->avail_complq = le16_to_cpu(caps->max_tx_complq); +} + +/** + * idpf_get_reg_intr_vecs - Get vector queue register offset + * @vport: virtual port structure + * @reg_vals: Register offsets to store in + * + * Returns number of registers that got populated + */ +int idpf_get_reg_intr_vecs(struct idpf_vport *vport, + struct idpf_vec_regs *reg_vals) +{ + struct virtchnl2_vector_chunks *chunks; + struct idpf_vec_regs reg_val; + u16 num_vchunks, num_vec; + int num_regs = 0, i, j; + + chunks = &vport->adapter->req_vec_chunks->vchunks; + num_vchunks = le16_to_cpu(chunks->num_vchunks); + + for (j = 0; j < num_vchunks; j++) { + struct virtchnl2_vector_chunk *chunk; + u32 dynctl_reg_spacing; + u32 itrn_reg_spacing; + + chunk = 
&chunks->vchunks[j]; + num_vec = le16_to_cpu(chunk->num_vectors); + reg_val.dyn_ctl_reg = le32_to_cpu(chunk->dynctl_reg_start); + reg_val.itrn_reg = le32_to_cpu(chunk->itrn_reg_start); + reg_val.itrn_index_spacing = le32_to_cpu(chunk->itrn_index_spacing); + + dynctl_reg_spacing = le32_to_cpu(chunk->dynctl_reg_spacing); + itrn_reg_spacing = le32_to_cpu(chunk->itrn_reg_spacing); + + for (i = 0; i < num_vec; i++) { + reg_vals[num_regs].dyn_ctl_reg = reg_val.dyn_ctl_reg; + reg_vals[num_regs].itrn_reg = reg_val.itrn_reg; + reg_vals[num_regs].itrn_index_spacing = + reg_val.itrn_index_spacing; + + reg_val.dyn_ctl_reg += dynctl_reg_spacing; + reg_val.itrn_reg += itrn_reg_spacing; + num_regs++; + } + } + + return num_regs; +} + +/** + * idpf_vport_get_q_reg - Get the queue registers for the vport + * @reg_vals: register values needing to be set + * @num_regs: amount we expect to fill + * @q_type: queue model + * @chunks: queue regs received over mailbox + * + * This function parses the queue register offsets from the queue register + * chunk information, with a specific queue type and stores it into the array + * passed as an argument. It returns the actual number of queue registers that + * are filled. + */ +static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type, + struct virtchnl2_queue_reg_chunks *chunks) +{ + u16 num_chunks = le16_to_cpu(chunks->num_chunks); + int reg_filled = 0, i; + u32 reg_val; + + while (num_chunks--) { + struct virtchnl2_queue_reg_chunk *chunk; + u16 num_q; + + chunk = &chunks->chunks[num_chunks]; + if (le32_to_cpu(chunk->type) != q_type) + continue; + + num_q = le32_to_cpu(chunk->num_queues); + reg_val = le64_to_cpu(chunk->qtail_reg_start); + for (i = 0; i < num_q && reg_filled < num_regs ; i++) { + reg_vals[reg_filled++] = reg_val; + reg_val += le32_to_cpu(chunk->qtail_reg_spacing); + } + } + + return reg_filled; +} + +/** + * __idpf_queue_reg_init - initialize queue registers + * @vport: virtual port structure + * @reg_vals: registers we are initializing + * @num_regs: how many registers there are in total + * @q_type: queue model + * + * Return number of queues that are initialized + */ +static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals, + int num_regs, u32 q_type) +{ + struct idpf_adapter *adapter = vport->adapter; + struct idpf_queue *q; + int i, j, k = 0; + + switch (q_type) { + case VIRTCHNL2_QUEUE_TYPE_TX: + for (i = 0; i < vport->num_txq_grp; i++) { + struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + + for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++) + tx_qgrp->txqs[j]->tail = + idpf_get_reg_addr(adapter, reg_vals[k]); + } + break; + case VIRTCHNL2_QUEUE_TYPE_RX: + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + u16 num_rxq = rx_qgrp->singleq.num_rxq; + + for (j = 0; j < num_rxq && k < num_regs; j++, k++) { + q = rx_qgrp->singleq.rxqs[j]; + q->tail = idpf_get_reg_addr(adapter, + reg_vals[k]); + } + } + break; + case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER: + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + u8 num_bufqs = vport->num_bufqs_per_qgrp; + + for (j = 0; j < num_bufqs && k < num_regs; j++, k++) { + q = &rx_qgrp->splitq.bufq_sets[j].bufq; + q->tail = idpf_get_reg_addr(adapter, + reg_vals[k]); + } + } + break; + default: + break; + } + + return k; +} + +/** + * idpf_queue_reg_init - initialize queue registers + * @vport: virtual port structure + * + * Return 0 on success, negative on failure + */ +int 
idpf_queue_reg_init(struct idpf_vport *vport) +{ + struct virtchnl2_create_vport *vport_params; + struct virtchnl2_queue_reg_chunks *chunks; + struct idpf_vport_config *vport_config; + u16 vport_idx = vport->idx; + int num_regs, ret = 0; + u32 *reg_vals; + + /* We may never deal with more than 256 queues of the same type */ + reg_vals = kcalloc(IDPF_LARGE_MAX_Q, sizeof(u32), GFP_KERNEL); + if (!reg_vals) + return -ENOMEM; + + vport_config = vport->adapter->vport_config[vport_idx]; + if (vport_config->req_qs_chunks) { + struct virtchnl2_add_queues *vc_aq = + (struct virtchnl2_add_queues *)vport_config->req_qs_chunks; + chunks = &vc_aq->chunks; + } else { + vport_params = vport->adapter->vport_params_recvd[vport_idx]; + chunks = &vport_params->chunks; + } + + /* Initialize Tx queue tail register address */ + num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q, + VIRTCHNL2_QUEUE_TYPE_TX, + chunks); + if (num_regs < vport->num_txq) { + ret = -EINVAL; + goto free_reg_vals; + } + + num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs, + VIRTCHNL2_QUEUE_TYPE_TX); + if (num_regs < vport->num_txq) { + ret = -EINVAL; + goto free_reg_vals; + } + + /* Initialize Rx/buffer queue tail register address based on Rx queue + * model + */ + if (idpf_is_queue_model_split(vport->rxq_model)) { + num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q, + VIRTCHNL2_QUEUE_TYPE_RX_BUFFER, + chunks); + if (num_regs < vport->num_bufq) { + ret = -EINVAL; + goto free_reg_vals; + } + + num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs, + VIRTCHNL2_QUEUE_TYPE_RX_BUFFER); + if (num_regs < vport->num_bufq) { + ret = -EINVAL; + goto free_reg_vals; + } + } else { + num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q, + VIRTCHNL2_QUEUE_TYPE_RX, + chunks); + if (num_regs < vport->num_rxq) { + ret = -EINVAL; + goto free_reg_vals; + } + + num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs, + VIRTCHNL2_QUEUE_TYPE_RX); + if (num_regs < vport->num_rxq) { + ret = -EINVAL; + goto free_reg_vals; + } + } + +free_reg_vals: + kfree(reg_vals); + + return ret; +} + +/** + * idpf_send_create_vport_msg - Send virtchnl create vport message + * @adapter: Driver specific private structure + * @max_q: vport max queue info + * + * send virtchnl create vport message + * + * Returns 0 on success, negative on failure + */ +int idpf_send_create_vport_msg(struct idpf_adapter *adapter, + struct idpf_vport_max_q *max_q) +{ + struct virtchnl2_create_vport *vport_msg; + u16 idx = adapter->next_vport; + int err, buf_size; + + buf_size = sizeof(struct virtchnl2_create_vport); + if (!adapter->vport_params_reqd[idx]) { + adapter->vport_params_reqd[idx] = kzalloc(buf_size, + GFP_KERNEL); + if (!adapter->vport_params_reqd[idx]) + return -ENOMEM; + } + + vport_msg = adapter->vport_params_reqd[idx]; + vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT); + vport_msg->vport_index = cpu_to_le16(idx); + + if (adapter->req_tx_splitq) + vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT); + else + vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE); + + if (adapter->req_rx_splitq) + vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT); + else + vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE); + + err = idpf_vport_calc_total_qs(adapter, idx, vport_msg, max_q); + if (err) { + dev_err(&adapter->pdev->dev, "Not enough queues are available"); + + return err; + } + + mutex_lock(&adapter->vc_buf_lock); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CREATE_VPORT,
buf_size, + (u8 *)vport_msg); + if (err) + goto rel_lock; + + err = idpf_wait_for_event(adapter, NULL, IDPF_VC_CREATE_VPORT, + IDPF_VC_CREATE_VPORT_ERR); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to receive create vport message"); + + goto rel_lock; + } + + if (!adapter->vport_params_recvd[idx]) { + adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, + GFP_KERNEL); + if (!adapter->vport_params_recvd[idx]) { + err = -ENOMEM; + goto rel_lock; + } + } + + vport_msg = adapter->vport_params_recvd[idx]; + memcpy(vport_msg, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN); + +rel_lock: + mutex_unlock(&adapter->vc_buf_lock); + + return err; +} + +/** + * idpf_check_supported_desc_ids - Verify we have required descriptor support + * @vport: virtual port structure + * + * Return 0 on success, error on failure + */ +int idpf_check_supported_desc_ids(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_create_vport *vport_msg; + u64 rx_desc_ids, tx_desc_ids; + + vport_msg = adapter->vport_params_recvd[vport->idx]; + + rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids); + tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids); + + if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { + if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) { + dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n"); + vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); + } + } else { + if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M)) + vport->base_rxd = true; + } + + if (vport->txq_model != VIRTCHNL2_QUEUE_MODEL_SPLIT) + return 0; + + if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) { + dev_info(&adapter->pdev->dev, "Minimum TX descriptor support not provided, using the default\n"); + vport_msg->tx_desc_ids = cpu_to_le64(MIN_SUPPORT_TXDID); + } + + return 0; +} + +/** + * idpf_send_destroy_vport_msg - Send virtchnl destroy vport message + * @vport: virtual port data structure + * + * Send virtchnl destroy vport message. Returns 0 on success, negative on + * failure. + */ +int idpf_send_destroy_vport_msg(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_vport v_id; + int err; + + v_id.vport_id = cpu_to_le32(vport->vport_id); + + mutex_lock(&vport->vc_buf_lock); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DESTROY_VPORT, + sizeof(v_id), (u8 *)&v_id); + if (err) + goto rel_lock; + + err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DESTROY_VPORT, + IDPF_VC_DESTROY_VPORT_ERR); + +rel_lock: + mutex_unlock(&vport->vc_buf_lock); + + return err; +} + +/** + * idpf_send_enable_vport_msg - Send virtchnl enable vport message + * @vport: virtual port data structure + * + * Send enable vport virtchnl message. Returns 0 on success, negative on + * failure. + */ +int idpf_send_enable_vport_msg(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_vport v_id; + int err; + + v_id.vport_id = cpu_to_le32(vport->vport_id); + + mutex_lock(&vport->vc_buf_lock); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ENABLE_VPORT, + sizeof(v_id), (u8 *)&v_id); + if (err) + goto rel_lock; + + err = idpf_wait_for_event(adapter, vport, IDPF_VC_ENA_VPORT, + IDPF_VC_ENA_VPORT_ERR); + +rel_lock: + mutex_unlock(&vport->vc_buf_lock); + + return err; +} + +/** + * idpf_send_disable_vport_msg - Send virtchnl disable vport message + * @vport: virtual port data structure + * + * Send disable vport virtchnl message. 
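By this point the patch has repeated the same synchronous shape several times: take vc_buf_lock, send, wait on the state bit, then read the response out of vc_msg while still holding the lock. Factored out as an illustrative sketch (the patch deliberately keeps these copies open-coded per opcode):

static int example_vc_transaction(struct idpf_vport *vport, u32 op,
				  u16 size, u8 *payload,
				  enum idpf_vport_vc_state state,
				  enum idpf_vport_vc_state err_state,
				  void *resp, size_t resp_size)
{
	struct idpf_adapter *adapter = vport->adapter;
	int err;

	mutex_lock(&vport->vc_buf_lock);

	err = idpf_send_mb_msg(adapter, op, size, payload);
	if (err)
		goto unlock;

	err = idpf_wait_for_event(adapter, vport, state, err_state);
	if (err)
		goto unlock;

	/* idpf_recv_mb_msg() copied the response into vc_msg; it is
	 * only stable while vc_buf_lock is held.
	 */
	memcpy(resp, vport->vc_msg, resp_size);

unlock:
	mutex_unlock(&vport->vc_buf_lock);

	return err;
}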
Returns 0 on success, negative on + * failure. + */ +int idpf_send_disable_vport_msg(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_vport v_id; + int err; + + v_id.vport_id = cpu_to_le32(vport->vport_id); + + mutex_lock(&vport->vc_buf_lock); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DISABLE_VPORT, + sizeof(v_id), (u8 *)&v_id); + if (err) + goto rel_lock; + + err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DIS_VPORT, + IDPF_VC_DIS_VPORT_ERR); + +rel_lock: + mutex_unlock(&vport->vc_buf_lock); + + return err; +} + +/** + * idpf_send_config_tx_queues_msg - Send virtchnl config tx queues message + * @vport: virtual port data structure + * + * Send config tx queues virtchnl message. Returns 0 on success, negative on + * failure. + */ +static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport) +{ + struct virtchnl2_config_tx_queues *ctq; + u32 config_sz, chunk_sz, buf_sz; + int totqs, num_msgs, num_chunks; + struct virtchnl2_txq_info *qi; + int err = 0, i, k = 0; + + totqs = vport->num_txq + vport->num_complq; + qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL); + if (!qi) + return -ENOMEM; + + /* Populate the queue info buffer with all queue context info */ + for (i = 0; i < vport->num_txq_grp; i++) { + struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + int j, sched_mode; + + for (j = 0; j < tx_qgrp->num_txq; j++, k++) { + qi[k].queue_id = + cpu_to_le32(tx_qgrp->txqs[j]->q_id); + qi[k].model = + cpu_to_le16(vport->txq_model); + qi[k].type = + cpu_to_le32(tx_qgrp->txqs[j]->q_type); + qi[k].ring_len = + cpu_to_le16(tx_qgrp->txqs[j]->desc_count); + qi[k].dma_ring_addr = + cpu_to_le64(tx_qgrp->txqs[j]->dma); + if (idpf_is_queue_model_split(vport->txq_model)) { + struct idpf_queue *q = tx_qgrp->txqs[j]; + + qi[k].tx_compl_queue_id = + cpu_to_le16(tx_qgrp->complq->q_id); + qi[k].relative_queue_id = cpu_to_le16(j); + + if (test_bit(__IDPF_Q_FLOW_SCH_EN, q->flags)) + qi[k].sched_mode = + cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW); + else + qi[k].sched_mode = + cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE); + } else { + qi[k].sched_mode = + cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE); + } + } + + if (!idpf_is_queue_model_split(vport->txq_model)) + continue; + + qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id); + qi[k].model = cpu_to_le16(vport->txq_model); + qi[k].type = cpu_to_le32(tx_qgrp->complq->q_type); + qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count); + qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma); + + if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags)) + sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW; + else + sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE; + qi[k].sched_mode = cpu_to_le16(sched_mode); + + k++; + } + + /* Make sure accounting agrees */ + if (k != totqs) { + err = -EINVAL; + goto error; + } + + /* Chunk up the queue contexts into multiple messages to avoid + * sending a control queue message buffer that is too large + */ + config_sz = sizeof(struct virtchnl2_config_tx_queues); + chunk_sz = sizeof(struct virtchnl2_txq_info); + + num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz), + totqs); + num_msgs = DIV_ROUND_UP(totqs, num_chunks); + + buf_sz = struct_size(ctq, qinfo, num_chunks); + ctq = kzalloc(buf_sz, GFP_KERNEL); + if (!ctq) { + err = -ENOMEM; + goto error; + } + + mutex_lock(&vport->vc_buf_lock); + + for (i = 0, k = 0; i < num_msgs; i++) { + memset(ctq, 0, buf_sz); + ctq->vport_id = cpu_to_le32(vport->vport_id); + ctq->num_qinfo = 
cpu_to_le16(num_chunks); + memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks); + + err = idpf_send_mb_msg(vport->adapter, + VIRTCHNL2_OP_CONFIG_TX_QUEUES, + buf_sz, (u8 *)ctq); + if (err) + goto mbx_error; + + err = idpf_wait_for_event(vport->adapter, vport, + IDPF_VC_CONFIG_TXQ, + IDPF_VC_CONFIG_TXQ_ERR); + if (err) + goto mbx_error; + + k += num_chunks; + totqs -= num_chunks; + num_chunks = min(num_chunks, totqs); + /* Recalculate buffer size */ + buf_sz = struct_size(ctq, qinfo, num_chunks); + } + +mbx_error: + mutex_unlock(&vport->vc_buf_lock); + kfree(ctq); +error: + kfree(qi); + + return err; +} + +/** + * idpf_send_config_rx_queues_msg - Send virtchnl config rx queues message + * @vport: virtual port data structure + * + * Send config rx queues virtchnl message. Returns 0 on success, negative on + * failure. + */ +static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport) +{ + struct virtchnl2_config_rx_queues *crq; + u32 config_sz, chunk_sz, buf_sz; + int totqs, num_msgs, num_chunks; + struct virtchnl2_rxq_info *qi; + int err = 0, i, k = 0; + + totqs = vport->num_rxq + vport->num_bufq; + qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL); + if (!qi) + return -ENOMEM; + + /* Populate the queue info buffer with all queue context info */ + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + u16 num_rxq; + int j; + + if (!idpf_is_queue_model_split(vport->rxq_model)) + goto setup_rxqs; + + for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { + struct idpf_queue *bufq = + &rx_qgrp->splitq.bufq_sets[j].bufq; + + qi[k].queue_id = cpu_to_le32(bufq->q_id); + qi[k].model = cpu_to_le16(vport->rxq_model); + qi[k].type = cpu_to_le32(bufq->q_type); + qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); + qi[k].ring_len = cpu_to_le16(bufq->desc_count); + qi[k].dma_ring_addr = cpu_to_le64(bufq->dma); + qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size); + qi[k].buffer_notif_stride = bufq->rx_buf_stride; + qi[k].rx_buffer_low_watermark = + cpu_to_le16(bufq->rx_buffer_low_watermark); + if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) + qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC); + } + +setup_rxqs: + if (idpf_is_queue_model_split(vport->rxq_model)) + num_rxq = rx_qgrp->splitq.num_rxq_sets; + else + num_rxq = rx_qgrp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++, k++) { + struct idpf_queue *rxq; + + if (!idpf_is_queue_model_split(vport->rxq_model)) { + rxq = rx_qgrp->singleq.rxqs[j]; + goto common_qi_fields; + } + rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq; + qi[k].rx_bufq1_id = + cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[0].bufq.q_id); + if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) { + qi[k].bufq2_ena = IDPF_BUFQ2_ENA; + qi[k].rx_bufq2_id = + cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[1].bufq.q_id); + } + qi[k].rx_buffer_low_watermark = + cpu_to_le16(rxq->rx_buffer_low_watermark); + if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) + qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC); + +common_qi_fields: + if (rxq->rx_hsplit_en) { + qi[k].qflags |= + cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT); + qi[k].hdr_buffer_size = + cpu_to_le16(rxq->rx_hbuf_size); + } + qi[k].queue_id = cpu_to_le32(rxq->q_id); + qi[k].model = cpu_to_le16(vport->rxq_model); + qi[k].type = cpu_to_le32(rxq->q_type); + qi[k].ring_len = cpu_to_le16(rxq->desc_count); + qi[k].dma_ring_addr = cpu_to_le64(rxq->dma); + qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size); + qi[k].data_buffer_size = cpu_to_le32(rxq->rx_buf_size); + 
qi[k].qflags |= + cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE); + qi[k].desc_ids = cpu_to_le64(rxq->rxdids); + } + } + + /* Make sure accounting agrees */ + if (k != totqs) { + err = -EINVAL; + goto error; + } + + /* Chunk up the queue contexts into multiple messages to avoid + * sending a control queue message buffer that is too large + */ + config_sz = sizeof(struct virtchnl2_config_rx_queues); + chunk_sz = sizeof(struct virtchnl2_rxq_info); + + num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz), + totqs); + num_msgs = DIV_ROUND_UP(totqs, num_chunks); + + buf_sz = struct_size(crq, qinfo, num_chunks); + crq = kzalloc(buf_sz, GFP_KERNEL); + if (!crq) { + err = -ENOMEM; + goto error; + } + + mutex_lock(&vport->vc_buf_lock); + + for (i = 0, k = 0; i < num_msgs; i++) { + memset(crq, 0, buf_sz); + crq->vport_id = cpu_to_le32(vport->vport_id); + crq->num_qinfo = cpu_to_le16(num_chunks); + memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks); + + err = idpf_send_mb_msg(vport->adapter, + VIRTCHNL2_OP_CONFIG_RX_QUEUES, + buf_sz, (u8 *)crq); + if (err) + goto mbx_error; + + err = idpf_wait_for_event(vport->adapter, vport, + IDPF_VC_CONFIG_RXQ, + IDPF_VC_CONFIG_RXQ_ERR); + if (err) + goto mbx_error; + + k += num_chunks; + totqs -= num_chunks; + num_chunks = min(num_chunks, totqs); + /* Recalculate buffer size */ + buf_sz = struct_size(crq, qinfo, num_chunks); + } + +mbx_error: + mutex_unlock(&vport->vc_buf_lock); + kfree(crq); +error: + kfree(qi); + + return err; +} + +/** + * idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable + * queues message + * @vport: virtual port data structure + * @vc_op: virtchnl op code to send + * + * Send enable or disable queues virtchnl message. Returns 0 on success, + * negative on failure. + */ +static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op) +{ + u32 num_msgs, num_chunks, num_txq, num_rxq, num_q; + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_del_ena_dis_queues *eq; + struct virtchnl2_queue_chunks *qcs; + struct virtchnl2_queue_chunk *qc; + u32 config_sz, chunk_sz, buf_sz; + int i, j, k = 0, err = 0; + + /* validate virtchnl op */ + switch (vc_op) { + case VIRTCHNL2_OP_ENABLE_QUEUES: + case VIRTCHNL2_OP_DISABLE_QUEUES: + break; + default: + return -EINVAL; + } + + num_txq = vport->num_txq + vport->num_complq; + num_rxq = vport->num_rxq + vport->num_bufq; + num_q = num_txq + num_rxq; + buf_sz = sizeof(struct virtchnl2_queue_chunk) * num_q; + qc = kzalloc(buf_sz, GFP_KERNEL); + if (!qc) + return -ENOMEM; + + for (i = 0; i < vport->num_txq_grp; i++) { + struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + + for (j = 0; j < tx_qgrp->num_txq; j++, k++) { + qc[k].type = cpu_to_le32(tx_qgrp->txqs[j]->q_type); + qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id); + qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); + } + } + if (vport->num_txq != k) { + err = -EINVAL; + goto error; + } + + if (!idpf_is_queue_model_split(vport->txq_model)) + goto setup_rx; + + for (i = 0; i < vport->num_txq_grp; i++, k++) { + struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + + qc[k].type = cpu_to_le32(tx_qgrp->complq->q_type); + qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id); + qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); + } + if (vport->num_complq != (k - vport->num_txq)) { + err = -EINVAL; + goto error; + } + +setup_rx: + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + + if (idpf_is_queue_model_split(vport->rxq_model)) + 
num_rxq = rx_qgrp->splitq.num_rxq_sets; + else + num_rxq = rx_qgrp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++, k++) { + if (idpf_is_queue_model_split(vport->rxq_model)) { + qc[k].start_queue_id = + cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id); + qc[k].type = + cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_type); + } else { + qc[k].start_queue_id = + cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id); + qc[k].type = + cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_type); + } + qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); + } + } + if (vport->num_rxq != k - (vport->num_txq + vport->num_complq)) { + err = -EINVAL; + goto error; + } + + if (!idpf_is_queue_model_split(vport->rxq_model)) + goto send_msg; + + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + + for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { + struct idpf_queue *q; + + q = &rx_qgrp->splitq.bufq_sets[j].bufq; + qc[k].type = cpu_to_le32(q->q_type); + qc[k].start_queue_id = cpu_to_le32(q->q_id); + qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); + } + } + if (vport->num_bufq != k - (vport->num_txq + + vport->num_complq + + vport->num_rxq)) { + err = -EINVAL; + goto error; + } + +send_msg: + /* Chunk up the queue info into multiple messages */ + config_sz = sizeof(struct virtchnl2_del_ena_dis_queues); + chunk_sz = sizeof(struct virtchnl2_queue_chunk); + + num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz), + num_q); + num_msgs = DIV_ROUND_UP(num_q, num_chunks); + + buf_sz = struct_size(eq, chunks.chunks, num_chunks); + eq = kzalloc(buf_sz, GFP_KERNEL); + if (!eq) { + err = -ENOMEM; + goto error; + } + + mutex_lock(&vport->vc_buf_lock); + + for (i = 0, k = 0; i < num_msgs; i++) { + memset(eq, 0, buf_sz); + eq->vport_id = cpu_to_le32(vport->vport_id); + eq->chunks.num_chunks = cpu_to_le16(num_chunks); + qcs = &eq->chunks; + memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks); + + err = idpf_send_mb_msg(adapter, vc_op, buf_sz, (u8 *)eq); + if (err) + goto mbx_error; + + if (vc_op == VIRTCHNL2_OP_ENABLE_QUEUES) + err = idpf_wait_for_event(adapter, vport, + IDPF_VC_ENA_QUEUES, + IDPF_VC_ENA_QUEUES_ERR); + else + err = idpf_min_wait_for_event(adapter, vport, + IDPF_VC_DIS_QUEUES, + IDPF_VC_DIS_QUEUES_ERR); + if (err) + goto mbx_error; + + k += num_chunks; + num_q -= num_chunks; + num_chunks = min(num_chunks, num_q); + /* Recalculate buffer size */ + buf_sz = struct_size(eq, chunks.chunks, num_chunks); + } + +mbx_error: + mutex_unlock(&vport->vc_buf_lock); + kfree(eq); +error: + kfree(qc); + + return err; +} + +/** + * idpf_send_map_unmap_queue_vector_msg - Send virtchnl map or unmap queue + * vector message + * @vport: virtual port data structure + * @map: true for map and false for unmap + * + * Send map or unmap queue vector virtchnl message. Returns 0 on success, + * negative on failure. 
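The chunking arithmetic above recurs in the config, enable/disable, and map/unmap paths. A worked example, under the assumption that IDPF_NUM_CHUNKS_PER_MSG(cfg, chk) computes how many chunk entries fit after the fixed message header (its definition is outside this hunk):

/* Assumed shape of the macro, for illustration only:
 *
 *   IDPF_NUM_CHUNKS_PER_MSG(cfg, chk)
 *       ~= (IDPF_CTLQ_MAX_BUF_LEN - (cfg)) / (chk)
 *
 * With a hypothetical 4096-byte mailbox buffer, a 48-byte fixed
 * header and 32-byte chunks:
 *
 *   num_chunks = min((4096 - 48) / 32, num_q) = min(126, num_q);
 *   num_msgs   = DIV_ROUND_UP(num_q, num_chunks);
 *
 * so 200 queue chunks go out as two messages (126 + 74 chunks),
 * the buffer being re-sized with struct_size() between rounds.
 */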
+ */ +int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_queue_vector_maps *vqvm; + struct virtchnl2_queue_vector *vqv; + u32 config_sz, chunk_sz, buf_sz; + u32 num_msgs, num_chunks, num_q; + int i, j, k = 0, err = 0; + + num_q = vport->num_txq + vport->num_rxq; + + buf_sz = sizeof(struct virtchnl2_queue_vector) * num_q; + vqv = kzalloc(buf_sz, GFP_KERNEL); + if (!vqv) + return -ENOMEM; + + for (i = 0; i < vport->num_txq_grp; i++) { + struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + + for (j = 0; j < tx_qgrp->num_txq; j++, k++) { + vqv[k].queue_type = cpu_to_le32(tx_qgrp->txqs[j]->q_type); + vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id); + + if (idpf_is_queue_model_split(vport->txq_model)) { + vqv[k].vector_id = + cpu_to_le16(tx_qgrp->complq->q_vector->v_idx); + vqv[k].itr_idx = + cpu_to_le32(tx_qgrp->complq->q_vector->tx_itr_idx); + } else { + vqv[k].vector_id = + cpu_to_le16(tx_qgrp->txqs[j]->q_vector->v_idx); + vqv[k].itr_idx = + cpu_to_le32(tx_qgrp->txqs[j]->q_vector->tx_itr_idx); + } + } + } + + if (vport->num_txq != k) { + err = -EINVAL; + goto error; + } + + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + u16 num_rxq; + + if (idpf_is_queue_model_split(vport->rxq_model)) + num_rxq = rx_qgrp->splitq.num_rxq_sets; + else + num_rxq = rx_qgrp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++, k++) { + struct idpf_queue *rxq; + + if (idpf_is_queue_model_split(vport->rxq_model)) + rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq; + else + rxq = rx_qgrp->singleq.rxqs[j]; + + vqv[k].queue_type = cpu_to_le32(rxq->q_type); + vqv[k].queue_id = cpu_to_le32(rxq->q_id); + vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx); + vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx); + } + } + + if (idpf_is_queue_model_split(vport->txq_model)) { + if (vport->num_rxq != k - vport->num_complq) { + err = -EINVAL; + goto error; + } + } else { + if (vport->num_rxq != k - vport->num_txq) { + err = -EINVAL; + goto error; + } + } + + /* Chunk up the vector info into multiple messages */ + config_sz = sizeof(struct virtchnl2_queue_vector_maps); + chunk_sz = sizeof(struct virtchnl2_queue_vector); + + num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz), + num_q); + num_msgs = DIV_ROUND_UP(num_q, num_chunks); + + buf_sz = struct_size(vqvm, qv_maps, num_chunks); + vqvm = kzalloc(buf_sz, GFP_KERNEL); + if (!vqvm) { + err = -ENOMEM; + goto error; + } + + mutex_lock(&vport->vc_buf_lock); + + for (i = 0, k = 0; i < num_msgs; i++) { + memset(vqvm, 0, buf_sz); + vqvm->vport_id = cpu_to_le32(vport->vport_id); + vqvm->num_qv_maps = cpu_to_le16(num_chunks); + memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks); + + if (map) { + err = idpf_send_mb_msg(adapter, + VIRTCHNL2_OP_MAP_QUEUE_VECTOR, + buf_sz, (u8 *)vqvm); + if (!err) + err = idpf_wait_for_event(adapter, vport, + IDPF_VC_MAP_IRQ, + IDPF_VC_MAP_IRQ_ERR); + } else { + err = idpf_send_mb_msg(adapter, + VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR, + buf_sz, (u8 *)vqvm); + if (!err) + err = + idpf_min_wait_for_event(adapter, vport, + IDPF_VC_UNMAP_IRQ, + IDPF_VC_UNMAP_IRQ_ERR); + } + if (err) + goto mbx_error; + + k += num_chunks; + num_q -= num_chunks; + num_chunks = min(num_chunks, num_q); + /* Recalculate buffer size */ + buf_sz = struct_size(vqvm, qv_maps, num_chunks); + } + +mbx_error: + mutex_unlock(&vport->vc_buf_lock); + kfree(vqvm); +error: + kfree(vqv); + + return err; +} + +/** + * 
idpf_send_enable_queues_msg - send enable queues virtchnl message + * @vport: Virtual port private data structure + * + * Will send enable queues virtchnl message. Returns 0 on success, negative on + * failure. + */ +int idpf_send_enable_queues_msg(struct idpf_vport *vport) +{ + return idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_ENABLE_QUEUES); +} + +/** + * idpf_send_disable_queues_msg - send disable queues virtchnl message + * @vport: Virtual port private data structure + * + * Will send disable queues virtchnl message. Returns 0 on success, negative + * on failure. + */ +int idpf_send_disable_queues_msg(struct idpf_vport *vport) +{ + int err, i; + + err = idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_DISABLE_QUEUES); + if (err) + return err; + + /* switch to poll mode as interrupts will be disabled after disable + * queues virtchnl message is sent + */ + for (i = 0; i < vport->num_txq; i++) + set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags); + + /* schedule the napi to receive all the marker packets */ + for (i = 0; i < vport->num_q_vectors; i++) + napi_schedule(&vport->q_vectors[i].napi); + + return idpf_wait_for_marker_event(vport); +} + +/** + * idpf_convert_reg_to_queue_chunks - Copy queue chunk information to the right + * structure + * @dchunks: Destination chunks to store data to + * @schunks: Source chunks to copy data from + * @num_chunks: number of chunks to copy + */ +static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchunks, + struct virtchnl2_queue_reg_chunk *schunks, + u16 num_chunks) +{ + u16 i; + + for (i = 0; i < num_chunks; i++) { + dchunks[i].type = schunks[i].type; + dchunks[i].start_queue_id = schunks[i].start_queue_id; + dchunks[i].num_queues = schunks[i].num_queues; + } +} + +/** + * idpf_send_delete_queues_msg - send delete queues virtchnl message + * @vport: Virtual port private data structure + * + * Will send delete queues virtchnl message. Return 0 on success, negative on + * failure. + */ +int idpf_send_delete_queues_msg(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_create_vport *vport_params; + struct virtchnl2_queue_reg_chunks *chunks; + struct virtchnl2_del_ena_dis_queues *eq; + struct idpf_vport_config *vport_config; + u16 vport_idx = vport->idx; + int buf_size, err; + u16 num_chunks; + + vport_config = adapter->vport_config[vport_idx]; + if (vport_config->req_qs_chunks) { + struct virtchnl2_add_queues *vc_aq = + (struct virtchnl2_add_queues *)vport_config->req_qs_chunks; + chunks = &vc_aq->chunks; + } else { + vport_params = adapter->vport_params_recvd[vport_idx]; + chunks = &vport_params->chunks; + } + + num_chunks = le16_to_cpu(chunks->num_chunks); + buf_size = struct_size(eq, chunks.chunks, num_chunks); + + eq = kzalloc(buf_size, GFP_KERNEL); + if (!eq) + return -ENOMEM; + + eq->vport_id = cpu_to_le32(vport->vport_id); + eq->chunks.num_chunks = cpu_to_le16(num_chunks); + + idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks, + num_chunks); + + mutex_lock(&vport->vc_buf_lock); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEL_QUEUES, + buf_size, (u8 *)eq); + if (err) + goto rel_lock; + + err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DEL_QUEUES, + IDPF_VC_DEL_QUEUES_ERR); + +rel_lock: + mutex_unlock(&vport->vc_buf_lock); + kfree(eq); + + return err; +} + +/** + * idpf_send_config_queues_msg - Send config queues virtchnl message + * @vport: Virtual port private data structure + * + * Will send config queues virtchnl message. 
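idpf_send_delete_queues_msg() above sizes its buffer with struct_size(), as does every chunked message in this file. For readers unfamiliar with the helper, its effect is:

/* struct_size(eq, chunks.chunks, num_chunks) evaluates to
 *
 *   sizeof(*eq) + num_chunks * sizeof(eq->chunks.chunks[0])
 *
 * but saturates instead of wrapping if the multiplication would
 * overflow, so a corrupt num_chunks cannot produce a short
 * allocation.
 */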
Returns 0 on success, negative on + * failure. + */ +int idpf_send_config_queues_msg(struct idpf_vport *vport) +{ + int err; + + err = idpf_send_config_tx_queues_msg(vport); + if (err) + return err; + + return idpf_send_config_rx_queues_msg(vport); +} + +/** + * idpf_send_add_queues_msg - Send virtchnl add queues message + * @vport: Virtual port private data structure + * @num_tx_q: number of transmit queues + * @num_complq: number of transmit completion queues + * @num_rx_q: number of receive queues + * @num_rx_bufq: number of receive buffer queues + * + * Returns 0 on success, negative on failure. vport _MUST_ be const here as + * we should not change any fields within vport itself in this function. + */ +int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q, + u16 num_complq, u16 num_rx_q, u16 num_rx_bufq) +{ + struct idpf_adapter *adapter = vport->adapter; + struct idpf_vport_config *vport_config; + struct virtchnl2_add_queues aq = { }; + struct virtchnl2_add_queues *vc_msg; + u16 vport_idx = vport->idx; + int size, err; + + vport_config = adapter->vport_config[vport_idx]; + + aq.vport_id = cpu_to_le32(vport->vport_id); + aq.num_tx_q = cpu_to_le16(num_tx_q); + aq.num_tx_complq = cpu_to_le16(num_complq); + aq.num_rx_q = cpu_to_le16(num_rx_q); + aq.num_rx_bufq = cpu_to_le16(num_rx_bufq); + + mutex_lock(&((struct idpf_vport *)vport)->vc_buf_lock); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ADD_QUEUES, + sizeof(struct virtchnl2_add_queues), (u8 *)&aq); + if (err) + goto rel_lock; + + /* We want vport to be const to prevent incidental code changes making + * changes to the vport config. We're making a special exception here + * to discard const to use the virtchnl. + */ + err = idpf_wait_for_event(adapter, (struct idpf_vport *)vport, + IDPF_VC_ADD_QUEUES, IDPF_VC_ADD_QUEUES_ERR); + if (err) + goto rel_lock; + + kfree(vport_config->req_qs_chunks); + vport_config->req_qs_chunks = NULL; + + vc_msg = (struct virtchnl2_add_queues *)vport->vc_msg; + /* compare vc_msg num queues with vport num queues */ + if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q || + le16_to_cpu(vc_msg->num_rx_q) != num_rx_q || + le16_to_cpu(vc_msg->num_tx_complq) != num_complq || + le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq) { + err = -EINVAL; + goto rel_lock; + } + + size = struct_size(vc_msg, chunks.chunks, + le16_to_cpu(vc_msg->chunks.num_chunks)); + vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL); + if (!vport_config->req_qs_chunks) { + err = -ENOMEM; + goto rel_lock; + } + +rel_lock: + mutex_unlock(&((struct idpf_vport *)vport)->vc_buf_lock); + + return err; +} + +/** + * idpf_send_alloc_vectors_msg - Send virtchnl alloc vectors message + * @adapter: Driver specific private structure + * @num_vectors: number of vectors to be allocated + * + * Returns 0 on success, negative on failure. 
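The kmemdup() in idpf_send_add_queues_msg() above is what makes the earlier DEL_QUEUES path precise: the granted chunks are preserved outside the shared vc_msg buffer, which later traffic overwrites. Restating the two key lines:

/* Duplicate the granted queue chunks out of vc_msg; the delete path
 * consumes req_qs_chunks so exactly the granted queue IDs are
 * returned to the device.
 */
size = struct_size(vc_msg, chunks.chunks,
		   le16_to_cpu(vc_msg->chunks.num_chunks));
vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);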
+ */ +int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors) +{ + struct virtchnl2_alloc_vectors *alloc_vec, *rcvd_vec; + struct virtchnl2_alloc_vectors ac = { }; + u16 num_vchunks; + int size, err; + + ac.num_vectors = cpu_to_le16(num_vectors); + + mutex_lock(&adapter->vc_buf_lock); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ALLOC_VECTORS, + sizeof(ac), (u8 *)&ac); + if (err) + goto rel_lock; + + err = idpf_wait_for_event(adapter, NULL, IDPF_VC_ALLOC_VECTORS, + IDPF_VC_ALLOC_VECTORS_ERR); + if (err) + goto rel_lock; + + rcvd_vec = (struct virtchnl2_alloc_vectors *)adapter->vc_msg; + num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks); + + size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks); + if (size > sizeof(adapter->vc_msg)) { + err = -EINVAL; + goto rel_lock; + } + + kfree(adapter->req_vec_chunks); + adapter->req_vec_chunks = NULL; + adapter->req_vec_chunks = kmemdup(adapter->vc_msg, size, GFP_KERNEL); + if (!adapter->req_vec_chunks) { + err = -ENOMEM; + goto rel_lock; + } + + alloc_vec = adapter->req_vec_chunks; + if (le16_to_cpu(alloc_vec->num_vectors) < num_vectors) { + kfree(adapter->req_vec_chunks); + adapter->req_vec_chunks = NULL; + err = -EINVAL; + } + +rel_lock: + mutex_unlock(&adapter->vc_buf_lock); + + return err; +} + +/** + * idpf_send_dealloc_vectors_msg - Send virtchnl deallocate vectors message + * @adapter: Driver specific private structure + * + * Returns 0 on success, negative on failure. + */ +int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter) +{ + struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks; + struct virtchnl2_vector_chunks *vcs = &ac->vchunks; + int buf_size, err; + + buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks)); + + mutex_lock(&adapter->vc_buf_lock); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEALLOC_VECTORS, buf_size, + (u8 *)vcs); + if (err) + goto rel_lock; + + err = idpf_min_wait_for_event(adapter, NULL, IDPF_VC_DEALLOC_VECTORS, + IDPF_VC_DEALLOC_VECTORS_ERR); + if (err) + goto rel_lock; + + kfree(adapter->req_vec_chunks); + adapter->req_vec_chunks = NULL; + +rel_lock: + mutex_unlock(&adapter->vc_buf_lock); + + return err; +} + +/** + * idpf_get_max_vfs - Get max number of VFs supported + * @adapter: Driver specific private structure + * + * Returns max number of VFs + */ +static int idpf_get_max_vfs(struct idpf_adapter *adapter) +{ + return le16_to_cpu(adapter->caps.max_sriov_vfs); +} + +/** + * idpf_send_set_sriov_vfs_msg - Send virtchnl set sriov vfs message + * @adapter: Driver specific private structure + * @num_vfs: number of virtual functions to be created + * + * Returns 0 on success, negative on failure. + */ +int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs) +{ + struct virtchnl2_sriov_vfs_info svi = { }; + int err; + + svi.num_vfs = cpu_to_le16(num_vfs); + + mutex_lock(&adapter->vc_buf_lock); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_SRIOV_VFS, + sizeof(svi), (u8 *)&svi); + if (err) + goto rel_lock; + + err = idpf_wait_for_event(adapter, NULL, IDPF_VC_SET_SRIOV_VFS, + IDPF_VC_SET_SRIOV_VFS_ERR); + +rel_lock: + mutex_unlock(&adapter->vc_buf_lock); + + return err; +} + +/** + * idpf_send_get_stats_msg - Send virtchnl get statistics message + * @vport: vport to get stats for + * + * Returns 0 on success, negative on failure.
+ */ +int idpf_send_get_stats_msg(struct idpf_vport *vport) +{ + struct idpf_netdev_priv *np = netdev_priv(vport->netdev); + struct rtnl_link_stats64 *netstats = &np->netstats; + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_vport_stats stats_msg = { }; + struct virtchnl2_vport_stats *stats; + int err; + + /* Don't send get_stats message if the link is down */ + if (np->state <= __IDPF_VPORT_DOWN) + return 0; + + stats_msg.vport_id = cpu_to_le32(vport->vport_id); + + mutex_lock(&vport->vc_buf_lock); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_STATS, + sizeof(struct virtchnl2_vport_stats), + (u8 *)&stats_msg); + if (err) + goto rel_lock; + + err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_STATS, + IDPF_VC_GET_STATS_ERR); + if (err) + goto rel_lock; + + stats = (struct virtchnl2_vport_stats *)vport->vc_msg; + + spin_lock_bh(&np->stats_lock); + + netstats->rx_packets = le64_to_cpu(stats->rx_unicast) + + le64_to_cpu(stats->rx_multicast) + + le64_to_cpu(stats->rx_broadcast); + netstats->rx_bytes = le64_to_cpu(stats->rx_bytes); + netstats->rx_dropped = le64_to_cpu(stats->rx_discards); + netstats->rx_over_errors = le64_to_cpu(stats->rx_overflow_drop); + netstats->rx_length_errors = le64_to_cpu(stats->rx_invalid_frame_length); + + netstats->tx_packets = le64_to_cpu(stats->tx_unicast) + + le64_to_cpu(stats->tx_multicast) + + le64_to_cpu(stats->tx_broadcast); + netstats->tx_bytes = le64_to_cpu(stats->tx_bytes); + netstats->tx_errors = le64_to_cpu(stats->tx_errors); + netstats->tx_dropped = le64_to_cpu(stats->tx_discards); + + vport->port_stats.vport_stats = *stats; + + spin_unlock_bh(&np->stats_lock); + +rel_lock: + mutex_unlock(&vport->vc_buf_lock); + + return err; +} + +/** + * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set rss lut message + * @vport: virtual port data structure + * @get: flag to set or get rss look up table + * + * Returns 0 on success, negative on failure. 
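+ *
+ * On a get, if the CP reports a lut_entries count different from the
+ * cached rss_lut_size, the cached LUT is freed and reallocated to match
+ * before the table is copied out of the mailbox buffer.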
+ */ +int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_rss_lut *recv_rl; + struct idpf_rss_data *rss_data; + struct virtchnl2_rss_lut *rl; + int buf_size, lut_buf_size; + int i, err; + + rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; + buf_size = struct_size(rl, lut, rss_data->rss_lut_size); + rl = kzalloc(buf_size, GFP_KERNEL); + if (!rl) + return -ENOMEM; + + rl->vport_id = cpu_to_le32(vport->vport_id); + mutex_lock(&vport->vc_buf_lock); + + if (!get) { + rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size); + for (i = 0; i < rss_data->rss_lut_size; i++) + rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_LUT, + buf_size, (u8 *)rl); + if (err) + goto free_mem; + + err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_LUT, + IDPF_VC_SET_RSS_LUT_ERR); + + goto free_mem; + } + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_LUT, + buf_size, (u8 *)rl); + if (err) + goto free_mem; + + err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_LUT, + IDPF_VC_GET_RSS_LUT_ERR); + if (err) + goto free_mem; + + recv_rl = (struct virtchnl2_rss_lut *)vport->vc_msg; + if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries)) + goto do_memcpy; + + rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries); + kfree(rss_data->rss_lut); + + lut_buf_size = rss_data->rss_lut_size * sizeof(u32); + rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL); + if (!rss_data->rss_lut) { + rss_data->rss_lut_size = 0; + err = -ENOMEM; + goto free_mem; + } + +do_memcpy: + /* Copy the LUT entries themselves, not the message header */ + memcpy(rss_data->rss_lut, recv_rl->lut, + rss_data->rss_lut_size * sizeof(u32)); +free_mem: + mutex_unlock(&vport->vc_buf_lock); + kfree(rl); + + return err; +} + +/** + * idpf_send_get_set_rss_key_msg - Send virtchnl get or set rss key message + * @vport: virtual port data structure + * @get: flag to set or get the RSS key + * + * Returns 0 on success, negative on failure. + */ +int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_rss_key *recv_rk; + struct idpf_rss_data *rss_data; + struct virtchnl2_rss_key *rk; + int i, buf_size, err; + + rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; + buf_size = struct_size(rk, key_flex, rss_data->rss_key_size); + rk = kzalloc(buf_size, GFP_KERNEL); + if (!rk) + return -ENOMEM; + + rk->vport_id = cpu_to_le32(vport->vport_id); + mutex_lock(&vport->vc_buf_lock); + + if (get) { + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_KEY, + buf_size, (u8 *)rk); + if (err) + goto error; + + err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_KEY, + IDPF_VC_GET_RSS_KEY_ERR); + if (err) + goto error; + + recv_rk = (struct virtchnl2_rss_key *)vport->vc_msg; + if (rss_data->rss_key_size != + le16_to_cpu(recv_rk->key_len)) { + rss_data->rss_key_size = + min_t(u16, NETDEV_RSS_KEY_LEN, + le16_to_cpu(recv_rk->key_len)); + kfree(rss_data->rss_key); + rss_data->rss_key = kzalloc(rss_data->rss_key_size, + GFP_KERNEL); + if (!rss_data->rss_key) { + rss_data->rss_key_size = 0; + err = -ENOMEM; + goto error; + } + } + memcpy(rss_data->rss_key, recv_rk->key_flex, + rss_data->rss_key_size); + } else { + rk->key_len = cpu_to_le16(rss_data->rss_key_size); + for (i = 0; i < rss_data->rss_key_size; i++) + rk->key_flex[i] = rss_data->rss_key[i]; + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_KEY, + buf_size, (u8 *)rk); + if (err) + goto
error; + + err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_KEY, + IDPF_VC_SET_RSS_KEY_ERR); + } + +error: + mutex_unlock(&vport->vc_buf_lock); + kfree(rk); + + return err; +} + +/** + * idpf_fill_ptype_lookup - Fill L3 specific fields in ptype lookup table + * @ptype: ptype lookup table + * @pstate: state machine for ptype lookup table + * @ipv4: ipv4 or ipv6 + * @frag: fragmentation allowed + * + */ +static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype, + struct idpf_ptype_state *pstate, + bool ipv4, bool frag) +{ + if (!pstate->outer_ip || !pstate->outer_frag) { + ptype->outer_ip = IDPF_RX_PTYPE_OUTER_IP; + pstate->outer_ip = true; + + if (ipv4) + ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV4; + else + ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV6; + + if (frag) { + ptype->outer_frag = IDPF_RX_PTYPE_FRAG; + pstate->outer_frag = true; + } + } else { + ptype->tunnel_type = IDPF_RX_PTYPE_TUNNEL_IP_IP; + pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP; + + if (ipv4) + ptype->tunnel_end_prot = + IDPF_RX_PTYPE_TUNNEL_END_IPV4; + else + ptype->tunnel_end_prot = + IDPF_RX_PTYPE_TUNNEL_END_IPV6; + + if (frag) + ptype->tunnel_end_frag = IDPF_RX_PTYPE_FRAG; + } +} + +/** + * idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info + * @vport: virtual port data structure + * + * Returns 0 on success, negative on failure. + */ +int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) +{ + struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup; + struct virtchnl2_get_ptype_info get_ptype_info; + int max_ptype, ptypes_recvd = 0, ptype_offset; + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_get_ptype_info *ptype_info; + u16 next_ptype_id = 0; + int err = 0, i, j, k; + + if (idpf_is_queue_model_split(vport->rxq_model)) + max_ptype = IDPF_RX_MAX_PTYPE; + else + max_ptype = IDPF_RX_MAX_BASE_PTYPE; + + memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup)); + + ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); + if (!ptype_info) + return -ENOMEM; + + mutex_lock(&adapter->vc_buf_lock); + + while (next_ptype_id < max_ptype) { + get_ptype_info.start_ptype_id = cpu_to_le16(next_ptype_id); + + if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype) + get_ptype_info.num_ptypes = + cpu_to_le16(max_ptype - next_ptype_id); + else + get_ptype_info.num_ptypes = + cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO, + sizeof(struct virtchnl2_get_ptype_info), + (u8 *)&get_ptype_info); + if (err) + goto vc_buf_unlock; + + err = idpf_wait_for_event(adapter, NULL, IDPF_VC_GET_PTYPE_INFO, + IDPF_VC_GET_PTYPE_INFO_ERR); + if (err) + goto vc_buf_unlock; + + memcpy(ptype_info, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN); + + ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes); + if (ptypes_recvd > max_ptype) { + err = -EINVAL; + goto vc_buf_unlock; + } + + next_ptype_id = le16_to_cpu(get_ptype_info.start_ptype_id) + + le16_to_cpu(get_ptype_info.num_ptypes); + + ptype_offset = IDPF_RX_PTYPE_HDR_SZ; + + for (i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) { + struct idpf_ptype_state pstate = { }; + struct virtchnl2_ptype *ptype; + u16 id; + + ptype = (struct virtchnl2_ptype *) + ((u8 *)ptype_info + ptype_offset); + + ptype_offset += IDPF_GET_PTYPE_SIZE(ptype); + if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN) { + err = -EINVAL; + goto vc_buf_unlock; + } + + /* 0xFFFF indicates end of ptypes */ + if (le16_to_cpu(ptype->ptype_id_10) == + IDPF_INVALID_PTYPE_ID) { + err = 0; + goto vc_buf_unlock; + } + + if 
(idpf_is_queue_model_split(vport->rxq_model)) + k = le16_to_cpu(ptype->ptype_id_10); + else + k = ptype->ptype_id_8; + + if (ptype->proto_id_count) + ptype_lkup[k].known = 1; + + for (j = 0; j < ptype->proto_id_count; j++) { + id = le16_to_cpu(ptype->proto_id[j]); + switch (id) { + case VIRTCHNL2_PROTO_HDR_GRE: + if (pstate.tunnel_state == + IDPF_PTYPE_TUNNEL_IP) { + ptype_lkup[k].tunnel_type = + IDPF_RX_PTYPE_TUNNEL_IP_GRENAT; + pstate.tunnel_state |= + IDPF_PTYPE_TUNNEL_IP_GRENAT; + } + break; + case VIRTCHNL2_PROTO_HDR_MAC: + ptype_lkup[k].outer_ip = + IDPF_RX_PTYPE_OUTER_L2; + if (pstate.tunnel_state == + IDPF_TUN_IP_GRE) { + ptype_lkup[k].tunnel_type = + IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC; + pstate.tunnel_state |= + IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC; + } + break; + case VIRTCHNL2_PROTO_HDR_IPV4: + idpf_fill_ptype_lookup(&ptype_lkup[k], + &pstate, true, + false); + break; + case VIRTCHNL2_PROTO_HDR_IPV6: + idpf_fill_ptype_lookup(&ptype_lkup[k], + &pstate, false, + false); + break; + case VIRTCHNL2_PROTO_HDR_IPV4_FRAG: + idpf_fill_ptype_lookup(&ptype_lkup[k], + &pstate, true, + true); + break; + case VIRTCHNL2_PROTO_HDR_IPV6_FRAG: + idpf_fill_ptype_lookup(&ptype_lkup[k], + &pstate, false, + true); + break; + case VIRTCHNL2_PROTO_HDR_UDP: + ptype_lkup[k].inner_prot = + IDPF_RX_PTYPE_INNER_PROT_UDP; + break; + case VIRTCHNL2_PROTO_HDR_TCP: + ptype_lkup[k].inner_prot = + IDPF_RX_PTYPE_INNER_PROT_TCP; + break; + case VIRTCHNL2_PROTO_HDR_SCTP: + ptype_lkup[k].inner_prot = + IDPF_RX_PTYPE_INNER_PROT_SCTP; + break; + case VIRTCHNL2_PROTO_HDR_ICMP: + ptype_lkup[k].inner_prot = + IDPF_RX_PTYPE_INNER_PROT_ICMP; + break; + case VIRTCHNL2_PROTO_HDR_PAY: + ptype_lkup[k].payload_layer = + IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2; + break; + case VIRTCHNL2_PROTO_HDR_ICMPV6: + case VIRTCHNL2_PROTO_HDR_IPV6_EH: + case VIRTCHNL2_PROTO_HDR_PRE_MAC: + case VIRTCHNL2_PROTO_HDR_POST_MAC: + case VIRTCHNL2_PROTO_HDR_ETHERTYPE: + case VIRTCHNL2_PROTO_HDR_SVLAN: + case VIRTCHNL2_PROTO_HDR_CVLAN: + case VIRTCHNL2_PROTO_HDR_MPLS: + case VIRTCHNL2_PROTO_HDR_MMPLS: + case VIRTCHNL2_PROTO_HDR_PTP: + case VIRTCHNL2_PROTO_HDR_CTRL: + case VIRTCHNL2_PROTO_HDR_LLDP: + case VIRTCHNL2_PROTO_HDR_ARP: + case VIRTCHNL2_PROTO_HDR_ECP: + case VIRTCHNL2_PROTO_HDR_EAPOL: + case VIRTCHNL2_PROTO_HDR_PPPOD: + case VIRTCHNL2_PROTO_HDR_PPPOE: + case VIRTCHNL2_PROTO_HDR_IGMP: + case VIRTCHNL2_PROTO_HDR_AH: + case VIRTCHNL2_PROTO_HDR_ESP: + case VIRTCHNL2_PROTO_HDR_IKE: + case VIRTCHNL2_PROTO_HDR_NATT_KEEP: + case VIRTCHNL2_PROTO_HDR_L2TPV2: + case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL: + case VIRTCHNL2_PROTO_HDR_L2TPV3: + case VIRTCHNL2_PROTO_HDR_GTP: + case VIRTCHNL2_PROTO_HDR_GTP_EH: + case VIRTCHNL2_PROTO_HDR_GTPCV2: + case VIRTCHNL2_PROTO_HDR_GTPC_TEID: + case VIRTCHNL2_PROTO_HDR_GTPU: + case VIRTCHNL2_PROTO_HDR_GTPU_UL: + case VIRTCHNL2_PROTO_HDR_GTPU_DL: + case VIRTCHNL2_PROTO_HDR_ECPRI: + case VIRTCHNL2_PROTO_HDR_VRRP: + case VIRTCHNL2_PROTO_HDR_OSPF: + case VIRTCHNL2_PROTO_HDR_TUN: + case VIRTCHNL2_PROTO_HDR_NVGRE: + case VIRTCHNL2_PROTO_HDR_VXLAN: + case VIRTCHNL2_PROTO_HDR_VXLAN_GPE: + case VIRTCHNL2_PROTO_HDR_GENEVE: + case VIRTCHNL2_PROTO_HDR_NSH: + case VIRTCHNL2_PROTO_HDR_QUIC: + case VIRTCHNL2_PROTO_HDR_PFCP: + case VIRTCHNL2_PROTO_HDR_PFCP_NODE: + case VIRTCHNL2_PROTO_HDR_PFCP_SESSION: + case VIRTCHNL2_PROTO_HDR_RTP: + case VIRTCHNL2_PROTO_HDR_NO_PROTO: + break; + default: + break; + } + } + } + } + +vc_buf_unlock: + mutex_unlock(&adapter->vc_buf_lock); + kfree(ptype_info); + + return err; +} + +/** + * 
idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback + * message + * @vport: virtual port data structure + * + * Returns 0 on success, negative on failure. + */ +int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport) +{ + struct virtchnl2_loopback loopback; + int err; + + loopback.vport_id = cpu_to_le32(vport->vport_id); + loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK); + + mutex_lock(&vport->vc_buf_lock); + + err = idpf_send_mb_msg(vport->adapter, VIRTCHNL2_OP_LOOPBACK, + sizeof(loopback), (u8 *)&loopback); + if (err) + goto rel_lock; + + err = idpf_wait_for_event(vport->adapter, vport, + IDPF_VC_LOOPBACK_STATE, + IDPF_VC_LOOPBACK_STATE_ERR); + +rel_lock: + mutex_unlock(&vport->vc_buf_lock); + + return err; +} + +/** + * idpf_find_ctlq - Given a type and id, find ctlq info + * @hw: hardware struct + * @type: type of ctrlq to find + * @id: ctlq id to find + * + * Returns pointer to found ctlq info struct, NULL otherwise. + */ +static struct idpf_ctlq_info *idpf_find_ctlq(struct idpf_hw *hw, + enum idpf_ctlq_type type, int id) +{ + struct idpf_ctlq_info *cq, *tmp; + + list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list) + if (cq->q_id == id && cq->cq_type == type) + return cq; + + return NULL; +} + +/** + * idpf_init_dflt_mbx - Setup default mailbox parameters and make request + * @adapter: adapter info struct + * + * Returns 0 on success, negative otherwise + */ +int idpf_init_dflt_mbx(struct idpf_adapter *adapter) +{ + struct idpf_ctlq_create_info ctlq_info[] = { + { + .type = IDPF_CTLQ_TYPE_MAILBOX_TX, + .id = IDPF_DFLT_MBX_ID, + .len = IDPF_DFLT_MBX_Q_LEN, + .buf_size = IDPF_CTLQ_MAX_BUF_LEN + }, + { + .type = IDPF_CTLQ_TYPE_MAILBOX_RX, + .id = IDPF_DFLT_MBX_ID, + .len = IDPF_DFLT_MBX_Q_LEN, + .buf_size = IDPF_CTLQ_MAX_BUF_LEN + } + }; + struct idpf_hw *hw = &adapter->hw; + int err; + + adapter->dev_ops.reg_ops.ctlq_reg_init(ctlq_info); + + err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info); + if (err) + return err; + + hw->asq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_TX, + IDPF_DFLT_MBX_ID); + hw->arq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_RX, + IDPF_DFLT_MBX_ID); + + if (!hw->asq || !hw->arq) { + idpf_ctlq_deinit(hw); + + return -ENOENT; + } + + adapter->state = __IDPF_STARTUP; + + return 0; +} + +/** + * idpf_deinit_dflt_mbx - Free up ctlqs setup + * @adapter: Driver specific private data structure + */ +void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter) +{ + if (adapter->hw.arq && adapter->hw.asq) { + idpf_mb_clean(adapter); + idpf_ctlq_deinit(&adapter->hw); + } + adapter->hw.arq = NULL; + adapter->hw.asq = NULL; +} + +/** + * idpf_vport_params_buf_rel - Release memory for MailBox resources + * @adapter: Driver specific private data structure + * + * Will release memory to hold the vport parameters received on MailBox + */ +static void idpf_vport_params_buf_rel(struct idpf_adapter *adapter) +{ + kfree(adapter->vport_params_recvd); + adapter->vport_params_recvd = NULL; + kfree(adapter->vport_params_reqd); + adapter->vport_params_reqd = NULL; + kfree(adapter->vport_ids); + adapter->vport_ids = NULL; +} + +/** + * idpf_vport_params_buf_alloc - Allocate memory for MailBox resources + * @adapter: Driver specific private data structure + * + * Will alloc memory to hold the vport parameters received on MailBox + */ +static int idpf_vport_params_buf_alloc(struct idpf_adapter *adapter) +{ + u16 num_max_vports = idpf_get_max_vports(adapter); + + adapter->vport_params_reqd = kcalloc(num_max_vports, + 
sizeof(*adapter->vport_params_reqd), + GFP_KERNEL); + if (!adapter->vport_params_reqd) + return -ENOMEM; + + adapter->vport_params_recvd = kcalloc(num_max_vports, + sizeof(*adapter->vport_params_recvd), + GFP_KERNEL); + if (!adapter->vport_params_recvd) + goto err_mem; + + adapter->vport_ids = kcalloc(num_max_vports, sizeof(u32), GFP_KERNEL); + if (!adapter->vport_ids) + goto err_mem; + + if (adapter->vport_config) + return 0; + + adapter->vport_config = kcalloc(num_max_vports, + sizeof(*adapter->vport_config), + GFP_KERNEL); + if (!adapter->vport_config) + goto err_mem; + + return 0; + +err_mem: + idpf_vport_params_buf_rel(adapter); + + return -ENOMEM; +} + +/** + * idpf_vc_core_init - Initialize state machine and get driver specific + * resources + * @adapter: Driver specific private structure + * + * This function will initialize the state machine and request all necessary + * resources required by the device driver. Once the state machine is + * initialized, it allocates memory to store vport specific information and + * requests the required interrupts. + * + * Returns 0 on success, -EAGAIN if the function should be called again, + * otherwise negative on failure. + */ +int idpf_vc_core_init(struct idpf_adapter *adapter) +{ + int task_delay = 30; + u16 num_max_vports; + int err = 0; + + while (adapter->state != __IDPF_INIT_SW) { + switch (adapter->state) { + case __IDPF_STARTUP: + if (idpf_send_ver_msg(adapter)) + goto init_failed; + adapter->state = __IDPF_VER_CHECK; + goto restart; + case __IDPF_VER_CHECK: + err = idpf_recv_ver_msg(adapter); + if (err == -EIO) { + return err; + } else if (err == -EAGAIN) { + adapter->state = __IDPF_STARTUP; + goto restart; + } else if (err) { + goto init_failed; + } + if (idpf_send_get_caps_msg(adapter)) + goto init_failed; + adapter->state = __IDPF_GET_CAPS; + goto restart; + case __IDPF_GET_CAPS: + if (idpf_recv_get_caps_msg(adapter)) + goto init_failed; + adapter->state = __IDPF_INIT_SW; + break; + default: + dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n", + adapter->state); + goto init_failed; + } + break; +restart: + /* Give enough time before proceeding further with + * the state machine + */ + msleep(task_delay); + } + + pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter)); + num_max_vports = idpf_get_max_vports(adapter); + adapter->max_vports = num_max_vports; + adapter->vports = kcalloc(num_max_vports, sizeof(*adapter->vports), + GFP_KERNEL); + if (!adapter->vports) + return -ENOMEM; + + if (!adapter->netdevs) { + adapter->netdevs = kcalloc(num_max_vports, + sizeof(struct net_device *), + GFP_KERNEL); + if (!adapter->netdevs) { + err = -ENOMEM; + goto err_netdev_alloc; + } + } + + err = idpf_vport_params_buf_alloc(adapter); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to alloc vport params buffer: %d\n", + err); + goto err_netdev_alloc; + } + + /* Start the mailbox task before requesting vectors. This ensures the + * vector information response from the mailbox is handled + */ + queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); + + queue_delayed_work(adapter->serv_wq, &adapter->serv_task, + msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); + + err = idpf_intr_req(adapter); + if (err) { + dev_err(&adapter->pdev->dev, "failed to enable interrupt vectors: %d\n", + err); + goto err_intr_req; + } + + idpf_init_avail_queues(adapter); + + /* Skew the delay for init tasks for each function based on fn number + * to prevent every function from making the same call simultaneously.
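+ * For example, PCI functions 0 through 7 (devfn & 0x07) kick off their
+ * init task after 0, 5, 10, ... 35 ms respectively.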
+ */ + queue_delayed_work(adapter->init_wq, &adapter->init_task, + msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); + + goto no_err; + +err_intr_req: + cancel_delayed_work_sync(&adapter->serv_task); + cancel_delayed_work_sync(&adapter->mbx_task); + idpf_vport_params_buf_rel(adapter); +err_netdev_alloc: + kfree(adapter->vports); + adapter->vports = NULL; +no_err: + return err; + +init_failed: + /* Don't retry if we're trying to go down, just bail. */ + if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) + return err; + + if (++adapter->mb_wait_count > IDPF_MB_MAX_ERR) { + dev_err(&adapter->pdev->dev, "Failed to establish mailbox communications with hardware\n"); + + return -EFAULT; + } + /* If we reached here, it is possible that the mailbox queue + * initialization register writes have not taken effect yet. Retry the + * mailbox initialization. + */ + adapter->state = __IDPF_STARTUP; + idpf_deinit_dflt_mbx(adapter); + set_bit(IDPF_HR_DRV_LOAD, adapter->flags); + queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task, + msecs_to_jiffies(task_delay)); + + return -EAGAIN; +} + +/** + * idpf_vc_core_deinit - Device deinit routine + * @adapter: Driver specific private structure + * + */ +void idpf_vc_core_deinit(struct idpf_adapter *adapter) +{ + int i; + + idpf_deinit_task(adapter); + idpf_intr_rel(adapter); + /* Set all the bits, as we don't know which vc_state the vchnl_wq is + * waiting on, and wake up the virtchnl workqueue even if it is waiting + * for a response, since we are going down + */ + for (i = 0; i < IDPF_VC_NBITS; i++) + set_bit(i, adapter->vc_state); + wake_up(&adapter->vchnl_wq); + + cancel_delayed_work_sync(&adapter->serv_task); + cancel_delayed_work_sync(&adapter->mbx_task); + + idpf_vport_params_buf_rel(adapter); + + /* Clear all the bits */ + for (i = 0; i < IDPF_VC_NBITS; i++) + clear_bit(i, adapter->vc_state); + + kfree(adapter->vports); + adapter->vports = NULL; +} + +/** + * idpf_vport_alloc_vec_indexes - Get relative vector indexes + * @vport: virtual port data struct + * + * This function requests the vector information required for the vport and + * stores the vector indexes received from the 'global vector distribution' + * in the vport's queue vectors array.
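+ * The request is sized as max(num_txq, num_rxq), which allows a single
+ * vector to service a Tx/Rx queue pair.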
+ * + * Return 0 on success, error on failure + */ +int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport) +{ + struct idpf_vector_info vec_info; + int num_alloc_vecs; + + vec_info.num_curr_vecs = vport->num_q_vectors; + vec_info.num_req_vecs = max(vport->num_txq, vport->num_rxq); + vec_info.default_vport = vport->default_vport; + vec_info.index = vport->idx; + + num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter, + vport->q_vector_idxs, + &vec_info); + if (num_alloc_vecs <= 0) { + dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n", + num_alloc_vecs); + return -EINVAL; + } + + vport->num_q_vectors = num_alloc_vecs; + + return 0; +} + +/** + * idpf_vport_init - Initialize virtual port + * @vport: virtual port to be initialized + * @max_q: vport max queue info + * + * Will initialize vport with the info received through MB earlier + */ +void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_create_vport *vport_msg; + struct idpf_vport_config *vport_config; + u16 tx_itr[] = {2, 8, 64, 128, 256}; + u16 rx_itr[] = {2, 8, 32, 96, 128}; + struct idpf_rss_data *rss_data; + u16 idx = vport->idx; + + vport_config = adapter->vport_config[idx]; + rss_data = &vport_config->user_config.rss_data; + vport_msg = adapter->vport_params_recvd[idx]; + + vport_config->max_q.max_txq = max_q->max_txq; + vport_config->max_q.max_rxq = max_q->max_rxq; + vport_config->max_q.max_complq = max_q->max_complq; + vport_config->max_q.max_bufq = max_q->max_bufq; + + vport->txq_model = le16_to_cpu(vport_msg->txq_model); + vport->rxq_model = le16_to_cpu(vport_msg->rxq_model); + vport->vport_type = le16_to_cpu(vport_msg->vport_type); + vport->vport_id = le32_to_cpu(vport_msg->vport_id); + + rss_data->rss_key_size = min_t(u16, NETDEV_RSS_KEY_LEN, + le16_to_cpu(vport_msg->rss_key_size)); + rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size); + + ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr); + vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - IDPF_PACKET_HDR_PAD; + + /* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */ + memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS); + memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS); + + idpf_vport_init_num_qs(vport, vport_msg); + idpf_vport_calc_num_q_desc(vport); + idpf_vport_calc_num_q_groups(vport); + idpf_vport_alloc_vec_indexes(vport); + + vport->crc_enable = adapter->crc_enable; +} + +/** + * idpf_get_vec_ids - Initialize vector id from Mailbox parameters + * @adapter: adapter structure to get the mailbox vector id + * @vecids: Array of vector ids + * @num_vecids: number of vector ids + * @chunks: vector ids received over mailbox + * + * Will initialize the mailbox vector id which is received from the + * get capabilities and data queue vector ids with ids received as + * mailbox parameters. 
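+ * For example, vecids[0] always takes the mailbox vector; a chunk with
+ * start_vector_id 4 and num_vectors 3 then fills vecids[1..3] with
+ * 4, 5 and 6, subject to the num_vecids bound.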
+ * Returns number of ids filled + */ +int idpf_get_vec_ids(struct idpf_adapter *adapter, + u16 *vecids, int num_vecids, + struct virtchnl2_vector_chunks *chunks) +{ + u16 num_chunks = le16_to_cpu(chunks->num_vchunks); + int num_vecid_filled = 0; + int i, j; + + vecids[num_vecid_filled] = adapter->mb_vector.v_idx; + num_vecid_filled++; + + for (j = 0; j < num_chunks; j++) { + struct virtchnl2_vector_chunk *chunk; + u16 start_vecid, num_vec; + + chunk = &chunks->vchunks[j]; + num_vec = le16_to_cpu(chunk->num_vectors); + start_vecid = le16_to_cpu(chunk->start_vector_id); + + for (i = 0; i < num_vec; i++) { + if ((num_vecid_filled + i) < num_vecids) { + vecids[num_vecid_filled + i] = start_vecid; + start_vecid++; + } else { + break; + } + } + num_vecid_filled = num_vecid_filled + i; + } + + return num_vecid_filled; +} + +/** + * idpf_vport_get_queue_ids - Initialize queue id from Mailbox parameters + * @qids: Array of queue ids + * @num_qids: number of queue ids + * @q_type: queue model + * @chunks: queue ids received over mailbox + * + * Will initialize all queue ids with ids received as mailbox parameters + * Returns number of ids filled + */ +static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type, + struct virtchnl2_queue_reg_chunks *chunks) +{ + u16 num_chunks = le16_to_cpu(chunks->num_chunks); + u32 num_q_id_filled = 0, i; + u32 start_q_id, num_q; + + while (num_chunks--) { + struct virtchnl2_queue_reg_chunk *chunk; + + chunk = &chunks->chunks[num_chunks]; + if (le32_to_cpu(chunk->type) != q_type) + continue; + + num_q = le32_to_cpu(chunk->num_queues); + start_q_id = le32_to_cpu(chunk->start_queue_id); + + for (i = 0; i < num_q; i++) { + if ((num_q_id_filled + i) < num_qids) { + qids[num_q_id_filled + i] = start_q_id; + start_q_id++; + } else { + break; + } + } + num_q_id_filled = num_q_id_filled + i; + } + + return num_q_id_filled; +} + +/** + * __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters + * @vport: virtual port for which the queues ids are initialized + * @qids: queue ids + * @num_qids: number of queue ids + * @q_type: type of queue + * + * Will initialize all queue ids with ids received as mailbox + * parameters. Returns number of queue ids initialized. 
+ */ +static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, + const u32 *qids, + int num_qids, + u32 q_type) +{ + struct idpf_queue *q; + int i, j, k = 0; + + switch (q_type) { + case VIRTCHNL2_QUEUE_TYPE_TX: + for (i = 0; i < vport->num_txq_grp; i++) { + struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + + for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) { + tx_qgrp->txqs[j]->q_id = qids[k]; + tx_qgrp->txqs[j]->q_type = + VIRTCHNL2_QUEUE_TYPE_TX; + } + } + break; + case VIRTCHNL2_QUEUE_TYPE_RX: + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + u16 num_rxq; + + if (idpf_is_queue_model_split(vport->rxq_model)) + num_rxq = rx_qgrp->splitq.num_rxq_sets; + else + num_rxq = rx_qgrp->singleq.num_rxq; + + for (j = 0; j < num_rxq && k < num_qids; j++, k++) { + if (idpf_is_queue_model_split(vport->rxq_model)) + q = &rx_qgrp->splitq.rxq_sets[j]->rxq; + else + q = rx_qgrp->singleq.rxqs[j]; + q->q_id = qids[k]; + q->q_type = VIRTCHNL2_QUEUE_TYPE_RX; + } + } + break; + case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION: + for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) { + struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + + tx_qgrp->complq->q_id = qids[k]; + tx_qgrp->complq->q_type = + VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; + } + break; + case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER: + for (i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + u8 num_bufqs = vport->num_bufqs_per_qgrp; + + for (j = 0; j < num_bufqs && k < num_qids; j++, k++) { + q = &rx_qgrp->splitq.bufq_sets[j].bufq; + q->q_id = qids[k]; + q->q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; + } + } + break; + default: + break; + } + + return k; +} + +/** + * idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters + * @vport: virtual port for which the queues ids are initialized + * + * Will initialize all queue ids with ids received as mailbox parameters. + * Returns 0 on success, negative if all the queues are not initialized. 
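+ * Tx queue ids are initialized first, then Rx; for split queue models
+ * the completion and buffer queue ids follow.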
+ */ +int idpf_vport_queue_ids_init(struct idpf_vport *vport) +{ + struct virtchnl2_create_vport *vport_params; + struct virtchnl2_queue_reg_chunks *chunks; + struct idpf_vport_config *vport_config; + u16 vport_idx = vport->idx; + int num_ids, err = 0; + u16 q_type; + u32 *qids; + + vport_config = vport->adapter->vport_config[vport_idx]; + if (vport_config->req_qs_chunks) { + struct virtchnl2_add_queues *vc_aq = + (struct virtchnl2_add_queues *)vport_config->req_qs_chunks; + chunks = &vc_aq->chunks; + } else { + vport_params = vport->adapter->vport_params_recvd[vport_idx]; + chunks = &vport_params->chunks; + } + + qids = kcalloc(IDPF_MAX_QIDS, sizeof(u32), GFP_KERNEL); + if (!qids) + return -ENOMEM; + + num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, + VIRTCHNL2_QUEUE_TYPE_TX, + chunks); + if (num_ids < vport->num_txq) { + err = -EINVAL; + goto mem_rel; + } + num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, + VIRTCHNL2_QUEUE_TYPE_TX); + if (num_ids < vport->num_txq) { + err = -EINVAL; + goto mem_rel; + } + + num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, + VIRTCHNL2_QUEUE_TYPE_RX, + chunks); + if (num_ids < vport->num_rxq) { + err = -EINVAL; + goto mem_rel; + } + num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, + VIRTCHNL2_QUEUE_TYPE_RX); + if (num_ids < vport->num_rxq) { + err = -EINVAL; + goto mem_rel; + } + + if (!idpf_is_queue_model_split(vport->txq_model)) + goto check_rxq; + + q_type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; + num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks); + if (num_ids < vport->num_complq) { + err = -EINVAL; + goto mem_rel; + } + num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type); + if (num_ids < vport->num_complq) { + err = -EINVAL; + goto mem_rel; + } + +check_rxq: + if (!idpf_is_queue_model_split(vport->rxq_model)) + goto mem_rel; + + q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; + num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks); + if (num_ids < vport->num_bufq) { + err = -EINVAL; + goto mem_rel; + } + num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type); + if (num_ids < vport->num_bufq) + err = -EINVAL; + +mem_rel: + kfree(qids); + + return err; +} + +/** + * idpf_vport_adjust_qs - Adjust to new requested queues + * @vport: virtual port data struct + * + * Renegotiate queues. Returns 0 on success, negative on failure. 
+ */ +int idpf_vport_adjust_qs(struct idpf_vport *vport) +{ + struct virtchnl2_create_vport vport_msg; + int err; + + vport_msg.txq_model = cpu_to_le16(vport->txq_model); + vport_msg.rxq_model = cpu_to_le16(vport->rxq_model); + err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg, + NULL); + if (err) + return err; + + idpf_vport_init_num_qs(vport, &vport_msg); + idpf_vport_calc_num_q_groups(vport); + + return 0; +} + +/** + * idpf_is_capability_ena - Default implementation of capability checking + * @adapter: Private data struct + * @all: all or one flag + * @field: caps field to check for flags + * @flag: flag to check + * + * Return true if all capabilities are supported, false otherwise + */ +bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all, + enum idpf_cap_field field, u64 flag) +{ + u8 *caps = (u8 *)&adapter->caps; + u32 *cap_field; + + if (!caps) + return false; + + if (field == IDPF_BASE_CAPS) + return false; + + cap_field = (u32 *)(caps + field); + + if (all) + return (*cap_field & flag) == flag; + else + return !!(*cap_field & flag); +} + +/** + * idpf_get_vport_id: Get vport id + * @vport: virtual port structure + * + * Return vport id from the adapter persistent data + */ +u32 idpf_get_vport_id(struct idpf_vport *vport) +{ + struct virtchnl2_create_vport *vport_msg; + + vport_msg = vport->adapter->vport_params_recvd[vport->idx]; + + return le32_to_cpu(vport_msg->vport_id); +} + +/** + * idpf_add_del_mac_filters - Add/del mac filters + * @vport: Virtual port data structure + * @np: Netdev private structure + * @add: Add or delete flag + * @async: Don't wait for return message + * + * Returns 0 on success, error on failure. + **/ +int idpf_add_del_mac_filters(struct idpf_vport *vport, + struct idpf_netdev_priv *np, + bool add, bool async) +{ + struct virtchnl2_mac_addr_list *ma_list = NULL; + struct idpf_adapter *adapter = np->adapter; + struct idpf_vport_config *vport_config; + enum idpf_vport_config_flags mac_flag; + struct pci_dev *pdev = adapter->pdev; + enum idpf_vport_vc_state vc, vc_err; + struct virtchnl2_mac_addr *mac_addr; + struct idpf_mac_filter *f, *tmp; + u32 num_msgs, total_filters = 0; + int i = 0, k, err = 0; + u32 vop; + + vport_config = adapter->vport_config[np->vport_idx]; + spin_lock_bh(&vport_config->mac_filter_list_lock); + + /* Find the number of newly added filters */ + list_for_each_entry(f, &vport_config->user_config.mac_filter_list, + list) { + if (add && f->add) + total_filters++; + else if (!add && f->remove) + total_filters++; + } + + if (!total_filters) { + spin_unlock_bh(&vport_config->mac_filter_list_lock); + + return 0; + } + + /* Fill all the new filters into virtchannel message */ + mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr), + GFP_ATOMIC); + if (!mac_addr) { + err = -ENOMEM; + spin_unlock_bh(&vport_config->mac_filter_list_lock); + goto error; + } + + list_for_each_entry_safe(f, tmp, &vport_config->user_config.mac_filter_list, + list) { + if (add && f->add) { + ether_addr_copy(mac_addr[i].addr, f->macaddr); + i++; + f->add = false; + if (i == total_filters) + break; + } + if (!add && f->remove) { + ether_addr_copy(mac_addr[i].addr, f->macaddr); + i++; + f->remove = false; + if (i == total_filters) + break; + } + } + + spin_unlock_bh(&vport_config->mac_filter_list_lock); + + if (add) { + vop = VIRTCHNL2_OP_ADD_MAC_ADDR; + vc = IDPF_VC_ADD_MAC_ADDR; + vc_err = IDPF_VC_ADD_MAC_ADDR_ERR; + mac_flag = IDPF_VPORT_ADD_MAC_REQ; + } else { + vop = VIRTCHNL2_OP_DEL_MAC_ADDR; + vc = 
IDPF_VC_DEL_MAC_ADDR; + vc_err = IDPF_VC_DEL_MAC_ADDR_ERR; + mac_flag = IDPF_VPORT_DEL_MAC_REQ; + } + + /* Chunk up the filters into multiple messages to avoid + * sending a control queue message buffer that is too large + */ + num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG); + + if (!async) + mutex_lock(&vport->vc_buf_lock); + + for (i = 0, k = 0; i < num_msgs; i++) { + u32 entries_size, buf_size, num_entries; + + num_entries = min_t(u32, total_filters, + IDPF_NUM_FILTERS_PER_MSG); + entries_size = sizeof(struct virtchnl2_mac_addr) * num_entries; + buf_size = struct_size(ma_list, mac_addr_list, num_entries); + + if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) { + kfree(ma_list); + ma_list = kzalloc(buf_size, GFP_ATOMIC); + if (!ma_list) { + err = -ENOMEM; + goto list_prep_error; + } + } else { + memset(ma_list, 0, buf_size); + } + + ma_list->vport_id = cpu_to_le32(np->vport_id); + ma_list->num_mac_addr = cpu_to_le16(num_entries); + memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size); + + if (async) + set_bit(mac_flag, vport_config->flags); + + err = idpf_send_mb_msg(adapter, vop, buf_size, (u8 *)ma_list); + if (err) + goto mbx_error; + + if (!async) { + err = idpf_wait_for_event(adapter, vport, vc, vc_err); + if (err) + goto mbx_error; + } + + k += num_entries; + total_filters -= num_entries; + } + +mbx_error: + if (!async) + mutex_unlock(&vport->vc_buf_lock); + kfree(ma_list); +list_prep_error: + kfree(mac_addr); +error: + if (err) + dev_err(&pdev->dev, "Failed to add or del mac filters %d", err); + + return err; +} + +/** + * idpf_set_promiscuous - set promiscuous and send message to mailbox + * @adapter: Driver specific private structure + * @config_data: Vport specific config data + * @vport_id: Vport identifier + * + * Request to enable promiscuous mode for the vport. Message is sent + * asynchronously and won't wait for response. Returns 0 on success, negative + * on failure; + */ +int idpf_set_promiscuous(struct idpf_adapter *adapter, + struct idpf_vport_user_config_data *config_data, + u32 vport_id) +{ + struct virtchnl2_promisc_info vpi; + u16 flags = 0; + int err; + + if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags)) + flags |= VIRTCHNL2_UNICAST_PROMISC; + if (test_bit(__IDPF_PROMISC_MC, config_data->user_flags)) + flags |= VIRTCHNL2_MULTICAST_PROMISC; + + vpi.vport_id = cpu_to_le32(vport_id); + vpi.flags = cpu_to_le16(flags); + + err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE, + sizeof(struct virtchnl2_promisc_info), + (u8 *)&vpi); + + return err; +} diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h new file mode 100644 index 000000000000..07e72c72d156 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h @@ -0,0 +1,1273 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023 Intel Corporation */ + +#ifndef _VIRTCHNL2_H_ +#define _VIRTCHNL2_H_ + +/* All opcodes associated with virtchnl2 are prefixed with virtchnl2 or + * VIRTCHNL2. Any future opcodes, offloads/capabilities, structures, + * and defines must be prefixed with virtchnl2 or VIRTCHNL2 to avoid confusion. + * + * PF/VF uses the virtchnl2 interface defined in this header file to communicate + * with device Control Plane (CP). Driver and the CP may run on different + * platforms with different endianness. To avoid byte order discrepancies, + * all the structures in this header follow little-endian format. 
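+ * For example, a driver writing or reading any multi-byte field below
+ * must go through cpu_to_le16/32/64() and le16/32/64_to_cpu(), as the
+ * virtchnl message code above does.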
+ * + * This is an interface definition file where existing enums and their values + * must remain unchanged over time, so we specify explicit values for all enums. + */ + +#include "virtchnl2_lan_desc.h" + +/* This macro is used to generate compilation errors if a structure + * is not exactly the correct length. + */ +#define VIRTCHNL2_CHECK_STRUCT_LEN(n, X) \ + static_assert((n) == sizeof(struct X)) + +/* New major set of opcodes introduced and so leaving room for + * old misc opcodes to be added in future. Also these opcodes may only + * be used if both the PF and VF have successfully negotiated the + * VIRTCHNL version as 2.0 during VIRTCHNL2_OP_VERSION exchange. + */ +enum virtchnl2_op { + VIRTCHNL2_OP_UNKNOWN = 0, + VIRTCHNL2_OP_VERSION = 1, + VIRTCHNL2_OP_GET_CAPS = 500, + VIRTCHNL2_OP_CREATE_VPORT = 501, + VIRTCHNL2_OP_DESTROY_VPORT = 502, + VIRTCHNL2_OP_ENABLE_VPORT = 503, + VIRTCHNL2_OP_DISABLE_VPORT = 504, + VIRTCHNL2_OP_CONFIG_TX_QUEUES = 505, + VIRTCHNL2_OP_CONFIG_RX_QUEUES = 506, + VIRTCHNL2_OP_ENABLE_QUEUES = 507, + VIRTCHNL2_OP_DISABLE_QUEUES = 508, + VIRTCHNL2_OP_ADD_QUEUES = 509, + VIRTCHNL2_OP_DEL_QUEUES = 510, + VIRTCHNL2_OP_MAP_QUEUE_VECTOR = 511, + VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR = 512, + VIRTCHNL2_OP_GET_RSS_KEY = 513, + VIRTCHNL2_OP_SET_RSS_KEY = 514, + VIRTCHNL2_OP_GET_RSS_LUT = 515, + VIRTCHNL2_OP_SET_RSS_LUT = 516, + VIRTCHNL2_OP_GET_RSS_HASH = 517, + VIRTCHNL2_OP_SET_RSS_HASH = 518, + VIRTCHNL2_OP_SET_SRIOV_VFS = 519, + VIRTCHNL2_OP_ALLOC_VECTORS = 520, + VIRTCHNL2_OP_DEALLOC_VECTORS = 521, + VIRTCHNL2_OP_EVENT = 522, + VIRTCHNL2_OP_GET_STATS = 523, + VIRTCHNL2_OP_RESET_VF = 524, + VIRTCHNL2_OP_GET_EDT_CAPS = 525, + VIRTCHNL2_OP_GET_PTYPE_INFO = 526, + /* Opcode 527 and 528 are reserved for VIRTCHNL2_OP_GET_PTYPE_ID and + * VIRTCHNL2_OP_GET_PTYPE_INFO_RAW. + * Opcodes 529, 530, 531, 532 and 533 are reserved. + */ + VIRTCHNL2_OP_LOOPBACK = 534, + VIRTCHNL2_OP_ADD_MAC_ADDR = 535, + VIRTCHNL2_OP_DEL_MAC_ADDR = 536, + VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE = 537, +}; + +/** + * enum virtchnl2_vport_type - Type of virtual port. + * @VIRTCHNL2_VPORT_TYPE_DEFAULT: Default virtual port type. + */ +enum virtchnl2_vport_type { + VIRTCHNL2_VPORT_TYPE_DEFAULT = 0, +}; + +/** + * enum virtchnl2_queue_model - Type of queue model. + * @VIRTCHNL2_QUEUE_MODEL_SINGLE: Single queue model. + * @VIRTCHNL2_QUEUE_MODEL_SPLIT: Split queue model. + * + * In the single queue model, the same transmit descriptor queue is used by + * software to post descriptors to hardware and by hardware to post completed + * descriptors to software. + * Likewise, the same receive descriptor queue is used by hardware to post + * completions to software and by software to post buffers to hardware. + * + * In the split queue model, hardware uses transmit completion queues to post + * descriptor/buffer completions to software, while software uses transmit + * descriptor queues to post descriptors to hardware. + * Likewise, hardware posts descriptor completions to the receive descriptor + * queue, while software uses receive buffer queues to post buffers to hardware. 
+ */ +enum virtchnl2_queue_model { + VIRTCHNL2_QUEUE_MODEL_SINGLE = 0, + VIRTCHNL2_QUEUE_MODEL_SPLIT = 1, +}; + +/* Checksum offload capability flags */ +enum virtchnl2_cap_txrx_csum { + VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 = BIT(0), + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP = BIT(1), + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP = BIT(2), + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP = BIT(3), + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP = BIT(4), + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP = BIT(5), + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP = BIT(6), + VIRTCHNL2_CAP_TX_CSUM_GENERIC = BIT(7), + VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 = BIT(8), + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP = BIT(9), + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP = BIT(10), + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP = BIT(11), + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP = BIT(12), + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP = BIT(13), + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP = BIT(14), + VIRTCHNL2_CAP_RX_CSUM_GENERIC = BIT(15), + VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL = BIT(16), + VIRTCHNL2_CAP_TX_CSUM_L3_DOUBLE_TUNNEL = BIT(17), + VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL = BIT(18), + VIRTCHNL2_CAP_RX_CSUM_L3_DOUBLE_TUNNEL = BIT(19), + VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL = BIT(20), + VIRTCHNL2_CAP_TX_CSUM_L4_DOUBLE_TUNNEL = BIT(21), + VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL = BIT(22), + VIRTCHNL2_CAP_RX_CSUM_L4_DOUBLE_TUNNEL = BIT(23), +}; + +/* Segmentation offload capability flags */ +enum virtchnl2_cap_seg { + VIRTCHNL2_CAP_SEG_IPV4_TCP = BIT(0), + VIRTCHNL2_CAP_SEG_IPV4_UDP = BIT(1), + VIRTCHNL2_CAP_SEG_IPV4_SCTP = BIT(2), + VIRTCHNL2_CAP_SEG_IPV6_TCP = BIT(3), + VIRTCHNL2_CAP_SEG_IPV6_UDP = BIT(4), + VIRTCHNL2_CAP_SEG_IPV6_SCTP = BIT(5), + VIRTCHNL2_CAP_SEG_GENERIC = BIT(6), + VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL = BIT(7), + VIRTCHNL2_CAP_SEG_TX_DOUBLE_TUNNEL = BIT(8), +}; + +/* Receive Side Scaling Flow type capability flags */ +enum virtchnl2_cap_rss { + VIRTCHNL2_CAP_RSS_IPV4_TCP = BIT(0), + VIRTCHNL2_CAP_RSS_IPV4_UDP = BIT(1), + VIRTCHNL2_CAP_RSS_IPV4_SCTP = BIT(2), + VIRTCHNL2_CAP_RSS_IPV4_OTHER = BIT(3), + VIRTCHNL2_CAP_RSS_IPV6_TCP = BIT(4), + VIRTCHNL2_CAP_RSS_IPV6_UDP = BIT(5), + VIRTCHNL2_CAP_RSS_IPV6_SCTP = BIT(6), + VIRTCHNL2_CAP_RSS_IPV6_OTHER = BIT(7), + VIRTCHNL2_CAP_RSS_IPV4_AH = BIT(8), + VIRTCHNL2_CAP_RSS_IPV4_ESP = BIT(9), + VIRTCHNL2_CAP_RSS_IPV4_AH_ESP = BIT(10), + VIRTCHNL2_CAP_RSS_IPV6_AH = BIT(11), + VIRTCHNL2_CAP_RSS_IPV6_ESP = BIT(12), + VIRTCHNL2_CAP_RSS_IPV6_AH_ESP = BIT(13), +}; + +/* Header split capability flags */ +enum virtchnl2_cap_rx_hsplit_at { + /* for prepended metadata */ + VIRTCHNL2_CAP_RX_HSPLIT_AT_L2 = BIT(0), + /* all VLANs go into header buffer */ + VIRTCHNL2_CAP_RX_HSPLIT_AT_L3 = BIT(1), + VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 = BIT(2), + VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6 = BIT(3), +}; + +/* Receive Side Coalescing offload capability flags */ +enum virtchnl2_cap_rsc { + VIRTCHNL2_CAP_RSC_IPV4_TCP = BIT(0), + VIRTCHNL2_CAP_RSC_IPV4_SCTP = BIT(1), + VIRTCHNL2_CAP_RSC_IPV6_TCP = BIT(2), + VIRTCHNL2_CAP_RSC_IPV6_SCTP = BIT(3), +}; + +/* Other capability flags */ +enum virtchnl2_cap_other { + VIRTCHNL2_CAP_RDMA = BIT_ULL(0), + VIRTCHNL2_CAP_SRIOV = BIT_ULL(1), + VIRTCHNL2_CAP_MACFILTER = BIT_ULL(2), + VIRTCHNL2_CAP_FLOW_DIRECTOR = BIT_ULL(3), + /* Queue based scheduling using split queue model */ + VIRTCHNL2_CAP_SPLITQ_QSCHED = BIT_ULL(4), + VIRTCHNL2_CAP_CRC = BIT_ULL(5), + VIRTCHNL2_CAP_ADQ = BIT_ULL(6), + VIRTCHNL2_CAP_WB_ON_ITR = BIT_ULL(7), + VIRTCHNL2_CAP_PROMISC = BIT_ULL(8), + VIRTCHNL2_CAP_LINK_SPEED = BIT_ULL(9), + VIRTCHNL2_CAP_INLINE_IPSEC = BIT_ULL(10), + 
VIRTCHNL2_CAP_LARGE_NUM_QUEUES = BIT_ULL(11), + VIRTCHNL2_CAP_VLAN = BIT_ULL(12), + VIRTCHNL2_CAP_PTP = BIT_ULL(13), + /* EDT: Earliest Departure Time capability used for Timing Wheel */ + VIRTCHNL2_CAP_EDT = BIT_ULL(14), + VIRTCHNL2_CAP_ADV_RSS = BIT_ULL(15), + VIRTCHNL2_CAP_FDIR = BIT_ULL(16), + VIRTCHNL2_CAP_RX_FLEX_DESC = BIT_ULL(17), + VIRTCHNL2_CAP_PTYPE = BIT_ULL(18), + VIRTCHNL2_CAP_LOOPBACK = BIT_ULL(19), + /* Other capability 20 is reserved */ + + /* this must be the last capability */ + VIRTCHNL2_CAP_OEM = BIT_ULL(63), +}; + +/* underlying device type */ +enum virtchl2_device_type { + VIRTCHNL2_MEV_DEVICE = 0, +}; + +/** + * enum virtchnl2_txq_sched_mode - Transmit Queue Scheduling Modes. + * @VIRTCHNL2_TXQ_SCHED_MODE_QUEUE: Queue mode is the legacy mode i.e. inorder + * completions where descriptors and buffers + * are completed at the same time. + * @VIRTCHNL2_TXQ_SCHED_MODE_FLOW: Flow scheduling mode allows for out of order + * packet processing where descriptors are + * cleaned in order, but buffers can be + * completed out of order. + */ +enum virtchnl2_txq_sched_mode { + VIRTCHNL2_TXQ_SCHED_MODE_QUEUE = 0, + VIRTCHNL2_TXQ_SCHED_MODE_FLOW = 1, +}; + +/** + * enum virtchnl2_rxq_flags - Receive Queue Feature flags. + * @VIRTCHNL2_RXQ_RSC: Rx queue RSC flag. + * @VIRTCHNL2_RXQ_HDR_SPLIT: Rx queue header split flag. + * @VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK: When set, packet descriptors are flushed + * by hardware immediately after processing + * each packet. + * @VIRTCHNL2_RX_DESC_SIZE_16BYTE: Rx queue 16 byte descriptor size. + * @VIRTCHNL2_RX_DESC_SIZE_32BYTE: Rx queue 32 byte descriptor size. + */ +enum virtchnl2_rxq_flags { + VIRTCHNL2_RXQ_RSC = BIT(0), + VIRTCHNL2_RXQ_HDR_SPLIT = BIT(1), + VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK = BIT(2), + VIRTCHNL2_RX_DESC_SIZE_16BYTE = BIT(3), + VIRTCHNL2_RX_DESC_SIZE_32BYTE = BIT(4), +}; + +/* Type of RSS algorithm */ +enum virtchnl2_rss_alg { + VIRTCHNL2_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0, + VIRTCHNL2_RSS_ALG_R_ASYMMETRIC = 1, + VIRTCHNL2_RSS_ALG_TOEPLITZ_SYMMETRIC = 2, + VIRTCHNL2_RSS_ALG_XOR_SYMMETRIC = 3, +}; + +/* Type of event */ +enum virtchnl2_event_codes { + VIRTCHNL2_EVENT_UNKNOWN = 0, + VIRTCHNL2_EVENT_LINK_CHANGE = 1, + /* Event type 2, 3 are reserved */ +}; + +/* Transmit and Receive queue types are valid in legacy as well as split queue + * models. With Split Queue model, 2 additional types are introduced - + * TX_COMPLETION and RX_BUFFER. In split queue model, receive corresponds to + * the queue where hardware posts completions. + */ +enum virtchnl2_queue_type { + VIRTCHNL2_QUEUE_TYPE_TX = 0, + VIRTCHNL2_QUEUE_TYPE_RX = 1, + VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION = 2, + VIRTCHNL2_QUEUE_TYPE_RX_BUFFER = 3, + VIRTCHNL2_QUEUE_TYPE_CONFIG_TX = 4, + VIRTCHNL2_QUEUE_TYPE_CONFIG_RX = 5, + /* Queue types 6, 7, 8, 9 are reserved */ + VIRTCHNL2_QUEUE_TYPE_MBX_TX = 10, + VIRTCHNL2_QUEUE_TYPE_MBX_RX = 11, +}; + +/* Interrupt throttling rate index */ +enum virtchnl2_itr_idx { + VIRTCHNL2_ITR_IDX_0 = 0, + VIRTCHNL2_ITR_IDX_1 = 1, +}; + +/** + * enum virtchnl2_mac_addr_type - MAC address types. + * @VIRTCHNL2_MAC_ADDR_PRIMARY: PF/VF driver should set this type for the + * primary/device unicast MAC address filter for + * VIRTCHNL2_OP_ADD_MAC_ADDR and + * VIRTCHNL2_OP_DEL_MAC_ADDR. This allows for the + * underlying control plane function to accurately + * track the MAC address and for VM/function reset. 
+ * + * @VIRTCHNL2_MAC_ADDR_EXTRA: PF/VF driver should set this type for any extra + * unicast and/or multicast filters that are being + * added/deleted via VIRTCHNL2_OP_ADD_MAC_ADDR or + * VIRTCHNL2_OP_DEL_MAC_ADDR. + */ +enum virtchnl2_mac_addr_type { + VIRTCHNL2_MAC_ADDR_PRIMARY = 1, + VIRTCHNL2_MAC_ADDR_EXTRA = 2, +}; + +/* Flags used for promiscuous mode */ +enum virtchnl2_promisc_flags { + VIRTCHNL2_UNICAST_PROMISC = BIT(0), + VIRTCHNL2_MULTICAST_PROMISC = BIT(1), +}; + +/* Protocol header type within a packet segment. A segment consists of one or + * more protocol headers that make up a logical group of protocol headers. Each + * logical group of protocol headers encapsulates or is encapsulated using/by + * tunneling or encapsulation protocols for network virtualization. + */ +enum virtchnl2_proto_hdr_type { + /* VIRTCHNL2_PROTO_HDR_ANY is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_ANY = 0, + VIRTCHNL2_PROTO_HDR_PRE_MAC = 1, + /* VIRTCHNL2_PROTO_HDR_MAC is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_MAC = 2, + VIRTCHNL2_PROTO_HDR_POST_MAC = 3, + VIRTCHNL2_PROTO_HDR_ETHERTYPE = 4, + VIRTCHNL2_PROTO_HDR_VLAN = 5, + VIRTCHNL2_PROTO_HDR_SVLAN = 6, + VIRTCHNL2_PROTO_HDR_CVLAN = 7, + VIRTCHNL2_PROTO_HDR_MPLS = 8, + VIRTCHNL2_PROTO_HDR_UMPLS = 9, + VIRTCHNL2_PROTO_HDR_MMPLS = 10, + VIRTCHNL2_PROTO_HDR_PTP = 11, + VIRTCHNL2_PROTO_HDR_CTRL = 12, + VIRTCHNL2_PROTO_HDR_LLDP = 13, + VIRTCHNL2_PROTO_HDR_ARP = 14, + VIRTCHNL2_PROTO_HDR_ECP = 15, + VIRTCHNL2_PROTO_HDR_EAPOL = 16, + VIRTCHNL2_PROTO_HDR_PPPOD = 17, + VIRTCHNL2_PROTO_HDR_PPPOE = 18, + /* VIRTCHNL2_PROTO_HDR_IPV4 is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_IPV4 = 19, + /* IPv4 and IPv6 Fragment header types are only associated to + * VIRTCHNL2_PROTO_HDR_IPV4 and VIRTCHNL2_PROTO_HDR_IPV6 respectively, + * cannot be used independently. 
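+ * (See the VIRTCHNL2_PROTO_HDR_IPV4_FRAG/IPV6_FRAG handling in
+ * idpf_send_get_rx_ptype_msg() above, where they set the fragmentation
+ * fields of the corresponding ptype entry.)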
+ */ + /* VIRTCHNL2_PROTO_HDR_IPV4_FRAG is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_IPV4_FRAG = 20, + /* VIRTCHNL2_PROTO_HDR_IPV6 is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_IPV6 = 21, + /* VIRTCHNL2_PROTO_HDR_IPV6_FRAG is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_IPV6_FRAG = 22, + VIRTCHNL2_PROTO_HDR_IPV6_EH = 23, + /* VIRTCHNL2_PROTO_HDR_UDP is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_UDP = 24, + /* VIRTCHNL2_PROTO_HDR_TCP is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_TCP = 25, + /* VIRTCHNL2_PROTO_HDR_SCTP is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_SCTP = 26, + /* VIRTCHNL2_PROTO_HDR_ICMP is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_ICMP = 27, + /* VIRTCHNL2_PROTO_HDR_ICMPV6 is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_ICMPV6 = 28, + VIRTCHNL2_PROTO_HDR_IGMP = 29, + VIRTCHNL2_PROTO_HDR_AH = 30, + VIRTCHNL2_PROTO_HDR_ESP = 31, + VIRTCHNL2_PROTO_HDR_IKE = 32, + VIRTCHNL2_PROTO_HDR_NATT_KEEP = 33, + /* VIRTCHNL2_PROTO_HDR_PAY is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_PAY = 34, + VIRTCHNL2_PROTO_HDR_L2TPV2 = 35, + VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL = 36, + VIRTCHNL2_PROTO_HDR_L2TPV3 = 37, + VIRTCHNL2_PROTO_HDR_GTP = 38, + VIRTCHNL2_PROTO_HDR_GTP_EH = 39, + VIRTCHNL2_PROTO_HDR_GTPCV2 = 40, + VIRTCHNL2_PROTO_HDR_GTPC_TEID = 41, + VIRTCHNL2_PROTO_HDR_GTPU = 42, + VIRTCHNL2_PROTO_HDR_GTPU_UL = 43, + VIRTCHNL2_PROTO_HDR_GTPU_DL = 44, + VIRTCHNL2_PROTO_HDR_ECPRI = 45, + VIRTCHNL2_PROTO_HDR_VRRP = 46, + VIRTCHNL2_PROTO_HDR_OSPF = 47, + /* VIRTCHNL2_PROTO_HDR_TUN is a mandatory protocol id */ + VIRTCHNL2_PROTO_HDR_TUN = 48, + VIRTCHNL2_PROTO_HDR_GRE = 49, + VIRTCHNL2_PROTO_HDR_NVGRE = 50, + VIRTCHNL2_PROTO_HDR_VXLAN = 51, + VIRTCHNL2_PROTO_HDR_VXLAN_GPE = 52, + VIRTCHNL2_PROTO_HDR_GENEVE = 53, + VIRTCHNL2_PROTO_HDR_NSH = 54, + VIRTCHNL2_PROTO_HDR_QUIC = 55, + VIRTCHNL2_PROTO_HDR_PFCP = 56, + VIRTCHNL2_PROTO_HDR_PFCP_NODE = 57, + VIRTCHNL2_PROTO_HDR_PFCP_SESSION = 58, + VIRTCHNL2_PROTO_HDR_RTP = 59, + VIRTCHNL2_PROTO_HDR_ROCE = 60, + VIRTCHNL2_PROTO_HDR_ROCEV1 = 61, + VIRTCHNL2_PROTO_HDR_ROCEV2 = 62, + /* Protocol ids up to 32767 are reserved. + * 32768 - 65534 are used for user defined protocol ids. + * VIRTCHNL2_PROTO_HDR_NO_PROTO is a mandatory protocol id. + */ + VIRTCHNL2_PROTO_HDR_NO_PROTO = 65535, +}; + +enum virtchl2_version { + VIRTCHNL2_VERSION_MINOR_0 = 0, + VIRTCHNL2_VERSION_MAJOR_2 = 2, +}; + +/** + * struct virtchnl2_edt_caps - Get EDT granularity and time horizon. + * @tstamp_granularity_ns: Timestamp granularity in nanoseconds. + * @time_horizon_ns: Total time window in nanoseconds. + * + * Associated with VIRTCHNL2_OP_GET_EDT_CAPS. + */ +struct virtchnl2_edt_caps { + __le64 tstamp_granularity_ns; + __le64 time_horizon_ns; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_edt_caps); + +/** + * struct virtchnl2_version_info - Version information. + * @major: Major version. + * @minor: Minor version. + * + * PF/VF posts its version number to the CP. CP responds with its version number + * in the same format, along with a return code. + * If there is a major version mismatch, then the PF/VF cannot operate. + * If there is a minor version mismatch, then the PF/VF can operate but should + * add a warning to the system log. + * + * This version opcode MUST always be specified as == 1, regardless of other + * changes in the API. The CP must always respond to this message without + * error regardless of version mismatch. + * + * Associated with VIRTCHNL2_OP_VERSION. 
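+ *
+ * For example, a driver at 2.0 sends major = VIRTCHNL2_VERSION_MAJOR_2
+ * and minor = VIRTCHNL2_VERSION_MINOR_0; a reply with a different major
+ * version means the two ends cannot interoperate.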
+ */ +struct virtchnl2_version_info { + __le32 major; + __le32 minor; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info); + +/** + * struct virtchnl2_get_capabilities - Capabilities info. + * @csum_caps: See enum virtchnl2_cap_txrx_csum. + * @seg_caps: See enum virtchnl2_cap_seg. + * @hsplit_caps: See enum virtchnl2_cap_rx_hsplit_at. + * @rsc_caps: See enum virtchnl2_cap_rsc. + * @rss_caps: See enum virtchnl2_cap_rss. + * @other_caps: See enum virtchnl2_cap_other. + * @mailbox_dyn_ctl: DYN_CTL register offset and vector id for mailbox + * provided by CP. + * @mailbox_vector_id: Mailbox vector id. + * @num_allocated_vectors: Maximum number of allocated vectors for the device. + * @max_rx_q: Maximum number of supported Rx queues. + * @max_tx_q: Maximum number of supported Tx queues. + * @max_rx_bufq: Maximum number of supported buffer queues. + * @max_tx_complq: Maximum number of supported completion queues. + * @max_sriov_vfs: The PF sends the maximum VFs it is requesting. The CP + * responds with the maximum VFs granted. + * @max_vports: Maximum number of vports that can be supported. + * @default_num_vports: Default number of vports driver should allocate on load. + * @max_tx_hdr_size: Max header length hardware can parse/checksum, in bytes. + * @max_sg_bufs_per_tx_pkt: Max number of scatter gather buffers that can be + * sent per transmit packet without needing to be + * linearized. + * @pad: Padding. + * @reserved: Reserved. + * @device_type: See enum virtchl2_device_type. + * @min_sso_packet_len: Min packet length supported by device for single + * segment offload. + * @max_hdr_buf_per_lso: Max number of header buffers that can be used for + * an LSO. + * @pad1: Padding for future extensions. + * + * Dataplane driver sends this message to CP to negotiate capabilities and + * provides a virtchnl2_get_capabilities structure with its desired + * capabilities, max_sriov_vfs and num_allocated_vectors. + * CP responds with a virtchnl2_get_capabilities structure updated + * with allowed capabilities and the other fields as below. + * If PF sets max_sriov_vfs as 0, CP will respond with max number of VFs + * that can be created by this PF. For any other value 'n', CP responds + * with max_sriov_vfs set to min(n, x) where x is the max number of VFs + * allowed by CP's policy. max_sriov_vfs is not applicable for VFs. + * If dataplane driver sets num_allocated_vectors as 0, CP will respond with 1, + * which is the default vector associated with the default mailbox. For any + * other value 'n', CP responds with a value <= n based on the CP's policy of + * max number of vectors for a PF. + * CP will respond with the vector ID of the mailbox allocated to the PF in + * mailbox_vector_id and the number of itr index registers in itr_idx_map. + * It also responds with the default number of vports that the dataplane driver + * should come up with in default_num_vports and the maximum number of vports + * that can be supported in max_vports. + * + * Associated with VIRTCHNL2_OP_GET_CAPS.
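+ *
+ * For example, a PF that requests max_sriov_vfs = 8 under a CP policy
+ * cap of 4 is granted max_sriov_vfs = 4, while a request of 0 returns
+ * the policy maximum itself.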
+ */ +struct virtchnl2_get_capabilities { + __le32 csum_caps; + __le32 seg_caps; + __le32 hsplit_caps; + __le32 rsc_caps; + __le64 rss_caps; + __le64 other_caps; + __le32 mailbox_dyn_ctl; + __le16 mailbox_vector_id; + __le16 num_allocated_vectors; + __le16 max_rx_q; + __le16 max_tx_q; + __le16 max_rx_bufq; + __le16 max_tx_complq; + __le16 max_sriov_vfs; + __le16 max_vports; + __le16 default_num_vports; + __le16 max_tx_hdr_size; + u8 max_sg_bufs_per_tx_pkt; + u8 pad[3]; + u8 reserved[4]; + __le32 device_type; + u8 min_sso_packet_len; + u8 max_hdr_buf_per_lso; + u8 pad1[10]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(80, virtchnl2_get_capabilities); + +/** + * struct virtchnl2_queue_reg_chunk - Single queue chunk. + * @type: See enum virtchnl2_queue_type. + * @start_queue_id: Start Queue ID. + * @num_queues: Number of queues in the chunk. + * @pad: Padding. + * @qtail_reg_start: Queue tail register offset. + * @qtail_reg_spacing: Queue tail register spacing. + * @pad1: Padding for future extensions. + */ +struct virtchnl2_queue_reg_chunk { + __le32 type; + __le32 start_queue_id; + __le32 num_queues; + __le32 pad; + __le64 qtail_reg_start; + __le32 qtail_reg_spacing; + u8 pad1[4]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_queue_reg_chunk); + +/** + * struct virtchnl2_queue_reg_chunks - Specify several chunks of contiguous + * queues. + * @num_chunks: Number of chunks. + * @pad: Padding. + * @chunks: Chunks of queue info. + */ +struct virtchnl2_queue_reg_chunks { + __le16 num_chunks; + u8 pad[6]; + struct virtchnl2_queue_reg_chunk chunks[]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks); + +/** + * struct virtchnl2_create_vport - Create vport config info. + * @vport_type: See enum virtchnl2_vport_type. + * @txq_model: See virtchnl2_queue_model. + * @rxq_model: See virtchnl2_queue_model. + * @num_tx_q: Number of Tx queues. + * @num_tx_complq: Valid only if txq_model is split queue. + * @num_rx_q: Number of Rx queues. + * @num_rx_bufq: Valid only if rxq_model is split queue. + * @default_rx_q: Relative receive queue index to be used as default. + * @vport_index: Used to align PF and CP in case of default multiple vports, + * it is filled by the PF and CP returns the same value, to + * enable the driver to support multiple asynchronous parallel + * CREATE_VPORT requests and associate a response to a specific + * request. + * @max_mtu: Max MTU. CP populates this field on response. + * @vport_id: Vport id. CP populates this field on response. + * @default_mac_addr: Default MAC address. + * @pad: Padding. + * @rx_desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions. + * @tx_desc_ids: See VIRTCHNL2_TX_DESC_IDS definitions. + * @pad1: Padding. + * @rss_algorithm: RSS algorithm. + * @rss_key_size: RSS key size. + * @rss_lut_size: RSS LUT size. + * @rx_split_pos: See enum virtchnl2_cap_rx_hsplit_at. + * @pad2: Padding. + * @chunks: Chunks of contiguous queues. + * + * PF sends this message to CP to create a vport by filling in required + * fields of virtchnl2_create_vport structure. + * CP responds with the updated virtchnl2_create_vport structure containing the + * necessary fields followed by chunks which in turn will have an array of + * num_chunks entries of virtchnl2_queue_chunk structures. + * + * Associated with VIRTCHNL2_OP_CREATE_VPORT. 
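Note that @chunks ends in a flexible array, so CREATE_VPORT buffers must be sized for the header plus the trailing queue chunks; a sketch using the kernel's struct_size() helper (from linux/overflow.h):

    /* Sketch: total message length for num_chunks trailing queue chunks. */
    static size_t example_create_vport_len(const struct virtchnl2_create_vport *v,
    					   u16 num_chunks)
    {
    	return struct_size(v, chunks.chunks, num_chunks);
    }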
+ */ +struct virtchnl2_create_vport { + __le16 vport_type; + __le16 txq_model; + __le16 rxq_model; + __le16 num_tx_q; + __le16 num_tx_complq; + __le16 num_rx_q; + __le16 num_rx_bufq; + __le16 default_rx_q; + __le16 vport_index; + /* CP populates the following fields on response */ + __le16 max_mtu; + __le32 vport_id; + u8 default_mac_addr[ETH_ALEN]; + __le16 pad; + __le64 rx_desc_ids; + __le64 tx_desc_ids; + u8 pad1[72]; + __le32 rss_algorithm; + __le16 rss_key_size; + __le16 rss_lut_size; + __le32 rx_split_pos; + u8 pad2[20]; + struct virtchnl2_queue_reg_chunks chunks; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(160, virtchnl2_create_vport); + +/** + * struct virtchnl2_vport - Vport ID info. + * @vport_id: Vport id. + * @pad: Padding for future extensions. + * + * PF sends this message to CP to destroy, enable or disable a vport by filling + * in the vport_id in virtchnl2_vport structure. + * CP responds with the status of the requested operation. + * + * Associated with VIRTCHNL2_OP_DESTROY_VPORT, VIRTCHNL2_OP_ENABLE_VPORT, + * VIRTCHNL2_OP_DISABLE_VPORT. + */ +struct virtchnl2_vport { + __le32 vport_id; + u8 pad[4]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_vport); + +/** + * struct virtchnl2_txq_info - Transmit queue config info. + * @dma_ring_addr: DMA address. + * @type: See enum virtchnl2_queue_type. + * @queue_id: Queue ID. + * @relative_queue_id: Valid only if queue model is split and type is transmit + * queue. Used in many to one mapping of transmit queues to + * completion queue. + * @model: See enum virtchnl2_queue_model. + * @sched_mode: See enum virtchnl2_txq_sched_mode. + * @qflags: TX queue feature flags. + * @ring_len: Ring length. + * @tx_compl_queue_id: Valid only if queue model is split and type is transmit + * queue. + * @peer_type: Valid only if queue type is VIRTCHNL2_QUEUE_TYPE_MAILBOX_TX. + * @peer_rx_queue_id: Valid only if queue type is CONFIG_TX and used to deliver + * messages for the respective CONFIG_TX queue. + * @pad: Padding. + * @egress_pasid: Egress PASID info. + * @egress_hdr_pasid: Egress header PASID. + * @egress_buf_pasid: Egress buffer PASID. + * @pad1: Padding for future extensions. + */ +struct virtchnl2_txq_info { + __le64 dma_ring_addr; + __le32 type; + __le32 queue_id; + __le16 relative_queue_id; + __le16 model; + __le16 sched_mode; + __le16 qflags; + __le16 ring_len; + __le16 tx_compl_queue_id; + __le16 peer_type; + __le16 peer_rx_queue_id; + u8 pad[4]; + __le32 egress_pasid; + __le32 egress_hdr_pasid; + __le32 egress_buf_pasid; + u8 pad1[8]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(56, virtchnl2_txq_info); + +/** + * struct virtchnl2_config_tx_queues - TX queue config. + * @vport_id: Vport id. + * @num_qinfo: Number of virtchnl2_txq_info structs. + * @pad: Padding. + * @qinfo: Tx queues config info. + * + * PF sends this message to set up parameters for one or more transmit queues. + * This message contains an array of num_qinfo instances of virtchnl2_txq_info + * structures. CP configures requested queues and returns a status code. If + * num_qinfo specified is greater than the number of queues associated with the + * vport, an error is returned and no queues are configured. + * + * Associated with VIRTCHNL2_OP_CONFIG_TX_QUEUES. + */ +struct virtchnl2_config_tx_queues { + __le32 vport_id; + __le16 num_qinfo; + u8 pad[10]; + struct virtchnl2_txq_info qinfo[]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_config_tx_queues); + +/** + * struct virtchnl2_rxq_info - Receive queue config info. + * @desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions.
+ * @dma_ring_addr: DMA address. + * @type: See enum virtchnl2_queue_type. + * @queue_id: Queue id. + * @model: See enum virtchnl2_queue_model. + * @hdr_buffer_size: Header buffer size. + * @data_buffer_size: Data buffer size. + * @max_pkt_size: Max packet size. + * @ring_len: Ring length. + * @buffer_notif_stride: Buffer notification stride in units of 32-descriptors. + * This field must be a power of 2. + * @pad: Padding. + * @dma_head_wb_addr: Applicable only for receive buffer queues. + * @qflags: Applicable only for receive completion queues. + * See enum virtchnl2_rxq_flags. + * @rx_buffer_low_watermark: Rx buffer low watermark. + * @rx_bufq1_id: Buffer queue index of the first buffer queue associated with + * the Rx queue. Valid only in split queue model. + * @rx_bufq2_id: Buffer queue index of the second buffer queue associated with + * the Rx queue. Valid only in split queue model. + * @bufq2_ena: Indicates whether there is a second buffer queue; rx_bufq2_id + * is valid only if this field is set. + * @pad1: Padding. + * @ingress_pasid: Ingress PASID. + * @ingress_hdr_pasid: Ingress header PASID. + * @ingress_buf_pasid: Ingress buffer PASID. + * @pad2: Padding for future extensions. + */ +struct virtchnl2_rxq_info { + __le64 desc_ids; + __le64 dma_ring_addr; + __le32 type; + __le32 queue_id; + __le16 model; + __le16 hdr_buffer_size; + __le32 data_buffer_size; + __le32 max_pkt_size; + __le16 ring_len; + u8 buffer_notif_stride; + u8 pad; + __le64 dma_head_wb_addr; + __le16 qflags; + __le16 rx_buffer_low_watermark; + __le16 rx_bufq1_id; + __le16 rx_bufq2_id; + u8 bufq2_ena; + u8 pad1[3]; + __le32 ingress_pasid; + __le32 ingress_hdr_pasid; + __le32 ingress_buf_pasid; + u8 pad2[16]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(88, virtchnl2_rxq_info); + +/** + * struct virtchnl2_config_rx_queues - Rx queues config. + * @vport_id: Vport id. + * @num_qinfo: Number of instances. + * @pad: Padding. + * @qinfo: Rx queues config info. + * + * PF sends this message to set up parameters for one or more receive queues. + * This message contains an array of num_qinfo instances of virtchnl2_rxq_info + * structures. CP configures requested queues and returns a status code. + * If the number of queues specified is greater than the number of queues + * associated with the vport, an error is returned and no queues are configured. + * + * Associated with VIRTCHNL2_OP_CONFIG_RX_QUEUES. + */ +struct virtchnl2_config_rx_queues { + __le32 vport_id; + __le16 num_qinfo; + u8 pad[18]; + struct virtchnl2_rxq_info qinfo[]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_config_rx_queues); + +/** + * struct virtchnl2_add_queues - data for VIRTCHNL2_OP_ADD_QUEUES. + * @vport_id: Vport id. + * @num_tx_q: Number of Tx queues. + * @num_tx_complq: Number of Tx completion queues. + * @num_rx_q: Number of Rx queues. + * @num_rx_bufq: Number of Rx buffer queues. + * @pad: Padding. + * @chunks: Chunks of contiguous queues. + * + * PF sends this message to request additional transmit/receive queues beyond + * the ones that were assigned via CREATE_VPORT request. virtchnl2_add_queues + * structure is used to specify the number of each type of queues. + * CP responds with the same structure with the actual number of queues assigned + * followed by num_chunks of virtchnl2_queue_reg_chunk structures. + * + * Associated with VIRTCHNL2_OP_ADD_QUEUES.
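The CONFIG_RX_QUEUES message above follows the same header-plus-flexible-array pattern; a sketch of allocating and heading one for num_q queues (variables assumed, mailbox send and error handling omitted):

    struct virtchnl2_config_rx_queues *msg;
    size_t len = struct_size(msg, qinfo, num_q);

    msg = kzalloc(len, GFP_KERNEL);
    if (!msg)
    	return -ENOMEM;

    msg->vport_id = cpu_to_le32(vport_id);
    msg->num_qinfo = cpu_to_le16(num_q);
    /* fill msg->qinfo[0..num_q - 1], then send the buffer over the mailbox */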
+ */ +struct virtchnl2_add_queues { + __le32 vport_id; + __le16 num_tx_q; + __le16 num_tx_complq; + __le16 num_rx_q; + __le16 num_rx_bufq; + u8 pad[4]; + struct virtchnl2_queue_reg_chunks chunks; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_add_queues); + +/** + * struct virtchnl2_vector_chunk - Structure to specify a chunk of contiguous + * interrupt vectors. + * @start_vector_id: Start vector id. + * @start_evv_id: Start EVV id. + * @num_vectors: Number of vectors. + * @pad: Padding. + * @dynctl_reg_start: DYN_CTL register offset. + * @dynctl_reg_spacing: Register spacing between DYN_CTL registers of 2 + * consecutive vectors. + * @itrn_reg_start: ITRN register offset. + * @itrn_reg_spacing: Register spacing between itrn registers of 2 + * consecutive vectors. + * @itrn_index_spacing: Register spacing between itrn registers of the same + * vector where n=0..2. + * @pad1: Padding for future extensions. + * + * Register offsets and spacing provided by CP. + * Dynamic control registers are used for enabling/disabling/re-enabling + * interrupts and updating interrupt rates in the hotpath. Any changes + * to interrupt rates in the dynamic control registers will be reflected + * in the interrupt throttling rate registers. + * itrn registers are used to update interrupt rates for specific + * interrupt indices without modifying the state of the interrupt. + */ +struct virtchnl2_vector_chunk { + __le16 start_vector_id; + __le16 start_evv_id; + __le16 num_vectors; + __le16 pad; + __le32 dynctl_reg_start; + __le32 dynctl_reg_spacing; + __le32 itrn_reg_start; + __le32 itrn_reg_spacing; + __le32 itrn_index_spacing; + u8 pad1[4]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_vector_chunk); + +/** + * struct virtchnl2_vector_chunks - chunks of contiguous interrupt vectors. + * @num_vchunks: Number of vector chunks. + * @pad: Padding. + * @vchunks: Chunks of contiguous vector info. + * + * PF sends virtchnl2_vector_chunks struct to specify the vectors it is giving + * away. CP performs requested action and returns status. + * + * Associated with VIRTCHNL2_OP_DEALLOC_VECTORS. + */ +struct virtchnl2_vector_chunks { + __le16 num_vchunks; + u8 pad[14]; + struct virtchnl2_vector_chunk vchunks[]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_vector_chunks); + +/** + * struct virtchnl2_alloc_vectors - vector allocation info. + * @num_vectors: Number of vectors. + * @pad: Padding. + * @vchunks: Chunks of contiguous vector info. + * + * PF sends this message to request additional interrupt vectors beyond the + * ones that were assigned via GET_CAPS request. virtchnl2_alloc_vectors + * structure is used to specify the number of vectors requested. CP responds + * with the same structure with the actual number of vectors assigned followed + * by virtchnl2_vector_chunks structure identifying the vector ids. + * + * Associated with VIRTCHNL2_OP_ALLOC_VECTORS. + */ +struct virtchnl2_alloc_vectors { + __le16 num_vectors; + u8 pad[14]; + struct virtchnl2_vector_chunks vchunks; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_alloc_vectors); + +/** + * struct virtchnl2_rss_lut - RSS LUT info. + * @vport_id: Vport id. + * @lut_entries_start: Start of LUT entries. + * @lut_entries: Number of LUT entries. + * @pad: Padding. + * @lut: RSS lookup table. + * + * PF sends this message to get or set RSS lookup table. Only supported if + * both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration + * negotiation. + * + * Associated with VIRTCHNL2_OP_GET_RSS_LUT and VIRTCHNL2_OP_SET_RSS_LUT.
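For ALLOC_VECTORS, the grant comes back as the chunk list documented above; a minimal sketch of walking it (illustrative helper, not driver code):

    static void example_walk_vchunks(const struct virtchnl2_vector_chunks *vcs)
    {
    	u16 i, num = le16_to_cpu(vcs->num_vchunks);

    	for (i = 0; i < num; i++) {
    		const struct virtchnl2_vector_chunk *c = &vcs->vchunks[i];
    		u16 first = le16_to_cpu(c->start_vector_id);
    		u16 count = le16_to_cpu(c->num_vectors);

    		pr_info("granted vectors %u..%u\n", first, first + count - 1);
    	}
    }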
+ */ +struct virtchnl2_rss_lut { + __le32 vport_id; + __le16 lut_entries_start; + __le16 lut_entries; + u8 pad[4]; + __le32 lut[]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(12, virtchnl2_rss_lut); + +/** + * struct virtchnl2_rss_hash - RSS hash info. + * @ptype_groups: Packet type groups bitmap. + * @vport_id: Vport id. + * @pad: Padding for future extensions. + * + * PF sends these messages to get and set the hash filter enable bits for RSS. + * By default, the CP sets these to all possible traffic types that the + * hardware supports. The PF can query this value if it wants to change the + * traffic types that are hashed by the hardware. + * Only supported if both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit + * during configuration negotiation. + * + * Associated with VIRTCHNL2_OP_GET_RSS_HASH and VIRTCHNL2_OP_SET_RSS_HASH. + */ +struct virtchnl2_rss_hash { + __le64 ptype_groups; + __le32 vport_id; + u8 pad[4]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_rss_hash); + +/** + * struct virtchnl2_sriov_vfs_info - VFs info. + * @num_vfs: Number of VFs. + * @pad: Padding for future extensions. + * + * This message is used to set the number of SRIOV VFs to be created. The + * actual allocation of resources for the VFs in terms of vport, queues and + * interrupts is done by CP. When this call completes, the IDPF driver calls + * pci_enable_sriov to let the OS instantiate the SRIOV PCIE devices. + * Setting the number of VFs to 0 destroys all the VFs of this function. + * + * Associated with VIRTCHNL2_OP_SET_SRIOV_VFS. + */ +struct virtchnl2_sriov_vfs_info { + __le16 num_vfs; + __le16 pad; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_sriov_vfs_info); + +/** + * struct virtchnl2_ptype - Packet type info. + * @ptype_id_10: 10-bit packet type. + * @ptype_id_8: 8-bit packet type. + * @proto_id_count: Number of protocol ids the packet supports, maximum of 32 + * protocol ids are supported. + * @pad: Padding. + * @proto_id: proto_id_count decides the allocation of protocol id array. + * See enum virtchnl2_proto_hdr_type. + * + * Based on the descriptor type the PF supports, CP fills ptype_id_10 or + * ptype_id_8 for flex and base descriptor respectively. If the ptype_id_10 + * value is set to 0xFFFF, the PF should consider this ptype as a dummy one + * and it is the last ptype. + */ +struct virtchnl2_ptype { + __le16 ptype_id_10; + u8 ptype_id_8; + u8 proto_id_count; + __le16 pad; + __le16 proto_id[]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(6, virtchnl2_ptype); + +/** + * struct virtchnl2_get_ptype_info - Packet type info. + * @start_ptype_id: Starting ptype ID. + * @num_ptypes: Number of packet types from start_ptype_id. + * @pad: Padding for future extensions. + * + * The total number of supported packet types is based on the descriptor type. + * For the flex descriptor, it is 1024 (10-bit ptype), and for the base + * descriptor, it is 256 (8-bit ptype). Send this message to the CP by + * populating the 'start_ptype_id' and the 'num_ptypes'. CP responds with the + * 'start_ptype_id', 'num_ptypes', and the array of ptypes (virtchnl2_ptype) + * that are added at the end of the 'virtchnl2_get_ptype_info' message. (Note: + * there is no specific field for the ptypes; they are appended to the end of + * the ptype info message and the PF/VF is expected to extract them + * accordingly. This is done because the compiler doesn't allow nested + * flexible array fields).
+ * + * If all the ptypes don't fit into one mailbox buffer, CP splits the + * ptype info into multiple messages, where each message will have its own + * 'start_ptype_id', 'num_ptypes', and the ptype array itself. When CP is done + * updating all the ptype information extracted from the package (the number of + * ptypes extracted might be less than what PF/VF expects), it will append a + * dummy ptype (which has 'ptype_id_10' of 'struct virtchnl2_ptype' as 0xFFFF) + * to the ptype array. + * + * PF/VF is expected to receive multiple VIRTCHNL2_OP_GET_PTYPE_INFO messages. + * + * Associated with VIRTCHNL2_OP_GET_PTYPE_INFO. + */ +struct virtchnl2_get_ptype_info { + __le16 start_ptype_id; + __le16 num_ptypes; + __le32 pad; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_get_ptype_info); + +/** + * struct virtchnl2_vport_stats - Vport statistics. + * @vport_id: Vport id. + * @pad: Padding. + * @rx_bytes: Received bytes. + * @rx_unicast: Received unicast packets. + * @rx_multicast: Received multicast packets. + * @rx_broadcast: Received broadcast packets. + * @rx_discards: Discarded packets on receive. + * @rx_errors: Receive errors. + * @rx_unknown_protocol: Unknown protocol. + * @tx_bytes: Transmitted bytes. + * @tx_unicast: Transmitted unicast packets. + * @tx_multicast: Transmitted multicast packets. + * @tx_broadcast: Transmitted broadcast packets. + * @tx_discards: Discarded packets on transmit. + * @tx_errors: Transmit errors. + * @rx_invalid_frame_length: Packets with invalid frame length. + * @rx_overflow_drop: Packets dropped on buffer overflow. + * + * PF/VF sends this message to CP to get the updated stats by specifying the + * vport_id. CP responds with stats in struct virtchnl2_vport_stats. + * + * Associated with VIRTCHNL2_OP_GET_STATS. + */ +struct virtchnl2_vport_stats { + __le32 vport_id; + u8 pad[4]; + __le64 rx_bytes; + __le64 rx_unicast; + __le64 rx_multicast; + __le64 rx_broadcast; + __le64 rx_discards; + __le64 rx_errors; + __le64 rx_unknown_protocol; + __le64 tx_bytes; + __le64 tx_unicast; + __le64 tx_multicast; + __le64 tx_broadcast; + __le64 tx_discards; + __le64 tx_errors; + __le64 rx_invalid_frame_length; + __le64 rx_overflow_drop; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(128, virtchnl2_vport_stats); + +/** + * struct virtchnl2_event - Event info. + * @event: Event opcode. See enum virtchnl2_event_codes. + * @link_speed: Link speed provided in Mbps. + * @vport_id: Vport ID. + * @link_status: Link status. + * @pad: Padding. + * @reserved: Reserved. + * + * CP sends this message to inform the PF/VF driver of events that may affect + * it. No direct response is expected from the driver, though it may generate + * other messages in response to this one. + * + * Associated with VIRTCHNL2_OP_EVENT. + */ +struct virtchnl2_event { + __le32 event; + __le32 link_speed; + __le32 vport_id; + u8 link_status; + u8 pad; + __le16 reserved; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_event); + +/** + * struct virtchnl2_rss_key - RSS key info. + * @vport_id: Vport id. + * @key_len: Length of RSS key. + * @pad: Padding. + * @key_flex: RSS hash key, packed bytes. + * + * PF/VF sends this message to get or set RSS key. Only supported if both + * PF/VF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration + * negotiation. + * + * Associated with VIRTCHNL2_OP_GET_RSS_KEY and VIRTCHNL2_OP_SET_RSS_KEY.
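Returning to the GET_PTYPE_INFO flow described above: the receiver has to watch for the 0xFFFF dummy entry that terminates the stream, roughly as in this sketch (illustrative helper name):

    /* Sketch: true when this entry is the terminating dummy ptype. */
    static bool example_ptype_is_last(const struct virtchnl2_ptype *ptype)
    {
    	return le16_to_cpu(ptype->ptype_id_10) == 0xFFFF;
    }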
+ */ +struct virtchnl2_rss_key { + __le32 vport_id; + __le16 key_len; + u8 pad; + __DECLARE_FLEX_ARRAY(u8, key_flex); +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_rss_key); + +/** + * struct virtchnl2_queue_chunk - chunk of contiguous queues + * @type: See enum virtchnl2_queue_type. + * @start_queue_id: Starting queue id. + * @num_queues: Number of queues. + * @pad: Padding for future extensions. + */ +struct virtchnl2_queue_chunk { + __le32 type; + __le32 start_queue_id; + __le32 num_queues; + u8 pad[4]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_chunk); + +/** + * struct virtchnl2_queue_chunks - chunks of contiguous queues + * @num_chunks: Number of chunks. + * @pad: Padding. + * @chunks: Chunks of contiguous queues info. + */ +struct virtchnl2_queue_chunks { + __le16 num_chunks; + u8 pad[6]; + struct virtchnl2_queue_chunk chunks[]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_chunks); + +/** + * struct virtchnl2_del_ena_dis_queues - Enable/disable queues info. + * @vport_id: Vport id. + * @pad: Padding. + * @chunks: Chunks of contiguous queues info. + * + * PF sends these messages to enable, disable or delete queues specified in + * chunks. PF sends virtchnl2_del_ena_dis_queues struct to specify the queues + * to be enabled/disabled/deleted. Also applicable to single queue receive or + * transmit. CP performs requested action and returns status. + * + * Associated with VIRTCHNL2_OP_ENABLE_QUEUES, VIRTCHNL2_OP_DISABLE_QUEUES and + * VIRTCHNL2_OP_DEL_QUEUES. + */ +struct virtchnl2_del_ena_dis_queues { + __le32 vport_id; + u8 pad[4]; + struct virtchnl2_queue_chunks chunks; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_del_ena_dis_queues); + +/** + * struct virtchnl2_queue_vector - Queue to vector mapping. + * @queue_id: Queue id. + * @vector_id: Vector id. + * @pad: Padding. + * @itr_idx: See enum virtchnl2_itr_idx. + * @queue_type: See enum virtchnl2_queue_type. + * @pad1: Padding for future extensions. + */ +struct virtchnl2_queue_vector { + __le32 queue_id; + __le16 vector_id; + u8 pad[2]; + __le32 itr_idx; + __le32 queue_type; + u8 pad1[8]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_queue_vector); + +/** + * struct virtchnl2_queue_vector_maps - Map/unmap queues info. + * @vport_id: Vport id. + * @num_qv_maps: Number of queue vector maps. + * @pad: Padding. + * @qv_maps: Queue to vector maps. + * + * PF sends this message to map or unmap queues to vectors and interrupt + * throttling rate index registers. External data buffer contains + * virtchnl2_queue_vector_maps structure that contains num_qv_maps of + * virtchnl2_queue_vector structures. CP maps the requested queue vector maps + * after validating the queue and vector ids and returns a status code. + * + * Associated with VIRTCHNL2_OP_MAP_QUEUE_VECTOR and + * VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR. + */ +struct virtchnl2_queue_vector_maps { + __le32 vport_id; + __le16 num_qv_maps; + u8 pad[10]; + struct virtchnl2_queue_vector qv_maps[]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_vector_maps); + +/** + * struct virtchnl2_loopback - Loopback info. + * @vport_id: Vport id. + * @enable: Enable/disable. + * @pad: Padding for future extensions. + * + * PF/VF sends this message to transition to/from the loopback state. Setting + * the 'enable' to 1 enables the loopback state and setting 'enable' to 0 + * disables it. CP configures the state to loopback and returns status. + * + * Associated with VIRTCHNL2_OP_LOOPBACK.
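For the enable/disable/delete message above, one chunk covers any contiguous queue range; a sketch of enabling num_q Rx queues starting at first_qid (queue type constant from enum virtchnl2_queue_type; local variables assumed):

    struct virtchnl2_del_ena_dis_queues *eq;
    size_t len = struct_size(eq, chunks.chunks, 1);

    eq = kzalloc(len, GFP_KERNEL);
    if (!eq)
    	return -ENOMEM;

    eq->vport_id = cpu_to_le32(vport_id);
    eq->chunks.num_chunks = cpu_to_le16(1);
    eq->chunks.chunks[0].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
    eq->chunks.chunks[0].start_queue_id = cpu_to_le32(first_qid);
    eq->chunks.chunks[0].num_queues = cpu_to_le32(num_q);
    /* send with VIRTCHNL2_OP_ENABLE_QUEUES */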
+ */ +struct virtchnl2_loopback { + __le32 vport_id; + u8 enable; + u8 pad[3]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_loopback); + +/** + * struct virtchnl2_mac_addr - MAC address info. + * @addr: MAC address. + * @type: MAC type. See enum virtchnl2_mac_addr_type. + * @pad: Padding for future extensions. + */ +struct virtchnl2_mac_addr { + u8 addr[ETH_ALEN]; + u8 type; + u8 pad; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr); + +/** + * struct virtchnl2_mac_addr_list - List of MAC addresses. + * @vport_id: Vport id. + * @num_mac_addr: Number of MAC addresses. + * @pad: Padding. + * @mac_addr_list: List with MAC address info. + * + * PF/VF driver uses this structure to send a list of MAC addresses to be + * added/deleted to the CP; the CP performs the action and returns the + * status. + * + * Associated with VIRTCHNL2_OP_ADD_MAC_ADDR and VIRTCHNL2_OP_DEL_MAC_ADDR. + */ +struct virtchnl2_mac_addr_list { + __le32 vport_id; + __le16 num_mac_addr; + u8 pad[2]; + struct virtchnl2_mac_addr mac_addr_list[]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr_list); + +/** + * struct virtchnl2_promisc_info - Promisc type info. + * @vport_id: Vport id. + * @flags: See enum virtchnl2_promisc_flags. + * @pad: Padding for future extensions. + * + * PF/VF sends the vport id and flags to the CP; the CP performs the action + * and returns the status. + * + * Associated with VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE. + */ +struct virtchnl2_promisc_info { + __le32 vport_id; + /* See VIRTCHNL2_PROMISC_FLAGS definitions */ + __le16 flags; + u8 pad[2]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info); + +#endif /* _VIRTCHNL_2_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2_lan_desc.h b/drivers/net/ethernet/intel/idpf/virtchnl2_lan_desc.h new file mode 100644 index 000000000000..f1b577f1c452 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/virtchnl2_lan_desc.h @@ -0,0 +1,451 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023 Intel Corporation */ + +#ifndef _VIRTCHNL2_LAN_DESC_H_ +#define _VIRTCHNL2_LAN_DESC_H_ + +#include <linux/bits.h> + +/* This is an interface definition file where existing enums and their values + * must remain unchanged over time, so we specify explicit values for all enums. + */ + +/* Transmit descriptor ID flags + */ +enum virtchnl2_tx_desc_ids { + VIRTCHNL2_TXDID_DATA = BIT(0), + VIRTCHNL2_TXDID_CTX = BIT(1), + /* TXDID bit 2 is reserved + * TXDID bit 3 is free for future use + * TXDID bit 4 is reserved + */ + VIRTCHNL2_TXDID_FLEX_TSO_CTX = BIT(5), + /* TXDID bit 6 is reserved */ + VIRTCHNL2_TXDID_FLEX_L2TAG1_L2TAG2 = BIT(7), + /* TXDID bits 8 and 9 are free for future use + * TXDID bit 10 is reserved + * TXDID bit 11 is free for future use + */ + VIRTCHNL2_TXDID_FLEX_FLOW_SCHED = BIT(12), + /* TXDID bits 13 and 14 are free for future use */ + VIRTCHNL2_TXDID_DESC_DONE = BIT(15), +}; + +/* Receive descriptor IDs */ +enum virtchnl2_rx_desc_ids { + VIRTCHNL2_RXDID_1_32B_BASE = 1, + /* FLEX_SQ_NIC and FLEX_SPLITQ share desc ids because they can be + * differentiated based on queue model; e.g. single queue model can + * only use FLEX_SQ_NIC and split queue model can only use FLEX_SPLITQ + * for DID 2.
+ */ + VIRTCHNL2_RXDID_2_FLEX_SPLITQ = 2, + VIRTCHNL2_RXDID_2_FLEX_SQ_NIC = VIRTCHNL2_RXDID_2_FLEX_SPLITQ, + /* 3 through 6 are reserved */ + VIRTCHNL2_RXDID_7_HW_RSVD = 7, + /* 8 through 15 are free */ +}; + +/* Receive descriptor ID bitmasks */ +#define VIRTCHNL2_RXDID_M(bit) BIT_ULL(VIRTCHNL2_RXDID_##bit) + +enum virtchnl2_rx_desc_id_bitmasks { + VIRTCHNL2_RXDID_1_32B_BASE_M = VIRTCHNL2_RXDID_M(1_32B_BASE), + VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M = VIRTCHNL2_RXDID_M(2_FLEX_SPLITQ), + VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M = VIRTCHNL2_RXDID_M(2_FLEX_SQ_NIC), + VIRTCHNL2_RXDID_7_HW_RSVD_M = VIRTCHNL2_RXDID_M(7_HW_RSVD), +}; + +/* For splitq virtchnl2_rx_flex_desc_adv_nic_3 desc members */ +#define VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M GENMASK(3, 0) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_UMBCAST_M GENMASK(7, 6) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M GENMASK(9, 0) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_S 12 +#define VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M \ + BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_S) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF0_M GENMASK(15, 13) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M GENMASK(13, 0) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S 14 +#define VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M \ + BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S 15 +#define VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M \ + BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M GENMASK(9, 0) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_S 10 +#define VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M \ + BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_S) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_S 11 +#define VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_M \ + BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_S) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_S 12 +#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_M GENMASK(14, 12) +#define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S 15 +#define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_M \ + BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S) + +/* Bitmasks for splitq virtchnl2_rx_flex_desc_adv_nic_3 */ +enum virtchl2_rx_flex_desc_adv_status_error_0_qw1_bits { + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_DD_M = BIT(0), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M = BIT(1), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M = BIT(2), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M = BIT(3), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M = BIT(4), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M = BIT(5), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M = BIT(6), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_M = BIT(7), +}; + +/* Bitmasks for splitq virtchnl2_rx_flex_desc_adv_nic_3 */ +enum virtchnl2_rx_flex_desc_adv_status_error_0_qw0_bits { + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_LPBK_M = BIT(0), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M = BIT(1), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RXE_M = BIT(2), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_CRCP_M = BIT(3), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_M = BIT(4), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L2TAG1P_M = BIT(5), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XTRMD0_VALID_M = BIT(6), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XTRMD1_VALID_M = BIT(7), +}; + +/* Bitmasks for splitq virtchnl2_rx_flex_desc_adv_nic_3 */ +enum virtchnl2_rx_flex_desc_adv_status_error_1_bits { + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_RSVD_M = GENMASK(1, 0), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_ATRAEFAIL_M = BIT(2), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_L2TAG2P_M = BIT(3), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD2_VALID_M = BIT(4), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD3_VALID_M = 
BIT(5), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD4_VALID_M = BIT(6), + VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD5_VALID_M = BIT(7), +}; + +/* For singleq (flex) virtchnl2_rx_flex_desc fields + * For virtchnl2_rx_flex_desc.ptype_flex_flags0 member + */ +#define VIRTCHNL2_RX_FLEX_DESC_PTYPE_M GENMASK(9, 0) + +/* For virtchnl2_rx_flex_desc.pkt_len member */ +#define VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M GENMASK(13, 0) + +/* Bitmasks for singleq (flex) virtchnl2_rx_flex_desc */ +enum virtchnl2_rx_flex_desc_status_error_0_bits { + VIRTCHNL2_RX_FLEX_DESC_STATUS0_DD_M = BIT(0), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_EOF_M = BIT(1), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_HBO_M = BIT(2), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_L3L4P_M = BIT(3), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_M = BIT(4), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_M = BIT(5), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_M = BIT(6), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_M = BIT(7), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_LPBK_M = BIT(8), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_IPV6EXADD_M = BIT(9), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_RXE_M = BIT(10), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_CRCP_M = BIT(11), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_M = BIT(12), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_L2TAG1P_M = BIT(13), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_M = BIT(14), + VIRTCHNL2_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_M = BIT(15), +}; + +/* Bitmasks for singleq (flex) virtchnl2_rx_flex_desc */ +enum virtchnl2_rx_flex_desc_status_error_1_bits { + VIRTCHNL2_RX_FLEX_DESC_STATUS1_CPM_M = GENMASK(3, 0), + VIRTCHNL2_RX_FLEX_DESC_STATUS1_NAT_M = BIT(4), + VIRTCHNL2_RX_FLEX_DESC_STATUS1_CRYPTO_M = BIT(5), + /* [10:6] reserved */ + VIRTCHNL2_RX_FLEX_DESC_STATUS1_L2TAG2P_M = BIT(11), + VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_M = BIT(12), + VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_M = BIT(13), + VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_M = BIT(14), + VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_M = BIT(15), +}; + +/* For virtchnl2_rx_flex_desc.ts_low member */ +#define VIRTCHNL2_RX_FLEX_TSTAMP_VALID BIT(0) + +/* For singleq (non flex) virtchnl2_singleq_base_rx_desc legacy desc members */ +#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M GENMASK_ULL(51, 38) +#define VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M GENMASK_ULL(37, 30) +#define VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M GENMASK_ULL(26, 19) +#define VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_M GENMASK_ULL(18, 0) + +/* Bitmasks for singleq (base) virtchnl2_rx_base_desc */ +enum virtchnl2_rx_base_desc_status_bits { + VIRTCHNL2_RX_BASE_DESC_STATUS_DD_M = BIT(0), + VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M = BIT(1), + VIRTCHNL2_RX_BASE_DESC_STATUS_L2TAG1P_M = BIT(2), + VIRTCHNL2_RX_BASE_DESC_STATUS_L3L4P_M = BIT(3), + VIRTCHNL2_RX_BASE_DESC_STATUS_CRCP_M = BIT(4), + VIRTCHNL2_RX_BASE_DESC_STATUS_RSVD_M = GENMASK(7, 5), + VIRTCHNL2_RX_BASE_DESC_STATUS_EXT_UDP_0_M = BIT(8), + VIRTCHNL2_RX_BASE_DESC_STATUS_UMBCAST_M = GENMASK(10, 9), + VIRTCHNL2_RX_BASE_DESC_STATUS_FLM_M = BIT(11), + VIRTCHNL2_RX_BASE_DESC_STATUS_FLTSTAT_M = GENMASK(13, 12), + VIRTCHNL2_RX_BASE_DESC_STATUS_LPBK_M = BIT(14), + VIRTCHNL2_RX_BASE_DESC_STATUS_IPV6EXADD_M = BIT(15), + VIRTCHNL2_RX_BASE_DESC_STATUS_RSVD1_M = GENMASK(17, 16), + VIRTCHNL2_RX_BASE_DESC_STATUS_INT_UDP_0_M = BIT(18), +}; + +/* Bitmasks for singleq (base) virtchnl2_rx_base_desc */ +enum virtchnl2_rx_base_desc_error_bits { + VIRTCHNL2_RX_BASE_DESC_ERROR_RXE_M = BIT(0), + VIRTCHNL2_RX_BASE_DESC_ERROR_ATRAEFAIL_M = BIT(1), + VIRTCHNL2_RX_BASE_DESC_ERROR_HBO_M = BIT(2), + VIRTCHNL2_RX_BASE_DESC_ERROR_L3L4E_M = 
GENMASK(5, 3), + VIRTCHNL2_RX_BASE_DESC_ERROR_IPE_M = BIT(3), + VIRTCHNL2_RX_BASE_DESC_ERROR_L4E_M = BIT(4), + VIRTCHNL2_RX_BASE_DESC_ERROR_EIPE_M = BIT(5), + VIRTCHNL2_RX_BASE_DESC_ERROR_OVERSIZE_M = BIT(6), + VIRTCHNL2_RX_BASE_DESC_ERROR_PPRS_M = BIT(7), +}; + +/* Bitmasks for singleq (base) virtchnl2_rx_base_desc */ +#define VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH_M GENMASK(13, 12) + +/** + * struct virtchnl2_splitq_rx_buf_desc - SplitQ RX buffer descriptor format. + * @qword0: RX buffer struct. + * @qword0.buf_id: Buffer identifier. + * @qword0.rsvd0: Reserved. + * @qword0.rsvd1: Reserved. + * @pkt_addr: Packet buffer address. + * @hdr_addr: Header buffer address. + * @rsvd2: Reserved. + * + * Receive Descriptors + * SplitQ buffer + * | 16| 0| + * ---------------------------------------------------------------- + * | RSV | Buffer ID | + * ---------------------------------------------------------------- + * | Rx packet buffer address | + * ---------------------------------------------------------------- + * | Rx header buffer address | + * ---------------------------------------------------------------- + * | RSV | + * ---------------------------------------------------------------- + * | 0| + */ +struct virtchnl2_splitq_rx_buf_desc { + struct { + __le16 buf_id; + __le16 rsvd0; + __le32 rsvd1; + } qword0; + __le64 pkt_addr; + __le64 hdr_addr; + __le64 rsvd2; +}; + +/** + * struct virtchnl2_singleq_rx_buf_desc - SingleQ RX buffer descriptor format. + * @pkt_addr: Packet buffer address. + * @hdr_addr: Header buffer address. + * @rsvd1: Reserved. + * @rsvd2: Reserved. + * + * SingleQ buffer + * | 0| + * ---------------------------------------------------------------- + * | Rx packet buffer address | + * ---------------------------------------------------------------- + * | Rx header buffer address | + * ---------------------------------------------------------------- + * | RSV | + * ---------------------------------------------------------------- + * | RSV | + * ---------------------------------------------------------------- + * | 0| + */ +struct virtchnl2_singleq_rx_buf_desc { + __le64 pkt_addr; + __le64 hdr_addr; + __le64 rsvd1; + __le64 rsvd2; +}; + +/** + * struct virtchnl2_singleq_base_rx_desc - RX descriptor writeback format. + * @qword0: First quad word struct. + * @qword0.lo_dword: Lower dual word struct. + * @qword0.lo_dword.mirroring_status: Mirrored packet status. + * @qword0.lo_dword.l2tag1: Stripped L2 tag from the received packet. + * @qword0.hi_dword: High dual word union. + * @qword0.hi_dword.rss: RSS hash. + * @qword0.hi_dword.fd_id: Flow director filter id. + * @qword1: Second quad word struct. + * @qword1.status_error_ptype_len: Status/error/PTYPE/length. + * @qword2: Third quad word struct. + * @qword2.ext_status: Extended status. + * @qword2.rsvd: Reserved. + * @qword2.l2tag2_1: Extracted L2 tag 2 from the packet. + * @qword2.l2tag2_2: Reserved. + * @qword3: Fourth quad word struct. + * @qword3.reserved: Reserved. + * @qword3.fd_id: Flow director filter id.
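Matching the splitq buffer layout diagrammed above, posting a buffer amounts to three stores; a sketch (illustrative helper, DMA mapping assumed to happen elsewhere):

    static void example_post_splitq_buf(struct virtchnl2_splitq_rx_buf_desc *desc,
    					u16 buf_id, dma_addr_t pkt,
    					dma_addr_t hdr)
    {
    	desc->qword0.buf_id = cpu_to_le16(buf_id);
    	desc->pkt_addr = cpu_to_le64(pkt);
    	desc->hdr_addr = cpu_to_le64(hdr);
    }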
+ * + * Profile ID 0x1, SingleQ, base writeback format + */ +struct virtchnl2_singleq_base_rx_desc { + struct { + struct { + __le16 mirroring_status; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; + __le32 fd_id; + } hi_dword; + } qword0; + struct { + __le64 status_error_ptype_len; + } qword1; + struct { + __le16 ext_status; + __le16 rsvd; + __le16 l2tag2_1; + __le16 l2tag2_2; + } qword2; + struct { + __le32 reserved; + __le32 fd_id; + } qword3; +}; + +/** + * struct virtchnl2_rx_flex_desc_nic - RX descriptor writeback format. + * + * @rxdid: Descriptor builder profile id. + * @mir_id_umb_cast: umb_cast=[7:6], mirror=[5:0]. + * @ptype_flex_flags0: ff0=[15:10], ptype=[9:0]. + * @pkt_len: Packet length, [15:14] are reserved. + * @hdr_len_sph_flex_flags1: ff1/ext=[15:12], sph=[11], header=[10:0]. + * @status_error0: Status/Error section 0. + * @l2tag1: Stripped L2 tag from the received packet. + * @rss_hash: RSS hash. + * @status_error1: Status/Error section 1. + * @flexi_flags2: Flexible flags section 2. + * @ts_low: Lower word of timestamp value. + * @l2tag2_1st: First L2TAG2. + * @l2tag2_2nd: Second L2TAG2. + * @flow_id: Flow id. + * @flex_ts: Timestamp and flexible flow id union. + * @flex_ts.ts_high: Timestamp higher word of the timestamp value. + * @flex_ts.flex.rsvd: Reserved. + * @flex_ts.flex.flow_id_ipv6: IPv6 flow id. + * + * Profile ID 0x2, SingleQ, flex writeback format + */ +struct virtchnl2_rx_flex_desc_nic { + /* Qword 0 */ + u8 rxdid; + u8 mir_id_umb_cast; + __le16 ptype_flex_flags0; + __le16 pkt_len; + __le16 hdr_len_sph_flex_flags1; + /* Qword 1 */ + __le16 status_error0; + __le16 l2tag1; + __le32 rss_hash; + /* Qword 2 */ + __le16 status_error1; + u8 flexi_flags2; + u8 ts_low; + __le16 l2tag2_1st; + __le16 l2tag2_2nd; + /* Qword 3 */ + __le32 flow_id; + union { + struct { + __le16 rsvd; + __le16 flow_id_ipv6; + } flex; + __le32 ts_high; + } flex_ts; +}; + +/** + * struct virtchnl2_rx_flex_desc_adv_nic_3 - RX descriptor writeback format. + * @rxdid_ucast: ucast=[7:6], rsvd=[5:4], profile_id=[3:0]. + * @status_err0_qw0: Status/Error section 0 in quad word 0. + * @ptype_err_fflags0: ff0=[15:12], udp_len_err=[11], ip_hdr_err=[10], + * ptype=[9:0]. + * @pktlen_gen_bufq_id: bufq_id=[15] only in splitq, gen=[14] only in splitq, + * plen=[13:0]. + * @hdrlen_flags: miss_prepend=[15], trunc_mirr=[14], int_udp_0=[13], + * ext_udp0=[12], sph=[11] only in splitq, rsc=[10] + * only in splitq, header=[9:0]. + * @status_err0_qw1: Status/Error section 0 in quad word 1. + * @status_err1: Status/Error section 1. + * @fflags1: Flexible flags section 1. + * @ts_low: Lower word of timestamp value. + * @buf_id: Buffer identifier. Only in splitq mode. + * @misc: Union. + * @misc.raw_cs: Raw checksum. + * @misc.l2tag1: Stripped L2 tag from the received packet. + * @misc.rscseglen: RSC segment length. + * @hash1: Lower bits of Rx hash value. + * @ff2_mirrid_hash2: Union. + * @ff2_mirrid_hash2.fflags2: Flexible flags section 2. + * @ff2_mirrid_hash2.mirrorid: Mirror id. + * @ff2_mirrid_hash2.hash2: Rx hash value, bits [23:16]. + * @hash3: Upper bits of Rx hash value. + * @l2tag2: Extracted L2 tag 2 from the packet. + * @fmd4: Flexible metadata container 4. + * @l2tag1: Stripped L2 tag from the received packet. + * @fmd6: Flexible metadata container 6. + * @ts_high: Timestamp higher word of the timestamp value.
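The packed fields documented above are intended to be extracted with the VIRTCHNL2_RX_FLEX_DESC_ADV_* masks defined earlier; a sketch using FIELD_GET() from linux/bitfield.h (illustrative helpers, not driver code):

    /* Sketch: packet length and generation bit from qword 0. */
    static u32 example_adv_pkt_len(const struct virtchnl2_rx_flex_desc_adv_nic_3 *d)
    {
    	return FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M,
    			 le16_to_cpu(d->pktlen_gen_bufq_id));
    }

    static bool example_adv_gen_bit(const struct virtchnl2_rx_flex_desc_adv_nic_3 *d)
    {
    	return le16_to_cpu(d->pktlen_gen_bufq_id) &
    	       VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M;
    }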
+ * + * Profile ID 0x2, SplitQ, flex writeback format + * + * Flex-field 0: BufferID + * Flex-field 1: Raw checksum/L2TAG1/RSC Seg Len (determined by HW) + * Flex-field 2: Hash[15:0] + * Flex-flags 2: Hash[23:16] + * Flex-field 3: L2TAG2 + * Flex-field 5: L2TAG1 + * Flex-field 7: Timestamp (upper 32 bits) + */ +struct virtchnl2_rx_flex_desc_adv_nic_3 { + /* Qword 0 */ + u8 rxdid_ucast; + u8 status_err0_qw0; + __le16 ptype_err_fflags0; + __le16 pktlen_gen_bufq_id; + __le16 hdrlen_flags; + /* Qword 1 */ + u8 status_err0_qw1; + u8 status_err1; + u8 fflags1; + u8 ts_low; + __le16 buf_id; + union { + __le16 raw_cs; + __le16 l2tag1; + __le16 rscseglen; + } misc; + /* Qword 2 */ + __le16 hash1; + union { + u8 fflags2; + u8 mirrorid; + u8 hash2; + } ff2_mirrid_hash2; + u8 hash3; + __le16 l2tag2; + __le16 fmd4; + /* Qword 3 */ + __le16 l2tag1; + __le16 fmd6; + __le32 ts_high; +}; + +/* Common union for accessing descriptor format structs */ +union virtchnl2_rx_desc { + struct virtchnl2_singleq_base_rx_desc base_wb; + struct virtchnl2_rx_flex_desc_nic flex_nic_wb; + struct virtchnl2_rx_flex_desc_adv_nic_3 flex_adv_nic_3_wb; +}; + +#endif /* _VIRTCHNL2_LAN_DESC_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 015b78144114..a2b759531cb7 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -34,11 +34,11 @@ struct igb_adapter; /* TX/RX descriptor defines */ #define IGB_DEFAULT_TXD 256 #define IGB_DEFAULT_TX_WORK 128 -#define IGB_MIN_TXD 80 +#define IGB_MIN_TXD 64 #define IGB_MAX_TXD 4096 #define IGB_DEFAULT_RXD 256 -#define IGB_MIN_RXD 80 +#define IGB_MIN_RXD 64 #define IGB_MAX_RXD 4096 #define IGB_DEFAULT_ITR 3 /* dynamic */ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 319ed601eaa1..16d2a55d5e17 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2356,10 +2356,10 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) break; case ETH_SS_STATS: for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) - ethtool_sprintf(&p, + ethtool_sprintf(&p, "%s", igb_gstrings_stats[i].stat_string); for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) - ethtool_sprintf(&p, + ethtool_sprintf(&p, "%s", igb_gstrings_net_stats[i].stat_string); for (i = 0; i < adapter->num_tx_queues; i++) { ethtool_sprintf(&p, "tx_queue_%u_packets", i); @@ -2978,11 +2978,15 @@ static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter, if (err) goto err_out_w_lock; - igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx); + err = igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx); + if (err) + goto err_out_input_filter; spin_unlock(&adapter->nfc_lock); return 0; +err_out_input_filter: + igb_erase_filter(adapter, input); err_out_w_lock: spin_unlock(&adapter->nfc_lock); err_out: diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 9a2561409b06..b2295caa2f0a 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2615,10 +2615,10 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter, struct netlink_ext_ack *extack = f->common.extack; if (dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_VLAN))) { + ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | +
BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN))) { NL_SET_ERR_MSG_MOD(extack, "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported"); return -EOPNOTSUPP; @@ -3069,6 +3069,7 @@ void igb_set_fw_version(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_fw_version fw; + char *lbuf; igb_get_fw_version(hw, &fw); @@ -3076,36 +3077,34 @@ void igb_set_fw_version(struct igb_adapter *adapter) case e1000_i210: case e1000_i211: if (!(igb_get_flash_presence_i210(hw))) { - snprintf(adapter->fw_version, - sizeof(adapter->fw_version), - "%2d.%2d-%d", - fw.invm_major, fw.invm_minor, - fw.invm_img_type); + lbuf = kasprintf(GFP_KERNEL, "%2d.%2d-%d", + fw.invm_major, fw.invm_minor, + fw.invm_img_type); break; } fallthrough; default: - /* if option is rom valid, display its version too */ + /* if option rom is valid, display its version too */ if (fw.or_valid) { - snprintf(adapter->fw_version, - sizeof(adapter->fw_version), - "%d.%d, 0x%08x, %d.%d.%d", - fw.eep_major, fw.eep_minor, fw.etrack_id, - fw.or_major, fw.or_build, fw.or_patch); + lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x, %d.%d.%d", + fw.eep_major, fw.eep_minor, + fw.etrack_id, fw.or_major, fw.or_build, + fw.or_patch); /* no option rom */ } else if (fw.etrack_id != 0X0000) { - snprintf(adapter->fw_version, - sizeof(adapter->fw_version), - "%d.%d, 0x%08x", - fw.eep_major, fw.eep_minor, fw.etrack_id); + lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x", + fw.eep_major, fw.eep_minor, + fw.etrack_id); } else { - snprintf(adapter->fw_version, - sizeof(adapter->fw_version), - "%d.%d.%d", - fw.eep_major, fw.eep_minor, fw.eep_build); + lbuf = kasprintf(GFP_KERNEL, "%d.%d.%d", fw.eep_major, + fw.eep_minor, fw.eep_build); } break; } + + /* the truncate happens here if it doesn't fit */ + strscpy(adapter->fw_version, lbuf, sizeof(adapter->fw_version)); + kfree(lbuf); } /** @@ -3264,7 +3263,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) igb_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; - strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); netdev->mem_start = pci_resource_start(pdev, 0); netdev->mem_end = pci_resource_end(pdev, 0); @@ -3827,8 +3826,11 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs, bool reinit) } /* only call pci_enable_sriov() if no VFs are allocated already */ - if (!old_vfs) + if (!old_vfs) { err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); + if (err) + goto err_out; + } goto out; @@ -3933,8 +3935,9 @@ static void igb_probe_vfs(struct igb_adapter *adapter) struct pci_dev *pdev = adapter->pdev; struct e1000_hw *hw = &adapter->hw; - /* Virtualization features not supported on i210 family. */ - if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) + /* Virtualization features not supported on i210 and 82580 family. 
*/ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211) || + (hw->mac.type == e1000_82580)) return; /* Of the below we really only want the effect of getting @@ -4814,6 +4817,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, static void igb_set_rx_buffer_len(struct igb_adapter *adapter, struct igb_ring *rx_ring) { +#if (PAGE_SIZE < 8192) + struct e1000_hw *hw = &adapter->hw; +#endif + /* set build_skb and buffer size flags */ clear_ring_build_skb_enabled(rx_ring); clear_ring_uses_large_buffer(rx_ring); @@ -4824,10 +4831,9 @@ static void igb_set_rx_buffer_len(struct igb_adapter *adapter, set_ring_build_skb_enabled(rx_ring); #if (PAGE_SIZE < 8192) - if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) - return; - - set_ring_uses_large_buffer(rx_ring); + if (adapter->max_frame_size > IGB_MAX_FRAME_BUILD_SKB || + rd32(E1000_RCTL) & E1000_RCTL_SBP) + set_ring_uses_large_buffer(rx_ring); #endif } @@ -7850,8 +7856,8 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf, { struct pci_dev *pdev = adapter->pdev; struct vf_data_storage *vf_data = &adapter->vf_data[vf]; - struct list_head *pos; - struct vf_mac_filter *entry = NULL; + struct vf_mac_filter *entry; + bool found = false; int ret = 0; if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && @@ -7871,8 +7877,7 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf, switch (info) { case E1000_VF_MAC_FILTER_CLR: /* remove all unicast MAC filters related to the current VF */ - list_for_each(pos, &adapter->vf_macs.l) { - entry = list_entry(pos, struct vf_mac_filter, l); + list_for_each_entry(entry, &adapter->vf_macs.l, l) { if (entry->vf == vf) { entry->vf = -1; entry->free = true; @@ -7882,13 +7887,14 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf, break; case E1000_VF_MAC_FILTER_ADD: /* try to find empty slot in the list */ - list_for_each(pos, &adapter->vf_macs.l) { - entry = list_entry(pos, struct vf_mac_filter, l); - if (entry->free) + list_for_each_entry(entry, &adapter->vf_macs.l, l) { + if (entry->free) { + found = true; break; + } } - if (entry && entry->free) { + if (found) { entry->free = false; entry->vf = vf; ether_addr_copy(entry->vf_mac, addr); diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h index 57d39ee00b58..7b83678ba83a 100644 --- a/drivers/net/ethernet/intel/igbvf/igbvf.h +++ b/drivers/net/ethernet/intel/igbvf/igbvf.h @@ -39,11 +39,11 @@ enum latency_range { /* Tx/Rx descriptor defines */ #define IGBVF_DEFAULT_TXD 256 #define IGBVF_MAX_TXD 4096 -#define IGBVF_MIN_TXD 80 +#define IGBVF_MIN_TXD 64 #define IGBVF_DEFAULT_RXD 256 #define IGBVF_MAX_RXD 4096 -#define IGBVF_MIN_RXD 80 +#define IGBVF_MIN_RXD 64 #define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */ #define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */ diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 7ff2752dd763..fd712585af27 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2785,7 +2785,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) igbvf_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; - strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); adapter->bd_number = cards_found++; diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 38901d2a4680..f48f82d5e274 
100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -15,6 +15,7 @@ #include <linux/net_tstamp.h> #include <linux/bitfield.h> #include <linux/hrtimer.h> +#include <net/xdp.h> #include "igc_hw.h" @@ -37,6 +38,8 @@ void igc_ethtool_set_ops(struct net_device *); #define MAX_FLEX_FILTER 32 +#define IGC_MAX_TX_TSTAMP_REGS 4 + enum igc_mac_filter_type { IGC_MAC_FILTER_TYPE_DST = 0, IGC_MAC_FILTER_TYPE_SRC @@ -69,6 +72,15 @@ struct igc_rx_packet_stats { u64 other_packets; }; +struct igc_tx_timestamp_request { + struct sk_buff *skb; /* reference to the packet being timestamped */ + unsigned long start; /* when the tstamp request started (jiffies) */ + u32 mask; /* _TSYNCTXCTL_TXTT_{X} bit for this request */ + u32 regl; /* which TXSTMPL_{X} register should be used */ + u32 regh; /* which TXSTMPH_{X} register should be used */ + u32 flags; /* flags that should be added to the tx_buffer */ +}; + struct igc_ring_container { struct igc_ring *ring; /* pointer to linked list of rings */ unsigned int total_bytes; /* total bytes processed this int */ @@ -244,9 +256,8 @@ struct igc_adapter { * ptp_tx_lock. */ spinlock_t ptp_tx_lock; - struct sk_buff *ptp_tx_skb; + struct igc_tx_timestamp_request tx_tstamp[IGC_MAX_TX_TSTAMP_REGS]; struct hwtstamp_config tstamp_config; - unsigned long ptp_tx_start; unsigned int ptp_flags; /* System time value lock */ spinlock_t tmreg_lock; @@ -368,11 +379,11 @@ static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc) /* TX/RX descriptor defines */ #define IGC_DEFAULT_TXD 256 #define IGC_DEFAULT_TX_WORK 128 -#define IGC_MIN_TXD 80 +#define IGC_MIN_TXD 64 #define IGC_MAX_TXD 4096 #define IGC_DEFAULT_RXD 256 -#define IGC_MIN_RXD 80 +#define IGC_MIN_RXD 64 #define IGC_MAX_RXD 4096 /* Supported Rx Buffer Sizes */ @@ -454,6 +465,10 @@ enum igc_tx_flags { /* olinfo flags */ IGC_TX_FLAGS_IPV4 = 0x10, IGC_TX_FLAGS_CSUM = 0x20, + + IGC_TX_FLAGS_TSTAMP_1 = 0x100, + IGC_TX_FLAGS_TSTAMP_2 = 0x200, + IGC_TX_FLAGS_TSTAMP_3 = 0x400, }; enum igc_boards { diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h index 9f3827eda157..f7d6491d4c60 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.h +++ b/drivers/net/ethernet/intel/igc/igc_base.h @@ -34,6 +34,9 @@ struct igc_adv_tx_context_desc { /* Adv Transmit Descriptor Config Masks */ #define IGC_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +#define IGC_ADVTXD_TSTAMP_REG_1 0x00010000 /* Select register 1 for timestamp */ +#define IGC_ADVTXD_TSTAMP_REG_2 0x00020000 /* Select register 2 for timestamp */ +#define IGC_ADVTXD_TSTAMP_REG_3 0x00030000 /* Select register 3 for timestamp */ #define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ #define IGC_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ #define IGC_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index 2f780cc90883..b3037016f31d 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -454,6 +454,9 @@ /* Time Sync Transmit Control bit definitions */ #define IGC_TSYNCTXCTL_TXTT_0 0x00000001 /* Tx timestamp reg 0 valid */ +#define IGC_TSYNCTXCTL_TXTT_1 0x00000002 /* Tx timestamp reg 1 valid */ +#define IGC_TSYNCTXCTL_TXTT_2 0x00000004 /* Tx timestamp reg 2 valid */ +#define IGC_TSYNCTXCTL_TXTT_3 0x00000008 /* Tx timestamp reg 3 valid */ #define IGC_TSYNCTXCTL_ENABLED 0x00000010 /* enable 
Tx timestamping */ #define IGC_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */ #define IGC_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */ @@ -461,6 +464,10 @@ #define IGC_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */ #define IGC_TSYNCTXCTL_TXSYNSIG 0x00000020 /* Sample TX tstamp in PHY sop */ +#define IGC_TSYNCTXCTL_TXTT_ANY ( \ + IGC_TSYNCTXCTL_TXTT_0 | IGC_TSYNCTXCTL_TXTT_1 | \ + IGC_TSYNCTXCTL_TXTT_2 | IGC_TSYNCTXCTL_TXTT_3) + /* Timer selection bits */ #define IGC_AUX_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select SYSTIM0 for auxiliary time stamp */ #define IGC_AUX_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for auxiliary time stamp */ @@ -549,7 +556,7 @@ #define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x3f) << 2) #define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8) -#define IGC_PTM_SHORT_CYC_DEFAULT 10 /* Default Short/interrupted cycle interval */ +#define IGC_PTM_SHORT_CYC_DEFAULT 1 /* Default short cycle interval */ #define IGC_PTM_CYC_TIME_DEFAULT 5 /* Default PTM cycle time */ #define IGC_PTM_TIMEOUT_DEFAULT 255 /* Default timeout for PTM errors */ diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 93bce729be76..785eaa8e0ba8 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -773,9 +773,10 @@ static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset, break; case ETH_SS_STATS: for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) - ethtool_sprintf(&p, igc_gstrings_stats[i].stat_string); + ethtool_sprintf(&p, "%s", + igc_gstrings_stats[i].stat_string); for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) - ethtool_sprintf(&p, + ethtool_sprintf(&p, "%s", igc_gstrings_net_stats[i].stat_string); for (i = 0; i < adapter->num_tx_queues; i++) { ethtool_sprintf(&p, "tx_queue_%u_packets", i); @@ -868,6 +869,18 @@ static void igc_ethtool_get_stats(struct net_device *netdev, spin_unlock(&adapter->stats64_lock); } +static int igc_ethtool_get_previous_rx_coalesce(struct igc_adapter *adapter) +{ + return (adapter->rx_itr_setting <= 3) ? + adapter->rx_itr_setting : adapter->rx_itr_setting >> 2; +} + +static int igc_ethtool_get_previous_tx_coalesce(struct igc_adapter *adapter) +{ + return (adapter->tx_itr_setting <= 3) ? 
+ adapter->tx_itr_setting : adapter->tx_itr_setting >> 2; +} + static int igc_ethtool_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, @@ -875,17 +888,8 @@ static int igc_ethtool_get_coalesce(struct net_device *netdev, { struct igc_adapter *adapter = netdev_priv(netdev); - if (adapter->rx_itr_setting <= 3) - ec->rx_coalesce_usecs = adapter->rx_itr_setting; - else - ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; - - if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) { - if (adapter->tx_itr_setting <= 3) - ec->tx_coalesce_usecs = adapter->tx_itr_setting; - else - ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; - } + ec->rx_coalesce_usecs = igc_ethtool_get_previous_rx_coalesce(adapter); + ec->tx_coalesce_usecs = igc_ethtool_get_previous_tx_coalesce(adapter); return 0; } @@ -910,8 +914,12 @@ static int igc_ethtool_set_coalesce(struct net_device *netdev, ec->tx_coalesce_usecs == 2) return -EINVAL; - if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) + if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) && + ec->tx_coalesce_usecs != igc_ethtool_get_previous_tx_coalesce(adapter)) { + NL_SET_ERR_MSG_MOD(extack, + "Queue Pair mode enabled, both Rx and Tx coalescing controlled by rx-usecs"); return -EINVAL; + } /* If ITR is disabled, disable DMAC */ if (ec->rx_coalesce_usecs == 0) { @@ -1810,7 +1818,7 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev, struct igc_adapter *adapter = netdev_priv(netdev); struct net_device *dev = adapter->netdev; struct igc_hw *hw = &adapter->hw; - u32 advertising; + u16 advertised = 0; /* When adapter in resetting mode, autoneg/speed/duplex * cannot be changed @@ -1835,18 +1843,33 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev, while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) usleep_range(1000, 2000); - ethtool_convert_link_mode_to_legacy_u32(&advertising, - cmd->link_modes.advertising); - /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT. - * We have to check this and convert it to ADVERTISE_2500_FULL - * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly. 
- */ - if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full)) - advertising |= ADVERTISE_2500_FULL; + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 2500baseT_Full)) + advertised |= ADVERTISE_2500_FULL; + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseT_Full)) + advertised |= ADVERTISE_1000_FULL; + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 100baseT_Full)) + advertised |= ADVERTISE_100_FULL; + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 100baseT_Half)) + advertised |= ADVERTISE_100_HALF; + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10baseT_Full)) + advertised |= ADVERTISE_10_FULL; + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10baseT_Half)) + advertised |= ADVERTISE_10_HALF; if (cmd->base.autoneg == AUTONEG_ENABLE) { hw->mac.autoneg = 1; - hw->phy.autoneg_advertised = advertising; + hw->phy.autoneg_advertised = advertised; if (adapter->fc_autoneg) hw->fc.requested_mode = igc_fc_default; } else { diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 6f557e843e49..e9bb403bbacf 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -1271,10 +1271,21 @@ static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO, (IGC_ADVTXD_DCMD_TSE)); - /* set timestamp bit if present */ + /* set timestamp bit if present, will select the register set + * based on the _TSTAMP(_X) bit. + */ cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP, (IGC_ADVTXD_MAC_TSTAMP)); + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_1, + (IGC_ADVTXD_TSTAMP_REG_1)); + + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_2, + (IGC_ADVTXD_TSTAMP_REG_2)); + + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_3, + (IGC_ADVTXD_TSTAMP_REG_3)); + /* insert frame checksum */ cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS); @@ -1533,6 +1544,26 @@ static int igc_tso(struct igc_ring *tx_ring, return 1; } +static bool igc_request_tx_tstamp(struct igc_adapter *adapter, struct sk_buff *skb, u32 *flags) +{ + int i; + + for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) { + struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i]; + + if (tstamp->skb) + continue; + + tstamp->skb = skb_get(skb); + tstamp->start = jiffies; + *flags = tstamp->flags; + + return true; + } + + return false; +} + static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, struct igc_ring *tx_ring) { @@ -1614,14 +1645,12 @@ done: * timestamping request. */ unsigned long flags; + u32 tstamp_flags; spin_lock_irqsave(&adapter->ptp_tx_lock, flags); - if (!adapter->ptp_tx_skb) { + if (igc_request_tx_tstamp(adapter, skb, &tstamp_flags)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - tx_flags |= IGC_TX_FLAGS_TSTAMP; - - adapter->ptp_tx_skb = skb_get(skb); - adapter->ptp_tx_start = jiffies; + tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags; } else { adapter->tx_hwtstamp_skipped++; } @@ -6162,6 +6191,26 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter) return 0; } +static void igc_taprio_stats(struct net_device *dev, + struct tc_taprio_qopt_stats *stats) +{ + /* When Strict_End is enabled, the tx_overruns counter + * will always be zero. 
+ */ + stats->tx_overruns = 0; +} + +static void igc_taprio_queue_stats(struct net_device *dev, + struct tc_taprio_qopt_queue_stats *queue_stats) +{ + struct tc_taprio_qopt_stats *stats = &queue_stats->stats; + + /* When Strict_End is enabled, the tx_overruns counter + * will always be zero. + */ + stats->tx_overruns = 0; +} + static int igc_save_qbv_schedule(struct igc_adapter *adapter, struct tc_taprio_qopt_offload *qopt) { @@ -6173,11 +6222,20 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter, size_t n; int i; - if (qopt->cmd == TAPRIO_CMD_DESTROY) + switch (qopt->cmd) { + case TAPRIO_CMD_REPLACE: + break; + case TAPRIO_CMD_DESTROY: return igc_tsn_clear_schedule(adapter); - - if (qopt->cmd != TAPRIO_CMD_REPLACE) + case TAPRIO_CMD_STATS: + igc_taprio_stats(adapter->netdev, &qopt->stats); + return 0; + case TAPRIO_CMD_QUEUE_STATS: + igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats); + return 0; + default: return -EOPNOTSUPP; + } if (qopt->base_time < 0) return -ERANGE; @@ -6433,7 +6491,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames, struct igc_ring *ring; int i, drops; - if (unlikely(test_bit(__IGC_DOWN, &adapter->state))) + if (unlikely(!netif_carrier_ok(dev))) return -ENETDOWN; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) @@ -6877,7 +6935,7 @@ static int igc_probe(struct pci_dev *pdev, */ igc_get_hw_control(adapter); - strncpy(netdev->name, "eth%d", IFNAMSIZ); + strscpy(netdev->name, "eth%d", sizeof(netdev->name)); err = register_netdev(netdev); if (err) goto err_register; diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index f0b979a70655..928f38792203 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -558,11 +558,16 @@ static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter) static void igc_ptp_clear_tx_tstamp(struct igc_adapter *adapter) { unsigned long flags; + int i; spin_lock_irqsave(&adapter->ptp_tx_lock, flags); - dev_kfree_skb_any(adapter->ptp_tx_skb); - adapter->ptp_tx_skb = NULL; + for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) { + struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i]; + + dev_kfree_skb_any(tstamp->skb); + tstamp->skb = NULL; + } spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); } @@ -659,61 +664,106 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, } /* Requires adapter->ptp_tx_lock held by caller. */ -static void igc_ptp_tx_timeout(struct igc_adapter *adapter) +static void igc_ptp_tx_timeout(struct igc_adapter *adapter, + struct igc_tx_timestamp_request *tstamp) { - struct igc_hw *hw = &adapter->hw; - - dev_kfree_skb_any(adapter->ptp_tx_skb); - adapter->ptp_tx_skb = NULL; + dev_kfree_skb_any(tstamp->skb); + tstamp->skb = NULL; adapter->tx_hwtstamp_timeouts++; - /* Clear the tx valid bit in TSYNCTXCTL register to enable interrupt. 
*/ - rd32(IGC_TXSTMPH); + netdev_warn(adapter->netdev, "Tx timestamp timeout\n"); } void igc_ptp_tx_hang(struct igc_adapter *adapter) { + struct igc_tx_timestamp_request *tstamp; + struct igc_hw *hw = &adapter->hw; unsigned long flags; + bool found = false; + int i; spin_lock_irqsave(&adapter->ptp_tx_lock, flags); - if (!adapter->ptp_tx_skb) - goto unlock; + for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) { + tstamp = &adapter->tx_tstamp[i]; + + if (!tstamp->skb) + continue; - if (time_is_after_jiffies(adapter->ptp_tx_start + IGC_PTP_TX_TIMEOUT)) - goto unlock; + if (time_is_after_jiffies(tstamp->start + IGC_PTP_TX_TIMEOUT)) + continue; - igc_ptp_tx_timeout(adapter); + igc_ptp_tx_timeout(adapter, tstamp); + found = true; + } + + if (found) { + /* Reading the high register of the first set of timestamp registers + * clears all the equivalent bits in the TSYNCTXCTL register. + */ + rd32(IGC_TXSTMPH_0); + } -unlock: spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); } +static void igc_ptp_tx_reg_to_stamp(struct igc_adapter *adapter, + struct igc_tx_timestamp_request *tstamp, u64 regval) +{ + struct skb_shared_hwtstamps shhwtstamps; + struct sk_buff *skb; + int adjust = 0; + + skb = tstamp->skb; + if (!skb) + return; + + if (igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval)) + return; + + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGC_I225_TX_LATENCY_10; + break; + case SPEED_100: + adjust = IGC_I225_TX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGC_I225_TX_LATENCY_1000; + break; + case SPEED_2500: + adjust = IGC_I225_TX_LATENCY_2500; + break; + } + + shhwtstamps.hwtstamp = + ktime_add_ns(shhwtstamps.hwtstamp, adjust); + + tstamp->skb = NULL; + + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); +} + /** * igc_ptp_tx_hwtstamp - utility function which checks for TX time stamp * @adapter: Board private structure * - * If we were asked to do hardware stamping and such a time stamp is - * available, then it must have been for this skb here because we only - * allow only one such packet into the queue. + * Check against the ready mask for which of the timestamp register + * sets are ready to be retrieved, then retrieve that and notify the + * rest of the stack. * * Context: Expects adapter->ptp_tx_lock to be held by caller. 
*/ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter) { - struct sk_buff *skb = adapter->ptp_tx_skb; - struct skb_shared_hwtstamps shhwtstamps; struct igc_hw *hw = &adapter->hw; - u32 tsynctxctl; - int adjust = 0; u64 regval; + u32 mask; + int i; - if (WARN_ON_ONCE(!skb)) - return; - - tsynctxctl = rd32(IGC_TSYNCTXCTL); - tsynctxctl &= IGC_TSYNCTXCTL_TXTT_0; - if (tsynctxctl) { + mask = rd32(IGC_TSYNCTXCTL) & IGC_TSYNCTXCTL_TXTT_ANY; + if (mask & IGC_TSYNCTXCTL_TXTT_0) { regval = rd32(IGC_TXSTMPL); regval |= (u64)rd32(IGC_TXSTMPH) << 32; } else { @@ -742,37 +792,30 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter) txstmpl_new = rd32(IGC_TXSTMPL); if (txstmpl_old == txstmpl_new) - return; + goto done; regval = txstmpl_new; regval |= (u64)rd32(IGC_TXSTMPH) << 32; } - if (igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval)) - return; - switch (adapter->link_speed) { - case SPEED_10: - adjust = IGC_I225_TX_LATENCY_10; - break; - case SPEED_100: - adjust = IGC_I225_TX_LATENCY_100; - break; - case SPEED_1000: - adjust = IGC_I225_TX_LATENCY_1000; - break; - case SPEED_2500: - adjust = IGC_I225_TX_LATENCY_2500; - break; - } + igc_ptp_tx_reg_to_stamp(adapter, &adapter->tx_tstamp[0], regval); - shhwtstamps.hwtstamp = - ktime_add_ns(shhwtstamps.hwtstamp, adjust); +done: + /* Now that the problematic first register was handled, we can + * retrieve the timestamps from the other registers + * (starting from '1') with fewer complications. + */ + for (i = 1; i < IGC_MAX_TX_TSTAMP_REGS; i++) { + struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i]; - adapter->ptp_tx_skb = NULL; + if (!(tstamp->mask & mask)) + continue; - /* Notify the stack and free the skb after we've unlocked */ - skb_tstamp_tx(skb, &shhwtstamps); - dev_kfree_skb_any(skb); + regval = rd32(tstamp->regl); + regval |= (u64)rd32(tstamp->regh) << 32; + + igc_ptp_tx_reg_to_stamp(adapter, tstamp, regval); + } } /** @@ -788,12 +831,8 @@ void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter) spin_lock_irqsave(&adapter->ptp_tx_lock, flags); - if (!adapter->ptp_tx_skb) - goto unlock; - igc_ptp_tx_hwtstamp(adapter); -unlock: spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); } @@ -1006,9 +1045,34 @@ static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp, void igc_ptp_init(struct igc_adapter *adapter) { struct net_device *netdev = adapter->netdev; + struct igc_tx_timestamp_request *tstamp; struct igc_hw *hw = &adapter->hw; int i; + + tstamp = &adapter->tx_tstamp[0]; + tstamp->mask = IGC_TSYNCTXCTL_TXTT_0; + tstamp->regl = IGC_TXSTMPL_0; + tstamp->regh = IGC_TXSTMPH_0; + tstamp->flags = 0; + + tstamp = &adapter->tx_tstamp[1]; + tstamp->mask = IGC_TSYNCTXCTL_TXTT_1; + tstamp->regl = IGC_TXSTMPL_1; + tstamp->regh = IGC_TXSTMPH_1; + tstamp->flags = IGC_TX_FLAGS_TSTAMP_1; + + tstamp = &adapter->tx_tstamp[2]; + tstamp->mask = IGC_TSYNCTXCTL_TXTT_2; + tstamp->regl = IGC_TXSTMPL_2; + tstamp->regh = IGC_TXSTMPH_2; + tstamp->flags = IGC_TX_FLAGS_TSTAMP_2; + + tstamp = &adapter->tx_tstamp[3]; + tstamp->mask = IGC_TSYNCTXCTL_TXTT_3; + tstamp->regl = IGC_TXSTMPL_3; + tstamp->regh = IGC_TXSTMPH_3; + tstamp->flags = IGC_TX_FLAGS_TSTAMP_3; + switch (hw->mac.type) { case igc_i225: for (i = 0; i < IGC_N_SDP; i++) { diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index dba5a5759b1c..20e17f5fbce3 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -243,6 +243,18 @@ #define IGC_SYSTIMR 0x0B6F8 /* System time
register Residue */ #define IGC_TIMINCA 0x0B608 /* Increment attributes register - RW */ +/* TX Timestamp Low */ +#define IGC_TXSTMPL_0 0x0B618 +#define IGC_TXSTMPL_1 0x0B698 +#define IGC_TXSTMPL_2 0x0B6B8 +#define IGC_TXSTMPL_3 0x0B6D8 + +/* TX Timestamp High */ +#define IGC_TXSTMPH_0 0x0B61C +#define IGC_TXSTMPH_1 0x0B69C +#define IGC_TXSTMPH_2 0x0B6BC +#define IGC_TXSTMPH_3 0x0B6DC + #define IGC_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ #define IGC_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 63d4e32df029..b6f0376e42f4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -945,8 +945,6 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid); void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *, struct ixgbe_ring *); -void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *, - struct ixgbe_tx_buffer *); void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16); void ixgbe_write_eitr(struct ixgbe_q_vector *); int ixgbe_poll(struct napi_struct *napi, int budget); @@ -997,10 +995,6 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter); void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter); int ixgbe_fcoe_enable(struct net_device *netdev); int ixgbe_fcoe_disable(struct net_device *netdev); -#ifdef CONFIG_IXGBE_DCB -u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter); -u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up); -#endif /* CONFIG_IXGBE_DCB */ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, struct netdev_fcoe_hbainfo *info); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 4b531e8ae38a..34761e691d52 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -8,7 +8,6 @@ #include "ixgbe.h" u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); -s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 0bbad4a5cc2f..4dd897806fa5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1413,11 +1413,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, switch (stringset) { case ETH_SS_TEST: for (i = 0; i < IXGBE_TEST_LEN; i++) - ethtool_sprintf(&p, ixgbe_gstrings_test[i]); + ethtool_sprintf(&p, "%s", ixgbe_gstrings_test[i]); break; case ETH_SS_STATS: for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) - ethtool_sprintf(&p, + ethtool_sprintf(&p, "%s", ixgbe_gstrings_stats[i].stat_string); for (i = 0; i < netdev->num_tx_queues; i++) { ethtool_sprintf(&p, "tx_queue_%u_packets", i); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 8eb9839a3ca6..94bde2cad0f4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2421,7 +2421,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, } if (xdp_xmit & IXGBE_XDP_REDIR) - 
xdp_do_flush_map(); + xdp_do_flush(); if (xdp_xmit & IXGBE_XDP_TX) { struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter); @@ -10042,9 +10042,6 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, if (nla_type(attr) != IFLA_BRIDGE_MODE) continue; - if (nla_len(attr) < sizeof(mode)) - return -EINVAL; - mode = nla_get_u16(attr); status = ixgbe_configure_bridge_mode(adapter, mode); if (status) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 0310af851086..9339edbd9082 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -979,6 +979,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED; u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED; u32 tsync_rx_mtrl = PTP_EV_PORT << 16; + u32 aflags = adapter->flags; bool is_l2 = false; u32 regval; @@ -996,20 +997,20 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, case HWTSTAMP_FILTER_NONE: tsync_rx_ctl = 0; tsync_rx_mtrl = 0; - adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + aflags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG; - adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG; - adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: @@ -1023,8 +1024,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; is_l2 = true; config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; - adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_NTP_ALL: @@ -1035,7 +1036,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, if (hw->mac.type >= ixgbe_mac_X550) { tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_ALL; config->rx_filter = HWTSTAMP_FILTER_ALL; - adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; + aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; break; } fallthrough; @@ -1046,8 +1047,6 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, * Delay_Req messages and hardware does not support * timestamping all packets => return error */ - adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); config->rx_filter = HWTSTAMP_FILTER_NONE; return -ERANGE; } @@ -1079,8 +1078,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, IXGBE_TSYNCRXCTL_TYPE_ALL | IXGBE_TSYNCRXCTL_TSIP_UT_EN; config->rx_filter = HWTSTAMP_FILTER_ALL; - adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; - adapter->flags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER; + aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; + aflags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER; is_l2 = true; break; default: @@ 
-1113,6 +1112,9 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, IXGBE_WRITE_FLUSH(hw); + /* configure adapter flags only when HW is actually configured */ + adapter->flags = aflags; + /* clear TX/RX time stamp registers, just to be sure */ ixgbe_ptp_clear_tx_timestamp(adapter); IXGBE_READ_REG(hw, IXGBE_RXSTMPH); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 29cc60988071..9cfdfa8a4355 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -28,6 +28,9 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter, struct vf_macvlans *mv_list; int num_vf_macvlans, i; + /* Initialize list of VF macvlans */ + INIT_LIST_HEAD(&adapter->vf_mvs.l); + num_vf_macvlans = hw->mac.num_rar_entries - (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs); if (!num_vf_macvlans) @@ -36,8 +39,6 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter, mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans), GFP_KERNEL); if (mv_list) { - /* Initialize list of VF macvlans */ - INIT_LIST_HEAD(&adapter->vf_mvs.l); for (i = 0; i < num_vf_macvlans; i++) { mv_list[i].vf = -1; mv_list[i].free = true; @@ -639,12 +640,11 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, int vf, int index, unsigned char *mac_addr) { struct vf_macvlans *entry; - struct list_head *pos; + bool found = false; int retval = 0; if (index <= 1) { - list_for_each(pos, &adapter->vf_mvs.l) { - entry = list_entry(pos, struct vf_macvlans, l); + list_for_each_entry(entry, &adapter->vf_mvs.l, l) { if (entry->vf == vf) { entry->vf = -1; entry->free = true; @@ -662,23 +662,22 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, if (!index) return 0; - entry = NULL; - - list_for_each(pos, &adapter->vf_mvs.l) { - entry = list_entry(pos, struct vf_macvlans, l); - if (entry->free) + list_for_each_entry(entry, &adapter->vf_mvs.l, l) { + if (entry->free) { + found = true; break; + } } /* * If we traversed the entire list and didn't find a free entry - * then we're out of space on the RAR table. Also entry may - * be NULL because the original memory allocation for the list - * failed, which is not fatal but does mean we can't support - * VF requests for MACVLAN because we couldn't allocate - * memory for the list management required. + * then we're out of space on the RAR table. It's also possible + * for the &adapter->vf_mvs.l list to be empty because the original + * memory allocation for the list failed, which is not fatal but does + * mean we can't support VF requests for MACVLAN because we couldn't + * allocate memory for the list management required. 
*/ - if (!entry || !entry->free) + if (!found) return -ENOSPC; retval = ixgbe_add_mac_filter(adapter, mac_addr, vf); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index 1703c640a434..59798bc33298 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -351,7 +351,7 @@ construct_skb: } if (xdp_xmit & IXGBE_XDP_REDIR) - xdp_do_flush_map(); + xdp_do_flush(); if (xdp_xmit & IXGBE_XDP_TX) { struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 149c733fcc2b..130cb868774c 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -486,9 +486,6 @@ static inline int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring, { return 0; } #endif /* CONFIG_IXGBEVF_IPSEC */ -void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter); -void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter); - #define ixgbevf_hw_to_netdev(hw) \ (((struct ixgbevf_adapter *)(hw)->back)->netdev) diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 8537578e1cf1..81cf3361a1e5 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -43,7 +43,7 @@ #include <linux/ioport.h> #include <linux/iopoll.h> #include <linux/in.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/of_net.h> #include <linux/slab.h> #include <linux/string.h> @@ -1380,13 +1380,11 @@ static int korina_probe(struct platform_device *pdev) return rc; } -static int korina_remove(struct platform_device *pdev) +static void korina_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); unregister_netdev(dev); - - return 0; } #ifdef CONFIG_OF @@ -1405,7 +1403,7 @@ static struct platform_driver korina_driver = { .of_match_table = of_match_ptr(korina_match), }, .probe = korina_probe, - .remove = korina_remove, + .remove_new = korina_remove, }; module_platform_driver(korina_driver); diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index f5961bdcc480..1d5b7bb6380f 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -721,8 +721,7 @@ err_out: return err; } -static int -ltq_etop_remove(struct platform_device *pdev) +static void ltq_etop_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); @@ -732,11 +731,10 @@ ltq_etop_remove(struct platform_device *pdev) ltq_etop_mdio_cleanup(dev); unregister_netdev(dev); } - return 0; } static struct platform_driver ltq_mii_driver = { - .remove = ltq_etop_remove, + .remove_new = ltq_etop_remove, .driver = { .name = "ltq_etop", }, diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c index 8d646c7f8c82..8bd4def3622e 100644 --- a/drivers/net/ethernet/lantiq_xrx200.c +++ b/drivers/net/ethernet/lantiq_xrx200.c @@ -641,7 +641,7 @@ err_uninit_dma: return err; } -static int xrx200_remove(struct platform_device *pdev) +static void xrx200_remove(struct platform_device *pdev) { struct xrx200_priv *priv = platform_get_drvdata(pdev); struct net_device *net_dev = priv->net_dev; @@ -659,8 +659,6 @@ static int xrx200_remove(struct platform_device *pdev) /* shut down hardware */ xrx200_hw_cleanup(priv); - - return 0; } static const struct of_device_id xrx200_match[] = { @@ -671,7 +669,7 @@ MODULE_DEVICE_TABLE(of, xrx200_match); 
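The korina, lantiq_etop, and xrx200 hunks above, like the Marvell hunks that follow, are one mechanical conversion: platform drivers move from the int-returning .remove callback to the void-returning .remove_new, since the driver core removed the device no matter what the old callback returned. A minimal sketch of the converted shape, with hypothetical foo_* names standing in for the drivers in this diff:

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	/* A real probe would allocate a net_device, register it, and
	 * stash it with platform_set_drvdata(pdev, ndev).
	 */
	return 0;
}

static void foo_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	/* Same teardown as before; there is simply no int to return,
	 * because the core ignored it anyway.
	 */
	unregister_netdev(ndev);
}

static struct platform_driver foo_driver = {
	.probe		= foo_probe,
	.remove_new	= foo_remove,	/* was: .remove = foo_remove */
	.driver		= {
		.name	= "foo",
	},
};
module_platform_driver(foo_driver);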
static struct platform_driver xrx200_driver = { .probe = xrx200_probe, - .remove = xrx200_remove, + .remove_new = xrx200_remove, .driver = { .name = "lantiq,xrx200-net", .of_match_table = xrx200_match, diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c index ffa96059079c..5182fe737c37 100644 --- a/drivers/net/ethernet/litex/litex_liteeth.c +++ b/drivers/net/ethernet/litex/litex_liteeth.c @@ -294,13 +294,11 @@ static int liteeth_probe(struct platform_device *pdev) return 0; } -static int liteeth_remove(struct platform_device *pdev) +static void liteeth_remove(struct platform_device *pdev) { struct net_device *netdev = platform_get_drvdata(pdev); unregister_netdev(netdev); - - return 0; } static const struct of_device_id liteeth_of_match[] = { @@ -311,7 +309,7 @@ MODULE_DEVICE_TABLE(of, liteeth_of_match); static struct platform_driver liteeth_driver = { .probe = liteeth_probe, - .remove = liteeth_remove, + .remove_new = liteeth_remove, .driver = { .name = DRV_NAME, .of_match_table = liteeth_of_match, diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 3b129a1c3381..f0bdc06d253d 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2892,19 +2892,18 @@ err_put_clk: return ret; } -static int mv643xx_eth_shared_remove(struct platform_device *pdev) +static void mv643xx_eth_shared_remove(struct platform_device *pdev) { struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); mv643xx_eth_shared_of_remove(); if (!IS_ERR(msp->clk)) clk_disable_unprepare(msp->clk); - return 0; } static struct platform_driver mv643xx_eth_shared_driver = { .probe = mv643xx_eth_shared_probe, - .remove = mv643xx_eth_shared_remove, + .remove_new = mv643xx_eth_shared_remove, .driver = { .name = MV643XX_ETH_SHARED_NAME, .of_match_table = of_match_ptr(mv643xx_eth_shared_ids), @@ -3279,7 +3278,7 @@ out: return err; } -static int mv643xx_eth_remove(struct platform_device *pdev) +static void mv643xx_eth_remove(struct platform_device *pdev) { struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); struct net_device *dev = mp->dev; @@ -3293,8 +3292,6 @@ static int mv643xx_eth_remove(struct platform_device *pdev) clk_disable_unprepare(mp->clk); free_netdev(mp->dev); - - return 0; } static void mv643xx_eth_shutdown(struct platform_device *pdev) @@ -3311,7 +3308,7 @@ static void mv643xx_eth_shutdown(struct platform_device *pdev) static struct platform_driver mv643xx_eth_driver = { .probe = mv643xx_eth_probe, - .remove = mv643xx_eth_remove, + .remove_new = mv643xx_eth_remove, .shutdown = mv643xx_eth_shutdown, .driver = { .name = MV643XX_ETH_NAME, diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index 8662543ca5c8..89f26402f8fb 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -24,8 +24,8 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/of_device.h> #include <linux/of_mdio.h> #include <linux/phy.h> #include <linux/platform_device.h> @@ -269,7 +269,7 @@ static int orion_mdio_probe(struct platform_device *pdev) struct orion_mdio_dev *dev; int i, ret; - type = (enum orion_mdio_bus_type)device_get_match_data(&pdev->dev); + type = (uintptr_t)device_get_match_data(&pdev->dev); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { @@ -388,7 +388,7 @@ out_clk: 
return ret; } -static int orion_mdio_remove(struct platform_device *pdev) +static void orion_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); struct orion_mdio_dev *dev = bus->priv; @@ -404,8 +404,6 @@ static int orion_mdio_remove(struct platform_device *pdev) clk_disable_unprepare(dev->clk[i]); clk_put(dev->clk[i]); } - - return 0; } static const struct of_device_id orion_mdio_match[] = { @@ -426,7 +424,7 @@ MODULE_DEVICE_TABLE(acpi, orion_mdio_acpi_match); static struct platform_driver orion_mdio_driver = { .probe = orion_mdio_probe, - .remove = orion_mdio_remove, + .remove_new = orion_mdio_remove, .driver = { .name = "orion-mdio", .of_match_table = orion_mdio_match, diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index acf4f6ba73a6..29aac327574d 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -37,7 +37,7 @@ #include <net/ip.h> #include <net/ipv6.h> #include <net/tso.h> -#include <net/page_pool.h> +#include <net/page_pool/helpers.h> #include <net/pkt_sched.h> #include <linux/bpf_trace.h> @@ -2520,7 +2520,7 @@ next: mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1); if (ps.xdp_redirect) - xdp_do_flush_map(); + xdp_do_flush(); if (ps.rx_packets) mvneta_update_stats(pp, &ps); @@ -4790,14 +4790,17 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset, u8 *data) { if (sset == ETH_SS_STATS) { + struct mvneta_port *pp = netdev_priv(netdev); int i; for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) memcpy(data + i * ETH_GSTRING_LEN, mvneta_statistics[i].name, ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics); - page_pool_ethtool_stats_get_strings(data); + if (!pp->bm_priv) { + data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics); + page_pool_ethtool_stats_get_strings(data); + } } } @@ -4915,8 +4918,10 @@ static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data) struct page_pool_stats stats = {}; int i; - for (i = 0; i < rxq_number; i++) - page_pool_get_stats(pp->rxqs[i].page_pool, &stats); + for (i = 0; i < rxq_number; i++) { + if (pp->rxqs[i].page_pool) + page_pool_get_stats(pp->rxqs[i].page_pool, &stats); + } page_pool_ethtool_stats_get(data, &stats); } @@ -4932,14 +4937,21 @@ static void mvneta_ethtool_get_stats(struct net_device *dev, for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) *data++ = pp->ethtool_stats[i]; - mvneta_ethtool_pp_stats(pp, data); + if (!pp->bm_priv) + mvneta_ethtool_pp_stats(pp, data); } static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset) { - if (sset == ETH_SS_STATS) - return ARRAY_SIZE(mvneta_statistics) + - page_pool_ethtool_stats_get_count(); + if (sset == ETH_SS_STATS) { + int count = ARRAY_SIZE(mvneta_statistics); + struct mvneta_port *pp = netdev_priv(dev); + + if (!pp->bm_priv) + count += page_pool_ethtool_stats_get_count(); + + return count; + } return -EOPNOTSUPP; } @@ -5725,7 +5737,7 @@ err_free_irq: } /* Device removal routine */ -static int mvneta_remove(struct platform_device *pdev) +static void mvneta_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct mvneta_port *pp = netdev_priv(dev); @@ -5744,8 +5756,6 @@ static int mvneta_remove(struct platform_device *pdev) 1 << pp->id); mvneta_bm_put(pp->bm_priv); } - - return 0; } #ifdef CONFIG_PM_SLEEP @@ -5871,7 +5881,7 @@ MODULE_DEVICE_TABLE(of, mvneta_match); static struct platform_driver mvneta_driver = { .probe = mvneta_probe, - .remove = 
mvneta_remove, + .remove_new = mvneta_remove, .driver = { .name = MVNETA_DRIVER_NAME, .of_match_table = mvneta_match, diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c index 46c942ef2287..3f46a0fed048 100644 --- a/drivers/net/ethernet/marvell/mvneta_bm.c +++ b/drivers/net/ethernet/marvell/mvneta_bm.c @@ -457,7 +457,7 @@ err_clk: return err; } -static int mvneta_bm_remove(struct platform_device *pdev) +static void mvneta_bm_remove(struct platform_device *pdev) { struct mvneta_bm *priv = platform_get_drvdata(pdev); u8 all_ports_map = 0xff; @@ -475,8 +475,6 @@ static int mvneta_bm_remove(struct platform_device *pdev) mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_STOP_MASK); clk_disable_unprepare(priv->clk); - - return 0; } static const struct of_device_id mvneta_bm_match[] = { @@ -487,7 +485,7 @@ MODULE_DEVICE_TABLE(of, mvneta_bm_match); static struct platform_driver mvneta_bm_driver = { .probe = mvneta_bm_probe, - .remove = mvneta_bm_remove, + .remove_new = mvneta_bm_remove, .driver = { .name = MVNETA_BM_DRIVER_NAME, .of_match_table = mvneta_bm_match, diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 11e603686a27..e809f91c08fb 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -16,7 +16,7 @@ #include <linux/phy.h> #include <linux/phylink.h> #include <net/flow_offload.h> -#include <net/page_pool.h> +#include <net/page_pool/types.h> #include <linux/bpf.h> #include <net/xdp.h> diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c index 75e83ea2a926..0f9bc4f8ec3b 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c @@ -593,8 +593,6 @@ static int mvpp2_dbgfs_c2_entry_init(struct dentry *parent, sprintf(c2_entry_name, "%03d", id); c2_entry_dir = debugfs_create_dir(c2_entry_name, parent); - if (!c2_entry_dir) - return -ENOMEM; entry = &priv->dbgfs_entries->c2_entries[id]; @@ -626,8 +624,6 @@ static int mvpp2_dbgfs_flow_tbl_entry_init(struct dentry *parent, sprintf(flow_tbl_entry_name, "%03d", id); flow_tbl_entry_dir = debugfs_create_dir(flow_tbl_entry_name, parent); - if (!flow_tbl_entry_dir) - return -ENOMEM; entry = &priv->dbgfs_entries->flt_entries[id]; @@ -646,12 +642,8 @@ static int mvpp2_dbgfs_cls_init(struct dentry *parent, struct mvpp2 *priv) int i, ret; cls_dir = debugfs_create_dir("classifier", parent); - if (!cls_dir) - return -ENOMEM; c2_dir = debugfs_create_dir("c2", cls_dir); - if (!c2_dir) - return -ENOMEM; for (i = 0; i < MVPP22_CLS_C2_N_ENTRIES; i++) { ret = mvpp2_dbgfs_c2_entry_init(c2_dir, priv, i); @@ -660,8 +652,6 @@ static int mvpp2_dbgfs_cls_init(struct dentry *parent, struct mvpp2 *priv) } flow_tbl_dir = debugfs_create_dir("flow_table", cls_dir); - if (!flow_tbl_dir) - return -ENOMEM; for (i = 0; i < MVPP2_CLS_FLOWS_TBL_SIZE; i++) { ret = mvpp2_dbgfs_flow_tbl_entry_init(flow_tbl_dir, priv, i); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 1fec84b4c068..93137606869e 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -24,7 +24,6 @@ #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/of_address.h> -#include <linux/of_device.h> #include <linux/phy.h> #include <linux/phylink.h> #include <linux/phy/phy.h> @@ -36,6 +35,7 @@ #include 
<uapi/linux/ppp_defs.h> #include <net/ip.h> #include <net/ipv6.h> +#include <net/page_pool/helpers.h> #include <net/tso.h> #include <linux/bpf_trace.h> @@ -4027,7 +4027,7 @@ err_drop_frame: } if (xdp_ret & MVPP2_XDP_REDIR) - xdp_do_flush_map(); + xdp_do_flush(); if (ps.rx_packets) { struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); @@ -5586,6 +5586,11 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, break; case ETHTOOL_GRXCLSRLALL: for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) { + if (loc == info->rule_cnt) { + ret = -EMSGSIZE; + break; + } + if (port->rfs_rules[i]) rules[loc++] = i; } @@ -5826,7 +5831,7 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, v->type = MVPP2_QUEUE_VECTOR_SHARED; if (port->flags & MVPP2_F_DT_COMPAT) - strncpy(irqname, "rx-shared", sizeof(irqname)); + strscpy(irqname, "rx-shared", sizeof(irqname)); } if (port_node) @@ -7657,7 +7662,7 @@ err_pp_clk: return err; } -static int mvpp2_remove(struct platform_device *pdev) +static void mvpp2_remove(struct platform_device *pdev) { struct mvpp2 *priv = platform_get_drvdata(pdev); struct fwnode_handle *fwnode = pdev->dev.fwnode; @@ -7695,15 +7700,13 @@ static int mvpp2_remove(struct platform_device *pdev) } if (is_acpi_node(port_fwnode)) - return 0; + return; clk_disable_unprepare(priv->axi_clk); clk_disable_unprepare(priv->mg_core_clk); clk_disable_unprepare(priv->mg_clk); clk_disable_unprepare(priv->pp_clk); clk_disable_unprepare(priv->gop_clk); - - return 0; } static const struct of_device_id mvpp2_match[] = { @@ -7729,7 +7732,7 @@ MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match); static struct platform_driver mvpp2_driver = { .probe = mvpp2_probe, - .remove = mvpp2_remove, + .remove_new = mvpp2_remove, .driver = { .name = MVPP2_DRIVER_NAME, .of_match_table = mvpp2_match, diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c index 90c3a419932d..d4ee2454675b 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c @@ -16,9 +16,6 @@ #define CTRL_MBOX_MAX_PF 128 #define CTRL_MBOX_SZ ((size_t)(0x400000 / CTRL_MBOX_MAX_PF)) -#define FW_HB_INTERVAL_IN_SECS 1 -#define FW_HB_MISS_COUNT 10 - /* Names of Hardware non-queue generic interrupts */ static char *cn93_non_ioq_msix_names[] = { "epf_ire_rint", @@ -250,12 +247,11 @@ static void octep_init_config_cn93_pf(struct octep_device *oct) link = PCI_DEVFN(PCI_SLOT(oct->pdev->devfn), link); } conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr + - (0x400000ull * 7) + + CN93_PEM_BAR4_INDEX_OFFSET + (link * CTRL_MBOX_SZ); - conf->hb_interval = FW_HB_INTERVAL_IN_SECS; - conf->max_hb_miss_cnt = FW_HB_MISS_COUNT; - + conf->fw_info.hb_interval = OCTEP_DEFAULT_FW_HB_INTERVAL; + conf->fw_info.hb_miss_count = OCTEP_DEFAULT_FW_HB_MISS_COUNT; } /* Setup registers for a hardware Tx Queue */ @@ -373,34 +369,40 @@ static void octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no) mbox->mbox_read_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_VF_PF_DATA(q_no); } -/* Process non-ioq interrupts required to keep pf interface running. 
- * OEI_RINT is needed for control mailbox - */ -static bool octep_poll_non_ioq_interrupts_cn93_pf(struct octep_device *oct) -{ - bool handled = false; - u64 reg0; - - /* Check for OEI INTR */ - reg0 = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT); - if (reg0) { - dev_info(&oct->pdev->dev, - "Received OEI_RINT intr: 0x%llx\n", - reg0); - octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg0); - if (reg0 & CN93_SDP_EPF_OEI_RINT_DATA_BIT_MBOX) +/* Poll OEI events like heartbeat */ +static void octep_poll_oei_cn93_pf(struct octep_device *oct) +{ + u64 reg; + + reg = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT); + if (reg) { + octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg); + if (reg & CN93_SDP_EPF_OEI_RINT_DATA_BIT_MBOX) queue_work(octep_wq, &oct->ctrl_mbox_task); - else if (reg0 & CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT) + else if (reg & CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT) atomic_set(&oct->hb_miss_cnt, 0); - - handled = true; } +} + +/* OEI interrupt handler */ +static irqreturn_t octep_oei_intr_handler_cn93_pf(void *dev) +{ + struct octep_device *oct = (struct octep_device *)dev; + + octep_poll_oei_cn93_pf(oct); + return IRQ_HANDLED; +} - return handled; +/* Process non-ioq interrupts required to keep pf interface running. + * OEI_RINT is needed for control mailbox + */ +static void octep_poll_non_ioq_interrupts_cn93_pf(struct octep_device *oct) +{ + octep_poll_oei_cn93_pf(oct); } -/* Interrupts handler for all non-queue generic interrupts. */ -static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) +/* Interrupt handler for input ring error interrupts. */ +static irqreturn_t octep_ire_intr_handler_cn93_pf(void *dev) { struct octep_device *oct = (struct octep_device *)dev; struct pci_dev *pdev = oct->pdev; @@ -425,8 +427,17 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) reg_val); } } - goto irq_handled; } + return IRQ_HANDLED; +} + +/* Interrupt handler for output ring error interrupts. */ +static irqreturn_t octep_ore_intr_handler_cn93_pf(void *dev) +{ + struct octep_device *oct = (struct octep_device *)dev; + struct pci_dev *pdev = oct->pdev; + u64 reg_val = 0; + int i = 0; /* Check for ORERR INTR */ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_ORERR_RINT); @@ -444,9 +455,16 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) reg_val); } } - - goto irq_handled; } + return IRQ_HANDLED; +} + +/* Interrupt handler for vf input ring error interrupts. */ +static irqreturn_t octep_vfire_intr_handler_cn93_pf(void *dev) +{ + struct octep_device *oct = (struct octep_device *)dev; + struct pci_dev *pdev = oct->pdev; + u64 reg_val = 0; /* Check for VFIRE INTR */ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0)); @@ -454,8 +472,16 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) dev_info(&pdev->dev, "Received VFIRE_RINT intr: 0x%llx\n", reg_val); octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0), reg_val); - goto irq_handled; } + return IRQ_HANDLED; +} + +/* Interrupt handler for vf output ring error interrupts. 
*/ +static irqreturn_t octep_vfore_intr_handler_cn93_pf(void *dev) +{ + struct octep_device *oct = (struct octep_device *)dev; + struct pci_dev *pdev = oct->pdev; + u64 reg_val = 0; /* Check for VFORE INTR */ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0)); @@ -463,19 +489,30 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) dev_info(&pdev->dev, "Received VFORE_RINT intr: 0x%llx\n", reg_val); octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0), reg_val); - goto irq_handled; } + return IRQ_HANDLED; +} - /* Check for MBOX INTR and OEI INTR */ - if (octep_poll_non_ioq_interrupts_cn93_pf(oct)) - goto irq_handled; +/* Interrupt handler for dpi dma related interrupts. */ +static irqreturn_t octep_dma_intr_handler_cn93_pf(void *dev) +{ + struct octep_device *oct = (struct octep_device *)dev; + u64 reg_val = 0; /* Check for DMA INTR */ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_RINT); if (reg_val) { octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT, reg_val); - goto irq_handled; } + return IRQ_HANDLED; +} + +/* Interrupt handler for dpi dma transaction error interrupts for VFs */ +static irqreturn_t octep_dma_vf_intr_handler_cn93_pf(void *dev) +{ + struct octep_device *oct = (struct octep_device *)dev; + struct pci_dev *pdev = oct->pdev; + u64 reg_val = 0; /* Check for DMA VF INTR */ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0)); @@ -483,8 +520,16 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) dev_info(&pdev->dev, "Received DMA_VF_RINT intr: 0x%llx\n", reg_val); octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0), reg_val); - goto irq_handled; } + return IRQ_HANDLED; +} + +/* Interrupt handler for pp transaction error interrupts for VFs */ +static irqreturn_t octep_pp_vf_intr_handler_cn93_pf(void *dev) +{ + struct octep_device *oct = (struct octep_device *)dev; + struct pci_dev *pdev = oct->pdev; + u64 reg_val = 0; /* Check for PPVF INTR */ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0)); @@ -492,8 +537,16 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) dev_info(&pdev->dev, "Received PP_VF_RINT intr: 0x%llx\n", reg_val); octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0), reg_val); - goto irq_handled; } + return IRQ_HANDLED; +} + +/* Interrupt handler for mac related interrupts. */ +static irqreturn_t octep_misc_intr_handler_cn93_pf(void *dev) +{ + struct octep_device *oct = (struct octep_device *)dev; + struct pci_dev *pdev = oct->pdev; + u64 reg_val = 0; /* Check for MISC INTR */ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MISC_RINT); @@ -501,11 +554,17 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) dev_info(&pdev->dev, "Received MISC_RINT intr: 0x%llx\n", reg_val); octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT, reg_val); - goto irq_handled; } + return IRQ_HANDLED; +} + +/* Interrupt handler for all reserved interrupts.
*/ +static irqreturn_t octep_rsvd_intr_handler_cn93_pf(void *dev) +{ + struct octep_device *oct = (struct octep_device *)dev; + struct pci_dev *pdev = oct->pdev; dev_info(&pdev->dev, "Reserved interrupts raised; Ignore\n"); -irq_handled: return IRQ_HANDLED; } @@ -569,8 +628,15 @@ static void octep_enable_interrupts_cn93_pf(struct octep_device *oct) octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1S, intr_mask); octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1S, intr_mask); octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1S, -1ULL); + + octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT_ENA_W1S(0), -1ULL); + octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT_ENA_W1S(0), -1ULL); + octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask); octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask); + + octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S(0), -1ULL); + octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT_ENA_W1S(0), -1ULL); } /* Disable all interrupts */ @@ -588,8 +654,15 @@ static void octep_disable_interrupts_cn93_pf(struct octep_device *oct) octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask); octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask); octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1C, -1ULL); + + octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT_ENA_W1C(0), -1ULL); + octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT_ENA_W1C(0), -1ULL); + octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask); octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask); + + octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C(0), -1ULL); + octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT_ENA_W1C(0), -1ULL); } /* Get new Octeon Read Index: index of descriptor that Octeon reads next. */ @@ -722,7 +795,16 @@ void octep_device_setup_cn93_pf(struct octep_device *oct) oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cn93_pf; oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cn93_pf; - oct->hw_ops.non_ioq_intr_handler = octep_non_ioq_intr_handler_cn93_pf; + oct->hw_ops.oei_intr_handler = octep_oei_intr_handler_cn93_pf; + oct->hw_ops.ire_intr_handler = octep_ire_intr_handler_cn93_pf; + oct->hw_ops.ore_intr_handler = octep_ore_intr_handler_cn93_pf; + oct->hw_ops.vfire_intr_handler = octep_vfire_intr_handler_cn93_pf; + oct->hw_ops.vfore_intr_handler = octep_vfore_intr_handler_cn93_pf; + oct->hw_ops.dma_intr_handler = octep_dma_intr_handler_cn93_pf; + oct->hw_ops.dma_vf_intr_handler = octep_dma_vf_intr_handler_cn93_pf; + oct->hw_ops.pp_vf_intr_handler = octep_pp_vf_intr_handler_cn93_pf; + oct->hw_ops.misc_intr_handler = octep_misc_intr_handler_cn93_pf; + oct->hw_ops.rsvd_intr_handler = octep_rsvd_intr_handler_cn93_pf; oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cn93_pf; oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf; oct->hw_ops.reinit_regs = octep_reinit_regs_cn93_pf; diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h index df7cd39d9fce..1622a6ebf036 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h @@ -49,6 +49,11 @@ /* Default MTU */ #define OCTEP_DEFAULT_MTU 1500 +/* pf heartbeat interval in milliseconds */ +#define OCTEP_DEFAULT_FW_HB_INTERVAL 1000 +/* pf heartbeat miss count */ +#define OCTEP_DEFAULT_FW_HB_MISS_COUNT 20 + /* Macros to get octeon config params */ #define CFG_GET_IQ_CFG(cfg) ((cfg)->iq) #define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs) @@ -181,6 
+186,16 @@ struct octep_ctrl_mbox_config { void __iomem *barmem_addr; }; +/* Info from firmware */ +struct octep_fw_info { + /* interface pkind */ + u16 pkind; + /* heartbeat interval in milliseconds */ + u16 hb_interval; + /* heartbeat miss count */ + u16 hb_miss_count; +}; + /* Data Structure to hold configuration limits and active config */ struct octep_config { /* Input Queue attributes. */ @@ -201,10 +216,7 @@ struct octep_config { /* ctrl mbox config */ struct octep_ctrl_mbox_config ctrl_mbox_cfg; - /* Configured maximum heartbeat miss count */ - u32 max_hb_miss_cnt; - - /* Configured firmware heartbeat interval in secs */ - u32 hb_interval; + /* fw info */ + struct octep_fw_info fw_info; }; #endif /* _OCTEP_CONFIG_H_ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cp_version.h b/drivers/net/ethernet/marvell/octeon_ep/octep_cp_version.h new file mode 100644 index 000000000000..0c741e752db6 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cp_version.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2022 Marvell. + */ +#ifndef __OCTEP_CP_VERSION_H__ +#define __OCTEP_CP_VERSION_H__ + +#define OCTEP_CP_VERSION(a, b, c) ((((a) & 0xff) << 16) + \ + (((b) & 0xff) << 8) + \ + ((c) & 0xff)) + +#endif /* __OCTEP_CP_VERSION_H__ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c index dab61cc1acb5..9d53c1402cb4 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c @@ -37,7 +37,9 @@ #define OCTEP_CTRL_MBOX_INFO_MAGIC_NUM(m) (m) #define OCTEP_CTRL_MBOX_INFO_BARMEM_SZ(m) ((m) + 8) +#define OCTEP_CTRL_MBOX_INFO_HOST_VERSION(m) ((m) + 16) #define OCTEP_CTRL_MBOX_INFO_HOST_STATUS(m) ((m) + 24) +#define OCTEP_CTRL_MBOX_INFO_FW_VERSION(m) ((m) + 136) #define OCTEP_CTRL_MBOX_INFO_FW_STATUS(m) ((m) + 144) #define OCTEP_CTRL_MBOX_H2FQ_INFO(m) ((m) + OCTEP_CTRL_MBOX_INFO_SZ) @@ -71,7 +73,7 @@ static u32 octep_ctrl_mbox_circq_depth(u32 pi, u32 ci, u32 sz) int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox) { - u64 magic_num, status; + u64 magic_num, status, fw_versions; if (!mbox) return -EINVAL; @@ -93,6 +95,9 @@ int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox) return -EINVAL; } + fw_versions = readq(OCTEP_CTRL_MBOX_INFO_FW_VERSION(mbox->barmem)); + mbox->min_fw_version = ((fw_versions & 0xffffffff00000000ull) >> 32); + mbox->max_fw_version = (fw_versions & 0xffffffff); mbox->barmem_sz = readl(OCTEP_CTRL_MBOX_INFO_BARMEM_SZ(mbox->barmem)); writeq(OCTEP_CTRL_MBOX_STATUS_INIT, @@ -113,6 +118,7 @@ int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox) OCTEP_CTRL_MBOX_TOTAL_INFO_SZ + mbox->h2fq.sz; + writeq(mbox->version, OCTEP_CTRL_MBOX_INFO_HOST_VERSION(mbox->barmem)); /* ensure ready state is seen after everything is initialized */ wmb(); writeq(OCTEP_CTRL_MBOX_STATUS_READY, @@ -258,6 +264,7 @@ int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox) if (!mbox->barmem) return -EINVAL; + writeq(0, OCTEP_CTRL_MBOX_INFO_HOST_VERSION(mbox->barmem)); writeq(OCTEP_CTRL_MBOX_STATUS_INVALID, OCTEP_CTRL_MBOX_INFO_HOST_STATUS(mbox->barmem)); /* ensure uninit state is written before uninitialization */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h index 9c4ff0fba6a0..7f8135788efc 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h @@ 
-121,6 +121,8 @@ struct octep_ctrl_mbox_q { }; struct octep_ctrl_mbox { + /* control plane version */ + u64 version; /* size of bar memory */ u32 barmem_sz; /* pointer to BAR memory */ @@ -133,6 +135,10 @@ struct octep_ctrl_mbox { struct mutex h2fq_lock; /* lock for f2hq */ struct mutex f2hq_lock; + /* Min control plane version supported by firmware */ + u32 min_fw_version; + /* Max control plane version supported by firmware */ + u32 max_fw_version; }; /* Initialize control mbox. diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c index 565320ec24f8..0594607a2585 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c @@ -14,6 +14,9 @@ #include "octep_main.h" #include "octep_ctrl_net.h" +/* Control plane version */ +#define OCTEP_CP_VERSION_CURRENT OCTEP_CP_VERSION(1, 0, 0) + static const u32 req_hdr_sz = sizeof(union octep_ctrl_net_req_hdr); static const u32 mtu_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_mtu); static const u32 mac_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_mac); @@ -21,6 +24,18 @@ static const u32 state_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_state); static const u32 link_info_sz = sizeof(struct octep_ctrl_net_link_info); static atomic_t ctrl_net_msg_id; +/* Control plane version in which OCTEP_CTRL_NET_H2F_CMD was added */ +static const u32 octep_ctrl_net_h2f_cmd_versions[OCTEP_CTRL_NET_H2F_CMD_MAX] = { + [OCTEP_CTRL_NET_H2F_CMD_INVALID ... OCTEP_CTRL_NET_H2F_CMD_GET_INFO] = + OCTEP_CP_VERSION(1, 0, 0) +}; + +/* Control plane version in which OCTEP_CTRL_NET_F2H_CMD was added */ +static const u32 octep_ctrl_net_f2h_cmd_versions[OCTEP_CTRL_NET_F2H_CMD_MAX] = { + [OCTEP_CTRL_NET_F2H_CMD_INVALID ... 
OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS] = + OCTEP_CP_VERSION(1, 0, 0) +}; + static void init_send_req(struct octep_ctrl_mbox_msg *msg, void *buf, u16 sz, int vfid) { @@ -41,7 +56,13 @@ static int octep_send_mbox_req(struct octep_device *oct, struct octep_ctrl_net_wait_data *d, bool wait_for_response) { - int err, ret; + int err, ret, cmd; + + /* check if firmware is compatible for this request */ + cmd = d->data.req.hdr.s.cmd; + if (octep_ctrl_net_h2f_cmd_versions[cmd] > oct->ctrl_mbox.max_fw_version || + octep_ctrl_net_h2f_cmd_versions[cmd] < oct->ctrl_mbox.min_fw_version) + return -EOPNOTSUPP; err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &d->msg); if (err < 0) @@ -84,12 +105,16 @@ int octep_ctrl_net_init(struct octep_device *oct) /* Initialize control mbox */ ctrl_mbox = &oct->ctrl_mbox; + ctrl_mbox->version = OCTEP_CP_VERSION_CURRENT; ctrl_mbox->barmem = CFG_GET_CTRL_MBOX_MEM_ADDR(oct->conf); ret = octep_ctrl_mbox_init(ctrl_mbox); if (ret) { dev_err(&pdev->dev, "Failed to initialize control mbox\n"); return ret; } + dev_info(&pdev->dev, "Control plane versions host: %llx, firmware: %x:%x\n", + ctrl_mbox->version, ctrl_mbox->min_fw_version, + ctrl_mbox->max_fw_version); oct->ctrl_mbox_ifstats_offset = ctrl_mbox->barmem_sz; return 0; @@ -273,9 +298,17 @@ static int process_mbox_notify(struct octep_device *oct, { struct net_device *netdev = oct->netdev; struct octep_ctrl_net_f2h_req *req; + int cmd; req = (struct octep_ctrl_net_f2h_req *)msg->sg_list[0].msg; - switch (req->hdr.s.cmd) { + cmd = req->hdr.s.cmd; + + /* check if we support this command */ + if (octep_ctrl_net_f2h_cmd_versions[cmd] > OCTEP_CP_VERSION_CURRENT || + octep_ctrl_net_f2h_cmd_versions[cmd] < OCTEP_CP_VERSION_CURRENT) + return -EOPNOTSUPP; + + switch (cmd) { case OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS: if (netif_running(netdev)) { if (req->link.state) { @@ -320,6 +353,28 @@ void octep_ctrl_net_recv_fw_messages(struct octep_device *oct) } } +int octep_ctrl_net_get_info(struct octep_device *oct, int vfid, + struct octep_fw_info *info) +{ + struct octep_ctrl_net_wait_data d = {0}; + struct octep_ctrl_net_h2f_resp *resp; + struct octep_ctrl_net_h2f_req *req; + int err; + + req = &d.data.req; + init_send_req(&d.msg, req, 0, vfid); + req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_GET_INFO; + req->link_info.cmd = OCTEP_CTRL_NET_CMD_GET; + err = octep_send_mbox_req(oct, &d, true); + if (err < 0) + return err; + + resp = &d.data.resp; + memcpy(info, &resp->info.fw_info, sizeof(struct octep_fw_info)); + + return 0; +} + int octep_ctrl_net_uninit(struct octep_device *oct) { struct octep_ctrl_net_wait_data *pos, *n; diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h index 37880dd79116..b330f370131b 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h @@ -7,6 +7,8 @@ #ifndef __OCTEP_CTRL_NET_H__ #define __OCTEP_CTRL_NET_H__ +#include "octep_cp_version.h" + #define OCTEP_CTRL_NET_INVALID_VFID (-1) /* Supported commands */ @@ -39,12 +41,15 @@ enum octep_ctrl_net_h2f_cmd { OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS, OCTEP_CTRL_NET_H2F_CMD_RX_STATE, OCTEP_CTRL_NET_H2F_CMD_LINK_INFO, + OCTEP_CTRL_NET_H2F_CMD_GET_INFO, + OCTEP_CTRL_NET_H2F_CMD_MAX }; /* Supported fw to host commands */ enum octep_ctrl_net_f2h_cmd { OCTEP_CTRL_NET_F2H_CMD_INVALID = 0, OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS, + OCTEP_CTRL_NET_F2H_CMD_MAX }; union octep_ctrl_net_req_hdr { @@ -157,6 +162,11 @@ struct octep_ctrl_net_h2f_resp_cmd_state { 
u16 state; }; +/* get info response */ +struct octep_ctrl_net_h2f_resp_cmd_get_info { + struct octep_fw_info fw_info; +}; + /* Host to fw response data */ struct octep_ctrl_net_h2f_resp { union octep_ctrl_net_resp_hdr hdr; @@ -167,6 +177,7 @@ struct octep_ctrl_net_h2f_resp { struct octep_ctrl_net_h2f_resp_cmd_state link; struct octep_ctrl_net_h2f_resp_cmd_state rx; struct octep_ctrl_net_link_info link_info; + struct octep_ctrl_net_h2f_resp_cmd_get_info info; }; } __packed; @@ -326,6 +337,17 @@ int octep_ctrl_net_set_link_info(struct octep_device *oct, */ void octep_ctrl_net_recv_fw_messages(struct octep_device *oct); +/** Get info from firmware. + * + * @param oct: non-null pointer to struct octep_device. + * @param vfid: Index of virtual function. + * @param info: non-null pointer to struct octep_fw_info. + * + * return value: 0 on success, -errno on failure. + */ +int octep_ctrl_net_get_info(struct octep_device *oct, int vfid, + struct octep_fw_info *info); + /** Uninitialize data for ctrl net. * * @param oct: non-null pointer to struct octep_device. diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index 4424de2ffd70..a9bdf3283a85 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -155,18 +155,153 @@ static void octep_disable_msix(struct octep_device *oct) } /** - * octep_non_ioq_intr_handler() - common handler for all generic interrupts. + * octep_oei_intr_handler() - common handler for output endpoint interrupts. * * @irq: Interrupt number. * @data: interrupt data. * - * this is common handler for all non-queue (generic) interrupts. + * this is common handler for all output endpoint interrupts. + */ +static irqreturn_t octep_oei_intr_handler(int irq, void *data) +{ + struct octep_device *oct = data; + + return oct->hw_ops.oei_intr_handler(oct); +} + +/** + * octep_ire_intr_handler() - common handler for input ring error interrupts. + * + * @irq: Interrupt number. + * @data: interrupt data. + * + * this is common handler for input ring error interrupts. + */ +static irqreturn_t octep_ire_intr_handler(int irq, void *data) +{ + struct octep_device *oct = data; + + return oct->hw_ops.ire_intr_handler(oct); +} + +/** + * octep_ore_intr_handler() - common handler for output ring error interrupts. + * + * @irq: Interrupt number. + * @data: interrupt data. + * + * this is common handler for output ring error interrupts. + */ +static irqreturn_t octep_ore_intr_handler(int irq, void *data) +{ + struct octep_device *oct = data; + + return oct->hw_ops.ore_intr_handler(oct); +} + +/** + * octep_vfire_intr_handler() - common handler for vf input ring error interrupts. + * + * @irq: Interrupt number. + * @data: interrupt data. + * + * this is common handler for vf input ring error interrupts. + */ +static irqreturn_t octep_vfire_intr_handler(int irq, void *data) +{ + struct octep_device *oct = data; + + return oct->hw_ops.vfire_intr_handler(oct); +} + +/** + * octep_vfore_intr_handler() - common handler for vf output ring error interrupts. + * + * @irq: Interrupt number. + * @data: interrupt data. + * + * this is common handler for vf output ring error interrupts. + */ +static irqreturn_t octep_vfore_intr_handler(int irq, void *data) +{ + struct octep_device *oct = data; + + return oct->hw_ops.vfore_intr_handler(oct); +} + +/** + * octep_dma_intr_handler() - common handler for dpi dma related interrupts. + * + * @irq: Interrupt number.
+ * @data: interrupt data. + * + * this is common handler for dpi dma related interrupts. + */ +static irqreturn_t octep_dma_intr_handler(int irq, void *data) +{ + struct octep_device *oct = data; + + return oct->hw_ops.dma_intr_handler(oct); +} + +/** + * octep_dma_vf_intr_handler() - common handler for dpi dma transaction error interrupts for VFs. + * + * @irq: Interrupt number. + * @data: interrupt data. + * + * this is common handler for dpi dma transaction error interrupts for VFs. + */ +static irqreturn_t octep_dma_vf_intr_handler(int irq, void *data) +{ + struct octep_device *oct = data; + + return oct->hw_ops.dma_vf_intr_handler(oct); +} + +/** + * octep_pp_vf_intr_handler() - common handler for pp transaction error interrupts for VFs. + * + * @irq: Interrupt number. + * @data: interrupt data. + * + * this is common handler for pp transaction error interrupts for VFs. + */ +static irqreturn_t octep_pp_vf_intr_handler(int irq, void *data) +{ + struct octep_device *oct = data; + + return oct->hw_ops.pp_vf_intr_handler(oct); +} + +/** + * octep_misc_intr_handler() - common handler for mac related interrupts. + * + * @irq: Interrupt number. + * @data: interrupt data. + * + * this is common handler for mac related interrupts. */ -static irqreturn_t octep_non_ioq_intr_handler(int irq, void *data) +static irqreturn_t octep_misc_intr_handler(int irq, void *data) { struct octep_device *oct = data; - return oct->hw_ops.non_ioq_intr_handler(oct); + return oct->hw_ops.misc_intr_handler(oct); +} + +/** + * octep_rsvd_intr_handler() - common handler for reserved interrupts (future use). + * + * @irq: Interrupt number. + * @data: interrupt data. + * + * this is common handler for all reserved interrupts. + */ +static irqreturn_t octep_rsvd_intr_handler(int irq, void *data) +{ + struct octep_device *oct = data; + + return oct->hw_ops.rsvd_intr_handler(oct); } /** @@ -222,9 +357,57 @@ static int octep_request_irqs(struct octep_device *oct) snprintf(irq_name, OCTEP_MSIX_NAME_SIZE, "%s-%s", netdev->name, non_ioq_msix_names[i]); - ret = request_irq(msix_entry->vector, - octep_non_ioq_intr_handler, 0, - irq_name, oct); + if (!strncmp(non_ioq_msix_names[i], "epf_oei_rint", + strlen("epf_oei_rint"))) { + ret = request_irq(msix_entry->vector, + octep_oei_intr_handler, 0, + irq_name, oct); + } else if (!strncmp(non_ioq_msix_names[i], "epf_ire_rint", + strlen("epf_ire_rint"))) { + ret = request_irq(msix_entry->vector, + octep_ire_intr_handler, 0, + irq_name, oct); + } else if (!strncmp(non_ioq_msix_names[i], "epf_ore_rint", + strlen("epf_ore_rint"))) { + ret = request_irq(msix_entry->vector, + octep_ore_intr_handler, 0, + irq_name, oct); + } else if (!strncmp(non_ioq_msix_names[i], "epf_vfire_rint", + strlen("epf_vfire_rint"))) { + ret = request_irq(msix_entry->vector, + octep_vfire_intr_handler, 0, + irq_name, oct); + } else if (!strncmp(non_ioq_msix_names[i], "epf_vfore_rint", + strlen("epf_vfore_rint"))) { + ret = request_irq(msix_entry->vector, + octep_vfore_intr_handler, 0, + irq_name, oct); + } else if (!strncmp(non_ioq_msix_names[i], "epf_dma_rint", + strlen("epf_dma_rint"))) { + ret = request_irq(msix_entry->vector, + octep_dma_intr_handler, 0, + irq_name, oct); + } else if (!strncmp(non_ioq_msix_names[i], "epf_dma_vf_rint", + strlen("epf_dma_vf_rint"))) { + ret = request_irq(msix_entry->vector, + octep_dma_vf_intr_handler, 0, + irq_name, oct); + } else if (!strncmp(non_ioq_msix_names[i], "epf_pp_vf_rint", + strlen("epf_pp_vf_rint"))) { + ret = request_irq(msix_entry->vector, + 
octep_pp_vf_intr_handler, 0, + irq_name, oct); + } else if (!strncmp(non_ioq_msix_names[i], "epf_misc_rint", + strlen("epf_misc_rint"))) { + ret = request_irq(msix_entry->vector, + octep_misc_intr_handler, 0, + irq_name, oct); + } else { + ret = request_irq(msix_entry->vector, + octep_rsvd_intr_handler, 0, + irq_name, oct); + } + if (ret) { netdev_err(netdev, "request_irq failed for %s; err=%d", @@ -715,32 +898,31 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb, hw_desc->dptr = tx_buffer->sglist_dma; } - /* Flush the hw descriptor before writing to doorbell */ - wmb(); - - /* Ring Doorbell to notify the NIC there is a new packet */ - writel(1, iq->doorbell_reg); + netdev_tx_sent_queue(iq->netdev_q, skb->len); + skb_tx_timestamp(skb); atomic_inc(&iq->instr_pending); wi++; if (wi == iq->max_count) wi = 0; iq->host_write_index = wi; + /* Flush the hw descriptor before writing to doorbell */ + wmb(); - netdev_tx_sent_queue(iq->netdev_q, skb->len); + /* Ring Doorbell to notify the NIC there is a new packet */ + writel(1, iq->doorbell_reg); iq->stats.instr_posted++; - skb_tx_timestamp(skb); return NETDEV_TX_OK; dma_map_sg_err: if (si > 0) { dma_unmap_single(iq->dev, sglist[0].dma_ptr[0], - sglist[0].len[0], DMA_TO_DEVICE); - sglist[0].len[0] = 0; + sglist[0].len[3], DMA_TO_DEVICE); + sglist[0].len[3] = 0; } while (si > 1) { dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3], - sglist[si >> 2].len[si & 3], DMA_TO_DEVICE); - sglist[si >> 2].len[si & 3] = 0; + sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE); + sglist[si >> 2].len[3 - (si & 3)] = 0; si--; } tx_buffer->gather = 0; @@ -918,9 +1100,9 @@ static void octep_hb_timeout_task(struct work_struct *work) int miss_cnt; miss_cnt = atomic_inc_return(&oct->hb_miss_cnt); - if (miss_cnt < oct->conf->max_hb_miss_cnt) { + if (miss_cnt < oct->conf->fw_info.hb_miss_count) { queue_delayed_work(octep_wq, &oct->hb_task, - msecs_to_jiffies(oct->conf->hb_interval * 1000)); + msecs_to_jiffies(oct->conf->fw_info.hb_interval)); return; } @@ -1011,10 +1193,16 @@ int octep_device_setup(struct octep_device *oct) if (ret) return ret; + INIT_WORK(&oct->tx_timeout_task, octep_tx_timeout_task); + INIT_WORK(&oct->ctrl_mbox_task, octep_ctrl_mbox_task); + INIT_DELAYED_WORK(&oct->intr_poll_task, octep_intr_poll_task); + oct->poll_non_ioq_intr = true; + queue_delayed_work(octep_wq, &oct->intr_poll_task, + msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS)); + atomic_set(&oct->hb_miss_cnt, 0); INIT_DELAYED_WORK(&oct->hb_task, octep_hb_timeout_task); - queue_delayed_work(octep_wq, &oct->hb_task, - msecs_to_jiffies(oct->conf->hb_interval * 1000)); + return 0; unsupported_dev: @@ -1077,7 +1265,8 @@ static bool get_fw_ready_status(struct pci_dev *pdev) pci_read_config_byte(pdev, (pos + 8), &status); dev_info(&pdev->dev, "Firmware ready status = %u\n", status); - return status; +#define FW_STATUS_READY 1ULL + return status == FW_STATUS_READY; } return false; } @@ -1143,12 +1332,18 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(&pdev->dev, "Device setup failed\n"); goto err_octep_config; } - INIT_WORK(&octep_dev->tx_timeout_task, octep_tx_timeout_task); - INIT_WORK(&octep_dev->ctrl_mbox_task, octep_ctrl_mbox_task); - INIT_DELAYED_WORK(&octep_dev->intr_poll_task, octep_intr_poll_task); - octep_dev->poll_non_ioq_intr = true; - queue_delayed_work(octep_wq, &octep_dev->intr_poll_task, - msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS)); + + err = octep_ctrl_net_get_info(octep_dev, OCTEP_CTRL_NET_INVALID_VFID, + 
&octep_dev->conf->fw_info); + if (err) { + dev_err(&pdev->dev, "Failed to get firmware info\n"); + goto register_dev_err; + } + dev_info(&octep_dev->pdev->dev, "Heartbeat interval %u msecs Heartbeat miss count %u\n", + octep_dev->conf->fw_info.hb_interval, + octep_dev->conf->fw_info.hb_miss_count); + queue_delayed_work(octep_wq, &octep_dev->hb_task, + msecs_to_jiffies(octep_dev->conf->fw_info.hb_interval)); netdev->netdev_ops = &octep_netdev_ops; octep_set_ethtool_ops(netdev); diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h index e0907a719133..6df902ebb7f3 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h @@ -65,7 +65,16 @@ struct octep_hw_ops { void (*setup_oq_regs)(struct octep_device *oct, int q); void (*setup_mbox_regs)(struct octep_device *oct, int mbox); - irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector); + irqreturn_t (*oei_intr_handler)(void *ioq_vector); + irqreturn_t (*ire_intr_handler)(void *ioq_vector); + irqreturn_t (*ore_intr_handler)(void *ioq_vector); + irqreturn_t (*vfire_intr_handler)(void *ioq_vector); + irqreturn_t (*vfore_intr_handler)(void *ioq_vector); + irqreturn_t (*dma_intr_handler)(void *ioq_vector); + irqreturn_t (*dma_vf_intr_handler)(void *ioq_vector); + irqreturn_t (*pp_vf_intr_handler)(void *ioq_vector); + irqreturn_t (*misc_intr_handler)(void *ioq_vector); + irqreturn_t (*rsvd_intr_handler)(void *ioq_vector); irqreturn_t (*ioq_intr_handler)(void *ioq_vector); int (*soft_reset)(struct octep_device *oct); void (*reinit_regs)(struct octep_device *oct); @@ -73,7 +82,7 @@ struct octep_hw_ops { void (*enable_interrupts)(struct octep_device *oct); void (*disable_interrupts)(struct octep_device *oct); - bool (*poll_non_ioq_interrupts)(struct octep_device *oct); + void (*poll_non_ioq_interrupts)(struct octep_device *oct); void (*enable_io_queues)(struct octep_device *oct); void (*disable_io_queues)(struct octep_device *oct); diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h index b25c3093dc7b..0a43983e9101 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h @@ -370,4 +370,8 @@ /* bit 1 for firmware heartbeat interrupt */ #define CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT BIT_ULL(1) +#define CN93_PEM_BAR4_INDEX 7 +#define CN93_PEM_BAR4_INDEX_SIZE 0x400000ULL +#define CN93_PEM_BAR4_INDEX_OFFSET (CN93_PEM_BAR4_INDEX * CN93_PEM_BAR4_INDEX_SIZE) + #endif /* _OCTEP_REGS_CN9K_PF_H_ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h index 782a24f27f3e..49feae80d7d2 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h @@ -20,6 +20,7 @@ struct octep_oq_desc_hw { dma_addr_t buffer_ptr; u64 info_ptr; }; +static_assert(sizeof(struct octep_oq_desc_hw) == 16); #define OCTEP_OQ_DESC_SIZE (sizeof(struct octep_oq_desc_hw)) @@ -39,6 +40,7 @@ struct octep_oq_resp_hw_ext { /* checksum verified. */ u64 csum_verified:2; }; +static_assert(sizeof(struct octep_oq_resp_hw_ext) == 8); #define OCTEP_OQ_RESP_HW_EXT_SIZE (sizeof(struct octep_oq_resp_hw_ext)) @@ -50,6 +52,7 @@ struct octep_oq_resp_hw { /* The Length of the packet. 
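The octep_start_xmit() reordering above is the classic MMIO producer pattern: finish all descriptor and write-index stores, issue a write barrier, and only then ring the doorbell, so the device can never observe the doorbell before the descriptor it points at. A toy userspace model of that ordering (C11 atomics standing in for the kernel's wmb() and the MMIO write; all names are invented):

#include <stdatomic.h>
#include <stdint.h>

#define RING_SIZE 256

struct toy_iq {
        uint64_t desc[RING_SIZE];      /* stands in for the DMA descriptor ring */
        unsigned int host_write_index;
        _Atomic uint32_t doorbell;     /* stands in for the MMIO doorbell register */
};

static void toy_post(struct toy_iq *iq, uint64_t desc)
{
        /* 1. fill the descriptor and advance the software write index */
        iq->desc[iq->host_write_index] = desc;
        iq->host_write_index = (iq->host_write_index + 1) % RING_SIZE;

        /* 2. equivalent of wmb(): make the stores above visible first */
        atomic_thread_fence(memory_order_release);

        /* 3. only now notify the consumer that a new packet is queued */
        atomic_store_explicit(&iq->doorbell, 1, memory_order_relaxed);
}

int main(void)
{
        static struct toy_iq iq;

        toy_post(&iq, 0xdeadbeef);
        return iq.host_write_index == 1 ? 0 : 1;
}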
*/ __be64 length; }; +static_assert(sizeof(struct octep_oq_resp_hw) == 8); #define OCTEP_OQ_RESP_HW_SIZE (sizeof(struct octep_oq_resp_hw)) diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c index 5a520d37bea0..d0adb82d65c3 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c @@ -69,12 +69,12 @@ int octep_iq_process_completions(struct octep_iq *iq, u16 budget) compl_sg++; dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0], - tx_buffer->sglist[0].len[0], DMA_TO_DEVICE); + tx_buffer->sglist[0].len[3], DMA_TO_DEVICE); i = 1; /* entry 0 is main skb, unmapped above */ while (frags--) { dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3], - tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE); + tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE); i++; } @@ -131,13 +131,13 @@ static void octep_iq_free_pending(struct octep_iq *iq) dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0], - tx_buffer->sglist[0].len[0], + tx_buffer->sglist[0].len[3], DMA_TO_DEVICE); i = 1; /* entry 0 is main skb, unmapped above */ while (frags--) { dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3], - tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE); + tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE); i++; } diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h index 2ef57980eb47..86c98b13fc44 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h @@ -17,11 +17,26 @@ #define TX_BUFTYPE_NET_SG 2 #define NUM_TX_BUFTYPES 3 -/* Hardware format for Scatter/Gather list */ +/* Hardware format for Scatter/Gather list + * + * 63 48|47 32|31 16|15 0 + * ----------------------------------------- + * | Len 0 | Len 1 | Len 2 | Len 3 | + * ----------------------------------------- + * | Ptr 0 | + * ----------------------------------------- + * | Ptr 1 | + * ----------------------------------------- + * | Ptr 2 | + * ----------------------------------------- + * | Ptr 3 | + * ----------------------------------------- + */ struct octep_tx_sglist_desc { u16 len[4]; dma_addr_t dma_ptr[4]; }; +static_assert(sizeof(struct octep_tx_sglist_desc) == 40); /* Each Scatter/Gather entry sent to hardware holds four pointers. * So, number of entries required is (MAX_SKB_FRAGS + 1)/4, where '+1' @@ -225,6 +240,7 @@ struct octep_instr_hdr { /* Reserved3 */ u64 reserved3:1; }; +static_assert(sizeof(struct octep_instr_hdr) == 8); /* Hardware Tx completion response header */ struct octep_instr_resp_hdr { @@ -249,6 +265,7 @@ struct octep_instr_resp_hdr { /* Opcode for the return packet */ u64 opcode:16; }; +static_assert(sizeof(struct octep_instr_resp_hdr) == 8); /* 64-byte Tx instruction format. * Format of instruction for a 64-byte mode input queue. @@ -279,6 +296,7 @@ struct octep_tx_desc_hw { /* Additional headers available in a 64-byte instruction.
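The len[3 - (i & 3)] indexing in the unmap paths above follows directly from the layout diagram: the hardware reads Len 0 from bits 63:48 of a little-endian 64-bit word, while u16 len[4] on a little-endian host puts array slot 0 at bits 15:0, so logical entry i lives at slot 3 - (i & 3). A small self-checking sketch of the mapping (assumes a little-endian host):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Logical sglist entry i -> array slot in the packed length word. */
static void set_len(uint16_t len[4], unsigned int i, uint16_t val)
{
        len[3 - (i & 3)] = val;
}

int main(void)
{
        uint16_t len[4] = {0};
        uint64_t word;

        set_len(len, 0, 0xABCD);        /* logical "Len 0" */
        memcpy(&word, len, sizeof(word));
        assert(word >> 48 == 0xABCD);   /* lands in bits 63:48, as the HW expects */
        return 0;
}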
*/ u64 exhdr[4]; }; +static_assert(sizeof(struct octep_tx_desc_hw) == 64); #define OCTEP_IQ_DESC_SIZE (sizeof(struct octep_tx_desc_hw)) #endif /* _OCTEP_TX_H_ */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 592037f4e55b..6c70c8498690 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -55,6 +55,7 @@ static const char *cgx_lmactype_string[LMAC_MODE_MAX] = { [LMAC_MODE_50G_R] = "50G_R", [LMAC_MODE_100G_R] = "100G_R", [LMAC_MODE_USXGMII] = "USXGMII", + [LMAC_MODE_USGMII] = "USGMII", }; /* CGX PHY management internal APIs */ @@ -223,24 +224,6 @@ int cgx_get_link_info(void *cgxd, int lmac_id, return 0; } -static u64 mac2u64 (u8 *mac_addr) -{ - u64 mac = 0; - int index; - - for (index = ETH_ALEN - 1; index >= 0; index--) - mac |= ((u64)*mac_addr++) << (8 * index); - return mac; -} - -static void cfg2mac(u64 cfg, u8 *mac_addr) -{ - int i, index = 0; - - for (i = ETH_ALEN - 1; i >= 0; i--, index++) - mac_addr[i] = (cfg >> (8 * index)) & 0xFF; -} - int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); @@ -249,13 +232,16 @@ int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr) int index, id; u64 cfg; + if (!lmac) + return -ENODEV; + /* access mac_ops to know csr_offset */ mac_ops = cgx_dev->mac_ops; /* copy 6bytes from macaddr */ /* memcpy(&cfg, mac_addr, 6); */ - cfg = mac2u64 (mac_addr); + cfg = ether_addr_to_u64(mac_addr); id = get_sequence_id_of_lmac(cgx_dev, lmac_id); @@ -322,7 +308,7 @@ int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr) index = id * lmac->mac_to_index_bmap.max + idx; - cfg = mac2u64 (mac_addr); + cfg = ether_addr_to_u64(mac_addr); cfg |= CGX_DMAC_CAM_ADDR_ENABLE; cfg |= ((u64)lmac_id << 49); cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg); @@ -405,7 +391,7 @@ int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index) cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8))); cfg &= ~CGX_RX_DMAC_ADR_MASK; - cfg |= mac2u64 (mac_addr); + cfg |= ether_addr_to_u64(mac_addr); cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg); return 0; @@ -441,7 +427,7 @@ int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index) /* Read MAC address to check whether it is ucast or mcast */ cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8))); - cfg2mac(cfg, mac); + u64_to_ether_addr(cfg, mac); if (is_multicast_ether_addr(mac)) lmac->mcast_filters_count--; @@ -567,15 +553,16 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable) { struct cgx *cgx = cgx_get_pdata(cgx_id); struct lmac *lmac = lmac_pdata(lmac_id, cgx); - u16 max_dmac = lmac->mac_to_index_bmap.max; struct mac_ops *mac_ops; + u16 max_dmac; int index, i; u64 cfg = 0; int id; - if (!cgx) + if (!cgx || !lmac) return; + max_dmac = lmac->mac_to_index_bmap.max; id = get_sequence_id_of_lmac(cgx, lmac_id); mac_ops = cgx->mac_ops; @@ -748,7 +735,7 @@ int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp) int corr_reg, uncorr_reg; struct cgx *cgx = cgxd; - if (!cgx || lmac_id >= cgx->lmac_count) + if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE) @@ -1231,8 +1218,6 @@ static inline void link_status_user_format(u64 lstat, struct cgx_link_user_info *linfo, struct cgx *cgx, u8 lmac_id) { - const char *lmac_string; - linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat); 
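The cgx.c cleanup above swaps the driver's open-coded mac2u64()/cfg2mac() helpers for the generic ether_addr_to_u64()/u64_to_ether_addr() from <linux/etherdevice.h>. Both pack the six MAC bytes big-endian into the low 48 bits of a u64 and back, which is why the substitution is behavior-preserving. A standalone sketch of that packing (helper names invented for the example):

#include <stdint.h>
#include <stdio.h>

static uint64_t mac_to_u64(const uint8_t addr[6])
{
        uint64_t u = 0;

        /* addr[0] ends up in bits 47:40, addr[5] in bits 7:0 */
        for (int i = 0; i < 6; i++)
                u = (u << 8) | addr[i];
        return u;
}

static void u64_to_mac(uint64_t u, uint8_t addr[6])
{
        for (int i = 5; i >= 0; i--, u >>= 8)
                addr[i] = u & 0xff;
}

int main(void)
{
        uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint8_t back[6];

        u64_to_mac(mac_to_u64(mac), back);
        printf("0x%012llx round-trips: %d\n",
               (unsigned long long)mac_to_u64(mac),
               mac_to_u64(back) == mac_to_u64(mac));
        return 0;
}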
linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat); linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)]; @@ -1243,12 +1228,12 @@ static inline void link_status_user_format(u64 lstat, if (linfo->lmac_type_id >= LMAC_MODE_MAX) { dev_err(&cgx->pdev->dev, "Unknown lmac_type_id %d reported by firmware on cgx port%d:%d", linfo->lmac_type_id, cgx->cgx_id, lmac_id); - strncpy(linfo->lmac_type, "Unknown", LMACTYPE_STR_LEN - 1); + strscpy(linfo->lmac_type, "Unknown", sizeof(linfo->lmac_type)); return; } - lmac_string = cgx_lmactype_string[linfo->lmac_type_id]; - strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1); + strscpy(linfo->lmac_type, cgx_lmactype_string[linfo->lmac_type_id], + sizeof(linfo->lmac_type)); } /* Hardware event handlers */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h index 574114179688..6f7d1dee5830 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -110,6 +110,7 @@ enum LMAC_TYPE { LMAC_MODE_50G_R = 8, LMAC_MODE_100G_R = 9, LMAC_MODE_USXGMII = 10, + LMAC_MODE_USGMII = 11, LMAC_MODE_MAX, }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index eba307eee2b2..5df42634ceb8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -136,6 +136,7 @@ M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \ M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \ msg_rsp) \ M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \ +M(PTP_GET_CAP, 0x00c, ptp_get_cap, msg_req, ptp_get_cap_rsp) \ /* CGX mbox IDs (range 0x200 - 0x3FF) */ \ M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \ M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \ @@ -235,7 +236,7 @@ M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, \ M(NPC_INSTALL_FLOW, 0x600d, npc_install_flow, \ npc_install_flow_req, npc_install_flow_rsp) \ M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \ - npc_delete_flow_req, msg_rsp) \ + npc_delete_flow_req, npc_delete_flow_rsp) \ M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \ npc_mcam_read_entry_req, \ npc_mcam_read_entry_rsp) \ @@ -1437,6 +1438,12 @@ struct npc_get_kex_cfg_rsp { u8 mkex_pfl_name[MKEX_NAME_LEN]; }; +struct ptp_get_cap_rsp { + struct mbox_msghdr hdr; +#define PTP_CAP_HW_ATOMIC_UPDATE BIT_ULL(0) + u64 cap; +}; + struct flow_msg { unsigned char dmac[6]; unsigned char smac[6]; @@ -1451,6 +1458,10 @@ struct flow_msg { __be32 ip4dst; __be32 ip6dst[4]; }; + union { + __be32 spi; + }; + u8 tos; u8 ip_ver; u8 ip_proto; @@ -1461,6 +1472,13 @@ struct flow_msg { u8 ip_flag; u8 next_header; }; + __be16 vlan_itci; +#define OTX2_FLOWER_MASK_MPLS_LB GENMASK(31, 12) +#define OTX2_FLOWER_MASK_MPLS_TC GENMASK(11, 9) +#define OTX2_FLOWER_MASK_MPLS_BOS BIT(8) +#define OTX2_FLOWER_MASK_MPLS_TTL GENMASK(7, 0) +#define OTX2_FLOWER_MASK_MPLS_NON_TTL GENMASK(31, 8) + u32 mpls_lse[4]; }; struct npc_install_flow_req { @@ -1491,6 +1509,8 @@ struct npc_install_flow_req { u8 vtag0_op; u16 vtag1_def; u8 vtag1_op; + /* old counter value */ + u16 cntr_val; }; struct npc_install_flow_rsp { @@ -1506,6 +1526,11 @@ struct npc_delete_flow_req { u8 all; /* PF + VFs */ }; +struct npc_delete_flow_rsp { + struct mbox_msghdr hdr; + u16 cntr_val; +}; + struct npc_mcam_read_entry_req { struct mbox_msghdr hdr; u16 entry; /* MCAM entry to read */ @@ -1555,7 +1580,9 @@ enum ptp_op { PTP_OP_GET_CLOCK = 1, 
PTP_OP_GET_TSTMP = 2, PTP_OP_SET_THRESH = 3, - PTP_OP_EXTTS_ON = 4, + PTP_OP_PPS_ON = 4, + PTP_OP_ADJTIME = 5, + PTP_OP_SET_CLOCK = 6, }; struct ptp_req { @@ -1563,12 +1590,16 @@ struct ptp_req { u8 op; s64 scaled_ppm; u64 thresh; - int extts_on; + u64 period; + int pps_on; + s64 delta; + u64 clk; }; struct ptp_rsp { struct mbox_msghdr hdr; u64 clk; + u64 tsc; }; struct npc_get_field_status_req { @@ -1914,7 +1945,7 @@ struct mcs_hw_info { u8 tcam_entries; /* RX/TX Tcam entries per mcs block */ u8 secy_entries; /* RX/TX SECY entries per mcs block */ u8 sc_entries; /* RX/TX SC CAM entries per mcs block */ - u8 sa_entries; /* PN table entries = SA entries */ + u16 sa_entries; /* PN table entries = SA entries */ u64 rsvd[16]; }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c index c43f19dfbd74..c1775bd01c2b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c @@ -117,7 +117,7 @@ void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(id); stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, reg); - reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(id); + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(id); stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(id); @@ -215,7 +215,7 @@ void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats, reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(id); stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg); - reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(id); + reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDX(id); stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg); if (mcs->hw->mcs_blks > 1) { @@ -1219,6 +1219,17 @@ struct mcs *mcs_get_pdata(int mcs_id) return NULL; } +bool is_mcs_bypass(int mcs_id) +{ + struct mcs *mcs_dev; + + list_for_each_entry(mcs_dev, &mcs_list, mcs_list) { + if (mcs_dev->mcs_id == mcs_id) + return mcs_dev->bypass; + } + return true; +} + void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req) { u64 val = 0; @@ -1436,7 +1447,7 @@ static int mcs_x2p_calibration(struct mcs *mcs) return err; } -static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass) +static void mcs_set_external_bypass(struct mcs *mcs, bool bypass) { u64 val; @@ -1447,6 +1458,7 @@ static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass) else val &= ~BIT_ULL(6); mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val); + mcs->bypass = bypass; } static void mcs_global_cfg(struct mcs *mcs) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h index 0f89dcb76465..f927cc61dfd2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h @@ -149,6 +149,7 @@ struct mcs { u16 num_vec; void *rvu; u16 *tx_sa_active; + bool bypass; }; struct mcs_ops { @@ -206,6 +207,7 @@ void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req * int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc); int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req); int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req); +bool is_mcs_bypass(int mcs_id); /* CN10K-B APIs */ void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h 
b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h index f3ab01fc363c..f4c6de89002c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h @@ -810,14 +810,37 @@ offset = 0x9d8ull; \ offset; }) +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDX(a) ({ \ + u64 offset; \ + \ + offset = 0xee80ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xe818ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a) ({ \ + u64 offset; \ + \ + offset = 0xa680ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xd018ull; \ + offset += (a) * 0x8ull; \ + offset; }) + +#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a) ({ \ + u64 offset; \ + \ + offset = 0xf680ull; \ + if (mcs->hw->mcs_blks > 1) \ + offset = 0xe018ull; \ + offset += (a) * 0x8ull; \ + offset; }) + #define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(a) (0xe680ull + (a) * 0x8ull) #define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(a) (0xde80ull + (a) * 0x8ull) -#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(a) (0xa680ull + (a) * 0x8ull) #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(a) (0xd218 + (a) * 0x8ull) -#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a) (0xd018ull + (a) * 0x8ull) -#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(a) (0xee80ull + (a) * 0x8ull) #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(a) (0xb680ull + (a) * 0x8ull) -#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a) (0xf680ull + (a) * 0x8ull) #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(a) (0x12680ull + (a) * 0x8ull) #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(a) (0x15680ull + (a) * 0x8ull) #define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(a) (0x13680ull + (a) * 0x8ull) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index 9beeead56d7b..ab3e39eef2eb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -184,6 +184,7 @@ enum key_fields { NPC_VLAN_ETYPE_CTAG, /* 0x8100 */ NPC_VLAN_ETYPE_STAG, /* 0x88A8 */ NPC_OUTER_VID, + NPC_INNER_VID, NPC_TOS, NPC_IPFRAG_IPV4, NPC_SIP_IPV4, @@ -204,6 +205,15 @@ enum key_fields { NPC_DPORT_UDP, NPC_SPORT_SCTP, NPC_DPORT_SCTP, + NPC_IPSEC_SPI, + NPC_MPLS1_LBTCBOS, + NPC_MPLS1_TTL, + NPC_MPLS2_LBTCBOS, + NPC_MPLS2_TTL, + NPC_MPLS3_LBTCBOS, + NPC_MPLS3_TTL, + NPC_MPLS4_LBTCBOS, + NPC_MPLS4_TTL, NPC_HEADER_FIELDS_MAX, NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */ NPC_PF_FUNC, /* Valid when Tx */ @@ -229,6 +239,8 @@ enum key_fields { NPC_VLAN_TAG1, /* outer vlan tci for double tagged frame */ NPC_VLAN_TAG2, + /* inner vlan tci for double tagged frame */ + NPC_VLAN_TAG3, /* other header fields programmed to extract but not of our interest */ NPC_UNKNOWN, NPC_KEY_FIELDS_MAX, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c index 0ee420a489fc..bcc96eed2481 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c @@ -12,8 +12,8 @@ #include <linux/hrtimer.h> #include <linux/ktime.h> -#include "ptp.h" #include "mbox.h" +#include "ptp.h" #include "rvu.h" #define DRV_NAME "Marvell PTP Driver" @@ -40,11 +40,13 @@ #define PTP_CLOCK_CFG_TSTMP_EDGE BIT_ULL(9) #define PTP_CLOCK_CFG_TSTMP_EN BIT_ULL(8) #define PTP_CLOCK_CFG_TSTMP_IN_MASK GENMASK_ULL(15, 10) +#define PTP_CLOCK_CFG_ATOMIC_OP_MASK GENMASK_ULL(28, 26) #define PTP_CLOCK_CFG_PPS_EN BIT_ULL(30) #define 
PTP_CLOCK_CFG_PPS_INV BIT_ULL(31) #define PTP_PPS_HI_INCR 0xF60ULL #define PTP_PPS_LO_INCR 0xF68ULL +#define PTP_PPS_THRESH_LO 0xF50ULL #define PTP_PPS_THRESH_HI 0xF58ULL #define PTP_CLOCK_LO 0xF08ULL @@ -53,36 +55,62 @@ #define PTP_TIMESTAMP 0xF20ULL #define PTP_CLOCK_SEC 0xFD0ULL #define PTP_SEC_ROLLOVER 0xFD8ULL +/* Atomic update related CSRs */ +#define PTP_FRNS_TIMESTAMP 0xFE0ULL +#define PTP_NXT_ROLLOVER_SET 0xFE8ULL +#define PTP_CURR_ROLLOVER_SET 0xFF0ULL +#define PTP_NANO_TIMESTAMP 0xFF8ULL +#define PTP_SEC_TIMESTAMP 0x1000ULL #define CYCLE_MULT 1000 +#define is_rev_A0(ptp) (((ptp)->pdev->revision & 0x0F) == 0x0) +#define is_rev_A1(ptp) (((ptp)->pdev->revision & 0x0F) == 0x1) + +/* PTP atomic update operation type */ +enum atomic_opcode { + ATOMIC_SET = 1, + ATOMIC_INC = 3, + ATOMIC_DEC = 4 +}; + static struct ptp *first_ptp_block; static const struct pci_device_id ptp_id_table[]; -static bool is_ptp_dev_cnf10kb(struct ptp *ptp) +static bool is_ptp_dev_cnf10ka(struct ptp *ptp) { - return (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_B_PTP) ? true : false; + return ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP; } -static bool is_ptp_dev_cn10k(struct ptp *ptp) +static bool is_ptp_dev_cn10ka(struct ptp *ptp) { - return (ptp->pdev->device == PCI_DEVID_CN10K_PTP) ? true : false; + return ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP; } static bool cn10k_ptp_errata(struct ptp *ptp) { - if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP || - ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP) + if ((is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) && + (is_rev_A0(ptp) || is_rev_A1(ptp))) return true; + return false; } -static bool is_ptp_tsfmt_sec_nsec(struct ptp *ptp) +static bool is_tstmp_atomic_update_supported(struct rvu *rvu) { - if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP || - ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP) - return true; - return false; + struct ptp *ptp = rvu->ptp; + + if (is_rvu_otx2(rvu)) + return false; + + /* On older silicon variants of CN10K, atomic update feature + * is not available. 
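The atomic-update path added below programs the new time plus the surrounding one-second rollover points for a full ATOMIC_SET, and uses ATOMIC_INC/ATOMIC_DEC for sub-second adjtime deltas. A toy model of that policy (the helper names are invented; only the opcode constants and the rounding mirror the driver):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NSEC_PER_SEC 1000000000ULL

enum atomic_opcode { ATOMIC_SET = 1, ATOMIC_INC = 3, ATOMIC_DEC = 4 };

/* roundup(ts, NSEC_PER_SEC), as ptp_atomic_update() uses for the next
 * rollover point; the current rollover point is one second earlier.
 */
static uint64_t next_rollover(uint64_t ts_ns)
{
        return ((ts_ns + NSEC_PER_SEC - 1) / NSEC_PER_SEC) * NSEC_PER_SEC;
}

/* Sub-second deltas can use the hardware's atomic inc/dec; larger ones
 * fall back to read-modify plus a full atomic SET, as in
 * ptp_atomic_adjtime().
 */
static enum atomic_opcode pick_adjtime_op(int64_t delta_ns)
{
        if (llabs(delta_ns) < (int64_t)NSEC_PER_SEC)
                return delta_ns < 0 ? ATOMIC_DEC : ATOMIC_INC;
        return ATOMIC_SET;
}

int main(void)
{
        uint64_t ts = 5 * NSEC_PER_SEC + 123;

        printf("next rollover: %" PRIu64 " ns\n", next_rollover(ts));
        printf("op for -2ms adjtime: %d\n", pick_adjtime_op(-2000000));
        return 0;
}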
+ */ + if ((is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) && + (is_rev_A0(ptp) || is_rev_A1(ptp))) + return false; + + return true; } static enum hrtimer_restart ptp_reset_thresh(struct hrtimer *hrtimer) @@ -222,6 +250,65 @@ void ptp_put(struct ptp *ptp) pci_dev_put(ptp->pdev); } +static void ptp_atomic_update(struct ptp *ptp, u64 timestamp) +{ + u64 regval, curr_rollover_set, nxt_rollover_set; + + /* First setup NSECs and SECs */ + writeq(timestamp, ptp->reg_base + PTP_NANO_TIMESTAMP); + writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP); + writeq(timestamp / NSEC_PER_SEC, + ptp->reg_base + PTP_SEC_TIMESTAMP); + + nxt_rollover_set = roundup(timestamp, NSEC_PER_SEC); + curr_rollover_set = nxt_rollover_set - NSEC_PER_SEC; + writeq(nxt_rollover_set, ptp->reg_base + PTP_NXT_ROLLOVER_SET); + writeq(curr_rollover_set, ptp->reg_base + PTP_CURR_ROLLOVER_SET); + + /* Now, initiate atomic update */ + regval = readq(ptp->reg_base + PTP_CLOCK_CFG); + regval &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK; + regval |= (ATOMIC_SET << 26); + writeq(regval, ptp->reg_base + PTP_CLOCK_CFG); +} + +static void ptp_atomic_adjtime(struct ptp *ptp, s64 delta) +{ + bool neg_adj = false, atomic_inc_dec = false; + u64 regval, ptp_clock_hi; + + if (delta < 0) { + delta = -delta; + neg_adj = true; + } + + /* use atomic inc/dec when delta < 1 second */ + if (delta < NSEC_PER_SEC) + atomic_inc_dec = true; + + if (!atomic_inc_dec) { + ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI); + if (neg_adj) { + if (ptp_clock_hi > delta) + ptp_clock_hi -= delta; + else + ptp_clock_hi = delta - ptp_clock_hi; + } else { + ptp_clock_hi += delta; + } + ptp_atomic_update(ptp, ptp_clock_hi); + } else { + writeq(delta, ptp->reg_base + PTP_NANO_TIMESTAMP); + writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP); + + /* initiate atomic inc/dec */ + regval = readq(ptp->reg_base + PTP_CLOCK_CFG); + regval &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK; + regval |= neg_adj ? 
(ATOMIC_DEC << 26) : (ATOMIC_INC << 26); + writeq(regval, ptp->reg_base + PTP_CLOCK_CFG); + } +} + static int ptp_adjfine(struct ptp *ptp, long scaled_ppm) { bool neg_adj = false; @@ -277,8 +364,9 @@ static int ptp_get_clock(struct ptp *ptp, u64 *clk) return 0; } -void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts) +void ptp_start(struct rvu *rvu, u64 sclk, u32 ext_clk_freq, u32 extts) { + struct ptp *ptp = rvu->ptp; struct pci_dev *pdev; u64 clock_comp; u64 clock_cfg; @@ -297,8 +385,14 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts) ptp->clock_rate = sclk * 1000000; /* Program the seconds rollover value to 1 second */ - if (is_ptp_dev_cnf10kb(ptp)) + if (is_tstmp_atomic_update_supported(rvu)) { + writeq(0, ptp->reg_base + PTP_NANO_TIMESTAMP); + writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP); + writeq(0, ptp->reg_base + PTP_SEC_TIMESTAMP); + writeq(0, ptp->reg_base + PTP_CURR_ROLLOVER_SET); + writeq(0x3b9aca00, ptp->reg_base + PTP_NXT_ROLLOVER_SET); writeq(0x3b9aca00, ptp->reg_base + PTP_SEC_ROLLOVER); + } /* Enable PTP clock */ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG); @@ -318,24 +412,11 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts) } clock_cfg |= PTP_CLOCK_CFG_PTP_EN; - clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV; writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG); - - /* Set 50% duty cycle for 1Hz output */ - writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR); - writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR); - if (cn10k_ptp_errata(ptp)) { - /* The ptp_clock_hi rollsover to zero once clock cycle before it - * reaches one second boundary. so, program the pps_lo_incr in - * such a way that the pps threshold value comparison at one - * second boundary will succeed and pps edge changes. After each - * one second boundary, the hrtimer handler will be invoked and - * reprograms the pps threshold value. 
- */ - ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate; - writeq((0x1dcd6500ULL - ptp->clock_period) << 32, - ptp->reg_base + PTP_PPS_LO_INCR); - } + clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG); + clock_cfg &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK; + clock_cfg |= (ATOMIC_SET << 26); + writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG); if (cn10k_ptp_errata(ptp)) clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate); @@ -350,7 +431,7 @@ static int ptp_get_tstmp(struct ptp *ptp, u64 *clk) { u64 timestamp; - if (is_ptp_dev_cn10k(ptp)) { + if (is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) { timestamp = readq(ptp->reg_base + PTP_TIMESTAMP); *clk = (timestamp >> 32) * NSEC_PER_SEC + (timestamp & 0xFFFFFFFF); } else { @@ -368,20 +449,68 @@ static int ptp_set_thresh(struct ptp *ptp, u64 thresh) return 0; } -static int ptp_extts_on(struct ptp *ptp, int on) +static int ptp_config_hrtimer(struct ptp *ptp, int on) { u64 ptp_clock_hi; - if (cn10k_ptp_errata(ptp)) { - if (on) { - ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI); - ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi); - } else { - if (hrtimer_active(&ptp->hrtimer)) - hrtimer_cancel(&ptp->hrtimer); + if (on) { + ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI); + ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi); + } else { + if (hrtimer_active(&ptp->hrtimer)) + hrtimer_cancel(&ptp->hrtimer); + } + + return 0; +} + +static int ptp_pps_on(struct ptp *ptp, int on, u64 period) +{ + u64 clock_cfg; + + clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG); + if (on) { + if (cn10k_ptp_errata(ptp) && period != NSEC_PER_SEC) { + dev_err(&ptp->pdev->dev, "Supports max period value as 1 second\n"); + return -EINVAL; + } + + if (period > (8 * NSEC_PER_SEC)) { + dev_err(&ptp->pdev->dev, "Supports max period as 8 seconds\n"); + return -EINVAL; } + + clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV; + writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG); + + writeq(0, ptp->reg_base + PTP_PPS_THRESH_HI); + writeq(0, ptp->reg_base + PTP_PPS_THRESH_LO); + + /* Configure high/low phase time */ + period = period / 2; + writeq(((u64)period << 32), ptp->reg_base + PTP_PPS_HI_INCR); + writeq(((u64)period << 32), ptp->reg_base + PTP_PPS_LO_INCR); + } else { + clock_cfg &= ~(PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV); + writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG); + } + + if (on && cn10k_ptp_errata(ptp)) { + /* The ptp_clock_hi rolls over to zero one clock cycle before it + * reaches one second boundary. So, program the pps_lo_incr in + * such a way that the pps threshold value comparison at one + * second boundary will succeed and pps edge changes. After each + * one second boundary, the hrtimer handler will be invoked and + * reprograms the pps threshold value.
+ */ + ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate; + writeq((0x1dcd6500ULL - ptp->clock_period) << 32, + ptp->reg_base + PTP_PPS_LO_INCR); } + if (cn10k_ptp_errata(ptp)) + ptp_config_hrtimer(ptp, on); + return 0; } @@ -414,14 +543,12 @@ static int ptp_probe(struct pci_dev *pdev, first_ptp_block = ptp; spin_lock_init(&ptp->ptp_lock); - if (is_ptp_tsfmt_sec_nsec(ptp)) - ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec; - else - ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec; - if (cn10k_ptp_errata(ptp)) { + ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec; hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ptp->hrtimer.function = ptp_reset_thresh; + } else { + ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec; } return 0; @@ -518,8 +645,14 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req, case PTP_OP_SET_THRESH: err = ptp_set_thresh(rvu->ptp, req->thresh); break; - case PTP_OP_EXTTS_ON: - err = ptp_extts_on(rvu->ptp, req->extts_on); + case PTP_OP_PPS_ON: + err = ptp_pps_on(rvu->ptp, req->pps_on, req->period); + break; + case PTP_OP_ADJTIME: + ptp_atomic_adjtime(rvu->ptp, req->delta); + break; + case PTP_OP_SET_CLOCK: + ptp_atomic_update(rvu->ptp, (u64)req->clk); break; default: err = -EINVAL; @@ -528,3 +661,17 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req, return err; } + +int rvu_mbox_handler_ptp_get_cap(struct rvu *rvu, struct msg_req *req, + struct ptp_get_cap_rsp *rsp) +{ + if (!rvu->ptp) + return -ENODEV; + + if (is_tstmp_atomic_update_supported(rvu)) + rsp->cap |= PTP_CAP_HW_ATOMIC_UPDATE; + else + rsp->cap &= ~BIT_ULL_MASK(0); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h index b9d92abc3844..1229344c7279 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h @@ -23,9 +23,10 @@ struct ptp { u32 clock_period; }; +struct rvu; struct ptp *ptp_get(void); void ptp_put(struct ptp *ptp); -void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts); +void ptp_start(struct rvu *rvu, u64 sclk, u32 ext_clk_freq, u32 extts); extern struct pci_driver ptp_driver; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c index b4fcb20c3f4f..4728ba34b0e3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c @@ -355,8 +355,8 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause, void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable) { + u64 cfg, pfc_class_mask_cfg; rpm_t *rpm = rpmd; - u64 cfg; /* ALL pause frames received are completely ignored */ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); @@ -373,6 +373,11 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable) cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE; rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); + /* Disable forward pause to driver */ + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); + cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD; + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); + /* Enable channel mask for all LMACS */ if (is_dev_rpm2(rpm)) rpm_write(rpm, lmac_id, RPM2_CMR_CHAN_MSK_OR, 0xffff); @@ -380,9 +385,11 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable) rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL); /* Disable all PFC classes */ - cfg = rpm_read(rpm, lmac_id, 
RPMX_CMRX_PRT_CBFC_CTL); + pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL : + RPMX_CMRX_PRT_CBFC_CTL; + cfg = rpm_read(rpm, lmac_id, pfc_class_mask_cfg); cfg = FIELD_SET(RPM_PFC_CLASS_MASK, 0, cfg); - rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg); + rpm_write(rpm, lmac_id, pfc_class_mask_cfg, cfg); } int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat) @@ -605,18 +612,19 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p if (!is_lmac_valid(rpm, lmac_id)) return -ENODEV; + pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL : + RPMX_CMRX_PRT_CBFC_CTL; + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); - class_en = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL); + class_en = rpm_read(rpm, lmac_id, pfc_class_mask_cfg); pfc_en |= FIELD_GET(RPM_PFC_CLASS_MASK, class_en); if (rx_pause) { cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE | - RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE | - RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD); + RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE); } else { cfg |= (RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE | - RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE | - RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD); + RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE); } if (tx_pause) { @@ -635,10 +643,6 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE; rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); - - pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL : - RPMX_CMRX_PRT_CBFC_CTL; - rpm_write(rpm, lmac_id, pfc_class_mask_cfg, class_en); return 0; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 73df2d564545..731bb82b577c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -2631,6 +2631,9 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc) rvu_npc_free_mcam_entries(rvu, pcifunc, -1); rvu_mac_reset(rvu, pcifunc); + if (rvu->mcs_blk_cnt) + rvu_mcs_flr_handler(rvu, pcifunc); + mutex_unlock(&rvu->flr_lock); } @@ -3322,7 +3325,7 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) mutex_init(&rvu->rswitch.switch_lock); if (rvu->fwdata) - ptp_start(rvu->ptp, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate, + ptp_start(rvu, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate, rvu->fwdata->ptp_ext_tstamp); return 0; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index e8e65fd7888d..cce2806aaa50 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -17,6 +17,7 @@ #include "mbox.h" #include "npc.h" #include "rvu_reg.h" +#include "ptp.h" /* PCI device IDs */ #define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065 @@ -26,6 +27,7 @@ #define PCI_SUBSYS_DEVID_98XX 0xB100 #define PCI_SUBSYS_DEVID_96XX 0xB200 #define PCI_SUBSYS_DEVID_CN10K_A 0xB900 +#define PCI_SUBSYS_DEVID_CNF10K_A 0xBA00 #define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00 #define PCI_SUBSYS_DEVID_CN10K_B 0xBD00 @@ -343,6 +345,7 @@ struct nix_hw { struct nix_txvlan txvlan; struct nix_ipolicer *ipolicer; u64 *tx_credits; + u8 cc_mcs_cnt; }; /* RVU block's capabilities or functionality, @@ -634,6 +637,16 @@ static inline bool is_rvu_otx2(struct rvu *rvu) midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO); } +static inline bool 
is_cnf10ka_a0(struct rvu *rvu) +{ + struct pci_dev *pdev = rvu->pdev; + + if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A && + (pdev->revision & 0x0F) == 0x0) + return true; + return false; +} + static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu) { u64 npc_const3; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 095b2cc4a699..15a319684ed3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -236,6 +236,11 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu) linfo = &event->link_uinfo; pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id); + if (!pfmap) { + dev_err(rvu->dev, "CGX port%d:%d not mapped with PF\n", + event->cgx_id, event->lmac_id); + return; + } do { pfid = find_first_bit(&pfmap, @@ -345,7 +350,7 @@ int rvu_cgx_init(struct rvu *rvu) rvu->cgx_cnt_max = cgx_get_cgxcnt_max(); if (!rvu->cgx_cnt_max) { dev_info(rvu->dev, "No CGX devices found!\n"); - return -ENODEV; + return 0; } rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max * @@ -686,7 +691,7 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu, { int pf = rvu_get_pf(req->hdr.pcifunc); u8 cgx_id, lmac_id; - int rc = 0, i; + int rc = 0; u64 cfg; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) @@ -697,8 +702,7 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu, rsp->hdr.rc = rc; cfg = cgx_lmac_addr_get(cgx_id, lmac_id); /* copy 48 bit mac address to req->mac_addr */ - for (i = 0; i < ETH_ALEN; i++) - rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8; + u64_to_ether_addr(cfg, rsp->mac_addr); return 0; } @@ -752,12 +756,11 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) return 0; - /* This msg is expected only from PFs that are mapped to CGX LMACs, + /* This msg is expected only from PF/VFs that are mapped to CGX/RPM LMACs, * if received from other PF/VF simply ACK, nothing to do. 
*/ - if ((pcifunc & RVU_PFVF_FUNC_MASK) || - !is_pf_cgxmapped(rvu, pf)) - return -ENODEV; + if (!is_pf_cgxmapped(rvu, pf)) + return -EPERM; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); cgxd = rvu_cgx_pdata(cgx_id, rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 3b26893efdf8..bd817ee88735 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -2756,6 +2756,27 @@ static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp, RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL); +#define RVU_DBG_PRINT_MPLS_TTL(pkt, mask) \ +do { \ + seq_printf(s, "%ld ", FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, pkt)); \ + seq_printf(s, "mask 0x%lx\n", \ + FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, mask)); \ +} while (0) \ + +#define RVU_DBG_PRINT_MPLS_LBTCBOS(_pkt, _mask) \ +do { \ + typeof(_pkt) (pkt) = (_pkt); \ + typeof(_mask) (mask) = (_mask); \ + seq_printf(s, "%ld %ld %ld\n", \ + FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, pkt), \ + FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, pkt), \ + FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, pkt)); \ + seq_printf(s, "\tmask 0x%lx 0x%lx 0x%lx\n", \ + FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, mask), \ + FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, mask), \ + FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, mask)); \ +} while (0) \ + static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s, struct rvu_npc_mcam_rule *rule) { @@ -2787,6 +2808,11 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s, seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.vlan_tci)); break; + case NPC_INNER_VID: + seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_itci)); + seq_printf(s, "mask 0x%x\n", + ntohs(rule->mask.vlan_itci)); + break; case NPC_TOS: seq_printf(s, "%d ", rule->packet.tos); seq_printf(s, "mask 0x%x\n", rule->mask.tos); @@ -2827,6 +2853,42 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s, seq_printf(s, "%d ", ntohs(rule->packet.dport)); seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport)); break; + case NPC_IPSEC_SPI: + seq_printf(s, "0x%x ", ntohl(rule->packet.spi)); + seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi)); + break; + case NPC_MPLS1_LBTCBOS: + RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[0], + rule->mask.mpls_lse[0]); + break; + case NPC_MPLS1_TTL: + RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[0], + rule->mask.mpls_lse[0]); + break; + case NPC_MPLS2_LBTCBOS: + RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[1], + rule->mask.mpls_lse[1]); + break; + case NPC_MPLS2_TTL: + RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[1], + rule->mask.mpls_lse[1]); + break; + case NPC_MPLS3_LBTCBOS: + RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[2], + rule->mask.mpls_lse[2]); + break; + case NPC_MPLS3_TTL: + RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[2], + rule->mask.mpls_lse[2]); + break; + case NPC_MPLS4_LBTCBOS: + RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[3], + rule->mask.mpls_lse[3]); + break; + case NPC_MPLS4_TTL: + RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[3], + rule->mask.mpls_lse[3]); + break; default: seq_puts(s, "\n"); break; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index 41df5ac23f92..21b5d71c1e37 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -14,26 +14,16 @@ #define DRV_NAME "octeontx2-af" -static int 
rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name) +static void rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name) { - int err; - - err = devlink_fmsg_pair_nest_start(fmsg, name); - if (err) - return err; - - return devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_pair_nest_start(fmsg, name); + devlink_fmsg_obj_nest_start(fmsg); } -static int rvu_report_pair_end(struct devlink_fmsg *fmsg) +static void rvu_report_pair_end(struct devlink_fmsg *fmsg) { - int err; - - err = devlink_fmsg_obj_nest_end(fmsg); - if (err) - return err; - - return devlink_fmsg_pair_nest_end(fmsg); + devlink_fmsg_obj_nest_end(fmsg); + devlink_fmsg_pair_nest_end(fmsg); } static bool rvu_common_request_irq(struct rvu *rvu, int offset, @@ -284,175 +274,81 @@ static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx, { struct rvu_nix_event_ctx *nix_event_context; u64 intr_val; - int err; nix_event_context = ctx; switch (health_reporter) { case NIX_AF_RVU_INTR: intr_val = nix_event_context->nix_af_rvu_int; - err = rvu_report_pair_start(fmsg, "NIX_AF_RVU"); - if (err) - return err; - err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ", - nix_event_context->nix_af_rvu_int); - if (err) - return err; - if (intr_val & BIT_ULL(0)) { - err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error"); - if (err) - return err; - } - err = rvu_report_pair_end(fmsg); - if (err) - return err; + rvu_report_pair_start(fmsg, "NIX_AF_RVU"); + devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ", + nix_event_context->nix_af_rvu_int); + if (intr_val & BIT_ULL(0)) + devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error"); + rvu_report_pair_end(fmsg); break; case NIX_AF_RVU_GEN: intr_val = nix_event_context->nix_af_rvu_gen; - err = rvu_report_pair_start(fmsg, "NIX_AF_GENERAL"); - if (err) - return err; - err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ", - nix_event_context->nix_af_rvu_gen); - if (err) - return err; - if (intr_val & BIT_ULL(0)) { - err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop"); - if (err) - return err; - } - if (intr_val & BIT_ULL(1)) { - err = devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop"); - if (err) - return err; - } - if (intr_val & BIT_ULL(4)) { - err = devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done"); - if (err) - return err; - } - err = rvu_report_pair_end(fmsg); - if (err) - return err; + rvu_report_pair_start(fmsg, "NIX_AF_GENERAL"); + devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ", + nix_event_context->nix_af_rvu_gen); + if (intr_val & BIT_ULL(0)) + devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop"); + if (intr_val & BIT_ULL(1)) + devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop"); + if (intr_val & BIT_ULL(4)) + devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done"); + rvu_report_pair_end(fmsg); break; case NIX_AF_RVU_ERR: intr_val = nix_event_context->nix_af_rvu_err; - err = rvu_report_pair_start(fmsg, "NIX_AF_ERR"); - if (err) - return err; - err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ", - nix_event_context->nix_af_rvu_err); - if (err) - return err; - if (intr_val & BIT_ULL(14)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read"); - if (err) - return err; - } - if (intr_val & BIT_ULL(13)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write"); - if (err) - return err; - } - if (intr_val & BIT_ULL(12)) { - err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error"); - if (err) - return err; - } - if (intr_val & 
BIT_ULL(6)) { - err = devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC"); - if (err) - return err; - } - if (intr_val & BIT_ULL(5)) { - err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error"); - if (err) - return err; - } - if (intr_val & BIT_ULL(4)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read"); - if (err) - return err; - } - if (intr_val & BIT_ULL(3)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read"); - if (err) - return err; - } - if (intr_val & BIT_ULL(2)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read"); - if (err) - return err; - } - if (intr_val & BIT_ULL(1)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write"); - if (err) - return err; - } - if (intr_val & BIT_ULL(0)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write"); - if (err) - return err; - } - err = rvu_report_pair_end(fmsg); - if (err) - return err; + rvu_report_pair_start(fmsg, "NIX_AF_ERR"); + devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ", + nix_event_context->nix_af_rvu_err); + if (intr_val & BIT_ULL(14)) + devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read"); + if (intr_val & BIT_ULL(13)) + devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write"); + if (intr_val & BIT_ULL(12)) + devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error"); + if (intr_val & BIT_ULL(6)) + devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC"); + if (intr_val & BIT_ULL(5)) + devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error"); + if (intr_val & BIT_ULL(4)) + devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read"); + if (intr_val & BIT_ULL(3)) + devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read"); + if (intr_val & BIT_ULL(2)) + devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read"); + if (intr_val & BIT_ULL(1)) + devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write"); + if (intr_val & BIT_ULL(0)) + devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write"); + rvu_report_pair_end(fmsg); break; case NIX_AF_RVU_RAS: intr_val = nix_event_context->nix_af_rvu_err; - err = rvu_report_pair_start(fmsg, "NIX_AF_RAS"); - if (err) - return err; - err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ", - nix_event_context->nix_af_rvu_err); - if (err) - return err; - err = devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:"); - if (err) - return err; - if (intr_val & BIT_ULL(34)) { - err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S"); - if (err) - return err; - } - if (intr_val & BIT_ULL(33)) { - err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S"); - if (err) - return err; - } - if (intr_val & BIT_ULL(32)) { - err = devlink_fmsg_string_put(fmsg, "\n\tHW ctx"); - if (err) - return err; - } - if (intr_val & BIT_ULL(4)) { - err = devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer"); - if (err) - return err; - } - if (intr_val & BIT_ULL(3)) { - err = devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer"); - - if (err) - return err; - } - if (intr_val & BIT_ULL(2)) { - err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer"); - if (err) - return err; - } - if (intr_val & BIT_ULL(1)) { - err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer"); - if (err) - return err; - } - if (intr_val & BIT_ULL(0)) { - err = devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read"); - if (err) - return err; - } - err = rvu_report_pair_end(fmsg); - if (err) - return err; + 
rvu_report_pair_start(fmsg, "NIX_AF_RAS"); + devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ", + nix_event_context->nix_af_rvu_err); + devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:"); + if (intr_val & BIT_ULL(34)) + devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S"); + if (intr_val & BIT_ULL(33)) + devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S"); + if (intr_val & BIT_ULL(32)) + devlink_fmsg_string_put(fmsg, "\n\tHW ctx"); + if (intr_val & BIT_ULL(4)) + devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer"); + if (intr_val & BIT_ULL(3)) + devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer"); + if (intr_val & BIT_ULL(2)) + devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer"); + if (intr_val & BIT_ULL(1)) + devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer"); + if (intr_val & BIT_ULL(0)) + devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read"); + rvu_report_pair_end(fmsg); break; default: return -EINVAL; @@ -642,7 +538,7 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl) rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq"); if (!rvu_dl->devlink_wq) - goto err; + return -ENOMEM; INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work); INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work); @@ -650,9 +546,6 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl) INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work); return 0; -err: - rvu_nix_health_reporters_destroy(rvu_dl); - return -ENOMEM; } static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl) @@ -922,181 +815,87 @@ static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx, struct rvu_npa_event_ctx *npa_event_context; unsigned int alloc_dis, free_dis; u64 intr_val; - int err; npa_event_context = ctx; switch (health_reporter) { case NPA_AF_RVU_GEN: intr_val = npa_event_context->npa_af_rvu_gen; - err = rvu_report_pair_start(fmsg, "NPA_AF_GENERAL"); - if (err) - return err; - err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ", - npa_event_context->npa_af_rvu_gen); - if (err) - return err; - if (intr_val & BIT_ULL(32)) { - err = devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error"); - if (err) - return err; - } + rvu_report_pair_start(fmsg, "NPA_AF_GENERAL"); + devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ", + npa_event_context->npa_af_rvu_gen); + if (intr_val & BIT_ULL(32)) + devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error"); free_dis = FIELD_GET(GENMASK(15, 0), intr_val); - if (free_dis & BIT(NPA_INPQ_NIX0_RX)) { - err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX"); - if (err) - return err; - } - if (free_dis & BIT(NPA_INPQ_NIX0_TX)) { - err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX"); - if (err) - return err; - } - if (free_dis & BIT(NPA_INPQ_NIX1_RX)) { - err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX"); - if (err) - return err; - } - if (free_dis & BIT(NPA_INPQ_NIX1_TX)) { - err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX"); - if (err) - return err; - } - if (free_dis & BIT(NPA_INPQ_SSO)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO"); - if (err) - return err; - } - if (free_dis & BIT(NPA_INPQ_TIM)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM"); - if (err) - return err; - } - if (free_dis & BIT(NPA_INPQ_DPI)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI"); - if (err) - return err; - } - if (free_dis & BIT(NPA_INPQ_AURA_OP)) { - err = 
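/*
 * Context for the NPA_AF_GENERAL decoding above: one 64-bit interrupt
 * register carries two 16-bit vectors, "free disabled" in bits 15:0 and
 * "alloc disabled" in bits 31:16, one bit per enqueue source. A small
 * self-contained sketch of that split (the register value is made up):
 */
#include <linux/bitfield.h>

static void npa_gen_intr_demo(void)
{
	u64 intr_val = 0x00030001ULL;	/* example register image */
	unsigned int free_dis, alloc_dis;

	free_dis = FIELD_GET(GENMASK_ULL(15, 0), intr_val);	/* 0x0001 */
	alloc_dis = FIELD_GET(GENMASK_ULL(31, 16), intr_val);	/* 0x0003 */

	if (free_dis & BIT(NPA_INPQ_NIX0_RX))
		pr_info("NIX0 RX: buffer free disabled\n");
	if (alloc_dis & BIT(NPA_INPQ_SSO))
		pr_info("SSO: buffer alloc disabled\n");
}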
devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA"); - if (err) - return err; - } + if (free_dis & BIT(NPA_INPQ_NIX0_RX)) + devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX"); + if (free_dis & BIT(NPA_INPQ_NIX0_TX)) + devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX"); + if (free_dis & BIT(NPA_INPQ_NIX1_RX)) + devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX"); + if (free_dis & BIT(NPA_INPQ_NIX1_TX)) + devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX"); + if (free_dis & BIT(NPA_INPQ_SSO)) + devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO"); + if (free_dis & BIT(NPA_INPQ_TIM)) + devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM"); + if (free_dis & BIT(NPA_INPQ_DPI)) + devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI"); + if (free_dis & BIT(NPA_INPQ_AURA_OP)) + devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA"); alloc_dis = FIELD_GET(GENMASK(31, 16), intr_val); - if (alloc_dis & BIT(NPA_INPQ_NIX0_RX)) { - err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX"); - if (err) - return err; - } - if (alloc_dis & BIT(NPA_INPQ_NIX0_TX)) { - err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX"); - if (err) - return err; - } - if (alloc_dis & BIT(NPA_INPQ_NIX1_RX)) { - err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX"); - if (err) - return err; - } - if (alloc_dis & BIT(NPA_INPQ_NIX1_TX)) { - err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX"); - if (err) - return err; - } - if (alloc_dis & BIT(NPA_INPQ_SSO)) { - err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO"); - if (err) - return err; - } - if (alloc_dis & BIT(NPA_INPQ_TIM)) { - err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM"); - if (err) - return err; - } - if (alloc_dis & BIT(NPA_INPQ_DPI)) { - err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI"); - if (err) - return err; - } - if (alloc_dis & BIT(NPA_INPQ_AURA_OP)) { - err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA"); - if (err) - return err; - } - err = rvu_report_pair_end(fmsg); - if (err) - return err; + if (alloc_dis & BIT(NPA_INPQ_NIX0_RX)) + devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX"); + if (alloc_dis & BIT(NPA_INPQ_NIX0_TX)) + devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX"); + if (alloc_dis & BIT(NPA_INPQ_NIX1_RX)) + devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX"); + if (alloc_dis & BIT(NPA_INPQ_NIX1_TX)) + devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX"); + if (alloc_dis & BIT(NPA_INPQ_SSO)) + devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO"); + if (alloc_dis & BIT(NPA_INPQ_TIM)) + devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM"); + if (alloc_dis & BIT(NPA_INPQ_DPI)) + devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI"); + if (alloc_dis & BIT(NPA_INPQ_AURA_OP)) + devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA"); + + rvu_report_pair_end(fmsg); break; case NPA_AF_RVU_ERR: - err = rvu_report_pair_start(fmsg, "NPA_AF_ERR"); - if (err) - return err; - err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ", - npa_event_context->npa_af_rvu_err); - if (err) - return err; - - if (npa_event_context->npa_af_rvu_err & BIT_ULL(14)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read"); - if (err) - return err; - } - if (npa_event_context->npa_af_rvu_err & BIT_ULL(13)) { - err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write"); - if (err) - return err; - } - if 
(npa_event_context->npa_af_rvu_err & BIT_ULL(12)) { - err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error"); - if (err) - return err; - } - err = rvu_report_pair_end(fmsg); - if (err) - return err; + rvu_report_pair_start(fmsg, "NPA_AF_ERR"); + devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ", + npa_event_context->npa_af_rvu_err); + if (npa_event_context->npa_af_rvu_err & BIT_ULL(14)) + devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read"); + if (npa_event_context->npa_af_rvu_err & BIT_ULL(13)) + devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write"); + if (npa_event_context->npa_af_rvu_err & BIT_ULL(12)) + devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error"); + rvu_report_pair_end(fmsg); break; case NPA_AF_RVU_RAS: - err = rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS"); - if (err) - return err; - err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ", - npa_event_context->npa_af_rvu_ras); - if (err) - return err; - if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34)) { - err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S"); - if (err) - return err; - } - if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33)) { - err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S"); - if (err) - return err; - } - if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32)) { - err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context"); - if (err) - return err; - } - err = rvu_report_pair_end(fmsg); - if (err) - return err; + rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS"); + devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ", + npa_event_context->npa_af_rvu_ras); + if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34)) + devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S"); + if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33)) + devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S"); + if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32)) + devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context"); + rvu_report_pair_end(fmsg); break; case NPA_AF_RVU_INTR: - err = rvu_report_pair_start(fmsg, "NPA_AF_RVU"); - if (err) - return err; - err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ", - npa_event_context->npa_af_rvu_int); - if (err) - return err; - if (npa_event_context->npa_af_rvu_int & BIT_ULL(0)) { - err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error"); - if (err) - return err; - } - return rvu_report_pair_end(fmsg); + rvu_report_pair_start(fmsg, "NPA_AF_RVU"); + devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ", + npa_event_context->npa_af_rvu_int); + if (npa_event_context->npa_af_rvu_int & BIT_ULL(0)) + devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error"); + rvu_report_pair_end(fmsg); + break; default: return -EINVAL; } @@ -1285,7 +1084,7 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl) rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq"); if (!rvu_dl->devlink_wq) - goto err; + return -ENOMEM; INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work); INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work); @@ -1293,9 +1092,6 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl) INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work); return 0; -err: - rvu_npa_health_reporters_destroy(rvu_dl); - return -ENOMEM; } static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 
c2f68678e947..4227ebb4a758 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -12,6 +12,7 @@ #include "rvu_reg.h" #include "rvu.h" #include "npc.h" +#include "mcs.h" #include "cgx.h" #include "lmac_common.h" #include "rvu_npc_hash.h" @@ -846,6 +847,21 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, return 0; } +static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req, + u16 *smq, u16 *smq_mask) +{ + struct nix_cn10k_aq_enq_req *aq_req; + + if (!is_rvu_otx2(rvu)) { + aq_req = (struct nix_cn10k_aq_enq_req *)req; + *smq = aq_req->sq.smq; + *smq_mask = aq_req->sq_mask.smq; + } else { + *smq = req->sq.smq; + *smq_mask = req->sq_mask.smq; + } +} + static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, struct nix_aq_enq_req *req, struct nix_aq_enq_rsp *rsp) @@ -857,6 +873,7 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, struct rvu_block *block; struct admin_queue *aq; struct rvu_pfvf *pfvf; + u16 smq, smq_mask; void *ctx, *mask; bool ena; u64 cfg; @@ -928,13 +945,14 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, if (rc) return rc; + nix_get_aq_req_smq(rvu, req, &smq, &smq_mask); /* Check if SQ pointed SMQ belongs to this PF/VF or not */ if (req->ctype == NIX_AQ_CTYPE_SQ && ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) || (req->op == NIX_AQ_INSTOP_WRITE && - req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) { + req->sq_mask.ena && req->sq.ena && smq_mask))) { if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ, - pcifunc, req->sq.smq)) + pcifunc, smq)) return NIX_AF_ERR_AQ_ENQUEUE; } @@ -4372,6 +4390,12 @@ static void nix_link_config(struct rvu *rvu, int blkaddr, SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); } + /* Get MCS external bypass status for CN10K-B */ + if (mcs_get_blkcnt() == 1) { + /* Adjust for 2 credits when external bypass is disabled */ + nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2; + } + /* Set credits for Tx links assuming max packet length allowed. * This will be reconfigured based on MTU set for PF/VF. 
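/*
 * Context for nix_get_aq_req_smq() above: CN10K silicon uses a
 * different AQ context layout (struct nix_cn10k_aq_enq_req), so reading
 * req->sq.smq through the OTX2 structure on CN10K picks up bytes from
 * the wrong offset and the SMQ-ownership check validated garbage.
 * The idiom, reduced to its core (sketch only):
 */
static u16 aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req)
{
	/* same mailbox buffer, silicon-specific view of its fields */
	if (!is_rvu_otx2(rvu))
		return ((struct nix_cn10k_aq_enq_req *)req)->sq.smq;
	return req->sq.smq;
}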
*/ @@ -4395,6 +4419,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr, tx_credits = (lmac_fifo_len - lmac_max_frs) / 16; /* Enable credits and set credit pkt count to max allowed */ cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); + cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt); link = iter + slink; nix_hw->tx_credits[link] = tx_credits; @@ -5488,6 +5513,8 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu, ipolicer = &nix_hw->ipolicer[layer]; for (idx = 0; idx < req->prof_count[layer]; idx++) { + if (idx == MAX_BANDPROF_PER_PFFUNC) + break; prof_idx = req->prof_idx[layer][idx]; if (prof_idx >= ipolicer->band_prof.max || ipolicer->pfvf_map[prof_idx] != pcifunc) @@ -5501,8 +5528,6 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu, ipolicer->pfvf_map[prof_idx] = 0x00; ipolicer->match_id[prof_idx] = 0; rvu_free_rsrc(&ipolicer->band_prof, prof_idx); - if (idx == MAX_BANDPROF_PER_PFFUNC) - break; } } mutex_unlock(&rvu->rsrc_lock); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 16cfc802e348..0bcf3e559280 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -389,7 +389,13 @@ static u64 npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam, int bank, nixlf, index; /* get ucast entry rule entry index */ - nix_get_nixlf(rvu, pf_func, &nixlf, NULL); + if (nix_get_nixlf(rvu, pf_func, &nixlf, NULL)) { + dev_err(rvu->dev, "%s: nixlf not attached to pcifunc:0x%x\n", + __func__, pf_func); + /* Action 0 is drop */ + return 0; + } + index = npc_get_nixlf_mcam_index(mcam, pf_func, nixlf, NIXLF_UCAST_ENTRY); bank = npc_get_bank(mcam, index); @@ -665,6 +671,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, int blkaddr, ucast_idx, index; struct nix_rx_action action = { 0 }; u64 relaxed_mask; + u8 flow_key_alg; if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc)) return; @@ -695,6 +702,8 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, action.op = NIX_RX_ACTIONOP_UCAST; } + flow_key_alg = action.flow_key_alg; + /* RX_ACTION set to MCAST for CGX PF's */ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list && is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { @@ -734,7 +743,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, req.vf = pcifunc; req.index = action.index; req.match_id = action.match_id; - req.flow_key_alg = action.flow_key_alg; + req.flow_key_alg = flow_key_alg; rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); } @@ -848,6 +857,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u8 mac_addr[ETH_ALEN] = { 0 }; struct nix_rx_action action = { 0 }; struct rvu_pfvf *pfvf; + u8 flow_key_alg; u16 vf_func; /* Only CGX PF/VF can add allmulticast entry */ @@ -882,6 +892,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, *(u64 *)&action = npc_get_mcam_action(rvu, mcam, blkaddr, ucast_idx); + flow_key_alg = action.flow_key_alg; if (action.op != NIX_RX_ACTIONOP_RSS) { *(u64 *)&action = 0; action.op = NIX_RX_ACTIONOP_UCAST; @@ -918,7 +929,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, req.vf = pcifunc | vf_func; req.index = action.index; req.match_id = action.match_id; - req.flow_key_alg = action.flow_key_alg; + req.flow_key_alg = flow_key_alg; rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); } @@ -984,11 +995,38 @@ static void 
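/*
 * Context for the bandprof hunk above: the MAX_BANDPROF_PER_PFFUNC test
 * now runs before req->prof_idx[layer][idx] is read; previously the
 * array was indexed first and the bound checked afterwards, so an
 * oversized prof_count in a mailbox request could read past the
 * fixed-size array. The shape of the fix, distilled into a sketch:
 */
#define DEMO_MAX_IDS 8			/* stands in for MAX_BANDPROF_PER_PFFUNC */

static u32 demo_sum_ids(const u16 ids[DEMO_MAX_IDS], unsigned int requested)
{
	unsigned int i;
	u32 sum = 0;

	for (i = 0; i < requested; i++) {
		if (i == DEMO_MAX_IDS)
			break;		/* clamp before indexing */
		sum += ids[i];		/* now provably in range */
	}
	return sum;
}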
npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam, mutex_unlock(&mcam->lock); } +static void npc_update_rx_action_with_alg_idx(struct rvu *rvu, struct nix_rx_action action, + struct rvu_pfvf *pfvf, int mcam_index, int blkaddr, + int alg_idx) + +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct rvu_hwinfo *hw = rvu->hw; + int bank, op_rss; + + if (!is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_index)) + return; + + op_rss = (!hw->cap.nix_rx_multicast || !pfvf->use_mce_list); + + bank = npc_get_bank(mcam, mcam_index); + mcam_index &= (mcam->banksize - 1); + + /* If Rx action is MCAST update only RSS algorithm index */ + if (!op_rss) { + *(u64 *)&action = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank)); + + action.flow_key_alg = alg_idx; + } + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank), *(u64 *)&action); +} + void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, int group, int alg_idx, int mcam_index) { struct npc_mcam *mcam = &rvu->hw->mcam; - struct rvu_hwinfo *hw = rvu->hw; struct nix_rx_action action; int blkaddr, index, bank; struct rvu_pfvf *pfvf; @@ -1044,15 +1082,16 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, /* If PF's promiscuous entry is enabled, * Set RSS action for that entry as well */ - if ((!hw->cap.nix_rx_multicast || !pfvf->use_mce_list) && - is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) { - bank = npc_get_bank(mcam, index); - index &= (mcam->banksize - 1); + npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr, + alg_idx); - rvu_write64(rvu, blkaddr, - NPC_AF_MCAMEX_BANKX_ACTION(index, bank), - *(u64 *)&action); - } + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_ALLMULTI_ENTRY); + /* If PF's allmulti entry is enabled, + * Set RSS action for that entry as well + */ + npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr, + alg_idx); } void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index 952319453701..114e4ec21802 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -20,6 +20,7 @@ static const char * const npc_flow_names[] = { [NPC_VLAN_ETYPE_CTAG] = "vlan ether type ctag", [NPC_VLAN_ETYPE_STAG] = "vlan ether type stag", [NPC_OUTER_VID] = "outer vlan id", + [NPC_INNER_VID] = "inner vlan id", [NPC_TOS] = "tos", [NPC_IPFRAG_IPV4] = "fragmented IPv4 header ", [NPC_SIP_IPV4] = "ipv4 source ip", @@ -41,6 +42,15 @@ static const char * const npc_flow_names[] = { [NPC_SPORT_SCTP] = "sctp source port", [NPC_DPORT_SCTP] = "sctp destination port", [NPC_LXMB] = "Mcast/Bcast header ", + [NPC_IPSEC_SPI] = "SPI ", + [NPC_MPLS1_LBTCBOS] = "lse depth 1 label tc bos", + [NPC_MPLS1_TTL] = "lse depth 1 ttl", + [NPC_MPLS2_LBTCBOS] = "lse depth 2 label tc bos", + [NPC_MPLS2_TTL] = "lse depth 2 ttl", + [NPC_MPLS3_LBTCBOS] = "lse depth 3 label tc bos", + [NPC_MPLS3_TTL] = "lse depth 3 ttl", + [NPC_MPLS4_LBTCBOS] = "lse depth 4 label tc bos", + [NPC_MPLS4_TTL] = "lse depth 4", [NPC_UNKNOWN] = "unknown", }; @@ -327,6 +337,8 @@ static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf) */ struct npc_key_field *vlan_tag1; struct npc_key_field *vlan_tag2; + /* Inner VLAN TCI for double tagged frames */ + struct npc_key_field *vlan_tag3; u64 *features; u8 start_lid; int i; @@ -349,6 
+361,7 @@ static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf) etype_tag2 = &key_fields[NPC_ETYPE_TAG2]; vlan_tag1 = &key_fields[NPC_VLAN_TAG1]; vlan_tag2 = &key_fields[NPC_VLAN_TAG2]; + vlan_tag3 = &key_fields[NPC_VLAN_TAG3]; /* if key profile programmed does not extract Ethertype at all */ if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws) { @@ -430,6 +443,12 @@ vlan_tci: goto done; } *features |= BIT_ULL(NPC_OUTER_VID); + + /* If key profile extracts inner vlan tci */ + if (vlan_tag3->nr_kws) { + key_fields[NPC_INNER_VID] = *vlan_tag3; + *features |= BIT_ULL(NPC_INNER_VID); + } done: return; } @@ -512,7 +531,20 @@ do { \ NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2); NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2); NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2); + NPC_SCAN_HDR(NPC_VLAN_TAG3, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 6, 2); NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6); + + NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LD, NPC_LT_LD_AH, 4, 4); + NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LE, NPC_LT_LE_ESP, 0, 4); + NPC_SCAN_HDR(NPC_MPLS1_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 0, 3); + NPC_SCAN_HDR(NPC_MPLS1_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 3, 1); + NPC_SCAN_HDR(NPC_MPLS2_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 4, 3); + NPC_SCAN_HDR(NPC_MPLS2_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 7, 1); + NPC_SCAN_HDR(NPC_MPLS3_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 8, 3); + NPC_SCAN_HDR(NPC_MPLS3_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 11, 1); + NPC_SCAN_HDR(NPC_MPLS4_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 12, 3); + NPC_SCAN_HDR(NPC_MPLS4_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 15, 1); + /* SMAC follows the DMAC(which is 6 bytes) */ NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start + 6, 6); /* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */ @@ -564,6 +596,11 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf) if (!npc_check_field(rvu, blkaddr, NPC_LB, intf)) *features &= ~BIT_ULL(NPC_OUTER_VID); + /* Set SPI flag only if AH/ESP and IPSEC_SPI are in the key */ + if (npc_check_field(rvu, blkaddr, NPC_IPSEC_SPI, intf) && + (*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH)))) + *features |= BIT_ULL(NPC_IPSEC_SPI); + /* for vlan ethertypes corresponding layer type should be in the key */ if (npc_check_field(rvu, blkaddr, NPC_LB, intf)) *features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG) | @@ -572,6 +609,11 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf) /* for L2M/L2B/L3M/L3B, check if the type is present in the key */ if (npc_check_field(rvu, blkaddr, NPC_LXMB, intf)) *features |= BIT_ULL(NPC_LXMB); + + for (hdr = NPC_MPLS1_LBTCBOS; hdr <= NPC_MPLS4_TTL; hdr++) { + if (npc_check_field(rvu, blkaddr, hdr, intf)) + *features |= BIT_ULL(hdr); + } } /* Scan key extraction profile and record how fields of our interest @@ -930,8 +972,54 @@ do { \ NPC_WRITE_FLOW(NPC_DPORT_SCTP, dport, ntohs(pkt->dport), 0, ntohs(mask->dport), 0); + NPC_WRITE_FLOW(NPC_IPSEC_SPI, spi, ntohl(pkt->spi), 0, + ntohl(mask->spi), 0); + NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0, ntohs(mask->vlan_tci), 0); + NPC_WRITE_FLOW(NPC_INNER_VID, vlan_itci, ntohs(pkt->vlan_itci), 0, + ntohs(mask->vlan_itci), 0); + + NPC_WRITE_FLOW(NPC_MPLS1_LBTCBOS, mpls_lse, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL, + pkt->mpls_lse[0]), 0, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL, + mask->mpls_lse[0]), 0); + NPC_WRITE_FLOW(NPC_MPLS1_TTL, mpls_lse, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, + 
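/*
 * Context for the MPLS NPC_SCAN_HDR entries above: each 4-byte label
 * stack entry is split into a 3-byte label/TC/BoS slice plus a 1-byte
 * TTL, which matches the standard LSE layout from RFC 3032. A decode
 * sketch; these mask names are local stand-ins for the driver's
 * OTX2_FLOWER_MASK_MPLS_* definitions:
 */
#include <linux/bitfield.h>

#define DEMO_MPLS_LABEL	GENMASK(31, 12)	/* 20-bit label */
#define DEMO_MPLS_TC	GENMASK(11, 9)	/* traffic class */
#define DEMO_MPLS_BOS	BIT(8)		/* bottom-of-stack flag */
#define DEMO_MPLS_TTL	GENMASK(7, 0)	/* time to live */

static void demo_mpls_lse_decode(u32 lse)
{
	pr_info("label=%lu tc=%lu bos=%lu ttl=%lu\n",
		FIELD_GET(DEMO_MPLS_LABEL, lse),
		FIELD_GET(DEMO_MPLS_TC, lse),
		FIELD_GET(DEMO_MPLS_BOS, lse),
		FIELD_GET(DEMO_MPLS_TTL, lse));
}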
pkt->mpls_lse[0]), 0, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, + mask->mpls_lse[0]), 0); + NPC_WRITE_FLOW(NPC_MPLS2_LBTCBOS, mpls_lse, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL, + pkt->mpls_lse[1]), 0, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL, + mask->mpls_lse[1]), 0); + NPC_WRITE_FLOW(NPC_MPLS2_TTL, mpls_lse, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, + pkt->mpls_lse[1]), 0, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, + mask->mpls_lse[1]), 0); + NPC_WRITE_FLOW(NPC_MPLS3_LBTCBOS, mpls_lse, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL, + pkt->mpls_lse[2]), 0, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL, + mask->mpls_lse[2]), 0); + NPC_WRITE_FLOW(NPC_MPLS3_TTL, mpls_lse, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, + pkt->mpls_lse[2]), 0, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, + mask->mpls_lse[2]), 0); + NPC_WRITE_FLOW(NPC_MPLS4_LBTCBOS, mpls_lse, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL, + pkt->mpls_lse[3]), 0, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL, + mask->mpls_lse[3]), 0); + NPC_WRITE_FLOW(NPC_MPLS4_TTL, mpls_lse, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, + pkt->mpls_lse[3]), 0, + FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, + mask->mpls_lse[3]), 0); NPC_WRITE_FLOW(NPC_IPFRAG_IPV6, next_header, pkt->next_header, 0, mask->next_header, 0); @@ -1192,7 +1280,7 @@ find_rule: write_req.enable_entry = (u8)enable; /* if counter is available then clear and use it */ if (req->set_cntr && rule->has_cntr) { - rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), 0x00); + rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), req->cntr_val); write_req.set_cntr = 1; write_req.cntr = rule->cntr; } @@ -1407,12 +1495,13 @@ static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule, int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu, struct npc_delete_flow_req *req, - struct msg_rsp *rsp) + struct npc_delete_flow_rsp *rsp) { struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_npc_mcam_rule *iter, *tmp; u16 pcifunc = req->hdr.pcifunc; struct list_head del_list; + int blkaddr; INIT_LIST_HEAD(&del_list); @@ -1428,6 +1517,10 @@ int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu, list_move_tail(&iter->list, &del_list); /* single rule */ } else if (req->entry == iter->entry) { + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr) + rsp->cntr_val = rvu_read64(rvu, blkaddr, + NPC_AF_MATCH_STATX(iter->cntr)); list_move_tail(&iter->list, &del_list); break; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c index 7e20282c12d0..d2661e7fabdb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c @@ -391,22 +391,6 @@ int rvu_mbox_handler_npc_get_field_hash_info(struct rvu *rvu, } /** - * rvu_npc_exact_mac2u64 - utility function to convert mac address to u64. - * @mac_addr: MAC address. - * Return: mdata for exact match table. - */ -static u64 rvu_npc_exact_mac2u64(u8 *mac_addr) -{ - u64 mac = 0; - int index; - - for (index = ETH_ALEN - 1; index >= 0; index--) - mac |= ((u64)*mac_addr++) << (8 * index); - - return mac; -} - -/** * rvu_exact_prepare_mdata - Make mdata for mcam entry * @mac: MAC address * @chan: Channel number. @@ -416,7 +400,7 @@ static u64 rvu_npc_exact_mac2u64(u8 *mac_addr) */ static u64 rvu_exact_prepare_mdata(u8 *mac, u16 chan, u16 ctype, u64 mask) { - u64 ldata = rvu_npc_exact_mac2u64(mac); + u64 ldata = ether_addr_to_u64(mac); /* Please note that mask is 48bit which excludes chan and ctype. 
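/*
 * Context for the rvu_npc_exact_mac2u64() removal above: the open-coded
 * loop duplicated a stock helper; <linux/etherdevice.h> already has
 * ether_addr_to_u64(), which packs the six bytes into the low 48 bits
 * in the same order. A quick check of the equivalence:
 */
#include <linux/etherdevice.h>

static u64 demo_mac_to_mdata(void)
{
	u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	return ether_addr_to_u64(mac);	/* yields 0x001122334455 */
}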
* Increase mask bits if we need to include them as well. @@ -604,7 +588,7 @@ static u64 rvu_exact_prepare_table_entry(struct rvu *rvu, bool enable, u8 ctype, u16 chan, u8 *mac_addr) { - u64 ldata = rvu_npc_exact_mac2u64(mac_addr); + u64 ldata = ether_addr_to_u64(mac_addr); /* Enable or disable */ u64 mdata = FIELD_PREP(GENMASK_ULL(63, 63), enable ? 1 : 0); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c index b3150f053291..d46ac29adb96 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c @@ -31,8 +31,8 @@ static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = { {NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18}, {0x1200, 0x12E0} } }, {NIX_TXSCH_LVL_TL3, 4, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608}, - {0x1610, 0x1618}, {0x1700, 0x17B0} } }, - {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17B0} } }, + {0x1610, 0x1618}, {0x1700, 0x17C8} } }, + {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17C8} } }, {NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } }, }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index b42e631e52d0..18c1c9f361cc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -437,6 +437,7 @@ #define NIX_AF_LINKX_BASE_MASK GENMASK_ULL(11, 0) #define NIX_AF_LINKX_RANGE_MASK GENMASK_ULL(19, 16) +#define NIX_AF_LINKX_MCS_CNT_MASK GENMASK_ULL(33, 32) /* SSO */ #define SSO_AF_CONST (0x1000) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c index 592b317f4637..854045ed3b06 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c @@ -158,6 +158,7 @@ void rvu_switch_enable(struct rvu *rvu) struct npc_mcam_alloc_entry_req alloc_req = { 0 }; struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 }; struct npc_delete_flow_req uninstall_req = { 0 }; + struct npc_delete_flow_rsp uninstall_rsp = { 0 }; struct npc_mcam_free_entry_req free_req = { 0 }; struct rvu_switch *rswitch = &rvu->rswitch; struct msg_rsp rsp; @@ -197,7 +198,7 @@ void rvu_switch_enable(struct rvu *rvu) uninstall_rules: uninstall_req.start = rswitch->start_entry; uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1; - rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp); + rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp); kfree(rswitch->entry2pcifunc); free_entries: free_req.all = 1; @@ -209,6 +210,7 @@ exit: void rvu_switch_disable(struct rvu *rvu) { struct npc_delete_flow_req uninstall_req = { 0 }; + struct npc_delete_flow_rsp uninstall_rsp = { 0 }; struct npc_mcam_free_entry_req free_req = { 0 }; struct rvu_switch *rswitch = &rvu->rswitch; struct rvu_hwinfo *hw = rvu->hw; @@ -250,7 +252,7 @@ void rvu_switch_disable(struct rvu *rvu) uninstall_req.start = rswitch->start_entry; uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1; free_req.all = 1; - rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp); + rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp); rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp); rswitch->used_entries = 0; kfree(rswitch->entry2pcifunc); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c 
b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index 826f691de259..c1c99d7054f8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -107,12 +107,13 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) } #define NPA_MAX_BURST 16 -void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) +int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) { struct otx2_nic *pfvf = dev; + int cnt = cq->pool_ptrs; u64 ptrs[NPA_MAX_BURST]; - int num_ptrs = 1; dma_addr_t bufptr; + int num_ptrs = 1; /* Refill pool with new buffers */ while (cq->pool_ptrs) { @@ -131,6 +132,7 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) num_ptrs = 1; } } + return cnt - cq->pool_ptrs; } void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx) @@ -448,6 +450,9 @@ int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile, aq->prof.pebs_mantissa = 0; aq->prof_mask.pebs_mantissa = 0xFF; + aq->prof.hl_en = 0; + aq->prof_mask.hl_en = 1; + /* Fill AQ info */ aq->qidx = profile; aq->ctype = NIX_AQ_CTYPE_BANDPROF; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h index 8ae96815865e..c1861f7de254 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h @@ -24,7 +24,7 @@ static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu) return weight; } -void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); +int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); int cn10k_lmtst_init(struct otx2_nic *pfvf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c index 59b138214af2..6cc7a78968fc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c @@ -1357,10 +1357,12 @@ static int cn10k_mdo_upd_txsa(struct macsec_context *ctx) if (netif_running(secy->netdev)) { /* Keys cannot be changed after creation */ - err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num, - sw_tx_sa->next_pn); - if (err) - return err; + if (ctx->sa.update_pn) { + err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num, + sw_tx_sa->next_pn); + if (err) + return err; + } err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num, sw_tx_sa->active); @@ -1529,6 +1531,9 @@ static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx) if (err) return err; + if (!ctx->sa.update_pn) + return 0; + err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, rx_sa->next_pn); if (err) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 77c8f650f7ac..7ca6941ea0b9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -7,6 +7,7 @@ #include <linux/interrupt.h> #include <linux/pci.h> +#include <net/page_pool/helpers.h> #include <net/tso.h> #include <linux/bitfield.h> @@ -573,20 +574,8 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, dma_addr_t *dma) { - if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) { - struct refill_work *work; - struct delayed_work *dwork; - - work = 
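/*
 * Context for the cn10k_refill_pool_ptrs() change above: the refill op
 * now returns how many pointers it actually pushed back, so the caller
 * can tell "pool refilled" from "allocation still failing". A hedged
 * sketch of a NAPI-side caller; the function below is an assumption
 * about the call site, not the driver's real poll loop:
 */
static int demo_poll_tail(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
			  int budget, int workdone)
{
	if (cq->pool_ptrs &&
	    !pfvf->hw_ops->refill_pool_ptrs(pfvf, cq))
		return budget;	/* nothing refilled: stay scheduled */

	return workdone;	/* normal NAPI accounting */
}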
&pfvf->refill_wrk[cq->cq_idx]; - dwork = &work->pool_refill_work; - /* Schedule a task if no other task is running */ - if (!cq->refill_task_sched) { - cq->refill_task_sched = true; - schedule_delayed_work(dwork, - msecs_to_jiffies(100)); - } + if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) return -ENOMEM; - } return 0; } @@ -774,6 +763,7 @@ int otx2_txsch_alloc(struct otx2_nic *pfvf) rsp->schq_list[lvl][schq]; pfvf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl; + pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio; return 0; } @@ -804,6 +794,7 @@ void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq) mutex_unlock(&pfvf->mbox.lock); } +EXPORT_SYMBOL(otx2_txschq_free_one); void otx2_txschq_stop(struct otx2_nic *pfvf) { @@ -827,7 +818,6 @@ void otx2_sqb_flush(struct otx2_nic *pfvf) int qidx, sqe_tail, sqe_head; struct otx2_snd_queue *sq; u64 incr, *ptr, val; - int timeout = 1000; ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) { @@ -836,15 +826,11 @@ void otx2_sqb_flush(struct otx2_nic *pfvf) continue; incr = (u64)qidx << 32; - while (timeout) { - val = otx2_atomic64_add(incr, ptr); - sqe_head = (val >> 20) & 0x3F; - sqe_tail = (val >> 28) & 0x3F; - if (sqe_head == sqe_tail) - break; - usleep_range(1, 3); - timeout--; - } + val = otx2_atomic64_add(incr, ptr); + sqe_head = (val >> 20) & 0x3F; + sqe_tail = (val >> 28) & 0x3F; + if (sqe_head != sqe_tail) + usleep_range(50, 60); } } @@ -1079,39 +1065,20 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) static void otx2_pool_refill_task(struct work_struct *work) { struct otx2_cq_queue *cq; - struct otx2_pool *rbpool; struct refill_work *wrk; - int qidx, free_ptrs = 0; struct otx2_nic *pfvf; - dma_addr_t bufptr; + int qidx; wrk = container_of(work, struct refill_work, pool_refill_work.work); pfvf = wrk->pf; qidx = wrk - pfvf->refill_wrk; cq = &pfvf->qset.cq[qidx]; - rbpool = cq->rbpool; - free_ptrs = cq->pool_ptrs; - while (cq->pool_ptrs) { - if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) { - /* Schedule a WQ if we fails to free atleast half of the - * pointers else enable napi for this RQ. 
- */ - if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) { - struct delayed_work *dwork; - - dwork = &wrk->pool_refill_work; - schedule_delayed_work(dwork, - msecs_to_jiffies(100)); - } else { - cq->refill_task_sched = false; - } - return; - } - pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM); - cq->pool_ptrs--; - } cq->refill_task_sched = false; + + local_bh_disable(); + napi_schedule(wrk->napi); + local_bh_enable(); } int otx2_config_nix_queues(struct otx2_nic *pfvf) @@ -1431,8 +1398,9 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, return 0; } - pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP; - pp_params.pool_size = numptrs; + pp_params.order = get_order(buf_size); + pp_params.flags = PP_FLAG_DMA_MAP; + pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs); pp_params.nid = NUMA_NO_NODE; pp_params.dev = pfvf->dev; pp_params.dma_dir = DMA_FROM_DEVICE; @@ -1903,31 +1871,16 @@ int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t } } - if ((changed & NETIF_F_HW_TC) && tc) { - if (!pfvf->flow_cfg->max_flows) { - netdev_err(netdev, - "Can't enable TC, MCAM entries not allocated\n"); - return -EINVAL; - } - } - if ((changed & NETIF_F_HW_TC) && !tc && - pfvf->flow_cfg && pfvf->flow_cfg->nr_flows) { + otx2_tc_flower_rule_cnt(pfvf)) { netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n"); return -EBUSY; } if ((changed & NETIF_F_NTUPLE) && ntuple && - (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) { - netdev_err(netdev, - "Can't enable NTUPLE when TC is active, disable TC and retry\n"); - return -EINVAL; - } - - if ((changed & NETIF_F_HW_TC) && tc && - (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) { + otx2_tc_flower_rule_cnt(pfvf) && !(changed & NETIF_F_HW_TC)) { netdev_err(netdev, - "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n"); + "Can't enable NTUPLE when TC flower offload is active, disable TC rules and retry\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index ba8091131ec0..06910307085e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -224,6 +224,7 @@ struct otx2_hw { /* NIX */ u8 txschq_link_cfg_lvl; + u8 txschq_aggr_lvl_rr_prio; u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; u16 matchall_ipolicer; u32 dwrr_mtu; @@ -301,6 +302,7 @@ struct flr_work { struct refill_work { struct delayed_work pool_refill_work; struct otx2_nic *pf; + struct napi_struct *napi; }; /* PTPv2 originTimestamp structure */ @@ -325,6 +327,7 @@ struct otx2_ptp { struct ptp_pin_desc extts_config; u64 (*convert_rx_ptp_tstmp)(u64 timestamp); u64 (*convert_tx_ptp_tstmp)(u64 timestamp); + u64 (*ptp_tstamp2nsec)(const struct timecounter *time_counter, u64 timestamp); struct delayed_work synctstamp_work; u64 tstamp; u32 base_ns; @@ -360,20 +363,15 @@ struct otx2_flow_config { struct list_head flow_list; u32 dmacflt_max_flows; u16 max_flows; -}; - -struct otx2_tc_info { - /* hash table to store TC offloaded flows */ - struct rhashtable flow_table; - struct rhashtable_params flow_ht_params; - unsigned long *tc_entries_bitmap; + struct list_head flow_list_tc; + bool ntuple; }; struct dev_hw_ops { int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura); void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq, int size, int qidx); - void (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue 
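/*
 * Context for the pp_params hunk above: the pool is now created with an
 * explicit page order and a clamped size instead of PP_FLAG_PAGE_FRAG.
 * A minimal standalone version of the same setup; the 2048 clamp stands
 * in for OTX2_PAGE_POOL_SZ and is illustrative:
 */
#include <net/page_pool/helpers.h>

static struct page_pool *demo_rx_pool_create(struct device *dev,
					     int numptrs, size_t buf_size)
{
	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP, /* pool maps pages for DMA */
		.order		= get_order(buf_size),
		.pool_size	= min(2048, numptrs),
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
	};

	return page_pool_create(&pp_params);	/* ERR_PTR() on failure */
}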
*cq); + int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq); void (*aura_freeptr)(void *dev, int aura, u64 buf); }; @@ -491,7 +489,6 @@ struct otx2_nic { /* NPC MCAM */ struct otx2_flow_config *flow_cfg; struct otx2_mac_table *mac_table; - struct otx2_tc_info tc_info; u64 reset_count; struct work_struct reset_task; @@ -945,6 +942,15 @@ static inline u64 otx2_convert_rate(u64 rate) return converted_rate; } +static inline int otx2_tc_flower_rule_cnt(struct otx2_nic *pfvf) +{ + /* return here if MCAM entries not allocated */ + if (!pfvf->flow_cfg) + return 0; + + return pfvf->flow_cfg->nr_flows; +} + /* MSI-X APIs */ void otx2_free_cints(struct otx2_nic *pfvf, int n); void otx2_set_cints_affinity(struct otx2_nic *pfvf); @@ -971,6 +977,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en); int otx2_txsch_alloc(struct otx2_nic *pfvf); void otx2_txschq_stop(struct otx2_nic *pfvf); void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq); +void otx2_free_pending_sqe(struct otx2_nic *pfvf); void otx2_sqb_flush(struct otx2_nic *pfvf); int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, dma_addr_t *dma); @@ -1063,7 +1070,8 @@ int otx2_init_tc(struct otx2_nic *nic); void otx2_shutdown_tc(struct otx2_nic *nic); int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data); -int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic); +void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic); + /* CGX/RPM DMAC filters support */ int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf); int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c index ccaf97bb1ce0..28fb643d2917 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c @@ -70,7 +70,7 @@ static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio) * link config level. These rest of the scheduler can be * same as hw.txschq_list. 
*/ - for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++) + for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++) req->schq[lvl] = 1; rc = otx2_sync_mbox_msg(&pfvf->mbox); @@ -83,7 +83,7 @@ static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio) return PTR_ERR(rsp); /* Setup transmit scheduler list */ - for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++) { + for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++) { if (!rsp->schq[lvl]) return -ENOSPC; @@ -125,19 +125,12 @@ int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf) static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio) { - struct nix_txsch_free_req *free_req; + int lvl; - mutex_lock(&pfvf->mbox.lock); /* free PFC TLx nodes */ - free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox); - if (!free_req) { - mutex_unlock(&pfvf->mbox.lock); - return -ENOMEM; - } - - free_req->flags = TXSCHQ_FREE_ALL; - otx2_sync_mbox_msg(&pfvf->mbox); - mutex_unlock(&pfvf->mbox.lock); + for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++) + otx2_txschq_free_one(pfvf, lvl, + pfvf->pfc_schq_list[lvl][prio]); pfvf->pfc_alloc_status[prio] = false; return 0; @@ -406,9 +399,10 @@ static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc) static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) { struct otx2_nic *pfvf = netdev_priv(dev); + u8 old_pfc_en; int err; - /* Save PFC configuration to interface */ + old_pfc_en = pfvf->pfc_en; pfvf->pfc_en = pfc->pfc_en; if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX) @@ -418,13 +412,17 @@ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) * supported by the tx queue configuration */ err = otx2_check_pfc_config(pfvf); - if (err) + if (err) { + pfvf->pfc_en = old_pfc_en; return err; + } process_pfc: err = otx2_config_priority_flow_ctrl(pfvf); - if (err) + if (err) { + pfvf->pfc_en = old_pfc_en; return err; + } /* Request Per channel Bpids */ if (pfc->pfc_en) @@ -432,6 +430,12 @@ process_pfc: err = otx2_pfc_txschq_update(pfvf); if (err) { + if (pfc->pfc_en) + otx2_nix_config_bp(pfvf, false); + + otx2_pfc_txschq_stop(pfvf); + pfvf->pfc_en = old_pfc_en; + otx2_config_priority_flow_ctrl(pfvf); dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__); return err; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c index 63ef7c41d18d..4e1130496573 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c @@ -41,7 +41,6 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id, return 0; otx2_alloc_mcam_entries(pfvf, ctx->val.vu16); - otx2_tc_alloc_ent_bitmap(pfvf); return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index c47d91da32dc..53f6258a973c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -334,9 +334,12 @@ static void otx2_get_pauseparam(struct net_device *netdev, if (is_otx2_lbkvf(pfvf->pdev)) return; + mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox); - if (!req) + if (!req) { + mutex_unlock(&pfvf->mbox.lock); return; + } if (!otx2_sync_mbox_msg(&pfvf->mbox)) { rsp = (struct cgx_pause_frm_cfg *) @@ -344,6 +347,7 @@ static void otx2_get_pauseparam(struct net_device *netdev, pause->rx_pause = rsp->rx_pause; 
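/*
 * Context for the otx2_get_pauseparam() hunk that continues below: the
 * driver's mailbox has a single shared request area, so allocating a
 * message, syncing it, and reading the response must all happen under
 * mbox.lock or a concurrent caller can clobber the slot. The pattern,
 * distilled (sketch, error handling trimmed):
 */
mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
if (req && !otx2_sync_mbox_msg(&pfvf->mbox)) {
	rsp = (struct cgx_pause_frm_cfg *)
	      otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	/* consume rsp here, still under the lock */
}
mutex_unlock(&pfvf->mbox.lock);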
pause->tx_pause = rsp->tx_pause; } + mutex_unlock(&pfvf->mbox.lock); } static int otx2_set_pauseparam(struct net_device *netdev, @@ -764,6 +768,7 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc) struct otx2_nic *pfvf = netdev_priv(dev); int ret = -EOPNOTSUPP; + pfvf->flow_cfg->ntuple = ntuple; switch (nfc->cmd) { case ETHTOOL_SRXFH: ret = otx2_set_rss_hash_opts(pfvf, nfc); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 2d7713a1a153..97a71e9b8563 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -276,6 +276,7 @@ int otx2vf_mcam_flow_init(struct otx2_nic *pfvf) flow_cfg = pfvf->flow_cfg; INIT_LIST_HEAD(&flow_cfg->flow_list); + INIT_LIST_HEAD(&flow_cfg->flow_list_tc); flow_cfg->max_flows = 0; return 0; @@ -298,6 +299,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf) return -ENOMEM; INIT_LIST_HEAD(&pf->flow_cfg->flow_list); + INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc); /* Allocate bare minimum number of MCAM entries needed for * unicast and ntuple filters. @@ -1086,6 +1088,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) struct ethhdr *eth_hdr; bool new = false; int err = 0; + u64 vf_num; u32 ring; if (!flow_cfg->max_flows) { @@ -1098,7 +1101,21 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT)) return -ENOMEM; - if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC) + /* Number of queues on a VF can be greater or less than + * the PF's queue. Hence no need to check for the + * queue count. Hence no need to check queue count if PF + * is installing for its VF. Below is the expected vf_num value + * based on the ethtool commands. + * + * e.g. + * 1. ethtool -U <netdev> ... action -1 ==> vf_num:255 + * 2. ethtool -U <netdev> ... action <queue_num> ==> vf_num:0 + * 3. ethtool -U <netdev> ... 
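/*
 * Context for the otx2_add_flow() change above: an ethtool ring_cookie
 * is a packed u64 that carries the destination VF (as "VF index + 1")
 * above the queue number, and <linux/ethtool.h> ships helpers to unpack
 * both halves; a zero VF field means the PF itself. Sketch:
 */
#include <linux/ethtool.h>

static void demo_ring_cookie(u64 ring_cookie)
{
	u64 queue = ethtool_get_flow_spec_ring(ring_cookie);
	u64 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);

	pr_info("queue=%llu vf=%llu\n", queue, vf);
}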
vf <vf_idx> queue <queue_num> ==> + * vf_num:vf_idx+1 + */ + vf_num = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); + if (!is_otx2_vf(pfvf->pcifunc) && !vf_num && + ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC) return -EINVAL; if (fsp->location >= otx2_get_maxflows(flow_cfg)) @@ -1180,6 +1197,9 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) flow_cfg->nr_flows++; } + if (flow->is_vf) + netdev_info(pfvf->netdev, + "Make sure that VF's queue number is within its queue limit\n"); return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 9551b422622a..a57455aebff6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -16,6 +16,7 @@ #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <linux/bitfield.h> +#include <net/page_pool/types.h> #include "otx2_reg.h" #include "otx2_common.h" @@ -565,7 +566,9 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq) otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr); otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr, TYPE_PFVF); - vfs -= 64; + if (intr) + trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr); + vfs = 64; } intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0)); @@ -573,7 +576,8 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq) otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF); - trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr); + if (intr) + trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr); return IRQ_HANDLED; } @@ -1192,31 +1196,32 @@ static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = { }; static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] = { - "NIX_SND_STATUS_GOOD", - "NIX_SND_STATUS_SQ_CTX_FAULT", - "NIX_SND_STATUS_SQ_CTX_POISON", - "NIX_SND_STATUS_SQB_FAULT", - "NIX_SND_STATUS_SQB_POISON", - "NIX_SND_STATUS_HDR_ERR", - "NIX_SND_STATUS_EXT_ERR", - "NIX_SND_STATUS_JUMP_FAULT", - "NIX_SND_STATUS_JUMP_POISON", - "NIX_SND_STATUS_CRC_ERR", - "NIX_SND_STATUS_IMM_ERR", - "NIX_SND_STATUS_SG_ERR", - "NIX_SND_STATUS_MEM_ERR", - "NIX_SND_STATUS_INVALID_SUBDC", - "NIX_SND_STATUS_SUBDC_ORDER_ERR", - "NIX_SND_STATUS_DATA_FAULT", - "NIX_SND_STATUS_DATA_POISON", - "NIX_SND_STATUS_NPC_DROP_ACTION", - "NIX_SND_STATUS_LOCK_VIOL", - "NIX_SND_STATUS_NPC_UCAST_CHAN_ERR", - "NIX_SND_STATUS_NPC_MCAST_CHAN_ERR", - "NIX_SND_STATUS_NPC_MCAST_ABORT", - "NIX_SND_STATUS_NPC_VTAG_PTR_ERR", - "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR", - "NIX_SND_STATUS_SEND_STATS_ERR", + [NIX_SND_STATUS_GOOD] = "NIX_SND_STATUS_GOOD", + [NIX_SND_STATUS_SQ_CTX_FAULT] = "NIX_SND_STATUS_SQ_CTX_FAULT", + [NIX_SND_STATUS_SQ_CTX_POISON] = "NIX_SND_STATUS_SQ_CTX_POISON", + [NIX_SND_STATUS_SQB_FAULT] = "NIX_SND_STATUS_SQB_FAULT", + [NIX_SND_STATUS_SQB_POISON] = "NIX_SND_STATUS_SQB_POISON", + [NIX_SND_STATUS_HDR_ERR] = "NIX_SND_STATUS_HDR_ERR", + [NIX_SND_STATUS_EXT_ERR] = "NIX_SND_STATUS_EXT_ERR", + [NIX_SND_STATUS_JUMP_FAULT] = "NIX_SND_STATUS_JUMP_FAULT", + [NIX_SND_STATUS_JUMP_POISON] = "NIX_SND_STATUS_JUMP_POISON", + [NIX_SND_STATUS_CRC_ERR] = "NIX_SND_STATUS_CRC_ERR", + [NIX_SND_STATUS_IMM_ERR] = "NIX_SND_STATUS_IMM_ERR", + [NIX_SND_STATUS_SG_ERR] = "NIX_SND_STATUS_SG_ERR", + [NIX_SND_STATUS_MEM_ERR] = "NIX_SND_STATUS_MEM_ERR", + [NIX_SND_STATUS_INVALID_SUBDC] = "NIX_SND_STATUS_INVALID_SUBDC", + [NIX_SND_STATUS_SUBDC_ORDER_ERR] = "NIX_SND_STATUS_SUBDC_ORDER_ERR", + [NIX_SND_STATUS_DATA_FAULT] = 
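/*
 * Context for the nix_snd_status_e_str table that continues below:
 * switching to designated initializers pins every string to its enum
 * value, where the old positional list silently mislabelled all entries
 * after any gap in the hardware error codes. The C idiom, distilled:
 */
enum demo_status { DEMO_GOOD = 0, DEMO_HDR_ERR = 5 };	/* codes 1..4 unused */

static const char * const demo_status_str[] = {
	[DEMO_GOOD]    = "GOOD",
	[DEMO_HDR_ERR] = "HDR_ERR",	/* lands at index 5, not index 1 */
	/* unnamed slots read as NULL, so callers should check them */
};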
"NIX_SND_STATUS_DATA_FAULT", + [NIX_SND_STATUS_DATA_POISON] = "NIX_SND_STATUS_DATA_POISON", + [NIX_SND_STATUS_NPC_DROP_ACTION] = "NIX_SND_STATUS_NPC_DROP_ACTION", + [NIX_SND_STATUS_LOCK_VIOL] = "NIX_SND_STATUS_LOCK_VIOL", + [NIX_SND_STATUS_NPC_UCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_UCAST_CHAN_ERR", + [NIX_SND_STATUS_NPC_MCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_MCAST_CHAN_ERR", + [NIX_SND_STATUS_NPC_MCAST_ABORT] = "NIX_SND_STATUS_NPC_MCAST_ABORT", + [NIX_SND_STATUS_NPC_VTAG_PTR_ERR] = "NIX_SND_STATUS_NPC_VTAG_PTR_ERR", + [NIX_SND_STATUS_NPC_VTAG_SIZE_ERR] = "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR", + [NIX_SND_STATUS_SEND_MEM_FAULT] = "NIX_SND_STATUS_SEND_MEM_FAULT", + [NIX_SND_STATUS_SEND_STATS_ERR] = "NIX_SND_STATUS_SEND_STATS_ERR", }; static irqreturn_t otx2_q_intr_handler(int irq, void *data) @@ -1237,14 +1242,16 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data) continue; if (val & BIT_ULL(42)) { - netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", + netdev_err(pf->netdev, + "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", qidx, otx2_read64(pf, NIX_LF_ERR_INT)); } else { if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR)) netdev_err(pf->netdev, "CQ%lld: Doorbell error", qidx); if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT)) - netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM", + netdev_err(pf->netdev, + "CQ%lld: Memory fault on CQE write to LLC/DRAM", qidx); } @@ -1271,7 +1278,8 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data) (val & NIX_SQINT_BITS)); if (val & BIT_ULL(42)) { - netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", + netdev_err(pf->netdev, + "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", qidx, otx2_read64(pf, NIX_LF_ERR_INT)); goto done; } @@ -1281,8 +1289,11 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data) goto chk_mnq_err_dbg; sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg); - netdev_err(pf->netdev, "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(%llx) err=%s\n", - qidx, sq_op_err_dbg, nix_sqoperr_e_str[sq_op_err_code]); + netdev_err(pf->netdev, + "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(0x%llx) err=%s(%#x)\n", + qidx, sq_op_err_dbg, + nix_sqoperr_e_str[sq_op_err_code], + sq_op_err_code); otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44)); @@ -1299,16 +1310,21 @@ chk_mnq_err_dbg: goto chk_snd_err_dbg; mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg); - netdev_err(pf->netdev, "SQ%lld: NIX_LF_MNQ_ERR_DBG(%llx) err=%s\n", - qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code]); + netdev_err(pf->netdev, + "SQ%lld: NIX_LF_MNQ_ERR_DBG(0x%llx) err=%s(%#x)\n", + qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code], + mnq_err_code); otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44)); chk_snd_err_dbg: snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG); if (snd_err_dbg & BIT(44)) { snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg); - netdev_err(pf->netdev, "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s\n", - qidx, snd_err_dbg, nix_snd_status_e_str[snd_err_code]); + netdev_err(pf->netdev, + "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s(%#x)\n", + qidx, snd_err_dbg, + nix_snd_status_e_str[snd_err_code], + snd_err_code); otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44)); } @@ -1588,6 +1604,7 @@ static void otx2_free_hw_resources(struct otx2_nic *pf) else otx2_cleanup_tx_cqes(pf, cq); } + otx2_free_pending_sqe(pf); otx2_free_sq_res(pf); @@ -1633,6 +1650,21 @@ static void otx2_free_hw_resources(struct otx2_nic *pf) mutex_unlock(&mbox->lock); } +static bool 
otx2_promisc_use_mce_list(struct otx2_nic *pfvf) +{ + int vf; + + /* The AF driver will determine whether to allow the VF netdev or not */ + if (is_otx2_vf(pfvf->pcifunc)) + return true; + + /* check if there are any trusted VFs associated with the PF netdev */ + for (vf = 0; vf < pci_num_vf(pfvf->pdev); vf++) + if (pfvf->vf_configs[vf].trusted) + return true; + return false; +} + static void otx2_do_set_rx_mode(struct otx2_nic *pf) { struct net_device *netdev = pf->netdev; @@ -1665,12 +1697,21 @@ static void otx2_do_set_rx_mode(struct otx2_nic *pf) if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST)) req->mode |= NIX_RX_MODE_ALLMULTI; - req->mode |= NIX_RX_MODE_USE_MCE; + if (otx2_promisc_use_mce_list(pf)) + req->mode |= NIX_RX_MODE_USE_MCE; otx2_sync_mbox_msg(&pf->mbox); mutex_unlock(&pf->mbox.lock); } +static void otx2_set_irq_coalesce(struct otx2_nic *pfvf) +{ + int cint; + + for (cint = 0; cint < pfvf->hw.cint_cnt; cint++) + otx2_config_irq_coalescing(pfvf, cint); +} + static void otx2_dim_work(struct work_struct *w) { struct dim_cq_moder cur_moder; @@ -1686,6 +1727,7 @@ static void otx2_dim_work(struct work_struct *w) CQ_TIMER_THRESH_MAX : cur_moder.usec; pfvf->hw.cq_ecount_wait = (cur_moder.pkts > NAPI_POLL_WEIGHT) ? NAPI_POLL_WEIGHT : cur_moder.pkts; + otx2_set_irq_coalesce(pfvf); dim->state = DIM_START_MEASURE; } @@ -1856,6 +1898,8 @@ int otx2_open(struct net_device *netdev) if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT) otx2_dmacflt_reinstall_flows(pf); + otx2_tc_apply_ingress_police_rules(pf); + err = otx2_rxtx_enable(pf, true); /* If a mbox communication error happens at this point then interface * will end up in a state such that it is in down state but hardware @@ -1920,6 +1964,8 @@ int otx2_stop(struct net_device *netdev) /* Clear RSS enable flag */ rss = &pf->hw.rss_info; rss->enable = false; + if (!netif_is_rxfh_configured(netdev)) + kfree(rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]); /* Cleanup Queue IRQ */ vec = pci_irq_vector(pf->pdev, @@ -1942,6 +1988,10 @@ int otx2_stop(struct net_device *netdev) netif_tx_disable(netdev); + for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++) + cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work); + devm_kfree(pf->dev, pf->refill_wrk); + otx2_free_hw_resources(pf); otx2_free_cints(pf, pf->hw.cint_cnt); otx2_disable_napi(pf); @@ -1949,9 +1999,6 @@ int otx2_stop(struct net_device *netdev) for (qidx = 0; qidx < netdev->num_tx_queues; qidx++) netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx)); - for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++) - cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work); - devm_kfree(pf->dev, pf->refill_wrk); kfree(qset->sq); kfree(qset->cq); @@ -2027,7 +2074,7 @@ u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb, #endif int txq; - qos_enabled = (netdev->real_num_tx_queues > pf->hw.tx_queues) ? true : false; + qos_enabled = netdev->real_num_tx_queues > pf->hw.tx_queues; if (unlikely(qos_enabled)) { /* This smp_load_acquire() pairs with smp_store_release() in * otx2_qos_root_add() called from htb offload root creation @@ -2660,11 +2707,14 @@ static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf, pf->vf_configs[vf].trusted = enable; rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF); - if (rc) + if (rc) { pf->vf_configs[vf].trusted = !enable; - else + } else { netdev_info(pf->netdev, "VF %d is %strusted\n", vf, enable ? 
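/*
 * Context for the otx2_dim_work() change above: the worker now pushes
 * the clamped net_dim suggestion into every completion interrupt via
 * otx2_set_irq_coalesce() before restarting measurement. Rough flow,
 * assuming the stock <linux/dim.h> API (field types elided):
 */
#include <linux/dim.h>

static void demo_dim_apply(struct otx2_nic *pfvf, struct dim *dim)
{
	struct dim_cq_moder moder;

	moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	pfvf->hw.cq_time_wait = min_t(u32, moder.usec, CQ_TIMER_THRESH_MAX);
	pfvf->hw.cq_ecount_wait = min_t(u32, moder.pkts, NAPI_POLL_WEIGHT);
	otx2_set_irq_coalesce(pfvf);	/* program all CINT registers */
	dim->state = DIM_START_MEASURE;
}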
"" : "not "); + otx2_set_rx_mode(netdev); + } + return rc; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c index 896b2f9bac34..63130ba37e9d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c @@ -10,6 +10,65 @@ #include "otx2_common.h" #include "otx2_ptp.h" +static bool is_tstmp_atomic_update_supported(struct otx2_ptp *ptp) +{ + struct ptp_get_cap_rsp *rsp; + struct msg_req *req; + int err; + + if (!ptp->nic) + return false; + + mutex_lock(&ptp->nic->mbox.lock); + req = otx2_mbox_alloc_msg_ptp_get_cap(&ptp->nic->mbox); + if (!req) { + mutex_unlock(&ptp->nic->mbox.lock); + return false; + } + + err = otx2_sync_mbox_msg(&ptp->nic->mbox); + if (err) { + mutex_unlock(&ptp->nic->mbox.lock); + return false; + } + rsp = (struct ptp_get_cap_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0, + &req->hdr); + mutex_unlock(&ptp->nic->mbox.lock); + + if (IS_ERR(rsp)) + return false; + + if (rsp->cap & PTP_CAP_HW_ATOMIC_UPDATE) + return true; + + return false; +} + +static int otx2_ptp_hw_adjtime(struct ptp_clock_info *ptp_info, s64 delta) +{ + struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, + ptp_info); + struct otx2_nic *pfvf = ptp->nic; + struct ptp_req *req; + int rc; + + if (!ptp->nic) + return -ENODEV; + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + req->op = PTP_OP_ADJTIME; + req->delta = delta; + rc = otx2_sync_mbox_msg(&ptp->nic->mbox); + mutex_unlock(&pfvf->mbox.lock); + + return rc; +} + static u64 otx2_ptp_get_clock(struct otx2_ptp *ptp) { struct ptp_req *req; @@ -37,6 +96,49 @@ static u64 otx2_ptp_get_clock(struct otx2_ptp *ptp) return rsp->clk; } +static int otx2_ptp_hw_gettime(struct ptp_clock_info *ptp_info, + struct timespec64 *ts) +{ + struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, + ptp_info); + u64 tstamp; + + tstamp = otx2_ptp_get_clock(ptp); + + *ts = ns_to_timespec64(tstamp); + return 0; +} + +static int otx2_ptp_hw_settime(struct ptp_clock_info *ptp_info, + const struct timespec64 *ts) +{ + struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, + ptp_info); + struct otx2_nic *pfvf = ptp->nic; + struct ptp_req *req; + u64 nsec; + int rc; + + if (!ptp->nic) + return -ENODEV; + + nsec = timespec64_to_ns(ts); + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + req->op = PTP_OP_SET_CLOCK; + req->clk = nsec; + rc = otx2_sync_mbox_msg(&ptp->nic->mbox); + mutex_unlock(&pfvf->mbox.lock); + + return rc; +} + static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm) { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, @@ -73,7 +175,7 @@ static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh) return otx2_sync_mbox_msg(&ptp->nic->mbox); } -static int ptp_extts_on(struct otx2_ptp *ptp, int on) +static int ptp_pps_on(struct otx2_ptp *ptp, int on, u64 period) { struct ptp_req *req; @@ -84,8 +186,9 @@ static int ptp_extts_on(struct otx2_ptp *ptp, int on) if (!req) return -ENOMEM; - req->op = PTP_OP_EXTTS_ON; - req->extts_on = on; + req->op = PTP_OP_PPS_ON; + req->pps_on = on; + req->period = period; return otx2_sync_mbox_msg(&ptp->nic->mbox); } @@ -124,16 +227,7 @@ static u64 ptp_tstmp_read(struct otx2_ptp *ptp) return rsp->clk; } -static void 
otx2_get_ptpclock(struct otx2_ptp *ptp, u64 *tstamp) -{ - struct otx2_nic *pfvf = ptp->nic; - - mutex_lock(&pfvf->mbox.lock); - *tstamp = timecounter_read(&ptp->time_counter); - mutex_unlock(&pfvf->mbox.lock); -} - -static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta) +static int otx2_ptp_tc_adjtime(struct ptp_clock_info *ptp_info, s64 delta) { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, ptp_info); @@ -146,32 +240,33 @@ static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta) return 0; } -static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info, - struct timespec64 *ts) +static int otx2_ptp_tc_gettime(struct ptp_clock_info *ptp_info, + struct timespec64 *ts) { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, ptp_info); u64 tstamp; - otx2_get_ptpclock(ptp, &tstamp); + mutex_lock(&ptp->nic->mbox.lock); + tstamp = timecounter_read(&ptp->time_counter); + mutex_unlock(&ptp->nic->mbox.lock); *ts = ns_to_timespec64(tstamp); return 0; } -static int otx2_ptp_settime(struct ptp_clock_info *ptp_info, - const struct timespec64 *ts) +static int otx2_ptp_tc_settime(struct ptp_clock_info *ptp_info, + const struct timespec64 *ts) { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, ptp_info); - struct otx2_nic *pfvf = ptp->nic; u64 nsec; nsec = timespec64_to_ns(ts); - mutex_lock(&pfvf->mbox.lock); + mutex_lock(&ptp->nic->mbox.lock); timecounter_init(&ptp->time_counter, &ptp->cycle_counter, nsec); - mutex_unlock(&pfvf->mbox.lock); + mutex_unlock(&ptp->nic->mbox.lock); return 0; } @@ -182,14 +277,20 @@ static int otx2_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, switch (func) { case PTP_PF_NONE: case PTP_PF_EXTTS: - break; case PTP_PF_PEROUT: + break; case PTP_PF_PHYSYNC: return -1; } return 0; } +static u64 otx2_ptp_hw_tstamp2time(const struct timecounter *time_counter, u64 tstamp) +{ + /* On HW which supports atomic updates, timecounter is not initialized */ + return tstamp; +} + static void otx2_ptp_extts_check(struct work_struct *work) { struct otx2_ptp *ptp = container_of(work, struct otx2_ptp, @@ -204,7 +305,7 @@ static void otx2_ptp_extts_check(struct work_struct *work) if (tstmp != ptp->last_extts) { event.type = PTP_CLOCK_EXTTS; event.index = 0; - event.timestamp = timecounter_cyc2time(&ptp->time_counter, tstmp); + event.timestamp = ptp->ptp_tstamp2nsec(&ptp->time_counter, tstmp); ptp_clock_event(ptp->ptp_clock, &event); new_thresh = tstmp % 500000000; if (ptp->thresh != new_thresh) { @@ -229,7 +330,7 @@ static void otx2_sync_tstamp(struct work_struct *work) tstamp = otx2_ptp_get_clock(ptp); mutex_unlock(&pfvf->mbox.lock); - ptp->tstamp = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp); + ptp->tstamp = ptp->ptp_tstamp2nsec(&ptp->time_counter, tstamp); ptp->base_ns = tstamp % NSEC_PER_SEC; schedule_delayed_work(&ptp->synctstamp_work, msecs_to_jiffies(250)); @@ -240,6 +341,7 @@ static int otx2_ptp_enable(struct ptp_clock_info *ptp_info, { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, ptp_info); + u64 period = 0; int pin; if (!ptp->nic) @@ -251,12 +353,24 @@ static int otx2_ptp_enable(struct ptp_clock_info *ptp_info, rq->extts.index); if (pin < 0) return -EBUSY; - if (on) { - ptp_extts_on(ptp, on); + if (on) schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200)); - } else { - ptp_extts_on(ptp, on); + else cancel_delayed_work_sync(&ptp->extts_work); + + return 0; + case PTP_CLK_REQ_PEROUT: + if (rq->perout.flags) + return -EOPNOTSUPP; + + if (rq->perout.index >= 
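
The otx2_ptp_tc_* callbacks above keep the kernel timecounter in play: a software layer that turns a free-running 64-bit cycle counter into nanosecond time while tracking an epoch, so adjtime/settime never have to touch hardware. With cc->mult = 1 and cc->shift = 0 (as otx2_ptp_init() sets up below), the conversion degenerates to "epoch plus raw delta". A minimal userspace model of that arithmetic (it ignores the fractional-nanosecond accumulator the real timecounter carries):

	#include <stdint.h>
	#include <stdio.h>

	struct toy_timecounter {
		uint64_t nsec;		/* time at last read (epoch) */
		uint64_t cycle_last;	/* counter value at last read */
	};

	/* equivalent of timecounter_cyc2time() for mult = 1, shift = 0 */
	static uint64_t toy_cyc2time(const struct toy_timecounter *tc, uint64_t cyc)
	{
		return tc->nsec + (cyc - tc->cycle_last);
	}

	int main(void)
	{
		struct toy_timecounter tc = { .nsec = 1000000000ULL, .cycle_last = 500 };

		/* counter advanced 250 ticks -> time advanced 250 ns */
		printf("%llu\n", (unsigned long long)toy_cyc2time(&tc, 750));
		return 0;
	}

On silicon with atomic update support none of this is needed, which is why the hw variants of these callbacks talk straight to firmware and otx2_ptp_hw_tstamp2time() is a pass-through.
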
ptp_info->n_pins) + return -EINVAL; + if (on) { + period = rq->perout.period.sec * NSEC_PER_SEC + + rq->perout.period.nsec; + ptp_pps_on(ptp, on, period); + } else { + ptp_pps_on(ptp, on, period); } return 0; default: @@ -302,15 +416,6 @@ int otx2_ptp_init(struct otx2_nic *pfvf) ptp_ptr->nic = pfvf; - cc = &ptp_ptr->cycle_counter; - cc->read = ptp_cc_read; - cc->mask = CYCLECOUNTER_MASK(64); - cc->mult = 1; - cc->shift = 0; - - timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter, - ktime_to_ns(ktime_get_real())); - snprintf(ptp_ptr->extts_config.name, sizeof(ptp_ptr->extts_config.name), "TSTAMP"); ptp_ptr->extts_config.index = 0; ptp_ptr->extts_config.func = PTP_PF_NONE; @@ -320,17 +425,38 @@ int otx2_ptp_init(struct otx2_nic *pfvf) .name = "OcteonTX2 PTP", .max_adj = 1000000000ull, .n_ext_ts = 1, + .n_per_out = 1, .n_pins = 1, .pps = 0, .pin_config = &ptp_ptr->extts_config, .adjfine = otx2_ptp_adjfine, - .adjtime = otx2_ptp_adjtime, - .gettime64 = otx2_ptp_gettime, - .settime64 = otx2_ptp_settime, .enable = otx2_ptp_enable, .verify = otx2_ptp_verify_pin, }; + /* Check whether hardware supports atomic updates to timestamp */ + if (is_tstmp_atomic_update_supported(ptp_ptr)) { + ptp_ptr->ptp_info.adjtime = otx2_ptp_hw_adjtime; + ptp_ptr->ptp_info.gettime64 = otx2_ptp_hw_gettime; + ptp_ptr->ptp_info.settime64 = otx2_ptp_hw_settime; + + ptp_ptr->ptp_tstamp2nsec = otx2_ptp_hw_tstamp2time; + } else { + ptp_ptr->ptp_info.adjtime = otx2_ptp_tc_adjtime; + ptp_ptr->ptp_info.gettime64 = otx2_ptp_tc_gettime; + ptp_ptr->ptp_info.settime64 = otx2_ptp_tc_settime; + + cc = &ptp_ptr->cycle_counter; + cc->read = ptp_cc_read; + cc->mask = CYCLECOUNTER_MASK(64); + cc->mult = 1; + cc->shift = 0; + ptp_ptr->ptp_tstamp2nsec = timecounter_cyc2time; + + timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter, + ktime_to_ns(ktime_get_real())); + } + INIT_DELAYED_WORK(&ptp_ptr->extts_work, otx2_ptp_extts_check); ptp_ptr->ptp_clock = ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev); @@ -387,7 +513,7 @@ int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns) if (!pfvf->ptp) return -ENODEV; - *tsns = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp); + *tsns = pfvf->ptp->ptp_tstamp2nsec(&pfvf->ptp->time_counter, tstamp); return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h index fa37b9f312ca..4e5899d8fa2e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h @@ -318,23 +318,23 @@ enum nix_snd_status_e { NIX_SND_STATUS_EXT_ERR = 0x6, NIX_SND_STATUS_JUMP_FAULT = 0x7, NIX_SND_STATUS_JUMP_POISON = 0x8, - NIX_SND_STATUS_CRC_ERR = 0x9, - NIX_SND_STATUS_IMM_ERR = 0x10, - NIX_SND_STATUS_SG_ERR = 0x11, - NIX_SND_STATUS_MEM_ERR = 0x12, - NIX_SND_STATUS_INVALID_SUBDC = 0x13, - NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x14, - NIX_SND_STATUS_DATA_FAULT = 0x15, - NIX_SND_STATUS_DATA_POISON = 0x16, - NIX_SND_STATUS_NPC_DROP_ACTION = 0x17, - NIX_SND_STATUS_LOCK_VIOL = 0x18, - NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x19, - NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x20, - NIX_SND_STATUS_NPC_MCAST_ABORT = 0x21, - NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x22, - NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x23, - NIX_SND_STATUS_SEND_MEM_FAULT = 0x24, - NIX_SND_STATUS_SEND_STATS_ERR = 0x25, + NIX_SND_STATUS_CRC_ERR = 0x10, + NIX_SND_STATUS_IMM_ERR = 0x11, + NIX_SND_STATUS_SG_ERR = 0x12, + NIX_SND_STATUS_MEM_ERR = 0x13, + NIX_SND_STATUS_INVALID_SUBDC = 0x14, + 
NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x15, + NIX_SND_STATUS_DATA_FAULT = 0x16, + NIX_SND_STATUS_DATA_POISON = 0x17, + NIX_SND_STATUS_NPC_DROP_ACTION = 0x20, + NIX_SND_STATUS_LOCK_VIOL = 0x21, + NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x22, + NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x23, + NIX_SND_STATUS_NPC_MCAST_ABORT = 0x24, + NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x25, + NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x26, + NIX_SND_STATUS_SEND_MEM_FAULT = 0x27, + NIX_SND_STATUS_SEND_STATS_ERR = 0x28, NIX_SND_STATUS_MAX, }; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c index 5e56b6c3e60a..db1e0e0e812d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -27,6 +27,8 @@ #define CN10K_TLX_BURST_MANTISSA GENMASK_ULL(43, 29) #define CN10K_TLX_BURST_EXPONENT GENMASK_ULL(47, 44) +#define OTX2_UNSUPP_LSE_DEPTH GENMASK(6, 4) + struct otx2_tc_flow_stats { u64 bytes; u64 pkts; @@ -34,9 +36,8 @@ struct otx2_tc_flow_stats { }; struct otx2_tc_flow { - struct rhash_head node; + struct list_head list; unsigned long cookie; - unsigned int bitpos; struct rcu_head rcu; struct otx2_tc_flow_stats stats; spinlock_t lock; /* lock for stats */ @@ -44,31 +45,13 @@ struct otx2_tc_flow { u16 entry; u16 leaf_profile; bool is_act_police; + u32 prio; + struct npc_install_flow_req req; + u64 rate; + u32 burst; + bool is_pps; }; -int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic) -{ - struct otx2_tc_info *tc = &nic->tc_info; - - if (!nic->flow_cfg->max_flows) - return 0; - - /* Max flows changed, free the existing bitmap */ - kfree(tc->tc_entries_bitmap); - - tc->tc_entries_bitmap = - kcalloc(BITS_TO_LONGS(nic->flow_cfg->max_flows), - sizeof(long), GFP_KERNEL); - if (!tc->tc_entries_bitmap) { - netdev_err(nic->netdev, - "Unable to alloc TC flow entries bitmap\n"); - return -ENOMEM; - } - - return 0; -} -EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap); - static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst, u32 *burst_exp, u32 *burst_mantissa) { @@ -304,21 +287,10 @@ static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic, return err; } -static int otx2_tc_act_set_police(struct otx2_nic *nic, - struct otx2_tc_flow *node, - struct flow_cls_offload *f, - u64 rate, u32 burst, u32 mark, - struct npc_install_flow_req *req, bool pps) +static int otx2_tc_act_set_hw_police(struct otx2_nic *nic, + struct otx2_tc_flow *node) { - struct netlink_ext_ack *extack = f->common.extack; - struct otx2_hw *hw = &nic->hw; - int rq_idx, rc; - - rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues); - if (rq_idx >= hw->rx_queues) { - NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded"); - return -EINVAL; - } + int rc; mutex_lock(&nic->mbox.lock); @@ -328,23 +300,17 @@ static int otx2_tc_act_set_police(struct otx2_nic *nic, return rc; } - rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps); + rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, + node->burst, node->rate, node->is_pps); if (rc) goto free_leaf; - rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true); + rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true); if (rc) goto free_leaf; mutex_unlock(&nic->mbox.lock); - req->match_id = mark & 0xFFFFULL; - req->index = rq_idx; - req->op = NIX_RX_ACTIONOP_UCAST; - set_bit(rq_idx, &nic->rq_bmap); - node->is_act_police = true; - node->rq = rq_idx; - return 0; free_leaf: @@ -356,6 +322,39 @@ free_leaf: return rc; } +static int 
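
The nix_snd_status_e renumbering above changes values, not names: the old constants simply counted upward in hex (0x9, 0x10, 0x11, ...), a decimal sequence written with a 0x prefix, which had drifted from the codes the hardware actually reports. The corrected values follow the NIX_SND_STATUS_E layout, which leaves gaps (0x9..0xf and 0x18..0x1f unused), and the values matter because otx2_q_intr_handler() indexes the string table with the raw hardware code. A compile-time guard of this kind (hypothetical, not part of the patch) would catch future drift:

	/* hypothetical guard (static_assert from linux/build_bug.h):
	 * the spec pins NPC_DROP_ACTION at 0x20 and SEND_STATS_ERR at 0x28
	 */
	static_assert(NIX_SND_STATUS_NPC_DROP_ACTION == 0x20,
		      "nix_snd_status_e out of sync with HW NIX_SND_STATUS_E");
	static_assert(NIX_SND_STATUS_SEND_STATS_ERR == 0x28,
		      "nix_snd_status_e out of sync with HW NIX_SND_STATUS_E");
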
otx2_tc_act_set_police(struct otx2_nic *nic, + struct otx2_tc_flow *node, + struct flow_cls_offload *f, + u64 rate, u32 burst, u32 mark, + struct npc_install_flow_req *req, bool pps) +{ + struct netlink_ext_ack *extack = f->common.extack; + struct otx2_hw *hw = &nic->hw; + int rq_idx, rc; + + rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues); + if (rq_idx >= hw->rx_queues) { + NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded"); + return -EINVAL; + } + + req->match_id = mark & 0xFFFFULL; + req->index = rq_idx; + req->op = NIX_RX_ACTIONOP_UCAST; + + node->is_act_police = true; + node->rq = rq_idx; + node->burst = burst; + node->rate = rate; + node->is_pps = pps; + + rc = otx2_tc_act_set_hw_police(nic, node); + if (!rc) + set_bit(rq_idx, &nic->rq_bmap); + + return rc; +} + static int otx2_tc_parse_actions(struct otx2_nic *nic, struct flow_action *flow_action, struct npc_install_flow_req *req, @@ -461,6 +460,62 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic, return 0; } +static int otx2_tc_process_vlan(struct otx2_nic *nic, struct flow_msg *flow_spec, + struct flow_msg *flow_mask, struct flow_rule *rule, + struct npc_install_flow_req *req, bool is_inner) +{ + struct flow_match_vlan match; + u16 vlan_tci, vlan_tci_mask; + + if (is_inner) + flow_rule_match_cvlan(rule, &match); + else + flow_rule_match_vlan(rule, &match); + + if (!eth_type_vlan(match.key->vlan_tpid)) { + netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n", + ntohs(match.key->vlan_tpid)); + return -EOPNOTSUPP; + } + + if (!match.mask->vlan_id) { + struct flow_action_entry *act; + int i; + + flow_action_for_each(i, act, &rule->action) { + if (act->id == FLOW_ACTION_DROP) { + netdev_err(nic->netdev, + "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n", + ntohs(match.key->vlan_tpid), match.key->vlan_id); + return -EOPNOTSUPP; + } + } + } + + if (match.mask->vlan_id || + match.mask->vlan_dei || + match.mask->vlan_priority) { + vlan_tci = match.key->vlan_id | + match.key->vlan_dei << 12 | + match.key->vlan_priority << 13; + + vlan_tci_mask = match.mask->vlan_id | + match.mask->vlan_dei << 12 | + match.mask->vlan_priority << 13; + if (is_inner) { + flow_spec->vlan_itci = htons(vlan_tci); + flow_mask->vlan_itci = htons(vlan_tci_mask); + req->features |= BIT_ULL(NPC_INNER_VID); + } else { + flow_spec->vlan_tci = htons(vlan_tci); + flow_mask->vlan_tci = htons(vlan_tci_mask); + req->features |= BIT_ULL(NPC_OUTER_VID); + } + } + + return 0; +} + static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, struct flow_cls_offload *f, struct npc_install_flow_req *req) @@ -476,15 +531,18 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, dissector = rule->match.dissector; if ((dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_VLAN) | - BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_PORTS) | - BIT(FLOW_DISSECTOR_KEY_IP)))) { - netdev_info(nic->netdev, "unsupported flow used key 0x%x", + ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | + BIT(FLOW_DISSECTOR_KEY_CVLAN) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | + BIT(FLOW_DISSECTOR_KEY_IPSEC) | + BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) | + 
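
otx2_tc_process_vlan() above packs the three flower VLAN sub-fields back into the on-wire TCI layout before handing them to the MCAM: PCP in bits 15:13, DEI in bit 12, VID in bits 11:0, with the mask packed the same way. A self-contained illustration of the packing (host order; the driver additionally byte-swaps with htons() because NPC matches the field as it appears on the wire):

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t pack_tci(uint16_t vid, uint8_t dei, uint8_t pcp)
	{
		return (uint16_t)(vid | dei << 12 | pcp << 13);
	}

	int main(void)
	{
		/* VID 100, DEI 0, priority 5 -> 0xa064 */
		printf("tci=0x%04x\n", pack_tci(100, 0, 5));

		/* match only the VID: mask sets all 12 VID bits */
		printf("mask=0x%04x\n", pack_tci(0xfff, 0, 0));
		return 0;
	}

Factoring this into one helper is what lets the CVLAN (inner tag) match below reuse the exact same packing, steering it into vlan_itci/NPC_INNER_VID instead of the outer fields.
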
BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) { + netdev_info(nic->netdev, "unsupported flow used key 0x%llx", dissector->used_keys); return -EOPNOTSUPP; } @@ -504,6 +562,8 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, match.key->ip_proto != IPPROTO_UDP && match.key->ip_proto != IPPROTO_SCTP && match.key->ip_proto != IPPROTO_ICMP && + match.key->ip_proto != IPPROTO_ESP && + match.key->ip_proto != IPPROTO_AH && match.key->ip_proto != IPPROTO_ICMPV6)) { netdev_info(nic->netdev, "ip_proto=0x%x not supported\n", @@ -523,6 +583,10 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, req->features |= BIT_ULL(NPC_IPPROTO_ICMP); else if (ip_proto == IPPROTO_ICMPV6) req->features |= BIT_ULL(NPC_IPPROTO_ICMP6); + else if (ip_proto == IPPROTO_ESP) + req->features |= BIT_ULL(NPC_IPPROTO_ESP); + else if (ip_proto == IPPROTO_AH) + req->features |= BIT_ULL(NPC_IPPROTO_AH); } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { @@ -567,6 +631,26 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, } } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPSEC)) { + struct flow_match_ipsec match; + + flow_rule_match_ipsec(rule, &match); + if (!match.mask->spi) { + NL_SET_ERR_MSG_MOD(extack, "spi index not specified"); + return -EOPNOTSUPP; + } + if (ip_proto != IPPROTO_ESP && + ip_proto != IPPROTO_AH) { + NL_SET_ERR_MSG_MOD(extack, + "SPI index is valid only for ESP/AH proto"); + return -EOPNOTSUPP; + } + + flow_spec->spi = match.key->spi; + flow_mask->spi = match.mask->spi; + req->features |= BIT_ULL(NPC_IPSEC_SPI); + } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { struct flow_match_ip match; @@ -586,47 +670,19 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { - struct flow_match_vlan match; - u16 vlan_tci, vlan_tci_mask; - - flow_rule_match_vlan(rule, &match); - - if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) { - netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n", - ntohs(match.key->vlan_tpid)); - return -EOPNOTSUPP; - } + int ret; - if (!match.mask->vlan_id) { - struct flow_action_entry *act; - int i; - - flow_action_for_each(i, act, &rule->action) { - if (act->id == FLOW_ACTION_DROP) { - netdev_err(nic->netdev, - "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n", - ntohs(match.key->vlan_tpid), - match.key->vlan_id); - return -EOPNOTSUPP; - } - } - } - - if (match.mask->vlan_id || - match.mask->vlan_dei || - match.mask->vlan_priority) { - vlan_tci = match.key->vlan_id | - match.key->vlan_dei << 12 | - match.key->vlan_priority << 13; + ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, false); + if (ret) + return ret; + } - vlan_tci_mask = match.mask->vlan_id | - match.mask->vlan_dei << 12 | - match.mask->vlan_priority << 13; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) { + int ret; - flow_spec->vlan_tci = htons(vlan_tci); - flow_mask->vlan_tci = htons(vlan_tci_mask); - req->features |= BIT_ULL(NPC_OUTER_VID); - } + ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, true); + if (ret) + return ret; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { @@ -704,11 +760,175 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, } } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) { + struct flow_match_mpls match; + u8 bit; + + flow_rule_match_mpls(rule, &match); + + if (match.mask->used_lses & 
OTX2_UNSUPP_LSE_DEPTH) { + NL_SET_ERR_MSG_MOD(extack, + "unsupported LSE depth for MPLS match offload"); + return -EOPNOTSUPP; + } + + for_each_set_bit(bit, (unsigned long *)&match.mask->used_lses, + FLOW_DIS_MPLS_MAX) { + /* check if any of the fields LABEL,TC,BOS are set */ + if (*((u32 *)&match.mask->ls[bit]) & + OTX2_FLOWER_MASK_MPLS_NON_TTL) { + /* Hardware will capture 4 byte MPLS header into + * two fields NPC_MPLSX_LBTCBOS and NPC_MPLSX_TTL. + * Derive the associated NPC key based on header + * index and offset. + */ + + req->features |= BIT_ULL(NPC_MPLS1_LBTCBOS + + 2 * bit); + flow_spec->mpls_lse[bit] = + FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB, + match.key->ls[bit].mpls_label) | + FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC, + match.key->ls[bit].mpls_tc) | + FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS, + match.key->ls[bit].mpls_bos); + + flow_mask->mpls_lse[bit] = + FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB, + match.mask->ls[bit].mpls_label) | + FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC, + match.mask->ls[bit].mpls_tc) | + FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS, + match.mask->ls[bit].mpls_bos); + } + + if (match.mask->ls[bit].mpls_ttl) { + req->features |= BIT_ULL(NPC_MPLS1_TTL + + 2 * bit); + flow_spec->mpls_lse[bit] |= + FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL, + match.key->ls[bit].mpls_ttl); + flow_mask->mpls_lse[bit] |= + FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL, + match.mask->ls[bit].mpls_ttl); + } + } + } + return otx2_tc_parse_actions(nic, &rule->action, req, f, node); } -static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry) +static void otx2_destroy_tc_flow_list(struct otx2_nic *pfvf) { + struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; + struct otx2_tc_flow *iter, *tmp; + + if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC)) + return; + + list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) { + list_del(&iter->list); + kfree(iter); + flow_cfg->nr_flows--; + } +} + +static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg, + unsigned long cookie) +{ + struct otx2_tc_flow *tmp; + + list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) { + if (tmp->cookie == cookie) + return tmp; + } + + return NULL; +} + +static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg, + int index) +{ + struct otx2_tc_flow *tmp; + int i = 0; + + list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) { + if (i == index) + return tmp; + i++; + } + + return NULL; +} + +static void otx2_tc_del_from_flow_list(struct otx2_flow_config *flow_cfg, + struct otx2_tc_flow *node) +{ + struct list_head *pos, *n; + struct otx2_tc_flow *tmp; + + list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { + tmp = list_entry(pos, struct otx2_tc_flow, list); + if (node == tmp) { + list_del(&node->list); + return; + } + } +} + +static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg, + struct otx2_tc_flow *node) +{ + struct list_head *pos, *n; + struct otx2_tc_flow *tmp; + int index = 0; + + /* If the flow list is empty then add the new node */ + if (list_empty(&flow_cfg->flow_list_tc)) { + list_add(&node->list, &flow_cfg->flow_list_tc); + return index; + } + + list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { + tmp = list_entry(pos, struct otx2_tc_flow, list); + if (node->prio < tmp->prio) + break; + index++; + } + + list_add(&node->list, pos->prev); + return index; +} + +static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req) +{ + struct npc_install_flow_req *tmp_req; + int err; + + 
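
The flow-list helpers above are the heart of this patch's rhashtable-to-list conversion: TC rules are now kept sorted by ascending prio, because MCAM precedence is positional and the driver must know where in that order a new rule lands; otx2_tc_add_to_flow_list() returns that position so the caller can re-seat neighbouring rules. Lookup by cookie becomes a linear walk, presumably acceptable for the modest rule counts involved. A compilable model of the sorted insert:

	#include <stdio.h>

	struct flow {
		unsigned int prio;
		struct flow *next;
	};

	/* insert in ascending prio order; return the position taken.
	 * equal-prio rules go after existing ones, as in the patch.
	 */
	static int flow_insert(struct flow **head, struct flow *node)
	{
		struct flow **pp = head;
		int index = 0;

		while (*pp && (*pp)->prio <= node->prio) {
			pp = &(*pp)->next;
			index++;
		}
		node->next = *pp;
		*pp = node;
		return index;
	}

	int main(void)
	{
		struct flow *head = NULL;
		struct flow a = { .prio = 10 }, b = { .prio = 5 }, c = { .prio = 7 };

		printf("%d ", flow_insert(&head, &a));	/* 0: first rule   */
		printf("%d ", flow_insert(&head, &b));	/* 0: lowest prio  */
		printf("%d\n", flow_insert(&head, &c));	/* 1: between them */
		return 0;
	}
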
mutex_lock(&nic->mbox.lock); + tmp_req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); + if (!tmp_req) { + mutex_unlock(&nic->mbox.lock); + return -ENOMEM; + } + + memcpy(tmp_req, req, sizeof(struct npc_install_flow_req)); + /* Send message to AF */ + err = otx2_sync_mbox_msg(&nic->mbox); + if (err) { + netdev_err(nic->netdev, "Failed to install MCAM flow entry %d\n", + req->entry); + mutex_unlock(&nic->mbox.lock); + return -EFAULT; + } + + mutex_unlock(&nic->mbox.lock); + return 0; +} + +static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val) +{ + struct npc_delete_flow_rsp *rsp; struct npc_delete_flow_req *req; int err; @@ -729,22 +949,113 @@ static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry) mutex_unlock(&nic->mbox.lock); return -EFAULT; } + + if (cntr_val) { + rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox, + 0, &req->hdr); + if (IS_ERR(rsp)) { + netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n", + entry); + mutex_unlock(&nic->mbox.lock); + return -EFAULT; + } + + *cntr_val = rsp->cntr_val; + } + mutex_unlock(&nic->mbox.lock); + return 0; +} + +static int otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic, + struct otx2_flow_config *flow_cfg, + struct otx2_tc_flow *node) +{ + struct list_head *pos, *n; + struct otx2_tc_flow *tmp; + int i = 0, index = 0; + u16 cntr_val = 0; + + /* Find and delete the entry from the list and re-install + * all the entries from beginning to the index of the + * deleted entry to higher mcam indexes. + */ + list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { + tmp = list_entry(pos, struct otx2_tc_flow, list); + if (node == tmp) { + list_del(&tmp->list); + break; + } + + otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val); + tmp->entry++; + tmp->req.entry = tmp->entry; + tmp->req.cntr_val = cntr_val; + index++; + } + + list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { + if (i == index) + break; + + tmp = list_entry(pos, struct otx2_tc_flow, list); + otx2_add_mcam_flow_entry(nic, &tmp->req); + i++; + } return 0; } +static int otx2_tc_update_mcam_table_add_req(struct otx2_nic *nic, + struct otx2_flow_config *flow_cfg, + struct otx2_tc_flow *node) +{ + int mcam_idx = flow_cfg->max_flows - flow_cfg->nr_flows - 1; + struct otx2_tc_flow *tmp; + int list_idx, i; + u16 cntr_val = 0; + + /* Find the index of the entry(list_idx) whose priority + * is greater than the new entry and re-install all + * the entries from beginning to list_idx to higher + * mcam indexes. 
+ */ + list_idx = otx2_tc_add_to_flow_list(flow_cfg, node); + for (i = 0; i < list_idx; i++) { + tmp = otx2_tc_get_entry_by_index(flow_cfg, i); + if (!tmp) + return -ENOMEM; + + otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val); + tmp->entry = flow_cfg->flow_ent[mcam_idx]; + tmp->req.entry = tmp->entry; + tmp->req.cntr_val = cntr_val; + otx2_add_mcam_flow_entry(nic, &tmp->req); + mcam_idx++; + } + + return mcam_idx; +} + +static int otx2_tc_update_mcam_table(struct otx2_nic *nic, + struct otx2_flow_config *flow_cfg, + struct otx2_tc_flow *node, + bool add_req) +{ + if (add_req) + return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node); + + return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node); +} + static int otx2_tc_del_flow(struct otx2_nic *nic, struct flow_cls_offload *tc_flow_cmd) { struct otx2_flow_config *flow_cfg = nic->flow_cfg; - struct otx2_tc_info *tc_info = &nic->tc_info; struct otx2_tc_flow *flow_node; int err; - flow_node = rhashtable_lookup_fast(&tc_info->flow_table, - &tc_flow_cmd->cookie, - tc_info->flow_ht_params); + flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie); if (!flow_node) { netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n", tc_flow_cmd->cookie); @@ -752,6 +1063,11 @@ static int otx2_tc_del_flow(struct otx2_nic *nic, } if (flow_node->is_act_police) { + __clear_bit(flow_node->rq, &nic->rq_bmap); + + if (nic->flags & OTX2_FLAG_INTF_DOWN) + goto free_mcam_flow; + mutex_lock(&nic->mbox.lock); err = cn10k_map_unmap_rq_policer(nic, flow_node->rq, @@ -767,21 +1083,14 @@ static int otx2_tc_del_flow(struct otx2_nic *nic, "Unable to free leaf bandwidth profile(%d)\n", flow_node->leaf_profile); - __clear_bit(flow_node->rq, &nic->rq_bmap); - mutex_unlock(&nic->mbox.lock); } - otx2_del_mcam_flow_entry(nic, flow_node->entry); - - WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table, - &flow_node->node, - nic->tc_info.flow_ht_params)); +free_mcam_flow: + otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL); + otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false); kfree_rcu(flow_node, rcu); - - clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap); flow_cfg->nr_flows--; - return 0; } @@ -790,15 +1099,19 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, { struct netlink_ext_ack *extack = tc_flow_cmd->common.extack; struct otx2_flow_config *flow_cfg = nic->flow_cfg; - struct otx2_tc_info *tc_info = &nic->tc_info; struct otx2_tc_flow *new_node, *old_node; struct npc_install_flow_req *req, dummy; - int rc, err; + int rc, err, mcam_idx; if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)) return -ENOMEM; - if (bitmap_full(tc_info->tc_entries_bitmap, flow_cfg->max_flows)) { + if (nic->flags & OTX2_FLAG_INTF_DOWN) { + NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); + return -EINVAL; + } + + if (flow_cfg->nr_flows == flow_cfg->max_flows) { NL_SET_ERR_MSG_MOD(extack, "Free MCAM entry not available to add the flow"); return -ENOMEM; @@ -810,6 +1123,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, return -ENOMEM; spin_lock_init(&new_node->lock); new_node->cookie = tc_flow_cmd->cookie; + new_node->prio = tc_flow_cmd->common.prio; memset(&dummy, 0, sizeof(struct npc_install_flow_req)); @@ -820,12 +1134,11 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, } /* If a flow exists with the same cookie, delete it */ - old_node = rhashtable_lookup_fast(&tc_info->flow_table, - &tc_flow_cmd->cookie, - tc_info->flow_ht_params); + old_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie); if (old_node) 
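
Both otx2_tc_update_mcam_table_*_req() helpers above rely on the same invariant: NPC gives precedence to the lower MCAM entry index, so rule priority is encoded purely by position within the allocated window. Inserting a rule in the middle therefore means deleting and re-installing every higher-precedence rule one index lower (carrying its hit counter along via cntr_val), and deletion performs the inverse shift. A compact userspace model of the add-side behaviour (capacity checks omitted):

	#include <stdio.h>

	#define MAX_FLOWS 8

	static unsigned int win[MAX_FLOWS];	/* win[i]: tc prio at MCAM index i; 0 = free */
	static int nr;

	/* keep ascending prio across the used tail of the window; rules
	 * ahead of the insertion point move one index lower, which the
	 * patch performs as delete + re-install mailbox requests
	 */
	static void mcam_add(unsigned int prio)
	{
		int i = MAX_FLOWS - nr;		/* old lowest used index */

		while (i < MAX_FLOWS && win[i] <= prio) {
			win[i - 1] = win[i];
			i++;
		}
		win[i - 1] = prio;
		nr++;
	}

	int main(void)
	{
		mcam_add(10);
		mcam_add(5);
		mcam_add(7);
		for (int i = 0; i < MAX_FLOWS; i++)
			printf("%u ", win[i]);	/* 0 0 0 0 0 5 7 10 */
		printf("\n");
		return 0;
	}
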
otx2_tc_del_flow(nic, tc_flow_cmd); + mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true); mutex_lock(&nic->mbox.lock); req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); if (!req) { @@ -836,11 +1149,8 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr)); memcpy(req, &dummy, sizeof(struct npc_install_flow_req)); - - new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap, - flow_cfg->max_flows); req->channel = nic->hw.rx_chan_base; - req->entry = flow_cfg->flow_ent[flow_cfg->max_flows - new_node->bitpos - 1]; + req->entry = flow_cfg->flow_ent[mcam_idx]; req->intf = NIX_INTF_RX; req->set_cntr = 1; new_node->entry = req->entry; @@ -850,26 +1160,18 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, if (rc) { NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry"); mutex_unlock(&nic->mbox.lock); - kfree_rcu(new_node, rcu); goto free_leaf; } - mutex_unlock(&nic->mbox.lock); - /* add new flow to flow-table */ - rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node, - nic->tc_info.flow_ht_params); - if (rc) { - otx2_del_mcam_flow_entry(nic, req->entry); - kfree_rcu(new_node, rcu); - goto free_leaf; - } + mutex_unlock(&nic->mbox.lock); + memcpy(&new_node->req, req, sizeof(struct npc_install_flow_req)); - set_bit(new_node->bitpos, tc_info->tc_entries_bitmap); flow_cfg->nr_flows++; - return 0; free_leaf: + otx2_tc_del_from_flow_list(flow_cfg, new_node); + kfree_rcu(new_node, rcu); if (new_node->is_act_police) { mutex_lock(&nic->mbox.lock); @@ -896,16 +1198,13 @@ free_leaf: static int otx2_tc_get_flow_stats(struct otx2_nic *nic, struct flow_cls_offload *tc_flow_cmd) { - struct otx2_tc_info *tc_info = &nic->tc_info; struct npc_mcam_get_stats_req *req; struct npc_mcam_get_stats_rsp *rsp; struct otx2_tc_flow_stats *stats; struct otx2_tc_flow *flow_node; int err; - flow_node = rhashtable_lookup_fast(&tc_info->flow_table, - &tc_flow_cmd->cookie, - tc_info->flow_ht_params); + flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie); if (!flow_node) { netdev_info(nic->netdev, "tc flow not found for cookie %lx", tc_flow_cmd->cookie); @@ -1053,12 +1352,20 @@ static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { struct otx2_nic *nic = cb_priv; + bool ntuple; if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) return -EOPNOTSUPP; + ntuple = nic->netdev->features & NETIF_F_NTUPLE; switch (type) { case TC_SETUP_CLSFLOWER: + if (ntuple) { + netdev_warn(nic->netdev, + "Can't install TC flower offload rule when NTUPLE is active"); + return -EOPNOTSUPP; + } + return otx2_setup_tc_cls_flower(nic, type_data); case TC_SETUP_CLSMATCHALL: return otx2_setup_tc_ingress_matchall(nic, type_data); @@ -1143,18 +1450,8 @@ int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, } EXPORT_SYMBOL(otx2_setup_tc); -static const struct rhashtable_params tc_flow_ht_params = { - .head_offset = offsetof(struct otx2_tc_flow, node), - .key_offset = offsetof(struct otx2_tc_flow, cookie), - .key_len = sizeof(((struct otx2_tc_flow *)0)->cookie), - .automatic_shrinking = true, -}; - int otx2_init_tc(struct otx2_nic *nic) { - struct otx2_tc_info *tc = &nic->tc_info; - int err; - /* Exclude receive queue 0 being used for police action */ set_bit(0, &nic->rq_bmap); @@ -1164,25 +1461,54 @@ int otx2_init_tc(struct otx2_nic *nic) return -EINVAL; } - err = otx2_tc_alloc_ent_bitmap(nic); - if (err) - return err; - - tc->flow_ht_params = 
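
One detail in the error path above deserves scrutiny: free_leaf now calls kfree_rcu(new_node, rcu) and then still tests new_node->is_act_police and reads the node's policer fields to unwind hardware state. Reading a node after queueing it for RCU free is at best fragile and becomes a use-after-free once the grace period may elapse; the unwind should finish before the node is released. A safer ordering, as a sketch using the driver's own helpers:

	free_leaf:
		otx2_tc_del_from_flow_list(flow_cfg, new_node);
		if (new_node->is_act_police) {
			/* unwind policer setup while the node is still valid */
			mutex_lock(&nic->mbox.lock);
			cn10k_map_unmap_rq_policer(nic, new_node->rq,
						   new_node->leaf_profile, false);
			cn10k_free_leaf_profile(nic, new_node->leaf_profile);
			mutex_unlock(&nic->mbox.lock);
		}
		kfree_rcu(new_node, rcu);	/* free only after all fields are read */
		return rc;
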
tc_flow_ht_params; - err = rhashtable_init(&tc->flow_table, &tc->flow_ht_params); - if (err) { - kfree(tc->tc_entries_bitmap); - tc->tc_entries_bitmap = NULL; - } - return err; + return 0; } EXPORT_SYMBOL(otx2_init_tc); void otx2_shutdown_tc(struct otx2_nic *nic) { - struct otx2_tc_info *tc = &nic->tc_info; - - kfree(tc->tc_entries_bitmap); - rhashtable_destroy(&tc->flow_table); + otx2_destroy_tc_flow_list(nic); } EXPORT_SYMBOL(otx2_shutdown_tc); + +static void otx2_tc_config_ingress_rule(struct otx2_nic *nic, + struct otx2_tc_flow *node) +{ + struct npc_install_flow_req *req; + + if (otx2_tc_act_set_hw_police(nic, node)) + return; + + mutex_lock(&nic->mbox.lock); + + req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); + if (!req) + goto err; + + memcpy(req, &node->req, sizeof(struct npc_install_flow_req)); + + if (otx2_sync_mbox_msg(&nic->mbox)) + netdev_err(nic->netdev, + "Failed to install MCAM flow entry for ingress rule"); +err: + mutex_unlock(&nic->mbox.lock); +} + +void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic) +{ + struct otx2_flow_config *flow_cfg = nic->flow_cfg; + struct otx2_tc_flow *node; + + /* If any ingress policer rules exist for the interface then + * apply those rules. Ingress policer rules depend on bandwidth + * profiles linked to the receive queues. Since no receive queues + * exist when interface is down, ingress policer rules are stored + * and configured in hardware after all receive queues are allocated + * in otx2_open. + */ + list_for_each_entry(node, &flow_cfg->flow_list_tc, list) { + if (node->is_act_police) + otx2_tc_config_ingress_rule(nic, node); + } +} +EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index e369baf11530..4d519ea833b2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -29,7 +29,8 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, struct bpf_prog *prog, struct nix_cqe_rx_s *cqe, - struct otx2_cq_queue *cq); + struct otx2_cq_queue *cq, + bool *need_xdp_flush); static int otx2_nix_cq_op_status(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) @@ -337,7 +338,7 @@ static bool otx2_check_rcv_errors(struct otx2_nic *pfvf, static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, struct napi_struct *napi, struct otx2_cq_queue *cq, - struct nix_cqe_rx_s *cqe) + struct nix_cqe_rx_s *cqe, bool *need_xdp_flush) { struct nix_rx_parse_s *parse = &cqe->parse; struct nix_rx_sg_s *sg = &cqe->sg; @@ -353,7 +354,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, } if (pfvf->xdp_prog) - if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq)) + if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush)) return; skb = napi_get_frags(napi); @@ -388,6 +389,7 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf, struct napi_struct *napi, struct otx2_cq_queue *cq, int budget) { + bool need_xdp_flush = false; struct nix_cqe_rx_s *cqe; int processed_cqe = 0; @@ -409,13 +411,15 @@ process_cqe: cq->cq_head++; cq->cq_head &= (cq->cqe_cnt - 1); - otx2_rcv_pkt_handler(pfvf, napi, cq, cqe); + otx2_rcv_pkt_handler(pfvf, napi, cq, cqe, &need_xdp_flush); cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID; cqe->sg.seg_addr = 0x00; processed_cqe++; cq->pend_cqe--; } + if (need_xdp_flush) + xdp_do_flush(); /* Free CQEs to HW */ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, @@ -424,9 +428,10 @@ process_cqe: return processed_cqe; } 
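
The need_xdp_flush plumbing added to otx2_txrx.c below implements a standard XDP rule: after one or more XDP_REDIRECT verdicts, xdp_do_flush() must run before the NAPI poll completes so queued frames actually leave through the target device, but flushing per packet would defeat redirect batching. The pattern, reduced to its skeleton (kernel context assumed; my_next_cqe() and my_rcv_pkt() are hypothetical stand-ins for the driver's CQE walk and receive handler):

	static int my_rx_napi_poll(struct napi_struct *napi, int budget)
	{
		bool need_flush = false;
		int work = 0;

		while (work < budget && my_next_cqe()) {
			/* handler sets need_flush on a successful XDP_REDIRECT */
			my_rcv_pkt(napi, &need_flush);
			work++;
		}

		if (need_flush)
			xdp_do_flush();		/* one flush for the whole batch */

		return work;
	}
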
-void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) +int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) { struct otx2_nic *pfvf = dev; + int cnt = cq->pool_ptrs; dma_addr_t bufptr; while (cq->pool_ptrs) { @@ -435,6 +440,8 @@ void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM); cq->pool_ptrs--; } + + return cnt - cq->pool_ptrs; } static int otx2_tx_napi_handler(struct otx2_nic *pfvf, @@ -505,11 +512,18 @@ static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_p { struct dim_sample dim_sample; u64 rx_frames, rx_bytes; + u64 tx_frames, tx_bytes; rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) + OTX2_GET_RX_STATS(RX_UCAST); rx_bytes = OTX2_GET_RX_STATS(RX_OCTS); - dim_update_sample(pfvf->napi_events, rx_frames, rx_bytes, &dim_sample); + tx_bytes = OTX2_GET_TX_STATS(TX_OCTS); + tx_frames = OTX2_GET_TX_STATS(TX_UCAST); + + dim_update_sample(pfvf->napi_events, + rx_frames + tx_frames, + rx_bytes + tx_bytes, + &dim_sample); net_dim(&cq_poll->dim, dim_sample); } @@ -521,6 +535,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) struct otx2_cq_queue *cq; struct otx2_qset *qset; struct otx2_nic *pfvf; + int filled_cnt = -1; cq_poll = container_of(napi, struct otx2_cq_poll, napi); pfvf = (struct otx2_nic *)cq_poll->dev; @@ -541,7 +556,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) } if (rx_cq && rx_cq->pool_ptrs) - pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq); + filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq); /* Clear the IRQ */ otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0)); @@ -550,20 +565,29 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) if (pfvf->flags & OTX2_FLAG_INTF_DOWN) return workdone; - /* Check for adaptive interrupt coalesce */ - if (workdone != 0 && - ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) == - OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) { - /* Adjust irq coalese using net_dim */ + /* Adjust irq coalese using net_dim */ + if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) otx2_adjust_adaptive_coalese(pfvf, cq_poll); - /* Update irq coalescing */ - for (i = 0; i < pfvf->hw.cint_cnt; i++) - otx2_config_irq_coalescing(pfvf, i); - } - /* Re-enable interrupts */ - otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), - BIT_ULL(0)); + if (unlikely(!filled_cnt)) { + struct refill_work *work; + struct delayed_work *dwork; + + work = &pfvf->refill_wrk[cq->cq_idx]; + dwork = &work->pool_refill_work; + /* Schedule a task if no other task is running */ + if (!cq->refill_task_sched) { + work->napi = napi; + cq->refill_task_sched = true; + schedule_delayed_work(dwork, + msecs_to_jiffies(100)); + } + } else { + /* Re-enable interrupts */ + otx2_write64(pfvf, + NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), + BIT_ULL(0)); + } } return workdone; } @@ -1223,9 +1247,11 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int q void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) { + int tx_pkts = 0, tx_bytes = 0; struct sk_buff *skb = NULL; struct otx2_snd_queue *sq; struct nix_cqe_tx_s *cqe; + struct netdev_queue *txq; int processed_cqe = 0; struct sg_list *sg; int qidx; @@ -1246,12 +1272,20 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) sg = &sq->sg[cqe->comp.sqe_id]; skb = (struct sk_buff *)sg->skb; if (skb) { + tx_bytes += skb->len; + tx_pkts++; otx2_dma_unmap_skb_frags(pfvf, sg); dev_kfree_skb_any(skb); 
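
Making the refill helpers return a count (the void to int change above) lets the NAPI handler tell "pool temporarily empty" apart from normal operation: when nothing could be replenished, the patch leaves the completion interrupt masked and schedules delayed work to retry, rather than re-enabling the IRQ against an unfillable ring. The resulting control flow, condensed (kernel context assumed; enable_cq_interrupt() is a hypothetical stand-in for the W1S register write):

	filled = refill_pool_ptrs(pfvf, rx_cq);		/* buffers restored */

	if (unlikely(!filled)) {
		/* allocation pressure: retry from process context later;
		 * the interrupt stays masked until the pool recovers
		 */
		if (!cq->refill_task_sched) {
			cq->refill_task_sched = true;
			schedule_delayed_work(&work->pool_refill_work,
					      msecs_to_jiffies(100));
		}
	} else {
		enable_cq_interrupt(pfvf, cq);		/* hypothetical */
	}
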
sg->skb = (u64)NULL; } } + if (likely(tx_pkts)) { + if (qidx >= pfvf->hw.tx_queues) + qidx -= pfvf->hw.xdp_queues; + txq = netdev_get_tx_queue(pfvf->netdev, qidx); + netdev_tx_completed_queue(txq, tx_pkts, tx_bytes); + } /* Free CQEs to HW */ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, ((u64)cq->cq_idx << 32) | processed_cqe); @@ -1278,6 +1312,38 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable) return err; } +void otx2_free_pending_sqe(struct otx2_nic *pfvf) +{ + int tx_pkts = 0, tx_bytes = 0; + struct sk_buff *skb = NULL; + struct otx2_snd_queue *sq; + struct netdev_queue *txq; + struct sg_list *sg; + int sq_idx, sqe; + + for (sq_idx = 0; sq_idx < pfvf->hw.tx_queues; sq_idx++) { + sq = &pfvf->qset.sq[sq_idx]; + for (sqe = 0; sqe < sq->sqe_cnt; sqe++) { + sg = &sq->sg[sqe]; + skb = (struct sk_buff *)sg->skb; + if (skb) { + tx_bytes += skb->len; + tx_pkts++; + otx2_dma_unmap_skb_frags(pfvf, sg); + dev_kfree_skb_any(skb); + sg->skb = (u64)NULL; + } + } + + if (!tx_pkts) + continue; + txq = netdev_get_tx_queue(pfvf->netdev, sq_idx); + netdev_tx_completed_queue(txq, tx_pkts, tx_bytes); + tx_pkts = 0; + tx_bytes = 0; + } +} + static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr, int len, int *offset) { @@ -1334,7 +1400,8 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx) static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, struct bpf_prog *prog, struct nix_cqe_rx_s *cqe, - struct otx2_cq_queue *cq) + struct otx2_cq_queue *cq, + bool *need_xdp_flush) { unsigned char *hard_start, *data; int qidx = cq->cq_idx; @@ -1371,8 +1438,10 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE); - if (!err) + if (!err) { + *need_xdp_flush = true; return true; + } put_page(page); break; default: diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index b5d689eeff80..a82ffca8ce1b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -23,6 +23,8 @@ #define OTX2_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN) #define OTX2_MIN_MTU 60 +#define OTX2_PAGE_POOL_SZ 2048 + #define OTX2_MAX_GSO_SEGS 255 #define OTX2_MAX_FRAGS_IN_SQE 9 @@ -168,6 +170,6 @@ void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); -void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); -void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); +int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); +int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); #endif /* OTX2_TXRX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c index d3a76c5ccda8..1e77bbf5d22a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c @@ -19,6 +19,9 @@ #define OTX2_QOS_CLASS_NONE 0 #define OTX2_QOS_DEFAULT_PRIO 0xF #define OTX2_QOS_INVALID_SQ 0xFFFF +#define OTX2_QOS_INVALID_TXSCHQ_IDX 0xFFFF +#define CN10K_MAX_RR_WEIGHT GENMASK_ULL(13, 0) +#define OTX2_MAX_RR_QUANTUM GENMASK_ULL(23, 0) static void otx2_qos_update_tx_netdev_queues(struct otx2_nic *pfvf) { @@ -65,11 +68,24 @@ static void otx2_qos_get_regaddr(struct otx2_qos_node *node, } } +static int otx2_qos_quantum_to_dwrr_weight(struct otx2_nic *pfvf, u32 quantum) +{ + u32 weight; 
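
The netdev_tx_completed_queue() calls added to otx2_cleanup_tx_cqes() and the new otx2_free_pending_sqe() close a byte-queue-limits accounting leak: BQL pairs every netdev_tx_sent_queue() at transmit time with a completion report, and descriptors that are torn down rather than completed (interface down, queue flush) must still be reported, or the queue's in-flight byte count never drains and it can stall as permanently "full". The pairing, in outline (kernel context assumed):

	/* xmit path: account bytes handed to hardware */
	netdev_tx_sent_queue(txq, skb->len);

	/* completion or teardown path: report everything reclaimed */
	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	/* full reset of a queue's BQL state, e.g. when stopping the device */
	netdev_tx_reset_queue(txq);
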
+ + weight = quantum / pfvf->hw.dwrr_mtu; + if (quantum % pfvf->hw.dwrr_mtu) + weight += 1; + + return weight; +} + static void otx2_config_sched_shaping(struct otx2_nic *pfvf, struct otx2_qos_node *node, struct nix_txschq_config *cfg, int *num_regs) { + u32 rr_weight; + u32 quantum; u64 maxrate; otx2_qos_get_regaddr(node, cfg, *num_regs); @@ -86,8 +102,17 @@ static void otx2_config_sched_shaping(struct otx2_nic *pfvf, return; } - /* configure priority */ - cfg->regval[*num_regs] = (node->schq - node->parent->prio_anchor) << 24; + /* configure priority/quantum */ + if (node->is_static) { + cfg->regval[*num_regs] = + (node->schq - node->parent->prio_anchor) << 24; + } else { + quantum = node->quantum ? + node->quantum : pfvf->tx_max_pktlen; + rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum); + cfg->regval[*num_regs] = node->parent->child_dwrr_prio << 24 | + rr_weight; + } (*num_regs)++; /* configure PIR */ @@ -195,9 +220,8 @@ static int otx2_qos_txschq_set_parent_topology(struct otx2_nic *pfvf, cfg->reg[0] = NIX_AF_TL1X_TOPOLOGY(parent->schq); cfg->regval[0] = (u64)parent->prio_anchor << 32; - if (parent->level == NIX_TXSCH_LVL_TL1) - cfg->regval[0] |= (u64)TXSCH_TL1_DFLT_RR_PRIO << 1; - + cfg->regval[0] |= ((parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) ? + parent->child_dwrr_prio : 0) << 1; cfg->num_regs++; rc = otx2_sync_mbox_msg(&pfvf->mbox); @@ -315,9 +339,14 @@ static void otx2_qos_fill_cfg_tl(struct otx2_qos_node *parent, list_for_each_entry(node, &parent->child_list, list) { otx2_qos_fill_cfg_tl(node, cfg); - cfg->schq_contig[node->level]++; otx2_qos_fill_cfg_schq(node, cfg); } + + /* Assign the required number of transmit schedular queues under the + * given class + */ + cfg->schq_contig[parent->level - 1] += parent->child_dwrr_cnt + + parent->max_static_prio + 1; } static void otx2_qos_prepare_txschq_cfg(struct otx2_nic *pfvf, @@ -378,10 +407,12 @@ otx2_qos_alloc_root(struct otx2_nic *pfvf) return ERR_PTR(-ENOMEM); node->parent = NULL; - if (!is_otx2_vf(pfvf->pcifunc)) + if (!is_otx2_vf(pfvf->pcifunc)) { node->level = NIX_TXSCH_LVL_TL1; - else + } else { node->level = NIX_TXSCH_LVL_TL2; + node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO; + } WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER); node->classid = OTX2_QOS_ROOT_CLASSID; @@ -401,9 +432,13 @@ static int otx2_qos_add_child_node(struct otx2_qos_node *parent, struct otx2_qos_node *tmp_node; struct list_head *tmp; + if (node->prio > parent->max_static_prio) + parent->max_static_prio = node->prio; + for (tmp = head->next; tmp != head; tmp = tmp->next) { tmp_node = list_entry(tmp, struct otx2_qos_node, list); - if (tmp_node->prio == node->prio) + if (tmp_node->prio == node->prio && + tmp_node->is_static) return -EEXIST; if (tmp_node->prio > node->prio) { list_add_tail(&node->list, tmp); @@ -434,6 +469,10 @@ static int otx2_qos_alloc_txschq_node(struct otx2_nic *pfvf, txschq_node->rate = 0; txschq_node->ceil = 0; txschq_node->prio = 0; + txschq_node->quantum = 0; + txschq_node->is_static = true; + txschq_node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO; + txschq_node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX; mutex_lock(&pfvf->qos.qos_lock); list_add_tail(&txschq_node->list, &node->child_schq_list); @@ -459,7 +498,7 @@ static struct otx2_qos_node * otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf, struct otx2_qos_node *parent, u16 classid, u32 prio, u64 rate, u64 ceil, - u16 qid) + u32 quantum, u16 qid, bool static_cfg) { struct otx2_qos_node *node; int err; @@ -476,6 +515,10 @@ otx2_qos_sw_create_leaf_node(struct otx2_nic 
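
otx2_qos_quantum_to_dwrr_weight() above is a ceiling division: the HTB quantum (bytes a class may send per DWRR round) is expressed in units of the scheduler's DWRR MTU, rounded up so a remainder still buys a full unit of weight. Worked through with concrete numbers (the dwrr_mtu of 1500 is purely illustrative):

	#include <stdio.h>

	static unsigned int quantum_to_weight(unsigned int quantum,
					      unsigned int dwrr_mtu)
	{
		/* equivalent to DIV_ROUND_UP(quantum, dwrr_mtu) */
		return quantum / dwrr_mtu + (quantum % dwrr_mtu ? 1 : 0);
	}

	int main(void)
	{
		unsigned int dwrr_mtu = 1500;	/* illustrative value only */

		printf("%u\n", quantum_to_weight(1500, dwrr_mtu));	/* 1 */
		printf("%u\n", quantum_to_weight(1501, dwrr_mtu));	/* 2 */
		printf("%u\n", quantum_to_weight(64, dwrr_mtu));	/* 1 */
		return 0;
	}

The resulting weight is later range-checked in otx2_qos_validate_quantum() against the per-silicon limit, OTX2_MAX_RR_QUANTUM on OcteonTX2 versus CN10K_MAX_RR_WEIGHT on CN10K.
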
*pfvf, node->rate = otx2_convert_rate(rate); node->ceil = otx2_convert_rate(ceil); node->prio = prio; + node->quantum = quantum; + node->is_static = static_cfg; + node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO; + node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX; __set_bit(qid, pfvf->qos.qos_sq_bmap); @@ -622,12 +665,28 @@ static int otx2_qos_txschq_alloc(struct otx2_nic *pfvf, } pfvf->qos.link_cfg_lvl = rsp->link_cfg_lvl; + pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio; out: mutex_unlock(&mbox->lock); return rc; } +static void otx2_qos_free_unused_txschq(struct otx2_nic *pfvf, + struct otx2_qos_cfg *cfg) +{ + int lvl, idx, schq; + + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) { + if (!cfg->schq_index_used[lvl][idx]) { + schq = cfg->schq_contig_list[lvl][idx]; + otx2_txschq_free_one(pfvf, lvl, schq); + } + } + } +} + static void otx2_qos_txschq_fill_cfg_schq(struct otx2_nic *pfvf, struct otx2_qos_node *node, struct otx2_qos_cfg *cfg) @@ -652,9 +711,11 @@ static void otx2_qos_txschq_fill_cfg_tl(struct otx2_nic *pfvf, list_for_each_entry(tmp, &node->child_list, list) { otx2_qos_txschq_fill_cfg_tl(pfvf, tmp, cfg); cnt = cfg->static_node_pos[tmp->level]; - tmp->schq = cfg->schq_contig_list[tmp->level][cnt]; + tmp->schq = cfg->schq_contig_list[tmp->level][tmp->txschq_idx]; + cfg->schq_index_used[tmp->level][tmp->txschq_idx] = true; if (cnt == 0) - node->prio_anchor = tmp->schq; + node->prio_anchor = + cfg->schq_contig_list[tmp->level][0]; cfg->static_node_pos[tmp->level]++; otx2_qos_txschq_fill_cfg_schq(pfvf, tmp, cfg); } @@ -667,9 +728,87 @@ static void otx2_qos_txschq_fill_cfg(struct otx2_nic *pfvf, mutex_lock(&pfvf->qos.qos_lock); otx2_qos_txschq_fill_cfg_tl(pfvf, node, cfg); otx2_qos_txschq_fill_cfg_schq(pfvf, node, cfg); + otx2_qos_free_unused_txschq(pfvf, cfg); mutex_unlock(&pfvf->qos.qos_lock); } +static void __otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf, + struct otx2_qos_node *tmp, + unsigned long *child_idx_bmap, + int child_cnt) +{ + int idx; + + if (tmp->txschq_idx != OTX2_QOS_INVALID_TXSCHQ_IDX) + return; + + /* assign static nodes 1:1 prio mapping first, then remaining nodes */ + for (idx = 0; idx < child_cnt; idx++) { + if (tmp->is_static && tmp->prio == idx && + !test_bit(idx, child_idx_bmap)) { + tmp->txschq_idx = idx; + set_bit(idx, child_idx_bmap); + return; + } else if (!tmp->is_static && idx >= tmp->prio && + !test_bit(idx, child_idx_bmap)) { + tmp->txschq_idx = idx; + set_bit(idx, child_idx_bmap); + return; + } + } +} + +static int otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf, + struct otx2_qos_node *node) +{ + unsigned long *child_idx_bmap; + struct otx2_qos_node *tmp; + int child_cnt; + + list_for_each_entry(tmp, &node->child_list, list) + tmp->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX; + + /* allocate child index array */ + child_cnt = node->child_dwrr_cnt + node->max_static_prio + 1; + child_idx_bmap = kcalloc(BITS_TO_LONGS(child_cnt), + sizeof(unsigned long), + GFP_KERNEL); + if (!child_idx_bmap) + return -ENOMEM; + + list_for_each_entry(tmp, &node->child_list, list) + otx2_qos_assign_base_idx_tl(pfvf, tmp); + + /* assign base index of static priority children first */ + list_for_each_entry(tmp, &node->child_list, list) { + if (!tmp->is_static) + continue; + __otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap, + child_cnt); + } + + /* assign base index of dwrr priority children */ + list_for_each_entry(tmp, &node->child_list, list) + __otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap, + 
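
__otx2_qos_assign_base_idx_tl() above solves a small packing problem: a parent's children share a contiguous block of child_dwrr_cnt + max_static_prio + 1 transmit-scheduler slots, static-priority children must land exactly at the slot matching their priority, and DWRR children may take any free slot at or above the group's priority. Running the static pass first guarantees the 1:1 mappings are never stolen. A compilable model of the two-pass assignment:

	#include <stdbool.h>
	#include <stdio.h>

	#define NCHILD 8

	struct child {
		bool is_static;
		unsigned int prio;
		int slot;	/* assigned index, -1 = unassigned */
	};

	static void assign_slots(struct child *c, int n)
	{
		bool used[NCHILD] = { false };

		/* pass 1: static children take slot == prio */
		for (int i = 0; i < n; i++)
			if (c[i].is_static) {
				c[i].slot = c[i].prio;
				used[c[i].prio] = true;
			}

		/* pass 2: DWRR children take the first free slot >= prio */
		for (int i = 0; i < n; i++) {
			if (c[i].is_static)
				continue;
			for (int s = c[i].prio; s < NCHILD; s++)
				if (!used[s]) {
					c[i].slot = s;
					used[s] = true;
					break;
				}
		}
	}

	int main(void)
	{
		struct child c[] = {
			{ true,  0, -1 }, { true,  2, -1 },	/* static prios 0, 2 */
			{ false, 1, -1 }, { false, 1, -1 },	/* DWRR group, prio 1 */
		};

		assign_slots(c, 4);
		for (int i = 0; i < 4; i++)
			printf("child %d -> slot %d\n", i, c[i].slot);
		return 0;	/* slots 0, 2, 1, 3 */
	}
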
child_cnt); + + kfree(child_idx_bmap); + + return 0; +} + +static int otx2_qos_assign_base_idx(struct otx2_nic *pfvf, + struct otx2_qos_node *node) +{ + int ret = 0; + + mutex_lock(&pfvf->qos.qos_lock); + ret = otx2_qos_assign_base_idx_tl(pfvf, node); + mutex_unlock(&pfvf->qos.qos_lock); + + return ret; +} + static int otx2_qos_txschq_push_cfg_schq(struct otx2_nic *pfvf, struct otx2_qos_node *node, struct otx2_qos_cfg *cfg) @@ -761,8 +900,10 @@ static void otx2_qos_free_cfg(struct otx2_nic *pfvf, struct otx2_qos_cfg *cfg) for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) { - schq = cfg->schq_contig_list[lvl][idx]; - otx2_txschq_free_one(pfvf, lvl, schq); + if (cfg->schq_index_used[lvl][idx]) { + schq = cfg->schq_contig_list[lvl][idx]; + otx2_txschq_free_one(pfvf, lvl, schq); + } } } } @@ -838,6 +979,10 @@ static int otx2_qos_push_txschq_cfg(struct otx2_nic *pfvf, if (ret) return -ENOSPC; + ret = otx2_qos_assign_base_idx(pfvf, node); + if (ret) + return -ENOMEM; + if (!(pfvf->netdev->flags & IFF_UP)) { otx2_qos_txschq_fill_cfg(pfvf, node, cfg); return 0; @@ -894,6 +1039,13 @@ static int otx2_qos_root_add(struct otx2_nic *pfvf, u16 htb_maj_id, u16 htb_defc goto free_root_node; } + /* Update TL1 RR PRIO */ + if (root->level == NIX_TXSCH_LVL_TL1) { + root->child_dwrr_prio = pfvf->hw.txschq_aggr_lvl_rr_prio; + netdev_dbg(pfvf->netdev, + "TL1 DWRR Priority %d\n", root->child_dwrr_prio); + } + if (!(pfvf->netdev->flags & IFF_UP) || root->level == NIX_TXSCH_LVL_TL1) { root->schq = new_cfg->schq_list[root->level][0]; @@ -940,37 +1092,126 @@ static int otx2_qos_root_destroy(struct otx2_nic *pfvf) return 0; } +static int otx2_qos_validate_quantum(struct otx2_nic *pfvf, u32 quantum) +{ + u32 rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum); + int err = 0; + + /* Max Round robin weight supported by octeontx2 and CN10K + * is different. Validate accordingly + */ + if (is_dev_otx2(pfvf->pdev)) + err = (rr_weight > OTX2_MAX_RR_QUANTUM) ? 
-EINVAL : 0; + else if (rr_weight > CN10K_MAX_RR_WEIGHT) + err = -EINVAL; + + return err; +} + +static int otx2_qos_validate_dwrr_cfg(struct otx2_qos_node *parent, + struct netlink_ext_ack *extack, + struct otx2_nic *pfvf, + u64 prio, u64 quantum) +{ + int err; + + err = otx2_qos_validate_quantum(pfvf, quantum); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported quantum value"); + return err; + } + + if (parent->child_dwrr_prio == OTX2_QOS_DEFAULT_PRIO) { + parent->child_dwrr_prio = prio; + } else if (prio != parent->child_dwrr_prio) { + NL_SET_ERR_MSG_MOD(extack, "Only one DWRR group is allowed"); + return -EOPNOTSUPP; + } + + return 0; +} + static int otx2_qos_validate_configuration(struct otx2_qos_node *parent, struct netlink_ext_ack *extack, struct otx2_nic *pfvf, - u64 prio) + u64 prio, bool static_cfg) { - if (test_bit(prio, parent->prio_bmap)) { - NL_SET_ERR_MSG_MOD(extack, - "Static priority child with same priority exists"); + if (prio == parent->child_dwrr_prio && static_cfg) { + NL_SET_ERR_MSG_MOD(extack, "DWRR child group with same priority exists"); return -EEXIST; } - if (prio == TXSCH_TL1_DFLT_RR_PRIO) { + if (static_cfg && test_bit(prio, parent->prio_bmap)) { NL_SET_ERR_MSG_MOD(extack, - "Priority is reserved for Round Robin"); - return -EINVAL; + "Static priority child with same priority exists"); + return -EEXIST; } return 0; } +static void otx2_reset_dwrr_prio(struct otx2_qos_node *parent, u64 prio) +{ + /* For PF, root node dwrr priority is static */ + if (parent->level == NIX_TXSCH_LVL_TL1) + return; + + if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) { + parent->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO; + clear_bit(prio, parent->prio_bmap); + } +} + +static bool is_qos_node_dwrr(struct otx2_qos_node *parent, + struct otx2_nic *pfvf, + u64 prio) +{ + struct otx2_qos_node *node; + bool ret = false; + + if (parent->child_dwrr_prio == prio) + return true; + + mutex_lock(&pfvf->qos.qos_lock); + list_for_each_entry(node, &parent->child_list, list) { + if (prio == node->prio) { + if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO && + parent->child_dwrr_prio != prio) + continue; + + if (otx2_qos_validate_quantum(pfvf, node->quantum)) { + netdev_err(pfvf->netdev, + "Unsupported quantum value for existing classid=0x%x quantum=%d prio=%d", + node->classid, node->quantum, + node->prio); + break; + } + /* mark old node as dwrr */ + node->is_static = false; + parent->child_dwrr_cnt++; + parent->child_static_cnt--; + ret = true; + break; + } + } + mutex_unlock(&pfvf->qos.qos_lock); + + return ret; +} + static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid, u32 parent_classid, u64 rate, u64 ceil, - u64 prio, struct netlink_ext_ack *extack) + u64 prio, u32 quantum, + struct netlink_ext_ack *extack) { struct otx2_qos_cfg *old_cfg, *new_cfg; struct otx2_qos_node *node, *parent; int qid, ret, err; + bool static_cfg; netdev_dbg(pfvf->netdev, - "TC_HTB_LEAF_ALLOC_QUEUE: classid=0x%x parent_classid=0x%x rate=%lld ceil=%lld prio=%lld\n", - classid, parent_classid, rate, ceil, prio); + "TC_HTB_LEAF_ALLOC_QUEUE: classid=0x%x parent_classid=0x%x rate=%lld ceil=%lld prio=%lld quantum=%d\n", + classid, parent_classid, rate, ceil, prio, quantum); if (prio > OTX2_QOS_MAX_PRIO) { NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7"); @@ -978,6 +1219,12 @@ static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid, goto out; } + if (!quantum || quantum > INT_MAX) { + NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes"); + ret = 
-EOPNOTSUPP; + goto out; + } + /* get parent node */ parent = otx2_sw_node_find(pfvf, parent_classid); if (!parent) { @@ -991,10 +1238,24 @@ static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid, goto out; } - ret = otx2_qos_validate_configuration(parent, extack, pfvf, prio); + static_cfg = !is_qos_node_dwrr(parent, pfvf, prio); + ret = otx2_qos_validate_configuration(parent, extack, pfvf, prio, + static_cfg); if (ret) goto out; + if (!static_cfg) { + ret = otx2_qos_validate_dwrr_cfg(parent, extack, pfvf, prio, + quantum); + if (ret) + goto out; + } + + if (static_cfg) + parent->child_static_cnt++; + else + parent->child_dwrr_cnt++; + set_bit(prio, parent->prio_bmap); /* read current txschq configuration */ @@ -1019,7 +1280,7 @@ static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid, /* allocate and initialize a new child node */ node = otx2_qos_sw_create_leaf_node(pfvf, parent, classid, prio, rate, - ceil, qid); + ceil, quantum, qid, static_cfg); if (IS_ERR(node)) { NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node"); ret = PTR_ERR(node); @@ -1067,6 +1328,11 @@ free_node: free_old_cfg: kfree(old_cfg); reset_prio: + if (static_cfg) + parent->child_static_cnt--; + else + parent->child_dwrr_cnt--; + clear_bit(prio, parent->prio_bmap); out: return ret; @@ -1074,10 +1340,11 @@ out: static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid, u16 child_classid, u64 rate, u64 ceil, u64 prio, - struct netlink_ext_ack *extack) + u32 quantum, struct netlink_ext_ack *extack) { struct otx2_qos_cfg *old_cfg, *new_cfg; struct otx2_qos_node *node, *child; + bool static_cfg; int ret, err; u16 qid; @@ -1091,6 +1358,12 @@ static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid, goto out; } + if (!quantum || quantum > INT_MAX) { + NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes"); + ret = -EOPNOTSUPP; + goto out; + } + /* find node related to classid */ node = otx2_sw_node_find(pfvf, classid); if (!node) { @@ -1105,6 +1378,19 @@ static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid, goto out; } + static_cfg = !is_qos_node_dwrr(node, pfvf, prio); + if (!static_cfg) { + ret = otx2_qos_validate_dwrr_cfg(node, extack, pfvf, prio, + quantum); + if (ret) + goto out; + } + + if (static_cfg) + node->child_static_cnt++; + else + node->child_dwrr_cnt++; + set_bit(prio, node->prio_bmap); /* store the qid to assign to leaf node */ @@ -1127,7 +1413,8 @@ static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid, /* allocate and initialize a new child node */ child = otx2_qos_sw_create_leaf_node(pfvf, node, child_classid, - prio, rate, ceil, qid); + prio, rate, ceil, quantum, + qid, static_cfg); if (IS_ERR(child)) { NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node"); ret = PTR_ERR(child); @@ -1178,6 +1465,10 @@ free_node: free_old_cfg: kfree(old_cfg); reset_prio: + if (static_cfg) + node->child_static_cnt--; + else + node->child_dwrr_cnt--; clear_bit(prio, node->prio_bmap); out: return ret; @@ -1187,6 +1478,7 @@ static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid, struct netlink_ext_ack *extack) { struct otx2_qos_node *node, *parent; + int dwrr_del_node = false; u64 prio; u16 qid; @@ -1202,12 +1494,27 @@ static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid, prio = node->prio; qid = node->qid; + if (!node->is_static) + dwrr_del_node = true; + otx2_qos_disable_sq(pfvf, node->qid); otx2_qos_destroy_node(pfvf, node); pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ; - 
clear_bit(prio, parent->prio_bmap); + if (dwrr_del_node) { + parent->child_dwrr_cnt--; + } else { + parent->child_static_cnt--; + clear_bit(prio, parent->prio_bmap); + } + + /* Reset DWRR priority if all dwrr nodes are deleted */ + if (!parent->child_dwrr_cnt) + otx2_reset_dwrr_prio(parent, prio); + + if (!parent->child_static_cnt) + parent->max_static_prio = 0; return 0; } @@ -1217,6 +1524,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force { struct otx2_qos_node *node, *parent; struct otx2_qos_cfg *new_cfg; + int dwrr_del_node = false; u64 prio; int err; u16 qid; @@ -1241,11 +1549,26 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force return -ENOENT; } + if (!node->is_static) + dwrr_del_node = true; + /* destroy the leaf node */ otx2_qos_destroy_node(pfvf, node); pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ; - clear_bit(prio, parent->prio_bmap); + if (dwrr_del_node) { + parent->child_dwrr_cnt--; + } else { + parent->child_static_cnt--; + clear_bit(prio, parent->prio_bmap); + } + + /* Reset DWRR priority if all dwrr nodes are deleted */ + if (!parent->child_dwrr_cnt) + otx2_reset_dwrr_prio(parent, prio); + + if (!parent->child_static_cnt) + parent->max_static_prio = 0; /* create downstream txschq entries to parent */ err = otx2_qos_alloc_txschq_node(pfvf, parent); @@ -1298,10 +1621,12 @@ void otx2_qos_config_txschq(struct otx2_nic *pfvf) if (!root) return; - err = otx2_qos_txschq_config(pfvf, root); - if (err) { - netdev_err(pfvf->netdev, "Error update txschq configuration\n"); - goto root_destroy; + if (root->level != NIX_TXSCH_LVL_TL1) { + err = otx2_qos_txschq_config(pfvf, root); + if (err) { + netdev_err(pfvf->netdev, "Error update txschq configuration\n"); + goto root_destroy; + } } err = otx2_qos_txschq_push_cfg_tl(pfvf, root, NULL); @@ -1334,7 +1659,8 @@ int otx2_setup_tc_htb(struct net_device *ndev, struct tc_htb_qopt_offload *htb) res = otx2_qos_leaf_alloc_queue(pfvf, htb->classid, htb->parent_classid, htb->rate, htb->ceil, - htb->prio, htb->extack); + htb->prio, htb->quantum, + htb->extack); if (res < 0) return res; htb->qid = res; @@ -1343,7 +1669,7 @@ int otx2_setup_tc_htb(struct net_device *ndev, struct tc_htb_qopt_offload *htb) return otx2_qos_leaf_to_inner(pfvf, htb->parent_classid, htb->classid, htb->rate, htb->ceil, htb->prio, - htb->extack); + htb->quantum, htb->extack); case TC_HTB_LEAF_DEL: return otx2_qos_leaf_del(pfvf, &htb->classid, htb->extack); case TC_HTB_LEAF_DEL_LAST: diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.h b/drivers/net/ethernet/marvell/octeontx2/nic/qos.h index 19773284be27..221bd0438f60 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.h @@ -35,6 +35,7 @@ struct otx2_qos_cfg { int dwrr_node_pos[NIX_TXSCH_LVL_CNT]; u16 schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; u16 schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; + bool schq_index_used[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; }; struct otx2_qos { @@ -59,10 +60,18 @@ struct otx2_qos_node { u64 ceil; u32 classid; u32 prio; - u16 schq; /* hw txschq */ + u32 quantum; + /* hw txschq */ + u16 schq; u16 qid; u16 prio_anchor; + u16 max_static_prio; + u16 child_dwrr_cnt; + u16 child_static_cnt; + u16 child_dwrr_prio; + u16 txschq_idx; /* txschq allocation index */ u8 level; + bool is_static; }; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c index 
3e20e71b0f81..8b9455d8a4f7 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c @@ -202,16 +202,16 @@ static int prestera_flower_parse(struct prestera_flow_block *block, int err; if (dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_META) | - BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_ICMP) | - BIT(FLOW_DISSECTOR_KEY_PORTS) | - BIT(FLOW_DISSECTOR_KEY_PORTS_RANGE) | - BIT(FLOW_DISSECTOR_KEY_VLAN))) { + ~(BIT_ULL(FLOW_DISSECTOR_KEY_META) | + BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) | + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS_RANGE) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN))) { NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key"); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c index 9277a8fd1339..cc2a9ae794be 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c @@ -5,9 +5,6 @@ #include <linux/dmapool.h> #include <linux/etherdevice.h> #include <linux/if_vlan.h> -#include <linux/of_address.h> -#include <linux/of_device.h> -#include <linux/of.h> #include <linux/platform_device.h> #include "prestera_dsa.h" diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index d5691b6a2bc5..dd6ca2e4fd51 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1528,7 +1528,7 @@ err_clk: return err; } -static int pxa168_eth_remove(struct platform_device *pdev) +static void pxa168_eth_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct pxa168_eth_private *pep = netdev_priv(dev); @@ -1547,7 +1547,6 @@ static int pxa168_eth_remove(struct platform_device *pdev) mdiobus_free(pep->smi_bus); unregister_netdev(dev); free_netdev(dev); - return 0; } static void pxa168_eth_shutdown(struct platform_device *pdev) @@ -1580,7 +1579,7 @@ MODULE_DEVICE_TABLE(of, pxa168_eth_of_match); static struct platform_driver pxa168_eth_driver = { .probe = pxa168_eth_probe, - .remove = pxa168_eth_remove, + .remove_new = pxa168_eth_remove, .shutdown = pxa168_eth_shutdown, .resume = pxa168_eth_resume, .suspend = pxa168_eth_suspend, diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 7c487f9b36ec..07720841a8d7 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -32,7 +32,6 @@ #include <linux/prefetch.h> #include <linux/debugfs.h> #include <linux/mii.h> -#include <linux/of_device.h> #include <linux/of_net.h> #include <linux/dmi.h> @@ -4529,7 +4528,7 @@ static __init void sky2_debug_init(void) struct dentry *ent; ent = debugfs_create_dir("sky2", NULL); - if (!ent || IS_ERR(ent)) + if (IS_ERR(ent)) return; sky2_debug = ent; diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h index ddec1627f1a7..8d0bacf4e49c 100644 --- a/drivers/net/ethernet/marvell/sky2.h +++ b/drivers/net/ethernet/marvell/sky2.h @@ -2195,7 +2195,7 @@ struct rx_ring_info { struct sk_buff *skb; 
dma_addr_t data_addr; DEFINE_DMA_UNMAP_LEN(data_size); - dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT]; + dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT ?: 1]; }; enum flow_control { diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c index 317e447f4991..7c27a19c4d8f 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_path.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c @@ -15,10 +15,10 @@ struct mtk_eth_muxc { const char *name; int cap_bit; - int (*set_path)(struct mtk_eth *eth, int path); + int (*set_path)(struct mtk_eth *eth, u64 path); }; -static const char *mtk_eth_path_name(int path) +static const char *mtk_eth_path_name(u64 path) { switch (path) { case MTK_ETH_PATH_GMAC1_RGMII: @@ -40,10 +40,10 @@ static const char *mtk_eth_path_name(int path) } } -static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path) +static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, u64 path) { bool updated = true; - u32 val, mask, set; + u32 mask, set, reg; switch (path) { case MTK_ETH_PATH_GMAC1_SGMII: @@ -59,11 +59,13 @@ static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path) break; } - if (updated) { - val = mtk_r32(eth, MTK_MAC_MISC); - val = (val & mask) | set; - mtk_w32(eth, val, MTK_MAC_MISC); - } + if (mtk_is_netsys_v3_or_greater(eth)) + reg = MTK_MAC_MISC_V3; + else + reg = MTK_MAC_MISC; + + if (updated) + mtk_m32(eth, mask, set, reg); dev_dbg(eth->dev, "path %s in %s updated = %d\n", mtk_eth_path_name(path), __func__, updated); @@ -71,7 +73,7 @@ static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path) return 0; } -static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, int path) +static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, u64 path) { unsigned int val = 0; bool updated = true; @@ -94,7 +96,7 @@ static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, int path) return 0; } -static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, int path) +static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, u64 path) { unsigned int val = 0, mask = 0, reg = 0; bool updated = true; @@ -125,7 +127,7 @@ static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, int path) return 0; } -static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, int path) +static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, u64 path) { unsigned int val = 0; bool updated = true; @@ -163,7 +165,7 @@ static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, int path) return 0; } -static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, int path) +static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, u64 path) { unsigned int val = 0; bool updated = true; @@ -218,7 +220,7 @@ static const struct mtk_eth_muxc mtk_eth_muxc[] = { }, }; -static int mtk_eth_mux_setup(struct mtk_eth *eth, int path) +static int mtk_eth_mux_setup(struct mtk_eth *eth, u64 path) { int i, err = 0; @@ -249,7 +251,7 @@ out: int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id) { - int path; + u64 path; path = (mac_id == 0) ? 
MTK_ETH_PATH_GMAC1_SGMII : MTK_ETH_PATH_GMAC2_SGMII; @@ -260,7 +262,7 @@ int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id) int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id) { - int path = 0; + u64 path = 0; if (mac_id == 1) path = MTK_ETH_PATH_GMAC2_GEPHY; @@ -274,7 +276,7 @@ int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id) int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id) { - int path; + u64 path; path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_RGMII : MTK_ETH_PATH_GMAC2_RGMII; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 2d15342c260a..3cf6589cfdac 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -6,11 +6,12 @@ * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> */ -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/of_address.h> #include <linux/mfd/syscon.h> +#include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/clk.h> #include <linux/pm_runtime.h> @@ -25,6 +26,7 @@ #include <linux/bitfield.h> #include <net/dsa.h> #include <net/dst_metadata.h> +#include <net/page_pool/helpers.h> #include "mtk_eth_soc.h" #include "mtk_wed.h" @@ -152,6 +154,55 @@ static const struct mtk_reg_map mt7986_reg_map = { .pse_oq_sta = 0x01a0, }; +static const struct mtk_reg_map mt7988_reg_map = { + .tx_irq_mask = 0x461c, + .tx_irq_status = 0x4618, + .pdma = { + .rx_ptr = 0x6900, + .rx_cnt_cfg = 0x6904, + .pcrx_ptr = 0x6908, + .glo_cfg = 0x6a04, + .rst_idx = 0x6a08, + .delay_irq = 0x6a0c, + .irq_status = 0x6a20, + .irq_mask = 0x6a28, + .adma_rx_dbg0 = 0x6a38, + .int_grp = 0x6a50, + }, + .qdma = { + .qtx_cfg = 0x4400, + .qtx_sch = 0x4404, + .rx_ptr = 0x4500, + .rx_cnt_cfg = 0x4504, + .qcrx_ptr = 0x4508, + .glo_cfg = 0x4604, + .rst_idx = 0x4608, + .delay_irq = 0x460c, + .fc_th = 0x4610, + .int_grp = 0x4620, + .hred = 0x4644, + .ctx_ptr = 0x4700, + .dtx_ptr = 0x4704, + .crx_ptr = 0x4710, + .drx_ptr = 0x4714, + .fq_head = 0x4720, + .fq_tail = 0x4724, + .fq_count = 0x4728, + .fq_blen = 0x472c, + .tx_sch_rate = 0x4798, + }, + .gdm1_cnt = 0x1c00, + .gdma_to_ppe = 0x3333, + .ppe_base = 0x2000, + .wdma_base = { + [0] = 0x4800, + [1] = 0x4c00, + [2] = 0x5000, + }, + .pse_iq_sta = 0x0180, + .pse_oq_sta = 0x01a0, +}; + /* strings used by ethtool */ static const struct mtk_ethtool_stats { char str[ETH_GSTRING_LEN]; @@ -179,10 +230,54 @@ static const struct mtk_ethtool_stats { }; static const char * const mtk_clks_source_name[] = { - "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll", - "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb", - "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb", - "sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1" + "ethif", + "sgmiitop", + "esw", + "gp0", + "gp1", + "gp2", + "gp3", + "xgp1", + "xgp2", + "xgp3", + "crypto", + "fe", + "trgpll", + "sgmii_tx250m", + "sgmii_rx250m", + "sgmii_cdr_ref", + "sgmii_cdr_fb", + "sgmii2_tx250m", + "sgmii2_rx250m", + "sgmii2_cdr_ref", + "sgmii2_cdr_fb", + "sgmii_ck", + "eth2pll", + "wocpu0", + "wocpu1", + "netsys0", + "netsys1", + "ethwarp_wocpu2", + "ethwarp_wocpu1", + "ethwarp_wocpu0", + "top_usxgmii0_sel", + "top_usxgmii1_sel", + "top_sgm0_sel", + "top_sgm1_sel", + "top_xfi_phy0_xtal_sel", + "top_xfi_phy1_xtal_sel", + "top_eth_gmii_sel", + "top_eth_refck_50m_sel", + "top_eth_sys_200m_sel", + "top_eth_sys_sel", + "top_eth_xgmii_sel", + 
"top_eth_mii_sel", + "top_netsys_sel", + "top_netsys_500m_sel", + "top_netsys_pao_2x_sel", + "top_netsys_sync_250m_sel", + "top_netsys_ppefb_250m_sel", + "top_netsys_warp_sel", }; void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) @@ -195,7 +290,7 @@ u32 mtk_r32(struct mtk_eth *eth, unsigned reg) return __raw_readl(eth->base + reg); } -static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg) +u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg) { u32 val; @@ -385,10 +480,8 @@ static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth, } static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, - phy_interface_t interface, int speed) + phy_interface_t interface) { - unsigned long rate; - u32 tck, rck, intf; int ret; if (interface == PHY_INTERFACE_MODE_TRGMII) { @@ -399,30 +492,20 @@ static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, return; } - if (speed == SPEED_1000) { - intf = INTF_MODE_RGMII_1000; - rate = 250000000; - rck = RCK_CTRL_RGMII_1000; - tck = TCK_CTRL_RGMII_1000; - } else { - intf = INTF_MODE_RGMII_10_100; - rate = 500000000; - rck = RCK_CTRL_RGMII_10_100; - tck = TCK_CTRL_RGMII_10_100; - } - - mtk_w32(eth, intf, INTF_MODE); - - regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0, - ETHSYS_TRGMII_CLK_SEL362_5, - ETHSYS_TRGMII_CLK_SEL362_5); + dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n"); +} - ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], rate); - if (ret) - dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); +static void mtk_setup_bridge_switch(struct mtk_eth *eth) +{ + /* Force Port1 XGMAC Link Up */ + mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID), + MTK_XGMAC_STS(MTK_GMAC1_ID)); - mtk_w32(eth, rck, TRGMII_RCK_CTRL); - mtk_w32(eth, tck, TRGMII_TCK_CTRL); + /* Adjust GSW bridge IPG to 11 */ + mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK, + (GSW_IPG_11 << GSWTX_IPG_SHIFT) | + (GSW_IPG_11 << GSWRX_IPG_SHIFT), + MTK_GSW_CFG); } static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config, @@ -484,6 +567,8 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, goto init_err; } break; + case PHY_INTERFACE_MODE_INTERNAL: + break; default: goto err_phy; } @@ -498,17 +583,8 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, state->interface)) goto err_phy; } else { - /* FIXME: this is incorrect. Not only does it - * use state->speed (which is not guaranteed - * to be correct) but it also makes use of it - * in a code path that will only be reachable - * when the PHY interface mode changes, not - * when the speed changes. Consequently, RGMII - * is probably broken. 
- */ mtk_gmac0_rgmii_adjust(mac->hw, - state->interface, - state->speed); + state->interface); /* mt7623_pad_clk_setup */ for (i = 0 ; i < NUM_TRGMII_CTRL; i++) @@ -562,6 +638,15 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, return; } + /* Setup gmac */ + if (mtk_is_netsys_v3_or_greater(eth) && + mac->interface == PHY_INTERFACE_MODE_INTERNAL) { + mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id)); + mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id)); + + mtk_setup_bridge_switch(eth); + } + return; err_phy: @@ -602,38 +687,6 @@ static int mtk_mac_finish(struct phylink_config *config, unsigned int mode, return 0; } -static void mtk_mac_pcs_get_state(struct phylink_config *config, - struct phylink_link_state *state) -{ - struct mtk_mac *mac = container_of(config, struct mtk_mac, - phylink_config); - u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id)); - - state->link = (pmsr & MAC_MSR_LINK); - state->duplex = (pmsr & MAC_MSR_DPX) >> 1; - - switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) { - case 0: - state->speed = SPEED_10; - break; - case MAC_MSR_SPEED_100: - state->speed = SPEED_100; - break; - case MAC_MSR_SPEED_1000: - state->speed = SPEED_1000; - break; - default: - state->speed = SPEED_UNKNOWN; - break; - } - - state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX); - if (pmsr & MAC_MSR_RX_FC) - state->pause |= MLO_PAUSE_RX; - if (pmsr & MAC_MSR_TX_FC) - state->pause |= MLO_PAUSE_TX; -} - static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { @@ -659,7 +712,7 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx, FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) | FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) | MTK_QTX_SCH_LEAKY_BUCKET_SIZE; - if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + if (mtk_is_netsys_v1(eth)) val |= MTK_QTX_SCH_LEAKY_BUCKET_EN; if (IS_ENABLED(CONFIG_SOC_MT7621)) { @@ -756,7 +809,6 @@ static void mtk_mac_link_up(struct phylink_config *config, static const struct phylink_mac_ops mtk_phylink_ops = { .mac_select_pcs = mtk_mac_select_pcs, - .mac_pcs_get_state = mtk_mac_pcs_get_state, .mac_config = mtk_mac_config, .mac_finish = mtk_mac_finish, .mac_link_down = mtk_mac_link_down, @@ -807,11 +859,15 @@ static int mtk_mdio_init(struct mtk_eth *eth) } divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63); + /* Configure MDC Turbo Mode */ + if (mtk_is_netsys_v3_or_greater(eth)) + mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3); + /* Configure MDC Divider */ - val = mtk_r32(eth, MTK_PPSC); - val &= ~PPSC_MDC_CFG; - val |= FIELD_PREP(PPSC_MDC_CFG, divider) | PPSC_MDC_TURBO; - mtk_w32(eth, val, MTK_PPSC); + val = FIELD_PREP(PPSC_MDC_CFG, divider); + if (!mtk_is_netsys_v3_or_greater(eth)) + val |= PPSC_MDC_TURBO; + mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC); dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider); @@ -943,17 +999,32 @@ void mtk_stats_update_mac(struct mtk_mac *mac) mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs); hw_stats->rx_flow_control_packets += mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs); - hw_stats->tx_skip += - mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs); - hw_stats->tx_collisions += - mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs); - hw_stats->tx_bytes += - mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs); - stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs); - if (stats) - hw_stats->tx_bytes += (stats << 32); - hw_stats->tx_packets += - mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 
+ offs); + + if (mtk_is_netsys_v3_or_greater(eth)) { + hw_stats->tx_skip += + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs); + hw_stats->tx_collisions += + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs); + hw_stats->tx_bytes += + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs); + stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs); + if (stats) + hw_stats->tx_bytes += (stats << 32); + hw_stats->tx_packets += + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs); + } else { + hw_stats->tx_skip += + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs); + hw_stats->tx_collisions += + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs); + hw_stats->tx_bytes += + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs); + stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs); + if (stats) + hw_stats->tx_bytes += (stats << 32); + hw_stats->tx_packets += + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs); + } } u64_stats_update_end(&hw_stats->syncp); @@ -963,7 +1034,7 @@ static void mtk_stats_update(struct mtk_eth *eth) { int i; - for (i = 0; i < MTK_MAC_COUNT; i++) { + for (i = 0; i < MTK_MAX_DEVS; i++) { if (!eth->mac[i] || !eth->mac[i]->hw_stats) continue; if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) { @@ -1037,7 +1108,7 @@ static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd, rxd->rxd1 = READ_ONCE(dma_rxd->rxd1); rxd->rxd3 = READ_ONCE(dma_rxd->rxd3); rxd->rxd4 = READ_ONCE(dma_rxd->rxd4); - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + if (mtk_is_netsys_v2_or_greater(eth)) { rxd->rxd5 = READ_ONCE(dma_rxd->rxd5); rxd->rxd6 = READ_ONCE(dma_rxd->rxd6); } @@ -1065,10 +1136,13 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) dma_addr_t dma_addr; int i; - eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, - cnt * soc->txrx.txd_size, - &eth->phy_scratch_ring, - GFP_KERNEL); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) + eth->scratch_ring = eth->sram_base; + else + eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, + cnt * soc->txrx.txd_size, + &eth->phy_scratch_ring, + GFP_KERNEL); if (unlikely(!eth->scratch_ring)) return -ENOMEM; @@ -1095,7 +1169,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE); txd->txd4 = 0; - if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) { + if (mtk_is_netsys_v2_or_greater(eth)) { txd->txd5 = 0; txd->txd6 = 0; txd->txd7 = 0; @@ -1255,9 +1329,25 @@ static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd, data = TX_DMA_PLEN0(info->size); if (info->last) data |= TX_DMA_LS0; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) + data |= TX_DMA_PREP_ADDR64(info->addr); + WRITE_ONCE(desc->txd3, data); - data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */ + /* set forward port */ + switch (mac->id) { + case MTK_GMAC1_ID: + data = PSE_GDM1_PORT << TX_DMA_FPORT_SHIFT_V2; + break; + case MTK_GMAC2_ID: + data = PSE_GDM2_PORT << TX_DMA_FPORT_SHIFT_V2; + break; + case MTK_GMAC3_ID: + data = PSE_GDM3_PORT << TX_DMA_FPORT_SHIFT_V2; + break; + } + data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid); WRITE_ONCE(desc->txd4, data); @@ -1268,6 +1358,8 @@ static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd, /* tx checksum offload */ if (info->csum) data |= TX_DMA_CHKSUM_V2; + if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev)) + data |= TX_DMA_SPTAG_V3; } WRITE_ONCE(desc->txd5, data); @@ -1286,7 +1378,7 @@ static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd, struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; - if
(MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + if (mtk_is_netsys_v2_or_greater(eth)) mtk_tx_set_dma_desc_v2(dev, txd, info); else mtk_tx_set_dma_desc_v1(dev, txd, info); @@ -1333,8 +1425,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, mtk_tx_set_dma_desc(dev, itxd, &txd_info); itx_buf->flags |= MTK_TX_FLAGS_SINGLE0; - itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 : - MTK_TX_FLAGS_FPORT1; + itx_buf->mac_id = mac->id; setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size, k++); @@ -1382,8 +1473,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, memset(tx_buf, 0, sizeof(*tx_buf)); tx_buf->data = (void *)MTK_DMA_DUMMY_DESC; tx_buf->flags |= MTK_TX_FLAGS_PAGE0; - tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 : - MTK_TX_FLAGS_FPORT1; + tx_buf->mac_id = mac->id; setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr, txd_info.size, k++); @@ -1468,7 +1558,7 @@ static int mtk_queue_stopped(struct mtk_eth *eth) { int i; - for (i = 0; i < MTK_MAC_COUNT; i++) { + for (i = 0; i < MTK_MAX_DEVS; i++) { if (!eth->netdev[i]) continue; if (netif_queue_stopped(eth->netdev[i])) @@ -1482,7 +1572,7 @@ static void mtk_wake_queue(struct mtk_eth *eth) { int i; - for (i = 0; i < MTK_MAC_COUNT; i++) { + for (i = 0; i < MTK_MAX_DEVS; i++) { if (!eth->netdev[i]) continue; netif_tx_wake_all_queues(eth->netdev[i]); @@ -1593,7 +1683,7 @@ static void mtk_update_rx_cpu_idx(struct mtk_eth *eth) static bool mtk_page_pool_enabled(struct mtk_eth *eth) { - return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2); + return mtk_is_netsys_v2_or_greater(eth); } static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth, @@ -1685,7 +1775,7 @@ static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev, } mtk_tx_set_dma_desc(dev, txd, txd_info); - tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1; + tx_buf->mac_id = mac->id; tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX; tx_buf->data = (void *)MTK_DMA_DUMMY_DESC; @@ -1912,14 +2002,15 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, bool xdp_flush = false; int idx; struct sk_buff *skb; + u64 addr64 = 0; u8 *data, *new_data; struct mtk_rx_dma_v2 *rxd, trxd; int done = 0, bytes = 0; + dma_addr_t dma_addr = DMA_MAPPING_ERROR; while (done < budget) { unsigned int pktlen, *rxdcsum; struct net_device *netdev; - dma_addr_t dma_addr; u32 hash, reason; int mac = 0; @@ -1935,13 +2026,26 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, break; /* find out which mac the packet come from. 
values start at 1 */ - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) - mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1; - else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && - !(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) + if (mtk_is_netsys_v2_or_greater(eth)) { + u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5); + + switch (val) { + case PSE_GDM1_PORT: + case PSE_GDM2_PORT: + mac = val - 1; + break; + case PSE_GDM3_PORT: + mac = MTK_GMAC3_ID; + break; + default: + break; + } + } else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && + !(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) { mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1; + } - if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT || + if (unlikely(mac < 0 || mac >= MTK_MAX_DEVS || !eth->netdev[mac])) goto release_desc; @@ -2014,7 +2118,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, goto release_desc; } - dma_unmap_single(eth->dma_dev, trxd.rxd1, + if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) + addr64 = RX_DMA_GET_ADDR64(trxd.rxd2); + + dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64), ring->buf_size, DMA_FROM_DEVICE); skb = build_skb(data, ring->frag_size); @@ -2031,7 +2138,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, skb->dev = netdev; bytes += skb->len; - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + if (mtk_is_netsys_v2_or_greater(eth)) { reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5); hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY; if (hash != MTK_RXD5_FOE_ENTRY) @@ -2056,8 +2163,8 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, /* When using VLAN untagging in combination with DSA, the * hardware treats the MTK special tag as a VLAN and untags it. */ - if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) && - (trxd.rxd2 & RX_DMA_VTAG) && netdev_uses_dsa(netdev)) { + if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) && + netdev_uses_dsa(netdev)) { unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0); if (port < ARRAY_SIZE(eth->dsa_meta) && @@ -2080,6 +2187,10 @@ release_desc: else rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) && + likely(dma_addr != DMA_MAPPING_ERROR)) + rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr); + ring->calc_idx = idx; done++; } @@ -2100,7 +2211,7 @@ rx_done: net_dim(&eth->rx_dim, dim_sample); if (xdp_flush) - xdp_do_flush_map(); + xdp_do_flush(); return done; } @@ -2161,7 +2272,6 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, while ((cpu != dma) && budget) { u32 next_cpu = desc->txd2; - int mac = 0; desc = mtk_qdma_phys_to_virt(ring, desc->txd2); if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0) @@ -2169,15 +2279,13 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, tx_buf = mtk_desc_to_tx_buf(ring, desc, eth->soc->txrx.txd_size); - if (tx_buf->flags & MTK_TX_FLAGS_FPORT1) - mac = 1; - if (!tx_buf->data) break; if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { if (tx_buf->type == MTK_TYPE_SKB) - mtk_poll_tx_done(eth, state, mac, tx_buf->data); + mtk_poll_tx_done(eth, state, tx_buf->mac_id, + tx_buf->data); budget--; } @@ -2354,8 +2462,14 @@ static int mtk_tx_alloc(struct mtk_eth *eth) if (!ring->buf) goto no_tx_mem; - ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz, - &ring->phys, GFP_KERNEL); + if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) { + ring->dma = eth->sram_base + ring_size * sz; + ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz; + } else { + ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz, + &ring->phys, GFP_KERNEL); + } + if (!ring->dma) goto no_tx_mem;
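The MTK_36BIT_DMA handling above splits a 36-bit bus address across the 32-bit descriptor words: bits 31:0 travel in rxd1/txd1 as before, while the TX/RX_DMA_PREP_ADDR64() helpers fold bits 35:32 into a 4-bit field of txd3/rxd2 that TX/RX_DMA_GET_ADDR64() recover on completion (see the ADDR64_MASK definitions in mtk_eth_soc.h further down). A minimal, self-contained sketch of that round trip; the field placement here is simplified and is not the driver's exact bit layout:

#include <stdint.h>
#include <stdio.h>

#define ADDR64_MASK 0xfULL	/* illustrative: the 4 extension bits */

static uint32_t prep_addr64(uint64_t dma_addr)
{
	/* keep only bits 35:32, as the PREP helper does */
	return (uint32_t)((dma_addr >> 32) & ADDR64_MASK);
}

static uint64_t get_addr64(uint32_t lo, uint32_t ext)
{
	/* rebuild the full 36-bit address from the two pieces */
	return (uint64_t)lo | ((uint64_t)(ext & ADDR64_MASK) << 32);
}

int main(void)
{
	uint64_t addr = 0x9abcd1234ULL;	/* hypothetical 36-bit address */
	uint32_t lo = (uint32_t)addr;
	uint32_t ext = prep_addr64(addr);

	printf("lo=0x%08x ext=0x%x rebuilt=0x%llx\n", lo, ext,
	       (unsigned long long)get_addr64(lo, ext));
	return 0;
}

On 32-bit builds the real macros compile to 0, matching the #if IS_ENABLED(CONFIG_64BIT) guards in the header; the extension bits only matter once dma_set_mask_and_coherent(..., DMA_BIT_MASK(36)) in mtk_probe() can succeed.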
@@ -2367,7 +2481,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth) txd->txd2 = next_ptr; txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; txd->txd4 = 0; - if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) { + if (mtk_is_netsys_v2_or_greater(eth)) { txd->txd5 = 0; txd->txd6 = 0; txd->txd7 = 0; @@ -2420,14 +2534,14 @@ static int mtk_tx_alloc(struct mtk_eth *eth) FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) | FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) | MTK_QTX_SCH_LEAKY_BUCKET_SIZE; - if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + if (mtk_is_netsys_v1(eth)) val |= MTK_QTX_SCH_LEAKY_BUCKET_EN; mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs); ofs += MTK_QTX_OFFSET; } val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16); mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate); - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + if (mtk_is_netsys_v2_or_greater(eth)) mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4); } else { mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0); @@ -2454,8 +2568,7 @@ static void mtk_tx_clean(struct mtk_eth *eth) kfree(ring->buf); ring->buf = NULL; } - - if (ring->dma) { + if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) { dma_free_coherent(eth->dma_dev, ring->dma_size * soc->txrx.txd_size, ring->dma, ring->phys); @@ -2474,9 +2587,14 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) { const struct mtk_reg_map *reg_map = eth->soc->reg_map; struct mtk_rx_ring *ring; - int rx_data_len, rx_dma_size; + int rx_data_len, rx_dma_size, tx_ring_size; int i; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + tx_ring_size = MTK_QDMA_RING_SIZE; + else + tx_ring_size = MTK_DMA_SIZE; + if (rx_flag == MTK_RX_FLAGS_QDMA) { if (ring_no) return -EINVAL; @@ -2511,9 +2629,20 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) ring->page_pool = pp; } - ring->dma = dma_alloc_coherent(eth->dma_dev, - rx_dma_size * eth->soc->txrx.rxd_size, - &ring->phys, GFP_KERNEL); + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) || + rx_flag != MTK_RX_FLAGS_NORMAL) { + ring->dma = dma_alloc_coherent(eth->dma_dev, + rx_dma_size * eth->soc->txrx.rxd_size, + &ring->phys, GFP_KERNEL); + } else { + struct mtk_tx_ring *tx_ring = &eth->tx_ring; + + ring->dma = tx_ring->dma + tx_ring_size * + eth->soc->txrx.txd_size * (ring_no + 1); + ring->phys = tx_ring->phys + tx_ring_size * + eth->soc->txrx.txd_size * (ring_no + 1); + } + if (!ring->dma) return -ENOMEM; @@ -2554,9 +2683,12 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) else rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) + rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr); + rxd->rxd3 = 0; rxd->rxd4 = 0; - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + if (mtk_is_netsys_v2_or_greater(eth)) { rxd->rxd5 = 0; rxd->rxd6 = 0; rxd->rxd7 = 0; @@ -2598,8 +2730,9 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) return 0; } -static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring) +static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_sram) { + u64 addr64 = 0; int i; if (ring->data && ring->dma) { @@ -2613,7 +2746,10 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring) if (!rxd->rxd1) continue; - dma_unmap_single(eth->dma_dev, rxd->rxd1, + if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) + addr64 = RX_DMA_GET_ADDR64(rxd->rxd2); + + dma_unmap_single(eth->dma_dev, ((u64)rxd->rxd1 | addr64), ring->buf_size, DMA_FROM_DEVICE); mtk_rx_put_buff(ring, ring->data[i], false); }
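With MTK_SRAM set, the mtk_rx_alloc() hunk above never calls dma_alloc_coherent() for a normal RX ring: ring N is carved out of the same SRAM block, at tx_ring_size * txd_size * (ring_no + 1) bytes past the TX ring. A small sketch of that layout arithmetic with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical TX ring base inside the SRAM window */
	uint64_t tx_ring_phys = 0x15100000ULL;
	unsigned int tx_ring_size = 2048;	/* MTK_QDMA_RING_SIZE */
	unsigned int txd_size = 32;		/* e.g. sizeof(struct mtk_tx_dma_v2) */

	for (unsigned int ring_no = 0; ring_no < 4; ring_no++) {
		/* same arithmetic as the in-SRAM branch of mtk_rx_alloc() */
		uint64_t rx_phys = tx_ring_phys +
			(uint64_t)tx_ring_size * txd_size * (ring_no + 1);
		printf("rx ring %u -> 0x%llx\n", ring_no,
		       (unsigned long long)rx_phys);
	}
	return 0;
}

This is also why mtk_tx_clean() and mtk_rx_clean() above learn to skip dma_free_coherent() for in-SRAM rings: there is no coherent allocation to hand back.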
@@ -2621,7 +2757,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring) ring->data = NULL; } - if (ring->dma) { + if (!in_sram && ring->dma) { dma_free_coherent(eth->dma_dev, ring->dma_size * eth->soc->txrx.rxd_size, ring->dma, ring->phys); @@ -2860,6 +2996,9 @@ static int mtk_hwlro_get_fdir_all(struct net_device *dev, int i; for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + if (mac->hwlro_ip[i]) { rule_locs[cnt] = i; cnt++; @@ -2978,10 +3117,10 @@ static void mtk_dma_free(struct mtk_eth *eth) const struct mtk_soc_data *soc = eth->soc; int i; - for (i = 0; i < MTK_MAC_COUNT; i++) + for (i = 0; i < MTK_MAX_DEVS; i++) if (eth->netdev[i]) netdev_reset_queue(eth->netdev[i]); - if (eth->scratch_ring) { + if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) { dma_free_coherent(eth->dma_dev, MTK_QDMA_RING_SIZE * soc->txrx.txd_size, eth->scratch_ring, eth->phy_scratch_ring); @@ -2989,13 +3128,13 @@ static void mtk_dma_free(struct mtk_eth *eth) eth->phy_scratch_ring = 0; } mtk_tx_clean(eth); - mtk_rx_clean(eth, &eth->rx_ring[0]); - mtk_rx_clean(eth, &eth->rx_ring_qdma); + mtk_rx_clean(eth, &eth->rx_ring[0], MTK_HAS_CAPS(soc->caps, MTK_SRAM)); + mtk_rx_clean(eth, &eth->rx_ring_qdma, false); if (eth->hwlro) { mtk_hwlro_rx_uninit(eth); for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) - mtk_rx_clean(eth, &eth->rx_ring[i]); + mtk_rx_clean(eth, &eth->rx_ring[i], false); } kfree(eth->scratch_head); @@ -3033,8 +3172,8 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth) eth->rx_events++; if (likely(napi_schedule_prep(&eth->rx_napi))) { - __napi_schedule(&eth->rx_napi); mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); + __napi_schedule(&eth->rx_napi); } return IRQ_HANDLED; @@ -3046,8 +3185,8 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth) eth->tx_events++; if (likely(napi_schedule_prep(&eth->tx_napi))) { - __napi_schedule(&eth->tx_napi); mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); + __napi_schedule(&eth->tx_napi); } return IRQ_HANDLED; @@ -3104,7 +3243,7 @@ static int mtk_start_dma(struct mtk_eth *eth) MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO | MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE; - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + if (mtk_is_netsys_v2_or_greater(eth)) val |= MTK_MUTLI_CNT | MTK_RESV_BUF | MTK_WCOMP_EN | MTK_DMAD_WR_WDONE | MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN; @@ -3132,8 +3271,13 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config) if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) return; - for (i = 0; i < MTK_MAC_COUNT; i++) { - u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); + for (i = 0; i < MTK_MAX_DEVS; i++) { + u32 val; + + if (!eth->netdev[i]) + continue; + + val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); /* default setup the forward port to send frame to PDMA */ val &= ~0xffff; @@ -3143,7 +3287,7 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config) val |= config; - if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i])) + if (netdev_uses_dsa(eth->netdev[i])) val |= MTK_GDMA_SPECIAL_TAG; mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); @@ -3185,7 +3329,7 @@ static int mtk_device_event(struct notifier_block *n, unsigned long event, void return NOTIFY_DONE; found: - if (!dsa_slave_dev_check(dev)) + if (!dsa_user_dev_check(dev)) return NOTIFY_DONE; if (__ethtool_get_link_ksettings(dev, &s)) @@ -3250,7 +3394,7 @@ static int mtk_open(struct net_device *dev) phylink_start(mac->phylink); netif_tx_start_all_queues(dev); - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + if (mtk_is_netsys_v2_or_greater(eth)) return 0; if
(mtk_uses_dsa(dev) && !eth->prog) { @@ -3516,19 +3660,34 @@ static void mtk_hw_reset(struct mtk_eth *eth) { u32 val; - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + if (mtk_is_netsys_v2_or_greater(eth)) regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0); + + if (mtk_is_netsys_v3_or_greater(eth)) { + val = RSTCTRL_PPE0_V3; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + val |= RSTCTRL_PPE1_V3; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) + val |= RSTCTRL_PPE2; + + val |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2; + } else if (mtk_is_netsys_v2_or_greater(eth)) { val = RSTCTRL_PPE0_V2; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + val |= RSTCTRL_PPE1; } else { val = RSTCTRL_PPE0; } - if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) - val |= RSTCTRL_PPE1; - ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val); - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + if (mtk_is_netsys_v3_or_greater(eth)) + regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, + 0x6f8ff); + else if (mtk_is_netsys_v2_or_greater(eth)) regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0x3ffffff); } @@ -3554,13 +3713,21 @@ static void mtk_hw_warm_reset(struct mtk_eth *eth) return; } - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + if (mtk_is_netsys_v3_or_greater(eth)) { + rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V3; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + rst_mask |= RSTCTRL_PPE1_V3; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) + rst_mask |= RSTCTRL_PPE2; + + rst_mask |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2; + } else if (mtk_is_netsys_v2_or_greater(eth)) { rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2; - else + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + rst_mask |= RSTCTRL_PPE1; + } else { rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0; - - if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) - rst_mask |= RSTCTRL_PPE1; + } regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask); @@ -3724,7 +3891,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset) else mtk_hw_reset(eth); - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + if (mtk_is_netsys_v2_or_greater(eth)) { /* Set FE to PDMAv2 if necessary */ val = mtk_r32(eth, MTK_FE_GLO_MISC); mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC); @@ -3745,15 +3912,15 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset) * up with the more appropriate value when mtk_mac_config call is being * invoked. 
*/ - for (i = 0; i < MTK_MAC_COUNT; i++) { + for (i = 0; i < MTK_MAX_DEVS; i++) { struct net_device *dev = eth->netdev[i]; - mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i)); - if (dev) { - struct mtk_mac *mac = netdev_priv(dev); + if (!dev) + continue; - mtk_set_mcr_max_rx(mac, dev->mtu + MTK_RX_ETH_HLEN); - } + mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i)); + mtk_set_mcr_max_rx(netdev_priv(dev), + dev->mtu + MTK_RX_ETH_HLEN); } /* Indicates CDM to parse the MTK special tag from CPU @@ -3761,7 +3928,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset) */ val = mtk_r32(eth, MTK_CDMQ_IG_CTRL); mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL); - if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + if (mtk_is_netsys_v1(eth)) { val = mtk_r32(eth, MTK_CDMP_IG_CTRL); mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL); @@ -3783,7 +3950,24 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset) mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4); mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + if (mtk_is_netsys_v3_or_greater(eth)) { + /* PSE should not drop port1, port8 and port9 packets */ + mtk_w32(eth, 0x00000302, PSE_DROP_CFG); + + /* GDM and CDM Threshold */ + mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES); + mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES); + + /* Disable GDM1 RX CRC stripping */ + mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0)); + + /* PSE GDM3 MIB counter has incorrect hw default values, + * so the driver ought to read clear the values beforehand + * in case ethtool retrieve wrong mib values. + */ + for (i = 0; i < 0x80; i += 0x4) + mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i); + } else if (!mtk_is_netsys_v1(eth)) { /* PSE should not drop port8 and port9 packets from WDMA Tx */ mtk_w32(eth, 0x00000300, PSE_DROP_CFG); @@ -3895,11 +4079,17 @@ static void mtk_prepare_for_reset(struct mtk_eth *eth) u32 val; int i; - /* disabe FE P3 and P4 */ - val = mtk_r32(eth, MTK_FE_GLO_CFG) | MTK_FE_LINK_DOWN_P3; - if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) - val |= MTK_FE_LINK_DOWN_P4; - mtk_w32(eth, val, MTK_FE_GLO_CFG); + /* set FE PPE ports link down */ + for (i = MTK_GMAC1_ID; + i <= (mtk_is_netsys_v3_or_greater(eth) ? 
MTK_GMAC3_ID : MTK_GMAC2_ID); + i += 2) { + val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) | MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + val |= MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) + val |= MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT); + mtk_w32(eth, val, MTK_FE_GLO_CFG(i)); + } /* adjust PPE configurations to prepare for reset */ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) @@ -3933,7 +4123,7 @@ static void mtk_pending_work(struct work_struct *work) mtk_prepare_for_reset(eth); /* stop all devices to make sure that dma is properly shut down */ - for (i = 0; i < MTK_MAC_COUNT; i++) { + for (i = 0; i < MTK_MAX_DEVS; i++) { if (!eth->netdev[i] || !netif_running(eth->netdev[i])) continue; @@ -3949,8 +4139,8 @@ static void mtk_pending_work(struct work_struct *work) mtk_hw_init(eth, true); /* restart DMA and enable IRQs */ - for (i = 0; i < MTK_MAC_COUNT; i++) { - if (!test_bit(i, &restart)) + for (i = 0; i < MTK_MAX_DEVS; i++) { + if (!eth->netdev[i] || !test_bit(i, &restart)) continue; if (mtk_open(eth->netdev[i])) { @@ -3960,11 +4150,18 @@ static void mtk_pending_work(struct work_struct *work) } } - /* enabe FE P3 and P4 */ - val = mtk_r32(eth, MTK_FE_GLO_CFG) & ~MTK_FE_LINK_DOWN_P3; - if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) - val &= ~MTK_FE_LINK_DOWN_P4; - mtk_w32(eth, val, MTK_FE_GLO_CFG); + /* set FE PPE ports link up */ + for (i = MTK_GMAC1_ID; + i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID); + i += 2) { + val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) & ~MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2)) + val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT); + + mtk_w32(eth, val, MTK_FE_GLO_CFG(i)); + } clear_bit(MTK_RESETTING, &eth->state); @@ -3977,7 +4174,7 @@ static int mtk_free_dev(struct mtk_eth *eth) { int i; - for (i = 0; i < MTK_MAC_COUNT; i++) { + for (i = 0; i < MTK_MAX_DEVS; i++) { if (!eth->netdev[i]) continue; free_netdev(eth->netdev[i]); @@ -3996,7 +4193,7 @@ static int mtk_unreg_dev(struct mtk_eth *eth) { int i; - for (i = 0; i < MTK_MAC_COUNT; i++) { + for (i = 0; i < MTK_MAX_DEVS; i++) { struct mtk_mac *mac; if (!eth->netdev[i]) continue; @@ -4298,7 +4495,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) } id = be32_to_cpup(_id); - if (id >= MTK_MAC_COUNT) { + if (id >= MTK_MAX_DEVS) { dev_err(eth->dev, "%d is not a valid mac id\n", id); return -EINVAL; } @@ -4346,7 +4543,11 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) } spin_lock_init(&mac->hw_stats->stats_lock); u64_stats_init(&mac->hw_stats->syncp); - mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; + + if (mtk_is_netsys_v3_or_greater(eth)) + mac->hw_stats->reg_offset = id * 0x80; + else + mac->hw_stats->reg_offset = id * 0x40; /* phylink create */ err = of_get_phy_mode(np, &phy_mode); @@ -4361,18 +4562,22 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) mac->phylink_config.dev = &eth->netdev[id]->dev; mac->phylink_config.type = PHYLINK_NETDEV; - /* This driver makes use of state->speed in mac_config */ - mac->phylink_config.legacy_pre_march2020 = true; mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD; - __set_bit(PHY_INTERFACE_MODE_MII, - mac->phylink_config.supported_interfaces); - __set_bit(PHY_INTERFACE_MODE_GMII, - mac->phylink_config.supported_interfaces);
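The mtk_add_mac() hunk above replaces the fixed MTK_STAT_OFFSET stride with a per-generation one: NETSYS v3 spaces each MAC's GDM counter block 0x80 apart, older SoCs 0x40. A worked example of the bases mtk_stats_update_mac() ends up reading once this reg_offset is added to gdm1_cnt, using the mt7988 reg_map's gdm1_cnt = 0x1c00 (other SoCs have their own base):

#include <stdio.h>

int main(void)
{
	unsigned int gdm1_cnt = 0x1c00;	/* mt7988_reg_map above */

	for (int v3 = 0; v3 <= 1; v3++)
		for (int id = 0; id < 3; id++)
			printf("%s mac%d: counter block at 0x%x\n",
			       v3 ? "netsys v3" : "netsys v1/v2", id,
			       gdm1_cnt + id * (v3 ? 0x80 : 0x40));
	return 0;
}

The wider v3 stride lines up with the read-clear loop added in mtk_hw_init(), which walks 0x80 bytes at gdm1_cnt + 0x100, i.e. the GDM3 block (id 2, 2 * 0x80), to flush its incorrect hardware defaults.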
+ /* MT7623 gmac0 is now missing its speed-specific PLL configuration + * in its .mac_config method (since state->speed is not valid there. + * Disable support for MII, GMII and RGMII. + */ + if (!mac->hw->soc->disable_pll_modes || mac->id != 0) { + __set_bit(PHY_INTERFACE_MODE_MII, + mac->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_GMII, + mac->phylink_config.supported_interfaces); - if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) - phy_interface_set_rgmii(mac->phylink_config.supported_interfaces); + if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) + phy_interface_set_rgmii(mac->phylink_config.supported_interfaces); + } if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id) __set_bit(PHY_INTERFACE_MODE_TRGMII, @@ -4396,6 +4601,17 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) mac->phylink_config.supported_interfaces); } + if (mtk_is_netsys_v3_or_greater(mac->hw) && + MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) && + id == MTK_GMAC1_ID) { + mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | + MAC_SYM_PAUSE | + MAC_10000FD; + phy_interface_zero(mac->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_INTERNAL, + mac->phylink_config.supported_interfaces); + } + phylink = phylink_create(&mac->phylink_config, of_fwnode_handle(mac->of_node), phy_mode, &mtk_phylink_ops); @@ -4454,7 +4670,7 @@ void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev) rtnl_lock(); - for (i = 0; i < MTK_MAC_COUNT; i++) { + for (i = 0; i < MTK_MAX_DEVS; i++) { dev = eth->netdev[i]; if (!dev || !(dev->flags & IFF_UP)) @@ -4507,7 +4723,7 @@ static int mtk_sgmii_init(struct mtk_eth *eth) static int mtk_probe(struct platform_device *pdev) { - struct resource *res = NULL; + struct resource *res = NULL, *res_sram; struct device_node *mac_np; struct mtk_eth *eth; int err, i; @@ -4527,6 +4743,28 @@ static int mtk_probe(struct platform_device *pdev) if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) eth->ip_align = NET_IP_ALIGN; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) { + /* SRAM is actual memory and supports transparent access just like DRAM. + * Hence we don't require __iomem being set and don't need to use accessor + * functions to read from or write to SRAM. 
+ */ + if (mtk_is_netsys_v3_or_greater(eth)) { + eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(eth->sram_base)) + return PTR_ERR(eth->sram_base); + } else { + eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET; + } + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36)); + if (err) { + dev_err(&pdev->dev, "Wrong DMA config\n"); + return -EINVAL; + } + } + spin_lock_init(&eth->page_lock); spin_lock_init(&eth->tx_irq_lock); spin_lock_init(&eth->rx_irq_lock); @@ -4584,12 +4822,24 @@ static int mtk_probe(struct platform_device *pdev) } } - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + if (mtk_is_netsys_v2_or_greater(eth)) { res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { err = -EINVAL; goto err_destroy_sgmii; } + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) { + if (mtk_is_netsys_v3_or_greater(eth)) { + res_sram = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res_sram) { + err = -EINVAL; + goto err_destroy_sgmii; + } + eth->phy_scratch_ring = res_sram->start; + } else { + eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET; + } + } } if (eth->soc->offload_version) { @@ -4692,9 +4942,8 @@ static int mtk_probe(struct platform_device *pdev) } if (eth->soc->offload_version) { - u32 num_ppe; + u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1; - num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1; num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe); for (i = 0; i < num_ppe; i++) { u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400; @@ -4754,14 +5003,14 @@ err_destroy_sgmii: return err; } -static int mtk_remove(struct platform_device *pdev) +static void mtk_remove(struct platform_device *pdev) { struct mtk_eth *eth = platform_get_drvdata(pdev); struct mtk_mac *mac; int i; /* stop all devices to make sure that dma is properly shut down */ - for (i = 0; i < MTK_MAC_COUNT; i++) { + for (i = 0; i < MTK_MAX_DEVS; i++) { if (!eth->netdev[i]) continue; mtk_stop(eth->netdev[i]); @@ -4776,8 +5025,6 @@ static int mtk_remove(struct platform_device *pdev) netif_napi_del(&eth->rx_napi); mtk_cleanup(eth); mtk_mdio_cleanup(eth); - - return 0; } static const struct mtk_soc_data mt2701_data = { @@ -4786,6 +5033,7 @@ .hw_features = MTK_HW_FEATURES, .required_clks = MT7623_CLKS_BITMAP, .required_pctl = true, + .version = 1, .txrx = { .txd_size = sizeof(struct mtk_tx_dma), .rxd_size = sizeof(struct mtk_rx_dma), @@ -4802,9 +5050,10 @@ static const struct mtk_soc_data mt7621_data = { .hw_features = MTK_HW_FEATURES, .required_clks = MT7621_CLKS_BITMAP, .required_pctl = false, + .version = 1, .offload_version = 1, .hash_offset = 2, - .foe_entry_size = sizeof(struct mtk_foe_entry) - 16, + .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE, .txrx = { .txd_size = sizeof(struct mtk_tx_dma), .rxd_size = sizeof(struct mtk_rx_dma), @@ -4822,10 +5071,11 @@ static const struct mtk_soc_data mt7622_data = { .hw_features = MTK_HW_FEATURES, .required_clks = MT7622_CLKS_BITMAP, .required_pctl = false, + .version = 1, .offload_version = 2, .hash_offset = 2, .has_accounting = true, - .foe_entry_size = sizeof(struct mtk_foe_entry) - 16, + .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE, .txrx = { .txd_size = sizeof(struct mtk_tx_dma), .rxd_size = sizeof(struct mtk_rx_dma),
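Every mtk_soc_data entry now carries an explicit .version, and the scattered MTK_NETSYS_V2 capability tests give way to the mtk_is_netsys_v1()/v2_or_greater()/v3_or_greater() helpers used throughout this file. Their definitions live in mtk_eth_soc.h; a plausible reading, sketched here with stand-in types rather than the driver's real structures, is a plain comparison on that field:

#include <stdbool.h>

struct soc_data { unsigned char version; };	/* stands in for struct mtk_soc_data */
struct eth_dev { const struct soc_data *soc; };	/* stands in for struct mtk_eth */

static bool is_netsys_v1(const struct eth_dev *eth)
{
	return eth->soc->version == 1;
}

static bool is_netsys_v2_or_greater(const struct eth_dev *eth)
{
	return eth->soc->version >= 2;
}

static bool is_netsys_v3_or_greater(const struct eth_dev *eth)
{
	return eth->soc->version >= 3;
}

Encoding the generation once per SoC keeps checks like the mt7988-only SRAM resource lookup above from needing a fresh capability bit for every generational split.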
@@ -4842,9 +5092,11 @@ static const struct mtk_soc_data mt7623_data = { .hw_features = MTK_HW_FEATURES, .required_clks = MT7623_CLKS_BITMAP, .required_pctl = true, + .version = 1, .offload_version = 1, .hash_offset = 2, - .foe_entry_size = sizeof(struct mtk_foe_entry) - 16, + .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE, + .disable_pll_modes = true, .txrx = { .txd_size = sizeof(struct mtk_tx_dma), .rxd_size = sizeof(struct mtk_rx_dma), @@ -4863,6 +5115,7 @@ static const struct mtk_soc_data mt7629_data = { .required_clks = MT7629_CLKS_BITMAP, .required_pctl = false, .has_accounting = true, + .version = 1, .txrx = { .txd_size = sizeof(struct mtk_tx_dma), .rxd_size = sizeof(struct mtk_rx_dma), @@ -4880,10 +5133,11 @@ static const struct mtk_soc_data mt7981_data = { .hw_features = MTK_HW_FEATURES, .required_clks = MT7981_CLKS_BITMAP, .required_pctl = false, + .version = 2, .offload_version = 2, .hash_offset = 4, - .foe_entry_size = sizeof(struct mtk_foe_entry), .has_accounting = true, + .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE, .txrx = { .txd_size = sizeof(struct mtk_tx_dma_v2), .rxd_size = sizeof(struct mtk_rx_dma_v2), @@ -4901,10 +5155,33 @@ static const struct mtk_soc_data mt7986_data = { .hw_features = MTK_HW_FEATURES, .required_clks = MT7986_CLKS_BITMAP, .required_pctl = false, + .version = 2, + .offload_version = 2, + .hash_offset = 4, + .has_accounting = true, + .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE, + .txrx = { + .txd_size = sizeof(struct mtk_tx_dma_v2), + .rxd_size = sizeof(struct mtk_rx_dma_v2), + .rx_irq_done_mask = MTK_RX_DONE_INT_V2, + .rx_dma_l4_valid = RX_DMA_L4_VALID_V2, + .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, + .dma_len_offset = 8, + }, +}; + +static const struct mtk_soc_data mt7988_data = { + .reg_map = &mt7988_reg_map, + .ana_rgc3 = 0x128, + .caps = MT7988_CAPS, + .hw_features = MTK_HW_FEATURES, + .required_clks = MT7988_CLKS_BITMAP, + .required_pctl = false, + .version = 3, .offload_version = 2, .hash_offset = 4, - .foe_entry_size = sizeof(struct mtk_foe_entry), .has_accounting = true, + .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE, .txrx = { .txd_size = sizeof(struct mtk_tx_dma_v2), .rxd_size = sizeof(struct mtk_rx_dma_v2), @@ -4921,6 +5198,7 @@ static const struct mtk_soc_data rt5350_data = { .hw_features = MTK_HW_FEATURES_MT7628, .required_clks = MT7628_CLKS_BITMAP, .required_pctl = false, + .version = 1, .txrx = { .txd_size = sizeof(struct mtk_tx_dma), .rxd_size = sizeof(struct mtk_rx_dma), @@ -4932,21 +5210,22 @@ static const struct mtk_soc_data rt5350_data = { }; const struct of_device_id of_mtk_match[] = { - { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data}, - { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data}, - { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data}, - { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data}, - { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data}, - { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data}, - { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data}, - { .compatible = "ralink,rt5350-eth", .data = &rt5350_data}, + { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data }, + { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data }, + { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data }, + { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data }, + { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data }, + { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data }, + { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data }, + { .compatible = "mediatek,mt7988-eth", .data = &mt7988_data }, + { .compatible = "ralink,rt5350-eth", .data = &rt5350_data }, {}, }; MODULE_DEVICE_TABLE(of, of_mtk_match); static struct
platform_driver mtk_driver = { .probe = mtk_probe, - .remove = mtk_remove, + .remove_new = mtk_remove, .driver = { .name = "mtk_soc_eth", .of_match_table = of_mtk_match, diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 707445f6bcb1..9ae3b8a71d0e 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -18,7 +18,7 @@ #include <linux/rhashtable.h> #include <linux/dim.h> #include <linux/bitfield.h> -#include <net/page_pool.h> +#include <net/page_pool/types.h> #include <linux/bpf_trace.h> #include "mtk_ppe.h" @@ -33,7 +33,6 @@ #define MTK_TX_DMA_BUF_LEN_V2 0xffff #define MTK_QDMA_RING_SIZE 2048 #define MTK_DMA_SIZE 512 -#define MTK_MAC_COUNT 2 #define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN) #define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN) #define MTK_DMA_DUMMY_DESC 0xffffffff @@ -77,9 +76,8 @@ #define MTK_HW_LRO_SDL_REMAIN_ROOM 1522 /* Frame Engine Global Configuration */ -#define MTK_FE_GLO_CFG 0x00 -#define MTK_FE_LINK_DOWN_P3 BIT(11) -#define MTK_FE_LINK_DOWN_P4 BIT(12) +#define MTK_FE_GLO_CFG(x) (((x) == MTK_GMAC3_ID) ? 0x24 : 0x00) +#define MTK_FE_LINK_DOWN_P(x) BIT(((x) + 8) % 16) /* Frame Engine Global Reset Register */ #define MTK_RST_GL 0x04 @@ -118,19 +116,31 @@ #define MTK_CDMP_EG_CTRL 0x404 /* GDM Exgress Control Register */ -#define MTK_GDMA_FWD_CFG(x) (0x500 + (x * 0x1000)) +#define MTK_GDMA_FWD_CFG(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \ + 0x540 : 0x500 + (_x * 0x1000); }) #define MTK_GDMA_SPECIAL_TAG BIT(24) #define MTK_GDMA_ICS_EN BIT(22) #define MTK_GDMA_TCS_EN BIT(21) #define MTK_GDMA_UCS_EN BIT(20) +#define MTK_GDMA_STRP_CRC BIT(16) #define MTK_GDMA_TO_PDMA 0x0 #define MTK_GDMA_DROP_ALL 0x7777 +/* GDM Egress Control Register */ +#define MTK_GDMA_EG_CTRL(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \ + 0x544 : 0x504 + (_x * 0x1000); }) +#define MTK_GDMA_XGDM_SEL BIT(31) + /* Unicast Filter MAC Address Register - Low */ -#define MTK_GDMA_MAC_ADRL(x) (0x508 + (x * 0x1000)) +#define MTK_GDMA_MAC_ADRL(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \ + 0x548 : 0x508 + (_x * 0x1000); }) /* Unicast Filter MAC Address Register - High */ -#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000)) +#define MTK_GDMA_MAC_ADRH(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? 
\ + 0x54C : 0x50C + (_x * 0x1000); }) + +/* Internal SRAM offset */ +#define MTK_ETH_SRAM_OFFSET 0x40000 /* FE global misc reg*/ #define MTK_FE_GLO_MISC 0x124 @@ -288,8 +298,6 @@ /* QDMA Interrupt grouping registers */ #define MTK_RLS_DONE_INT BIT(0) -#define MTK_STAT_OFFSET 0x40 - /* QDMA TX NUM */ #define QID_BITS_V2(x) (((x) & 0x3f) << 16) #define MTK_QDMA_GMAC2_QID 8 @@ -302,6 +310,8 @@ #define TX_DMA_CHKSUM_V2 (0x7 << 28) #define TX_DMA_TSO_V2 BIT(31) +#define TX_DMA_SPTAG_V3 BIT(27) + /* QDMA V2 descriptor txd4 */ #define TX_DMA_FPORT_SHIFT_V2 8 #define TX_DMA_FPORT_MASK_V2 0xf @@ -321,6 +331,14 @@ #define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len) #define TX_DMA_SWC BIT(14) #define TX_DMA_PQID GENMASK(3, 0) +#define TX_DMA_ADDR64_MASK GENMASK(3, 0) +#if IS_ENABLED(CONFIG_64BIT) +# define TX_DMA_GET_ADDR64(x) (((u64)FIELD_GET(TX_DMA_ADDR64_MASK, (x))) << 32) +# define TX_DMA_PREP_ADDR64(x) FIELD_PREP(TX_DMA_ADDR64_MASK, ((x) >> 32)) +#else +# define TX_DMA_GET_ADDR64(x) (0) +# define TX_DMA_PREP_ADDR64(x) (0) +#endif /* PDMA on MT7628 */ #define TX_DMA_DONE BIT(31) @@ -333,6 +351,14 @@ #define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset) #define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len) #define RX_DMA_VTAG BIT(15) +#define RX_DMA_ADDR64_MASK GENMASK(3, 0) +#if IS_ENABLED(CONFIG_64BIT) +# define RX_DMA_GET_ADDR64(x) (((u64)FIELD_GET(RX_DMA_ADDR64_MASK, (x))) << 32) +# define RX_DMA_PREP_ADDR64(x) FIELD_PREP(RX_DMA_ADDR64_MASK, ((x) >> 32)) +#else +# define RX_DMA_GET_ADDR64(x) (0) +# define RX_DMA_PREP_ADDR64(x) (0) +#endif /* QDMA descriptor rxd3 */ #define RX_DMA_VID(x) ((x) & VLAN_VID_MASK) @@ -389,7 +415,26 @@ #define PHY_IAC_TIMEOUT HZ #define MTK_MAC_MISC 0x1000c +#define MTK_MAC_MISC_V3 0x10010 #define MTK_MUX_TO_ESW BIT(0) +#define MISC_MDC_TURBO BIT(4) + +/* XMAC status registers */ +#define MTK_XGMAC_STS(x) (((x) == MTK_GMAC3_ID) ? 0x1001C : 0x1000C) +#define MTK_XGMAC_FORCE_LINK(x) (((x) == MTK_GMAC2_ID) ? 
BIT(31) : BIT(15)) +#define MTK_USXGMII_PCS_LINK BIT(8) +#define MTK_XGMAC_RX_FC BIT(5) +#define MTK_XGMAC_TX_FC BIT(4) +#define MTK_USXGMII_PCS_MODE GENMASK(3, 1) +#define MTK_XGMAC_LINK_STS BIT(0) + +/* GSW bridge registers */ +#define MTK_GSW_CFG (0x10080) +#define GSWTX_IPG_MASK GENMASK(19, 16) +#define GSWTX_IPG_SHIFT 16 +#define GSWRX_IPG_MASK GENMASK(3, 0) +#define GSWRX_IPG_SHIFT 0 +#define GSW_IPG_11 11 /* Mac control registers */ #define MTK_MAC_MCR(x) (0x10100 + (x * 0x100)) @@ -478,7 +523,7 @@ #define ETHSYS_SYSCFG0 0x14 #define SYSCFG0_GE_MASK 0x3 #define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2))) -#define SYSCFG0_SGMII_MASK GENMASK(9, 8) +#define SYSCFG0_SGMII_MASK GENMASK(9, 7) #define SYSCFG0_SGMII_GMAC1 ((2 << 8) & SYSCFG0_SGMII_MASK) #define SYSCFG0_SGMII_GMAC2 ((3 << 8) & SYSCFG0_SGMII_MASK) #define SYSCFG0_SGMII_GMAC1_V2 BIT(9) @@ -495,9 +540,15 @@ /* ethernet reset control register */ #define ETHSYS_RSTCTRL 0x34 #define RSTCTRL_FE BIT(6) +#define RSTCTRL_WDMA0 BIT(24) +#define RSTCTRL_WDMA1 BIT(25) +#define RSTCTRL_WDMA2 BIT(26) #define RSTCTRL_PPE0 BIT(31) #define RSTCTRL_PPE0_V2 BIT(30) #define RSTCTRL_PPE1 BIT(31) +#define RSTCTRL_PPE0_V3 BIT(29) +#define RSTCTRL_PPE1_V3 BIT(30) +#define RSTCTRL_PPE2 BIT(31) #define RSTCTRL_ETH BIT(23) /* ethernet reset check idle register */ @@ -635,12 +686,6 @@ enum mtk_tx_flags { */ MTK_TX_FLAGS_SINGLE0 = 0x01, MTK_TX_FLAGS_PAGE0 = 0x02, - - /* MTK_TX_FLAGS_FPORTx allows tracking which port the transmitted - * SKB out instead of looking up through hardware TX descriptor. - */ - MTK_TX_FLAGS_FPORT0 = 0x04, - MTK_TX_FLAGS_FPORT1 = 0x08, }; /* This enum allows us to identify how the clock is defined on the array of the @@ -653,6 +698,11 @@ enum mtk_clks_map { MTK_CLK_GP0, MTK_CLK_GP1, MTK_CLK_GP2, + MTK_CLK_GP3, + MTK_CLK_XGP1, + MTK_CLK_XGP2, + MTK_CLK_XGP3, + MTK_CLK_CRYPTO, MTK_CLK_FE, MTK_CLK_TRGPLL, MTK_CLK_SGMII_TX_250M, @@ -669,63 +719,145 @@ enum mtk_clks_map { MTK_CLK_WOCPU1, MTK_CLK_NETSYS0, MTK_CLK_NETSYS1, + MTK_CLK_ETHWARP_WOCPU2, + MTK_CLK_ETHWARP_WOCPU1, + MTK_CLK_ETHWARP_WOCPU0, + MTK_CLK_TOP_USXGMII_SBUS_0_SEL, + MTK_CLK_TOP_USXGMII_SBUS_1_SEL, + MTK_CLK_TOP_SGM_0_SEL, + MTK_CLK_TOP_SGM_1_SEL, + MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL, + MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL, + MTK_CLK_TOP_ETH_GMII_SEL, + MTK_CLK_TOP_ETH_REFCK_50M_SEL, + MTK_CLK_TOP_ETH_SYS_200M_SEL, + MTK_CLK_TOP_ETH_SYS_SEL, + MTK_CLK_TOP_ETH_XGMII_SEL, + MTK_CLK_TOP_ETH_MII_SEL, + MTK_CLK_TOP_NETSYS_SEL, + MTK_CLK_TOP_NETSYS_500M_SEL, + MTK_CLK_TOP_NETSYS_PAO_2X_SEL, + MTK_CLK_TOP_NETSYS_SYNC_250M_SEL, + MTK_CLK_TOP_NETSYS_PPEFB_250M_SEL, + MTK_CLK_TOP_NETSYS_WARP_SEL, MTK_CLK_MAX }; -#define MT7623_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \ - BIT(MTK_CLK_GP1) | BIT(MTK_CLK_GP2) | \ - BIT(MTK_CLK_TRGPLL)) -#define MT7622_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \ - BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \ - BIT(MTK_CLK_GP2) | \ - BIT(MTK_CLK_SGMII_TX_250M) | \ - BIT(MTK_CLK_SGMII_RX_250M) | \ - BIT(MTK_CLK_SGMII_CDR_REF) | \ - BIT(MTK_CLK_SGMII_CDR_FB) | \ - BIT(MTK_CLK_SGMII_CK) | \ - BIT(MTK_CLK_ETH2PLL)) +#define MT7623_CLKS_BITMAP (BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \ + BIT_ULL(MTK_CLK_GP1) | BIT_ULL(MTK_CLK_GP2) | \ + BIT_ULL(MTK_CLK_TRGPLL)) +#define MT7622_CLKS_BITMAP (BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \ + BIT_ULL(MTK_CLK_GP0) | BIT_ULL(MTK_CLK_GP1) | \ + BIT_ULL(MTK_CLK_GP2) | \ + BIT_ULL(MTK_CLK_SGMII_TX_250M) | \ + BIT_ULL(MTK_CLK_SGMII_RX_250M) | \ + BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \ + 
BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \ + BIT_ULL(MTK_CLK_SGMII_CK) | \ + BIT_ULL(MTK_CLK_ETH2PLL)) #define MT7621_CLKS_BITMAP (0) #define MT7628_CLKS_BITMAP (0) -#define MT7629_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \ - BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \ - BIT(MTK_CLK_GP2) | BIT(MTK_CLK_FE) | \ - BIT(MTK_CLK_SGMII_TX_250M) | \ - BIT(MTK_CLK_SGMII_RX_250M) | \ - BIT(MTK_CLK_SGMII_CDR_REF) | \ - BIT(MTK_CLK_SGMII_CDR_FB) | \ - BIT(MTK_CLK_SGMII2_TX_250M) | \ - BIT(MTK_CLK_SGMII2_RX_250M) | \ - BIT(MTK_CLK_SGMII2_CDR_REF) | \ - BIT(MTK_CLK_SGMII2_CDR_FB) | \ - BIT(MTK_CLK_SGMII_CK) | \ - BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP)) -#define MT7981_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \ - BIT(MTK_CLK_WOCPU0) | \ - BIT(MTK_CLK_SGMII_TX_250M) | \ - BIT(MTK_CLK_SGMII_RX_250M) | \ - BIT(MTK_CLK_SGMII_CDR_REF) | \ - BIT(MTK_CLK_SGMII_CDR_FB) | \ - BIT(MTK_CLK_SGMII2_TX_250M) | \ - BIT(MTK_CLK_SGMII2_RX_250M) | \ - BIT(MTK_CLK_SGMII2_CDR_REF) | \ - BIT(MTK_CLK_SGMII2_CDR_FB) | \ - BIT(MTK_CLK_SGMII_CK)) -#define MT7986_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \ - BIT(MTK_CLK_WOCPU1) | BIT(MTK_CLK_WOCPU0) | \ - BIT(MTK_CLK_SGMII_TX_250M) | \ - BIT(MTK_CLK_SGMII_RX_250M) | \ - BIT(MTK_CLK_SGMII_CDR_REF) | \ - BIT(MTK_CLK_SGMII_CDR_FB) | \ - BIT(MTK_CLK_SGMII2_TX_250M) | \ - BIT(MTK_CLK_SGMII2_RX_250M) | \ - BIT(MTK_CLK_SGMII2_CDR_REF) | \ - BIT(MTK_CLK_SGMII2_CDR_FB)) +#define MT7629_CLKS_BITMAP (BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \ + BIT_ULL(MTK_CLK_GP0) | BIT_ULL(MTK_CLK_GP1) | \ + BIT_ULL(MTK_CLK_GP2) | BIT_ULL(MTK_CLK_FE) | \ + BIT_ULL(MTK_CLK_SGMII_TX_250M) | \ + BIT_ULL(MTK_CLK_SGMII_RX_250M) | \ + BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \ + BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \ + BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \ + BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \ + BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \ + BIT_ULL(MTK_CLK_SGMII2_CDR_FB) | \ + BIT_ULL(MTK_CLK_SGMII_CK) | \ + BIT_ULL(MTK_CLK_ETH2PLL) | BIT_ULL(MTK_CLK_SGMIITOP)) +#define MT7981_CLKS_BITMAP (BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_GP2) | \ + BIT_ULL(MTK_CLK_GP1) | \ + BIT_ULL(MTK_CLK_WOCPU0) | \ + BIT_ULL(MTK_CLK_SGMII_TX_250M) | \ + BIT_ULL(MTK_CLK_SGMII_RX_250M) | \ + BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \ + BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \ + BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \ + BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \ + BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \ + BIT_ULL(MTK_CLK_SGMII2_CDR_FB) | \ + BIT_ULL(MTK_CLK_SGMII_CK)) +#define MT7986_CLKS_BITMAP (BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_GP2) | \ + BIT_ULL(MTK_CLK_GP1) | \ + BIT_ULL(MTK_CLK_WOCPU1) | BIT_ULL(MTK_CLK_WOCPU0) | \ + BIT_ULL(MTK_CLK_SGMII_TX_250M) | \ + BIT_ULL(MTK_CLK_SGMII_RX_250M) | \ + BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \ + BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \ + BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \ + BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \ + BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \ + BIT_ULL(MTK_CLK_SGMII2_CDR_FB)) +#define MT7988_CLKS_BITMAP (BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_ESW) | \ + BIT_ULL(MTK_CLK_GP1) | BIT_ULL(MTK_CLK_GP2) | \ + BIT_ULL(MTK_CLK_GP3) | BIT_ULL(MTK_CLK_XGP1) | \ + BIT_ULL(MTK_CLK_XGP2) | BIT_ULL(MTK_CLK_XGP3) | \ + BIT_ULL(MTK_CLK_CRYPTO) | \ + BIT_ULL(MTK_CLK_SGMII_TX_250M) | \ + BIT_ULL(MTK_CLK_SGMII_RX_250M) | \ + BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \ + BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \ + BIT_ULL(MTK_CLK_ETHWARP_WOCPU2) | \ + BIT_ULL(MTK_CLK_ETHWARP_WOCPU1) | \ + BIT_ULL(MTK_CLK_ETHWARP_WOCPU0) | \ + BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_0_SEL) | \ + BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_1_SEL) | \ + 
BIT_ULL(MTK_CLK_TOP_SGM_0_SEL) | \ + BIT_ULL(MTK_CLK_TOP_SGM_1_SEL) | \ + BIT_ULL(MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL) | \ + BIT_ULL(MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL) | \ + BIT_ULL(MTK_CLK_TOP_ETH_GMII_SEL) | \ + BIT_ULL(MTK_CLK_TOP_ETH_REFCK_50M_SEL) | \ + BIT_ULL(MTK_CLK_TOP_ETH_SYS_200M_SEL) | \ + BIT_ULL(MTK_CLK_TOP_ETH_SYS_SEL) | \ + BIT_ULL(MTK_CLK_TOP_ETH_XGMII_SEL) | \ + BIT_ULL(MTK_CLK_TOP_ETH_MII_SEL) | \ + BIT_ULL(MTK_CLK_TOP_NETSYS_SEL) | \ + BIT_ULL(MTK_CLK_TOP_NETSYS_500M_SEL) | \ + BIT_ULL(MTK_CLK_TOP_NETSYS_PAO_2X_SEL) | \ + BIT_ULL(MTK_CLK_TOP_NETSYS_SYNC_250M_SEL) | \ + BIT_ULL(MTK_CLK_TOP_NETSYS_PPEFB_250M_SEL) | \ + BIT_ULL(MTK_CLK_TOP_NETSYS_WARP_SEL)) enum mtk_dev_state { MTK_HW_INIT, MTK_RESETTING }; +/* PSE Port Definition */ +enum mtk_pse_port { + PSE_ADMA_PORT = 0, + PSE_GDM1_PORT, + PSE_GDM2_PORT, + PSE_PPE0_PORT, + PSE_PPE1_PORT, + PSE_QDMA_TX_PORT, + PSE_QDMA_RX_PORT, + PSE_DROP_PORT, + PSE_WDMA0_PORT, + PSE_WDMA1_PORT, + PSE_TDMA_PORT, + PSE_NONE_PORT, + PSE_PPE2_PORT, + PSE_WDMA2_PORT, + PSE_EIP197_PORT, + PSE_GDM3_PORT, + PSE_PORT_MAX +}; + +/* GMAC Identifier */ +enum mtk_gmac_id { + MTK_GMAC1_ID = 0, + MTK_GMAC2_ID, + MTK_GMAC3_ID, + MTK_GMAC_ID_MAX +}; + enum mtk_tx_buf_type { MTK_TYPE_SKB, MTK_TYPE_XDP_TX, @@ -744,7 +876,8 @@ struct mtk_tx_buf { enum mtk_tx_buf_type type; void *data; - u32 flags; + u16 mac_id; + u16 flags; DEFINE_DMA_UNMAP_ADDR(dma_addr0); DEFINE_DMA_UNMAP_LEN(dma_len0); DEFINE_DMA_UNMAP_ADDR(dma_addr1); @@ -820,10 +953,12 @@ enum mkt_eth_capabilities { MTK_SHARED_INT_BIT, MTK_TRGMII_MT7621_CLK_BIT, MTK_QDMA_BIT, - MTK_NETSYS_V2_BIT, MTK_SOC_MT7628_BIT, MTK_RSTCTRL_PPE1_BIT, + MTK_RSTCTRL_PPE2_BIT, MTK_U3_COPHY_V2_BIT, + MTK_SRAM_BIT, + MTK_36BIT_DMA_BIT, /* MUX BITS*/ MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT, @@ -843,42 +978,44 @@ enum mkt_eth_capabilities { }; /* Supported hardware group on SoCs */ -#define MTK_RGMII BIT(MTK_RGMII_BIT) -#define MTK_TRGMII BIT(MTK_TRGMII_BIT) -#define MTK_SGMII BIT(MTK_SGMII_BIT) -#define MTK_ESW BIT(MTK_ESW_BIT) -#define MTK_GEPHY BIT(MTK_GEPHY_BIT) -#define MTK_MUX BIT(MTK_MUX_BIT) -#define MTK_INFRA BIT(MTK_INFRA_BIT) -#define MTK_SHARED_SGMII BIT(MTK_SHARED_SGMII_BIT) -#define MTK_HWLRO BIT(MTK_HWLRO_BIT) -#define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT) -#define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT) -#define MTK_QDMA BIT(MTK_QDMA_BIT) -#define MTK_NETSYS_V2 BIT(MTK_NETSYS_V2_BIT) -#define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT) -#define MTK_RSTCTRL_PPE1 BIT(MTK_RSTCTRL_PPE1_BIT) -#define MTK_U3_COPHY_V2 BIT(MTK_U3_COPHY_V2_BIT) +#define MTK_RGMII BIT_ULL(MTK_RGMII_BIT) +#define MTK_TRGMII BIT_ULL(MTK_TRGMII_BIT) +#define MTK_SGMII BIT_ULL(MTK_SGMII_BIT) +#define MTK_ESW BIT_ULL(MTK_ESW_BIT) +#define MTK_GEPHY BIT_ULL(MTK_GEPHY_BIT) +#define MTK_MUX BIT_ULL(MTK_MUX_BIT) +#define MTK_INFRA BIT_ULL(MTK_INFRA_BIT) +#define MTK_SHARED_SGMII BIT_ULL(MTK_SHARED_SGMII_BIT) +#define MTK_HWLRO BIT_ULL(MTK_HWLRO_BIT) +#define MTK_SHARED_INT BIT_ULL(MTK_SHARED_INT_BIT) +#define MTK_TRGMII_MT7621_CLK BIT_ULL(MTK_TRGMII_MT7621_CLK_BIT) +#define MTK_QDMA BIT_ULL(MTK_QDMA_BIT) +#define MTK_SOC_MT7628 BIT_ULL(MTK_SOC_MT7628_BIT) +#define MTK_RSTCTRL_PPE1 BIT_ULL(MTK_RSTCTRL_PPE1_BIT) +#define MTK_RSTCTRL_PPE2 BIT_ULL(MTK_RSTCTRL_PPE2_BIT) +#define MTK_U3_COPHY_V2 BIT_ULL(MTK_U3_COPHY_V2_BIT) +#define MTK_SRAM BIT_ULL(MTK_SRAM_BIT) +#define MTK_36BIT_DMA BIT_ULL(MTK_36BIT_DMA_BIT) #define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \ - BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT) + BIT_ULL(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT) #define 
MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY \ - BIT(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT) + BIT_ULL(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT) #define MTK_ETH_MUX_U3_GMAC2_TO_QPHY \ - BIT(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT) + BIT_ULL(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT) #define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \ - BIT(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT) + BIT_ULL(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT) #define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII \ - BIT(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT) + BIT_ULL(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT) /* Supported path present on SoCs */ -#define MTK_ETH_PATH_GMAC1_RGMII BIT(MTK_ETH_PATH_GMAC1_RGMII_BIT) -#define MTK_ETH_PATH_GMAC1_TRGMII BIT(MTK_ETH_PATH_GMAC1_TRGMII_BIT) -#define MTK_ETH_PATH_GMAC1_SGMII BIT(MTK_ETH_PATH_GMAC1_SGMII_BIT) -#define MTK_ETH_PATH_GMAC2_RGMII BIT(MTK_ETH_PATH_GMAC2_RGMII_BIT) -#define MTK_ETH_PATH_GMAC2_SGMII BIT(MTK_ETH_PATH_GMAC2_SGMII_BIT) -#define MTK_ETH_PATH_GMAC2_GEPHY BIT(MTK_ETH_PATH_GMAC2_GEPHY_BIT) -#define MTK_ETH_PATH_GDM1_ESW BIT(MTK_ETH_PATH_GDM1_ESW_BIT) +#define MTK_ETH_PATH_GMAC1_RGMII BIT_ULL(MTK_ETH_PATH_GMAC1_RGMII_BIT) +#define MTK_ETH_PATH_GMAC1_TRGMII BIT_ULL(MTK_ETH_PATH_GMAC1_TRGMII_BIT) +#define MTK_ETH_PATH_GMAC1_SGMII BIT_ULL(MTK_ETH_PATH_GMAC1_SGMII_BIT) +#define MTK_ETH_PATH_GMAC2_RGMII BIT_ULL(MTK_ETH_PATH_GMAC2_RGMII_BIT) +#define MTK_ETH_PATH_GMAC2_SGMII BIT_ULL(MTK_ETH_PATH_GMAC2_SGMII_BIT) +#define MTK_ETH_PATH_GMAC2_GEPHY BIT_ULL(MTK_ETH_PATH_GMAC2_GEPHY_BIT) +#define MTK_ETH_PATH_GDM1_ESW BIT_ULL(MTK_ETH_PATH_GDM1_ESW_BIT) #define MTK_GMAC1_RGMII (MTK_ETH_PATH_GMAC1_RGMII | MTK_RGMII) #define MTK_GMAC1_TRGMII (MTK_ETH_PATH_GMAC1_TRGMII | MTK_TRGMII) @@ -934,11 +1071,14 @@ enum mkt_eth_capabilities { #define MT7981_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \ MTK_MUX_U3_GMAC2_TO_QPHY | MTK_U3_COPHY_V2 | \ - MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1) + MTK_RSTCTRL_PPE1 | MTK_SRAM) #define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \ - MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1) + MTK_RSTCTRL_PPE1 | MTK_SRAM) + +#define MT7988_CAPS (MTK_36BIT_DMA | MTK_GDM1_ESW | MTK_QDMA | \ + MTK_RSTCTRL_PPE1 | MTK_RSTCTRL_PPE2 | MTK_SRAM) struct mtk_tx_dma_desc_info { dma_addr_t addr; @@ -992,7 +1132,7 @@ struct mtk_reg_map { u32 gdm1_cnt; u32 gdma_to_ppe; u32 ppe_base; - u32 wdma_base[2]; + u32 wdma_base[3]; u32 pse_iq_sta; u32 pse_oq_sta; }; @@ -1009,6 +1149,7 @@ struct mtk_reg_map { * @required_pctl A bool value to show whether the SoC requires * the extra setup for those pins used by GMAC. * @hash_offset Flow table hash offset. + * @version SoC version. * @foe_entry_size Foe table entry size. * @has_accounting Bool indicating support for accounting of * offloaded flows. 
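The capability and clock bitmaps above are widened from u32 to u64 because the MT7988 additions push enum mkt_eth_capabilities and enum mtk_clks_map past 32 entries; the kernel's BIT(n) expands to (1UL << (n)), which truncates for n >= 32 on 32-bit builds, hence the wholesale switch to BIT_ULL() and the u64 caps/required_clks fields in the next hunk. A minimal sketch of the failure mode and the fix — the EXAMPLE_* names and helper are hypothetical, but the test mirrors the driver's MTK_HAS_CAPS() macro:

#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_CAP_BIT	33	/* any flag index >= 32 */

/* BIT(EXAMPLE_CAP_BIT) would be (1UL << 33): truncated/undefined where
 * unsigned long is 32 bits wide. BIT_ULL() forces a 64-bit constant,
 * which is why every flag in these hunks moves to BIT_ULL().
 */
#define EXAMPLE_CAP	BIT_ULL(EXAMPLE_CAP_BIT)

static inline bool example_has_cap(u64 caps, u64 cap)
{
	return (caps & cap) == cap;	/* same shape as MTK_HAS_CAPS() */
}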
@@ -1022,14 +1163,16 @@ struct mtk_reg_map { struct mtk_soc_data { const struct mtk_reg_map *reg_map; u32 ana_rgc3; - u32 caps; - u32 required_clks; + u64 caps; + u64 required_clks; bool required_pctl; u8 offload_version; u8 hash_offset; + u8 version; u16 foe_entry_size; netdev_features_t hw_features; bool has_accounting; + bool disable_pll_modes; struct { u32 txd_size; u32 rxd_size; @@ -1042,8 +1185,8 @@ struct mtk_soc_data { #define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000) -/* currently no SoC has more than 2 macs */ -#define MTK_MAX_DEVS 2 +/* currently no SoC has more than 3 macs */ +#define MTK_MAX_DEVS 3 /* struct mtk_eth - This is the main datasructure for holding the state * of the driver @@ -1095,6 +1238,7 @@ struct mtk_eth { struct device *dev; struct device *dma_dev; void __iomem *base; + void *sram_base; spinlock_t page_lock; spinlock_t tx_irq_lock; spinlock_t rx_irq_lock; @@ -1182,6 +1326,21 @@ struct mtk_mac { /* the struct describing the SoC. these are declared in the soc_xyz.c files */ extern const struct of_device_id of_mtk_match[]; +static inline bool mtk_is_netsys_v1(struct mtk_eth *eth) +{ + return eth->soc->version == 1; +} + +static inline bool mtk_is_netsys_v2_or_greater(struct mtk_eth *eth) +{ + return eth->soc->version > 1; +} + +static inline bool mtk_is_netsys_v3_or_greater(struct mtk_eth *eth) +{ + return eth->soc->version > 2; +} + static inline struct mtk_foe_entry * mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash) { @@ -1192,7 +1351,7 @@ mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash) static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth) { - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + if (mtk_is_netsys_v2_or_greater(eth)) return MTK_FOE_IB1_BIND_TIMESTAMP_V2; return MTK_FOE_IB1_BIND_TIMESTAMP; @@ -1200,7 +1359,7 @@ static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth) static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth) { - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + if (mtk_is_netsys_v2_or_greater(eth)) return MTK_FOE_IB1_BIND_PPPOE_V2; return MTK_FOE_IB1_BIND_PPPOE; @@ -1208,7 +1367,7 @@ static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth) static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth) { - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + if (mtk_is_netsys_v2_or_greater(eth)) return MTK_FOE_IB1_BIND_VLAN_TAG_V2; return MTK_FOE_IB1_BIND_VLAN_TAG; @@ -1216,7 +1375,7 @@ static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth) static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth) { - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + if (mtk_is_netsys_v2_or_greater(eth)) return MTK_FOE_IB1_BIND_VLAN_LAYER_V2; return MTK_FOE_IB1_BIND_VLAN_LAYER; @@ -1224,7 +1383,7 @@ static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth) static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val) { - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + if (mtk_is_netsys_v2_or_greater(eth)) return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val); return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, val); @@ -1232,7 +1391,7 @@ static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val) static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val) { - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + if (mtk_is_netsys_v2_or_greater(eth)) return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val); return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, val); @@ -1240,7 +1399,7 @@ static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val) static inline u32 
mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth) { - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + if (mtk_is_netsys_v2_or_greater(eth)) return MTK_FOE_IB1_PACKET_TYPE_V2; return MTK_FOE_IB1_PACKET_TYPE; @@ -1248,7 +1407,7 @@ static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth) static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val) { - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + if (mtk_is_netsys_v2_or_greater(eth)) return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE_V2, val); return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, val); @@ -1256,7 +1415,7 @@ static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val) static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth) { - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + if (mtk_is_netsys_v2_or_greater(eth)) return MTK_FOE_IB2_MULTICAST_V2; return MTK_FOE_IB2_MULTICAST; @@ -1267,6 +1426,7 @@ void mtk_stats_update_mac(struct mtk_mac *mac); void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg); u32 mtk_r32(struct mtk_eth *eth, unsigned reg); +u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg); int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id); int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id); diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c index 9129821f3ab8..b2a5d9c3733d 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -92,7 +92,6 @@ static int mtk_ppe_mib_wait_busy(struct mtk_ppe *ppe) static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets) { - u32 byte_cnt_low, byte_cnt_high, pkt_cnt_low, pkt_cnt_high; u32 val, cnt_r0, cnt_r1, cnt_r2; int ret; @@ -107,12 +106,20 @@ static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *p cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1); cnt_r2 = readl(ppe->base + MTK_PPE_MIB_SER_R2); - byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0); - byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1); - pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1); - pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2); - *bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low; - *packets = (pkt_cnt_high << 16) | pkt_cnt_low; + if (mtk_is_netsys_v3_or_greater(ppe->eth)) { + /* 64 bit for each counter */ + u32 cnt_r3 = readl(ppe->base + MTK_PPE_MIB_SER_R3); + *bytes = ((u64)cnt_r1 << 32) | cnt_r0; + *packets = ((u64)cnt_r3 << 32) | cnt_r2; + } else { + /* 48 bit byte counter, 40 bit packet counter */ + u32 byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0); + u32 byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1); + u32 pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1); + u32 pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2); + *bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low; + *packets = ((u64)pkt_cnt_high << 16) | pkt_cnt_low; + } return 0; } @@ -208,7 +215,7 @@ int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry, memset(entry, 0, sizeof(*entry)); - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + if (mtk_is_netsys_v2_or_greater(eth)) { val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) | FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) | FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) | @@ -272,7 +279,7 @@ int mtk_foe_entry_set_pse_port(struct mtk_eth *eth, u32 *ib2 = mtk_foe_entry_ib2(eth, entry); u32 val = *ib2; - if 
(MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + if (mtk_is_netsys_v2_or_greater(eth)) { val &= ~MTK_FOE_IB2_DEST_PORT_V2; val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port); } else { @@ -418,18 +425,29 @@ int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry, } int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry, - int wdma_idx, int txq, int bss, int wcid) + int wdma_idx, int txq, int bss, int wcid, + bool amsdu_en) { struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry); u32 *ib2 = mtk_foe_entry_ib2(eth, entry); - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + switch (eth->soc->version) { + case 3: + *ib2 &= ~MTK_FOE_IB2_PORT_MG_V2; + *ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) | + MTK_FOE_IB2_WDMA_WINFO_V2; + l2->w3info = FIELD_PREP(MTK_FOE_WINFO_WCID_V3, wcid) | + FIELD_PREP(MTK_FOE_WINFO_BSS_V3, bss); + l2->amsdu = FIELD_PREP(MTK_FOE_WINFO_AMSDU_EN, amsdu_en); + break; + case 2: *ib2 &= ~MTK_FOE_IB2_PORT_MG_V2; *ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) | MTK_FOE_IB2_WDMA_WINFO_V2; l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) | FIELD_PREP(MTK_FOE_WINFO_BSS, bss); - } else { + break; + default: *ib2 &= ~MTK_FOE_IB2_PORT_MG; *ib2 |= MTK_FOE_IB2_WDMA_WINFO; if (wdma_idx) @@ -437,6 +455,7 @@ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry, l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) | FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) | FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq); + break; } return 0; @@ -447,7 +466,7 @@ int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry, { u32 *ib2 = mtk_foe_entry_ib2(eth, entry); - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + if (mtk_is_netsys_v2_or_greater(eth)) { *ib2 &= ~MTK_FOE_IB2_QID_V2; *ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue); *ib2 |= MTK_FOE_IB2_PSE_QOS_V2; @@ -603,7 +622,7 @@ __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, struct mtk_foe_entry *hwe; u32 val; - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + if (mtk_is_netsys_v2_or_greater(eth)) { entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2; entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2, timestamp); @@ -619,7 +638,7 @@ __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, hwe->ib1 = entry->ib1; if (ppe->accounting) { - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + if (mtk_is_netsys_v2_or_greater(eth)) val = MTK_FOE_IB2_MIB_CNT_V2; else val = MTK_FOE_IB2_MIB_CNT; @@ -964,8 +983,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe) mtk_ppe_init_foe_table(ppe); ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys); - val = MTK_PPE_TB_CFG_ENTRY_80B | - MTK_PPE_TB_CFG_AGE_NON_L4 | + val = MTK_PPE_TB_CFG_AGE_NON_L4 | MTK_PPE_TB_CFG_AGE_UNBIND | MTK_PPE_TB_CFG_AGE_TCP | MTK_PPE_TB_CFG_AGE_UDP | @@ -979,8 +997,10 @@ void mtk_ppe_start(struct mtk_ppe *ppe) MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) | FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM, MTK_PPE_ENTRIES_SHIFT); - if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) + if (mtk_is_netsys_v2_or_greater(ppe->eth)) val |= MTK_PPE_TB_CFG_INFO_SEL; + if (!mtk_is_netsys_v3_or_greater(ppe->eth)) + val |= MTK_PPE_TB_CFG_ENTRY_80B; ppe_w32(ppe, MTK_PPE_TB_CFG, val); ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK, @@ -995,7 +1015,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe) MTK_PPE_FLOW_CFG_IP4_NAPT | MTK_PPE_FLOW_CFG_IP4_DSLITE | MTK_PPE_FLOW_CFG_IP4_NAT_FRAG; - if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) + if (mtk_is_netsys_v2_or_greater(ppe->eth)) val |= MTK_PPE_MD_TOAP_BYP_CRSN0 | 
MTK_PPE_MD_TOAP_BYP_CRSN1 | MTK_PPE_MD_TOAP_BYP_CRSN2 | @@ -1037,7 +1057,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe) ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0); - if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) { + if (mtk_is_netsys_v2_or_greater(ppe->eth)) { ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777); ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f); } diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h index e51de31a52ec..691806bca372 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.h +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h @@ -85,6 +85,17 @@ enum { #define MTK_FOE_WINFO_BSS GENMASK(5, 0) #define MTK_FOE_WINFO_WCID GENMASK(15, 6) +#define MTK_FOE_WINFO_BSS_V3 GENMASK(23, 16) +#define MTK_FOE_WINFO_WCID_V3 GENMASK(15, 0) + +#define MTK_FOE_WINFO_AMSDU_USR_INFO GENMASK(15, 0) +#define MTK_FOE_WINFO_AMSDU_TID GENMASK(19, 16) +#define MTK_FOE_WINFO_AMSDU_IS_FIXEDRATE BIT(20) +#define MTK_FOE_WINFO_AMSDU_IS_PRIOR BIT(21) +#define MTK_FOE_WINFO_AMSDU_IS_SP BIT(22) +#define MTK_FOE_WINFO_AMSDU_HF BIT(23) +#define MTK_FOE_WINFO_AMSDU_EN BIT(24) + enum { MTK_FOE_STATE_INVALID, MTK_FOE_STATE_UNBIND, @@ -106,8 +117,13 @@ struct mtk_foe_mac_info { u16 pppoe_id; u16 src_mac_lo; + /* netsys_v2 */ u16 minfo; u16 winfo; + + /* netsys_v3 */ + u32 w3info; + u32 amsdu; }; /* software-only entry type */ @@ -216,6 +232,10 @@ struct mtk_foe_ipv6_6rd { struct mtk_foe_mac_info l2; }; +#define MTK_FOE_ENTRY_V1_SIZE 80 +#define MTK_FOE_ENTRY_V2_SIZE 96 +#define MTK_FOE_ENTRY_V3_SIZE 128 + struct mtk_foe_entry { u32 ib1; @@ -225,7 +245,7 @@ struct mtk_foe_entry { struct mtk_foe_ipv4_dslite dslite; struct mtk_foe_ipv6 ipv6; struct mtk_foe_ipv6_6rd ipv6_6rd; - u32 data[23]; + u32 data[31]; }; }; @@ -372,7 +392,8 @@ int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry, int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry, int sid); int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry, - int wdma_idx, int txq, int bss, int wcid); + int wdma_idx, int txq, int bss, int wcid, + bool amsdu_en); int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry, unsigned int queue); int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c index 02eebff02d45..fbb5e9d5af13 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -111,6 +111,7 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i info->queue = path->mtk_wdma.queue; info->bss = path->mtk_wdma.bss; info->wcid = path->mtk_wdma.wcid; + info->amsdu = path->mtk_wdma.amsdu; return 0; } @@ -174,7 +175,7 @@ mtk_flow_get_dsa_port(struct net_device **dev) if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK) return -ENODEV; - *dev = dsa_port_to_master(dp); + *dev = dsa_port_to_conduit(dp); return dp->index; #else @@ -192,14 +193,17 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe, if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) { mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue, - info.bss, info.wcid); - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + info.bss, info.wcid, info.amsdu); + if (mtk_is_netsys_v2_or_greater(eth)) { switch (info.wdma_idx) { case 0: - pse_port = 8; + pse_port = PSE_WDMA0_PORT; break; case 1: - pse_port = 9; + pse_port = PSE_WDMA1_PORT; + break; + 
case 2: + pse_port = PSE_WDMA2_PORT; break; default: return -EINVAL; @@ -214,9 +218,11 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe, dsa_port = mtk_flow_get_dsa_port(&dev); if (dev == eth->netdev[0]) - pse_port = 1; + pse_port = PSE_GDM1_PORT; else if (dev == eth->netdev[1]) - pse_port = 2; + pse_port = PSE_GDM2_PORT; + else if (dev == eth->netdev[2]) + pse_port = PSE_GDM3_PORT; else return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h index a2e61b3eb006..3ce088eef0ef 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h +++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h @@ -163,6 +163,8 @@ enum { #define MTK_PPE_MIB_SER_R2 0x348 #define MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH GENMASK(23, 0) +#define MTK_PPE_MIB_SER_R3 0x34c + #define MTK_PPE_MIB_CACHE_CTL 0x350 #define MTK_PPE_MIB_CACHE_CTL_EN BIT(0) #define MTK_PPE_MIB_CACHE_CTL_FLUSH BIT(2) diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c index 02c03325911f..31aebeb2e285 100644 --- a/drivers/net/ethernet/mediatek/mtk_star_emac.c +++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c @@ -17,7 +17,6 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/platform_device.h> diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c index 3b651efcc25e..9a6744c0d458 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed.c +++ b/drivers/net/ethernet/mediatek/mtk_wed.c @@ -2,6 +2,7 @@ /* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */ #include <linux/kernel.h> +#include <linux/platform_device.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/bitfield.h> @@ -16,17 +17,21 @@ #include <net/flow_offload.h> #include <net/pkt_cls.h> #include "mtk_eth_soc.h" -#include "mtk_wed_regs.h" #include "mtk_wed.h" #include "mtk_ppe.h" #include "mtk_wed_wo.h" #define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000) -#define MTK_WED_PKT_SIZE 1900 +#define MTK_WED_PKT_SIZE 1920 #define MTK_WED_BUF_SIZE 2048 +#define MTK_WED_PAGE_BUF_SIZE 128 #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048) +#define MTK_WED_RX_BUF_PER_PAGE (PAGE_SIZE / MTK_WED_PAGE_BUF_SIZE) #define MTK_WED_RX_RING_SIZE 1536 +#define MTK_WED_RX_PG_BM_CNT 8192 +#define MTK_WED_AMSDU_BUF_SIZE (PAGE_SIZE << 4) +#define MTK_WED_AMSDU_NPAGES 32 #define MTK_WED_TX_RING_SIZE 2048 #define MTK_WED_WDMA_RING_SIZE 1024 @@ -40,7 +45,10 @@ #define MTK_WED_RRO_QUE_CNT 8192 #define MTK_WED_MIOD_ENTRY_CNT 128 -static struct mtk_wed_hw *hw_list[2]; +#define MTK_WED_TX_BM_DMA_SIZE 65536 +#define MTK_WED_TX_BM_PKT_CNT 32768 + +static struct mtk_wed_hw *hw_list[3]; static DEFINE_MUTEX(hw_lock); struct mtk_wed_flow_block_priv { @@ -48,6 +56,39 @@ struct mtk_wed_flow_block_priv { struct net_device *dev; }; +static const struct mtk_wed_soc_data mt7622_data = { + .regmap = { + .tx_bm_tkid = 0x088, + .wpdma_rx_ring0 = 0x770, + .reset_idx_tx_mask = GENMASK(3, 0), + .reset_idx_rx_mask = GENMASK(17, 16), + }, + .tx_ring_desc_size = sizeof(struct mtk_wdma_desc), + .wdma_desc_size = sizeof(struct mtk_wdma_desc), +}; + +static const struct mtk_wed_soc_data mt7986_data = { + .regmap = { + .tx_bm_tkid = 0x0c8, + .wpdma_rx_ring0 = 0x770, + .reset_idx_tx_mask = GENMASK(1, 0), + .reset_idx_rx_mask = GENMASK(7, 6), + }, + .tx_ring_desc_size = sizeof(struct mtk_wdma_desc), + .wdma_desc_size = 2 * sizeof(struct 
mtk_wdma_desc), +}; + +static const struct mtk_wed_soc_data mt7988_data = { + .regmap = { + .tx_bm_tkid = 0x0c8, + .wpdma_rx_ring0 = 0x7d0, + .reset_idx_tx_mask = GENMASK(1, 0), + .reset_idx_rx_mask = GENMASK(7, 6), + }, + .tx_ring_desc_size = sizeof(struct mtk_wed_bm_desc), + .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc), +}; + static void wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) { @@ -108,6 +149,90 @@ mtk_wdma_read_reset(struct mtk_wed_device *dev) return wdma_r32(dev, MTK_WDMA_GLO_CFG); } +static void +mtk_wdma_v3_rx_reset(struct mtk_wed_device *dev) +{ + u32 status; + + if (!mtk_wed_is_v3_or_greater(dev->hw)) + return; + + wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); + wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY), + 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG)) + dev_err(dev->hw->dev, "rx reset failed\n"); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY), + 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG)) + dev_err(dev->hw->dev, "rx reset failed\n"); + + wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); + wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY), + 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG)) + dev_err(dev->hw->dev, "rx reset failed\n"); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY), + 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG)) + dev_err(dev->hw->dev, "rx reset failed\n"); + + /* prefetch FIFO */ + wdma_w32(dev, MTK_WDMA_PREF_RX_FIFO_CFG, + MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR | + MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR); + wdma_clr(dev, MTK_WDMA_PREF_RX_FIFO_CFG, + MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR | + MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR); + + /* core FIFO */ + wdma_w32(dev, MTK_WDMA_XDMA_RX_FIFO_CFG, + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR); + wdma_clr(dev, MTK_WDMA_XDMA_RX_FIFO_CFG, + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR | + MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR); + + /* writeback FIFO */ + wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0), + MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); + wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1), + MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); + + wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0), + MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); + wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1), + MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); + + /* prefetch ring status */ + wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG, + MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR); + wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG, + MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR); + + /* writeback ring status */ + wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG, + MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR); + wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG, + MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR); +} + static int mtk_wdma_rx_reset(struct mtk_wed_device *dev) { 
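The new v3 reset helpers above lean on one polling idiom throughout: read_poll_timeout() from <linux/iopoll.h> re-invokes a read accessor until the busy bit clears or a microsecond budget expires. A minimal sketch of that pattern as used here — the wrapper name is hypothetical, while wdma_r32() and the argument order follow the calls visible in mtk_wdma_v3_rx_reset() and mtk_wdma_v3_tx_reset():

#include <linux/iopoll.h>

/* Poll in a tight loop (sleep_us = 0) for at most 10 ms, with no sleep
 * before the first read. Returns 0 once the busy bit clears, or
 * -ETIMEDOUT so the caller can print the "reset failed" message.
 */
static int example_wait_idle(struct mtk_wed_device *dev, u32 reg, u32 busy)
{
	u32 status;

	return read_poll_timeout(wdma_r32, status, !(status & busy),
				 0, 10000, false, dev, reg);
}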
@@ -120,6 +245,7 @@ mtk_wdma_rx_reset(struct mtk_wed_device *dev) if (ret) dev_err(dev->hw->dev, "rx reset failed\n"); + mtk_wdma_v3_rx_reset(dev); wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); @@ -134,6 +260,101 @@ mtk_wdma_rx_reset(struct mtk_wed_device *dev) return ret; } +static u32 +mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) +{ + return !!(wed_r32(dev, reg) & mask); +} + +static int +mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) +{ + int sleep = 15000; + int timeout = 100 * sleep; + u32 val; + + return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, + timeout, false, dev, reg, mask); +} + +static void +mtk_wdma_v3_tx_reset(struct mtk_wed_device *dev) +{ + u32 status; + + if (!mtk_wed_is_v3_or_greater(dev->hw)) + return; + + wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); + wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY), + 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG)) + dev_err(dev->hw->dev, "tx reset failed\n"); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY), + 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG)) + dev_err(dev->hw->dev, "tx reset failed\n"); + + wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); + wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY), + 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG)) + dev_err(dev->hw->dev, "tx reset failed\n"); + + if (read_poll_timeout(wdma_r32, status, + !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY), + 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG)) + dev_err(dev->hw->dev, "tx reset failed\n"); + + /* prefetch FIFO */ + wdma_w32(dev, MTK_WDMA_PREF_TX_FIFO_CFG, + MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR | + MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR); + wdma_clr(dev, MTK_WDMA_PREF_TX_FIFO_CFG, + MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR | + MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR); + + /* core FIFO */ + wdma_w32(dev, MTK_WDMA_XDMA_TX_FIFO_CFG, + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR | + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR | + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR | + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR); + wdma_clr(dev, MTK_WDMA_XDMA_TX_FIFO_CFG, + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR | + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR | + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR | + MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR); + + /* writeback FIFO */ + wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0), + MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); + wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1), + MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); + + wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0), + MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); + wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1), + MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); + + /* prefetch ring status */ + wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG, + MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR); + wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG, + MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR); + + /* writeback ring status */ + wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG, + MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR); + wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG, + MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR); +} + static void mtk_wdma_tx_reset(struct mtk_wed_device *dev) { @@ -145,6 +366,7 @@ mtk_wdma_tx_reset(struct mtk_wed_device *dev) !(status & mask), 0, 10000)) 
dev_err(dev->hw->dev, "tx reset failed\n"); + mtk_wdma_v3_tx_reset(dev); wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX); wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); @@ -277,7 +499,7 @@ mtk_wed_assign(struct mtk_wed_device *dev) if (!hw->wed_dev) goto out; - if (hw->version == 1) + if (mtk_wed_is_v1(hw)) return NULL; /* MT7986 WED devices do not have any pcie slot restrictions */ @@ -297,35 +519,152 @@ out: } static int +mtk_wed_amsdu_buffer_alloc(struct mtk_wed_device *dev) +{ + struct mtk_wed_hw *hw = dev->hw; + struct mtk_wed_amsdu *wed_amsdu; + int i; + + if (!mtk_wed_is_v3_or_greater(hw)) + return 0; + + wed_amsdu = devm_kcalloc(hw->dev, MTK_WED_AMSDU_NPAGES, + sizeof(*wed_amsdu), GFP_KERNEL); + if (!wed_amsdu) + return -ENOMEM; + + for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) { + void *ptr; + + /* each segment is 64K */ + ptr = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | + __GFP_ZERO | __GFP_COMP | + GFP_DMA32, + get_order(MTK_WED_AMSDU_BUF_SIZE)); + if (!ptr) + goto error; + + wed_amsdu[i].txd = ptr; + wed_amsdu[i].txd_phy = dma_map_single(hw->dev, ptr, + MTK_WED_AMSDU_BUF_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(hw->dev, wed_amsdu[i].txd_phy)) + goto error; + } + dev->hw->wed_amsdu = wed_amsdu; + + return 0; + +error: + for (i--; i >= 0; i--) + dma_unmap_single(hw->dev, wed_amsdu[i].txd_phy, + MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE); + return -ENOMEM; +} + +static void +mtk_wed_amsdu_free_buffer(struct mtk_wed_device *dev) +{ + struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu; + int i; + + if (!wed_amsdu) + return; + + for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) { + dma_unmap_single(dev->hw->dev, wed_amsdu[i].txd_phy, + MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE); + free_pages((unsigned long)wed_amsdu[i].txd, + get_order(MTK_WED_AMSDU_BUF_SIZE)); + } +} + +static int +mtk_wed_amsdu_init(struct mtk_wed_device *dev) +{ + struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu; + int i, ret; + + if (!wed_amsdu) + return 0; + + for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) + wed_w32(dev, MTK_WED_AMSDU_HIFTXD_BASE_L(i), + wed_amsdu[i].txd_phy); + + /* init all sta parameter */ + wed_w32(dev, MTK_WED_AMSDU_STA_INFO_INIT, MTK_WED_AMSDU_STA_RMVL | + MTK_WED_AMSDU_STA_WTBL_HDRT_MODE | + FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_LEN, + dev->wlan.amsdu_max_len >> 8) | + FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_NUM, + dev->wlan.amsdu_max_subframes)); + + wed_w32(dev, MTK_WED_AMSDU_STA_INFO, MTK_WED_AMSDU_STA_INFO_DO_INIT); + + ret = mtk_wed_poll_busy(dev, MTK_WED_AMSDU_STA_INFO, + MTK_WED_AMSDU_STA_INFO_DO_INIT); + if (ret) { + dev_err(dev->hw->dev, "amsdu initialization failed\n"); + return ret; + } + + /* init partial amsdu offload txd src */ + wed_set(dev, MTK_WED_AMSDU_HIFTXD_CFG, + FIELD_PREP(MTK_WED_AMSDU_HIFTXD_SRC, dev->hw->index)); + + /* init qmem */ + wed_set(dev, MTK_WED_AMSDU_PSE, MTK_WED_AMSDU_PSE_RESET); + ret = mtk_wed_poll_busy(dev, MTK_WED_MON_AMSDU_QMEM_STS1, BIT(29)); + if (ret) { + pr_info("%s: amsdu qmem initialization failed\n", __func__); + return ret; + } + + /* eagle E1 PCIE1 tx ring 22 flow control issue */ + if (dev->wlan.id == 0x7991) + wed_clr(dev, MTK_WED_AMSDU_FIFO, MTK_WED_AMSDU_IS_PRIOR0_RING); + + wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN); + + return 0; +} + +static int mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev) { - struct mtk_wdma_desc *desc; - dma_addr_t desc_phys; - void **page_list; + u32 desc_size = dev->hw->soc->tx_ring_desc_size; + int i, page_idx = 0, n_pages, ring_size; int token = dev->wlan.token_start; - int ring_size; - int 
n_pages; - int i, page_idx; + struct mtk_wed_buf *page_list; + dma_addr_t desc_phys; + void *desc_ptr; - ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); - n_pages = ring_size / MTK_WED_BUF_PER_PAGE; + if (!mtk_wed_is_v3_or_greater(dev->hw)) { + ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); + dev->tx_buf_ring.size = ring_size; + } else { + dev->tx_buf_ring.size = MTK_WED_TX_BM_DMA_SIZE; + ring_size = MTK_WED_TX_BM_PKT_CNT; + } + n_pages = dev->tx_buf_ring.size / MTK_WED_BUF_PER_PAGE; page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL); if (!page_list) return -ENOMEM; - dev->tx_buf_ring.size = ring_size; dev->tx_buf_ring.pages = page_list; - desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc), - &desc_phys, GFP_KERNEL); - if (!desc) + desc_ptr = dma_alloc_coherent(dev->hw->dev, + dev->tx_buf_ring.size * desc_size, + &desc_phys, GFP_KERNEL); + if (!desc_ptr) return -ENOMEM; - dev->tx_buf_ring.desc = desc; + dev->tx_buf_ring.desc = desc_ptr; dev->tx_buf_ring.desc_phys = desc_phys; - for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) { + for (i = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) { dma_addr_t page_phys, buf_phys; struct page *page; void *buf; @@ -342,7 +681,8 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev) return -ENOMEM; } - page_list[page_idx++] = page; + page_list[page_idx].p = page; + page_list[page_idx++].phy_addr = page_phys; dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE, DMA_BIDIRECTIONAL); @@ -350,28 +690,31 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev) buf_phys = page_phys; for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) { - u32 txd_size; - u32 ctrl; - - txd_size = dev->wlan.init_buf(buf, buf_phys, token++); + struct mtk_wdma_desc *desc = desc_ptr; desc->buf0 = cpu_to_le32(buf_phys); - desc->buf1 = cpu_to_le32(buf_phys + txd_size); - - if (dev->hw->version == 1) - ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) | - FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, - MTK_WED_BUF_SIZE - txd_size) | - MTK_WDMA_DESC_CTRL_LAST_SEG1; - else - ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) | - FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2, - MTK_WED_BUF_SIZE - txd_size) | - MTK_WDMA_DESC_CTRL_LAST_SEG0; - desc->ctrl = cpu_to_le32(ctrl); - desc->info = 0; - desc++; - + if (!mtk_wed_is_v3_or_greater(dev->hw)) { + u32 txd_size, ctrl; + + txd_size = dev->wlan.init_buf(buf, buf_phys, + token++); + desc->buf1 = cpu_to_le32(buf_phys + txd_size); + ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size); + if (mtk_wed_is_v1(dev->hw)) + ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG1 | + FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, + MTK_WED_BUF_SIZE - txd_size); + else + ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG0 | + FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2, + MTK_WED_BUF_SIZE - txd_size); + desc->ctrl = cpu_to_le32(ctrl); + desc->info = 0; + } else { + desc->ctrl = cpu_to_le32(token << 16); + } + + desc_ptr += desc_size; buf += MTK_WED_BUF_SIZE; buf_phys += MTK_WED_BUF_SIZE; } @@ -386,42 +729,103 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev) static void mtk_wed_free_tx_buffer(struct mtk_wed_device *dev) { - struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc; - void **page_list = dev->tx_buf_ring.pages; - int page_idx; - int i; + struct mtk_wed_buf *page_list = dev->tx_buf_ring.pages; + struct mtk_wed_hw *hw = dev->hw; + int i, page_idx = 0; if (!page_list) return; - if (!desc) + if (!dev->tx_buf_ring.desc) goto free_pagelist; - for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size; - i += MTK_WED_BUF_PER_PAGE) { - void *page = 
page_list[page_idx++]; - dma_addr_t buf_addr; + for (i = 0; i < dev->tx_buf_ring.size; i += MTK_WED_BUF_PER_PAGE) { + dma_addr_t page_phy = page_list[page_idx].phy_addr; + void *page = page_list[page_idx++].p; if (!page) break; - buf_addr = le32_to_cpu(desc[i].buf0); - dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE, + dma_unmap_page(dev->hw->dev, page_phy, PAGE_SIZE, DMA_BIDIRECTIONAL); __free_page(page); } - dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc), - desc, dev->tx_buf_ring.desc_phys); + dma_free_coherent(dev->hw->dev, + dev->tx_buf_ring.size * hw->soc->tx_ring_desc_size, + dev->tx_buf_ring.desc, + dev->tx_buf_ring.desc_phys); free_pagelist: kfree(page_list); } static int +mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev) +{ + int n_pages = MTK_WED_RX_PG_BM_CNT / MTK_WED_RX_BUF_PER_PAGE; + struct mtk_wed_buf *page_list; + struct mtk_wed_bm_desc *desc; + dma_addr_t desc_phys; + int i, page_idx = 0; + + if (!dev->wlan.hw_rro) + return 0; + + page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL); + if (!page_list) + return -ENOMEM; + + dev->hw_rro.size = dev->wlan.rx_nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); + dev->hw_rro.pages = page_list; + desc = dma_alloc_coherent(dev->hw->dev, + dev->wlan.rx_nbuf * sizeof(*desc), + &desc_phys, GFP_KERNEL); + if (!desc) + return -ENOMEM; + + dev->hw_rro.desc = desc; + dev->hw_rro.desc_phys = desc_phys; + + for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) { + dma_addr_t page_phys, buf_phys; + struct page *page; + int s; + + page = __dev_alloc_page(GFP_KERNEL); + if (!page) + return -ENOMEM; + + page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev->hw->dev, page_phys)) { + __free_page(page); + return -ENOMEM; + } + + page_list[page_idx].p = page; + page_list[page_idx++].phy_addr = page_phys; + dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE, + DMA_BIDIRECTIONAL); + + buf_phys = page_phys; + for (s = 0; s < MTK_WED_RX_BUF_PER_PAGE; s++) { + desc->buf0 = cpu_to_le32(buf_phys); + buf_phys += MTK_WED_PAGE_BUF_SIZE; + desc++; + } + + dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE, + DMA_BIDIRECTIONAL); + } + + return 0; +} + +static int mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev) { - struct mtk_rxbm_desc *desc; + struct mtk_wed_bm_desc *desc; dma_addr_t desc_phys; dev->rx_buf_ring.size = dev->wlan.rx_nbuf; @@ -435,13 +839,48 @@ mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev) dev->rx_buf_ring.desc_phys = desc_phys; dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt); - return 0; + return mtk_wed_hwrro_buffer_alloc(dev); +} + +static void +mtk_wed_hwrro_free_buffer(struct mtk_wed_device *dev) +{ + struct mtk_wed_buf *page_list = dev->hw_rro.pages; + struct mtk_wed_bm_desc *desc = dev->hw_rro.desc; + int i, page_idx = 0; + + if (!dev->wlan.hw_rro) + return; + + if (!page_list) + return; + + if (!desc) + goto free_pagelist; + + for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) { + dma_addr_t buf_addr = page_list[page_idx].phy_addr; + void *page = page_list[page_idx++].p; + + if (!page) + break; + + dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE, + DMA_BIDIRECTIONAL); + __free_page(page); + } + + dma_free_coherent(dev->hw->dev, dev->hw_rro.size * sizeof(*desc), + desc, dev->hw_rro.desc_phys); + +free_pagelist: + kfree(page_list); } static void mtk_wed_free_rx_buffer(struct mtk_wed_device *dev) { - struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc; + struct mtk_wed_bm_desc *desc = dev->rx_buf_ring.desc; 
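The buffer-ring paths above replace the bare void ** page list with an array of struct mtk_wed_buf entries pairing each page with the DMA address recorded at map time, so the free paths can dma_unmap_page() directly instead of recovering the address from a descriptor (whose layout differs on v3 hardware). A sketch of that pairing — the example_* names are hypothetical, though the .p/.phy_addr fields match the accesses in this diff (the real struct definition lives in mtk_wed.h, which is not shown here):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct example_wed_buf {
	void		*p;		/* page pointer, as in struct mtk_wed_buf */
	dma_addr_t	phy_addr;	/* DMA address stored at map time */
};

static void example_free_page_list(struct device *dev,
				   struct example_wed_buf *list, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!list[i].p)
			break;		/* allocation stopped early */
		/* unmap using the stored address rather than reading it
		 * back out of a hardware descriptor
		 */
		dma_unmap_page(dev, list[i].phy_addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(list[i].p);
	}
}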
if (!desc) return; @@ -449,6 +888,28 @@ mtk_wed_free_rx_buffer(struct mtk_wed_device *dev) dev->wlan.release_rx_buf(dev); dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc), desc, dev->rx_buf_ring.desc_phys); + + mtk_wed_hwrro_free_buffer(dev); +} + +static void +mtk_wed_hwrro_init(struct mtk_wed_device *dev) +{ + if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro) + return; + + wed_set(dev, MTK_WED_RRO_PG_BM_RX_DMAM, + FIELD_PREP(MTK_WED_RRO_PG_BM_RX_SDL0, 128)); + + wed_w32(dev, MTK_WED_RRO_PG_BM_BASE, dev->hw_rro.desc_phys); + + wed_w32(dev, MTK_WED_RRO_PG_BM_INIT_PTR, + MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX | + FIELD_PREP(MTK_WED_RRO_PG_BM_SW_TAIL_IDX, + MTK_WED_RX_PG_BM_CNT)); + + /* enable rx_page_bm to fetch dmad */ + wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN); } static void @@ -462,6 +923,8 @@ mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev) wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH, FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff)); wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN); + + mtk_wed_hwrro_init(dev); } static void @@ -497,13 +960,23 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en) { u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; - if (dev->hw->version == 1) + switch (dev->hw->version) { + case 1: mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR; - else + break; + case 2: mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR; + break; + case 3: + mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | + MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; + break; + default: + break; + } if (!dev->hw->num_flows) mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; @@ -515,6 +988,9 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en) static void mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable) { + if (!mtk_wed_is_v2(dev->hw)) + return; + if (enable) { wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR); wed_w32(dev, MTK_WED_TXP_DW1, @@ -526,22 +1002,15 @@ mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable) } } -#define MTK_WFMDA_RX_DMA_EN BIT(2) -static void -mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx) +static int +mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, + struct mtk_wed_ring *ring) { - u32 val; int i; - if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED)) - return; /* queue is not configured by mt76 */ - for (i = 0; i < 3; i++) { - u32 cur_idx; + u32 cur_idx = readl(ring->wpdma + MTK_WED_RING_OFS_CPU_IDX); - cur_idx = wed_r32(dev, - MTK_WED_WPDMA_RING_RX_DATA(idx) + - MTK_WED_RING_OFS_CPU_IDX); if (cur_idx == MTK_WED_RX_RING_SIZE - 1) break; @@ -550,12 +1019,10 @@ mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx) if (i == 3) { dev_err(dev->hw->dev, "rx dma enable failed\n"); - return; + return -ETIMEDOUT; } - val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) | - MTK_WFMDA_RX_DMA_EN; - wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val); + return 0; } static void @@ -576,7 +1043,7 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev) MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); - if (dev->hw->version == 1) { + if (mtk_wed_is_v1(dev->hw)) { regmap_write(dev->hw->mirror, dev->hw->index * 4, 0); wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); @@ -589,6 +1056,14 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev) MTK_WED_WPDMA_RX_D_RX_DRV_EN); wed_clr(dev, MTK_WED_WDMA_GLO_CFG, 
MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); + + if (mtk_wed_is_v3_or_greater(dev->hw) && + mtk_wed_get_rx_capa(dev)) { + wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, + MTK_WDMA_PREF_TX_CFG_PREF_EN); + wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, + MTK_WDMA_PREF_RX_CFG_PREF_EN); + } } mtk_wed_set_512_support(dev, false); @@ -605,7 +1080,7 @@ mtk_wed_stop(struct mtk_wed_device *dev) wdma_w32(dev, MTK_WDMA_INT_GRP2, 0); wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0); - if (dev->hw->version == 1) + if (!mtk_wed_get_rx_capa(dev)) return; wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0); @@ -624,13 +1099,21 @@ mtk_wed_deinit(struct mtk_wed_device *dev) MTK_WED_CTRL_WED_TX_BM_EN | MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); - if (dev->hw->version == 1) + if (mtk_wed_is_v1(dev->hw)) return; wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN | MTK_WED_CTRL_WED_RX_BM_EN | MTK_WED_CTRL_RX_RRO_QM_EN); + + if (mtk_wed_is_v3_or_greater(dev->hw)) { + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN); + wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_TX_AMSDU); + wed_clr(dev, MTK_WED_PCIE_INT_CTRL, + MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA | + MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER); + } } static void @@ -642,6 +1125,7 @@ __mtk_wed_detach(struct mtk_wed_device *dev) mtk_wdma_rx_reset(dev); mtk_wed_reset(dev, MTK_WED_RESET_WED); + mtk_wed_amsdu_free_buffer(dev); mtk_wed_free_tx_buffer(dev); mtk_wed_free_tx_rings(dev); @@ -680,21 +1164,37 @@ mtk_wed_detach(struct mtk_wed_device *dev) mutex_unlock(&hw_lock); } -#define PCIE_BASE_ADDR0 0x11280000 static void mtk_wed_bus_init(struct mtk_wed_device *dev) { switch (dev->wlan.bus_type) { case MTK_WED_BUS_PCIE: { struct device_node *np = dev->hw->eth->dev->of_node; - struct regmap *regs; - regs = syscon_regmap_lookup_by_phandle(np, - "mediatek,wed-pcie"); - if (IS_ERR(regs)) - break; + if (mtk_wed_is_v2(dev->hw)) { + struct regmap *regs; + + regs = syscon_regmap_lookup_by_phandle(np, + "mediatek,wed-pcie"); + if (IS_ERR(regs)) + break; - regmap_update_bits(regs, 0, BIT(0), BIT(0)); + regmap_update_bits(regs, 0, BIT(0), BIT(0)); + } + + if (dev->wlan.msi) { + wed_w32(dev, MTK_WED_PCIE_CFG_INTM, + dev->hw->pcie_base | 0xc08); + wed_w32(dev, MTK_WED_PCIE_CFG_BASE, + dev->hw->pcie_base | 0xc04); + wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(8)); + } else { + wed_w32(dev, MTK_WED_PCIE_CFG_INTM, + dev->hw->pcie_base | 0x180); + wed_w32(dev, MTK_WED_PCIE_CFG_BASE, + dev->hw->pcie_base | 0x184); + wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24)); + } wed_w32(dev, MTK_WED_PCIE_INT_CTRL, FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2)); @@ -702,19 +1202,9 @@ mtk_wed_bus_init(struct mtk_wed_device *dev) /* pcie interrupt control: pola/source selection */ wed_set(dev, MTK_WED_PCIE_INT_CTRL, MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA | - FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1)); - wed_r32(dev, MTK_WED_PCIE_INT_CTRL); - - wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180); - wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184); - - /* pcie interrupt status trigger register */ - wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24)); - wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER); - - /* pola setting */ - wed_set(dev, MTK_WED_PCIE_INT_CTRL, - MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA); + MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER | + FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, + dev->hw->index)); break; } case MTK_WED_BUS_AXI: @@ -730,38 +1220,55 @@ mtk_wed_bus_init(struct mtk_wed_device *dev) static void mtk_wed_set_wpdma(struct mtk_wed_device *dev) { - if (dev->hw->version == 1) { - wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys); - } else { 
- mtk_wed_bus_init(dev); + int i; - wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int); - wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask); - wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx); - wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree); - wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo); - wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx); + if (mtk_wed_is_v1(dev->hw)) { + wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys); + return; } + + mtk_wed_bus_init(dev); + + wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int); + wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask); + wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx); + wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree); + + if (!mtk_wed_get_rx_capa(dev)) + return; + + wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo); + wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring0, dev->wlan.wpdma_rx); + + if (!dev->wlan.hw_rro) + return; + + wed_w32(dev, MTK_WED_RRO_RX_D_CFG(0), dev->wlan.wpdma_rx_rro[0]); + wed_w32(dev, MTK_WED_RRO_RX_D_CFG(1), dev->wlan.wpdma_rx_rro[1]); + for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) + wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING_CFG(i), + dev->wlan.wpdma_rx_pg + i * 0x10); } static void mtk_wed_hw_init_early(struct mtk_wed_device *dev) { - u32 mask, set; + u32 set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2); + u32 mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE; mtk_wed_deinit(dev); mtk_wed_reset(dev, MTK_WED_RESET_WED); mtk_wed_set_wpdma(dev); - mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE | - MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE | - MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE; - set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) | - MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP | - MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY; + if (!mtk_wed_is_v3_or_greater(dev->hw)) { + mask |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE | + MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE; + set |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP | + MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY; + } wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set); - if (dev->hw->version == 1) { + if (mtk_wed_is_v1(dev->hw)) { u32 offset = dev->hw->index ? 
0x04000400 : 0; wdma_set(dev, MTK_WDMA_GLO_CFG, @@ -906,11 +1413,18 @@ mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev) } /* configure RX_ROUTE_QM */ - wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); - wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT); - wed_set(dev, MTK_WED_RTQM_GLO_CFG, - FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index)); - wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); + if (mtk_wed_is_v2(dev->hw)) { + wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); + wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT); + wed_set(dev, MTK_WED_RTQM_GLO_CFG, + FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, + 0x3 + dev->hw->index)); + wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); + } else { + wed_set(dev, MTK_WED_RTQM_ENQ_CFG0, + FIELD_PREP(MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT, + 0x3 + dev->hw->index)); + } /* enable RX_ROUTE_QM */ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN); } @@ -923,34 +1437,30 @@ mtk_wed_hw_init(struct mtk_wed_device *dev) dev->init_done = true; mtk_wed_set_ext_int(dev, false); - wed_w32(dev, MTK_WED_TX_BM_CTRL, - MTK_WED_TX_BM_CTRL_PAUSE | - FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, - dev->tx_buf_ring.size / 128) | - FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, - MTK_WED_TX_RING_SIZE / 256)); wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys); - wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE); - if (dev->hw->version == 1) { - wed_w32(dev, MTK_WED_TX_BM_TKID, - FIELD_PREP(MTK_WED_TX_BM_TKID_START, - dev->wlan.token_start) | - FIELD_PREP(MTK_WED_TX_BM_TKID_END, - dev->wlan.token_start + - dev->wlan.nbuf - 1)); + if (mtk_wed_is_v1(dev->hw)) { + wed_w32(dev, MTK_WED_TX_BM_CTRL, + MTK_WED_TX_BM_CTRL_PAUSE | + FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, + dev->tx_buf_ring.size / 128) | + FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, + MTK_WED_TX_RING_SIZE / 256)); wed_w32(dev, MTK_WED_TX_BM_DYN_THR, FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) | MTK_WED_TX_BM_DYN_THR_HI); - } else { - wed_w32(dev, MTK_WED_TX_BM_TKID_V2, - FIELD_PREP(MTK_WED_TX_BM_TKID_START, - dev->wlan.token_start) | - FIELD_PREP(MTK_WED_TX_BM_TKID_END, - dev->wlan.token_start + - dev->wlan.nbuf - 1)); + } else if (mtk_wed_is_v2(dev->hw)) { + wed_w32(dev, MTK_WED_TX_BM_CTRL, + MTK_WED_TX_BM_CTRL_PAUSE | + FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, + dev->tx_buf_ring.size / 128) | + FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, + MTK_WED_TX_RING_SIZE / 256)); + wed_w32(dev, MTK_WED_TX_TKID_DYN_THR, + FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) | + MTK_WED_TX_TKID_DYN_THR_HI); wed_w32(dev, MTK_WED_TX_BM_DYN_THR, FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) | MTK_WED_TX_BM_DYN_THR_HI_V2); @@ -960,31 +1470,71 @@ mtk_wed_hw_init(struct mtk_wed_device *dev) dev->tx_buf_ring.size / 128) | FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM, dev->tx_buf_ring.size / 128)); - wed_w32(dev, MTK_WED_TX_TKID_DYN_THR, - FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) | - MTK_WED_TX_TKID_DYN_THR_HI); } + wed_w32(dev, dev->hw->soc->regmap.tx_bm_tkid, + FIELD_PREP(MTK_WED_TX_BM_TKID_START, dev->wlan.token_start) | + FIELD_PREP(MTK_WED_TX_BM_TKID_END, + dev->wlan.token_start + dev->wlan.nbuf - 1)); + mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); - if (dev->hw->version == 1) { + if (mtk_wed_is_v3_or_greater(dev->hw)) { + /* switch to new bm architecture */ + wed_clr(dev, MTK_WED_TX_BM_CTRL, + MTK_WED_TX_BM_CTRL_LEGACY_EN); + + wed_w32(dev, MTK_WED_TX_TKID_CTRL, + MTK_WED_TX_TKID_CTRL_PAUSE | + FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3, + dev->wlan.nbuf / 128) | + 
FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3, + dev->wlan.nbuf / 128)); + /* return SKBID + SDP back to bm */ + wed_set(dev, MTK_WED_TX_TKID_CTRL, + MTK_WED_TX_TKID_CTRL_FREE_FORMAT); + + wed_w32(dev, MTK_WED_TX_BM_INIT_PTR, + MTK_WED_TX_BM_PKT_CNT | + MTK_WED_TX_BM_INIT_SW_TAIL_IDX); + } + + if (mtk_wed_is_v1(dev->hw)) { wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN | MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); - } else { - wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE); + } else if (mtk_wed_get_rx_capa(dev)) { /* rx hw init */ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, MTK_WED_WPDMA_RX_D_RST_CRX_IDX | MTK_WED_WPDMA_RX_D_RST_DRV_IDX); wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0); + /* reset prefetch index of ring */ + wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX, + MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); + wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX, + MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); + + wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX, + MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); + wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX, + MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR); + + /* reset prefetch FIFO of ring */ + wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, + MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR | + MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR); + wed_w32(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, 0); + mtk_wed_rx_buffer_hw_init(dev); mtk_wed_rro_hw_init(dev); mtk_wed_route_qm_hw_init(dev); } wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE); + if (!mtk_wed_is_v1(dev->hw)) + wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE); } static void @@ -1007,23 +1557,6 @@ mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx) } } -static u32 -mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) -{ - return !!(wed_r32(dev, reg) & mask); -} - -static int -mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) -{ - int sleep = 15000; - int timeout = 100 * sleep; - u32 val; - - return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, - timeout, false, dev, reg, mask); -} - static int mtk_wed_rx_reset(struct mtk_wed_device *dev) { @@ -1037,13 +1570,33 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev) if (ret) return ret; + if (dev->wlan.hw_rro) { + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN); + mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_HW_STS, + MTK_WED_RX_IND_CMD_BUSY); + mtk_wed_reset(dev, MTK_WED_RESET_RRO_RX_TO_PG); + } + wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN); ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_BUSY); + if (!ret && mtk_wed_is_v3_or_greater(dev->hw)) + ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, + MTK_WED_WPDMA_RX_D_PREF_BUSY); if (ret) { mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV); } else { + if (mtk_wed_is_v3_or_greater(dev->hw)) { + /* 1.a. 
disable prefetch HW */ + wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, + MTK_WED_WPDMA_RX_D_PREF_EN); + mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, + MTK_WED_WPDMA_RX_D_PREF_BUSY); + wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, + MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL); + } + wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, MTK_WED_WPDMA_RX_D_RST_CRX_IDX | MTK_WED_WPDMA_RX_D_RST_DRV_IDX); @@ -1071,23 +1624,52 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev) wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0); } + if (dev->wlan.hw_rro) { + /* disable rro msdu page drv */ + wed_clr(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, + MTK_WED_RRO_MSDU_PG_DRV_EN); + + /* disable rro data drv */ + wed_clr(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN); + + /* rro msdu page drv reset */ + wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, + MTK_WED_RRO_MSDU_PG_DRV_CLR); + mtk_wed_poll_busy(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, + MTK_WED_RRO_MSDU_PG_DRV_CLR); + + /* rro data drv reset */ + wed_w32(dev, MTK_WED_RRO_RX_D_CFG(2), + MTK_WED_RRO_RX_D_DRV_CLR); + mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_D_CFG(2), + MTK_WED_RRO_RX_D_DRV_CLR); + } + /* reset route qm */ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN); ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_BUSY); - if (ret) + if (ret) { mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM); - else - wed_set(dev, MTK_WED_RTQM_GLO_CFG, - MTK_WED_RTQM_Q_RST); + } else if (mtk_wed_is_v3_or_greater(dev->hw)) { + wed_set(dev, MTK_WED_RTQM_RST, BIT(0)); + wed_clr(dev, MTK_WED_RTQM_RST, BIT(0)); + mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM); + } else { + wed_set(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); + } /* reset tx wdma */ mtk_wdma_tx_reset(dev); /* reset tx wdma drv */ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN); - mtk_wed_poll_busy(dev, MTK_WED_CTRL, - MTK_WED_CTRL_WDMA_INT_AGENT_BUSY); + if (mtk_wed_is_v3_or_greater(dev->hw)) + mtk_wed_poll_busy(dev, MTK_WED_WPDMA_STATUS, + MTK_WED_WPDMA_STATUS_TX_DRV); + else + mtk_wed_poll_busy(dev, MTK_WED_CTRL, + MTK_WED_CTRL_WDMA_INT_AGENT_BUSY); mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV); /* reset wed rx dma */ @@ -1097,13 +1679,8 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev) if (ret) { mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA); } else { - struct mtk_eth *eth = dev->hw->eth; - - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) - wed_set(dev, MTK_WED_RESET_IDX, - MTK_WED_RESET_IDX_RX_V2); - else - wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX); + wed_set(dev, MTK_WED_RESET_IDX, + dev->hw->soc->regmap.reset_idx_rx_mask); wed_w32(dev, MTK_WED_RESET_IDX, 0); } @@ -1113,6 +1690,14 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev) MTK_WED_CTRL_WED_RX_BM_BUSY); mtk_wed_reset(dev, MTK_WED_RESET_RX_BM); + if (dev->wlan.hw_rro) { + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN); + mtk_wed_poll_busy(dev, MTK_WED_CTRL, + MTK_WED_CTRL_WED_RX_PG_BM_BUSY); + wed_set(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM); + wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM); + } + /* wo change to enable state */ val = MTK_WED_WO_STATE_ENABLE; ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, @@ -1130,6 +1715,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev) false); } mtk_wed_free_rx_buffer(dev); + mtk_wed_hwrro_free_buffer(dev); return 0; } @@ -1156,21 +1742,48 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) if (busy) { mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA); } else { - wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX); + wed_w32(dev, MTK_WED_RESET_IDX, + 
dev->hw->soc->regmap.reset_idx_tx_mask); wed_w32(dev, MTK_WED_RESET_IDX, 0); } /* 2. reset WDMA rx DMA */ busy = !!mtk_wdma_rx_reset(dev); - wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); + if (mtk_wed_is_v3_or_greater(dev->hw)) { + val = MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE | + wed_r32(dev, MTK_WED_WDMA_GLO_CFG); + val &= ~MTK_WED_WDMA_GLO_CFG_RX_DRV_EN; + wed_w32(dev, MTK_WED_WDMA_GLO_CFG, val); + } else { + wed_clr(dev, MTK_WED_WDMA_GLO_CFG, + MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); + } + if (!busy) busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY); + if (!busy && mtk_wed_is_v3_or_greater(dev->hw)) + busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG, + MTK_WED_WDMA_RX_PREF_BUSY); if (busy) { mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT); mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV); } else { + if (mtk_wed_is_v3_or_greater(dev->hw)) { + /* 1.a. disable prefetch HW */ + wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, + MTK_WED_WDMA_RX_PREF_EN); + mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG, + MTK_WED_WDMA_RX_PREF_BUSY); + wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, + MTK_WED_WDMA_RX_PREF_DDONE2_EN); + + /* 2. Reset dma index */ + wed_w32(dev, MTK_WED_WDMA_RESET_IDX, + MTK_WED_WDMA_RESET_IDX_RX_ALL); + } + wed_w32(dev, MTK_WED_WDMA_RESET_IDX, MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV); wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0); @@ -1186,8 +1799,13 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); for (i = 0; i < 100; i++) { - val = wed_r32(dev, MTK_WED_TX_BM_INTF); - if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40) + if (mtk_wed_is_v1(dev->hw)) + val = FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, + wed_r32(dev, MTK_WED_TX_BM_INTF)); + else + val = FIELD_GET(MTK_WED_TX_TKID_INTF_TKFIFO_FDEP, + wed_r32(dev, MTK_WED_TX_TKID_INTF)); + if (val == 0x40) break; } @@ -1209,6 +1827,8 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV); mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV); + if (mtk_wed_is_v3_or_greater(dev->hw)) + wed_w32(dev, MTK_WED_RX1_CTRL2, 0); } else { wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, MTK_WED_WPDMA_RESET_IDX_TX | @@ -1217,7 +1837,7 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) } dev->init_done = false; - if (dev->hw->version == 1) + if (mtk_wed_is_v1(dev->hw)) return; if (!busy) { @@ -1225,7 +1845,14 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) wed_w32(dev, MTK_WED_RESET_IDX, 0); } - mtk_wed_rx_reset(dev); + if (mtk_wed_is_v3_or_greater(dev->hw)) { + /* reset amsdu engine */ + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN); + mtk_wed_reset(dev, MTK_WED_RESET_TX_AMSDU); + } + + if (mtk_wed_get_rx_capa(dev)) + mtk_wed_rx_reset(dev); } static int @@ -1248,7 +1875,6 @@ static int mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size, bool reset) { - u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version; struct mtk_wed_ring *wdma; if (idx >= ARRAY_SIZE(dev->rx_wdma)) @@ -1256,7 +1882,7 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size, wdma = &dev->rx_wdma[idx]; if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, - desc_size, true)) + dev->hw->soc->wdma_desc_size, true)) return -ENOMEM; wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, @@ -1277,7 +1903,6 @@ static int mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size, bool reset) { - u32 
desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version; struct mtk_wed_ring *wdma; if (idx >= ARRAY_SIZE(dev->tx_wdma)) @@ -1285,9 +1910,27 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size, wdma = &dev->tx_wdma[idx]; if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, - desc_size, true)) + dev->hw->soc->wdma_desc_size, true)) return -ENOMEM; + if (mtk_wed_is_v3_or_greater(dev->hw)) { + struct mtk_wdma_desc *desc = wdma->desc; + int i; + + for (i = 0; i < MTK_WED_WDMA_RING_SIZE; i++) { + desc->buf0 = 0; + desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); + desc->buf1 = 0; + desc->info = cpu_to_le32(MTK_WDMA_TXD0_DESC_INFO_DMA_DONE); + desc++; + desc->buf0 = 0; + desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); + desc->buf1 = 0; + desc->info = cpu_to_le32(MTK_WDMA_TXD1_DESC_INFO_DMA_DONE); + desc++; + } + } + wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, wdma->desc_phys); wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, @@ -1343,7 +1986,7 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) MTK_WED_CTRL_WED_TX_BM_EN | MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); - if (dev->hw->version == 1) { + if (mtk_wed_is_v1(dev->hw)) { wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS); @@ -1353,8 +1996,9 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask); } else { - wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE, - GENMASK(1, 0)); + if (mtk_wed_is_v3_or_greater(dev->hw)) + wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN); + /* initial tx interrupt trigger */ + wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX, MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN | @@ -1373,15 +2017,20 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG, dev->wlan.txfree_tbit)); - wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX, - MTK_WED_WPDMA_INT_CTRL_RX0_EN | - MTK_WED_WPDMA_INT_CTRL_RX0_CLR | - MTK_WED_WPDMA_INT_CTRL_RX1_EN | - MTK_WED_WPDMA_INT_CTRL_RX1_CLR | - FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG, - dev->wlan.rx_tbit[0]) | - FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG, - dev->wlan.rx_tbit[1])); + if (mtk_wed_get_rx_capa(dev)) { + wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX, + MTK_WED_WPDMA_INT_CTRL_RX0_EN | + MTK_WED_WPDMA_INT_CTRL_RX0_CLR | + MTK_WED_WPDMA_INT_CTRL_RX1_EN | + MTK_WED_WPDMA_INT_CTRL_RX1_CLR | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG, + dev->wlan.rx_tbit[0]) | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG, + dev->wlan.rx_tbit[1])); + + wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE, + GENMASK(1, 0)); + } wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask); wed_set(dev, MTK_WED_WDMA_INT_CTRL, @@ -1397,55 +2046,280 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) wed_w32(dev, MTK_WED_INT_MASK, irq_mask); } +#define MTK_WFMDA_RX_DMA_EN BIT(2) static void mtk_wed_dma_enable(struct mtk_wed_device *dev) { - wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV); + int i; + + if (!mtk_wed_is_v3_or_greater(dev->hw)) { + wed_set(dev, MTK_WED_WPDMA_INT_CTRL, + MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV); + wed_set(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); + wdma_set(dev, MTK_WDMA_GLO_CFG, + MTK_WDMA_GLO_CFG_TX_DMA_EN | + MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | + MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); + wed_set(dev, MTK_WED_WPDMA_CTRL, MTK_WED_WPDMA_CTRL_SDL1_FIXED); + } else { + wed_set(dev, 
MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN | + MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR); + wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN); + } wed_set(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN | MTK_WED_GLO_CFG_RX_DMA_EN); - wed_set(dev, MTK_WED_WPDMA_GLO_CFG, - MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | - MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); + wed_set(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); - wdma_set(dev, MTK_WDMA_GLO_CFG, - MTK_WDMA_GLO_CFG_TX_DMA_EN | - MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | - MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); - - if (dev->hw->version == 1) { + if (mtk_wed_is_v1(dev->hw)) { wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); - } else { - int i; + return; + } - wed_set(dev, MTK_WED_WPDMA_CTRL, - MTK_WED_WPDMA_CTRL_SDL1_FIXED); + wed_set(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); - wed_set(dev, MTK_WED_WDMA_GLO_CFG, - MTK_WED_WDMA_GLO_CFG_TX_DRV_EN | - MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); + if (mtk_wed_is_v3_or_greater(dev->hw)) { + wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, + FIELD_PREP(MTK_WED_WDMA_RX_PREF_BURST_SIZE, 0x10) | + FIELD_PREP(MTK_WED_WDMA_RX_PREF_LOW_THRES, 0x8)); + wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, + MTK_WED_WDMA_RX_PREF_DDONE2_EN); + wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_EN); + wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST); wed_set(dev, MTK_WED_WPDMA_GLO_CFG, - MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | - MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); + MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4); - wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, - MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP | - MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV); + wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); + wdma_set(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); + } - wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, - MTK_WED_WPDMA_RX_D_RX_DRV_EN | - FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) | - FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL, - 0x2)); + wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP | + MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV); + + if (!mtk_wed_get_rx_capa(dev)) + return; + + wed_set(dev, MTK_WED_WDMA_GLO_CFG, + MTK_WED_WDMA_GLO_CFG_TX_DRV_EN | + MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); + + wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RXD_READ_LEN); + wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, + MTK_WED_WPDMA_RX_D_RX_DRV_EN | + FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) | + FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL, 0x2)); + + if (mtk_wed_is_v3_or_greater(dev->hw)) { + wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, + MTK_WED_WPDMA_RX_D_PREF_EN | + FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE, 0x10) | + FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_LOW_THRES, 0x8)); + + wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN); + wdma_set(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); + wdma_set(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); + } + + for (i = 0; i < MTK_WED_RX_QUEUES; i++) { + struct mtk_wed_ring *ring = &dev->rx_ring[i]; + u32 val; + + if (!(ring->flags & MTK_WED_RING_CONFIGURED)) + continue; /* queue is not configured by mt76 */ + + if (mtk_wed_check_wfdma_rx_fill(dev, ring)) { + dev_err(dev->hw->dev, + "rx_ring(%d) dma enable failed\n", i); + continue; + } + + val = 
wifi_r32(dev, + dev->wlan.wpdma_rx_glo - + dev->wlan.phy_base) | MTK_WFMDA_RX_DMA_EN; + wifi_w32(dev, + dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, + val); + } +} + +static void +mtk_wed_start_hw_rro(struct mtk_wed_device *dev, u32 irq_mask, bool reset) +{ + int i; + + wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask); + wed_w32(dev, MTK_WED_INT_MASK, irq_mask); + + if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro) + return; + + if (reset) { + wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, + MTK_WED_RRO_MSDU_PG_DRV_EN); + return; + } + + wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR); + wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, + MTK_WED_RRO_MSDU_PG_DRV_CLR); + + wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX, + MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN | + MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR | + MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN | + MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG, + dev->wlan.rro_rx_tbit[0]) | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG, + dev->wlan.rro_rx_tbit[1])); + + wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG, + MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN | + MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR | + MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN | + MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR | + MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN | + MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG, + dev->wlan.rx_pg_tbit[0]) | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG, + dev->wlan.rx_pg_tbit[1]) | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG, + dev->wlan.rx_pg_tbit[2])); + + /* RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should be enabled after + * WM FWDL completed, otherwise the RRO_MSDU_PG ring may be broken + */ + wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, + MTK_WED_RRO_MSDU_PG_DRV_EN); + + for (i = 0; i < MTK_WED_RX_QUEUES; i++) { + struct mtk_wed_ring *ring = &dev->rx_rro_ring[i]; + + if (!(ring->flags & MTK_WED_RING_CONFIGURED)) + continue; + + if (mtk_wed_check_wfdma_rx_fill(dev, ring)) + dev_err(dev->hw->dev, + "rx_rro_ring(%d) initialization failed\n", i); + } + + for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) { + struct mtk_wed_ring *ring = &dev->rx_page_ring[i]; + + if (!(ring->flags & MTK_WED_RING_CONFIGURED)) + continue; + + if (mtk_wed_check_wfdma_rx_fill(dev, ring)) + dev_err(dev->hw->dev, + "rx_page_ring(%d) initialization failed\n", i); + } +} + +static void +mtk_wed_rro_rx_ring_setup(struct mtk_wed_device *dev, int idx, + void __iomem *regs) +{ + struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx]; + + ring->wpdma = regs; + wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_BASE, + readl(regs)); + wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_COUNT, + readl(regs + MTK_WED_RING_OFS_COUNT)); + ring->flags |= MTK_WED_RING_CONFIGURED; +} + +static void +mtk_wed_msdu_pg_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) +{ + struct mtk_wed_ring *ring = &dev->rx_page_ring[idx]; + + ring->wpdma = regs; + wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_BASE, + readl(regs)); + wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_COUNT, + readl(regs + MTK_WED_RING_OFS_COUNT)); + ring->flags |= MTK_WED_RING_CONFIGURED; +} + +static int +mtk_wed_ind_rx_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) +{ + struct mtk_wed_ring *ring = &dev->ind_cmd_ring; + u32 val = readl(regs + MTK_WED_RING_OFS_COUNT); + int i, count = 0; - for (i = 0; i < MTK_WED_RX_QUEUES; i++) - mtk_wed_check_wfdma_rx_fill(dev, i); + ring->wpdma = regs; + 
wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE, + readl(regs) & 0xfffffff0); + + wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT, + readl(regs + MTK_WED_RING_OFS_COUNT)); + + /* ack sn cr */ + wed_w32(dev, MTK_WED_RRO_CFG0, dev->wlan.phy_base + + dev->wlan.ind_cmd.ack_sn_addr); + wed_w32(dev, MTK_WED_RRO_CFG1, + FIELD_PREP(MTK_WED_RRO_CFG1_MAX_WIN_SZ, + dev->wlan.ind_cmd.win_size) | + FIELD_PREP(MTK_WED_RRO_CFG1_PARTICL_SE_ID, + dev->wlan.ind_cmd.particular_sid)); + + /* particular session addr element */ + wed_w32(dev, MTK_WED_ADDR_ELEM_CFG0, + dev->wlan.ind_cmd.particular_se_phys); + + for (i = 0; i < dev->wlan.ind_cmd.se_group_nums; i++) { + wed_w32(dev, MTK_WED_RADDR_ELEM_TBL_WDATA, + dev->wlan.ind_cmd.addr_elem_phys[i] >> 4); + wed_w32(dev, MTK_WED_ADDR_ELEM_TBL_CFG, + MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f)); + + val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG); + while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY) && count++ < 100) + val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG); + if (count >= 100) + dev_err(dev->hw->dev, + "write ba session base failed\n"); + } + + /* pn check init */ + for (i = 0; i < dev->wlan.ind_cmd.particular_sid; i++) { + wed_w32(dev, MTK_WED_PN_CHECK_WDATA_M, + MTK_WED_PN_CHECK_IS_FIRST); + + wed_w32(dev, MTK_WED_PN_CHECK_CFG, MTK_WED_PN_CHECK_WR | + FIELD_PREP(MTK_WED_PN_CHECK_SE_ID, i)); + + count = 0; + val = wed_r32(dev, MTK_WED_PN_CHECK_CFG); + while (!(val & MTK_WED_PN_CHECK_WR_RDY) && count++ < 100) + val = wed_r32(dev, MTK_WED_PN_CHECK_CFG); + if (count >= 100) + dev_err(dev->hw->dev, + "session(%d) initialization failed\n", i); } + + wed_w32(dev, MTK_WED_RX_IND_CMD_CNT0, MTK_WED_RX_IND_CMD_DBG_CNT_EN); + wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN); + + return 0; } static void @@ -1465,14 +2339,14 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) mtk_wed_set_ext_int(dev, true); - if (dev->hw->version == 1) { + if (mtk_wed_is_v1(dev->hw)) { u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN | FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index); val |= BIT(0) | (BIT(1) * !!dev->hw->index); regmap_write(dev->hw->mirror, dev->hw->index * 4, val); - } else { + } else if (mtk_wed_get_rx_capa(dev)) { /* driver set mid ready and only once */ wed_w32(dev, MTK_WED_EXT_INT_MASK1, MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); @@ -1482,12 +2356,18 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) wed_r32(dev, MTK_WED_EXT_INT_MASK1); wed_r32(dev, MTK_WED_EXT_INT_MASK2); + if (mtk_wed_is_v3_or_greater(dev->hw)) { + wed_w32(dev, MTK_WED_EXT_INT_MASK3, + MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); + wed_r32(dev, MTK_WED_EXT_INT_MASK3); + } + if (mtk_wed_rro_cfg(dev)) return; - } mtk_wed_set_512_support(dev, dev->wlan.wcid_512); + mtk_wed_amsdu_init(dev); mtk_wed_dma_enable(dev); dev->running = true; @@ -1534,6 +2414,7 @@ mtk_wed_attach(struct mtk_wed_device *dev) dev->irq = hw->irq; dev->wdma_idx = hw->index; dev->version = hw->version; + dev->hw->pcie_base = mtk_wed_get_pcie_base(dev); if (hw->eth->dma_dev == hw->eth->dev && of_dma_is_coherent(hw->eth->dev->of_node)) @@ -1543,6 +2424,10 @@ mtk_wed_attach(struct mtk_wed_device *dev) if (ret) goto out; + ret = mtk_wed_amsdu_buffer_alloc(dev); + if (ret) + goto out; + if (mtk_wed_get_rx_capa(dev)) { ret = mtk_wed_rro_alloc(dev); if (ret) @@ -1550,13 +2435,14 @@ mtk_wed_attach(struct mtk_wed_device *dev) } mtk_wed_hw_init_early(dev); - if (hw->version == 1) { + if (mtk_wed_is_v1(hw)) regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0); - } else { + else 
dev->rev_id = wed_r32(dev, MTK_WED_REV_ID); + + if (mtk_wed_get_rx_capa(dev)) ret = mtk_wed_wo_init(hw); - } out: if (ret) { dev_err(dev->hw->dev, "failed to attach wed device\n"); @@ -1600,6 +2486,23 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs, ring->reg_base = MTK_WED_RING_TX(idx); ring->wpdma = regs; + if (mtk_wed_is_v3_or_greater(dev->hw) && idx == 1) { + /* reset prefetch index */ + wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, + MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR | + MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR); + + wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, + MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR | + MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR); + + /* reset prefetch FIFO */ + wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, + MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR | + MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR); + wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, 0); + } + /* WED -> WPDMA */ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE); @@ -1618,7 +2521,7 @@ static int mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) { struct mtk_wed_ring *ring = &dev->txfree_ring; - int i, index = dev->hw->version == 1; + int i, index = mtk_wed_is_v1(dev->hw); /* * For txfree event handling, the same DMA ring is shared between WED @@ -1674,15 +2577,13 @@ mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs, static u32 mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask) { - u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; + u32 val, ext_mask; - if (dev->hw->version == 1) - ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR; + if (mtk_wed_is_v3_or_greater(dev->hw)) + ext_mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | + MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; else - ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | - MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | - MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | - MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR; + ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; val = wed_r32(dev, MTK_WED_EXT_INT_STATUS); wed_w32(dev, MTK_WED_EXT_INT_STATUS, val); @@ -1712,19 +2613,20 @@ mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask) int mtk_wed_flow_add(int index) { struct mtk_wed_hw *hw = hw_list[index]; - int ret; + int ret = 0; - if (!hw || !hw->wed_dev) - return -ENODEV; + mutex_lock(&hw_lock); - if (hw->num_flows) { - hw->num_flows++; - return 0; + if (!hw || !hw->wed_dev) { + ret = -ENODEV; + goto out; } - mutex_lock(&hw_lock); - if (!hw->wed_dev) { - ret = -ENODEV; + if (!hw->wed_dev->wlan.offload_enable) + goto out; + + if (hw->num_flows) { + hw->num_flows++; goto out; } @@ -1743,14 +2645,15 @@ void mtk_wed_flow_remove(int index) { struct mtk_wed_hw *hw = hw_list[index]; - if (!hw) - return; + mutex_lock(&hw_lock); - if (--hw->num_flows) - return; + if (!hw || !hw->wed_dev) + goto out; - mutex_lock(&hw_lock); - if (!hw->wed_dev) + if (!hw->wed_dev->wlan.offload_disable) + goto out; + + if (--hw->num_flows) goto out; hw->wed_dev->wlan.offload_disable(hw->wed_dev); @@ -1841,7 +2744,7 @@ mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev, { struct mtk_wed_hw *hw = wed->hw; - if (hw->version < 2) + if (mtk_wed_is_v1(hw)) return -EOPNOTSUPP; switch (type) { @@ -1873,6 +2776,10 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, .detach = mtk_wed_detach, .ppe_check = mtk_wed_ppe_check, .setup_tc = mtk_wed_setup_tc, + .start_hw_rro = mtk_wed_start_hw_rro, + .rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup, + 
.msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup, + .ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup, }; struct device_node *eth_np = eth->dev->of_node; struct platform_device *pdev; @@ -1915,9 +2822,17 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, hw->wdma = wdma; hw->index = index; hw->irq = irq; - hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1; + hw->version = eth->soc->version; - if (hw->version == 1) { + switch (hw->version) { + case 2: + hw->soc = &mt7986_data; + break; + case 3: + hw->soc = &mt7988_data; + break; + default: + case 1: hw->mirror = syscon_regmap_lookup_by_phandle(eth_np, "mediatek,pcie-mirror"); hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np, @@ -1931,6 +2846,8 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, regmap_write(hw->mirror, 0, 0); regmap_write(hw->mirror, 4, 0); } + hw->soc = &mt7622_data; + break; } mtk_wed_hw_add_debugfs(hw); diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h index 43ab77eaf683..c1f0479d7a71 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed.h +++ b/drivers/net/ethernet/mediatek/mtk_wed.h @@ -9,10 +9,29 @@ #include <linux/regmap.h> #include <linux/netdevice.h> +#include "mtk_wed_regs.h" + struct mtk_eth; struct mtk_wed_wo; +struct mtk_wed_soc_data { + struct { + u32 tx_bm_tkid; + u32 wpdma_rx_ring0; + u32 reset_idx_tx_mask; + u32 reset_idx_rx_mask; + } regmap; + u32 tx_ring_desc_size; + u32 wdma_desc_size; +}; + +struct mtk_wed_amsdu { + void *txd; + dma_addr_t txd_phy; +}; + struct mtk_wed_hw { + const struct mtk_wed_soc_data *soc; struct device_node *node; struct mtk_eth *eth; struct regmap *regs; @@ -24,6 +43,8 @@ struct mtk_wed_hw { struct dentry *debugfs_dir; struct mtk_wed_device *wed_dev; struct mtk_wed_wo *wed_wo; + struct mtk_wed_amsdu *wed_amsdu; + u32 pcie_base; u32 debugfs_reg; u32 num_flows; u8 version; @@ -37,9 +58,30 @@ struct mtk_wdma_info { u8 queue; u16 wcid; u8 bss; + u8 amsdu; }; #ifdef CONFIG_NET_MEDIATEK_SOC_WED +static inline bool mtk_wed_is_v1(struct mtk_wed_hw *hw) +{ + return hw->version == 1; +} + +static inline bool mtk_wed_is_v2(struct mtk_wed_hw *hw) +{ + return hw->version == 2; +} + +static inline bool mtk_wed_is_v3(struct mtk_wed_hw *hw) +{ + return hw->version == 3; +} + +static inline bool mtk_wed_is_v3_or_greater(struct mtk_wed_hw *hw) +{ + return hw->version > 2; +} + static inline void wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val) { @@ -122,6 +164,21 @@ wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val) writel(val, dev->txfree_ring.wpdma + reg); } +static inline u32 mtk_wed_get_pcie_base(struct mtk_wed_device *dev) +{ + if (!mtk_wed_is_v3_or_greater(dev->hw)) + return MTK_WED_PCIE_BASE; + + switch (dev->hw->index) { + case 1: + return MTK_WED_PCIE_BASE1; + case 2: + return MTK_WED_PCIE_BASE2; + default: + return MTK_WED_PCIE_BASE0; + } +} + void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, void __iomem *wdma, phys_addr_t wdma_phy, int index); diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c index b244c02c5b51..781c691473e1 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c +++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c @@ -11,6 +11,7 @@ struct reg_dump { u16 offset; u8 type; u8 base; + u32 mask; }; enum { @@ -25,6 +26,8 @@ enum { #define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING } #define DUMP_REG(_reg, ...) 
{ #_reg, MTK_##_reg, __VA_ARGS__ } +#define DUMP_REG_MASK(_reg, _mask) \ + { #_mask, MTK_##_reg, DUMP_TYPE_WED, 0, MTK_##_mask } #define DUMP_RING(_prefix, _base, ...) \ { _prefix " BASE", _base, __VA_ARGS__ }, \ { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \ @@ -32,6 +35,7 @@ enum { { _prefix " DIDX", _base + 0xc, __VA_ARGS__ } #define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED) +#define DUMP_WED_MASK(_reg, _mask) DUMP_REG_MASK(_reg, _mask) #define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED) #define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA) @@ -84,7 +88,6 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev, } } - static int wed_txinfo_show(struct seq_file *s, void *data) { @@ -127,16 +130,23 @@ wed_txinfo_show(struct seq_file *s, void *data) DUMP_WDMA_RING(WDMA_RING_RX(0)), DUMP_WDMA_RING(WDMA_RING_RX(1)), - DUMP_STR("TX FREE"), + DUMP_STR("WED TX FREE"), DUMP_WED(WED_RX_MIB(0)), + DUMP_WED_RING(WED_RING_RX(0)), + DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(0)), + DUMP_WED(WED_RX_MIB(1)), + DUMP_WED_RING(WED_RING_RX(1)), + DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(1)), + + DUMP_STR("WED WPDMA TX FREE"), + DUMP_WED_RING(WED_WPDMA_RING_RX(0)), + DUMP_WED_RING(WED_WPDMA_RING_RX(1)), }; struct mtk_wed_hw *hw = s->private; struct mtk_wed_device *dev = hw->wed_dev; - if (!dev) - return 0; - - dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs)); + if (dev) + dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs)); return 0; } @@ -145,7 +155,7 @@ DEFINE_SHOW_ATTRIBUTE(wed_txinfo); static int wed_rxinfo_show(struct seq_file *s, void *data) { - static const struct reg_dump regs[] = { + static const struct reg_dump regs_common[] = { DUMP_STR("WPDMA RX"), DUMP_WPDMA_RX_RING(0), DUMP_WPDMA_RX_RING(1), @@ -163,7 +173,7 @@ wed_rxinfo_show(struct seq_file *s, void *data) DUMP_WED_RING(WED_RING_RX_DATA(0)), DUMP_WED_RING(WED_RING_RX_DATA(1)), - DUMP_STR("WED RRO"), + DUMP_STR("WED WO RRO"), DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0), DUMP_WED(WED_RROQM_MID_MIB), DUMP_WED(WED_RROQM_MOD_MIB), @@ -174,17 +184,6 @@ wed_rxinfo_show(struct seq_file *s, void *data) DUMP_WED(WED_RROQM_FDBK_ANC_MIB), DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB), - DUMP_STR("WED Route QM"), - DUMP_WED(WED_RTQM_R2H_MIB(0)), - DUMP_WED(WED_RTQM_R2Q_MIB(0)), - DUMP_WED(WED_RTQM_Q2H_MIB(0)), - DUMP_WED(WED_RTQM_R2H_MIB(1)), - DUMP_WED(WED_RTQM_R2Q_MIB(1)), - DUMP_WED(WED_RTQM_Q2H_MIB(1)), - DUMP_WED(WED_RTQM_Q2N_MIB), - DUMP_WED(WED_RTQM_Q2B_MIB), - DUMP_WED(WED_RTQM_PFDBK_MIB), - DUMP_STR("WED WDMA TX"), DUMP_WED(WED_WDMA_TX_MIB), DUMP_WED_RING(WED_WDMA_RING_TX), @@ -205,19 +204,385 @@ wed_rxinfo_show(struct seq_file *s, void *data) DUMP_WED(WED_RX_BM_INTF), DUMP_WED(WED_RX_BM_ERR_STS), }; + static const struct reg_dump regs_wed_v2[] = { + DUMP_STR("WED Route QM"), + DUMP_WED(WED_RTQM_R2H_MIB(0)), + DUMP_WED(WED_RTQM_R2Q_MIB(0)), + DUMP_WED(WED_RTQM_Q2H_MIB(0)), + DUMP_WED(WED_RTQM_R2H_MIB(1)), + DUMP_WED(WED_RTQM_R2Q_MIB(1)), + DUMP_WED(WED_RTQM_Q2H_MIB(1)), + DUMP_WED(WED_RTQM_Q2N_MIB), + DUMP_WED(WED_RTQM_Q2B_MIB), + DUMP_WED(WED_RTQM_PFDBK_MIB), + }; + static const struct reg_dump regs_wed_v3[] = { + DUMP_STR("WED RX RRO DATA"), + DUMP_WED_RING(WED_RRO_RX_D_RX(0)), + DUMP_WED_RING(WED_RRO_RX_D_RX(1)), + + DUMP_STR("WED RX MSDU PAGE"), + DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(0)), + DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(1)), + DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(2)), + + DUMP_STR("WED RX IND CMD"), + DUMP_WED(WED_IND_CMD_RX_CTRL1), + DUMP_WED_MASK(WED_IND_CMD_RX_CTRL2, WED_IND_CMD_MAX_CNT), + DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, 
WED_IND_CMD_PROC_IDX), + DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_DMA_IDX), + DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_MAGIC_CNT), + DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_MAGIC_CNT), + DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, + WED_IND_CMD_PREFETCH_FREE_CNT), + DUMP_WED_MASK(WED_RRO_CFG1, WED_RRO_CFG1_PARTICL_SE_ID), + + DUMP_STR("WED ADDR ELEM"), + DUMP_WED(WED_ADDR_ELEM_CFG0), + DUMP_WED_MASK(WED_ADDR_ELEM_CFG1, + WED_ADDR_ELEM_PREFETCH_FREE_CNT), + + DUMP_STR("WED Route QM"), + DUMP_WED(WED_RTQM_ENQ_I2Q_DMAD_CNT), + DUMP_WED(WED_RTQM_ENQ_I2N_DMAD_CNT), + DUMP_WED(WED_RTQM_ENQ_I2Q_PKT_CNT), + DUMP_WED(WED_RTQM_ENQ_I2N_PKT_CNT), + DUMP_WED(WED_RTQM_ENQ_USED_ENTRY_CNT), + DUMP_WED(WED_RTQM_ENQ_ERR_CNT), + + DUMP_WED(WED_RTQM_DEQ_DMAD_CNT), + DUMP_WED(WED_RTQM_DEQ_Q2I_DMAD_CNT), + DUMP_WED(WED_RTQM_DEQ_PKT_CNT), + DUMP_WED(WED_RTQM_DEQ_Q2I_PKT_CNT), + DUMP_WED(WED_RTQM_DEQ_USED_PFDBK_CNT), + DUMP_WED(WED_RTQM_DEQ_ERR_CNT), + }; struct mtk_wed_hw *hw = s->private; struct mtk_wed_device *dev = hw->wed_dev; - if (!dev) - return 0; - - dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs)); + if (dev) { + dump_wed_regs(s, dev, regs_common, ARRAY_SIZE(regs_common)); + if (mtk_wed_is_v2(hw)) + dump_wed_regs(s, dev, + regs_wed_v2, ARRAY_SIZE(regs_wed_v2)); + else + dump_wed_regs(s, dev, + regs_wed_v3, ARRAY_SIZE(regs_wed_v3)); + } return 0; } DEFINE_SHOW_ATTRIBUTE(wed_rxinfo); static int +wed_amsdu_show(struct seq_file *s, void *data) +{ + static const struct reg_dump regs[] = { + DUMP_STR("WED AMDSU INFO"), + DUMP_WED(WED_MON_AMSDU_FIFO_DMAD), + + DUMP_STR("WED AMDSU ENG0 INFO"), + DUMP_WED(WED_MON_AMSDU_ENG_DMAD(0)), + DUMP_WED(WED_MON_AMSDU_ENG_QFPL(0)), + DUMP_WED(WED_MON_AMSDU_ENG_QENI(0)), + DUMP_WED(WED_MON_AMSDU_ENG_QENO(0)), + DUMP_WED(WED_MON_AMSDU_ENG_MERG(0)), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(0), + WED_AMSDU_ENG_MAX_PL_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(0), + WED_AMSDU_ENG_MAX_QGPP_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0), + WED_AMSDU_ENG_CUR_ENTRY), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0), + WED_AMSDU_ENG_MAX_BUF_MERGED), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0), + WED_AMSDU_ENG_MAX_MSDU_MERGED), + + DUMP_STR("WED AMDSU ENG1 INFO"), + DUMP_WED(WED_MON_AMSDU_ENG_DMAD(1)), + DUMP_WED(WED_MON_AMSDU_ENG_QFPL(1)), + DUMP_WED(WED_MON_AMSDU_ENG_QENI(1)), + DUMP_WED(WED_MON_AMSDU_ENG_QENO(1)), + DUMP_WED(WED_MON_AMSDU_ENG_MERG(1)), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(1), + WED_AMSDU_ENG_MAX_PL_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(1), + WED_AMSDU_ENG_MAX_QGPP_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(1), + WED_AMSDU_ENG_CUR_ENTRY), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2), + WED_AMSDU_ENG_MAX_BUF_MERGED), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2), + WED_AMSDU_ENG_MAX_MSDU_MERGED), + + DUMP_STR("WED AMDSU ENG2 INFO"), + DUMP_WED(WED_MON_AMSDU_ENG_DMAD(2)), + DUMP_WED(WED_MON_AMSDU_ENG_QFPL(2)), + DUMP_WED(WED_MON_AMSDU_ENG_QENI(2)), + DUMP_WED(WED_MON_AMSDU_ENG_QENO(2)), + DUMP_WED(WED_MON_AMSDU_ENG_MERG(2)), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(2), + WED_AMSDU_ENG_MAX_PL_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(2), + WED_AMSDU_ENG_MAX_QGPP_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2), + WED_AMSDU_ENG_CUR_ENTRY), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2), + WED_AMSDU_ENG_MAX_BUF_MERGED), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2), + WED_AMSDU_ENG_MAX_MSDU_MERGED), + + DUMP_STR("WED AMDSU ENG3 INFO"), + DUMP_WED(WED_MON_AMSDU_ENG_DMAD(3)), + DUMP_WED(WED_MON_AMSDU_ENG_QFPL(3)), + DUMP_WED(WED_MON_AMSDU_ENG_QENI(3)), + 
DUMP_WED(WED_MON_AMSDU_ENG_QENO(3)), + DUMP_WED(WED_MON_AMSDU_ENG_MERG(3)), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(3), + WED_AMSDU_ENG_MAX_PL_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(3), + WED_AMSDU_ENG_MAX_QGPP_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3), + WED_AMSDU_ENG_CUR_ENTRY), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3), + WED_AMSDU_ENG_MAX_BUF_MERGED), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3), + WED_AMSDU_ENG_MAX_MSDU_MERGED), + + DUMP_STR("WED AMDSU ENG4 INFO"), + DUMP_WED(WED_MON_AMSDU_ENG_DMAD(4)), + DUMP_WED(WED_MON_AMSDU_ENG_QFPL(4)), + DUMP_WED(WED_MON_AMSDU_ENG_QENI(4)), + DUMP_WED(WED_MON_AMSDU_ENG_QENO(4)), + DUMP_WED(WED_MON_AMSDU_ENG_MERG(4)), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(4), + WED_AMSDU_ENG_MAX_PL_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(4), + WED_AMSDU_ENG_MAX_QGPP_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4), + WED_AMSDU_ENG_CUR_ENTRY), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4), + WED_AMSDU_ENG_MAX_BUF_MERGED), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4), + WED_AMSDU_ENG_MAX_MSDU_MERGED), + + DUMP_STR("WED AMDSU ENG5 INFO"), + DUMP_WED(WED_MON_AMSDU_ENG_DMAD(5)), + DUMP_WED(WED_MON_AMSDU_ENG_QFPL(5)), + DUMP_WED(WED_MON_AMSDU_ENG_QENI(5)), + DUMP_WED(WED_MON_AMSDU_ENG_QENO(5)), + DUMP_WED(WED_MON_AMSDU_ENG_MERG(5)), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(5), + WED_AMSDU_ENG_MAX_PL_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(5), + WED_AMSDU_ENG_MAX_QGPP_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5), + WED_AMSDU_ENG_CUR_ENTRY), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5), + WED_AMSDU_ENG_MAX_BUF_MERGED), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5), + WED_AMSDU_ENG_MAX_MSDU_MERGED), + + DUMP_STR("WED AMDSU ENG6 INFO"), + DUMP_WED(WED_MON_AMSDU_ENG_DMAD(6)), + DUMP_WED(WED_MON_AMSDU_ENG_QFPL(6)), + DUMP_WED(WED_MON_AMSDU_ENG_QENI(6)), + DUMP_WED(WED_MON_AMSDU_ENG_QENO(6)), + DUMP_WED(WED_MON_AMSDU_ENG_MERG(6)), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(6), + WED_AMSDU_ENG_MAX_PL_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(6), + WED_AMSDU_ENG_MAX_QGPP_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6), + WED_AMSDU_ENG_CUR_ENTRY), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6), + WED_AMSDU_ENG_MAX_BUF_MERGED), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6), + WED_AMSDU_ENG_MAX_MSDU_MERGED), + + DUMP_STR("WED AMDSU ENG7 INFO"), + DUMP_WED(WED_MON_AMSDU_ENG_DMAD(7)), + DUMP_WED(WED_MON_AMSDU_ENG_QFPL(7)), + DUMP_WED(WED_MON_AMSDU_ENG_QENI(7)), + DUMP_WED(WED_MON_AMSDU_ENG_QENO(7)), + DUMP_WED(WED_MON_AMSDU_ENG_MERG(7)), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(7), + WED_AMSDU_ENG_MAX_PL_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(7), + WED_AMSDU_ENG_MAX_QGPP_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(7), + WED_AMSDU_ENG_CUR_ENTRY), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(7), + WED_AMSDU_ENG_MAX_BUF_MERGED), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4), + WED_AMSDU_ENG_MAX_MSDU_MERGED), + + DUMP_STR("WED AMDSU ENG8 INFO"), + DUMP_WED(WED_MON_AMSDU_ENG_DMAD(8)), + DUMP_WED(WED_MON_AMSDU_ENG_QFPL(8)), + DUMP_WED(WED_MON_AMSDU_ENG_QENI(8)), + DUMP_WED(WED_MON_AMSDU_ENG_QENO(8)), + DUMP_WED(WED_MON_AMSDU_ENG_MERG(8)), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(8), + WED_AMSDU_ENG_MAX_PL_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(8), + WED_AMSDU_ENG_MAX_QGPP_CNT), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8), + WED_AMSDU_ENG_CUR_ENTRY), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8), + WED_AMSDU_ENG_MAX_BUF_MERGED), + DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8), + WED_AMSDU_ENG_MAX_MSDU_MERGED), + + DUMP_STR("WED QMEM INFO"), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(0), WED_AMSDU_QMEM_FQ_CNT), 
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(0), WED_AMSDU_QMEM_SP_QCNT), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(1), WED_AMSDU_QMEM_TID0_QCNT), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(1), WED_AMSDU_QMEM_TID1_QCNT), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(2), WED_AMSDU_QMEM_TID2_QCNT), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(2), WED_AMSDU_QMEM_TID3_QCNT), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(3), WED_AMSDU_QMEM_TID4_QCNT), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(3), WED_AMSDU_QMEM_TID5_QCNT), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(4), WED_AMSDU_QMEM_TID6_QCNT), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(4), WED_AMSDU_QMEM_TID7_QCNT), + + DUMP_STR("WED QMEM HEAD INFO"), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(0), WED_AMSDU_QMEM_FQ_HEAD), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(0), WED_AMSDU_QMEM_SP_QHEAD), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(1), WED_AMSDU_QMEM_TID0_QHEAD), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(1), WED_AMSDU_QMEM_TID1_QHEAD), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(2), WED_AMSDU_QMEM_TID2_QHEAD), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(2), WED_AMSDU_QMEM_TID3_QHEAD), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(3), WED_AMSDU_QMEM_TID4_QHEAD), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(3), WED_AMSDU_QMEM_TID5_QHEAD), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(4), WED_AMSDU_QMEM_TID6_QHEAD), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(4), WED_AMSDU_QMEM_TID7_QHEAD), + + DUMP_STR("WED QMEM TAIL INFO"), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(5), WED_AMSDU_QMEM_FQ_TAIL), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(5), WED_AMSDU_QMEM_SP_QTAIL), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(6), WED_AMSDU_QMEM_TID0_QTAIL), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(6), WED_AMSDU_QMEM_TID1_QTAIL), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(7), WED_AMSDU_QMEM_TID2_QTAIL), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(7), WED_AMSDU_QMEM_TID3_QTAIL), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(8), WED_AMSDU_QMEM_TID4_QTAIL), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(8), WED_AMSDU_QMEM_TID5_QTAIL), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(9), WED_AMSDU_QMEM_TID6_QTAIL), + DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(9), WED_AMSDU_QMEM_TID7_QTAIL), + + DUMP_STR("WED HIFTXD MSDU INFO"), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(1)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(2)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(3)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(4)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(5)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(6)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(7)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(8)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(9)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(10)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(11)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(12)), + DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(13)), + }; + struct mtk_wed_hw *hw = s->private; + struct mtk_wed_device *dev = hw->wed_dev; + + if (dev) + dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs)); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(wed_amsdu); + +static int +wed_rtqm_show(struct seq_file *s, void *data) +{ + static const struct reg_dump regs[] = { + DUMP_STR("WED Route QM IGRS0(N2H + Recycle)"), + DUMP_WED(WED_RTQM_IGRS0_I2HW_DMAD_CNT), + DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(0)), + DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(1)), + DUMP_WED(WED_RTQM_IGRS0_I2HW_PKT_CNT), + DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(0)), + DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(0)), + DUMP_WED(WED_RTQM_IGRS0_FDROP_CNT), + + DUMP_STR("WED Route QM IGRS1(Legacy)"), + DUMP_WED(WED_RTQM_IGRS1_I2HW_DMAD_CNT), + 
DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(0)), + DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(1)), + DUMP_WED(WED_RTQM_IGRS1_I2HW_PKT_CNT), + DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(0)), + DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(1)), + DUMP_WED(WED_RTQM_IGRS1_FDROP_CNT), + + DUMP_STR("WED Route QM IGRS2(RRO3.0)"), + DUMP_WED(WED_RTQM_IGRS2_I2HW_DMAD_CNT), + DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(0)), + DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(1)), + DUMP_WED(WED_RTQM_IGRS2_I2HW_PKT_CNT), + DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(0)), + DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(1)), + DUMP_WED(WED_RTQM_IGRS2_FDROP_CNT), + + DUMP_STR("WED Route QM IGRS3(DEBUG)"), + DUMP_WED(WED_RTQM_IGRS2_I2HW_DMAD_CNT), + DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(0)), + DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(1)), + DUMP_WED(WED_RTQM_IGRS3_I2HW_PKT_CNT), + DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(0)), + DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(1)), + DUMP_WED(WED_RTQM_IGRS3_FDROP_CNT), + }; + struct mtk_wed_hw *hw = s->private; + struct mtk_wed_device *dev = hw->wed_dev; + + if (dev) + dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs)); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(wed_rtqm); + +static int +wed_rro_show(struct seq_file *s, void *data) +{ + static const struct reg_dump regs[] = { + DUMP_STR("RRO/IND CMD CNT"), + DUMP_WED(WED_RX_IND_CMD_CNT(1)), + DUMP_WED(WED_RX_IND_CMD_CNT(2)), + DUMP_WED(WED_RX_IND_CMD_CNT(3)), + DUMP_WED(WED_RX_IND_CMD_CNT(4)), + DUMP_WED(WED_RX_IND_CMD_CNT(5)), + DUMP_WED(WED_RX_IND_CMD_CNT(6)), + DUMP_WED(WED_RX_IND_CMD_CNT(7)), + DUMP_WED(WED_RX_IND_CMD_CNT(8)), + DUMP_WED_MASK(WED_RX_IND_CMD_CNT(9), + WED_IND_CMD_MAGIC_CNT_FAIL_CNT), + + DUMP_WED(WED_RX_ADDR_ELEM_CNT(0)), + DUMP_WED_MASK(WED_RX_ADDR_ELEM_CNT(1), + WED_ADDR_ELEM_SIG_FAIL_CNT), + DUMP_WED(WED_RX_MSDU_PG_CNT(1)), + DUMP_WED(WED_RX_MSDU_PG_CNT(2)), + DUMP_WED(WED_RX_MSDU_PG_CNT(3)), + DUMP_WED(WED_RX_MSDU_PG_CNT(4)), + DUMP_WED(WED_RX_MSDU_PG_CNT(5)), + DUMP_WED_MASK(WED_RX_PN_CHK_CNT, + WED_PN_CHK_FAIL_CNT), + }; + struct mtk_wed_hw *hw = s->private; + struct mtk_wed_device *dev = hw->wed_dev; + + if (dev) + dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs)); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(wed_rro); + +static int mtk_wed_reg_set(void *data, u64 val) { struct mtk_wed_hw *hw = data; @@ -257,7 +622,16 @@ void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw) debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg); debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval); debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops); - if (hw->version != 1) + if (!mtk_wed_is_v1(hw)) { debugfs_create_file_unsafe("rxinfo", 0400, dir, hw, &wed_rxinfo_fops); + if (mtk_wed_is_v3_or_greater(hw)) { + debugfs_create_file_unsafe("amsdu", 0400, dir, hw, + &wed_amsdu_fops); + debugfs_create_file_unsafe("rtqm", 0400, dir, hw, + &wed_rtqm_fops); + debugfs_create_file_unsafe("rro", 0400, dir, hw, + &wed_rro_fops); + } + } } diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c index 071ed3dea860..ea0884186d76 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c +++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c @@ -16,14 +16,30 @@ #include "mtk_wed_wo.h" #include "mtk_wed.h" -static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg) +static struct mtk_wed_wo_memory_region mem_region[] = { + [MTK_WED_WO_REGION_EMI] = { + .name = "wo-emi", + }, + [MTK_WED_WO_REGION_ILM] = { + .name = "wo-ilm", + }, + [MTK_WED_WO_REGION_DATA] = { + .name = "wo-data", + .shared = true, + }, + [MTK_WED_WO_REGION_BOOT] = { + .name = "wo-boot", + 
}, +}; + +static u32 wo_r32(u32 reg) { - return readl(wo->boot.addr + reg); + return readl(mem_region[MTK_WED_WO_REGION_BOOT].addr + reg); } -static void wo_w32(struct mtk_wed_wo *wo, u32 reg, u32 val) +static void wo_w32(u32 reg, u32 val) { - writel(val, wo->boot.addr + reg); + writel(val, mem_region[MTK_WED_WO_REGION_BOOT].addr + reg); } static struct sk_buff * @@ -68,6 +84,9 @@ mtk_wed_update_rx_stats(struct mtk_wed_device *wed, struct sk_buff *skb) struct mtk_wed_wo_rx_stats *stats; int i; + if (!wed->wlan.update_wo_rx_stats) + return; + if (count * sizeof(*stats) > skb->len - sizeof(u32)) return; @@ -204,7 +223,7 @@ int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data, { struct mtk_wed_wo *wo = dev->hw->wed_wo; - if (dev->hw->version == 1) + if (!mtk_wed_get_rx_capa(dev)) return 0; if (WARN_ON(!wo)) @@ -215,19 +234,13 @@ int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data, } static int -mtk_wed_get_memory_region(struct mtk_wed_wo *wo, +mtk_wed_get_memory_region(struct mtk_wed_hw *hw, int index, struct mtk_wed_wo_memory_region *region) { struct reserved_mem *rmem; struct device_node *np; - int index; - index = of_property_match_string(wo->hw->node, "memory-region-names", - region->name); - if (index < 0) - return index; - - np = of_parse_phandle(wo->hw->node, "memory-region", index); + np = of_parse_phandle(hw->node, "memory-region", index); if (!np) return -ENODEV; @@ -239,14 +252,13 @@ mtk_wed_get_memory_region(struct mtk_wed_wo *wo, region->phy_addr = rmem->base; region->size = rmem->size; - region->addr = devm_ioremap(wo->hw->dev, region->phy_addr, region->size); + region->addr = devm_ioremap(hw->dev, region->phy_addr, region->size); return !region->addr ? -EINVAL : 0; } static int -mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw, - struct mtk_wed_wo_memory_region *region) +mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw) { const u8 *first_region_ptr, *region_ptr, *trailer_ptr, *ptr = fw->data; const struct mtk_wed_fw_trailer *trailer; @@ -259,50 +271,46 @@ mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw, while (region_ptr < trailer_ptr) { u32 length; + int i; fw_region = (const struct mtk_wed_fw_region *)region_ptr; length = le32_to_cpu(fw_region->len); - - if (region->phy_addr != le32_to_cpu(fw_region->addr)) + if (first_region_ptr < ptr + length) goto next; - if (region->size < length) - goto next; + for (i = 0; i < ARRAY_SIZE(mem_region); i++) { + struct mtk_wed_wo_memory_region *region; - if (first_region_ptr < ptr + length) - goto next; + region = &mem_region[i]; + if (region->phy_addr != le32_to_cpu(fw_region->addr)) + continue; + + if (region->size < length) + continue; - if (region->shared && region->consumed) - return 0; + if (region->shared && region->consumed) + break; - if (!region->shared || !region->consumed) { - memcpy_toio(region->addr, ptr, length); - region->consumed = true; - return 0; + if (!region->shared || !region->consumed) { + memcpy_toio(region->addr, ptr, length); + region->consumed = true; + break; + } } + + if (i == ARRAY_SIZE(mem_region)) + return -EINVAL; next: region_ptr += sizeof(*fw_region); ptr += length; } - return -EINVAL; + return 0; } static int mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo) { - static struct mtk_wed_wo_memory_region mem_region[] = { - [MTK_WED_WO_REGION_EMI] = { - .name = "wo-emi", - }, - [MTK_WED_WO_REGION_ILM] = { - .name = "wo-ilm", - }, - [MTK_WED_WO_REGION_DATA] = { - .name = "wo-data", - .shared = 
true, - }, - }; const struct mtk_wed_fw_trailer *trailer; const struct firmware *fw; const char *fw_name; @@ -311,25 +319,38 @@ mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo) /* load firmware region metadata */ for (i = 0; i < ARRAY_SIZE(mem_region); i++) { - ret = mtk_wed_get_memory_region(wo, &mem_region[i]); + int index = of_property_match_string(wo->hw->node, + "memory-region-names", + mem_region[i].name); + if (index < 0) + continue; + + ret = mtk_wed_get_memory_region(wo->hw, index, &mem_region[i]); if (ret) return ret; } - wo->boot.name = "wo-boot"; - ret = mtk_wed_get_memory_region(wo, &wo->boot); - if (ret) - return ret; - /* set dummy cr */ wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL, wo->hw->index + 1); /* load firmware */ - if (of_device_is_compatible(wo->hw->node, "mediatek,mt7981-wed")) - fw_name = MT7981_FIRMWARE_WO; - else - fw_name = wo->hw->index ? MT7986_FIRMWARE_WO1 : MT7986_FIRMWARE_WO0; + switch (wo->hw->version) { + case 2: + if (of_device_is_compatible(wo->hw->node, + "mediatek,mt7981-wed")) + fw_name = MT7981_FIRMWARE_WO; + else + fw_name = wo->hw->index ? MT7986_FIRMWARE_WO1 + : MT7986_FIRMWARE_WO0; + break; + case 3: + fw_name = wo->hw->index ? MT7988_FIRMWARE_WO1 + : MT7988_FIRMWARE_WO0; + break; + default: + return -EINVAL; + } ret = request_firmware(&fw, fw_name, wo->hw->dev); if (ret) @@ -343,23 +364,22 @@ mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo) dev_info(wo->hw->dev, "MTK WED WO Chip ID %02x Region %d\n", trailer->chip_id, trailer->num_region); - for (i = 0; i < ARRAY_SIZE(mem_region); i++) { - ret = mtk_wed_mcu_run_firmware(wo, fw, &mem_region[i]); - if (ret) - goto out; - } + ret = mtk_wed_mcu_run_firmware(wo, fw); + if (ret) + goto out; /* set the start address */ - boot_cr = wo->hw->index ? MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR - : MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR; - wo_w32(wo, boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16); + if (!mtk_wed_is_v3_or_greater(wo->hw) && wo->hw->index) + boot_cr = MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR; + else + boot_cr = MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR; + wo_w32(boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16); /* wo firmware reset */ - wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00); + wo_w32(MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00); - val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR); - val |= wo->hw->index ? 
MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK - : MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK; - wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val); + val = wo_r32(MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR) | + MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK; + wo_w32(MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val); out: release_firmware(fw); @@ -393,3 +413,5 @@ int mtk_wed_mcu_init(struct mtk_wed_wo *wo) MODULE_FIRMWARE(MT7981_FIRMWARE_WO); MODULE_FIRMWARE(MT7986_FIRMWARE_WO0); MODULE_FIRMWARE(MT7986_FIRMWARE_WO1); +MODULE_FIRMWARE(MT7988_FIRMWARE_WO0); +MODULE_FIRMWARE(MT7988_FIRMWARE_WO1); diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h index 0a50bb98c5ea..c71190924816 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h +++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h @@ -13,6 +13,9 @@ #define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30) #define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31) +#define MTK_WDMA_TXD0_DESC_INFO_DMA_DONE BIT(29) +#define MTK_WDMA_TXD1_DESC_INFO_DMA_DONE BIT(31) + struct mtk_wdma_desc { __le32 buf0; __le32 ctrl; @@ -25,6 +28,8 @@ struct mtk_wdma_desc { #define MTK_WED_RESET 0x008 #define MTK_WED_RESET_TX_BM BIT(0) #define MTK_WED_RESET_RX_BM BIT(1) +#define MTK_WED_RESET_RX_PG_BM BIT(2) +#define MTK_WED_RESET_RRO_RX_TO_PG BIT(3) #define MTK_WED_RESET_TX_FREE_AGENT BIT(4) #define MTK_WED_RESET_WPDMA_TX_DRV BIT(8) #define MTK_WED_RESET_WPDMA_RX_DRV BIT(9) @@ -37,6 +42,7 @@ struct mtk_wdma_desc { #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19) #define MTK_WED_RESET_RX_RRO_QM BIT(20) #define MTK_WED_RESET_RX_ROUTE_QM BIT(21) +#define MTK_WED_RESET_TX_AMSDU BIT(22) #define MTK_WED_RESET_WED BIT(31) #define MTK_WED_CTRL 0x00c @@ -44,6 +50,9 @@ struct mtk_wdma_desc { #define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1) #define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2) #define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3) +#define MTK_WED_CTRL_WED_RX_IND_CMD_EN BIT(5) +#define MTK_WED_CTRL_WED_RX_PG_BM_EN BIT(6) +#define MTK_WED_CTRL_WED_RX_PG_BM_BUSY BIT(7) #define MTK_WED_CTRL_WED_TX_BM_EN BIT(8) #define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9) #define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10) @@ -54,9 +63,14 @@ struct mtk_wdma_desc { #define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15) #define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16) #define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17) +#define MTK_WED_CTRL_TX_TKID_ALI_EN BIT(20) +#define MTK_WED_CTRL_TX_TKID_ALI_BUSY BIT(21) +#define MTK_WED_CTRL_TX_AMSDU_EN BIT(22) +#define MTK_WED_CTRL_TX_AMSDU_BUSY BIT(23) #define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24) #define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25) #define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28) +#define MTK_WED_CTRL_FLD_MIB_RD_CLR BIT(28) #define MTK_WED_EXT_INT_STATUS 0x020 #define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0) @@ -64,8 +78,8 @@ struct mtk_wdma_desc { #define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4) #define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8) #define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9) -#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12) -#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13) +#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(10) /* wed v2 */ +#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(11) /* wed v2 */ #define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16) #define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17) #define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18) @@ -89,19 +103,26 @@ struct mtk_wdma_desc { #define MTK_WED_EXT_INT_MASK 0x028 #define MTK_WED_EXT_INT_MASK1 0x02c #define MTK_WED_EXT_INT_MASK2 0x030 
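For orientation on how these definitions are consumed: the MTK_WED_EXT_INT_STATUS_* values above are single-bit BIT() masks over a latched interrupt-status register, which mtk_wed.c services by reading the register and writing the value straight back. A minimal sketch of that pattern follows, assuming struct mtk_wed_device and the wed_r32()/wed_w32() accessors from mtk_wed.h; the helper name is hypothetical, and the write-1-to-clear behaviour is inferred from the driver's read-back-and-write idiom rather than stated by this header.

#include <linux/bits.h>

/* Hypothetical helper: snapshot the latched extended-interrupt status,
 * acknowledge it by writing the same bits back, and return the snapshot
 * for decoding against the MTK_WED_EXT_INT_STATUS_* masks above.
 */
static u32 example_ext_int_ack(struct mtk_wed_device *dev)
{
	u32 status = wed_r32(dev, MTK_WED_EXT_INT_STATUS);

	wed_w32(dev, MTK_WED_EXT_INT_STATUS, status);
	return status;
}

Unmasking or silencing individual sources is then a matter of writing an OR of the same BIT() defines to MTK_WED_EXT_INT_MASK.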
+#define MTK_WED_EXT_INT_MASK3 0x034 #define MTK_WED_STATUS 0x060 #define MTK_WED_STATUS_TX GENMASK(15, 8) +#define MTK_WED_WPDMA_STATUS 0x068 +#define MTK_WED_WPDMA_STATUS_TX_DRV GENMASK(15, 8) + #define MTK_WED_TX_BM_CTRL 0x080 #define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0) #define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16) +#define MTK_WED_TX_BM_CTRL_LEGACY_EN BIT(26) +#define MTK_WED_TX_TKID_CTRL_FREE_FORMAT BIT(27) #define MTK_WED_TX_BM_CTRL_PAUSE BIT(28) #define MTK_WED_TX_BM_BASE 0x084 +#define MTK_WED_TX_BM_INIT_PTR 0x088 +#define MTK_WED_TX_BM_SW_TAIL_IDX GENMASK(16, 0) +#define MTK_WED_TX_BM_INIT_SW_TAIL_IDX BIT(16) -#define MTK_WED_TX_BM_TKID 0x088 -#define MTK_WED_TX_BM_TKID_V2 0x0c8 #define MTK_WED_TX_BM_TKID_START GENMASK(15, 0) #define MTK_WED_TX_BM_TKID_END GENMASK(31, 16) @@ -124,6 +145,12 @@ struct mtk_wdma_desc { #define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16) #define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28) +#define MTK_WED_TX_TKID_INTF 0x0dc +#define MTK_WED_TX_TKID_INTF_TKFIFO_FDEP GENMASK(25, 16) + +#define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3 GENMASK(7, 0) +#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3 GENMASK(23, 16) + #define MTK_WED_TX_TKID_DYN_THR 0x0e0 #define MTK_WED_TX_TKID_DYN_THR_LO GENMASK(6, 0) #define MTK_WED_TX_TKID_DYN_THR_HI GENMASK(22, 16) @@ -160,9 +187,6 @@ struct mtk_wdma_desc { #define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31) #define MTK_WED_RESET_IDX 0x20c -#define MTK_WED_RESET_IDX_TX GENMASK(3, 0) -#define MTK_WED_RESET_IDX_RX GENMASK(17, 16) -#define MTK_WED_RESET_IDX_RX_V2 GENMASK(7, 6) #define MTK_WED_RESET_WPDMA_IDX_RX GENMASK(31, 30) #define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4) @@ -174,6 +198,7 @@ struct mtk_wdma_desc { #define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10) #define MTK_WED_SCR0 0x3c0 +#define MTK_WED_RX1_CTRL2 0x418 #define MTK_WED_WPDMA_INT_TRIGGER 0x504 #define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1) #define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4) @@ -204,12 +229,15 @@ struct mtk_wdma_desc { #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_PKT_PROC BIT(5) #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC BIT(6) #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_CRX_SYNC BIT(7) -#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(18, 16) +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(15, 12) +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4 BIT(18) #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNSUPPORT_FMT BIT(19) -#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UEVENT_PKT_FMT_CHK BIT(20) +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK BIT(20) #define MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR BIT(21) #define MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP BIT(24) +#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST BIT(25) #define MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV BIT(28) +#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK BIT(30) #define MTK_WED_WPDMA_RESET_IDX 0x50c #define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0) @@ -255,9 +283,10 @@ struct mtk_wdma_desc { #define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16) #define MTK_WED_PCIE_INT_CTRL 0x57c -#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20) -#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16) #define MTK_WED_PCIE_INT_CTRL_POLL_EN GENMASK(13, 12) +#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16) +#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20) +#define MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER BIT(21) #define MTK_WED_WPDMA_CFG_BASE 0x580 #define MTK_WED_WPDMA_CFG_INT_MASK 0x584 @@ -266,6 +295,8 @@ struct mtk_wdma_desc { #define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4) 
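The multi-bit fields in this hunk, for example MTK_WED_TX_BM_CTRL_VLD_GRP_NUM (GENMASK(6, 0)) sitting next to RSV_GRP_NUM and PAUSE in the same register, are packed with FIELD_PREP() and unpacked with FIELD_GET() from <linux/bitfield.h>. A sketch of the usual read-modify-write, under the same assumptions as the previous example (hypothetical helper name; the register and mask names are real defines from this header):

#include <linux/bitfield.h>

/* Hypothetical helper: program the valid-group count in
 * MTK_WED_TX_BM_CTRL without disturbing RSV_GRP_NUM or PAUSE.
 */
static void example_set_tx_bm_vld_grp(struct mtk_wed_device *dev, u32 grp)
{
	u32 val = wed_r32(dev, MTK_WED_TX_BM_CTRL);

	val &= ~MTK_WED_TX_BM_CTRL_VLD_GRP_NUM;
	val |= FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, grp);
	wed_w32(dev, MTK_WED_TX_BM_CTRL, val);
}

The driver's wed_set()/wed_clr() helpers cover the common single-mask cases in one call, and FIELD_GET() with the same mask recovers a field from a value read back.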
#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4) +#define MTK_WED_WPDMA_RX_MIB(_n) (0x5e0 + (_n) * 4) +#define MTK_WED_WPDMA_RX_COHERENT_MIB(_n) (0x5f0 + (_n) * 4) #define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10) #define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10) @@ -281,15 +312,30 @@ struct mtk_wdma_desc { #define MTK_WED_WPDMA_RX_D_RST_IDX 0x760 #define MTK_WED_WPDMA_RX_D_RST_CRX_IDX GENMASK(17, 16) +#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL BIT(20) #define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24) #define MTK_WED_WPDMA_RX_GLO_CFG 0x76c -#define MTK_WED_WPDMA_RX_RING 0x770 #define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4) #define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4) #define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c +#define MTK_WED_WPDMA_RX_D_PREF_CFG 0x7b4 +#define MTK_WED_WPDMA_RX_D_PREF_EN BIT(0) +#define MTK_WED_WPDMA_RX_D_PREF_BUSY BIT(1) +#define MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE GENMASK(12, 8) +#define MTK_WED_WPDMA_RX_D_PREF_LOW_THRES GENMASK(21, 16) + +#define MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX 0x7b8 +#define MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR BIT(15) + +#define MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX 0x7bc + +#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG 0x7c0 +#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR BIT(0) +#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR BIT(16) + #define MTK_WED_WDMA_RING_TX 0x800 #define MTK_WED_WDMA_TX_MIB 0x810 @@ -297,6 +343,20 @@ struct mtk_wdma_desc { #define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10) #define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4) +#define MTK_WED_WDMA_RX_PREF_CFG 0x950 +#define MTK_WED_WDMA_RX_PREF_EN BIT(0) +#define MTK_WED_WDMA_RX_PREF_BUSY BIT(1) +#define MTK_WED_WDMA_RX_PREF_BURST_SIZE GENMASK(12, 8) +#define MTK_WED_WDMA_RX_PREF_LOW_THRES GENMASK(21, 16) +#define MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR BIT(24) +#define MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR BIT(25) +#define MTK_WED_WDMA_RX_PREF_DDONE2_EN BIT(26) +#define MTK_WED_WDMA_RX_PREF_DDONE2_BUSY BIT(27) + +#define MTK_WED_WDMA_RX_PREF_FIFO_CFG 0x95C +#define MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR BIT(0) +#define MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR BIT(16) + #define MTK_WED_WDMA_GLO_CFG 0xa04 #define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0) #define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1) @@ -320,6 +380,7 @@ struct mtk_wdma_desc { #define MTK_WED_WDMA_RESET_IDX 0xa08 #define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16) +#define MTK_WED_WDMA_RESET_IDX_RX_ALL BIT(20) #define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24) #define MTK_WED_WDMA_INT_CLR 0xa24 @@ -329,6 +390,7 @@ struct mtk_wdma_desc { #define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16) #define MTK_WED_WDMA_INT_CTRL 0xa2c +#define MTK_WED_WDMA_INT_POLL_PRD GENMASK(7, 0) #define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16) #define MTK_WED_WDMA_CFG_BASE 0xaa0 @@ -389,9 +451,62 @@ struct mtk_wdma_desc { #define MTK_WDMA_INT_MASK_RX_DELAY BIT(30) #define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31) +#define MTK_WDMA_XDMA_TX_FIFO_CFG 0x238 +#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR BIT(0) +#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR BIT(4) +#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR BIT(8) +#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR BIT(12) + +#define MTK_WDMA_XDMA_RX_FIFO_CFG 0x23c +#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR BIT(0) +#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR BIT(4) +#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR BIT(8) +#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR BIT(12) +#define 
MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR BIT(15) +#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR BIT(18) +#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR BIT(21) + #define MTK_WDMA_INT_GRP1 0x250 #define MTK_WDMA_INT_GRP2 0x254 +#define MTK_WDMA_PREF_TX_CFG 0x2d0 +#define MTK_WDMA_PREF_TX_CFG_PREF_EN BIT(0) +#define MTK_WDMA_PREF_TX_CFG_PREF_BUSY BIT(1) + +#define MTK_WDMA_PREF_RX_CFG 0x2dc +#define MTK_WDMA_PREF_RX_CFG_PREF_EN BIT(0) +#define MTK_WDMA_PREF_RX_CFG_PREF_BUSY BIT(1) + +#define MTK_WDMA_PREF_RX_FIFO_CFG 0x2e0 +#define MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR BIT(0) +#define MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR BIT(16) + +#define MTK_WDMA_PREF_TX_FIFO_CFG 0x2d4 +#define MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR BIT(0) +#define MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR BIT(16) + +#define MTK_WDMA_PREF_SIDX_CFG 0x2e4 +#define MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR GENMASK(3, 0) +#define MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR GENMASK(5, 4) + +#define MTK_WDMA_WRBK_TX_CFG 0x300 +#define MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY BIT(0) +#define MTK_WDMA_WRBK_TX_CFG_WRBK_EN BIT(30) + +#define MTK_WDMA_WRBK_TX_FIFO_CFG(_n) (0x304 + (_n) * 0x4) +#define MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR BIT(0) + +#define MTK_WDMA_WRBK_RX_CFG 0x344 +#define MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY BIT(0) +#define MTK_WDMA_WRBK_RX_CFG_WRBK_EN BIT(30) + +#define MTK_WDMA_WRBK_RX_FIFO_CFG(_n) (0x348 + (_n) * 0x4) +#define MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR BIT(0) + +#define MTK_WDMA_WRBK_SIDX_CFG 0x388 +#define MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR GENMASK(3, 0) +#define MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR GENMASK(5, 4) + #define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0) #define MTK_PCIE_MIRROR_MAP_EN BIT(0) #define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1) @@ -405,6 +520,32 @@ struct mtk_wdma_desc { #define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5) #define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20) +#define MTK_WED_RTQM_RST 0xb04 + +#define MTK_WED_RTQM_IGRS0_I2HW_DMAD_CNT 0xb1c +#define MTK_WED_RTQM_IGRS0_I2H_DMAD_CNT(_n) (0xb20 + (_n) * 0x4) +#define MTK_WED_RTQM_IGRS0_I2HW_PKT_CNT 0xb28 +#define MTK_WED_RTQM_IGRS0_I2H_PKT_CNT(_n) (0xb2c + (_n) * 0x4) +#define MTK_WED_RTQM_IGRS0_FDROP_CNT 0xb34 + +#define MTK_WED_RTQM_IGRS1_I2HW_DMAD_CNT 0xb44 +#define MTK_WED_RTQM_IGRS1_I2H_DMAD_CNT(_n) (0xb48 + (_n) * 0x4) +#define MTK_WED_RTQM_IGRS1_I2HW_PKT_CNT 0xb50 +#define MTK_WED_RTQM_IGRS1_I2H_PKT_CNT(_n) (0xb54 + (_n) * 0x4) +#define MTK_WED_RTQM_IGRS1_FDROP_CNT 0xb5c + +#define MTK_WED_RTQM_IGRS2_I2HW_DMAD_CNT 0xb6c +#define MTK_WED_RTQM_IGRS2_I2H_DMAD_CNT(_n) (0xb70 + (_n) * 0x4) +#define MTK_WED_RTQM_IGRS2_I2HW_PKT_CNT 0xb78 +#define MTK_WED_RTQM_IGRS2_I2H_PKT_CNT(_n) (0xb7c + (_n) * 0x4) +#define MTK_WED_RTQM_IGRS2_FDROP_CNT 0xb84 + +#define MTK_WED_RTQM_IGRS3_I2HW_DMAD_CNT 0xb94 +#define MTK_WED_RTQM_IGRS3_I2H_DMAD_CNT(_n) (0xb98 + (_n) * 0x4) +#define MTK_WED_RTQM_IGRS3_I2HW_PKT_CNT 0xba0 +#define MTK_WED_RTQM_IGRS3_I2H_PKT_CNT(_n) (0xba4 + (_n) * 0x4) +#define MTK_WED_RTQM_IGRS3_FDROP_CNT 0xbac + #define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4) #define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4) #define MTK_WED_RTQM_Q2N_MIB 0xb80 @@ -413,6 +554,24 @@ struct mtk_wdma_desc { #define MTK_WED_RTQM_Q2B_MIB 0xb8c #define MTK_WED_RTQM_PFDBK_MIB 0xb90 +#define MTK_WED_RTQM_ENQ_CFG0 0xbb8 +#define MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT GENMASK(15, 12) + +#define MTK_WED_RTQM_FDROP_MIB 0xb84 +#define MTK_WED_RTQM_ENQ_I2Q_DMAD_CNT 0xbbc +#define MTK_WED_RTQM_ENQ_I2N_DMAD_CNT 0xbc0 +#define MTK_WED_RTQM_ENQ_I2Q_PKT_CNT 0xbc4 +#define 
MTK_WED_RTQM_ENQ_I2N_PKT_CNT 0xbc8 +#define MTK_WED_RTQM_ENQ_USED_ENTRY_CNT 0xbcc +#define MTK_WED_RTQM_ENQ_ERR_CNT 0xbd0 + +#define MTK_WED_RTQM_DEQ_DMAD_CNT 0xbd8 +#define MTK_WED_RTQM_DEQ_Q2I_DMAD_CNT 0xbdc +#define MTK_WED_RTQM_DEQ_PKT_CNT 0xbe0 +#define MTK_WED_RTQM_DEQ_Q2I_PKT_CNT 0xbe4 +#define MTK_WED_RTQM_DEQ_USED_PFDBK_CNT 0xbe8 +#define MTK_WED_RTQM_DEQ_ERR_CNT 0xbec + #define MTK_WED_RROQM_GLO_CFG 0xc04 #define MTK_WED_RROQM_RST_IDX 0xc08 #define MTK_WED_RROQM_RST_IDX_MIOD BIT(0) @@ -462,7 +621,195 @@ struct mtk_wdma_desc { #define MTK_WED_RX_BM_INTF 0xd9c #define MTK_WED_RX_BM_ERR_STS 0xda8 +#define MTK_RRO_IND_CMD_SIGNATURE 0xe00 +#define MTK_RRO_IND_CMD_DMA_IDX GENMASK(11, 0) +#define MTK_RRO_IND_CMD_MAGIC_CNT GENMASK(30, 28) + +#define MTK_WED_IND_CMD_RX_CTRL0 0xe04 +#define MTK_WED_IND_CMD_PROC_IDX GENMASK(11, 0) +#define MTK_WED_IND_CMD_PREFETCH_FREE_CNT GENMASK(19, 16) +#define MTK_WED_IND_CMD_MAGIC_CNT GENMASK(30, 28) + +#define MTK_WED_IND_CMD_RX_CTRL1 0xe08 +#define MTK_WED_IND_CMD_RX_CTRL2 0xe0c +#define MTK_WED_IND_CMD_MAX_CNT GENMASK(11, 0) +#define MTK_WED_IND_CMD_BASE_M GENMASK(19, 16) + +#define MTK_WED_RRO_CFG0 0xe10 +#define MTK_WED_RRO_CFG1 0xe14 +#define MTK_WED_RRO_CFG1_MAX_WIN_SZ GENMASK(31, 29) +#define MTK_WED_RRO_CFG1_ACK_SN_BASE_M GENMASK(19, 16) +#define MTK_WED_RRO_CFG1_PARTICL_SE_ID GENMASK(11, 0) + +#define MTK_WED_ADDR_ELEM_CFG0 0xe18 +#define MTK_WED_ADDR_ELEM_CFG1 0xe1c +#define MTK_WED_ADDR_ELEM_PREFETCH_FREE_CNT GENMASK(19, 16) + +#define MTK_WED_ADDR_ELEM_TBL_CFG 0xe20 +#define MTK_WED_ADDR_ELEM_TBL_OFFSET GENMASK(6, 0) +#define MTK_WED_ADDR_ELEM_TBL_RD_RDY BIT(28) +#define MTK_WED_ADDR_ELEM_TBL_WR_RDY BIT(29) +#define MTK_WED_ADDR_ELEM_TBL_RD BIT(30) +#define MTK_WED_ADDR_ELEM_TBL_WR BIT(31) + +#define MTK_WED_RADDR_ELEM_TBL_WDATA 0xe24 +#define MTK_WED_RADDR_ELEM_TBL_RDATA 0xe28 + +#define MTK_WED_PN_CHECK_CFG 0xe30 +#define MTK_WED_PN_CHECK_SE_ID GENMASK(11, 0) +#define MTK_WED_PN_CHECK_RD_RDY BIT(28) +#define MTK_WED_PN_CHECK_WR_RDY BIT(29) +#define MTK_WED_PN_CHECK_RD BIT(30) +#define MTK_WED_PN_CHECK_WR BIT(31) + +#define MTK_WED_PN_CHECK_WDATA_M 0xe38 +#define MTK_WED_PN_CHECK_IS_FIRST BIT(17) + +#define MTK_WED_RRO_MSDU_PG_RING_CFG(_n) (0xe44 + (_n) * 0x8) + +#define MTK_WED_RRO_MSDU_PG_RING2_CFG 0xe58 +#define MTK_WED_RRO_MSDU_PG_DRV_CLR BIT(26) +#define MTK_WED_RRO_MSDU_PG_DRV_EN BIT(31) + +#define MTK_WED_RRO_MSDU_PG_CTRL0(_n) (0xe5c + (_n) * 0xc) +#define MTK_WED_RRO_MSDU_PG_CTRL1(_n) (0xe60 + (_n) * 0xc) +#define MTK_WED_RRO_MSDU_PG_CTRL2(_n) (0xe64 + (_n) * 0xc) + +#define MTK_WED_RRO_RX_D_RX(_n) (0xe80 + (_n) * 0x10) + +#define MTK_WED_RRO_RX_MAGIC_CNT BIT(13) + +#define MTK_WED_RRO_RX_D_CFG(_n) (0xea0 + (_n) * 0x4) +#define MTK_WED_RRO_RX_D_DRV_CLR BIT(26) +#define MTK_WED_RRO_RX_D_DRV_EN BIT(31) + +#define MTK_WED_RRO_PG_BM_RX_DMAM 0xeb0 +#define MTK_WED_RRO_PG_BM_RX_SDL0 GENMASK(13, 0) + +#define MTK_WED_RRO_PG_BM_BASE 0xeb4 +#define MTK_WED_RRO_PG_BM_INIT_PTR 0xeb8 +#define MTK_WED_RRO_PG_BM_SW_TAIL_IDX GENMASK(15, 0) +#define MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX BIT(16) + +#define MTK_WED_WPDMA_INT_CTRL_RRO_RX 0xeec +#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN BIT(0) +#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR BIT(1) +#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG GENMASK(6, 2) +#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN BIT(8) +#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR BIT(9) +#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG GENMASK(14, 10) + +#define MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG 0xef4 +#define 
MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN BIT(0) +#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR BIT(1) +#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG GENMASK(6, 2) +#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN BIT(8) +#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR BIT(9) +#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG GENMASK(14, 10) +#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN BIT(16) +#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR BIT(17) +#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG GENMASK(22, 18) + +#define MTK_WED_RRO_RX_HW_STS 0xf00 +#define MTK_WED_RX_IND_CMD_BUSY GENMASK(31, 0) + +#define MTK_WED_RX_IND_CMD_CNT0 0xf20 +#define MTK_WED_RX_IND_CMD_DBG_CNT_EN BIT(31) + +#define MTK_WED_RX_IND_CMD_CNT(_n) (0xf20 + (_n) * 0x4) +#define MTK_WED_IND_CMD_MAGIC_CNT_FAIL_CNT GENMASK(15, 0) + +#define MTK_WED_RX_ADDR_ELEM_CNT(_n) (0xf48 + (_n) * 0x4) +#define MTK_WED_ADDR_ELEM_SIG_FAIL_CNT GENMASK(15, 0) +#define MTK_WED_ADDR_ELEM_FIRST_SIG_FAIL_CNT GENMASK(31, 16) +#define MTK_WED_ADDR_ELEM_ACKSN_CNT GENMASK(27, 0) + +#define MTK_WED_RX_MSDU_PG_CNT(_n) (0xf5c + (_n) * 0x4) + +#define MTK_WED_RX_PN_CHK_CNT 0xf70 +#define MTK_WED_PN_CHK_FAIL_CNT GENMASK(15, 0) + #define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000 #define MTK_WED_PCIE_INT_MASK 0x0 +#define MTK_WED_AMSDU_FIFO 0x1800 +#define MTK_WED_AMSDU_IS_PRIOR0_RING BIT(10) + +#define MTK_WED_AMSDU_STA_INFO 0x01810 +#define MTK_WED_AMSDU_STA_INFO_DO_INIT BIT(0) +#define MTK_WED_AMSDU_STA_INFO_SET_INIT BIT(1) + +#define MTK_WED_AMSDU_STA_INFO_INIT 0x01814 +#define MTK_WED_AMSDU_STA_WTBL_HDRT_MODE BIT(0) +#define MTK_WED_AMSDU_STA_RMVL BIT(1) +#define MTK_WED_AMSDU_STA_MAX_AMSDU_LEN GENMASK(7, 2) +#define MTK_WED_AMSDU_STA_MAX_AMSDU_NUM GENMASK(11, 8) + +#define MTK_WED_AMSDU_HIFTXD_BASE_L(_n) (0x1980 + (_n) * 0x4) + +#define MTK_WED_AMSDU_PSE 0x1910 +#define MTK_WED_AMSDU_PSE_RESET BIT(16) + +#define MTK_WED_AMSDU_HIFTXD_CFG 0x1968 +#define MTK_WED_AMSDU_HIFTXD_SRC GENMASK(16, 15) + +#define MTK_WED_MON_AMSDU_FIFO_DMAD 0x1a34 + +#define MTK_WED_MON_AMSDU_ENG_DMAD(_n) (0x1a80 + (_n) * 0x50) +#define MTK_WED_MON_AMSDU_ENG_QFPL(_n) (0x1a84 + (_n) * 0x50) +#define MTK_WED_MON_AMSDU_ENG_QENI(_n) (0x1a88 + (_n) * 0x50) +#define MTK_WED_MON_AMSDU_ENG_QENO(_n) (0x1a8c + (_n) * 0x50) +#define MTK_WED_MON_AMSDU_ENG_MERG(_n) (0x1a90 + (_n) * 0x50) + +#define MTK_WED_MON_AMSDU_ENG_CNT8(_n) (0x1a94 + (_n) * 0x50) +#define MTK_WED_AMSDU_ENG_MAX_QGPP_CNT GENMASK(10, 0) +#define MTK_WED_AMSDU_ENG_MAX_PL_CNT GENMASK(27, 16) + +#define MTK_WED_MON_AMSDU_ENG_CNT9(_n) (0x1a98 + (_n) * 0x50) +#define MTK_WED_AMSDU_ENG_CUR_ENTRY GENMASK(10, 0) +#define MTK_WED_AMSDU_ENG_MAX_BUF_MERGED GENMASK(20, 16) +#define MTK_WED_AMSDU_ENG_MAX_MSDU_MERGED GENMASK(28, 24) + +#define MTK_WED_MON_AMSDU_QMEM_STS1 0x1e04 + +#define MTK_WED_MON_AMSDU_QMEM_CNT(_n) (0x1e0c + (_n) * 0x4) +#define MTK_WED_AMSDU_QMEM_FQ_CNT GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_SP_QCNT GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_TID0_QCNT GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID1_QCNT GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_TID2_QCNT GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID3_QCNT GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_TID4_QCNT GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID5_QCNT GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_TID6_QCNT GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID7_QCNT GENMASK(11, 0) + +#define MTK_WED_MON_AMSDU_QMEM_PTR(_n) (0x1e20 + (_n) * 0x4) +#define MTK_WED_AMSDU_QMEM_FQ_HEAD GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_SP_QHEAD GENMASK(11, 0) +#define 
MTK_WED_AMSDU_QMEM_TID0_QHEAD GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID1_QHEAD GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_TID2_QHEAD GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID3_QHEAD GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_TID4_QHEAD GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID5_QHEAD GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_TID6_QHEAD GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID7_QHEAD GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_FQ_TAIL GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_SP_QTAIL GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_TID0_QTAIL GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID1_QTAIL GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_TID2_QTAIL GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID3_QTAIL GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_TID4_QTAIL GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID5_QTAIL GENMASK(11, 0) +#define MTK_WED_AMSDU_QMEM_TID6_QTAIL GENMASK(27, 16) +#define MTK_WED_AMSDU_QMEM_TID7_QTAIL GENMASK(11, 0) + +#define MTK_WED_MON_AMSDU_HIFTXD_FETCH_MSDU(_n) (0x1ec4 + (_n) * 0x4) + +#define MTK_WED_PCIE_BASE 0x11280000 +#define MTK_WED_PCIE_BASE0 0x11300000 +#define MTK_WED_PCIE_BASE1 0x11310000 +#define MTK_WED_PCIE_BASE2 0x11290000 #endif diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c index 69fba29055e9..ae44ad5f8ce8 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c +++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c @@ -7,10 +7,9 @@ #include <linux/kernel.h> #include <linux/dma-mapping.h> -#include <linux/of_platform.h> #include <linux/interrupt.h> -#include <linux/of_address.h> #include <linux/mfd/syscon.h> +#include <linux/of.h> #include <linux/of_irq.h> #include <linux/bitfield.h> @@ -292,6 +291,9 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) for (i = 0; i < q->n_desc; i++) { struct mtk_wed_wo_queue_entry *entry = &q->entry[i]; + if (!entry->buf) + continue; + dma_unmap_single(wo->hw->dev, entry->addr, entry->len, DMA_TO_DEVICE); skb_free_frag(entry->buf); diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h index 7a1a2a28f1ac..87a67fa3868d 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h +++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h @@ -91,6 +91,8 @@ enum mtk_wed_dummy_cr_idx { #define MT7981_FIRMWARE_WO "mediatek/mt7981_wo.bin" #define MT7986_FIRMWARE_WO0 "mediatek/mt7986_wo_0.bin" #define MT7986_FIRMWARE_WO1 "mediatek/mt7986_wo_1.bin" +#define MT7988_FIRMWARE_WO0 "mediatek/mt7988_wo_0.bin" +#define MT7988_FIRMWARE_WO1 "mediatek/mt7988_wo_1.bin" #define MTK_WO_MCU_CFG_LS_BASE 0 #define MTK_WO_MCU_CFG_LS_HW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x000) @@ -228,7 +230,6 @@ struct mtk_wed_wo_queue { struct mtk_wed_wo { struct mtk_wed_hw *hw; - struct mtk_wed_wo_memory_region boot; struct mtk_wed_wo_queue q_tx; struct mtk_wed_wo_queue q_rx; diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig index 1b4b1f642317..825e05fb8607 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig @@ -27,6 +27,7 @@ config MLX4_EN_DCB config MLX4_CORE tristate depends on PCI + select AUXILIARY_BUS select NET_DEVLINK default n diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index 0eb7b83637d8..0d8a362c2673 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -194,7 +194,7 @@ void mlx4_enter_error_state(struct 
mlx4_dev_persistent *persist) mutex_unlock(&persist->device_state_mutex); /* At that step HW was already reset, now notify clients */ - mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0); + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, NULL); mlx4_cmd_wake_completions(dev); return; diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index c56d2194cbfc..f5b1f8c7834f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -2113,7 +2113,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) goto inform_slave_state; - mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave); + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, &slave); /* write the version in the event field */ reply |= mlx4_comm_get_version(); @@ -2152,7 +2152,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, if (mlx4_master_activate_admin_state(priv, slave)) goto reset_slave; slave_state[slave].active = true; - mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave); + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, &slave); break; case MLX4_COMM_CMD_VHCR_POST: if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) && diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 7d45f1d55f79..164a13272faa 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1467,8 +1467,8 @@ static int add_ip_rule(struct mlx4_en_priv *priv, struct list_head *list_h) { int err; - struct mlx4_spec_list *spec_l2 = NULL; - struct mlx4_spec_list *spec_l3 = NULL; + struct mlx4_spec_list *spec_l2; + struct mlx4_spec_list *spec_l3; struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec; spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL); @@ -1505,9 +1505,9 @@ static int add_tcp_udp_rule(struct mlx4_en_priv *priv, struct list_head *list_h, int proto) { int err; - struct mlx4_spec_list *spec_l2 = NULL; - struct mlx4_spec_list *spec_l3 = NULL; - struct mlx4_spec_list *spec_l4 = NULL; + struct mlx4_spec_list *spec_l2; + struct mlx4_spec_list *spec_l3; + struct mlx4_spec_list *spec_l4; struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec; spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index f1259bdb1a29..d8f4d00ad26b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -183,24 +183,31 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev) } } -static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port) +static int mlx4_en_event(struct notifier_block *this, unsigned long event, + void *param) { - struct mlx4_en_dev *endev = ctx; - - return endev->pndev[port]; -} - -static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, - enum mlx4_dev_event event, unsigned long port) -{ - struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr; + struct mlx4_en_dev *mdev = + container_of(this, struct mlx4_en_dev, mlx_nb); + struct mlx4_dev *dev = mdev->dev; struct mlx4_en_priv *priv; + int port; + + switch (event) { + case MLX4_DEV_EVENT_CATASTROPHIC_ERROR: + case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: + case MLX4_DEV_EVENT_SLAVE_INIT: + case MLX4_DEV_EVENT_SLAVE_SHUTDOWN: + break; + default: + port = *(int 
*)param; + break; + } switch (event) { case MLX4_DEV_EVENT_PORT_UP: case MLX4_DEV_EVENT_PORT_DOWN: if (!mdev->pndev[port]) - return; + return NOTIFY_DONE; priv = netdev_priv(mdev->pndev[port]); /* To prevent races, we poll the link state in a separate task rather than changing it here */ @@ -212,23 +219,30 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, mlx4_err(mdev, "Internal error detected, restarting device\n"); break; + case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: case MLX4_DEV_EVENT_SLAVE_INIT: case MLX4_DEV_EVENT_SLAVE_SHUTDOWN: break; default: if (port < 1 || port > dev->caps.num_ports || !mdev->pndev[port]) - return; - mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, - (int) port); + return NOTIFY_DONE; + mlx4_warn(mdev, "Unhandled event %d for port %d\n", (int)event, + port); } + + return NOTIFY_DONE; } -static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr) +static void mlx4_en_remove(struct auxiliary_device *adev) { - struct mlx4_en_dev *mdev = endev_ptr; + struct mlx4_adev *madev = container_of(adev, struct mlx4_adev, adev); + struct mlx4_dev *dev = madev->mdev; + struct mlx4_en_dev *mdev = auxiliary_get_drvdata(adev); int i; + mlx4_unregister_event_notifier(dev, &mdev->mlx_nb); + mutex_lock(&mdev->state_lock); mdev->device_up = false; mutex_unlock(&mdev->state_lock); @@ -242,52 +256,41 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr) iounmap(mdev->uar_map); mlx4_uar_free(dev, &mdev->priv_uar); mlx4_pd_free(dev, mdev->priv_pdn); - if (mdev->nb.notifier_call) - unregister_netdevice_notifier(&mdev->nb); + if (mdev->netdev_nb.notifier_call) + unregister_netdevice_notifier(&mdev->netdev_nb); kfree(mdev); } -static void mlx4_en_activate(struct mlx4_dev *dev, void *ctx) -{ - int i; - struct mlx4_en_dev *mdev = ctx; - - /* Create a netdev for each port */ - mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { - mlx4_info(mdev, "Activating port:%d\n", i); - if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) - mdev->pndev[i] = NULL; - } - - /* register notifier */ - mdev->nb.notifier_call = mlx4_en_netdev_event; - if (register_netdevice_notifier(&mdev->nb)) { - mdev->nb.notifier_call = NULL; - mlx4_err(mdev, "Failed to create notifier\n"); - } -} - -static void *mlx4_en_add(struct mlx4_dev *dev) +static int mlx4_en_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) { + struct mlx4_adev *madev = container_of(adev, struct mlx4_adev, adev); + struct mlx4_dev *dev = madev->mdev; struct mlx4_en_dev *mdev; - int i; + int err, i; printk_once(KERN_INFO "%s", mlx4_en_version); mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); - if (!mdev) + if (!mdev) { + err = -ENOMEM; goto err_free_res; + } - if (mlx4_pd_alloc(dev, &mdev->priv_pdn)) + err = mlx4_pd_alloc(dev, &mdev->priv_pdn); + if (err) goto err_free_dev; - if (mlx4_uar_alloc(dev, &mdev->priv_uar)) + err = mlx4_uar_alloc(dev, &mdev->priv_uar); + if (err) goto err_pd; mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE); - if (!mdev->uar_map) + if (!mdev->uar_map) { + err = -ENOMEM; goto err_uar; + } spin_lock_init(&mdev->uar_lock); mdev->dev = dev; @@ -299,13 +302,15 @@ static void *mlx4_en_add(struct mlx4_dev *dev) if (!mdev->LSO_support) mlx4_warn(mdev, "LSO not supported, please upgrade to later FW version to enable LSO\n"); - if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull, - MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ, - 0, 0, &mdev->mr)) { + err = mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull, + MLX4_PERM_LOCAL_WRITE | 
MLX4_PERM_LOCAL_READ, 0, 0, + &mdev->mr); + if (err) { mlx4_err(mdev, "Failed allocating memory region\n"); goto err_map; } - if (mlx4_mr_enable(mdev->dev, &mdev->mr)) { + err = mlx4_mr_enable(mdev->dev, &mdev->mr); + if (err) { mlx4_err(mdev, "Failed enabling memory region\n"); goto err_mr; } @@ -325,15 +330,39 @@ static void *mlx4_en_add(struct mlx4_dev *dev) * Note: we cannot use the shared workqueue because of deadlocks caused * by the rtnl lock */ mdev->workqueue = create_singlethread_workqueue("mlx4_en"); - if (!mdev->workqueue) + if (!mdev->workqueue) { + err = -ENOMEM; goto err_mr; + } /* At this stage all non-port specific tasks are complete: * mark the card state as up */ mutex_init(&mdev->state_lock); mdev->device_up = true; - return mdev; + /* register mlx4 core notifier */ + mdev->mlx_nb.notifier_call = mlx4_en_event; + err = mlx4_register_event_notifier(dev, &mdev->mlx_nb); + WARN(err, "failed to register mlx4 event notifier (%d)", err); + + /* Setup ports */ + + /* Create a netdev for each port */ + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { + mlx4_info(mdev, "Activating port:%d\n", i); + if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) + mdev->pndev[i] = NULL; + } + + /* register netdev notifier */ + mdev->netdev_nb.notifier_call = mlx4_en_netdev_event; + if (register_netdevice_notifier(&mdev->netdev_nb)) { + mdev->netdev_nb.notifier_call = NULL; + mlx4_err(mdev, "Failed to create netdev notifier\n"); + } + + auxiliary_set_drvdata(adev, mdev); + return 0; err_mr: (void) mlx4_mr_free(dev, &mdev->mr); @@ -347,16 +376,24 @@ err_pd: err_free_dev: kfree(mdev); err_free_res: - return NULL; + return err; } -static struct mlx4_interface mlx4_en_interface = { - .add = mlx4_en_add, - .remove = mlx4_en_remove, - .event = mlx4_en_event, - .get_dev = mlx4_en_get_netdev, +static const struct auxiliary_device_id mlx4_en_id_table[] = { + { .name = MLX4_ADEV_NAME ".eth" }, + {}, +}; + +MODULE_DEVICE_TABLE(auxiliary, mlx4_en_id_table); + +static struct mlx4_adrv mlx4_en_adrv = { + .adrv = { + .name = "eth", + .probe = mlx4_en_probe, + .remove = mlx4_en_remove, + .id_table = mlx4_en_id_table, + }, .protocol = MLX4_PROT_ETH, - .activate = mlx4_en_activate, }; static void mlx4_en_verify_params(void) @@ -385,12 +422,12 @@ static int __init mlx4_en_init(void) mlx4_en_verify_params(); mlx4_en_init_ptys2ethtool_map(); - return mlx4_register_interface(&mlx4_en_interface); + return mlx4_register_auxiliary_driver(&mlx4_en_adrv); } static void __exit mlx4_en_cleanup(void) { - mlx4_unregister_interface(&mlx4_en_interface); + mlx4_unregister_auxiliary_driver(&mlx4_en_adrv); } module_init(mlx4_en_init); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index e11bc0ac880e..33bbcced8105 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -291,7 +291,7 @@ mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip, __be32 dst_ip, u8 ip_proto, __be16 src_port, __be16 dst_port, u32 flow_id) { - struct mlx4_en_filter *filter = NULL; + struct mlx4_en_filter *filter; filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC); if (!filter) @@ -2894,63 +2894,6 @@ static const struct xdp_metadata_ops mlx4_xdp_metadata_ops = { .xmo_rx_hash = mlx4_en_xdp_rx_hash, }; -struct mlx4_en_bond { - struct work_struct work; - struct mlx4_en_priv *priv; - int is_bonded; - struct mlx4_port_map port_map; -}; - -static void mlx4_en_bond_work(struct work_struct *work) -{ - struct 
mlx4_en_bond *bond = container_of(work, - struct mlx4_en_bond, - work); - int err = 0; - struct mlx4_dev *dev = bond->priv->mdev->dev; - - if (bond->is_bonded) { - if (!mlx4_is_bonded(dev)) { - err = mlx4_bond(dev); - if (err) - en_err(bond->priv, "Fail to bond device\n"); - } - if (!err) { - err = mlx4_port_map_set(dev, &bond->port_map); - if (err) - en_err(bond->priv, "Fail to set port map [%d][%d]: %d\n", - bond->port_map.port1, - bond->port_map.port2, - err); - } - } else if (mlx4_is_bonded(dev)) { - err = mlx4_unbond(dev); - if (err) - en_err(bond->priv, "Fail to unbond device\n"); - } - dev_put(bond->priv->dev); - kfree(bond); -} - -static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded, - u8 v2p_p1, u8 v2p_p2) -{ - struct mlx4_en_bond *bond = NULL; - - bond = kzalloc(sizeof(*bond), GFP_ATOMIC); - if (!bond) - return -ENOMEM; - - INIT_WORK(&bond->work, mlx4_en_bond_work); - bond->priv = priv; - bond->is_bonded = is_bonded; - bond->port_map.port1 = v2p_p1; - bond->port_map.port2 = v2p_p2; - dev_hold(priv->dev); - queue_work(priv->mdev->workqueue, &bond->work); - return 0; -} - int mlx4_en_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { @@ -2960,14 +2903,13 @@ int mlx4_en_netdev_event(struct notifier_block *this, struct mlx4_dev *dev; int i, num_eth_ports = 0; bool do_bond = true; - struct mlx4_en_priv *priv; u8 v2p_port1 = 0; u8 v2p_port2 = 0; if (!net_eq(dev_net(ndev), &init_net)) return NOTIFY_DONE; - mdev = container_of(this, struct mlx4_en_dev, nb); + mdev = container_of(this, struct mlx4_en_dev, netdev_nb); dev = mdev->dev; /* Go into this mode only when two network devices set on two ports @@ -2995,7 +2937,6 @@ int mlx4_en_netdev_event(struct notifier_block *this, if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port) return NOTIFY_DONE; - priv = netdev_priv(ndev); if (do_bond) { struct netdev_notifier_bonding_info *notifier_info = ptr; struct netdev_bonding_info *bonding_info = @@ -3062,8 +3003,7 @@ int mlx4_en_netdev_event(struct notifier_block *this, } } - mlx4_en_queue_bond_work(priv, do_bond, - v2p_port1, v2p_port2); + mlx4_queue_bond_work(dev, do_bond, v2p_port1, v2p_port2); return NOTIFY_DONE; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 332472fe4990..a09b6e05337d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -400,7 +400,7 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv) for (ring = 0; ring < priv->rx_ring_num; ring++) { if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) { local_bh_disable(); - napi_reschedule(&priv->rx_cq[ring]->napi); + napi_schedule(&priv->rx_cq[ring]->napi); local_bh_enable(); } } diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 414e390e6b48..6598b10a9ff4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -501,7 +501,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) int port; int slave = 0; int ret; - u32 flr_slave; + int flr_slave; u8 update_slave_state; int i; enum slave_port_gen_event gen_event; @@ -606,8 +606,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) port = be32_to_cpu(eqe->event.port_change.port) >> 28; slaves_port = mlx4_phys_to_slaves_pport(dev, port); if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) { - mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN, - port); + mlx4_dispatch_event( + dev, 
MLX4_DEV_EVENT_PORT_DOWN, &port); mlx4_priv(dev)->sense.do_sense_port[port] = 1; if (!mlx4_is_master(dev)) break; @@ -647,7 +647,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) } } } else { - mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port); + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, + &port); mlx4_priv(dev)->sense.do_sense_port[port] = 0; @@ -758,7 +759,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) } spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, - flr_slave); + &flr_slave); queue_work(priv->mfunc.master.comm_wq, &priv->mfunc.master.slave_flr_event_work); break; @@ -787,8 +788,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) break; case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT: - mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE, - (unsigned long) eqe); + mlx4_dispatch_event( + dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE, eqe); break; case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT: diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index fe48d20d6118..0005d9e2c2d6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -1967,7 +1967,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW) { u8 *dst = (u8 *)(inbox + INIT_HCA_DRIVER_VERSION_OFFSET / 4); - strncpy(dst, DRV_NAME_FOR_FW, INIT_HCA_DRIVER_VERSION_SZ - 1); + strscpy(dst, DRV_NAME_FOR_FW, INIT_HCA_DRIVER_VERSION_SZ); mlx4_dbg(dev, "Reporting Driver Version to FW: %s\n", dst); } diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c index 65482f004e50..a371b970ac1e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -38,102 +38,131 @@ #include "mlx4.h" -struct mlx4_device_context { - struct list_head list; - struct list_head bond_list; - struct mlx4_interface *intf; - void *context; -}; - -static LIST_HEAD(intf_list); -static LIST_HEAD(dev_list); static DEFINE_MUTEX(intf_mutex); +static DEFINE_IDA(mlx4_adev_ida); -static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv) +static bool is_eth_supported(struct mlx4_dev *dev) { - struct mlx4_device_context *dev_ctx; + for (int port = 1; port <= dev->caps.num_ports; port++) + if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) + return true; - dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL); - if (!dev_ctx) - return; + return false; +} - dev_ctx->intf = intf; - dev_ctx->context = intf->add(&priv->dev); +static bool is_ib_supported(struct mlx4_dev *dev) +{ + for (int port = 1; port <= dev->caps.num_ports; port++) + if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB) + return true; - if (dev_ctx->context) { - spin_lock_irq(&priv->ctx_lock); - list_add_tail(&dev_ctx->list, &priv->ctx_list); - spin_unlock_irq(&priv->ctx_lock); - if (intf->activate) - intf->activate(&priv->dev, dev_ctx->context); - } else - kfree(dev_ctx); + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) + return true; + return false; } -static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv) +static const struct mlx4_adev_device { + const char *suffix; + bool (*is_supported)(struct mlx4_dev *dev); +} mlx4_adev_devices[] = { + { "eth", is_eth_supported }, + { "ib", is_ib_supported }, +}; + +int mlx4_adev_init(struct mlx4_dev *dev) { - struct 
mlx4_device_context *dev_ctx; + struct mlx4_priv *priv = mlx4_priv(dev); - list_for_each_entry(dev_ctx, &priv->ctx_list, list) - if (dev_ctx->intf == intf) { - spin_lock_irq(&priv->ctx_lock); - list_del(&dev_ctx->list); - spin_unlock_irq(&priv->ctx_lock); + priv->adev_idx = ida_alloc(&mlx4_adev_ida, GFP_KERNEL); + if (priv->adev_idx < 0) + return priv->adev_idx; - intf->remove(&priv->dev, dev_ctx->context); - kfree(dev_ctx); - return; - } + priv->adev = kcalloc(ARRAY_SIZE(mlx4_adev_devices), + sizeof(struct mlx4_adev *), GFP_KERNEL); + if (!priv->adev) { + ida_free(&mlx4_adev_ida, priv->adev_idx); + return -ENOMEM; + } + + return 0; } -int mlx4_register_interface(struct mlx4_interface *intf) +void mlx4_adev_cleanup(struct mlx4_dev *dev) { - struct mlx4_priv *priv; - - if (!intf->add || !intf->remove) - return -EINVAL; - - mutex_lock(&intf_mutex); + struct mlx4_priv *priv = mlx4_priv(dev); - list_add_tail(&intf->list, &intf_list); - list_for_each_entry(priv, &dev_list, dev_list) { - if (mlx4_is_mfunc(&priv->dev) && (intf->flags & MLX4_INTFF_BONDING)) { - mlx4_dbg(&priv->dev, - "SRIOV, disabling HA mode for intf proto %d\n", intf->protocol); - intf->flags &= ~MLX4_INTFF_BONDING; - } - mlx4_add_device(intf, priv); - } + kfree(priv->adev); + ida_free(&mlx4_adev_ida, priv->adev_idx); +} - mutex_unlock(&intf_mutex); +static void adev_release(struct device *dev) +{ + struct mlx4_adev *mlx4_adev = + container_of(dev, struct mlx4_adev, adev.dev); + struct mlx4_priv *priv = mlx4_priv(mlx4_adev->mdev); + int idx = mlx4_adev->idx; - return 0; + kfree(mlx4_adev); + priv->adev[idx] = NULL; } -EXPORT_SYMBOL_GPL(mlx4_register_interface); -void mlx4_unregister_interface(struct mlx4_interface *intf) +static struct mlx4_adev *add_adev(struct mlx4_dev *dev, int idx) { - struct mlx4_priv *priv; + struct mlx4_priv *priv = mlx4_priv(dev); + const char *suffix = mlx4_adev_devices[idx].suffix; + struct auxiliary_device *adev; + struct mlx4_adev *madev; + int ret; - mutex_lock(&intf_mutex); + madev = kzalloc(sizeof(*madev), GFP_KERNEL); + if (!madev) + return ERR_PTR(-ENOMEM); - list_for_each_entry(priv, &dev_list, dev_list) - mlx4_remove_device(intf, priv); + adev = &madev->adev; + adev->id = priv->adev_idx; + adev->name = suffix; + adev->dev.parent = &dev->persist->pdev->dev; + adev->dev.release = adev_release; + madev->mdev = dev; + madev->idx = idx; - list_del(&intf->list); + ret = auxiliary_device_init(adev); + if (ret) { + kfree(madev); + return ERR_PTR(ret); + } - mutex_unlock(&intf_mutex); + ret = auxiliary_device_add(adev); + if (ret) { + auxiliary_device_uninit(adev); + return ERR_PTR(ret); + } + return madev; +} + +static void del_adev(struct auxiliary_device *adev) +{ + auxiliary_device_delete(adev); + auxiliary_device_uninit(adev); +} + +int mlx4_register_auxiliary_driver(struct mlx4_adrv *madrv) +{ + return auxiliary_driver_register(&madrv->adrv); +} +EXPORT_SYMBOL_GPL(mlx4_register_auxiliary_driver); + +void mlx4_unregister_auxiliary_driver(struct mlx4_adrv *madrv) +{ + auxiliary_driver_unregister(&madrv->adrv); } -EXPORT_SYMBOL_GPL(mlx4_unregister_interface); +EXPORT_SYMBOL_GPL(mlx4_unregister_auxiliary_driver); int mlx4_do_bond(struct mlx4_dev *dev, bool enable) { struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_device_context *dev_ctx = NULL, *temp_dev_ctx; - unsigned long flags; - int ret; - LIST_HEAD(bond_list); + int i, ret; if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)) return -EOPNOTSUPP; @@ -155,69 +184,178 @@ int mlx4_do_bond(struct mlx4_dev *dev, bool enable) dev->flags &= 
~MLX4_FLAG_BONDED; } - spin_lock_irqsave(&priv->ctx_lock, flags); - list_for_each_entry_safe(dev_ctx, temp_dev_ctx, &priv->ctx_list, list) { - if (dev_ctx->intf->flags & MLX4_INTFF_BONDING) { - list_add_tail(&dev_ctx->bond_list, &bond_list); - list_del(&dev_ctx->list); + mutex_lock(&intf_mutex); + + for (i = 0; i < ARRAY_SIZE(mlx4_adev_devices); i++) { + struct mlx4_adev *madev = priv->adev[i]; + struct mlx4_adrv *madrv; + enum mlx4_protocol protocol; + + if (!madev) + continue; + + device_lock(&madev->adev.dev); + if (!madev->adev.dev.driver) { + device_unlock(&madev->adev.dev); + continue; + } + + madrv = container_of(madev->adev.dev.driver, struct mlx4_adrv, + adrv.driver); + if (!(madrv->flags & MLX4_INTFF_BONDING)) { + device_unlock(&madev->adev.dev); + continue; + } + + if (mlx4_is_mfunc(dev)) { + mlx4_dbg(dev, + "SRIOV, disabled HA mode for intf proto %d\n", + madrv->protocol); + device_unlock(&madev->adev.dev); + continue; } - } - spin_unlock_irqrestore(&priv->ctx_lock, flags); - list_for_each_entry(dev_ctx, &bond_list, bond_list) { - dev_ctx->intf->remove(dev, dev_ctx->context); - dev_ctx->context = dev_ctx->intf->add(dev); + protocol = madrv->protocol; + device_unlock(&madev->adev.dev); - spin_lock_irqsave(&priv->ctx_lock, flags); - list_add_tail(&dev_ctx->list, &priv->ctx_list); - spin_unlock_irqrestore(&priv->ctx_lock, flags); + del_adev(&madev->adev); + priv->adev[i] = add_adev(dev, i); + if (IS_ERR(priv->adev[i])) { + mlx4_warn(dev, "Device[%d] (%s) failed to load\n", i, + mlx4_adev_devices[i].suffix); + priv->adev[i] = NULL; + continue; + } - mlx4_dbg(dev, "Interface for protocol %d restarted with bonded mode %s\n", - dev_ctx->intf->protocol, enable ? - "enabled" : "disabled"); + mlx4_dbg(dev, + "Interface for protocol %d restarted with bonded mode %s\n", + protocol, enable ? 
"enabled" : "disabled"); } + + mutex_unlock(&intf_mutex); + return 0; } void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, - unsigned long param) + void *param) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + atomic_notifier_call_chain(&priv->event_nh, type, param); +} + +int mlx4_register_event_notifier(struct mlx4_dev *dev, + struct notifier_block *nb) { struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_device_context *dev_ctx; - unsigned long flags; - spin_lock_irqsave(&priv->ctx_lock, flags); + return atomic_notifier_chain_register(&priv->event_nh, nb); +} +EXPORT_SYMBOL(mlx4_register_event_notifier); - list_for_each_entry(dev_ctx, &priv->ctx_list, list) - if (dev_ctx->intf->event) - dev_ctx->intf->event(dev, dev_ctx->context, type, param); +int mlx4_unregister_event_notifier(struct mlx4_dev *dev, + struct notifier_block *nb) +{ + struct mlx4_priv *priv = mlx4_priv(dev); - spin_unlock_irqrestore(&priv->ctx_lock, flags); + return atomic_notifier_chain_unregister(&priv->event_nh, nb); } +EXPORT_SYMBOL(mlx4_unregister_event_notifier); -int mlx4_register_device(struct mlx4_dev *dev) +static int add_drivers(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i, ret = 0; + + for (i = 0; i < ARRAY_SIZE(mlx4_adev_devices); i++) { + bool is_supported = false; + + if (priv->adev[i]) + continue; + + if (mlx4_adev_devices[i].is_supported) + is_supported = mlx4_adev_devices[i].is_supported(dev); + + if (!is_supported) + continue; + + priv->adev[i] = add_adev(dev, i); + if (IS_ERR(priv->adev[i])) { + mlx4_warn(dev, "Device[%d] (%s) failed to load\n", i, + mlx4_adev_devices[i].suffix); + /* We continue to rescan drivers and leave to the caller + * to make decision if to release everything or + * continue. */ + ret = PTR_ERR(priv->adev[i]); + priv->adev[i] = NULL; + } + } + return ret; +} + +static void delete_drivers(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_interface *intf; + bool delete_all; + int i; + + delete_all = !(dev->persist->interface_state & MLX4_INTERFACE_STATE_UP); + + for (i = ARRAY_SIZE(mlx4_adev_devices) - 1; i >= 0; i--) { + bool is_supported = false; + + if (!priv->adev[i]) + continue; + + if (mlx4_adev_devices[i].is_supported && !delete_all) + is_supported = mlx4_adev_devices[i].is_supported(dev); + + if (is_supported) + continue; + + del_adev(&priv->adev[i]->adev); + priv->adev[i] = NULL; + } +} + +/* This function is used after mlx4_dev is reconfigured. 
+ */ +static int rescan_drivers_locked(struct mlx4_dev *dev) +{ + lockdep_assert_held(&intf_mutex); + + delete_drivers(dev); + if (!(dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)) + return 0; + + return add_drivers(dev); +} + +int mlx4_register_device(struct mlx4_dev *dev) +{ + int ret; mutex_lock(&intf_mutex); dev->persist->interface_state |= MLX4_INTERFACE_STATE_UP; - list_add_tail(&priv->dev_list, &dev_list); - list_for_each_entry(intf, &intf_list, list) - mlx4_add_device(intf, priv); + + ret = rescan_drivers_locked(dev); mutex_unlock(&intf_mutex); + + if (ret) { + mlx4_unregister_device(dev); + return ret; + } + mlx4_start_catas_poll(dev); - return 0; + return ret; } void mlx4_unregister_device(struct mlx4_dev *dev) { - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_interface *intf; - if (!(dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)) return; @@ -236,35 +374,12 @@ void mlx4_unregister_device(struct mlx4_dev *dev) } mutex_lock(&intf_mutex); - list_for_each_entry(intf, &intf_list, list) - mlx4_remove_device(intf, priv); - - list_del(&priv->dev_list); dev->persist->interface_state &= ~MLX4_INTERFACE_STATE_UP; - mutex_unlock(&intf_mutex); -} - -void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port) -{ - struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_device_context *dev_ctx; - unsigned long flags; - void *result = NULL; + rescan_drivers_locked(dev); - spin_lock_irqsave(&priv->ctx_lock, flags); - - list_for_each_entry(dev_ctx, &priv->ctx_list, list) - if (dev_ctx->intf->protocol == proto && dev_ctx->intf->get_dev) { - result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port); - break; - } - - spin_unlock_irqrestore(&priv->ctx_lock, flags); - - return result; + mutex_unlock(&intf_mutex); } -EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev); struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port) { diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 61286b0d9b0c..2581226836b5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -42,7 +42,6 @@ #include <linux/slab.h> #include <linux/io-mapping.h> #include <linux/delay.h> -#include <linux/kmod.h> #include <linux/etherdevice.h> #include <net/devlink.h> @@ -864,7 +863,7 @@ static void mlx4_slave_destroy_special_qp_cap(struct mlx4_dev *dev) static int mlx4_slave_special_qp_cap(struct mlx4_dev *dev) { - struct mlx4_func_cap *func_cap = NULL; + struct mlx4_func_cap *func_cap; struct mlx4_caps *caps = &dev->caps; int i, err = 0; @@ -908,9 +907,9 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) { int err; u32 page_size; - struct mlx4_dev_cap *dev_cap = NULL; - struct mlx4_func_cap *func_cap = NULL; - struct mlx4_init_hca_param *hca_param = NULL; + struct mlx4_dev_cap *dev_cap; + struct mlx4_func_cap *func_cap; + struct mlx4_init_hca_param *hca_param; hca_param = kzalloc(sizeof(*hca_param), GFP_KERNEL); func_cap = kzalloc(sizeof(*func_cap), GFP_KERNEL); @@ -1091,27 +1090,6 @@ free_mem: return err; } -static void mlx4_request_modules(struct mlx4_dev *dev) -{ - int port; - int has_ib_port = false; - int has_eth_port = false; -#define EN_DRV_NAME "mlx4_en" -#define IB_DRV_NAME "mlx4_ib" - - for (port = 1; port <= dev->caps.num_ports; port++) { - if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB) - has_ib_port = true; - else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) - has_eth_port = true; - } - - if (has_eth_port) - request_module_nowait(EN_DRV_NAME); - if 
(has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) - request_module_nowait(IB_DRV_NAME); -} - /* * Change the port configuration of the device. * Every user of this function must hold the port mutex. @@ -1147,7 +1125,6 @@ int mlx4_change_port_types(struct mlx4_dev *dev, mlx4_err(dev, "Failed to register device\n"); goto out; } - mlx4_request_modules(dev); } out: @@ -1441,7 +1418,7 @@ static int mlx4_mf_unbond(struct mlx4_dev *dev) return ret; } -int mlx4_bond(struct mlx4_dev *dev) +static int mlx4_bond(struct mlx4_dev *dev) { int ret = 0; struct mlx4_priv *priv = mlx4_priv(dev); @@ -1467,9 +1444,8 @@ int mlx4_bond(struct mlx4_dev *dev) return ret; } -EXPORT_SYMBOL_GPL(mlx4_bond); -int mlx4_unbond(struct mlx4_dev *dev) +static int mlx4_unbond(struct mlx4_dev *dev) { int ret = 0; struct mlx4_priv *priv = mlx4_priv(dev); @@ -1496,10 +1472,8 @@ int mlx4_unbond(struct mlx4_dev *dev) return ret; } -EXPORT_SYMBOL_GPL(mlx4_unbond); - -int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p) +static int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p) { u8 port1 = v2p->port1; u8 port2 = v2p->port2; @@ -1541,7 +1515,61 @@ int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p) mutex_unlock(&priv->bond_mutex); return err; } -EXPORT_SYMBOL_GPL(mlx4_port_map_set); + +struct mlx4_bond { + struct work_struct work; + struct mlx4_dev *dev; + int is_bonded; + struct mlx4_port_map port_map; +}; + +static void mlx4_bond_work(struct work_struct *work) +{ + struct mlx4_bond *bond = container_of(work, struct mlx4_bond, work); + int err = 0; + + if (bond->is_bonded) { + if (!mlx4_is_bonded(bond->dev)) { + err = mlx4_bond(bond->dev); + if (err) + mlx4_err(bond->dev, "Fail to bond device\n"); + } + if (!err) { + err = mlx4_port_map_set(bond->dev, &bond->port_map); + if (err) + mlx4_err(bond->dev, + "Fail to set port map [%d][%d]: %d\n", + bond->port_map.port1, + bond->port_map.port2, err); + } + } else if (mlx4_is_bonded(bond->dev)) { + err = mlx4_unbond(bond->dev); + if (err) + mlx4_err(bond->dev, "Fail to unbond device\n"); + } + put_device(&bond->dev->persist->pdev->dev); + kfree(bond); +} + +int mlx4_queue_bond_work(struct mlx4_dev *dev, int is_bonded, u8 v2p_p1, + u8 v2p_p2) +{ + struct mlx4_bond *bond; + + bond = kzalloc(sizeof(*bond), GFP_ATOMIC); + if (!bond) + return -ENOMEM; + + INIT_WORK(&bond->work, mlx4_bond_work); + get_device(&dev->persist->pdev->dev); + bond->dev = dev; + bond->is_bonded = is_bonded; + bond->port_map.port1 = v2p_p1; + bond->port_map.port2 = v2p_p2; + queue_work(mlx4_wq, &bond->work); + return 0; +} +EXPORT_SYMBOL(mlx4_queue_bond_work); static int mlx4_load_fw(struct mlx4_dev *dev) { @@ -3375,8 +3403,11 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, devl_assert_locked(devlink); dev = &priv->dev; - INIT_LIST_HEAD(&priv->ctx_list); - spin_lock_init(&priv->ctx_lock); + err = mlx4_adev_init(dev); + if (err) + return err; + + ATOMIC_INIT_NOTIFIER_HEAD(&priv->event_nh); mutex_init(&priv->port_mutex); mutex_init(&priv->bond_mutex); @@ -3402,10 +3433,11 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, err = mlx4_get_ownership(dev); if (err) { if (err < 0) - return err; + goto err_adev; else { mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n"); - return -EINVAL; + err = -EINVAL; + goto err_adev; } } @@ -3674,8 +3706,6 @@ slave_start: if (err) goto err_port; - mlx4_request_modules(dev); - mlx4_sense_init(dev); mlx4_start_sense(dev); @@ -3753,6 +3783,9 @@ err_sriov: 
mlx4_free_ownership(dev); kfree(dev_cap); + +err_adev: + mlx4_adev_cleanup(dev); return err; } @@ -4133,6 +4166,8 @@ static void mlx4_unload_one(struct pci_dev *pdev) mlx4_slave_destroy_special_qp_cap(dev); kfree(dev->dev_vfs); + mlx4_adev_cleanup(dev); + mlx4_clean_dev(dev); priv->pci_dev_data = pci_dev_data; priv->removed = 1; @@ -4520,6 +4555,9 @@ static int __init mlx4_init(void) { int ret; + WARN_ONCE(strcmp(MLX4_ADEV_NAME, KBUILD_MODNAME), + "mlx4_core name not in sync with kernel module name"); + if (mlx4_verify_params()) return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index f1716a83a4d3..24d0c7c46878 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -294,7 +294,7 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port, struct mlx4_promisc_qp *dqp, *tmp_dqp; if (port < 1 || port > dev->caps.num_ports) - return NULL; + return false; s_steer = &mlx4_priv(dev)->steer[port - 1]; @@ -375,7 +375,7 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port, bool ret = false; if (port < 1 || port > dev->caps.num_ports) - return NULL; + return false; s_steer = &mlx4_priv(dev)->steer[port - 1]; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 6ccf340660d9..d7d856d1758a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -47,6 +47,8 @@ #include <linux/spinlock.h> #include <net/devlink.h> #include <linux/rwsem.h> +#include <linux/auxiliary_bus.h> +#include <linux/notifier.h> #include <linux/mlx4/device.h> #include <linux/mlx4/driver.h> @@ -862,6 +864,11 @@ struct mlx4_steer { struct list_head steer_entries[MLX4_NUM_STEERS]; }; +struct mlx4_port_map { + u8 port1; + u8 port2; +}; + enum { MLX4_PCI_DEV_IS_VF = 1 << 0, MLX4_PCI_DEV_FORCE_SENSE_PORT = 1 << 1, @@ -875,9 +882,9 @@ enum { struct mlx4_priv { struct mlx4_dev dev; - struct list_head dev_list; - struct list_head ctx_list; - spinlock_t ctx_lock; + struct mlx4_adev **adev; + int adev_idx; + struct atomic_notifier_head event_nh; int pci_dev_data; int removed; @@ -1045,10 +1052,13 @@ void mlx4_catas_end(struct mlx4_dev *dev); int mlx4_crdump_init(struct mlx4_dev *dev); void mlx4_crdump_end(struct mlx4_dev *dev); int mlx4_restart_one(struct pci_dev *pdev); + +int mlx4_adev_init(struct mlx4_dev *dev); +void mlx4_adev_cleanup(struct mlx4_dev *dev); int mlx4_register_device(struct mlx4_dev *dev); void mlx4_unregister_device(struct mlx4_dev *dev); void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, - unsigned long param); + void *param); struct mlx4_dev_cap; struct mlx4_init_hca_param; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 321f801c1d7c..efe3f97b874f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -49,6 +49,7 @@ #include <linux/ptp_clock_kernel.h> #include <linux/irq.h> #include <net/xdp.h> +#include <linux/notifier.h> #include <linux/mlx4/device.h> #include <linux/mlx4/qp.h> @@ -432,7 +433,8 @@ struct mlx4_en_dev { unsigned long last_overflow_check; struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_info; - struct notifier_block nb; + struct notifier_block netdev_nb; + struct notifier_block mlx_nb; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 
bb1d7b039a7e..685335832a93 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -12,6 +12,7 @@ config MLX5_CORE depends on MLXFW || !MLXFW depends on PTP_1588_CLOCK_OPTIONAL depends on PCI_HYPERV_INTERFACE || !PCI_HYPERV_INTERFACE + depends on HWMON || !HWMON help Core driver for low level functionality of the ConnectX-4 and Connect-IB cards by Mellanox Technologies. @@ -139,7 +140,7 @@ config MLX5_CORE_IPOIB help MLX5 IPoIB offloads & acceleration support. -config MLX5_EN_MACSEC +config MLX5_MACSEC bool "Connect-X support for MACSec offload" depends on MLX5_CORE_EN depends on MACSEC @@ -188,3 +189,11 @@ config MLX5_SF_MANAGER port is managed through devlink. A subfunction supports RDMA, netdevice and vdpa device. It is similar to a SRIOV VF but it doesn't require SRIOV support. + +config MLX5_DPLL + tristate "Mellanox 5th generation network adapters (ConnectX series) DPLL support" + depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE + select DPLL + help + DPLL support in Mellanox Technologies ConnectX NICs. + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 35f00700a4d6..c44870b175f9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -69,16 +69,20 @@ mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o # mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \ ecpf.o rdma.o esw/legacy.o \ - esw/devlink_port.o esw/vporttbl.o esw/qos.o + esw/devlink_port.o esw/vporttbl.o esw/qos.o esw/ipsec.o mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \ esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \ esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o +ifneq ($(CONFIG_MLX5_EN_IPSEC),) + mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/ipsec_fs.o +endif + mlx5_core-$(CONFIG_MLX5_BRIDGE) += esw/bridge.o esw/bridge_mcast.o esw/bridge_debugfs.o \ en/rep/bridge.o -mlx5_core-$(CONFIG_THERMAL) += thermal.o +mlx5_core-$(CONFIG_HWMON) += hwmon.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o @@ -94,7 +98,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib # mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o -mlx5_core-$(CONFIG_MLX5_EN_MACSEC) += en_accel/macsec.o en_accel/macsec_fs.o \ +mlx5_core-$(CONFIG_MLX5_MACSEC) += en_accel/macsec.o lib/macsec_fs.o \ en_accel/macsec_stats.o mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ @@ -124,3 +128,6 @@ mlx5_core-$(CONFIG_MLX5_SF) += sf/vhca_event.o sf/dev/dev.o sf/dev/driver.o irq_ # SF manager # mlx5_core-$(CONFIG_MLX5_SF_MANAGER) += sf/cmd.o sf/hw_table.o sf/devlink.o + +obj-$(CONFIG_MLX5_DPLL) += mlx5_dpll.o +mlx5_dpll-y := dpll.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index d532883b42d7..a7b1f9686c09 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -156,24 +156,27 @@ static u8 alloc_token(struct mlx5_cmd *cmd) return token; } -static int cmd_alloc_index(struct mlx5_cmd *cmd) +static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent) { unsigned long flags; int ret; spin_lock_irqsave(&cmd->alloc_lock, flags); - ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds); - if (ret < cmd->max_reg_cmds) - 
clear_bit(ret, &cmd->bitmask); + ret = find_first_bit(&cmd->vars.bitmask, cmd->vars.max_reg_cmds); + if (ret < cmd->vars.max_reg_cmds) { + clear_bit(ret, &cmd->vars.bitmask); + ent->idx = ret; + cmd->ent_arr[ent->idx] = ent; + } spin_unlock_irqrestore(&cmd->alloc_lock, flags); - return ret < cmd->max_reg_cmds ? ret : -ENOMEM; + return ret < cmd->vars.max_reg_cmds ? ret : -ENOMEM; } static void cmd_free_index(struct mlx5_cmd *cmd, int idx) { lockdep_assert_held(&cmd->alloc_lock); - set_bit(idx, &cmd->bitmask); + set_bit(idx, &cmd->vars.bitmask); } static void cmd_ent_get(struct mlx5_cmd_work_ent *ent) @@ -192,7 +195,7 @@ static void cmd_ent_put(struct mlx5_cmd_work_ent *ent) if (ent->idx >= 0) { cmd_free_index(cmd, ent->idx); - up(ent->page_queue ? &cmd->pages_sem : &cmd->sem); + up(ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem); } cmd_free_ent(ent); @@ -202,7 +205,7 @@ out: static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) { - return cmd->cmd_buf + (idx << cmd->log_stride); + return cmd->cmd_buf + (idx << cmd->vars.log_stride); } static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg) @@ -525,6 +528,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_SAVE_VHCA_STATE: case MLX5_CMD_OP_LOAD_VHCA_STATE: case MLX5_CMD_OP_SYNC_CRYPTO: + case MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS: *status = MLX5_DRIVER_STATUS_ABORTED; *synd = MLX5_DRIVER_SYND; return -ENOLINK; @@ -728,6 +732,7 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(SAVE_VHCA_STATE); MLX5_COMMAND_STR_CASE(LOAD_VHCA_STATE); MLX5_COMMAND_STR_CASE(SYNC_CRYPTO); + MLX5_COMMAND_STR_CASE(ALLOW_OTHER_VHCA_ACCESS); default: return "unknown command opcode"; } } @@ -974,10 +979,10 @@ static void cmd_work_handler(struct work_struct *work) cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD)); complete(&ent->handling); - sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; + sem = ent->page_queue ? 
&cmd->vars.pages_sem : &cmd->vars.sem; down(sem); if (!ent->page_queue) { - alloc_ret = cmd_alloc_index(cmd); + alloc_ret = cmd_alloc_index(cmd, ent); if (alloc_ret < 0) { mlx5_core_err_rl(dev, "failed to allocate command entry\n"); if (ent->callback) { @@ -992,15 +997,14 @@ static void cmd_work_handler(struct work_struct *work) up(sem); return; } - ent->idx = alloc_ret; } else { - ent->idx = cmd->max_reg_cmds; + ent->idx = cmd->vars.max_reg_cmds; spin_lock_irqsave(&cmd->alloc_lock, flags); - clear_bit(ent->idx, &cmd->bitmask); + clear_bit(ent->idx, &cmd->vars.bitmask); + cmd->ent_arr[ent->idx] = ent; spin_unlock_irqrestore(&cmd->alloc_lock, flags); } - cmd->ent_arr[ent->idx] = ent; lay = get_inst(cmd, ent->idx); ent->lay = lay; memset(lay, 0, sizeof(*lay)); @@ -1225,8 +1229,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, goto out_free; ds = ent->ts2 - ent->ts1; - if (ent->op < MLX5_CMD_OP_MAX) { - stats = &cmd->stats[ent->op]; + stats = xa_load(&cmd->stats, ent->op); + if (stats) { spin_lock_irq(&stats->lock); stats->sum += ds; ++stats->n; @@ -1548,7 +1552,6 @@ static void clean_debug_files(struct mlx5_core_dev *dev) if (!mlx5_debugfs_root) return; - mlx5_cmdif_debugfs_cleanup(dev); debugfs_remove_recursive(dbg->dbg_root); } @@ -1563,8 +1566,6 @@ static void create_debugfs_files(struct mlx5_core_dev *dev) debugfs_create_file("out_len", 0600, dbg->dbg_root, dev, &olfops); debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status); debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops); - - mlx5_cmdif_debugfs_init(dev); } void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode) @@ -1572,15 +1573,15 @@ void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode) struct mlx5_cmd *cmd = &dev->cmd; int i; - for (i = 0; i < cmd->max_reg_cmds; i++) - down(&cmd->sem); - down(&cmd->pages_sem); + for (i = 0; i < cmd->vars.max_reg_cmds; i++) + down(&cmd->vars.sem); + down(&cmd->vars.pages_sem); cmd->allowed_opcode = opcode; - up(&cmd->pages_sem); - for (i = 0; i < cmd->max_reg_cmds; i++) - up(&cmd->sem); + up(&cmd->vars.pages_sem); + for (i = 0; i < cmd->vars.max_reg_cmds; i++) + up(&cmd->vars.sem); } static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode) @@ -1588,15 +1589,15 @@ static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode) struct mlx5_cmd *cmd = &dev->cmd; int i; - for (i = 0; i < cmd->max_reg_cmds; i++) - down(&cmd->sem); - down(&cmd->pages_sem); + for (i = 0; i < cmd->vars.max_reg_cmds; i++) + down(&cmd->vars.sem); + down(&cmd->vars.pages_sem); cmd->mode = mode; - up(&cmd->pages_sem); - for (i = 0; i < cmd->max_reg_cmds; i++) - up(&cmd->sem); + up(&cmd->vars.pages_sem); + for (i = 0; i < cmd->vars.max_reg_cmds; i++) + up(&cmd->vars.sem); } static int cmd_comp_notifier(struct notifier_block *nb, @@ -1655,7 +1656,7 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force /* there can be at most 32 command queues */ vector = vec & 0xffffffff; - for (i = 0; i < (1 << cmd->log_sz); i++) { + for (i = 0; i < (1 << cmd->vars.log_sz); i++) { if (test_bit(i, &vector)) { ent = cmd->ent_arr[i]; @@ -1698,8 +1699,8 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force if (ent->callback) { ds = ent->ts2 - ent->ts1; - if (ent->op < MLX5_CMD_OP_MAX) { - stats = &cmd->stats[ent->op]; + stats = xa_load(&cmd->stats, ent->op); + if (stats) { spin_lock_irqsave(&stats->lock, flags); stats->sum += ds; ++stats->n; @@ -1744,7 +1745,7 @@ static void 
mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev) /* wait for pending handlers to complete */ mlx5_eq_synchronize_cmd_irq(dev); spin_lock_irqsave(&dev->cmd.alloc_lock, flags); - vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1); + vector = ~dev->cmd.vars.bitmask & ((1ul << (1 << dev->cmd.vars.log_sz)) - 1); if (!vector) goto no_trig; @@ -1753,14 +1754,14 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev) * to guarantee pending commands will not get freed in the meanwhile. * For that reason, it also has to be done inside the alloc_lock. */ - for_each_set_bit(i, &bitmask, (1 << cmd->log_sz)) + for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz)) cmd_ent_get(cmd->ent_arr[i]); vector |= MLX5_TRIGGERED_CMD_COMP; spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); mlx5_core_dbg(dev, "vector 0x%llx\n", vector); mlx5_cmd_comp_handler(dev, vector, true); - for_each_set_bit(i, &bitmask, (1 << cmd->log_sz)) + for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz)) cmd_ent_put(cmd->ent_arr[i]); return; @@ -1773,22 +1774,22 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev) struct mlx5_cmd *cmd = &dev->cmd; int i; - for (i = 0; i < cmd->max_reg_cmds; i++) { - while (down_trylock(&cmd->sem)) { + for (i = 0; i < cmd->vars.max_reg_cmds; i++) { + while (down_trylock(&cmd->vars.sem)) { mlx5_cmd_trigger_completions(dev); cond_resched(); } } - while (down_trylock(&cmd->pages_sem)) { + while (down_trylock(&cmd->vars.pages_sem)) { mlx5_cmd_trigger_completions(dev); cond_resched(); } /* Unlock cmdif */ - up(&cmd->pages_sem); - for (i = 0; i < cmd->max_reg_cmds; i++) - up(&cmd->sem); + up(&cmd->vars.pages_sem); + for (i = 0; i < cmd->vars.max_reg_cmds; i++) + up(&cmd->vars.sem); } static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, @@ -1858,7 +1859,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, /* atomic context may not sleep */ if (callback) return -EINVAL; - down(&dev->cmd.throttle_sem); + down(&dev->cmd.vars.throttle_sem); } pages_queue = is_manage_pages(in); @@ -1903,7 +1904,7 @@ out_in: free_msg(dev, inb); out_up: if (throttle_op) - up(&dev->cmd.throttle_sem); + up(&dev->cmd.vars.throttle_sem); return err; } @@ -1926,7 +1927,9 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, if (!err || !(strcmp(namep, "unknown command opcode"))) return; - stats = &dev->cmd.stats[opcode]; + stats = xa_load(&dev->cmd.stats, opcode); + if (!stats) + return; spin_lock_irq(&stats->lock); stats->failed++; if (err < 0) @@ -2091,6 +2094,74 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, } EXPORT_SYMBOL(mlx5_cmd_exec_cb); +int mlx5_cmd_allow_other_vhca_access(struct mlx5_core_dev *dev, + struct mlx5_cmd_allow_other_vhca_access_attr *attr) +{ + u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {}; + u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {}; + void *key; + + MLX5_SET(allow_other_vhca_access_in, + in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS); + MLX5_SET(allow_other_vhca_access_in, + in, object_type_to_be_accessed, attr->obj_type); + MLX5_SET(allow_other_vhca_access_in, + in, object_id_to_be_accessed, attr->obj_id); + + key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key); + memcpy(key, attr->access_key, sizeof(attr->access_key)); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5_cmd_alias_obj_create(struct mlx5_core_dev *dev, + struct mlx5_cmd_alias_obj_create_attr *alias_attr, + u32 *obj_id) +{ + u32 
out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; + u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {}; + void *param; + void *attr; + void *key; + int ret; + + attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr); + MLX5_SET(general_obj_in_cmd_hdr, + attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, + attr, obj_type, alias_attr->obj_type); + param = MLX5_ADDR_OF(general_obj_in_cmd_hdr, in, op_param); + MLX5_SET(general_obj_create_param, param, alias_object, 1); + + attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx); + MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id); + MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id); + + key = MLX5_ADDR_OF(alias_context, attr, access_key); + memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key)); + + ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (ret) + return ret; + + *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); + + return 0; +} + +int mlx5_cmd_alias_obj_destroy(struct mlx5_core_dev *dev, u32 obj_id, + u16 obj_type) +{ + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; + u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {}; + + MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, obj_type); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + static void destroy_msg_cache(struct mlx5_core_dev *dev) { struct cmd_msg_cache *ch; @@ -2187,75 +2258,86 @@ static u16 cmdif_rev(struct mlx5_core_dev *dev) int mlx5_cmd_init(struct mlx5_core_dev *dev) { + struct mlx5_cmd *cmd = &dev->cmd; + + cmd->checksum_disabled = 1; + + spin_lock_init(&cmd->alloc_lock); + spin_lock_init(&cmd->token_lock); + + set_wqname(dev); + cmd->wq = create_singlethread_workqueue(cmd->wq_name); + if (!cmd->wq) { + mlx5_core_err(dev, "failed to create command workqueue\n"); + return -ENOMEM; + } + + mlx5_cmdif_debugfs_init(dev); + + return 0; +} + +void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + + mlx5_cmdif_debugfs_cleanup(dev); + destroy_workqueue(cmd->wq); +} + +int mlx5_cmd_enable(struct mlx5_core_dev *dev) +{ int size = sizeof(struct mlx5_cmd_prot_block); int align = roundup_pow_of_two(size); struct mlx5_cmd *cmd = &dev->cmd; u32 cmd_h, cmd_l; - u16 cmd_if_rev; int err; - int i; - memset(cmd, 0, sizeof(*cmd)); - cmd_if_rev = cmdif_rev(dev); - if (cmd_if_rev != CMD_IF_REV) { + memset(&cmd->vars, 0, sizeof(cmd->vars)); + cmd->vars.cmdif_rev = cmdif_rev(dev); + if (cmd->vars.cmdif_rev != CMD_IF_REV) { mlx5_core_err(dev, "Driver cmdif rev(%d) differs from firmware's(%d)\n", - CMD_IF_REV, cmd_if_rev); + CMD_IF_REV, cmd->vars.cmdif_rev); return -EINVAL; } - cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0); - if (!cmd->pool) - return -ENOMEM; - - err = alloc_cmd_page(dev, cmd); - if (err) - goto err_free_pool; - cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff; - cmd->log_sz = cmd_l >> 4 & 0xf; - cmd->log_stride = cmd_l & 0xf; - if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) { + cmd->vars.log_sz = cmd_l >> 4 & 0xf; + cmd->vars.log_stride = cmd_l & 0xf; + if (1 << cmd->vars.log_sz > MLX5_MAX_COMMANDS) { mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n", - 1 << cmd->log_sz); - err = -EINVAL; - goto err_free_page; + 1 << cmd->vars.log_sz); + return -EINVAL; } - if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) 
{ + if (cmd->vars.log_sz + cmd->vars.log_stride > MLX5_ADAPTER_PAGE_SHIFT) { mlx5_core_err(dev, "command queue size overflow\n"); - err = -EINVAL; - goto err_free_page; + return -EINVAL; } cmd->state = MLX5_CMDIF_STATE_DOWN; - cmd->checksum_disabled = 1; - cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; - cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1; - - cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; - if (cmd->cmdif_rev > CMD_IF_REV) { - mlx5_core_err(dev, "driver does not support command interface version. driver %d, firmware %d\n", - CMD_IF_REV, cmd->cmdif_rev); - err = -EOPNOTSUPP; - goto err_free_page; - } + cmd->vars.max_reg_cmds = (1 << cmd->vars.log_sz) - 1; + cmd->vars.bitmask = (1UL << cmd->vars.max_reg_cmds) - 1; - spin_lock_init(&cmd->alloc_lock); - spin_lock_init(&cmd->token_lock); - for (i = 0; i < MLX5_CMD_OP_MAX; i++) - spin_lock_init(&cmd->stats[i].lock); + sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds); + sema_init(&cmd->vars.pages_sem, 1); + sema_init(&cmd->vars.throttle_sem, DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2)); - sema_init(&cmd->sem, cmd->max_reg_cmds); - sema_init(&cmd->pages_sem, 1); - sema_init(&cmd->throttle_sem, DIV_ROUND_UP(cmd->max_reg_cmds, 2)); + cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0); + if (!cmd->pool) + return -ENOMEM; + + err = alloc_cmd_page(dev, cmd); + if (err) + goto err_free_pool; cmd_h = (u32)((u64)(cmd->dma) >> 32); cmd_l = (u32)(cmd->dma); if (cmd_l & 0xfff) { mlx5_core_err(dev, "invalid command queue address\n"); err = -ENOMEM; - goto err_free_page; + goto err_cmd_page; } iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h); @@ -2270,36 +2352,23 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL; create_msg_cache(dev); - - set_wqname(dev); - cmd->wq = create_singlethread_workqueue(cmd->wq_name); - if (!cmd->wq) { - mlx5_core_err(dev, "failed to create command workqueue\n"); - err = -ENOMEM; - goto err_cache; - } - create_debugfs_files(dev); return 0; -err_cache: - destroy_msg_cache(dev); - -err_free_page: +err_cmd_page: free_cmd_page(dev, cmd); - err_free_pool: dma_pool_destroy(cmd->pool); return err; } -void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) +void mlx5_cmd_disable(struct mlx5_core_dev *dev) { struct mlx5_cmd *cmd = &dev->cmd; + flush_workqueue(cmd->wq); clean_debug_files(dev); - destroy_workqueue(cmd->wq); destroy_msg_cache(dev); free_cmd_page(dev, cmd); dma_pool_destroy(cmd->pool); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 2138f28a2931..09652dc89115 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -176,8 +176,8 @@ static ssize_t slots_read(struct file *filp, char __user *buf, size_t count, int ret; cmd = filp->private_data; - weight = bitmap_weight(&cmd->bitmask, cmd->max_reg_cmds); - field = cmd->max_reg_cmds - weight; + weight = bitmap_weight(&cmd->vars.bitmask, cmd->vars.max_reg_cmds); + field = cmd->vars.max_reg_cmds - weight; ret = snprintf(tbuf, sizeof(tbuf), "%d\n", field); return simple_read_from_buffer(buf, count, pos, tbuf, ret); } @@ -188,6 +188,24 @@ static const struct file_operations slots_fops = { .read = slots_read, }; +static struct mlx5_cmd_stats * +mlx5_cmdif_alloc_stats(struct xarray *stats_xa, int opcode) +{ + struct mlx5_cmd_stats *stats = kzalloc(sizeof(*stats), GFP_KERNEL); + int err; + + if (!stats) + return NULL; + + err = xa_insert(stats_xa, opcode, stats, 
GFP_KERNEL); + if (err) { + kfree(stats); + return NULL; + } + spin_lock_init(&stats->lock); + return stats; +} + void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) { struct mlx5_cmd_stats *stats; @@ -200,10 +218,14 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) debugfs_create_file("slots_inuse", 0400, *cmd, &dev->cmd, &slots_fops); + xa_init(&dev->cmd.stats); + for (i = 0; i < MLX5_CMD_OP_MAX; i++) { - stats = &dev->cmd.stats[i]; namep = mlx5_command_str(i); if (strcmp(namep, "unknown command opcode")) { + stats = mlx5_cmdif_alloc_stats(&dev->cmd.stats, i); + if (!stats) + continue; stats->root = debugfs_create_dir(namep, *cmd); debugfs_create_file("average", 0400, stats->root, stats, @@ -224,7 +246,13 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev) { + struct mlx5_cmd_stats *stats; + unsigned long i; + debugfs_remove_recursive(dev->priv.dbg.cmdif_debugfs); + xa_for_each(&dev->cmd.stats, i, stats) + kfree(stats); + xa_destroy(&dev->cmd.stats); } void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index edb06fb9bbc5..cf0477f53dc4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -36,9 +36,8 @@ #include <linux/mlx5/vport.h> #include "mlx5_core.h" #include "devlink.h" +#include "lag/lag.h" -/* intf dev list mutex */ -static DEFINE_MUTEX(mlx5_intf_mutex); static DEFINE_IDA(mlx5_adev_ida); static bool is_eth_rep_supported(struct mlx5_core_dev *dev) @@ -205,6 +204,19 @@ static bool is_ib_enabled(struct mlx5_core_dev *dev) return err ? false : val.vbool; } +static bool is_dpll_supported(struct mlx5_core_dev *dev) +{ + if (!IS_ENABLED(CONFIG_MLX5_DPLL)) + return false; + + if (!MLX5_CAP_MCAM_REG2(dev, synce_registers)) { + mlx5_core_warn(dev, "Missing SyncE capability\n"); + return false; + } + + return true; +} + enum { MLX5_INTERFACE_PROTOCOL_ETH, MLX5_INTERFACE_PROTOCOL_ETH_REP, @@ -214,6 +226,8 @@ enum { MLX5_INTERFACE_PROTOCOL_MPIB, MLX5_INTERFACE_PROTOCOL_VNET, + + MLX5_INTERFACE_PROTOCOL_DPLL, }; static const struct mlx5_adev_device { @@ -236,6 +250,8 @@ static const struct mlx5_adev_device { .is_supported = &is_ib_rep_supported }, [MLX5_INTERFACE_PROTOCOL_MPIB] = { .suffix = "multiport", .is_supported = &is_mp_supported }, + [MLX5_INTERFACE_PROTOCOL_DPLL] = { .suffix = "dpll", + .is_supported = &is_dpll_supported }, }; int mlx5_adev_idx_alloc(void) @@ -319,9 +335,9 @@ static void del_adev(struct auxiliary_device *adev) void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev) { - mutex_lock(&mlx5_intf_mutex); + mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp); dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV; - mutex_unlock(&mlx5_intf_mutex); + mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp); } bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev) @@ -337,7 +353,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev) int ret = 0, i; devl_assert_locked(priv_to_devlink(dev)); - mutex_lock(&mlx5_intf_mutex); + mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp); priv->flags &= ~MLX5_PRIV_FLAGS_DETACH; for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) { if (!priv->adev[i]) { @@ -382,7 +398,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev) break; } } - mutex_unlock(&mlx5_intf_mutex); + mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp); return ret; } @@ -395,7 +411,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev, bool 
suspend) int i; devl_assert_locked(priv_to_devlink(dev)); - mutex_lock(&mlx5_intf_mutex); + mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp); for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) { if (!priv->adev[i]) continue; @@ -425,7 +441,7 @@ skip_suspend: priv->adev[i] = NULL; } priv->flags |= MLX5_PRIV_FLAGS_DETACH; - mutex_unlock(&mlx5_intf_mutex); + mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp); } int mlx5_register_device(struct mlx5_core_dev *dev) @@ -433,10 +449,10 @@ int mlx5_register_device(struct mlx5_core_dev *dev) int ret; devl_assert_locked(priv_to_devlink(dev)); - mutex_lock(&mlx5_intf_mutex); + mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp); dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV; ret = mlx5_rescan_drivers_locked(dev); - mutex_unlock(&mlx5_intf_mutex); + mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp); if (ret) mlx5_unregister_device(dev); @@ -446,10 +462,10 @@ int mlx5_register_device(struct mlx5_core_dev *dev) void mlx5_unregister_device(struct mlx5_core_dev *dev) { devl_assert_locked(priv_to_devlink(dev)); - mutex_lock(&mlx5_intf_mutex); + mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp); dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV; mlx5_rescan_drivers_locked(dev); - mutex_unlock(&mlx5_intf_mutex); + mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp); } static int add_drivers(struct mlx5_core_dev *dev) @@ -527,7 +543,6 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; - lockdep_assert_held(&mlx5_intf_mutex); if (priv->flags & MLX5_PRIV_FLAGS_DETACH) return 0; @@ -547,88 +562,3 @@ bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev return (fsystem_guid && psystem_guid && fsystem_guid == psystem_guid); } - -static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev) -{ - return (u32)((pci_domain_nr(dev->pdev->bus) << 16) | - (dev->pdev->bus->number << 8) | - PCI_SLOT(dev->pdev->devfn)); -} - -static int _next_phys_dev(struct mlx5_core_dev *mdev, - const struct mlx5_core_dev *curr) -{ - if (!mlx5_core_is_pf(mdev)) - return 0; - - if (mdev == curr) - return 0; - - if (!mlx5_same_hw_devs(mdev, (struct mlx5_core_dev *)curr) && - mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr)) - return 0; - - return 1; -} - -static void *pci_get_other_drvdata(struct device *this, struct device *other) -{ - if (this->driver != other->driver) - return NULL; - - return pci_get_drvdata(to_pci_dev(other)); -} - -static int next_phys_dev_lag(struct device *dev, const void *data) -{ - struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data; - - mdev = pci_get_other_drvdata(this->device, dev); - if (!mdev) - return 0; - - if (!MLX5_CAP_GEN(mdev, vport_group_manager) || - !MLX5_CAP_GEN(mdev, lag_master) || - (MLX5_CAP_GEN(mdev, num_lag_ports) > MLX5_MAX_PORTS || - MLX5_CAP_GEN(mdev, num_lag_ports) <= 1)) - return 0; - - return _next_phys_dev(mdev, data); -} - -static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev, - int (*match)(struct device *dev, const void *data)) -{ - struct device *next; - - if (!mlx5_core_is_pf(dev)) - return NULL; - - next = bus_find_device(&pci_bus_type, NULL, dev, match); - if (!next) - return NULL; - - put_device(next); - return pci_get_drvdata(to_pci_dev(next)); -} - -/* Must be called with intf_mutex held */ -struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev) -{ - lockdep_assert_held(&mlx5_intf_mutex); - return mlx5_get_next_dev(dev, &next_phys_dev_lag); -} - -void mlx5_dev_list_lock(void) -{ - 
mutex_lock(&mlx5_intf_mutex); -} -void mlx5_dev_list_unlock(void) -{ - mutex_unlock(&mlx5_intf_mutex); -} - -int mlx5_dev_list_trylock(void) -{ - return mutex_trylock(&mlx5_intf_mutex); -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index 3d82ec890666..3e064234f6fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -138,7 +138,6 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change, { struct mlx5_core_dev *dev = devlink_priv(devlink); struct pci_dev *pdev = dev->pdev; - bool sf_dev_allocated; int ret = 0; if (mlx5_dev_is_lightweight(dev)) { @@ -148,16 +147,6 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change, return 0; } - sf_dev_allocated = mlx5_sf_dev_allocated(dev); - if (sf_dev_allocated) { - /* Reload results in deleting SF device which further results in - * unregistering devlink instance while holding devlink_mutext. - * Hence, do not support reload. - */ - NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated"); - return -EOPNOTSUPP; - } - if (mlx5_lag_is_active(dev)) { NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode"); return -EOPNOTSUPP; @@ -212,6 +201,9 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a /* On fw_activate action, also driver is reloaded and reinit performed */ *actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT); ret = mlx5_load_one_devl_locked(dev, true); + if (ret) + return ret; + ret = mlx5_fw_reset_verify_fw_complete(dev, extack); break; default: /* Unsupported action should not get to this function */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h index defba5bd91d9..961f75da6227 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h @@ -6,6 +6,14 @@ #include <net/devlink.h> +enum mlx5_devlink_resource_id { + MLX5_DL_RES_MAX_LOCAL_SFS = 1, + MLX5_DL_RES_MAX_EXTERNAL_SFS, + + __MLX5_ID_RES_MAX, + MLX5_ID_RES_MAX = __MLX5_ID_RES_MAX - 1, +}; + enum mlx5_devlink_param_id { MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 7c0f2adbea00..080e7eab52c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -718,7 +718,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work) while (block_timestamp > tracer->last_timestamp) { /* Check block override if it's not the first block */ - if (!tracer->last_timestamp) { + if (tracer->last_timestamp) { u64 *ts_event; /* To avoid block override be the HW in case of buffer * wraparound, the time stamp of the previous block @@ -848,7 +848,7 @@ static void mlx5_fw_tracer_ownership_change(struct work_struct *work) mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner); if (tracer->owner) { - tracer->owner = false; + mlx5_fw_tracer_ownership_acquire(tracer); return; } @@ -889,36 +889,16 @@ int mlx5_fw_tracer_trigger_core_dump_general(struct mlx5_core_dev *dev) return 0; } -static int +static void mlx5_devlink_fmsg_fill_trace(struct devlink_fmsg *fmsg, struct mlx5_fw_trace_data *trace_data) { - int err; - - err = 
devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; - - err = devlink_fmsg_u64_pair_put(fmsg, "timestamp", trace_data->timestamp); - if (err) - return err; - - err = devlink_fmsg_bool_pair_put(fmsg, "lost", trace_data->lost); - if (err) - return err; - - err = devlink_fmsg_u8_pair_put(fmsg, "event_id", trace_data->event_id); - if (err) - return err; - - err = devlink_fmsg_string_pair_put(fmsg, "msg", trace_data->msg); - if (err) - return err; - - err = devlink_fmsg_obj_nest_end(fmsg); - if (err) - return err; - return 0; + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_u64_pair_put(fmsg, "timestamp", trace_data->timestamp); + devlink_fmsg_bool_pair_put(fmsg, "lost", trace_data->lost); + devlink_fmsg_u8_pair_put(fmsg, "event_id", trace_data->event_id); + devlink_fmsg_string_pair_put(fmsg, "msg", trace_data->msg); + devlink_fmsg_obj_nest_end(fmsg); } int mlx5_fw_tracer_get_saved_traces_objects(struct mlx5_fw_tracer *tracer, @@ -927,7 +907,6 @@ int mlx5_fw_tracer_get_saved_traces_objects(struct mlx5_fw_tracer *tracer, struct mlx5_fw_trace_data *straces = tracer->st_arr.straces; u32 index, start_index, end_index; u32 saved_traces_index; - int err; if (!straces[0].timestamp) return -ENOMSG; @@ -940,22 +919,18 @@ int mlx5_fw_tracer_get_saved_traces_objects(struct mlx5_fw_tracer *tracer, start_index = 0; end_index = (saved_traces_index - 1) & (SAVED_TRACES_NUM - 1); - err = devlink_fmsg_arr_pair_nest_start(fmsg, "dump fw traces"); - if (err) - goto unlock; + devlink_fmsg_arr_pair_nest_start(fmsg, "dump fw traces"); index = start_index; while (index != end_index) { - err = mlx5_devlink_fmsg_fill_trace(fmsg, &straces[index]); - if (err) - goto unlock; + mlx5_devlink_fmsg_fill_trace(fmsg, &straces[index]); index = (index + 1) & (SAVED_TRACES_NUM - 1); } - err = devlink_fmsg_arr_pair_nest_end(fmsg); -unlock: + devlink_fmsg_arr_pair_nest_end(fmsg); mutex_unlock(&tracer->st_arr.lock); - return err; + + return 0; } static void mlx5_fw_tracer_update_db(struct work_struct *work) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c index e869c65d8e90..c7216e84ef8c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c @@ -13,106 +13,55 @@ struct mlx5_vnic_diag_stats { __be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)]; }; -int mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev, - struct devlink_fmsg *fmsg, - u16 vport_num, bool other_vport) +void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev, + struct devlink_fmsg *fmsg, + u16 vport_num, bool other_vport) { u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {}; struct mlx5_vnic_diag_stats vnic; - int err; MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV); MLX5_SET(query_vnic_env_in, in, vport_number, vport_num); MLX5_SET(query_vnic_env_in, in, other_vport, !!other_vport); - err = mlx5_cmd_exec_inout(dev, query_vnic_env, in, &vnic.query_vnic_env_out); - if (err) - return err; + mlx5_cmd_exec_inout(dev, query_vnic_env, in, &vnic.query_vnic_env_out); - err = devlink_fmsg_pair_nest_start(fmsg, "vNIC env counters"); - if (err) - return err; - - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; + devlink_fmsg_pair_nest_start(fmsg, "vNIC env counters"); + devlink_fmsg_obj_nest_start(fmsg); if (MLX5_CAP_GEN(dev, vnic_env_queue_counters)) { - err = devlink_fmsg_u32_pair_put(fmsg, "total_error_queues", - VNIC_ENV_GET(&vnic, 
total_error_queues)); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "send_queue_priority_update_flow", - VNIC_ENV_GET(&vnic, - send_queue_priority_update_flow)); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "total_error_queues", + VNIC_ENV_GET(&vnic, total_error_queues)); + devlink_fmsg_u32_pair_put(fmsg, "send_queue_priority_update_flow", + VNIC_ENV_GET(&vnic, send_queue_priority_update_flow)); } - if (MLX5_CAP_GEN(dev, eq_overrun_count)) { - err = devlink_fmsg_u32_pair_put(fmsg, "comp_eq_overrun", - VNIC_ENV_GET(&vnic, comp_eq_overrun)); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "async_eq_overrun", - VNIC_ENV_GET(&vnic, async_eq_overrun)); - if (err) - return err; - } - - if (MLX5_CAP_GEN(dev, vnic_env_cq_overrun)) { - err = devlink_fmsg_u32_pair_put(fmsg, "cq_overrun", - VNIC_ENV_GET(&vnic, cq_overrun)); - if (err) - return err; - } - - if (MLX5_CAP_GEN(dev, invalid_command_count)) { - err = devlink_fmsg_u32_pair_put(fmsg, "invalid_command", - VNIC_ENV_GET(&vnic, invalid_command)); - if (err) - return err; - } - - if (MLX5_CAP_GEN(dev, quota_exceeded_count)) { - err = devlink_fmsg_u32_pair_put(fmsg, "quota_exceeded_command", - VNIC_ENV_GET(&vnic, quota_exceeded_command)); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "comp_eq_overrun", + VNIC_ENV_GET(&vnic, comp_eq_overrun)); + devlink_fmsg_u32_pair_put(fmsg, "async_eq_overrun", + VNIC_ENV_GET(&vnic, async_eq_overrun)); } - - if (MLX5_CAP_GEN(dev, nic_receive_steering_discard)) { - err = devlink_fmsg_u64_pair_put(fmsg, "nic_receive_steering_discard", - VNIC_ENV_GET64(&vnic, - nic_receive_steering_discard)); - if (err) - return err; - } - + if (MLX5_CAP_GEN(dev, vnic_env_cq_overrun)) + devlink_fmsg_u32_pair_put(fmsg, "cq_overrun", + VNIC_ENV_GET(&vnic, cq_overrun)); + if (MLX5_CAP_GEN(dev, invalid_command_count)) + devlink_fmsg_u32_pair_put(fmsg, "invalid_command", + VNIC_ENV_GET(&vnic, invalid_command)); + if (MLX5_CAP_GEN(dev, quota_exceeded_count)) + devlink_fmsg_u32_pair_put(fmsg, "quota_exceeded_command", + VNIC_ENV_GET(&vnic, quota_exceeded_command)); + if (MLX5_CAP_GEN(dev, nic_receive_steering_discard)) + devlink_fmsg_u64_pair_put(fmsg, "nic_receive_steering_discard", + VNIC_ENV_GET64(&vnic, nic_receive_steering_discard)); if (MLX5_CAP_GEN(dev, vnic_env_cnt_steering_fail)) { - err = devlink_fmsg_u64_pair_put(fmsg, "generated_pkt_steering_fail", - VNIC_ENV_GET64(&vnic, - generated_pkt_steering_fail)); - if (err) - return err; - - err = devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail", - VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail)); - if (err) - return err; + devlink_fmsg_u64_pair_put(fmsg, "generated_pkt_steering_fail", + VNIC_ENV_GET64(&vnic, generated_pkt_steering_fail)); + devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail", + VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail)); } - err = devlink_fmsg_obj_nest_end(fmsg); - if (err) - return err; - - err = devlink_fmsg_pair_nest_end(fmsg); - if (err) - return err; - - return 0; + devlink_fmsg_obj_nest_end(fmsg); + devlink_fmsg_pair_nest_end(fmsg); } static int mlx5_reporter_vnic_diagnose(struct devlink_health_reporter *reporter, @@ -121,7 +70,8 @@ static int mlx5_reporter_vnic_diagnose(struct devlink_health_reporter *reporter, { struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter); - return mlx5_reporter_vnic_diagnose_counters(dev, fmsg, 0, false); + mlx5_reporter_vnic_diagnose_counters(dev, fmsg, 0, false); + return 0; } static const struct devlink_health_reporter_ops 
mlx5_reporter_vnic_ops = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h index eba87a39e9b1..fbc31256f7fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h @@ -9,8 +9,8 @@ void mlx5_reporter_vnic_create(struct mlx5_core_dev *dev); void mlx5_reporter_vnic_destroy(struct mlx5_core_dev *dev); -int mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev, - struct devlink_fmsg *fmsg, - u16 vport_num, bool other_vport); +void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev, + struct devlink_fmsg *fmsg, + u16 vport_num, bool other_vport); #endif /* __MLX5_REPORTER_VNIC_H */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c new file mode 100644 index 000000000000..2cd81bb32c66 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c @@ -0,0 +1,432 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#include <linux/dpll.h> +#include <linux/mlx5/driver.h> + +/* This structure represents a reference to DPLL, one is created + * per mdev instance. + */ +struct mlx5_dpll { + struct dpll_device *dpll; + struct dpll_pin *dpll_pin; + struct mlx5_core_dev *mdev; + struct workqueue_struct *wq; + struct delayed_work work; + struct { + bool valid; + enum dpll_lock_status lock_status; + enum dpll_pin_state pin_state; + } last; + struct notifier_block mdev_nb; + struct net_device *tracking_netdev; +}; + +static int mlx5_dpll_clock_id_get(struct mlx5_core_dev *mdev, u64 *clock_id) +{ + u32 out[MLX5_ST_SZ_DW(msecq_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(msecq_reg)] = {}; + int err; + + err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), + MLX5_REG_MSECQ, 0, 0); + if (err) + return err; + *clock_id = MLX5_GET64(msecq_reg, out, local_clock_identity); + return 0; +} + +static int +mlx5_dpll_synce_status_get(struct mlx5_core_dev *mdev, + enum mlx5_msees_admin_status *admin_status, + enum mlx5_msees_oper_status *oper_status, + bool *ho_acq) +{ + u32 out[MLX5_ST_SZ_DW(msees_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(msees_reg)] = {}; + int err; + + err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), + MLX5_REG_MSEES, 0, 0); + if (err) + return err; + if (admin_status) + *admin_status = MLX5_GET(msees_reg, out, admin_status); + *oper_status = MLX5_GET(msees_reg, out, oper_status); + if (ho_acq) + *ho_acq = MLX5_GET(msees_reg, out, ho_acq); + return 0; +} + +static int +mlx5_dpll_synce_status_set(struct mlx5_core_dev *mdev, + enum mlx5_msees_admin_status admin_status) +{ + u32 out[MLX5_ST_SZ_DW(msees_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(msees_reg)] = {}; + + MLX5_SET(msees_reg, in, field_select, + MLX5_MSEES_FIELD_SELECT_ENABLE | + MLX5_MSEES_FIELD_SELECT_ADMIN_STATUS); + MLX5_SET(msees_reg, in, admin_status, admin_status); + return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), + MLX5_REG_MSEES, 0, 1); +} + +static enum dpll_lock_status +mlx5_dpll_lock_status_get(enum mlx5_msees_oper_status oper_status, bool ho_acq) +{ + switch (oper_status) { + case MLX5_MSEES_OPER_STATUS_SELF_TRACK: + fallthrough; + case MLX5_MSEES_OPER_STATUS_OTHER_TRACK: + return ho_acq ? 
DPLL_LOCK_STATUS_LOCKED_HO_ACQ : + DPLL_LOCK_STATUS_LOCKED; + case MLX5_MSEES_OPER_STATUS_HOLDOVER: + fallthrough; + case MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER: + return DPLL_LOCK_STATUS_HOLDOVER; + default: + return DPLL_LOCK_STATUS_UNLOCKED; + } +} + +static enum dpll_pin_state +mlx5_dpll_pin_state_get(enum mlx5_msees_admin_status admin_status, + enum mlx5_msees_oper_status oper_status) +{ + return (admin_status == MLX5_MSEES_ADMIN_STATUS_TRACK && + (oper_status == MLX5_MSEES_OPER_STATUS_SELF_TRACK || + oper_status == MLX5_MSEES_OPER_STATUS_OTHER_TRACK)) ? + DPLL_PIN_STATE_CONNECTED : DPLL_PIN_STATE_DISCONNECTED; +} + +static int mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll, + void *priv, + enum dpll_lock_status *status, + struct netlink_ext_ack *extack) +{ + enum mlx5_msees_oper_status oper_status; + struct mlx5_dpll *mdpll = priv; + bool ho_acq; + int err; + + err = mlx5_dpll_synce_status_get(mdpll->mdev, NULL, + &oper_status, &ho_acq); + if (err) + return err; + + *status = mlx5_dpll_lock_status_get(oper_status, ho_acq); + return 0; +} + +static int mlx5_dpll_device_mode_get(const struct dpll_device *dpll, + void *priv, enum dpll_mode *mode, + struct netlink_ext_ack *extack) +{ + *mode = DPLL_MODE_MANUAL; + return 0; +} + +static bool mlx5_dpll_device_mode_supported(const struct dpll_device *dpll, + void *priv, + enum dpll_mode mode, + struct netlink_ext_ack *extack) +{ + return mode == DPLL_MODE_MANUAL; +} + +static const struct dpll_device_ops mlx5_dpll_device_ops = { + .lock_status_get = mlx5_dpll_device_lock_status_get, + .mode_get = mlx5_dpll_device_mode_get, + .mode_supported = mlx5_dpll_device_mode_supported, +}; + +static int mlx5_dpll_pin_direction_get(const struct dpll_pin *pin, + void *pin_priv, + const struct dpll_device *dpll, + void *dpll_priv, + enum dpll_pin_direction *direction, + struct netlink_ext_ack *extack) +{ + *direction = DPLL_PIN_DIRECTION_INPUT; + return 0; +} + +static int mlx5_dpll_state_on_dpll_get(const struct dpll_pin *pin, + void *pin_priv, + const struct dpll_device *dpll, + void *dpll_priv, + enum dpll_pin_state *state, + struct netlink_ext_ack *extack) +{ + enum mlx5_msees_admin_status admin_status; + enum mlx5_msees_oper_status oper_status; + struct mlx5_dpll *mdpll = pin_priv; + int err; + + err = mlx5_dpll_synce_status_get(mdpll->mdev, &admin_status, + &oper_status, NULL); + if (err) + return err; + *state = mlx5_dpll_pin_state_get(admin_status, oper_status); + return 0; +} + +static int mlx5_dpll_state_on_dpll_set(const struct dpll_pin *pin, + void *pin_priv, + const struct dpll_device *dpll, + void *dpll_priv, + enum dpll_pin_state state, + struct netlink_ext_ack *extack) +{ + struct mlx5_dpll *mdpll = pin_priv; + + return mlx5_dpll_synce_status_set(mdpll->mdev, + state == DPLL_PIN_STATE_CONNECTED ? 
+ MLX5_MSEES_ADMIN_STATUS_TRACK : + MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING); +} + +static const struct dpll_pin_ops mlx5_dpll_pins_ops = { + .direction_get = mlx5_dpll_pin_direction_get, + .state_on_dpll_get = mlx5_dpll_state_on_dpll_get, + .state_on_dpll_set = mlx5_dpll_state_on_dpll_set, +}; + +static const struct dpll_pin_properties mlx5_dpll_pin_properties = { + .type = DPLL_PIN_TYPE_SYNCE_ETH_PORT, + .capabilities = DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE, +}; + +#define MLX5_DPLL_PERIODIC_WORK_INTERVAL 500 /* ms */ + +static void mlx5_dpll_periodic_work_queue(struct mlx5_dpll *mdpll) +{ + queue_delayed_work(mdpll->wq, &mdpll->work, + msecs_to_jiffies(MLX5_DPLL_PERIODIC_WORK_INTERVAL)); +} + +static void mlx5_dpll_periodic_work(struct work_struct *work) +{ + struct mlx5_dpll *mdpll = container_of(work, struct mlx5_dpll, + work.work); + enum mlx5_msees_admin_status admin_status; + enum mlx5_msees_oper_status oper_status; + enum dpll_lock_status lock_status; + enum dpll_pin_state pin_state; + bool ho_acq; + int err; + + err = mlx5_dpll_synce_status_get(mdpll->mdev, &admin_status, + &oper_status, &ho_acq); + if (err) + goto err_out; + lock_status = mlx5_dpll_lock_status_get(oper_status, ho_acq); + pin_state = mlx5_dpll_pin_state_get(admin_status, oper_status); + + if (!mdpll->last.valid) + goto invalid_out; + + if (mdpll->last.lock_status != lock_status) + dpll_device_change_ntf(mdpll->dpll); + if (mdpll->last.pin_state != pin_state) + dpll_pin_change_ntf(mdpll->dpll_pin); + +invalid_out: + mdpll->last.lock_status = lock_status; + mdpll->last.pin_state = pin_state; + mdpll->last.valid = true; +err_out: + mlx5_dpll_periodic_work_queue(mdpll); +} + +static void mlx5_dpll_netdev_dpll_pin_set(struct mlx5_dpll *mdpll, + struct net_device *netdev) +{ + if (mdpll->tracking_netdev) + return; + netdev_dpll_pin_set(netdev, mdpll->dpll_pin); + mdpll->tracking_netdev = netdev; +} + +static void mlx5_dpll_netdev_dpll_pin_clear(struct mlx5_dpll *mdpll) +{ + if (!mdpll->tracking_netdev) + return; + netdev_dpll_pin_clear(mdpll->tracking_netdev); + mdpll->tracking_netdev = NULL; +} + +static int mlx5_dpll_mdev_notifier_event(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct mlx5_dpll *mdpll = container_of(nb, struct mlx5_dpll, mdev_nb); + struct net_device *netdev = data; + + switch (event) { + case MLX5_DRIVER_EVENT_UPLINK_NETDEV: + if (netdev) + mlx5_dpll_netdev_dpll_pin_set(mdpll, netdev); + else + mlx5_dpll_netdev_dpll_pin_clear(mdpll); + break; + default: + return NOTIFY_DONE; + } + + return NOTIFY_OK; +} + +static void mlx5_dpll_mdev_netdev_track(struct mlx5_dpll *mdpll, + struct mlx5_core_dev *mdev) +{ + mdpll->mdev_nb.notifier_call = mlx5_dpll_mdev_notifier_event; + mlx5_blocking_notifier_register(mdev, &mdpll->mdev_nb); + mlx5_core_uplink_netdev_event_replay(mdev); +} + +static void mlx5_dpll_mdev_netdev_untrack(struct mlx5_dpll *mdpll, + struct mlx5_core_dev *mdev) +{ + mlx5_blocking_notifier_unregister(mdev, &mdpll->mdev_nb); + mlx5_dpll_netdev_dpll_pin_clear(mdpll); +} + +static int mlx5_dpll_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev); + struct mlx5_core_dev *mdev = edev->mdev; + struct mlx5_dpll *mdpll; + u64 clock_id; + int err; + + err = mlx5_dpll_synce_status_set(mdev, + MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING); + if (err) + return err; + + err = mlx5_dpll_clock_id_get(mdev, &clock_id); + if (err) + return err; + + mdpll = kzalloc(sizeof(*mdpll), GFP_KERNEL); + if 
(!mdpll) + return -ENOMEM; + mdpll->mdev = mdev; + auxiliary_set_drvdata(adev, mdpll); + + /* Multiple mdev instances might share one DPLL device. */ + mdpll->dpll = dpll_device_get(clock_id, 0, THIS_MODULE); + if (IS_ERR(mdpll->dpll)) { + err = PTR_ERR(mdpll->dpll); + goto err_free_mdpll; + } + + err = dpll_device_register(mdpll->dpll, DPLL_TYPE_EEC, + &mlx5_dpll_device_ops, mdpll); + if (err) + goto err_put_dpll_device; + + /* Multiple mdev instances might share one DPLL pin. */ + mdpll->dpll_pin = dpll_pin_get(clock_id, mlx5_get_dev_index(mdev), + THIS_MODULE, &mlx5_dpll_pin_properties); + if (IS_ERR(mdpll->dpll_pin)) { + err = PTR_ERR(mdpll->dpll_pin); + goto err_unregister_dpll_device; + } + + err = dpll_pin_register(mdpll->dpll, mdpll->dpll_pin, + &mlx5_dpll_pins_ops, mdpll); + if (err) + goto err_put_dpll_pin; + + mdpll->wq = create_singlethread_workqueue("mlx5_dpll"); + if (!mdpll->wq) { + err = -ENOMEM; + goto err_unregister_dpll_pin; + } + + mlx5_dpll_mdev_netdev_track(mdpll, mdev); + + INIT_DELAYED_WORK(&mdpll->work, &mlx5_dpll_periodic_work); + mlx5_dpll_periodic_work_queue(mdpll); + + return 0; + +err_unregister_dpll_pin: + dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin, + &mlx5_dpll_pins_ops, mdpll); +err_put_dpll_pin: + dpll_pin_put(mdpll->dpll_pin); +err_unregister_dpll_device: + dpll_device_unregister(mdpll->dpll, &mlx5_dpll_device_ops, mdpll); +err_put_dpll_device: + dpll_device_put(mdpll->dpll); +err_free_mdpll: + kfree(mdpll); + return err; +} + +static void mlx5_dpll_remove(struct auxiliary_device *adev) +{ + struct mlx5_dpll *mdpll = auxiliary_get_drvdata(adev); + struct mlx5_core_dev *mdev = mdpll->mdev; + + cancel_delayed_work(&mdpll->work); + mlx5_dpll_mdev_netdev_untrack(mdpll, mdev); + destroy_workqueue(mdpll->wq); + dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin, + &mlx5_dpll_pins_ops, mdpll); + dpll_pin_put(mdpll->dpll_pin); + dpll_device_unregister(mdpll->dpll, &mlx5_dpll_device_ops, mdpll); + dpll_device_put(mdpll->dpll); + kfree(mdpll); + + mlx5_dpll_synce_status_set(mdev, + MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING); +} + +static int mlx5_dpll_suspend(struct auxiliary_device *adev, pm_message_t state) +{ + return 0; +} + +static int mlx5_dpll_resume(struct auxiliary_device *adev) +{ + return 0; +} + +static const struct auxiliary_device_id mlx5_dpll_id_table[] = { + { .name = MLX5_ADEV_NAME ".dpll", }, + {}, +}; + +MODULE_DEVICE_TABLE(auxiliary, mlx5_dpll_id_table); + +static struct auxiliary_driver mlx5_dpll_driver = { + .name = "dpll", + .probe = mlx5_dpll_probe, + .remove = mlx5_dpll_remove, + .suspend = mlx5_dpll_suspend, + .resume = mlx5_dpll_resume, + .id_table = mlx5_dpll_id_table, +}; + +static int __init mlx5_dpll_init(void) +{ + return auxiliary_driver_register(&mlx5_dpll_driver); +} + +static void __exit mlx5_dpll_exit(void) +{ + auxiliary_driver_unregister(&mlx5_dpll_driver); +} + +module_init(mlx5_dpll_init); +module_exit(mlx5_dpll_exit); + +MODULE_AUTHOR("Jiri Pirko <jiri@nvidia.com>"); +MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) DPLL driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index b1807bfb815f..729a11b5fb25 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -141,7 +141,7 @@ struct page_pool; #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2 #define MLX5E_MIN_NUM_CHANNELS 0x1 -#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE / 2) +#define 
MLX5E_MAX_NUM_CHANNELS 256 #define MLX5E_TX_CQ_POLL_BUDGET 128 #define MLX5E_TX_XSK_POLL_BUDGET 64 #define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */ @@ -168,6 +168,13 @@ struct page_pool; #define mlx5e_state_dereference(priv, p) \ rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock)) +enum mlx5e_devcom_events { + MPV_DEVCOM_MASTER_UP, + MPV_DEVCOM_MASTER_DOWN, + MPV_DEVCOM_IPSEC_MASTER_UP, + MPV_DEVCOM_IPSEC_MASTER_DOWN, +}; + static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev) { if (mlx5_lag_is_lacp_owner(mdev)) @@ -193,7 +200,8 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) { return is_kdump_kernel() ? MLX5E_MIN_NUM_CHANNELS : - min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS); + min3(mlx5_comp_vectors_max(mdev), (u32)MLX5E_MAX_NUM_CHANNELS, + (u32)(1 << MLX5_CAP_GEN(mdev, log_max_rqt_size))); } /* The maximum WQE size can be retrieved by max_wqe_sz_sq in @@ -818,6 +826,7 @@ enum { MLX5E_STATE_DESTROYING, MLX5E_STATE_XDP_TX_ENABLED, MLX5E_STATE_XDP_ACTIVE, + MLX5E_STATE_CHANNELS_ACTIVE, }; struct mlx5e_modify_sq_param { @@ -917,7 +926,7 @@ struct mlx5e_priv { const struct mlx5e_profile *profile; void *ppriv; -#ifdef CONFIG_MLX5_EN_MACSEC +#ifdef CONFIG_MLX5_MACSEC struct mlx5e_macsec *macsec; #endif #ifdef CONFIG_MLX5_EN_IPSEC @@ -936,6 +945,7 @@ struct mlx5e_priv { struct mlx5e_htb *htb; struct mlx5e_mqprio_rl *mqprio_rl; struct dentry *dfs_root; + struct mlx5_devcom_comp_dev *devcom; }; struct mlx5e_dev { @@ -1167,9 +1177,6 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc); int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc); -int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, - u32 *rule_locs); -int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd); u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv); u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv); int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c index c6b6e290fd79..0b1ac6e5c890 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c @@ -12,11 +12,19 @@ struct mlx5e_dev *mlx5e_create_devlink(struct device *dev, { struct mlx5e_dev *mlx5e_dev; struct devlink *devlink; + int err; devlink = devlink_alloc_ns(&mlx5e_devlink_ops, sizeof(*mlx5e_dev), devlink_net(priv_to_devlink(mdev)), dev); if (!devlink) return ERR_PTR(-ENOMEM); + + err = devl_nested_devlink_set(priv_to_devlink(mdev), devlink); + if (err) { + devlink_free(devlink); + return ERR_PTR(err); + } + devlink_register(devlink); return devlink_priv(devlink); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index e5a44b0b9616..4d6225e0eec7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -150,7 +150,6 @@ struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile, struct dentry *dfs_root); void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs); struct mlx5e_vlan_table *mlx5e_fs_get_vlan(struct mlx5e_flow_steering *fs); -void mlx5e_fs_set_tc(struct mlx5e_flow_steering *fs, struct mlx5e_tc_table *tc); struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs); 
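Aside on the en.h hunk above: the channel ceiling is no longer derived from the indirection-table size; it becomes a fixed 256 and mlx5e_get_max_num_channels() instead clamps at runtime against both the available completion vectors and the firmware's log_max_rqt_size capability via min3(). A minimal standalone sketch of that clamp follows; the comp_vectors and log_max_rqt_size parameters stand in for the values the driver reads through mlx5_comp_vectors_max() and MLX5_CAP_GEN(), and are illustrative assumptions, not real mlx5 API calls:

	/* Hedged sketch: mirrors the min3() clamp in mlx5e_get_max_num_channels().
	 * Inputs are hypothetical stand-ins for the firmware-reported values.
	 */
	#include <stdio.h>

	static unsigned int max_num_channels(unsigned int comp_vectors,
					     unsigned int log_max_rqt_size)
	{
		unsigned int cap = 256;			/* MLX5E_MAX_NUM_CHANNELS */
		unsigned int rqt = 1u << log_max_rqt_size; /* RQT entry limit */
		unsigned int n = comp_vectors;

		if (n > cap)
			n = cap;
		if (n > rqt)
			n = rqt;
		return n;
	}

	int main(void)
	{
		/* e.g. 63 completion vectors, 2^8 RQT entries -> 63 channels */
		printf("%u\n", max_num_channels(63, 8));
		return 0;
	}

The effect is that a device advertising a small RQT cannot be configured with more channels than its indirection table can spread traffic across, while large hosts are still capped at 256.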
struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs); struct mlx5_flow_namespace *mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c index be83ad9db82a..e1283531e0b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c @@ -154,6 +154,7 @@ static int fs_udp_create_groups(struct mlx5e_flow_table *ft, enum fs_udp_type ty in = kvzalloc(inlen, GFP_KERNEL); if (!in || !ft->g) { kfree(ft->g); + ft->g = NULL; kvfree(in); return -ENOMEM; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c index 6f4e6c34b2a2..81523825faa2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c @@ -5,134 +5,59 @@ #include "lib/eq.h" #include "lib/mlx5.h" -int mlx5e_health_fmsg_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name) +void mlx5e_health_fmsg_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name) { - int err; - - err = devlink_fmsg_pair_nest_start(fmsg, name); - if (err) - return err; - - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; - - return 0; + devlink_fmsg_pair_nest_start(fmsg, name); + devlink_fmsg_obj_nest_start(fmsg); } -int mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg) +void mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg) { - int err; - - err = devlink_fmsg_obj_nest_end(fmsg); - if (err) - return err; - - err = devlink_fmsg_pair_nest_end(fmsg); - if (err) - return err; - - return 0; + devlink_fmsg_obj_nest_end(fmsg); + devlink_fmsg_pair_nest_end(fmsg); } -int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) +void mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) { u32 out[MLX5_ST_SZ_DW(query_cq_out)] = {}; u8 hw_status; void *cqc; - int err; - - err = mlx5_core_query_cq(cq->mdev, &cq->mcq, out); - if (err) - return err; + mlx5_core_query_cq(cq->mdev, &cq->mcq, out); cqc = MLX5_ADDR_OF(query_cq_out, out, cq_context); hw_status = MLX5_GET(cqc, cqc, status); - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ"); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "cqn", cq->mcq.cqn); - if (err) - return err; - - err = devlink_fmsg_u8_pair_put(fmsg, "HW status", hw_status); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "ci", mlx5_cqwq_get_ci(&cq->wq)); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&cq->wq)); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - return 0; + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ"); + devlink_fmsg_u32_pair_put(fmsg, "cqn", cq->mcq.cqn); + devlink_fmsg_u8_pair_put(fmsg, "HW status", hw_status); + devlink_fmsg_u32_pair_put(fmsg, "ci", mlx5_cqwq_get_ci(&cq->wq)); + devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&cq->wq)); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } -int mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) +void mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) { u8 cq_log_stride; u32 cq_sz; - int err; cq_sz = mlx5_cqwq_get_size(&cq->wq); cq_log_stride = mlx5_cqwq_get_log_stride_size(&cq->wq); - err = 
mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ"); - if (err) - return err; - - err = devlink_fmsg_u64_pair_put(fmsg, "stride size", BIT(cq_log_stride)); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "size", cq_sz); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - return 0; + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ"); + devlink_fmsg_u64_pair_put(fmsg, "stride size", BIT(cq_log_stride)); + devlink_fmsg_u32_pair_put(fmsg, "size", cq_sz); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } -int mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg) +void mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg) { - int err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "EQ"); - if (err) - return err; - - err = devlink_fmsg_u8_pair_put(fmsg, "eqn", eq->core.eqn); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "irqn", eq->core.irqn); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "vecidx", eq->core.vecidx); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "ci", eq->core.cons_index); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "size", eq_get_size(&eq->core)); - if (err) - return err; - - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "EQ"); + devlink_fmsg_u8_pair_put(fmsg, "eqn", eq->core.eqn); + devlink_fmsg_u32_pair_put(fmsg, "irqn", eq->core.irqn); + devlink_fmsg_u32_pair_put(fmsg, "vecidx", eq->core.vecidx); + devlink_fmsg_u32_pair_put(fmsg, "ci", eq->core.cons_index); + devlink_fmsg_u32_pair_put(fmsg, "size", eq_get_size(&eq->core)); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } void mlx5e_health_create_reporters(struct mlx5e_priv *priv) @@ -235,23 +160,19 @@ int mlx5e_health_report(struct mlx5e_priv *priv, } #define MLX5_HEALTH_DEVLINK_MAX_SIZE 1024 -static int mlx5e_health_rsc_fmsg_binary(struct devlink_fmsg *fmsg, - const void *value, u32 value_len) +static void mlx5e_health_rsc_fmsg_binary(struct devlink_fmsg *fmsg, + const void *value, u32 value_len) { u32 data_size; - int err = 0; u32 offset; for (offset = 0; offset < value_len; offset += data_size) { data_size = value_len - offset; if (data_size > MLX5_HEALTH_DEVLINK_MAX_SIZE) data_size = MLX5_HEALTH_DEVLINK_MAX_SIZE; - err = devlink_fmsg_binary_put(fmsg, value + offset, data_size); - if (err) - break; + devlink_fmsg_binary_put(fmsg, value + offset, data_size); } - return err; } int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key, @@ -259,9 +180,8 @@ int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_rsc_dump_cmd *cmd; + int cmd_err, err = 0; struct page *page; - int cmd_err, err; - int end_err; int size; if (IS_ERR_OR_NULL(mdev->rsc_dump)) @@ -271,9 +191,7 @@ int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key if (!page) return -ENOMEM; - err = devlink_fmsg_binary_pair_nest_start(fmsg, "data"); - if (err) - goto free_page; + devlink_fmsg_binary_pair_nest_start(fmsg, "data"); cmd = mlx5_rsc_dump_cmd_create(mdev, key); if (IS_ERR(cmd)) { @@ -288,52 +206,31 @@ int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key goto destroy_cmd; } - err = mlx5e_health_rsc_fmsg_binary(fmsg, page_address(page), size); - if (err) - goto destroy_cmd; - + mlx5e_health_rsc_fmsg_binary(fmsg, page_address(page), size); } 
while (cmd_err > 0); destroy_cmd: mlx5_rsc_dump_cmd_destroy(cmd); - end_err = devlink_fmsg_binary_pair_nest_end(fmsg); - if (end_err) - err = end_err; + devlink_fmsg_binary_pair_nest_end(fmsg); free_page: __free_page(page); return err; } -int mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, - int queue_idx, char *lbl) +void mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, + int queue_idx, char *lbl) { struct mlx5_rsc_key key = {}; - int err; key.rsc = MLX5_SGMT_TYPE_FULL_QPC; key.index1 = queue_idx; key.size = PAGE_SIZE; key.num_of_obj1 = 1; - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, lbl); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "index", queue_idx); - if (err) - return err; - - err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - return devlink_fmsg_obj_nest_end(fmsg); + devlink_fmsg_obj_nest_start(fmsg); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, lbl); + devlink_fmsg_u32_pair_put(fmsg, "index", queue_idx); + mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); + devlink_fmsg_obj_nest_end(fmsg); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h index 0107e4e73bb0..84be3dd6f747 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h @@ -18,12 +18,13 @@ void mlx5e_reporter_tx_create(struct mlx5e_priv *priv); void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv); void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq); int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq); +void mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq *ptpsq); -int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg); -int mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg); -int mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg); -int mlx5e_health_fmsg_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name); -int mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg); +void mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg); +void mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg); +void mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg); +void mlx5e_health_fmsg_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name); +void mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg); void mlx5e_reporter_rx_create(struct mlx5e_priv *priv); void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv); @@ -53,6 +54,6 @@ void mlx5e_health_destroy_reporters(struct mlx5e_priv *priv); void mlx5e_health_channels_update(struct mlx5e_priv *priv); int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key, struct devlink_fmsg *fmsg); -int mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, - int queue_idx, char *lbl); +void mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, + int queue_idx, char *lbl); #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 5ce28ff7685f..e097f336e1c4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -6,6 +6,7 @@ #include "en/port.h" #include "en_accel/en_accel.h" #include "en_accel/ipsec.h" +#include <net/page_pool/types.h> #include <net/xdp_sock_drv.h> static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index b0b429a0321e..af3928eddafd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -2,9 +2,12 @@ // Copyright (c) 2020 Mellanox Technologies #include "en/ptp.h" +#include "en/health.h" #include "en/txrx.h" #include "en/params.h" #include "en/fs_tt_redirect.h" +#include <linux/list.h> +#include <linux/spinlock.h> struct mlx5e_ptp_fs { struct mlx5_flow_handle *l2_rule; @@ -19,6 +22,48 @@ struct mlx5e_ptp_params { struct mlx5e_rq_param rq_param; }; +struct mlx5e_ptp_port_ts_cqe_tracker { + u8 metadata_id; + bool inuse : 1; + struct list_head entry; +}; + +struct mlx5e_ptp_port_ts_cqe_list { + struct mlx5e_ptp_port_ts_cqe_tracker *nodes; + struct list_head tracker_list_head; + /* Sync list operations in xmit and napi_poll contexts */ + spinlock_t tracker_list_lock; +}; + +static inline void +mlx5e_ptp_port_ts_cqe_list_add(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metadata) +{ + struct mlx5e_ptp_port_ts_cqe_tracker *tracker = &list->nodes[metadata]; + + WARN_ON_ONCE(tracker->inuse); + tracker->inuse = true; + spin_lock(&list->tracker_list_lock); + list_add_tail(&tracker->entry, &list->tracker_list_head); + spin_unlock(&list->tracker_list_lock); +} + +static void +mlx5e_ptp_port_ts_cqe_list_remove(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metadata) +{ + struct mlx5e_ptp_port_ts_cqe_tracker *tracker = &list->nodes[metadata]; + + WARN_ON_ONCE(!tracker->inuse); + tracker->inuse = false; + spin_lock(&list->tracker_list_lock); + list_del(&tracker->entry); + spin_unlock(&list->tracker_list_lock); +} + +void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata) +{ + mlx5e_ptp_port_ts_cqe_list_add(ptpsq->ts_cqe_pending_list, metadata); +} + struct mlx5e_skb_cb_hwtstamp { ktime_t cqe_hwtstamp; ktime_t port_hwtstamp; @@ -79,84 +124,113 @@ void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type, memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp)); } -#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask) - -static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_ci, u16 skb_id) +static struct sk_buff * +mlx5e_ptp_metadata_map_lookup(struct mlx5e_ptp_metadata_map *map, u16 metadata) { - return (ptpsq->ts_cqe_ctr_mask && (skb_ci != skb_id)); + return map->data[metadata]; } -static bool mlx5e_ptp_ts_cqe_ooo(struct mlx5e_ptpsq *ptpsq, u16 skb_id) +static struct sk_buff * +mlx5e_ptp_metadata_map_remove(struct mlx5e_ptp_metadata_map *map, u16 metadata) { - u16 skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc); - u16 skb_pi = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_pc); + struct sk_buff *skb; - if (PTP_WQE_CTR2IDX(skb_id - skb_ci) >= PTP_WQE_CTR2IDX(skb_pi - skb_ci)) - return true; + skb = map->data[metadata]; + map->data[metadata] = NULL; - return false; + return skb; } -static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_ci, - u16 skb_id, int budget) +static bool mlx5e_ptp_metadata_map_unhealthy(struct mlx5e_ptp_metadata_map *map) { - struct skb_shared_hwtstamps hwts = {}; - struct sk_buff *skb; + /* Consider the map to have entered an unhealthy state once more than size * 15 / 2^4 (i.e. 15/16 of its capacity) cannot be reclaimed.
*/ + return map->undelivered_counter > (map->capacity >> 4) * 15; +} - ptpsq->cq_stats->resync_event++; +static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq, + ktime_t port_tstamp) +{ + struct mlx5e_ptp_port_ts_cqe_list *cqe_list = ptpsq->ts_cqe_pending_list; + ktime_t timeout = ns_to_ktime(MLX5E_PTP_TS_CQE_UNDELIVERED_TIMEOUT); + struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map; + struct mlx5e_ptp_port_ts_cqe_tracker *pos, *n; - while (skb_ci != skb_id) { - skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo); - hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp; - skb_tstamp_tx(skb, &hwts); - ptpsq->cq_stats->resync_cqe++; - napi_consume_skb(skb, budget); - skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc); + spin_lock(&cqe_list->tracker_list_lock); + list_for_each_entry_safe(pos, n, &cqe_list->tracker_list_head, entry) { + struct sk_buff *skb = + mlx5e_ptp_metadata_map_lookup(metadata_map, pos->metadata_id); + ktime_t dma_tstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp; + + if (!dma_tstamp || + ktime_after(ktime_add(dma_tstamp, timeout), port_tstamp)) + break; + + metadata_map->undelivered_counter++; + WARN_ON_ONCE(!pos->inuse); + pos->inuse = false; + list_del(&pos->entry); } + spin_unlock(&cqe_list->tracker_list_lock); } +#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask) + static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq, struct mlx5_cqe64 *cqe, + u8 *md_buff, + u8 *md_buff_sz, int budget) { - u16 skb_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter)); - u16 skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc); + struct mlx5e_ptp_port_ts_cqe_list *pending_cqe_list = ptpsq->ts_cqe_pending_list; + u8 metadata_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter)); + bool is_err_cqe = !!MLX5E_RX_ERR_CQE(cqe); struct mlx5e_txqsq *sq = &ptpsq->txqsq; struct sk_buff *skb; ktime_t hwtstamp; - if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { - skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo); - ptpsq->cq_stats->err_cqe++; - goto out; + if (likely(pending_cqe_list->nodes[metadata_id].inuse)) { + mlx5e_ptp_port_ts_cqe_list_remove(pending_cqe_list, metadata_id); + } else { + /* Reclaim space in the unlikely event CQE was delivered after + * marking it late. 
+ */ + ptpsq->metadata_map.undelivered_counter--; + ptpsq->cq_stats->late_cqe++; } - if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_ci, skb_id)) { - if (mlx5e_ptp_ts_cqe_ooo(ptpsq, skb_id)) { - /* already handled by a previous resync */ - ptpsq->cq_stats->ooo_cqe_drop++; - return; - } - mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_ci, skb_id, budget); + skb = mlx5e_ptp_metadata_map_remove(&ptpsq->metadata_map, metadata_id); + + if (unlikely(is_err_cqe)) { + ptpsq->cq_stats->err_cqe++; + goto out; } - skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo); hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe)); mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP, hwtstamp, ptpsq->cq_stats); ptpsq->cq_stats->cqe++; + mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp); out: napi_consume_skb(skb, budget); + md_buff[(*md_buff_sz)++] = metadata_id; + if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) && + !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) + queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work); } -static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget) +static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int napi_budget) { struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq); - struct mlx5_cqwq *cqwq = &cq->wq; + int budget = min(napi_budget, MLX5E_TX_CQ_POLL_BUDGET); + u8 metadata_buff[MLX5E_TX_CQ_POLL_BUDGET]; + u8 metadata_buff_sz = 0; + struct mlx5_cqwq *cqwq; struct mlx5_cqe64 *cqe; int work_done = 0; + cqwq = &cq->wq; + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state))) return false; @@ -167,7 +241,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget) do { mlx5_cqwq_pop(cqwq); - mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget); + mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, + metadata_buff, &metadata_buff_sz, napi_budget); } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq))); mlx5_cqwq_update_db_record(cqwq); @@ -175,6 +250,10 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget) /* ensure cq space is freed before enabling more cqes */ wmb(); + while (metadata_buff_sz > 0) + mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist, + metadata_buff[--metadata_buff_sz]); + mlx5e_txqsq_wake(&ptpsq->txqsq); return work_done == budget; @@ -291,36 +370,86 @@ static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn) static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa) { - int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq); - struct mlx5_core_dev *mdev = ptpsq->txqsq.mdev; + struct mlx5e_ptp_metadata_fifo *metadata_freelist = &ptpsq->metadata_freelist; + struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map; + struct mlx5e_ptp_port_ts_cqe_list *cqe_list; + int db_sz; + int md; - ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)), - GFP_KERNEL, numa); - if (!ptpsq->skb_fifo.fifo) + cqe_list = kvzalloc_node(sizeof(*ptpsq->ts_cqe_pending_list), GFP_KERNEL, numa); + if (!cqe_list) return -ENOMEM; + ptpsq->ts_cqe_pending_list = cqe_list; + + db_sz = min_t(u32, mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq), + 1 << MLX5_CAP_GEN_2(ptpsq->txqsq.mdev, + ts_cqe_metadata_size2wqe_counter)); + ptpsq->ts_cqe_ctr_mask = db_sz - 1; + + cqe_list->nodes = kvzalloc_node(array_size(db_sz, sizeof(*cqe_list->nodes)), + GFP_KERNEL, numa); + if (!cqe_list->nodes) + goto free_cqe_list; + INIT_LIST_HEAD(&cqe_list->tracker_list_head); + spin_lock_init(&cqe_list->tracker_list_lock); + + metadata_freelist->data
= + kvzalloc_node(array_size(db_sz, sizeof(*metadata_freelist->data)), + GFP_KERNEL, numa); + if (!metadata_freelist->data) + goto free_cqe_list_nodes; + metadata_freelist->mask = ptpsq->ts_cqe_ctr_mask; + + for (md = 0; md < db_sz; ++md) { + cqe_list->nodes[md].metadata_id = md; + metadata_freelist->data[md] = md; + } + metadata_freelist->pc = db_sz; + + metadata_map->data = + kvzalloc_node(array_size(db_sz, sizeof(*metadata_map->data)), + GFP_KERNEL, numa); + if (!metadata_map->data) + goto free_metadata_freelist; + metadata_map->capacity = db_sz; - ptpsq->skb_fifo.pc = &ptpsq->skb_fifo_pc; - ptpsq->skb_fifo.cc = &ptpsq->skb_fifo_cc; - ptpsq->skb_fifo.mask = wq_sz - 1; - if (MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter)) - ptpsq->ts_cqe_ctr_mask = - (1 << MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter)) - 1; return 0; + +free_metadata_freelist: + kvfree(metadata_freelist->data); +free_cqe_list_nodes: + kvfree(cqe_list->nodes); +free_cqe_list: + kvfree(cqe_list); + return -ENOMEM; } -static void mlx5e_ptp_drain_skb_fifo(struct mlx5e_skb_fifo *skb_fifo) +static void mlx5e_ptp_drain_metadata_map(struct mlx5e_ptp_metadata_map *map) { - while (*skb_fifo->pc != *skb_fifo->cc) { - struct sk_buff *skb = mlx5e_skb_fifo_pop(skb_fifo); + int idx; + + for (idx = 0; idx < map->capacity; ++idx) { + struct sk_buff *skb = map->data[idx]; dev_kfree_skb_any(skb); } } -static void mlx5e_ptp_free_traffic_db(struct mlx5e_skb_fifo *skb_fifo) +static void mlx5e_ptp_free_traffic_db(struct mlx5e_ptpsq *ptpsq) { - mlx5e_ptp_drain_skb_fifo(skb_fifo); - kvfree(skb_fifo->fifo); + mlx5e_ptp_drain_metadata_map(&ptpsq->metadata_map); + kvfree(ptpsq->metadata_map.data); + kvfree(ptpsq->metadata_freelist.data); + kvfree(ptpsq->ts_cqe_pending_list->nodes); + kvfree(ptpsq->ts_cqe_pending_list); +} + +static void mlx5e_ptpsq_unhealthy_work(struct work_struct *work) +{ + struct mlx5e_ptpsq *ptpsq = + container_of(work, struct mlx5e_ptpsq, report_unhealthy_work); + + mlx5e_reporter_tx_ptpsq_unhealthy(ptpsq); } static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn, @@ -348,11 +477,12 @@ static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn, if (err) goto err_free_txqsq; - err = mlx5e_ptp_alloc_traffic_db(ptpsq, - dev_to_node(mlx5_core_dma_dev(c->mdev))); + err = mlx5e_ptp_alloc_traffic_db(ptpsq, dev_to_node(mlx5_core_dma_dev(c->mdev))); if (err) goto err_free_txqsq; + INIT_WORK(&ptpsq->report_unhealthy_work, mlx5e_ptpsq_unhealthy_work); + return 0; err_free_txqsq: @@ -366,7 +496,9 @@ static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq) struct mlx5e_txqsq *sq = &ptpsq->txqsq; struct mlx5_core_dev *mdev = sq->mdev; - mlx5e_ptp_free_traffic_db(&ptpsq->skb_fifo); + if (current_work() != &ptpsq->report_unhealthy_work) + cancel_work_sync(&ptpsq->report_unhealthy_work); + mlx5e_ptp_free_traffic_db(ptpsq); cancel_work_sync(&sq->recover_work); mlx5e_ptp_destroy_sq(mdev, sq->sqn); mlx5e_free_txqsq_descs(sq); @@ -534,7 +666,10 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c, /* SQ */ if (test_bit(MLX5E_PTP_STATE_TX, c->state)) { - params->log_sq_size = orig->log_sq_size; + params->log_sq_size = + min(MLX5_CAP_GEN_2(c->mdev, ts_cqe_metadata_size2wqe_counter), + MLX5E_PTP_MAX_LOG_SQ_SIZE); + params->log_sq_size = min(params->log_sq_size, orig->log_sq_size); mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param); } /* RQ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h index cc7efde88ac3..7b700d0f956a 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h @@ -7,18 +7,38 @@ #include "en.h" #include "en_stats.h" #include "en/txrx.h" +#include <linux/ktime.h> #include <linux/ptp_classify.h> +#include <linux/time64.h> +#include <linux/workqueue.h> #define MLX5E_PTP_CHANNEL_IX 0 +#define MLX5E_PTP_MAX_LOG_SQ_SIZE (8U) +#define MLX5E_PTP_TS_CQE_UNDELIVERED_TIMEOUT (1 * NSEC_PER_SEC) + +struct mlx5e_ptp_metadata_fifo { + u8 cc; + u8 pc; + u8 mask; + u8 *data; +}; + +struct mlx5e_ptp_metadata_map { + u16 undelivered_counter; + u16 capacity; + struct sk_buff **data; +}; struct mlx5e_ptpsq { struct mlx5e_txqsq txqsq; struct mlx5e_cq ts_cq; - u16 skb_fifo_cc; - u16 skb_fifo_pc; - struct mlx5e_skb_fifo skb_fifo; struct mlx5e_ptp_cq_stats *cq_stats; u16 ts_cqe_ctr_mask; + + struct work_struct report_unhealthy_work; + struct mlx5e_ptp_port_ts_cqe_list *ts_cqe_pending_list; + struct mlx5e_ptp_metadata_fifo metadata_freelist; + struct mlx5e_ptp_metadata_map metadata_map; }; enum { @@ -69,12 +89,35 @@ static inline bool mlx5e_use_ptpsq(struct sk_buff *skb) fk.ports.dst == htons(PTP_EV_PORT)); } -static inline bool mlx5e_ptpsq_fifo_has_room(struct mlx5e_txqsq *sq) +static inline void mlx5e_ptp_metadata_fifo_push(struct mlx5e_ptp_metadata_fifo *fifo, u8 metadata) { - if (!sq->ptpsq) - return true; + fifo->data[fifo->mask & fifo->pc++] = metadata; +} + +static inline u8 +mlx5e_ptp_metadata_fifo_pop(struct mlx5e_ptp_metadata_fifo *fifo) +{ + return fifo->data[fifo->mask & fifo->cc++]; +} - return mlx5e_skb_fifo_has_room(&sq->ptpsq->skb_fifo); +static inline void +mlx5e_ptp_metadata_map_put(struct mlx5e_ptp_metadata_map *map, + struct sk_buff *skb, u8 metadata) +{ + WARN_ON_ONCE(map->data[metadata]); + map->data[metadata] = skb; +} + +static inline bool mlx5e_ptpsq_metadata_freelist_empty(struct mlx5e_ptpsq *ptpsq) +{ + struct mlx5e_ptp_metadata_fifo *freelist; + + if (likely(!ptpsq)) + return false; + + freelist = &ptpsq->metadata_freelist; + + return freelist->pc == freelist->cc; } int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params, @@ -89,6 +132,8 @@ void mlx5e_ptp_free_rx_fs(struct mlx5e_flow_steering *fs, const struct mlx5e_profile *profile); int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set); +void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata); + enum { MLX5E_SKB_CB_CQE_HWTSTAMP = BIT(0), MLX5E_SKB_CB_PORT_HWTSTAMP = BIT(1), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c index 1874c2f0587f..244bc15a42ab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c @@ -379,9 +379,9 @@ int mlx5e_htb_setup_tc(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb_ if (!htb && htb_qopt->command != TC_HTB_CREATE) return -EINVAL; - if (htb_qopt->prio) { + if (htb_qopt->prio || htb_qopt->quantum) { NL_SET_ERR_MSG_MOD(htb_qopt->extack, - "prio parameter is not supported by device with HTB offload enabled."); + "prio and quantum parameters are not supported by device with HTB offload enabled."); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c index 560800246573..5d128c5b4529 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c @@ -77,6 +77,10 @@ mlx5_esw_bridge_rep_vport_num_vhca_id_get(struct net_device *dev, 
struct mlx5_es return NULL; priv = netdev_priv(dev); + + if (!priv->mdev->priv.eswitch->br_offloads) + return NULL; + rpriv = priv->ppriv; *vport_num = rpriv->rep->vport; *esw_owner_vhca_id = MLX5_CAP_GEN(priv->mdev, vhca_id); @@ -463,6 +467,17 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb, /* only handle the event on peers */ if (mlx5_esw_bridge_is_local(dev, rep, esw)) break; + + fdb_info = container_of(info, + struct switchdev_notifier_fdb_info, + info); + /* Mark for deletion to prevent the update wq task from + * spuriously refreshing the entry which would mark it again as + * offloaded in SW bridge. After this fallthrough to regular + * async delete code. + */ + mlx5_esw_bridge_fdb_mark_deleted(dev, vport_num, esw_owner_vhca_id, br_offloads, + fdb_info); fallthrough; case SWITCHDEV_FDB_ADD_TO_DEVICE: case SWITCHDEV_FDB_DEL_TO_DEVICE: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index b5c773ffc763..b12fe3c5a258 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -715,9 +715,20 @@ void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, uplink_priv = &uplink_rpriv->uplink_priv; ct_priv = uplink_priv->ct_priv; - if (!mlx5_ipsec_is_rx_flow(cqe) && - !mlx5e_tc_update_skb(cqe, skb, mapping_ctx, reg_c0, ct_priv, zone_restore_id, tunnel_id, - &tc_priv)) +#ifdef CONFIG_MLX5_EN_IPSEC + if (!(tunnel_id >> ESW_TUN_OPTS_BITS)) { + u32 mapped_id; + u32 metadata; + + mapped_id = tunnel_id & ESW_IPSEC_RX_MAPPED_ID_MASK; + if (mapped_id && + !mlx5_esw_ipsec_rx_make_metadata(priv, mapped_id, &metadata)) + mlx5e_ipsec_offload_handle_rx_skb(priv->netdev, skb, metadata); + } +#endif + + if (!mlx5e_tc_update_skb(cqe, skb, mapping_ctx, reg_c0, ct_priv, + zone_restore_id, tunnel_id, &tc_priv)) goto free_skb; forward: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index e8eea9ffd5eb..4358798d6ce1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -199,78 +199,38 @@ static int mlx5e_rx_reporter_recover(struct devlink_health_reporter *reporter, mlx5e_health_recover_channels(priv); } -static int mlx5e_reporter_icosq_diagnose(struct mlx5e_icosq *icosq, u8 hw_state, - struct devlink_fmsg *fmsg) +static void mlx5e_reporter_icosq_diagnose(struct mlx5e_icosq *icosq, u8 hw_state, + struct devlink_fmsg *fmsg) { - int err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ"); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "sqn", icosq->sqn); - if (err) - return err; - - err = devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cc); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "pc", icosq->pc); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "WQE size", - mlx5_wq_cyc_get_size(&icosq->wq)); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ"); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "cqn", icosq->cq.mcq.cqn); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cq.wq.cc); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&icosq->cq.wq)); - if (err) - return err; - - err = 
mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ"); + devlink_fmsg_u32_pair_put(fmsg, "sqn", icosq->sqn); + devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state); + devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cc); + devlink_fmsg_u32_pair_put(fmsg, "pc", icosq->pc); + devlink_fmsg_u32_pair_put(fmsg, "WQE size", mlx5_wq_cyc_get_size(&icosq->wq)); + + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ"); + devlink_fmsg_u32_pair_put(fmsg, "cqn", icosq->cq.mcq.cqn); + devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cq.wq.cc); + devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&icosq->cq.wq)); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); + + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } -static int mlx5e_health_rq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_rq *rq) +static void mlx5e_health_rq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_rq *rq) { - int err; int i; BUILD_BUG_ON_MSG(ARRAY_SIZE(rq_sw_state_type_name) != MLX5E_NUM_RQ_STATES, "rq_sw_state_type_name string array must be consistent with MLX5E_RQ_STATE_* enum in en.h"); - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State"); - if (err) - return err; + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State"); - for (i = 0; i < ARRAY_SIZE(rq_sw_state_type_name); ++i) { - err = devlink_fmsg_u32_pair_put(fmsg, rq_sw_state_type_name[i], - test_bit(i, &rq->state)); - if (err) - return err; - } + for (i = 0; i < ARRAY_SIZE(rq_sw_state_type_name); ++i) + devlink_fmsg_u32_pair_put(fmsg, rq_sw_state_type_name[i], + test_bit(i, &rq->state)); - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } static int @@ -291,184 +251,93 @@ mlx5e_rx_reporter_build_diagnose_output_rq_common(struct mlx5e_rq *rq, wq_head = mlx5e_rqwq_get_head(rq); wqe_counter = mlx5e_rqwq_get_wqe_counter(rq); - err = devlink_fmsg_u32_pair_put(fmsg, "rqn", rq->rqn); - if (err) - return err; - - err = devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "WQE counter", wqe_counter); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "posted WQEs", wqes_sz); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "cc", wq_head); - if (err) - return err; - - err = mlx5e_health_rq_put_sw_state(fmsg, rq); - if (err) - return err; - - err = mlx5e_health_cq_diag_fmsg(&rq->cq, fmsg); - if (err) - return err; - - err = mlx5e_health_eq_diag_fmsg(rq->cq.mcq.eq, fmsg); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "rqn", rq->rqn); + devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state); + devlink_fmsg_u32_pair_put(fmsg, "WQE counter", wqe_counter); + devlink_fmsg_u32_pair_put(fmsg, "posted WQEs", wqes_sz); + devlink_fmsg_u32_pair_put(fmsg, "cc", wq_head); + mlx5e_health_rq_put_sw_state(fmsg, rq); + mlx5e_health_cq_diag_fmsg(&rq->cq, fmsg); + mlx5e_health_eq_diag_fmsg(rq->cq.mcq.eq, fmsg); if (rq->icosq) { struct mlx5e_icosq *icosq = rq->icosq; u8 icosq_hw_state; + int err; err = mlx5_core_query_sq_state(rq->mdev, icosq->sqn, &icosq_hw_state); if (err) return err; - err = mlx5e_reporter_icosq_diagnose(icosq, icosq_hw_state, fmsg); - if (err) - return err; + mlx5e_reporter_icosq_diagnose(icosq, icosq_hw_state, fmsg); } return 0; } -static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, - struct devlink_fmsg *fmsg) +static void 
mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, + struct devlink_fmsg *fmsg) { - int err; - - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->ix); - if (err) - return err; - - err = mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg); - if (err) - return err; - - return devlink_fmsg_obj_nest_end(fmsg); + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->ix); + mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg); + devlink_fmsg_obj_nest_end(fmsg); } -static int mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq, - struct devlink_fmsg *fmsg) +static void mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq, + struct devlink_fmsg *fmsg) { struct mlx5e_priv *priv = rq->priv; struct mlx5e_params *params; u32 rq_stride, rq_sz; bool real_time; - int err; params = &priv->channels.params; rq_sz = mlx5e_rqwq_get_size(rq); real_time = mlx5_is_real_time_rq(priv->mdev); rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL)); - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ"); - if (err) - return err; - - err = devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type); - if (err) - return err; - - err = devlink_fmsg_u64_pair_put(fmsg, "stride size", rq_stride); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "size", rq_sz); - if (err) - return err; - - err = devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC"); - if (err) - return err; - - err = mlx5e_health_cq_common_diag_fmsg(&rq->cq, fmsg); - if (err) - return err; - - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ"); + devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type); + devlink_fmsg_u64_pair_put(fmsg, "stride size", rq_stride); + devlink_fmsg_u32_pair_put(fmsg, "size", rq_sz); + devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? 
"RT" : "FRC"); + mlx5e_health_cq_common_diag_fmsg(&rq->cq, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } -static int +static void mlx5e_rx_reporter_diagnose_common_ptp_config(struct mlx5e_priv *priv, struct mlx5e_ptp *ptp_ch, struct devlink_fmsg *fmsg) { - int err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP"); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "filter_type", priv->tstamp.rx_filter); - if (err) - return err; - - err = mlx5e_rx_reporter_diagnose_generic_rq(&ptp_ch->rq, fmsg); - if (err) - return err; - - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP"); + devlink_fmsg_u32_pair_put(fmsg, "filter_type", priv->tstamp.rx_filter); + mlx5e_rx_reporter_diagnose_generic_rq(&ptp_ch->rq, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } -static int +static void mlx5e_rx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg) { struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); struct mlx5e_rq *generic_rq = &priv->channels.c[0]->rq; struct mlx5e_ptp *ptp_ch = priv->channels.ptp; - int err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common config"); - if (err) - return err; - - err = mlx5e_rx_reporter_diagnose_generic_rq(generic_rq, fmsg); - if (err) - return err; - if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) { - err = mlx5e_rx_reporter_diagnose_common_ptp_config(priv, ptp_ch, fmsg); - if (err) - return err; - } - - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common config"); + mlx5e_rx_reporter_diagnose_generic_rq(generic_rq, fmsg); + if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) + mlx5e_rx_reporter_diagnose_common_ptp_config(priv, ptp_ch, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } -static int mlx5e_rx_reporter_build_diagnose_output_ptp_rq(struct mlx5e_rq *rq, - struct devlink_fmsg *fmsg) +static void mlx5e_rx_reporter_build_diagnose_output_ptp_rq(struct mlx5e_rq *rq, + struct devlink_fmsg *fmsg) { - int err; - - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; - - err = devlink_fmsg_string_pair_put(fmsg, "channel", "ptp"); - if (err) - return err; - - err = mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg); - if (err) - return err; - - err = devlink_fmsg_obj_nest_end(fmsg); - if (err) - return err; - - return 0; + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_string_pair_put(fmsg, "channel", "ptp"); + mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg); + devlink_fmsg_obj_nest_end(fmsg); } static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter, @@ -477,20 +346,15 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter, { struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); struct mlx5e_ptp *ptp_ch = priv->channels.ptp; - int i, err = 0; + int i; mutex_lock(&priv->state_lock); if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) goto unlock; - err = mlx5e_rx_reporter_diagnose_common_config(reporter, fmsg); - if (err) - goto unlock; - - err = devlink_fmsg_arr_pair_nest_start(fmsg, "RQs"); - if (err) - goto unlock; + mlx5e_rx_reporter_diagnose_common_config(reporter, fmsg); + devlink_fmsg_arr_pair_nest_start(fmsg, "RQs"); for (i = 0; i < priv->channels.num; i++) { struct mlx5e_channel *c = priv->channels.c[i]; @@ -499,19 +363,14 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter, rq = 
test_bit(MLX5E_CHANNEL_STATE_XSK, c->state) ? &c->xskrq : &c->rq; - err = mlx5e_rx_reporter_build_diagnose_output(rq, fmsg); - if (err) - goto unlock; - } - if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) { - err = mlx5e_rx_reporter_build_diagnose_output_ptp_rq(&ptp_ch->rq, fmsg); - if (err) - goto unlock; + mlx5e_rx_reporter_build_diagnose_output(rq, fmsg); } - err = devlink_fmsg_arr_pair_nest_end(fmsg); + if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) + mlx5e_rx_reporter_build_diagnose_output_ptp_rq(&ptp_ch->rq, fmsg); + devlink_fmsg_arr_pair_nest_end(fmsg); unlock: mutex_unlock(&priv->state_lock); - return err; + return 0; } static int mlx5e_rx_reporter_dump_icosq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, @@ -519,61 +378,34 @@ static int mlx5e_rx_reporter_dump_icosq(struct mlx5e_priv *priv, struct devlink_ { struct mlx5e_txqsq *icosq = ctx; struct mlx5_rsc_key key = {}; - int err; if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return 0; - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice"); - if (err) - return err; - + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice"); key.size = PAGE_SIZE; key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL; - err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ"); - if (err) - return err; + mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC"); - if (err) - return err; + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ"); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC"); key.rsc = MLX5_SGMT_TYPE_FULL_QPC; key.index1 = icosq->sqn; key.num_of_obj1 = 1; + mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); - err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff"); - if (err) - return err; - + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff"); key.rsc = MLX5_SGMT_TYPE_SND_BUFF; key.num_of_obj2 = MLX5_RSC_DUMP_ALL; + mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); - err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; + mlx5e_health_fmsg_named_obj_nest_end(fmsg); - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + return 0; } static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, @@ -581,60 +413,34 @@ static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fms { struct mlx5_rsc_key key = {}; struct mlx5e_rq *rq = ctx; - int err; if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return 0; - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice"); - if (err) - return err; - + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice"); key.size = PAGE_SIZE; key.rsc = MLX5_SGMT_TYPE_RX_SLICE_ALL; - err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ"); - if (err) - return err; + mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); - err = 
mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC"); - if (err) - return err; + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ"); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC"); key.rsc = MLX5_SGMT_TYPE_FULL_QPC; key.index1 = rq->rqn; key.num_of_obj1 = 1; + mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); - err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "receive_buff"); - if (err) - return err; - + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "receive_buff"); key.rsc = MLX5_SGMT_TYPE_RCV_BUFF; key.num_of_obj2 = MLX5_RSC_DUMP_ALL; - err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; + mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; + mlx5e_health_fmsg_named_obj_nest_end(fmsg); - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + return 0; } static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv, @@ -642,44 +448,28 @@ static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv, { struct mlx5e_ptp *ptp_ch = priv->channels.ptp; struct mlx5_rsc_key key = {}; - int i, err; if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return 0; - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice"); - if (err) - return err; - + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice"); key.size = PAGE_SIZE; key.rsc = MLX5_SGMT_TYPE_RX_SLICE_ALL; - err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - err = devlink_fmsg_arr_pair_nest_start(fmsg, "RQs"); - if (err) - return err; + mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); + devlink_fmsg_arr_pair_nest_start(fmsg, "RQs"); - for (i = 0; i < priv->channels.num; i++) { + for (int i = 0; i < priv->channels.num; i++) { struct mlx5e_rq *rq = &priv->channels.c[i]->rq; - err = mlx5e_health_queue_dump(priv, fmsg, rq->rqn, "RQ"); - if (err) - return err; + mlx5e_health_queue_dump(priv, fmsg, rq->rqn, "RQ"); } - if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) { - err = mlx5e_health_queue_dump(priv, fmsg, ptp_ch->rq.rqn, "PTP RQ"); - if (err) - return err; - } + if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) + mlx5e_health_queue_dump(priv, fmsg, ptp_ch->rq.rqn, "PTP RQ"); - return devlink_fmsg_arr_pair_nest_end(fmsg); + devlink_fmsg_arr_pair_nest_end(fmsg); + return 0; } static int mlx5e_rx_reporter_dump_from_ctx(struct mlx5e_priv *priv, @@ -702,11 +492,11 @@ static int mlx5e_rx_reporter_dump(struct devlink_health_reporter *reporter, void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq) { - char icosq_str[MLX5E_REPORTER_PER_Q_MAX_LEN] = {}; char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; struct mlx5e_icosq *icosq = rq->icosq; struct mlx5e_priv *priv = rq->priv; struct mlx5e_err_ctx err_ctx = {}; + char icosq_str[32] = {}; err_ctx.ctx = rq; err_ctx.recover = mlx5e_rx_reporter_timeout_recover; @@ -715,7 +505,7 @@ void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq) if (icosq) snprintf(icosq_str, sizeof(icosq_str), "ICOSQ: 0x%x, ", icosq->sqn); snprintf(err_str, sizeof(err_str), - "RX timeout on channel: %d, %sRQ: 0x%x, CQ: 0x%x", + "RX timeout on channel: %d, %s RQ: 0x%x, CQ: 0x%x", rq->ix, icosq_str, rq->rqn, 
rq->cq.mcq.cqn); mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index b35ff289af49..6b44ddce14e9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -50,25 +50,19 @@ static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq) sq->pc = 0; } -static int mlx5e_health_sq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *sq) +static void mlx5e_health_sq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *sq) { - int err; int i; BUILD_BUG_ON_MSG(ARRAY_SIZE(sq_sw_state_type_name) != MLX5E_NUM_SQ_STATES, "sq_sw_state_type_name string array must be consistent with MLX5E_SQ_STATE_* enum in en.h"); - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State"); - if (err) - return err; + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State"); - for (i = 0; i < ARRAY_SIZE(sq_sw_state_type_name); ++i) { - err = devlink_fmsg_u32_pair_put(fmsg, sq_sw_state_type_name[i], - test_bit(i, &sq->state)); - if (err) - return err; - } + for (i = 0; i < ARRAY_SIZE(sq_sw_state_type_name); ++i) + devlink_fmsg_u32_pair_put(fmsg, sq_sw_state_type_name[i], + test_bit(i, &sq->state)); - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } static int mlx5e_tx_reporter_err_cqe_recover(void *ctx) @@ -164,6 +158,43 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx) return err; } +static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx) +{ + struct mlx5e_ptpsq *ptpsq = ctx; + struct mlx5e_channels *chs; + struct net_device *netdev; + struct mlx5e_priv *priv; + int carrier_ok; + int err; + + if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &ptpsq->txqsq.state)) + return 0; + + priv = ptpsq->txqsq.priv; + + mutex_lock(&priv->state_lock); + chs = &priv->channels; + netdev = priv->netdev; + + carrier_ok = netif_carrier_ok(netdev); + netif_carrier_off(netdev); + + mlx5e_deactivate_priv_channels(priv); + + mlx5e_ptp_close(chs->ptp); + err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp); + + mlx5e_activate_priv_channels(priv); + + /* return carrier back if needed */ + if (carrier_ok) + netif_carrier_on(netdev); + + mutex_unlock(&priv->state_lock); + + return err; +} + /* state lock cannot be grabbed within this function. * It can cause a deadlock or a read-after-free.
*/ @@ -183,7 +214,7 @@ static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter, mlx5e_health_recover_channels(priv); } -static int +static void mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *sq, int tc) { @@ -192,164 +223,71 @@ mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg, u8 state; int err; - err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "tc", tc); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn); - if (err) - return err; - - err = devlink_fmsg_u8_pair_put(fmsg, "HW state", state); - if (err) - return err; - - err = devlink_fmsg_bool_pair_put(fmsg, "stopped", stopped); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "cc", sq->cc); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "pc", sq->pc); - if (err) - return err; - - err = mlx5e_health_sq_put_sw_state(fmsg, sq); - if (err) - return err; - - err = mlx5e_health_cq_diag_fmsg(&sq->cq, fmsg); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "tc", tc); + devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix); + devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn); - return mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg); + err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state); + if (!err) + devlink_fmsg_u8_pair_put(fmsg, "HW state", state); + + devlink_fmsg_bool_pair_put(fmsg, "stopped", stopped); + devlink_fmsg_u32_pair_put(fmsg, "cc", sq->cc); + devlink_fmsg_u32_pair_put(fmsg, "pc", sq->pc); + mlx5e_health_sq_put_sw_state(fmsg, sq); + mlx5e_health_cq_diag_fmsg(&sq->cq, fmsg); + mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg); } -static int +static void mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *sq, int tc) { - int err; - - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix); - if (err) - return err; - - err = mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, sq, tc); - if (err) - return err; - - err = devlink_fmsg_obj_nest_end(fmsg); - if (err) - return err; - - return 0; + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix); + mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, sq, tc); + devlink_fmsg_obj_nest_end(fmsg); } -static int +static void mlx5e_tx_reporter_build_diagnose_output_ptpsq(struct devlink_fmsg *fmsg, struct mlx5e_ptpsq *ptpsq, int tc) { - int err; - - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; - - err = devlink_fmsg_string_pair_put(fmsg, "channel", "ptp"); - if (err) - return err; - - err = mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, &ptpsq->txqsq, tc); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS"); - if (err) - return err; - - err = mlx5e_health_cq_diag_fmsg(&ptpsq->ts_cq, fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - err = devlink_fmsg_obj_nest_end(fmsg); - if (err) - return err; - - return 0; + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_string_pair_put(fmsg, "channel", "ptp"); + mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, &ptpsq->txqsq, tc); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS"); + 
mlx5e_health_cq_diag_fmsg(&ptpsq->ts_cq, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); + devlink_fmsg_obj_nest_end(fmsg); } -static int +static void mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *txqsq) { - u32 sq_stride, sq_sz; - bool real_time; - int err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ"); - if (err) - return err; - - real_time = mlx5_is_real_time_sq(txqsq->mdev); - sq_sz = mlx5_wq_cyc_get_size(&txqsq->wq); - sq_stride = MLX5_SEND_WQE_BB; - - err = devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz); - if (err) - return err; - - err = devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC"); - if (err) - return err; - - err = mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg); - if (err) - return err; - - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + bool real_time = mlx5_is_real_time_sq(txqsq->mdev); + u32 sq_sz = mlx5_wq_cyc_get_size(&txqsq->wq); + u32 sq_stride = MLX5_SEND_WQE_BB; + + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ"); + devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride); + devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz); + devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC"); + mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } -static int +static void mlx5e_tx_reporter_diagnose_generic_tx_port_ts(struct devlink_fmsg *fmsg, struct mlx5e_ptpsq *ptpsq) { - int err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS"); - if (err) - return err; - - err = mlx5e_health_cq_common_diag_fmsg(&ptpsq->ts_cq, fmsg); - if (err) - return err; - - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS"); + mlx5e_health_cq_common_diag_fmsg(&ptpsq->ts_cq, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } -static int +static void mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg) { @@ -357,39 +295,20 @@ mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporte struct mlx5e_txqsq *generic_sq = priv->txq2sq[0]; struct mlx5e_ptp *ptp_ch = priv->channels.ptp; struct mlx5e_ptpsq *generic_ptpsq; - int err; - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config"); - if (err) - return err; - - err = mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, generic_sq); - if (err) - return err; + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config"); + mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, generic_sq); if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state)) goto out; generic_ptpsq = &ptp_ch->ptpsq[0]; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP"); - if (err) - return err; - - err = mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, &generic_ptpsq->txqsq); - if (err) - return err; - - err = mlx5e_tx_reporter_diagnose_generic_tx_port_ts(fmsg, generic_ptpsq); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP"); + mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, &generic_ptpsq->txqsq); + mlx5e_tx_reporter_diagnose_generic_tx_port_ts(fmsg, generic_ptpsq); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); out: - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); } static int mlx5e_tx_reporter_diagnose(struct 
devlink_health_reporter *reporter, @@ -399,20 +318,15 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); struct mlx5e_ptp *ptp_ch = priv->channels.ptp; - int i, tc, err = 0; + int i, tc; mutex_lock(&priv->state_lock); if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) goto unlock; - err = mlx5e_tx_reporter_diagnose_common_config(reporter, fmsg); - if (err) - goto unlock; - - err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs"); - if (err) - goto unlock; + mlx5e_tx_reporter_diagnose_common_config(reporter, fmsg); + devlink_fmsg_arr_pair_nest_start(fmsg, "SQs"); for (i = 0; i < priv->channels.num; i++) { struct mlx5e_channel *c = priv->channels.c[i]; @@ -420,31 +334,23 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) { struct mlx5e_txqsq *sq = &c->sq[tc]; - err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc); - if (err) - goto unlock; + mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc); } } if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state)) goto close_sqs_nest; - for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) { - err = mlx5e_tx_reporter_build_diagnose_output_ptpsq(fmsg, - &ptp_ch->ptpsq[tc], - tc); - if (err) - goto unlock; - } + for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) + mlx5e_tx_reporter_build_diagnose_output_ptpsq(fmsg, + &ptp_ch->ptpsq[tc], + tc); close_sqs_nest: - err = devlink_fmsg_arr_pair_nest_end(fmsg); - if (err) - goto unlock; - + devlink_fmsg_arr_pair_nest_end(fmsg); unlock: mutex_unlock(&priv->state_lock); - return err; + return 0; } static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, @@ -452,60 +358,33 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms { struct mlx5_rsc_key key = {}; struct mlx5e_txqsq *sq = ctx; - int err; if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return 0; - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice"); - if (err) - return err; - + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice"); key.size = PAGE_SIZE; key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL; - err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ"); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC"); - if (err) - return err; + mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ"); + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC"); key.rsc = MLX5_SGMT_TYPE_FULL_QPC; key.index1 = sq->sqn; key.num_of_obj1 = 1; + mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); - err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff"); - if (err) - return err; - + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff"); key.rsc = MLX5_SGMT_TYPE_SND_BUFF; key.num_of_obj2 = MLX5_RSC_DUMP_ALL; - err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; + mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); - err 
= mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; + mlx5e_health_fmsg_named_obj_nest_end(fmsg); - return mlx5e_health_fmsg_named_obj_nest_end(fmsg); + return 0; } static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, @@ -516,33 +395,31 @@ static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlin return mlx5e_tx_reporter_dump_sq(priv, fmsg, to_ctx->sq); } +static int mlx5e_tx_reporter_ptpsq_unhealthy_dump(struct mlx5e_priv *priv, + struct devlink_fmsg *fmsg, + void *ctx) +{ + struct mlx5e_ptpsq *ptpsq = ctx; + + return mlx5e_tx_reporter_dump_sq(priv, fmsg, &ptpsq->txqsq); +} + static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg) { struct mlx5e_ptp *ptp_ch = priv->channels.ptp; struct mlx5_rsc_key key = {}; - int i, tc, err; + int i, tc; if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return 0; - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice"); - if (err) - return err; - + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice"); key.size = PAGE_SIZE; key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL; - err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); - if (err) - return err; - - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); - if (err) - return err; - - err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs"); - if (err) - return err; + mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); + devlink_fmsg_arr_pair_nest_start(fmsg, "SQs"); for (i = 0; i < priv->channels.num; i++) { struct mlx5e_channel *c = priv->channels.c[i]; @@ -550,9 +427,7 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv, for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) { struct mlx5e_txqsq *sq = &c->sq[tc]; - err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "SQ"); - if (err) - return err; + mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "SQ"); } } @@ -560,13 +435,12 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv, for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) { struct mlx5e_txqsq *sq = &ptp_ch->ptpsq[tc].txqsq; - err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "PTP SQ"); - if (err) - return err; + mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "PTP SQ"); } } - return devlink_fmsg_arr_pair_nest_end(fmsg); + devlink_fmsg_arr_pair_nest_end(fmsg); + return 0; } static int mlx5e_tx_reporter_dump_from_ctx(struct mlx5e_priv *priv, @@ -621,6 +495,25 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq) return to_ctx.status; } +void mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq *ptpsq) +{ + struct mlx5e_ptp_metadata_map *map = &ptpsq->metadata_map; + char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; + struct mlx5e_txqsq *txqsq = &ptpsq->txqsq; + struct mlx5e_cq *ts_cq = &ptpsq->ts_cq; + struct mlx5e_priv *priv = txqsq->priv; + struct mlx5e_err_ctx err_ctx = {}; + + err_ctx.ctx = ptpsq; + err_ctx.recover = mlx5e_tx_reporter_ptpsq_unhealthy_recover; + err_ctx.dump = mlx5e_tx_reporter_ptpsq_unhealthy_dump; + snprintf(err_str, sizeof(err_str), + "Unhealthy TX port TS queue: %d, SQ: 0x%x, CQ: 0x%x, Undelivered CQEs: %u Map Capacity: %u", + txqsq->ch_ix, txqsq->sqn, ts_cq->mcq.cqn, map->undelivered_counter, map->capacity); + + mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx); +} + static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = { .name = "tx", .recover = mlx5e_tx_reporter_recover, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c index b915fb29dd2c..7b8ff7a71003 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c @@ -9,7 +9,7 @@ void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir, { unsigned int i; - for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) + for (i = 0; i < indir->actual_table_size; i++) indir->table[i] = i % num_channels; } @@ -45,9 +45,9 @@ static int mlx5e_rqt_init(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, } int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, - bool indir_enabled, u32 init_rqn) + bool indir_enabled, u32 init_rqn, u32 indir_table_size) { - u16 max_size = indir_enabled ? MLX5E_INDIR_RQT_SIZE : 1; + u16 max_size = indir_enabled ? indir_table_size : 1; return mlx5e_rqt_init(rqt, mdev, max_size, &init_rqn, 1); } @@ -68,11 +68,11 @@ static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, unsigned int num_rqns { unsigned int i; - for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) { + for (i = 0; i < indir->actual_table_size; i++) { unsigned int ix = i; if (hfunc == ETH_RSS_HASH_XOR) - ix = mlx5e_bits_invert(ix, ilog2(MLX5E_INDIR_RQT_SIZE)); + ix = mlx5e_bits_invert(ix, ilog2(indir->actual_table_size)); ix = indir->table[ix]; @@ -94,7 +94,7 @@ int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, u32 *rss_rqns; int err; - rss_rqns = kvmalloc_array(MLX5E_INDIR_RQT_SIZE, sizeof(*rss_rqns), GFP_KERNEL); + rss_rqns = kvmalloc_array(indir->actual_table_size, sizeof(*rss_rqns), GFP_KERNEL); if (!rss_rqns) return -ENOMEM; @@ -102,13 +102,25 @@ int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, if (err) goto out; - err = mlx5e_rqt_init(rqt, mdev, MLX5E_INDIR_RQT_SIZE, rss_rqns, MLX5E_INDIR_RQT_SIZE); + err = mlx5e_rqt_init(rqt, mdev, indir->max_table_size, rss_rqns, + indir->actual_table_size); out: kvfree(rss_rqns); return err; } +#define MLX5E_UNIFORM_SPREAD_RQT_FACTOR 2 + +u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels) +{ + u32 rqt_size = max_t(u32, MLX5E_INDIR_MIN_RQT_SIZE, + roundup_pow_of_two(num_channels * MLX5E_UNIFORM_SPREAD_RQT_FACTOR)); + u32 max_cap_rqt_size = 1 << MLX5_CAP_GEN(mdev, log_max_rqt_size); + + return min_t(u32, rqt_size, max_cap_rqt_size); +} + void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt) { mlx5_core_destroy_rqt(rqt->mdev, rqt->rqtn); @@ -151,10 +163,10 @@ int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_ u32 *rss_rqns; int err; - if (WARN_ON(rqt->size != MLX5E_INDIR_RQT_SIZE)) + if (WARN_ON(rqt->size != indir->max_table_size)) return -EINVAL; - rss_rqns = kvmalloc_array(MLX5E_INDIR_RQT_SIZE, sizeof(*rss_rqns), GFP_KERNEL); + rss_rqns = kvmalloc_array(indir->actual_table_size, sizeof(*rss_rqns), GFP_KERNEL); if (!rss_rqns) return -ENOMEM; @@ -162,7 +174,7 @@ int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_ if (err) goto out; - err = mlx5e_rqt_redirect(rqt, rss_rqns, MLX5E_INDIR_RQT_SIZE); + err = mlx5e_rqt_redirect(rqt, rss_rqns, indir->actual_table_size); out: kvfree(rss_rqns); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h index 60c985a12f24..77fba3ebd18d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h @@ -6,12 +6,14 @@ #include <linux/kernel.h> -#define MLX5E_INDIR_RQT_SIZE (1 << 8) +#define MLX5E_INDIR_MIN_RQT_SIZE (BIT(8)) struct 
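[Note: mlx5e_rqt_size() above derives the indirection table size from the channel count instead of the old fixed MLX5E_INDIR_RQT_SIZE. A standalone sketch of the same arithmetic, with an assumed device cap for illustration:]

#include <stdio.h>
#include <stdint.h>

/* Sizing rule: spread factor 2, rounded up to a power of two, floored
 * at BIT(8) (MLX5E_INDIR_MIN_RQT_SIZE) and capped by the device limit
 * (log_max_rqt_size). The cap value below is only an example.
 */
static uint32_t roundup_pow_of_two_u32(uint32_t x)
{
	uint32_t r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

static uint32_t rqt_size(unsigned int num_channels, unsigned int log_max_rqt_size)
{
	uint32_t min_size = 1u << 8;
	uint32_t want = roundup_pow_of_two_u32(num_channels * 2);
	uint32_t cap = 1u << log_max_rqt_size;

	if (want < min_size)
		want = min_size;
	return want < cap ? want : cap;
}

int main(void)
{
	printf("%u\n", (unsigned)rqt_size(24, 11));	/* 48 -> 64 -> floored to 256 */
	printf("%u\n", (unsigned)rqt_size(200, 11));	/* 400 -> 512 */
	return 0;
}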
mlx5_core_dev; struct mlx5e_rss_params_indir { - u32 table[MLX5E_INDIR_RQT_SIZE]; + u32 *table; + u32 actual_table_size; + u32 max_table_size; }; void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir, @@ -24,7 +26,7 @@ struct mlx5e_rqt { }; int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, - bool indir_enabled, u32 init_rqn); + bool indir_enabled, u32 init_rqn, u32 indir_table_size); int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, u32 *rqns, unsigned int num_rqns, u8 hfunc, struct mlx5e_rss_params_indir *indir); @@ -35,6 +37,7 @@ static inline u32 mlx5e_rqt_get_rqtn(struct mlx5e_rqt *rqt) return rqt->rqtn; } +u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels); int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn); int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns, u8 hfunc, struct mlx5e_rss_params_indir *indir); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c index 7f93426b88b3..c1545a2e8d6d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c @@ -81,14 +81,75 @@ struct mlx5e_rss { refcount_t refcnt; }; -struct mlx5e_rss *mlx5e_rss_alloc(void) +void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels) { - return kvzalloc(sizeof(struct mlx5e_rss), GFP_KERNEL); + rss->indir.actual_table_size = mlx5e_rqt_size(rss->mdev, num_channels); } -void mlx5e_rss_free(struct mlx5e_rss *rss) +int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir, struct mlx5_core_dev *mdev, + u32 actual_table_size, u32 max_table_size) { + indir->table = kvmalloc_array(max_table_size, sizeof(*indir->table), GFP_KERNEL); + if (!indir->table) + return -ENOMEM; + + indir->max_table_size = max_table_size; + indir->actual_table_size = actual_table_size; + + return 0; +} + +void mlx5e_rss_params_indir_cleanup(struct mlx5e_rss_params_indir *indir) +{ + kvfree(indir->table); +} + +static int mlx5e_rss_copy(struct mlx5e_rss *to, const struct mlx5e_rss *from) +{ + u32 *dst_indir_table; + + if (to->indir.actual_table_size != from->indir.actual_table_size || + to->indir.max_table_size != from->indir.max_table_size) { + mlx5e_rss_warn(to->mdev, + "Failed to copy RSS due to size mismatch, src (actual %u, max %u) != dst (actual %u, max %u)\n", + from->indir.actual_table_size, from->indir.max_table_size, + to->indir.actual_table_size, to->indir.max_table_size); + return -EINVAL; + } + + dst_indir_table = to->indir.table; + *to = *from; + to->indir.table = dst_indir_table; + memcpy(to->indir.table, from->indir.table, + from->indir.actual_table_size * sizeof(*from->indir.table)); + return 0; +} + +static struct mlx5e_rss *mlx5e_rss_init_copy(const struct mlx5e_rss *from) +{ + struct mlx5e_rss *rss; + int err; + + rss = kvzalloc(sizeof(*rss), GFP_KERNEL); + if (!rss) + return ERR_PTR(-ENOMEM); + + err = mlx5e_rss_params_indir_init(&rss->indir, from->mdev, from->indir.actual_table_size, + from->indir.max_table_size); + if (err) + goto err_free_rss; + + err = mlx5e_rss_copy(rss, from); + if (err) + goto err_free_indir; + + return rss; + +err_free_indir: + mlx5e_rss_params_indir_cleanup(&rss->indir); +err_free_rss: kvfree(rss); + return ERR_PTR(err); } static void mlx5e_rss_params_init(struct mlx5e_rss *rss) @@ -282,28 +343,43 @@ static int mlx5e_rss_update_tirs(struct mlx5e_rss *rss) return retval; } -int 
mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev, - bool inner_ft_support, u32 drop_rqn) +static int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss) { - rss->mdev = mdev; - rss->inner_ft_support = inner_ft_support; - rss->drop_rqn = drop_rqn; - mlx5e_rss_params_init(rss); refcount_set(&rss->refcnt, 1); - return mlx5e_rqt_init_direct(&rss->rqt, mdev, true, drop_rqn); + return mlx5e_rqt_init_direct(&rss->rqt, rss->mdev, true, + rss->drop_rqn, rss->indir.max_table_size); } -int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev, - bool inner_ft_support, u32 drop_rqn, - const struct mlx5e_packet_merge_param *init_pkt_merge_param) +struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn, + const struct mlx5e_packet_merge_param *init_pkt_merge_param, + enum mlx5e_rss_init_type type, unsigned int nch, + unsigned int max_nch) { + struct mlx5e_rss *rss; int err; - err = mlx5e_rss_init_no_tirs(rss, mdev, inner_ft_support, drop_rqn); + rss = kvzalloc(sizeof(*rss), GFP_KERNEL); + if (!rss) + return ERR_PTR(-ENOMEM); + + err = mlx5e_rss_params_indir_init(&rss->indir, mdev, + mlx5e_rqt_size(mdev, nch), + mlx5e_rqt_size(mdev, max_nch)); + if (err) + goto err_free_rss; + + rss->mdev = mdev; + rss->inner_ft_support = inner_ft_support; + rss->drop_rqn = drop_rqn; + + err = mlx5e_rss_init_no_tirs(rss); if (err) - goto err_out; + goto err_free_indir; + + if (type == MLX5E_RSS_INIT_NO_TIRS) + goto out; err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, false); if (err) @@ -315,14 +391,18 @@ int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev, goto err_destroy_tirs; } - return 0; +out: + return rss; err_destroy_tirs: mlx5e_rss_destroy_tirs(rss, false); err_destroy_rqt: mlx5e_rqt_destroy(&rss->rqt); -err_out: - return err; +err_free_indir: + mlx5e_rss_params_indir_cleanup(&rss->indir); +err_free_rss: + kvfree(rss); + return ERR_PTR(err); } int mlx5e_rss_cleanup(struct mlx5e_rss *rss) @@ -336,6 +416,8 @@ int mlx5e_rss_cleanup(struct mlx5e_rss *rss) mlx5e_rss_destroy_tirs(rss, true); mlx5e_rqt_destroy(&rss->rqt); + mlx5e_rss_params_indir_cleanup(&rss->indir); + kvfree(rss); return 0; } @@ -470,11 +552,9 @@ inner_tir: int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc) { - unsigned int i; - if (indir) - for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) - indir[i] = rss->indir.table[i]; + memcpy(indir, rss->indir.table, + rss->indir.actual_table_size * sizeof(*rss->indir.table)); if (key) memcpy(key, rss->hash.toeplitz_hash_key, @@ -495,11 +575,9 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, struct mlx5e_rss *old_rss; int err = 0; - old_rss = mlx5e_rss_alloc(); - if (!old_rss) - return -ENOMEM; - - *old_rss = *rss; + old_rss = mlx5e_rss_init_copy(rss); + if (IS_ERR(old_rss)) + return PTR_ERR(old_rss); if (hfunc && *hfunc != rss->hash.hfunc) { switch (*hfunc) { @@ -523,18 +601,16 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, } if (indir) { - unsigned int i; - changed_indir = true; - for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) - rss->indir.table[i] = indir[i]; + memcpy(rss->indir.table, indir, + rss->indir.actual_table_size * sizeof(*rss->indir.table)); } if (changed_indir && rss->enabled) { err = mlx5e_rss_apply(rss, rqns, num_rqns); if (err) { - *rss = *old_rss; + mlx5e_rss_copy(rss, old_rss); goto out; } } @@ -543,7 +619,9 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, mlx5e_rss_update_tirs(rss); out: - mlx5e_rss_free(old_rss); + 
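[Note: with the indirection table now heap-allocated, a plain structure assignment in set_rxfh would alias or leak the destination's buffer, so mlx5e_rss_copy() above saves and restores the pointer. A small sketch of that snapshot-and-rollback pattern, with illustrative names:]

#include <stdlib.h>
#include <string.h>

struct rss_state {
	unsigned int size;
	unsigned int *table;	/* heap-owned by this struct */
	int hfunc;
};

static int rss_copy(struct rss_state *to, const struct rss_state *from)
{
	unsigned int *dst_table = to->table;	/* keep dst's own buffer */

	if (to->size != from->size)
		return -1;			/* sizes must match, as in the driver */
	*to = *from;				/* copies scalars, clobbers the pointer */
	to->table = dst_table;			/* restore ownership */
	memcpy(to->table, from->table, from->size * sizeof(*from->table));
	return 0;
}

int main(void)
{
	unsigned int a_tbl[4] = {0, 1, 2, 3}, snap_tbl[4];
	struct rss_state rss = { 4, a_tbl, 0 }, snap = { 4, snap_tbl, 0 };

	rss_copy(&snap, &rss);	/* snapshot before a risky update */
	rss.table[0] = 7;	/* attempt the update... */
	rss_copy(&rss, &snap);	/* ...and roll back on failure */
	return rss.table[0];	/* 0 again */
}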
mlx5e_rss_params_indir_cleanup(&old_rss->indir); + kvfree(old_rss); + return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h index c6b216416344..d1d0bc350e92 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h @@ -8,18 +8,24 @@ #include "tir.h" #include "fs.h" +enum mlx5e_rss_init_type { + MLX5E_RSS_INIT_NO_TIRS = 0, + MLX5E_RSS_INIT_TIRS +}; + struct mlx5e_rss_params_traffic_type mlx5e_rss_get_default_tt_config(enum mlx5_traffic_types tt); struct mlx5e_rss; -struct mlx5e_rss *mlx5e_rss_alloc(void); -void mlx5e_rss_free(struct mlx5e_rss *rss); -int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev, - bool inner_ft_support, u32 drop_rqn, - const struct mlx5e_packet_merge_param *init_pkt_merge_param); -int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev, - bool inner_ft_support, u32 drop_rqn); +int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir, struct mlx5_core_dev *mdev, + u32 actual_table_size, u32 max_table_size); +void mlx5e_rss_params_indir_cleanup(struct mlx5e_rss_params_indir *indir); +void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels); +struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn, + const struct mlx5e_packet_merge_param *init_pkt_merge_param, + enum mlx5e_rss_init_type type, unsigned int nch, + unsigned int max_nch); int mlx5e_rss_cleanup(struct mlx5e_rss *rss); void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c index e1095bc36543..b23e224e3763 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c @@ -18,7 +18,7 @@ struct mlx5e_rx_res { struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS]; bool rss_active; - u32 rss_rqns[MLX5E_INDIR_RQT_SIZE]; + u32 *rss_rqns; unsigned int rss_nch; struct { @@ -34,41 +34,42 @@ struct mlx5e_rx_res { /* API for rx_res_rss_* */ +void mlx5e_rx_res_rss_update_num_channels(struct mlx5e_rx_res *res, u32 nch) +{ + int i; + + for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) { + if (res->rss[i]) + mlx5e_rss_params_indir_modify_actual_size(res->rss[i], nch); + } +} + static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res, unsigned int init_nch) { bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT; struct mlx5e_rss *rss; - int err; if (WARN_ON(res->rss[0])) return -EINVAL; - rss = mlx5e_rss_alloc(); - if (!rss) - return -ENOMEM; - - err = mlx5e_rss_init(rss, res->mdev, inner_ft_support, res->drop_rqn, - &res->pkt_merge_param); - if (err) - goto err_rss_free; + rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn, + &res->pkt_merge_param, MLX5E_RSS_INIT_TIRS, init_nch, res->max_nch); + if (IS_ERR(rss)) + return PTR_ERR(rss); mlx5e_rss_set_indir_uniform(rss, init_nch); res->rss[0] = rss; return 0; - -err_rss_free: - mlx5e_rss_free(rss); - return err; } int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch) { bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT; struct mlx5e_rss *rss; - int err, i; + int i; for (i = 1; i < MLX5E_MAX_NUM_RSS; i++) if (!res->rss[i]) @@ -77,13 +78,11 @@ int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int i if (i == MLX5E_MAX_NUM_RSS) return -ENOSPC; - rss = mlx5e_rss_alloc(); - 
if (!rss) - return -ENOMEM; - - err = mlx5e_rss_init_no_tirs(rss, res->mdev, inner_ft_support, res->drop_rqn); - if (err) - goto err_rss_free; + rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn, + &res->pkt_merge_param, MLX5E_RSS_INIT_NO_TIRS, init_nch, + res->max_nch); + if (IS_ERR(rss)) + return PTR_ERR(rss); mlx5e_rss_set_indir_uniform(rss, init_nch); if (res->rss_active) @@ -93,10 +92,6 @@ int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int i *rss_idx = i; return 0; - -err_rss_free: - mlx5e_rss_free(rss); - return err; } static int __mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx) @@ -108,7 +103,6 @@ static int __mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx) if (err) return err; - mlx5e_rss_free(rss); res->rss[rss_idx] = NULL; return 0; @@ -218,17 +212,32 @@ int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx, return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, res->rss_nch); } -u8 mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt) +int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx, + enum mlx5_traffic_types tt) { - struct mlx5e_rss *rss = res->rss[0]; + struct mlx5e_rss *rss; + + if (rss_idx >= MLX5E_MAX_NUM_RSS) + return -EINVAL; + + rss = res->rss[rss_idx]; + if (!rss) + return -ENOENT; return mlx5e_rss_get_hash_fields(rss, tt); } -int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt, - u8 rx_hash_fields) +int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx, + enum mlx5_traffic_types tt, u8 rx_hash_fields) { - struct mlx5e_rss *rss = res->rss[0]; + struct mlx5e_rss *rss; + + if (rss_idx >= MLX5E_MAX_NUM_RSS) + return -EINVAL; + + rss = res->rss[rss_idx]; + if (!rss) + return -ENOENT; return mlx5e_rss_set_hash_fields(rss, tt, rx_hash_fields); } @@ -269,9 +278,27 @@ struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx) /* End of API rx_res_rss_* */ -struct mlx5e_rx_res *mlx5e_rx_res_alloc(void) +static void mlx5e_rx_res_free(struct mlx5e_rx_res *res) +{ + kvfree(res->rss_rqns); + kvfree(res); +} + +static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsigned int max_nch) { - return kvzalloc(sizeof(struct mlx5e_rx_res), GFP_KERNEL); + struct mlx5e_rx_res *rx_res; + + rx_res = kvzalloc(sizeof(*rx_res), GFP_KERNEL); + if (!rx_res) + return NULL; + + rx_res->rss_rqns = kvcalloc(max_nch, sizeof(*rx_res->rss_rqns), GFP_KERNEL); + if (!rx_res->rss_rqns) { + kvfree(rx_res); + return NULL; + } + + return rx_res; } static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res) @@ -293,7 +320,8 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res) for (ix = 0; ix < res->max_nch; ix++) { err = mlx5e_rqt_init_direct(&res->channels[ix].direct_rqt, - res->mdev, false, res->drop_rqn); + res->mdev, false, res->drop_rqn, + mlx5e_rqt_size(res->mdev, res->max_nch)); if (err) { mlx5_core_warn(res->mdev, "Failed to create a direct RQT: err = %d, ix = %u\n", err, ix); @@ -347,7 +375,8 @@ static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res) if (!builder) return -ENOMEM; - err = mlx5e_rqt_init_direct(&res->ptp.rqt, res->mdev, false, res->drop_rqn); + err = mlx5e_rqt_init_direct(&res->ptp.rqt, res->mdev, false, res->drop_rqn, + mlx5e_rqt_size(res->mdev, res->max_nch)); if (err) goto out; @@ -389,13 +418,19 @@ static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res) mlx5e_rqt_destroy(&res->ptp.rqt); } -int 
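[Note: the hash-field getters/setters above gain an rss_idx parameter and distinguish two failure modes: an out-of-range index versus a valid but empty slot. A toy stand-in for that validation, names illustrative:]

#include <errno.h>
#include <stddef.h>

#define MAX_NUM_RSS	8

struct rss;

static struct rss *rss_slots[MAX_NUM_RSS];

static int rss_get(unsigned int rss_idx, struct rss **out)
{
	if (rss_idx >= MAX_NUM_RSS)
		return -EINVAL;		/* index out of range */
	if (!rss_slots[rss_idx])
		return -ENOENT;		/* slot exists but holds no context */
	*out = rss_slots[rss_idx];
	return 0;
}

int main(void)
{
	struct rss *rss;

	return rss_get(3, &rss) == -ENOENT ? 0 : 1;	/* empty slot */
}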
mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev, - enum mlx5e_rx_res_features features, unsigned int max_nch, - u32 drop_rqn, const struct mlx5e_packet_merge_param *init_pkt_merge_param, - unsigned int init_nch) +struct mlx5e_rx_res * +mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features, + unsigned int max_nch, u32 drop_rqn, + const struct mlx5e_packet_merge_param *init_pkt_merge_param, + unsigned int init_nch) { + struct mlx5e_rx_res *res; int err; + res = mlx5e_rx_res_alloc(mdev, max_nch); + if (!res) + return ERR_PTR(-ENOMEM); + res->mdev = mdev; res->features = features; res->max_nch = max_nch; @@ -406,7 +441,7 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev, err = mlx5e_rx_res_rss_init_def(res, init_nch); if (err) - goto err_out; + goto err_rx_res_free; err = mlx5e_rx_res_channels_init(res); if (err) @@ -416,14 +451,15 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev, if (err) goto err_channels_destroy; - return 0; + return res; err_channels_destroy: mlx5e_rx_res_channels_destroy(res); err_rss_destroy: __mlx5e_rx_res_rss_destroy(res, 0); -err_out: - return err; +err_rx_res_free: + mlx5e_rx_res_free(res); + return ERR_PTR(err); } void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res) @@ -431,11 +467,7 @@ void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res) mlx5e_rx_res_ptp_destroy(res); mlx5e_rx_res_channels_destroy(res); mlx5e_rx_res_rss_destroy_all(res); -} - -void mlx5e_rx_res_free(struct mlx5e_rx_res *res) -{ - kvfree(res); + mlx5e_rx_res_free(res); } u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h index 5d5f64fab60f..82aaba8a82b3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h @@ -21,13 +21,12 @@ enum mlx5e_rx_res_features { }; /* Setup */ -struct mlx5e_rx_res *mlx5e_rx_res_alloc(void); -int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev, - enum mlx5e_rx_res_features features, unsigned int max_nch, - u32 drop_rqn, const struct mlx5e_packet_merge_param *init_pkt_merge_param, - unsigned int init_nch); +struct mlx5e_rx_res * +mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features, + unsigned int max_nch, u32 drop_rqn, + const struct mlx5e_packet_merge_param *init_pkt_merge_param, + unsigned int init_nch); void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res); -void mlx5e_rx_res_free(struct mlx5e_rx_res *res); /* TIRN getters for flow steering */ u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix); @@ -48,9 +47,10 @@ int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx, int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx, const u32 *indir, const u8 *key, const u8 *hfunc); -u8 mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt); -int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt, - u8 rx_hash_fields); +int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx, + enum mlx5_traffic_types tt); +int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx, + enum mlx5_traffic_types tt, u8 rx_hash_fields); int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res, struct mlx5e_packet_merge_param *pkt_merge_param); @@ -59,6 +59,7 @@ int 
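[Note: the alloc()/init() pairs above collapse into single create() functions returning either an object or an encoded errno, the kernel's ERR_PTR convention. A self-contained userspace emulation of that convention, object names illustrative:]

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct rx_res { int nch; };

static struct rx_res *rx_res_create(int nch)
{
	struct rx_res *res;

	if (nch <= 0)
		return ERR_PTR(-EINVAL);	/* invalid config, no object */
	res = calloc(1, sizeof(*res));
	if (!res)
		return ERR_PTR(-ENOMEM);
	res->nch = nch;
	return res;
}

int main(void)
{
	struct rx_res *res = rx_res_create(8);

	if (IS_ERR(res))
		return (int)-PTR_ERR(res);
	printf("nch=%d\n", res->nch);
	free(res);
	return 0;
}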
mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx); int mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res *res); int mlx5e_rx_res_rss_index(struct mlx5e_rx_res *res, struct mlx5e_rss *rss); struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx); +void mlx5e_rx_res_rss_update_num_channels(struct mlx5e_rx_res *res, u32 nch); /* Workaround for hairpin */ struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c index 92d3952dfa8b..feeb41693c17 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c @@ -17,8 +17,10 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state, if (err) return err; - if (mlx5e_is_eswitch_flow(parse_state->flow)) + if (mlx5e_is_eswitch_flow(parse_state->flow)) { attr->esw_attr->split_count = attr->esw_attr->out_count; + parse_state->if_count = 0; + } attr->flags |= MLX5_ATTR_FLAG_CT; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c index 291193f7120d..1b418095b79a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c @@ -197,7 +197,7 @@ parse_mirred_encap(struct mlx5e_tc_act_parse_state *parse_state, } esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP; esw_attr->out_count++; - /* attr->dests[].rep is resolved when we handle encap */ + /* attr->dests[].vport is resolved when we handle encap */ return 0; } @@ -270,7 +270,8 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state, out_priv = netdev_priv(out_dev); rpriv = out_priv->ppriv; - esw_attr->dests[esw_attr->out_count].rep = rpriv->rep; + esw_attr->dests[esw_attr->out_count].vport_valid = true; + esw_attr->dests[esw_attr->out_count].vport = rpriv->rep->vport; esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev; esw_attr->out_count++; @@ -294,6 +295,7 @@ parse_mirred_ovs_master(struct mlx5e_tc_act_parse_state *parse_state, if (err) return err; + parse_state->if_count = 0; esw_attr->out_count++; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c index 3b272bbf4c53..368a95fa77d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c @@ -98,8 +98,10 @@ tc_act_parse_pedit(struct mlx5e_tc_act_parse_state *parse_state, attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; - if (ns_type == MLX5_FLOW_NAMESPACE_FDB) + if (ns_type == MLX5_FLOW_NAMESPACE_FDB) { esw_attr->split_count = esw_attr->out_count; + parse_state->if_count = 0; + } return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c index ad09a8a5f36e..2d1d4a04501b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c @@ -66,6 +66,7 @@ tc_act_parse_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state, if (err) return err; + parse_state->if_count = 0; esw_attr->out_count++; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c index 
c8a3eaf189f6..a13c5e707b83 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c @@ -166,6 +166,7 @@ tc_act_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state, return err; esw_attr->split_count = esw_attr->out_count; + parse_state->if_count = 0; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c index 310b99230760..f17575b09788 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c @@ -65,8 +65,10 @@ tc_act_parse_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state, if (err) return err; - if (ns_type == MLX5_FLOW_NAMESPACE_FDB) + if (ns_type == MLX5_FLOW_NAMESPACE_FDB) { attr->esw_attr->split_count = attr->esw_attr->out_count; + parse_state->if_count = 0; + } return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c index 2b80fe73549d..8c531f4ec912 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c @@ -221,16 +221,21 @@ mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs) } static inline bool -mlx5_tc_ct_valid_used_dissector_keys(const u32 used_keys) +mlx5_tc_ct_valid_used_dissector_keys(const u64 used_keys) { -#define DISS_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name) - const u32 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) | DISS_BIT(META); - const u32 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP); - const u32 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP); - const u32 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS); - const u32 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS); - const u32 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS); - const u32 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS); +#define DISS_BIT(name) BIT_ULL(FLOW_DISSECTOR_KEY_ ## name) + const u64 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) | + DISS_BIT(META); + const u64 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) | + DISS_BIT(PORTS) | DISS_BIT(TCP); + const u64 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) | + DISS_BIT(PORTS) | DISS_BIT(TCP); + const u64 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) | + DISS_BIT(PORTS); + const u64 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) | + DISS_BIT(PORTS); + const u64 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS); + const u64 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS); return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp || used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre); @@ -247,7 +252,7 @@ mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *f struct flow_match_tcp tcp; if (!mlx5_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) { - ct_dbg("rule uses unexpected dissectors (0x%08x)", + ct_dbg("rule uses unexpected dissectors (0x%016llx)", flow_rule->match.dissector->used_keys); return false; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c index 4e923a2874ae..86bf007fd05b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c @@ -83,6 +83,9 @@ mlx5e_tc_post_act_offload(struct 
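[Note: the ct_fs_smfs hunk above widens used_keys from u32/BIT() to u64/BIT_ULL(): once flow-dissector key numbers pass bit 31, a 32-bit mask silently truncates them. A short demonstration, bit positions illustrative:]

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n)	(1ULL << (n))

int main(void)
{
	uint64_t mask64 = BIT_ULL(3) | BIT_ULL(35);
	uint32_t mask32 = (uint32_t)mask64;	/* bit 35 cannot fit */

	printf("64-bit has bit 35: %d\n", !!(mask64 & BIT_ULL(35)));		/* 1 */
	printf("32-bit has bit 35: %d\n", !!((uint64_t)mask32 & BIT_ULL(35)));	/* 0: truncated */
	return 0;
}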
mlx5e_post_act *post_act, struct mlx5_flow_spec *spec; int err; + if (IS_ERR(post_act)) + return PTR_ERR(post_act); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return -ENOMEM; @@ -111,6 +114,9 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *po struct mlx5e_post_act_handle *handle; int err; + if (IS_ERR(post_act)) + return ERR_CAST(post_act); + handle = kzalloc(sizeof(*handle), GFP_KERNEL); if (!handle) return ERR_PTR(-ENOMEM); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index 00a04fdd756f..8dfb57f712b0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -302,6 +302,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, e->encap_size = ipv4_encap_size; e->encap_header = encap_header; + encap_header = NULL; if (!(nud_state & NUD_VALID)) { neigh_event_send(attr.n, NULL); @@ -313,8 +314,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, memset(&reformat_params, 0, sizeof(reformat_params)); reformat_params.type = e->reformat_type; - reformat_params.size = ipv4_encap_size; - reformat_params.data = encap_header; + reformat_params.size = e->encap_size; + reformat_params.data = e->encap_header; e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB); if (IS_ERR(e->pkt_reformat)) { @@ -407,6 +408,7 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv, e->encap_size = ipv4_encap_size; kfree(e->encap_header); e->encap_header = encap_header; + encap_header = NULL; if (!(nud_state & NUD_VALID)) { neigh_event_send(attr.n, NULL); @@ -418,8 +420,8 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv, memset(&reformat_params, 0, sizeof(reformat_params)); reformat_params.type = e->reformat_type; - reformat_params.size = ipv4_encap_size; - reformat_params.data = encap_header; + reformat_params.size = e->encap_size; + reformat_params.data = e->encap_header; e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB); if (IS_ERR(e->pkt_reformat)) { @@ -570,6 +572,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, e->encap_size = ipv6_encap_size; e->encap_header = encap_header; + encap_header = NULL; if (!(nud_state & NUD_VALID)) { neigh_event_send(attr.n, NULL); @@ -581,8 +584,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, memset(&reformat_params, 0, sizeof(reformat_params)); reformat_params.type = e->reformat_type; - reformat_params.size = ipv6_encap_size; - reformat_params.data = encap_header; + reformat_params.size = e->encap_size; + reformat_params.data = e->encap_header; e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB); if (IS_ERR(e->pkt_reformat)) { @@ -674,6 +677,7 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv, e->encap_size = ipv6_encap_size; kfree(e->encap_header); e->encap_header = encap_header; + encap_header = NULL; if (!(nud_state & NUD_VALID)) { neigh_event_send(attr.n, NULL); @@ -685,8 +689,8 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv, memset(&reformat_params, 0, sizeof(reformat_params)); reformat_params.type = e->reformat_type; - reformat_params.size = ipv6_encap_size; - reformat_params.data = encap_header; + reformat_params.size = e->encap_size; + reformat_params.data = e->encap_header; e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, 
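[Note: the tc_tun hunks above add "encap_header = NULL;" right after storing the buffer in the entry, and switch the reformat params to read back from the entry. That is a classic ownership handoff to prevent a double free on shared error paths. A minimal sketch, names illustrative:]

#include <stdlib.h>
#include <string.h>

struct encap_entry {
	void *encap_header;
	size_t encap_size;
};

static int build_encap(struct encap_entry *e, const void *hdr, size_t len,
		       int fail_later)
{
	void *encap_header = malloc(len);

	if (!encap_header)
		return -1;
	memcpy(encap_header, hdr, len);

	e->encap_size = len;
	e->encap_header = encap_header;
	encap_header = NULL;	/* ownership moved; only the entry frees it now */

	if (fail_later)		/* stand-in for a later setup failure */
		goto free_encap;
	return 0;

free_encap:
	free(encap_header);	/* NULL after the handoff, so never a double free */
	return -1;
}

int main(void)
{
	struct encap_entry e = {0};
	char hdr[14] = {0};

	if (build_encap(&e, hdr, sizeof(hdr), 0))
		return 1;
	free(e.encap_header);
	return 0;
}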
&reformat_params, MLX5_FLOW_NAMESPACE_FDB); if (IS_ERR(e->pkt_reformat)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index 1730f6a716ee..f1d1e1542e81 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -24,7 +24,8 @@ static int mlx5e_set_int_port_tunnel(struct mlx5e_priv *priv, route_dev = dev_get_by_index(dev_net(e->out_dev), e->route_dev_ifindex); - if (!route_dev || !netif_is_ovs_master(route_dev)) + if (!route_dev || !netif_is_ovs_master(route_dev) || + attr->parse_attr->filter_dev == e->out_dev) goto out; err = mlx5e_set_fwd_to_int_port_actions(priv, attr, e->route_dev_ifindex, @@ -1063,7 +1064,8 @@ int mlx5e_tc_tun_encap_dests_set(struct mlx5e_priv *priv, out_priv = netdev_priv(encap_dev); rpriv = out_priv->ppriv; - esw_attr->dests[out_index].rep = rpriv->rep; + esw_attr->dests[out_index].vport_valid = true; + esw_attr->dests[out_index].vport = rpriv->rep->vport; esw_attr->dests[out_index].mdev = out_priv->mdev; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c index 201ac7dd338f..5620d9f97518 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2020 Mellanox Technologies */ -#include <net/page_pool.h> #include "en/txrx.h" #include "en/params.h" #include "en/trap.h" @@ -128,7 +127,7 @@ static void mlx5e_build_trap_params(struct mlx5_core_dev *mdev, static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv) { - int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, 0)); + int cpu = mlx5_comp_vector_get_cpu(priv->mdev, 0); struct net_device *netdev = priv->netdev; struct mlx5e_trap *t; int err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 40589cebb773..13c7ed1bb37e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -35,6 +35,7 @@ #include "en/xdp.h" #include "en/params.h" #include <linux/bitfield.h> +#include <net/page_pool/helpers.h> int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) { @@ -492,6 +493,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, dma_addr_t dma_addr = xdptxd->dma_addr; u32 dma_len = xdptxd->len; u16 ds_cnt, inline_hdr_sz; + unsigned int frags_size; u8 num_wqebbs = 1; int num_frags = 0; bool inline_ok; @@ -502,8 +504,9 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, inline_ok = sq->min_inline_mode == MLX5_INLINE_MODE_NONE || dma_len >= MLX5E_XDP_MIN_INLINE; + frags_size = xdptxd->has_frags ? 
xdptxdf->sinfo->xdp_frags_size : 0; - if (unlikely(!inline_ok || sq->hw_mtu < dma_len)) { + if (unlikely(!inline_ok || sq->hw_mtu < dma_len + frags_size)) { stats->err++; return false; } @@ -873,11 +876,11 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, } out: - if (flags & XDP_XMIT_FLUSH) { - if (sq->mpwqe.wqe) - mlx5e_xdp_mpwqe_complete(sq); + if (sq->mpwqe.wqe) + mlx5e_xdp_mpwqe_complete(sq); + + if (flags & XDP_XMIT_FLUSH) mlx5e_xmit_xdp_doorbell(sq); - } return nxmit; } @@ -892,7 +895,7 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq) mlx5e_xmit_xdp_doorbell(xdpsq); if (test_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags)) { - xdp_do_flush_map(); + xdp_do_flush(); __clear_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h index bac4717548c6..caa34b9c161e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -138,7 +138,7 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev, } #endif -#ifdef CONFIG_MLX5_EN_MACSEC +#ifdef CONFIG_MLX5_MACSEC if (unlikely(mlx5e_macsec_skb_is_offload(skb))) { struct mlx5e_priv *priv = netdev_priv(dev); @@ -173,7 +173,7 @@ static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv, mlx5e_ipsec_tx_build_eseg(priv, skb, eseg); #endif -#ifdef CONFIG_MLX5_EN_MACSEC +#ifdef CONFIG_MLX5_MACSEC if (unlikely(mlx5e_macsec_skb_is_offload(skb))) mlx5e_macsec_tx_build_eseg(priv->macsec, skb, eseg); #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 891d39b4bfd4..161c5190c236 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -38,8 +38,10 @@ #include <net/netevent.h> #include "en.h" +#include "eswitch.h" #include "ipsec.h" #include "ipsec_rxtx.h" +#include "en_rep.h" #define MLX5_IPSEC_RESCHED msecs_to_jiffies(1000) #define MLX5E_IPSEC_TUNNEL_SA XA_MARK_1 @@ -54,7 +56,7 @@ static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x) return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle; } -static void mlx5e_ipsec_handle_tx_limit(struct work_struct *_work) +static void mlx5e_ipsec_handle_sw_limits(struct work_struct *_work) { struct mlx5e_ipsec_dwork *dwork = container_of(_work, struct mlx5e_ipsec_dwork, dwork.work); @@ -119,7 +121,14 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry) if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO) esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom)); - sa_entry->esn_state.esn = esn; + if (sa_entry->esn_state.esn_msb) + sa_entry->esn_state.esn = esn; + else + /* According to RFC4303, section "3.3.3. Sequence Number Generation", + * the first packet sent using a given SA will contain a sequence + * number of 1. 
+ */ + sa_entry->esn_state.esn = max_t(u32, esn, 1); sa_entry->esn_state.esn_msb = esn_msb; if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) { @@ -333,6 +342,27 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, attrs->replay_esn.esn = sa_entry->esn_state.esn; attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb; attrs->replay_esn.overlap = sa_entry->esn_state.overlap; + switch (x->replay_esn->replay_window) { + case 32: + attrs->replay_esn.replay_window = + MLX5_IPSEC_ASO_REPLAY_WIN_32BIT; + break; + case 64: + attrs->replay_esn.replay_window = + MLX5_IPSEC_ASO_REPLAY_WIN_64BIT; + break; + case 128: + attrs->replay_esn.replay_window = + MLX5_IPSEC_ASO_REPLAY_WIN_128BIT; + break; + case 256: + attrs->replay_esn.replay_window = + MLX5_IPSEC_ASO_REPLAY_WIN_256BIT; + break; + default: + WARN_ON(true); + return; + } } attrs->dir = x->xso.dir; @@ -354,6 +384,12 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, mlx5e_ipsec_init_limits(sa_entry, attrs); mlx5e_ipsec_init_macs(sa_entry, attrs); + + if (x->encap) { + attrs->encap = true; + attrs->sport = x->encap->encap_sport; + attrs->dport = x->encap->encap_dport; + } } static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev, @@ -387,8 +423,25 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev, return -EINVAL; } if (x->encap) { - NL_SET_ERR_MSG_MOD(extack, "Encapsulated xfrm state may not be offloaded"); - return -EINVAL; + if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESPINUDP)) { + NL_SET_ERR_MSG_MOD(extack, "Encapsulation is not supported"); + return -EINVAL; + } + + if (x->encap->encap_type != UDP_ENCAP_ESPINUDP) { + NL_SET_ERR_MSG_MOD(extack, "Encapsulation other than UDP is not supported"); + return -EINVAL; + } + + if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET) { + NL_SET_ERR_MSG_MOD(extack, "Encapsulation is supported in packet offload mode only"); + return -EINVAL; + } + + if (x->props.mode != XFRM_MODE_TRANSPORT) { + NL_SET_ERR_MSG_MOD(extack, "Encapsulation is supported in transport mode only"); + return -EINVAL; + } } if (!x->aead) { NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without aead"); @@ -416,9 +469,9 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev, return -EINVAL; } - if (x->sel.proto != IPPROTO_IP && - (x->sel.proto != IPPROTO_UDP || x->xso.dir != XFRM_DEV_OFFLOAD_OUT)) { - NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than UDP, and only Tx direction"); + if (x->sel.proto != IPPROTO_IP && x->sel.proto != IPPROTO_UDP && + x->sel.proto != IPPROTO_TCP) { + NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than TCP/UDP"); return -EINVAL; } @@ -461,9 +514,15 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev, return -EINVAL; } - if (x->lft.hard_byte_limit != XFRM_INF || - x->lft.soft_byte_limit != XFRM_INF) { - NL_SET_ERR_MSG_MOD(extack, "Device doesn't support limits in bytes"); + if (x->lft.soft_byte_limit >= x->lft.hard_byte_limit && + x->lft.hard_byte_limit != XFRM_INF) { + /* XFRM stack doesn't prevent such configuration :(. 
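[Note: the replay-ESN hunk above translates the generic XFRM replay window into the four sizes the ASO hardware understands and rejects anything else. A standalone sketch of that translation; the enum values are placeholders, not the MLX5_IPSEC_ASO_* constants:]

#include <stdio.h>

enum replay_win { WIN_32BIT, WIN_64BIT, WIN_128BIT, WIN_256BIT };

static int replay_window_to_hw(unsigned int window, enum replay_win *out)
{
	switch (window) {
	case 32:  *out = WIN_32BIT;  return 0;
	case 64:  *out = WIN_64BIT;  return 0;
	case 128: *out = WIN_128BIT; return 0;
	case 256: *out = WIN_256BIT; return 0;
	default:  return -1;	/* unsupported window size */
	}
}

int main(void)
{
	enum replay_win w;

	return replay_window_to_hw(64, &w);	/* 0: supported */
}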
*/ + NL_SET_ERR_MSG_MOD(extack, "Hard byte limit must be greater than soft one"); + return -EINVAL; + } + + if (!x->lft.soft_byte_limit || !x->lft.hard_byte_limit) { + NL_SET_ERR_MSG_MOD(extack, "Soft/hard byte limits can't be 0"); return -EINVAL; } @@ -599,11 +658,10 @@ static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry) if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET) return 0; - if (x->xso.dir != XFRM_DEV_OFFLOAD_OUT) - return 0; - if (x->lft.soft_packet_limit == XFRM_INF && - x->lft.hard_packet_limit == XFRM_INF) + x->lft.hard_packet_limit == XFRM_INF && + x->lft.soft_byte_limit == XFRM_INF && + x->lft.hard_byte_limit == XFRM_INF) return 0; dwork = kzalloc(sizeof(*dwork), GFP_KERNEL); @@ -611,7 +669,7 @@ static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry) return -ENOMEM; dwork->sa_entry = sa_entry; - INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_tx_limit); + INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_sw_limits); sa_entry->dwork = dwork; return 0; } @@ -646,6 +704,11 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x, if (err) goto err_xfrm; + if (!mlx5_eswitch_block_ipsec(priv->mdev)) { + err = -EBUSY; + goto err_xfrm; + } + /* check esn */ if (x->props.flags & XFRM_STATE_ESN) mlx5e_ipsec_update_esn_state(sa_entry); @@ -654,7 +717,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x, err = mlx5_ipsec_create_work(sa_entry); if (err) - goto err_xfrm; + goto unblock_ipsec; err = mlx5e_ipsec_create_dwork(sa_entry); if (err) @@ -711,6 +774,8 @@ release_work: if (sa_entry->work) kfree(sa_entry->work->data); kfree(sa_entry->work); +unblock_ipsec: + mlx5_eswitch_unblock_ipsec(priv->mdev); err_xfrm: kfree(sa_entry); NL_SET_ERR_MSG_WEAK_MOD(extack, "Device failed to offload this state"); @@ -740,6 +805,7 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x) static void mlx5e_xfrm_free_state(struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); + struct mlx5e_ipsec *ipsec = sa_entry->ipsec; if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ) goto sa_entry_free; @@ -756,6 +822,7 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x) if (sa_entry->work) kfree(sa_entry->work->data); kfree(sa_entry->work); + mlx5_eswitch_unblock_ipsec(ipsec->mdev); sa_entry_free: kfree(sa_entry); } @@ -816,6 +883,7 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv) xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC); ipsec->mdev = priv->mdev; + init_completion(&ipsec->comp); ipsec->wq = alloc_workqueue("mlx5e_ipsec: %s", WQ_UNBOUND, 0, priv->netdev->name); if (!ipsec->wq) @@ -835,7 +903,8 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv) goto clear_aso; } - ret = mlx5e_accel_ipsec_fs_init(ipsec); + ipsec->is_uplink_rep = mlx5e_is_uplink_rep(priv); + ret = mlx5e_accel_ipsec_fs_init(ipsec, &priv->devcom); if (ret) goto err_fs_init; @@ -866,9 +935,11 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv) return; mlx5e_accel_ipsec_fs_cleanup(ipsec); - if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL) + if (ipsec->netevent_nb.notifier_call) { unregister_netevent_notifier(&ipsec->netevent_nb); - if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD) + ipsec->netevent_nb.notifier_call = NULL; + } + if (ipsec->aso) mlx5e_ipsec_aso_cleanup(ipsec); destroy_workqueue(ipsec->wq); kfree(ipsec); @@ -958,9 +1029,10 @@ static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev, return -EINVAL; } - if (sel->proto != IPPROTO_IP && - (sel->proto != IPPROTO_UDP || x->xdo.dir != XFRM_DEV_OFFLOAD_OUT)) { - 
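[Note: the byte-limit validation above replaces the old blanket rejection: XFRM_INF means "no limit", the soft limit must trip before a finite hard one, and zero limits are refused. A userspace stand-in for exactly that check:]

#include <limits.h>

#define XFRM_INF	ULLONG_MAX

static int validate_byte_limits(unsigned long long soft, unsigned long long hard)
{
	if (soft >= hard && hard != XFRM_INF)
		return -1;	/* soft limit must trip before the hard one */
	if (!soft || !hard)
		return -1;	/* zero limits are rejected outright */
	return 0;
}

int main(void)
{
	/* soft (1000) >= hard (500) with a finite hard limit: rejected */
	return validate_byte_limits(1000, 500) ? 0 : 1;
}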
NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than UDP, and only Tx direction"); + if (x->selector.proto != IPPROTO_IP && + x->selector.proto != IPPROTO_UDP && + x->selector.proto != IPPROTO_TCP) { + NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than TCP/UDP"); return -EINVAL; } @@ -976,6 +1048,12 @@ static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev, } } + if (x->xdo.type == XFRM_DEV_OFFLOAD_PACKET && + !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)) { + NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported"); + return -EINVAL; + } + return 0; } @@ -1029,6 +1107,11 @@ static int mlx5e_xfrm_add_policy(struct xfrm_policy *x, pol_entry->x = x; pol_entry->ipsec = priv->ipsec; + if (!mlx5_eswitch_block_ipsec(priv->mdev)) { + err = -EBUSY; + goto ipsec_busy; + } + mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs); err = mlx5e_accel_ipsec_fs_add_pol(pol_entry); if (err) @@ -1038,6 +1121,8 @@ static int mlx5e_xfrm_add_policy(struct xfrm_policy *x, return 0; err_fs: + mlx5_eswitch_unblock_ipsec(priv->mdev); +ipsec_busy: kfree(pol_entry); NL_SET_ERR_MSG_MOD(extack, "Device failed to offload this policy"); return err; @@ -1048,6 +1133,7 @@ static void mlx5e_xfrm_del_policy(struct xfrm_policy *x) struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x); mlx5e_accel_ipsec_fs_del_pol(pol_entry); + mlx5_eswitch_unblock_ipsec(pol_entry->ipsec->mdev); } static void mlx5e_xfrm_free_policy(struct xfrm_policy *x) @@ -1063,14 +1149,6 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = { .xdo_dev_state_free = mlx5e_xfrm_free_state, .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok, .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state, -}; - -static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = { - .xdo_dev_state_add = mlx5e_xfrm_add_state, - .xdo_dev_state_delete = mlx5e_xfrm_del_state, - .xdo_dev_state_free = mlx5e_xfrm_free_state, - .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok, - .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state, .xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft, .xdo_dev_policy_add = mlx5e_xfrm_add_policy, @@ -1088,11 +1166,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv) mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n"); - if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD) - netdev->xfrmdev_ops = &mlx5e_ipsec_packet_xfrmdev_ops; - else - netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops; - + netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops; netdev->features |= NETIF_F_HW_ESP; netdev->hw_enc_features |= NETIF_F_HW_ESP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index 4e9887171508..adaea3493193 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -38,6 +38,7 @@ #include <net/xfrm.h> #include <linux/idr.h> #include "lib/aso.h" +#include "lib/devcom.h" #define MLX5E_IPSEC_SADB_RX_BITS 10 #define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L @@ -94,13 +95,20 @@ struct mlx5_accel_esp_xfrm_attrs { u8 dir : 2; u8 type : 2; u8 drop : 1; + u8 encap : 1; u8 family; struct mlx5_replay_esn replay_esn; u32 authsize; u32 reqid; struct mlx5_ipsec_lft lft; - u8 smac[ETH_ALEN]; - u8 dmac[ETH_ALEN]; + union { + u8 smac[ETH_ALEN]; + __be16 sport; + }; + union { + u8 dmac[ETH_ALEN]; + __be16 dport; + }; }; enum mlx5_ipsec_cap { @@ -110,6 +118,7 @@ enum mlx5_ipsec_cap { 
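[Note: the attrs change above overlays the UDP encapsulation ports on the MAC address storage, since transport-mode ESP-in-UDP never uses L2 addresses. A compilable sketch of that layout idea; field widths are illustrative, not the driver's exact struct:]

#include <stdio.h>

struct xfrm_attrs {
	unsigned char encap : 1;
	union {
		unsigned char smac[6];	/* used when !encap */
		unsigned short sport;	/* used when encap (ESP-in-UDP) */
	};
	union {
		unsigned char dmac[6];
		unsigned short dport;
	};
};

int main(void)
{
	struct xfrm_attrs a = { .encap = 1, .sport = 4500, .dport = 4500 };

	printf("%hu -> %hu\n", a.sport, a.dport);
	return 0;
}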
MLX5_IPSEC_CAP_ROCE = 1 << 3, MLX5_IPSEC_CAP_PRIO = 1 << 4, MLX5_IPSEC_CAP_TUNNEL = 1 << 5, + MLX5_IPSEC_CAP_ESPINUDP = 1 << 6, }; struct mlx5e_priv; @@ -135,7 +144,7 @@ struct mlx5e_ipsec_sw_stats { atomic64_t ipsec_tx_drop_trailer; }; -struct mlx5e_ipsec_rx; +struct mlx5e_ipsec_fc; struct mlx5e_ipsec_tx; struct mlx5e_ipsec_work { @@ -161,20 +170,80 @@ struct mlx5e_ipsec_aso { spinlock_t lock; }; +struct mlx5e_ipsec_rx_create_attr { + struct mlx5_flow_namespace *ns; + struct mlx5_ttc_table *ttc; + u32 family; + int prio; + int pol_level; + int sa_level; + int status_level; + enum mlx5_flow_namespace_type chains_ns; +}; + +struct mlx5e_ipsec_ft { + struct mutex mutex; /* Protect changes to this struct */ + struct mlx5_flow_table *pol; + struct mlx5_flow_table *sa; + struct mlx5_flow_table *status; + u32 refcnt; +}; + +struct mlx5e_ipsec_drop { + struct mlx5_flow_handle *rule; + struct mlx5_fc *fc; +}; + +struct mlx5e_ipsec_rule { + struct mlx5_flow_handle *rule; + struct mlx5_modify_hdr *modify_hdr; + struct mlx5_pkt_reformat *pkt_reformat; + struct mlx5_fc *fc; + struct mlx5e_ipsec_drop replay; + struct mlx5e_ipsec_drop auth; + struct mlx5e_ipsec_drop trailer; +}; + +struct mlx5e_ipsec_miss { + struct mlx5_flow_group *group; + struct mlx5_flow_handle *rule; +}; + +struct mlx5e_ipsec_tx_create_attr { + int prio; + int pol_level; + int sa_level; + int cnt_level; + enum mlx5_flow_namespace_type chains_ns; +}; + +struct mlx5e_ipsec_mpv_work { + int event; + struct work_struct work; + struct mlx5e_priv *slave_priv; + struct mlx5e_priv *master_priv; +}; + struct mlx5e_ipsec { struct mlx5_core_dev *mdev; struct xarray sadb; struct mlx5e_ipsec_sw_stats sw_stats; struct mlx5e_ipsec_hw_stats hw_stats; struct workqueue_struct *wq; + struct completion comp; struct mlx5e_flow_steering *fs; struct mlx5e_ipsec_rx *rx_ipv4; struct mlx5e_ipsec_rx *rx_ipv6; + struct mlx5e_ipsec_rx *rx_esw; struct mlx5e_ipsec_tx *tx; + struct mlx5e_ipsec_tx *tx_esw; struct mlx5e_ipsec_aso *aso; struct notifier_block nb; struct notifier_block netevent_nb; struct mlx5_ipsec_fs *roce; + u8 is_uplink_rep: 1; + struct mlx5e_ipsec_mpv_work mpv_work; + struct xarray ipsec_obj_id_map; }; struct mlx5e_ipsec_esn_state { @@ -183,13 +252,6 @@ struct mlx5e_ipsec_esn_state { u8 overlap: 1; }; -struct mlx5e_ipsec_rule { - struct mlx5_flow_handle *rule; - struct mlx5_modify_hdr *modify_hdr; - struct mlx5_pkt_reformat *pkt_reformat; - struct mlx5_fc *fc; -}; - struct mlx5e_ipsec_limits { u64 round; u8 soft_limit_hit : 1; @@ -209,6 +271,7 @@ struct mlx5e_ipsec_sa_entry { struct mlx5e_ipsec_work *work; struct mlx5e_ipsec_dwork *dwork; struct mlx5e_ipsec_limits limits; + u32 rx_mapped_id; }; struct mlx5_accel_pol_xfrm_attrs { @@ -245,7 +308,7 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv); void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv); void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec); -int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec); +int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec, struct mlx5_devcom_comp_dev **devcom); int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry); void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry); int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry); @@ -271,6 +334,10 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5_accel_esp_xfrm_attrs *attrs); +void mlx5e_ipsec_handle_mpv_event(int event, struct 
mlx5e_priv *slave_priv, + struct mlx5e_priv *master_priv); +void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event); + static inline struct mlx5_core_dev * mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry) { @@ -306,6 +373,15 @@ static inline u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; } + +static inline void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv, + struct mlx5e_priv *master_priv) +{ +} + +static inline void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event) +{ +} #endif #endif /* __MLX5E_IPSEC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 832d36be4a17..41a2543a52cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -9,6 +9,8 @@ #include "fs_core.h" #include "lib/ipsec_fs_roce.h" #include "lib/fs_chains.h" +#include "esw/ipsec_fs.h" +#include "en_rep.h" #define NUM_IPSEC_FTE BIT(15) #define MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE 16 @@ -19,34 +21,28 @@ struct mlx5e_ipsec_fc { struct mlx5_fc *drop; }; -struct mlx5e_ipsec_ft { - struct mutex mutex; /* Protect changes to this struct */ - struct mlx5_flow_table *pol; - struct mlx5_flow_table *sa; - struct mlx5_flow_table *status; - u32 refcnt; -}; - -struct mlx5e_ipsec_miss { - struct mlx5_flow_group *group; - struct mlx5_flow_handle *rule; -}; - -struct mlx5e_ipsec_rx { +struct mlx5e_ipsec_tx { struct mlx5e_ipsec_ft ft; struct mlx5e_ipsec_miss pol; struct mlx5e_ipsec_miss sa; struct mlx5e_ipsec_rule status; + struct mlx5_flow_namespace *ns; struct mlx5e_ipsec_fc *fc; struct mlx5_fs_chains *chains; u8 allow_tunnel_mode : 1; }; -struct mlx5e_ipsec_tx { +struct mlx5e_ipsec_status_checks { + struct mlx5_flow_group *drop_all_group; + struct mlx5e_ipsec_drop all; +}; + +struct mlx5e_ipsec_rx { struct mlx5e_ipsec_ft ft; struct mlx5e_ipsec_miss pol; + struct mlx5e_ipsec_miss sa; struct mlx5e_ipsec_rule status; - struct mlx5_flow_namespace *ns; + struct mlx5e_ipsec_status_checks status_drops; struct mlx5e_ipsec_fc *fc; struct mlx5_fs_chains *chains; u8 allow_tunnel_mode : 1; @@ -60,14 +56,25 @@ static enum mlx5_traffic_types family2tt(u32 family) return MLX5_TT_IPV6_IPSEC_ESP; } -static struct mlx5e_ipsec_rx *ipsec_rx(struct mlx5e_ipsec *ipsec, u32 family) +static struct mlx5e_ipsec_rx *ipsec_rx(struct mlx5e_ipsec *ipsec, u32 family, int type) { + if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET) + return ipsec->rx_esw; + if (family == AF_INET) return ipsec->rx_ipv4; return ipsec->rx_ipv6; } +static struct mlx5e_ipsec_tx *ipsec_tx(struct mlx5e_ipsec *ipsec, int type) +{ + if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET) + return ipsec->tx_esw; + + return ipsec->tx; +} + static struct mlx5_fs_chains * ipsec_chains_create(struct mlx5_core_dev *mdev, struct mlx5_flow_table *miss_ft, enum mlx5_flow_namespace_type ns, int base_prio, @@ -137,14 +144,37 @@ static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns, return mlx5_create_auto_grouped_flow_table(ns, &ft_attr); } -static int ipsec_status_rule(struct mlx5_core_dev *mdev, - struct mlx5e_ipsec_rx *rx, - struct mlx5_flow_destination *dest) +static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx) +{ + mlx5_del_flow_rules(rx->status_drops.all.rule); + mlx5_fc_destroy(ipsec->mdev, rx->status_drops.all.fc); + 
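[Note: ipsec_rx()/ipsec_tx() above pick between the new eswitch-owned contexts and the per-family ones: uplink-rep mode with packet offload gets rx_esw/tx_esw, everything else falls back as before. A toy model of that dispatch, all types stand-ins:]

#include <stdio.h>

enum { OFFLOAD_CRYPTO, OFFLOAD_PACKET };
enum { FAMILY_V4, FAMILY_V6 };

struct rx_ctx { const char *name; };

struct ipsec {
	int is_uplink_rep;
	struct rx_ctx rx_ipv4, rx_ipv6, rx_esw;
};

static struct rx_ctx *pick_rx(struct ipsec *ipsec, int family, int type)
{
	if (ipsec->is_uplink_rep && type == OFFLOAD_PACKET)
		return &ipsec->rx_esw;	/* eswitch-owned tables */
	return family == FAMILY_V4 ? &ipsec->rx_ipv4 : &ipsec->rx_ipv6;
}

int main(void)
{
	struct ipsec i = { 1, {"v4"}, {"v6"}, {"esw"} };

	puts(pick_rx(&i, FAMILY_V4, OFFLOAD_PACKET)->name);	/* esw */
	return 0;
}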
mlx5_destroy_flow_group(rx->status_drops.drop_all_group); +} + +static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx) +{ + mlx5_del_flow_rules(rx->status.rule); + + if (rx != ipsec->rx_esw) + return; + +#ifdef CONFIG_MLX5_ESWITCH + mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0); +#endif +} + +static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry, + struct mlx5e_ipsec_rx *rx) { - u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; + struct mlx5e_ipsec *ipsec = sa_entry->ipsec; + struct mlx5_flow_table *ft = rx->ft.status; + struct mlx5_core_dev *mdev = ipsec->mdev; + struct mlx5_flow_destination dest = {}; struct mlx5_flow_act flow_act = {}; - struct mlx5_modify_hdr *modify_hdr; - struct mlx5_flow_handle *fte; + struct mlx5_flow_handle *rule; + struct mlx5_fc *flow_counter; struct mlx5_flow_spec *spec; int err; @@ -152,48 +182,273 @@ static int ipsec_status_rule(struct mlx5_core_dev *mdev, if (!spec) return -ENOMEM; - /* Action to copy 7 bit ipsec_syndrome to regB[24:30] */ - MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY); - MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME); - MLX5_SET(copy_action_in, action, src_offset, 0); - MLX5_SET(copy_action_in, action, length, 7); - MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B); - MLX5_SET(copy_action_in, action, dst_offset, 24); + flow_counter = mlx5_fc_create(mdev, true); + if (IS_ERR(flow_counter)) { + err = PTR_ERR(flow_counter); + mlx5_core_err(mdev, + "Failed to add ipsec rx status drop rule counter, err=%d\n", err); + goto err_cnt; + } + sa_entry->ipsec_rule.auth.fc = flow_counter; - modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL, - 1, action); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; + flow_act.flags = FLOW_ACT_NO_APPEND; + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter_id = mlx5_fc_id(flow_counter); + if (rx == ipsec->rx_esw) + spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; - if (IS_ERR(modify_hdr)) { - err = PTR_ERR(modify_hdr); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2); + MLX5_SET(fte_match_param, spec->match_value, + misc_parameters_2.metadata_reg_c_2, + sa_entry->ipsec_obj_id | BIT(31)); + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); mlx5_core_err(mdev, - "fail to alloc ipsec copy modify_header_id err=%d\n", err); - goto out_spec; + "Failed to add ipsec rx status drop rule, err=%d\n", err); + goto err_rule; } + sa_entry->ipsec_rule.auth.rule = rule; - /* create fte */ - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + flow_counter = mlx5_fc_create(mdev, true); + if (IS_ERR(flow_counter)) { + err = PTR_ERR(flow_counter); + mlx5_core_err(mdev, + "Failed to add ipsec rx status drop rule counter, err=%d\n", err); + goto err_cnt_2; + } + sa_entry->ipsec_rule.trailer.fc = flow_counter; + + dest.counter_id = mlx5_fc_id(flow_counter); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2); + rule = mlx5_add_flow_rules(ft, spec, &flow_act, 
&dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, + "Failed to add ipsec rx status drop rule, err=%d\n", err); + goto err_rule_2; + } + sa_entry->ipsec_rule.trailer.rule = rule; + + kvfree(spec); + return 0; + +err_rule_2: + mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.trailer.fc); +err_cnt_2: + mlx5_del_flow_rules(sa_entry->ipsec_rule.auth.rule); +err_rule: + mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.auth.fc); +err_cnt: + kvfree(spec); + return err; +} + +static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5e_ipsec_rx *rx) +{ + struct mlx5e_ipsec *ipsec = sa_entry->ipsec; + struct mlx5_flow_table *ft = rx->ft.status; + struct mlx5_core_dev *mdev = ipsec->mdev; + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *rule; + struct mlx5_fc *flow_counter; + struct mlx5_flow_spec *spec; + int err; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + flow_counter = mlx5_fc_create(mdev, true); + if (IS_ERR(flow_counter)) { + err = PTR_ERR(flow_counter); + mlx5_core_err(mdev, + "Failed to add ipsec rx status drop rule counter, err=%d\n", err); + goto err_cnt; + } + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; + flow_act.flags = FLOW_ACT_NO_APPEND; + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter_id = mlx5_fc_id(flow_counter); + if (rx == ipsec->rx_esw) + spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; + + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_2, + sa_entry->ipsec_obj_id | BIT(31)); + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, + "Failed to add ipsec rx status drop rule, err=%d\n", err); + goto err_rule; + } + + sa_entry->ipsec_rule.replay.rule = rule; + sa_entry->ipsec_rule.replay.fc = flow_counter; + + kvfree(spec); + return 0; + +err_rule: + mlx5_fc_destroy(mdev, flow_counter); +err_cnt: + kvfree(spec); + return err; +} + +static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_table *ft = rx->ft.status; + struct mlx5_core_dev *mdev = ipsec->mdev; + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *rule; + struct mlx5_fc *flow_counter; + struct mlx5_flow_spec *spec; + struct mlx5_flow_group *g; + u32 *flow_group_in; + int err = 0; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!flow_group_in || !spec) { + err = -ENOMEM; + goto err_out; + } + + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1); + g = mlx5_create_flow_group(ft, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + mlx5_core_err(mdev, + "Failed to add ipsec rx status drop flow group, err=%d\n", err); + goto err_out; + } + + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + err = PTR_ERR(flow_counter); 
+ mlx5_core_err(mdev, + "Failed to add ipsec rx status drop rule counter, err=%d\n", err); + goto err_cnt; + } + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter_id = mlx5_fc_id(flow_counter); + if (rx == ipsec->rx_esw) + spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; + rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, + "Failed to add ipsec rx status drop rule, err=%d\n", err); + goto err_rule; + } + + rx->status_drops.drop_all_group = g; + rx->status_drops.all.rule = rule; + rx->status_drops.all.fc = flow_counter; + + kvfree(flow_group_in); + kvfree(spec); + return 0; + +err_rule: + mlx5_fc_destroy(mdev, flow_counter); +err_cnt: + mlx5_destroy_flow_group(g); +err_out: + kvfree(flow_group_in); + kvfree(spec); + return err; +} + +static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx, + struct mlx5_flow_destination *dest) +{ + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + int err; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + misc_parameters_2.ipsec_syndrome); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + misc_parameters_2.metadata_reg_c_4); + MLX5_SET(fte_match_param, spec->match_value, + misc_parameters_2.ipsec_syndrome, 0); + MLX5_SET(fte_match_param, spec->match_value, + misc_parameters_2.metadata_reg_c_4, 0); + if (rx == ipsec->rx_esw) + spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + flow_act.flags = FLOW_ACT_NO_APPEND; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT; - flow_act.modify_hdr = modify_hdr; - fte = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2); - if (IS_ERR(fte)) { - err = PTR_ERR(fte); - mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err); - goto out; + rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_warn(ipsec->mdev, + "Failed to add ipsec rx status pass rule, err=%d\n", err); + goto err_rule; } + rx->status.rule = rule; kvfree(spec); - rx->status.rule = fte; - rx->status.modify_hdr = modify_hdr; return 0; -out: - mlx5_modify_header_dealloc(mdev, modify_hdr); -out_spec: +err_rule: kvfree(spec); return err; } +static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx) +{ + ipsec_rx_status_pass_destroy(ipsec, rx); + ipsec_rx_status_drop_destroy(ipsec, rx); +} + +static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx, + struct mlx5_flow_destination *dest) +{ + int err; + + err = ipsec_rx_status_drop_all_create(ipsec, rx); + if (err) + return err; + + err = ipsec_rx_status_pass_create(ipsec, rx, dest); + if (err) + goto err_pass_create; + + return 0; + +err_pass_create: + ipsec_rx_status_drop_destroy(ipsec, rx); + return err; +} + static int ipsec_miss_create(struct mlx5_core_dev *mdev, struct mlx5_flow_table *ft, struct mlx5e_ipsec_miss *miss, @@ -238,13 +493,96 @@ out: return err; } -static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, - struct mlx5e_ipsec_rx *rx, u32 family) +static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, 
u32 family) +{ + struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET); + struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false); + struct mlx5_flow_destination old_dest, new_dest; + + old_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false), + family2tt(family)); + + mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, ns, &old_dest, family, + MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL, MLX5E_NIC_PRIO); + + new_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family); + new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest); + mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest); +} + +static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family) +{ + struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET); + struct mlx5_flow_destination old_dest, new_dest; + + old_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family); + old_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + new_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false), + family2tt(family)); + mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest); + mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest); + + mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, ipsec->mdev); +} + +static void ipsec_mpv_work_handler(struct work_struct *_work) +{ + struct mlx5e_ipsec_mpv_work *work = container_of(_work, struct mlx5e_ipsec_mpv_work, work); + struct mlx5e_ipsec *ipsec = work->slave_priv->ipsec; + + switch (work->event) { + case MPV_DEVCOM_IPSEC_MASTER_UP: + mutex_lock(&ipsec->tx->ft.mutex); + if (ipsec->tx->ft.refcnt) + mlx5_ipsec_fs_roce_tx_create(ipsec->mdev, ipsec->roce, ipsec->tx->ft.pol, + true); + mutex_unlock(&ipsec->tx->ft.mutex); + + mutex_lock(&ipsec->rx_ipv4->ft.mutex); + if (ipsec->rx_ipv4->ft.refcnt) + handle_ipsec_rx_bringup(ipsec, AF_INET); + mutex_unlock(&ipsec->rx_ipv4->ft.mutex); + + mutex_lock(&ipsec->rx_ipv6->ft.mutex); + if (ipsec->rx_ipv6->ft.refcnt) + handle_ipsec_rx_bringup(ipsec, AF_INET6); + mutex_unlock(&ipsec->rx_ipv6->ft.mutex); + break; + case MPV_DEVCOM_IPSEC_MASTER_DOWN: + mutex_lock(&ipsec->tx->ft.mutex); + if (ipsec->tx->ft.refcnt) + mlx5_ipsec_fs_roce_tx_destroy(ipsec->roce, ipsec->mdev); + mutex_unlock(&ipsec->tx->ft.mutex); + + mutex_lock(&ipsec->rx_ipv4->ft.mutex); + if (ipsec->rx_ipv4->ft.refcnt) + handle_ipsec_rx_cleanup(ipsec, AF_INET); + mutex_unlock(&ipsec->rx_ipv4->ft.mutex); + + mutex_lock(&ipsec->rx_ipv6->ft.mutex); + if (ipsec->rx_ipv6->ft.refcnt) + handle_ipsec_rx_cleanup(ipsec, AF_INET6); + mutex_unlock(&ipsec->rx_ipv6->ft.mutex); + break; + } + + complete(&work->master_priv->ipsec->comp); +} + +static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family) { struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false); - /* disconnect */ mlx5_ttc_fwd_default_dest(ttc, family2tt(family)); +} + +static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx, u32 family) +{ + /* disconnect */ + if (rx != ipsec->rx_esw) + ipsec_rx_ft_disconnect(ipsec, family); if (rx->chains) { ipsec_chains_destroy(rx->chains); @@ -259,51 +597,97 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, mlx5_destroy_flow_table(rx->ft.sa); if (rx->allow_tunnel_mode) mlx5_eswitch_unblock_encap(mdev); - mlx5_del_flow_rules(rx->status.rule); - mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr); + 
mlx5_ipsec_rx_status_destroy(ipsec, rx); mlx5_destroy_flow_table(rx->ft.status); - mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family); + mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev); +} + +static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx, + u32 family, + struct mlx5e_ipsec_rx_create_attr *attr) +{ + if (rx == ipsec->rx_esw) { + /* For packet offload in switchdev mode, RX & TX use FDB namespace */ + attr->ns = ipsec->tx_esw->ns; + mlx5_esw_ipsec_rx_create_attr_set(ipsec, attr); + return; + } + + attr->ns = mlx5e_fs_get_ns(ipsec->fs, false); + attr->ttc = mlx5e_fs_get_ttc(ipsec->fs, false); + attr->family = family; + attr->prio = MLX5E_NIC_PRIO; + attr->pol_level = MLX5E_ACCEL_FS_POL_FT_LEVEL; + attr->sa_level = MLX5E_ACCEL_FS_ESP_FT_LEVEL; + attr->status_level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL; + attr->chains_ns = MLX5_FLOW_NAMESPACE_KERNEL; +} + +static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx, + struct mlx5e_ipsec_rx_create_attr *attr, + struct mlx5_flow_destination *dest) +{ + struct mlx5_flow_table *ft; + int err; + + if (rx == ipsec->rx_esw) + return mlx5_esw_ipsec_rx_status_pass_dest_get(ipsec, dest); + + *dest = mlx5_ttc_get_default_dest(attr->ttc, family2tt(attr->family)); + err = mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, attr->ns, dest, + attr->family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL, + attr->prio); + if (err) + return err; + + ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, attr->family); + if (ft) { + dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest->ft = ft; + } + + return 0; +} + +static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx, + struct mlx5e_ipsec_rx_create_attr *attr) +{ + struct mlx5_flow_destination dest = {}; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = rx->ft.pol; + mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest); } static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx, u32 family) { - struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false); - struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false); - struct mlx5_flow_destination default_dest; + struct mlx5e_ipsec_rx_create_attr attr; struct mlx5_flow_destination dest[2]; struct mlx5_flow_table *ft; u32 flags = 0; int err; - default_dest = mlx5_ttc_get_default_dest(ttc, family2tt(family)); - err = mlx5_ipsec_fs_roce_rx_create(mdev, ipsec->roce, ns, &default_dest, - family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL, - MLX5E_NIC_PRIO); + ipsec_rx_create_attr_set(ipsec, rx, family, &attr); + + err = ipsec_rx_status_pass_dest_get(ipsec, rx, &attr, &dest[0]); if (err) return err; - ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL, - MLX5E_NIC_PRIO, 1, 0); + ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 0); if (IS_ERR(ft)) { err = PTR_ERR(ft); goto err_fs_ft_status; } - rx->ft.status = ft; - ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family); - if (ft) { - dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest[0].ft = ft; - } else { - dest[0] = default_dest; - } - dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; dest[1].counter_id = mlx5_fc_id(rx->fc->cnt); - err = ipsec_status_rule(mdev, rx, dest); + err = mlx5_ipsec_rx_status_create(ipsec, rx, dest); if (err) goto err_add; @@ -312,8 +696,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev); if 
 		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
-	ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_LEVEL, MLX5E_NIC_PRIO, 2,
-			     flags);
+	ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 2, flags);
 	if (IS_ERR(ft)) {
 		err = PTR_ERR(ft);
 		goto err_fs_ft;
@@ -326,9 +709,9 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 
 	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
 		rx->chains = ipsec_chains_create(mdev, rx->ft.sa,
-						 MLX5_FLOW_NAMESPACE_KERNEL,
-						 MLX5E_NIC_PRIO,
-						 MLX5E_ACCEL_FS_POL_FT_LEVEL,
+						 attr.chains_ns,
+						 attr.prio,
+						 attr.pol_level,
 						 &rx->ft.pol);
 		if (IS_ERR(rx->chains)) {
 			err = PTR_ERR(rx->chains);
@@ -338,8 +721,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 		goto connect;
 	}
 
-	ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_POL_FT_LEVEL, MLX5E_NIC_PRIO,
-			     2, 0);
+	ft = ipsec_ft_create(attr.ns, attr.pol_level, attr.prio, 2, 0);
 	if (IS_ERR(ft)) {
 		err = PTR_ERR(ft);
 		goto err_pol_ft;
@@ -354,10 +736,8 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 
 connect:
 	/* connect */
-	memset(dest, 0x00, sizeof(*dest));
-	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-	dest[0].ft = rx->ft.pol;
-	mlx5_ttc_fwd_dest(ttc, family2tt(family), &dest[0]);
+	if (rx != ipsec->rx_esw)
+		ipsec_rx_ft_connect(ipsec, rx, &attr);
 	return 0;
 
 err_pol_miss:
@@ -375,7 +755,7 @@ err_fs_ft:
 err_add:
 	mlx5_destroy_flow_table(rx->ft.status);
 err_fs_ft_status:
-	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family);
+	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
 	return err;
 }
 
@@ -387,10 +767,16 @@ static int rx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 	if (rx->ft.refcnt)
 		goto skip;
 
-	err = rx_create(mdev, ipsec, rx, family);
+	err = mlx5_eswitch_block_mode(mdev);
 	if (err)
 		return err;
 
+	err = rx_create(mdev, ipsec, rx, family);
+	if (err) {
+		mlx5_eswitch_unblock_mode(mdev);
+		return err;
+	}
+
 skip:
 	rx->ft.refcnt++;
 	return 0;
@@ -403,12 +789,14 @@ static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
 		return;
 
 	rx_destroy(ipsec->mdev, ipsec, rx, family);
+	mlx5_eswitch_unblock_mode(ipsec->mdev);
 }
 
 static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
-					struct mlx5e_ipsec *ipsec, u32 family)
+					struct mlx5e_ipsec *ipsec, u32 family,
+					int type)
 {
-	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family);
+	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
 	int err;
 
 	mutex_lock(&rx->ft.mutex);
@@ -422,9 +810,9 @@ static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
 
 static struct mlx5_flow_table *rx_ft_get_policy(struct mlx5_core_dev *mdev,
 						struct mlx5e_ipsec *ipsec,
-						u32 family, u32 prio)
+						u32 family, u32 prio, int type)
 {
-	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family);
+	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
 	struct mlx5_flow_table *ft;
 	int err;
 
@@ -449,18 +837,18 @@ err_get:
 	return ERR_PTR(err);
 }
 
-static void rx_ft_put(struct mlx5e_ipsec *ipsec, u32 family)
+static void rx_ft_put(struct mlx5e_ipsec *ipsec, u32 family, int type)
 {
-	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family);
+	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
 
 	mutex_lock(&rx->ft.mutex);
 	rx_put(ipsec, rx, family);
 	mutex_unlock(&rx->ft.mutex);
 }
 
-static void rx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 family, u32 prio)
+static void rx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 family, u32 prio, int type)
 {
-	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family);
+	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
 
 	mutex_lock(&rx->ft.mutex);
 	if (rx->chains)
@@ -504,10 +892,10 @@ err_rule:
 }
 
 /* IPsec TX flow steering */
-static void tx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
+static void tx_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
 		       struct mlx5_ipsec_fs *roce)
 {
-	mlx5_ipsec_fs_roce_tx_destroy(roce);
+	mlx5_ipsec_fs_roce_tx_destroy(roce, ipsec->mdev);
 	if (tx->chains) {
 		ipsec_chains_destroy(tx->chains);
 	} else {
@@ -516,22 +904,45 @@ static void tx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
 		mlx5_destroy_flow_table(tx->ft.pol);
 	}
 
+	if (tx == ipsec->tx_esw) {
+		mlx5_del_flow_rules(tx->sa.rule);
+		mlx5_destroy_flow_group(tx->sa.group);
+	}
 	mlx5_destroy_flow_table(tx->ft.sa);
 	if (tx->allow_tunnel_mode)
-		mlx5_eswitch_unblock_encap(mdev);
+		mlx5_eswitch_unblock_encap(ipsec->mdev);
 	mlx5_del_flow_rules(tx->status.rule);
 	mlx5_destroy_flow_table(tx->ft.status);
 }
 
-static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
+static void ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
+				     struct mlx5e_ipsec_tx *tx,
+				     struct mlx5e_ipsec_tx_create_attr *attr)
+{
+	if (tx == ipsec->tx_esw) {
+		mlx5_esw_ipsec_tx_create_attr_set(ipsec, attr);
+		return;
+	}
+
+	attr->prio = 0;
+	attr->pol_level = 0;
+	attr->sa_level = 1;
+	attr->cnt_level = 2;
+	attr->chains_ns = MLX5_FLOW_NAMESPACE_EGRESS_IPSEC;
+}
+
+static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
 		     struct mlx5_ipsec_fs *roce)
 {
+	struct mlx5_core_dev *mdev = ipsec->mdev;
+	struct mlx5e_ipsec_tx_create_attr attr;
 	struct mlx5_flow_destination dest = {};
 	struct mlx5_flow_table *ft;
 	u32 flags = 0;
 	int err;
 
-	ft = ipsec_ft_create(tx->ns, 2, 0, 1, 0);
+	ipsec_tx_create_attr_set(ipsec, tx, &attr);
+	ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 0);
 	if (IS_ERR(ft))
 		return PTR_ERR(ft);
 	tx->ft.status = ft;
@@ -544,16 +955,25 @@ static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
 	tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
 	if (tx->allow_tunnel_mode)
 		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
-	ft = ipsec_ft_create(tx->ns, 1, 0, 4, flags);
+	ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 4, flags);
 	if (IS_ERR(ft)) {
 		err = PTR_ERR(ft);
 		goto err_sa_ft;
 	}
 	tx->ft.sa = ft;
 
+	if (tx == ipsec->tx_esw) {
+		dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+		dest.vport.num = MLX5_VPORT_UPLINK;
+		err = ipsec_miss_create(mdev, tx->ft.sa, &tx->sa, &dest);
+		if (err)
+			goto err_sa_miss;
+		memset(&dest, 0, sizeof(dest));
+	}
+
 	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
 		tx->chains = ipsec_chains_create(
-			mdev, tx->ft.sa, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC, 0, 0,
+			mdev, tx->ft.sa, attr.chains_ns, attr.prio, attr.pol_level,
 			&tx->ft.pol);
 		if (IS_ERR(tx->chains)) {
 			err = PTR_ERR(tx->chains);
@@ -563,7 +983,7 @@ static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
 		goto connect_roce;
 	}
 
-	ft = ipsec_ft_create(tx->ns, 0, 0, 2, 0);
+	ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 2, 0);
 	if (IS_ERR(ft)) {
 		err = PTR_ERR(ft);
 		goto err_pol_ft;
@@ -578,7 +998,7 @@ static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
 	}
 
 connect_roce:
-	err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol);
+	err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol, false);
 	if (err)
 		goto err_roce;
 	return 0;
@@ -592,6 +1012,11 @@ err_roce:
 		mlx5_destroy_flow_table(tx->ft.pol);
 	}
 err_pol_ft:
+	if (tx == ipsec->tx_esw) {
+		mlx5_del_flow_rules(tx->sa.rule);
+		mlx5_destroy_flow_group(tx->sa.group);
+	}
+err_sa_miss:
 	mlx5_destroy_flow_table(tx->ft.sa);
 err_sa_ft:
 	if (tx->allow_tunnel_mode)
@@ -602,6 +1027,25 @@ err_status_rule:
 	return err;
 }
 
+static void ipsec_esw_tx_ft_policy_set(struct mlx5_core_dev *mdev,
+				       struct mlx5_flow_table *ft)
+{
+#ifdef CONFIG_MLX5_ESWITCH
+	struct mlx5_eswitch *esw = mdev->priv.eswitch;
+	struct mlx5e_rep_priv *uplink_rpriv;
+	struct mlx5e_priv *priv;
+
+	esw->offloads.ft_ipsec_tx_pol = ft;
+	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+	priv = netdev_priv(uplink_rpriv->netdev);
+	if (!priv->channels.num)
+		return;
+
+	mlx5e_rep_deactivate_channels(priv);
+	mlx5e_rep_activate_channels(priv);
+#endif
+}
+
 static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 		  struct mlx5e_ipsec_tx *tx)
 {
@@ -610,10 +1054,19 @@ static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 	if (tx->ft.refcnt)
 		goto skip;
 
-	err = tx_create(mdev, tx, ipsec->roce);
+	err = mlx5_eswitch_block_mode(mdev);
 	if (err)
 		return err;
 
+	err = tx_create(ipsec, tx, ipsec->roce);
+	if (err) {
+		mlx5_eswitch_unblock_mode(mdev);
+		return err;
+	}
+
+	if (tx == ipsec->tx_esw)
+		ipsec_esw_tx_ft_policy_set(mdev, tx->ft.pol);
+
 skip:
 	tx->ft.refcnt++;
 	return 0;
@@ -624,14 +1077,20 @@ static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
 	if (--tx->ft.refcnt)
 		return;
 
-	tx_destroy(ipsec->mdev, tx, ipsec->roce);
+	if (tx == ipsec->tx_esw) {
+		mlx5_esw_ipsec_restore_dest_uplink(ipsec->mdev);
+		ipsec_esw_tx_ft_policy_set(ipsec->mdev, NULL);
+	}
+
+	tx_destroy(ipsec, tx, ipsec->roce);
+	mlx5_eswitch_unblock_mode(ipsec->mdev);
 }
 
 static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
 						struct mlx5e_ipsec *ipsec,
-						u32 prio)
+						u32 prio, int type)
 {
-	struct mlx5e_ipsec_tx *tx = ipsec->tx;
+	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
 	struct mlx5_flow_table *ft;
 	int err;
 
@@ -657,9 +1116,9 @@ err_get:
 }
 
 static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
-					struct mlx5e_ipsec *ipsec)
+					struct mlx5e_ipsec *ipsec, int type)
 {
-	struct mlx5e_ipsec_tx *tx = ipsec->tx;
+	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
 	int err;
 
 	mutex_lock(&tx->ft.mutex);
@@ -671,18 +1130,18 @@ static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
 	return tx;
 }
 
-static void tx_ft_put(struct mlx5e_ipsec *ipsec)
+static void tx_ft_put(struct mlx5e_ipsec *ipsec, int type)
 {
-	struct mlx5e_ipsec_tx *tx = ipsec->tx;
+	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
 
 	mutex_lock(&tx->ft.mutex);
 	tx_put(ipsec, tx);
 	mutex_unlock(&tx->ft.mutex);
 }
 
-static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio)
+static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio, int type)
 {
-	struct mlx5e_ipsec_tx *tx = ipsec->tx;
+	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
 
 	mutex_lock(&tx->ft.mutex);
 	if (tx->chains)
@@ -753,13 +1212,22 @@ static void setup_fte_esp(struct mlx5_flow_spec *spec)
 	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
 }
 
-static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi)
+static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi, bool encap)
 {
 	/* SPI number */
 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
 
-	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
-	MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, spi);
+	if (encap) {
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+				 misc_parameters.inner_esp_spi);
+		MLX5_SET(fte_match_param, spec->match_value,
+			 misc_parameters.inner_esp_spi, spi);
+	} else {
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+				 misc_parameters.outer_esp_spi);
+		MLX5_SET(fte_match_param, spec->match_value,
			 misc_parameters.outer_esp_spi, spi);
+	}
 }
 
 static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
@@ -782,66 +1250,115 @@ static void setup_fte_reg_a(struct mlx5_flow_spec *spec)
 		 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
 }
 
-static void setup_fte_reg_c0(struct mlx5_flow_spec *spec, u32 reqid)
+static void setup_fte_reg_c4(struct mlx5_flow_spec *spec, u32 reqid)
 {
 	/* Pass policy check before choosing this SA */
 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
 
-	MLX5_SET(fte_match_param, spec->match_criteria,
-		 misc_parameters_2.metadata_reg_c_0, reqid);
+	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+			 misc_parameters_2.metadata_reg_c_4);
 	MLX5_SET(fte_match_param, spec->match_value,
-		 misc_parameters_2.metadata_reg_c_0, reqid);
+		 misc_parameters_2.metadata_reg_c_4, reqid);
 }
 
 static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upspec *upspec)
 {
-	if (upspec->proto != IPPROTO_UDP)
+	switch (upspec->proto) {
+	case IPPROTO_UDP:
+		if (upspec->dport) {
+			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
+				 udp_dport, upspec->dport_mask);
+			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
+				 udp_dport, upspec->dport);
+		}
+		if (upspec->sport) {
+			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
+				 udp_sport, upspec->sport_mask);
+			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
+				 udp_sport, upspec->sport);
+		}
+		break;
+	case IPPROTO_TCP:
+		if (upspec->dport) {
+			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
+				 tcp_dport, upspec->dport_mask);
+			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
+				 tcp_dport, upspec->dport);
+		}
+		if (upspec->sport) {
+			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
+				 tcp_sport, upspec->sport_mask);
+			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
+				 tcp_sport, upspec->sport);
+		}
+		break;
+	default:
 		return;
+	}
 
 	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
 	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
 	MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, upspec->proto);
+}
 
-	if (upspec->dport) {
-		MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_dport,
-			 upspec->dport_mask);
-		MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_dport, upspec->dport);
-	}
+static enum mlx5_flow_namespace_type ipsec_fs_get_ns(struct mlx5e_ipsec *ipsec,
+						     int type, u8 dir)
+{
+	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
+		return MLX5_FLOW_NAMESPACE_FDB;
 
-	if (upspec->sport) {
-		MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_sport,
-			 upspec->sport_mask);
-		MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_sport, upspec->sport);
-	}
+	if (dir == XFRM_DEV_OFFLOAD_IN)
+		return MLX5_FLOW_NAMESPACE_KERNEL;
+
+	return MLX5_FLOW_NAMESPACE_EGRESS;
 }
 
-static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
+static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8 dir,
 			       struct mlx5_flow_act *flow_act)
 {
-	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
-	enum mlx5_flow_namespace_type ns_type;
+	enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir);
+	u8 action[3][MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+	struct mlx5_core_dev *mdev = ipsec->mdev;
 	struct mlx5_modify_hdr *modify_hdr;
+	u8 num_of_actions = 1;
 
-	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+	MLX5_SET(set_action_in, action[0], action_type, MLX5_ACTION_TYPE_SET);
 	switch (dir) {
 	case XFRM_DEV_OFFLOAD_IN:
-		MLX5_SET(set_action_in, action, field,
+		MLX5_SET(set_action_in, action[0], field,
 			 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
-		ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+
+		num_of_actions++;
+		MLX5_SET(set_action_in, action[1], action_type, MLX5_ACTION_TYPE_SET);
+		MLX5_SET(set_action_in, action[1], field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_2);
+		MLX5_SET(set_action_in, action[1], data, val);
+		MLX5_SET(set_action_in, action[1], offset, 0);
+		MLX5_SET(set_action_in, action[1], length, 32);
+
+		if (type == XFRM_DEV_OFFLOAD_CRYPTO) {
+			num_of_actions++;
+			MLX5_SET(set_action_in, action[2], action_type,
+				 MLX5_ACTION_TYPE_SET);
+			MLX5_SET(set_action_in, action[2], field,
+				 MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
+			MLX5_SET(set_action_in, action[2], data, 0);
+			MLX5_SET(set_action_in, action[2], offset, 0);
+			MLX5_SET(set_action_in, action[2], length, 32);
+		}
 		break;
 	case XFRM_DEV_OFFLOAD_OUT:
-		MLX5_SET(set_action_in, action, field,
-			 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
-		ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
+		MLX5_SET(set_action_in, action[0], field,
+			 MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	MLX5_SET(set_action_in, action, data, val);
-	MLX5_SET(set_action_in, action, offset, 0);
-	MLX5_SET(set_action_in, action, length, 32);
+	MLX5_SET(set_action_in, action[0], data, val);
+	MLX5_SET(set_action_in, action[0], offset, 0);
+	MLX5_SET(set_action_in, action[0], length, 32);
 
-	modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action);
+	modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, num_of_actions, action);
 	if (IS_ERR(modify_hdr)) {
 		mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
 			      PTR_ERR(modify_hdr));
@@ -951,37 +1468,70 @@ free_reformatbf:
 	return -EINVAL;
 }
 
+static int get_reformat_type(struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+	switch (attrs->dir) {
+	case XFRM_DEV_OFFLOAD_IN:
+		if (attrs->encap)
+			return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP;
+		return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
+	case XFRM_DEV_OFFLOAD_OUT:
+		if (attrs->family == AF_INET) {
+			if (attrs->encap)
+				return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4;
+			return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
+		}
+
+		if (attrs->encap)
+			return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6;
+		return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
+	default:
+		WARN_ON(true);
+	}
+
+	return -EINVAL;
+}
+
 static int
 setup_pkt_transport_reformat(struct mlx5_accel_esp_xfrm_attrs *attrs,
 			     struct mlx5_pkt_reformat_params *reformat_params)
 {
-	u8 *reformatbf;
+	struct udphdr *udphdr;
+	char *reformatbf;
+	size_t bfflen;
 	__be32 spi;
+	void *hdr;
+
+	reformat_params->type = get_reformat_type(attrs);
+	if (reformat_params->type < 0)
+		return reformat_params->type;
 
 	switch (attrs->dir) {
 	case XFRM_DEV_OFFLOAD_IN:
-		reformat_params->type = MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
 		break;
 	case XFRM_DEV_OFFLOAD_OUT:
-		if (attrs->family == AF_INET)
-			reformat_params->type =
-				MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
-		else
-			reformat_params->type =
-				MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
+		bfflen = MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE;
+		if (attrs->encap)
+			bfflen += sizeof(*udphdr);
 
-		reformatbf = kzalloc(MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE,
-				     GFP_KERNEL);
+		reformatbf = kzalloc(bfflen, GFP_KERNEL);
 		if (!reformatbf)
 			return -ENOMEM;
 
+		hdr = reformatbf;
+		if (attrs->encap) {
+			udphdr = (struct udphdr *)reformatbf;
+			udphdr->source = attrs->sport;
+			udphdr->dest = attrs->dport;
+			hdr += sizeof(*udphdr);
+		}
+
 		/* convert to network format */
 		spi = htonl(attrs->spi);
-		memcpy(reformatbf, &spi, sizeof(spi));
+		memcpy(hdr, &spi, sizeof(spi));
 
 		reformat_params->param_0 = attrs->authsize;
-		reformat_params->size =
-			MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE;
+		reformat_params->size = bfflen;
 		reformat_params->data = reformatbf;
 		break;
 	default:
@@ -991,26 +1541,17 @@ setup_pkt_transport_reformat(struct mlx5_accel_esp_xfrm_attrs *attrs,
 	return 0;
 }
 
-static int setup_pkt_reformat(struct mlx5_core_dev *mdev,
+static int setup_pkt_reformat(struct mlx5e_ipsec *ipsec,
 			      struct mlx5_accel_esp_xfrm_attrs *attrs,
 			      struct mlx5_flow_act *flow_act)
 {
+	enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, attrs->type,
								attrs->dir);
 	struct mlx5_pkt_reformat_params reformat_params = {};
+	struct mlx5_core_dev *mdev = ipsec->mdev;
 	struct mlx5_pkt_reformat *pkt_reformat;
-	enum mlx5_flow_namespace_type ns_type;
 	int ret;
 
-	switch (attrs->dir) {
-	case XFRM_DEV_OFFLOAD_IN:
-		ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
-		break;
-	case XFRM_DEV_OFFLOAD_OUT:
-		ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
-		break;
-	default:
-		return -EINVAL;
-	}
-
 	switch (attrs->mode) {
 	case XFRM_MODE_TRANSPORT:
 		ret = setup_pkt_transport_reformat(attrs, &reformat_params);
@@ -1047,9 +1588,9 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 	struct mlx5_flow_spec *spec;
 	struct mlx5e_ipsec_rx *rx;
 	struct mlx5_fc *counter;
-	int err;
+	int err = 0;
 
-	rx = rx_ft_get(mdev, ipsec, attrs->family);
+	rx = rx_ft_get(mdev, ipsec, attrs->family, attrs->type);
 	if (IS_ERR(rx))
 		return PTR_ERR(rx);
 
@@ -1064,18 +1605,27 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 	else
 		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
 
-	setup_fte_spi(spec, attrs->spi);
-	setup_fte_esp(spec);
+	setup_fte_spi(spec, attrs->spi, attrs->encap);
+	if (!attrs->encap)
+		setup_fte_esp(spec);
 	setup_fte_no_frags(spec);
+	setup_fte_upper_proto_match(spec, &attrs->upspec);
 
-	err = setup_modify_header(mdev, sa_entry->ipsec_obj_id | BIT(31),
-				  XFRM_DEV_OFFLOAD_IN, &flow_act);
-	if (err)
-		goto err_mod_header;
+	if (!attrs->drop) {
+		if (rx != ipsec->rx_esw)
+			err = setup_modify_header(ipsec, attrs->type,
+						  sa_entry->ipsec_obj_id | BIT(31),
+						  XFRM_DEV_OFFLOAD_IN, &flow_act);
+		else
+			err = mlx5_esw_ipsec_rx_setup_modify_header(sa_entry, &flow_act);
+
+		if (err)
+			goto err_mod_header;
+	}
 
 	switch (attrs->type) {
 	case XFRM_DEV_OFFLOAD_PACKET:
-		err = setup_pkt_reformat(mdev, attrs, &flow_act);
+		err = setup_pkt_reformat(ipsec, attrs, &flow_act);
 		if (err)
 			goto err_pkt_reformat;
 		break;
@@ -1107,6 +1657,15 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 		mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
 		goto err_add_flow;
 	}
+	if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
+		err = rx_add_rule_drop_replay(sa_entry, rx);
+	if (err)
+		goto err_add_replay;
+
+	err = rx_add_rule_drop_auth_trailer(sa_entry, rx);
+	if (err)
+		goto err_drop_reason;
+
 	kvfree(spec);
 
 	sa_entry->ipsec_rule.rule = rule;
@@ -1115,17 +1674,25 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 	sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
 	return 0;
 
+err_drop_reason:
+	if (sa_entry->ipsec_rule.replay.rule) {
+		mlx5_del_flow_rules(sa_entry->ipsec_rule.replay.rule);
+		mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc);
+	}
+err_add_replay:
+	mlx5_del_flow_rules(rule);
 err_add_flow:
 	mlx5_fc_destroy(mdev, counter);
 err_add_cnt:
 	if (flow_act.pkt_reformat)
 		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
 err_pkt_reformat:
-	mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
+	if (flow_act.modify_hdr)
+		mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
 err_mod_header:
 	kvfree(spec);
 err_alloc:
-	rx_ft_put(ipsec, attrs->family);
+	rx_ft_put(ipsec, attrs->family, attrs->type);
 	return err;
 }
 
@@ -1142,7 +1709,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 	struct mlx5_fc *counter;
 	int err;
 
-	tx = tx_ft_get(mdev, ipsec);
+	tx = tx_ft_get(mdev, ipsec, attrs->type);
 	if (IS_ERR(tx))
 		return PTR_ERR(tx);
 
@@ -1162,14 +1729,14 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 
 	switch (attrs->type) {
 	case XFRM_DEV_OFFLOAD_CRYPTO:
-		setup_fte_spi(spec, attrs->spi);
+		setup_fte_spi(spec, attrs->spi, false);
 		setup_fte_esp(spec);
 		setup_fte_reg_a(spec);
 		break;
 	case XFRM_DEV_OFFLOAD_PACKET:
 		if (attrs->reqid)
-			setup_fte_reg_c0(spec, attrs->reqid);
-		err = setup_pkt_reformat(mdev, attrs, &flow_act);
+			setup_fte_reg_c4(spec, attrs->reqid);
+		err = setup_pkt_reformat(ipsec, attrs, &flow_act);
 		if (err)
 			goto err_pkt_reformat;
 		break;
@@ -1218,7 +1785,7 @@ err_add_cnt:
 err_pkt_reformat:
 	kvfree(spec);
 err_alloc:
-	tx_ft_put(ipsec);
+	tx_ft_put(ipsec, attrs->type);
 	return err;
 }
 
@@ -1226,15 +1793,16 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 {
 	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
 	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
-	struct mlx5e_ipsec_tx *tx = pol_entry->ipsec->tx;
+	struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
 	struct mlx5_flow_destination dest[2] = {};
 	struct mlx5_flow_act flow_act = {};
 	struct mlx5_flow_handle *rule;
 	struct mlx5_flow_spec *spec;
 	struct mlx5_flow_table *ft;
+	struct mlx5e_ipsec_tx *tx;
 	int err, dstn = 0;
 
-	ft = tx_ft_get_policy(mdev, pol_entry->ipsec, attrs->prio);
+	ft = tx_ft_get_policy(mdev, ipsec, attrs->prio, attrs->type);
 	if (IS_ERR(ft))
 		return PTR_ERR(ft);
 
@@ -1244,6 +1812,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 		goto err_alloc;
 	}
 
+	tx = ipsec_tx(ipsec, attrs->type);
 	if (attrs->family == AF_INET)
 		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
 	else
@@ -1258,7 +1827,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 		if (!attrs->reqid)
 			break;
 
-		err = setup_modify_header(mdev, attrs->reqid,
+		err = setup_modify_header(ipsec, attrs->type, attrs->reqid,
 					  XFRM_DEV_OFFLOAD_OUT, &flow_act);
 		if (err)
 			goto err_mod_header;
@@ -1277,6 +1846,8 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 	}
 
 	flow_act.flags |= FLOW_ACT_NO_APPEND;
+	if (tx == ipsec->tx_esw && tx->chains)
+		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
 	dest[dstn].ft = tx->ft.sa;
 	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 	dstn++;
@@ -1298,7 +1869,7 @@ err_action:
 err_mod_header:
 	kvfree(spec);
 err_alloc:
-	tx_ft_put_policy(pol_entry->ipsec, attrs->prio);
+	tx_ft_put_policy(ipsec, attrs->prio, attrs->type);
 	return err;
 }
 
@@ -1306,6 +1877,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 {
 	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
 	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
+	struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
 	struct mlx5_flow_destination dest[2];
 	struct mlx5_flow_act flow_act = {};
 	struct mlx5_flow_handle *rule;
@@ -1314,11 +1886,12 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 	struct mlx5e_ipsec_rx *rx;
 	int err, dstn = 0;
 
-	ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->family, attrs->prio);
+	ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->family, attrs->prio,
+			      attrs->type);
 	if (IS_ERR(ft))
 		return PTR_ERR(ft);
 
-	rx = ipsec_rx(pol_entry->ipsec, attrs->family);
+	rx = ipsec_rx(pol_entry->ipsec, attrs->family, attrs->type);
 
 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 	if (!spec) {
@@ -1332,6 +1905,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
 
 	setup_fte_no_frags(spec);
+	setup_fte_upper_proto_match(spec, &attrs->upspec);
 
 	switch (attrs->action) {
 	case XFRM_POLICY_ALLOW:
@@ -1350,6 +1924,8 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 	}
 
 	flow_act.flags |= FLOW_ACT_NO_APPEND;
+	if (rx == ipsec->rx_esw && rx->chains)
+		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
 	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 	dest[dstn].ft = rx->ft.sa;
 	dstn++;
@@ -1367,88 +1943,110 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 err_action:
 	kvfree(spec);
 err_alloc:
-	rx_ft_put_policy(pol_entry->ipsec, attrs->family, attrs->prio);
+	rx_ft_put_policy(pol_entry->ipsec, attrs->family, attrs->prio, attrs->type);
 	return err;
 }
 
+static void ipsec_fs_destroy_single_counter(struct mlx5_core_dev *mdev,
+					    struct mlx5e_ipsec_fc *fc)
+{
+	mlx5_fc_destroy(mdev, fc->drop);
+	mlx5_fc_destroy(mdev, fc->cnt);
+	kfree(fc);
+}
+
 static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec)
 {
-	struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
 	struct mlx5_core_dev *mdev = ipsec->mdev;
-	struct mlx5e_ipsec_tx *tx = ipsec->tx;
 
-	mlx5_fc_destroy(mdev, tx->fc->drop);
-	mlx5_fc_destroy(mdev, tx->fc->cnt);
-	kfree(tx->fc);
-	mlx5_fc_destroy(mdev, rx_ipv4->fc->drop);
-	mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
-	kfree(rx_ipv4->fc);
+	ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
+	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
+	if (ipsec->is_uplink_rep) {
+		ipsec_fs_destroy_single_counter(mdev, ipsec->tx_esw->fc);
+		ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
+	}
 }
 
-static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
+static struct mlx5e_ipsec_fc *ipsec_fs_init_single_counter(struct mlx5_core_dev *mdev)
 {
-	struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
-	struct mlx5e_ipsec_rx *rx_ipv6 = ipsec->rx_ipv6;
-	struct mlx5_core_dev *mdev = ipsec->mdev;
-	struct mlx5e_ipsec_tx *tx = ipsec->tx;
 	struct mlx5e_ipsec_fc *fc;
 	struct mlx5_fc *counter;
 	int err;
 
-	fc = kzalloc(sizeof(*rx_ipv4->fc), GFP_KERNEL);
+	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
 	if (!fc)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
-	/* Both IPv4 and IPv6 point to same flow counters struct. */
-	rx_ipv4->fc = fc;
-	rx_ipv6->fc = fc;
 	counter = mlx5_fc_create(mdev, false);
 	if (IS_ERR(counter)) {
 		err = PTR_ERR(counter);
-		goto err_rx_cnt;
+		goto err_cnt;
 	}
-
 	fc->cnt = counter;
+
 	counter = mlx5_fc_create(mdev, false);
 	if (IS_ERR(counter)) {
 		err = PTR_ERR(counter);
-		goto err_rx_drop;
+		goto err_drop;
 	}
-
 	fc->drop = counter;
-	fc = kzalloc(sizeof(*tx->fc), GFP_KERNEL);
-	if (!fc) {
-		err = -ENOMEM;
-		goto err_tx_fc;
+
+	return fc;
+
+err_drop:
+	mlx5_fc_destroy(mdev, fc->cnt);
+err_cnt:
+	kfree(fc);
+	return ERR_PTR(err);
+}
+
+static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
+{
+	struct mlx5_core_dev *mdev = ipsec->mdev;
+	struct mlx5e_ipsec_fc *fc;
+	int err;
+
+	fc = ipsec_fs_init_single_counter(mdev);
+	if (IS_ERR(fc)) {
+		err = PTR_ERR(fc);
+		goto err_rx_cnt;
 	}
+	ipsec->rx_ipv4->fc = fc;
 
-	tx->fc = fc;
-	counter = mlx5_fc_create(mdev, false);
-	if (IS_ERR(counter)) {
-		err = PTR_ERR(counter);
+	fc = ipsec_fs_init_single_counter(mdev);
+	if (IS_ERR(fc)) {
+		err = PTR_ERR(fc);
 		goto err_tx_cnt;
 	}
+	ipsec->tx->fc = fc;
 
-	fc->cnt = counter;
-	counter = mlx5_fc_create(mdev, false);
-	if (IS_ERR(counter)) {
-		err = PTR_ERR(counter);
-		goto err_tx_drop;
+	if (ipsec->is_uplink_rep) {
+		fc = ipsec_fs_init_single_counter(mdev);
+		if (IS_ERR(fc)) {
+			err = PTR_ERR(fc);
+			goto err_rx_esw_cnt;
+		}
+		ipsec->rx_esw->fc = fc;
+
+		fc = ipsec_fs_init_single_counter(mdev);
+		if (IS_ERR(fc)) {
+			err = PTR_ERR(fc);
+			goto err_tx_esw_cnt;
+		}
+		ipsec->tx_esw->fc = fc;
 	}
 
-	fc->drop = counter;
+	/* Both IPv4 and IPv6 point to same flow counters struct. */
+	ipsec->rx_ipv6->fc = ipsec->rx_ipv4->fc;
 	return 0;
 
-err_tx_drop:
-	mlx5_fc_destroy(mdev, tx->fc->cnt);
+err_tx_esw_cnt:
+	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
+err_rx_esw_cnt:
+	ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
 err_tx_cnt:
-	kfree(tx->fc);
-err_tx_fc:
-	mlx5_fc_destroy(mdev, rx_ipv4->fc->drop);
-err_rx_drop:
-	mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
+	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
err_rx_cnt:
-	kfree(rx_ipv4->fc);
 	return err;
 }
 
@@ -1458,6 +2056,7 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
 	struct mlx5e_ipsec *ipsec = priv->ipsec;
 	struct mlx5e_ipsec_hw_stats *stats;
 	struct mlx5e_ipsec_fc *fc;
+	u64 packets, bytes;
 
 	stats = (struct mlx5e_ipsec_hw_stats *)ipsec_stats;
 
@@ -1479,14 +2078,97 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
 	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
 	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_tx_drop_pkts,
 		      &stats->ipsec_tx_drop_bytes);
+
+	if (ipsec->is_uplink_rep) {
+		fc = ipsec->rx_esw->fc;
+		if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
+			stats->ipsec_rx_pkts += packets;
+			stats->ipsec_rx_bytes += bytes;
+		}
+
+		if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
+			stats->ipsec_rx_drop_pkts += packets;
+			stats->ipsec_rx_drop_bytes += bytes;
+		}
+
+		fc = ipsec->tx_esw->fc;
+		if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
+			stats->ipsec_tx_pkts += packets;
+			stats->ipsec_tx_bytes += bytes;
+		}
+
+		if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
+			stats->ipsec_tx_drop_pkts += packets;
+			stats->ipsec_tx_drop_bytes += bytes;
+		}
+	}
+}
+
+#ifdef CONFIG_MLX5_ESWITCH
+static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
+{
+	struct mlx5_eswitch *esw = mdev->priv.eswitch;
+	int err = 0;
+
+	if (esw) {
+		err = mlx5_esw_lock(esw);
+		if (err)
+			return err;
+	}
+
+	if (mdev->num_block_ipsec) {
+		err = -EBUSY;
+		goto unlock;
+	}
+
+	mdev->num_block_tc++;
+
+unlock:
+	if (esw)
+		mlx5_esw_unlock(esw);
+
+	return err;
+}
+#else
+static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
+{
+	if (mdev->num_block_ipsec)
+		return -EBUSY;
+
+	mdev->num_block_tc++;
+	return 0;
+}
+#endif
+
+static void mlx5e_ipsec_unblock_tc_offload(struct mlx5_core_dev *mdev)
+{
+	mdev->num_block_tc--;
 }
 
 int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
+	int err;
+
+	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET) {
+		err = mlx5e_ipsec_block_tc_offload(sa_entry->ipsec->mdev);
+		if (err)
+			return err;
+	}
+
 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
-		return tx_add_rule(sa_entry);
+		err = tx_add_rule(sa_entry);
+	else
+		err = rx_add_rule(sa_entry);
+
+	if (err)
+		goto err_out;
 
-	return rx_add_rule(sa_entry);
+	return 0;
+
+err_out:
+	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
+		mlx5e_ipsec_unblock_tc_offload(sa_entry->ipsec->mdev);
+	return err;
 }
 
 void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
@@ -1499,21 +2181,52 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 	if (ipsec_rule->pkt_reformat)
 		mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);
 
+	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
+		mlx5e_ipsec_unblock_tc_offload(mdev);
+
 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
-		tx_ft_put(sa_entry->ipsec);
+		tx_ft_put(sa_entry->ipsec, sa_entry->attrs.type);
 		return;
 	}
 
-	mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
-	rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family);
+	if (ipsec_rule->modify_hdr)
+		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
+
+	mlx5_del_flow_rules(ipsec_rule->trailer.rule);
+	mlx5_fc_destroy(mdev, ipsec_rule->trailer.fc);
+
+	mlx5_del_flow_rules(ipsec_rule->auth.rule);
+	mlx5_fc_destroy(mdev, ipsec_rule->auth.fc);
+
+	if (ipsec_rule->replay.rule) {
+		mlx5_del_flow_rules(ipsec_rule->replay.rule);
+		mlx5_fc_destroy(mdev, ipsec_rule->replay.fc);
+	}
+	mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
+	rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
 }
 
 int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
 {
+	int err;
+
+	err = mlx5e_ipsec_block_tc_offload(pol_entry->ipsec->mdev);
+	if (err)
+		return err;
+
 	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
-		return tx_add_policy(pol_entry);
+		err = tx_add_policy(pol_entry);
+	else
+		err = rx_add_policy(pol_entry);
+
+	if (err)
+		goto err_out;
 
-	return rx_add_policy(pol_entry);
+	return 0;
+
+err_out:
+	mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
+	return err;
 }
 
 void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
@@ -1523,16 +2236,18 @@ void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
 
 	mlx5_del_flow_rules(ipsec_rule->rule);
 
+	mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
+
 	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
 		rx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.family,
-				 pol_entry->attrs.prio);
+				 pol_entry->attrs.prio, pol_entry->attrs.type);
 		return;
 	}
 
 	if (ipsec_rule->modify_hdr)
 		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
 
-	tx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.prio);
+	tx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.prio, pol_entry->attrs.type);
 }
 
 void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
@@ -1540,7 +2255,7 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
 	if (!ipsec->tx)
 		return;
 
-	if (mlx5_ipsec_device_caps(ipsec->mdev) & MLX5_IPSEC_CAP_ROCE)
+	if (ipsec->roce)
 		mlx5_ipsec_fs_roce_cleanup(ipsec->roce);
 
 	ipsec_fs_destroy_counters(ipsec);
@@ -1555,12 +2270,25 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
 	mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
 	WARN_ON(ipsec->rx_ipv6->ft.refcnt);
 	kfree(ipsec->rx_ipv6);
+
+	if (ipsec->is_uplink_rep) {
+		xa_destroy(&ipsec->ipsec_obj_id_map);
+
+		mutex_destroy(&ipsec->tx_esw->ft.mutex);
+		WARN_ON(ipsec->tx_esw->ft.refcnt);
+		kfree(ipsec->tx_esw);
+
+		mutex_destroy(&ipsec->rx_esw->ft.mutex);
+		WARN_ON(ipsec->rx_esw->ft.refcnt);
+		kfree(ipsec->rx_esw);
+	}
 }
 
-int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
+int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec,
+			      struct mlx5_devcom_comp_dev **devcom)
 {
 	struct mlx5_core_dev *mdev = ipsec->mdev;
-	struct mlx5_flow_namespace *ns;
+	struct mlx5_flow_namespace *ns, *ns_esw;
 	int err = -ENOMEM;
 
 	ns = mlx5_get_flow_namespace(ipsec->mdev,
@@ -1568,9 +2296,23 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
 	if (!ns)
 		return -EOPNOTSUPP;
 
+	if (ipsec->is_uplink_rep) {
+		ns_esw = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_FDB);
+		if (!ns_esw)
+			return -EOPNOTSUPP;
+
+		ipsec->tx_esw = kzalloc(sizeof(*ipsec->tx_esw), GFP_KERNEL);
+		if (!ipsec->tx_esw)
+			return -ENOMEM;
+
+		ipsec->rx_esw = kzalloc(sizeof(*ipsec->rx_esw), GFP_KERNEL);
+		if (!ipsec->rx_esw)
+			goto err_rx_esw;
+	}
+
 	ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
 	if (!ipsec->tx)
-		return -ENOMEM;
+		goto err_tx;
 
 	ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
 	if (!ipsec->rx_ipv4)
@@ -1589,8 +2331,16 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
 	mutex_init(&ipsec->rx_ipv6->ft.mutex);
 	ipsec->tx->ns = ns;
 
-	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE)
-		ipsec->roce = mlx5_ipsec_fs_roce_init(mdev);
+	if (ipsec->is_uplink_rep) {
+		mutex_init(&ipsec->tx_esw->ft.mutex);
+		mutex_init(&ipsec->rx_esw->ft.mutex);
+		ipsec->tx_esw->ns = ns_esw;
+		xa_init_flags(&ipsec->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
+	} else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
+		ipsec->roce = mlx5_ipsec_fs_roce_init(mdev, devcom);
+	} else {
+		mlx5_core_warn(mdev, "IPsec was initialized without RoCE support\n");
+	}
 
 	return 0;
 
@@ -1600,6 +2350,10 @@ err_rx_ipv6:
 	kfree(ipsec->rx_ipv4);
 err_rx_ipv4:
 	kfree(ipsec->tx);
+err_tx:
+	kfree(ipsec->rx_esw);
+err_rx_esw:
+	kfree(ipsec->tx_esw);
 	return err;
 }
 
@@ -1621,12 +2375,44 @@ void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
 
 bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
-	struct mlx5e_ipsec_rx *rx =
-		ipsec_rx(sa_entry->ipsec, sa_entry->attrs.family);
-	struct mlx5e_ipsec_tx *tx = sa_entry->ipsec->tx;
+	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
+	struct mlx5e_ipsec_rx *rx;
+	struct mlx5e_ipsec_tx *tx;
 
+	rx = ipsec_rx(sa_entry->ipsec, attrs->family, attrs->type);
+	tx = ipsec_tx(sa_entry->ipsec, attrs->type);
 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
 		return tx->allow_tunnel_mode;
 
 	return rx->allow_tunnel_mode;
 }
+
+void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
+				  struct mlx5e_priv *master_priv)
+{
+	struct mlx5e_ipsec_mpv_work *work;
+
+	reinit_completion(&master_priv->ipsec->comp);
+
+	if (!slave_priv->ipsec) {
+		complete(&master_priv->ipsec->comp);
+		return;
+	}
+
+	work = &slave_priv->ipsec->mpv_work;
+
+	INIT_WORK(&work->work, ipsec_mpv_work_handler);
+	work->event = event;
+	work->slave_priv = slave_priv;
+	work->master_priv = master_priv;
+	queue_work(slave_priv->ipsec->wq, &work->work);
+}
+
+void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
+{
+	if (!priv->ipsec)
+		return; /* IPsec not supported */
+
+	mlx5_devcom_send_event(priv->devcom, event, event, priv);
+	wait_for_completion(&priv->ipsec->comp);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index a3554bde3e07..6e00afe4671b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -5,6 +5,9 @@
 #include "en.h"
 #include "ipsec.h"
 #include "lib/crypto.h"
+#include "lib/ipsec_fs_roce.h"
+#include "fs_core.h"
+#include "eswitch.h"
 
 enum {
 	MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
@@ -37,7 +40,10 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
 	    MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
 		caps |= MLX5_IPSEC_CAP_CRYPTO;
 
-	if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload)) {
+	if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
+	    (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS ||
+	     (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS &&
+	      is_mdev_legacy_mode(mdev)))) {
 		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
 					      reformat_add_esp_trasport) &&
 		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
@@ -45,8 +51,9 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
 		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
 			caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
 
-		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
-		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level))
+		if ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
+		     MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
+		    MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level))
 			caps |= MLX5_IPSEC_CAP_PRIO;
 
 		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
@@ -54,9 +61,15 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
 		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_l3_esp_tunnel_to_l2))
 			caps |= MLX5_IPSEC_CAP_TUNNEL;
+
+		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
+					      reformat_add_esp_transport_over_udp) &&
+		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+					      reformat_del_esp_transport_over_udp))
+			caps |= MLX5_IPSEC_CAP_ESPINUDP;
 	}
 
-	if (mlx5_get_roce_state(mdev) &&
+	if (mlx5_get_roce_state(mdev) && mlx5_ipsec_fs_is_mpv_roce_supported(mdev) &&
 	    MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_RX_2_NIC_RX_RDMA &&
 	    MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
 		caps |= MLX5_IPSEC_CAP_ROCE;
@@ -87,7 +100,7 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
 
 	if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
 		MLX5_SET(ipsec_aso, aso_ctx, window_sz,
-			 attrs->replay_esn.replay_window / 64);
+			 attrs->replay_esn.replay_window);
 		MLX5_SET(ipsec_aso, aso_ctx, mode,
 			 MLX5_IPSEC_ASO_REPLAY_PROTECTION);
 	}
@@ -551,6 +564,7 @@ void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
 	dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
 			 DMA_BIDIRECTIONAL);
 	kfree(aso);
+	ipsec->aso = NULL;
 }
 
 static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 8d995e304869..51a144246ea6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -37,6 +37,7 @@
 #include "ipsec.h"
 #include "ipsec_rxtx.h"
 #include "en.h"
+#include "esw/ipsec_fs.h"
 
 enum {
 	MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
@@ -311,9 +312,8 @@ enum {
 
 void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
 				       struct sk_buff *skb,
-				       struct mlx5_cqe64 *cqe)
+				       u32 ipsec_meta_data)
 {
-	u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5e_ipsec *ipsec = priv->ipsec;
 	struct mlx5e_ipsec_sa_entry *sa_entry;
@@ -358,3 +358,24 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
 		atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_syndrome);
 	}
 }
+
+int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metadata)
+{
+	struct mlx5e_ipsec *ipsec = priv->ipsec;
+	u32 ipsec_obj_id;
+	int err;
+
+	if (!ipsec || !ipsec->is_uplink_rep)
+		return -EINVAL;
+
+	err = mlx5_esw_ipsec_rx_ipsec_obj_id_search(priv, id, &ipsec_obj_id);
+	if (err) {
+		atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
+		return err;
+	}
+
+	*metadata = MLX5_IPSEC_METADATA_CREATE(ipsec_obj_id,
+					       MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 1878a70b9031..2ed99772f168 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -43,6 +43,7 @@
 #define MLX5_IPSEC_METADATA_MARKER(metadata)  (((metadata) >> 31) & 0x1)
 #define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0))
 #define MLX5_IPSEC_METADATA_HANDLE(metadata)  ((metadata) & GENMASK(23, 0))
+#define MLX5_IPSEC_METADATA_CREATE(id, syndrome) ((id) | ((syndrome) << 24))
 
 struct mlx5e_accel_tx_ipsec_state {
 	struct xfrm_offload *xo;
@@ -53,7 +54,6 @@ struct mlx5e_accel_tx_ipsec_state {
 
 #ifdef CONFIG_MLX5_EN_IPSEC
 
-void mlx5e_ipsec_inverse_table_init(void);
 void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
 			    struct xfrm_offload *xo);
 void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
@@ -66,7 +66,8 @@ void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
 			       struct mlx5_wqe_inline_seg *inlseg);
 void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
 				       struct sk_buff *skb,
-				       struct mlx5_cqe64 *cqe);
+				       u32 ipsec_meta_data);
+int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metadata);
 static inline unsigned int mlx5e_ipsec_tx_ids_len(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
 {
 	return ipsec_st->tailen;
@@ -145,7 +146,7 @@ mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 static inline
 void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
 				       struct sk_buff *skb,
-				       struct mlx5_cqe64 *cqe)
+				       u32 ipsec_meta_data)
 {}
 
 static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index 592b165530ff..d4ebd8743114 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -10,7 +10,6 @@
 #include "lib/aso.h"
 #include "lib/crypto.h"
 #include "en_accel/macsec.h"
-#include "en_accel/macsec_fs.h"
 
 #define MLX5_MACSEC_EPN_SCOPE_MID 0x80000000L
 #define MLX5E_MACSEC_ASO_CTX_SZ MLX5_ST_SZ_BYTES(macsec_aso)
@@ -66,9 +65,7 @@ struct mlx5e_macsec_sa {
 	ssci_t ssci;
 	salt_t salt;
 
-	struct rhash_head hash;
-	u32 fs_id;
-	union mlx5e_macsec_rule *macsec_rule;
+	union mlx5_macsec_rule *macsec_rule;
 	struct rcu_head rcu_head;
 	struct mlx5e_macsec_epn_state epn_state;
}; @@ -106,14 +103,6 @@ struct mlx5e_macsec_aso { u32 pdn; }; -static const struct rhashtable_params rhash_sci = { - .key_len = sizeof_field(struct mlx5e_macsec_sa, sci), - .key_offset = offsetof(struct mlx5e_macsec_sa, sci), - .head_offset = offsetof(struct mlx5e_macsec_sa, hash), - .automatic_shrinking = true, - .min_size = 1, -}; - struct mlx5e_macsec_device { const struct net_device *netdev; struct mlx5e_macsec_sa *tx_sa[MACSEC_NUM_AN]; @@ -125,20 +114,13 @@ struct mlx5e_macsec_device { struct mlx5e_macsec { struct list_head macsec_device_list_head; int num_of_devices; - struct mlx5e_macsec_fs *macsec_fs; struct mutex lock; /* Protects mlx5e_macsec internal contexts */ - /* Tx sci -> fs id mapping handling */ - struct rhashtable sci_hash; /* sci -> mlx5e_macsec_sa */ - /* Rx fs_id -> rx_sc mapping */ struct xarray sc_xarray; struct mlx5_core_dev *mdev; - /* Stats manage */ - struct mlx5e_macsec_stats stats; - /* ASO */ struct mlx5e_macsec_aso aso; @@ -330,36 +312,30 @@ static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_o static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec, struct mlx5e_macsec_sa *sa, - bool is_tx) + bool is_tx, struct net_device *netdev, u32 fs_id) { int action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT : MLX5_ACCEL_MACSEC_ACTION_DECRYPT; - if ((is_tx) && sa->fs_id) { - /* Make sure ongoing datapath readers sees a valid SA */ - rhashtable_remove_fast(&macsec->sci_hash, &sa->hash, rhash_sci); - sa->fs_id = 0; - } - if (!sa->macsec_rule) return; - mlx5e_macsec_fs_del_rule(macsec->macsec_fs, sa->macsec_rule, action); + mlx5_macsec_fs_del_rule(macsec->mdev->macsec_fs, sa->macsec_rule, action, netdev, + fs_id); mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id); sa->macsec_rule = NULL; } static int mlx5e_macsec_init_sa(struct macsec_context *ctx, struct mlx5e_macsec_sa *sa, - bool encrypt, - bool is_tx) + bool encrypt, bool is_tx, u32 *fs_id) { struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev); struct mlx5e_macsec *macsec = priv->macsec; struct mlx5_macsec_rule_attrs rule_attrs; struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_macsec_obj_attrs obj_attrs; - union mlx5e_macsec_rule *macsec_rule; + union mlx5_macsec_rule *macsec_rule; int err; obj_attrs.next_pn = sa->next_pn; @@ -387,7 +363,7 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx, rule_attrs.action = (is_tx) ? 
MLX5_ACCEL_MACSEC_ACTION_ENCRYPT : MLX5_ACCEL_MACSEC_ACTION_DECRYPT; - macsec_rule = mlx5e_macsec_fs_add_rule(macsec->macsec_fs, ctx, &rule_attrs, &sa->fs_id); + macsec_rule = mlx5_macsec_fs_add_rule(mdev->macsec_fs, ctx, &rule_attrs, fs_id); if (!macsec_rule) { err = -ENOMEM; goto destroy_macsec_object; @@ -395,16 +371,8 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx, sa->macsec_rule = macsec_rule; - if (is_tx) { - err = rhashtable_insert_fast(&macsec->sci_hash, &sa->hash, rhash_sci); - if (err) - goto destroy_macsec_object_and_rule; - } - return 0; -destroy_macsec_object_and_rule: - mlx5e_macsec_cleanup_sa(macsec, sa, is_tx); destroy_macsec_object: mlx5e_macsec_destroy_object(mdev, sa->macsec_obj_id); @@ -426,7 +394,7 @@ mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci) static int macsec_rx_sa_active_update(struct macsec_context *ctx, struct mlx5e_macsec_sa *rx_sa, - bool active) + bool active, u32 *fs_id) { struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev); struct mlx5e_macsec *macsec = priv->macsec; @@ -437,11 +405,11 @@ static int macsec_rx_sa_active_update(struct macsec_context *ctx, rx_sa->active = active; if (!active) { - mlx5e_macsec_cleanup_sa(macsec, rx_sa, false); + mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev, *fs_id); return 0; } - err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false); + err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false, fs_id); if (err) rx_sa->active = false; @@ -563,7 +531,7 @@ static int mlx5e_macsec_add_txsa(struct macsec_context *ctx) !tx_sa->active) goto out; - err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true); + err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL); if (err) goto destroy_encryption_key; @@ -612,7 +580,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx) goto out; } - if (tx_sa->next_pn != ctx_tx_sa->next_pn_halves.lower) { + if (ctx->sa.update_pn) { netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n", assoc_num); err = -EINVAL; @@ -627,7 +595,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx) goto out; if (ctx_tx_sa->active) { - err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true); + err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL); if (err) goto out; } else { @@ -636,7 +604,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx) goto out; } - mlx5e_macsec_cleanup_sa(macsec, tx_sa, true); + mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0); } out: mutex_unlock(&macsec->lock); @@ -669,7 +637,7 @@ static int mlx5e_macsec_del_txsa(struct macsec_context *ctx) goto out; } - mlx5e_macsec_cleanup_sa(macsec, tx_sa, true); + mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0); mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id); kfree_rcu_mightsleep(tx_sa); macsec_device->tx_sa[assoc_num] = NULL; @@ -680,20 +648,6 @@ out: return err; } -static u32 mlx5e_macsec_get_sa_from_hashtable(struct rhashtable *sci_hash, sci_t *sci) -{ - struct mlx5e_macsec_sa *macsec_sa; - u32 fs_id = 0; - - rcu_read_lock(); - macsec_sa = rhashtable_lookup(sci_hash, sci, rhash_sci); - if (macsec_sa) - fs_id = macsec_sa->fs_id; - rcu_read_unlock(); - - return fs_id; -} - static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx) { struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element; @@ -813,7 +767,8 @@ static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx) if (!rx_sa) continue; - err = macsec_rx_sa_active_update(ctx, rx_sa, rx_sa->active && 
ctx_rx_sc->active); + err = macsec_rx_sa_active_update(ctx, rx_sa, rx_sa->active && ctx_rx_sc->active, + &rx_sc->sc_xarray_element->fs_id); if (err) goto out; } @@ -824,7 +779,8 @@ out: return err; } -static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec_rx_sc *rx_sc) +static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec_rx_sc *rx_sc, + struct net_device *netdev) { struct mlx5e_macsec_sa *rx_sa; int i; @@ -834,7 +790,8 @@ static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec if (!rx_sa) continue; - mlx5e_macsec_cleanup_sa(macsec, rx_sa, false); + mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, netdev, + rx_sc->sc_xarray_element->fs_id); mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id); kfree(rx_sa); @@ -882,7 +839,7 @@ static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx) goto out; } - macsec_del_rxsc_ctx(macsec, rx_sc); + macsec_del_rxsc_ctx(macsec, rx_sc, ctx->secy->netdev); out: mutex_unlock(&macsec->lock); @@ -941,7 +898,6 @@ static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx) rx_sa->next_pn = ctx_rx_sa->next_pn; rx_sa->sci = sci; rx_sa->assoc_num = assoc_num; - rx_sa->fs_id = rx_sc->sc_xarray_element->fs_id; if (ctx->secy->xpn) update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves, @@ -958,7 +914,7 @@ static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx) goto out; //TODO - add support for both authentication and encryption flows - err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false); + err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false, &rx_sc->sc_xarray_element->fs_id); if (err) goto destroy_encryption_key; @@ -1017,7 +973,7 @@ static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx) goto out; } - if (rx_sa->next_pn != ctx_rx_sa->next_pn_halves.lower) { + if (ctx->sa.update_pn) { netdev_err(ctx->netdev, "MACsec offload update RX sa %d PN isn't supported\n", assoc_num); @@ -1025,7 +981,8 @@ static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx) goto out; } - err = macsec_rx_sa_active_update(ctx, rx_sa, ctx_rx_sa->active); + err = macsec_rx_sa_active_update(ctx, rx_sa, ctx_rx_sa->active, + &rx_sc->sc_xarray_element->fs_id); out: mutex_unlock(&macsec->lock); @@ -1073,7 +1030,8 @@ static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx) goto out; } - mlx5e_macsec_cleanup_sa(macsec, rx_sa, false); + mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev, + rx_sc->sc_xarray_element->fs_id); mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id); kfree(rx_sa); rx_sc->rx_sa[assoc_num] = NULL; @@ -1154,7 +1112,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx, if (!rx_sa || !rx_sa->macsec_rule) continue; - mlx5e_macsec_cleanup_sa(macsec, rx_sa, false); + mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev, + rx_sc->sc_xarray_element->fs_id); } } @@ -1165,7 +1124,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx, continue; if (rx_sa->active) { - err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false); + err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false, + &rx_sc->sc_xarray_element->fs_id); if (err) goto out; } @@ -1218,7 +1178,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx) if (!tx_sa) continue; - mlx5e_macsec_cleanup_sa(macsec, tx_sa, true); + mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0); } for (i = 0; i < MACSEC_NUM_AN; ++i) { @@ -1227,7 +1187,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx) continue; if (tx_sa->assoc_num == 
tx_sc->encoding_sa && tx_sa->active) { - err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true); + err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL); if (err) goto out; } @@ -1265,7 +1225,7 @@ static int mlx5e_macsec_del_secy(struct macsec_context *ctx) if (!tx_sa) continue; - mlx5e_macsec_cleanup_sa(macsec, tx_sa, true); + mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0); mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id); kfree(tx_sa); macsec_device->tx_sa[i] = NULL; @@ -1273,7 +1233,7 @@ static int mlx5e_macsec_del_secy(struct macsec_context *ctx) list = &macsec_device->macsec_rx_sc_list_head; list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) - macsec_del_rxsc_ctx(macsec, rx_sc); + macsec_del_rxsc_ctx(macsec, rx_sc, ctx->secy->netdev); kfree(macsec_device->dev_addr); macsec_device->dev_addr = NULL; @@ -1647,50 +1607,6 @@ static void mlx5e_macsec_aso_cleanup(struct mlx5e_macsec_aso *aso, struct mlx5_c mlx5_core_dealloc_pd(mdev, aso->pdn); } -bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev) -{ - if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) & - MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD)) - return false; - - if (!MLX5_CAP_GEN(mdev, log_max_dek)) - return false; - - if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload)) - return false; - - if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) || - !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec)) - return false; - - if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) || - !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec)) - return false; - - if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) && - !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt)) - return false; - - if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) && - !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt)) - return false; - - return true; -} - -void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats) -{ - mlx5e_macsec_fs_get_stats_fill(macsec->macsec_fs, macsec_stats); -} - -struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec) -{ - if (!macsec) - return NULL; - - return &macsec->stats; -} - static const struct macsec_ops macsec_offload_ops = { .mdo_add_txsa = mlx5e_macsec_add_txsa, .mdo_upd_txsa = mlx5e_macsec_upd_txsa, @@ -1711,7 +1627,8 @@ bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb struct metadata_dst *md_dst = skb_metadata_dst(skb); u32 fs_id; - fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci); + fs_id = mlx5_macsec_fs_get_fs_id_from_hashtable(macsec->mdev->macsec_fs, + &md_dst->u.macsec_info.sci); if (!fs_id) goto err_out; @@ -1729,7 +1646,8 @@ void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec, struct metadata_dst *md_dst = skb_metadata_dst(skb); u32 fs_id; - fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci); + fs_id = mlx5_macsec_fs_get_fs_id_from_hashtable(macsec->mdev->macsec_fs, + &md_dst->u.macsec_info.sci); if (!fs_id) return; @@ -1782,7 +1700,7 @@ int mlx5e_macsec_init(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_macsec *macsec = NULL; - struct mlx5e_macsec_fs *macsec_fs; + struct mlx5_macsec_fs *macsec_fs; int err; if (!mlx5e_is_macsec_device(priv->mdev)) { @@ -1797,13 +1715,6 @@ int mlx5e_macsec_init(struct mlx5e_priv *priv) INIT_LIST_HEAD(&macsec->macsec_device_list_head); mutex_init(&macsec->lock); - err = 
rhashtable_init(&macsec->sci_hash, &rhash_sci); - if (err) { - mlx5_core_err(mdev, "MACsec offload: Failed to init SCI hash table, err=%d\n", - err); - goto err_hash; - } - err = mlx5e_macsec_aso_init(&macsec->aso, priv->mdev); if (err) { mlx5_core_err(mdev, "MACsec offload: Failed to init aso, err=%d\n", err); @@ -1822,13 +1733,13 @@ int mlx5e_macsec_init(struct mlx5e_priv *priv) macsec->mdev = mdev; - macsec_fs = mlx5e_macsec_fs_init(mdev, priv->netdev); + macsec_fs = mlx5_macsec_fs_init(mdev); if (!macsec_fs) { err = -ENOMEM; goto err_out; } - macsec->macsec_fs = macsec_fs; + mdev->macsec_fs = macsec_fs; macsec->nb.notifier_call = macsec_obj_change_event; mlx5_notifier_register(mdev, &macsec->nb); @@ -1842,8 +1753,6 @@ err_out: err_wq: mlx5e_macsec_aso_cleanup(&macsec->aso, priv->mdev); err_aso: - rhashtable_destroy(&macsec->sci_hash); -err_hash: kfree(macsec); priv->macsec = NULL; return err; @@ -1858,10 +1767,9 @@ void mlx5e_macsec_cleanup(struct mlx5e_priv *priv) return; mlx5_notifier_unregister(mdev, &macsec->nb); - mlx5e_macsec_fs_cleanup(macsec->macsec_fs); + mlx5_macsec_fs_cleanup(mdev->macsec_fs); destroy_workqueue(macsec->wq); mlx5e_macsec_aso_cleanup(&macsec->aso, mdev); - rhashtable_destroy(&macsec->sci_hash); mutex_destroy(&macsec->lock); kfree(macsec); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h index 347380a2cd9c..27df72e23106 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h @@ -4,32 +4,16 @@ #ifndef __MLX5_EN_ACCEL_MACSEC_H__ #define __MLX5_EN_ACCEL_MACSEC_H__ -#ifdef CONFIG_MLX5_EN_MACSEC +#ifdef CONFIG_MLX5_MACSEC #include <linux/mlx5/driver.h> #include <net/macsec.h> #include <net/dst_metadata.h> - -/* Bit31 - 30: MACsec marker, Bit15-0: MACsec id */ -#define MLX5_MACEC_RX_FS_ID_MAX USHRT_MAX /* Must be power of two */ -#define MLX5_MACSEC_RX_FS_ID_MASK MLX5_MACEC_RX_FS_ID_MAX -#define MLX5_MACSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x1) -#define MLX5_MACSEC_RX_METADAT_HANDLE(metadata) ((metadata) & MLX5_MACSEC_RX_FS_ID_MASK) +#include "lib/macsec_fs.h" struct mlx5e_priv; struct mlx5e_macsec; -struct mlx5e_macsec_stats { - u64 macsec_rx_pkts; - u64 macsec_rx_bytes; - u64 macsec_rx_pkts_drop; - u64 macsec_rx_bytes_drop; - u64 macsec_tx_pkts; - u64 macsec_tx_bytes; - u64 macsec_tx_pkts_drop; - u64 macsec_tx_bytes_drop; -}; - void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv); int mlx5e_macsec_init(struct mlx5e_priv *priv); void mlx5e_macsec_cleanup(struct mlx5e_priv *priv); @@ -52,9 +36,6 @@ static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe) void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, struct mlx5_cqe64 *cqe); -bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev); -void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats); -struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec); #else @@ -67,7 +48,6 @@ static inline void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, struct mlx5_cqe64 *cqe) {} -static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev) { return false; } -#endif /* CONFIG_MLX5_EN_MACSEC */ +#endif /* CONFIG_MLX5_MACSEC */ #endif /* __MLX5_ACCEL_EN_MACSEC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c deleted file mode 100644 index 414e28584881..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c +++ /dev/null @@ -1,1394 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB -/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ - -#include <net/macsec.h> -#include <linux/netdevice.h> -#include <linux/mlx5/qp.h> -#include <linux/if_vlan.h> -#include "fs_core.h" -#include "en/fs.h" -#include "en_accel/macsec_fs.h" -#include "mlx5_core.h" - -/* MACsec TX flow steering */ -#define CRYPTO_NUM_MAXSEC_FTE BIT(15) -#define CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE 1 - -#define TX_CRYPTO_TABLE_LEVEL 0 -#define TX_CRYPTO_TABLE_NUM_GROUPS 3 -#define TX_CRYPTO_TABLE_MKE_GROUP_SIZE 1 -#define TX_CRYPTO_TABLE_SA_GROUP_SIZE \ - (CRYPTO_NUM_MAXSEC_FTE - (TX_CRYPTO_TABLE_MKE_GROUP_SIZE + \ - CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE)) -#define TX_CHECK_TABLE_LEVEL 1 -#define TX_CHECK_TABLE_NUM_FTE 2 -#define RX_CRYPTO_TABLE_LEVEL 0 -#define RX_CHECK_TABLE_LEVEL 1 -#define RX_CHECK_TABLE_NUM_FTE 3 -#define RX_CRYPTO_TABLE_NUM_GROUPS 3 -#define RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE \ - ((CRYPTO_NUM_MAXSEC_FTE - CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE) / 2) -#define RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE \ - (CRYPTO_NUM_MAXSEC_FTE - RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE) -#define RX_NUM_OF_RULES_PER_SA 2 - -#define MLX5_MACSEC_TAG_LEN 8 /* SecTAG length with ethertype and without the optional SCI */ -#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK 0x23 -#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET 0x8 -#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET 0x5 -#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT (0x1 << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET) -#define MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI 0x8 -#define MLX5_SECTAG_HEADER_SIZE_WITH_SCI (MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI + MACSEC_SCI_LEN) - -/* MACsec RX flow steering */ -#define MLX5_ETH_WQE_FT_META_MACSEC_MASK 0x3E - -struct mlx5_sectag_header { - __be16 ethertype; - u8 tci_an; - u8 sl; - u32 pn; - u8 sci[MACSEC_SCI_LEN]; /* optional */ -} __packed; - -struct mlx5e_macsec_tx_rule { - struct mlx5_flow_handle *rule; - struct mlx5_pkt_reformat *pkt_reformat; - u32 fs_id; -}; - -struct mlx5e_macsec_tables { - struct mlx5e_flow_table ft_crypto; - struct mlx5_flow_handle *crypto_miss_rule; - - struct mlx5_flow_table *ft_check; - struct mlx5_flow_group *ft_check_group; - struct mlx5_fc *check_miss_rule_counter; - struct mlx5_flow_handle *check_miss_rule; - struct mlx5_fc *check_rule_counter; - - u32 refcnt; -}; - -struct mlx5e_macsec_tx { - struct mlx5_flow_handle *crypto_mke_rule; - struct mlx5_flow_handle *check_rule; - - struct ida tx_halloc; - - struct mlx5e_macsec_tables tables; -}; - -struct mlx5e_macsec_rx_rule { - struct mlx5_flow_handle *rule[RX_NUM_OF_RULES_PER_SA]; - struct mlx5_modify_hdr *meta_modhdr; -}; - -struct mlx5e_macsec_rx { - struct mlx5_flow_handle *check_rule[2]; - struct mlx5_pkt_reformat *check_rule_pkt_reformat[2]; - - struct mlx5e_macsec_tables tables; -}; - -union mlx5e_macsec_rule { - struct mlx5e_macsec_tx_rule tx_rule; - struct mlx5e_macsec_rx_rule rx_rule; -}; - -struct mlx5e_macsec_fs { - struct mlx5_core_dev *mdev; - struct net_device *netdev; - struct mlx5e_macsec_tx *tx_fs; - struct mlx5e_macsec_rx *rx_fs; -}; - -static void macsec_fs_tx_destroy(struct mlx5e_macsec_fs *macsec_fs) -{ - struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs; - struct mlx5e_macsec_tables *tx_tables; - - tx_tables = 
&tx_fs->tables; - - /* Tx check table */ - if (tx_fs->check_rule) { - mlx5_del_flow_rules(tx_fs->check_rule); - tx_fs->check_rule = NULL; - } - - if (tx_tables->check_miss_rule) { - mlx5_del_flow_rules(tx_tables->check_miss_rule); - tx_tables->check_miss_rule = NULL; - } - - if (tx_tables->ft_check_group) { - mlx5_destroy_flow_group(tx_tables->ft_check_group); - tx_tables->ft_check_group = NULL; - } - - if (tx_tables->ft_check) { - mlx5_destroy_flow_table(tx_tables->ft_check); - tx_tables->ft_check = NULL; - } - - /* Tx crypto table */ - if (tx_fs->crypto_mke_rule) { - mlx5_del_flow_rules(tx_fs->crypto_mke_rule); - tx_fs->crypto_mke_rule = NULL; - } - - if (tx_tables->crypto_miss_rule) { - mlx5_del_flow_rules(tx_tables->crypto_miss_rule); - tx_tables->crypto_miss_rule = NULL; - } - - mlx5e_destroy_flow_table(&tx_tables->ft_crypto); -} - -static int macsec_fs_tx_create_crypto_table_groups(struct mlx5e_flow_table *ft) -{ - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - int mclen = MLX5_ST_SZ_BYTES(fte_match_param); - int ix = 0; - u32 *in; - int err; - u8 *mc; - - ft->g = kcalloc(TX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL); - if (!ft->g) - return -ENOMEM; - in = kvzalloc(inlen, GFP_KERNEL); - - if (!in) { - kfree(ft->g); - ft->g = NULL; - return -ENOMEM; - } - - mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); - - /* Flow Group for MKE match */ - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); - - MLX5_SET_CFG(in, start_flow_index, ix); - ix += TX_CRYPTO_TABLE_MKE_GROUP_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - /* Flow Group for SA rules */ - memset(in, 0, inlen); - memset(mc, 0, mclen); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2); - MLX5_SET(fte_match_param, mc, misc_parameters_2.metadata_reg_a, - MLX5_ETH_WQE_FT_META_MACSEC_MASK); - - MLX5_SET_CFG(in, start_flow_index, ix); - ix += TX_CRYPTO_TABLE_SA_GROUP_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - /* Flow Group for l2 traps */ - memset(in, 0, inlen); - memset(mc, 0, mclen); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - kvfree(in); - return 0; - -err: - err = PTR_ERR(ft->g[ft->num_groups]); - ft->g[ft->num_groups] = NULL; - kvfree(in); - - return err; -} - -static struct mlx5_flow_table - *macsec_fs_auto_group_table_create(struct mlx5_flow_namespace *ns, int flags, - int level, int max_fte) -{ - struct mlx5_flow_table_attr ft_attr = {}; - struct mlx5_flow_table *fdb = NULL; - - /* reserve entry for the match all miss group and rule */ - ft_attr.autogroup.num_reserved_entries = 1; - ft_attr.autogroup.max_num_groups = 1; - ft_attr.prio = 0; - ft_attr.flags = flags; - ft_attr.level = level; - ft_attr.max_fte = max_fte; - - fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); - - return fdb; -} - -static int macsec_fs_tx_create(struct mlx5e_macsec_fs *macsec_fs) -{ - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs; - struct net_device *netdev = 
macsec_fs->netdev; - struct mlx5_flow_table_attr ft_attr = {}; - struct mlx5_flow_destination dest = {}; - struct mlx5e_macsec_tables *tx_tables; - struct mlx5_flow_act flow_act = {}; - struct mlx5e_flow_table *ft_crypto; - struct mlx5_flow_table *flow_table; - struct mlx5_flow_group *flow_group; - struct mlx5_flow_namespace *ns; - struct mlx5_flow_handle *rule; - struct mlx5_flow_spec *spec; - u32 *flow_group_in; - int err; - - ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_EGRESS_MACSEC); - if (!ns) - return -ENOMEM; - - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!spec) - return -ENOMEM; - - flow_group_in = kvzalloc(inlen, GFP_KERNEL); - if (!flow_group_in) { - err = -ENOMEM; - goto out_spec; - } - - tx_tables = &tx_fs->tables; - ft_crypto = &tx_tables->ft_crypto; - - /* Tx crypto table */ - ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; - ft_attr.level = TX_CRYPTO_TABLE_LEVEL; - ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE; - - flow_table = mlx5_create_flow_table(ns, &ft_attr); - if (IS_ERR(flow_table)) { - err = PTR_ERR(flow_table); - netdev_err(netdev, "Failed to create MACsec Tx crypto table err(%d)\n", err); - goto out_flow_group; - } - ft_crypto->t = flow_table; - - /* Tx crypto table groups */ - err = macsec_fs_tx_create_crypto_table_groups(ft_crypto); - if (err) { - netdev_err(netdev, - "Failed to create default flow group for MACsec Tx crypto table err(%d)\n", - err); - goto err; - } - - /* Tx crypto table MKE rule - MKE packets shouldn't be offloaded */ - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype); - MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_PAE); - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; - - rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, NULL, 0); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - netdev_err(netdev, "Failed to add MACsec TX MKE rule, err=%d\n", err); - goto err; - } - tx_fs->crypto_mke_rule = rule; - - /* Tx crypto table Default miss rule */ - memset(&flow_act, 0, sizeof(flow_act)); - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; - rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - netdev_err(netdev, "Failed to add MACsec Tx table default miss rule %d\n", err); - goto err; - } - tx_tables->crypto_miss_rule = rule; - - /* Tx check table */ - flow_table = macsec_fs_auto_group_table_create(ns, 0, TX_CHECK_TABLE_LEVEL, - TX_CHECK_TABLE_NUM_FTE); - if (IS_ERR(flow_table)) { - err = PTR_ERR(flow_table); - netdev_err(netdev, "fail to create MACsec TX check table, err(%d)\n", err); - goto err; - } - tx_tables->ft_check = flow_table; - - /* Tx check table Default miss group/rule */ - memset(flow_group_in, 0, inlen); - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1); - flow_group = mlx5_create_flow_group(tx_tables->ft_check, flow_group_in); - if (IS_ERR(flow_group)) { - err = PTR_ERR(flow_group); - netdev_err(netdev, - "Failed to create default flow group for MACsec Tx crypto table err(%d)\n", - err); - goto err; - } - tx_tables->ft_check_group = flow_group; - - /* Tx check table default drop rule */ - memset(&dest, 0, sizeof(struct mlx5_flow_destination)); - memset(&flow_act, 0, sizeof(flow_act)); - dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter_id = 
mlx5_fc_id(tx_tables->check_miss_rule_counter); - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; - rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - netdev_err(netdev, "Failed to added MACsec tx check drop rule, err(%d)\n", err); - goto err; - } - tx_tables->check_miss_rule = rule; - - /* Tx check table rule */ - memset(spec, 0, sizeof(struct mlx5_flow_spec)); - memset(&dest, 0, sizeof(struct mlx5_flow_destination)); - memset(&flow_act, 0, sizeof(flow_act)); - - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4); - MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0); - spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; - - flow_act.flags = FLOW_ACT_NO_APPEND; - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT; - dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter_id = mlx5_fc_id(tx_tables->check_rule_counter); - rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, &dest, 1); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - netdev_err(netdev, "Failed to add MACsec check rule, err=%d\n", err); - goto err; - } - tx_fs->check_rule = rule; - - goto out_flow_group; - -err: - macsec_fs_tx_destroy(macsec_fs); -out_flow_group: - kvfree(flow_group_in); -out_spec: - kvfree(spec); - return err; -} - -static int macsec_fs_tx_ft_get(struct mlx5e_macsec_fs *macsec_fs) -{ - struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs; - struct mlx5e_macsec_tables *tx_tables; - int err = 0; - - tx_tables = &tx_fs->tables; - if (tx_tables->refcnt) - goto out; - - err = macsec_fs_tx_create(macsec_fs); - if (err) - return err; - -out: - tx_tables->refcnt++; - return err; -} - -static void macsec_fs_tx_ft_put(struct mlx5e_macsec_fs *macsec_fs) -{ - struct mlx5e_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables; - - if (--tx_tables->refcnt) - return; - - macsec_fs_tx_destroy(macsec_fs); -} - -static int macsec_fs_tx_setup_fte(struct mlx5e_macsec_fs *macsec_fs, - struct mlx5_flow_spec *spec, - struct mlx5_flow_act *flow_act, - u32 macsec_obj_id, - u32 *fs_id) -{ - struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs; - int err = 0; - u32 id; - - err = ida_alloc_range(&tx_fs->tx_halloc, 1, - MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES, - GFP_KERNEL); - if (err < 0) - return err; - - id = err; - spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; - - /* Metadata match */ - MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a, - MLX5_ETH_WQE_FT_META_MACSEC_MASK); - MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a, - MLX5_ETH_WQE_FT_META_MACSEC | id << 2); - - *fs_id = id; - flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC; - flow_act->crypto.obj_id = macsec_obj_id; - - mlx5_core_dbg(macsec_fs->mdev, "Tx fte: macsec obj_id %u, fs_id %u\n", macsec_obj_id, id); - return 0; -} - -static void macsec_fs_tx_create_sectag_header(const struct macsec_context *ctx, - char *reformatbf, - size_t *reformat_size) -{ - const struct macsec_secy *secy = ctx->secy; - bool sci_present = macsec_send_sci(secy); - struct mlx5_sectag_header sectag = {}; - const struct macsec_tx_sc *tx_sc; - - tx_sc = &secy->tx_sc; - sectag.ethertype = htons(ETH_P_MACSEC); - - if (sci_present) { - sectag.tci_an |= MACSEC_TCI_SC; - memcpy(&sectag.sci, &secy->sci, - sizeof(sectag.sci)); - } else { - if (tx_sc->end_station) - sectag.tci_an |=
MACSEC_TCI_ES; - if (tx_sc->scb) - sectag.tci_an |= MACSEC_TCI_SCB; - } - - /* With GCM, C/E clear for !encrypt, both set for encrypt */ - if (tx_sc->encrypt) - sectag.tci_an |= MACSEC_TCI_CONFID; - else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) - sectag.tci_an |= MACSEC_TCI_C; - - sectag.tci_an |= tx_sc->encoding_sa; - - *reformat_size = MLX5_MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0); - - memcpy(reformatbf, &sectag, *reformat_size); -} - -static void macsec_fs_tx_del_rule(struct mlx5e_macsec_fs *macsec_fs, - struct mlx5e_macsec_tx_rule *tx_rule) -{ - if (tx_rule->rule) { - mlx5_del_flow_rules(tx_rule->rule); - tx_rule->rule = NULL; - } - - if (tx_rule->pkt_reformat) { - mlx5_packet_reformat_dealloc(macsec_fs->mdev, tx_rule->pkt_reformat); - tx_rule->pkt_reformat = NULL; - } - - if (tx_rule->fs_id) { - ida_free(&macsec_fs->tx_fs->tx_halloc, tx_rule->fs_id); - tx_rule->fs_id = 0; - } - - kfree(tx_rule); - - macsec_fs_tx_ft_put(macsec_fs); -} - -#define MLX5_REFORMAT_PARAM_ADD_MACSEC_OFFSET_4_BYTES 1 - -static union mlx5e_macsec_rule * -macsec_fs_tx_add_rule(struct mlx5e_macsec_fs *macsec_fs, - const struct macsec_context *macsec_ctx, - struct mlx5_macsec_rule_attrs *attrs, - u32 *sa_fs_id) -{ - char reformatbf[MLX5_MACSEC_TAG_LEN + MACSEC_SCI_LEN]; - struct mlx5_pkt_reformat_params reformat_params = {}; - struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs; - struct net_device *netdev = macsec_fs->netdev; - union mlx5e_macsec_rule *macsec_rule = NULL; - struct mlx5_flow_destination dest = {}; - struct mlx5e_macsec_tables *tx_tables; - struct mlx5e_macsec_tx_rule *tx_rule; - struct mlx5_flow_act flow_act = {}; - struct mlx5_flow_handle *rule; - struct mlx5_flow_spec *spec; - size_t reformat_size; - int err = 0; - u32 fs_id; - - tx_tables = &tx_fs->tables; - - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!spec) - return NULL; - - err = macsec_fs_tx_ft_get(macsec_fs); - if (err) - goto out_spec; - - macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL); - if (!macsec_rule) { - macsec_fs_tx_ft_put(macsec_fs); - goto out_spec; - } - - tx_rule = &macsec_rule->tx_rule; - - /* Tx crypto table crypto rule */ - macsec_fs_tx_create_sectag_header(macsec_ctx, reformatbf, &reformat_size); - - reformat_params.type = MLX5_REFORMAT_TYPE_ADD_MACSEC; - reformat_params.size = reformat_size; - reformat_params.data = reformatbf; - - if (is_vlan_dev(macsec_ctx->netdev)) - reformat_params.param_0 = MLX5_REFORMAT_PARAM_ADD_MACSEC_OFFSET_4_BYTES; - - flow_act.pkt_reformat = mlx5_packet_reformat_alloc(macsec_fs->mdev, - &reformat_params, - MLX5_FLOW_NAMESPACE_EGRESS_MACSEC); - if (IS_ERR(flow_act.pkt_reformat)) { - err = PTR_ERR(flow_act.pkt_reformat); - netdev_err(netdev, "Failed to allocate MACsec Tx reformat context err=%d\n", err); - goto err; - } - tx_rule->pkt_reformat = flow_act.pkt_reformat; - - err = macsec_fs_tx_setup_fte(macsec_fs, spec, &flow_act, attrs->macsec_obj_id, &fs_id); - if (err) { - netdev_err(netdev, - "Failed to add packet reformat for MACsec TX crypto rule, err=%d\n", - err); - goto err; - } - - tx_rule->fs_id = fs_id; - *sa_fs_id = fs_id; - - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT | - MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; - dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = tx_tables->ft_check; - rule = mlx5_add_flow_rules(tx_tables->ft_crypto.t, spec, &flow_act, &dest, 1); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - netdev_err(netdev, "Failed to add MACsec TX crypto rule, err=%d\n", err); - goto err; -
} - tx_rule->rule = rule; - - goto out_spec; - -err: - macsec_fs_tx_del_rule(macsec_fs, tx_rule); - macsec_rule = NULL; -out_spec: - kvfree(spec); - - return macsec_rule; -} - -static void macsec_fs_tx_cleanup(struct mlx5e_macsec_fs *macsec_fs) -{ - struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs; - struct mlx5_core_dev *mdev = macsec_fs->mdev; - struct mlx5e_macsec_tables *tx_tables; - - if (!tx_fs) - return; - - tx_tables = &tx_fs->tables; - if (tx_tables->refcnt) { - netdev_err(macsec_fs->netdev, - "Can't destroy MACsec offload tx_fs, refcnt(%u) isn't 0\n", - tx_tables->refcnt); - return; - } - - ida_destroy(&tx_fs->tx_halloc); - - if (tx_tables->check_miss_rule_counter) { - mlx5_fc_destroy(mdev, tx_tables->check_miss_rule_counter); - tx_tables->check_miss_rule_counter = NULL; - } - - if (tx_tables->check_rule_counter) { - mlx5_fc_destroy(mdev, tx_tables->check_rule_counter); - tx_tables->check_rule_counter = NULL; - } - - kfree(tx_fs); - macsec_fs->tx_fs = NULL; -} - -static int macsec_fs_tx_init(struct mlx5e_macsec_fs *macsec_fs) -{ - struct net_device *netdev = macsec_fs->netdev; - struct mlx5_core_dev *mdev = macsec_fs->mdev; - struct mlx5e_macsec_tables *tx_tables; - struct mlx5e_macsec_tx *tx_fs; - struct mlx5_fc *flow_counter; - int err; - - tx_fs = kzalloc(sizeof(*tx_fs), GFP_KERNEL); - if (!tx_fs) - return -ENOMEM; - - tx_tables = &tx_fs->tables; - - flow_counter = mlx5_fc_create(mdev, false); - if (IS_ERR(flow_counter)) { - err = PTR_ERR(flow_counter); - netdev_err(netdev, - "Failed to create MACsec Tx encrypt flow counter, err(%d)\n", - err); - goto err_encrypt_counter; - } - tx_tables->check_rule_counter = flow_counter; - - flow_counter = mlx5_fc_create(mdev, false); - if (IS_ERR(flow_counter)) { - err = PTR_ERR(flow_counter); - netdev_err(netdev, - "Failed to create MACsec Tx drop flow counter, err(%d)\n", - err); - goto err_drop_counter; - } - tx_tables->check_miss_rule_counter = flow_counter; - - ida_init(&tx_fs->tx_halloc); - - macsec_fs->tx_fs = tx_fs; - - return 0; - -err_drop_counter: - mlx5_fc_destroy(mdev, tx_tables->check_rule_counter); - tx_tables->check_rule_counter = NULL; - -err_encrypt_counter: - kfree(tx_fs); - macsec_fs->tx_fs = NULL; - - return err; -} - -static void macsec_fs_rx_destroy(struct mlx5e_macsec_fs *macsec_fs) -{ - struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs; - struct mlx5e_macsec_tables *rx_tables; - int i; - - /* Rx check table */ - for (i = 1; i >= 0; --i) { - if (rx_fs->check_rule[i]) { - mlx5_del_flow_rules(rx_fs->check_rule[i]); - rx_fs->check_rule[i] = NULL; - } - - if (rx_fs->check_rule_pkt_reformat[i]) { - mlx5_packet_reformat_dealloc(macsec_fs->mdev, - rx_fs->check_rule_pkt_reformat[i]); - rx_fs->check_rule_pkt_reformat[i] = NULL; - } - } - - rx_tables = &rx_fs->tables; - - if (rx_tables->check_miss_rule) { - mlx5_del_flow_rules(rx_tables->check_miss_rule); - rx_tables->check_miss_rule = NULL; - } - - if (rx_tables->ft_check_group) { - mlx5_destroy_flow_group(rx_tables->ft_check_group); - rx_tables->ft_check_group = NULL; - } - - if (rx_tables->ft_check) { - mlx5_destroy_flow_table(rx_tables->ft_check); - rx_tables->ft_check = NULL; - } - - /* Rx crypto table */ - if (rx_tables->crypto_miss_rule) { - mlx5_del_flow_rules(rx_tables->crypto_miss_rule); - rx_tables->crypto_miss_rule = NULL; - } - - mlx5e_destroy_flow_table(&rx_tables->ft_crypto); -} - -static int macsec_fs_rx_create_crypto_table_groups(struct mlx5e_flow_table *ft) -{ - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - int mclen = 
MLX5_ST_SZ_BYTES(fte_match_param); - int ix = 0; - u32 *in; - int err; - u8 *mc; - - ft->g = kcalloc(RX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL); - if (!ft->g) - return -ENOMEM; - - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) { - kfree(ft->g); - return -ENOMEM; - } - - mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); - - /* Flow group for SA rule with SCI */ - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS | - MLX5_MATCH_MISC_PARAMETERS_5); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); - - MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0, - MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << - MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); - MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_2); - MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_3); - - MLX5_SET_CFG(in, start_flow_index, ix); - ix += RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - /* Flow group for SA rule without SCI */ - memset(in, 0, inlen); - memset(mc, 0, mclen); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS | - MLX5_MATCH_MISC_PARAMETERS_5); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_47_16); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_15_0); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); - - MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0, - MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); - - MLX5_SET_CFG(in, start_flow_index, ix); - ix += RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - /* Flow Group for l2 traps */ - memset(in, 0, inlen); - memset(mc, 0, mclen); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - kvfree(in); - return 0; - -err: - err = PTR_ERR(ft->g[ft->num_groups]); - ft->g[ft->num_groups] = NULL; - kvfree(in); - - return err; -} - -static int macsec_fs_rx_create_check_decap_rule(struct mlx5e_macsec_fs *macsec_fs, - struct mlx5_flow_destination *dest, - struct mlx5_flow_act *flow_act, - struct mlx5_flow_spec *spec, - int reformat_param_size) -{ - int rule_index = (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI) ? 
0 : 1; - u8 mlx5_reformat_buf[MLX5_SECTAG_HEADER_SIZE_WITH_SCI]; - struct mlx5_pkt_reformat_params reformat_params = {}; - struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs; - struct net_device *netdev = macsec_fs->netdev; - struct mlx5e_macsec_tables *rx_tables; - struct mlx5_flow_handle *rule; - int err = 0; - - rx_tables = &rx_fs->tables; - - /* Rx check table decap 16B rule */ - memset(dest, 0, sizeof(*dest)); - memset(flow_act, 0, sizeof(*flow_act)); - memset(spec, 0, sizeof(*spec)); - - reformat_params.type = MLX5_REFORMAT_TYPE_DEL_MACSEC; - reformat_params.size = reformat_param_size; - reformat_params.data = mlx5_reformat_buf; - flow_act->pkt_reformat = mlx5_packet_reformat_alloc(macsec_fs->mdev, - &reformat_params, - MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC); - if (IS_ERR(flow_act->pkt_reformat)) { - err = PTR_ERR(flow_act->pkt_reformat); - netdev_err(netdev, "Failed to allocate MACsec Rx reformat context err=%d\n", err); - return err; - } - rx_fs->check_rule_pkt_reformat[rule_index] = flow_act->pkt_reformat; - - spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; - /* MACsec syndrome match */ - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.macsec_syndrome); - MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.macsec_syndrome, 0); - /* ASO return reg syndrome match */ - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4); - MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0); - - spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5; - /* Sectag TCI SC present bit*/ - MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0, - MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); - - if (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI) - MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0, - MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT << - MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); - - flow_act->flags = FLOW_ACT_NO_APPEND; - flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO | - MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT | - MLX5_FLOW_CONTEXT_ACTION_COUNT; - dest->type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest->counter_id = mlx5_fc_id(rx_tables->check_rule_counter); - rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, dest, 1); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - netdev_err(netdev, "Failed to add MACsec Rx check rule, err=%d\n", err); - return err; - } - - rx_fs->check_rule[rule_index] = rule; - - return 0; -} - -static int macsec_fs_rx_create(struct mlx5e_macsec_fs *macsec_fs) -{ - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs; - struct net_device *netdev = macsec_fs->netdev; - struct mlx5_flow_table_attr ft_attr = {}; - struct mlx5_flow_destination dest = {}; - struct mlx5e_macsec_tables *rx_tables; - struct mlx5e_flow_table *ft_crypto; - struct mlx5_flow_table *flow_table; - struct mlx5_flow_group *flow_group; - struct mlx5_flow_act flow_act = {}; - struct mlx5_flow_namespace *ns; - struct mlx5_flow_handle *rule; - struct mlx5_flow_spec *spec; - u32 *flow_group_in; - int err; - - ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC); - if (!ns) - return -ENOMEM; - - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!spec) - return -ENOMEM; - - flow_group_in = kvzalloc(inlen, GFP_KERNEL); - if (!flow_group_in) { - err = -ENOMEM; - goto free_spec; - } - - rx_tables = 
&rx_fs->tables; - ft_crypto = &rx_tables->ft_crypto; - - /* Rx crypto table */ - ft_attr.level = RX_CRYPTO_TABLE_LEVEL; - ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE; - - flow_table = mlx5_create_flow_table(ns, &ft_attr); - if (IS_ERR(flow_table)) { - err = PTR_ERR(flow_table); - netdev_err(netdev, "Failed to create MACsec Rx crypto table err(%d)\n", err); - goto out_flow_group; - } - ft_crypto->t = flow_table; - - /* Rx crypto table groups */ - err = macsec_fs_rx_create_crypto_table_groups(ft_crypto); - if (err) { - netdev_err(netdev, - "Failed to create default flow group for MACsec Tx crypto table err(%d)\n", - err); - goto err; - } - - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; - rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - netdev_err(netdev, - "Failed to add MACsec Rx crypto table default miss rule %d\n", - err); - goto err; - } - rx_tables->crypto_miss_rule = rule; - - /* Rx check table */ - flow_table = macsec_fs_auto_group_table_create(ns, - MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT, - RX_CHECK_TABLE_LEVEL, - RX_CHECK_TABLE_NUM_FTE); - if (IS_ERR(flow_table)) { - err = PTR_ERR(flow_table); - netdev_err(netdev, "fail to create MACsec RX check table, err(%d)\n", err); - goto err; - } - rx_tables->ft_check = flow_table; - - /* Rx check table Default miss group/rule */ - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1); - flow_group = mlx5_create_flow_group(rx_tables->ft_check, flow_group_in); - if (IS_ERR(flow_group)) { - err = PTR_ERR(flow_group); - netdev_err(netdev, - "Failed to create default flow group for MACsec Rx check table err(%d)\n", - err); - goto err; - } - rx_tables->ft_check_group = flow_group; - - /* Rx check table default drop rule */ - memset(&flow_act, 0, sizeof(flow_act)); - - dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter_id = mlx5_fc_id(rx_tables->check_miss_rule_counter); - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; - rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - netdev_err(netdev, "Failed to added MACsec Rx check drop rule, err(%d)\n", err); - goto err; - } - rx_tables->check_miss_rule = rule; - - /* Rx check table decap rules */ - err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec, - MLX5_SECTAG_HEADER_SIZE_WITH_SCI); - if (err) - goto err; - - err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec, - MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI); - if (err) - goto err; - - goto out_flow_group; - -err: - macsec_fs_rx_destroy(macsec_fs); -out_flow_group: - kvfree(flow_group_in); -free_spec: - kvfree(spec); - return err; -} - -static int macsec_fs_rx_ft_get(struct mlx5e_macsec_fs *macsec_fs) -{ - struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables; - int err = 0; - - if (rx_tables->refcnt) - goto out; - - err = macsec_fs_rx_create(macsec_fs); - if (err) - return err; - -out: - rx_tables->refcnt++; - return err; -} - -static void macsec_fs_rx_ft_put(struct mlx5e_macsec_fs *macsec_fs) -{ - struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables; - - if (--rx_tables->refcnt) - return; - - macsec_fs_rx_destroy(macsec_fs); -} - -static void macsec_fs_rx_del_rule(struct mlx5e_macsec_fs *macsec_fs, - struct mlx5e_macsec_rx_rule *rx_rule) -{ - int i; - - for (i = 0; i < 
RX_NUM_OF_RULES_PER_SA; ++i) {
-		if (rx_rule->rule[i]) {
-			mlx5_del_flow_rules(rx_rule->rule[i]);
-			rx_rule->rule[i] = NULL;
-		}
-	}
-
-	if (rx_rule->meta_modhdr) {
-		mlx5_modify_header_dealloc(macsec_fs->mdev, rx_rule->meta_modhdr);
-		rx_rule->meta_modhdr = NULL;
-	}
-
-	kfree(rx_rule);
-
-	macsec_fs_rx_ft_put(macsec_fs);
-}
-
-static void macsec_fs_rx_setup_fte(struct mlx5_flow_spec *spec,
-				   struct mlx5_flow_act *flow_act,
-				   struct mlx5_macsec_rule_attrs *attrs,
-				   bool sci_present)
-{
-	u8 tci_an = (sci_present << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET) | attrs->assoc_num;
-	struct mlx5_flow_act_crypto_params *crypto_params = &flow_act->crypto;
-	__be32 *sci_p = (__be32 *)(&attrs->sci);
-
-	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-
-	/* MACsec ethertype */
-	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
-	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_MACSEC);
-
-	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
-
-	/* Sectag AN + TCI SC present bit*/
-	MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0,
-		 MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
-	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0,
-		 tci_an << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
-
-	if (sci_present) {
-		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
-				 misc_parameters_5.macsec_tag_2);
-		MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_2,
-			 be32_to_cpu(sci_p[0]));
-
-		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
-				 misc_parameters_5.macsec_tag_3);
-		MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_3,
-			 be32_to_cpu(sci_p[1]));
-	} else {
-		/* When SCI isn't present in the Sectag, need to match the source */
-		/* MAC address only if the SCI contains the default MACsec PORT */
-		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
-		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
-		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.smac_47_16),
-		       sci_p, ETH_ALEN);
-	}
-
-	crypto_params->type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
-	crypto_params->obj_id = attrs->macsec_obj_id;
-}
-
-static union mlx5e_macsec_rule *
-macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
-		      struct mlx5_macsec_rule_attrs *attrs,
-		      u32 fs_id)
-{
-	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
-	struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
-	struct net_device *netdev = macsec_fs->netdev;
-	union mlx5e_macsec_rule *macsec_rule = NULL;
-	struct mlx5_modify_hdr *modify_hdr = NULL;
-	struct mlx5_flow_destination dest = {};
-	struct mlx5e_macsec_tables *rx_tables;
-	struct mlx5e_macsec_rx_rule *rx_rule;
-	struct mlx5_flow_act flow_act = {};
-	struct mlx5e_flow_table *ft_crypto;
-	struct mlx5_flow_handle *rule;
-	struct mlx5_flow_spec *spec;
-	int err = 0;
-
-	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
-	if (!spec)
-		return NULL;
-
-	err = macsec_fs_rx_ft_get(macsec_fs);
-	if (err)
-		goto out_spec;
-
-	macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL);
-	if (!macsec_rule) {
-		macsec_fs_rx_ft_put(macsec_fs);
-		goto out_spec;
-	}
-
-	rx_rule = &macsec_rule->rx_rule;
-	rx_tables = &rx_fs->tables;
-	ft_crypto = &rx_tables->ft_crypto;
-
-	/* Set bit[31 - 30] macsec marker - 0x01 */
-	/* Set bit[15-0] fs id */
-	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
-	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
-	MLX5_SET(set_action_in, action, data, MLX5_MACSEC_RX_METADAT_HANDLE(fs_id) | BIT(30));
-	MLX5_SET(set_action_in, action, offset, 0);
-	MLX5_SET(set_action_in, action, length, 32);
-
-	modify_hdr = mlx5_modify_header_alloc(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
-					      1, action);
-	if (IS_ERR(modify_hdr)) {
-		err = PTR_ERR(modify_hdr);
-		netdev_err(netdev, "fail to alloc MACsec set modify_header_id err=%d\n", err);
-		modify_hdr = NULL;
-		goto err;
-	}
-	rx_rule->meta_modhdr = modify_hdr;
-
-	/* Rx crypto table with SCI rule */
-	macsec_fs_rx_setup_fte(spec, &flow_act, attrs, true);
-
-	flow_act.modify_hdr = modify_hdr;
-	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
-			  MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
-			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
-
-	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-	dest.ft = rx_tables->ft_check;
-	rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1);
-	if (IS_ERR(rule)) {
-		err = PTR_ERR(rule);
-		netdev_err(netdev,
-			   "Failed to add SA with SCI rule to Rx crypto rule, err=%d\n",
-			   err);
-		goto err;
-	}
-	rx_rule->rule[0] = rule;
-
-	/* Rx crypto table without SCI rule */
-	if ((cpu_to_be64((__force u64)attrs->sci) & 0xFFFF) == ntohs(MACSEC_PORT_ES)) {
-		memset(spec, 0, sizeof(struct mlx5_flow_spec));
-		memset(&dest, 0, sizeof(struct mlx5_flow_destination));
-		memset(&flow_act, 0, sizeof(flow_act));
-
-		macsec_fs_rx_setup_fte(spec, &flow_act, attrs, false);
-
-		flow_act.modify_hdr = modify_hdr;
-		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
-				  MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
-				  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
-
-		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-		dest.ft = rx_tables->ft_check;
-		rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1);
-		if (IS_ERR(rule)) {
-			err = PTR_ERR(rule);
-			netdev_err(netdev,
-				   "Failed to add SA without SCI rule to Rx crypto rule, err=%d\n",
-				   err);
-			goto err;
-		}
-		rx_rule->rule[1] = rule;
-	}
-
-	kvfree(spec);
-	return macsec_rule;
-
-err:
-	macsec_fs_rx_del_rule(macsec_fs, rx_rule);
-	macsec_rule = NULL;
-out_spec:
-	kvfree(spec);
-	return macsec_rule;
-}
-
-static int macsec_fs_rx_init(struct mlx5e_macsec_fs *macsec_fs)
-{
-	struct net_device *netdev = macsec_fs->netdev;
-	struct mlx5_core_dev *mdev = macsec_fs->mdev;
-	struct mlx5e_macsec_tables *rx_tables;
-	struct mlx5e_macsec_rx *rx_fs;
-	struct mlx5_fc *flow_counter;
-	int err;
-
-	rx_fs = kzalloc(sizeof(*rx_fs), GFP_KERNEL);
-	if (!rx_fs)
-		return -ENOMEM;
-
-	flow_counter = mlx5_fc_create(mdev, false);
-	if (IS_ERR(flow_counter)) {
-		err = PTR_ERR(flow_counter);
-		netdev_err(netdev,
-			   "Failed to create MACsec Rx encrypt flow counter, err(%d)\n",
-			   err);
-		goto err_encrypt_counter;
-	}
-
-	rx_tables = &rx_fs->tables;
-	rx_tables->check_rule_counter = flow_counter;
-
-	flow_counter = mlx5_fc_create(mdev, false);
-	if (IS_ERR(flow_counter)) {
-		err = PTR_ERR(flow_counter);
-		netdev_err(netdev,
-			   "Failed to create MACsec Rx drop flow counter, err(%d)\n",
-			   err);
-		goto err_drop_counter;
-	}
-	rx_tables->check_miss_rule_counter = flow_counter;
-
-	macsec_fs->rx_fs = rx_fs;
-
-	return 0;
-
-err_drop_counter:
-	mlx5_fc_destroy(mdev, rx_tables->check_rule_counter);
-	rx_tables->check_rule_counter = NULL;
-
-err_encrypt_counter:
-	kfree(rx_fs);
-	macsec_fs->rx_fs = NULL;
-
-	return err;
-}
-
-static void macsec_fs_rx_cleanup(struct mlx5e_macsec_fs *macsec_fs)
-{
-	struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
-	struct mlx5_core_dev *mdev = macsec_fs->mdev;
-	struct mlx5e_macsec_tables *rx_tables;
-
-	if (!rx_fs)
-		return;
-
-	rx_tables = &rx_fs->tables;
-
-	if (rx_tables->refcnt) {
-		netdev_err(macsec_fs->netdev,
-			   "Can't destroy MACsec offload rx_fs, refcnt(%u) isn't 0\n",
-			   rx_tables->refcnt);
-		return;
-	}
-
-	if (rx_tables->check_miss_rule_counter) {
-		mlx5_fc_destroy(mdev, rx_tables->check_miss_rule_counter);
-		rx_tables->check_miss_rule_counter = NULL;
-	}
-
-	if (rx_tables->check_rule_counter) {
-		mlx5_fc_destroy(mdev, rx_tables->check_rule_counter);
-		rx_tables->check_rule_counter = NULL;
-	}
-
-	kfree(rx_fs);
-	macsec_fs->rx_fs = NULL;
-}
-
-void mlx5e_macsec_fs_get_stats_fill(struct mlx5e_macsec_fs *macsec_fs, void *macsec_stats)
-{
-	struct mlx5e_macsec_stats *stats = (struct mlx5e_macsec_stats *)macsec_stats;
-	struct mlx5e_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables;
-	struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
-	struct mlx5_core_dev *mdev = macsec_fs->mdev;
-
-	if (tx_tables->check_rule_counter)
-		mlx5_fc_query(mdev, tx_tables->check_rule_counter,
-			      &stats->macsec_tx_pkts, &stats->macsec_tx_bytes);
-
-	if (tx_tables->check_miss_rule_counter)
-		mlx5_fc_query(mdev, tx_tables->check_miss_rule_counter,
-			      &stats->macsec_tx_pkts_drop, &stats->macsec_tx_bytes_drop);
-
-	if (rx_tables->check_rule_counter)
-		mlx5_fc_query(mdev, rx_tables->check_rule_counter,
-			      &stats->macsec_rx_pkts, &stats->macsec_rx_bytes);
-
-	if (rx_tables->check_miss_rule_counter)
-		mlx5_fc_query(mdev, rx_tables->check_miss_rule_counter,
-			      &stats->macsec_rx_pkts_drop, &stats->macsec_rx_bytes_drop);
-}
-
-union mlx5e_macsec_rule *
-mlx5e_macsec_fs_add_rule(struct mlx5e_macsec_fs *macsec_fs,
-			 const struct macsec_context *macsec_ctx,
-			 struct mlx5_macsec_rule_attrs *attrs,
-			 u32 *sa_fs_id)
-{
-	return (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ?
-		macsec_fs_tx_add_rule(macsec_fs, macsec_ctx, attrs, sa_fs_id) :
-		macsec_fs_rx_add_rule(macsec_fs, attrs, *sa_fs_id);
-}
-
-void mlx5e_macsec_fs_del_rule(struct mlx5e_macsec_fs *macsec_fs,
-			      union mlx5e_macsec_rule *macsec_rule,
-			      int action)
-{
-	(action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ?
-		macsec_fs_tx_del_rule(macsec_fs, &macsec_rule->tx_rule) :
-		macsec_fs_rx_del_rule(macsec_fs, &macsec_rule->rx_rule);
-}
-
-void mlx5e_macsec_fs_cleanup(struct mlx5e_macsec_fs *macsec_fs)
-{
-	macsec_fs_rx_cleanup(macsec_fs);
-	macsec_fs_tx_cleanup(macsec_fs);
-	kfree(macsec_fs);
-}
-
-struct mlx5e_macsec_fs *
-mlx5e_macsec_fs_init(struct mlx5_core_dev *mdev,
-		     struct net_device *netdev)
-{
-	struct mlx5e_macsec_fs *macsec_fs;
-	int err;
-
-	macsec_fs = kzalloc(sizeof(*macsec_fs), GFP_KERNEL);
-	if (!macsec_fs)
-		return NULL;
-
-	macsec_fs->mdev = mdev;
-	macsec_fs->netdev = netdev;
-
-	err = macsec_fs_tx_init(macsec_fs);
-	if (err) {
-		netdev_err(netdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err);
-		goto err;
-	}
-
-	err = macsec_fs_rx_init(macsec_fs);
-	if (err) {
-		netdev_err(netdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err);
-		goto tx_cleanup;
-	}
-
-	return macsec_fs;
-
-tx_cleanup:
-	macsec_fs_tx_cleanup(macsec_fs);
-err:
-	kfree(macsec_fs);
-	return NULL;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h
deleted file mode 100644
index b429648d4ee7..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
-
-#ifndef __MLX5_MACSEC_STEERING_H__
-#define __MLX5_MACSEC_STEERING_H__
-
-#ifdef CONFIG_MLX5_EN_MACSEC
-
-#include "en_accel/macsec.h"
-
-#define MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES 16
-
-struct mlx5e_macsec_fs;
-union mlx5e_macsec_rule;
-
-struct mlx5_macsec_rule_attrs {
-	sci_t sci;
-	u32 macsec_obj_id;
-	u8 assoc_num;
-	int action;
-};
-
-enum mlx5_macsec_action {
-	MLX5_ACCEL_MACSEC_ACTION_ENCRYPT,
-	MLX5_ACCEL_MACSEC_ACTION_DECRYPT,
-};
-
-void mlx5e_macsec_fs_cleanup(struct mlx5e_macsec_fs *macsec_fs);
-
-struct mlx5e_macsec_fs *
-mlx5e_macsec_fs_init(struct mlx5_core_dev *mdev, struct net_device *netdev);
-
-union mlx5e_macsec_rule *
-mlx5e_macsec_fs_add_rule(struct mlx5e_macsec_fs *macsec_fs,
-			 const struct macsec_context *ctx,
-			 struct mlx5_macsec_rule_attrs *attrs,
-			 u32 *sa_fs_id);
-
-void mlx5e_macsec_fs_del_rule(struct mlx5e_macsec_fs *macsec_fs,
-			      union mlx5e_macsec_rule *macsec_rule,
-			      int action);
-
-void mlx5e_macsec_fs_get_stats_fill(struct mlx5e_macsec_fs *macsec_fs, void *macsec_stats);
-
-#endif
-
-#endif /* __MLX5_MACSEC_STEERING_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
index e50a2e3f3d18..4559ee16a11a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
@@ -8,14 +8,14 @@
 #include "en_accel/macsec.h"
 
 static const struct counter_desc mlx5e_macsec_hw_stats_desc[] = {
-	{ MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_pkts) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_bytes) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_pkts_drop) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_bytes_drop) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_pkts) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_bytes) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_pkts_drop) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_bytes_drop) },
+	{ MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_rx_pkts) },
+	{ MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_rx_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_rx_pkts_drop) },
+	{ MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_rx_bytes_drop) },
+	{ MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_tx_pkts) },
+	{ MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_tx_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_tx_pkts_drop) },
+	{ MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_tx_bytes_drop) },
 };
 
 #define NUM_MACSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_macsec_hw_stats_desc)
@@ -52,6 +52,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(macsec_hw)
 
 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(macsec_hw)
 {
+	struct mlx5_macsec_fs *macsec_fs;
 	int i;
 
 	if (!priv->macsec)
@@ -60,9 +61,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(macsec_hw)
 	if (!mlx5e_is_macsec_device(priv->mdev))
 		return idx;
 
-	mlx5e_macsec_get_stats_fill(priv->macsec, mlx5e_macsec_get_stats(priv->macsec));
+	macsec_fs = priv->mdev->macsec_fs;
+	mlx5_macsec_fs_get_stats_fill(macsec_fs, mlx5_macsec_fs_get_stats(macsec_fs));
 	for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++)
-		data[idx++] = MLX5E_READ_CTR64_CPU(mlx5e_macsec_get_stats(priv->macsec),
+		data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_macsec_fs_get_stats(macsec_fs),
 						   mlx5e_macsec_hw_stats_desc, i);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 5aa51d74f8b4..bb7f86c993e5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -432,8 +432,10 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
 	}
 	spin_unlock_bh(&arfs->arfs_lock);
 	hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
-		if (arfs_rule->rule)
+		if (arfs_rule->rule) {
 			mlx5_del_flow_rules(arfs_rule->rule);
+			priv->channel_stats[arfs_rule->rxq]->rq.arfs_expired++;
+		}
 		hlist_del(&arfs_rule->hlist);
 		kfree(arfs_rule);
 	}
@@ -509,6 +511,7 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
 
 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 	if (!spec) {
+		priv->channel_stats[arfs_rule->rxq]->rq.arfs_err++;
 		err = -ENOMEM;
 		goto out;
 	}
@@ -519,6 +522,8 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
 		 ntohs(tuple->etype));
 	arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
 	if (!arfs_table) {
+		WARN_ONCE(1, "arfs table does not exist for etype %u and ip_proto %u\n",
+			  tuple->etype, tuple->ip_proto);
 		err = -EINVAL;
 		goto out;
 	}
@@ -600,9 +605,11 @@ static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
 	dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
 	dst.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, rxq);
 	err = mlx5_modify_rule_destination(rule, &dst, NULL);
-	if (err)
+	if (err) {
+		priv->channel_stats[rxq]->rq.arfs_err++;
 		netdev_warn(priv->netdev,
 			    "Failed to modify aRFS rule destination to rq=%d\n", rxq);
+	}
 }
 
 static void arfs_handle_work(struct work_struct *work)
@@ -632,6 +639,7 @@ static void arfs_handle_work(struct work_struct *work)
 		if (IS_ERR(rule))
 			goto out;
 		arfs_rule->rule = rule;
+		priv->channel_stats[arfs_rule->rxq]->rq.arfs_add++;
 	} else {
 		arfs_modify_rule_rq(priv, arfs_rule->rule,
 				    arfs_rule->rxq);
@@ -650,8 +658,10 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
 	struct arfs_tuple *tuple;
 
 	rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
-	if (!rule)
+	if (!rule) {
+		priv->channel_stats[rxq]->rq.arfs_err++;
 		return NULL;
+	}
 
 	rule->priv = priv;
 	rule->rxq = rxq;
@@ -740,10 +750,13 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 	spin_lock_bh(&arfs->arfs_lock);
 	arfs_rule = arfs_find_rule(arfs_t, &fk);
 	if (arfs_rule) {
-		if (arfs_rule->rxq == rxq_index) {
+		if (arfs_rule->rxq == rxq_index || work_busy(&arfs_rule->arfs_work)) {
 			spin_unlock_bh(&arfs->arfs_lock);
 			return arfs_rule->filter_id;
 		}
+
+		priv->channel_stats[rxq_index]->rq.arfs_request_in++;
+		priv->channel_stats[arfs_rule->rxq]->rq.arfs_request_out++;
 		arfs_rule->rxq = rxq_index;
 	} else {
 		arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
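The aRFS hunks above add per-ring counters at each point in a rule's lifecycle: arfs_add when a rule is first installed, arfs_request_in/arfs_request_out when a flow migrates between rings, arfs_expired when it ages out, and arfs_err on allocation or table-lookup failures. A minimal userspace model of that accounting; the struct and event sequence are illustrative, not the driver's types:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the per-ring rq stats in the hunks above. */
struct ring_arfs_stats {
	uint64_t arfs_add, arfs_request_in, arfs_request_out;
	uint64_t arfs_expired, arfs_err;
};

/* A flow steered from ring 'from' to ring 'to' counts once as an
 * outgoing request on the old ring and once as incoming on the new,
 * matching the paired increments in mlx5e_rx_flow_steer().
 */
static void account_move(struct ring_arfs_stats *rings, int from, int to)
{
	rings[to].arfs_request_in++;
	rings[from].arfs_request_out++;
}

int main(void)
{
	struct ring_arfs_stats rings[2] = {{ 0 }};

	rings[0].arfs_add++;       /* rule installed on ring 0 */
	account_move(rings, 0, 1); /* flow's thread moved to ring 1 */
	rings[1].arfs_expired++;   /* later aged out by arfs_may_expire_flow() */
	printf("ring1 in=%llu\n", (unsigned long long)rings[1].arfs_request_in);
	return 0;
}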
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 27861b68ced5..c7c1b667b105 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -43,12 +43,17 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
 			       struct ethtool_drvinfo *drvinfo)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
+	int count;
 
 	strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
-	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-		 "%d.%d.%04d (%.16s)",
-		 fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
-		 mdev->board_id);
+	count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+			 "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
+			 fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
+	if (count >= sizeof(drvinfo->fw_version))
+		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+			 "%d.%d.%04d", fw_rev_maj(mdev),
+			 fw_rev_min(mdev), fw_rev_sub(mdev));
+
 	strscpy(drvinfo->bus_info, dev_name(mdev->device),
 		sizeof(drvinfo->bus_info));
 }
@@ -1247,7 +1252,7 @@ static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
 
 u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv)
 {
-	return MLX5E_INDIR_RQT_SIZE;
+	return mlx5e_rqt_size(priv->mdev, priv->channels.params.num_channels);
 }
 
 static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
@@ -2061,7 +2066,8 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
 	struct mlx5e_params new_params;
 	int err;
 
-	if (!MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
+	if (!MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn) ||
+	    !MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter))
 		return -EOPNOTSUPP;
 
 	/* Don't allow changing the PTP state if HTB offload is active, because
@@ -2163,8 +2169,8 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
 	return priv->channels.params.pflags;
 }
 
-int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
-		    u32 *rule_locs)
+static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+			   u32 *rule_locs)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 
@@ -2181,7 +2187,7 @@ int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
 	return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs);
 }
 
-int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 934b0d5ce1b3..777d311d44ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -1283,9 +1283,7 @@ static int mlx5e_create_inner_ttc_table(struct mlx5e_flow_steering *fs,
 	mlx5e_set_inner_ttc_params(fs, rx_res, &ttc_params);
 	fs->inner_ttc = mlx5_create_inner_ttc_table(fs->mdev, &ttc_params);
-	if (IS_ERR(fs->inner_ttc))
-		return PTR_ERR(fs->inner_ttc);
-	return 0;
+	return PTR_ERR_OR_ZERO(fs->inner_ttc);
 }
 
 int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
@@ -1295,9 +1293,7 @@ int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
 	mlx5e_set_ttc_params(fs, rx_res, &ttc_params, true);
 	fs->ttc = mlx5_create_ttc_table(fs->mdev, &ttc_params);
-	if (IS_ERR(fs->ttc))
-		return PTR_ERR(fs->ttc);
-	return 0;
+	return PTR_ERR_OR_ZERO(fs->ttc);
 }
 
 int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs,
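The en_fs.c cleanups collapse the common "return the error code from an ERR_PTR, or zero on success" tail into PTR_ERR_OR_ZERO(). A standalone illustration of the equivalence, with minimal local reimplementations of the ERR_PTR helpers (the kernel's real ones live in linux/err.h; these are simplified stand-ins):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal userspace stand-ins for the kernel's ERR_PTR machinery. */
#define MAX_ERRNO 4095
static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p) { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }

/* Exactly what the open-coded version in the old hunk did... */
static int open_coded(void *ttc)
{
	if (IS_ERR(ttc))
		return (int)PTR_ERR(ttc);
	return 0;
}

/* ...and the one-liner the patch switches to. */
static int PTR_ERR_OR_ZERO(const void *p)
{
	return IS_ERR(p) ? (int)PTR_ERR(p) : 0;
}

int main(void)
{
	int dummy = 0;
	void *ok = &dummy, *bad = ERR_PTR(-ENOMEM);

	printf("%d %d\n", open_coded(ok) == PTR_ERR_OR_ZERO(ok),
	       open_coded(bad) == PTR_ERR_OR_ZERO(bad));
	return 0;
}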
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index aac32e505c14..3eccdadc0357 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -96,10 +96,6 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
 	case UDP_V4_FLOW:
 	case TCP_V6_FLOW:
 	case UDP_V6_FLOW:
-		max_tuples = ETHTOOL_NUM_L3_L4_FTS;
-		prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
-		eth_ft = &ethtool->l3_l4_ft[prio];
-		break;
 	case IP_USER_FLOW:
 	case IPV6_USER_FLOW:
 		max_tuples = ETHTOOL_NUM_L3_L4_FTS;
@@ -900,10 +896,16 @@ static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
 				  struct ethtool_rxnfc *nfc)
 {
 	u8 rx_hash_field = 0;
+	u32 flow_type = 0;
+	u32 rss_idx = 0;
 	int err;
 	int tt;
 
-	tt = flow_type_to_traffic_type(nfc->flow_type);
+	if (nfc->flow_type & FLOW_RSS)
+		rss_idx = nfc->rss_context;
+
+	flow_type = flow_type_mask(nfc->flow_type);
+	tt = flow_type_to_traffic_type(flow_type);
 	if (tt < 0)
 		return tt;
 
@@ -911,10 +913,10 @@ static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
 	 * on src IP, dest IP, TCP/UDP src port and TCP/UDP dest
 	 * port.
 	 */
-	if (nfc->flow_type != TCP_V4_FLOW &&
-	    nfc->flow_type != TCP_V6_FLOW &&
-	    nfc->flow_type != UDP_V4_FLOW &&
-	    nfc->flow_type != UDP_V6_FLOW)
+	if (flow_type != TCP_V4_FLOW &&
+	    flow_type != TCP_V6_FLOW &&
+	    flow_type != UDP_V4_FLOW &&
+	    flow_type != UDP_V6_FLOW)
 		return -EOPNOTSUPP;
 
 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
@@ -931,7 +933,7 @@ static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
 		rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_DPORT;
 
 	mutex_lock(&priv->state_lock);
-	err = mlx5e_rx_res_rss_set_hash_fields(priv->rx_res, tt, rx_hash_field);
+	err = mlx5e_rx_res_rss_set_hash_fields(priv->rx_res, rss_idx, tt, rx_hash_field);
 	mutex_unlock(&priv->state_lock);
 
 	return err;
@@ -940,14 +942,23 @@ static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
 				  struct ethtool_rxnfc *nfc)
 {
-	u32 hash_field = 0;
+	int hash_field = 0;
+	u32 flow_type = 0;
+	u32 rss_idx = 0;
 	int tt;
 
-	tt = flow_type_to_traffic_type(nfc->flow_type);
+	if (nfc->flow_type & FLOW_RSS)
+		rss_idx = nfc->rss_context;
+
+	flow_type = flow_type_mask(nfc->flow_type);
+	tt = flow_type_to_traffic_type(flow_type);
 	if (tt < 0)
 		return tt;
 
-	hash_field = mlx5e_rx_res_rss_get_hash_fields(priv->rx_res, tt);
+	hash_field = mlx5e_rx_res_rss_get_hash_fields(priv->rx_res, rss_idx, tt);
+	if (hash_field < 0)
+		return hash_field;
+
 	nfc->data = 0;
 
 	if (hash_field & MLX5_HASH_FIELD_SEL_SRC_IP)
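Before this change, an ETHTOOL_SRXFH request with FLOW_RSS set would fail the flow-type comparisons, because the FLOW_RSS flag bit is OR'd into nfc->flow_type. The fix strips the flag bits first and uses rss_context to select the RSS context. A small sketch of the masking; the flag values mirror include/uapi/linux/ethtool.h as I recall them and should be treated as assumptions to verify against your tree:

#include <stdint.h>
#include <stdio.h>

/* Flag bits OR'd into ethtool_rxnfc::flow_type (assumed uapi values). */
#define FLOW_EXT     0x80000000u
#define FLOW_MAC_EXT 0x40000000u
#define FLOW_RSS     0x20000000u
#define TCP_V4_FLOW  0x01u

/* Equivalent of the kernel's flow_type_mask(): drop the flag bits so
 * the remaining value can be compared against TCP_V4_FLOW and friends.
 */
static uint32_t flow_type_mask(uint32_t flow_type)
{
	return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
}

int main(void)
{
	uint32_t raw = TCP_V4_FLOW | FLOW_RSS;

	/* Without masking, the comparison fails even for plain TCP/IPv4. */
	printf("raw==TCP_V4_FLOW: %d\n", raw == TCP_V4_FLOW);
	printf("masked==TCP_V4_FLOW: %d\n", flow_type_mask(raw) == TCP_V4_FLOW);
	return 0;
}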
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index f7b494125eee..0c87ddb8a7a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -38,7 +38,7 @@
 #include <linux/debugfs.h>
 #include <linux/if_bridge.h>
 #include <linux/filter.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
 #include <net/pkt_sched.h>
 #include <net/xdp_sock_drv.h>
 #include "eswitch.h"
@@ -69,6 +69,7 @@
 #include "en/htb.h"
 #include "qos.h"
 #include "en/trap.h"
+#include "lib/devcom.h"
 
 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
 					    enum mlx5e_mpwrq_umr_mode umr_mode)
@@ -178,6 +179,61 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
 	mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
 }
 
+static int mlx5e_devcom_event_mpv(int event, void *my_data, void *event_data)
+{
+	struct mlx5e_priv *slave_priv = my_data;
+
+	switch (event) {
+	case MPV_DEVCOM_MASTER_UP:
+		mlx5_devcom_comp_set_ready(slave_priv->devcom, true);
+		break;
+	case MPV_DEVCOM_MASTER_DOWN:
+		/* no need for comp set ready false since we unregister after
+		 * and it hurts cleanup flow.
+		 */
+		break;
+	case MPV_DEVCOM_IPSEC_MASTER_UP:
+	case MPV_DEVCOM_IPSEC_MASTER_DOWN:
+		mlx5e_ipsec_handle_mpv_event(event, my_data, event_data);
+		break;
+	}
+
+	return 0;
+}
+
+static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
+{
+	priv->devcom = mlx5_devcom_register_component(priv->mdev->priv.devc,
+						      MLX5_DEVCOM_MPV,
+						      *data,
+						      mlx5e_devcom_event_mpv,
+						      priv);
+	if (IS_ERR_OR_NULL(priv->devcom))
+		return -EOPNOTSUPP;
+
+	if (mlx5_core_is_mp_master(priv->mdev)) {
+		mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_UP,
+				       MPV_DEVCOM_MASTER_UP, priv);
+		mlx5e_ipsec_send_event(priv, MPV_DEVCOM_IPSEC_MASTER_UP);
+	}
+
+	return 0;
+}
+
+static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv)
+{
+	if (IS_ERR_OR_NULL(priv->devcom))
+		return;
+
+	if (mlx5_core_is_mp_master(priv->mdev)) {
+		mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_DOWN,
+				       MPV_DEVCOM_MASTER_DOWN, priv);
+		mlx5e_ipsec_send_event(priv, MPV_DEVCOM_IPSEC_MASTER_DOWN);
+	}
+
+	mlx5_devcom_unregister_component(priv->devcom);
+}
+
 static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
 {
 	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, blocking_events_nb);
@@ -192,6 +248,13 @@ static int blocking_event(struct notifier_block *nb, unsigned long event, void *
 			return NOTIFY_BAD;
 		}
 		break;
+	case MLX5_DRIVER_EVENT_AFFILIATION_DONE:
+		if (mlx5e_devcom_init_mpv(priv, data))
+			return NOTIFY_BAD;
+		break;
+	case MLX5_DRIVER_EVENT_AFFILIATION_REMOVED:
+		mlx5e_devcom_cleanup_mpv(priv);
+		break;
 	default:
 		return NOTIFY_DONE;
 	}
@@ -834,7 +897,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 		struct page_pool_params pp_params = { 0 };
 
 		pp_params.order = 0;
-		pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | PP_FLAG_PAGE_FRAG;
+		pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
 		pp_params.pool_size = pool_size;
 		pp_params.nid = node;
 		pp_params.dev = rq->pdev;
@@ -1991,7 +2054,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 	int eqn;
 	int err;
 
-	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn);
+	err = mlx5_comp_eqn_get(mdev, param->eq_ix, &eqn);
 	if (err)
 		return err;
 
@@ -2447,14 +2510,14 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 			      struct xsk_buff_pool *xsk_pool,
 			      struct mlx5e_channel **cp)
 {
-	int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
+	int cpu = mlx5_comp_vector_get_cpu(priv->mdev, ix);
 	struct net_device *netdev = priv->netdev;
 	struct mlx5e_xsk_param xsk;
 	struct mlx5e_channel *c;
 	unsigned int irq;
 	int err;
 
-	err = mlx5_vector2irqn(priv->mdev, ix, &irq);
+	err = mlx5_comp_irqn_get(priv->mdev, ix, &irq);
 	if (err)
 		return err;
 
@@ -2668,6 +2731,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
 {
 	int i;
 
+	ASSERT_RTNL();
 	if (chs->ptp) {
 		mlx5e_ptp_close(chs->ptp);
 		chs->ptp = NULL;
@@ -2858,13 +2922,13 @@ static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
 	struct mlx5_core_dev *mdev = priv->mdev;
 	int num_comp_vectors, ix, irq;
 
-	num_comp_vectors = mlx5_comp_vectors_count(mdev);
+	num_comp_vectors = mlx5_comp_vectors_max(mdev);
 
 	for (ix = 0; ix < params->num_channels; ix++) {
 		cpumask_clear(priv->scratchpad.cpumask);
 
 		for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) {
-			int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(mdev, irq));
+			int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
 
 			cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
 		}
@@ -2885,8 +2949,12 @@ static int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
 	mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
 
 	/* This function may be called on attach, before priv->rx_res is created. */
-	if (!netif_is_rxfh_configured(priv->netdev) && priv->rx_res)
-		mlx5e_rx_res_rss_set_indir_uniform(priv->rx_res, count);
+	if (priv->rx_res) {
+		mlx5e_rx_res_rss_update_num_channels(priv->rx_res, count);
+
+		if (!netif_is_rxfh_configured(priv->netdev))
+			mlx5e_rx_res_rss_set_indir_uniform(priv->rx_res, count);
+	}
 
 	return 0;
 }
@@ -2945,17 +3013,29 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
 	if (mlx5e_is_vport_rep(priv))
 		mlx5e_rep_activate_channels(priv);
 
+	set_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state);
+
 	mlx5e_wait_channels_min_rx_wqes(&priv->channels);
 
 	if (priv->rx_res)
 		mlx5e_rx_res_channels_activate(priv->rx_res, &priv->channels);
 }
 
+static void mlx5e_cancel_tx_timeout_work(struct mlx5e_priv *priv)
+{
+	WARN_ON_ONCE(test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state));
+	if (current_work() != &priv->tx_timeout_work)
+		cancel_work_sync(&priv->tx_timeout_work);
+}
+
 void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
 {
 	if (priv->rx_res)
 		mlx5e_rx_res_channels_deactivate(priv->rx_res);
 
+	clear_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state);
+	mlx5e_cancel_tx_timeout_work(priv);
+
 	if (mlx5e_is_vport_rep(priv))
 		mlx5e_rep_deactivate_channels(priv);
 
@@ -3952,13 +4032,14 @@ static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
 	struct mlx5e_channels *chs = &priv->channels;
 	struct mlx5e_params new_params;
 	int err;
+	bool rx_ts_over_crc = !enable;
 
 	mutex_lock(&priv->state_lock);
 
 	new_params = chs->params;
 	new_params.scatter_fcs_en = enable;
 	err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_set_rx_port_ts_wrap,
-				       &new_params.scatter_fcs_en, true);
+				       &rx_ts_over_crc, true);
 	mutex_unlock(&priv->state_lock);
 	return err;
 }
@@ -4733,8 +4814,17 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
 	struct net_device *netdev = priv->netdev;
 	int i;
 
-	rtnl_lock();
-	mutex_lock(&priv->state_lock);
+	/* Take rtnl_lock to ensure no change in netdev->real_num_tx_queues
+	 * through this flow. However, channel closing flows have to wait for
+	 * this work to finish while holding rtnl lock too. So either get the
+	 * lock or find that channels are being closed for other reason and
+	 * this work is not relevant anymore.
+	 */
+	while (!rtnl_trylock()) {
+		if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state))
+			return;
+		msleep(20);
+	}
 
 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
 		goto unlock;
@@ -4753,7 +4843,6 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
 	}
 
 unlock:
-	mutex_unlock(&priv->state_lock);
 	rtnl_unlock();
 }
 
@@ -4898,9 +4987,6 @@ static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
 			continue;
 
-		if (nla_len(attr) < sizeof(mode))
-			return -EINVAL;
-
 		mode = nla_get_u16(attr);
 		if (mode > BRIDGE_MODE_VEPA)
 			return -EINVAL;
@@ -5328,10 +5414,6 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
 	enum mlx5e_rx_res_features features;
 	int err;
 
-	priv->rx_res = mlx5e_rx_res_alloc();
-	if (!priv->rx_res)
-		return -ENOMEM;
-
 	mlx5e_create_q_counters(priv);
 
 	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
@@ -5343,12 +5425,16 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
 	features = MLX5E_RX_RES_FEATURE_PTP;
 	if (mlx5_tunnel_inner_ft_supported(mdev))
 		features |= MLX5E_RX_RES_FEATURE_INNER_FT;
-	err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
-				priv->max_nch, priv->drop_rq.rqn,
-				&priv->channels.params.packet_merge,
-				priv->channels.params.num_channels);
-	if (err)
+
+	priv->rx_res = mlx5e_rx_res_create(priv->mdev, features, priv->max_nch, priv->drop_rq.rqn,
+					   &priv->channels.params.packet_merge,
+					   priv->channels.params.num_channels);
+	if (IS_ERR(priv->rx_res)) {
+		err = PTR_ERR(priv->rx_res);
+		priv->rx_res = NULL;
+		mlx5_core_err(mdev, "create rx resources failed, %d\n", err);
 		goto err_close_drop_rq;
+	}
 
 	err = mlx5e_create_flow_steering(priv->fs, priv->rx_res, priv->profile,
 					 priv->netdev);
@@ -5378,12 +5464,11 @@ err_destroy_flow_steering:
 				    priv->profile);
 err_destroy_rx_res:
 	mlx5e_rx_res_destroy(priv->rx_res);
+	priv->rx_res = NULL;
 err_close_drop_rq:
 	mlx5e_close_drop_rq(&priv->drop_rq);
 err_destroy_q_counters:
 	mlx5e_destroy_q_counters(priv);
-	mlx5e_rx_res_free(priv->rx_res);
-	priv->rx_res = NULL;
 	return err;
 }
 
@@ -5394,10 +5479,9 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
 	mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
 				    priv->profile);
 	mlx5e_rx_res_destroy(priv->rx_res);
+	priv->rx_res = NULL;
 	mlx5e_close_drop_rq(&priv->drop_rq);
 	mlx5e_destroy_q_counters(priv);
-	mlx5e_rx_res_free(priv->rx_res);
-	priv->rx_res = NULL;
 }
 
 static void mlx5e_set_mqprio_rl(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 99b3843396f3..e92d4f83592e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -71,13 +71,17 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	struct mlx5_core_dev *mdev = priv->mdev;
+	int count;
 
 	strscpy(drvinfo->driver, mlx5e_rep_driver_name,
 		sizeof(drvinfo->driver));
-	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-		 "%d.%d.%04d (%.16s)",
-		 fw_rev_maj(mdev), fw_rev_min(mdev),
-		 fw_rev_sub(mdev), mdev->board_id);
+	count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+			 "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
+			 fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
+	if (count >= sizeof(drvinfo->fw_version))
+		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+			 "%d.%d.%04d", fw_rev_maj(mdev),
+			 fw_rev_min(mdev), fw_rev_sub(mdev));
 }
 
 static const struct counter_desc sw_rep_stats_desc[] = {
@@ -399,15 +403,13 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
 }
 
 static int mlx5e_sqs2vport_add_peers_rules(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep,
-					   struct mlx5_devcom *devcom,
 					   struct mlx5e_rep_sq *rep_sq, int i)
 {
-	struct mlx5_eswitch *peer_esw = NULL;
 	struct mlx5_flow_handle *flow_rule;
-	int tmp;
+	struct mlx5_devcom_comp_dev *tmp;
+	struct mlx5_eswitch *peer_esw;
 
-	mlx5_devcom_for_each_peer_entry(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
-					peer_esw, tmp) {
+	mlx5_devcom_for_each_peer_entry(esw->devcom, peer_esw, tmp) {
 		u16 peer_rule_idx = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
 		struct mlx5e_rep_sq_peer *sq_peer;
 		int err;
@@ -443,7 +445,6 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
 	struct mlx5_flow_handle *flow_rule;
 	struct mlx5e_rep_priv *rpriv;
 	struct mlx5e_rep_sq *rep_sq;
-	struct mlx5_devcom *devcom;
 	bool devcom_locked = false;
 	int err;
 	int i;
@@ -451,10 +452,10 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
 	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
 		return 0;
 
-	devcom = esw->dev->priv.devcom;
 	rpriv = mlx5e_rep_to_rep_priv(rep);
-	if (mlx5_devcom_comp_is_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS) &&
-	    mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+
+	if (mlx5_devcom_comp_is_ready(esw->devcom) &&
+	    mlx5_devcom_for_each_peer_begin(esw->devcom))
 		devcom_locked = true;
 
 	for (i = 0; i < sqns_num; i++) {
@@ -477,7 +478,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
 		xa_init(&rep_sq->sq_peer);
 		if (devcom_locked) {
-			err = mlx5e_sqs2vport_add_peers_rules(esw, rep, devcom, rep_sq, i);
+			err = mlx5e_sqs2vport_add_peers_rules(esw, rep, rep_sq, i);
 			if (err) {
 				mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
 				xa_destroy(&rep_sq->sq_peer);
@@ -490,7 +491,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
 	}
 
 	if (devcom_locked)
-		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+		mlx5_devcom_for_each_peer_end(esw->devcom);
 
 	return 0;
 
@@ -498,7 +499,7 @@ out_err:
 	mlx5e_sqs2vport_stop(esw, rep);
 
 	if (devcom_locked)
-		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+		mlx5_devcom_for_each_peer_end(esw->devcom);
 
 	return err;
 }
@@ -704,7 +705,7 @@ mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 	/* update HW stats in background for next time */
 	mlx5e_queue_update_stats(priv);
-	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
+	mlx5e_stats_copy_rep_stats(stats, &priv->stats.rep_stats);
 }
 
 static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
@@ -772,6 +773,7 @@ static int mlx5e_rep_max_nch_limit(struct mlx5_core_dev *mdev)
 
 static void mlx5e_build_rep_params(struct net_device *netdev)
 {
+	const bool take_rtnl = netdev->reg_state == NETREG_REGISTERED;
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_eswitch_rep *rep = rpriv->rep;
@@ -797,8 +799,15 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
 	/* RQ */
 	mlx5e_build_rq_params(mdev, params);
 
+	/* If netdev is already registered (e.g. move from nic profile to uplink,
+	 * RTNL lock must be held before triggering netdev notifiers.
+	 */
+	if (take_rtnl)
+		rtnl_lock();
 	/* update XDP supported features */
 	mlx5e_set_xdp_feature(netdev);
+	if (take_rtnl)
+		rtnl_unlock();
 
 	/* CQ moderation params */
 	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
@@ -1001,26 +1010,22 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
 	struct mlx5_core_dev *mdev = priv->mdev;
 	int err;
 
-	priv->rx_res = mlx5e_rx_res_alloc();
-	if (!priv->rx_res) {
-		err = -ENOMEM;
-		goto err_free_fs;
-	}
-
 	mlx5e_fs_init_l2_addr(priv->fs, priv->netdev);
 
 	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
 	if (err) {
 		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
-		goto err_rx_res_free;
+		goto err_free_fs;
 	}
 
-	err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
-				priv->max_nch, priv->drop_rq.rqn,
-				&priv->channels.params.packet_merge,
-				priv->channels.params.num_channels);
-	if (err)
+	priv->rx_res = mlx5e_rx_res_create(priv->mdev, 0, priv->max_nch, priv->drop_rq.rqn,
+					   &priv->channels.params.packet_merge,
+					   priv->channels.params.num_channels);
+	if (IS_ERR(priv->rx_res)) {
+		err = PTR_ERR(priv->rx_res);
+		mlx5_core_err(mdev, "Create rx resources failed, err=%d\n", err);
 		goto err_close_drop_rq;
+	}
 
 	err = mlx5e_create_rep_ttc_table(priv);
 	if (err)
@@ -1044,11 +1049,9 @@ err_destroy_ttc_table:
 	mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
err_destroy_rx_res:
 	mlx5e_rx_res_destroy(priv->rx_res);
+	priv->rx_res = ERR_PTR(-EINVAL);
 err_close_drop_rq:
 	mlx5e_close_drop_rq(&priv->drop_rq);
-err_rx_res_free:
-	mlx5e_rx_res_free(priv->rx_res);
-	priv->rx_res = NULL;
 err_free_fs:
 	mlx5e_fs_cleanup(priv->fs);
 	priv->fs = NULL;
@@ -1062,9 +1065,8 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
 	mlx5e_destroy_rep_root_ft(priv);
 	mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
 	mlx5e_rx_res_destroy(priv->rx_res);
+	priv->rx_res = ERR_PTR(-EINVAL);
 	mlx5e_close_drop_rq(&priv->drop_rq);
-	mlx5e_rx_res_free(priv->rx_res);
-	priv->rx_res = NULL;
 }
 
 static void mlx5e_rep_mpesw_work(struct work_struct *work)
@@ -1339,6 +1341,7 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
 	&MLX5E_STATS_GRP(channels),
 	&MLX5E_STATS_GRP(per_port_buff_congest),
 #ifdef CONFIG_MLX5_EN_IPSEC
+	&MLX5E_STATS_GRP(ipsec_hw),
 	&MLX5E_STATS_GRP(ipsec_sw),
 #endif
 	&MLX5E_STATS_GRP(ptp),
@@ -1357,8 +1360,9 @@ mlx5e_rep_vnic_reporter_diagnose(struct devlink_health_reporter *reporter,
 	struct mlx5e_rep_priv *rpriv = devlink_health_reporter_priv(reporter);
 	struct mlx5_eswitch_rep *rep = rpriv->rep;
 
-	return mlx5_reporter_vnic_diagnose_counters(rep->esw->dev, fmsg,
-						    rep->vport, true);
+	mlx5_reporter_vnic_diagnose_counters(rep->esw->dev, fmsg, rep->vport,
+					     true);
+	return 0;
 }
 
 static const struct devlink_health_reporter_ops mlx5_rep_vnic_reporter_ops = {
@@ -1493,7 +1497,7 @@ mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 
 	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch,
 						 rpriv->rep->vport);
-	if (dl_port) {
+	if (!IS_ERR(dl_port)) {
 		SET_NETDEV_DEVLINK_PORT(netdev, dl_port);
 		mlx5e_rep_vnic_reporter_create(priv, dl_port);
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 41d37159e027..8d9743a5e42c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -36,7 +36,7 @@
 #include <linux/bitmap.h>
 #include <linux/filter.h>
 #include <net/ip6_checksum.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #include <net/inet_ecn.h>
 #include <net/gro.h>
 #include <net/udp.h>
@@ -457,26 +457,41 @@ static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
 static int mlx5e_refill_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
 {
 	int remaining = wqe_bulk;
-	int i = 0;
+	int total_alloc = 0;
+	int refill_alloc;
+	int refill;
 
 	/* The WQE bulk is split into smaller bulks that are sized
 	 * according to the page pool cache refill size to avoid overflowing
 	 * the page pool cache due to too many page releases at once.
 	 */
 	do {
-		int refill = min_t(u16, rq->wqe.info.refill_unit, remaining);
-		int alloc_count;
+		refill = min_t(u16, rq->wqe.info.refill_unit, remaining);
 
-		mlx5e_free_rx_wqes(rq, ix + i, refill);
-		alloc_count = mlx5e_alloc_rx_wqes(rq, ix + i, refill);
-		i += alloc_count;
-		if (unlikely(alloc_count != refill))
-			break;
+		mlx5e_free_rx_wqes(rq, ix + total_alloc, refill);
+		refill_alloc = mlx5e_alloc_rx_wqes(rq, ix + total_alloc, refill);
+		if (unlikely(refill_alloc != refill))
+			goto err_free;
 
+		total_alloc += refill_alloc;
 		remaining -= refill;
 	} while (remaining);
 
-	return i;
+	return total_alloc;
+
+err_free:
+	mlx5e_free_rx_wqes(rq, ix, total_alloc + refill_alloc);
+
+	for (int i = 0; i < total_alloc + refill; i++) {
+		int j = mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, ix + i);
+		struct mlx5e_wqe_frag_info *frag;
+
+		frag = get_frag(rq, j);
+		for (int k = 0; k < rq->wqe.info.num_frags; k++, frag++)
+			frag->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
+	}
+
+	return 0;
 }
 
 static void
@@ -816,6 +831,8 @@ err_unmap:
 		mlx5e_page_release_fragmented(rq, frag_page);
 	}
 
+	bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
+
 err:
 	rq->stats->buff_alloc_err++;
 
@@ -1543,7 +1560,8 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
 		mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
 
 	if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
-		mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);
+		mlx5e_ipsec_offload_handle_rx_skb(netdev, skb,
+						  be32_to_cpu(cqe->ft_metadata));
 
 	if (unlikely(mlx5e_macsec_is_rx_flow(cqe)))
 		mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe);
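In the refill error path above, indexes must wrap around the cyclic work queue, hence mlx5_wq_cyc_ctr2ix() on ix + i before each fragment is touched. For a power-of-two ring that conversion is a single mask; a sketch of the standard trick, asserted here rather than quoted from the mlx5 headers (the real helper lives in wq.h):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Counter-to-index for a power-of-two ring: keep only the low bits.
 * This mirrors what a wq_cyc ctr2ix helper must do; treat it as an
 * assumption about the implementation, not a copy of it.
 */
static uint16_t ctr2ix(uint16_t ctr, uint16_t ring_size)
{
	assert((ring_size & (ring_size - 1)) == 0); /* power of two */
	return ctr & (ring_size - 1);
}

int main(void)
{
	/* ix near the end of a 256-entry ring: ix + 2 wraps to slot 1. */
	printf("%u\n", ctr2ix(255 + 2, 256));
	return 0;
}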
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 4d77055abd4b..4b96ad657145 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -38,7 +38,7 @@
 #include "en/port.h"
 
 #ifdef CONFIG_PAGE_POOL_STATS
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #endif
 
 static unsigned int stats_grps_num(struct mlx5e_priv *priv)
@@ -180,7 +180,13 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
+#ifdef CONFIG_MLX5_EN_ARFS
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_add) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_request_in) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_request_out) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_expired) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
+#endif
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
 #ifdef CONFIG_PAGE_POOL_STATS
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
@@ -231,7 +237,6 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
@@ -321,7 +326,6 @@ static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
 	s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks;
 	s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts;
 	s->rx_xsk_congst_umr += xskrq_stats->congst_umr;
-	s->rx_xsk_arfs_err += xskrq_stats->arfs_err;
 }
 
 static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
@@ -354,7 +358,13 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
 	s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
 	s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
 	s->rx_congst_umr += rq_stats->congst_umr;
+#ifdef CONFIG_MLX5_EN_ARFS
+	s->rx_arfs_add += rq_stats->arfs_add;
+	s->rx_arfs_request_in += rq_stats->arfs_request_in;
+	s->rx_arfs_request_out += rq_stats->arfs_request_out;
+	s->rx_arfs_expired += rq_stats->arfs_expired;
 	s->rx_arfs_err += rq_stats->arfs_err;
+#endif
 	s->rx_recover += rq_stats->recover;
 #ifdef CONFIG_PAGE_POOL_STATS
 	s->rx_pp_alloc_fast += rq_stats->pp_alloc_fast;
@@ -1990,7 +2000,13 @@ static const struct counter_desc rq_stats_desc[] = {
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
+#ifdef CONFIG_MLX5_EN_ARFS
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_add) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_request_in) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_request_out) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_expired) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
+#endif
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
 #ifdef CONFIG_PAGE_POOL_STATS
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
@@ -2092,7 +2108,6 @@ static const struct counter_desc xskrq_stats_desc[] = {
 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
-	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
 };
 
 static const struct counter_desc xsksq_stats_desc[] = {
@@ -2142,9 +2157,7 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
-	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_cqe) },
-	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_event) },
-	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, ooo_cqe_drop) },
+	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, late_cqe) },
 };
 
 static const struct counter_desc ptp_rq_stats_desc[] = {
@@ -2170,7 +2183,6 @@ static const struct counter_desc ptp_rq_stats_desc[] = {
 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) },
-	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, arfs_err) },
 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) },
 };
 
@@ -2490,7 +2502,7 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
 	&MLX5E_STATS_GRP(per_port_buff_congest),
 	&MLX5E_STATS_GRP(ptp),
 	&MLX5E_STATS_GRP(qos),
-#ifdef CONFIG_MLX5_EN_MACSEC
+#ifdef CONFIG_MLX5_MACSEC
 	&MLX5E_STATS_GRP(macsec_hw),
 #endif
 };
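The stat tables above pair a printable name with a field location so one generic loop can walk any stats struct. MLX5E_DECLARE_STAT presumably expands to roughly a (name, offsetof) pair; a minimal reconstruction of the mechanism, with local names rather than the driver macros:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sw_stats { uint64_t rx_arfs_add, rx_arfs_expired; };

/* One row per counter: ethtool string plus byte offset into the struct. */
struct counter_desc { const char *name; size_t offset; };

#define DECLARE_STAT(type, field) { #field, offsetof(type, field) }

static const struct counter_desc sw_desc[] = {
	DECLARE_STAT(struct sw_stats, rx_arfs_add),
	DECLARE_STAT(struct sw_stats, rx_arfs_expired),
};

/* The generic reader: fetch a u64 at desc->offset, in the spirit of the
 * MLX5E_READ_CTR64_CPU-style helpers.
 */
static uint64_t read_ctr(const void *stats, const struct counter_desc *d)
{
	return *(const uint64_t *)((const char *)stats + d->offset);
}

int main(void)
{
	struct sw_stats s = { .rx_arfs_add = 7 };

	for (size_t i = 0; i < sizeof(sw_desc) / sizeof(sw_desc[0]); i++)
		printf("%s: %llu\n", sw_desc[i].name,
		       (unsigned long long)read_ctr(&s, &sw_desc[i]));
	return 0;
}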
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 1ff8a06027dc..477c547dcc04 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -194,7 +194,13 @@ struct mlx5e_sw_stats {
 	u64 rx_cqe_compress_blks;
 	u64 rx_cqe_compress_pkts;
 	u64 rx_congst_umr;
+#ifdef CONFIG_MLX5_EN_ARFS
+	u64 rx_arfs_add;
+	u64 rx_arfs_request_in;
+	u64 rx_arfs_request_out;
+	u64 rx_arfs_expired;
 	u64 rx_arfs_err;
+#endif
 	u64 rx_recover;
 	u64 ch_events;
 	u64 ch_poll;
@@ -256,7 +262,6 @@ struct mlx5e_sw_stats {
 	u64 rx_xsk_cqe_compress_blks;
 	u64 rx_xsk_cqe_compress_pkts;
 	u64 rx_xsk_congst_umr;
-	u64 rx_xsk_arfs_err;
 	u64 tx_xsk_xmit;
 	u64 tx_xsk_mpwqe;
 	u64 tx_xsk_inlnw;
@@ -358,7 +363,13 @@ struct mlx5e_rq_stats {
 	u64 cqe_compress_blks;
 	u64 cqe_compress_pkts;
 	u64 congst_umr;
+#ifdef CONFIG_MLX5_EN_ARFS
+	u64 arfs_add;
+	u64 arfs_request_in;
+	u64 arfs_request_out;
+	u64 arfs_expired;
 	u64 arfs_err;
+#endif
 	u64 recover;
 #ifdef CONFIG_PAGE_POOL_STATS
 	u64 pp_alloc_fast;
@@ -449,9 +460,7 @@ struct mlx5e_ptp_cq_stats {
 	u64 err_cqe;
 	u64 abort;
 	u64 abort_abs_diff_ns;
-	u64 resync_cqe;
-	u64 resync_event;
-	u64 ooo_cqe_drop;
+	u64 late_cqe;
 };
 
 struct mlx5e_rep_stats {
@@ -475,11 +484,20 @@ struct mlx5e_stats {
 	struct mlx5e_vnic_env_stats vnic;
 	struct mlx5e_vport_stats vport;
 	struct mlx5e_pport_stats pport;
-	struct rtnl_link_stats64 vf_vport;
 	struct mlx5e_pcie_stats pcie;
 	struct mlx5e_rep_stats rep_stats;
 };
 
+static inline void mlx5e_stats_copy_rep_stats(struct rtnl_link_stats64 *vf_vport,
+					      struct mlx5e_rep_stats *rep_stats)
+{
+	memset(vf_vport, 0, sizeof(*vf_vport));
+	vf_vport->rx_packets = rep_stats->vport_rx_packets;
+	vf_vport->tx_packets = rep_stats->vport_tx_packets;
+	vf_vport->rx_bytes = rep_stats->vport_rx_bytes;
+	vf_vport->tx_bytes = rep_stats->vport_tx_bytes;
+}
+
 extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[];
 unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv);
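With the cached rtnl_link_stats64 vf_vport gone from struct mlx5e_stats, callers now build the generic counters on demand from rep_stats. The matchall police path (see the en_tc.c hunks below) keeps a previous snapshot and reports deltas; the shape of that snapshot-and-diff, with simplified local types standing in for the kernel structs:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct link_stats { uint64_t rx_packets, rx_bytes; };
struct rep_stats  { uint64_t vport_rx_packets, vport_rx_bytes; };

/* Analogue of mlx5e_stats_copy_rep_stats(): zero first, then copy the
 * handful of fields the representor actually maintains.
 */
static void copy_rep_stats(struct link_stats *dst, const struct rep_stats *src)
{
	memset(dst, 0, sizeof(*dst));
	dst->rx_packets = src->vport_rx_packets;
	dst->rx_bytes = src->vport_rx_bytes;
}

int main(void)
{
	struct rep_stats hw = { .vport_rx_packets = 110, .vport_rx_bytes = 9000 };
	struct link_stats prev = { .rx_packets = 100, .rx_bytes = 8000 }, cur;

	copy_rep_stats(&cur, &hw);
	/* Delta reporting, as mlx5e_tc_stats_matchall() does. */
	printf("dpkts=%llu dbytes=%llu\n",
	       (unsigned long long)(cur.rx_packets - prev.rx_packets),
	       (unsigned long long)(cur.rx_bytes - prev.rx_bytes));
	prev = cur; /* keep the snapshot for the next interval */
	return 0;
}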
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 31708d5aa608..96af9e2ab1d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -444,6 +444,9 @@ mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
 	struct mlx5e_flow_meter_handle *meter;
 	enum mlx5e_post_meter_type type;
 
+	if (IS_ERR(post_act))
+		return PTR_ERR(post_act);
+
 	meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
 	if (IS_ERR(meter)) {
 		mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
@@ -753,19 +756,21 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
 {
 	struct mlx5e_priv *priv = hp->func_priv;
 	struct mlx5_core_dev *mdev = priv->mdev;
-	struct mlx5e_rss_params_indir *indir;
+	struct mlx5e_rss_params_indir indir;
 	int err;
 
-	indir = kvmalloc(sizeof(*indir), GFP_KERNEL);
-	if (!indir)
-		return -ENOMEM;
+	err = mlx5e_rss_params_indir_init(&indir, mdev,
+					  mlx5e_rqt_size(mdev, hp->num_channels),
+					  mlx5e_rqt_size(mdev, priv->max_nch));
+	if (err)
+		return err;
 
-	mlx5e_rss_params_indir_init_uniform(indir, hp->num_channels);
+	mlx5e_rss_params_indir_init_uniform(&indir, hp->num_channels);
 	err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn,
 				   hp->num_channels,
 				   mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc,
-				   indir);
+				   &indir);
 
-	kvfree(indir);
+	mlx5e_rss_params_indir_cleanup(&indir);
 	return err;
 }
@@ -1668,11 +1673,10 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
 {
 	struct mlx5e_priv *out_priv, *route_priv;
 	struct mlx5_core_dev *route_mdev;
-	struct mlx5_devcom *devcom;
+	struct mlx5_devcom_comp_dev *pos;
 	struct mlx5_eswitch *esw;
 	u16 vhca_id;
 	int err;
-	int i;
 
 	out_priv = netdev_priv(out_dev);
 	esw = out_priv->mdev->priv.eswitch;
@@ -1688,10 +1692,8 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
 		return err;
 
 	rcu_read_lock();
-	devcom = out_priv->mdev->priv.devcom;
 	err = -ENODEV;
-	mlx5_devcom_for_each_peer_entry_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
-					    esw, i) {
+	mlx5_devcom_for_each_peer_entry_rcu(esw->devcom, esw, pos) {
 		err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
 		if (!err)
 			break;
@@ -2038,15 +2040,15 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 			      struct mlx5e_tc_flow *flow)
 {
 	if (mlx5e_is_eswitch_flow(flow)) {
-		struct mlx5_devcom *devcom = flow->priv->mdev->priv.devcom;
+		struct mlx5_devcom_comp_dev *devcom = flow->priv->mdev->priv.eswitch->devcom;
 
-		if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) {
+		if (!mlx5_devcom_for_each_peer_begin(devcom)) {
 			mlx5e_tc_del_fdb_flow(priv, flow);
 			return;
 		}
 
 		mlx5e_tc_del_fdb_peers_flow(flow);
-		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+		mlx5_devcom_for_each_peer_end(devcom);
 		mlx5e_tc_del_fdb_flow(priv, flow);
 	} else {
 		mlx5e_tc_del_nic_flow(priv, flow);
@@ -2600,29 +2602,29 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 		match_level = outer_match_level;
 
 	if (dissector->used_keys &
-	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
-	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
-	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
-	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
-	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
-	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
-	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
-	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
-	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
-	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
-	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
-	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
-	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
-	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
-	      BIT(FLOW_DISSECTOR_KEY_TCP) |
-	      BIT(FLOW_DISSECTOR_KEY_IP) |
-	      BIT(FLOW_DISSECTOR_KEY_CT) |
-	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
-	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
-	      BIT(FLOW_DISSECTOR_KEY_ICMP) |
-	      BIT(FLOW_DISSECTOR_KEY_MPLS))) {
+	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_META) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_IP) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_CT) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
+	      BIT_ULL(FLOW_DISSECTOR_KEY_MPLS))) {
 		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
-		netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
+		netdev_dbg(priv->netdev, "Unsupported key used: 0x%llx\n",
 			   dissector->used_keys);
 		return -EOPNOTSUPP;
 	}
@@ -3148,7 +3150,7 @@ static struct mlx5_fields fields[] = {
 	OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
 	OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
-	OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
+	OFFLOAD(IP_DSCP, 16, 0x0fc0, ip6, 0, ip_dscp),
 
 	OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
 	OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
@@ -3159,21 +3161,31 @@ static struct mlx5_fields fields[] = {
 	OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
 };
 
-static unsigned long mask_to_le(unsigned long mask, int size)
+static u32 mask_field_get(void *mask, struct mlx5_fields *f)
 {
-	__be32 mask_be32;
-	__be16 mask_be16;
-
-	if (size == 32) {
-		mask_be32 = (__force __be32)(mask);
-		mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
-	} else if (size == 16) {
-		mask_be32 = (__force __be32)(mask);
-		mask_be16 = *(__be16 *)&mask_be32;
-		mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
+	switch (f->field_bsize) {
+	case 32:
+		return be32_to_cpu(*(__be32 *)mask) & f->field_mask;
+	case 16:
+		return be16_to_cpu(*(__be16 *)mask) & (u16)f->field_mask;
+	default:
+		return *(u8 *)mask & (u8)f->field_mask;
 	}
+}
 
-	return mask;
+static void mask_field_clear(void *mask, struct mlx5_fields *f)
+{
+	switch (f->field_bsize) {
+	case 32:
+		*(__be32 *)mask &= ~cpu_to_be32(f->field_mask);
+		break;
+	case 16:
+		*(__be16 *)mask &= ~cpu_to_be16((u16)f->field_mask);
+		break;
+	default:
+		*(u8 *)mask &= ~(u8)f->field_mask;
+		break;
+	}
 }
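mask_to_le() converted masks through a long round-trip of casts; the replacement reads and clears each pedit mask at its natural width with explicit byte-order conversion, which is also why the DSCP mask correction to 0x0fc0 above becomes visible. A standalone sketch of width-dispatched big-endian field extraction; the field table is illustrative, not the driver's:

#include <arpa/inet.h> /* ntohl/ntohs */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct field { int bsize; uint32_t mask; };

/* Read a match mask of 8/16/32 bits from a network-byte-order header
 * blob and apply the field mask in host order, like mask_field_get().
 */
static uint32_t field_get(const void *p, const struct field *f)
{
	uint32_t v32;
	uint16_t v16;

	switch (f->bsize) {
	case 32: memcpy(&v32, p, 4); return ntohl(v32) & f->mask;
	case 16: memcpy(&v16, p, 2); return ntohs(v16) & (uint16_t)f->mask;
	default: return *(const uint8_t *)p & (uint8_t)f->mask;
	}
}

int main(void)
{
	/* A 16-bit field holding the 0x0fc0-masked DSCP bits, big-endian. */
	uint8_t hdr[2] = { 0x0f, 0xc0 };
	struct field dscp = { .bsize = 16, .mask = 0x0fc0 };

	printf("%#x\n", field_get(hdr, &dscp));
	return 0;
}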
@@ -3185,11 +3197,12 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
 	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
 	struct pedit_headers_action *hdrs = parse_attr->hdrs;
 	void *headers_c, *headers_v, *action, *vals_p;
-	u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
 	struct mlx5e_tc_mod_hdr_acts *mod_acts;
-	unsigned long mask, field_mask;
+	void *s_masks_p, *a_masks_p;
 	int i, first, last, next_z;
 	struct mlx5_fields *f;
+	unsigned long mask;
+	u32 s_mask, a_mask;
 	u8 cmd;
 
 	mod_acts = &parse_attr->mod_hdr_acts;
@@ -3205,15 +3218,11 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
 		bool skip;
 
 		f = &fields[i];
-		/* avoid seeing bits set from previous iterations */
-		s_mask = 0;
-		a_mask = 0;
-
 		s_masks_p = (void *)set_masks + f->offset;
 		a_masks_p = (void *)add_masks + f->offset;
 
-		s_mask = *s_masks_p & f->field_mask;
-		a_mask = *a_masks_p & f->field_mask;
+		s_mask = mask_field_get(s_masks_p, f);
+		a_mask = mask_field_get(a_masks_p, f);
 
 		if (!s_mask && !a_mask) /* nothing to offload here */
 			continue;
@@ -3240,22 +3249,20 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
 					 match_mask, f->field_bsize))
 				skip = true;
 			/* clear to denote we consumed this field */
-			*s_masks_p &= ~f->field_mask;
+			mask_field_clear(s_masks_p, f);
 		} else {
 			cmd = MLX5_ACTION_TYPE_ADD;
 			mask = a_mask;
 			vals_p = (void *)add_vals + f->offset;
 			/* add 0 is no change */
-			if ((*(u32 *)vals_p & f->field_mask) == 0)
+			if (!mask_field_get(vals_p, f))
 				skip = true;
 			/* clear to denote we consumed this field */
-			*a_masks_p &= ~f->field_mask;
+			mask_field_clear(a_masks_p, f);
 		}
 		if (skip)
 			continue;
 
-		mask = mask_to_le(mask, f->field_bsize);
-
 		first = find_first_bit(&mask, f->field_bsize);
 		next_z = find_next_zero_bit(&mask, f->field_bsize, first);
 		last = find_last_bit(&mask, f->field_bsize);
@@ -3282,10 +3289,9 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
 		MLX5_SET(set_action_in, action, field, f->field);
 
 		if (cmd == MLX5_ACTION_TYPE_SET) {
+			unsigned long field_mask = f->field_mask;
 			int start;
 
-			field_mask = mask_to_le(f->field_mask, f->field_bsize);
-
 			/* if field is bit sized it can start not from first bit */
 			start = find_first_bit(&field_mask, f->field_bsize);
 
@@ -3736,6 +3742,20 @@ out_free:
 }
 
 static int
+set_branch_dest_ft(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr)
+{
+	struct mlx5e_post_act *post_act = get_post_action(priv);
+
+	if (IS_ERR(post_act))
+		return PTR_ERR(post_act);
+
+	attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+	attr->dest_ft = mlx5e_tc_post_act_get_ft(post_act);
+
+	return 0;
+}
+
+static int
 alloc_branch_attr(struct mlx5e_tc_flow *flow,
 		  struct mlx5e_tc_act_branch_ctrl *cond,
 		  struct mlx5_flow_attr **cond_attr,
@@ -3758,8 +3778,9 @@ alloc_branch_attr(struct mlx5e_tc_flow *flow,
 		break;
 	case FLOW_ACTION_ACCEPT:
 	case FLOW_ACTION_PIPE:
-		attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
-		attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
+		err = set_branch_dest_ft(flow->priv, attr);
+		if (err)
+			goto out_err;
 		break;
 	case FLOW_ACTION_JUMP:
 		if (*jump_count) {
@@ -3768,8 +3789,9 @@ alloc_branch_attr(struct mlx5e_tc_flow *flow,
 			goto out_err;
 		}
 		*jump_count = cond->extval;
-		attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
-		attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
+		err = set_branch_dest_ft(flow->priv, attr);
+		if (err)
+			goto out_err;
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -3939,6 +3961,7 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
 		}
 
 		i_split = i + 1;
+		parse_state->if_count = 0;
 		list_add(&attr->list, &flow->attrs);
 	}
 
@@ -4223,8 +4246,7 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
 		flow_flag_test(flow, INGRESS);
 	bool act_is_encap = !!(attr->action &
 			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
-	bool esw_paired = mlx5_devcom_comp_is_ready(esw_attr->in_mdev->priv.devcom,
-						    MLX5_DEVCOM_ESW_OFFLOADS);
+	bool esw_paired = mlx5_devcom_comp_is_ready(esw_attr->in_mdev->priv.eswitch->devcom);
 
 	if (!esw_paired)
 		return false;
@@ -4491,14 +4513,13 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 		   struct net_device *filter_dev,
 		   struct mlx5e_tc_flow **__flow)
 {
-	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
+	struct mlx5_devcom_comp_dev *devcom = priv->mdev->priv.eswitch->devcom, *pos;
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
 	struct mlx5_core_dev *in_mdev = priv->mdev;
 	struct mlx5_eswitch *peer_esw;
 	struct mlx5e_tc_flow *flow;
 	int err;
-	int i;
 
 	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
 				    in_mdev);
@@ -4510,27 +4531,25 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 		return 0;
 	}
 
-	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) {
+	if (!mlx5_devcom_for_each_peer_begin(devcom)) {
 		err = -ENODEV;
 		goto clean_flow;
 	}
 
-	mlx5_devcom_for_each_peer_entry(devcom,
-					MLX5_DEVCOM_ESW_OFFLOADS,
-					peer_esw, i) {
+	mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) {
 		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags, peer_esw);
 		if (err)
 			goto peer_clean;
 	}
 
-	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	mlx5_devcom_for_each_peer_end(devcom);
 
 	*__flow = flow;
 	return 0;
 
 peer_clean:
 	mlx5e_tc_del_fdb_peers_flow(flow);
-	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	mlx5_devcom_for_each_peer_end(devcom);
 clean_flow:
 	mlx5e_tc_del_fdb_flow(priv, flow);
 	return err;
@@ -4633,6 +4652,46 @@ static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
 	return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
 }
 
+/* As IPsec and TC order is not aligned between software and hardware-offload,
+ * either IPsec offload or TC offload, not both, is allowed for a specific interface.
+ */
+static bool is_tc_ipsec_order_check_needed(struct net_device *filter, struct mlx5e_priv *priv)
+{
+	if (!IS_ENABLED(CONFIG_MLX5_EN_IPSEC))
+		return false;
+
+	if (filter != priv->netdev)
+		return false;
+
+	if (mlx5e_eswitch_vf_rep(priv->netdev))
+		return false;
+
+	return true;
+}
+
+static int mlx5e_tc_block_ipsec_offload(struct net_device *filter, struct mlx5e_priv *priv)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	if (!is_tc_ipsec_order_check_needed(filter, priv))
+		return 0;
+
+	if (mdev->num_block_tc)
+		return -EBUSY;
+
+	mdev->num_block_ipsec++;
+
+	return 0;
+}
+
+static void mlx5e_tc_unblock_ipsec_offload(struct net_device *filter, struct mlx5e_priv *priv)
+{
+	if (!is_tc_ipsec_order_check_needed(filter, priv))
+		return;
+
+	priv->mdev->num_block_ipsec--;
+}
+
 int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
 			   struct flow_cls_offload *f, unsigned long flags)
 {
@@ -4645,6 +4704,10 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
 	if (!mlx5_esw_hold(priv->mdev))
 		return -EBUSY;
 
+	err = mlx5e_tc_block_ipsec_offload(dev, priv);
+	if (err)
+		goto esw_release;
+
 	mlx5_esw_get(priv->mdev);
 
 	rcu_read_lock();
@@ -4690,7 +4753,9 @@ rcu_unlock:
 err_free:
 	mlx5e_flow_put(priv, flow);
 out:
+	mlx5e_tc_unblock_ipsec_offload(dev, priv);
 	mlx5_esw_put(priv->mdev);
+esw_release:
 	mlx5_esw_release(priv->mdev);
 	return err;
 }
@@ -4731,6 +4796,7 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
 	trace_mlx5e_delete_flower(f);
 	mlx5e_flow_put(priv, flow);
 
+	mlx5e_tc_unblock_ipsec_offload(dev, priv);
 	mlx5_esw_put(priv->mdev);
 	return 0;
 
@@ -4748,7 +4814,7 @@ int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv,
 
 int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 		       struct flow_cls_offload *f, unsigned long flags)
 {
-	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
 	struct mlx5e_tc_flow *flow;
 	struct mlx5_fc *counter;
@@ -4784,7 +4850,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 	/* Under multipath it's possible for one rule to be currently
 	 * un-offloaded while the other rule is offloaded.
 	 */
-	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+	if (esw && !mlx5_devcom_for_each_peer_begin(esw->devcom))
 		goto out;
 
 	if (flow_flag_test(flow, DUP)) {
@@ -4815,7 +4881,8 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 	}
 
 no_peer_counter:
-	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	if (esw)
+		mlx5_devcom_for_each_peer_end(esw->devcom);
 out:
 	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
 			  FLOW_ACTION_HW_STATS_DELAYED);
@@ -4930,7 +4997,8 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
 			if (err)
 				return err;
 
-			rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
+			mlx5e_stats_copy_rep_stats(&rpriv->prev_vf_vport_stats,
+						   &priv->stats.rep_stats);
 			break;
 		default:
 			NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
@@ -4970,7 +5038,7 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
 	u64 dbytes;
 	u64 dpkts;
 
-	cur_stats = priv->stats.vf_vport;
+	mlx5e_stats_copy_rep_stats(&cur_stats, &priv->stats.rep_stats);
 	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
 	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
 	rpriv->prev_vf_vport_stats = cur_stats;
@@ -5220,11 +5288,12 @@ void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht)
 int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
 {
 	const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
+	struct netdev_phys_item_id ppid;
 	struct mlx5e_rep_priv *rpriv;
 	struct mapping_ctx *mapping;
 	struct mlx5_eswitch *esw;
 	struct mlx5e_priv *priv;
-	u64 mapping_id;
+	u64 mapping_id, key;
 	int err = 0;
 
 	rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
@@ -5278,7 +5347,11 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
 		goto err_action_counter;
 	}
 
-	mlx5_esw_offloads_devcom_init(esw);
+	err = dev_get_port_parent_id(priv->netdev, &ppid, false);
+	if (!err) {
+		memcpy(&key, &ppid.id, sizeof(key));
+		mlx5_esw_offloads_devcom_init(esw, key);
+	}
 
 	return 0;
 
@@ -5665,8 +5738,10 @@ int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_a
 
 	esw = priv->mdev->priv.eswitch;
 	attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping);
-	if (IS_ERR(attr->act_id_restore_rule))
+	if (IS_ERR(attr->act_id_restore_rule)) {
+		err = PTR_ERR(attr->act_id_restore_rule);
 		goto err_rule;
+	}
 
 	return 0;
*/ - if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) + if (esw && !mlx5_devcom_for_each_peer_begin(esw->devcom)) goto out; if (flow_flag_test(flow, DUP)) { @@ -4815,7 +4881,8 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, } no_peer_counter: - mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS); + if (esw) + mlx5_devcom_for_each_peer_end(esw->devcom); out: flow_stats_update(&f->stats, bytes, packets, 0, lastuse, FLOW_ACTION_HW_STATS_DELAYED); @@ -4930,7 +4997,8 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv, if (err) return err; - rpriv->prev_vf_vport_stats = priv->stats.vf_vport; + mlx5e_stats_copy_rep_stats(&rpriv->prev_vf_vport_stats, + &priv->stats.rep_stats); break; default: NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall"); @@ -4970,7 +5038,7 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv, u64 dbytes; u64 dpkts; - cur_stats = priv->stats.vf_vport; + mlx5e_stats_copy_rep_stats(&cur_stats, &priv->stats.rep_stats); dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets; dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes; rpriv->prev_vf_vport_stats = cur_stats; @@ -5220,11 +5288,12 @@ void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht) int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv) { const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts); + struct netdev_phys_item_id ppid; struct mlx5e_rep_priv *rpriv; struct mapping_ctx *mapping; struct mlx5_eswitch *esw; struct mlx5e_priv *priv; - u64 mapping_id; + u64 mapping_id, key; int err = 0; rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv); @@ -5278,7 +5347,11 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv) goto err_action_counter; } - mlx5_esw_offloads_devcom_init(esw); + err = dev_get_port_parent_id(priv->netdev, &ppid, false); + if (!err) { + memcpy(&key, &ppid.id, sizeof(key)); + mlx5_esw_offloads_devcom_init(esw, key); + } return 0; @@ -5665,8 +5738,10 @@ int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_a esw = priv->mdev->priv.eswitch; attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping); - if (IS_ERR(attr->act_id_restore_rule)) + if (IS_ERR(attr->act_id_restore_rule)) { + err = PTR_ERR(attr->act_id_restore_rule); goto err_rule; + } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index c7eb6b238c2b..f0b506e562df 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -372,7 +372,7 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, const struct mlx5e_tx_attr *attr, const struct mlx5e_tx_wqe_attr *wqe_attr, u8 num_dma, struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg, - bool xmit_more) + struct mlx5_wqe_eth_seg *eseg, bool xmit_more) { struct mlx5_wq_cyc *wq = &sq->wq; bool send_doorbell; @@ -394,11 +394,16 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, mlx5e_tx_check_stop(sq); - if (unlikely(sq->ptpsq)) { + if (unlikely(sq->ptpsq && + (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) { + u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata); + mlx5e_skb_cb_hwtstamp_init(skb); - mlx5e_skb_fifo_push(&sq->ptpsq->skb_fifo, skb); + mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb, + metadata_index); + mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index); if (!netif_tx_queue_stopped(sq->txq) && 
- !mlx5e_skb_fifo_has_room(&sq->ptpsq->skb_fifo)) { + mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) { netif_tx_stop_queue(sq->txq); sq->stats->stopped++; } @@ -483,12 +488,15 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb, if (unlikely(num_dma < 0)) goto err_drop; - mlx5e_txwqe_complete(sq, skb, attr, wqe_attr, num_dma, wi, cseg, xmit_more); + mlx5e_txwqe_complete(sq, skb, attr, wqe_attr, num_dma, wi, cseg, eseg, xmit_more); return; err_drop: stats->dropped++; + if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) + mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist, + be32_to_cpu(eseg->flow_table_metadata)); dev_kfree_skb_any(skb); mlx5e_tx_flush(sq); } @@ -645,9 +653,9 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq) static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg) { - if (ptpsq->ts_cqe_ctr_mask && unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) - eseg->flow_table_metadata = cpu_to_be32(ptpsq->skb_fifo_pc & - ptpsq->ts_cqe_ctr_mask); + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) + eseg->flow_table_metadata = + cpu_to_be32(mlx5e_ptp_metadata_fifo_pop(&ptpsq->metadata_freelist)); } static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq, @@ -766,7 +774,7 @@ void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq) { if (netif_tx_queue_stopped(sq->txq) && mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) && - mlx5e_ptpsq_fifo_has_room(sq) && + !mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq) && !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) { netif_tx_wake_queue(sq->txq); sq->stats->wake++; @@ -1031,7 +1039,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, if (unlikely(num_dma < 0)) goto err_drop; - mlx5e_txwqe_complete(sq, skb, &attr, &wqe_attr, num_dma, wi, cseg, xmit_more); + mlx5e_txwqe_complete(sq, skb, &attr, &wqe_attr, num_dma, wi, cseg, eseg, xmit_more); return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 3db4866d7880..40a6cb052a2d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -47,7 +47,7 @@ enum { static_assert(MLX5_EQ_POLLING_BUDGET <= MLX5_NUM_SPARE_EQE); struct mlx5_eq_table { - struct list_head comp_eqs_list; + struct xarray comp_eqs; struct mlx5_eq_async pages_eq; struct mlx5_eq_async cmd_eq; struct mlx5_eq_async async_eq; @@ -58,11 +58,14 @@ struct mlx5_eq_table { struct mlx5_nb cq_err_nb; struct mutex lock; /* sync async eqs creations */ - int num_comp_eqs; + struct mutex comp_lock; /* sync comp eqs creations */ + int curr_comp_eqs; + int max_comp_eqs; struct mlx5_irq_table *irq_table; - struct mlx5_irq **comp_irqs; + struct xarray comp_irqs; struct mlx5_irq *ctrl_irq; struct cpu_rmap *rmap; + struct cpumask used_cpus; }; #define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \ @@ -452,13 +455,22 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev) ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]); eq_table->irq_table = mlx5_irq_table_get(dev); + cpumask_clear(&eq_table->used_cpus); + xa_init(&eq_table->comp_eqs); + xa_init(&eq_table->comp_irqs); + mutex_init(&eq_table->comp_lock); + eq_table->curr_comp_eqs = 0; return 0; } void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev) { + struct mlx5_eq_table *table = dev->priv.eq_table; + mlx5_eq_debugfs_cleanup(dev); - kvfree(dev->priv.eq_table); + xa_destroy(&table->comp_irqs); + 
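[Editor's note] The eq_table rework here trades comp_eqs_list/num_comp_eqs for two xarrays keyed by vector index, so lookups no longer walk a list. A condensed sketch of the standard <linux/xarray.h> idioms the rest of this patch relies on:

	struct xarray comp_eqs;

	xa_init(&comp_eqs);

	/* xa_store() returns the displaced entry or an errno encoded as a
	 * pointer, which xa_err() unpacks into a plain int */
	err = xa_err(xa_store(&comp_eqs, vecidx, eq, GFP_KERNEL));

	eq = xa_load(&comp_eqs, vecidx);        /* NULL when the slot is empty */
	xa_erase(&comp_eqs, vecidx);            /* drop one slot on teardown */
	xa_destroy(&comp_eqs);                  /* release xarray bookkeeping */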
xa_destroy(&table->comp_eqs); + kvfree(table); } /* Async EQs */ @@ -803,88 +815,125 @@ void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm) } EXPORT_SYMBOL(mlx5_eq_update_ci); -static void comp_irqs_release_pci(struct mlx5_core_dev *dev) +static void comp_irq_release_pci(struct mlx5_core_dev *dev, u16 vecidx) { struct mlx5_eq_table *table = dev->priv.eq_table; + struct mlx5_irq *irq; + + irq = xa_load(&table->comp_irqs, vecidx); + if (!irq) + return; - mlx5_irqs_release_vectors(table->comp_irqs, table->num_comp_eqs); + xa_erase(&table->comp_irqs, vecidx); + mlx5_irq_release_vector(irq); } -static int comp_irqs_request_pci(struct mlx5_core_dev *dev) +static int mlx5_cpumask_default_spread(int numa_node, int index) { - struct mlx5_eq_table *table = dev->priv.eq_table; const struct cpumask *prev = cpu_none_mask; const struct cpumask *mask; - int ncomp_eqs; - u16 *cpus; - int ret; + int found_cpu = 0; + int i = 0; int cpu; - int i; - - ncomp_eqs = table->num_comp_eqs; - cpus = kcalloc(ncomp_eqs, sizeof(*cpus), GFP_KERNEL); - if (!cpus) - return -ENOMEM; - i = 0; rcu_read_lock(); - for_each_numa_hop_mask(mask, dev->priv.numa_node) { + for_each_numa_hop_mask(mask, numa_node) { for_each_cpu_andnot(cpu, mask, prev) { - cpus[i] = cpu; - if (++i == ncomp_eqs) + if (i++ == index) { + found_cpu = cpu; goto spread_done; + } } prev = mask; } + spread_done: rcu_read_unlock(); - ret = mlx5_irqs_request_vectors(dev, cpus, ncomp_eqs, table->comp_irqs, &table->rmap); - kfree(cpus); - return ret; + return found_cpu; } -static void comp_irqs_release_sf(struct mlx5_core_dev *dev) +static struct cpu_rmap *mlx5_eq_table_get_pci_rmap(struct mlx5_core_dev *dev) { - struct mlx5_eq_table *table = dev->priv.eq_table; - - mlx5_irq_affinity_irqs_release(dev, table->comp_irqs, table->num_comp_eqs); +#ifdef CONFIG_RFS_ACCEL +#ifdef CONFIG_MLX5_SF + if (mlx5_core_is_sf(dev)) + return dev->priv.parent_mdev->priv.eq_table->rmap; +#endif + return dev->priv.eq_table->rmap; +#else + return NULL; +#endif } -static int comp_irqs_request_sf(struct mlx5_core_dev *dev) +static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx) { struct mlx5_eq_table *table = dev->priv.eq_table; - int ncomp_eqs = table->num_comp_eqs; + struct cpu_rmap *rmap; + struct mlx5_irq *irq; + int cpu; + + rmap = mlx5_eq_table_get_pci_rmap(dev); + cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vecidx); + irq = mlx5_irq_request_vector(dev, cpu, vecidx, &rmap); + if (IS_ERR(irq)) + return PTR_ERR(irq); - return mlx5_irq_affinity_irqs_request_auto(dev, ncomp_eqs, table->comp_irqs); + return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL)); } -static void comp_irqs_release(struct mlx5_core_dev *dev) +static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx) { struct mlx5_eq_table *table = dev->priv.eq_table; + struct mlx5_irq *irq; + int cpu; - mlx5_core_is_sf(dev) ? 
comp_irqs_release_sf(dev) : - comp_irqs_release_pci(dev); + irq = xa_load(&table->comp_irqs, vecidx); + if (!irq) + return; - kfree(table->comp_irqs); + cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq)); + cpumask_clear_cpu(cpu, &table->used_cpus); + xa_erase(&table->comp_irqs, vecidx); + mlx5_irq_affinity_irq_release(dev, irq); } -static int comp_irqs_request(struct mlx5_core_dev *dev) +static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx) { struct mlx5_eq_table *table = dev->priv.eq_table; - int ncomp_eqs; - int ret; - - ncomp_eqs = table->num_comp_eqs; - table->comp_irqs = kcalloc(ncomp_eqs, sizeof(*table->comp_irqs), GFP_KERNEL); - if (!table->comp_irqs) - return -ENOMEM; + struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev); + struct irq_affinity_desc af_desc = {}; + struct mlx5_irq *irq; + + /* In case SF irq pool does not exist, fallback to the PF irqs*/ + if (!mlx5_irq_pool_is_sf_pool(pool)) + return comp_irq_request_pci(dev, vecidx); + + af_desc.is_managed = 1; + cpumask_copy(&af_desc.mask, cpu_online_mask); + cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus); + irq = mlx5_irq_affinity_request(pool, &af_desc); + if (IS_ERR(irq)) + return PTR_ERR(irq); + + cpumask_or(&table->used_cpus, &table->used_cpus, mlx5_irq_get_affinity_mask(irq)); + mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n", + pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)), + cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)), + mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ); + + return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL)); +} - ret = mlx5_core_is_sf(dev) ? comp_irqs_request_sf(dev) : - comp_irqs_request_pci(dev); - if (ret < 0) - kfree(table->comp_irqs); +static void comp_irq_release(struct mlx5_core_dev *dev, u16 vecidx) +{ + mlx5_core_is_sf(dev) ? comp_irq_release_sf(dev, vecidx) : + comp_irq_release_pci(dev, vecidx); +} - return ret; +static int comp_irq_request(struct mlx5_core_dev *dev, u16 vecidx) +{ + return mlx5_core_is_sf(dev) ? 
comp_irq_request_sf(dev, vecidx) : + comp_irq_request_pci(dev, vecidx); } #ifdef CONFIG_RFS_ACCEL @@ -901,7 +950,7 @@ static int alloc_rmap(struct mlx5_core_dev *mdev) if (mlx5_core_is_sf(mdev)) return 0; - eq_table->rmap = alloc_irq_cpu_rmap(eq_table->num_comp_eqs); + eq_table->rmap = alloc_irq_cpu_rmap(eq_table->max_comp_eqs); if (!eq_table->rmap) return -ENOMEM; return 0; @@ -921,22 +970,19 @@ static int alloc_rmap(struct mlx5_core_dev *mdev) { return 0; } static void free_rmap(struct mlx5_core_dev *mdev) {} #endif -static void destroy_comp_eqs(struct mlx5_core_dev *dev) +static void destroy_comp_eq(struct mlx5_core_dev *dev, struct mlx5_eq_comp *eq, u16 vecidx) { struct mlx5_eq_table *table = dev->priv.eq_table; - struct mlx5_eq_comp *eq, *n; - - list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { - list_del(&eq->list); - mlx5_eq_disable(dev, &eq->core, &eq->irq_nb); - if (destroy_unmap_eq(dev, &eq->core)) - mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n", - eq->core.eqn); - tasklet_disable(&eq->tasklet_ctx.task); - kfree(eq); - } - comp_irqs_release(dev); - free_rmap(dev); + + xa_erase(&table->comp_eqs, vecidx); + mlx5_eq_disable(dev, &eq->core, &eq->irq_nb); + if (destroy_unmap_eq(dev, &eq->core)) + mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n", + eq->core.eqn); + tasklet_disable(&eq->tasklet_ctx.task); + kfree(eq); + comp_irq_release(dev, vecidx); + table->curr_comp_eqs--; } static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev) @@ -954,129 +1000,149 @@ static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev) return MLX5_COMP_EQ_SIZE; } -static int create_comp_eqs(struct mlx5_core_dev *dev) +/* Must be called with EQ table comp_lock held */ +static int create_comp_eq(struct mlx5_core_dev *dev, u16 vecidx) { struct mlx5_eq_table *table = dev->priv.eq_table; + struct mlx5_eq_param param = {}; struct mlx5_eq_comp *eq; - int ncomp_eqs; + struct mlx5_irq *irq; int nent; int err; - int i; - err = alloc_rmap(dev); + lockdep_assert_held(&table->comp_lock); + if (table->curr_comp_eqs == table->max_comp_eqs) { + mlx5_core_err(dev, "maximum number of vectors is allocated, %d\n", + table->max_comp_eqs); + return -ENOMEM; + } + + err = comp_irq_request(dev, vecidx); if (err) return err; - ncomp_eqs = comp_irqs_request(dev); - if (ncomp_eqs < 0) { - err = ncomp_eqs; - goto err_irqs_req; - } - - INIT_LIST_HEAD(&table->comp_eqs_list); nent = comp_eq_depth_devlink_param_get(dev); - for (i = 0; i < ncomp_eqs; i++) { - struct mlx5_eq_param param = {}; + eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node); + if (!eq) { + err = -ENOMEM; + goto clean_irq; + } - eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node); - if (!eq) { - err = -ENOMEM; - goto clean; - } + INIT_LIST_HEAD(&eq->tasklet_ctx.list); + INIT_LIST_HEAD(&eq->tasklet_ctx.process_list); + spin_lock_init(&eq->tasklet_ctx.lock); + tasklet_setup(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb); - INIT_LIST_HEAD(&eq->tasklet_ctx.list); - INIT_LIST_HEAD(&eq->tasklet_ctx.process_list); - spin_lock_init(&eq->tasklet_ctx.lock); - tasklet_setup(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb); - - eq->irq_nb.notifier_call = mlx5_eq_comp_int; - param = (struct mlx5_eq_param) { - .irq = table->comp_irqs[i], - .nent = nent, - }; - - err = create_map_eq(dev, &eq->core, ¶m); - if (err) - goto clean_eq; - err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb); - if (err) { - destroy_unmap_eq(dev, &eq->core); - goto clean_eq; - } + irq = xa_load(&table->comp_irqs, vecidx); + 
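[Editor's note] Completion EQs are no longer created in bulk at table-create time; create_comp_eq() runs lazily, once per vector, under the new comp_lock. A sketch of the lookup-or-create pattern that mlx5_comp_eqn_get() (further down) layers on top of this function, assuming the locking shown in the patch:

	mutex_lock(&table->comp_lock);
	eq = xa_load(&table->comp_eqs, vecidx);
	if (eq)
		eqn = eq->core.eqn;                 /* created by an earlier user */
	else
		eqn = create_comp_eq(dev, vecidx);  /* allocate IRQ + EQ now; <0 on error */
	mutex_unlock(&table->comp_lock);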
eq->irq_nb.notifier_call = mlx5_eq_comp_int; + param = (struct mlx5_eq_param) { + .irq = irq, + .nent = nent, + }; - mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn); - /* add tail, to keep the list ordered, for mlx5_vector2eqn to work */ - list_add_tail(&eq->list, &table->comp_eqs_list); + err = create_map_eq(dev, &eq->core, ¶m); + if (err) + goto clean_eq; + err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb); + if (err) { + destroy_unmap_eq(dev, &eq->core); + goto clean_eq; } - table->num_comp_eqs = ncomp_eqs; - return 0; + mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn); + err = xa_err(xa_store(&table->comp_eqs, vecidx, eq, GFP_KERNEL)); + if (err) + goto disable_eq; + + table->curr_comp_eqs++; + return eq->core.eqn; +disable_eq: + mlx5_eq_disable(dev, &eq->core, &eq->irq_nb); clean_eq: kfree(eq); -clean: - destroy_comp_eqs(dev); -err_irqs_req: - free_rmap(dev); +clean_irq: + comp_irq_release(dev, vecidx); return err; } -static int vector2eqnirqn(struct mlx5_core_dev *dev, int vector, int *eqn, - unsigned int *irqn) +int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn) { struct mlx5_eq_table *table = dev->priv.eq_table; struct mlx5_eq_comp *eq; - int err = -ENOENT; - int i = 0; + int ret = 0; - list_for_each_entry(eq, &table->comp_eqs_list, list) { - if (i++ == vector) { - if (irqn) - *irqn = eq->core.irqn; - if (eqn) - *eqn = eq->core.eqn; - err = 0; - break; - } + mutex_lock(&table->comp_lock); + eq = xa_load(&table->comp_eqs, vecidx); + if (eq) { + *eqn = eq->core.eqn; + goto out; } - return err; -} + ret = create_comp_eq(dev, vecidx); + if (ret < 0) { + mutex_unlock(&table->comp_lock); + return ret; + } -int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn) -{ - return vector2eqnirqn(dev, vector, eqn, NULL); + *eqn = ret; +out: + mutex_unlock(&table->comp_lock); + return 0; } -EXPORT_SYMBOL(mlx5_vector2eqn); +EXPORT_SYMBOL(mlx5_comp_eqn_get); -int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn) +int mlx5_comp_irqn_get(struct mlx5_core_dev *dev, int vector, unsigned int *irqn) { - return vector2eqnirqn(dev, vector, NULL, irqn); + struct mlx5_eq_table *table = dev->priv.eq_table; + struct mlx5_eq_comp *eq; + int eqn; + int err; + + /* Allocate the EQ if not allocated yet */ + err = mlx5_comp_eqn_get(dev, vector, &eqn); + if (err) + return err; + + eq = xa_load(&table->comp_eqs, vector); + *irqn = eq->core.irqn; + return 0; } -unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev) +unsigned int mlx5_comp_vectors_max(struct mlx5_core_dev *dev) { - return dev->priv.eq_table->num_comp_eqs; + return dev->priv.eq_table->max_comp_eqs; } -EXPORT_SYMBOL(mlx5_comp_vectors_count); +EXPORT_SYMBOL(mlx5_comp_vectors_max); -struct cpumask * +static struct cpumask * mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector) { struct mlx5_eq_table *table = dev->priv.eq_table; struct mlx5_eq_comp *eq; - int i = 0; - list_for_each_entry(eq, &table->comp_eqs_list, list) { - if (i++ == vector) - return mlx5_irq_get_affinity_mask(eq->core.irq); - } + eq = xa_load(&table->comp_eqs, vector); + if (eq) + return mlx5_irq_get_affinity_mask(eq->core.irq); - WARN_ON_ONCE(1); return NULL; } -EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask); + +int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector) +{ + struct cpumask *mask; + int cpu; + + mask = mlx5_comp_irq_get_affinity_mask(dev, vector); + if (mask) + cpu = cpumask_first(mask); + else + cpu = 
mlx5_cpumask_default_spread(dev->priv.numa_node, vector); + + return cpu; +} +EXPORT_SYMBOL(mlx5_comp_vector_get_cpu); #ifdef CONFIG_RFS_ACCEL struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev) @@ -1089,11 +1155,11 @@ struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn) { struct mlx5_eq_table *table = dev->priv.eq_table; struct mlx5_eq_comp *eq; + unsigned long index; - list_for_each_entry(eq, &table->comp_eqs_list, list) { + xa_for_each(&table->comp_eqs, index, eq) if (eq->core.eqn == eqn) return eq; - } return ERR_PTR(-ENOENT); } @@ -1101,11 +1167,7 @@ struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn) /* This function should only be called after mlx5_cmd_force_teardown_hca */ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev) { - struct mlx5_eq_table *table = dev->priv.eq_table; - - mutex_lock(&table->lock); /* sync with create/destroy_async_eq */ mlx5_irq_table_free_irqs(dev); - mutex_unlock(&table->lock); } #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING @@ -1148,22 +1210,22 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev) struct mlx5_eq_table *eq_table = dev->priv.eq_table; int err; - eq_table->num_comp_eqs = get_num_eqs(dev); + eq_table->max_comp_eqs = get_num_eqs(dev); err = create_async_eqs(dev); if (err) { mlx5_core_err(dev, "Failed to create async EQs\n"); goto err_async_eqs; } - err = create_comp_eqs(dev); + err = alloc_rmap(dev); if (err) { - mlx5_core_err(dev, "Failed to create completion EQs\n"); - goto err_comp_eqs; + mlx5_core_err(dev, "Failed to allocate rmap\n"); + goto err_rmap; } return 0; -err_comp_eqs: +err_rmap: destroy_async_eqs(dev); err_async_eqs: return err; @@ -1171,7 +1233,14 @@ err_async_eqs: void mlx5_eq_table_destroy(struct mlx5_core_dev *dev) { - destroy_comp_eqs(dev); + struct mlx5_eq_table *table = dev->priv.eq_table; + struct mlx5_eq_comp *eq; + unsigned long index; + + xa_for_each(&table->comp_eqs, index, eq) + destroy_comp_eq(dev, eq, index); + + free_rmap(dev); destroy_async_eqs(dev); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c index f4fe1daa4afd..1b9bc32efd6f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c @@ -652,30 +652,30 @@ mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id, struct mlx5_esw_bridge_vlan *vlan, u32 counter_id, struct mlx5_esw_bridge *bridge) { - struct mlx5_devcom *devcom = bridge->br_offloads->esw->dev->priv.devcom; + struct mlx5_devcom_comp_dev *devcom = bridge->br_offloads->esw->devcom, *pos; struct mlx5_eswitch *tmp, *peer_esw = NULL; static struct mlx5_flow_handle *handle; - int i; - if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) + if (!mlx5_devcom_for_each_peer_begin(devcom)) return ERR_PTR(-ENODEV); - mlx5_devcom_for_each_peer_entry(devcom, - MLX5_DEVCOM_ESW_OFFLOADS, - tmp, i) { + mlx5_devcom_for_each_peer_entry(devcom, tmp, pos) { if (mlx5_esw_is_owner(tmp, vport_num, esw_owner_vhca_id)) { peer_esw = tmp; break; } } + if (!peer_esw) { - mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS); - return ERR_PTR(-ENODEV); + handle = ERR_PTR(-ENODEV); + goto out; } handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id, bridge, peer_esw); - mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS); + +out: + mlx5_devcom_for_each_peer_end(devcom); return handle; } @@ -1391,8 +1391,8 @@ 
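[Editor's note, closing out the eq.c changes above] Consumers no longer index a prebuilt EQ list; they ask for a vector and the EQ (with its IRQ) materializes on first use. A minimal caller sketch, limited to functions introduced or exported by this patch:

	int eqn, cpu, err;
	unsigned int irqn;

	err = mlx5_comp_eqn_get(mdev, vecidx, &eqn);   /* creates the comp EQ on demand */
	if (err)
		return err;

	err = mlx5_comp_irqn_get(mdev, vecidx, &irqn); /* Linux IRQ backing that vector */
	if (err)
		return err;

	cpu = mlx5_comp_vector_get_cpu(mdev, vecidx);  /* affinity hint; falls back to the
							* NUMA spread before EQ creation */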
mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow mlx5_fc_id(counter), bridge); if (IS_ERR(handle)) { err = PTR_ERR(handle); - esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d)\n", - vport_num, err); + esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d,peer=%d)\n", + vport_num, err, peer); goto err_ingress_flow_create; } entry->ingress_handle = handle; @@ -1748,6 +1748,28 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 entry->lastuse = jiffies; } +void mlx5_esw_bridge_fdb_mark_deleted(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct switchdev_notifier_fdb_info *fdb_info) +{ + struct mlx5_esw_bridge_fdb_entry *entry; + struct mlx5_esw_bridge *bridge; + + bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads); + if (!bridge) + return; + + entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid); + if (!entry) { + esw_debug(br_offloads->esw->dev, + "FDB mark deleted entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n", + fdb_info->addr, fdb_info->vid, vport_num); + return; + } + + entry->flags |= MLX5_ESW_BRIDGE_FLAG_DELETED; +} + void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id, struct mlx5_esw_bridge_offloads *br_offloads, struct switchdev_notifier_fdb_info *fdb_info) @@ -1810,7 +1832,8 @@ void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads) unsigned long lastuse = (unsigned long)mlx5_fc_query_lastuse(entry->ingress_counter); - if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER) + if (entry->flags & (MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER | + MLX5_ESW_BRIDGE_FLAG_DELETED)) continue; if (time_after(lastuse, entry->lastuse)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h index c2c7c70d99eb..d6f539161993 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h @@ -62,6 +62,9 @@ int mlx5_esw_bridge_vport_peer_unlink(struct net_device *br_netdev, u16 vport_nu void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id, struct mlx5_esw_bridge_offloads *br_offloads, struct switchdev_notifier_fdb_info *fdb_info); +void mlx5_esw_bridge_fdb_mark_deleted(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct switchdev_notifier_fdb_info *fdb_info); void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id, struct mlx5_esw_bridge_offloads *br_offloads, struct switchdev_notifier_fdb_info *fdb_info); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c index 2455f8b93c1e..a7ed87e9d842 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c @@ -78,6 +78,8 @@ mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_md xa_for_each(&entry->ports, idx, port) { dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dests[i].ft = port->mcast.ft; + if (port->vport_num == MLX5_VPORT_UPLINK) + dests[i].ft->flags |= MLX5_FLOW_TABLE_UPLINK_VPORT; i++; } @@ -539,30 +541,29 @@ mlx5_esw_bridge_mcast_filter_flow_create(struct mlx5_esw_bridge_port *port) static struct mlx5_flow_handle 
* mlx5_esw_bridge_mcast_filter_flow_peer_create(struct mlx5_esw_bridge_port *port) { - struct mlx5_devcom *devcom = port->bridge->br_offloads->esw->dev->priv.devcom; + struct mlx5_devcom_comp_dev *devcom = port->bridge->br_offloads->esw->devcom, *pos; struct mlx5_eswitch *tmp, *peer_esw = NULL; static struct mlx5_flow_handle *handle; - int i; - if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) + if (!mlx5_devcom_for_each_peer_begin(devcom)) return ERR_PTR(-ENODEV); - mlx5_devcom_for_each_peer_entry(devcom, - MLX5_DEVCOM_ESW_OFFLOADS, - tmp, i) { + mlx5_devcom_for_each_peer_entry(devcom, tmp, pos) { if (mlx5_esw_is_owner(tmp, port->vport_num, port->esw_owner_vhca_id)) { peer_esw = tmp; break; } } + if (!peer_esw) { - mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS); - return ERR_PTR(-ENODEV); + handle = ERR_PTR(-ENODEV); + goto out; } handle = mlx5_esw_bridge_mcast_flow_with_esw_create(port, peer_esw); - mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS); +out: + mlx5_devcom_for_each_peer_end(devcom); return handle; } @@ -586,10 +587,6 @@ mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_po if (!rule_spec) return ERR_PTR(-ENOMEM); - if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) && - port->vport_num == MLX5_VPORT_UPLINK) - rule_spec->flow_context.flow_source = - MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT; rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; @@ -661,11 +658,6 @@ mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port) if (!rule_spec) return ERR_PTR(-ENOMEM); - if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) && - port->vport_num == MLX5_VPORT_UPLINK) - rule_spec->flow_context.flow_source = - MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT; - if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) { dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID; dest.vport.vhca_id = port->esw_owner_vhca_id; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h index 4911cc32161b..7c251af566c6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h @@ -133,6 +133,7 @@ struct mlx5_esw_bridge_mdb_key { enum { MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER = BIT(0), MLX5_ESW_BRIDGE_FLAG_PEER = BIT(1), + MLX5_ESW_BRIDGE_FLAG_DELETED = BIT(2), }; enum { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index fdf2be548e85..d8e739cbcbce 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -16,39 +16,28 @@ mlx5_esw_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_i static bool mlx5_esw_devlink_port_supported(struct mlx5_eswitch *esw, u16 vport_num) { - return vport_num == MLX5_VPORT_UPLINK || - (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) || + return (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) || mlx5_eswitch_is_vf_vport(esw, vport_num) || mlx5_core_is_ec_vf_vport(esw->dev, vport_num); } -static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16 vport_num) +static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *esw, + u16 vport_num, + struct devlink_port *dl_port) { struct mlx5_core_dev 
*dev = esw->dev; - struct devlink_port_attrs attrs = {}; struct netdev_phys_item_id ppid = {}; - struct devlink_port *dl_port; u32 controller_num = 0; bool external; u16 pfnum; - dl_port = kzalloc(sizeof(*dl_port), GFP_KERNEL); - if (!dl_port) - return NULL; - mlx5_esw_get_port_parent_id(dev, &ppid); pfnum = mlx5_get_dev_index(dev); external = mlx5_core_is_ecpf_esw_manager(dev); if (external) controller_num = dev->priv.eswitch->offloads.host_number + 1; - if (vport_num == MLX5_VPORT_UPLINK) { - attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; - attrs.phys.port_number = pfnum; - memcpy(attrs.switch_id.id, ppid.id, ppid.id_len); - attrs.switch_id.id_len = ppid.id_len; - devlink_port_attrs_set(dl_port, &attrs); - } else if (vport_num == MLX5_VPORT_PF) { + if (vport_num == MLX5_VPORT_PF) { memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len); dl_port->attrs.switch_id.id_len = ppid.id_len; devlink_port_attrs_pci_pf_set(dl_port, controller_num, pfnum, external); @@ -63,91 +52,83 @@ static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16 devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum, vport_num - 1, false); } - return dl_port; } -static void mlx5_esw_dl_port_free(struct devlink_port *dl_port) +int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) { - kfree(dl_port); -} - -static const struct devlink_port_ops mlx5_esw_dl_port_ops = { - .port_fn_hw_addr_get = mlx5_devlink_port_fn_hw_addr_get, - .port_fn_hw_addr_set = mlx5_devlink_port_fn_hw_addr_set, - .port_fn_roce_get = mlx5_devlink_port_fn_roce_get, - .port_fn_roce_set = mlx5_devlink_port_fn_roce_set, - .port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get, - .port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set, -}; - -int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num) -{ - struct mlx5_core_dev *dev = esw->dev; - struct devlink_port *dl_port; - unsigned int dl_port_index; - struct mlx5_vport *vport; - struct devlink *devlink; - int err; + struct mlx5_devlink_port *dl_port; + u16 vport_num = vport->vport; if (!mlx5_esw_devlink_port_supported(esw, vport_num)) return 0; - vport = mlx5_eswitch_get_vport(esw, vport_num); - if (IS_ERR(vport)) - return PTR_ERR(vport); - - dl_port = mlx5_esw_dl_port_alloc(esw, vport_num); + dl_port = kzalloc(sizeof(*dl_port), GFP_KERNEL); if (!dl_port) return -ENOMEM; - devlink = priv_to_devlink(dev); - dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num); - err = devl_port_register_with_ops(devlink, dl_port, dl_port_index, - &mlx5_esw_dl_port_ops); - if (err) - goto reg_err; - - err = devl_rate_leaf_create(dl_port, vport, NULL); - if (err) - goto rate_err; + mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(esw, vport_num, + &dl_port->dl_port); vport->dl_port = dl_port; + mlx5_devlink_port_init(dl_port, vport); return 0; - -rate_err: - devl_port_unregister(dl_port); -reg_err: - mlx5_esw_dl_port_free(dl_port); - return err; } -void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num) +void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) { - struct mlx5_vport *vport; - - if (!mlx5_esw_devlink_port_supported(esw, vport_num)) + if (!vport->dl_port) return; - vport = mlx5_eswitch_get_vport(esw, vport_num); - if (IS_ERR(vport)) - return; + kfree(vport->dl_port); + vport->dl_port = NULL; +} - if (vport->dl_port->devlink_rate) { - mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL); - 
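[Editor's note] The devlink_port.c refactor splits port handling into an init phase (allocate struct mlx5_devlink_port, set PF/VF/SF attrs, hang it on the vport) and a register phase (publish to devlink plus the rate leaf), with matching teardown. A sketch of the intended lifecycle, assuming the helpers introduced in this patch are invoked in vport setup/teardown order:

	/* vport setup */
	err = mlx5_esw_offloads_pf_vf_devlink_port_init(esw, vport); /* attrs only */
	if (err)
		return err;
	err = mlx5_esw_offloads_devlink_port_register(esw, vport);   /* publish + rate leaf */
	if (err)
		goto err_cleanup;
	...
	/* vport teardown, reverse order */
	mlx5_esw_offloads_devlink_port_unregister(esw, vport);
err_cleanup:
	mlx5_esw_offloads_pf_vf_devlink_port_cleanup(esw, vport);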
devl_rate_leaf_destroy(vport->dl_port); - } +static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = { + .port_fn_hw_addr_get = mlx5_devlink_port_fn_hw_addr_get, + .port_fn_hw_addr_set = mlx5_devlink_port_fn_hw_addr_set, + .port_fn_roce_get = mlx5_devlink_port_fn_roce_get, + .port_fn_roce_set = mlx5_devlink_port_fn_roce_set, + .port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get, + .port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set, +#ifdef CONFIG_XFRM_OFFLOAD + .port_fn_ipsec_crypto_get = mlx5_devlink_port_fn_ipsec_crypto_get, + .port_fn_ipsec_crypto_set = mlx5_devlink_port_fn_ipsec_crypto_set, + .port_fn_ipsec_packet_get = mlx5_devlink_port_fn_ipsec_packet_get, + .port_fn_ipsec_packet_set = mlx5_devlink_port_fn_ipsec_packet_set, +#endif /* CONFIG_XFRM_OFFLOAD */ +}; - devl_port_unregister(vport->dl_port); - mlx5_esw_dl_port_free(vport->dl_port); - vport->dl_port = NULL; +static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw, + struct devlink_port *dl_port, + u32 controller, u32 sfnum) +{ + struct mlx5_core_dev *dev = esw->dev; + struct netdev_phys_item_id ppid = {}; + u16 pfnum; + + pfnum = mlx5_get_dev_index(dev); + mlx5_esw_get_port_parent_id(dev, &ppid); + memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len); + dl_port->attrs.switch_id.id_len = ppid.id_len; + devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller); } -struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num) +int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + struct mlx5_devlink_port *dl_port, + u32 controller, u32 sfnum) { - struct mlx5_vport *vport; + mlx5_esw_offloads_sf_devlink_port_attrs_set(esw, &dl_port->dl_port, controller, sfnum); - vport = mlx5_eswitch_get_vport(esw, vport_num); - return IS_ERR(vport) ? 
ERR_CAST(vport) : vport->dl_port; + vport->dl_port = dl_port; + mlx5_devlink_port_init(dl_port, vport); + return 0; +} + +void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) +{ + vport->dl_port = NULL; } static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = { @@ -164,58 +145,62 @@ static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = { #endif }; -int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port, - u16 vport_num, u32 controller, u32 sfnum) +int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { struct mlx5_core_dev *dev = esw->dev; - struct netdev_phys_item_id ppid = {}; + const struct devlink_port_ops *ops; + struct mlx5_devlink_port *dl_port; + u16 vport_num = vport->vport; unsigned int dl_port_index; - struct mlx5_vport *vport; struct devlink *devlink; - u16 pfnum; int err; - vport = mlx5_eswitch_get_vport(esw, vport_num); - if (IS_ERR(vport)) - return PTR_ERR(vport); + dl_port = vport->dl_port; + if (!dl_port) + return 0; + + if (mlx5_esw_is_sf_vport(esw, vport_num)) + ops = &mlx5_esw_dl_sf_port_ops; + else if (mlx5_eswitch_is_pf_vf_vport(esw, vport_num)) + ops = &mlx5_esw_pf_vf_dl_port_ops; + else + ops = NULL; - pfnum = mlx5_get_dev_index(dev); - mlx5_esw_get_port_parent_id(dev, &ppid); - memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len); - dl_port->attrs.switch_id.id_len = ppid.id_len; - devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller); devlink = priv_to_devlink(dev); dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num); - err = devl_port_register_with_ops(devlink, dl_port, dl_port_index, - &mlx5_esw_dl_sf_port_ops); + err = devl_port_register_with_ops(devlink, &dl_port->dl_port, dl_port_index, ops); if (err) return err; - err = devl_rate_leaf_create(dl_port, vport, NULL); + err = devl_rate_leaf_create(&dl_port->dl_port, vport, NULL); if (err) goto rate_err; - vport->dl_port = dl_port; return 0; rate_err: - devl_port_unregister(dl_port); + devl_port_unregister(&dl_port->dl_port); return err; } -void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num) +void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { - struct mlx5_vport *vport; + struct mlx5_devlink_port *dl_port; - vport = mlx5_eswitch_get_vport(esw, vport_num); - if (IS_ERR(vport)) + if (!vport->dl_port) return; + dl_port = vport->dl_port; - if (vport->dl_port->devlink_rate) { - mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL); - devl_rate_leaf_destroy(vport->dl_port); - } + mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL); + devl_rate_leaf_destroy(&dl_port->dl_port); - devl_port_unregister(vport->dl_port); - vport->dl_port = NULL; + devl_port_unregister(&dl_port->dl_port); +} + +struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num) +{ + struct mlx5_vport *vport; + + vport = mlx5_eswitch_get_vport(esw, vport_num); + return IS_ERR(vport) ? ERR_CAST(vport) : &vport->dl_port->dl_port; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c new file mode 100644 index 000000000000..da10e04777cf --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c @@ -0,0 +1,369 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
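[Editor's note] The new esw/ipsec.c below repeats one building block throughout: query another function's HCA capabilities into a scratch buffer, then read (or copy-modify-set) individual fields with the MLX5_GET/MLX5_SET accessors. A condensed sketch of the query side, using only calls that appear in the file:

	int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_cap, *hca_cap;
	bool enabled;
	int err;

	query_cap = kvzalloc(query_sz, GFP_KERNEL);
	if (!query_cap)
		return -ENOMEM;

	err = mlx5_vport_get_other_func_general_cap(dev, vport_num, query_cap);
	if (!err) {
		hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
		enabled = MLX5_GET(cmd_hca_cap, hca_cap, ipsec_offload);
	}
	kvfree(query_cap);
	return err;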
+ +#include <linux/mlx5/device.h> +#include <linux/mlx5/vport.h> +#include "mlx5_core.h" +#include "eswitch.h" + +static int esw_ipsec_vf_query_generic(struct mlx5_core_dev *dev, u16 vport_num, bool *result) +{ + int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); + void *hca_cap, *query_cap; + int err; + + if (!MLX5_CAP_GEN(dev, vhca_resource_manager)) + return -EOPNOTSUPP; + + if (!mlx5_esw_ipsec_vf_offload_supported(dev)) { + *result = false; + return 0; + } + + query_cap = kvzalloc(query_sz, GFP_KERNEL); + if (!query_cap) + return -ENOMEM; + + err = mlx5_vport_get_other_func_general_cap(dev, vport_num, query_cap); + if (err) + goto free; + + hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability); + *result = MLX5_GET(cmd_hca_cap, hca_cap, ipsec_offload); +free: + kvfree(query_cap); + return err; +} + +enum esw_vport_ipsec_offload { + MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD, + MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD, +}; + +int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev, struct mlx5_vport *vport) +{ + int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); + void *hca_cap, *query_cap; + bool ipsec_enabled; + int err; + + /* Querying IPsec caps only makes sense when generic ipsec_offload + * HCA cap is enabled + */ + err = esw_ipsec_vf_query_generic(dev, vport->vport, &ipsec_enabled); + if (err) + return err; + + if (!ipsec_enabled) { + vport->info.ipsec_crypto_enabled = false; + vport->info.ipsec_packet_enabled = false; + return 0; + } + + query_cap = kvzalloc(query_sz, GFP_KERNEL); + if (!query_cap) + return -ENOMEM; + + err = mlx5_vport_get_other_func_cap(dev, vport->vport, query_cap, MLX5_CAP_IPSEC); + if (err) + goto free; + + hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability); + vport->info.ipsec_crypto_enabled = + MLX5_GET(ipsec_cap, hca_cap, ipsec_crypto_offload); + vport->info.ipsec_packet_enabled = + MLX5_GET(ipsec_cap, hca_cap, ipsec_full_offload); +free: + kvfree(query_cap); + return err; +} + +static int esw_ipsec_vf_set_generic(struct mlx5_core_dev *dev, u16 vport_num, bool ipsec_ofld) +{ + int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); + int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); + void *hca_cap, *query_cap, *cap; + int ret; + + if (!MLX5_CAP_GEN(dev, vhca_resource_manager)) + return -EOPNOTSUPP; + + query_cap = kvzalloc(query_sz, GFP_KERNEL); + hca_cap = kvzalloc(set_sz, GFP_KERNEL); + if (!hca_cap || !query_cap) { + ret = -ENOMEM; + goto free; + } + + ret = mlx5_vport_get_other_func_general_cap(dev, vport_num, query_cap); + if (ret) + goto free; + + cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability); + memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability), + MLX5_UN_SZ_BYTES(hca_cap_union)); + MLX5_SET(cmd_hca_cap, cap, ipsec_offload, ipsec_ofld); + + MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP); + MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1); + MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport_num); + + MLX5_SET(set_hca_cap_in, hca_cap, op_mod, + MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1); + ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap); +free: + kvfree(hca_cap); + kvfree(query_cap); + return ret; +} + +static int esw_ipsec_vf_set_bytype(struct mlx5_core_dev *dev, struct mlx5_vport *vport, + bool enable, enum esw_vport_ipsec_offload type) +{ + int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); + int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); + void *hca_cap, *query_cap, *cap; + int ret; + + if (!MLX5_CAP_GEN(dev, vhca_resource_manager)) + return -EOPNOTSUPP; + + query_cap = 
kvzalloc(query_sz, GFP_KERNEL); + hca_cap = kvzalloc(set_sz, GFP_KERNEL); + if (!hca_cap || !query_cap) { + ret = -ENOMEM; + goto free; + } + + ret = mlx5_vport_get_other_func_cap(dev, vport->vport, query_cap, MLX5_CAP_IPSEC); + if (ret) + goto free; + + cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability); + memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability), + MLX5_UN_SZ_BYTES(hca_cap_union)); + + switch (type) { + case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD: + MLX5_SET(ipsec_cap, cap, ipsec_crypto_offload, enable); + break; + case MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD: + MLX5_SET(ipsec_cap, cap, ipsec_full_offload, enable); + break; + default: + ret = -EOPNOTSUPP; + goto free; + } + + MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP); + MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1); + MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport->vport); + + MLX5_SET(set_hca_cap_in, hca_cap, op_mod, + MLX5_SET_HCA_CAP_OP_MOD_IPSEC << 1); + ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap); +free: + kvfree(hca_cap); + kvfree(query_cap); + return ret; +} + +static int esw_ipsec_vf_crypto_aux_caps_set(struct mlx5_core_dev *dev, u16 vport_num, bool enable) +{ + int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); + int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); + struct mlx5_eswitch *esw = dev->priv.eswitch; + void *hca_cap, *query_cap, *cap; + int ret; + + query_cap = kvzalloc(query_sz, GFP_KERNEL); + hca_cap = kvzalloc(set_sz, GFP_KERNEL); + if (!hca_cap || !query_cap) { + ret = -ENOMEM; + goto free; + } + + ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_ETHERNET_OFFLOADS); + if (ret) + goto free; + + cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability); + memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability), + MLX5_UN_SZ_BYTES(hca_cap_union)); + MLX5_SET(per_protocol_networking_offload_caps, cap, insert_trailer, enable); + MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP); + MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1); + MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport_num); + MLX5_SET(set_hca_cap_in, hca_cap, op_mod, + MLX5_SET_HCA_CAP_OP_MOD_ETHERNET_OFFLOADS << 1); + ret = mlx5_cmd_exec_in(esw->dev, set_hca_cap, hca_cap); +free: + kvfree(hca_cap); + kvfree(query_cap); + return ret; +} + +static int esw_ipsec_vf_offload_set_bytype(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + bool enable, enum esw_vport_ipsec_offload type) +{ + struct mlx5_core_dev *dev = esw->dev; + int err; + + if (vport->vport == MLX5_VPORT_PF) + return -EOPNOTSUPP; + + if (type == MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD) { + err = esw_ipsec_vf_crypto_aux_caps_set(dev, vport->vport, enable); + if (err) + return err; + } + + if (enable) { + err = esw_ipsec_vf_set_generic(dev, vport->vport, enable); + if (err) + return err; + err = esw_ipsec_vf_set_bytype(dev, vport, enable, type); + if (err) + return err; + } else { + err = esw_ipsec_vf_set_bytype(dev, vport, enable, type); + if (err) + return err; + err = mlx5_esw_ipsec_vf_offload_get(dev, vport); + if (err) + return err; + + /* The generic ipsec_offload cap can be disabled only if both + * ipsec_crypto_offload and ipsec_full_offload aren't enabled. 
+ */ + if (!vport->info.ipsec_crypto_enabled && + !vport->info.ipsec_packet_enabled) { + err = esw_ipsec_vf_set_generic(dev, vport->vport, enable); + if (err) + return err; + } + } + + switch (type) { + case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD: + vport->info.ipsec_crypto_enabled = enable; + break; + case MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD: + vport->info.ipsec_packet_enabled = enable; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int esw_ipsec_offload_supported(struct mlx5_core_dev *dev, u16 vport_num) +{ + int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); + void *hca_cap, *query_cap; + int ret; + + query_cap = kvzalloc(query_sz, GFP_KERNEL); + if (!query_cap) + return -ENOMEM; + + ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_GENERAL); + if (ret) + goto free; + + hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability); + if (!MLX5_GET(cmd_hca_cap, hca_cap, log_max_dek)) + ret = -EOPNOTSUPP; +free: + kvfree(query_cap); + return ret; +} + +bool mlx5_esw_ipsec_vf_offload_supported(struct mlx5_core_dev *dev) +{ + /* Old firmware doesn't support ipsec_offload capability for VFs. This + * can be detected by checking reformat_add_esp_trasport capability - + * when this cap isn't supported it means firmware cannot be trusted + * about what it reports for ipsec_offload cap. + */ + return MLX5_CAP_FLOWTABLE_NIC_TX(dev, reformat_add_esp_trasport); +} + +int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev, + u16 vport_num) +{ + int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); + void *hca_cap, *query_cap; + int err; + + if (!mlx5_esw_ipsec_vf_offload_supported(dev)) + return -EOPNOTSUPP; + + err = esw_ipsec_offload_supported(dev, vport_num); + if (err) + return err; + + query_cap = kvzalloc(query_sz, GFP_KERNEL); + if (!query_cap) + return -ENOMEM; + + err = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_ETHERNET_OFFLOADS); + if (err) + goto free; + + hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability); + if (!MLX5_GET(per_protocol_networking_offload_caps, hca_cap, swp)) + goto free; + +free: + kvfree(query_cap); + return err; +} + +int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev, + u16 vport_num) +{ + int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); + void *hca_cap, *query_cap; + int ret; + + if (!mlx5_esw_ipsec_vf_offload_supported(dev)) + return -EOPNOTSUPP; + + ret = esw_ipsec_offload_supported(dev, vport_num); + if (ret) + return ret; + + query_cap = kvzalloc(query_sz, GFP_KERNEL); + if (!query_cap) + return -ENOMEM; + + ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_FLOW_TABLE); + if (ret) + goto out; + + hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability); + if (!MLX5_GET(flow_table_nic_cap, hca_cap, flow_table_properties_nic_receive.decap)) { + ret = -EOPNOTSUPP; + goto out; + } + +out: + kvfree(query_cap); + return ret; +} + +int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + bool enable) +{ + return esw_ipsec_vf_offload_set_bytype(esw, vport, enable, + MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD); +} + +int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + bool enable) +{ + return esw_ipsec_vf_offload_set_bytype(esw, vport, enable, + MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c new file mode 100644 index 
000000000000..190f10aba170 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include "fs_core.h" +#include "eswitch.h" +#include "en_accel/ipsec.h" +#include "esw/ipsec_fs.h" +#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) +#include "en/tc_priv.h" +#endif + +enum { + MLX5_ESW_IPSEC_RX_POL_FT_LEVEL, + MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL, + MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL, +}; + +enum { + MLX5_ESW_IPSEC_TX_POL_FT_LEVEL, + MLX5_ESW_IPSEC_TX_ESP_FT_LEVEL, + MLX5_ESW_IPSEC_TX_ESP_FT_CNT_LEVEL, +}; + +void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx_create_attr *attr) +{ + attr->prio = FDB_CRYPTO_INGRESS; + attr->pol_level = MLX5_ESW_IPSEC_RX_POL_FT_LEVEL; + attr->sa_level = MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL; + attr->status_level = MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL; + attr->chains_ns = MLX5_FLOW_NAMESPACE_FDB; +} + +int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec, + struct mlx5_flow_destination *dest) +{ + dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest->ft = mlx5_chains_get_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0); + + return 0; +} + +int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry, + struct mlx5_flow_act *flow_act) +{ + u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; + struct mlx5e_ipsec *ipsec = sa_entry->ipsec; + struct mlx5_core_dev *mdev = ipsec->mdev; + struct mlx5_modify_hdr *modify_hdr; + u32 mapped_id; + int err; + + err = xa_alloc_bh(&ipsec->ipsec_obj_id_map, &mapped_id, + xa_mk_value(sa_entry->ipsec_obj_id), + XA_LIMIT(1, ESW_IPSEC_RX_MAPPED_ID_MASK), 0); + if (err) + return err; + + /* reuse tunnel bits for ipsec, + * tun_id is always 0 and tun_opts is mapped to ipsec_obj_id. 
+ */ + MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); + MLX5_SET(set_action_in, action, field, + MLX5_ACTION_IN_FIELD_METADATA_REG_C_1); + MLX5_SET(set_action_in, action, offset, ESW_ZONE_ID_BITS); + MLX5_SET(set_action_in, action, length, + ESW_TUN_ID_BITS + ESW_TUN_OPTS_BITS); + MLX5_SET(set_action_in, action, data, mapped_id); + + modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB, + 1, action); + if (IS_ERR(modify_hdr)) { + err = PTR_ERR(modify_hdr); + goto err_header_alloc; + } + + sa_entry->rx_mapped_id = mapped_id; + flow_act->modify_hdr = modify_hdr; + flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + + return 0; + +err_header_alloc: + xa_erase_bh(&ipsec->ipsec_obj_id_map, mapped_id); + return err; +} + +void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry) +{ + struct mlx5e_ipsec *ipsec = sa_entry->ipsec; + + if (sa_entry->rx_mapped_id) + xa_erase_bh(&ipsec->ipsec_obj_id_map, + sa_entry->rx_mapped_id); +} + +int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id, + u32 *ipsec_obj_id) +{ + struct mlx5e_ipsec *ipsec = priv->ipsec; + void *val; + + val = xa_load(&ipsec->ipsec_obj_id_map, id); + if (!val) + return -ENOENT; + + *ipsec_obj_id = xa_to_value(val); + + return 0; +} + +void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_tx_create_attr *attr) +{ + attr->prio = FDB_CRYPTO_EGRESS; + attr->pol_level = MLX5_ESW_IPSEC_TX_POL_FT_LEVEL; + attr->sa_level = MLX5_ESW_IPSEC_TX_ESP_FT_LEVEL; + attr->cnt_level = MLX5_ESW_IPSEC_TX_ESP_FT_CNT_LEVEL; + attr->chains_ns = MLX5_FLOW_NAMESPACE_FDB; +} + +#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) +static int mlx5_esw_ipsec_modify_flow_dests(struct mlx5_eswitch *esw, + struct mlx5e_tc_flow *flow) +{ + struct mlx5_esw_flow_attr *esw_attr; + struct mlx5_flow_attr *attr; + int err; + + attr = flow->attr; + esw_attr = attr->esw_attr; + if (esw_attr->out_count - esw_attr->split_count > 1) + return 0; + + err = mlx5_eswitch_restore_ipsec_rule(esw, flow->rule[0], esw_attr, + esw_attr->out_count - 1); + + return err; +} +#endif + +void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev) +{ +#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) + struct mlx5_eswitch *esw = mdev->priv.eswitch; + struct mlx5_eswitch_rep *rep; + struct mlx5e_rep_priv *rpriv; + struct rhashtable_iter iter; + struct mlx5e_tc_flow *flow; + unsigned long i; + int err; + + xa_for_each(&esw->offloads.vport_reps, i, rep) { + rpriv = rep->rep_data[REP_ETH].priv; + if (!rpriv || !rpriv->netdev || !atomic_read(&rpriv->tc_ht.nelems)) + continue; + + rhashtable_walk_enter(&rpriv->tc_ht, &iter); + rhashtable_walk_start(&iter); + while ((flow = rhashtable_walk_next(&iter)) != NULL) { + if (IS_ERR(flow)) + continue; + + err = mlx5_esw_ipsec_modify_flow_dests(esw, flow); + if (err) + mlx5_core_warn_once(mdev, + "Failed to modify flow dests for IPsec"); + } + rhashtable_walk_stop(&iter); + rhashtable_walk_exit(&iter); + } +#endif +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h new file mode 100644 index 000000000000..ac9c65b89166 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
*/ + +#ifndef __MLX5_ESW_IPSEC_FS_H__ +#define __MLX5_ESW_IPSEC_FS_H__ + +struct mlx5e_ipsec; +struct mlx5e_ipsec_sa_entry; + +#ifdef CONFIG_MLX5_ESWITCH +void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx_create_attr *attr); +int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec, + struct mlx5_flow_destination *dest); +int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry, + struct mlx5_flow_act *flow_act); +void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry); +int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id, + u32 *ipsec_obj_id); +void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_tx_create_attr *attr); +void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev); +#else +static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx_create_attr *attr) {} + +static inline int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec, + struct mlx5_flow_destination *dest) +{ + return -EINVAL; +} + +static inline int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry, + struct mlx5_flow_act *flow_act) +{ + return -EINVAL; +} + +static inline void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry) {} + +static inline int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id, + u32 *ipsec_obj_id) +{ + return -EINVAL; +} + +static inline void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_tx_create_attr *attr) {} + +static inline void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev) {} +#endif /* CONFIG_MLX5_ESWITCH */ +#endif /* __MLX5_ESW_IPSEC_FS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 7c79476cc5f9..d2ebe56c3977 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -2,6 +2,7 @@ /* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ #include "eswitch.h" +#include "lib/mlx5.h" #include "esw/qos.h" #include "en/port.h" #define CREATE_TRACE_POINTS @@ -701,10 +702,75 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vpo return err; } +static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev) +{ + struct ethtool_link_ksettings lksettings; + struct net_device *slave, *master; + u32 speed = SPEED_UNKNOWN; + + /* Lock ensures a stable reference to master and slave netdevice + * while port speed of master is queried. 
+ */ + ASSERT_RTNL(); + + slave = mlx5_uplink_netdev_get(mdev); + if (!slave) + goto out; + + master = netdev_master_upper_dev_get(slave); + if (master && !__ethtool_get_link_ksettings(master, &lksettings)) + speed = lksettings.base.speed; + +out: + return speed; +} + +static int mlx5_esw_qos_max_link_speed_get(struct mlx5_core_dev *mdev, u32 *link_speed_max, + bool hold_rtnl_lock, struct netlink_ext_ack *extack) +{ + int err; + + if (!mlx5_lag_is_active(mdev)) + goto skip_lag; + + if (hold_rtnl_lock) + rtnl_lock(); + + *link_speed_max = mlx5_esw_qos_lag_link_speed_get_locked(mdev); + + if (hold_rtnl_lock) + rtnl_unlock(); + + if (*link_speed_max != (u32)SPEED_UNKNOWN) + return 0; + +skip_lag: + err = mlx5_port_max_linkspeed(mdev, link_speed_max); + if (err) + NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed"); + + return err; +} + +static int mlx5_esw_qos_link_speed_verify(struct mlx5_core_dev *mdev, + const char *name, u32 link_speed_max, + u64 value, struct netlink_ext_ack *extack) +{ + if (value > link_speed_max) { + pr_err("%s rate value %lluMbps exceeds link maximum speed %u.\n", + name, value, link_speed_max); + NL_SET_ERR_MSG_MOD(extack, "TX rate value exceeds link maximum speed"); + return -EINVAL; + } + + return 0; +} + int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps) { u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; struct mlx5_vport *vport; + u32 link_speed_max; u32 bitmask; int err; @@ -712,6 +778,17 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 if (IS_ERR(vport)) return PTR_ERR(vport); + if (rate_mbps) { + err = mlx5_esw_qos_max_link_speed_get(esw->dev, &link_speed_max, false, NULL); + if (err) + return err; + + err = mlx5_esw_qos_link_speed_verify(esw->dev, "Police", + link_speed_max, rate_mbps, NULL); + if (err) + return err; + } + mutex_lock(&esw->state_lock); if (!vport->qos.enabled) { /* Eswitch QoS wasn't enabled yet. Enable it and vport QoS. 
*/ @@ -740,30 +817,25 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *name, u64 *rate, struct netlink_ext_ack *extack) { - u32 link_speed_max, reminder; + u32 link_speed_max, remainder; u64 value; int err; - err = mlx5_port_max_linkspeed(mdev, &link_speed_max); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed"); - return err; - } - - value = div_u64_rem(*rate, MLX5_LINKSPEED_UNIT, &reminder); - if (reminder) { + value = div_u64_rem(*rate, MLX5_LINKSPEED_UNIT, &remainder); + if (remainder) { pr_err("%s rate value %lluBps not in link speed units of 1Mbps.\n", name, *rate); NL_SET_ERR_MSG_MOD(extack, "TX rate value not in link speed units of 1Mbps"); return -EINVAL; } - if (value > link_speed_max) { - pr_err("%s rate value %lluMbps exceed link maximum speed %u.\n", - name, value, link_speed_max); - NL_SET_ERR_MSG_MOD(extack, "TX rate value exceed link maximum speed"); - return -EINVAL; - } + err = mlx5_esw_qos_max_link_speed_get(mdev, &link_speed_max, true, extack); + if (err) + return err; + + err = mlx5_esw_qos_link_speed_verify(mdev, name, link_speed_max, value, extack); + if (err) + return err; *rate = value; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 243c455f1029..3047d7015c52 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -48,6 +48,7 @@ #include "devlink.h" #include "ecpf.h" #include "en/mod_hdr.h" +#include "en_accel/ipsec.h" enum { MLX5_ACTION_NONE = 0, @@ -77,18 +78,31 @@ static int mlx5_eswitch_check(const struct mlx5_core_dev *dev) return 0; } -struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink) +static struct mlx5_eswitch *__mlx5_devlink_eswitch_get(struct devlink *devlink, bool check) { struct mlx5_core_dev *dev = devlink_priv(devlink); int err; - err = mlx5_eswitch_check(dev); - if (err) - return ERR_PTR(err); + if (check) { + err = mlx5_eswitch_check(dev); + if (err) + return ERR_PTR(err); + } return dev->priv.eswitch; } +struct mlx5_eswitch *__must_check +mlx5_devlink_eswitch_get(struct devlink *devlink) +{ + return __mlx5_devlink_eswitch_get(devlink, true); +} + +struct mlx5_eswitch *mlx5_devlink_eswitch_nocheck_get(struct devlink *devlink) +{ + return __mlx5_devlink_eswitch_get(devlink, false); +} + struct mlx5_vport *__must_check mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num) { @@ -818,6 +832,8 @@ static int mlx5_esw_vport_caps_get(struct mlx5_eswitch *esw, struct mlx5_vport * hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); vport->info.mig_enabled = MLX5_GET(cmd_hca_cap_2, hca_caps, migratable); + + err = mlx5_esw_ipsec_vf_offload_get(esw->dev, vport); out_free: kfree(query_ctx); return err; @@ -882,16 +898,12 @@ static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport esw_vport_cleanup_acl(esw, vport); } -int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num, +int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport, enum mlx5_eswitch_vport_event enabled_events) { - struct mlx5_vport *vport; + u16 vport_num = vport->vport; int ret; - vport = mlx5_eswitch_get_vport(esw, vport_num); - if (IS_ERR(vport)) - return PTR_ERR(vport); - mutex_lock(&esw->state_lock); WARN_ON(vport->enabled); @@ -904,6 +916,9 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 
vport_num, /* Sync with current vport context */ vport->enabled_events = enabled_events; vport->enabled = true; + if (vport->vport != MLX5_VPORT_PF && + (vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled)) + esw->enabled_ipsec_vf_count++; /* Esw manager is trusted by default. Host PF (vport 0) is trusted as well * in smartNIC as it's a vport group manager. @@ -912,7 +927,7 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num, (!vport_num && mlx5_core_is_ecpf(esw->dev))) vport->info.trusted = true; - if (!mlx5_esw_is_manager_vport(esw, vport->vport) && + if (!mlx5_esw_is_manager_vport(esw, vport_num) && MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) { ret = mlx5_esw_vport_vhca_id_set(esw, vport_num); if (ret) @@ -939,15 +954,12 @@ err_vhca_mapping: return ret; } -void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num) +void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { - struct mlx5_vport *vport; - - vport = mlx5_eswitch_get_vport(esw, vport_num); - if (IS_ERR(vport)) - return; + u16 vport_num = vport->vport; mutex_lock(&esw->state_lock); + if (!vport->enabled) goto done; @@ -957,12 +969,16 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num) /* Disable events from this vport */ if (MLX5_CAP_GEN(esw->dev, log_max_l2_table)) - arm_vport_context_events_cmd(esw->dev, vport->vport, 0); + arm_vport_context_events_cmd(esw->dev, vport_num, 0); - if (!mlx5_esw_is_manager_vport(esw, vport->vport) && + if (!mlx5_esw_is_manager_vport(esw, vport_num) && MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) mlx5_esw_vport_vhca_id_clear(esw, vport_num); + if (vport->vport != MLX5_VPORT_PF && + (vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled)) + esw->enabled_ipsec_vf_count--; + /* We don't assume VFs will cleanup after themselves. * Calling vport change handler while vport is disabled will cleanup * the vport resources. 
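The two hunks above make mlx5_esw_vport_enable()/mlx5_esw_vport_disable() maintain esw->enabled_ipsec_vf_count under esw->state_lock, and a later hunk in this diff adds mlx5_eswitch_block_ipsec()/mlx5_eswitch_unblock_ipsec(), which consult that counter before bumping dev->num_ipsec_offloads. The net effect is mutual exclusion between device-level IPsec offload and IPsec-enabled VFs: each side checks the other side's counter under the same lock before incrementing its own. A minimal userspace C sketch of that gating pattern follows; all names here are illustrative stand-ins, not mlx5 API, and the VF-enable check is an assumption about the side of the series not shown in this hunk.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for esw->state_lock and the two counters in the patch. */
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int enabled_ipsec_vf_count; /* VFs with IPsec enabled */
static unsigned int num_ipsec_offloads;     /* active device IPsec offloads */

/* Mirrors mlx5_eswitch_block_ipsec(): refuse device IPsec offload
 * while any VF has IPsec enabled; otherwise take a reference. */
static bool block_ipsec(void)
{
	bool ok;

	pthread_mutex_lock(&state_lock);
	ok = (enabled_ipsec_vf_count == 0);
	if (ok)
		num_ipsec_offloads++;
	pthread_mutex_unlock(&state_lock);
	return ok;
}

/* Mirrors mlx5_eswitch_unblock_ipsec(): drop the reference. */
static void unblock_ipsec(void)
{
	pthread_mutex_lock(&state_lock);
	num_ipsec_offloads--;
	pthread_mutex_unlock(&state_lock);
}

/* Complementary side (assumed): a VF may turn IPsec on only while no
 * device IPsec offload is active. In the patch the increment itself
 * happens in mlx5_esw_vport_enable(), shown above. */
static bool vf_enable_ipsec(void)
{
	bool ok;

	pthread_mutex_lock(&state_lock);
	ok = (num_ipsec_offloads == 0);
	if (ok)
		enabled_ipsec_vf_count++;
	pthread_mutex_unlock(&state_lock);
	return ok;
}

int main(void)
{
	if (vf_enable_ipsec())
		printf("VF IPsec enabled\n");
	if (!block_ipsec())
		printf("device IPsec offload refused while a VF uses IPsec\n");
	else
		unblock_ipsec();
	return 0;
}

Both paths are control-path only, so a plain mutex plus two counters is enough; no RCU or atomics are needed, which matches the patch's choice of reusing esw->state_lock rather than introducing new synchronization.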
@@ -1022,11 +1038,8 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) return ERR_PTR(err); } -static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw) +static void mlx5_eswitch_event_handler_register(struct mlx5_eswitch *esw) { - MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE); - mlx5_eq_notifier_register(esw->dev, &esw->nb); - if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) { MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler, ESW_FUNCTIONS_CHANGED); @@ -1034,13 +1047,11 @@ static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw) } } -static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw) +static void mlx5_eswitch_event_handler_unregister(struct mlx5_eswitch *esw) { if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb); - mlx5_eq_notifier_unregister(esw->dev, &esw->nb); - flush_workqueue(esw->work_queue); } @@ -1068,31 +1079,104 @@ static void mlx5_eswitch_clear_ec_vf_vports_info(struct mlx5_eswitch *esw) } } -/* Public E-Switch API */ -int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num, - enum mlx5_eswitch_vport_event enabled_events) +static int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + enum mlx5_eswitch_vport_event enabled_events) { int err; - err = mlx5_esw_vport_enable(esw, vport_num, enabled_events); + err = mlx5_esw_vport_enable(esw, vport, enabled_events); if (err) return err; - err = esw_offloads_load_rep(esw, vport_num); + err = mlx5_esw_offloads_load_rep(esw, vport); if (err) goto err_rep; return err; err_rep: - mlx5_esw_vport_disable(esw, vport_num); + mlx5_esw_vport_disable(esw, vport); + return err; +} + +static void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport) +{ + mlx5_esw_offloads_unload_rep(esw, vport); + mlx5_esw_vport_disable(esw, vport); +} + +static int mlx5_eswitch_load_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num, + enum mlx5_eswitch_vport_event enabled_events) +{ + struct mlx5_vport *vport; + int err; + + vport = mlx5_eswitch_get_vport(esw, vport_num); + if (IS_ERR(vport)) + return PTR_ERR(vport); + + err = mlx5_esw_offloads_init_pf_vf_rep(esw, vport); + if (err) + return err; + + err = mlx5_eswitch_load_vport(esw, vport, enabled_events); + if (err) + goto err_load; + return 0; + +err_load: + mlx5_esw_offloads_cleanup_pf_vf_rep(esw, vport); + return err; +} + +static void mlx5_eswitch_unload_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num) +{ + struct mlx5_vport *vport; + + vport = mlx5_eswitch_get_vport(esw, vport_num); + if (IS_ERR(vport)) + return; + + mlx5_eswitch_unload_vport(esw, vport); + mlx5_esw_offloads_cleanup_pf_vf_rep(esw, vport); +} + +int mlx5_eswitch_load_sf_vport(struct mlx5_eswitch *esw, u16 vport_num, + enum mlx5_eswitch_vport_event enabled_events, + struct mlx5_devlink_port *dl_port, u32 controller, u32 sfnum) +{ + struct mlx5_vport *vport; + int err; + + vport = mlx5_eswitch_get_vport(esw, vport_num); + if (IS_ERR(vport)) + return PTR_ERR(vport); + + err = mlx5_esw_offloads_init_sf_rep(esw, vport, dl_port, controller, sfnum); + if (err) + return err; + + err = mlx5_eswitch_load_vport(esw, vport, enabled_events); + if (err) + goto err_load; + + return 0; + +err_load: + mlx5_esw_offloads_cleanup_sf_rep(esw, vport); return err; } -void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num) +void 
mlx5_eswitch_unload_sf_vport(struct mlx5_eswitch *esw, u16 vport_num) { - esw_offloads_unload_rep(esw, vport_num); - mlx5_esw_vport_disable(esw, vport_num); + struct mlx5_vport *vport; + + vport = mlx5_eswitch_get_vport(esw, vport_num); + if (IS_ERR(vport)) + return; + + mlx5_eswitch_unload_vport(esw, vport); + mlx5_esw_offloads_cleanup_sf_rep(esw, vport); } void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs) @@ -1103,7 +1187,7 @@ void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs) mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) { if (!vport->enabled) continue; - mlx5_eswitch_unload_vport(esw, vport->vport); + mlx5_eswitch_unload_pf_vf_vport(esw, vport->vport); } } @@ -1116,7 +1200,7 @@ static void mlx5_eswitch_unload_ec_vf_vports(struct mlx5_eswitch *esw, mlx5_esw_for_each_ec_vf_vport(esw, i, vport, num_ec_vfs) { if (!vport->enabled) continue; - mlx5_eswitch_unload_vport(esw, vport->vport); + mlx5_eswitch_unload_pf_vf_vport(esw, vport->vport); } } @@ -1128,7 +1212,7 @@ int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs, int err; mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) { - err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events); + err = mlx5_eswitch_load_pf_vf_vport(esw, vport->vport, enabled_events); if (err) goto vf_err; } @@ -1148,7 +1232,7 @@ static int mlx5_eswitch_load_ec_vf_vports(struct mlx5_eswitch *esw, u16 num_ec_v int err; mlx5_esw_for_each_ec_vf_vport(esw, i, vport, num_ec_vfs) { - err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events); + err = mlx5_eswitch_load_pf_vf_vport(esw, vport->vport, enabled_events); if (err) goto vf_err; } @@ -1187,12 +1271,19 @@ int mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, enum mlx5_eswitch_vport_event enabled_events) { + bool pf_needed; int ret; + pf_needed = mlx5_core_is_ecpf_esw_manager(esw->dev) || + esw->mode == MLX5_ESWITCH_LEGACY; + /* Enable PF vport */ - ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events); - if (ret) - return ret; + if (pf_needed) { + ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF, + enabled_events); + if (ret) + return ret; + } /* Enable external host PF HCA */ ret = host_pf_enable_hca(esw->dev); @@ -1201,7 +1292,7 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, /* Enable ECPF vport */ if (mlx5_ecpf_vport_exists(esw->dev)) { - ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events); + ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_ECPF, enabled_events); if (ret) goto ecpf_err; if (mlx5_core_ec_sriov_enabled(esw->dev)) { @@ -1224,11 +1315,12 @@ vf_err: mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs); ec_vf_err: if (mlx5_ecpf_vport_exists(esw->dev)) - mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF); + mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF); ecpf_err: host_pf_disable_hca(esw->dev); pf_hca_err: - mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF); + if (pf_needed) + mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF); return ret; } @@ -1242,11 +1334,14 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw) if (mlx5_ecpf_vport_exists(esw->dev)) { if (mlx5_core_ec_sriov_enabled(esw->dev)) mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_vfs); - mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF); + mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF); } host_pf_disable_hca(esw->dev); - mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF); + + if (mlx5_core_is_ecpf_esw_manager(esw->dev) || + esw->mode == MLX5_ESWITCH_LEGACY) + 
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF); } static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw) @@ -1368,7 +1463,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs) { int err; - lockdep_assert_held(&esw->mode_lock); + devl_assert_locked(priv_to_devlink(esw->dev)); if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { esw_warn(esw->dev, "FDB is not supported, aborting ...\n"); @@ -1383,6 +1478,9 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs) mlx5_eswitch_update_num_of_vfs(esw, num_vfs); + MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE); + mlx5_eq_notifier_register(esw->dev, &esw->nb); + if (esw->mode == MLX5_ESWITCH_LEGACY) { err = esw_legacy_enable(esw); } else { @@ -1395,7 +1493,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs) esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED; - mlx5_eswitch_event_handlers_register(esw); + mlx5_eswitch_event_handler_register(esw); esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n", esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS", @@ -1433,7 +1531,6 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) if (toggle_lag) mlx5_lag_disable_change(esw->dev); - down_write(&esw->mode_lock); if (!mlx5_esw_is_fdb_created(esw)) { ret = mlx5_eswitch_enable_locked(esw, num_vfs); } else { @@ -1456,8 +1553,6 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) } } - up_write(&esw->mode_lock); - if (toggle_lag) mlx5_lag_enable_change(esw->dev); @@ -1471,12 +1566,11 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) return; devl_assert_locked(priv_to_devlink(esw->dev)); - down_write(&esw->mode_lock); /* If driver is unloaded, this function is called twice by remove_one() * and mlx5_unload(). Prevent the second call. */ if (!esw->esw_funcs.num_vfs && !esw->esw_funcs.num_ec_vfs && !clear_vf) - goto unlock; + return; esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n", esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS", @@ -1505,9 +1599,6 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) esw->esw_funcs.num_vfs = 0; else esw->esw_funcs.num_ec_vfs = 0; - -unlock: - up_write(&esw->mode_lock); } /* Free resources for corresponding eswitch mode. It is called by devlink @@ -1522,7 +1613,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw) */ mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_LEGACY); - mlx5_eswitch_event_handlers_unregister(esw); + mlx5_eq_notifier_unregister(esw->dev, &esw->nb); + mlx5_eswitch_event_handler_unregister(esw); esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n", esw->mode == MLX5_ESWITCH_LEGACY ? 
"LEGACY" : "OFFLOADS", @@ -1548,10 +1640,8 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw) devl_assert_locked(priv_to_devlink(esw->dev)); mlx5_lag_disable_change(esw->dev); - down_write(&esw->mode_lock); mlx5_eswitch_disable_locked(esw); esw->mode = MLX5_ESWITCH_LEGACY; - up_write(&esw->mode_lock); mlx5_lag_enable_change(esw->dev); } @@ -1919,6 +2009,12 @@ bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num) return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_VF); } +bool mlx5_eswitch_is_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num) +{ + return vport_num == MLX5_VPORT_PF || + mlx5_eswitch_is_vf_vport(esw, vport_num); +} + bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num) { return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF); @@ -2149,8 +2245,13 @@ bool mlx5_esw_hold(struct mlx5_core_dev *mdev) if (!mlx5_esw_allowed(esw)) return true; - if (down_read_trylock(&esw->mode_lock) != 0) + if (down_read_trylock(&esw->mode_lock) != 0) { + if (esw->eswitch_operation_in_progress) { + up_read(&esw->mode_lock); + return false; + } return true; + } return false; } @@ -2207,7 +2308,8 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw) if (down_write_trylock(&esw->mode_lock) == 0) return -EINVAL; - if (atomic64_read(&esw->user_count) > 0) { + if (esw->eswitch_operation_in_progress || + atomic64_read(&esw->user_count) > 0) { up_write(&esw->mode_lock); return -EBUSY; } @@ -2215,6 +2317,18 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw) return esw->mode; } +int mlx5_esw_lock(struct mlx5_eswitch *esw) +{ + down_write(&esw->mode_lock); + + if (esw->eswitch_operation_in_progress) { + up_write(&esw->mode_lock); + return -EBUSY; + } + + return 0; +} + /** * mlx5_esw_unlock() - Release write lock on esw mode lock * @esw: eswitch device. @@ -2251,3 +2365,34 @@ struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw) return mlx5_esw_allowed(esw) ? 
esw->dev : NULL; } EXPORT_SYMBOL(mlx5_eswitch_get_core_dev); + +bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev) +{ + struct mlx5_eswitch *esw = dev->priv.eswitch; + + if (!mlx5_esw_allowed(esw)) + return true; + + mutex_lock(&esw->state_lock); + if (esw->enabled_ipsec_vf_count) { + mutex_unlock(&esw->state_lock); + return false; + } + + dev->num_ipsec_offloads++; + mutex_unlock(&esw->state_lock); + return true; +} + +void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev) +{ + struct mlx5_eswitch *esw = dev->priv.eswitch; + + if (!mlx5_esw_allowed(esw)) + /* Failure means no eswitch => core dev is not a PF */ + return; + + mutex_lock(&esw->state_lock); + dev->num_ipsec_offloads--; + mutex_unlock(&esw->state_lock); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index ae0dc8a3060d..b4eb17141edf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -163,6 +163,8 @@ struct mlx5_vport_info { u8 trusted: 1; u8 roce_enabled: 1; u8 mig_enabled: 1; + u8 ipsec_crypto_enabled: 1; + u8 ipsec_packet_enabled: 1; }; /* Vport context events */ @@ -172,6 +174,29 @@ enum mlx5_eswitch_vport_event { MLX5_VPORT_PROMISC_CHANGE = BIT(3), }; +struct mlx5_vport; + +struct mlx5_devlink_port { + struct devlink_port dl_port; + struct mlx5_vport *vport; +}; + +static inline void mlx5_devlink_port_init(struct mlx5_devlink_port *dl_port, + struct mlx5_vport *vport) +{ + dl_port->vport = vport; +} + +static inline struct mlx5_devlink_port *mlx5_devlink_port_get(struct devlink_port *dl_port) +{ + return container_of(dl_port, struct mlx5_devlink_port, dl_port); +} + +static inline struct mlx5_vport *mlx5_devlink_port_vport_get(struct devlink_port *dl_port) +{ + return mlx5_devlink_port_get(dl_port)->vport; +} + struct mlx5_vport { struct mlx5_core_dev *dev; struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE]; @@ -200,7 +225,7 @@ struct mlx5_vport { bool enabled; enum mlx5_eswitch_vport_event enabled_events; int index; - struct devlink_port *dl_port; + struct mlx5_devlink_port *dl_port; }; struct mlx5_esw_indir_table; @@ -254,6 +279,7 @@ struct mlx5_esw_offload { struct mlx5_flow_group *vport_rx_group; struct mlx5_flow_group *vport_rx_drop_group; struct mlx5_flow_handle *vport_rx_drop_rule; + struct mlx5_flow_table *ft_ipsec_tx_pol; struct xarray vport_reps; struct list_head peer_flows[MLX5_MAX_PORTS]; struct mutex peer_mutex; @@ -269,6 +295,7 @@ struct mlx5_esw_offload { u8 inline_mode; atomic64_t num_flows; u64 num_block_encap; + u64 num_block_mode; enum devlink_eswitch_encap_mode encap; struct ida vport_metadata_ida; unsigned int host_number; /* ECPF supports one external host */ @@ -354,6 +381,9 @@ struct mlx5_eswitch { } params; struct blocking_notifier_head n_head; struct xarray paired; + struct mlx5_devcom_comp_dev *devcom; + u16 enabled_ipsec_vf_count; + bool eswitch_operation_in_progress; }; void esw_offloads_disable(struct mlx5_eswitch *esw); @@ -381,8 +411,9 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs); void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf); void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw); void mlx5_eswitch_disable(struct mlx5_eswitch *esw); -void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw); +void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key); void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw); +bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch 
*esw); int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, u16 vport, const u8 *mac); int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, @@ -495,7 +526,8 @@ struct mlx5_esw_flow_attr { u8 total_vlan; struct { u32 flags; - struct mlx5_eswitch_rep *rep; + bool vport_valid; + u16 vport; struct mlx5_pkt_reformat *pkt_reformat; struct mlx5_core_dev *mdev; struct mlx5_termtbl_handle *termtbl; @@ -531,6 +563,16 @@ int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enab struct netlink_ext_ack *extack); int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable, struct netlink_ext_ack *extack); +#ifdef CONFIG_XFRM_OFFLOAD +int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled, + struct netlink_ext_ack *extack); +int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable, + struct netlink_ext_ack *extack); +int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled, + struct netlink_ext_ack *extack); +int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, bool enable, + struct netlink_ext_ack *extack); +#endif /* CONFIG_XFRM_OFFLOAD */ void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type); int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, @@ -671,11 +713,16 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw); MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\ (last) - 1) -struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink); +struct mlx5_eswitch *__must_check +mlx5_devlink_eswitch_get(struct devlink *devlink); + +struct mlx5_eswitch *mlx5_devlink_eswitch_nocheck_get(struct devlink *devlink); + struct mlx5_vport *__must_check mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num); bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num); +bool mlx5_eswitch_is_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num); bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num); int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data); @@ -685,9 +732,9 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, enum mlx5_eswitch_vport_event enabled_events); void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw); -int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num, +int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport, enum mlx5_eswitch_vport_event enabled_events); -void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num); +void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport); int esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw, @@ -725,31 +772,40 @@ void mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw, u16 vport, struct mlx5_flow_spec *spec); -int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num); -void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num); +int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport); +void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport); + +int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + struct mlx5_devlink_port *dl_port, + u32 controller, u32 sfnum); +void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport); -int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num); -void mlx5_esw_offloads_rep_unload(struct 
mlx5_eswitch *esw, u16 vport_num); +int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport); +void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport); -int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num, - enum mlx5_eswitch_vport_event enabled_events); -void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num); +int mlx5_eswitch_load_sf_vport(struct mlx5_eswitch *esw, u16 vport_num, + enum mlx5_eswitch_vport_event enabled_events, + struct mlx5_devlink_port *dl_port, u32 controller, u32 sfnum); +void mlx5_eswitch_unload_sf_vport(struct mlx5_eswitch *esw, u16 vport_num); int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs, enum mlx5_eswitch_vport_event enabled_events); void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs); -int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num); -void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num); -struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num); +int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw, + struct mlx5_vport *vport); +void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw, + struct mlx5_vport *vport); + +int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + struct mlx5_devlink_port *dl_port, + u32 controller, u32 sfnum); +void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport); -int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port, - u16 vport_num, u32 controller, u32 sfnum); -void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num); +int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport); +void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct mlx5_vport *vport); +struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num); -int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port, - u16 vport_num, u32 controller, u32 sfnum); -void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num); int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id); int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num); @@ -773,6 +829,7 @@ void mlx5_esw_release(struct mlx5_core_dev *dev); void mlx5_esw_get(struct mlx5_core_dev *dev); void mlx5_esw_put(struct mlx5_core_dev *dev); int mlx5_esw_try_lock(struct mlx5_eswitch *esw); +int mlx5_esw_lock(struct mlx5_eswitch *esw); void mlx5_esw_unlock(struct mlx5_eswitch *esw); void esw_vport_change_handle_locked(struct mlx5_vport *vport); @@ -788,6 +845,9 @@ int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw); bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev); void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev); +int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev); +void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev); + static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw) { if (mlx5_esw_allowed(esw)) @@ -809,6 +869,24 @@ mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw) return esw->fdb_table.offloads.slow_fdb; } +int mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule, + struct mlx5_esw_flow_attr *esw_attr, int attr_idx); +bool 
mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev); +void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev); +bool mlx5_esw_ipsec_vf_offload_supported(struct mlx5_core_dev *dev); +int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev, + struct mlx5_vport *vport); +int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev, + u16 vport_num); +int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + bool enable); +int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + bool enable); +int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev, + u16 vport_num); +void mlx5_esw_vport_ipsec_offload_enable(struct mlx5_eswitch *esw); +void mlx5_esw_vport_ipsec_offload_disable(struct mlx5_eswitch *esw); + #else /* CONFIG_MLX5_ESWITCH */ /* eswitch API stubs */ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } @@ -816,8 +894,9 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {} static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; } static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {} static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {} -static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) {} +static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key) {} static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {} +static inline bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) { return false; } static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; } static inline int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; } @@ -866,6 +945,15 @@ static inline bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev) static inline void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev) { } + +static inline int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev) { return 0; } +static inline void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev) {} +static inline bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev) +{ + return false; +} + +static inline void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev) {} #endif /* CONFIG_MLX5_ESWITCH */ #endif /* __MLX5_ESWITCH_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index e59380ee1ead..b0455134c98e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -287,10 +287,9 @@ static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_ for (i = from; i < to; i++) if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE) mlx5_chains_put_table(chains, 0, 1, 0); - else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport, + else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport, esw_attr->dests[i].mdev)) - mlx5_esw_indir_table_put(esw, esw_attr->dests[i].rep->vport, - false); + mlx5_esw_indir_table_put(esw, esw_attr->dests[i].vport, false); } static bool @@ -358,8 +357,8 @@ esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr) * this criteria. 
*/ for (i = esw_attr->split_count; i < esw_attr->out_count; i++) { - if (esw_attr->dests[i].rep && - mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport, + if (esw_attr->dests[i].vport_valid && + mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport, esw_attr->dests[i].mdev)) { result = true; } else { @@ -375,7 +374,6 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr, - bool ignore_flow_lvl, int *i) { struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; @@ -385,12 +383,11 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest, return -EOPNOTSUPP; for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) { - if (ignore_flow_lvl) - flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; + flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, - esw_attr->dests[j].rep->vport, false); + esw_attr->dests[j].vport, false); if (IS_ERR(dest[*i].ft)) { err = PTR_ERR(dest[*i].ft); goto err_indir_tbl_get; @@ -424,13 +421,54 @@ esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 l mlx5_chains_put_table(chains, chain, prio, level); } +static bool esw_same_vhca_id(struct mlx5_core_dev *mdev1, struct mlx5_core_dev *mdev2) +{ + return MLX5_CAP_GEN(mdev1, vhca_id) == MLX5_CAP_GEN(mdev2, vhca_id); +} + +static bool esw_setup_uplink_fwd_ipsec_needed(struct mlx5_eswitch *esw, + struct mlx5_esw_flow_attr *esw_attr, + int attr_idx) +{ + if (esw->offloads.ft_ipsec_tx_pol && + esw_attr->dests[attr_idx].vport_valid && + esw_attr->dests[attr_idx].vport == MLX5_VPORT_UPLINK && + /* To be aligned with software, encryption is needed only for tunnel device */ + (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) && + esw_attr->dests[attr_idx].vport != esw_attr->in_rep->vport && + esw_same_vhca_id(esw_attr->dests[attr_idx].mdev, esw->dev)) + return true; + + return false; +} + +static bool esw_flow_dests_fwd_ipsec_check(struct mlx5_eswitch *esw, + struct mlx5_esw_flow_attr *esw_attr) +{ + int i; + + if (!esw->offloads.ft_ipsec_tx_pol) + return true; + + for (i = 0; i < esw_attr->split_count; i++) + if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, i)) + return false; + + for (i = esw_attr->split_count; i < esw_attr->out_count; i++) + if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, i) && + (esw_attr->out_count - esw_attr->split_count > 1)) + return false; + + return true; +} + static void -esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, - struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr, - int attr_idx, int dest_idx, bool pkt_reformat) +esw_setup_dest_fwd_vport(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, + struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr, + int attr_idx, int dest_idx, bool pkt_reformat) { dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport; + dest[dest_idx].vport.num = esw_attr->dests[attr_idx].vport; if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) { dest[dest_idx].vport.vhca_id = MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id); @@ -449,6 +487,33 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f } } +static void +esw_setup_dest_fwd_ipsec(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, + struct mlx5_eswitch *esw, struct 
mlx5_esw_flow_attr *esw_attr, + int attr_idx, int dest_idx, bool pkt_reformat) +{ + dest[dest_idx].ft = esw->offloads.ft_ipsec_tx_pol; + dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + if (pkt_reformat && + esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) { + flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; + flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat; + } +} + +static void +esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, + struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr, + int attr_idx, int dest_idx, bool pkt_reformat) +{ + if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, attr_idx)) + esw_setup_dest_fwd_ipsec(dest, flow_act, esw, esw_attr, + attr_idx, dest_idx, pkt_reformat); + else + esw_setup_dest_fwd_vport(dest, flow_act, esw, esw_attr, + attr_idx, dest_idx, pkt_reformat); +} + static int esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr, @@ -469,6 +534,28 @@ esw_src_port_rewrite_supported(struct mlx5_eswitch *esw) MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level); } +static bool +esw_dests_to_vf_pf_vports(struct mlx5_flow_destination *dests, int max_dest) +{ + bool vf_dest = false, pf_dest = false; + int i; + + for (i = 0; i < max_dest; i++) { + if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT) + continue; + + if (dests[i].vport.num == MLX5_VPORT_UPLINK) + pf_dest = true; + else + vf_dest = true; + + if (vf_dest && pf_dest) + return true; + } + + return false; +} + static int esw_setup_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, @@ -501,7 +588,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest, err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i); (*i)++; } else if (esw_is_indir_table(esw, attr)) { - err = esw_setup_indir_table(dest, flow_act, esw, attr, true, i); + err = esw_setup_indir_table(dest, flow_act, esw, attr, i); } else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) { err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i); } else { @@ -575,6 +662,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) return ERR_PTR(-EOPNOTSUPP); + if (!esw_flow_dests_fwd_ipsec_check(esw, esw_attr)) + return ERR_PTR(-EOPNOTSUPP); + dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL); if (!dest) return ERR_PTR(-ENOMEM); @@ -602,6 +692,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, rule = ERR_PTR(err); goto err_create_goto_table; } + + /* Header rewrite with combined wire+loopback in FDB is not allowed */ + if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) && + esw_dests_to_vf_pf_vports(dest, i)) { + esw_warn(esw->dev, + "FDB: Header rewrite with forwarding to both PF and VF is not allowed\n"); + rule = ERR_PTR(-EINVAL); + goto err_esw_get; + } } if (esw_attr->decap_pkt_reformat) @@ -884,6 +983,18 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw, dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + if (rep->vport == MLX5_VPORT_UPLINK && + on_esw == from_esw && on_esw->offloads.ft_ipsec_tx_pol) { + dest.ft = on_esw->offloads.ft_ipsec_tx_pol; + flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL; + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + } else { + dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest.vport.num = rep->vport; + 
dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id); + dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; + } + if (MLX5_CAP_ESW_FLOWTABLE(on_esw->dev, flow_source) && rep->vport == MLX5_VPORT_UPLINK) spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT; @@ -1065,9 +1176,9 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, struct mlx5_flow_handle *flow; struct mlx5_flow_spec *spec; struct mlx5_vport *vport; + int err, pfindex; unsigned long i; void *misc; - int err; if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev)) return 0; @@ -1143,7 +1254,15 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, flows[vport->index] = flow; } } - esw->fdb_table.offloads.peer_miss_rules[mlx5_get_dev_index(peer_dev)] = flows; + + pfindex = mlx5_get_dev_index(peer_dev); + if (pfindex >= MLX5_MAX_PORTS) { + esw_warn(esw->dev, "Peer dev index(%d) is over the max num defined(%d)\n", + pfindex, MLX5_MAX_PORTS); + err = -EINVAL; + goto add_ec_vf_flow_err; + } + esw->fdb_table.offloads.peer_miss_rules[pfindex] = flows; kvfree(spec); return 0; @@ -2390,7 +2509,7 @@ static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type) __esw_offloads_unload_rep(esw, rep, rep_type); } -int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num) +static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num) { struct mlx5_eswitch_rep *rep; int rep_type; @@ -2414,7 +2533,7 @@ err_reps: return err; } -void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num) +static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num) { struct mlx5_eswitch_rep *rep; int rep_type; @@ -2424,39 +2543,63 @@ void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num) __esw_offloads_unload_rep(esw, rep, rep_type); } -int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num) +int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport) +{ + if (esw->mode != MLX5_ESWITCH_OFFLOADS) + return 0; + + return mlx5_esw_offloads_pf_vf_devlink_port_init(esw, vport); +} + +void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport) +{ + if (esw->mode != MLX5_ESWITCH_OFFLOADS) + return; + + mlx5_esw_offloads_pf_vf_devlink_port_cleanup(esw, vport); +} + +int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + struct mlx5_devlink_port *dl_port, + u32 controller, u32 sfnum) +{ + return mlx5_esw_offloads_sf_devlink_port_init(esw, vport, dl_port, controller, sfnum); +} + +void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport) +{ + mlx5_esw_offloads_sf_devlink_port_cleanup(esw, vport); +} + +int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { int err; if (esw->mode != MLX5_ESWITCH_OFFLOADS) return 0; - if (vport_num != MLX5_VPORT_UPLINK) { - err = mlx5_esw_offloads_devlink_port_register(esw, vport_num); - if (err) - return err; - } + err = mlx5_esw_offloads_devlink_port_register(esw, vport); + if (err) + return err; - err = mlx5_esw_offloads_rep_load(esw, vport_num); + err = mlx5_esw_offloads_rep_load(esw, vport->vport); if (err) goto load_err; return err; load_err: - if (vport_num != MLX5_VPORT_UPLINK) - mlx5_esw_offloads_devlink_port_unregister(esw, vport_num); + mlx5_esw_offloads_devlink_port_unregister(esw, vport); return err; } -void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num) +void 
mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { if (esw->mode != MLX5_ESWITCH_OFFLOADS) return; - mlx5_esw_offloads_rep_unload(esw, vport_num); + mlx5_esw_offloads_rep_unload(esw, vport->vport); - if (vport_num != MLX5_VPORT_UPLINK) - mlx5_esw_offloads_devlink_port_unregister(esw, vport_num); + mlx5_esw_offloads_devlink_port_unregister(esw, vport); } static int esw_set_slave_root_fdb(struct mlx5_core_dev *master, @@ -2810,7 +2953,6 @@ static int mlx5_esw_offloads_devcom_event(int event, void *event_data) { struct mlx5_eswitch *esw = my_data; - struct mlx5_devcom *devcom = esw->dev->priv.devcom; struct mlx5_eswitch *peer_esw = event_data; u16 esw_i, peer_esw_i; bool esw_paired; @@ -2832,6 +2974,7 @@ static int mlx5_esw_offloads_devcom_event(int event, err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true); if (err) goto err_out; + err = mlx5_esw_offloads_pair(esw, peer_esw); if (err) goto err_peer; @@ -2850,7 +2993,7 @@ static int mlx5_esw_offloads_devcom_event(int event, esw->num_peers++; peer_esw->num_peers++; - mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true); + mlx5_devcom_comp_set_ready(esw->devcom, true); break; case ESW_OFFLOADS_DEVCOM_UNPAIR: @@ -2860,7 +3003,7 @@ static int mlx5_esw_offloads_devcom_event(int event, peer_esw->num_peers--; esw->num_peers--; if (!esw->num_peers && !peer_esw->num_peers) - mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false); + mlx5_devcom_comp_set_ready(esw->devcom, false); xa_erase(&peer_esw->paired, esw_i); xa_erase(&esw->paired, peer_esw_i); mlx5_esw_offloads_unpair(peer_esw, esw); @@ -2885,9 +3028,8 @@ err_out: return err; } -void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) +void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key) { - struct mlx5_devcom *devcom = esw->dev->priv.devcom; int i; for (i = 0; i < MLX5_MAX_PORTS; i++) @@ -2897,38 +3039,44 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) return; - if (!mlx5_lag_is_supported(esw->dev)) + if ((MLX5_VPORT_MANAGER(esw->dev) || mlx5_core_is_ecpf_esw_manager(esw->dev)) && + !mlx5_lag_is_supported(esw->dev)) return; xa_init(&esw->paired); - mlx5_devcom_register_component(devcom, - MLX5_DEVCOM_ESW_OFFLOADS, - mlx5_esw_offloads_devcom_event, - esw); - esw->num_peers = 0; - mlx5_devcom_send_event(devcom, - MLX5_DEVCOM_ESW_OFFLOADS, + esw->devcom = mlx5_devcom_register_component(esw->dev->priv.devc, + MLX5_DEVCOM_ESW_OFFLOADS, + key, + mlx5_esw_offloads_devcom_event, + esw); + if (IS_ERR_OR_NULL(esw->devcom)) + return; + + mlx5_devcom_send_event(esw->devcom, ESW_OFFLOADS_DEVCOM_PAIR, - ESW_OFFLOADS_DEVCOM_UNPAIR, esw); + ESW_OFFLOADS_DEVCOM_UNPAIR, + esw); } void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) { - struct mlx5_devcom *devcom = esw->dev->priv.devcom; - - if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) + if (IS_ERR_OR_NULL(esw->devcom)) return; - if (!mlx5_lag_is_supported(esw->dev)) - return; - - mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS, + mlx5_devcom_send_event(esw->devcom, ESW_OFFLOADS_DEVCOM_UNPAIR, - ESW_OFFLOADS_DEVCOM_UNPAIR, esw); + ESW_OFFLOADS_DEVCOM_UNPAIR, + esw); - mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS); + mlx5_devcom_unregister_component(esw->devcom); xa_destroy(&esw->paired); + esw->devcom = NULL; +} + +bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) +{ + return mlx5_devcom_comp_is_ready(esw->devcom); } bool 
mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw) @@ -3076,26 +3224,47 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw, esw_acl_ingress_ofld_cleanup(esw, vport); } -static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) +static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw) { - struct mlx5_vport *vport; + struct mlx5_vport *uplink, *manager; + int ret; - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); - if (IS_ERR(vport)) - return PTR_ERR(vport); + uplink = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); + if (IS_ERR(uplink)) + return PTR_ERR(uplink); + + ret = esw_vport_create_offloads_acl_tables(esw, uplink); + if (ret) + return ret; + + manager = mlx5_eswitch_get_vport(esw, esw->manager_vport); + if (IS_ERR(manager)) { + ret = PTR_ERR(manager); + goto err_manager; + } + + ret = esw_vport_create_offloads_acl_tables(esw, manager); + if (ret) + goto err_manager; + + return 0; - return esw_vport_create_offloads_acl_tables(esw, vport); +err_manager: + esw_vport_destroy_offloads_acl_tables(esw, uplink); + return ret; } -static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) +static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw) { struct mlx5_vport *vport; - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); - if (IS_ERR(vport)) - return; + vport = mlx5_eswitch_get_vport(esw, esw->manager_vport); + if (!IS_ERR(vport)) + esw_vport_destroy_offloads_acl_tables(esw, vport); - esw_vport_destroy_offloads_acl_tables(esw, vport); + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); + if (!IS_ERR(vport)) + esw_vport_destroy_offloads_acl_tables(esw, vport); } int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw) @@ -3140,7 +3309,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw) } esw->fdb_table.offloads.indir = indir; - err = esw_create_uplink_offloads_acl_tables(esw); + err = esw_create_offloads_acl_tables(esw); if (err) goto create_acl_err; @@ -3181,7 +3350,7 @@ create_fdb_err: create_restore_err: esw_destroy_offloads_table(esw); create_offloads_err: - esw_destroy_uplink_offloads_acl_tables(esw); + esw_destroy_offloads_acl_tables(esw); create_acl_err: mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir); create_indir_err: @@ -3197,7 +3366,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw) esw_destroy_offloads_fdb_tables(esw); esw_destroy_restore_table(esw); esw_destroy_offloads_table(esw); - esw_destroy_uplink_offloads_acl_tables(esw); + esw_destroy_offloads_acl_tables(esw); mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir); mutex_destroy(&esw->fdb_table.offloads.vports.lock); } @@ -3355,7 +3524,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN; /* Uplink vport rep must load first. 
*/ - err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK); + err = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK); if (err) goto err_uplink; @@ -3366,7 +3535,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) return 0; err_vports: - esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK); + mlx5_esw_offloads_rep_unload(esw, MLX5_VPORT_UPLINK); err_uplink: esw_offloads_steering_cleanup(esw); err_steering_init: @@ -3404,7 +3573,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw, void esw_offloads_disable(struct mlx5_eswitch *esw) { mlx5_eswitch_disable_pf_vf_vports(esw); - esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK); + mlx5_esw_offloads_rep_unload(esw, MLX5_VPORT_UPLINK); esw_set_passing_vport_metadata(esw, false); esw_offloads_steering_cleanup(esw); mapping_destroy(esw->offloads.reg_c0_obj_pool); @@ -3491,14 +3660,48 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode) static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink) { + struct mlx5_core_dev *dev = devlink_priv(devlink); struct net *devl_net, *netdev_net; - struct mlx5_eswitch *esw; + bool ret = false; - esw = mlx5_devlink_eswitch_get(devlink); - netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev); - devl_net = devlink_net(devlink); + mutex_lock(&dev->mlx5e_res.uplink_netdev_lock); + if (dev->mlx5e_res.uplink_netdev) { + netdev_net = dev_net(dev->mlx5e_res.uplink_netdev); + devl_net = devlink_net(devlink); + ret = net_eq(devl_net, netdev_net); + } + mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock); + return ret; +} + +int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev) +{ + struct mlx5_eswitch *esw = dev->priv.eswitch; + int err; - return net_eq(devl_net, netdev_net); + if (!mlx5_esw_allowed(esw)) + return 0; + + /* Take TC into account */ + err = mlx5_esw_try_lock(esw); + if (err < 0) + return err; + + esw->offloads.num_block_mode++; + mlx5_esw_unlock(esw); + return 0; +} + +void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev) +{ + struct mlx5_eswitch *esw = dev->priv.eswitch; + + if (!mlx5_esw_allowed(esw)) + return; + + down_write(&esw->mode_lock); + esw->offloads.num_block_mode--; + up_write(&esw->mode_lock); } int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, @@ -3534,13 +3737,23 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, if (cur_mlx5_mode == mlx5_mode) goto unlock; + if (esw->offloads.num_block_mode) { + NL_SET_ERR_MSG_MOD(extack, + "Can't change eswitch mode when IPsec SA and/or policies are configured"); + err = -EOPNOTSUPP; + goto unlock; + } + + esw->eswitch_operation_in_progress = true; + up_write(&esw->mode_lock); + mlx5_eswitch_disable_locked(esw); if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) { if (mlx5_devlink_trap_get_num_active(esw->dev)) { NL_SET_ERR_MSG_MOD(extack, "Can't change mode while devlink traps are active"); err = -EOPNOTSUPP; - goto unlock; + goto skip; } err = esw_offloads_start(esw, extack); } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) { @@ -3550,6 +3763,9 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, err = -EINVAL; } +skip: + down_write(&esw->mode_lock); + esw->eswitch_operation_in_progress = false; unlock: mlx5_esw_unlock(esw); enable_lag: @@ -3560,16 +3776,12 @@ enable_lag: int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) { struct mlx5_eswitch *esw; - int err; esw = mlx5_devlink_eswitch_get(devlink); if (IS_ERR(esw)) return PTR_ERR(esw); - down_read(&esw->mode_lock); - err = esw_mode_to_devlink(esw->mode, mode); - up_read(&esw->mode_lock); - return err; 
+ return esw_mode_to_devlink(esw->mode, mode); } static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode, @@ -3663,11 +3875,15 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, if (err) goto out; + esw->eswitch_operation_in_progress = true; + up_write(&esw->mode_lock); + err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack); - if (err) - goto out; + if (!err) + esw->offloads.inline_mode = mlx5_mode; - esw->offloads.inline_mode = mlx5_mode; + down_write(&esw->mode_lock); + esw->eswitch_operation_in_progress = false; up_write(&esw->mode_lock); return 0; @@ -3679,52 +3895,38 @@ out: int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode) { struct mlx5_eswitch *esw; - int err; esw = mlx5_devlink_eswitch_get(devlink); if (IS_ERR(esw)) return PTR_ERR(esw); - down_read(&esw->mode_lock); - err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); - up_read(&esw->mode_lock); - return err; + return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); } bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev) { - struct devlink *devlink = priv_to_devlink(dev); - struct mlx5_eswitch *esw; + struct mlx5_eswitch *esw = dev->priv.eswitch; - devl_lock(devlink); - esw = mlx5_devlink_eswitch_get(devlink); - if (IS_ERR(esw)) { - devl_unlock(devlink); - /* Failure means no eswitch => not possible to change encap */ + if (!mlx5_esw_allowed(esw)) return true; - } down_write(&esw->mode_lock); if (esw->mode != MLX5_ESWITCH_LEGACY && esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) { up_write(&esw->mode_lock); - devl_unlock(devlink); return false; } esw->offloads.num_block_encap++; up_write(&esw->mode_lock); - devl_unlock(devlink); return true; } void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev) { - struct devlink *devlink = priv_to_devlink(dev); - struct mlx5_eswitch *esw; + struct mlx5_eswitch *esw = dev->priv.eswitch; - esw = mlx5_devlink_eswitch_get(devlink); - if (IS_ERR(esw)) + if (!mlx5_esw_allowed(esw)) return; down_write(&esw->mode_lock); @@ -3780,6 +3982,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, goto unlock; } + esw->eswitch_operation_in_progress = true; + up_write(&esw->mode_lock); + esw_destroy_offloads_fdb_tables(esw); esw->offloads.encap = encap; @@ -3793,6 +3998,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, (void)esw_create_offloads_fdb_tables(esw); } + down_write(&esw->mode_lock); + esw->eswitch_operation_in_progress = false; + unlock: up_write(&esw->mode_lock); return err; @@ -3807,9 +4015,7 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, if (IS_ERR(esw)) return PTR_ERR(esw); - down_read(&esw->mode_lock); *encap = esw->offloads.encap; - up_read(&esw->mode_lock); return 0; } @@ -3920,38 +4126,6 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, } EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match); -int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port, - u16 vport_num, u32 controller, u32 sfnum) -{ - int err; - - err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE); - if (err) - return err; - - err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, controller, sfnum); - if (err) - goto devlink_err; - - err = mlx5_esw_offloads_rep_load(esw, vport_num); - if (err) - goto rep_err; - return 0; - -rep_err: - mlx5_esw_devlink_sf_port_unregister(esw, vport_num); -devlink_err: - mlx5_esw_vport_disable(esw, vport_num); - return err; -} - 
-void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num) -{ - mlx5_esw_offloads_rep_unload(esw, vport_num); - mlx5_esw_devlink_sf_port_unregister(esw, vport_num); - mlx5_esw_vport_disable(esw, vport_num); -} - static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id) { int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); @@ -4040,35 +4214,12 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw, } EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set); -static bool -is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num) -{ - return vport_num == MLX5_VPORT_PF || - mlx5_eswitch_is_vf_vport(esw, vport_num) || - mlx5_esw_is_sf_vport(esw, vport_num); -} - int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port, u8 *hw_addr, int *hw_addr_len, struct netlink_ext_ack *extack) { - struct mlx5_eswitch *esw; - struct mlx5_vport *vport; - u16 vport_num; - - esw = mlx5_devlink_eswitch_get(port->devlink); - if (IS_ERR(esw)) - return PTR_ERR(esw); - - vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index); - if (!is_port_function_supported(esw, vport_num)) - return -EOPNOTSUPP; - - vport = mlx5_eswitch_get_vport(esw, vport_num); - if (IS_ERR(vport)) { - NL_SET_ERR_MSG_MOD(extack, "Invalid port"); - return PTR_ERR(vport); - } + struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink); + struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port); mutex_lock(&esw->state_lock); ether_addr_copy(hw_addr, vport->info.mac); @@ -4081,100 +4232,55 @@ int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port, const u8 *hw_addr, int hw_addr_len, struct netlink_ext_ack *extack) { - struct mlx5_eswitch *esw; - u16 vport_num; - - esw = mlx5_devlink_eswitch_get(port->devlink); - if (IS_ERR(esw)) { - NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr"); - return PTR_ERR(esw); - } - - vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index); - if (!is_port_function_supported(esw, vport_num)) { - NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr"); - return -EINVAL; - } - - return mlx5_eswitch_set_vport_mac(esw, vport_num, hw_addr); -} - -static struct mlx5_vport * -mlx5_devlink_port_fn_get_vport(struct devlink_port *port, struct mlx5_eswitch *esw) -{ - u16 vport_num; - - if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) - return ERR_PTR(-EOPNOTSUPP); + struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink); + struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port); - vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index); - if (!is_port_function_supported(esw, vport_num)) - return ERR_PTR(-EOPNOTSUPP); - - return mlx5_eswitch_get_vport(esw, vport_num); + return mlx5_eswitch_set_vport_mac(esw, vport->vport, hw_addr); } int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled, struct netlink_ext_ack *extack) { - struct mlx5_eswitch *esw; - struct mlx5_vport *vport; - int err = -EOPNOTSUPP; - - esw = mlx5_devlink_eswitch_get(port->devlink); - if (IS_ERR(esw)) - return PTR_ERR(esw); + struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink); + struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port); if (!MLX5_CAP_GEN(esw->dev, migration)) { NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration"); - return err; + return -EOPNOTSUPP; } - vport = mlx5_devlink_port_fn_get_vport(port, esw); - if (IS_ERR(vport)) { - NL_SET_ERR_MSG_MOD(extack, "Invalid port"); - return 
PTR_ERR(vport); + if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) { + NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management"); + return -EOPNOTSUPP; } mutex_lock(&esw->state_lock); - if (vport->enabled) { - *is_enabled = vport->info.mig_enabled; - err = 0; - } + *is_enabled = vport->info.mig_enabled; mutex_unlock(&esw->state_lock); - return err; + return 0; } int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable, struct netlink_ext_ack *extack) { + struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink); + struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port); int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); - struct mlx5_eswitch *esw; - struct mlx5_vport *vport; void *query_ctx; void *hca_caps; - int err = -EOPNOTSUPP; - - esw = mlx5_devlink_eswitch_get(port->devlink); - if (IS_ERR(esw)) - return PTR_ERR(esw); + int err; if (!MLX5_CAP_GEN(esw->dev, migration)) { NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration"); - return err; + return -EOPNOTSUPP; } - vport = mlx5_devlink_port_fn_get_vport(port, esw); - if (IS_ERR(vport)) { - NL_SET_ERR_MSG_MOD(extack, "Invalid port"); - return PTR_ERR(vport); + if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) { + NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management"); + return -EOPNOTSUPP; } mutex_lock(&esw->state_lock); - if (!vport->enabled) { - NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled"); - goto out; - } if (vport->info.mig_enabled == enable) { err = 0; @@ -4216,56 +4322,37 @@ out: int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled, struct netlink_ext_ack *extack) { - struct mlx5_eswitch *esw; - struct mlx5_vport *vport; - int err = -EOPNOTSUPP; - - esw = mlx5_devlink_eswitch_get(port->devlink); - if (IS_ERR(esw)) - return PTR_ERR(esw); + struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink); + struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port); - vport = mlx5_devlink_port_fn_get_vport(port, esw); - if (IS_ERR(vport)) { - NL_SET_ERR_MSG_MOD(extack, "Invalid port"); - return PTR_ERR(vport); + if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) { + NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management"); + return -EOPNOTSUPP; } mutex_lock(&esw->state_lock); - if (vport->enabled) { - *is_enabled = vport->info.roce_enabled; - err = 0; - } + *is_enabled = vport->info.roce_enabled; mutex_unlock(&esw->state_lock); - return err; + return 0; } int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable, struct netlink_ext_ack *extack) { + struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink); + struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port); int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); - struct mlx5_eswitch *esw; - struct mlx5_vport *vport; - int err = -EOPNOTSUPP; + u16 vport_num = vport->vport; void *query_ctx; void *hca_caps; - u16 vport_num; - - esw = mlx5_devlink_eswitch_get(port->devlink); - if (IS_ERR(esw)) - return PTR_ERR(esw); + int err; - vport = mlx5_devlink_port_fn_get_vport(port, esw); - if (IS_ERR(vport)) { - NL_SET_ERR_MSG_MOD(extack, "Invalid port"); - return PTR_ERR(vport); + if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) { + NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management"); + return -EOPNOTSUPP; } - vport_num = vport->vport; mutex_lock(&esw->state_lock); - if (!vport->enabled) { - NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled"); - goto out; - } if 
(vport->info.roce_enabled == enable) { err = 0; @@ -4303,3 +4390,188 @@ out: mutex_unlock(&esw->state_lock); return err; } + +int +mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule, + struct mlx5_esw_flow_attr *esw_attr, int attr_idx) +{ + struct mlx5_flow_destination new_dest = {}; + struct mlx5_flow_destination old_dest = {}; + + if (!esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, attr_idx)) + return 0; + + esw_setup_dest_fwd_ipsec(&old_dest, NULL, esw, esw_attr, attr_idx, 0, false); + esw_setup_dest_fwd_vport(&new_dest, NULL, esw, esw_attr, attr_idx, 0, false); + + return mlx5_modify_rule_destination(rule, &new_dest, &old_dest); +} + +#ifdef CONFIG_XFRM_OFFLOAD +int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled, + struct netlink_ext_ack *extack) +{ + struct mlx5_eswitch *esw; + struct mlx5_vport *vport; + int err = 0; + + esw = mlx5_devlink_eswitch_get(port->devlink); + if (IS_ERR(esw)) + return PTR_ERR(esw); + + if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) { + NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPSec crypto"); + return -EOPNOTSUPP; + } + + vport = mlx5_devlink_port_vport_get(port); + + mutex_lock(&esw->state_lock); + if (!vport->enabled) { + err = -EOPNOTSUPP; + goto unlock; + } + + *is_enabled = vport->info.ipsec_crypto_enabled; +unlock: + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable, + struct netlink_ext_ack *extack) +{ + struct mlx5_eswitch *esw; + struct mlx5_vport *vport; + u16 vport_num; + int err; + + esw = mlx5_devlink_eswitch_get(port->devlink); + if (IS_ERR(esw)) + return PTR_ERR(esw); + + vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index); + err = mlx5_esw_ipsec_vf_crypto_offload_supported(esw->dev, vport_num); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Device doesn't support IPsec crypto"); + return err; + } + + vport = mlx5_devlink_port_vport_get(port); + + mutex_lock(&esw->state_lock); + if (!vport->enabled) { + err = -EOPNOTSUPP; + NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled"); + goto unlock; + } + + if (vport->info.ipsec_crypto_enabled == enable) + goto unlock; + + if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) { + err = -EBUSY; + goto unlock; + } + + err = mlx5_esw_ipsec_vf_crypto_offload_set(esw, vport, enable); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to set IPsec crypto"); + goto unlock; + } + + vport->info.ipsec_crypto_enabled = enable; + if (enable) + esw->enabled_ipsec_vf_count++; + else + esw->enabled_ipsec_vf_count--; +unlock: + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled, + struct netlink_ext_ack *extack) +{ + struct mlx5_eswitch *esw; + struct mlx5_vport *vport; + int err = 0; + + esw = mlx5_devlink_eswitch_get(port->devlink); + if (IS_ERR(esw)) + return PTR_ERR(esw); + + if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) { + NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet"); + return -EOPNOTSUPP; + } + + vport = mlx5_devlink_port_vport_get(port); + + mutex_lock(&esw->state_lock); + if (!vport->enabled) { + err = -EOPNOTSUPP; + goto unlock; + } + + *is_enabled = vport->info.ipsec_packet_enabled; +unlock: + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, + bool enable, + struct netlink_ext_ack *extack) +{ + struct mlx5_eswitch *esw; + struct 
mlx5_vport *vport; + u16 vport_num; + int err; + + esw = mlx5_devlink_eswitch_get(port->devlink); + if (IS_ERR(esw)) + return PTR_ERR(esw); + + vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index); + err = mlx5_esw_ipsec_vf_packet_offload_supported(esw->dev, vport_num); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Device doesn't support IPsec packet mode"); + return err; + } + + vport = mlx5_devlink_port_vport_get(port); + mutex_lock(&esw->state_lock); + if (!vport->enabled) { + err = -EOPNOTSUPP; + NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled"); + goto unlock; + } + + if (vport->info.ipsec_packet_enabled == enable) + goto unlock; + + if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) { + err = -EBUSY; + goto unlock; + } + + err = mlx5_esw_ipsec_vf_packet_offload_set(esw, vport, enable); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to set IPsec packet mode"); + goto unlock; + } + + vport->info.ipsec_packet_enabled = enable; + if (enable) + esw->enabled_ipsec_vf_count++; + else + esw->enabled_ipsec_vf_count--; +unlock: + mutex_unlock(&esw->state_lock); + return err; +} +#endif /* CONFIG_XFRM_OFFLOAD */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c index edd910258314..40bdc677f051 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c @@ -233,8 +233,8 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw, /* hairpin */ for (i = esw_attr->split_count; i < esw_attr->out_count; i++) - if (!esw_attr->dest_int_port && esw_attr->dests[i].rep && - esw_attr->dests[i].rep->vport == MLX5_VPORT_UPLINK) + if (!esw_attr->dest_int_port && esw_attr->dests[i].vport_valid && + esw_attr->dests[i].vport == MLX5_VPORT_UPLINK) return true; return false; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c index 3ec892d51f57..d91ea53eb394 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/events.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c @@ -441,8 +441,3 @@ int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int ev return blocking_notifier_call_chain(&events->sw_nh, event, data); } - -void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work) -{ - queue_work(dev->priv.events->wq, work); -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c index 12abe991583a..c4de6bf8d1b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c @@ -445,7 +445,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size) goto err_cqwq; } - err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn); + err = mlx5_comp_eqn_get(mdev, smp_processor_id(), &eqn); if (err) { kvfree(in); goto err_cqwq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c index 39c03dcbd196..e5c1012921d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c @@ -57,7 +57,7 @@ static const char * const mlx5_fpga_qp_error_strings[] = { }; static struct mlx5_fpga_device *mlx5_fpga_device_alloc(void) { - struct mlx5_fpga_device *fdev = NULL; + struct mlx5_fpga_device *fdev; fdev = kzalloc(sizeof(*fdev), GFP_KERNEL); if 
(!fdev) @@ -252,7 +252,7 @@ out: int mlx5_fpga_init(struct mlx5_core_dev *mdev) { - struct mlx5_fpga_device *fdev = NULL; + struct mlx5_fpga_device *fdev; if (!MLX5_CAP_GEN(mdev, fpga)) { mlx5_core_dbg(mdev, "FPGA capability not present\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 244cfd470903..a4b925331661 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -975,6 +975,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns, max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions); table_type = FS_FT_ESW_INGRESS_ACL; break; + case MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC: case MLX5_FLOW_NAMESPACE_RDMA_TX: max_actions = MLX5_CAP_FLOWTABLE_RDMA_TX(dev, max_modify_header_actions); table_type = FS_FT_RDMA_TX; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 6b069fa411c5..e6bfa7e4f146 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -107,16 +107,16 @@ LEFTOVERS_NUM_PRIOS) #define KERNEL_RX_MACSEC_NUM_PRIOS 1 -#define KERNEL_RX_MACSEC_NUM_LEVELS 2 +#define KERNEL_RX_MACSEC_NUM_LEVELS 3 #define KERNEL_RX_MACSEC_MIN_LEVEL (BY_PASS_MIN_LEVEL + KERNEL_RX_MACSEC_NUM_PRIOS) #define ETHTOOL_PRIO_NUM_LEVELS 1 #define ETHTOOL_NUM_PRIOS 11 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS) /* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy, - * IPsec RoCE policy + * {IPsec RoCE MPV,Alias table},IPsec RoCE policy */ -#define KERNEL_NIC_PRIO_NUM_LEVELS 9 +#define KERNEL_NIC_PRIO_NUM_LEVELS 11 #define KERNEL_NIC_NUM_PRIOS 1 /* One more level for tc */ #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1) @@ -137,7 +137,7 @@ #define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1) #define KERNEL_TX_IPSEC_NUM_PRIOS 1 -#define KERNEL_TX_IPSEC_NUM_LEVELS 3 +#define KERNEL_TX_IPSEC_NUM_LEVELS 4 #define KERNEL_TX_IPSEC_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS) #define KERNEL_TX_MACSEC_NUM_PRIOS 1 @@ -224,22 +224,27 @@ static struct init_tree_node egress_root_fs = { enum { RDMA_RX_IPSEC_PRIO, + RDMA_RX_MACSEC_PRIO, RDMA_RX_COUNTERS_PRIO, RDMA_RX_BYPASS_PRIO, RDMA_RX_KERNEL_PRIO, }; #define RDMA_RX_IPSEC_NUM_PRIOS 1 -#define RDMA_RX_IPSEC_NUM_LEVELS 2 +#define RDMA_RX_IPSEC_NUM_LEVELS 4 #define RDMA_RX_IPSEC_MIN_LEVEL (RDMA_RX_IPSEC_NUM_LEVELS) #define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS #define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1) #define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2) +#define RDMA_RX_MACSEC_NUM_PRIOS 1 +#define RDMA_RX_MACSEC_PRIO_NUM_LEVELS 2 +#define RDMA_RX_MACSEC_MIN_LEVEL (RDMA_RX_COUNTERS_MIN_LEVEL + RDMA_RX_MACSEC_NUM_PRIOS) + static struct init_tree_node rdma_rx_root_fs = { .type = FS_TYPE_NAMESPACE, - .ar_size = 4, + .ar_size = 5, .children = (struct init_tree_node[]) { [RDMA_RX_IPSEC_PRIO] = ADD_PRIO(0, RDMA_RX_IPSEC_MIN_LEVEL, 0, @@ -247,6 +252,12 @@ static struct init_tree_node rdma_rx_root_fs = { ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ADD_MULTIPLE_PRIO(RDMA_RX_IPSEC_NUM_PRIOS, RDMA_RX_IPSEC_NUM_LEVELS))), + [RDMA_RX_MACSEC_PRIO] = + ADD_PRIO(0, RDMA_RX_MACSEC_MIN_LEVEL, 0, + FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(RDMA_RX_MACSEC_NUM_PRIOS, + RDMA_RX_MACSEC_PRIO_NUM_LEVELS))), [RDMA_RX_COUNTERS_PRIO] = 
ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0, FS_CHAINING_CAPS, @@ -270,19 +281,24 @@ static struct init_tree_node rdma_rx_root_fs = { enum { RDMA_TX_COUNTERS_PRIO, RDMA_TX_IPSEC_PRIO, + RDMA_TX_MACSEC_PRIO, RDMA_TX_BYPASS_PRIO, }; #define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS #define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1) -#define RDMA_TX_IPSEC_NUM_PRIOS 1 +#define RDMA_TX_IPSEC_NUM_PRIOS 2 #define RDMA_TX_IPSEC_PRIO_NUM_LEVELS 1 #define RDMA_TX_IPSEC_MIN_LEVEL (RDMA_TX_COUNTERS_MIN_LEVEL + RDMA_TX_IPSEC_NUM_PRIOS) +#define RDMA_TX_MACSEC_NUM_PRIOS 1 +#define RDMA_TX_MACESC_PRIO_NUM_LEVELS 1 +#define RDMA_TX_MACSEC_MIN_LEVEL (RDMA_TX_COUNTERS_MIN_LEVEL + RDMA_TX_MACSEC_NUM_PRIOS) + static struct init_tree_node rdma_tx_root_fs = { .type = FS_TYPE_NAMESPACE, - .ar_size = 3, + .ar_size = 4, .children = (struct init_tree_node[]) { [RDMA_TX_COUNTERS_PRIO] = ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0, @@ -296,7 +312,12 @@ static struct init_tree_node rdma_tx_root_fs = { ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ADD_MULTIPLE_PRIO(RDMA_TX_IPSEC_NUM_PRIOS, RDMA_TX_IPSEC_PRIO_NUM_LEVELS))), - + [RDMA_TX_MACSEC_PRIO] = + ADD_PRIO(0, RDMA_TX_MACSEC_MIN_LEVEL, 0, + FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(RDMA_TX_MACSEC_NUM_PRIOS, + RDMA_TX_MACESC_PRIO_NUM_LEVELS))), [RDMA_TX_BYPASS_PRIO] = ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0, FS_CHAINING_CAPS_RDMA_TX, @@ -1122,7 +1143,7 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle, } for (i = 0; i < handle->num_rules; i++) { - if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr)) + if (mlx5_flow_dests_cmp(old_dest, &handle->rule[i]->dest_attr)) return _mlx5_modify_rule_destination(handle->rule[i], new_dest); } @@ -2466,6 +2487,14 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, root_ns = steering->rdma_tx_root_ns; prio = RDMA_TX_IPSEC_PRIO; break; + case MLX5_FLOW_NAMESPACE_RDMA_RX_MACSEC: + root_ns = steering->rdma_rx_root_ns; + prio = RDMA_RX_MACSEC_PRIO; + break; + case MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC: + root_ns = steering->rdma_tx_root_ns; + prio = RDMA_TX_MACSEC_PRIO; + break; default: /* Must be NIC RX */ WARN_ON(!is_nic_rx_ns(type)); root_ns = steering->root_ns; @@ -3050,6 +3079,12 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering) if (err) goto out_err; + maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_CRYPTO_INGRESS, 3); + if (IS_ERR(maj_prio)) { + err = PTR_ERR(maj_prio); + goto out_err; + } + err = create_fdb_fast_path(steering); if (err) goto out_err; @@ -3072,6 +3107,12 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering) goto out_err; } + maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_CRYPTO_EGRESS, 3); + if (IS_ERR(maj_prio)) { + err = PTR_ERR(maj_prio); + goto out_err; + } + /* We put this priority last, knowing that nothing will get here * unless explicitly forwarded to. 
This is possible because the * slow path tables have catch all rules and nothing gets passed diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index fb2035a5ec99..58f4c0d0fafa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -143,90 +143,86 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) { int err; - err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL); + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR); if (err) return err; if (MLX5_CAP_GEN(dev, port_selection_cap)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_PORT_SELECTION); + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_PORT_SELECTION, HCA_CAP_OPMOD_GET_CUR); if (err) return err; } if (MLX5_CAP_GEN(dev, hca_cap_2)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2); + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_GENERAL_2, HCA_CAP_OPMOD_GET_CUR); if (err) return err; } if (MLX5_CAP_GEN(dev, eth_net_offloads)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS); + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ETHERNET_OFFLOADS, + HCA_CAP_OPMOD_GET_CUR); if (err) return err; } if (MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_IPOIB_ENHANCED_OFFLOADS); + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_IPOIB_ENHANCED_OFFLOADS, + HCA_CAP_OPMOD_GET_CUR); if (err) return err; } if (MLX5_CAP_GEN(dev, pg)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_ODP); + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ODP, HCA_CAP_OPMOD_GET_CUR); if (err) return err; } if (MLX5_CAP_GEN(dev, atomic)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC); + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ATOMIC, HCA_CAP_OPMOD_GET_CUR); if (err) return err; } if (MLX5_CAP_GEN(dev, roce)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE); + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ROCE, HCA_CAP_OPMOD_GET_CUR); if (err) return err; } if (MLX5_CAP_GEN(dev, nic_flow_table) || MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE); + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_FLOW_TABLE, HCA_CAP_OPMOD_GET_CUR); if (err) return err; } if (MLX5_ESWITCH_MANAGER(dev)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE); + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ESWITCH_FLOW_TABLE, + HCA_CAP_OPMOD_GET_CUR); if (err) return err; - err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH); - if (err) - return err; - } - - if (MLX5_CAP_GEN(dev, vector_calc)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_VECTOR_CALC); + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ESWITCH, HCA_CAP_OPMOD_GET_CUR); if (err) return err; } if (MLX5_CAP_GEN(dev, qos)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_QOS); + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_QOS, HCA_CAP_OPMOD_GET_CUR); if (err) return err; } if (MLX5_CAP_GEN(dev, debug)) - mlx5_core_get_caps(dev, MLX5_CAP_DEBUG); + mlx5_core_get_caps_mode(dev, MLX5_CAP_DEBUG, HCA_CAP_OPMOD_GET_CUR); if (MLX5_CAP_GEN(dev, pcam_reg)) mlx5_get_pcam_reg(dev); if (MLX5_CAP_GEN(dev, mcam_reg)) { mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_FIRST_128); - mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9080_0x90FF); mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9100_0x917F); } @@ -234,57 +230,52 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) mlx5_get_qcam_reg(dev); if (MLX5_CAP_GEN(dev, device_memory)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_MEM); + err = mlx5_core_get_caps_mode(dev, 
MLX5_CAP_DEV_MEM, HCA_CAP_OPMOD_GET_CUR); if (err) return err; } if (MLX5_CAP_GEN(dev, event_cap)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_EVENT); + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_DEV_EVENT, HCA_CAP_OPMOD_GET_CUR); if (err) return err; } if (MLX5_CAP_GEN(dev, tls_tx) || MLX5_CAP_GEN(dev, tls_rx)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_TLS); + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_TLS, HCA_CAP_OPMOD_GET_CUR); if (err) return err; } if (MLX5_CAP_GEN_64(dev, general_obj_types) & MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) { - err = mlx5_core_get_caps(dev, MLX5_CAP_VDPA_EMULATION); + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_VDPA_EMULATION, HCA_CAP_OPMOD_GET_CUR); if (err) return err; } if (MLX5_CAP_GEN(dev, ipsec_offload)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_IPSEC); + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_IPSEC, HCA_CAP_OPMOD_GET_CUR); if (err) return err; } if (MLX5_CAP_GEN(dev, crypto)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_CRYPTO); - if (err) - return err; - } - - if (MLX5_CAP_GEN(dev, shampo)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_SHAMPO); + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_CRYPTO, HCA_CAP_OPMOD_GET_CUR); if (err) return err; } if (MLX5_CAP_GEN_64(dev, general_obj_types) & MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD) { - err = mlx5_core_get_caps(dev, MLX5_CAP_MACSEC); + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_MACSEC, HCA_CAP_OPMOD_GET_CUR); if (err) return err; } if (MLX5_CAP_GEN(dev, adv_virtualization)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_ADV_VIRTUALIZATION); + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ADV_VIRTUALIZATION, + HCA_CAP_OPMOD_GET_CUR); if (err) return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index 4804990b7f22..c4e19d627da2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -127,17 +127,23 @@ static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev, if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state)) goto out; + if (!reset_state) + return 0; + switch (reset_state) { case MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION: case MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS: - NL_SET_ERR_MSG_MOD(extack, "Sync reset was already triggered"); + NL_SET_ERR_MSG_MOD(extack, "Sync reset still in progress"); return -EBUSY; - case MLX5_MFRL_REG_RESET_STATE_TIMEOUT: - NL_SET_ERR_MSG_MOD(extack, "Sync reset got timeout"); + case MLX5_MFRL_REG_RESET_STATE_NEG_TIMEOUT: + NL_SET_ERR_MSG_MOD(extack, "Sync reset negotiation timeout"); return -ETIMEDOUT; case MLX5_MFRL_REG_RESET_STATE_NACK: NL_SET_ERR_MSG_MOD(extack, "One of the hosts disabled reset"); return -EPERM; + case MLX5_MFRL_REG_RESET_STATE_UNLOAD_TIMEOUT: + NL_SET_ERR_MSG_MOD(extack, "Sync reset unload timeout"); + return -ETIMEDOUT; } out: @@ -151,7 +157,7 @@ int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel, struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {}; u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {}; - int err; + int err, rst_res; set_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags); @@ -164,13 +170,34 @@ int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel, return 0; clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags); - if (err == -EREMOTEIO && MLX5_CAP_MCAM_FEATURE(dev, reset_state)) - return mlx5_fw_reset_get_reset_state_err(dev, extack); + if (err == -EREMOTEIO && 
MLX5_CAP_MCAM_FEATURE(dev, reset_state)) { + rst_res = mlx5_fw_reset_get_reset_state_err(dev, extack); + return rst_res ? rst_res : err; + } NL_SET_ERR_MSG_MOD(extack, "Sync reset command failed"); return mlx5_cmd_check(dev, err, in, out); } +int mlx5_fw_reset_verify_fw_complete(struct mlx5_core_dev *dev, + struct netlink_ext_ack *extack) +{ + u8 rst_state; + int err; + + err = mlx5_fw_reset_get_reset_state_err(dev, extack); + if (err) + return err; + + rst_state = mlx5_get_fw_rst_state(dev); + if (!rst_state) + return 0; + + mlx5_core_err(dev, "Sync reset did not complete, state=%d\n", rst_state); + NL_SET_ERR_MSG_MOD(extack, "Sync reset did not complete successfully"); + return rst_state; +} + int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev) { return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL0, 0, 0, false); @@ -298,6 +325,29 @@ static void mlx5_fw_live_patch_event(struct work_struct *work) mlx5_core_err(dev, "Failed to reload FW tracer\n"); } +#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE) +static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev) +{ + struct pci_dev *bridge = dev->pdev->bus->self; + u16 reg16; + int err; + + if (!bridge) + return -EOPNOTSUPP; + + err = pcie_capability_read_word(bridge, PCI_EXP_SLTCTL, &reg16); + if (err) + return err; + + if ((reg16 & PCI_EXP_SLTCTL_HPIE) && (reg16 & PCI_EXP_SLTCTL_DLLSCE)) { + mlx5_core_warn(dev, "FW reset is not supported as HotPlug is enabled\n"); + return -EOPNOTSUPP; + } + + return 0; +} +#endif + static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id) { struct pci_bus *bridge_bus = dev->pdev->bus; @@ -311,7 +361,7 @@ static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id) list_for_each_entry(sdev, &bridge_bus->devices, bus_list) { err = pci_read_config_word(sdev, PCI_DEVICE_ID, &sdev_id); if (err) - return err; + return pcibios_err_to_errno(err); if (sdev_id != dev_id) { mlx5_core_warn(dev, "unrecognized dev_id (0x%x)\n", sdev_id); return -EPERM; @@ -330,6 +380,12 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev) return false; } +#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE) + err = mlx5_check_hotplug_interrupt(dev); + if (err) + return false; +#endif + err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id); if (err) return false; @@ -371,7 +427,7 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev) err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id); if (err) - return err; + return pcibios_err_to_errno(err); err = mlx5_check_dev_ids(dev, dev_id); if (err) return err; @@ -384,18 +440,13 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev) pci_cfg_access_lock(sdev); } /* PCI link toggle */ - err = pci_read_config_word(bridge, cap + PCI_EXP_LNKCTL, &reg16); + err = pcie_capability_set_word(bridge, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_LD); if (err) - return err; - reg16 |= PCI_EXP_LNKCTL_LD; - err = pci_write_config_word(bridge, cap + PCI_EXP_LNKCTL, reg16); - if (err) - return err; + return pcibios_err_to_errno(err); msleep(500); - reg16 &= ~PCI_EXP_LNKCTL_LD; - err = pci_write_config_word(bridge, cap + PCI_EXP_LNKCTL, reg16); + err = pcie_capability_clear_word(bridge, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_LD); if (err) - return err; + return pcibios_err_to_errno(err); /* Check link */ if (!bridge->link_active_reporting) { @@ -408,7 +459,7 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev) do { err = pci_read_config_word(bridge, cap + PCI_EXP_LNKSTA, &reg16); if (err) - return err; + return pcibios_err_to_errno(err); if (reg16 & PCI_EXP_LNKSTA_DLLLA) 
break; msleep(20); @@ -426,7 +477,7 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev) do { err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &reg16); if (err) - return err; + return pcibios_err_to_errno(err); if (reg16 == dev_id) break; msleep(20); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h index c57465595f7c..ea527d06a85f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h @@ -12,6 +12,8 @@ int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel, int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev); int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev); +int mlx5_fw_reset_verify_fw_complete(struct mlx5_core_dev *dev, + struct netlink_ext_ack *extack); void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev); void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev); void mlx5_drain_fw_reset(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 187cb2c464f8..8ff6dc9bc803 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -50,20 +50,6 @@ enum { }; enum { - MLX5_HEALTH_SYNDR_FW_ERR = 0x1, - MLX5_HEALTH_SYNDR_IRISC_ERR = 0x7, - MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR = 0x8, - MLX5_HEALTH_SYNDR_CRC_ERR = 0x9, - MLX5_HEALTH_SYNDR_FETCH_PCI_ERR = 0xa, - MLX5_HEALTH_SYNDR_HW_FTL_ERR = 0xb, - MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR = 0xc, - MLX5_HEALTH_SYNDR_EQ_ERR = 0xd, - MLX5_HEALTH_SYNDR_EQ_INV = 0xe, - MLX5_HEALTH_SYNDR_FFSER_ERR = 0xf, - MLX5_HEALTH_SYNDR_HIGH_TEMP = 0x10 -}; - -enum { MLX5_DROP_HEALTH_WORK, }; @@ -357,28 +343,30 @@ static int mlx5_health_try_recover(struct mlx5_core_dev *dev) static const char *hsynd_str(u8 synd) { switch (synd) { - case MLX5_HEALTH_SYNDR_FW_ERR: + case MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_INTERNAL_ERR: return "firmware internal error"; - case MLX5_HEALTH_SYNDR_IRISC_ERR: + case MLX5_INITIAL_SEG_HEALTH_SYNDROME_DEAD_IRISC: return "irisc not responding"; - case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR: + case MLX5_INITIAL_SEG_HEALTH_SYNDROME_HW_FATAL_ERR: return "unrecoverable hardware error"; - case MLX5_HEALTH_SYNDR_CRC_ERR: + case MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_CRC_ERR: return "firmware CRC error"; - case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR: + case MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_FETCH_PCI_ERR: return "ICM fetch PCI error"; - case MLX5_HEALTH_SYNDR_HW_FTL_ERR: + case MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PAGE_ERR: return "HW fatal error\n"; - case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR: + case MLX5_INITIAL_SEG_HEALTH_SYNDROME_ASYNCHRONOUS_EQ_BUF_OVERRUN: return "async EQ buffer overrun"; - case MLX5_HEALTH_SYNDR_EQ_ERR: + case MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_IN_ERR: return "EQ error"; - case MLX5_HEALTH_SYNDR_EQ_INV: + case MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV: return "Invalid EQ referenced"; - case MLX5_HEALTH_SYNDR_FFSER_ERR: + case MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR: return "FFSER error"; - case MLX5_HEALTH_SYNDR_HIGH_TEMP: + case MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR: return "High temperature"; + case MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR: return "ICM fetch PCI data poisoned error"; default: return "unrecognized error"; } @@ -462,14 +450,15 @@ mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter, struct mlx5_core_dev *dev = 
devlink_health_reporter_priv(reporter); struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; - u8 synd; - int err; + u8 synd = ioread8(&h->synd); - synd = ioread8(&h->synd); - err = devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd); - if (err || !synd) - return err; - return devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd)); + if (!synd) + return 0; + + devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd); + devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd)); + + return 0; } struct mlx5_fw_reporter_ctx { @@ -477,94 +466,47 @@ struct mlx5_fw_reporter_ctx { int miss_counter; }; -static int +static void mlx5_fw_reporter_ctx_pairs_put(struct devlink_fmsg *fmsg, struct mlx5_fw_reporter_ctx *fw_reporter_ctx) { - int err; - - err = devlink_fmsg_u8_pair_put(fmsg, "syndrome", - fw_reporter_ctx->err_synd); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "fw_miss_counter", - fw_reporter_ctx->miss_counter); - if (err) - return err; - return 0; + devlink_fmsg_u8_pair_put(fmsg, "syndrome", fw_reporter_ctx->err_synd); + devlink_fmsg_u32_pair_put(fmsg, "fw_miss_counter", fw_reporter_ctx->miss_counter); } -static int +static void mlx5_fw_reporter_heath_buffer_data_put(struct mlx5_core_dev *dev, struct devlink_fmsg *fmsg) { struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; u8 rfr_severity; - int err; int i; if (!ioread8(&h->synd)) - return 0; - - err = devlink_fmsg_pair_nest_start(fmsg, "health buffer"); - if (err) - return err; - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; - err = devlink_fmsg_arr_pair_nest_start(fmsg, "assert_var"); - if (err) - return err; + return; - for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) { - err = devlink_fmsg_u32_put(fmsg, ioread32be(h->assert_var + i)); - if (err) - return err; - } - err = devlink_fmsg_arr_pair_nest_end(fmsg); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "assert_exit_ptr", - ioread32be(&h->assert_exit_ptr)); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "assert_callra", - ioread32be(&h->assert_callra)); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "time", ioread32be(&h->time)); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "hw_id", ioread32be(&h->hw_id)); - if (err) - return err; + devlink_fmsg_pair_nest_start(fmsg, "health buffer"); + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_arr_pair_nest_start(fmsg, "assert_var"); + for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) + devlink_fmsg_u32_put(fmsg, ioread32be(h->assert_var + i)); + devlink_fmsg_arr_pair_nest_end(fmsg); + devlink_fmsg_u32_pair_put(fmsg, "assert_exit_ptr", + ioread32be(&h->assert_exit_ptr)); + devlink_fmsg_u32_pair_put(fmsg, "assert_callra", + ioread32be(&h->assert_callra)); + devlink_fmsg_u32_pair_put(fmsg, "time", ioread32be(&h->time)); + devlink_fmsg_u32_pair_put(fmsg, "hw_id", ioread32be(&h->hw_id)); rfr_severity = ioread8(&h->rfr_severity); - err = devlink_fmsg_u8_pair_put(fmsg, "rfr", mlx5_health_get_rfr(rfr_severity)); - if (err) - return err; - err = devlink_fmsg_u8_pair_put(fmsg, "severity", mlx5_health_get_severity(rfr_severity)); - if (err) - return err; - err = devlink_fmsg_u8_pair_put(fmsg, "irisc_index", - ioread8(&h->irisc_index)); - if (err) - return err; - err = devlink_fmsg_u8_pair_put(fmsg, "synd", ioread8(&h->synd)); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "ext_synd", - ioread16be(&h->ext_synd)); - if 
(err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "raw_fw_ver", - ioread32be(&h->fw_ver)); - if (err) - return err; - err = devlink_fmsg_obj_nest_end(fmsg); - if (err) - return err; - return devlink_fmsg_pair_nest_end(fmsg); + devlink_fmsg_u8_pair_put(fmsg, "rfr", mlx5_health_get_rfr(rfr_severity)); + devlink_fmsg_u8_pair_put(fmsg, "severity", mlx5_health_get_severity(rfr_severity)); + devlink_fmsg_u8_pair_put(fmsg, "irisc_index", ioread8(&h->irisc_index)); + devlink_fmsg_u8_pair_put(fmsg, "synd", ioread8(&h->synd)); + devlink_fmsg_u32_pair_put(fmsg, "ext_synd", ioread16be(&h->ext_synd)); + devlink_fmsg_u32_pair_put(fmsg, "raw_fw_ver", ioread32be(&h->fw_ver)); + devlink_fmsg_obj_nest_end(fmsg); + devlink_fmsg_pair_nest_end(fmsg); } static int @@ -582,14 +524,11 @@ mlx5_fw_reporter_dump(struct devlink_health_reporter *reporter, if (priv_ctx) { struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx; - err = mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx); - if (err) - return err; + mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx); } - err = mlx5_fw_reporter_heath_buffer_data_put(dev, fmsg); - if (err) - return err; + mlx5_fw_reporter_heath_buffer_data_put(dev, fmsg); + return mlx5_fw_tracer_get_saved_traces_objects(dev->tracer, fmsg); } @@ -655,12 +594,10 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter, if (priv_ctx) { struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx; - err = mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx); - if (err) - goto free_data; + mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx); } - err = devlink_fmsg_binary_pair_put(fmsg, "crdump_data", cr_data, crdump_size); + devlink_fmsg_binary_pair_put(fmsg, "crdump_data", cr_data, crdump_size); free_data: kvfree(cr_data); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c new file mode 100644 index 000000000000..353f81dccd1c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c @@ -0,0 +1,418 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved + +#include <linux/hwmon.h> +#include <linux/bitmap.h> +#include <linux/mlx5/device.h> +#include <linux/mlx5/mlx5_ifc.h> +#include <linux/mlx5/port.h> +#include "mlx5_core.h" +#include "hwmon.h" + +#define CHANNELS_TYPE_NUM 2 /* chip channel and temp channel */ +#define CHIP_CONFIG_NUM 1 + +/* module 0 is mapped to sensor_index 64 in MTMP register */ +#define to_mtmp_module_sensor_idx(idx) (64 + (idx)) + +/* All temperatures retrieved in units of 0.125C. hwmon framework expect + * it in units of millidegrees C. Hence multiply values by 125. 
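+ * For example, a raw MTMP reading of 200 corresponds to 200 * 0.125 = 25.0 C + * and is reported through hwmon as mtmp_temp_to_mdeg(200) = 25000 millidegrees.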
+ */ +#define mtmp_temp_to_mdeg(temp) ((temp) * 125) + +struct temp_channel_desc { + u32 sensor_index; + char sensor_name[32]; +}; + +/* chip_channel_config and channel_info arrays must be 0-terminated, hence + 1 */ +struct mlx5_hwmon { + struct mlx5_core_dev *mdev; + struct device *hwmon_dev; + struct hwmon_channel_info chip_info; + u32 chip_channel_config[CHIP_CONFIG_NUM + 1]; + struct hwmon_channel_info temp_info; + u32 *temp_channel_config; + const struct hwmon_channel_info *channel_info[CHANNELS_TYPE_NUM + 1]; + struct hwmon_chip_info chip; + struct temp_channel_desc *temp_channel_desc; + u32 asic_platform_scount; + u32 module_scount; +}; + +static int mlx5_hwmon_query_mtmp(struct mlx5_core_dev *mdev, u32 sensor_index, u32 *mtmp_out) +{ + u32 mtmp_in[MLX5_ST_SZ_DW(mtmp_reg)] = {}; + + MLX5_SET(mtmp_reg, mtmp_in, sensor_index, sensor_index); + + return mlx5_core_access_reg(mdev, mtmp_in, sizeof(mtmp_in), + mtmp_out, MLX5_ST_SZ_BYTES(mtmp_reg), + MLX5_REG_MTMP, 0, 0); +} + +static int mlx5_hwmon_reset_max_temp(struct mlx5_core_dev *mdev, int sensor_index) +{ + u32 mtmp_out[MLX5_ST_SZ_DW(mtmp_reg)] = {}; + u32 mtmp_in[MLX5_ST_SZ_DW(mtmp_reg)] = {}; + + MLX5_SET(mtmp_reg, mtmp_in, sensor_index, sensor_index); + MLX5_SET(mtmp_reg, mtmp_in, mtr, 1); + + return mlx5_core_access_reg(mdev, mtmp_in, sizeof(mtmp_in), + mtmp_out, sizeof(mtmp_out), + MLX5_REG_MTMP, 0, 0); +} + +static int mlx5_hwmon_enable_max_temp(struct mlx5_core_dev *mdev, int sensor_index) +{ + u32 mtmp_out[MLX5_ST_SZ_DW(mtmp_reg)] = {}; + u32 mtmp_in[MLX5_ST_SZ_DW(mtmp_reg)] = {}; + int err; + + err = mlx5_hwmon_query_mtmp(mdev, sensor_index, mtmp_in); + if (err) + return err; + + MLX5_SET(mtmp_reg, mtmp_in, mte, 1); + return mlx5_core_access_reg(mdev, mtmp_in, sizeof(mtmp_in), + mtmp_out, sizeof(mtmp_out), + MLX5_REG_MTMP, 0, 1); +} + +static int mlx5_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, + int channel, long *val) +{ + struct mlx5_hwmon *hwmon = dev_get_drvdata(dev); + u32 mtmp_out[MLX5_ST_SZ_DW(mtmp_reg)] = {}; + int err; + + if (type != hwmon_temp) + return -EOPNOTSUPP; + + err = mlx5_hwmon_query_mtmp(hwmon->mdev, hwmon->temp_channel_desc[channel].sensor_index, + mtmp_out); + if (err) + return err; + + switch (attr) { + case hwmon_temp_input: + *val = mtmp_temp_to_mdeg(MLX5_GET(mtmp_reg, mtmp_out, temperature)); + return 0; + case hwmon_temp_highest: + *val = mtmp_temp_to_mdeg(MLX5_GET(mtmp_reg, mtmp_out, max_temperature)); + return 0; + case hwmon_temp_crit: + *val = mtmp_temp_to_mdeg(MLX5_GET(mtmp_reg, mtmp_out, temp_threshold_hi)); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int mlx5_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, + int channel, long val) +{ + struct mlx5_hwmon *hwmon = dev_get_drvdata(dev); + + if (type != hwmon_temp || attr != hwmon_temp_reset_history) + return -EOPNOTSUPP; + + return mlx5_hwmon_reset_max_temp(hwmon->mdev, + hwmon->temp_channel_desc[channel].sensor_index); +} + +static umode_t mlx5_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr, + int channel) +{ + if (type != hwmon_temp) + return 0; + + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_highest: + case hwmon_temp_crit: + case hwmon_temp_label: + return 0444; + case hwmon_temp_reset_history: + return 0200; + default: + return 0; + } +} + +static int mlx5_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr, + int channel, const char **str) +{ + struct mlx5_hwmon *hwmon = dev_get_drvdata(dev); + + if 
(type != hwmon_temp || attr != hwmon_temp_label) + return -EOPNOTSUPP; + + *str = (const char *)hwmon->temp_channel_desc[channel].sensor_name; + return 0; +} + +static const struct hwmon_ops mlx5_hwmon_ops = { + .read = mlx5_hwmon_read, + .read_string = mlx5_hwmon_read_string, + .is_visible = mlx5_hwmon_is_visible, + .write = mlx5_hwmon_write, +}; + +static int mlx5_hwmon_init_channels_names(struct mlx5_hwmon *hwmon) +{ + u32 i; + + for (i = 0; i < hwmon->asic_platform_scount + hwmon->module_scount; i++) { + u32 mtmp_out[MLX5_ST_SZ_DW(mtmp_reg)] = {}; + char *sensor_name; + int err; + + err = mlx5_hwmon_query_mtmp(hwmon->mdev, hwmon->temp_channel_desc[i].sensor_index, + mtmp_out); + if (err) + return err; + + sensor_name = MLX5_ADDR_OF(mtmp_reg, mtmp_out, sensor_name_hi); + if (!*sensor_name) { + snprintf(hwmon->temp_channel_desc[i].sensor_name, + sizeof(hwmon->temp_channel_desc[i].sensor_name), "sensor%u", + hwmon->temp_channel_desc[i].sensor_index); + continue; + } + + memcpy(&hwmon->temp_channel_desc[i].sensor_name, sensor_name, + MLX5_FLD_SZ_BYTES(mtmp_reg, sensor_name_hi) + + MLX5_FLD_SZ_BYTES(mtmp_reg, sensor_name_lo)); + } + + return 0; +} + +static int mlx5_hwmon_get_module_sensor_index(struct mlx5_core_dev *mdev, u32 *module_index) +{ + int module_num; + int err; + + err = mlx5_query_module_num(mdev, &module_num); + if (err) + return err; + + *module_index = to_mtmp_module_sensor_idx(module_num); + + return 0; +} + +static int mlx5_hwmon_init_sensors_indexes(struct mlx5_hwmon *hwmon, u64 sensor_map) +{ + DECLARE_BITMAP(smap, BITS_PER_TYPE(sensor_map)); + unsigned long bit_pos; + int err = 0; + int i = 0; + + bitmap_from_u64(smap, sensor_map); + + for_each_set_bit(bit_pos, smap, BITS_PER_TYPE(sensor_map)) { + hwmon->temp_channel_desc[i].sensor_index = bit_pos; + i++; + } + + if (hwmon->module_scount) + err = mlx5_hwmon_get_module_sensor_index(hwmon->mdev, + &hwmon->temp_channel_desc[i].sensor_index); + + return err; +} + +static void mlx5_hwmon_channel_info_init(struct mlx5_hwmon *hwmon) +{ + int i; + + hwmon->channel_info[0] = &hwmon->chip_info; + hwmon->channel_info[1] = &hwmon->temp_info; + + hwmon->chip_channel_config[0] = HWMON_C_REGISTER_TZ; + hwmon->chip_info.config = (const u32 *)hwmon->chip_channel_config; + hwmon->chip_info.type = hwmon_chip; + + for (i = 0; i < hwmon->asic_platform_scount + hwmon->module_scount; i++) + hwmon->temp_channel_config[i] = HWMON_T_INPUT | HWMON_T_HIGHEST | HWMON_T_CRIT | + HWMON_T_RESET_HISTORY | HWMON_T_LABEL; + + hwmon->temp_info.config = (const u32 *)hwmon->temp_channel_config; + hwmon->temp_info.type = hwmon_temp; +} + +static int mlx5_hwmon_is_module_mon_cap(struct mlx5_core_dev *mdev, bool *mon_cap) +{ + u32 mtmp_out[MLX5_ST_SZ_DW(mtmp_reg)]; + u32 module_index; + int err; + + err = mlx5_hwmon_get_module_sensor_index(mdev, &module_index); + if (err) + return err; + + err = mlx5_hwmon_query_mtmp(mdev, module_index, mtmp_out); + if (err) + return err; + + if (MLX5_GET(mtmp_reg, mtmp_out, temperature)) + *mon_cap = true; + + return 0; +} + +static int mlx5_hwmon_get_sensors_count(struct mlx5_core_dev *mdev, u32 *asic_platform_scount) +{ + u32 mtcap_out[MLX5_ST_SZ_DW(mtcap_reg)] = {}; + u32 mtcap_in[MLX5_ST_SZ_DW(mtcap_reg)] = {}; + int err; + + err = mlx5_core_access_reg(mdev, mtcap_in, sizeof(mtcap_in), + mtcap_out, sizeof(mtcap_out), + MLX5_REG_MTCAP, 0, 0); + if (err) + return err; + + *asic_platform_scount = MLX5_GET(mtcap_reg, mtcap_out, sensor_count); + + return 0; +} + +static void mlx5_hwmon_free(struct mlx5_hwmon *hwmon) +{ + if 
(!hwmon) + return; + + kfree(hwmon->temp_channel_config); + kfree(hwmon->temp_channel_desc); + kfree(hwmon); +} + +static struct mlx5_hwmon *mlx5_hwmon_alloc(struct mlx5_core_dev *mdev) +{ + struct mlx5_hwmon *hwmon; + bool mon_cap = false; + u32 sensors_count; + int err; + + hwmon = kzalloc(sizeof(*mdev->hwmon), GFP_KERNEL); + if (!hwmon) + return ERR_PTR(-ENOMEM); + + err = mlx5_hwmon_get_sensors_count(mdev, &hwmon->asic_platform_scount); + if (err) + goto err_free_hwmon; + + /* check if module sensor has thermal mon cap. if yes, allocate channel desc for it */ + err = mlx5_hwmon_is_module_mon_cap(mdev, &mon_cap); + if (err) + goto err_free_hwmon; + + hwmon->module_scount = mon_cap ? 1 : 0; + sensors_count = hwmon->asic_platform_scount + hwmon->module_scount; + hwmon->temp_channel_desc = kcalloc(sensors_count, sizeof(*hwmon->temp_channel_desc), + GFP_KERNEL); + if (!hwmon->temp_channel_desc) { + err = -ENOMEM; + goto err_free_hwmon; + } + + /* sensors configuration values array, must be 0-terminated hence, + 1 */ + hwmon->temp_channel_config = kcalloc(sensors_count + 1, sizeof(*hwmon->temp_channel_config), + GFP_KERNEL); + if (!hwmon->temp_channel_config) { + err = -ENOMEM; + goto err_free_temp_channel_desc; + } + + hwmon->mdev = mdev; + + return hwmon; + +err_free_temp_channel_desc: + kfree(hwmon->temp_channel_desc); +err_free_hwmon: + kfree(hwmon); + return ERR_PTR(err); +} + +static int mlx5_hwmon_dev_init(struct mlx5_hwmon *hwmon) +{ + u32 mtcap_out[MLX5_ST_SZ_DW(mtcap_reg)] = {}; + u32 mtcap_in[MLX5_ST_SZ_DW(mtcap_reg)] = {}; + int err; + int i; + + err = mlx5_core_access_reg(hwmon->mdev, mtcap_in, sizeof(mtcap_in), + mtcap_out, sizeof(mtcap_out), + MLX5_REG_MTCAP, 0, 0); + if (err) + return err; + + mlx5_hwmon_channel_info_init(hwmon); + mlx5_hwmon_init_sensors_indexes(hwmon, MLX5_GET64(mtcap_reg, mtcap_out, sensor_map)); + err = mlx5_hwmon_init_channels_names(hwmon); + if (err) + return err; + + for (i = 0; i < hwmon->asic_platform_scount + hwmon->module_scount; i++) { + err = mlx5_hwmon_enable_max_temp(hwmon->mdev, + hwmon->temp_channel_desc[i].sensor_index); + if (err) + return err; + } + + hwmon->chip.ops = &mlx5_hwmon_ops; + hwmon->chip.info = (const struct hwmon_channel_info **)hwmon->channel_info; + + return 0; +} + +int mlx5_hwmon_dev_register(struct mlx5_core_dev *mdev) +{ + struct device *dev = mdev->device; + struct mlx5_hwmon *hwmon; + int err; + + if (!MLX5_CAP_MCAM_REG(mdev, mtmp)) + return 0; + + hwmon = mlx5_hwmon_alloc(mdev); + if (IS_ERR(hwmon)) + return PTR_ERR(hwmon); + + err = mlx5_hwmon_dev_init(hwmon); + if (err) + goto err_free_hwmon; + + hwmon->hwmon_dev = hwmon_device_register_with_info(dev, "mlx5", + hwmon, + &hwmon->chip, + NULL); + if (IS_ERR(hwmon->hwmon_dev)) { + err = PTR_ERR(hwmon->hwmon_dev); + goto err_free_hwmon; + } + + mdev->hwmon = hwmon; + return 0; + +err_free_hwmon: + mlx5_hwmon_free(hwmon); + return err; +} + +void mlx5_hwmon_dev_unregister(struct mlx5_core_dev *mdev) +{ + struct mlx5_hwmon *hwmon = mdev->hwmon; + + if (!hwmon) + return; + + hwmon_device_unregister(hwmon->hwmon_dev); + mlx5_hwmon_free(hwmon); + mdev->hwmon = NULL; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h new file mode 100644 index 000000000000..999654a9b9da --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB + * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. 
All rights reserved + */ +#ifndef __MLX5_HWMON_H__ +#define __MLX5_HWMON_H__ + +#include <linux/mlx5/driver.h> + +#if IS_ENABLED(CONFIG_HWMON) + +int mlx5_hwmon_dev_register(struct mlx5_core_dev *mdev); +void mlx5_hwmon_dev_unregister(struct mlx5_core_dev *mdev); + +#else +static inline int mlx5_hwmon_dev_register(struct mlx5_core_dev *mdev) +{ + return 0; +} + +static inline void mlx5_hwmon_dev_unregister(struct mlx5_core_dev *mdev) {} + +#endif + +#endif /* __MLX5_HWMON_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index baa7ef812313..2bf77a5251b4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -418,12 +418,6 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) return -ENOMEM; } - priv->rx_res = mlx5e_rx_res_alloc(); - if (!priv->rx_res) { - err = -ENOMEM; - goto err_free_fs; - } - mlx5e_create_q_counters(priv); err = mlx5e_open_drop_rq(priv, &priv->drop_rq); @@ -432,12 +426,13 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) goto err_destroy_q_counters; } - err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0, - priv->max_nch, priv->drop_rq.rqn, - &priv->channels.params.packet_merge, - priv->channels.params.num_channels); - if (err) + priv->rx_res = mlx5e_rx_res_create(priv->mdev, 0, priv->max_nch, priv->drop_rq.rqn, + &priv->channels.params.packet_merge, + priv->channels.params.num_channels); + if (IS_ERR(priv->rx_res)) { + err = PTR_ERR(priv->rx_res); goto err_close_drop_rq; + } err = mlx5i_create_flow_steering(priv); if (err) @@ -447,13 +442,11 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) err_destroy_rx_res: mlx5e_rx_res_destroy(priv->rx_res); + priv->rx_res = ERR_PTR(-EINVAL); err_close_drop_rq: mlx5e_close_drop_rq(&priv->drop_rq); err_destroy_q_counters: mlx5e_destroy_q_counters(priv); - mlx5e_rx_res_free(priv->rx_res); - priv->rx_res = NULL; -err_free_fs: mlx5e_fs_cleanup(priv->fs); return err; } @@ -462,10 +455,9 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv) { mlx5i_destroy_flow_steering(priv); mlx5e_rx_res_destroy(priv->rx_res); + priv->rx_res = ERR_PTR(-EINVAL); mlx5e_close_drop_rq(&priv->drop_rq); mlx5e_destroy_q_counters(priv); - mlx5e_rx_res_free(priv->rx_res); - priv->rx_res = NULL; mlx5e_fs_cleanup(priv->fs); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c index fa467335526e..612e666ec263 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c @@ -156,67 +156,15 @@ unlock: return least_loaded_irq; } -void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev, struct mlx5_irq **irqs, - int num_irqs) +void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq) { struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev); - int i; - - for (i = 0; i < num_irqs; i++) { - int cpu = cpumask_first(mlx5_irq_get_affinity_mask(irqs[i])); - - synchronize_irq(pci_irq_vector(pool->dev->pdev, - mlx5_irq_get_index(irqs[i]))); - if (mlx5_irq_put(irqs[i])) - if (pool->irqs_per_cpu) - cpu_put(pool, cpu); - } -} - -/** - * mlx5_irq_affinity_irqs_request_auto - request one or more IRQs for mlx5 device. - * @dev: mlx5 device that is requesting the IRQs. - * @nirqs: number of IRQs to request. - * @irqs: an output array of IRQs pointers. - * - * Each IRQ is bounded to at most 1 CPU. 
- * This function is requesting IRQs according to the default assignment. - * The default assignment policy is: - * - in each iteration, request the least loaded IRQ which is not bound to any - * CPU of the previous IRQs requested. - * - * This function returns the number of IRQs requested, (which might be smaller than - * @nirqs), if successful, or a negative error code in case of an error. - */ -int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs, - struct mlx5_irq **irqs) -{ - struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev); - struct irq_affinity_desc af_desc = {}; - struct mlx5_irq *irq; - int i = 0; + int cpu; - af_desc.is_managed = 1; - cpumask_copy(&af_desc.mask, cpu_online_mask); - for (i = 0; i < nirqs; i++) { - if (mlx5_irq_pool_is_sf_pool(pool)) - irq = mlx5_irq_affinity_request(pool, &af_desc); - else - /* In case SF pool doesn't exists, fallback to the PF IRQs. - * The PF IRQs are already allocated and binded to CPU - * at this point. Hence, only an index is needed. - */ - irq = mlx5_irq_request(dev, i, NULL, NULL); - if (IS_ERR(irq)) - break; - irqs[i] = irq; - cpumask_clear_cpu(cpumask_first(mlx5_irq_get_affinity_mask(irq)), &af_desc.mask); - mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n", - pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)), - cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)), - mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ); - } - if (!i) - return PTR_ERR(irq); - return i; + cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq)); + synchronize_irq(pci_irq_vector(pool->dev->pdev, + mlx5_irq_get_index(irq))); + if (mlx5_irq_put(irq)) + if (pool->irqs_per_cpu) + cpu_put(pool, cpu); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index f0a074b2fcdf..d14459e5c04f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -835,7 +835,7 @@ static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev) dev = ldev->pf[MLX5_LAG_P1].dev; if (is_mdev_switchdev_mode(dev) && mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) && - mlx5_devcom_comp_is_ready(dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS) && + mlx5_esw_offloads_devcom_is_ready(dev->priv.eswitch) && MLX5_CAP_ESW(dev, esw_shared_ingress_acl) && mlx5_eswitch_get_npeers(dev->priv.eswitch) == MLX5_CAP_GEN(dev, num_lag_ports) - 1) return true; @@ -943,6 +943,26 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) } } +/* The last mdev to unregister will destroy the workqueue before removing the + * devcom component, and as all the mdevs use the same devcom component we are + * guaranteed that the devcom is valid while the calling work is running. 
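+ * mlx5_do_bond_work() below relies on this: it can take the lock of the + * component returned by mlx5_lag_get_devcom_comp() without holding an extra + * reference on it.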
+ */ +struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev) +{ + struct mlx5_devcom_comp_dev *devcom = NULL; + int i; + + mutex_lock(&ldev->lock); + for (i = 0; i < ldev->ports; i++) { + if (ldev->pf[i].dev) { + devcom = ldev->pf[i].dev->priv.hca_devcom_comp; + break; + } + } + mutex_unlock(&ldev->lock); + return devcom; +} + static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay) { queue_delayed_work(ldev->wq, &ldev->bond_work, delay); @@ -953,9 +973,14 @@ static void mlx5_do_bond_work(struct work_struct *work) struct delayed_work *delayed_work = to_delayed_work(work); struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag, bond_work); + struct mlx5_devcom_comp_dev *devcom; int status; - status = mlx5_dev_list_trylock(); + devcom = mlx5_lag_get_devcom_comp(ldev); + if (!devcom) + return; + + status = mlx5_devcom_comp_trylock(devcom); if (!status) { mlx5_queue_bond_work(ldev, HZ); return; @@ -964,14 +989,14 @@ static void mlx5_do_bond_work(struct work_struct *work) mutex_lock(&ldev->lock); if (ldev->mode_changes_in_progress) { mutex_unlock(&ldev->lock); - mlx5_dev_list_unlock(); + mlx5_devcom_comp_unlock(devcom); mlx5_queue_bond_work(ldev, HZ); return; } mlx5_do_bond(ldev); mutex_unlock(&ldev->lock); - mlx5_dev_list_unlock(); + mlx5_devcom_comp_unlock(devcom); } static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, @@ -1212,13 +1237,14 @@ static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev, dev->priv.lag = NULL; } -/* Must be called with intf_mutex held */ +/* Must be called with HCA devcom component lock held */ static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev) { + struct mlx5_devcom_comp_dev *pos = NULL; struct mlx5_lag *ldev = NULL; struct mlx5_core_dev *tmp_dev; - tmp_dev = mlx5_get_next_phys_dev_lag(dev); + tmp_dev = mlx5_devcom_get_next_peer_data(dev->priv.hca_devcom_comp, &pos); if (tmp_dev) ldev = mlx5_lag_dev(tmp_dev); @@ -1268,16 +1294,6 @@ recheck: mlx5_ldev_put(ldev); } -bool mlx5_lag_is_supported(struct mlx5_core_dev *dev) -{ - if (!MLX5_CAP_GEN(dev, vport_group_manager) || - !MLX5_CAP_GEN(dev, lag_master) || - MLX5_CAP_GEN(dev, num_lag_ports) < 2 || - MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS) - return false; - return true; -} - void mlx5_lag_add_mdev(struct mlx5_core_dev *dev) { int err; @@ -1285,10 +1301,13 @@ void mlx5_lag_add_mdev(struct mlx5_core_dev *dev) if (!mlx5_lag_is_supported(dev)) return; + if (IS_ERR_OR_NULL(dev->priv.hca_devcom_comp)) + return; + recheck: - mlx5_dev_list_lock(); + mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp); err = __mlx5_lag_dev_add_mdev(dev); - mlx5_dev_list_unlock(); + mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp); if (err) { msleep(100); @@ -1441,7 +1460,7 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev) if (!ldev) return; - mlx5_dev_list_lock(); + mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp); mutex_lock(&ldev->lock); ldev->mode_changes_in_progress++; @@ -1449,7 +1468,7 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev) mlx5_disable_lag(ldev); mutex_unlock(&ldev->lock); - mlx5_dev_list_unlock(); + mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp); } void mlx5_lag_enable_change(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h index a061b1873e27..50fcb1eee574 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h @@ -74,8 +74,6 @@ struct mlx5_lag { struct lag_mpesw 
lag_mpesw; }; -bool mlx5_lag_is_supported(struct mlx5_core_dev *dev); - static inline struct mlx5_lag * mlx5_lag_dev(struct mlx5_core_dev *dev) { @@ -114,5 +112,16 @@ void mlx5_disable_lag(struct mlx5_lag *ldev); void mlx5_lag_remove_devices(struct mlx5_lag *ldev); int mlx5_deactivate_lag(struct mlx5_lag *ldev); void mlx5_lag_add_devices(struct mlx5_lag *ldev); +struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev); + +static inline bool mlx5_lag_is_supported(struct mlx5_core_dev *dev) +{ + if (!MLX5_CAP_GEN(dev, vport_group_manager) || + !MLX5_CAP_GEN(dev, lag_master) || + MLX5_CAP_GEN(dev, num_lag_ports) < 2 || + MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS) + return false; + return true; +} #endif /* __MLX5_LAG_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c index 4bf15391525c..82889f30506e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c @@ -65,12 +65,12 @@ err_metadata: return err; } -#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 2 +#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 4 static int enable_mpesw(struct mlx5_lag *ldev) { struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; - struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; int err; + int i; if (ldev->mode != MLX5_LAG_MODE_NONE) return -EINVAL; @@ -98,11 +98,11 @@ static int enable_mpesw(struct mlx5_lag *ldev) dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(dev0); - err = mlx5_eswitch_reload_reps(dev0->priv.eswitch); - if (!err) - err = mlx5_eswitch_reload_reps(dev1->priv.eswitch); - if (err) - goto err_rescan_drivers; + for (i = 0; i < ldev->ports; i++) { + err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch); + if (err) + goto err_rescan_drivers; + } return 0; @@ -112,8 +112,8 @@ err_rescan_drivers: mlx5_deactivate_lag(ldev); err_add_devices: mlx5_lag_add_devices(ldev); - mlx5_eswitch_reload_reps(dev0->priv.eswitch); - mlx5_eswitch_reload_reps(dev1->priv.eswitch); + for (i = 0; i < ldev->ports; i++) + mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch); mlx5_mpesw_metadata_cleanup(ldev); return err; } @@ -129,9 +129,14 @@ static void disable_mpesw(struct mlx5_lag *ldev) static void mlx5_mpesw_work(struct work_struct *work) { struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work); + struct mlx5_devcom_comp_dev *devcom; struct mlx5_lag *ldev = mpesww->lag; - mlx5_dev_list_lock(); + devcom = mlx5_lag_get_devcom_comp(ldev); + if (!devcom) + return; + + mlx5_devcom_comp_lock(devcom); mutex_lock(&ldev->lock); if (ldev->mode_changes_in_progress) { mpesww->result = -EAGAIN; @@ -144,7 +149,7 @@ static void mlx5_mpesw_work(struct work_struct *work) disable_mpesw(ldev); unlock: mutex_unlock(&ldev->lock); - mlx5_dev_list_unlock(); + mlx5_devcom_comp_unlock(devcom); complete(&mpesww->comp); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c index 7d9bbb494d95..101b3bb90863 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c @@ -507,10 +507,7 @@ static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev) mlx5_lag_set_outer_ttc_params(ldev, &ttc_params); port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params); - if (IS_ERR(port_sel->outer.ttc)) - return PTR_ERR(port_sel->outer.ttc); - - return 0; + return 
PTR_ERR_OR_ZERO(port_sel->outer.ttc); } static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev) @@ -521,10 +518,7 @@ static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev) mlx5_lag_set_inner_ttc_params(ldev, &ttc_params); port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params); - if (IS_ERR(port_sel->inner.ttc)) - return PTR_ERR(port_sel->inner.ttc); - - return 0; + return PTR_ERR_OR_ZERO(port_sel->inner.ttc); } int mlx5_lag_port_sel_create(struct mlx5_lag *ldev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c index 5a80fb7dbbca..40c7be124041 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c @@ -81,7 +81,7 @@ static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data) int inlen, eqn; int err; - err = mlx5_vector2eqn(mdev, 0, &eqn); + err = mlx5_comp_eqn_get(mdev, 0, &eqn); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 377372f0578a..0c83ef174275 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -32,6 +32,7 @@ #include <linux/clocksource.h> #include <linux/highmem.h> +#include <linux/log2.h> #include <linux/ptp_clock_kernel.h> #include <rdma/mlx5-abi.h> #include "lib/eq.h" @@ -39,10 +40,6 @@ #include "clock.h" enum { - MLX5_CYCLES_SHIFT = 31 -}; - -enum { MLX5_PIN_MODE_IN = 0x0, MLX5_PIN_MODE_OUT = 0x1, }; @@ -93,6 +90,31 @@ static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev) return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify); } +static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz) +{ + /* Optimal shift constant leads to corrections above just 1 scaled ppm. + * + * Two sets of equations are needed to derive the optimal shift + * constant for the cyclecounter. 
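+ * As a worked check of the result derived below, take an illustrative
+ * dev_freq_khz of 156250: ilog2(156250) + 16 = 33, while
+ * ilog2((U32_MAX / NSEC_PER_MSEC) * 156250) = 29, so the min() below
+ * selects 29, which also keeps the mult later computed by
+ * clocksource_khz2mult() within u32.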
+ * + * dev_freq_khz * 1000 / 2^shift_constant = 1 scaled_ppm + * ppb = scaled_ppm * 1000 / 2^16 + * + * Using the two equations together + * + * dev_freq_khz * 1000 / 1 scaled_ppm = 2^shift_constant + * dev_freq_khz * 2^16 / 1 ppb = 2^shift_constant + * dev_freq_khz = 2^(shift_constant - 16) + * + * then yields + * + * shift_constant = ilog2(dev_freq_khz) + 16 + */ + + return min(ilog2(dev_freq_khz) + 16, + ilog2((U32_MAX / NSEC_PER_MSEC) * dev_freq_khz)); +} + static s32 mlx5_ptp_getmaxphase(struct ptp_clock_info *ptp) { struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); @@ -362,7 +384,12 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta) { - return mlx5_ptp_adjtime(ptp, delta); + struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); + struct mlx5_core_dev *mdev; + + mdev = container_of(clock, struct mlx5_core_dev, clock); + + return mlx5_ptp_adjtime_real_time(mdev, delta); } static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm) @@ -909,7 +936,7 @@ static void mlx5_timecounter_init(struct mlx5_core_dev *mdev) dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz); timer->cycles.read = read_internal_timer; - timer->cycles.shift = MLX5_CYCLES_SHIFT; + timer->cycles.shift = mlx5_ptp_shift_constant(dev_freq); timer->cycles.mult = clocksource_khz2mult(dev_freq, timer->cycles.shift); timer->nominal_c_mult = timer->cycles.mult; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c index 78c94b22bdc0..e8e50563e956 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c @@ -2,214 +2,278 @@ /* Copyright (c) 2018 Mellanox Technologies */ #include <linux/mlx5/vport.h> +#include <linux/list.h> #include "lib/devcom.h" #include "mlx5_core.h" -static LIST_HEAD(devcom_list); +static LIST_HEAD(devcom_dev_list); +static LIST_HEAD(devcom_comp_list); +/* protect device list */ +static DEFINE_MUTEX(dev_list_lock); +/* protect component list */ +static DEFINE_MUTEX(comp_list_lock); -#define devcom_for_each_component(priv, comp, iter) \ - for (iter = 0; \ - comp = &(priv)->components[iter], iter < MLX5_DEVCOM_NUM_COMPONENTS; \ - iter++) +#define devcom_for_each_component(iter) \ + list_for_each_entry(iter, &devcom_comp_list, comp_list) -struct mlx5_devcom_component { - struct { - void __rcu *data; - } device[MLX5_DEVCOM_PORTS_SUPPORTED]; +struct mlx5_devcom_dev { + struct list_head list; + struct mlx5_core_dev *dev; + struct kref ref; +}; +struct mlx5_devcom_comp { + struct list_head comp_list; + enum mlx5_devcom_component id; + u64 key; + struct list_head comp_dev_list_head; mlx5_devcom_event_handler_t handler; - struct rw_semaphore sem; + struct kref ref; bool ready; + struct rw_semaphore sem; + struct lock_class_key lock_key; }; -struct mlx5_devcom_list { +struct mlx5_devcom_comp_dev { struct list_head list; - - struct mlx5_devcom_component components[MLX5_DEVCOM_NUM_COMPONENTS]; - struct mlx5_core_dev *devs[MLX5_DEVCOM_PORTS_SUPPORTED]; -}; - -struct mlx5_devcom { - struct mlx5_devcom_list *priv; - int idx; + struct mlx5_devcom_comp *comp; + struct mlx5_devcom_dev *devc; + void __rcu *data; }; -static struct mlx5_devcom_list *mlx5_devcom_list_alloc(void) +static bool devcom_dev_exists(struct mlx5_core_dev *dev) { - struct mlx5_devcom_component *comp; - struct mlx5_devcom_list *priv; - int i; + struct mlx5_devcom_dev 
*iter; - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return NULL; - - devcom_for_each_component(priv, comp, i) - init_rwsem(&comp->sem); + list_for_each_entry(iter, &devcom_dev_list, list) + if (iter->dev == dev) + return true; - return priv; + return false; } -static struct mlx5_devcom *mlx5_devcom_alloc(struct mlx5_devcom_list *priv, - u8 idx) +static struct mlx5_devcom_dev * +mlx5_devcom_dev_alloc(struct mlx5_core_dev *dev) { - struct mlx5_devcom *devcom; + struct mlx5_devcom_dev *devc; - devcom = kzalloc(sizeof(*devcom), GFP_KERNEL); - if (!devcom) + devc = kzalloc(sizeof(*devc), GFP_KERNEL); + if (!devc) return NULL; - devcom->priv = priv; - devcom->idx = idx; - return devcom; + devc->dev = dev; + kref_init(&devc->ref); + return devc; } -/* Must be called with intf_mutex held */ -struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev) +struct mlx5_devcom_dev * +mlx5_devcom_register_device(struct mlx5_core_dev *dev) { - struct mlx5_devcom_list *priv = NULL, *iter; - struct mlx5_devcom *devcom = NULL; - bool new_priv = false; - u64 sguid0, sguid1; - int idx, i; + struct mlx5_devcom_dev *devc; - if (!mlx5_core_is_pf(dev)) - return NULL; - if (MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_DEVCOM_PORTS_SUPPORTED) - return NULL; - - mlx5_dev_list_lock(); - sguid0 = mlx5_query_nic_system_image_guid(dev); - list_for_each_entry(iter, &devcom_list, list) { - /* There is at least one device in iter */ - struct mlx5_core_dev *tmp_dev; - - idx = -1; - for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) { - if (iter->devs[i]) - tmp_dev = iter->devs[i]; - else - idx = i; - } - - if (idx == -1) - continue; - - sguid1 = mlx5_query_nic_system_image_guid(tmp_dev); - if (sguid0 != sguid1) - continue; - - priv = iter; - break; - } - - if (!priv) { - priv = mlx5_devcom_list_alloc(); - if (!priv) { - devcom = ERR_PTR(-ENOMEM); - goto out; - } + mutex_lock(&dev_list_lock); - idx = 0; - new_priv = true; + if (devcom_dev_exists(dev)) { + devc = ERR_PTR(-EEXIST); + goto out; } - priv->devs[idx] = dev; - devcom = mlx5_devcom_alloc(priv, idx); - if (!devcom) { - if (new_priv) - kfree(priv); - devcom = ERR_PTR(-ENOMEM); + devc = mlx5_devcom_dev_alloc(dev); + if (!devc) { + devc = ERR_PTR(-ENOMEM); goto out; } - if (new_priv) - list_add(&priv->list, &devcom_list); + list_add_tail(&devc->list, &devcom_dev_list); out: - mlx5_dev_list_unlock(); - return devcom; + mutex_unlock(&dev_list_lock); + return devc; } -/* Must be called with intf_mutex held */ -void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom) +static void +mlx5_devcom_dev_release(struct kref *ref) { - struct mlx5_devcom_list *priv; - int i; + struct mlx5_devcom_dev *devc = container_of(ref, struct mlx5_devcom_dev, ref); - if (IS_ERR_OR_NULL(devcom)) - return; + mutex_lock(&dev_list_lock); + list_del(&devc->list); + mutex_unlock(&dev_list_lock); + kfree(devc); +} - mlx5_dev_list_lock(); - priv = devcom->priv; - priv->devs[devcom->idx] = NULL; +void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc) +{ + if (!IS_ERR_OR_NULL(devc)) + kref_put(&devc->ref, mlx5_devcom_dev_release); +} - kfree(devcom); +static struct mlx5_devcom_comp * +mlx5_devcom_comp_alloc(u64 id, u64 key, mlx5_devcom_event_handler_t handler) +{ + struct mlx5_devcom_comp *comp; - for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) - if (priv->devs[i]) - break; + comp = kzalloc(sizeof(*comp), GFP_KERNEL); + if (!comp) + return ERR_PTR(-ENOMEM); - if (i != MLX5_DEVCOM_PORTS_SUPPORTED) - goto out; + comp->id = id; + comp->key = key; + comp->handler = 
handler; + init_rwsem(&comp->sem); + lockdep_register_key(&comp->lock_key); + lockdep_set_class(&comp->sem, &comp->lock_key); + kref_init(&comp->ref); + INIT_LIST_HEAD(&comp->comp_dev_list_head); - list_del(&priv->list); - kfree(priv); -out: - mlx5_dev_list_unlock(); + return comp; } -void mlx5_devcom_register_component(struct mlx5_devcom *devcom, - enum mlx5_devcom_components id, - mlx5_devcom_event_handler_t handler, - void *data) +static void +mlx5_devcom_comp_release(struct kref *ref) { - struct mlx5_devcom_component *comp; + struct mlx5_devcom_comp *comp = container_of(ref, struct mlx5_devcom_comp, ref); - if (IS_ERR_OR_NULL(devcom)) - return; + mutex_lock(&comp_list_lock); + list_del(&comp->comp_list); + mutex_unlock(&comp_list_lock); + lockdep_unregister_key(&comp->lock_key); + kfree(comp); +} - WARN_ON(!data); +static struct mlx5_devcom_comp_dev * +devcom_alloc_comp_dev(struct mlx5_devcom_dev *devc, + struct mlx5_devcom_comp *comp, + void *data) +{ + struct mlx5_devcom_comp_dev *devcom; + + devcom = kzalloc(sizeof(*devcom), GFP_KERNEL); + if (!devcom) + return ERR_PTR(-ENOMEM); + + kref_get(&devc->ref); + devcom->devc = devc; + devcom->comp = comp; + rcu_assign_pointer(devcom->data, data); - comp = &devcom->priv->components[id]; down_write(&comp->sem); - comp->handler = handler; - rcu_assign_pointer(comp->device[devcom->idx].data, data); + list_add_tail(&devcom->list, &comp->comp_dev_list_head); up_write(&comp->sem); + + return devcom; } -void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom, - enum mlx5_devcom_components id) +static void +devcom_free_comp_dev(struct mlx5_devcom_comp_dev *devcom) { - struct mlx5_devcom_component *comp; - - if (IS_ERR_OR_NULL(devcom)) - return; + struct mlx5_devcom_comp *comp = devcom->comp; - comp = &devcom->priv->components[id]; down_write(&comp->sem); - RCU_INIT_POINTER(comp->device[devcom->idx].data, NULL); + list_del(&devcom->list); up_write(&comp->sem); - synchronize_rcu(); + + kref_put(&devcom->devc->ref, mlx5_devcom_dev_release); + kfree(devcom); + kref_put(&comp->ref, mlx5_devcom_comp_release); } -int mlx5_devcom_send_event(struct mlx5_devcom *devcom, - enum mlx5_devcom_components id, +static bool +devcom_component_equal(struct mlx5_devcom_comp *devcom, + enum mlx5_devcom_component id, + u64 key) +{ + return devcom->id == id && devcom->key == key; +} + +static struct mlx5_devcom_comp * +devcom_component_get(struct mlx5_devcom_dev *devc, + enum mlx5_devcom_component id, + u64 key, + mlx5_devcom_event_handler_t handler) +{ + struct mlx5_devcom_comp *comp; + + devcom_for_each_component(comp) { + if (devcom_component_equal(comp, id, key)) { + if (handler == comp->handler) { + kref_get(&comp->ref); + return comp; + } + + mlx5_core_err(devc->dev, + "Cannot register existing devcom component with different handler\n"); + return ERR_PTR(-EINVAL); + } + } + + return NULL; +} + +struct mlx5_devcom_comp_dev * +mlx5_devcom_register_component(struct mlx5_devcom_dev *devc, + enum mlx5_devcom_component id, + u64 key, + mlx5_devcom_event_handler_t handler, + void *data) +{ + struct mlx5_devcom_comp_dev *devcom; + struct mlx5_devcom_comp *comp; + + if (IS_ERR_OR_NULL(devc)) + return NULL; + + mutex_lock(&comp_list_lock); + comp = devcom_component_get(devc, id, key, handler); + if (IS_ERR(comp)) { + devcom = ERR_PTR(-EINVAL); + goto out_unlock; + } + + if (!comp) { + comp = mlx5_devcom_comp_alloc(id, key, handler); + if (IS_ERR(comp)) { + devcom = ERR_CAST(comp); + goto out_unlock; + } + list_add_tail(&comp->comp_list, &devcom_comp_list); + } + 
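/* At this point comp cannot go away: we either took a reference in
+ * devcom_component_get() or just allocated it with an initial kref,
+ * so it is safe to drop the component list lock before allocating
+ * the per-device entry.
+ */
+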
mutex_unlock(&comp_list_lock); + + devcom = devcom_alloc_comp_dev(devc, comp, data); + if (IS_ERR(devcom)) + kref_put(&comp->ref, mlx5_devcom_comp_release); + + return devcom; + +out_unlock: + mutex_unlock(&comp_list_lock); + return devcom; +} + +void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom) +{ + if (!IS_ERR_OR_NULL(devcom)) + devcom_free_comp_dev(devcom); +} + +int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom, int event, int rollback_event, void *event_data) { - struct mlx5_devcom_component *comp; - int err = -ENODEV, i; + struct mlx5_devcom_comp_dev *pos; + struct mlx5_devcom_comp *comp; + int err = 0; + void *data; if (IS_ERR_OR_NULL(devcom)) - return err; + return -ENODEV; - comp = &devcom->priv->components[id]; + comp = devcom->comp; down_write(&comp->sem); - for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) { - void *data = rcu_dereference_protected(comp->device[i].data, - lockdep_is_held(&comp->sem)); + list_for_each_entry(pos, &comp->comp_dev_list_head, list) { + data = rcu_dereference_protected(pos->data, lockdep_is_held(&comp->sem)); - if (i != devcom->idx && data) { + if (pos != devcom && data) { err = comp->handler(event, data, event_data); if (err) goto rollback; @@ -220,48 +284,43 @@ int mlx5_devcom_send_event(struct mlx5_devcom *devcom, return 0; rollback: - while (i--) { - void *data = rcu_dereference_protected(comp->device[i].data, - lockdep_is_held(&comp->sem)); + if (list_entry_is_head(pos, &comp->comp_dev_list_head, list)) + goto out; + pos = list_prev_entry(pos, list); + list_for_each_entry_from_reverse(pos, &comp->comp_dev_list_head, list) { + data = rcu_dereference_protected(pos->data, lockdep_is_held(&comp->sem)); - if (i != devcom->idx && data) + if (pos != devcom && data) comp->handler(rollback_event, data, event_data); } - +out: up_write(&comp->sem); return err; } -void mlx5_devcom_comp_set_ready(struct mlx5_devcom *devcom, - enum mlx5_devcom_components id, - bool ready) +void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready) { - struct mlx5_devcom_component *comp; - - comp = &devcom->priv->components[id]; - WARN_ON(!rwsem_is_locked(&comp->sem)); + WARN_ON(!rwsem_is_locked(&devcom->comp->sem)); - WRITE_ONCE(comp->ready, ready); + WRITE_ONCE(devcom->comp->ready, ready); } -bool mlx5_devcom_comp_is_ready(struct mlx5_devcom *devcom, - enum mlx5_devcom_components id) +bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom) { if (IS_ERR_OR_NULL(devcom)) return false; - return READ_ONCE(devcom->priv->components[id].ready); + return READ_ONCE(devcom->comp->ready); } -bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom *devcom, - enum mlx5_devcom_components id) +bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom_comp_dev *devcom) { - struct mlx5_devcom_component *comp; + struct mlx5_devcom_comp *comp; if (IS_ERR_OR_NULL(devcom)) return false; - comp = &devcom->priv->components[id]; + comp = devcom->comp; down_read(&comp->sem); if (!READ_ONCE(comp->ready)) { up_read(&comp->sem); @@ -271,74 +330,81 @@ bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom *devcom, return true; } -void mlx5_devcom_for_each_peer_end(struct mlx5_devcom *devcom, - enum mlx5_devcom_components id) +void mlx5_devcom_for_each_peer_end(struct mlx5_devcom_comp_dev *devcom) { - struct mlx5_devcom_component *comp = &devcom->priv->components[id]; - - up_read(&comp->sem); + up_read(&devcom->comp->sem); } -void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom *devcom, - enum mlx5_devcom_components id, - int *i) 
+void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom_comp_dev *devcom, + struct mlx5_devcom_comp_dev **pos) { - struct mlx5_devcom_component *comp; - void *ret; - int idx; + struct mlx5_devcom_comp *comp = devcom->comp; + struct mlx5_devcom_comp_dev *tmp; + void *data; - comp = &devcom->priv->components[id]; + tmp = list_prepare_entry(*pos, &comp->comp_dev_list_head, list); - if (*i == MLX5_DEVCOM_PORTS_SUPPORTED) - return NULL; - for (idx = *i; idx < MLX5_DEVCOM_PORTS_SUPPORTED; idx++) { - if (idx != devcom->idx) { - ret = rcu_dereference_protected(comp->device[idx].data, - lockdep_is_held(&comp->sem)); - if (ret) + list_for_each_entry_continue(tmp, &comp->comp_dev_list_head, list) { + if (tmp != devcom) { + data = rcu_dereference_protected(tmp->data, lockdep_is_held(&comp->sem)); + if (data) break; } } - if (idx == MLX5_DEVCOM_PORTS_SUPPORTED) { - *i = idx; + if (list_entry_is_head(tmp, &comp->comp_dev_list_head, list)) return NULL; - } - *i = idx + 1; - return ret; + *pos = tmp; + return data; } -void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom *devcom, - enum mlx5_devcom_components id, - int *i) +void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom, + struct mlx5_devcom_comp_dev **pos) { - struct mlx5_devcom_component *comp; - void *ret; - int idx; + struct mlx5_devcom_comp *comp = devcom->comp; + struct mlx5_devcom_comp_dev *tmp; + void *data; - comp = &devcom->priv->components[id]; + tmp = list_prepare_entry(*pos, &comp->comp_dev_list_head, list); - if (*i == MLX5_DEVCOM_PORTS_SUPPORTED) - return NULL; - for (idx = *i; idx < MLX5_DEVCOM_PORTS_SUPPORTED; idx++) { - if (idx != devcom->idx) { + list_for_each_entry_continue(tmp, &comp->comp_dev_list_head, list) { + if (tmp != devcom) { /* This can change concurrently, however 'data' pointer will remain * valid for the duration of RCU read section. 
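* In practice this means comp->ready may flip while only the RCU read
* lock is held, hence the READ_ONCE() re-check on each iteration
* before dereferencing the peer data.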
*/ if (!READ_ONCE(comp->ready)) return NULL; - ret = rcu_dereference(comp->device[idx].data); - if (ret) + data = rcu_dereference(tmp->data); + if (data) break; } } - if (idx == MLX5_DEVCOM_PORTS_SUPPORTED) { - *i = idx; + if (list_entry_is_head(tmp, &comp->comp_dev_list_head, list)) return NULL; - } - *i = idx + 1; - return ret; + *pos = tmp; + return data; +} + +void mlx5_devcom_comp_lock(struct mlx5_devcom_comp_dev *devcom) +{ + if (IS_ERR_OR_NULL(devcom)) + return; + down_write(&devcom->comp->sem); +} + +void mlx5_devcom_comp_unlock(struct mlx5_devcom_comp_dev *devcom) +{ + if (IS_ERR_OR_NULL(devcom)) + return; + up_write(&devcom->comp->sem); +} + +int mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev *devcom) +{ + if (IS_ERR_OR_NULL(devcom)) + return 0; + return down_write_trylock(&devcom->comp->sem); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h index d953a01b8eaa..fc23bbef87b4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h @@ -6,11 +6,10 @@ #include <linux/mlx5/driver.h> -#define MLX5_DEVCOM_PORTS_SUPPORTED 4 - -enum mlx5_devcom_components { +enum mlx5_devcom_component { MLX5_DEVCOM_ESW_OFFLOADS, - + MLX5_DEVCOM_MPV, + MLX5_DEVCOM_HCA_PORTS, MLX5_DEVCOM_NUM_COMPONENTS, }; @@ -18,45 +17,44 @@ typedef int (*mlx5_devcom_event_handler_t)(int event, void *my_data, void *event_data); -struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev); -void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom); +struct mlx5_devcom_dev *mlx5_devcom_register_device(struct mlx5_core_dev *dev); +void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc); -void mlx5_devcom_register_component(struct mlx5_devcom *devcom, - enum mlx5_devcom_components id, - mlx5_devcom_event_handler_t handler, - void *data); -void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom, - enum mlx5_devcom_components id); +struct mlx5_devcom_comp_dev * +mlx5_devcom_register_component(struct mlx5_devcom_dev *devc, + enum mlx5_devcom_component id, + u64 key, + mlx5_devcom_event_handler_t handler, + void *data); +void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom); -int mlx5_devcom_send_event(struct mlx5_devcom *devcom, - enum mlx5_devcom_components id, +int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom, int event, int rollback_event, void *event_data); -void mlx5_devcom_comp_set_ready(struct mlx5_devcom *devcom, - enum mlx5_devcom_components id, - bool ready); -bool mlx5_devcom_comp_is_ready(struct mlx5_devcom *devcom, - enum mlx5_devcom_components id); - -bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom *devcom, - enum mlx5_devcom_components id); -void mlx5_devcom_for_each_peer_end(struct mlx5_devcom *devcom, - enum mlx5_devcom_components id); -void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom *devcom, - enum mlx5_devcom_components id, int *i); - -#define mlx5_devcom_for_each_peer_entry(devcom, id, data, i) \ - for (i = 0, data = mlx5_devcom_get_next_peer_data(devcom, id, &i); \ - data; \ - data = mlx5_devcom_get_next_peer_data(devcom, id, &i)) - -void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom *devcom, - enum mlx5_devcom_components id, int *i); - -#define mlx5_devcom_for_each_peer_entry_rcu(devcom, id, data, i) \ - for (i = 0, data = mlx5_devcom_get_next_peer_data_rcu(devcom, id, &i); \ - data; \ - data = mlx5_devcom_get_next_peer_data_rcu(devcom, id, &i)) - -#endif +void 
mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready); +bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom); + +bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom_comp_dev *devcom); +void mlx5_devcom_for_each_peer_end(struct mlx5_devcom_comp_dev *devcom); +void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom_comp_dev *devcom, + struct mlx5_devcom_comp_dev **pos); + +#define mlx5_devcom_for_each_peer_entry(devcom, data, pos) \ + for (pos = NULL, data = mlx5_devcom_get_next_peer_data(devcom, &pos); \ + data; \ + data = mlx5_devcom_get_next_peer_data(devcom, &pos)) + +void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom, + struct mlx5_devcom_comp_dev **pos); + +#define mlx5_devcom_for_each_peer_entry_rcu(devcom, data, pos) \ + for (pos = NULL, data = mlx5_devcom_get_next_peer_data_rcu(devcom, &pos); \ + data; \ + data = mlx5_devcom_get_next_peer_data_rcu(devcom, &pos)) + +void mlx5_devcom_comp_lock(struct mlx5_devcom_comp_dev *devcom); +void mlx5_devcom_comp_unlock(struct mlx5_devcom_comp_dev *devcom); +int mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev *devcom); + +#endif /* __LIB_MLX5_DEVCOM_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h index d3d628b862f3..4b7f7131c560 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h @@ -85,7 +85,6 @@ void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq); struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn); struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev); void mlx5_cq_tasklet_cb(struct tasklet_struct *t); -struct cpumask *mlx5_eq_comp_cpumask(struct mlx5_core_dev *dev, int ix); u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq); void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev); @@ -104,6 +103,6 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev); struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev); #endif -int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn); +int mlx5_comp_irqn_get(struct mlx5_core_dev *dev, int vector, unsigned int *irqn); #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c index 4047629a876b..30564d9b00e9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c @@ -40,7 +40,7 @@ struct mlx5_hv_vhca_agent { struct mlx5_hv_vhca *mlx5_hv_vhca_create(struct mlx5_core_dev *dev) { - struct mlx5_hv_vhca *hv_vhca = NULL; + struct mlx5_hv_vhca *hv_vhca; hv_vhca = kzalloc(sizeof(*hv_vhca), GFP_KERNEL); if (!hv_vhca) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c index 6e3f178d6f84..234cd00f71a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c @@ -2,8 +2,11 @@ /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
*/ #include "fs_core.h" +#include "fs_cmd.h" +#include "en.h" #include "lib/ipsec_fs_roce.h" #include "mlx5_core.h" +#include <linux/random.h> struct mlx5_ipsec_miss { struct mlx5_flow_group *group; @@ -15,6 +18,12 @@ struct mlx5_ipsec_rx_roce { struct mlx5_flow_table *ft; struct mlx5_flow_handle *rule; struct mlx5_ipsec_miss roce_miss; + struct mlx5_flow_table *nic_master_ft; + struct mlx5_flow_group *nic_master_group; + struct mlx5_flow_handle *nic_master_rule; + struct mlx5_flow_table *goto_alias_ft; + u32 alias_id; + char key[ACCESS_KEY_LEN]; struct mlx5_flow_table *ft_rdma; struct mlx5_flow_namespace *ns_rdma; @@ -24,6 +33,9 @@ struct mlx5_ipsec_tx_roce { struct mlx5_flow_group *g; struct mlx5_flow_table *ft; struct mlx5_flow_handle *rule; + struct mlx5_flow_table *goto_alias_ft; + u32 alias_id; + char key[ACCESS_KEY_LEN]; struct mlx5_flow_namespace *ns; }; @@ -31,6 +43,7 @@ struct mlx5_ipsec_fs { struct mlx5_ipsec_rx_roce ipv4_rx; struct mlx5_ipsec_rx_roce ipv6_rx; struct mlx5_ipsec_tx_roce tx; + struct mlx5_devcom_comp_dev **devcom; }; static void ipsec_fs_roce_setup_udp_dport(struct mlx5_flow_spec *spec, @@ -43,11 +56,83 @@ static void ipsec_fs_roce_setup_udp_dport(struct mlx5_flow_spec *spec, MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, dport); } +static bool ipsec_fs_create_alias_supported_one(struct mlx5_core_dev *mdev) +{ + u64 obj_allowed = MLX5_CAP_GEN_2_64(mdev, allowed_object_for_other_vhca_access); + u32 obj_supp = MLX5_CAP_GEN_2(mdev, cross_vhca_object_to_object_supported); + + if (!(obj_supp & + MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_TO_REMOTE_FLOW_TABLE_MISS)) + return false; + + if (!(obj_allowed & MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE)) + return false; + + return true; +} + +static bool ipsec_fs_create_alias_supported(struct mlx5_core_dev *mdev, + struct mlx5_core_dev *master_mdev) +{ + if (ipsec_fs_create_alias_supported_one(mdev) && + ipsec_fs_create_alias_supported_one(master_mdev)) + return true; + + return false; +} + +static int ipsec_fs_create_aliased_ft(struct mlx5_core_dev *ibv_owner, + struct mlx5_core_dev *ibv_allowed, + struct mlx5_flow_table *ft, + u32 *obj_id, char *alias_key, bool from_event) +{ + u32 aliased_object_id = (ft->type << FT_ID_FT_TYPE_OFFSET) | ft->id; + u16 vhca_id_to_be_accessed = MLX5_CAP_GEN(ibv_owner, vhca_id); + struct mlx5_cmd_allow_other_vhca_access_attr allow_attr = {}; + struct mlx5_cmd_alias_obj_create_attr alias_attr = {}; + int ret; + int i; + + if (!ipsec_fs_create_alias_supported(ibv_owner, ibv_allowed)) + return -EOPNOTSUPP; + + for (i = 0; i < ACCESS_KEY_LEN; i++) + if (!from_event) + alias_key[i] = get_random_u64() & 0xFF; + + memcpy(allow_attr.access_key, alias_key, ACCESS_KEY_LEN); + allow_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS; + allow_attr.obj_id = aliased_object_id; + + if (!from_event) { + ret = mlx5_cmd_allow_other_vhca_access(ibv_owner, &allow_attr); + if (ret) { + mlx5_core_err(ibv_owner, "Failed to allow other vhca access err=%d\n", + ret); + return ret; + } + } + + memcpy(alias_attr.access_key, alias_key, ACCESS_KEY_LEN); + alias_attr.obj_id = aliased_object_id; + alias_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS; + alias_attr.vhca_id = vhca_id_to_be_accessed; + ret = mlx5_cmd_alias_obj_create(ibv_allowed, &alias_attr, obj_id); + if (ret) { + mlx5_core_err(ibv_allowed, "Failed to create alias object err=%d\n", + ret); + return ret; + } + + return 0; +} + static int ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev, struct 
mlx5_flow_destination *default_dst, struct mlx5_ipsec_rx_roce *roce) { + bool is_mpv_slave = mlx5_core_is_mp_slave(mdev); struct mlx5_flow_destination dst = {}; MLX5_DECLARE_FLOW_ACT(flow_act); struct mlx5_flow_handle *rule; @@ -61,14 +146,19 @@ ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev, ipsec_fs_roce_setup_udp_dport(spec, ROCE_V2_UDP_DPORT); flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE; - dst.ft = roce->ft_rdma; + if (is_mpv_slave) { + dst.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dst.ft = roce->goto_alias_ft; + } else { + dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE; + dst.ft = roce->ft_rdma; + } rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); mlx5_core_err(mdev, "Fail to add RX RoCE IPsec rule err=%d\n", err); - goto fail_add_rule; + goto out; } roce->rule = rule; @@ -84,12 +174,30 @@ ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev, roce->roce_miss.rule = rule; + if (!is_mpv_slave) + goto out; + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE; + dst.ft = roce->ft_rdma; + rule = mlx5_add_flow_rules(roce->nic_master_ft, NULL, &flow_act, &dst, + 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Fail to add RX RoCE IPsec rule for alias err=%d\n", + err); + goto fail_add_nic_master_rule; + } + roce->nic_master_rule = rule; + kvfree(spec); return 0; +fail_add_nic_master_rule: + mlx5_del_flow_rules(roce->roce_miss.rule); fail_add_default_rule: mlx5_del_flow_rules(roce->rule); -fail_add_rule: +out: kvfree(spec); return err; } @@ -120,25 +228,373 @@ out: return err; } -void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce) +static int ipsec_fs_roce_tx_mpv_rule_setup(struct mlx5_core_dev *mdev, + struct mlx5_ipsec_tx_roce *roce, + struct mlx5_flow_table *pol_ft) { + struct mlx5_flow_destination dst = {}; + MLX5_DECLARE_FLOW_ACT(flow_act); + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + int err = 0; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.source_vhca_port); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters.source_vhca_port, + MLX5_CAP_GEN(mdev, native_port_num)); + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE; + dst.ft = roce->goto_alias_ft; + rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Fail to add TX RoCE IPsec rule err=%d\n", + err); + goto out; + } + roce->rule = rule; + + /* No need for miss rule, since on miss we go to next PRIO, in which + * if master is configured, he will catch the traffic to go to his + * encryption table. 
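+ * (Put differently: packets that miss here simply fall through to the
+ * master's IPsec TX tables in the next priority, so an explicit miss
+ * destination would be redundant.)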
+ */ + +out: + kvfree(spec); + return err; +} + +#define MLX5_TX_ROCE_GROUP_SIZE BIT(0) +#define MLX5_IPSEC_RDMA_TX_FT_LEVEL 0 +#define MLX5_IPSEC_NIC_GOTO_ALIAS_FT_LEVEL 3 /* Since last used level in NIC ipsec is 2 */ + +static int ipsec_fs_roce_tx_mpv_create_ft(struct mlx5_core_dev *mdev, + struct mlx5_ipsec_tx_roce *roce, + struct mlx5_flow_table *pol_ft, + struct mlx5e_priv *peer_priv, + bool from_event) +{ + struct mlx5_flow_namespace *roce_ns, *nic_ns; + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_table next_ft; + struct mlx5_flow_table *ft; + int err; + + roce_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC); + if (!roce_ns) + return -EOPNOTSUPP; + + nic_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC); + if (!nic_ns) + return -EOPNOTSUPP; + + err = ipsec_fs_create_aliased_ft(mdev, peer_priv->mdev, pol_ft, &roce->alias_id, roce->key, + from_event); + if (err) + return err; + + next_ft.id = roce->alias_id; + ft_attr.max_fte = 1; + ft_attr.next_ft = &next_ft; + ft_attr.level = MLX5_IPSEC_NIC_GOTO_ALIAS_FT_LEVEL; + ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED; + ft = mlx5_create_flow_table(nic_ns, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, "Fail to create RoCE IPsec goto alias ft err=%d\n", err); + goto destroy_alias; + } + + roce->goto_alias_ft = ft; + + memset(&ft_attr, 0, sizeof(ft_attr)); + ft_attr.max_fte = 1; + ft_attr.level = MLX5_IPSEC_RDMA_TX_FT_LEVEL; + ft = mlx5_create_flow_table(roce_ns, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, "Fail to create RoCE IPsec tx ft err=%d\n", err); + goto destroy_alias_ft; + } + + roce->ft = ft; + + return 0; + +destroy_alias_ft: + mlx5_destroy_flow_table(roce->goto_alias_ft); +destroy_alias: + mlx5_cmd_alias_obj_destroy(peer_priv->mdev, roce->alias_id, + MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS); + return err; +} + +static int ipsec_fs_roce_tx_mpv_create_group_rules(struct mlx5_core_dev *mdev, + struct mlx5_ipsec_tx_roce *roce, + struct mlx5_flow_table *pol_ft, + u32 *in) +{ + struct mlx5_flow_group *g; + int ix = 0; + int err; + u8 *mc; + + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters.source_vhca_port); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS); + + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5_TX_ROCE_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + g = mlx5_create_flow_group(roce->ft, in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + mlx5_core_err(mdev, "Fail to create RoCE IPsec tx group err=%d\n", err); + return err; + } + roce->g = g; + + err = ipsec_fs_roce_tx_mpv_rule_setup(mdev, roce, pol_ft); + if (err) { + mlx5_core_err(mdev, "Fail to create RoCE IPsec tx rules err=%d\n", err); + goto destroy_group; + } + + return 0; + +destroy_group: + mlx5_destroy_flow_group(roce->g); + return err; +} + +static int ipsec_fs_roce_tx_mpv_create(struct mlx5_core_dev *mdev, + struct mlx5_ipsec_fs *ipsec_roce, + struct mlx5_flow_table *pol_ft, + u32 *in, bool from_event) +{ + struct mlx5_devcom_comp_dev *tmp = NULL; + struct mlx5_ipsec_tx_roce *roce; + struct mlx5e_priv *peer_priv; + int err; + + if (!mlx5_devcom_for_each_peer_begin(*ipsec_roce->devcom)) + return -EOPNOTSUPP; + + peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp); + if (!peer_priv) { + err = -EOPNOTSUPP; + goto release_peer; + } + + roce = &ipsec_roce->tx; + + err = ipsec_fs_roce_tx_mpv_create_ft(mdev, roce, pol_ft, 
peer_priv, from_event); + if (err) { + mlx5_core_err(mdev, "Fail to create RoCE IPsec tables err=%d\n", err); + goto release_peer; + } + + err = ipsec_fs_roce_tx_mpv_create_group_rules(mdev, roce, pol_ft, in); + if (err) { + mlx5_core_err(mdev, "Fail to create RoCE IPsec tx group/rule err=%d\n", err); + goto destroy_tables; + } + + mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom); + return 0; + +destroy_tables: + mlx5_destroy_flow_table(roce->ft); + mlx5_destroy_flow_table(roce->goto_alias_ft); + mlx5_cmd_alias_obj_destroy(peer_priv->mdev, roce->alias_id, + MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS); +release_peer: + mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom); + return err; +} + +static void roce_rx_mpv_destroy_tables(struct mlx5_core_dev *mdev, struct mlx5_ipsec_rx_roce *roce) +{ + mlx5_destroy_flow_table(roce->goto_alias_ft); + mlx5_cmd_alias_obj_destroy(mdev, roce->alias_id, + MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS); + mlx5_destroy_flow_group(roce->nic_master_group); + mlx5_destroy_flow_table(roce->nic_master_ft); +} + +#define MLX5_RX_ROCE_GROUP_SIZE BIT(0) +#define MLX5_IPSEC_RX_IPV4_FT_LEVEL 3 +#define MLX5_IPSEC_RX_IPV6_FT_LEVEL 2 + +static int ipsec_fs_roce_rx_mpv_create(struct mlx5_core_dev *mdev, + struct mlx5_ipsec_fs *ipsec_roce, + struct mlx5_flow_namespace *ns, + u32 family, u32 level, u32 prio) +{ + struct mlx5_flow_namespace *roce_ns, *nic_ns; + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_devcom_comp_dev *tmp = NULL; + struct mlx5_ipsec_rx_roce *roce; + struct mlx5_flow_table next_ft; + struct mlx5_flow_table *ft; + struct mlx5_flow_group *g; + struct mlx5e_priv *peer_priv; + int ix = 0; + u32 *in; + int err; + + roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx : + &ipsec_roce->ipv6_rx; + + if (!mlx5_devcom_for_each_peer_begin(*ipsec_roce->devcom)) + return -EOPNOTSUPP; + + peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp); + if (!peer_priv) { + err = -EOPNOTSUPP; + goto release_peer; + } + + roce_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC); + if (!roce_ns) { + err = -EOPNOTSUPP; + goto release_peer; + } + + nic_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL); + if (!nic_ns) { + err = -EOPNOTSUPP; + goto release_peer; + } + + in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto release_peer; + } + + ft_attr.level = (family == AF_INET) ? 
MLX5_IPSEC_RX_IPV4_FT_LEVEL : + MLX5_IPSEC_RX_IPV6_FT_LEVEL; + ft_attr.max_fte = 1; + ft = mlx5_create_flow_table(roce_ns, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at rdma master err=%d\n", err); + goto free_in; + } + + roce->ft_rdma = ft; + + ft_attr.max_fte = 1; + ft_attr.prio = prio; + ft_attr.level = level + 2; + ft = mlx5_create_flow_table(nic_ns, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at NIC master err=%d\n", err); + goto destroy_ft_rdma; + } + roce->nic_master_ft = ft; + + MLX5_SET_CFG(in, start_flow_index, ix); + ix += 1; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + g = mlx5_create_flow_group(roce->nic_master_ft, in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + mlx5_core_err(mdev, "Fail to create RoCE IPsec rx group aliased err=%d\n", err); + goto destroy_nic_master_ft; + } + roce->nic_master_group = g; + + err = ipsec_fs_create_aliased_ft(peer_priv->mdev, mdev, roce->nic_master_ft, + &roce->alias_id, roce->key, false); + if (err) { + mlx5_core_err(mdev, "Fail to create RoCE IPsec rx alias FT err=%d\n", err); + goto destroy_group; + } + + next_ft.id = roce->alias_id; + ft_attr.max_fte = 1; + ft_attr.prio = prio; + ft_attr.level = roce->ft->level + 1; + ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED; + ft_attr.next_ft = &next_ft; + ft = mlx5_create_flow_table(ns, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at NIC slave err=%d\n", err); + goto destroy_alias; + } + roce->goto_alias_ft = ft; + + kvfree(in); + mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom); + return 0; + +destroy_alias: + mlx5_cmd_alias_obj_destroy(mdev, roce->alias_id, + MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS); +destroy_group: + mlx5_destroy_flow_group(roce->nic_master_group); +destroy_nic_master_ft: + mlx5_destroy_flow_table(roce->nic_master_ft); +destroy_ft_rdma: + mlx5_destroy_flow_table(roce->ft_rdma); +free_in: + kvfree(in); +release_peer: + mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom); + return err; +} + +void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce, + struct mlx5_core_dev *mdev) +{ + struct mlx5_devcom_comp_dev *tmp = NULL; struct mlx5_ipsec_tx_roce *tx_roce; + struct mlx5e_priv *peer_priv; if (!ipsec_roce) return; tx_roce = &ipsec_roce->tx; + if (!tx_roce->ft) + return; /* Incase RoCE was cleaned from MPV event flow */ + mlx5_del_flow_rules(tx_roce->rule); mlx5_destroy_flow_group(tx_roce->g); mlx5_destroy_flow_table(tx_roce->ft); -} -#define MLX5_TX_ROCE_GROUP_SIZE BIT(0) + if (!mlx5_core_is_mp_slave(mdev)) + return; + + if (!mlx5_devcom_for_each_peer_begin(*ipsec_roce->devcom)) + return; + + peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp); + if (!peer_priv) { + mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom); + return; + } + + mlx5_destroy_flow_table(tx_roce->goto_alias_ft); + mlx5_cmd_alias_obj_destroy(peer_priv->mdev, tx_roce->alias_id, + MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS); + mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom); + tx_roce->ft = NULL; +} int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev, struct mlx5_ipsec_fs *ipsec_roce, - struct mlx5_flow_table *pol_ft) + struct mlx5_flow_table *pol_ft, + bool from_event) { struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_ipsec_tx_roce *roce; @@ -157,7 +613,14 @@ int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev, if (!in) return -ENOMEM; + if (mlx5_core_is_mp_slave(mdev)) { + err 
= ipsec_fs_roce_tx_mpv_create(mdev, ipsec_roce, pol_ft, in, from_event); + goto free_in; + } + ft_attr.max_fte = 1; + ft_attr.prio = 1; + ft_attr.level = MLX5_IPSEC_RDMA_TX_FT_LEVEL; ft = mlx5_create_flow_table(roce->ns, &ft_attr); if (IS_ERR(ft)) { err = PTR_ERR(ft); @@ -209,8 +672,10 @@ struct mlx5_flow_table *mlx5_ipsec_fs_roce_ft_get(struct mlx5_ipsec_fs *ipsec_ro return rx_roce->ft; } -void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, u32 family) +void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, u32 family, + struct mlx5_core_dev *mdev) { + bool is_mpv_slave = mlx5_core_is_mp_slave(mdev); struct mlx5_ipsec_rx_roce *rx_roce; if (!ipsec_roce) @@ -218,23 +683,29 @@ void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, u32 family) rx_roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx : &ipsec_roce->ipv6_rx; + if (!rx_roce->ft) + return; /* Incase RoCE was cleaned from MPV event flow */ + if (is_mpv_slave) + mlx5_del_flow_rules(rx_roce->nic_master_rule); mlx5_del_flow_rules(rx_roce->roce_miss.rule); mlx5_del_flow_rules(rx_roce->rule); + if (is_mpv_slave) + roce_rx_mpv_destroy_tables(mdev, rx_roce); mlx5_destroy_flow_table(rx_roce->ft_rdma); mlx5_destroy_flow_group(rx_roce->roce_miss.group); mlx5_destroy_flow_group(rx_roce->g); mlx5_destroy_flow_table(rx_roce->ft); + rx_roce->ft = NULL; } -#define MLX5_RX_ROCE_GROUP_SIZE BIT(0) - int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev, struct mlx5_ipsec_fs *ipsec_roce, struct mlx5_flow_namespace *ns, struct mlx5_flow_destination *default_dst, u32 family, u32 level, u32 prio) { + bool is_mpv_slave = mlx5_core_is_mp_slave(mdev); struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_ipsec_rx_roce *roce; struct mlx5_flow_table *ft; @@ -298,18 +769,28 @@ int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev, } roce->roce_miss.group = g; - memset(&ft_attr, 0, sizeof(ft_attr)); - if (family == AF_INET) - ft_attr.level = 1; - ft = mlx5_create_flow_table(roce->ns_rdma, &ft_attr); - if (IS_ERR(ft)) { - err = PTR_ERR(ft); - mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at rdma err=%d\n", err); - goto fail_rdma_table; + if (is_mpv_slave) { + err = ipsec_fs_roce_rx_mpv_create(mdev, ipsec_roce, ns, family, level, prio); + if (err) { + mlx5_core_err(mdev, "Fail to create RoCE IPsec rx alias err=%d\n", err); + goto fail_mpv_create; + } + } else { + memset(&ft_attr, 0, sizeof(ft_attr)); + if (family == AF_INET) + ft_attr.level = 1; + ft_attr.max_fte = 1; + ft = mlx5_create_flow_table(roce->ns_rdma, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, + "Fail to create RoCE IPsec rx ft at rdma err=%d\n", err); + goto fail_rdma_table; + } + + roce->ft_rdma = ft; } - roce->ft_rdma = ft; - err = ipsec_fs_roce_rx_rule_setup(mdev, default_dst, roce); if (err) { mlx5_core_err(mdev, "Fail to create RoCE IPsec rx rules err=%d\n", err); @@ -320,7 +801,10 @@ int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev, return 0; fail_setup_rule: + if (is_mpv_slave) + roce_rx_mpv_destroy_tables(mdev, roce); mlx5_destroy_flow_table(roce->ft_rdma); +fail_mpv_create: fail_rdma_table: mlx5_destroy_flow_group(roce->roce_miss.group); fail_mgroup: @@ -332,12 +816,24 @@ fail_nomem: return err; } +bool mlx5_ipsec_fs_is_mpv_roce_supported(struct mlx5_core_dev *mdev) +{ + if (!mlx5_core_mp_enabled(mdev)) + return true; + + if (ipsec_fs_create_alias_supported_one(mdev)) + return true; + + return false; +} + void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce) { kfree(ipsec_roce); 
} -struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev) +struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev, + struct mlx5_devcom_comp_dev **devcom) { struct mlx5_ipsec_fs *roce_ipsec; struct mlx5_flow_namespace *ns; @@ -363,6 +859,8 @@ struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev) roce_ipsec->tx.ns = ns; + roce_ipsec->devcom = devcom; + return roce_ipsec; err_tx: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h index 9712d705fe48..2a1af78309fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h @@ -4,22 +4,28 @@ #ifndef __MLX5_LIB_IPSEC_H__ #define __MLX5_LIB_IPSEC_H__ +#include "lib/devcom.h" + struct mlx5_ipsec_fs; struct mlx5_flow_table * mlx5_ipsec_fs_roce_ft_get(struct mlx5_ipsec_fs *ipsec_roce, u32 family); void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, - u32 family); + u32 family, struct mlx5_core_dev *mdev); int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev, struct mlx5_ipsec_fs *ipsec_roce, struct mlx5_flow_namespace *ns, struct mlx5_flow_destination *default_dst, u32 family, u32 level, u32 prio); -void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce); +void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce, + struct mlx5_core_dev *mdev); int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev, struct mlx5_ipsec_fs *ipsec_roce, - struct mlx5_flow_table *pol_ft); + struct mlx5_flow_table *pol_ft, + bool from_event); void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce); -struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev); +struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev, + struct mlx5_devcom_comp_dev **devcom); +bool mlx5_ipsec_fs_is_mpv_roce_supported(struct mlx5_core_dev *mdev); #endif /* __MLX5_LIB_IPSEC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c new file mode 100644 index 000000000000..4a078113e292 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c @@ -0,0 +1,2411 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
*/ + +#include <net/macsec.h> +#include <linux/mlx5/qp.h> +#include <linux/if_vlan.h> +#include <linux/mlx5/fs_helpers.h> +#include <linux/mlx5/macsec.h> +#include "fs_core.h" +#include "lib/macsec_fs.h" +#include "mlx5_core.h" + +/* MACsec TX flow steering */ +#define CRYPTO_NUM_MAXSEC_FTE BIT(15) +#define CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE 1 + +#define TX_CRYPTO_TABLE_LEVEL 0 +#define TX_CRYPTO_TABLE_NUM_GROUPS 3 +#define TX_CRYPTO_TABLE_MKE_GROUP_SIZE 1 +#define TX_CRYPTO_TABLE_SA_GROUP_SIZE \ + (CRYPTO_NUM_MAXSEC_FTE - (TX_CRYPTO_TABLE_MKE_GROUP_SIZE + \ + CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE)) +#define TX_CHECK_TABLE_LEVEL 1 +#define TX_CHECK_TABLE_NUM_FTE 2 +#define RX_CRYPTO_TABLE_LEVEL 0 +#define RX_CHECK_TABLE_LEVEL 1 +#define RX_ROCE_TABLE_LEVEL 2 +#define RX_CHECK_TABLE_NUM_FTE 3 +#define RX_ROCE_TABLE_NUM_FTE 2 +#define RX_CRYPTO_TABLE_NUM_GROUPS 3 +#define RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE \ + ((CRYPTO_NUM_MAXSEC_FTE - CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE) / 2) +#define RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE \ + (CRYPTO_NUM_MAXSEC_FTE - RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE) +#define RX_NUM_OF_RULES_PER_SA 2 + +#define RDMA_RX_ROCE_IP_TABLE_LEVEL 0 +#define RDMA_RX_ROCE_MACSEC_OP_TABLE_LEVEL 1 + +#define MLX5_MACSEC_TAG_LEN 8 /* SecTAG length with ethertype and without the optional SCI */ +#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK 0x23 +#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET 0x8 +#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET 0x5 +#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT (0x1 << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET) +#define MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI 0x8 +#define MLX5_SECTAG_HEADER_SIZE_WITH_SCI (MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI + MACSEC_SCI_LEN) + +/* MACsec RX flow steering */ +#define MLX5_ETH_WQE_FT_META_MACSEC_MASK 0x3E + +/* MACsec fs_id handling for steering */ +#define macsec_fs_set_tx_fs_id(fs_id) (MLX5_ETH_WQE_FT_META_MACSEC | (fs_id) << 2) +#define macsec_fs_set_rx_fs_id(fs_id) ((fs_id) | BIT(30)) + +struct mlx5_sectag_header { + __be16 ethertype; + u8 tci_an; + u8 sl; + u32 pn; + u8 sci[MACSEC_SCI_LEN]; /* optional */ +} __packed; + +struct mlx5_roce_macsec_tx_rule { + u32 fs_id; + u16 gid_idx; + struct list_head entry; + struct mlx5_flow_handle *rule; + struct mlx5_modify_hdr *meta_modhdr; +}; + +struct mlx5_macsec_tx_rule { + struct mlx5_flow_handle *rule; + struct mlx5_pkt_reformat *pkt_reformat; + u32 fs_id; +}; + +struct mlx5_macsec_flow_table { + int num_groups; + struct mlx5_flow_table *t; + struct mlx5_flow_group **g; +}; + +struct mlx5_macsec_tables { + struct mlx5_macsec_flow_table ft_crypto; + struct mlx5_flow_handle *crypto_miss_rule; + + struct mlx5_flow_table *ft_check; + struct mlx5_flow_group *ft_check_group; + struct mlx5_fc *check_miss_rule_counter; + struct mlx5_flow_handle *check_miss_rule; + struct mlx5_fc *check_rule_counter; + + u32 refcnt; +}; + +struct mlx5_fs_id { + u32 id; + refcount_t refcnt; + sci_t sci; + struct rhash_head hash; +}; + +struct mlx5_macsec_device { + struct list_head macsec_devices_list_entry; + void *macdev; + struct xarray tx_id_xa; + struct xarray rx_id_xa; +}; + +struct mlx5_macsec_tx { + struct mlx5_flow_handle *crypto_mke_rule; + struct mlx5_flow_handle *check_rule; + + struct ida tx_halloc; + + struct mlx5_macsec_tables tables; + + struct mlx5_flow_table *ft_rdma_tx; +}; + +struct mlx5_roce_macsec_rx_rule { + u32 fs_id; + u16 gid_idx; + struct mlx5_flow_handle *op; + struct mlx5_flow_handle *ip; + struct list_head entry; +}; + +struct mlx5_macsec_rx_rule { 
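+	/* Presumably one rule per match variant for the SA: the with-SCI
+	 * and without-SCI cases matching the two crypto-table SA groups
+	 * defined above (RX_NUM_OF_RULES_PER_SA == 2).
+	 */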
+ struct mlx5_flow_handle *rule[RX_NUM_OF_RULES_PER_SA]; + struct mlx5_modify_hdr *meta_modhdr; +}; + +struct mlx5_macsec_miss { + struct mlx5_flow_group *g; + struct mlx5_flow_handle *rule; +}; + +struct mlx5_macsec_rx_roce { + /* Flow table/rules in NIC domain, to check if it's a RoCE packet */ + struct mlx5_flow_group *g; + struct mlx5_flow_table *ft; + struct mlx5_flow_handle *rule; + struct mlx5_modify_hdr *copy_modify_hdr; + struct mlx5_macsec_miss nic_miss; + + /* Flow table/rule in RDMA domain, to check dgid */ + struct mlx5_flow_table *ft_ip_check; + struct mlx5_flow_table *ft_macsec_op_check; + struct mlx5_macsec_miss miss; +}; + +struct mlx5_macsec_rx { + struct mlx5_flow_handle *check_rule[2]; + struct mlx5_pkt_reformat *check_rule_pkt_reformat[2]; + + struct mlx5_macsec_tables tables; + struct mlx5_macsec_rx_roce roce; +}; + +union mlx5_macsec_rule { + struct mlx5_macsec_tx_rule tx_rule; + struct mlx5_macsec_rx_rule rx_rule; +}; + +static const struct rhashtable_params rhash_sci = { + .key_len = sizeof_field(struct mlx5_fs_id, sci), + .key_offset = offsetof(struct mlx5_fs_id, sci), + .head_offset = offsetof(struct mlx5_fs_id, hash), + .automatic_shrinking = true, + .min_size = 1, +}; + +static const struct rhashtable_params rhash_fs_id = { + .key_len = sizeof_field(struct mlx5_fs_id, id), + .key_offset = offsetof(struct mlx5_fs_id, id), + .head_offset = offsetof(struct mlx5_fs_id, hash), + .automatic_shrinking = true, + .min_size = 1, +}; + +struct mlx5_macsec_fs { + struct mlx5_core_dev *mdev; + struct mlx5_macsec_tx *tx_fs; + struct mlx5_macsec_rx *rx_fs; + + /* Stats manage */ + struct mlx5_macsec_stats stats; + + /* Tx sci -> fs id mapping handling */ + struct rhashtable sci_hash; /* sci -> mlx5_fs_id */ + + /* RX fs_id -> mlx5_fs_id mapping handling */ + struct rhashtable fs_id_hash; /* fs_id -> mlx5_fs_id */ + + /* TX & RX fs_id lists per macsec device */ + struct list_head macsec_devices_list; +}; + +static void macsec_fs_destroy_groups(struct mlx5_macsec_flow_table *ft) +{ + int i; + + for (i = ft->num_groups - 1; i >= 0; i--) { + if (!IS_ERR_OR_NULL(ft->g[i])) + mlx5_destroy_flow_group(ft->g[i]); + ft->g[i] = NULL; + } + ft->num_groups = 0; +} + +static void macsec_fs_destroy_flow_table(struct mlx5_macsec_flow_table *ft) +{ + macsec_fs_destroy_groups(ft); + kfree(ft->g); + mlx5_destroy_flow_table(ft->t); + ft->t = NULL; +} + +static void macsec_fs_tx_destroy(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs; + struct mlx5_macsec_tables *tx_tables; + + if (mlx5_is_macsec_roce_supported(macsec_fs->mdev)) + mlx5_destroy_flow_table(tx_fs->ft_rdma_tx); + + tx_tables = &tx_fs->tables; + + /* Tx check table */ + if (tx_fs->check_rule) { + mlx5_del_flow_rules(tx_fs->check_rule); + tx_fs->check_rule = NULL; + } + + if (tx_tables->check_miss_rule) { + mlx5_del_flow_rules(tx_tables->check_miss_rule); + tx_tables->check_miss_rule = NULL; + } + + if (tx_tables->ft_check_group) { + mlx5_destroy_flow_group(tx_tables->ft_check_group); + tx_tables->ft_check_group = NULL; + } + + if (tx_tables->ft_check) { + mlx5_destroy_flow_table(tx_tables->ft_check); + tx_tables->ft_check = NULL; + } + + /* Tx crypto table */ + if (tx_fs->crypto_mke_rule) { + mlx5_del_flow_rules(tx_fs->crypto_mke_rule); + tx_fs->crypto_mke_rule = NULL; + } + + if (tx_tables->crypto_miss_rule) { + mlx5_del_flow_rules(tx_tables->crypto_miss_rule); + tx_tables->crypto_miss_rule = NULL; + } + + macsec_fs_destroy_flow_table(&tx_tables->ft_crypto); +} + +static int 
macsec_fs_tx_create_crypto_table_groups(struct mlx5_macsec_flow_table *ft) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + int mclen = MLX5_ST_SZ_BYTES(fte_match_param); + int ix = 0; + u32 *in; + int err; + u8 *mc; + + ft->g = kcalloc(TX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL); + if (!ft->g) + return -ENOMEM; + in = kvzalloc(inlen, GFP_KERNEL); + + if (!in) { + kfree(ft->g); + ft->g = NULL; + return -ENOMEM; + } + + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + + /* Flow Group for MKE match */ + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + + MLX5_SET_CFG(in, start_flow_index, ix); + ix += TX_CRYPTO_TABLE_MKE_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + /* Flow Group for SA rules */ + memset(in, 0, inlen); + memset(mc, 0, mclen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2); + MLX5_SET(fte_match_param, mc, misc_parameters_2.metadata_reg_a, + MLX5_ETH_WQE_FT_META_MACSEC_MASK); + + MLX5_SET_CFG(in, start_flow_index, ix); + ix += TX_CRYPTO_TABLE_SA_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + /* Flow Group for l2 traps */ + memset(in, 0, inlen); + memset(mc, 0, mclen); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + kvfree(in); + return 0; + +err: + err = PTR_ERR(ft->g[ft->num_groups]); + ft->g[ft->num_groups] = NULL; + kvfree(in); + + return err; +} + +static struct mlx5_flow_table + *macsec_fs_auto_group_table_create(struct mlx5_flow_namespace *ns, int flags, + int level, int max_fte) +{ + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_table *fdb = NULL; + + /* reserve entry for the match all miss group and rule */ + ft_attr.autogroup.num_reserved_entries = 1; + ft_attr.autogroup.max_num_groups = 1; + ft_attr.prio = 0; + ft_attr.flags = flags; + ft_attr.level = level; + ft_attr.max_fte = max_fte; + + fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); + + return fdb; +} + +enum { + RDMA_TX_MACSEC_LEVEL = 0, +}; + +static int macsec_fs_tx_roce_create(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5_flow_namespace *ns; + struct mlx5_flow_table *ft; + int err; + + if (!mlx5_is_macsec_roce_supported(mdev)) { + mlx5_core_dbg(mdev, "Failed to init RoCE MACsec, capabilities not supported\n"); + return 0; + } + + ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC); + if (!ns) + return -ENOMEM; + + /* Tx RoCE crypto table */ + ft = macsec_fs_auto_group_table_create(ns, 0, RDMA_TX_MACSEC_LEVEL, CRYPTO_NUM_MAXSEC_FTE); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, "Failed to create MACsec RoCE Tx crypto table err(%d)\n", err); + return err; + } + tx_fs->ft_rdma_tx = ft; + + return 0; +} + +static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs; + struct mlx5_core_dev *mdev = 
macsec_fs->mdev; + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_destination dest = {}; + struct mlx5_macsec_tables *tx_tables; + struct mlx5_flow_act flow_act = {}; + struct mlx5_macsec_flow_table *ft_crypto; + struct mlx5_flow_table *flow_table; + struct mlx5_flow_group *flow_group; + struct mlx5_flow_namespace *ns; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + u32 *flow_group_in; + int err; + + ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_EGRESS_MACSEC); + if (!ns) + return -ENOMEM; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) { + err = -ENOMEM; + goto out_spec; + } + + tx_tables = &tx_fs->tables; + ft_crypto = &tx_tables->ft_crypto; + + /* Tx crypto table */ + ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; + ft_attr.level = TX_CRYPTO_TABLE_LEVEL; + ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE; + + flow_table = mlx5_create_flow_table(ns, &ft_attr); + if (IS_ERR(flow_table)) { + err = PTR_ERR(flow_table); + mlx5_core_err(mdev, "Failed to create MACsec Tx crypto table err(%d)\n", err); + goto out_flow_group; + } + ft_crypto->t = flow_table; + + /* Tx crypto table groups */ + err = macsec_fs_tx_create_crypto_table_groups(ft_crypto); + if (err) { + mlx5_core_err(mdev, + "Failed to create default flow group for MACsec Tx crypto table err(%d)\n", + err); + goto err; + } + + /* Tx crypto table MKE rule - MKE packets shouldn't be offloaded */ + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_PAE); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; + + rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, NULL, 0); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Failed to add MACsec TX MKE rule, err=%d\n", err); + goto err; + } + tx_fs->crypto_mke_rule = rule; + + /* Tx crypto table Default miss rule */ + memset(&flow_act, 0, sizeof(flow_act)); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; + rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Failed to add MACsec Tx table default miss rule %d\n", err); + goto err; + } + tx_tables->crypto_miss_rule = rule; + + /* Tx check table */ + flow_table = macsec_fs_auto_group_table_create(ns, 0, TX_CHECK_TABLE_LEVEL, + TX_CHECK_TABLE_NUM_FTE); + if (IS_ERR(flow_table)) { + err = PTR_ERR(flow_table); + mlx5_core_err(mdev, "Failed to create MACsec TX check table, err(%d)\n", err); + goto err; + } + tx_tables->ft_check = flow_table; + + /* Tx check table Default miss group/rule */ + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1); + flow_group = mlx5_create_flow_group(tx_tables->ft_check, flow_group_in); + if (IS_ERR(flow_group)) { + err = PTR_ERR(flow_group); + mlx5_core_err(mdev, + "Failed to create default flow group for MACsec Tx check table err(%d)\n", + err); + goto err; + } + tx_tables->ft_check_group = flow_group; + + /* Tx check table default drop rule */ + memset(&dest, 0, sizeof(struct mlx5_flow_destination)); + memset(&flow_act, 0, sizeof(flow_act)); + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter_id = 
mlx5_fc_id(tx_tables->check_miss_rule_counter); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; + rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Failed to add MACsec tx check drop rule, err(%d)\n", err); + goto err; + } + tx_tables->check_miss_rule = rule; + + /* Tx check table rule */ + memset(spec, 0, sizeof(struct mlx5_flow_spec)); + memset(&dest, 0, sizeof(struct mlx5_flow_destination)); + memset(&flow_act, 0, sizeof(flow_act)); + + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0); + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + + flow_act.flags = FLOW_ACT_NO_APPEND; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT; + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter_id = mlx5_fc_id(tx_tables->check_rule_counter); + rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Failed to add MACsec check rule, err=%d\n", err); + goto err; + } + tx_fs->check_rule = rule; + + err = macsec_fs_tx_roce_create(macsec_fs); + if (err) + goto err; + + kvfree(flow_group_in); + kvfree(spec); + return 0; + +err: + macsec_fs_tx_destroy(macsec_fs); +out_flow_group: + kvfree(flow_group_in); +out_spec: + kvfree(spec); + return err; +} + +static int macsec_fs_tx_ft_get(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs; + struct mlx5_macsec_tables *tx_tables; + int err = 0; + + tx_tables = &tx_fs->tables; + if (tx_tables->refcnt) + goto out; + + err = macsec_fs_tx_create(macsec_fs); + if (err) + return err; + +out: + tx_tables->refcnt++; + return err; +} + +static void macsec_fs_tx_ft_put(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables; + + if (--tx_tables->refcnt) + return; + + macsec_fs_tx_destroy(macsec_fs); +} + +static int macsec_fs_tx_setup_fte(struct mlx5_macsec_fs *macsec_fs, + struct mlx5_flow_spec *spec, + struct mlx5_flow_act *flow_act, + u32 macsec_obj_id, + u32 *fs_id) +{ + struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs; + int err = 0; + u32 id; + + err = ida_alloc_range(&tx_fs->tx_halloc, 1, + MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES, + GFP_KERNEL); + if (err < 0) + return err; + + id = err; + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; + + /* Metadata match */ + MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a, + MLX5_ETH_WQE_FT_META_MACSEC_MASK); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a, + macsec_fs_set_tx_fs_id(id)); + + *fs_id = id; + flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC; + flow_act->crypto.obj_id = macsec_obj_id; + + mlx5_core_dbg(macsec_fs->mdev, "Tx fte: macsec obj_id %u, fs_id %u\n", macsec_obj_id, id); + return 0; +} + +static void macsec_fs_tx_create_sectag_header(const struct macsec_context *ctx, + char *reformatbf, + size_t *reformat_size) +{ + const struct macsec_secy *secy = ctx->secy; + bool sci_present = macsec_send_sci(secy); + struct mlx5_sectag_header sectag = {}; + const struct macsec_tx_sc *tx_sc; + + tx_sc = &secy->tx_sc; + sectag.ethertype = htons(ETH_P_MACSEC); + + if (sci_present) { + sectag.tci_an |= MACSEC_TCI_SC; + memcpy(&sectag.sci, &secy->sci, + 
sizeof(sectag.sci)); + } else { + if (tx_sc->end_station) + sectag.tci_an |= MACSEC_TCI_ES; + if (tx_sc->scb) + sectag.tci_an |= MACSEC_TCI_SCB; + } + + /* With GCM, C/E clear for !encrypt, both set for encrypt */ + if (tx_sc->encrypt) + sectag.tci_an |= MACSEC_TCI_CONFID; + else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) + sectag.tci_an |= MACSEC_TCI_C; + + sectag.tci_an |= tx_sc->encoding_sa; + + *reformat_size = MLX5_MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0); + + memcpy(reformatbf, &sectag, *reformat_size); +} + +static bool macsec_fs_is_macsec_device_empty(struct mlx5_macsec_device *macsec_device) +{ + if (xa_empty(&macsec_device->tx_id_xa) && + xa_empty(&macsec_device->rx_id_xa)) + return true; + + return false; +} + +static void macsec_fs_id_del(struct list_head *macsec_devices_list, u32 fs_id, + void *macdev, struct rhashtable *hash_table, bool is_tx) +{ + const struct rhashtable_params *rhash = (is_tx) ? &rhash_sci : &rhash_fs_id; + struct mlx5_macsec_device *iter, *macsec_device = NULL; + struct mlx5_fs_id *fs_id_found; + struct xarray *fs_id_xa; + + list_for_each_entry(iter, macsec_devices_list, macsec_devices_list_entry) { + if (iter->macdev == macdev) { + macsec_device = iter; + break; + } + } + WARN_ON(!macsec_device); + + fs_id_xa = (is_tx) ? &macsec_device->tx_id_xa : + &macsec_device->rx_id_xa; + xa_lock(fs_id_xa); + fs_id_found = xa_load(fs_id_xa, fs_id); + WARN_ON(!fs_id_found); + + if (!refcount_dec_and_test(&fs_id_found->refcnt)) { + xa_unlock(fs_id_xa); + return; + } + + if (fs_id_found->id) { + /* Make sure ongoing datapath readers see a valid SA */ + rhashtable_remove_fast(hash_table, &fs_id_found->hash, *rhash); + fs_id_found->id = 0; + } + xa_unlock(fs_id_xa); + + xa_erase(fs_id_xa, fs_id); + + kfree(fs_id_found); + + if (macsec_fs_is_macsec_device_empty(macsec_device)) { + list_del(&macsec_device->macsec_devices_list_entry); + kfree(macsec_device); + } +} + +static int macsec_fs_id_add(struct list_head *macsec_devices_list, u32 fs_id, + void *macdev, struct rhashtable *hash_table, sci_t sci, + bool is_tx) +{ + const struct rhashtable_params *rhash = (is_tx) ? &rhash_sci : &rhash_fs_id; + struct mlx5_macsec_device *iter, *macsec_device = NULL; + struct mlx5_fs_id *fs_id_iter; + struct xarray *fs_id_xa; + int err; + + if (!is_tx) { + rcu_read_lock(); + fs_id_iter = rhashtable_lookup(hash_table, &fs_id, rhash_fs_id); + if (fs_id_iter) { + refcount_inc(&fs_id_iter->refcnt); + rcu_read_unlock(); + return 0; + } + rcu_read_unlock(); + } + + fs_id_iter = kzalloc(sizeof(*fs_id_iter), GFP_KERNEL); + if (!fs_id_iter) + return -ENOMEM; + + list_for_each_entry(iter, macsec_devices_list, macsec_devices_list_entry) { + if (iter->macdev == macdev) { + macsec_device = iter; + break; + } + } + + if (!macsec_device) { /* first time adding an SA to that device */ + macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL); + if (!macsec_device) { + err = -ENOMEM; + goto err_alloc_dev; + } + macsec_device->macdev = macdev; + xa_init(&macsec_device->tx_id_xa); + xa_init(&macsec_device->rx_id_xa); + list_add(&macsec_device->macsec_devices_list_entry, macsec_devices_list); + } + + fs_id_xa = (is_tx) ? 
&macsec_device->tx_id_xa : + &macsec_device->rx_id_xa; + fs_id_iter->id = fs_id; + refcount_set(&fs_id_iter->refcnt, 1); + fs_id_iter->sci = sci; + err = xa_err(xa_store(fs_id_xa, fs_id, fs_id_iter, GFP_KERNEL)); + if (err) + goto err_store_id; + + err = rhashtable_insert_fast(hash_table, &fs_id_iter->hash, *rhash); + if (err) + goto err_hash_insert; + + return 0; + +err_hash_insert: + xa_erase(fs_id_xa, fs_id); +err_store_id: + if (macsec_fs_is_macsec_device_empty(macsec_device)) { + list_del(&macsec_device->macsec_devices_list_entry); + kfree(macsec_device); + } +err_alloc_dev: + kfree(fs_id_iter); + return err; +} + +static void macsec_fs_tx_del_rule(struct mlx5_macsec_fs *macsec_fs, + struct mlx5_macsec_tx_rule *tx_rule, + void *macdev) +{ + macsec_fs_id_del(&macsec_fs->macsec_devices_list, tx_rule->fs_id, macdev, + &macsec_fs->sci_hash, true); + + if (tx_rule->rule) { + mlx5_del_flow_rules(tx_rule->rule); + tx_rule->rule = NULL; + } + + if (tx_rule->pkt_reformat) { + mlx5_packet_reformat_dealloc(macsec_fs->mdev, tx_rule->pkt_reformat); + tx_rule->pkt_reformat = NULL; + } + + if (tx_rule->fs_id) { + ida_free(&macsec_fs->tx_fs->tx_halloc, tx_rule->fs_id); + tx_rule->fs_id = 0; + } + + kfree(tx_rule); + + macsec_fs_tx_ft_put(macsec_fs); +} + +#define MLX5_REFORMAT_PARAM_ADD_MACSEC_OFFSET_4_BYTES 1 + +static union mlx5_macsec_rule * +macsec_fs_tx_add_rule(struct mlx5_macsec_fs *macsec_fs, + const struct macsec_context *macsec_ctx, + struct mlx5_macsec_rule_attrs *attrs, u32 *fs_id) +{ + char reformatbf[MLX5_MACSEC_TAG_LEN + MACSEC_SCI_LEN]; + struct mlx5_pkt_reformat_params reformat_params = {}; + struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + union mlx5_macsec_rule *macsec_rule = NULL; + struct mlx5_flow_destination dest = {}; + struct mlx5_macsec_tables *tx_tables; + struct mlx5_macsec_tx_rule *tx_rule; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + size_t reformat_size; + int err = 0; + + tx_tables = &tx_fs->tables; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return NULL; + + err = macsec_fs_tx_ft_get(macsec_fs); + if (err) + goto out_spec; + + macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL); + if (!macsec_rule) { + macsec_fs_tx_ft_put(macsec_fs); + goto out_spec; + } + + tx_rule = &macsec_rule->tx_rule; + + /* Tx crypto table crypto rule */ + macsec_fs_tx_create_sectag_header(macsec_ctx, reformatbf, &reformat_size); + + reformat_params.type = MLX5_REFORMAT_TYPE_ADD_MACSEC; + reformat_params.size = reformat_size; + reformat_params.data = reformatbf; + + if (is_vlan_dev(macsec_ctx->netdev)) + reformat_params.param_0 = MLX5_REFORMAT_PARAM_ADD_MACSEC_OFFSET_4_BYTES; + + flow_act.pkt_reformat = mlx5_packet_reformat_alloc(mdev, + &reformat_params, + MLX5_FLOW_NAMESPACE_EGRESS_MACSEC); + if (IS_ERR(flow_act.pkt_reformat)) { + err = PTR_ERR(flow_act.pkt_reformat); + mlx5_core_err(mdev, "Failed to allocate MACsec Tx reformat context err=%d\n", err); + goto err; + } + tx_rule->pkt_reformat = flow_act.pkt_reformat; + + err = macsec_fs_tx_setup_fte(macsec_fs, spec, &flow_act, attrs->macsec_obj_id, fs_id); + if (err) { + mlx5_core_err(mdev, + "Failed to add packet reformat for MACsec TX crypto rule, err=%d\n", + err); + goto err; + } + + tx_rule->fs_id = *fs_id; + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT | + MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + 
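+ /* Hits on the SA rule are encrypted and forwarded to the Tx check table, which verifies the ASO syndrome returned in metadata_reg_c_4 before allowing the packet out */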
dest.ft = tx_tables->ft_check; + rule = mlx5_add_flow_rules(tx_tables->ft_crypto.t, spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Failed to add MACsec TX crypto rule, err=%d\n", err); + goto err; + } + tx_rule->rule = rule; + + err = macsec_fs_id_add(&macsec_fs->macsec_devices_list, *fs_id, macsec_ctx->secy->netdev, + &macsec_fs->sci_hash, attrs->sci, true); + if (err) { + mlx5_core_err(mdev, "Failed to save fs_id, err=%d\n", err); + goto err; + } + + goto out_spec; + +err: + macsec_fs_tx_del_rule(macsec_fs, tx_rule, macsec_ctx->secy->netdev); + macsec_rule = NULL; +out_spec: + kvfree(spec); + + return macsec_rule; +} + +static void macsec_fs_tx_cleanup(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5_macsec_tables *tx_tables; + + if (!tx_fs) + return; + + tx_tables = &tx_fs->tables; + if (tx_tables->refcnt) { + mlx5_core_err(mdev, + "Can't destroy MACsec offload tx_fs, refcnt(%u) isn't 0\n", + tx_tables->refcnt); + return; + } + + ida_destroy(&tx_fs->tx_halloc); + + if (tx_tables->check_miss_rule_counter) { + mlx5_fc_destroy(mdev, tx_tables->check_miss_rule_counter); + tx_tables->check_miss_rule_counter = NULL; + } + + if (tx_tables->check_rule_counter) { + mlx5_fc_destroy(mdev, tx_tables->check_rule_counter); + tx_tables->check_rule_counter = NULL; + } + + kfree(tx_fs); + macsec_fs->tx_fs = NULL; +} + +static int macsec_fs_tx_init(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5_macsec_tables *tx_tables; + struct mlx5_macsec_tx *tx_fs; + struct mlx5_fc *flow_counter; + int err; + + tx_fs = kzalloc(sizeof(*tx_fs), GFP_KERNEL); + if (!tx_fs) + return -ENOMEM; + + tx_tables = &tx_fs->tables; + + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + err = PTR_ERR(flow_counter); + mlx5_core_err(mdev, + "Failed to create MACsec Tx encrypt flow counter, err(%d)\n", + err); + goto err_encrypt_counter; + } + tx_tables->check_rule_counter = flow_counter; + + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + err = PTR_ERR(flow_counter); + mlx5_core_err(mdev, + "Failed to create MACsec Tx drop flow counter, err(%d)\n", + err); + goto err_drop_counter; + } + tx_tables->check_miss_rule_counter = flow_counter; + + ida_init(&tx_fs->tx_halloc); + INIT_LIST_HEAD(&macsec_fs->macsec_devices_list); + + macsec_fs->tx_fs = tx_fs; + + return 0; + +err_drop_counter: + mlx5_fc_destroy(mdev, tx_tables->check_rule_counter); + tx_tables->check_rule_counter = NULL; + +err_encrypt_counter: + kfree(tx_fs); + macsec_fs->tx_fs = NULL; + + return err; +} + +static void macsec_fs_rx_roce_miss_destroy(struct mlx5_macsec_miss *miss) +{ + mlx5_del_flow_rules(miss->rule); + mlx5_destroy_flow_group(miss->g); +} + +static void macsec_fs_rdma_rx_destroy(struct mlx5_macsec_rx_roce *roce, struct mlx5_core_dev *mdev) +{ + if (!mlx5_is_macsec_roce_supported(mdev)) + return; + + mlx5_del_flow_rules(roce->nic_miss.rule); + mlx5_del_flow_rules(roce->rule); + mlx5_modify_header_dealloc(mdev, roce->copy_modify_hdr); + mlx5_destroy_flow_group(roce->nic_miss.g); + mlx5_destroy_flow_group(roce->g); + mlx5_destroy_flow_table(roce->ft); + + macsec_fs_rx_roce_miss_destroy(&roce->miss); + mlx5_destroy_flow_table(roce->ft_macsec_op_check); + mlx5_destroy_flow_table(roce->ft_ip_check); +} + +static void macsec_fs_rx_destroy(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_macsec_rx *rx_fs = 
macsec_fs->rx_fs; + struct mlx5_macsec_tables *rx_tables; + int i; + + /* Rx check table */ + for (i = 1; i >= 0; --i) { + if (rx_fs->check_rule[i]) { + mlx5_del_flow_rules(rx_fs->check_rule[i]); + rx_fs->check_rule[i] = NULL; + } + + if (rx_fs->check_rule_pkt_reformat[i]) { + mlx5_packet_reformat_dealloc(macsec_fs->mdev, + rx_fs->check_rule_pkt_reformat[i]); + rx_fs->check_rule_pkt_reformat[i] = NULL; + } + } + + rx_tables = &rx_fs->tables; + + if (rx_tables->check_miss_rule) { + mlx5_del_flow_rules(rx_tables->check_miss_rule); + rx_tables->check_miss_rule = NULL; + } + + if (rx_tables->ft_check_group) { + mlx5_destroy_flow_group(rx_tables->ft_check_group); + rx_tables->ft_check_group = NULL; + } + + if (rx_tables->ft_check) { + mlx5_destroy_flow_table(rx_tables->ft_check); + rx_tables->ft_check = NULL; + } + + /* Rx crypto table */ + if (rx_tables->crypto_miss_rule) { + mlx5_del_flow_rules(rx_tables->crypto_miss_rule); + rx_tables->crypto_miss_rule = NULL; + } + + macsec_fs_destroy_flow_table(&rx_tables->ft_crypto); + + macsec_fs_rdma_rx_destroy(&macsec_fs->rx_fs->roce, macsec_fs->mdev); +} + +static int macsec_fs_rx_create_crypto_table_groups(struct mlx5_macsec_flow_table *ft) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + int mclen = MLX5_ST_SZ_BYTES(fte_match_param); + int ix = 0; + u32 *in; + int err; + u8 *mc; + + ft->g = kcalloc(RX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL); + if (!ft->g) + return -ENOMEM; + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + kfree(ft->g); + ft->g = NULL; + return -ENOMEM; + } + + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + + /* Flow group for SA rule with SCI */ + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS | + MLX5_MATCH_MISC_PARAMETERS_5); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + + MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0, + MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << + MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); + MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_2); + MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_3); + + MLX5_SET_CFG(in, start_flow_index, ix); + ix += RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + /* Flow group for SA rule without SCI */ + memset(in, 0, inlen); + memset(mc, 0, mclen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS | + MLX5_MATCH_MISC_PARAMETERS_5); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_47_16); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_15_0); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + + MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0, + MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); + + MLX5_SET_CFG(in, start_flow_index, ix); + ix += RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + /* Flow Group for l2 traps */ + memset(in, 0, inlen); + memset(mc, 0, mclen); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + 
ft->num_groups++; + + kvfree(in); + return 0; + +err: + err = PTR_ERR(ft->g[ft->num_groups]); + ft->g[ft->num_groups] = NULL; + kvfree(in); + + return err; +} + +static int macsec_fs_rx_create_check_decap_rule(struct mlx5_macsec_fs *macsec_fs, + struct mlx5_flow_destination *dest, + struct mlx5_flow_act *flow_act, + struct mlx5_flow_spec *spec, + int reformat_param_size) +{ + int rule_index = (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI) ? 0 : 1; + u8 mlx5_reformat_buf[MLX5_SECTAG_HEADER_SIZE_WITH_SCI]; + struct mlx5_pkt_reformat_params reformat_params = {}; + struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5_flow_destination roce_dest[2]; + struct mlx5_macsec_tables *rx_tables; + struct mlx5_flow_handle *rule; + int err = 0, dstn = 0; + + rx_tables = &rx_fs->tables; + + /* Rx check table decap 16B rule */ + memset(dest, 0, sizeof(*dest)); + memset(flow_act, 0, sizeof(*flow_act)); + memset(spec, 0, sizeof(*spec)); + + reformat_params.type = MLX5_REFORMAT_TYPE_DEL_MACSEC; + reformat_params.size = reformat_param_size; + reformat_params.data = mlx5_reformat_buf; + flow_act->pkt_reformat = mlx5_packet_reformat_alloc(mdev, + &reformat_params, + MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC); + if (IS_ERR(flow_act->pkt_reformat)) { + err = PTR_ERR(flow_act->pkt_reformat); + mlx5_core_err(mdev, "Failed to allocate MACsec Rx reformat context err=%d\n", err); + return err; + } + rx_fs->check_rule_pkt_reformat[rule_index] = flow_act->pkt_reformat; + + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + /* MACsec syndrome match */ + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.macsec_syndrome); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.macsec_syndrome, 0); + /* ASO return reg syndrome match */ + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0); + + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5; + /* Sectag TCI SC present bit*/ + MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0, + MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); + + if (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI) + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0, + MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT << + MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); + + flow_act->flags = FLOW_ACT_NO_APPEND; + + if (rx_fs->roce.ft) { + flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + roce_dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + roce_dest[dstn].ft = rx_fs->roce.ft; + dstn++; + } else { + flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; + } + + flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + roce_dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + roce_dest[dstn].counter_id = mlx5_fc_id(rx_tables->check_rule_counter); + rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, roce_dest, dstn + 1); + + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Failed to add MACsec Rx check rule, err=%d\n", err); + return err; + } + + rx_fs->check_rule[rule_index] = rule; + + return 0; +} + +static int macsec_fs_rx_roce_miss_create(struct mlx5_core_dev *mdev, + struct mlx5_macsec_rx_roce *roce) +{ + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_group *flow_group; + struct 
mlx5_flow_handle *rule; + u32 *flow_group_in; + int err; + + flow_group_in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL); + if (!flow_group_in) + return -ENOMEM; + + /* IP check ft has no miss rule since we use the default miss action, which is to go to the next prio */ + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, + roce->ft_macsec_op_check->max_fte - 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, + roce->ft_macsec_op_check->max_fte - 1); + flow_group = mlx5_create_flow_group(roce->ft_macsec_op_check, flow_group_in); + if (IS_ERR(flow_group)) { + err = PTR_ERR(flow_group); + mlx5_core_err(mdev, + "Failed to create miss flow group for MACsec RoCE operation check table err(%d)\n", + err); + goto err_macsec_op_miss_group; + } + roce->miss.g = flow_group; + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; + rule = mlx5_add_flow_rules(roce->ft_macsec_op_check, NULL, &flow_act, NULL, 0); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Failed to add miss rule to MACsec RoCE operation check table err(%d)\n", + err); + goto err_macsec_op_rule; + } + roce->miss.rule = rule; + + kvfree(flow_group_in); + return 0; + +err_macsec_op_rule: + mlx5_destroy_flow_group(roce->miss.g); +err_macsec_op_miss_group: + kvfree(flow_group_in); + return err; +} + +#define MLX5_RX_ROCE_GROUP_SIZE BIT(0) + +static int macsec_fs_rx_roce_jump_to_rdma_groups_create(struct mlx5_core_dev *mdev, + struct mlx5_macsec_rx_roce *roce) +{ + struct mlx5_flow_group *g; + void *outer_headers_c; + int ix = 0; + u32 *in; + int err; + u8 *mc; + + in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport); + + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5_RX_ROCE_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + g = mlx5_create_flow_group(roce->ft, in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + mlx5_core_err(mdev, "Failed to create main flow group for MACsec RoCE NIC UDP table err(%d)\n", + err); + goto err_udp_group; + } + roce->g = g; + + memset(in, 0, MLX5_ST_SZ_BYTES(create_flow_group_in)); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5_RX_ROCE_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + g = mlx5_create_flow_group(roce->ft, in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + mlx5_core_err(mdev, "Failed to create miss flow group for MACsec RoCE NIC UDP table err(%d)\n", + err); + goto err_udp_miss_group; + } + roce->nic_miss.g = g; + + kvfree(in); + return 0; + +err_udp_miss_group: + mlx5_destroy_flow_group(roce->g); +err_udp_group: + kvfree(in); + return err; +} + +static int macsec_fs_rx_roce_jump_to_rdma_rules_create(struct mlx5_macsec_fs *macsec_fs, + struct mlx5_macsec_rx_roce *roce) +{ + u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5_flow_destination dst = {}; + struct mlx5_modify_hdr *modify_hdr; + MLX5_DECLARE_FLOW_ACT(flow_act); + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + int err; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, 
spec->match_criteria, outer_headers.ip_protocol); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_UDP); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_dport); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, ROCE_V2_UDP_DPORT); + + MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY); + MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B); + MLX5_SET(copy_action_in, action, src_offset, 0); + MLX5_SET(copy_action_in, action, length, 32); + MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_5); + MLX5_SET(copy_action_in, action, dst_offset, 0); + + modify_hdr = mlx5_modify_header_alloc(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC, + 1, action); + + if (IS_ERR(modify_hdr)) { + err = PTR_ERR(modify_hdr); + mlx5_core_err(mdev, + "Failed to alloc macsec copy modify_header_id err(%d)\n", err); + goto err_alloc_hdr; + } + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + flow_act.modify_hdr = modify_hdr; + dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE; + dst.ft = roce->ft_ip_check; + rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Failed to add rule to MACsec RoCE NIC UDP table err(%d)\n", + err); + goto err_add_rule; + } + roce->rule = rule; + roce->copy_modify_hdr = modify_hdr; + + memset(&flow_act, 0, sizeof(flow_act)); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; + rule = mlx5_add_flow_rules(roce->ft, NULL, &flow_act, NULL, 0); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Failed to add miss rule to MACsec RoCE NIC UDP table err(%d)\n", + err); + goto err_add_rule2; + } + roce->nic_miss.rule = rule; + + kvfree(spec); + return 0; + +err_add_rule2: + mlx5_del_flow_rules(roce->rule); +err_add_rule: + mlx5_modify_header_dealloc(macsec_fs->mdev, modify_hdr); +err_alloc_hdr: + kvfree(spec); + return err; +} + +static int macsec_fs_rx_roce_jump_to_rdma_create(struct mlx5_macsec_fs *macsec_fs, + struct mlx5_macsec_rx_roce *roce) +{ + int err; + + err = macsec_fs_rx_roce_jump_to_rdma_groups_create(macsec_fs->mdev, roce); + if (err) + return err; + + err = macsec_fs_rx_roce_jump_to_rdma_rules_create(macsec_fs, roce); + if (err) + goto err; + + return 0; +err: + mlx5_destroy_flow_group(roce->nic_miss.g); + mlx5_destroy_flow_group(roce->g); + return err; +} + +static int macsec_fs_rx_roce_create(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_namespace *ns; + struct mlx5_flow_table *ft; + int err = 0; + + if (!mlx5_is_macsec_roce_supported(macsec_fs->mdev)) { + mlx5_core_dbg(mdev, "Failed to init RoCE MACsec, capabilities not supported\n"); + return 0; + } + + ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_RDMA_RX_MACSEC); + if (!ns) + return -ENOMEM; + + ft = macsec_fs_auto_group_table_create(ns, 0, RDMA_RX_ROCE_IP_TABLE_LEVEL, + CRYPTO_NUM_MAXSEC_FTE); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, + "Failed to create MACsec IP check RoCE table err(%d)\n", err); + return err; + } + rx_fs->roce.ft_ip_check = ft; + + ft = macsec_fs_auto_group_table_create(ns, 0, RDMA_RX_ROCE_MACSEC_OP_TABLE_LEVEL, + CRYPTO_NUM_MAXSEC_FTE); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, + "Failed 
to create MACsec operation check RoCE table err(%d)\n", + err); + goto err_macsec_op; + } + rx_fs->roce.ft_macsec_op_check = ft; + + err = macsec_fs_rx_roce_miss_create(mdev, &rx_fs->roce); + if (err) + goto err_miss_create; + + ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC); + if (!ns) { + err = -EOPNOTSUPP; + goto err_ns; + } + + ft_attr.level = RX_ROCE_TABLE_LEVEL; + ft_attr.max_fte = RX_ROCE_TABLE_NUM_FTE; + ft = mlx5_create_flow_table(ns, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, + "Failed to create MACsec jump to RX RoCE NIC table err(%d)\n", err); + goto err_ns; + } + rx_fs->roce.ft = ft; + + err = macsec_fs_rx_roce_jump_to_rdma_create(macsec_fs, &rx_fs->roce); + if (err) + goto err_udp_ft; + + return 0; + +err_udp_ft: + mlx5_destroy_flow_table(rx_fs->roce.ft); +err_ns: + macsec_fs_rx_roce_miss_destroy(&rx_fs->roce.miss); +err_miss_create: + mlx5_destroy_flow_table(rx_fs->roce.ft_macsec_op_check); +err_macsec_op: + mlx5_destroy_flow_table(rx_fs->roce.ft_ip_check); + return err; +} + +static int macsec_fs_rx_create(struct mlx5_macsec_fs *macsec_fs) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5_macsec_flow_table *ft_crypto; + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_destination dest = {}; + struct mlx5_macsec_tables *rx_tables; + struct mlx5_flow_table *flow_table; + struct mlx5_flow_group *flow_group; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_namespace *ns; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + u32 *flow_group_in; + int err; + + ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC); + if (!ns) + return -ENOMEM; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) { + err = -ENOMEM; + goto free_spec; + } + + rx_tables = &rx_fs->tables; + ft_crypto = &rx_tables->ft_crypto; + + err = macsec_fs_rx_roce_create(macsec_fs); + if (err) + goto out_flow_group; + + /* Rx crypto table */ + ft_attr.level = RX_CRYPTO_TABLE_LEVEL; + ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE; + + flow_table = mlx5_create_flow_table(ns, &ft_attr); + if (IS_ERR(flow_table)) { + err = PTR_ERR(flow_table); + mlx5_core_err(mdev, "Failed to create MACsec Rx crypto table err(%d)\n", err); + goto err; + } + ft_crypto->t = flow_table; + + /* Rx crypto table groups */ + err = macsec_fs_rx_create_crypto_table_groups(ft_crypto); + if (err) { + mlx5_core_err(mdev, + "Failed to create default flow group for MACsec Rx crypto table err(%d)\n", + err); + goto err; + } + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; + rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, + "Failed to add MACsec Rx crypto table default miss rule %d\n", + err); + goto err; + } + rx_tables->crypto_miss_rule = rule; + + /* Rx check table */ + flow_table = macsec_fs_auto_group_table_create(ns, + MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT, + RX_CHECK_TABLE_LEVEL, + RX_CHECK_TABLE_NUM_FTE); + if (IS_ERR(flow_table)) { + err = PTR_ERR(flow_table); + mlx5_core_err(mdev, "Failed to create MACsec RX check table, err(%d)\n", err); + goto err; + } + rx_tables->ft_check = flow_table; + + /* Rx check table Default miss group/rule */ + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte 
- 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1); + flow_group = mlx5_create_flow_group(rx_tables->ft_check, flow_group_in); + if (IS_ERR(flow_group)) { + err = PTR_ERR(flow_group); + mlx5_core_err(mdev, + "Failed to create default flow group for MACsec Rx check table err(%d)\n", + err); + goto err; + } + rx_tables->ft_check_group = flow_group; + + /* Rx check table default drop rule */ + memset(&flow_act, 0, sizeof(flow_act)); + + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter_id = mlx5_fc_id(rx_tables->check_miss_rule_counter); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; + rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Failed to add MACsec Rx check drop rule, err(%d)\n", err); + goto err; + } + rx_tables->check_miss_rule = rule; + + /* Rx check table decap rules */ + err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec, + MLX5_SECTAG_HEADER_SIZE_WITH_SCI); + if (err) + goto err; + + err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec, + MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI); + if (err) + goto err; + + goto out_flow_group; + +err: + macsec_fs_rx_destroy(macsec_fs); +out_flow_group: + kvfree(flow_group_in); +free_spec: + kvfree(spec); + return err; +} + +static int macsec_fs_rx_ft_get(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables; + int err = 0; + + if (rx_tables->refcnt) + goto out; + + err = macsec_fs_rx_create(macsec_fs); + if (err) + return err; + +out: + rx_tables->refcnt++; + return err; +} + +static void macsec_fs_rx_ft_put(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables; + + if (--rx_tables->refcnt) + return; + + macsec_fs_rx_destroy(macsec_fs); +} + +static void macsec_fs_rx_del_rule(struct mlx5_macsec_fs *macsec_fs, + struct mlx5_macsec_rx_rule *rx_rule, + void *macdev, u32 fs_id) +{ + int i; + + macsec_fs_id_del(&macsec_fs->macsec_devices_list, fs_id, macdev, + &macsec_fs->fs_id_hash, false); + + for (i = 0; i < RX_NUM_OF_RULES_PER_SA; ++i) { + if (rx_rule->rule[i]) { + mlx5_del_flow_rules(rx_rule->rule[i]); + rx_rule->rule[i] = NULL; + } + } + + if (rx_rule->meta_modhdr) { + mlx5_modify_header_dealloc(macsec_fs->mdev, rx_rule->meta_modhdr); + rx_rule->meta_modhdr = NULL; + } + + kfree(rx_rule); + + macsec_fs_rx_ft_put(macsec_fs); +} + +static void macsec_fs_rx_setup_fte(struct mlx5_flow_spec *spec, + struct mlx5_flow_act *flow_act, + struct mlx5_macsec_rule_attrs *attrs, + bool sci_present) +{ + u8 tci_an = (sci_present << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET) | attrs->assoc_num; + struct mlx5_flow_act_crypto_params *crypto_params = &flow_act->crypto; + __be32 *sci_p = (__be32 *)(&attrs->sci); + + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + + /* MACsec ethertype */ + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_MACSEC); + + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5; + + /* Sectag AN + TCI SC present bit */ + MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0, + MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0, + tci_an << 
MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET); + + if (sci_present) { + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + misc_parameters_5.macsec_tag_2); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_2, + be32_to_cpu(sci_p[0])); + + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + misc_parameters_5.macsec_tag_3); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_3, + be32_to_cpu(sci_p[1])); + } else { + /* When SCI isn't present in the Sectag, match the source MAC address only if the SCI contains the default MACsec PORT */ + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0); + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.smac_47_16), + sci_p, ETH_ALEN); + } + + crypto_params->type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC; + crypto_params->obj_id = attrs->macsec_obj_id; +} + +static union mlx5_macsec_rule * +macsec_fs_rx_add_rule(struct mlx5_macsec_fs *macsec_fs, + const struct macsec_context *macsec_ctx, + struct mlx5_macsec_rule_attrs *attrs, + u32 fs_id) +{ + u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; + struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + union mlx5_macsec_rule *macsec_rule = NULL; + struct mlx5_modify_hdr *modify_hdr = NULL; + struct mlx5_macsec_flow_table *ft_crypto; + struct mlx5_flow_destination dest = {}; + struct mlx5_macsec_tables *rx_tables; + struct mlx5_macsec_rx_rule *rx_rule; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + int err = 0; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return NULL; + + err = macsec_fs_rx_ft_get(macsec_fs); + if (err) + goto out_spec; + + macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL); + if (!macsec_rule) { + macsec_fs_rx_ft_put(macsec_fs); + goto out_spec; + } + + rx_rule = &macsec_rule->rx_rule; + rx_tables = &rx_fs->tables; + ft_crypto = &rx_tables->ft_crypto; + + /* Set bit[31-30] macsec marker - 0x01, and bit[15-0] fs id */ + MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); + MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B); + MLX5_SET(set_action_in, action, data, macsec_fs_set_rx_fs_id(fs_id)); + MLX5_SET(set_action_in, action, offset, 0); + MLX5_SET(set_action_in, action, length, 32); + + modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC, + 1, action); + if (IS_ERR(modify_hdr)) { + err = PTR_ERR(modify_hdr); + mlx5_core_err(mdev, "Failed to alloc MACsec set modify_header_id err=%d\n", err); + modify_hdr = NULL; + goto err; + } + rx_rule->meta_modhdr = modify_hdr; + + /* Rx crypto table with SCI rule */ + macsec_fs_rx_setup_fte(spec, &flow_act, attrs, true); + + flow_act.modify_hdr = modify_hdr; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT | + MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = rx_tables->ft_check; + rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, + "Failed to add SA with SCI rule to Rx crypto table, err=%d\n", + err); + goto err; + } + rx_rule->rule[0] = rule; + + /* Rx crypto table without SCI rule */ + if ((cpu_to_be64((__force u64)attrs->sci) & 0xFFFF) == 
ntohs(MACSEC_PORT_ES)) { + memset(spec, 0, sizeof(struct mlx5_flow_spec)); + memset(&dest, 0, sizeof(struct mlx5_flow_destination)); + memset(&flow_act, 0, sizeof(flow_act)); + + macsec_fs_rx_setup_fte(spec, &flow_act, attrs, false); + + flow_act.modify_hdr = modify_hdr; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT | + MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = rx_tables->ft_check; + rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, + "Failed to add SA without SCI rule to Rx crypto table, err=%d\n", + err); + goto err; + } + rx_rule->rule[1] = rule; + } + + err = macsec_fs_id_add(&macsec_fs->macsec_devices_list, fs_id, macsec_ctx->secy->netdev, + &macsec_fs->fs_id_hash, attrs->sci, false); + if (err) { + mlx5_core_err(mdev, "Failed to save fs_id, err=%d\n", err); + goto err; + } + + kvfree(spec); + return macsec_rule; + +err: + macsec_fs_rx_del_rule(macsec_fs, rx_rule, macsec_ctx->secy->netdev, fs_id); + macsec_rule = NULL; +out_spec: + kvfree(spec); + return macsec_rule; +} + +static int macsec_fs_rx_init(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5_macsec_tables *rx_tables; + struct mlx5_macsec_rx *rx_fs; + struct mlx5_fc *flow_counter; + int err; + + rx_fs = kzalloc(sizeof(*rx_fs), GFP_KERNEL); + if (!rx_fs) + return -ENOMEM; + + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + err = PTR_ERR(flow_counter); + mlx5_core_err(mdev, + "Failed to create MACsec Rx decrypt flow counter, err(%d)\n", + err); + goto err_encrypt_counter; + } + + rx_tables = &rx_fs->tables; + rx_tables->check_rule_counter = flow_counter; + + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + err = PTR_ERR(flow_counter); + mlx5_core_err(mdev, + "Failed to create MACsec Rx drop flow counter, err(%d)\n", + err); + goto err_drop_counter; + } + rx_tables->check_miss_rule_counter = flow_counter; + + macsec_fs->rx_fs = rx_fs; + + return 0; + +err_drop_counter: + mlx5_fc_destroy(mdev, rx_tables->check_rule_counter); + rx_tables->check_rule_counter = NULL; + +err_encrypt_counter: + kfree(rx_fs); + macsec_fs->rx_fs = NULL; + + return err; +} + +static void macsec_fs_rx_cleanup(struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5_macsec_tables *rx_tables; + + if (!rx_fs) + return; + + rx_tables = &rx_fs->tables; + + if (rx_tables->refcnt) { + mlx5_core_err(mdev, + "Can't destroy MACsec offload rx_fs, refcnt(%u) isn't 0\n", + rx_tables->refcnt); + return; + } + + if (rx_tables->check_miss_rule_counter) { + mlx5_fc_destroy(mdev, rx_tables->check_miss_rule_counter); + rx_tables->check_miss_rule_counter = NULL; + } + + if (rx_tables->check_rule_counter) { + mlx5_fc_destroy(mdev, rx_tables->check_rule_counter); + rx_tables->check_rule_counter = NULL; + } + + kfree(rx_fs); + macsec_fs->rx_fs = NULL; +} + +static void set_ipaddr_spec_v4(struct sockaddr_in *in, struct mlx5_flow_spec *spec, bool is_dst_ip) +{ + MLX5_SET(fte_match_param, spec->match_value, + outer_headers.ip_version, MLX5_FS_IPV4_VERSION); + + if (is_dst_ip) { + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4); + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 
+ &in->sin_addr.s_addr, 4); + } else { + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4); + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), + &in->sin_addr.s_addr, 4); + } +} + +static void set_ipaddr_spec_v6(struct sockaddr_in6 *in6, struct mlx5_flow_spec *spec, + bool is_dst_ip) +{ + MLX5_SET(fte_match_param, spec->match_value, + outer_headers.ip_version, MLX5_FS_IPV6_VERSION); + + if (is_dst_ip) { + memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + 0xff, 16); + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + &in6->sin6_addr, 16); + } else { + memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), + 0xff, 16); + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), + &in6->sin6_addr, 16); + } +} + +static void set_ipaddr_spec(const struct sockaddr *addr, + struct mlx5_flow_spec *spec, bool is_dst_ip) +{ + struct sockaddr_in6 *in6; + + spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.ip_version); + + if (addr->sa_family == AF_INET) { + struct sockaddr_in *in = (struct sockaddr_in *)addr; + + set_ipaddr_spec_v4(in, spec, is_dst_ip); + return; + } + + in6 = (struct sockaddr_in6 *)addr; + set_ipaddr_spec_v6(in6, spec, is_dst_ip); +} + +static void macsec_fs_del_roce_rule_rx(struct mlx5_roce_macsec_rx_rule *rx_rule) +{ + mlx5_del_flow_rules(rx_rule->op); + mlx5_del_flow_rules(rx_rule->ip); + list_del(&rx_rule->entry); + kfree(rx_rule); +} + +static void macsec_fs_del_roce_rules_rx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id, + struct list_head *rx_rules_list) +{ + struct mlx5_roce_macsec_rx_rule *rx_rule, *next; + + if (!mlx5_is_macsec_roce_supported(macsec_fs->mdev)) + return; + + list_for_each_entry_safe(rx_rule, next, rx_rules_list, entry) { + if (rx_rule->fs_id == fs_id) + macsec_fs_del_roce_rule_rx(rx_rule); + } +} + +static void macsec_fs_del_roce_rule_tx(struct mlx5_core_dev *mdev, + struct mlx5_roce_macsec_tx_rule *tx_rule) +{ + mlx5_del_flow_rules(tx_rule->rule); + mlx5_modify_header_dealloc(mdev, tx_rule->meta_modhdr); + list_del(&tx_rule->entry); + kfree(tx_rule); +} + +static void macsec_fs_del_roce_rules_tx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id, + struct list_head *tx_rules_list) +{ + struct mlx5_roce_macsec_tx_rule *tx_rule, *next; + + if (!mlx5_is_macsec_roce_supported(macsec_fs->mdev)) + return; + + list_for_each_entry_safe(tx_rule, next, tx_rules_list, entry) { + if (tx_rule->fs_id == fs_id) + macsec_fs_del_roce_rule_tx(macsec_fs->mdev, tx_rule); + } +} + +void mlx5_macsec_fs_get_stats_fill(struct mlx5_macsec_fs *macsec_fs, void *macsec_stats) +{ + struct mlx5_macsec_stats *stats = (struct mlx5_macsec_stats *)macsec_stats; + struct mlx5_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables; + struct mlx5_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + + if (tx_tables->check_rule_counter) + mlx5_fc_query(mdev, tx_tables->check_rule_counter, + &stats->macsec_tx_pkts, &stats->macsec_tx_bytes); + + if (tx_tables->check_miss_rule_counter) + mlx5_fc_query(mdev, tx_tables->check_miss_rule_counter, + &stats->macsec_tx_pkts_drop, &stats->macsec_tx_bytes_drop); + + if 
(rx_tables->check_rule_counter) + mlx5_fc_query(mdev, rx_tables->check_rule_counter, + &stats->macsec_rx_pkts, &stats->macsec_rx_bytes); + + if (rx_tables->check_miss_rule_counter) + mlx5_fc_query(mdev, rx_tables->check_miss_rule_counter, + &stats->macsec_rx_pkts_drop, &stats->macsec_rx_bytes_drop); +} + +struct mlx5_macsec_stats *mlx5_macsec_fs_get_stats(struct mlx5_macsec_fs *macsec_fs) +{ + if (!macsec_fs) + return NULL; + + return &macsec_fs->stats; +} + +u32 mlx5_macsec_fs_get_fs_id_from_hashtable(struct mlx5_macsec_fs *macsec_fs, sci_t *sci) +{ + struct mlx5_fs_id *mlx5_fs_id; + u32 fs_id = 0; + + rcu_read_lock(); + mlx5_fs_id = rhashtable_lookup(&macsec_fs->sci_hash, sci, rhash_sci); + if (mlx5_fs_id) + fs_id = mlx5_fs_id->id; + rcu_read_unlock(); + + return fs_id; +} + +union mlx5_macsec_rule * +mlx5_macsec_fs_add_rule(struct mlx5_macsec_fs *macsec_fs, + const struct macsec_context *macsec_ctx, + struct mlx5_macsec_rule_attrs *attrs, + u32 *sa_fs_id) +{ + struct mlx5_macsec_event_data data = {.macsec_fs = macsec_fs, + .macdev = macsec_ctx->secy->netdev, + .is_tx = + (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) + }; + union mlx5_macsec_rule *macsec_rule; + u32 tx_new_fs_id; + + macsec_rule = (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ? + macsec_fs_tx_add_rule(macsec_fs, macsec_ctx, attrs, &tx_new_fs_id) : + macsec_fs_rx_add_rule(macsec_fs, macsec_ctx, attrs, *sa_fs_id); + + data.fs_id = (data.is_tx) ? tx_new_fs_id : *sa_fs_id; + if (macsec_rule) + blocking_notifier_call_chain(&macsec_fs->mdev->macsec_nh, + MLX5_DRIVER_EVENT_MACSEC_SA_ADDED, + &data); + + return macsec_rule; +} + +void mlx5_macsec_fs_del_rule(struct mlx5_macsec_fs *macsec_fs, + union mlx5_macsec_rule *macsec_rule, + int action, void *macdev, u32 sa_fs_id) +{ + struct mlx5_macsec_event_data data = {.macsec_fs = macsec_fs, + .macdev = macdev, + .is_tx = (action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) + }; + + data.fs_id = (data.is_tx) ? macsec_rule->tx_rule.fs_id : sa_fs_id; + blocking_notifier_call_chain(&macsec_fs->mdev->macsec_nh, + MLX5_DRIVER_EVENT_MACSEC_SA_DELETED, + &data); + + (action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ? 
+ macsec_fs_tx_del_rule(macsec_fs, &macsec_rule->tx_rule, macdev) : + macsec_fs_rx_del_rule(macsec_fs, &macsec_rule->rx_rule, macdev, sa_fs_id); +} + +static int mlx5_macsec_fs_add_roce_rule_rx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id, u16 gid_idx, + const struct sockaddr *addr, + struct list_head *rx_rules_list) +{ + struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs; + struct mlx5_roce_macsec_rx_rule *rx_rule; + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *new_rule; + struct mlx5_flow_spec *spec; + int err = 0; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + rx_rule = kzalloc(sizeof(*rx_rule), GFP_KERNEL); + if (!rx_rule) { + err = -ENOMEM; + goto out; + } + + set_ipaddr_spec(addr, spec, true); + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + dest.ft = rx_fs->roce.ft_macsec_op_check; + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + new_rule = mlx5_add_flow_rules(rx_fs->roce.ft_ip_check, spec, &flow_act, + &dest, 1); + if (IS_ERR(new_rule)) { + err = PTR_ERR(new_rule); + goto ip_rule_err; + } + rx_rule->ip = new_rule; + + memset(&flow_act, 0, sizeof(flow_act)); + memset(spec, 0, sizeof(*spec)); + + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_5); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_5, + macsec_fs_set_rx_fs_id(fs_id)); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; + new_rule = mlx5_add_flow_rules(rx_fs->roce.ft_macsec_op_check, spec, &flow_act, + NULL, 0); + if (IS_ERR(new_rule)) { + err = PTR_ERR(new_rule); + goto op_rule_err; + } + rx_rule->op = new_rule; + rx_rule->gid_idx = gid_idx; + rx_rule->fs_id = fs_id; + list_add_tail(&rx_rule->entry, rx_rules_list); + + goto out; + +op_rule_err: + mlx5_del_flow_rules(rx_rule->ip); + rx_rule->ip = NULL; +ip_rule_err: + kfree(rx_rule); +out: + kvfree(spec); + return err; +} + +static int mlx5_macsec_fs_add_roce_rule_tx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id, u16 gid_idx, + const struct sockaddr *addr, + struct list_head *tx_rules_list) +{ + u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; + struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5_modify_hdr *modify_hdr = NULL; + struct mlx5_roce_macsec_tx_rule *tx_rule; + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *new_rule; + struct mlx5_flow_spec *spec; + int err = 0; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + tx_rule = kzalloc(sizeof(*tx_rule), GFP_KERNEL); + if (!tx_rule) { + err = -ENOMEM; + goto out; + } + + set_ipaddr_spec(addr, spec, false); + + MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); + MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_A); + MLX5_SET(set_action_in, action, data, macsec_fs_set_tx_fs_id(fs_id)); + MLX5_SET(set_action_in, action, offset, 0); + MLX5_SET(set_action_in, action, length, 32); + + modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC, + 1, action); + if (IS_ERR(modify_hdr)) { + err = PTR_ERR(modify_hdr); + mlx5_core_err(mdev, "Failed to alloc ROCE MACsec set modify_header_id err=%d\n", + err); + modify_hdr = NULL; + goto modify_hdr_err; + } + tx_rule->meta_modhdr = modify_hdr; + + flow_act.modify_hdr = modify_hdr; + flow_act.action =
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE; + dest.ft = tx_fs->tables.ft_crypto.t; + new_rule = mlx5_add_flow_rules(tx_fs->ft_rdma_tx, spec, &flow_act, &dest, 1); + if (IS_ERR(new_rule)) { + err = PTR_ERR(new_rule); + mlx5_core_err(mdev, "Failed to add ROCE TX rule, err=%d\n", err); + goto rule_err; + } + tx_rule->rule = new_rule; + tx_rule->gid_idx = gid_idx; + tx_rule->fs_id = fs_id; + list_add_tail(&tx_rule->entry, tx_rules_list); + + goto out; + +rule_err: + mlx5_modify_header_dealloc(mdev, tx_rule->meta_modhdr); +modify_hdr_err: + kfree(tx_rule); +out: + kvfree(spec); + return err; +} + +void mlx5_macsec_del_roce_rule(u16 gid_idx, struct mlx5_macsec_fs *macsec_fs, + struct list_head *tx_rules_list, struct list_head *rx_rules_list) +{ + struct mlx5_roce_macsec_rx_rule *rx_rule, *next_rx; + struct mlx5_roce_macsec_tx_rule *tx_rule, *next_tx; + + list_for_each_entry_safe(tx_rule, next_tx, tx_rules_list, entry) { + if (tx_rule->gid_idx == gid_idx) + macsec_fs_del_roce_rule_tx(macsec_fs->mdev, tx_rule); + } + + list_for_each_entry_safe(rx_rule, next_rx, rx_rules_list, entry) { + if (rx_rule->gid_idx == gid_idx) + macsec_fs_del_roce_rule_rx(rx_rule); + } +} +EXPORT_SYMBOL_GPL(mlx5_macsec_del_roce_rule); + +int mlx5_macsec_add_roce_rule(void *macdev, const struct sockaddr *addr, u16 gid_idx, + struct list_head *tx_rules_list, struct list_head *rx_rules_list, + struct mlx5_macsec_fs *macsec_fs) +{ + struct mlx5_macsec_device *iter, *macsec_device = NULL; + struct mlx5_core_dev *mdev = macsec_fs->mdev; + struct mlx5_fs_id *fs_id_iter; + unsigned long index = 0; + int err; + + list_for_each_entry(iter, &macsec_fs->macsec_devices_list, macsec_devices_list_entry) { + if (iter->macdev == macdev) { + macsec_device = iter; + break; + } + } + + if (!macsec_device) + return 0; + + xa_for_each(&macsec_device->tx_id_xa, index, fs_id_iter) { + err = mlx5_macsec_fs_add_roce_rule_tx(macsec_fs, fs_id_iter->id, gid_idx, addr, + tx_rules_list); + if (err) { + mlx5_core_err(mdev, "MACsec offload: Failed to add roce TX rule\n"); + goto out; + } + } + + index = 0; + xa_for_each(&macsec_device->rx_id_xa, index, fs_id_iter) { + err = mlx5_macsec_fs_add_roce_rule_rx(macsec_fs, fs_id_iter->id, gid_idx, addr, + rx_rules_list); + if (err) { + mlx5_core_err(mdev, "MACsec offload: Failed to add roce RX rule\n"); + goto out; + } + } + + return 0; +out: + mlx5_macsec_del_roce_rule(gid_idx, macsec_fs, tx_rules_list, rx_rules_list); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_macsec_add_roce_rule); + +void mlx5_macsec_add_roce_sa_rules(u32 fs_id, const struct sockaddr *addr, u16 gid_idx, + struct list_head *tx_rules_list, + struct list_head *rx_rules_list, + struct mlx5_macsec_fs *macsec_fs, bool is_tx) +{ + (is_tx) ? + mlx5_macsec_fs_add_roce_rule_tx(macsec_fs, fs_id, gid_idx, addr, + tx_rules_list) : + mlx5_macsec_fs_add_roce_rule_rx(macsec_fs, fs_id, gid_idx, addr, + rx_rules_list); +} +EXPORT_SYMBOL_GPL(mlx5_macsec_add_roce_sa_rules); + +void mlx5_macsec_del_roce_sa_rules(u32 fs_id, struct mlx5_macsec_fs *macsec_fs, + struct list_head *tx_rules_list, + struct list_head *rx_rules_list, bool is_tx) +{ + (is_tx) ?
+ macsec_fs_del_roce_rules_tx(macsec_fs, fs_id, tx_rules_list) : + macsec_fs_del_roce_rules_rx(macsec_fs, fs_id, rx_rules_list); +} +EXPORT_SYMBOL_GPL(mlx5_macsec_del_roce_sa_rules); + +void mlx5_macsec_fs_cleanup(struct mlx5_macsec_fs *macsec_fs) +{ + macsec_fs_rx_cleanup(macsec_fs); + macsec_fs_tx_cleanup(macsec_fs); + rhashtable_destroy(&macsec_fs->fs_id_hash); + rhashtable_destroy(&macsec_fs->sci_hash); + kfree(macsec_fs); +} + +struct mlx5_macsec_fs * +mlx5_macsec_fs_init(struct mlx5_core_dev *mdev) +{ + struct mlx5_macsec_fs *macsec_fs; + int err; + + macsec_fs = kzalloc(sizeof(*macsec_fs), GFP_KERNEL); + if (!macsec_fs) + return NULL; + + macsec_fs->mdev = mdev; + + err = rhashtable_init(&macsec_fs->sci_hash, &rhash_sci); + if (err) { + mlx5_core_err(mdev, "MACsec offload: Failed to init SCI hash table, err=%d\n", + err); + goto err_hash; + } + + err = rhashtable_init(&macsec_fs->fs_id_hash, &rhash_fs_id); + if (err) { + mlx5_core_err(mdev, "MACsec offload: Failed to init FS_ID hash table, err=%d\n", + err); + goto sci_hash_cleanup; + } + + err = macsec_fs_tx_init(macsec_fs); + if (err) { + mlx5_core_err(mdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err); + goto fs_id_hash_cleanup; + } + + err = macsec_fs_rx_init(macsec_fs); + if (err) { + mlx5_core_err(mdev, "MACsec offload: Failed to init rx_fs, err=%d\n", err); + goto tx_cleanup; + } + + BLOCKING_INIT_NOTIFIER_HEAD(&mdev->macsec_nh); + + return macsec_fs; + +tx_cleanup: + macsec_fs_tx_cleanup(macsec_fs); +fs_id_hash_cleanup: + rhashtable_destroy(&macsec_fs->fs_id_hash); +sci_hash_cleanup: + rhashtable_destroy(&macsec_fs->sci_hash); +err_hash: + kfree(macsec_fs); + return NULL; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h new file mode 100644 index 000000000000..34b80c3ef6a5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/ + +#ifndef __MLX5_MACSEC_STEERING_H__ +#define __MLX5_MACSEC_STEERING_H__ + +#ifdef CONFIG_MLX5_MACSEC + +/* Bits 31-30: MACsec marker, bits 15-0: MACsec id */ +#define MLX5_MACEC_RX_FS_ID_MAX USHRT_MAX /* Must be power of two */ +#define MLX5_MACSEC_RX_FS_ID_MASK MLX5_MACEC_RX_FS_ID_MAX +#define MLX5_MACSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x1) +#define MLX5_MACSEC_RX_METADAT_HANDLE(metadata) ((metadata) & MLX5_MACSEC_RX_FS_ID_MASK) + +#define MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES 16 + +struct mlx5_macsec_fs; +union mlx5_macsec_rule; + +struct mlx5_macsec_rule_attrs { + sci_t sci; + u32 macsec_obj_id; + u8 assoc_num; + int action; +}; + +struct mlx5_macsec_stats { + u64 macsec_rx_pkts; + u64 macsec_rx_bytes; + u64 macsec_rx_pkts_drop; + u64 macsec_rx_bytes_drop; + u64 macsec_tx_pkts; + u64 macsec_tx_bytes; + u64 macsec_tx_pkts_drop; + u64 macsec_tx_bytes_drop; +}; + +enum mlx5_macsec_action { + MLX5_ACCEL_MACSEC_ACTION_ENCRYPT, + MLX5_ACCEL_MACSEC_ACTION_DECRYPT, +}; + +void mlx5_macsec_fs_cleanup(struct mlx5_macsec_fs *macsec_fs); + +struct mlx5_macsec_fs * +mlx5_macsec_fs_init(struct mlx5_core_dev *mdev); + +union mlx5_macsec_rule * +mlx5_macsec_fs_add_rule(struct mlx5_macsec_fs *macsec_fs, + const struct macsec_context *ctx, + struct mlx5_macsec_rule_attrs *attrs, + u32 *sa_fs_id); + +void mlx5_macsec_fs_del_rule(struct mlx5_macsec_fs *macsec_fs, + union mlx5_macsec_rule *macsec_rule, + int action, void *macdev, u32 sa_fs_id); + +void mlx5_macsec_fs_get_stats_fill(struct mlx5_macsec_fs *macsec_fs, void *macsec_stats); +struct mlx5_macsec_stats *mlx5_macsec_fs_get_stats(struct mlx5_macsec_fs *macsec_fs); +u32 mlx5_macsec_fs_get_fs_id_from_hashtable(struct mlx5_macsec_fs *macsec_fs, sci_t *sci); + +#endif + +#endif /* __MLX5_MACSEC_STEERING_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 72ae560a1c68..a17152c1cbb2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -49,7 +49,6 @@ #include <linux/version.h> #include <net/devlink.h> #include "mlx5_core.h" -#include "thermal.h" #include "lib/eq.h" #include "fs_core.h" #include "lib/mpfs.h" @@ -73,6 +72,8 @@ #include "sf/dev/dev.h" #include "sf/sf.h" #include "mlx5_irq.h" +#include "hwmon.h" +#include "lag/lag.h" MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver"); @@ -361,9 +362,14 @@ void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *dev) } EXPORT_SYMBOL(mlx5_core_uplink_netdev_event_replay); -static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, - enum mlx5_cap_type cap_type, - enum mlx5_cap_mode cap_mode) +void mlx5_core_mp_event_replay(struct mlx5_core_dev *dev, u32 event, void *data) +{ + mlx5_blocking_notifier_call_chain(dev, event, data); +} +EXPORT_SYMBOL(mlx5_core_mp_event_replay); + +int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type, + enum mlx5_cap_mode cap_mode) { u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)]; int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); @@ -947,14 +953,36 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev) mlx5_pci_disable_device(dev); } +static void mlx5_register_hca_devcom_comp(struct mlx5_core_dev *dev) +{ + /* This component is used to sync adding core_dev to lag_dev and to sync + * changes of mlx5_adev_devices between LAG layer and other layers.
+ */ + if (!mlx5_lag_is_supported(dev)) + return; + + dev->priv.hca_devcom_comp = + mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_HCA_PORTS, + mlx5_query_nic_system_image_guid(dev), + NULL, dev); + if (IS_ERR_OR_NULL(dev->priv.hca_devcom_comp)) + mlx5_core_err(dev, "Failed to register devcom HCA component\n"); +} + +static void mlx5_unregister_hca_devcom_comp(struct mlx5_core_dev *dev) +{ + mlx5_devcom_unregister_component(dev->priv.hca_devcom_comp); +} + static int mlx5_init_once(struct mlx5_core_dev *dev) { int err; - dev->priv.devcom = mlx5_devcom_register_device(dev); - if (IS_ERR(dev->priv.devcom)) - mlx5_core_err(dev, "failed to register with devcom (0x%p)\n", - dev->priv.devcom); + dev->priv.devc = mlx5_devcom_register_device(dev); + if (IS_ERR(dev->priv.devc)) + mlx5_core_warn(dev, "failed to register devcom device %ld\n", + PTR_ERR(dev->priv.devc)); + mlx5_register_hca_devcom_comp(dev); err = mlx5_query_board_id(dev); if (err) { @@ -1089,7 +1117,8 @@ err_eq_cleanup: err_irq_cleanup: mlx5_irq_table_cleanup(dev); err_devcom: - mlx5_devcom_unregister_device(dev->priv.devcom); + mlx5_unregister_hca_devcom_comp(dev); + mlx5_devcom_unregister_device(dev->priv.devc); return err; } @@ -1118,7 +1147,8 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev) mlx5_events_cleanup(dev); mlx5_eq_table_cleanup(dev); mlx5_irq_table_cleanup(dev); - mlx5_devcom_unregister_device(dev->priv.devcom); + mlx5_unregister_hca_devcom_comp(dev); + mlx5_devcom_unregister_device(dev->priv.devc); } static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeout) @@ -1142,7 +1172,7 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou return err; } - err = mlx5_cmd_init(dev); + err = mlx5_cmd_enable(dev); if (err) { mlx5_core_err(dev, "Failed initializing command interface, aborting\n"); return err; @@ -1196,7 +1226,7 @@ stop_health_poll: mlx5_stop_health_poll(dev, boot); err_cmd_cleanup: mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); - mlx5_cmd_cleanup(dev); + mlx5_cmd_disable(dev); return err; } @@ -1207,7 +1237,7 @@ static void mlx5_function_disable(struct mlx5_core_dev *dev, bool boot) mlx5_core_disable_hca(dev, 0); mlx5_stop_health_poll(dev, boot); mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); - mlx5_cmd_cleanup(dev); + mlx5_cmd_disable(dev); } static int mlx5_function_open(struct mlx5_core_dev *dev) @@ -1406,9 +1436,9 @@ err_irq_table: static void mlx5_unload(struct mlx5_core_dev *dev) { + mlx5_eswitch_disable(dev->priv.eswitch); mlx5_devlink_traps_unregister(priv_to_devlink(dev)); mlx5_sf_dev_table_destroy(dev); - mlx5_eswitch_disable(dev->priv.eswitch); mlx5_sriov_detach(dev); mlx5_lag_remove_mdev(dev); mlx5_ec_cleanup(dev); @@ -1620,21 +1650,24 @@ static int mlx5_query_hca_caps_light(struct mlx5_core_dev *dev) return err; if (MLX5_CAP_GEN(dev, eth_net_offloads)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS); + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ETHERNET_OFFLOADS, + HCA_CAP_OPMOD_GET_CUR); if (err) return err; } if (MLX5_CAP_GEN(dev, nic_flow_table) || MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) { - err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE); + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_FLOW_TABLE, + HCA_CAP_OPMOD_GET_CUR); if (err) return err; } if (MLX5_CAP_GEN_64(dev, general_obj_types) & MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) { - err = mlx5_core_get_caps(dev, MLX5_CAP_VDPA_EMULATION); + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_VDPA_EMULATION, + HCA_CAP_OPMOD_GET_CUR); if (err) return err; } @@ 
-1714,7 +1747,6 @@ static const int types[] = { MLX5_CAP_FLOW_TABLE, MLX5_CAP_ESWITCH_FLOW_TABLE, MLX5_CAP_ESWITCH, - MLX5_CAP_VECTOR_CALC, MLX5_CAP_QOS, MLX5_CAP_DEBUG, MLX5_CAP_DEV_MEM, @@ -1723,7 +1755,6 @@ static const int types[] = { MLX5_CAP_VDPA_EMULATION, MLX5_CAP_IPSEC, MLX5_CAP_PORT_SELECTION, - MLX5_CAP_DEV_SHAMPO, MLX5_CAP_MACSEC, MLX5_CAP_ADV_VIRTUALIZATION, MLX5_CAP_CRYPTO, @@ -1797,6 +1828,12 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) debugfs_create_file("vhca_id", 0400, priv->dbg.dbg_root, dev, &vhca_id_fops); INIT_LIST_HEAD(&priv->traps); + err = mlx5_cmd_init(dev); + if (err) { + mlx5_core_err(dev, "Failed initializing cmdif SW structs, aborting\n"); + goto err_cmd_init; + } + err = mlx5_tout_init(dev); if (err) { mlx5_core_err(dev, "Failed initializing timeouts, aborting\n"); @@ -1842,6 +1879,8 @@ err_pagealloc_init: err_health_init: mlx5_tout_cleanup(dev); err_timeout_init: + mlx5_cmd_cleanup(dev); +err_cmd_init: debugfs_remove(dev->priv.dbg.dbg_root); mutex_destroy(&priv->pgdir_mutex); mutex_destroy(&priv->alloc_mutex); @@ -1864,6 +1903,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev) mlx5_pagealloc_cleanup(dev); mlx5_health_cleanup(dev); mlx5_tout_cleanup(dev); + mlx5_cmd_cleanup(dev); debugfs_remove_recursive(dev->priv.dbg.dbg_root); mutex_destroy(&priv->pgdir_mutex); mutex_destroy(&priv->alloc_mutex); @@ -1921,9 +1961,9 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id) if (err) dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err); - err = mlx5_thermal_init(dev); + err = mlx5_hwmon_dev_register(dev); if (err) - dev_err(&pdev->dev, "mlx5_thermal_init failed with error code %d\n", err); + mlx5_core_err(dev, "mlx5_hwmon_dev_register failed with error code %d\n", err); pci_save_state(pdev); devlink_register(devlink); @@ -1955,7 +1995,7 @@ static void remove_one(struct pci_dev *pdev) mlx5_drain_health_wq(dev); devlink_unregister(devlink); mlx5_sriov_disable(pdev, false); - mlx5_thermal_uninit(dev); + mlx5_hwmon_dev_unregister(dev); mlx5_crdump_disable(dev); mlx5_uninit_one(dev); mlx5_pci_close(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 682d3dc00dd1..6b14e347d914 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -41,6 +41,7 @@ #include <linux/mlx5/cq.h> #include <linux/mlx5/fs.h> #include <linux/mlx5/driver.h> +#include "lib/devcom.h" extern uint mlx5_core_debug_mask; @@ -97,6 +98,22 @@ do { \ __func__, __LINE__, current->pid, \ ##__VA_ARGS__) +#define ACCESS_KEY_LEN 32 +#define FT_ID_FT_TYPE_OFFSET 24 + +struct mlx5_cmd_allow_other_vhca_access_attr { + u16 obj_type; + u32 obj_id; + u8 access_key[ACCESS_KEY_LEN]; +}; + +struct mlx5_cmd_alias_obj_create_attr { + u32 obj_id; + u16 vhca_id; + u16 obj_type; + u8 access_key[ACCESS_KEY_LEN]; +}; + static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...) 
{ struct device *device = dev->device; @@ -143,6 +160,8 @@ enum mlx5_semaphore_space_address { #define MLX5_DEFAULT_PROF 2 #define MLX5_SF_PROF 3 +#define MLX5_NUM_FW_CMD_THREADS 8 +#define MLX5_DEV_MAX_WQS MLX5_NUM_FW_CMD_THREADS static inline int mlx5_flexible_inlen(struct mlx5_core_dev *dev, size_t fixed, size_t item_size, size_t num_items, @@ -174,10 +193,16 @@ static inline int mlx5_flexible_inlen(struct mlx5_core_dev *dev, size_t fixed, #define MLX5_FLEXIBLE_INLEN(dev, fixed, item_size, num_items) \ mlx5_flexible_inlen(dev, fixed, item_size, num_items, __func__, __LINE__) +int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type); +int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type, + enum mlx5_cap_mode cap_mode); int mlx5_query_hca_caps(struct mlx5_core_dev *dev); int mlx5_query_board_id(struct mlx5_core_dev *dev); +int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num); int mlx5_cmd_init(struct mlx5_core_dev *dev); void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); +int mlx5_cmd_enable(struct mlx5_core_dev *dev); +void mlx5_cmd_disable(struct mlx5_core_dev *dev); void mlx5_cmd_set_state(struct mlx5_core_dev *dev, enum mlx5_cmdif_state cmdif_state); int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id); @@ -242,10 +267,6 @@ int mlx5_register_device(struct mlx5_core_dev *dev); void mlx5_unregister_device(struct mlx5_core_dev *dev); void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev); bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev); -struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev); -void mlx5_dev_list_lock(void); -void mlx5_dev_list_unlock(void); -int mlx5_dev_list_trylock(void); void mlx5_fw_reporters_create(struct mlx5_core_dev *dev); int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size); @@ -284,14 +305,12 @@ static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev) { int ret; - mlx5_dev_list_lock(); + mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp); ret = mlx5_rescan_drivers_locked(dev); - mlx5_dev_list_unlock(); + mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp); return ret; } -void mlx5_lag_update(struct mlx5_core_dev *dev); - enum { MLX5_NIC_IFC_FULL = 0, MLX5_NIC_IFC_DISABLED = 1, @@ -325,7 +344,6 @@ int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap #define mlx5_vport_get_other_func_general_cap(dev, vport, out) \ mlx5_vport_get_other_func_cap(dev, vport, out, MLX5_CAP_GENERAL) -void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work); static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); @@ -337,6 +355,12 @@ bool mlx5_eth_supported(struct mlx5_core_dev *dev); bool mlx5_rdma_supported(struct mlx5_core_dev *dev); bool mlx5_vnet_supported(struct mlx5_core_dev *dev); bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev); +int mlx5_cmd_allow_other_vhca_access(struct mlx5_core_dev *dev, + struct mlx5_cmd_allow_other_vhca_access_attr *attr); +int mlx5_cmd_alias_obj_create(struct mlx5_core_dev *dev, + struct mlx5_cmd_alias_obj_create_attr *alias_attr, + u32 *obj_id); +int mlx5_cmd_alias_obj_destroy(struct mlx5_core_dev *dev, u32 obj_id, u16 obj_type); static inline u16 mlx5_core_ec_vf_vport_base(const struct mlx5_core_dev *dev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h index aa403a5ea34e..1088114e905d 
100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h @@ -29,9 +29,9 @@ void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq); struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx, struct irq_affinity_desc *af_desc, struct cpu_rmap **rmap); -int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs, - struct mlx5_irq **irqs, struct cpu_rmap **rmap); -void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs); +struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu, + u16 vecidx, struct cpu_rmap **rmap); +void mlx5_irq_release_vector(struct mlx5_irq *irq); int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb); int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb); struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq); @@ -39,17 +39,17 @@ int mlx5_irq_get_index(struct mlx5_irq *irq); struct mlx5_irq_pool; #ifdef CONFIG_MLX5_SF -int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs, - struct mlx5_irq **irqs); +struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev, + struct cpumask *used_cpus, u16 vecidx); struct mlx5_irq *mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc); -void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev, struct mlx5_irq **irqs, - int num_irqs); +void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq); #else -static inline int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs, - struct mlx5_irq **irqs) +static inline +struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev, + struct cpumask *used_cpus, u16 vecidx) { - return -EOPNOTSUPP; + return ERR_PTR(-EOPNOTSUPP); } static inline struct mlx5_irq * @@ -58,7 +58,9 @@ mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc * return ERR_PTR(-EOPNOTSUPP); } -static inline void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev, - struct mlx5_irq **irqs, int num_irqs) {} +static inline +void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq) +{ +} #endif #endif /* __MLX5_IRQ_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index cba2a4afb5fd..4dcf995cb1a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -28,7 +28,7 @@ struct mlx5_irq { struct atomic_notifier_head nh; cpumask_var_t mask; - char name[MLX5_MAX_IRQ_NAME]; + char name[MLX5_MAX_IRQ_FORMATTED_NAME]; struct mlx5_irq_pool *pool; int refcount; struct msi_map map; @@ -259,8 +259,11 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i, int err; irq = kzalloc(sizeof(*irq), GFP_KERNEL); - if (!irq) + if (!irq || !zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) { + kfree(irq); return ERR_PTR(-ENOMEM); + } + if (!i || !pci_msix_can_alloc_dyn(dev->pdev)) { /* The vector at index 0 is always statically allocated. 
If * dynamic irq is not supported all vectors are statically @@ -289,19 +292,15 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i, else irq_sf_set_name(pool, name, i); ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh); - snprintf(irq->name, MLX5_MAX_IRQ_NAME, - "%s@pci:%s", name, pci_name(dev->pdev)); + snprintf(irq->name, MLX5_MAX_IRQ_FORMATTED_NAME, + MLX5_IRQ_NAME_FORMAT_STR, name, pci_name(dev->pdev)); err = request_irq(irq->map.virq, irq_int_handler, 0, irq->name, &irq->nh); if (err) { mlx5_core_err(dev, "Failed to request irq. err = %d\n", err); goto err_req_irq; } - if (!zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) { - mlx5_core_warn(dev, "zalloc_cpumask_var failed\n"); - err = -ENOMEM; - goto err_cpumask; - } + if (af_desc) { cpumask_copy(irq->mask, &af_desc->mask); irq_set_affinity_and_hint(irq->map.virq, irq->mask); @@ -319,8 +318,6 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i, err_xa: if (af_desc) irq_update_affinity_hint(irq->map.virq, NULL); - free_cpumask_var(irq->mask); -err_cpumask: free_irq(irq->map.virq, &irq->nh); err_req_irq: #ifdef CONFIG_RFS_ACCEL @@ -333,6 +330,7 @@ err_irq_rmap: if (i && pci_msix_can_alloc_dyn(dev->pdev)) pci_msix_free_irq(dev->pdev, irq->map); err_alloc_irq: + free_cpumask_var(irq->mask); kfree(irq); return ERR_PTR(err); } @@ -432,19 +430,10 @@ static struct mlx5_irq_pool *ctrl_irq_pool_get(struct mlx5_core_dev *dev) return pool ? pool : irq_table->pcif_pool; } -/** - * mlx5_irqs_release - release one or more IRQs back to the system. - * @irqs: IRQs to be released. - * @nirqs: number of IRQs to be released. - */ -static void mlx5_irqs_release(struct mlx5_irq **irqs, int nirqs) +static void _mlx5_irq_release(struct mlx5_irq *irq) { - int i; - - for (i = 0; i < nirqs; i++) { - synchronize_irq(irqs[i]->map.virq); - mlx5_irq_put(irqs[i]); - } + synchronize_irq(irq->map.virq); + mlx5_irq_put(irq); } /** @@ -453,7 +442,7 @@ static void mlx5_irqs_release(struct mlx5_irq **irqs, int nirqs) */ void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq) { - mlx5_irqs_release(&ctrl_irq, 1); + _mlx5_irq_release(ctrl_irq); } /** @@ -569,53 +558,42 @@ void mlx5_msix_free(struct mlx5_core_dev *dev, struct msi_map map) EXPORT_SYMBOL(mlx5_msix_free); /** - * mlx5_irqs_release_vectors - release one or more IRQs back to the system. - * @irqs: IRQs to be released. - * @nirqs: number of IRQs to be released. + * mlx5_irq_release_vector - release one IRQ back to the system. + * @irq: the irq to release. */ -void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs) +void mlx5_irq_release_vector(struct mlx5_irq *irq) { - mlx5_irqs_release(irqs, nirqs); + _mlx5_irq_release(irq); } /** - * mlx5_irqs_request_vectors - request one or more IRQs for mlx5 device. - * @dev: mlx5 device that is requesting the IRQs. - * @cpus: CPUs array for binding the IRQs - * @nirqs: number of IRQs to request. - * @irqs: an output array of IRQs pointers. + * mlx5_irq_request_vector - request one IRQ for mlx5 device. + * @dev: mlx5 device that is requesting the IRQ. + * @cpu: CPU to bind the IRQ to. + * @vecidx: vector index to request an IRQ for. + * @rmap: pointer to reverse map pointer for completion interrupts * * Each IRQ is bound to at most 1 CPU. - * This function is requests nirqs IRQs, starting from @vecidx. + * This function requests one IRQ for the given @vecidx. * - * This function returns the number of IRQs requested, (which might be smaller than - * @nirqs), if successful, or a negative error code in case of an error.
+ * This function returns a pointer to the irq on success, or an error pointer + * in case of an error. */ -int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs, - struct mlx5_irq **irqs, struct cpu_rmap **rmap) +struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu, + u16 vecidx, struct cpu_rmap **rmap) { struct mlx5_irq_table *table = mlx5_irq_table_get(dev); struct mlx5_irq_pool *pool = table->pcif_pool; struct irq_affinity_desc af_desc; - struct mlx5_irq *irq; int offset = 1; - int i; if (!pool->xa_num_irqs.max) offset = 0; af_desc.is_managed = false; - for (i = 0; i < nirqs; i++) { - cpumask_clear(&af_desc.mask); - cpumask_set_cpu(cpus[i], &af_desc.mask); - irq = mlx5_irq_request(dev, i + offset, &af_desc, rmap); - if (IS_ERR(irq)) - break; - irqs[i] = irq; - } - - return i ? i : PTR_ERR(irq); + cpumask_clear(&af_desc.mask); + cpumask_set_cpu(cpu, &af_desc.mask); + return mlx5_irq_request(dev, vecidx + offset, &af_desc, rmap); } static struct mlx5_irq_pool * diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h index d3a77a0ab848..c4d377f8df30 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h @@ -7,6 +7,9 @@ #include <linux/mlx5/driver.h> #define MLX5_MAX_IRQ_NAME (32) +#define MLX5_IRQ_NAME_FORMAT_STR ("%s@pci:%s") +#define MLX5_MAX_IRQ_FORMATTED_NAME \ + (MLX5_MAX_IRQ_NAME + sizeof(MLX5_IRQ_NAME_FORMAT_STR)) /* max irq_index is 2047, so four chars */ #define MLX5_MAX_IRQ_IDX_CHARS (4) #define MLX5_EQ_REFS_PER_IRQ (2) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 0daeb4b72cca..7d8c732818f2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -271,7 +271,7 @@ void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu, } EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu); -static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num) +int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num) { u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pmlp_reg)]; @@ -1098,10 +1098,11 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = { [MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000, [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = 100000, [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000, - [MLX5E_400GAUI_8] = 400000, + [MLX5E_400GAUI_8_400GBASE_CR8] = 400000, [MLX5E_100GAUI_1_100GBASE_CR_KR] = 100000, [MLX5E_200GAUI_2_200GBASE_CR2_KR2] = 200000, [MLX5E_400GAUI_4_400GBASE_CR4_KR4] = 400000, + [MLX5E_800GAUI_8_800GBASE_CR8_KR8] = 800000, }; int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c index 8e2abbab05f0..c93492b67788 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c @@ -14,17 +14,22 @@ struct mlx5_sf_dev_table { struct xarray devices; - unsigned int max_sfs; phys_addr_t base_address; u64 sf_bar_length; struct notifier_block nb; - struct mutex table_lock; /* Serializes sf life cycle and vhca state change handler */ struct workqueue_struct *active_wq; struct work_struct work; u8 stop_active_wq:1; struct mlx5_core_dev *dev; }; +struct mlx5_sf_dev_active_work_ctx { + struct work_struct work; + struct mlx5_vhca_state_event event; + 
struct mlx5_sf_dev_table *table; + int sf_index; +}; + static bool mlx5_sf_dev_supported(const struct mlx5_core_dev *dev) { return MLX5_CAP_GEN(dev, sf) && mlx5_vhca_event_supported(dev); @@ -110,12 +115,6 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u16 fn_id, sf_dev->parent_mdev = dev; sf_dev->fn_id = fn_id; - if (!table->max_sfs) { - mlx5_adev_idx_free(id); - kfree(sf_dev); - err = -EOPNOTSUPP; - goto add_err; - } sf_dev->bar_base_addr = table->base_address + (sf_index * table->sf_bar_length); trace_mlx5_sf_dev_add(dev, sf_dev, id); @@ -129,7 +128,7 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u16 fn_id, err = auxiliary_device_add(&sf_dev->adev); if (err) { - put_device(&sf_dev->adev.dev); + auxiliary_device_uninit(&sf_dev->adev); goto add_err; } @@ -167,12 +166,11 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_ if (!max_functions) return 0; - base_id = MLX5_CAP_GEN(table->dev, sf_base_id); + base_id = mlx5_sf_start_function_id(table->dev); if (event->function_id < base_id || event->function_id >= (base_id + max_functions)) return 0; sf_index = event->function_id - base_id; - mutex_lock(&table->table_lock); sf_dev = xa_load(&table->devices, sf_index); switch (event->new_vhca_state) { case MLX5_VHCA_STATE_INVALID: @@ -185,7 +183,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_ mlx5_sf_dev_del(table->dev, sf_dev, sf_index); else mlx5_core_err(table->dev, - "SF DEV: teardown state for invalid dev index=%d fn_id=0x%x\n", + "SF DEV: teardown state for invalid dev index=%d sfnum=0x%x\n", sf_index, event->sw_function_id); break; case MLX5_VHCA_STATE_ACTIVE: @@ -196,7 +194,6 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_ default: break; } - mutex_unlock(&table->table_lock); return 0; } @@ -209,7 +206,7 @@ static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table) int i; max_functions = mlx5_sf_max_functions(dev); - function_id = MLX5_CAP_GEN(dev, sf_base_id); + function_id = mlx5_sf_start_function_id(dev); /* Arm the vhca context as the vhca event notifier */ for (i = 0; i < max_functions; i++) { err = mlx5_vhca_event_arm(dev, function_id); @@ -221,20 +218,49 @@ static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table) return 0; } -static void mlx5_sf_dev_add_active_work(struct work_struct *work) +static void mlx5_sf_dev_add_active_work(struct work_struct *_work) { - struct mlx5_sf_dev_table *table = container_of(work, struct mlx5_sf_dev_table, work); + struct mlx5_sf_dev_active_work_ctx *work_ctx; + + work_ctx = container_of(_work, struct mlx5_sf_dev_active_work_ctx, work); + if (work_ctx->table->stop_active_wq) + goto out; + /* Don't probe a device which is already probed */ + if (!xa_load(&work_ctx->table->devices, work_ctx->sf_index)) + mlx5_sf_dev_add(work_ctx->table->dev, work_ctx->sf_index, + work_ctx->event.function_id, work_ctx->event.sw_function_id); + /* There is a race where the SF became inactive after the query + * above. e.g.: the query returns that the state of the + * SF is active, and after that the eswitch manager set it to + * inactive. + * This case cannot be managed in SW, since the probing of the + * SF is on one system, and the inactivation is on a different + * system. + * If the inactivation is done after the SF performs init_hca(), + * the SF will be fully probed and then removed. If it was + * done before init_hca(), the SF probe will fail.
+ */ +out: + kfree(work_ctx); +} + +/* In case SFs are generated externally, probe active SFs */ +static void mlx5_sf_dev_queue_active_works(struct work_struct *_work) +{ + struct mlx5_sf_dev_table *table = container_of(_work, struct mlx5_sf_dev_table, work); u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {}; + struct mlx5_sf_dev_active_work_ctx *work_ctx; struct mlx5_core_dev *dev = table->dev; u16 max_functions; u16 function_id; u16 sw_func_id; int err = 0; + int wq_idx; u8 state; int i; max_functions = mlx5_sf_max_functions(dev); - function_id = MLX5_CAP_GEN(dev, sf_base_id); + function_id = mlx5_sf_start_function_id(dev); for (i = 0; i < max_functions; i++, function_id++) { if (table->stop_active_wq) return; @@ -249,27 +275,22 @@ static void mlx5_sf_dev_add_active_work(struct work_struct *work) continue; sw_func_id = MLX5_GET(query_vhca_state_out, out, vhca_state_context.sw_function_id); - mutex_lock(&table->table_lock); - /* Don't probe device which is already probe */ - if (!xa_load(&table->devices, i)) - mlx5_sf_dev_add(dev, i, function_id, sw_func_id); - /* There is a race where SF got inactive after the query - * above. e.g.: the query returns that the state of the - * SF is active, and after that the eswitch manager set it to - * inactive. - * This case cannot be managed in SW, since the probing of the - * SF is on one system, and the inactivation is on a different - * system. - * If the inactive is done after the SF perform init_hca(), - * the SF will fully probe and then removed. If it was - * done before init_hca(), the SF probe will fail. - */ - mutex_unlock(&table->table_lock); + work_ctx = kzalloc(sizeof(*work_ctx), GFP_KERNEL); + if (!work_ctx) + return; + + INIT_WORK(&work_ctx->work, &mlx5_sf_dev_add_active_work); + work_ctx->event.function_id = function_id; + work_ctx->event.sw_function_id = sw_func_id; + work_ctx->table = table; + work_ctx->sf_index = i; + wq_idx = work_ctx->event.function_id % MLX5_DEV_MAX_WQS; + mlx5_vhca_events_work_enqueue(dev, wq_idx, &work_ctx->work); } } /* In case SFs are generated externally, probe active SFs */ -static int mlx5_sf_dev_queue_active_work(struct mlx5_sf_dev_table *table) +static int mlx5_sf_dev_create_active_works(struct mlx5_sf_dev_table *table) { if (MLX5_CAP_GEN(table->dev, eswitch_manager)) return 0; /* the table is local */ @@ -280,12 +301,12 @@ static int mlx5_sf_dev_queue_active_work(struct mlx5_sf_dev_table *table) table->active_wq = create_singlethread_workqueue("mlx5_active_sf"); if (!table->active_wq) return -ENOMEM; - INIT_WORK(&table->work, &mlx5_sf_dev_add_active_work); + INIT_WORK(&table->work, &mlx5_sf_dev_queue_active_works); queue_work(table->active_wq, &table->work); return 0; } -static void mlx5_sf_dev_destroy_active_work(struct mlx5_sf_dev_table *table) +static void mlx5_sf_dev_destroy_active_works(struct mlx5_sf_dev_table *table) { if (table->active_wq) { table->stop_active_wq = true; @@ -296,10 +317,9 @@ static void mlx5_sf_dev_destroy_active_work(struct mlx5_sf_dev_table *table) void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev) { struct mlx5_sf_dev_table *table; - unsigned int max_sfs; int err; - if (!mlx5_sf_dev_supported(dev) || !mlx5_vhca_event_supported(dev)) + if (!mlx5_sf_dev_supported(dev)) return; table = kzalloc(sizeof(*table), GFP_KERNEL); @@ -310,37 +330,30 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev) table->nb.notifier_call = mlx5_sf_dev_state_change_handler; table->dev = dev; - if (MLX5_CAP_GEN(dev, max_num_sf)) - max_sfs = MLX5_CAP_GEN(dev, max_num_sf); - else - max_sfs 
= 1 << MLX5_CAP_GEN(dev, log_max_sf); table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12); table->base_address = pci_resource_start(dev->pdev, 2); - table->max_sfs = max_sfs; xa_init(&table->devices); - mutex_init(&table->table_lock); dev->priv.sf_dev_table = table; err = mlx5_vhca_event_notifier_register(dev, &table->nb); if (err) goto vhca_err; - err = mlx5_sf_dev_queue_active_work(table); + err = mlx5_sf_dev_create_active_works(table); if (err) goto add_active_err; err = mlx5_sf_dev_vhca_arm_all(table); if (err) goto arm_err; - mlx5_core_dbg(dev, "SF DEV: max sf devices=%d\n", max_sfs); return; arm_err: - mlx5_sf_dev_destroy_active_work(table); + mlx5_sf_dev_destroy_active_works(table); add_active_err: mlx5_vhca_event_notifier_unregister(dev, &table->nb); + mlx5_vhca_event_work_queues_flush(dev); vhca_err: - table->max_sfs = 0; kfree(table); dev->priv.sf_dev_table = NULL; table_err: @@ -365,9 +378,9 @@ void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev) if (!table) return; - mlx5_sf_dev_destroy_active_work(table); + mlx5_sf_dev_destroy_active_works(table); mlx5_vhca_event_notifier_unregister(dev, &table->nb); - mutex_destroy(&table->table_lock); + mlx5_vhca_event_work_queues_flush(dev); /* Now that event handler is not running, it is safe to destroy * the sf device without race. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h index 2a66a427ef15..b99131e95e37 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h @@ -19,6 +19,12 @@ struct mlx5_sf_dev { u16 fn_id; }; +struct mlx5_sf_peer_devlink_event_ctx { + u16 fn_id; + struct devlink *devlink; + int err; +}; + void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev); void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c index 8fe82f1191bb..169c2c68ed5c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c @@ -8,6 +8,20 @@ #include "dev.h" #include "devlink.h" +static int mlx5_core_peer_devlink_set(struct mlx5_sf_dev *sf_dev, struct devlink *devlink) +{ + struct mlx5_sf_peer_devlink_event_ctx event_ctx = { + .fn_id = sf_dev->fn_id, + .devlink = devlink, + }; + int ret; + + ret = mlx5_blocking_notifier_call_chain(sf_dev->parent_mdev, + MLX5_DRIVER_EVENT_SF_PEER_DEVLINK, + &event_ctx); + return ret == NOTIFY_OK ? 
event_ctx.err : 0; +} + static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id) { struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev); @@ -54,9 +68,21 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia mlx5_core_warn(mdev, "mlx5_init_one err=%d\n", err); goto init_one_err; } + + err = mlx5_core_peer_devlink_set(sf_dev, devlink); + if (err) { + mlx5_core_warn(mdev, "mlx5_core_peer_devlink_set err=%d\n", err); + goto peer_devlink_set_err; + } + devlink_register(devlink); return 0; +peer_devlink_set_err: + if (mlx5_dev_is_lightweight(sf_dev->mdev)) + mlx5_uninit_one_light(sf_dev->mdev); + else + mlx5_uninit_one(sf_dev->mdev); init_one_err: iounmap(mdev->iseg); remap_err: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c index 6a3fa30b2bf2..6c11e075cab0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c @@ -12,7 +12,7 @@ #include "diag/sf_tracepoint.h" struct mlx5_sf { - struct devlink_port dl_port; + struct mlx5_devlink_port dl_port; unsigned int port_index; u32 controller; u16 id; @@ -20,43 +20,36 @@ struct mlx5_sf { u16 hw_state; }; +static void *mlx5_sf_by_dl_port(struct devlink_port *dl_port) +{ + struct mlx5_devlink_port *mlx5_dl_port = mlx5_devlink_port_get(dl_port); + + return container_of(mlx5_dl_port, struct mlx5_sf, dl_port); +} + struct mlx5_sf_table { struct mlx5_core_dev *dev; /* To refer from notifier context. */ - struct xarray port_indices; /* port index based lookup. */ - refcount_t refcount; - struct completion disable_complete; + struct xarray function_ids; /* function id based lookup. */ struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. 
*/ struct notifier_block esw_nb; struct notifier_block vhca_nb; + struct notifier_block mdev_nb; }; static struct mlx5_sf * -mlx5_sf_lookup_by_index(struct mlx5_sf_table *table, unsigned int port_index) -{ - return xa_load(&table->port_indices, port_index); -} - -static struct mlx5_sf * mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id) { - unsigned long index; - struct mlx5_sf *sf; - - xa_for_each(&table->port_indices, index, sf) { - if (sf->hw_fn_id == fn_id) - return sf; - } - return NULL; + return xa_load(&table->function_ids, fn_id); } -static int mlx5_sf_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf) +static int mlx5_sf_function_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf) { - return xa_insert(&table->port_indices, sf->port_index, sf, GFP_KERNEL); + return xa_insert(&table->function_ids, sf->hw_fn_id, sf, GFP_KERNEL); } -static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf) +static void mlx5_sf_function_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf) { - xa_erase(&table->port_indices, sf->port_index); + xa_erase(&table->function_ids, sf->hw_fn_id); } static struct mlx5_sf * @@ -93,7 +86,7 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw, sf->hw_state = MLX5_VHCA_STATE_ALLOCATED; sf->controller = controller; - err = mlx5_sf_id_insert(table, sf); + err = mlx5_sf_function_id_insert(table, sf); if (err) goto insert_err; @@ -111,28 +104,11 @@ id_err: static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf) { - mlx5_sf_id_erase(table, sf); mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id); trace_mlx5_sf_free(table->dev, sf->port_index, sf->controller, sf->hw_fn_id); kfree(sf); } -static struct mlx5_sf_table *mlx5_sf_table_try_get(struct mlx5_core_dev *dev) -{ - struct mlx5_sf_table *table = dev->priv.sf_table; - - if (!table) - return NULL; - - return refcount_inc_not_zero(&table->refcount) ? 
table : NULL; -} - -static void mlx5_sf_table_put(struct mlx5_sf_table *table) -{ - if (refcount_dec_and_test(&table->refcount)) - complete(&table->disable_complete); -} - static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state) { switch (hw_state) { @@ -172,26 +148,14 @@ int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port, struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink); - struct mlx5_sf_table *table; - struct mlx5_sf *sf; - int err = 0; - - table = mlx5_sf_table_try_get(dev); - if (!table) - return -EOPNOTSUPP; + struct mlx5_sf_table *table = dev->priv.sf_table; + struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port); - sf = mlx5_sf_lookup_by_index(table, dl_port->index); - if (!sf) { - err = -EOPNOTSUPP; - goto sf_err; - } mutex_lock(&table->sf_state_lock); *state = mlx5_sf_to_devlink_state(sf->hw_state); *opstate = mlx5_sf_to_devlink_opstate(sf->hw_state); mutex_unlock(&table->sf_state_lock); -sf_err: - mlx5_sf_table_put(table); - return err; + return 0; } static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf, @@ -257,26 +221,10 @@ int mlx5_devlink_sf_port_fn_state_set(struct devlink_port *dl_port, struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink); - struct mlx5_sf_table *table; - struct mlx5_sf *sf; - int err; - - table = mlx5_sf_table_try_get(dev); - if (!table) { - NL_SET_ERR_MSG_MOD(extack, - "Port state set is only supported in eswitch switchdev mode or SF ports are disabled."); - return -EOPNOTSUPP; - } - sf = mlx5_sf_lookup_by_index(table, dl_port->index); - if (!sf) { - err = -ENODEV; - goto out; - } + struct mlx5_sf_table *table = dev->priv.sf_table; + struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port); - err = mlx5_sf_state_set(dev, table, sf, state, extack); -out: - mlx5_sf_table_put(table); - return err; + return mlx5_sf_state_set(dev, table, sf, state, extack); } static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table, @@ -292,11 +240,11 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table, if (IS_ERR(sf)) return PTR_ERR(sf); - err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, sf->hw_fn_id, - new_attr->controller, new_attr->sfnum); + err = mlx5_eswitch_load_sf_vport(esw, sf->hw_fn_id, MLX5_VPORT_UC_ADDR_CHANGE, + &sf->dl_port, new_attr->controller, new_attr->sfnum); if (err) goto esw_err; - *dl_port = &sf->dl_port; + *dl_port = &sf->dl_port.dl_port; trace_mlx5_sf_add(dev, sf->port_index, sf->controller, sf->hw_fn_id, new_attr->sfnum); return 0; @@ -335,32 +283,45 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_ return 0; } +static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev) +{ + return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) && + mlx5_sf_hw_table_supported(dev); +} + int mlx5_devlink_sf_port_new(struct devlink *devlink, const struct devlink_port_new_attrs *new_attr, struct netlink_ext_ack *extack, struct devlink_port **dl_port) { struct mlx5_core_dev *dev = devlink_priv(devlink); - struct mlx5_sf_table *table; + struct mlx5_sf_table *table = dev->priv.sf_table; int err; err = mlx5_sf_new_check_attr(dev, new_attr, extack); if (err) return err; - table = mlx5_sf_table_try_get(dev); - if (!table) { + if (!mlx5_sf_table_supported(dev)) { + NL_SET_ERR_MSG_MOD(extack, "SF ports are not supported."); + return -EOPNOTSUPP; + } + + if (!is_mdev_switchdev_mode(dev)) { NL_SET_ERR_MSG_MOD(extack, - "Port add is only supported in eswitch 
switchdev mode or SF ports are disabled."); + "SF ports are only supported in eswitch switchdev mode."); return -EOPNOTSUPP; } - err = mlx5_sf_add(dev, table, new_attr, extack, dl_port); - mlx5_sf_table_put(table); - return err; + + return mlx5_sf_add(dev, table, new_attr, extack, dl_port); } static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf) { + mutex_lock(&table->sf_state_lock); + + mlx5_sf_function_id_erase(table, sf); + if (sf->hw_state == MLX5_VHCA_STATE_ALLOCATED) { mlx5_sf_free(table, sf); } else if (mlx5_sf_is_active(sf)) { @@ -376,6 +337,16 @@ static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf) mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id); kfree(sf); } + + mutex_unlock(&table->sf_state_lock); +} + +static void mlx5_sf_del(struct mlx5_sf_table *table, struct mlx5_sf *sf) +{ + struct mlx5_eswitch *esw = table->dev->priv.eswitch; + + mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id); + mlx5_sf_dealloc(table, sf); } int mlx5_devlink_sf_port_del(struct devlink *devlink, @@ -383,32 +354,11 @@ int mlx5_devlink_sf_port_del(struct devlink *devlink, struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); - struct mlx5_eswitch *esw = dev->priv.eswitch; - struct mlx5_sf_table *table; - struct mlx5_sf *sf; - int err = 0; - - table = mlx5_sf_table_try_get(dev); - if (!table) { - NL_SET_ERR_MSG_MOD(extack, - "Port del is only supported in eswitch switchdev mode or SF ports are disabled."); - return -EOPNOTSUPP; - } - sf = mlx5_sf_lookup_by_index(table, dl_port->index); - if (!sf) { - err = -ENODEV; - goto sf_err; - } - - mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id); - mlx5_sf_id_erase(table, sf); + struct mlx5_sf_table *table = dev->priv.sf_table; + struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port); - mutex_lock(&table->sf_state_lock); - mlx5_sf_dealloc(table, sf); - mutex_unlock(&table->sf_state_lock); -sf_err: - mlx5_sf_table_put(table); - return err; + mlx5_sf_del(table, sf); + return 0; } static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state) @@ -433,14 +383,10 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v bool update = false; struct mlx5_sf *sf; - table = mlx5_sf_table_try_get(table->dev); - if (!table) - return 0; - mutex_lock(&table->sf_state_lock); sf = mlx5_sf_lookup_by_function_id(table, event->function_id); if (!sf) - goto sf_err; + goto unlock; /* When driver is attached or detached to a function, an event * notifies such state change. @@ -450,46 +396,18 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v sf->hw_state = event->new_vhca_state; trace_mlx5_sf_update_state(table->dev, sf->port_index, sf->controller, sf->hw_fn_id, sf->hw_state); -sf_err: +unlock: mutex_unlock(&table->sf_state_lock); - mlx5_sf_table_put(table); return 0; } -static void mlx5_sf_table_enable(struct mlx5_sf_table *table) +static void mlx5_sf_del_all(struct mlx5_sf_table *table) { - init_completion(&table->disable_complete); - refcount_set(&table->refcount, 1); -} - -static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table) -{ - struct mlx5_eswitch *esw = table->dev->priv.eswitch; unsigned long index; struct mlx5_sf *sf; - /* At this point, no new user commands can start and no vhca event can - * arrive. It is safe to destroy all user created SFs. 
- */ - xa_for_each(&table->port_indices, index, sf) { - mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id); - mlx5_sf_id_erase(table, sf); - mlx5_sf_dealloc(table, sf); - } -} - -static void mlx5_sf_table_disable(struct mlx5_sf_table *table) -{ - if (!refcount_read(&table->refcount)) - return; - - /* Balances with refcount_set; drop the reference so that new user cmd cannot start - * and new vhca event handler cannot run. - */ - mlx5_sf_table_put(table); - wait_for_completion(&table->disable_complete); - - mlx5_sf_deactivate_all(table); + xa_for_each(&table->function_ids, index, sf) + mlx5_sf_del(table, sf); } static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data) @@ -498,11 +416,8 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi const struct mlx5_esw_event_info *mode = data; switch (mode->new_mode) { - case MLX5_ESWITCH_OFFLOADS: - mlx5_sf_table_enable(table); - break; case MLX5_ESWITCH_LEGACY: - mlx5_sf_table_disable(table); + mlx5_sf_del_all(table); break; default: break; @@ -511,10 +426,29 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi return 0; } -static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev) +static int mlx5_sf_mdev_event(struct notifier_block *nb, unsigned long event, void *data) { - return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) && - mlx5_sf_hw_table_supported(dev); + struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, mdev_nb); + struct mlx5_sf_peer_devlink_event_ctx *event_ctx = data; + int ret = NOTIFY_DONE; + struct mlx5_sf *sf; + + if (event != MLX5_DRIVER_EVENT_SF_PEER_DEVLINK) + return NOTIFY_DONE; + + mutex_lock(&table->sf_state_lock); + sf = mlx5_sf_lookup_by_function_id(table, event_ctx->fn_id); + if (!sf) + goto out; + + event_ctx->err = devl_port_fn_devlink_set(&sf->dl_port.dl_port, + event_ctx->devlink); + + ret = NOTIFY_OK; +out: + mutex_unlock(&table->sf_state_lock); + return ret; } int mlx5_sf_table_init(struct mlx5_core_dev *dev) @@ -531,9 +465,8 @@ mutex_init(&table->sf_state_lock); table->dev = dev; - xa_init(&table->port_indices); + xa_init(&table->function_ids); dev->priv.sf_table = table; - refcount_set(&table->refcount, 0); table->esw_nb.notifier_call = mlx5_sf_esw_event; err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb); if (err) @@ -544,6 +477,9 @@ if (err) goto vhca_err; + table->mdev_nb.notifier_call = mlx5_sf_mdev_event; + mlx5_blocking_notifier_register(dev, &table->mdev_nb); + return 0; vhca_err: @@ -562,10 +498,10 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev) if (!table) return; + mlx5_blocking_notifier_unregister(dev, &table->mdev_nb); mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb); mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb); - WARN_ON(refcount_read(&table->refcount)); mutex_destroy(&table->sf_state_lock); - WARN_ON(!xa_empty(&table->port_indices)); + WARN_ON(!xa_empty(&table->function_ids)); kfree(table); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c index 17aa348989cb..1f613320fe07 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c @@ -9,6 +9,7 @@ #include "mlx5_core.h" #include "eswitch.h" #include "diag/sf_tracepoint.h" +#include "devlink.h" struct mlx5_sf_hw {
u32 usr_sfnum; @@ -243,31 +244,61 @@ static void mlx5_sf_hw_table_hwc_cleanup(struct mlx5_sf_hwc_table *hwc) kfree(hwc->sfs); } +static void mlx5_sf_hw_table_res_unregister(struct mlx5_core_dev *dev) +{ + devl_resources_unregister(priv_to_devlink(dev)); +} + +static int mlx5_sf_hw_table_res_register(struct mlx5_core_dev *dev, u16 max_fn, + u16 max_ext_fn) +{ + struct devlink_resource_size_params size_params; + struct devlink *devlink = priv_to_devlink(dev); + int err; + + devlink_resource_size_params_init(&size_params, max_fn, max_fn, 1, + DEVLINK_RESOURCE_UNIT_ENTRY); + err = devl_resource_register(devlink, "max_local_SFs", max_fn, MLX5_DL_RES_MAX_LOCAL_SFS, + DEVLINK_RESOURCE_ID_PARENT_TOP, &size_params); + if (err) + return err; + + devlink_resource_size_params_init(&size_params, max_ext_fn, max_ext_fn, 1, + DEVLINK_RESOURCE_UNIT_ENTRY); + return devl_resource_register(devlink, "max_external_SFs", max_ext_fn, + MLX5_DL_RES_MAX_EXTERNAL_SFS, DEVLINK_RESOURCE_ID_PARENT_TOP, + &size_params); +} + int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev) { struct mlx5_sf_hw_table *table; u16 max_ext_fn = 0; u16 ext_base_id = 0; - u16 max_fn = 0; u16 base_id; + u16 max_fn; int err; if (!mlx5_vhca_event_supported(dev)) return 0; - if (mlx5_sf_supported(dev)) - max_fn = mlx5_sf_max_functions(dev); + max_fn = mlx5_sf_max_functions(dev); err = mlx5_esw_sf_max_hpf_functions(dev, &max_ext_fn, &ext_base_id); if (err) return err; + if (mlx5_sf_hw_table_res_register(dev, max_fn, max_ext_fn)) + mlx5_core_dbg(dev, "failed to register max SFs resources\n"); + if (!max_fn && !max_ext_fn) return 0; table = kzalloc(sizeof(*table), GFP_KERNEL); - if (!table) - return -ENOMEM; + if (!table) { + err = -ENOMEM; + goto alloc_err; + } mutex_init(&table->table_lock); table->dev = dev; @@ -291,6 +322,8 @@ ext_err: table_err: mutex_destroy(&table->table_lock); kfree(table); +alloc_err: + mlx5_sf_hw_table_res_unregister(dev); return err; } @@ -299,12 +332,14 @@ void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev) struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; if (!table) - return; + goto res_unregister; - mutex_destroy(&table->table_lock); mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_EXTERNAL]); mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]); + mutex_destroy(&table->table_lock); kfree(table); +res_unregister: + mlx5_sf_hw_table_res_unregister(dev); } static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c index d908fba968f0..cda01ba441ae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c @@ -21,6 +21,15 @@ struct mlx5_vhca_event_work { struct mlx5_vhca_state_event event; }; +struct mlx5_vhca_event_handler { + struct workqueue_struct *wq; +}; + +struct mlx5_vhca_events { + struct mlx5_core_dev *dev; + struct mlx5_vhca_event_handler handler[MLX5_DEV_MAX_WQS]; +}; + int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, u32 *out, u32 outlen) { u32 in[MLX5_ST_SZ_DW(query_vhca_state_in)] = {}; @@ -99,6 +108,11 @@ static void mlx5_vhca_state_work_handler(struct work_struct *_work) kfree(work); } +void mlx5_vhca_events_work_enqueue(struct mlx5_core_dev *dev, int idx, struct work_struct *work) +{ + queue_work(dev->priv.vhca_events->handler[idx].wq, work); +} + static int mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned
long type, void *data) { @@ -106,6 +120,7 @@ mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, v mlx5_nb_cof(nb, struct mlx5_vhca_state_notifier, nb); struct mlx5_vhca_event_work *work; struct mlx5_eqe *eqe = data; + int wq_idx; work = kzalloc(sizeof(*work), GFP_ATOMIC); if (!work) @@ -113,7 +128,8 @@ mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, v INIT_WORK(&work->work, &mlx5_vhca_state_work_handler); work->notifier = notifier; work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id); - mlx5_events_work_enqueue(notifier->dev, &work->work); + wq_idx = work->event.function_id % MLX5_DEV_MAX_WQS; + mlx5_vhca_events_work_enqueue(notifier->dev, wq_idx, &work->work); return NOTIFY_OK; } @@ -132,28 +148,75 @@ void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap) int mlx5_vhca_event_init(struct mlx5_core_dev *dev) { struct mlx5_vhca_state_notifier *notifier; + char wq_name[MLX5_CMD_WQ_MAX_NAME]; + struct mlx5_vhca_events *events; + int err, i; if (!mlx5_vhca_event_supported(dev)) return 0; - notifier = kzalloc(sizeof(*notifier), GFP_KERNEL); - if (!notifier) + events = kzalloc(sizeof(*events), GFP_KERNEL); + if (!events) return -ENOMEM; + events->dev = dev; + dev->priv.vhca_events = events; + for (i = 0; i < MLX5_DEV_MAX_WQS; i++) { + snprintf(wq_name, MLX5_CMD_WQ_MAX_NAME, "mlx5_vhca_event%d", i); + events->handler[i].wq = create_singlethread_workqueue(wq_name); + if (!events->handler[i].wq) { + err = -ENOMEM; + goto err_create_wq; + } + } + + notifier = kzalloc(sizeof(*notifier), GFP_KERNEL); + if (!notifier) { + err = -ENOMEM; + goto err_notifier; + } + dev->priv.vhca_state_notifier = notifier; notifier->dev = dev; BLOCKING_INIT_NOTIFIER_HEAD(¬ifier->n_head); MLX5_NB_INIT(¬ifier->nb, mlx5_vhca_state_change_notifier, VHCA_STATE_CHANGE); return 0; + +err_notifier: +err_create_wq: + for (--i; i >= 0; i--) + destroy_workqueue(events->handler[i].wq); + kfree(events); + return err; +} + +void mlx5_vhca_event_work_queues_flush(struct mlx5_core_dev *dev) +{ + struct mlx5_vhca_events *vhca_events; + int i; + + if (!mlx5_vhca_event_supported(dev)) + return; + + vhca_events = dev->priv.vhca_events; + for (i = 0; i < MLX5_DEV_MAX_WQS; i++) + flush_workqueue(vhca_events->handler[i].wq); } void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev) { + struct mlx5_vhca_events *vhca_events; + int i; + if (!mlx5_vhca_event_supported(dev)) return; kfree(dev->priv.vhca_state_notifier); dev->priv.vhca_state_notifier = NULL; + vhca_events = dev->priv.vhca_events; + for (i = 0; i < MLX5_DEV_MAX_WQS; i++) + destroy_workqueue(vhca_events->handler[i].wq); + kvfree(vhca_events); } void mlx5_vhca_event_start(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h index 013cdfe90616..1725ba64f8af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h @@ -28,6 +28,9 @@ int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id); int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, u32 *out, u32 outlen); +void mlx5_vhca_events_work_enqueue(struct mlx5_core_dev *dev, int idx, struct work_struct *work); +void mlx5_vhca_event_work_queues_flush(struct mlx5_core_dev *dev); + #else static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, 
void *set_hca_cap) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index 54bb0866ed72..e3ec559369fa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -55,6 +55,13 @@ static const char *dr_action_id_to_str(enum mlx5dr_action_type action_id) return action_type_to_str[action_id]; } +static bool mlx5dr_action_supp_fwd_fdb_multi_ft(struct mlx5_core_dev *dev) +{ + return (MLX5_CAP_GEN(dev, steering_format_version) < MLX5_STEERING_FORMAT_CONNECTX_6DX || + MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table_limit_regc) || + MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table)); +} + static const enum dr_action_valid_state next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] = { [DR_ACTION_DOMAIN_NIC_INGRESS] = { @@ -1163,12 +1170,16 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, bool ignore_flow_level, u32 flow_source) { + struct mlx5dr_cmd_flow_destination_hw_info tmp_hw_dest; struct mlx5dr_cmd_flow_destination_hw_info *hw_dests; struct mlx5dr_action **ref_actions; struct mlx5dr_action *action; bool reformat_req = false; + bool is_ft_wire = false; + u16 num_dst_ft = 0; u32 num_of_ref = 0; u32 ref_act_cnt; + u16 last_dest; int ret; int i; @@ -1210,11 +1221,22 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, break; case DR_ACTION_TYP_FT: + if (num_dst_ft && + !mlx5dr_action_supp_fwd_fdb_multi_ft(dmn->mdev)) { + mlx5dr_dbg(dmn, "multiple FT destinations not supported\n"); + goto free_ref_actions; + } + num_dst_ft++; hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - if (dest_action->dest_tbl->is_fw_tbl) + if (dest_action->dest_tbl->is_fw_tbl) { hw_dests[i].ft_id = dest_action->dest_tbl->fw_tbl.id; - else + } else { hw_dests[i].ft_id = dest_action->dest_tbl->tbl->table_id; + if (dest_action->dest_tbl->is_wire_ft) { + is_ft_wire = true; + last_dest = i; + } + } break; default: @@ -1223,6 +1245,16 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, } } + /* In multidest, the FW performs the iteration in RX, except for the + * last destination, which is handled in TX. + * So, if one of the FT targets is the wire, put it at the end of the dest list.
+ */ + if (is_ft_wire && num_dst_ft > 1) { + tmp_hw_dest = hw_dests[last_dest]; + hw_dests[last_dest] = hw_dests[num_of_dests - 1]; + hw_dests[num_of_dests - 1] = tmp_hw_dest; + } + action = dr_action_create_generic(DR_ACTION_TYP_FT); if (!action) goto free_ref_actions; @@ -1422,7 +1454,6 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn, case DR_ACTION_TYP_TNL_L3_TO_L2: { u8 *hw_actions; - int ret; hw_actions = kzalloc(DR_ACTION_CACHE_LINE_SIZE, GFP_KERNEL); if (!hw_actions) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index 4a5ae86e2b62..6fa06ba2d346 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -1096,8 +1096,8 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, if (!in) goto err_cqwq; - vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev); - err = mlx5_vector2eqn(mdev, vector, &eqn); + vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev); + err = mlx5_comp_eqn_get(mdev, vector, &eqn); if (err) { kvfree(in); goto err_cqwq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 6c59de3e28f6..81eff6c410ce 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -436,10 +436,6 @@ void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx, - struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - bool inner, bool rx); void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, @@ -1064,6 +1060,7 @@ struct mlx5dr_action_sampler { struct mlx5dr_action_dest_tbl { u8 is_fw_tbl:1; + u8 is_wire_ft:1; union { struct mlx5dr_table *tbl; struct { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index feb307fb3440..50c2554c9ccf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -209,10 +209,17 @@ static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain, struct mlx5_flow_rule *dst) { struct mlx5_flow_table *dest_ft = dst->dest_attr.ft; + struct mlx5dr_action *tbl_action; if (mlx5dr_is_fw_table(dest_ft)) return mlx5dr_action_create_dest_flow_fw_table(domain, dest_ft); - return mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table); + + tbl_action = mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table); + if (tbl_action) + tbl_action->dest_tbl->is_wire_ft = + dest_ft->flags & MLX5_FLOW_TABLE_UPLINK_VPORT ? 
1 : 0; + + return tbl_action; } static struct mlx5dr_action *create_range_action(struct mlx5dr_domain *domain, @@ -336,7 +343,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, if (fte->action.pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) { err = -EINVAL; mlx5dr_err(domain, "FW-owned reformat can't be used in SW rule\n"); - goto free_actions; + goto free_actions; } is_decap = fte->action.pkt_reformat->reformat_type == diff --git a/drivers/net/ethernet/mellanox/mlx5/core/thermal.c b/drivers/net/ethernet/mellanox/mlx5/core/thermal.c deleted file mode 100644 index 52199d39657e..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/thermal.c +++ /dev/null @@ -1,114 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB -// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. - -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/device.h> -#include <linux/thermal.h> -#include <linux/err.h> -#include <linux/mlx5/driver.h> -#include "mlx5_core.h" -#include "thermal.h" - -#define MLX5_THERMAL_POLL_INT_MSEC 1000 -#define MLX5_THERMAL_NUM_TRIPS 0 -#define MLX5_THERMAL_ASIC_SENSOR_INDEX 0 - -/* Bit string indicating the writeablility of trip points if any */ -#define MLX5_THERMAL_TRIP_MASK (BIT(MLX5_THERMAL_NUM_TRIPS) - 1) - -struct mlx5_thermal { - struct mlx5_core_dev *mdev; - struct thermal_zone_device *tzdev; -}; - -static int mlx5_thermal_get_mtmp_temp(struct mlx5_core_dev *mdev, u32 id, int *p_temp) -{ - u32 mtmp_out[MLX5_ST_SZ_DW(mtmp_reg)] = {}; - u32 mtmp_in[MLX5_ST_SZ_DW(mtmp_reg)] = {}; - int err; - - MLX5_SET(mtmp_reg, mtmp_in, sensor_index, id); - - err = mlx5_core_access_reg(mdev, mtmp_in, sizeof(mtmp_in), - mtmp_out, sizeof(mtmp_out), - MLX5_REG_MTMP, 0, 0); - - if (err) - return err; - - *p_temp = MLX5_GET(mtmp_reg, mtmp_out, temperature); - - return 0; -} - -static int mlx5_thermal_get_temp(struct thermal_zone_device *tzdev, - int *p_temp) -{ - struct mlx5_thermal *thermal = thermal_zone_device_priv(tzdev); - struct mlx5_core_dev *mdev = thermal->mdev; - int err; - - err = mlx5_thermal_get_mtmp_temp(mdev, MLX5_THERMAL_ASIC_SENSOR_INDEX, p_temp); - - if (err) - return err; - - /* The unit of temp returned is in 0.125 C. The thermal - * framework expects the value in 0.001 C. 
- */ - *p_temp *= 125; - - return 0; -} - -static struct thermal_zone_device_ops mlx5_thermal_ops = { - .get_temp = mlx5_thermal_get_temp, -}; - -int mlx5_thermal_init(struct mlx5_core_dev *mdev) -{ - char data[THERMAL_NAME_LENGTH]; - struct mlx5_thermal *thermal; - int err; - - if (!mlx5_core_is_pf(mdev) && !mlx5_core_is_ecpf(mdev)) - return 0; - - err = snprintf(data, sizeof(data), "mlx5_%s", dev_name(mdev->device)); - if (err < 0 || err >= sizeof(data)) { - mlx5_core_err(mdev, "Failed to setup thermal zone name, %d\n", err); - return -EINVAL; - } - - thermal = kzalloc(sizeof(*thermal), GFP_KERNEL); - if (!thermal) - return -ENOMEM; - - thermal->mdev = mdev; - thermal->tzdev = thermal_zone_device_register_with_trips(data, - NULL, - MLX5_THERMAL_NUM_TRIPS, - MLX5_THERMAL_TRIP_MASK, - thermal, - &mlx5_thermal_ops, - NULL, 0, MLX5_THERMAL_POLL_INT_MSEC); - if (IS_ERR(thermal->tzdev)) { - err = PTR_ERR(thermal->tzdev); - mlx5_core_err(mdev, "Failed to register thermal zone device (%s) %d\n", data, err); - kfree(thermal); - return err; - } - - mdev->thermal = thermal; - return 0; -} - -void mlx5_thermal_uninit(struct mlx5_core_dev *mdev) -{ - if (!mdev->thermal) - return; - - thermal_zone_device_unregister(mdev->thermal->tzdev); - kfree(mdev->thermal); -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/thermal.h b/drivers/net/ethernet/mellanox/mlx5/core/thermal.h deleted file mode 100644 index 7d752c122192..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/thermal.h +++ /dev/null @@ -1,20 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB - * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. - */ -#ifndef __MLX5_THERMAL_DRIVER_H -#define __MLX5_THERMAL_DRIVER_H - -#if IS_ENABLED(CONFIG_THERMAL) -int mlx5_thermal_init(struct mlx5_core_dev *mdev); -void mlx5_thermal_uninit(struct mlx5_core_dev *mdev); -#else -static inline int mlx5_thermal_init(struct mlx5_core_dev *mdev) -{ - mdev->thermal = NULL; - return 0; -} - -static inline void mlx5_thermal_uninit(struct mlx5_core_dev *mdev) { } -#endif - -#endif /* __MLX5_THERMAL_DRIVER_H */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 5a31fb47ffa5..21753f327868 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -277,7 +277,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, req_list_size = max_list_size; } - out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_in) + + out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) + req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout); out = kvzalloc(out_sz, GFP_KERNEL); diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h index a453b9cd9033..bc94e75a7aeb 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h @@ -175,9 +175,6 @@ enum mlxbf_gige_res { int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv); void mlxbf_gige_mdio_remove(struct mlxbf_gige *priv); -irqreturn_t mlxbf_gige_mdio_handle_phy_interrupt(int irq, void *dev_id); -void mlxbf_gige_mdio_enable_phy_int(struct mlxbf_gige *priv); - void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv, unsigned int index, u64 dmac); void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv, diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c 
index 694de9513b9f..954ba0826c61 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c @@ -471,7 +471,7 @@ out: return err; } -static int mlxbf_gige_remove(struct platform_device *pdev) +static void mlxbf_gige_remove(struct platform_device *pdev) { struct mlxbf_gige *priv = platform_get_drvdata(pdev); @@ -479,8 +479,6 @@ static int mlxbf_gige_remove(struct platform_device *pdev) phy_disconnect(priv->netdev->phydev); mlxbf_gige_mdio_remove(priv); platform_set_drvdata(pdev, NULL); - - return 0; } static void mlxbf_gige_shutdown(struct platform_device *pdev) @@ -499,7 +497,7 @@ MODULE_DEVICE_TABLE(acpi, mlxbf_gige_acpi_match); static struct platform_driver mlxbf_gige_driver = { .probe = mlxbf_gige_probe, - .remove = mlxbf_gige_remove, + .remove_new = mlxbf_gige_remove, .shutdown = mlxbf_gige_shutdown, .driver = { .name = KBUILD_MODNAME, diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 3ca9fce759ea..71cad6bb6e62 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -29,7 +29,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_nve.o spectrum_nve_vxlan.o \ spectrum_dpipe.o spectrum_trap.o \ spectrum_ethtool.o spectrum_policer.o \ - spectrum_pgt.o + spectrum_pgt.o spectrum_port_range.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o mlxsw_spectrum-$(CONFIG_PTP_1588_CLOCK) += spectrum_ptp.o obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index 09bef04b11d1..e827c78be114 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -276,6 +276,12 @@ MLXSW_ITEM32(cmd_mbox, query_fw, fw_month, 0x14, 8, 8); */ MLXSW_ITEM32(cmd_mbox, query_fw, fw_day, 0x14, 0, 8); +/* cmd_mbox_query_fw_lag_mode_support + * 0: CONFIG_PROFILE.lag_mode is not supported by FW + * 1: CONFIG_PROFILE.lag_mode is supported by FW + */ +MLXSW_ITEM32(cmd_mbox, query_fw, lag_mode_support, 0x18, 1, 1); + /* cmd_mbox_query_fw_clr_int_base_offset * Clear Interrupt register's offset from clr_int_bar register * in PCI address space. @@ -659,42 +665,48 @@ MLXSW_ITEM32(cmd_mbox, config_profile, */ MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1); -/* cmd_mbox_config_set_ubridge +/* cmd_mbox_config_profile_set_ubridge * Capability bit. Setting a bit to 1 configures the profile * according to the mailbox contents. */ MLXSW_ITEM32(cmd_mbox, config_profile, set_ubridge, 0x0C, 22, 1); -/* cmd_mbox_config_set_kvd_linear_size +/* cmd_mbox_config_profile_set_kvd_linear_size * Capability bit. Setting a bit to 1 configures the profile * according to the mailbox contents. */ MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_linear_size, 0x0C, 24, 1); -/* cmd_mbox_config_set_kvd_hash_single_size +/* cmd_mbox_config_profile_set_kvd_hash_single_size * Capability bit. Setting a bit to 1 configures the profile * according to the mailbox contents. */ MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_single_size, 0x0C, 25, 1); -/* cmd_mbox_config_set_kvd_hash_double_size +/* cmd_mbox_config_profile_set_kvd_hash_double_size * Capability bit. Setting a bit to 1 configures the profile * according to the mailbox contents. 
*/ MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1); -/* cmd_mbox_config_set_cqe_version +/* cmd_mbox_config_profile_set_cqe_version * Capability bit. Setting a bit to 1 configures the profile * according to the mailbox contents. */ MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_version, 0x08, 0, 1); -/* cmd_mbox_config_set_cqe_time_stamp_type +/* cmd_mbox_config_profile_set_cqe_time_stamp_type * Capability bit. Setting a bit to 1 configures the profile * according to the mailbox contents. */ MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_time_stamp_type, 0x08, 2, 1); +/* cmd_mbox_config_profile_set_lag_mode + * Capability bit. Setting a bit to 1 configures the lag_mode + * according to the mailbox contents. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_lag_mode, 0x08, 7, 1); + /* cmd_mbox_config_profile_max_vepa_channels * Maximum number of VEPA channels per port (0 through 16) * 0 - multi-channel VEPA is disabled @@ -840,6 +852,21 @@ MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1); */ MLXSW_ITEM32(cmd_mbox, config_profile, ubridge, 0x50, 4, 1); +enum mlxsw_cmd_mbox_config_profile_lag_mode { + /* FW manages PGT LAG table */ + MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_FW, + /* SW manages PGT LAG table */ + MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW, +}; + +/* cmd_mbox_config_profile_lag_mode + * LAG mode + * Configured if set_lag_mode is set + * Supported from Spectrum-2 and above. + * Supported only when ubridge = 1 + */ +MLXSW_ITEM32(cmd_mbox, config_profile, lag_mode, 0x50, 3, 1); + /* cmd_mbox_config_kvd_linear_size * KVD Linear Size * Valid for Spectrum only @@ -847,7 +874,7 @@ MLXSW_ITEM32(cmd_mbox, config_profile, ubridge, 0x50, 4, 1); */ MLXSW_ITEM32(cmd_mbox, config_profile, kvd_linear_size, 0x54, 0, 24); -/* cmd_mbox_config_kvd_hash_single_size +/* cmd_mbox_config_profile_kvd_hash_single_size * KVD Hash single-entries size * Valid for Spectrum only * Allowed values are 128*N where N=0 or higher @@ -856,7 +883,7 @@ MLXSW_ITEM32(cmd_mbox, config_profile, kvd_linear_size, 0x54, 0, 24); */ MLXSW_ITEM32(cmd_mbox, config_profile, kvd_hash_single_size, 0x58, 0, 24); -/* cmd_mbox_config_kvd_hash_double_size +/* cmd_mbox_config_profile_kvd_hash_double_size * KVD Hash double-entries size (units of single-size entries) * Valid for Spectrum only * Allowed values are 128*N where N=0 or higher diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 1ccf3b73ed72..f23421f038f3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -204,6 +204,13 @@ int mlxsw_core_max_lag(struct mlxsw_core *mlxsw_core, u16 *p_max_lag) } EXPORT_SYMBOL(mlxsw_core_max_lag); +enum mlxsw_cmd_mbox_config_profile_lag_mode +mlxsw_core_lag_mode(struct mlxsw_core *mlxsw_core) +{ + return mlxsw_core->bus->lag_mode(mlxsw_core->bus_priv); +} +EXPORT_SYMBOL(mlxsw_core_lag_mode); + void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core) { return mlxsw_core->driver_priv; @@ -1792,122 +1799,78 @@ static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg, static const struct mlxsw_listener mlxsw_core_health_listener = MLXSW_CORE_EVENTL(mlxsw_core_health_listener_func, MFDE); -static int +static void mlxsw_core_health_fw_fatal_dump_fatal_cause(const char *mfde_pl, struct devlink_fmsg *fmsg) { u32 val, tile_v; - int err; val = mlxsw_reg_mfde_fatal_cause_id_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "cause_id", val); - if (err) - return err; + 
devlink_fmsg_u32_pair_put(fmsg, "cause_id", val); tile_v = mlxsw_reg_mfde_fatal_cause_tile_v_get(mfde_pl); if (tile_v) { val = mlxsw_reg_mfde_fatal_cause_tile_index_get(mfde_pl); - err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val); - if (err) - return err; + devlink_fmsg_u8_pair_put(fmsg, "tile_index", val); } - - return 0; } -static int +static void mlxsw_core_health_fw_fatal_dump_fw_assert(const char *mfde_pl, struct devlink_fmsg *fmsg) { u32 val, tile_v; - int err; val = mlxsw_reg_mfde_fw_assert_var0_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "var0", val); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "var0", val); val = mlxsw_reg_mfde_fw_assert_var1_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "var1", val); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "var1", val); val = mlxsw_reg_mfde_fw_assert_var2_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "var2", val); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "var2", val); val = mlxsw_reg_mfde_fw_assert_var3_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "var3", val); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "var3", val); val = mlxsw_reg_mfde_fw_assert_var4_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "var4", val); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "var4", val); val = mlxsw_reg_mfde_fw_assert_existptr_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "existptr", val); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "existptr", val); val = mlxsw_reg_mfde_fw_assert_callra_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "callra", val); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "callra", val); val = mlxsw_reg_mfde_fw_assert_oe_get(mfde_pl); - err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val); - if (err) - return err; + devlink_fmsg_bool_pair_put(fmsg, "old_event", val); tile_v = mlxsw_reg_mfde_fw_assert_tile_v_get(mfde_pl); if (tile_v) { val = mlxsw_reg_mfde_fw_assert_tile_index_get(mfde_pl); - err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val); - if (err) - return err; + devlink_fmsg_u8_pair_put(fmsg, "tile_index", val); } val = mlxsw_reg_mfde_fw_assert_ext_synd_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "ext_synd", val); - if (err) - return err; - - return 0; + devlink_fmsg_u32_pair_put(fmsg, "ext_synd", val); } -static int +static void mlxsw_core_health_fw_fatal_dump_kvd_im_stop(const char *mfde_pl, struct devlink_fmsg *fmsg) { u32 val; - int err; val = mlxsw_reg_mfde_kvd_im_stop_oe_get(mfde_pl); - err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val); - if (err) - return err; + devlink_fmsg_bool_pair_put(fmsg, "old_event", val); val = mlxsw_reg_mfde_kvd_im_stop_pipes_mask_get(mfde_pl); - return devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val); + devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val); } -static int +static void mlxsw_core_health_fw_fatal_dump_crspace_to(const char *mfde_pl, struct devlink_fmsg *fmsg) { u32 val; - int err; val = mlxsw_reg_mfde_crspace_to_log_address_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "log_address", val); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "log_address", val); val = mlxsw_reg_mfde_crspace_to_oe_get(mfde_pl); - err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val); - if (err) - return err; + devlink_fmsg_bool_pair_put(fmsg, "old_event", val); val = mlxsw_reg_mfde_crspace_to_log_id_get(mfde_pl); - err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val); - if (err) - return 
err; + devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val); val = mlxsw_reg_mfde_crspace_to_log_ip_get(mfde_pl); - err = devlink_fmsg_u64_pair_put(fmsg, "log_ip", val); - if (err) - return err; - - return 0; + devlink_fmsg_u64_pair_put(fmsg, "log_ip", val); } static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *reporter, @@ -1918,24 +1881,17 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor char *val_str; u8 event_id; u32 val; - int err; if (!priv_ctx) /* User-triggered dumps are not possible */ return -EOPNOTSUPP; val = mlxsw_reg_mfde_irisc_id_get(mfde_pl); - err = devlink_fmsg_u8_pair_put(fmsg, "irisc_id", val); - if (err) - return err; - err = devlink_fmsg_arr_pair_nest_start(fmsg, "event"); - if (err) - return err; + devlink_fmsg_u8_pair_put(fmsg, "irisc_id", val); + devlink_fmsg_arr_pair_nest_start(fmsg, "event"); event_id = mlxsw_reg_mfde_event_id_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "id", event_id); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "id", event_id); switch (event_id) { case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO: val_str = "CR space timeout"; @@ -1955,24 +1911,13 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor default: val_str = NULL; } - if (val_str) { - err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str); - if (err) - return err; - } - - err = devlink_fmsg_arr_pair_nest_end(fmsg); - if (err) - return err; - - err = devlink_fmsg_arr_pair_nest_start(fmsg, "severity"); - if (err) - return err; + if (val_str) + devlink_fmsg_string_pair_put(fmsg, "desc", val_str); + devlink_fmsg_arr_pair_nest_end(fmsg); + devlink_fmsg_arr_pair_nest_start(fmsg, "severity"); val = mlxsw_reg_mfde_severity_get(mfde_pl); - err = devlink_fmsg_u8_pair_put(fmsg, "id", val); - if (err) - return err; + devlink_fmsg_u8_pair_put(fmsg, "id", val); switch (val) { case MLXSW_REG_MFDE_SEVERITY_FATL: val_str = "Fatal"; @@ -1986,15 +1931,9 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor default: val_str = NULL; } - if (val_str) { - err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str); - if (err) - return err; - } - - err = devlink_fmsg_arr_pair_nest_end(fmsg); - if (err) - return err; + if (val_str) + devlink_fmsg_string_pair_put(fmsg, "desc", val_str); + devlink_fmsg_arr_pair_nest_end(fmsg); val = mlxsw_reg_mfde_method_get(mfde_pl); switch (val) { @@ -2007,16 +1946,11 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor default: val_str = NULL; } - if (val_str) { - err = devlink_fmsg_string_pair_put(fmsg, "method", val_str); - if (err) - return err; - } + if (val_str) + devlink_fmsg_string_pair_put(fmsg, "method", val_str); val = mlxsw_reg_mfde_long_process_get(mfde_pl); - err = devlink_fmsg_bool_pair_put(fmsg, "long_process", val); - if (err) - return err; + devlink_fmsg_bool_pair_put(fmsg, "long_process", val); val = mlxsw_reg_mfde_command_type_get(mfde_pl); switch (val) { @@ -2032,29 +1966,25 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor default: val_str = NULL; } - if (val_str) { - err = devlink_fmsg_string_pair_put(fmsg, "command_type", val_str); - if (err) - return err; - } + if (val_str) + devlink_fmsg_string_pair_put(fmsg, "command_type", val_str); val = mlxsw_reg_mfde_reg_attr_id_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "reg_attr_id", val); - if (err) - return err; + devlink_fmsg_u32_pair_put(fmsg, "reg_attr_id", val); switch (event_id) { case 
MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO: - return mlxsw_core_health_fw_fatal_dump_crspace_to(mfde_pl, - fmsg); + mlxsw_core_health_fw_fatal_dump_crspace_to(mfde_pl, fmsg); + break; case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP: - return mlxsw_core_health_fw_fatal_dump_kvd_im_stop(mfde_pl, - fmsg); + mlxsw_core_health_fw_fatal_dump_kvd_im_stop(mfde_pl, fmsg); + break; case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT: - return mlxsw_core_health_fw_fatal_dump_fw_assert(mfde_pl, fmsg); + mlxsw_core_health_fw_fatal_dump_fw_assert(mfde_pl, fmsg); + break; case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE: - return mlxsw_core_health_fw_fatal_dump_fatal_cause(mfde_pl, - fmsg); + mlxsw_core_health_fw_fatal_dump_fatal_cause(mfde_pl, fmsg); + break; } return 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index e5474d3e34db..764d14bd5bc0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -36,6 +36,8 @@ struct mlxsw_fw_rev; unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core); int mlxsw_core_max_lag(struct mlxsw_core *mlxsw_core, u16 *p_max_lag); +enum mlxsw_cmd_mbox_config_profile_lag_mode +mlxsw_core_lag_mode(struct mlxsw_core *mlxsw_core); void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core); @@ -335,6 +337,7 @@ struct mlxsw_config_profile { u8 kvd_hash_single_parts; u8 kvd_hash_double_parts; u8 cqe_time_stamp_type; + bool lag_mode_prefer_sw; struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT]; }; @@ -485,6 +488,7 @@ struct mlxsw_bus { u32 (*read_frc_l)(void *bus_priv); u32 (*read_utc_sec)(void *bus_priv); u32 (*read_utc_nsec)(void *bus_priv); + enum mlxsw_cmd_mbox_config_profile_lag_mode (*lag_mode)(void *bus_priv); u8 features; }; @@ -624,7 +628,7 @@ struct mlxsw_linecards { struct mlxsw_linecard_types_info *types_info; struct list_head event_ops_list; struct mutex event_ops_list_lock; /* Locks accesses to event ops list */ - struct mlxsw_linecard linecards[]; + struct mlxsw_linecard linecards[] __counted_by(count); }; static inline struct mlxsw_linecard * diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 9dfe7148199f..faa63ea9b83e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -1887,6 +1887,46 @@ int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid, } EXPORT_SYMBOL(mlxsw_afa_block_append_fid_set); +/* Ignore Action + * ------------- + * The ignore action is used to ignore basic switching functions such as + * learning on a per-packet basis. + */ + +#define MLXSW_AFA_IGNORE_CODE 0x0F +#define MLXSW_AFA_IGNORE_SIZE 1 + +/* afa_ignore_disable_learning + * Disable learning on ingress. + */ +MLXSW_ITEM32(afa, ignore, disable_learning, 0x00, 29, 1); + +/* afa_ignore_disable_security + * Disable security lookup on ingress. + * Reserved when Spectrum-1. 
+ */ +MLXSW_ITEM32(afa, ignore, disable_security, 0x00, 28, 1); + +static void mlxsw_afa_ignore_pack(char *payload, bool disable_learning, + bool disable_security) +{ + mlxsw_afa_ignore_disable_learning_set(payload, disable_learning); + mlxsw_afa_ignore_disable_security_set(payload, disable_security); +} + +int mlxsw_afa_block_append_ignore(struct mlxsw_afa_block *block, + bool disable_learning, bool disable_security) +{ + char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_IGNORE_CODE, + MLXSW_AFA_IGNORE_SIZE); + + if (IS_ERR(act)) + return PTR_ERR(act); + mlxsw_afa_ignore_pack(act, disable_learning, disable_security); + return 0; +} +EXPORT_SYMBOL(mlxsw_afa_block_append_ignore); + /* MC Routing Action * ----------------- * The Multicast router action. Can be used by RMFT_V2 - Router Multicast diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index db58037be46e..0ead3a212de8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -89,6 +89,8 @@ int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block, struct netlink_ext_ack *extack); int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid, struct netlink_ext_ack *extack); +int mlxsw_afa_block_append_ignore(struct mlxsw_afa_block *block, + bool disable_learning, bool disable_security); int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block, u16 expected_irif, u16 min_mtu, bool rmid_valid, u32 kvdl_index); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c index 973de2adc943..0d5e6f9b466e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c @@ -32,8 +32,7 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8), MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2), MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6), - MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x18, 17, 4), - MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_LSB, 0x18, 21, 8), + MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER, 0x18, 17, 12), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4), @@ -43,6 +42,10 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_32_63, 0x38, 4), MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_0_31, 0x3C, 4), MLXSW_AFK_ELEMENT_INFO_U32(FDB_MISS, 0x40, 0, 1), + MLXSW_AFK_ELEMENT_INFO_U32(L4_PORT_RANGE, 0x40, 1, 16), + MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_0_3, 0x40, 17, 4), + MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_4_7, 0x40, 21, 4), + MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x40, 25, 4), }; struct mlxsw_afk { @@ -135,6 +138,7 @@ mlxsw_afk_key_info_find(struct mlxsw_afk *mlxsw_afk, struct mlxsw_afk_picker { DECLARE_BITMAP(element, MLXSW_AFK_ELEMENT_MAX); + DECLARE_BITMAP(chosen_element, MLXSW_AFK_ELEMENT_MAX); unsigned int total; }; @@ -205,7 +209,7 @@ static int mlxsw_afk_picker_key_info_add(struct mlxsw_afk *mlxsw_afk, if (key_info->blocks_count == mlxsw_afk->max_blocks) return -EINVAL; - for_each_set_bit(element, picker[block_index].element, + for_each_set_bit(element, picker[block_index].chosen_element, MLXSW_AFK_ELEMENT_MAX) { key_info->element_to_block[element] = key_info->blocks_count; 
mlxsw_afk_element_usage_add(&key_info->elusage, element); @@ -217,11 +221,43 @@ static int mlxsw_afk_picker_key_info_add(struct mlxsw_afk *mlxsw_afk, return 0; } +static int mlxsw_afk_keys_fill(struct mlxsw_afk *mlxsw_afk, + unsigned long *chosen_blocks_bm, + struct mlxsw_afk_picker *picker, + struct mlxsw_afk_key_info *key_info) +{ + int i, err; + + /* First fill only key blocks with high_entropy. */ + for_each_set_bit(i, chosen_blocks_bm, mlxsw_afk->blocks_count) { + if (!mlxsw_afk->blocks[i].high_entropy) + continue; + + err = mlxsw_afk_picker_key_info_add(mlxsw_afk, picker, i, + key_info); + if (err) + return err; + __clear_bit(i, chosen_blocks_bm); + } + + /* Fill the rest of key blocks. */ + for_each_set_bit(i, chosen_blocks_bm, mlxsw_afk->blocks_count) { + err = mlxsw_afk_picker_key_info_add(mlxsw_afk, picker, i, + key_info); + if (err) + return err; + } + + return 0; +} + static int mlxsw_afk_picker(struct mlxsw_afk *mlxsw_afk, struct mlxsw_afk_key_info *key_info, struct mlxsw_afk_element_usage *elusage) { + DECLARE_BITMAP(elusage_chosen, MLXSW_AFK_ELEMENT_MAX) = {0}; struct mlxsw_afk_picker *picker; + unsigned long *chosen_blocks_bm; enum mlxsw_afk_element element; int err; @@ -229,6 +265,12 @@ static int mlxsw_afk_picker(struct mlxsw_afk *mlxsw_afk, if (!picker) return -ENOMEM; + chosen_blocks_bm = bitmap_zalloc(mlxsw_afk->blocks_count, GFP_KERNEL); + if (!chosen_blocks_bm) { + err = -ENOMEM; + goto err_bitmap_alloc; + } + /* Since the same elements could be present in multiple blocks, * we must find out optimal block list in order to make the * block count as low as possible. @@ -253,15 +295,26 @@ static int mlxsw_afk_picker(struct mlxsw_afk *mlxsw_afk, err = block_index; goto out; } - err = mlxsw_afk_picker_key_info_add(mlxsw_afk, picker, - block_index, key_info); - if (err) - goto out; + + __set_bit(block_index, chosen_blocks_bm); + + bitmap_copy(picker[block_index].chosen_element, + picker[block_index].element, MLXSW_AFK_ELEMENT_MAX); + + bitmap_or(elusage_chosen, elusage_chosen, + picker[block_index].chosen_element, + MLXSW_AFK_ELEMENT_MAX); + mlxsw_afk_picker_subtract_hits(mlxsw_afk, picker, block_index); - } while (!mlxsw_afk_key_info_elements_eq(key_info, elusage)); - err = 0; + } while (!bitmap_equal(elusage_chosen, elusage->usage, + MLXSW_AFK_ELEMENT_MAX)); + + err = mlxsw_afk_keys_fill(mlxsw_afk, chosen_blocks_bm, picker, + key_info); out: + bitmap_free(chosen_blocks_bm); +err_bitmap_alloc: kfree(picker); return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index 65a4abadc7db..98a05598178b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -33,9 +33,12 @@ enum mlxsw_afk_element { MLXSW_AFK_ELEMENT_IP_TTL_, MLXSW_AFK_ELEMENT_IP_ECN, MLXSW_AFK_ELEMENT_IP_DSCP, - MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB, - MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB, + MLXSW_AFK_ELEMENT_VIRT_ROUTER, MLXSW_AFK_ELEMENT_FDB_MISS, + MLXSW_AFK_ELEMENT_L4_PORT_RANGE, + MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_3, + MLXSW_AFK_ELEMENT_VIRT_ROUTER_4_7, + MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB, MLXSW_AFK_ELEMENT_MAX, }; @@ -116,6 +119,7 @@ struct mlxsw_afk_block { u16 encoding; /* block ID */ struct mlxsw_afk_element_inst *instances; unsigned int instances_count; + bool high_entropy; }; #define MLXSW_AFK_BLOCK(_encoding, _instances) \ @@ -125,6 +129,14 @@ struct mlxsw_afk_block { .instances_count = ARRAY_SIZE(_instances), \ } +#define 
MLXSW_AFK_BLOCK_HIGH_ENTROPY(_encoding, _instances) \ + { \ + .encoding = _encoding, \ + .instances = _instances, \ + .instances_count = ARRAY_SIZE(_instances), \ + .high_entropy = true, \ + } + struct mlxsw_afk_element_usage { DECLARE_BITMAP(usage, MLXSW_AFK_ELEMENT_MAX); }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 0107cbc32fc7..53b150b7ae4e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -32,8 +32,9 @@ struct mlxsw_env { const struct mlxsw_bus_info *bus_info; u8 max_module_count; /* Maximum number of modules per-slot. */ u8 num_of_slots; /* Including the main board. */ + u8 max_eeprom_len; /* Maximum module EEPROM transaction length. */ struct mutex line_cards_lock; /* Protects line cards. */ - struct mlxsw_env_line_card *line_cards[]; + struct mlxsw_env_line_card *line_cards[] __counted_by(num_of_slots); }; static bool __mlxsw_env_linecard_is_active(struct mlxsw_env *mlxsw_env, @@ -111,7 +112,7 @@ mlxsw_env_validate_cable_ident(struct mlxsw_core *core, u8 slot_index, int id, if (err) return err; - mlxsw_reg_mcia_pack(mcia_pl, slot_index, id, 0, + mlxsw_reg_mcia_pack(mcia_pl, slot_index, id, MLXSW_REG_MCIA_PAGE0_LO_OFF, 0, 1, MLXSW_REG_MCIA_I2C_ADDR_LOW); err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl); @@ -146,6 +147,7 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, u8 slot_index, int module, u16 offset, u16 size, void *data, bool qsfp, unsigned int *p_read_size) { + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); char mcia_pl[MLXSW_REG_MCIA_LEN]; char *eeprom_tmp; u16 i2c_addr; @@ -153,11 +155,7 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, u8 slot_index, int status; int err; - /* MCIA register accepts buffer size <= 48. Page of size 128 should be - * read by chunks of size 48, 48, 32. Align the size of the last chunk - * to avoid reading after the end of the page. 
- */ - size = min_t(u16, size, MLXSW_REG_MCIA_EEPROM_SIZE); + size = min_t(u16, size, mlxsw_env->max_eeprom_len); if (offset < MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH && offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) @@ -188,7 +186,7 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, u8 slot_index, } } - mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page, offset, size, + mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, page, offset, size, i2c_addr); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcia), mcia_pl); @@ -266,12 +264,12 @@ mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, u8 slot_index, page = MLXSW_REG_MCIA_TH_PAGE_CMIS_NUM; else page = MLXSW_REG_MCIA_TH_PAGE_NUM; - mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page, + mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, page, MLXSW_REG_MCIA_TH_PAGE_OFF + off, MLXSW_REG_MCIA_TH_ITEM_SIZE, MLXSW_REG_MCIA_I2C_ADDR_LOW); } else { - mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, + mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, MLXSW_REG_MCIA_PAGE0_LO, off, MLXSW_REG_MCIA_TH_ITEM_SIZE, MLXSW_REG_MCIA_I2C_ADDR_HIGH); @@ -489,9 +487,9 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 size; size = min_t(u8, page->length - bytes_read, - MLXSW_REG_MCIA_EEPROM_SIZE); + mlxsw_env->max_eeprom_len); - mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page->page, + mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, page->page, device_addr + bytes_read, size, page->i2c_address); mlxsw_reg_mcia_bank_number_set(mcia_pl, page->bank); @@ -777,7 +775,7 @@ static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core, int err; mlxsw_reg_mtbr_pack(mtbr_pl, slot_index, - MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1); + MLXSW_REG_MTBR_BASE_MODULE_INDEX + module); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtbr), mtbr_pl); if (err) return err; @@ -1359,6 +1357,26 @@ static struct mlxsw_linecards_event_ops mlxsw_env_event_ops = { .got_inactive = mlxsw_env_got_inactive, }; +static int mlxsw_env_max_module_eeprom_len_query(struct mlxsw_env *mlxsw_env) +{ + char mcam_pl[MLXSW_REG_MCAM_LEN]; + bool mcia_128b_supported; + int err; + + mlxsw_reg_mcam_pack(mcam_pl, + MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES); + err = mlxsw_reg_query(mlxsw_env->core, MLXSW_REG(mcam), mcam_pl); + if (err) + return err; + + mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B, + &mcia_128b_supported); + + mlxsw_env->max_eeprom_len = mcia_128b_supported ? 
128 : 48; + + return 0; +} + int mlxsw_env_init(struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *bus_info, struct mlxsw_env **p_env) @@ -1427,10 +1445,15 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, if (err) goto err_type_set; + err = mlxsw_env_max_module_eeprom_len_query(env); + if (err) + goto err_eeprom_len_query; + env->line_cards[0]->active = true; return 0; +err_eeprom_len_query: err_type_set: mlxsw_env_module_event_disable(env, 0); err_mlxsw_env_module_event_enable: diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c index 70735068cf29..9c12e1feb643 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -293,7 +293,7 @@ static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev, module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count; mlxsw_reg_mtbr_pack(mtbr_pl, mlxsw_hwmon_dev->slot_index, - MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1); + MLXSW_REG_MTBR_BASE_MODULE_INDEX + module); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl); if (err) { dev_err(dev, "Failed to query module temperature sensor\n"); @@ -405,7 +405,8 @@ mlxsw_hwmon_module_temp_label_show(struct device *dev, container_of(attr, struct mlxsw_hwmon_attr, dev_attr); return sprintf(buf, "front panel %03u\n", - mlxsw_hwmon_attr->type_index); + mlxsw_hwmon_attr->type_index + 1 - + mlxsw_hwmon_attr->mlxsw_hwmon_dev->sensor_count); } static ssize_t diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c index af37e650a8ad..e8d6fe35bf36 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c @@ -132,6 +132,7 @@ static int mlxsw_linecard_bdev_probe(struct auxiliary_device *adev, struct mlxsw_linecard *linecard = linecard_bdev->linecard; struct mlxsw_linecard_dev *linecard_dev; struct devlink *devlink; + int err; devlink = devlink_alloc(&mlxsw_linecard_dev_devlink_ops, sizeof(*linecard_dev), &adev->dev); @@ -141,8 +142,12 @@ static int mlxsw_linecard_bdev_probe(struct auxiliary_device *adev, linecard_dev->linecard = linecard_bdev->linecard; linecard_bdev->linecard_dev = linecard_dev; + err = devlink_linecard_nested_dl_set(linecard->devlink_linecard, devlink); + if (err) { + devlink_free(devlink); + return err; + } devlink_register(devlink); - devlink_linecard_nested_dl_set(linecard->devlink_linecard, devlink); return 0; } @@ -151,9 +156,7 @@ static void mlxsw_linecard_bdev_remove(struct auxiliary_device *adev) struct mlxsw_linecard_bdev *linecard_bdev = container_of(adev, struct mlxsw_linecard_bdev, adev); struct devlink *devlink = priv_to_devlink(linecard_bdev->linecard_dev); - struct mlxsw_linecard *linecard = linecard_bdev->linecard; - devlink_linecard_nested_dl_set(linecard->devlink_linecard, NULL); devlink_unregister(devlink); devlink_free(devlink); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index 70d7fff24fa2..f1b48d6615f6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -31,6 +31,7 @@ /* External cooling devices, allowed for binding to mlxsw thermal zones. 
*/ static char * const mlxsw_thermal_external_allowed_cdev[] = { "mlxreg_fan", + "emc2305", }; struct mlxsw_cooling_states { @@ -535,7 +536,7 @@ mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal, static int mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz) { - char tz_name[THERMAL_NAME_LENGTH]; + char tz_name[40]; int ret; if (gearbox_tz->slot_index) diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c index 41298835a11e..1e150ce1c73a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c +++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c @@ -48,6 +48,7 @@ #define MLXSW_I2C_MBOX_SIZE_BITS 12 #define MLXSW_I2C_ADDR_BUF_SIZE 4 #define MLXSW_I2C_BLK_DEF 32 +#define MLXSW_I2C_BLK_MAX 100 #define MLXSW_I2C_RETRY 5 #define MLXSW_I2C_TIMEOUT_MSECS 5000 #define MLXSW_I2C_MAX_DATA_SIZE 256 @@ -423,9 +424,7 @@ mlxsw_i2c_cmd(struct device *dev, u16 opcode, u32 in_mod, size_t in_mbox_size, if (in_mbox) { reg_size = mlxsw_i2c_get_reg_size(in_mbox); - num = reg_size / mlxsw_i2c->block_size; - if (reg_size % mlxsw_i2c->block_size) - num++; + num = DIV_ROUND_UP(reg_size, mlxsw_i2c->block_size); if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) { dev_err(&client->dev, "Could not acquire lock"); @@ -444,7 +443,7 @@ mlxsw_i2c_cmd(struct device *dev, u16 opcode, u32 in_mod, size_t in_mbox_size, } else { /* No input mailbox is case of initialization query command. */ reg_size = MLXSW_I2C_MAX_DATA_SIZE; - num = reg_size / mlxsw_i2c->block_size; + num = DIV_ROUND_UP(reg_size, mlxsw_i2c->block_size); if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) { dev_err(&client->dev, "Could not acquire lock"); @@ -653,7 +652,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client) return -EOPNOTSUPP; } - mlxsw_i2c->block_size = max_t(u16, MLXSW_I2C_BLK_DEF, + mlxsw_i2c->block_size = min_t(u16, MLXSW_I2C_BLK_MAX, min_t(u16, quirks->max_read_len, quirks->max_write_len)); } else { diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 51eea1f0529c..e4b25e187467 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -105,6 +105,8 @@ struct mlxsw_pci { u64 free_running_clock_offset; u64 utc_sec_offset; u64 utc_nsec_offset; + bool lag_mode_support; + enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode; struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT]; u32 doorbell_offset; struct mlxsw_core *core; @@ -352,14 +354,15 @@ static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe, } static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci, - struct mlxsw_pci_queue_elem_info *elem_info) + struct mlxsw_pci_queue_elem_info *elem_info, + gfp_t gfp) { size_t buf_len = MLXSW_PORT_MAX_MTU; char *wqe = elem_info->elem; struct sk_buff *skb; int err; - skb = netdev_alloc_skb_ip_align(NULL, buf_len); + skb = __netdev_alloc_skb_ip_align(NULL, buf_len, gfp); if (!skb) return -ENOMEM; @@ -420,7 +423,7 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, for (i = 0; i < q->count; i++) { elem_info = mlxsw_pci_queue_elem_info_producer_get(q); BUG_ON(!elem_info); - err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info); + err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_KERNEL); if (err) goto rollback; /* Everything is set up, ring doorbell to pass elem to HW */ @@ -640,7 +643,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, if (q->consumer_counter++ != consumer_counter_limit) 
dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n"); - err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info); + err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_ATOMIC); if (err) { dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n"); goto out; @@ -1311,6 +1314,16 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox, profile->cqe_time_stamp_type); } + if (profile->lag_mode_prefer_sw && mlxsw_pci->lag_mode_support) { + enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode = + MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW; + + mlxsw_cmd_mbox_config_profile_set_lag_mode_set(mbox, 1); + mlxsw_cmd_mbox_config_profile_lag_mode_set(mbox, lag_mode); + mlxsw_pci->lag_mode = lag_mode; + } else { + mlxsw_pci->lag_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_FW; + } return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox); } @@ -1586,6 +1599,8 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, mlxsw_pci->utc_nsec_offset = mlxsw_cmd_mbox_query_fw_utc_nsec_offset_get(mbox); + mlxsw_pci->lag_mode_support = + mlxsw_cmd_mbox_query_fw_lag_mode_support_get(mbox); num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox); err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages); if (err) @@ -1618,9 +1633,8 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, if (err) goto err_config_profile; - /* Some resources depend on unified bridge model, which is configured - * as part of config_profile. Query the resources again to get correct - * values. + /* Some resources depend on details of config_profile, such as unified + * bridge model. Query the resources again to get correct values. */ err = mlxsw_core_resources_query(mlxsw_core, mbox, res); if (err) @@ -1895,6 +1909,14 @@ static u32 mlxsw_pci_read_utc_nsec(void *bus_priv) return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_nsec_offset); } +static enum mlxsw_cmd_mbox_config_profile_lag_mode +mlxsw_pci_lag_mode(void *bus_priv) +{ + struct mlxsw_pci *mlxsw_pci = bus_priv; + + return mlxsw_pci->lag_mode; +} + static const struct mlxsw_bus mlxsw_pci_bus = { .kind = "pci", .init = mlxsw_pci_init, @@ -1906,6 +1928,7 @@ static const struct mlxsw_bus mlxsw_pci_bus = { .read_frc_l = mlxsw_pci_read_frc_l, .read_utc_sec = mlxsw_pci_read_utc_sec, .read_utc_nsec = mlxsw_pci_read_utc_nsec, + .lag_mode = mlxsw_pci_lag_mode, .features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 17160e867bef..25b294fdeb3d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -38,18 +38,18 @@ static const struct mlxsw_reg_info mlxsw_reg_##_name = { \ MLXSW_REG_DEFINE(sgcr, MLXSW_REG_SGCR_ID, MLXSW_REG_SGCR_LEN); -/* reg_sgcr_llb - * Link Local Broadcast (Default=0) - * When set, all Link Local packets (224.0.0.X) will be treated as broadcast - * packets and ignore the IGMP snooping entries. +/* reg_sgcr_lag_lookup_pgt_base + * Base address used for lookup in PGT table + * Supported when CONFIG_PROFILE.lag_mode = 1 + * Note: when IGCR.ddd_lag_mode=0, the address shall be aligned to 8 entries. 
* Access: RW */ -MLXSW_ITEM32(reg, sgcr, llb, 0x04, 0, 1); +MLXSW_ITEM32(reg, sgcr, lag_lookup_pgt_base, 0x0C, 0, 16); -static inline void mlxsw_reg_sgcr_pack(char *payload, bool llb) +static inline void mlxsw_reg_sgcr_pack(char *payload, u16 lag_lookup_pgt_base) { MLXSW_REG_ZERO(sgcr, payload); - mlxsw_reg_sgcr_llb_set(payload, !!llb); + mlxsw_reg_sgcr_lag_lookup_pgt_base_set(payload, lag_lookup_pgt_base); } /* SPAD - Switch Physical Address Register @@ -2790,6 +2790,78 @@ static inline void mlxsw_reg_ptar_unpack(char *payload, char *tcam_region_info) mlxsw_reg_ptar_tcam_region_info_memcpy_from(payload, tcam_region_info); } +/* PPRR - Policy-Engine Port Range Register + * ---------------------------------------- + * This register is used for configuring port range identification. + */ +#define MLXSW_REG_PPRR_ID 0x3008 +#define MLXSW_REG_PPRR_LEN 0x14 + +MLXSW_REG_DEFINE(pprr, MLXSW_REG_PPRR_ID, MLXSW_REG_PPRR_LEN); + +/* reg_pprr_ipv4 + * Apply port range register to IPv4 packets. + * Access: RW + */ +MLXSW_ITEM32(reg, pprr, ipv4, 0x00, 31, 1); + +/* reg_pprr_ipv6 + * Apply port range register to IPv6 packets. + * Access: RW + */ +MLXSW_ITEM32(reg, pprr, ipv6, 0x00, 30, 1); + +/* reg_pprr_src + * Apply port range register to source L4 ports. + * Access: RW + */ +MLXSW_ITEM32(reg, pprr, src, 0x00, 29, 1); + +/* reg_pprr_dst + * Apply port range register to destination L4 ports. + * Access: RW + */ +MLXSW_ITEM32(reg, pprr, dst, 0x00, 28, 1); + +/* reg_pprr_tcp + * Apply port range register to TCP packets. + * Access: RW + */ +MLXSW_ITEM32(reg, pprr, tcp, 0x00, 27, 1); + +/* reg_pprr_udp + * Apply port range register to UDP packets. + * Access: RW + */ +MLXSW_ITEM32(reg, pprr, udp, 0x00, 26, 1); + +/* reg_pprr_register_index + * Index of Port Range Register being accessed. + * Range is 0..cap_max_acl_l4_port_range-1. + * Access: Index + */ +MLXSW_ITEM32(reg, pprr, register_index, 0x00, 0, 8); + +/* reg_pprr_port_range_min + * Minimum port range for comparison. + * Match is defined as: + * port_range_min <= packet_port <= port_range_max. + * Access: RW + */ +MLXSW_ITEM32(reg, pprr, port_range_min, 0x04, 16, 16); + +/* reg_pprr_port_range_max + * Maximum port range for comparison. + * Access: RW + */ +MLXSW_ITEM32(reg, pprr, port_range_max, 0x04, 0, 16); + +static inline void mlxsw_reg_pprr_pack(char *payload, u8 register_index) +{ + MLXSW_REG_ZERO(pprr, payload); + mlxsw_reg_pprr_register_index_set(payload, register_index); +} + /* PPBS - Policy-Engine Policy Based Switching Register * ---------------------------------------------------- * This register retrieves and sets Policy Based Switching Table entries.
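For illustration only (not part of the patch): a minimal sketch of how a caller might program one of the PPRR registers declared above to match TCP and UDP destination ports 1000-2000 on both IPv4 and IPv6. The example_port_range_set() wrapper is hypothetical; the setters are the ones generated by the MLXSW_ITEM32() declarations, and the register is written like any other mlxsw register.

static int example_port_range_set(struct mlxsw_core *core, u8 prr_index)
{
	char pprr_pl[MLXSW_REG_PPRR_LEN];

	mlxsw_reg_pprr_pack(pprr_pl, prr_index);
	/* Apply the range to both IP versions and both L4 protocols,
	 * matching on the destination port only.
	 */
	mlxsw_reg_pprr_ipv4_set(pprr_pl, true);
	mlxsw_reg_pprr_ipv6_set(pprr_pl, true);
	mlxsw_reg_pprr_tcp_set(pprr_pl, true);
	mlxsw_reg_pprr_udp_set(pprr_pl, true);
	mlxsw_reg_pprr_dst_set(pprr_pl, true);
	/* Inclusive match: 1000 <= packet_port <= 2000. */
	mlxsw_reg_pprr_port_range_min_set(pprr_pl, 1000);
	mlxsw_reg_pprr_port_range_max_set(pprr_pl, 2000);

	return mlxsw_reg_write(core, MLXSW_REG(pprr), pprr_pl);
}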
@@ -9479,7 +9551,7 @@ MLXSW_ITEM_BIT_ARRAY(reg, mtwe, sensor_warning, 0x0, 0x10, 1); #define MLXSW_REG_MTBR_ID 0x900F #define MLXSW_REG_MTBR_BASE_LEN 0x10 /* base length, without records */ #define MLXSW_REG_MTBR_REC_LEN 0x04 /* record length */ -#define MLXSW_REG_MTBR_REC_MAX_COUNT 47 /* firmware limitation */ +#define MLXSW_REG_MTBR_REC_MAX_COUNT 1 #define MLXSW_REG_MTBR_LEN (MLXSW_REG_MTBR_BASE_LEN + \ MLXSW_REG_MTBR_REC_LEN * \ MLXSW_REG_MTBR_REC_MAX_COUNT) @@ -9525,12 +9597,12 @@ MLXSW_ITEM32_INDEXED(reg, mtbr, rec_temp, MLXSW_REG_MTBR_BASE_LEN, 0, 16, MLXSW_REG_MTBR_REC_LEN, 0x00, false); static inline void mlxsw_reg_mtbr_pack(char *payload, u8 slot_index, - u16 base_sensor_index, u8 num_rec) + u16 base_sensor_index) { MLXSW_REG_ZERO(mtbr, payload); mlxsw_reg_mtbr_slot_index_set(payload, slot_index); mlxsw_reg_mtbr_base_sensor_index_set(payload, base_sensor_index); - mlxsw_reg_mtbr_num_rec_set(payload, num_rec); + mlxsw_reg_mtbr_num_rec_set(payload, 1); } /* Error codes from temperature reading */ @@ -9559,18 +9631,10 @@ static inline void mlxsw_reg_mtbr_temp_unpack(char *payload, int rec_ind, */ #define MLXSW_REG_MCIA_ID 0x9014 -#define MLXSW_REG_MCIA_LEN 0x40 +#define MLXSW_REG_MCIA_LEN 0x94 MLXSW_REG_DEFINE(mcia, MLXSW_REG_MCIA_ID, MLXSW_REG_MCIA_LEN); -/* reg_mcia_l - * Lock bit. Setting this bit will lock the access to the specific - * cable. Used for updating a full page in a cable EPROM. Any access - * other then subsequence writes will fail while the port is locked. - * Access: RW - */ -MLXSW_ITEM32(reg, mcia, l, 0x00, 31, 1); /* reg_mcia_module * Module number. * Access: Index @@ -9635,7 +9699,6 @@ MLXSW_ITEM32(reg, mcia, size, 0x08, 0, 16); #define MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH 256 #define MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH 128 -#define MLXSW_REG_MCIA_EEPROM_SIZE 48 #define MLXSW_REG_MCIA_I2C_ADDR_LOW 0x50 #define MLXSW_REG_MCIA_I2C_ADDR_HIGH 0x51 #define MLXSW_REG_MCIA_PAGE0_LO_OFF 0xa0 @@ -9672,7 +9735,7 @@ enum mlxsw_reg_mcia_eeprom_module_info { * Bytes to read/write. * Access: RW */ -MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_REG_MCIA_EEPROM_SIZE); +MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, 128); /* This is used to access the optional upper pages (1-3) in the QSFP+ * memory map. Page 1 is available on offset 256 through 383, page 2 - @@ -9683,14 +9746,12 @@ MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_REG_MCIA_EEPROM_SIZE); MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH + 1) static inline void mlxsw_reg_mcia_pack(char *payload, u8 slot_index, u8 module, - u8 lock, u8 page_number, - u16 device_addr, u8 size, + u8 page_number, u16 device_addr, u8 size, u8 i2c_device_addr) { MLXSW_REG_ZERO(mcia, payload); mlxsw_reg_mcia_slot_set(payload, slot_index); mlxsw_reg_mcia_module_set(payload, module); - mlxsw_reg_mcia_l_set(payload, lock); mlxsw_reg_mcia_page_number_set(payload, page_number); mlxsw_reg_mcia_device_address_set(payload, device_addr); mlxsw_reg_mcia_size_set(payload, size); @@ -10500,6 +10561,79 @@ static inline void mlxsw_reg_mcda_pack(char *payload, u32 update_handle, mlxsw_reg_mcda_data_set(payload, i, *(u32 *) &data[i * 4]); } +/* MCAM - Management Capabilities Mask Register + * -------------------------------------------- + * Reports the device supported management features. + */ +#define MLXSW_REG_MCAM_ID 0x907F +#define MLXSW_REG_MCAM_LEN 0x48 + +MLXSW_REG_DEFINE(mcam, MLXSW_REG_MCAM_ID, MLXSW_REG_MCAM_LEN); + +enum mlxsw_reg_mcam_feature_group { + /* Enhanced features.
*/ + MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES, +}; + +/* reg_mcam_feature_group + * Feature list mask index. + * Access: Index + */ +MLXSW_ITEM32(reg, mcam, feature_group, 0x00, 16, 8); + +enum mlxsw_reg_mcam_mng_feature_cap_mask_bits { + /* If set, MCIA supports 128 bytes payloads. Otherwise, 48 bytes. */ + MLXSW_REG_MCAM_MCIA_128B = 34, +}; + +#define MLXSW_REG_BYTES_PER_DWORD 0x4 + +/* reg_mcam_mng_feature_cap_mask + * Supported port's enhanced features. + * Based on feature_group index. + * When bit is set, the feature is supported in the device. + * Access: RO + */ +#define MLXSW_REG_MCAM_MNG_FEATURE_CAP_MASK_DWORD(_dw_num, _offset) \ + MLXSW_ITEM_BIT_ARRAY(reg, mcam, mng_feature_cap_mask_dw##_dw_num, \ + _offset, MLXSW_REG_BYTES_PER_DWORD, 1) + +/* The access to the bits in the field 'mng_feature_cap_mask' is not same to + * other mask fields in other registers. In most of the cases bit #0 is the + * first one in the last dword. In MCAM register, the first dword contains bits + * #0-#31 and so on, so the access to the bits is simpler using bit array per + * dword. Declare each dword of 'mng_feature_cap_mask' field separately. + */ +MLXSW_REG_MCAM_MNG_FEATURE_CAP_MASK_DWORD(0, 0x28); +MLXSW_REG_MCAM_MNG_FEATURE_CAP_MASK_DWORD(1, 0x2C); +MLXSW_REG_MCAM_MNG_FEATURE_CAP_MASK_DWORD(2, 0x30); +MLXSW_REG_MCAM_MNG_FEATURE_CAP_MASK_DWORD(3, 0x34); + +static inline void +mlxsw_reg_mcam_pack(char *payload, enum mlxsw_reg_mcam_feature_group feat_group) +{ + MLXSW_REG_ZERO(mcam, payload); + mlxsw_reg_mcam_feature_group_set(payload, feat_group); +} + +static inline void +mlxsw_reg_mcam_unpack(char *payload, + enum mlxsw_reg_mcam_mng_feature_cap_mask_bits bit, + bool *p_mng_feature_cap_val) +{ + int offset = bit % (MLXSW_REG_BYTES_PER_DWORD * BITS_PER_BYTE); + int dword = bit / (MLXSW_REG_BYTES_PER_DWORD * BITS_PER_BYTE); + u8 (*getters[])(const char *, u16) = { + mlxsw_reg_mcam_mng_feature_cap_mask_dw0_get, + mlxsw_reg_mcam_mng_feature_cap_mask_dw1_get, + mlxsw_reg_mcam_mng_feature_cap_mask_dw2_get, + mlxsw_reg_mcam_mng_feature_cap_mask_dw3_get, + }; + + if (!WARN_ON_ONCE(dword >= ARRAY_SIZE(getters))) + *p_mng_feature_cap_val = getters[dword](payload, offset); +} + /* MPSC - Monitoring Packet Sampling Configuration Register * -------------------------------------------------------- * MPSC Register is used to configure the Packet Sampling mechanism. 
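The MCAM register defined above lets the driver discover at probe time which management features the firmware implements; the MLXSW_REG_MCAM_MCIA_128B bit is what motivates the MCIA payload growth from 48 to 128 bytes seen earlier in this diff. A hedged sketch of how such a probe might look, built from the pack/unpack helpers just defined — the function name is hypothetical, and mlxsw_reg_query() is assumed to be the usual mlxsw core register-query entry point:

static int mlxsw_env_eeprom_read_size_probe(struct mlxsw_core *core,
                                            u8 *p_read_size)
{
        char mcam_pl[MLXSW_REG_MCAM_LEN];
        bool mcia_128b = false;
        int err;

        mlxsw_reg_mcam_pack(mcam_pl,
                            MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
        err = mlxsw_reg_query(core, MLXSW_REG(mcam), mcam_pl);
        if (err)
                /* Older firmware may not implement MCAM at all; a real
                 * implementation would likely fall back to the legacy
                 * 48-byte size here instead of failing.
                 */
                return err;

        mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B, &mcia_128b);
        *p_read_size = mcia_128b ? 128 : 48;
        return 0;
}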
@@ -12810,6 +12944,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(pacl), MLXSW_REG(pagt), MLXSW_REG(ptar), + MLXSW_REG(pprr), MLXSW_REG(ppbs), MLXSW_REG(prcr), MLXSW_REG(pefa), @@ -12892,10 +13027,11 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mcion), MLXSW_REG(mtpps), MLXSW_REG(mtutc), - MLXSW_REG(mpsc), MLXSW_REG(mcqi), MLXSW_REG(mcc), MLXSW_REG(mcda), + MLXSW_REG(mcam), + MLXSW_REG(mpsc), MLXSW_REG(mgpc), MLXSW_REG(mprs), MLXSW_REG(mogcr), diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index 19ae0d1c74a8..89dd2777ec4d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -39,6 +39,7 @@ enum mlxsw_res_id { MLXSW_RES_ID_ACL_FLEX_KEYS, MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE, MLXSW_RES_ID_ACL_ACTIONS_PER_SET, + MLXSW_RES_ID_ACL_MAX_L4_PORT_RANGE, MLXSW_RES_ID_ACL_MAX_ERPT_BANKS, MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE, MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID, @@ -99,6 +100,7 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_ACL_FLEX_KEYS] = 0x2910, [MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE] = 0x2911, [MLXSW_RES_ID_ACL_ACTIONS_PER_SET] = 0x2912, + [MLXSW_RES_ID_ACL_MAX_L4_PORT_RANGE] = 0x2920, [MLXSW_RES_ID_ACL_MAX_ERPT_BANKS] = 0x2940, [MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE] = 0x2941, [MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID] = 0x2942, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 25a01dafde1b..cec72d99d9c9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1132,8 +1132,8 @@ static int mlxsw_sp_port_add_vid(struct net_device *dev, return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid)); } -static int mlxsw_sp_port_kill_vid(struct net_device *dev, - __be16 __always_unused proto, u16 vid) +int mlxsw_sp_port_kill_vid(struct net_device *dev, + __be16 __always_unused proto, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; @@ -2692,6 +2692,63 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) kfree(mlxsw_sp->trap); } +static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp) +{ + char sgcr_pl[MLXSW_REG_SGCR_LEN]; + u16 max_lag; + int err; + + if (mlxsw_core_lag_mode(mlxsw_sp->core) != + MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW) + return 0; + + err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag); + if (err) + return err; + + /* In DDD mode, which we by default use, each LAG entry is 8 PGT + * entries. The LAG table address needs to be 8-aligned, but that ought + * to be the case, since the LAG table is allocated first. 
+ */ + err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &mlxsw_sp->lag_pgt_base, + max_lag * 8); + if (err) + return err; + if (WARN_ON_ONCE(mlxsw_sp->lag_pgt_base % 8)) { + err = -EINVAL; + goto err_mid_alloc_range; + } + + mlxsw_reg_sgcr_pack(sgcr_pl, mlxsw_sp->lag_pgt_base); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sgcr), sgcr_pl); + if (err) + goto err_mid_alloc_range; + + return 0; + +err_mid_alloc_range: + mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base, + max_lag * 8); + return err; +} + +static void mlxsw_sp_lag_pgt_fini(struct mlxsw_sp *mlxsw_sp) +{ + u16 max_lag; + int err; + + if (mlxsw_core_lag_mode(mlxsw_sp->core) != + MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW) + return; + + err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag); + if (err) + return; + + mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base, + max_lag * 8); +} + #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) @@ -2723,16 +2780,27 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS)) return -EIO; + err = mlxsw_sp_lag_pgt_init(mlxsw_sp); + if (err) + return err; + mlxsw_sp->lags = kcalloc(max_lag, sizeof(struct mlxsw_sp_upper), GFP_KERNEL); - if (!mlxsw_sp->lags) - return -ENOMEM; + if (!mlxsw_sp->lags) { + err = -ENOMEM; + goto err_kcalloc; + } return 0; + +err_kcalloc: + mlxsw_sp_lag_pgt_fini(mlxsw_sp); + return err; } static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp) { + mlxsw_sp_lag_pgt_fini(mlxsw_sp); kfree(mlxsw_sp->lags); } @@ -3113,6 +3181,15 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_pgt_init; } + /* Initialize before FIDs so that the LAG table is at the start of PGT + * and 8-aligned without overallocation. + */ + err = mlxsw_sp_lag_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); + goto err_lag_init; + } + err = mlxsw_sp_fids_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); @@ -3143,12 +3220,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_buffers_init; } - err = mlxsw_sp_lag_init(mlxsw_sp); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); - goto err_lag_init; - } - /* Initialize SPAN before router and switchdev, so that those components * can call mlxsw_sp_span_respin(). 
*/ @@ -3188,6 +3259,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_nve_init; } + err = mlxsw_sp_port_range_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize port ranges\n"); + goto err_port_range_init; + } + err = mlxsw_sp_acl_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); @@ -3280,6 +3357,8 @@ err_ptp_clock_init: err_router_init: mlxsw_sp_acl_fini(mlxsw_sp); err_acl_init: + mlxsw_sp_port_range_fini(mlxsw_sp); +err_port_range_init: mlxsw_sp_nve_fini(mlxsw_sp); err_nve_init: mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp); @@ -3292,8 +3371,6 @@ err_counter_pool_init: err_switchdev_init: mlxsw_sp_span_fini(mlxsw_sp); err_span_init: - mlxsw_sp_lag_fini(mlxsw_sp); -err_lag_init: mlxsw_sp_buffers_fini(mlxsw_sp); err_buffers_init: mlxsw_sp_devlink_traps_fini(mlxsw_sp); @@ -3304,6 +3381,8 @@ err_traps_init: err_policers_init: mlxsw_sp_fids_fini(mlxsw_sp); err_fids_init: + mlxsw_sp_lag_fini(mlxsw_sp); +err_lag_init: mlxsw_sp_pgt_fini(mlxsw_sp); err_pgt_init: mlxsw_sp_kvdl_fini(mlxsw_sp); @@ -3462,18 +3541,19 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) } mlxsw_sp_router_fini(mlxsw_sp); mlxsw_sp_acl_fini(mlxsw_sp); + mlxsw_sp_port_range_fini(mlxsw_sp); mlxsw_sp_nve_fini(mlxsw_sp); mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp); mlxsw_sp_afa_fini(mlxsw_sp); mlxsw_sp_counter_pool_fini(mlxsw_sp); mlxsw_sp_switchdev_fini(mlxsw_sp); mlxsw_sp_span_fini(mlxsw_sp); - mlxsw_sp_lag_fini(mlxsw_sp); mlxsw_sp_buffers_fini(mlxsw_sp); mlxsw_sp_devlink_traps_fini(mlxsw_sp); mlxsw_sp_traps_fini(mlxsw_sp); mlxsw_sp_policers_fini(mlxsw_sp); mlxsw_sp_fids_fini(mlxsw_sp); + mlxsw_sp_lag_fini(mlxsw_sp); mlxsw_sp_pgt_fini(mlxsw_sp); mlxsw_sp_kvdl_fini(mlxsw_sp); mlxsw_sp_parsing_fini(mlxsw_sp); @@ -3517,6 +3597,7 @@ static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { }, .used_cqe_time_stamp_type = 1, .cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC, + .lag_mode_prefer_sw = true, }; /* Reduce number of LAGs from full capacity (256) to the maximum supported LAGs @@ -3544,6 +3625,7 @@ static const struct mlxsw_config_profile mlxsw_sp4_config_profile = { }, .used_cqe_time_stamp_type = 1, .cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC, + .lag_mode_prefer_sw = true, }; static void @@ -3730,6 +3812,26 @@ static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core) &size_params); } +static int +mlxsw_sp_resources_port_range_register(struct mlxsw_core *mlxsw_core) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_core); + struct devlink_resource_size_params size_params; + u64 max; + + if (!MLXSW_CORE_RES_VALID(mlxsw_core, ACL_MAX_L4_PORT_RANGE)) + return -EIO; + + max = MLXSW_CORE_RES_GET(mlxsw_core, ACL_MAX_L4_PORT_RANGE); + devlink_resource_size_params_init(&size_params, max, max, 1, + DEVLINK_RESOURCE_UNIT_ENTRY); + + return devl_resource_register(devlink, "port_range_registers", max, + MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS, + DEVLINK_RESOURCE_ID_PARENT_TOP, + &size_params); +} + static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) { int err; @@ -3758,8 +3860,13 @@ static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) if (err) goto err_resources_rifs_register; + err = mlxsw_sp_resources_port_range_register(mlxsw_core); + if (err) + goto err_resources_port_range_register; + return 0; +err_resources_port_range_register: err_resources_rifs_register: err_resources_rif_mac_profile_register: 
err_policer_resources_register: @@ -3797,8 +3904,13 @@ static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) if (err) goto err_resources_rifs_register; + err = mlxsw_sp_resources_port_range_register(mlxsw_core); + if (err) + goto err_resources_port_range_register; + return 0; +err_resources_port_range_register: err_resources_rifs_register: err_resources_rif_mac_profile_register: err_policer_resources_register: @@ -4073,23 +4185,6 @@ struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) return (struct mlxsw_sp_port *)priv.data; } -struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) -{ - struct mlxsw_sp_port *mlxsw_sp_port; - - rcu_read_lock(); - mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); - if (mlxsw_sp_port) - dev_hold(mlxsw_sp_port->dev); - rcu_read_unlock(); - return mlxsw_sp_port; -} - -void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) -{ - dev_put(mlxsw_sp_port->dev); -} - int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp) { char mprs_pl[MLXSW_REG_MPRS_LEN]; @@ -4298,6 +4393,88 @@ static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, return -EBUSY; } +static int mlxsw_sp_lag_uppers_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *lag_dev, + struct netlink_ext_ack *extack) +{ + struct net_device *upper_dev; + struct net_device *master; + struct list_head *iter; + int done = 0; + int err; + + master = netdev_master_upper_dev_get(lag_dev); + if (master && netif_is_bridge_master(master)) { + err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, lag_dev, master, + extack); + if (err) + return err; + } + + netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { + if (!is_vlan_dev(upper_dev)) + continue; + + master = netdev_master_upper_dev_get(upper_dev); + if (master && netif_is_bridge_master(master)) { + err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, + upper_dev, master, + extack); + if (err) + goto err_port_bridge_join; + } + + ++done; + } + + return 0; + +err_port_bridge_join: + netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { + if (!is_vlan_dev(upper_dev)) + continue; + + master = netdev_master_upper_dev_get(upper_dev); + if (!master || !netif_is_bridge_master(master)) + continue; + + if (!done--) + break; + + mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master); + } + + master = netdev_master_upper_dev_get(lag_dev); + if (master && netif_is_bridge_master(master)) + mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master); + + return err; +} + +static void +mlxsw_sp_lag_uppers_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *lag_dev) +{ + struct net_device *upper_dev; + struct net_device *master; + struct list_head *iter; + + netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { + if (!is_vlan_dev(upper_dev)) + continue; + + master = netdev_master_upper_dev_get(upper_dev); + if (!master) + continue; + + mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master); + } + + master = netdev_master_upper_dev_get(lag_dev); + if (master) + mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master); +} + static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *lag_dev, struct netlink_ext_ack *extack) @@ -4322,6 +4499,12 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); if (err) return err; + + err = mlxsw_sp_lag_uppers_bridge_join(mlxsw_sp_port, lag_dev, + extack); + if (err) + goto 
err_lag_uppers_bridge_join; + err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); if (err) goto err_col_port_add; @@ -4342,8 +4525,14 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, if (err) goto err_router_join; + err = mlxsw_sp_netdevice_enslavement_replay(mlxsw_sp, lag_dev, extack); + if (err) + goto err_replay; + return 0; +err_replay: + mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev); err_router_join: lag->ref_count--; mlxsw_sp_port->lagged = 0; @@ -4351,6 +4540,8 @@ err_router_join: mlxsw_sp_port->local_port); mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); err_col_port_add: + mlxsw_sp_lag_uppers_bridge_leave(mlxsw_sp_port, lag_dev); +err_lag_uppers_bridge_join: if (!lag->ref_count) mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); return err; @@ -4600,9 +4791,62 @@ static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev, return true; } +static bool mlxsw_sp_netdev_is_master(struct net_device *upper_dev, + struct net_device *dev) +{ + return upper_dev == netdev_master_upper_dev_get(dev); +} + +static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp, + unsigned long event, void *ptr, + bool process_foreign); + +static int mlxsw_sp_netdevice_validate_uppers(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev, + struct netlink_ext_ack *extack) +{ + struct net_device *upper_dev; + struct list_head *iter; + int err; + + netdev_for_each_upper_dev_rcu(dev, upper_dev, iter) { + struct netdev_notifier_changeupper_info info = { + .info = { + .dev = dev, + .extack = extack, + }, + .master = mlxsw_sp_netdev_is_master(upper_dev, dev), + .upper_dev = upper_dev, + .linking = true, + + /* upper_info is relevant for LAG devices. But we would + * only need this if LAG were a valid upper above + * another upper (e.g. a bridge that is a member of a + * LAG), and that is never a valid configuration. So we + * can keep this as NULL. 
+ */ + .upper_info = NULL, + }; + + err = __mlxsw_sp_netdevice_event(mlxsw_sp, + NETDEV_PRECHANGEUPPER, + &info, true); + if (err) + return err; + + err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp, upper_dev, + extack); + if (err) + return err; + } + + return 0; +} + static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, struct net_device *dev, - unsigned long event, void *ptr) + unsigned long event, void *ptr, + bool replay_deslavement) { struct netdev_notifier_changeupper_info *info; struct mlxsw_sp_port *mlxsw_sp_port; @@ -4640,8 +4884,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, (!netif_is_bridge_master(upper_dev) || !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev))) { - NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); - return -EINVAL; + err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp, + upper_dev, + extack); + if (err) + return err; } if (netif_is_lag_master(upper_dev) && !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, @@ -4656,11 +4903,6 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); return -EINVAL; } - if (netif_is_macvlan(upper_dev) && - !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) { - NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); - return -EOPNOTSUPP; - } if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN"); return -EINVAL; @@ -4707,15 +4949,20 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; if (netif_is_bridge_master(upper_dev)) { - if (info->linking) + if (info->linking) { err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, lower_dev, upper_dev, extack); - else + } else { mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lower_dev, upper_dev); + if (!replay_deslavement) + break; + mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, + lower_dev); + } } else if (netif_is_lag_master(upper_dev)) { if (info->linking) { err = mlxsw_sp_port_lag_join(mlxsw_sp_port, @@ -4724,6 +4971,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); mlxsw_sp_port_lag_leave(mlxsw_sp_port, upper_dev); + mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, + dev); } } else if (netif_is_ovs_master(upper_dev)) { if (info->linking) @@ -4776,13 +5025,15 @@ static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, struct net_device *port_dev, - unsigned long event, void *ptr) + unsigned long event, void *ptr, + bool replay_deslavement) { switch (event) { case NETDEV_PRECHANGEUPPER: case NETDEV_CHANGEUPPER: return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev, - event, ptr); + event, ptr, + replay_deslavement); case NETDEV_CHANGELOWERSTATE: return mlxsw_sp_netdevice_port_lower_event(port_dev, event, ptr); @@ -4791,6 +5042,30 @@ static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev, return 0; } +/* Called for LAG or its upper VLAN after the per-LAG-lower processing was done, + * to do any per-LAG / per-LAG-upper processing. 
+ */ +static int mlxsw_sp_netdevice_post_lag_event(struct net_device *dev, + unsigned long event, + void *ptr) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(dev); + struct netdev_notifier_changeupper_info *info = ptr; + + if (!mlxsw_sp) + return 0; + + switch (event) { + case NETDEV_CHANGEUPPER: + if (info->linking) + break; + if (netif_is_bridge_master(info->upper_dev)) + mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, dev); + break; + } + return 0; +} + static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, unsigned long event, void *ptr) { @@ -4801,19 +5076,19 @@ static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, netdev_for_each_lower_dev(lag_dev, dev, iter) { if (mlxsw_sp_port_dev_check(dev)) { ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event, - ptr); + ptr, false); if (ret) return ret; } } - return 0; + return mlxsw_sp_netdevice_post_lag_event(lag_dev, event, ptr); } static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, struct net_device *dev, unsigned long event, void *ptr, - u16 vid) + u16 vid, bool replay_deslavement) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; @@ -4844,27 +5119,30 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, (!netif_is_bridge_master(upper_dev) || !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev))) { - NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); - return -EINVAL; - } - if (netif_is_macvlan(upper_dev) && - !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) { - NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); - return -EOPNOTSUPP; + err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp, + upper_dev, + extack); + if (err) + return err; } break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; if (netif_is_bridge_master(upper_dev)) { - if (info->linking) + if (info->linking) { err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, vlan_dev, upper_dev, extack); - else + } else { mlxsw_sp_port_bridge_leave(mlxsw_sp_port, vlan_dev, upper_dev); + if (!replay_deslavement) + break; + mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, + vlan_dev); + } } else if (netif_is_macvlan(upper_dev)) { if (!info->linking) mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); @@ -4888,26 +5166,26 @@ static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, if (mlxsw_sp_port_dev_check(dev)) { ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev, event, ptr, - vid); + vid, false); if (ret) return ret; } } - return 0; + return mlxsw_sp_netdevice_post_lag_event(vlan_dev, event, ptr); } -static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev, +static int mlxsw_sp_netdevice_bridge_vlan_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *vlan_dev, struct net_device *br_dev, unsigned long event, void *ptr, - u16 vid) + u16 vid, bool process_foreign) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); struct netdev_notifier_changeupper_info *info = ptr; struct netlink_ext_ack *extack; struct net_device *upper_dev; - if (!mlxsw_sp) + if (!process_foreign && !mlxsw_sp_lower_get(vlan_dev)) return 0; extack = netdev_notifier_info_to_extack(&info->info); @@ -4920,13 +5198,6 @@ static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev, NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); return -EOPNOTSUPP; } - if (!info->linking) - break; - if (netif_is_macvlan(upper_dev) && - 
!mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) { - NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); - return -EOPNOTSUPP; - } break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; @@ -4940,36 +5211,42 @@ static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev, return 0; } -static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, - unsigned long event, void *ptr) +static int mlxsw_sp_netdevice_vlan_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *vlan_dev, + unsigned long event, void *ptr, + bool process_foreign) { struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); u16 vid = vlan_dev_vlan_id(vlan_dev); if (mlxsw_sp_port_dev_check(real_dev)) return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev, - event, ptr, vid); + event, ptr, vid, + true); else if (netif_is_lag_master(real_dev)) return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, real_dev, event, ptr, vid); else if (netif_is_bridge_master(real_dev)) - return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev, - event, ptr, vid); + return mlxsw_sp_netdevice_bridge_vlan_event(mlxsw_sp, vlan_dev, + real_dev, event, + ptr, vid, + process_foreign); return 0; } -static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, - unsigned long event, void *ptr) +static int mlxsw_sp_netdevice_bridge_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev, + unsigned long event, void *ptr, + bool process_foreign) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev); struct netdev_notifier_changeupper_info *info = ptr; struct netlink_ext_ack *extack; struct net_device *upper_dev; u16 proto; - if (!mlxsw_sp) + if (!process_foreign && !mlxsw_sp_lower_get(br_dev)) return 0; extack = netdev_notifier_info_to_extack(&info->info); @@ -4997,11 +5274,6 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol"); return -EOPNOTSUPP; } - if (netif_is_macvlan(upper_dev) && - !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) { - NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); - return -EOPNOTSUPP; - } break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; @@ -5107,35 +5379,48 @@ static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, return 0; } -static int mlxsw_sp_netdevice_event(struct notifier_block *nb, - unsigned long event, void *ptr) +static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp, + unsigned long event, void *ptr, + bool process_foreign) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct mlxsw_sp_span_entry *span_entry; - struct mlxsw_sp *mlxsw_sp; int err = 0; - mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); if (event == NETDEV_UNREGISTER) { span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev); if (span_entry) mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry); } - mlxsw_sp_span_respin(mlxsw_sp); if (netif_is_vxlan(dev)) err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr); else if (mlxsw_sp_port_dev_check(dev)) - err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr); + err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr, true); else if (netif_is_lag_master(dev)) err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); else if (is_vlan_dev(dev)) - err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr); + err = mlxsw_sp_netdevice_vlan_event(mlxsw_sp, dev, event, ptr, + process_foreign); else if (netif_is_bridge_master(dev)) - 
err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr); + err = mlxsw_sp_netdevice_bridge_event(mlxsw_sp, dev, event, ptr, + process_foreign); else if (netif_is_macvlan(dev)) err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr); + return err; +} + +static int mlxsw_sp_netdevice_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct mlxsw_sp *mlxsw_sp; + int err; + + mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); + mlxsw_sp_span_respin(mlxsw_sp); + err = __mlxsw_sp_netdevice_event(mlxsw_sp, event, ptr, false); + return notifier_from_errno(err); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 231e364cbb7c..c70333b460ea 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -69,6 +69,7 @@ enum mlxsw_sp_resource_id { MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS, MLXSW_SP_RESOURCE_RIF_MAC_PROFILES, MLXSW_SP_RESOURCE_RIFS, + MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS, }; struct mlxsw_sp_port; @@ -175,6 +176,7 @@ struct mlxsw_sp { struct mlxsw_sp_acl *acl; struct mlxsw_sp_fid_core *fid_core; struct mlxsw_sp_policer_core *policer_core; + struct mlxsw_sp_port_range_core *pr_core; struct mlxsw_sp_kvdl *kvdl; struct mlxsw_sp_nve *nve; struct notifier_block netdevice_nb; @@ -210,6 +212,7 @@ struct mlxsw_sp { struct mutex ipv6_addr_ht_lock; /* Protects ipv6_addr_ht */ struct mlxsw_sp_pgt *pgt; bool pgt_smpe_index_valid; + u16 lag_pgt_base; }; struct mlxsw_sp_ptp_ops { @@ -698,6 +701,8 @@ int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, struct mlxsw_sp_port_vlan * mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); +int mlxsw_sp_port_kill_vid(struct net_device *dev, + __be16 __always_unused proto, u16 vid); int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, u16 vid_end, bool is_member, bool untagged); int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, @@ -716,8 +721,6 @@ int mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core, bool mlxsw_sp_port_dev_check(const struct net_device *dev); struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev); struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev); -struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev); -void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port); struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev); int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp); @@ -865,9 +868,13 @@ struct mlxsw_sp_acl_rule_info { egress_bind_blocker:1, counter_valid:1, policer_index_valid:1, - ipv6_valid:1; + ipv6_valid:1, + src_port_range_reg_valid:1, + dst_port_range_reg_valid:1; unsigned int counter_index; u16 policer_index; + u8 src_port_range_reg_index; + u8 dst_port_range_reg_index; struct { u32 prev_val; enum mlxsw_sp_acl_mangle_field prev_field; @@ -992,7 +999,8 @@ void mlxsw_sp_acl_ruleset_prio_get(struct mlxsw_sp_acl_ruleset *ruleset, struct mlxsw_sp_acl_rule_info * mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl, struct mlxsw_afa_block *afa_block); -void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei); +void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei); int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei); 
void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei, unsigned int priority); @@ -1043,6 +1051,9 @@ int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, u16 fid, struct netlink_ext_ack *extack); +int mlxsw_sp_acl_rulei_act_ignore(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + bool disable_learning, bool disable_security); int mlxsw_sp_acl_rulei_act_sample(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, struct mlxsw_sp_flow_block *block, @@ -1261,7 +1272,6 @@ int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port, struct flow_block_offload *f); /* spectrum_fid.c */ -bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index); struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp, u16 fid_index); int mlxsw_sp_fid_nve_ifindex(const struct mlxsw_sp_fid *fid, int *nve_ifindex); @@ -1394,10 +1404,6 @@ void mlxsw_sp_port_nve_fini(struct mlxsw_sp_port *mlxsw_sp_port); int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp); -/* spectrum_nve_vxlan.c */ -int mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp); -void mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp); - /* spectrum_trap.c */ int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_devlink_traps_fini(struct mlxsw_sp *mlxsw_sp); @@ -1475,7 +1481,7 @@ int mlxsw_sp_policer_resources_register(struct mlxsw_core *mlxsw_core); /* spectrum_pgt.c */ int mlxsw_sp_pgt_mid_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_mid); void mlxsw_sp_pgt_mid_free(struct mlxsw_sp *mlxsw_sp, u16 mid_base); -int mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, +int mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 *mid_base, u16 count); void mlxsw_sp_pgt_mid_free_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count); @@ -1484,4 +1490,18 @@ int mlxsw_sp_pgt_entry_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid, int mlxsw_sp_pgt_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_pgt_fini(struct mlxsw_sp *mlxsw_sp); +/* spectrum_port_range.c */ +struct mlxsw_sp_port_range { + u16 min; + u16 max; + u8 source:1; /* Source or destination */ +}; + +int mlxsw_sp_port_range_reg_get(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_port_range *range, + struct netlink_ext_ack *extack, + u8 *p_prr_index); +void mlxsw_sp_port_range_reg_put(struct mlxsw_sp *mlxsw_sp, u8 prr_index); +int mlxsw_sp_port_range_init(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_port_range_fini(struct mlxsw_sp *mlxsw_sp); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c index 3a636f753607..dfcdd37e797b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c @@ -90,7 +90,7 @@ mlxsw_sp1_acl_ctcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp, err_entry_add: err_rulei_commit: err_rulei_act_continue: - mlxsw_sp_acl_rulei_destroy(rulei); + mlxsw_sp_acl_rulei_destroy(mlxsw_sp, rulei); err_rulei_create: mlxsw_sp_acl_ctcam_chunk_fini(®ion->catchall.cchunk); return err; @@ -105,7 +105,7 @@ mlxsw_sp1_acl_ctcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, ®ion->cregion, ®ion->catchall.cchunk, ®ion->catchall.centry); - mlxsw_sp_acl_rulei_destroy(rulei); + mlxsw_sp_acl_rulei_destroy(mlxsw_sp, rulei); 
mlxsw_sp_acl_ctcam_chunk_fini(®ion->catchall.cchunk); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c index b1178b7a7f51..99eeafdc8d1e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c @@ -45,8 +45,7 @@ static int mlxsw_sp2_mr_tcam_bind_group(struct mlxsw_sp *mlxsw_sp, } static const enum mlxsw_afk_element mlxsw_sp2_mr_tcam_usage_ipv4[] = { - MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB, - MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB, + MLXSW_AFK_ELEMENT_VIRT_ROUTER, MLXSW_AFK_ELEMENT_SRC_IP_0_31, MLXSW_AFK_ELEMENT_DST_IP_0_31, }; @@ -89,8 +88,9 @@ static void mlxsw_sp2_mr_tcam_ipv4_fini(struct mlxsw_sp2_mr_tcam *mr_tcam) } static const enum mlxsw_afk_element mlxsw_sp2_mr_tcam_usage_ipv6[] = { + MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_3, + MLXSW_AFK_ELEMENT_VIRT_ROUTER_4_7, MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB, - MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB, MLXSW_AFK_ELEMENT_SRC_IP_96_127, MLXSW_AFK_ELEMENT_SRC_IP_64_95, MLXSW_AFK_ELEMENT_SRC_IP_32_63, @@ -142,6 +142,8 @@ static void mlxsw_sp2_mr_tcam_rule_parse4(struct mlxsw_sp_acl_rule_info *rulei, struct mlxsw_sp_mr_route_key *key) { + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER, + key->vrid, GENMASK(11, 0)); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31, (char *) &key->source.addr4, (char *) &key->source_mask.addr4, 4); @@ -154,6 +156,13 @@ static void mlxsw_sp2_mr_tcam_rule_parse6(struct mlxsw_sp_acl_rule_info *rulei, struct mlxsw_sp_mr_route_key *key) { + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_3, + key->vrid, GENMASK(3, 0)); + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_4_7, + key->vrid >> 4, GENMASK(3, 0)); + mlxsw_sp_acl_rulei_keymask_u32(rulei, + MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB, + key->vrid >> 8, GENMASK(3, 0)); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127, &key->source.addr6.s6_addr[0x0], &key->source_mask.addr6.s6_addr[0x0], 4); @@ -189,11 +198,6 @@ mlxsw_sp2_mr_tcam_rule_parse(struct mlxsw_sp_acl_rule *rule, rulei = mlxsw_sp_acl_rule_rulei(rule); rulei->priority = priority; - mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB, - key->vrid, GENMASK(7, 0)); - mlxsw_sp_acl_rulei_keymask_u32(rulei, - MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB, - key->vrid >> 8, GENMASK(3, 0)); switch (key->proto) { case MLXSW_SP_L3_PROTO_IPV4: return mlxsw_sp2_mr_tcam_rule_parse4(rulei, key); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 0423ac262d89..7c59c8a13584 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -339,10 +339,17 @@ err_afa_block_create: return ERR_PTR(err); } -void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei) +void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei) { if (rulei->action_created) mlxsw_afa_block_destroy(rulei->act_block); + if (rulei->src_port_range_reg_valid) + mlxsw_sp_port_range_reg_put(mlxsw_sp, + rulei->src_port_range_reg_index); + if (rulei->dst_port_range_reg_valid) + mlxsw_sp_port_range_reg_put(mlxsw_sp, + rulei->dst_port_range_reg_index); kfree(rulei); } @@ -768,6 +775,15 @@ int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp, return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack); } +int 
mlxsw_sp_acl_rulei_act_ignore(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + bool disable_learning, bool disable_security) +{ + return mlxsw_afa_block_append_ignore(rulei->act_block, + disable_learning, + disable_security); +} + int mlxsw_sp_acl_rulei_act_sample(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, struct mlxsw_sp_flow_block *block, @@ -834,7 +850,7 @@ void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; - mlxsw_sp_acl_rulei_destroy(rule->rulei); + mlxsw_sp_acl_rulei_destroy(mlxsw_sp, rule->rulei); kfree(rule); mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c index e2aced7ab454..95f63fcf4ba1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c @@ -496,7 +496,7 @@ mlxsw_sp_acl_bf_init(struct mlxsw_sp *mlxsw_sp, unsigned int num_erp_banks) * is 2^ACL_MAX_BF_LOG */ bf_bank_size = 1 << MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_BF_LOG); - bf = kzalloc(struct_size(bf, refcnt, bf_bank_size * num_erp_banks), + bf = kzalloc(struct_size(bf, refcnt, size_mul(bf_bank_size, num_erp_banks)), GFP_KERNEL); if (!bf) return ERR_PTR(-ENOMEM); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c index ae2d6f12b799..eaad78605602 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c @@ -31,12 +31,14 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = { static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = { MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4), + MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 16, 16), MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = { MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x00, 4), + MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 16, 16), MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), }; @@ -169,20 +171,22 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = { MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x04, 16, 8), }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4[] = { - MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 24, 8), - MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x00, 0, 3, 0, true), +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5[] = { + MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER, 0x04, 20, 11, 0, true), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = { + MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_0_3, 0x00, 0, 4), MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x04, 4), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_1[] = { + MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_4_7, 0x00, 0, 4), MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2[] = { + MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x00, 0, 3, 0, true), MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4), }; @@ -205,6 +209,7 @@ static struct mlxsw_afk_element_inst 
mlxsw_sp_afk_element_info_l4_0[] = { static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_2[] = { MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x04, 16, 9), /* TCP_CONTROL + TCP_ECN */ + MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 0, 16), }; static const struct mlxsw_afk_block mlxsw_sp2_afk_blocks[] = { @@ -217,7 +222,7 @@ static const struct mlxsw_afk_block mlxsw_sp2_afk_blocks[] = { MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0), MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1), MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2), - MLXSW_AFK_BLOCK(0x3C, mlxsw_sp_afk_element_info_ipv4_4), + MLXSW_AFK_BLOCK(0x3D, mlxsw_sp_afk_element_info_ipv4_5), MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0), MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1), MLXSW_AFK_BLOCK(0x42, mlxsw_sp_afk_element_info_ipv6_2), @@ -319,33 +324,33 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] = { MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 9, -1, true), /* RX_ACL_SYSTEM_PORT */ }; -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4b[] = { - MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 13, 8), - MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x04, 21, 4), +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5b[] = { + MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER, 0x04, 20, 12), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = { + MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x00, 0, 4), MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4), }; static const struct mlxsw_afk_block mlxsw_sp4_afk_blocks[] = { - MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_mac_0), - MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_mac_1), + MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x10, mlxsw_sp_afk_element_info_mac_0), + MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x11, mlxsw_sp_afk_element_info_mac_1), MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_mac_2), MLXSW_AFK_BLOCK(0x13, mlxsw_sp_afk_element_info_mac_3), MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4), - MLXSW_AFK_BLOCK(0x1A, mlxsw_sp_afk_element_info_mac_5b), - MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0), - MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1), + MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x1A, mlxsw_sp_afk_element_info_mac_5b), + MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x38, mlxsw_sp_afk_element_info_ipv4_0), + MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x39, mlxsw_sp_afk_element_info_ipv4_1), MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2), - MLXSW_AFK_BLOCK(0x35, mlxsw_sp_afk_element_info_ipv4_4b), + MLXSW_AFK_BLOCK(0x36, mlxsw_sp_afk_element_info_ipv4_5b), MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0), MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1), MLXSW_AFK_BLOCK(0x47, mlxsw_sp_afk_element_info_ipv6_2b), MLXSW_AFK_BLOCK(0x43, mlxsw_sp_afk_element_info_ipv6_3), MLXSW_AFK_BLOCK(0x44, mlxsw_sp_afk_element_info_ipv6_4), MLXSW_AFK_BLOCK(0x45, mlxsw_sp_afk_element_info_ipv6_5), - MLXSW_AFK_BLOCK(0x90, mlxsw_sp_afk_element_info_l4_0), + MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x90, mlxsw_sp_afk_element_info_l4_0), MLXSW_AFK_BLOCK(0x92, mlxsw_sp_afk_element_info_l4_2), }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c index ee59c79156e4..50e591420bd9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c @@ -24,7 +24,7 @@ struct mlxsw_sp_counter_pool { spinlock_t counter_pool_lock; /* Protects counter pool allocations 
*/ atomic_t active_entries_count; unsigned int sub_pools_count; - struct mlxsw_sp_counter_sub_pool sub_pools[]; + struct mlxsw_sp_counter_sub_pool sub_pools[] __counted_by(sub_pools_count); }; static const struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index 472830d07ac1..0f29e9c19411 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -619,7 +619,7 @@ static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) int i; for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { - snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", + snprintf(*p, ETH_GSTRING_LEN, "%.28s_%d", mlxsw_sp_port_hw_tc_stats[i].str, tc); *p += ETH_GSTRING_LEN; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index b6ee2d658b0c..e954b8cd2ee8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c @@ -137,16 +137,6 @@ static const int *mlxsw_sp_packet_type_sfgc_types[] = { [MLXSW_SP_FLOOD_TYPE_MC] = mlxsw_sp_sfgc_mc_packet_types, }; -bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index) -{ - enum mlxsw_sp_fid_type fid_type = MLXSW_SP_FID_TYPE_DUMMY; - struct mlxsw_sp_fid_family *fid_family; - - fid_family = mlxsw_sp->fid_core->fid_family_arr[fid_type]; - - return fid_family->start_index == fid_index; -} - struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp, u16 fid_index) { @@ -331,6 +321,14 @@ mlxsw_sp_fid_family_num_fids(const struct mlxsw_sp_fid_family *fid_family) } static u16 +mlxsw_sp_fid_family_pgt_size(const struct mlxsw_sp_fid_family *fid_family) +{ + u16 num_fids = mlxsw_sp_fid_family_num_fids(fid_family); + + return num_fids * fid_family->nr_flood_tables; +} + +static u16 mlxsw_sp_fid_flood_table_mid(const struct mlxsw_sp_fid_family *fid_family, const struct mlxsw_sp_flood_table *flood_table, u16 fid_offset) @@ -1078,8 +1076,6 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = { #define MLXSW_SP_FID_8021Q_MAX (VLAN_N_VID - 2) #define MLXSW_SP_FID_RFID_MAX (11 * 1024) -#define MLXSW_SP_FID_8021Q_PGT_BASE 0 -#define MLXSW_SP_FID_8021D_PGT_BASE (3 * MLXSW_SP_FID_8021Q_MAX) static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021d_flood_tables[] = { { @@ -1444,7 +1440,6 @@ static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021q_family = { .ops = &mlxsw_sp_fid_8021q_ops, .flood_rsp = false, .bridge_type = MLXSW_REG_BRIDGE_TYPE_0, - .pgt_base = MLXSW_SP_FID_8021Q_PGT_BASE, .smpe_index_valid = false, }; @@ -1458,7 +1453,6 @@ static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021d_family = { .rif_type = MLXSW_SP_RIF_TYPE_FID, .ops = &mlxsw_sp_fid_8021d_ops, .bridge_type = MLXSW_REG_BRIDGE_TYPE_1, - .pgt_base = MLXSW_SP_FID_8021D_PGT_BASE, .smpe_index_valid = false, }; @@ -1500,7 +1494,6 @@ static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021q_family = { .ops = &mlxsw_sp_fid_8021q_ops, .flood_rsp = false, .bridge_type = MLXSW_REG_BRIDGE_TYPE_0, - .pgt_base = MLXSW_SP_FID_8021Q_PGT_BASE, .smpe_index_valid = true, }; @@ -1514,7 +1507,6 @@ static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021d_family = { .rif_type = MLXSW_SP_RIF_TYPE_FID, .ops = &mlxsw_sp_fid_8021d_ops, .bridge_type = MLXSW_REG_BRIDGE_TYPE_1, - .pgt_base = MLXSW_SP_FID_8021D_PGT_BASE, .smpe_index_valid = true, }; @@ -1664,14 +1656,10 @@ 
mlxsw_sp_fid_flood_table_init(struct mlxsw_sp_fid_family *fid_family, enum mlxsw_sp_flood_type packet_type = flood_table->packet_type; struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp; const int *sfgc_packet_types; - u16 num_fids, mid_base; + u16 mid_base; int err, i; mid_base = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table, 0); - num_fids = mlxsw_sp_fid_family_num_fids(fid_family); - err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, mid_base, num_fids); - if (err) - return err; sfgc_packet_types = mlxsw_sp_packet_type_sfgc_types[packet_type]; for (i = 0; i < MLXSW_REG_SFGC_TYPE_MAX; i++) { @@ -1685,57 +1673,56 @@ mlxsw_sp_fid_flood_table_init(struct mlxsw_sp_fid_family *fid_family, err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfgc), sfgc_pl); if (err) - goto err_reg_write; + return err; } return 0; - -err_reg_write: - mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mid_base, num_fids); - return err; -} - -static void -mlxsw_sp_fid_flood_table_fini(struct mlxsw_sp_fid_family *fid_family, - const struct mlxsw_sp_flood_table *flood_table) -{ - struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp; - u16 num_fids, mid_base; - - mid_base = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table, 0); - num_fids = mlxsw_sp_fid_family_num_fids(fid_family); - mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mid_base, num_fids); } static int mlxsw_sp_fid_flood_tables_init(struct mlxsw_sp_fid_family *fid_family) { + struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp; + u16 pgt_size; + int err; int i; + if (!fid_family->nr_flood_tables) + return 0; + + pgt_size = mlxsw_sp_fid_family_pgt_size(fid_family); + err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &fid_family->pgt_base, + pgt_size); + if (err) + return err; + for (i = 0; i < fid_family->nr_flood_tables; i++) { const struct mlxsw_sp_flood_table *flood_table; - int err; flood_table = &fid_family->flood_tables[i]; err = mlxsw_sp_fid_flood_table_init(fid_family, flood_table); if (err) - return err; + goto err_flood_table_init; } return 0; + +err_flood_table_init: + mlxsw_sp_pgt_mid_free_range(mlxsw_sp, fid_family->pgt_base, pgt_size); + return err; } static void mlxsw_sp_fid_flood_tables_fini(struct mlxsw_sp_fid_family *fid_family) { - int i; + struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp; + u16 pgt_size; - for (i = 0; i < fid_family->nr_flood_tables; i++) { - const struct mlxsw_sp_flood_table *flood_table; + if (!fid_family->nr_flood_tables) + return; - flood_table = &fid_family->flood_tables[i]; - mlxsw_sp_fid_flood_table_fini(fid_family, flood_table); - } + pgt_size = mlxsw_sp_fid_family_pgt_size(fid_family); + mlxsw_sp_pgt_mid_free_range(mlxsw_sp, fid_family->pgt_base, pgt_size); } static int mlxsw_sp_fid_family_register(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 72917f09e806..9fd1ca079258 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -160,6 +160,16 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, */ rulei->egress_bind_blocker = 1; + /* Ignore learning and security lookup as redirection + * using ingress filters happens before the bridge. 
+ */ + err = mlxsw_sp_acl_rulei_act_ignore(mlxsw_sp, rulei, + true, true); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Cannot append ignore action"); + return err; + } + fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp); fid_index = mlxsw_sp_fid_index(fid); err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei, @@ -418,6 +428,68 @@ static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp, return 0; } +static int +mlxsw_sp_flower_parse_ports_range(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + struct flow_cls_offload *f, u8 ip_proto) +{ + const struct flow_rule *rule = flow_cls_offload_flow_rule(f); + struct flow_match_ports_range match; + u32 key_mask_value = 0; + + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS_RANGE)) + return 0; + + if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) { + NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported"); + return -EINVAL; + } + + flow_rule_match_ports_range(rule, &match); + + if (match.mask->tp_min.src) { + struct mlxsw_sp_port_range range = { + .min = ntohs(match.key->tp_min.src), + .max = ntohs(match.key->tp_max.src), + .source = true, + }; + u8 prr_index; + int err; + + err = mlxsw_sp_port_range_reg_get(mlxsw_sp, &range, + f->common.extack, &prr_index); + if (err) + return err; + + rulei->src_port_range_reg_index = prr_index; + rulei->src_port_range_reg_valid = true; + key_mask_value |= BIT(prr_index); + } + + if (match.mask->tp_min.dst) { + struct mlxsw_sp_port_range range = { + .min = ntohs(match.key->tp_min.dst), + .max = ntohs(match.key->tp_max.dst), + }; + u8 prr_index; + int err; + + err = mlxsw_sp_port_range_reg_get(mlxsw_sp, &range, + f->common.extack, &prr_index); + if (err) + return err; + + rulei->dst_port_range_reg_index = prr_index; + rulei->dst_port_range_reg_valid = true; + key_mask_value |= BIT(prr_index); + } + + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_L4_PORT_RANGE, + key_mask_value, key_mask_value); + + return 0; +} + static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, struct flow_cls_offload *f, @@ -496,16 +568,17 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, int err; if (dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_META) | - BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_PORTS) | - BIT(FLOW_DISSECTOR_KEY_TCP) | - BIT(FLOW_DISSECTOR_KEY_IP) | - BIT(FLOW_DISSECTOR_KEY_VLAN))) { + ~(BIT_ULL(FLOW_DISSECTOR_KEY_META) | + BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS_RANGE) | + BIT_ULL(FLOW_DISSECTOR_KEY_TCP) | + BIT_ULL(FLOW_DISSECTOR_KEY_IP) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN))) { dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n"); NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key"); return -EOPNOTSUPP; @@ -604,6 +677,11 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto); if (err) return err; + + err = mlxsw_sp_flower_parse_ports_range(mlxsw_sp, rulei, f, ip_proto); + if (err) + return err; + err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto); if (err) return err; diff --git 
a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c index d2b57a045aa4..5479a1c19d2e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c @@ -989,6 +989,9 @@ void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp, int nve_ifindex; __be32 vni; + /* Necessary for __dev_get_by_index() below. */ + ASSERT_RTNL(); + mlxsw_sp_nve_flood_ip_flush(mlxsw_sp, fid); mlxsw_sp_nve_fdb_flush_by_fid(mlxsw_sp, fid_index); mlxsw_sp_nve_ipv6_addr_flush_by_fid(mlxsw_sp, fid_index); @@ -997,15 +1000,13 @@ void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_fid_vni(fid, &vni))) goto out; - nve_dev = dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex); + nve_dev = __dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex); if (!nve_dev) goto out; mlxsw_sp_nve_fdb_clear_offload(mlxsw_sp, fid, nve_dev, vni); mlxsw_sp_fid_fdb_clear_offload(fid, nve_dev); - dev_put(nve_dev); - out: mlxsw_sp_fid_vni_clear(fid); mlxsw_sp_nve_tunnel_fini(mlxsw_sp); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c index bb8eeb86edf7..52c2fe3644d4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c @@ -310,8 +310,8 @@ const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = { .fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload, }; -static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp, - bool learning_en) +static int mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp, + bool learning_en) { char tnpc_pl[MLXSW_REG_TNPC_LEN]; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c index 7dd3dba0fa83..4ef81bac17d6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c @@ -54,25 +54,15 @@ void mlxsw_sp_pgt_mid_free(struct mlxsw_sp *mlxsw_sp, u16 mid_base) mutex_unlock(&mlxsw_sp->pgt->lock); } -int -mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count) +int mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 *p_mid_base, + u16 count) { - unsigned int idr_cursor; + unsigned int mid_base; int i, err; mutex_lock(&mlxsw_sp->pgt->lock); - /* This function is supposed to be called several times as part of - * driver init, in specific order. Verify that the mid_index is the - * first free index in the idr, to be able to free the indexes in case - * of error. 
- */ - idr_cursor = idr_get_cursor(&mlxsw_sp->pgt->pgt_idr); - if (WARN_ON(idr_cursor != mid_base)) { - err = -EINVAL; - goto err_idr_cursor; - } - + mid_base = idr_get_cursor(&mlxsw_sp->pgt->pgt_idr); for (i = 0; i < count; i++) { err = idr_alloc_cyclic(&mlxsw_sp->pgt->pgt_idr, NULL, mid_base, mid_base + count, GFP_KERNEL); @@ -81,12 +71,12 @@ mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count) } mutex_unlock(&mlxsw_sp->pgt->lock); + *p_mid_base = mid_base; return 0; err_idr_alloc_cyclic: for (i--; i >= 0; i--) idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base + i); -err_idr_cursor: mutex_unlock(&mlxsw_sp->pgt->lock); return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c new file mode 100644 index 000000000000..2d193de12be6 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c @@ -0,0 +1,200 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#include <linux/bits.h> +#include <linux/netlink.h> +#include <linux/refcount.h> +#include <linux/xarray.h> +#include <net/devlink.h> + +#include "spectrum.h" + +struct mlxsw_sp_port_range_reg { + struct mlxsw_sp_port_range range; + refcount_t refcount; + u32 index; +}; + +struct mlxsw_sp_port_range_core { + struct xarray prr_xa; + struct xa_limit prr_ids; + atomic_t prr_count; +}; + +static int +mlxsw_sp_port_range_reg_configure(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_port_range_reg *prr) +{ + char pprr_pl[MLXSW_REG_PPRR_LEN]; + + /* We do not care if packet is IPv4/IPv6 and TCP/UDP, so set all four + * fields. + */ + mlxsw_reg_pprr_pack(pprr_pl, prr->index); + mlxsw_reg_pprr_ipv4_set(pprr_pl, true); + mlxsw_reg_pprr_ipv6_set(pprr_pl, true); + mlxsw_reg_pprr_src_set(pprr_pl, prr->range.source); + mlxsw_reg_pprr_dst_set(pprr_pl, !prr->range.source); + mlxsw_reg_pprr_tcp_set(pprr_pl, true); + mlxsw_reg_pprr_udp_set(pprr_pl, true); + mlxsw_reg_pprr_port_range_min_set(pprr_pl, prr->range.min); + mlxsw_reg_pprr_port_range_max_set(pprr_pl, prr->range.max); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pprr), pprr_pl); +} + +static struct mlxsw_sp_port_range_reg * +mlxsw_sp_port_range_reg_create(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_port_range *range, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_port_range_core *pr_core = mlxsw_sp->pr_core; + struct mlxsw_sp_port_range_reg *prr; + int err; + + prr = kzalloc(sizeof(*prr), GFP_KERNEL); + if (!prr) + return ERR_PTR(-ENOMEM); + + prr->range = *range; + refcount_set(&prr->refcount, 1); + + err = xa_alloc(&pr_core->prr_xa, &prr->index, prr, pr_core->prr_ids, + GFP_KERNEL); + if (err) { + if (err == -EBUSY) + NL_SET_ERR_MSG_MOD(extack, "Exceeded number of port range registers"); + goto err_xa_alloc; + } + + err = mlxsw_sp_port_range_reg_configure(mlxsw_sp, prr); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to configure port range register"); + goto err_reg_configure; + } + + atomic_inc(&pr_core->prr_count); + + return prr; + +err_reg_configure: + xa_erase(&pr_core->prr_xa, prr->index); +err_xa_alloc: + kfree(prr); + return ERR_PTR(err); +} + +static void mlxsw_sp_port_range_reg_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_port_range_reg *prr) +{ + struct mlxsw_sp_port_range_core *pr_core = mlxsw_sp->pr_core; + + atomic_dec(&pr_core->prr_count); + xa_erase(&pr_core->prr_xa, prr->index); + kfree(prr); +} + +static struct 
mlxsw_sp_port_range_reg * +mlxsw_sp_port_range_reg_find(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_port_range *range) +{ + struct mlxsw_sp_port_range_core *pr_core = mlxsw_sp->pr_core; + struct mlxsw_sp_port_range_reg *prr; + unsigned long index; + + xa_for_each(&pr_core->prr_xa, index, prr) { + if (prr->range.min == range->min && + prr->range.max == range->max && + prr->range.source == range->source) + return prr; + } + + return NULL; +} + +int mlxsw_sp_port_range_reg_get(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_port_range *range, + struct netlink_ext_ack *extack, + u8 *p_prr_index) +{ + struct mlxsw_sp_port_range_reg *prr; + + prr = mlxsw_sp_port_range_reg_find(mlxsw_sp, range); + if (prr) { + refcount_inc(&prr->refcount); + *p_prr_index = prr->index; + return 0; + } + + prr = mlxsw_sp_port_range_reg_create(mlxsw_sp, range, extack); + if (IS_ERR(prr)) + return PTR_ERR(prr); + + *p_prr_index = prr->index; + + return 0; +} + +void mlxsw_sp_port_range_reg_put(struct mlxsw_sp *mlxsw_sp, u8 prr_index) +{ + struct mlxsw_sp_port_range_core *pr_core = mlxsw_sp->pr_core; + struct mlxsw_sp_port_range_reg *prr; + + prr = xa_load(&pr_core->prr_xa, prr_index); + if (WARN_ON(!prr)) + return; + + if (!refcount_dec_and_test(&prr->refcount)) + return; + + mlxsw_sp_port_range_reg_destroy(mlxsw_sp, prr); +} + +static u64 mlxsw_sp_port_range_reg_occ_get(void *priv) +{ + struct mlxsw_sp_port_range_core *pr_core = priv; + + return atomic_read(&pr_core->prr_count); +} + +int mlxsw_sp_port_range_init(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_port_range_core *pr_core; + struct mlxsw_core *core = mlxsw_sp->core; + u64 max; + + if (!MLXSW_CORE_RES_VALID(core, ACL_MAX_L4_PORT_RANGE)) + return -EIO; + max = MLXSW_CORE_RES_GET(core, ACL_MAX_L4_PORT_RANGE); + + /* Each port range register is represented using a single bit in the + * two bytes "l4_port_range" ACL key element. 
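
As the flower parser earlier in this diff shows, each allocated port range register contributes one bit to the L4_PORT_RANGE key, and the same value is passed as both key and mask, so a rule matches only when every referenced range check passed in hardware. A condensed illustration (the register indexes are hypothetical):

u32 key_mask = 0;

key_mask |= BIT(1);     /* source-port range held in register 1 */
key_mask |= BIT(3);     /* destination-port range held in register 3 */
/* key == mask == 0x0a: both range hits are required for a match */
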
+ */ + WARN_ON(max > BITS_PER_BYTE * sizeof(u16)); + + pr_core = kzalloc(sizeof(*mlxsw_sp->pr_core), GFP_KERNEL); + if (!pr_core) + return -ENOMEM; + mlxsw_sp->pr_core = pr_core; + + pr_core->prr_ids.max = max - 1; + xa_init_flags(&pr_core->prr_xa, XA_FLAGS_ALLOC); + + devl_resource_occ_get_register(priv_to_devlink(core), + MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS, + mlxsw_sp_port_range_reg_occ_get, + pr_core); + + return 0; +} + +void mlxsw_sp_port_range_fini(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_port_range_core *pr_core = mlxsw_sp->pr_core; + + devl_resource_occ_get_unregister(priv_to_devlink(mlxsw_sp->core), + MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS); + WARN_ON(!xa_empty(&pr_core->prr_xa)); + xa_destroy(&pr_core->prr_xa); + kfree(pr_core); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index b32adf277a22..82a95125d9ca 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -71,6 +71,7 @@ static const struct rhashtable_params mlxsw_sp_crif_ht_params = { struct mlxsw_sp_rif { struct mlxsw_sp_crif *crif; /* NULL for underlay RIF */ + netdevice_tracker dev_tracker; struct list_head neigh_list; struct mlxsw_sp_fid *fid; unsigned char addr[ETH_ALEN]; @@ -139,6 +140,7 @@ struct mlxsw_sp_rif_ops { struct netlink_ext_ack *extack); void (*deconfigure)(struct mlxsw_sp_rif *rif); struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif, + const struct mlxsw_sp_rif_params *params, struct netlink_ext_ack *extack); void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac); }; @@ -2871,6 +2873,21 @@ static bool mlxsw_sp_dev_lower_is_port(struct net_device *dev) return !!mlxsw_sp_port; } +static int mlxsw_sp_router_schedule_neigh_work(struct mlxsw_sp_router *router, + struct neighbour *n) +{ + struct net *net; + + net = neigh_parms_net(n->parms); + + /* Take a reference to ensure the neighbour won't be destructed until we + * drop the reference in delayed work. + */ + neigh_clone(n); + return mlxsw_sp_router_schedule_work(net, router, n, + mlxsw_sp_router_neigh_event_work); +} + static int mlxsw_sp_router_netevent_event(struct notifier_block *nb, unsigned long event, void *ptr) { @@ -2878,7 +2895,6 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *nb, unsigned long interval; struct neigh_parms *p; struct neighbour *n; - struct net *net; router = container_of(nb, struct mlxsw_sp_router, netevent_nb); @@ -2902,7 +2918,6 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *nb, break; case NETEVENT_NEIGH_UPDATE: n = ptr; - net = neigh_parms_net(n->parms); if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6) return NOTIFY_DONE; @@ -2910,13 +2925,7 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *nb, if (!mlxsw_sp_dev_lower_is_port(n->dev)) return NOTIFY_DONE; - /* Take a reference to ensure the neighbour won't be - * destructed until we drop the reference in delayed - * work. 
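
The helper introduced in this hunk wraps a common hold-then-defer shape: bump the neighbour's refcount before the pointer escapes to deferred work, and have the work handler drop it once done. Roughly (the queueing step is a stand-in for the driver's own helper; neigh_release() is the documented counterpart of neigh_clone()):

neigh_clone(n);                 /* refcount++: n must outlive this context */
err = defer_neigh_work(n);      /* hypothetical deferral */
if (err)
        neigh_release(n);       /* no handler will consume the reference */
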
- */ - neigh_clone(n); - return mlxsw_sp_router_schedule_work(net, router, n, - mlxsw_sp_router_neigh_event_work); + return mlxsw_sp_router_schedule_neigh_work(router, n); case NETEVENT_IPV4_MPATH_HASH_UPDATE: case NETEVENT_IPV6_MPATH_HASH_UPDATE: @@ -2975,6 +2984,52 @@ static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, } } +struct mlxsw_sp_neigh_rif_made_sync { + struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_rif *rif; + int err; +}; + +static void mlxsw_sp_neigh_rif_made_sync_each(struct neighbour *n, void *data) +{ + struct mlxsw_sp_neigh_rif_made_sync *rms = data; + int rc; + + if (rms->err) + return; + if (n->dev != mlxsw_sp_rif_dev(rms->rif)) + return; + rc = mlxsw_sp_router_schedule_neigh_work(rms->mlxsw_sp->router, n); + if (rc != NOTIFY_DONE) + rms->err = -ENOMEM; +} + +static int mlxsw_sp_neigh_rif_made_sync(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *rif) +{ + struct mlxsw_sp_neigh_rif_made_sync rms = { + .mlxsw_sp = mlxsw_sp, + .rif = rif, + }; + + neigh_for_each(&arp_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms); + if (rms.err) + goto err_arp; + +#if IS_ENABLED(CONFIG_IPV6) + neigh_for_each(&nd_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms); +#endif + if (rms.err) + goto err_nd; + + return 0; + +err_nd: +err_arp: + mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif); + return rms.err; +} + enum mlxsw_sp_nexthop_type { MLXSW_SP_NEXTHOP_TYPE_ETH, MLXSW_SP_NEXTHOP_TYPE_IPIP, @@ -3052,7 +3107,7 @@ struct mlxsw_sp_nexthop_group_info { gateway:1, /* routes using the group use a gateway */ is_resilient:1; struct list_head list; /* member in nh_res_grp_list */ - struct mlxsw_sp_nexthop nexthops[]; + struct mlxsw_sp_nexthop nexthops[] __counted_by(count); }; static struct mlxsw_sp_rif * @@ -4396,6 +4451,19 @@ err_neigh_init: return err; } +static int mlxsw_sp_nexthop_type_rif_made(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) +{ + switch (nh->type) { + case MLXSW_SP_NEXTHOP_TYPE_ETH: + return mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh); + case MLXSW_SP_NEXTHOP_TYPE_IPIP: + break; + } + + return 0; +} + static void mlxsw_sp_nexthop_type_rif_gone(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh) { @@ -4524,6 +4592,35 @@ static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp, } } +static int mlxsw_sp_nexthop_rif_made_sync(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *rif) +{ + struct mlxsw_sp_nexthop *nh, *tmp; + unsigned int n = 0; + int err; + + list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list, + crif_list_node) { + err = mlxsw_sp_nexthop_type_rif_made(mlxsw_sp, nh); + if (err) + goto err_nexthop_type_rif; + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp); + n++; + } + + return 0; + +err_nexthop_type_rif: + list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list, + crif_list_node) { + if (!n--) + break; + mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh); + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp); + } + return err; +} + static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif) { @@ -7451,6 +7548,7 @@ struct mlxsw_sp_fib6_event_work { struct mlxsw_sp_fib_event_work { struct work_struct work; + netdevice_tracker dev_tracker; union { struct mlxsw_sp_fib6_event_work fib6_work; struct fib_entry_notifier_info fen_info; @@ -7624,12 +7722,12 @@ static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work) &fib_work->ven_info); if (err) dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n"); - dev_put(fib_work->ven_info.dev); + netdev_put(fib_work->ven_info.dev, 
&fib_work->dev_tracker); break; case FIB_EVENT_VIF_DEL: mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, &fib_work->ven_info); - dev_put(fib_work->ven_info.dev); + netdev_put(fib_work->ven_info.dev, &fib_work->dev_tracker); break; } mutex_unlock(&mlxsw_sp->router->lock); @@ -7700,7 +7798,8 @@ mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work, case FIB_EVENT_VIF_ADD: case FIB_EVENT_VIF_DEL: memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info)); - dev_hold(fib_work->ven_info.dev); + netdev_hold(fib_work->ven_info.dev, &fib_work->dev_tracker, + GFP_ATOMIC); break; } } @@ -7884,6 +7983,26 @@ static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif) return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); } +static int mlxsw_sp_router_rif_made_sync(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *rif) +{ + int err; + + err = mlxsw_sp_neigh_rif_made_sync(mlxsw_sp, rif); + if (err) + return err; + + err = mlxsw_sp_nexthop_rif_made_sync(mlxsw_sp, rif); + if (err) + goto err_nexthop; + + return 0; + +err_nexthop: + mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif); + return err; +} + static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif) { @@ -8190,6 +8309,7 @@ mlxsw_sp_router_port_l3_stats_report_delta(struct mlxsw_sp_rif *rif, struct mlxsw_sp_router_hwstats_notify_work { struct work_struct work; struct net_device *dev; + netdevice_tracker dev_tracker; }; static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work) @@ -8201,7 +8321,7 @@ static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work) rtnl_lock(); rtnl_offload_xstats_notify(hws_work->dev); rtnl_unlock(); - dev_put(hws_work->dev); + netdev_put(hws_work->dev, &hws_work->dev_tracker); kfree(hws_work); } @@ -8221,7 +8341,7 @@ mlxsw_sp_router_hwstats_notify_schedule(struct net_device *dev) return; INIT_WORK(&hws_work->work, mlxsw_sp_router_hwstats_notify_work); - dev_hold(dev); + netdev_hold(dev, &hws_work->dev_tracker, GFP_KERNEL); hws_work->dev = dev; mlxsw_core_schedule_work(&hws_work->work); } @@ -8293,14 +8413,14 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, err = -ENOMEM; goto err_rif_alloc; } - dev_hold(params->dev); + netdev_hold(params->dev, &rif->dev_tracker, GFP_KERNEL); mlxsw_sp->router->rifs[rif_index] = rif; rif->mlxsw_sp = mlxsw_sp; rif->ops = ops; rif->rif_entries = rif_entries; if (ops->fid_get) { - fid = ops->fid_get(rif, extack); + fid = ops->fid_get(rif, params, extack); if (IS_ERR(fid)) { err = PTR_ERR(fid); goto err_fid_get; @@ -8321,6 +8441,10 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, goto err_mr_rif_add; } + err = mlxsw_sp_router_rif_made_sync(mlxsw_sp, rif); + if (err) + goto err_rif_made_sync; + if (netdev_offload_xstats_enabled(params->dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3)) { err = mlxsw_sp_router_port_l3_stats_enable(rif); @@ -8335,6 +8459,8 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, return rif; err_stats_enable: + mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif); +err_rif_made_sync: err_mr_rif_add: for (i--; i >= 0; i--) mlxsw_sp_mr_rif_del(vr->mr_table[i], rif); @@ -8344,7 +8470,7 @@ err_configure: mlxsw_sp_fid_put(fid); err_fid_get: mlxsw_sp->router->rifs[rif_index] = NULL; - dev_put(params->dev); + netdev_put(params->dev, &rif->dev_tracker); mlxsw_sp_rif_free(rif); err_rif_alloc: err_crif_lookup: @@ -8386,7 +8512,7 @@ static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif) /* Loopback RIFs are not associated with a FID. 
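
This diff converts a number of dev_hold()/dev_put() pairs to netdev_hold()/netdev_put(); the netdevice_tracker is embedded next to the pointer it guards, so under CONFIG_NET_DEV_REFCNT_TRACKER a leaked reference can be attributed to whoever took it. Minimal shape, mirroring the work contexts in this file (the struct name is illustrative):

struct deferred_ctx {
        struct work_struct work;
        struct net_device *dev;
        netdevice_tracker dev_tracker;  /* pairs each hold with its put */
};

/* take, where the work is scheduled: */
netdev_hold(dev, &ctx->dev_tracker, GFP_ATOMIC);
ctx->dev = dev;

/* release, typically at the end of the work handler: */
netdev_put(ctx->dev, &ctx->dev_tracker);
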
*/ mlxsw_sp_fid_put(fid); mlxsw_sp->router->rifs[rif->rif_index] = NULL; - dev_put(dev); + netdev_put(dev, &rif->dev_tracker); mlxsw_sp_rif_free(rif); mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries); vr->rif_count--; @@ -8410,6 +8536,110 @@ out: mutex_unlock(&mlxsw_sp->router->lock); } +static void mlxsw_sp_rif_destroy_vlan_upper(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev, + u16 vid) +{ + struct net_device *upper_dev; + struct mlxsw_sp_crif *crif; + + rcu_read_lock(); + upper_dev = __vlan_find_dev_deep_rcu(br_dev, htons(ETH_P_8021Q), vid); + rcu_read_unlock(); + + if (!upper_dev) + return; + + crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, upper_dev); + if (!crif || !crif->rif) + return; + + mlxsw_sp_rif_destroy(crif->rif); +} + +static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *l3_dev, + int lower_pvid, + unsigned long event, + struct netlink_ext_ack *extack); + +int mlxsw_sp_router_bridge_vlan_add(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev, + u16 new_vid, bool is_pvid, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_rif *old_rif; + struct mlxsw_sp_rif *new_rif; + struct net_device *upper_dev; + u16 old_pvid = 0; + u16 new_pvid; + int err = 0; + + mutex_lock(&mlxsw_sp->router->lock); + old_rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev); + if (old_rif) { + /* If the RIF on the bridge is not a VLAN RIF, we shouldn't have + * gotten a PVID notification. + */ + if (WARN_ON(old_rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)) + old_rif = NULL; + else + old_pvid = mlxsw_sp_fid_8021q_vid(old_rif->fid); + } + + if (is_pvid) + new_pvid = new_vid; + else if (old_pvid == new_vid) + new_pvid = 0; + else + goto out; + + if (old_pvid == new_pvid) + goto out; + + if (new_pvid) { + struct mlxsw_sp_rif_params params = { + .dev = br_dev, + .vid = new_pvid, + }; + + /* If there is a VLAN upper with the same VID as the new PVID, + * kill its RIF, if there is one. 
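
The PVID handler added in this hunk boils down to four transitions; as a comment-style summary derived from the code itself:

/*
 * old_pvid == 0 && new_pvid != 0: create a RIF for the new PVID
 * old_pvid != 0 && new_pvid != 0: create the new RIF, migrate, drop the old
 * old_pvid != 0 && new_pvid == 0: destroy the old RIF
 * old_pvid == new_pvid          : nothing to do
 */
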
+ */ + mlxsw_sp_rif_destroy_vlan_upper(mlxsw_sp, br_dev, new_pvid); + + if (mlxsw_sp_dev_addr_list_empty(br_dev)) + goto out; + new_rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack); + if (IS_ERR(new_rif)) { + err = PTR_ERR(new_rif); + goto out; + } + + if (old_pvid) + mlxsw_sp_rif_migrate_destroy(mlxsw_sp, old_rif, new_rif, + true); + } else { + mlxsw_sp_rif_destroy(old_rif); + } + + if (old_pvid) { + rcu_read_lock(); + upper_dev = __vlan_find_dev_deep_rcu(br_dev, htons(ETH_P_8021Q), + old_pvid); + rcu_read_unlock(); + if (upper_dev) + err = mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, + upper_dev, + new_pvid, + NETDEV_UP, extack); + } + +out: + mutex_unlock(&mlxsw_sp->router->lock); + return err; +} + static void mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params, struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) @@ -8664,21 +8894,24 @@ __mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - struct mlxsw_sp_rif_params params = { - .dev = l3_dev, - }; + struct mlxsw_sp_rif_params params; u16 vid = mlxsw_sp_port_vlan->vid; struct mlxsw_sp_rif *rif; struct mlxsw_sp_fid *fid; int err; + params = (struct mlxsw_sp_rif_params) { + .dev = l3_dev, + .vid = vid, + }; + mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan); rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack); if (IS_ERR(rif)) return PTR_ERR(rif); /* FID was already created, just take a reference */ - fid = rif->ops->fid_get(rif, extack); + fid = rif->ops->fid_get(rif, &params, extack); err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid); if (err) goto err_fid_port_vid_map; @@ -8776,10 +9009,11 @@ static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev, } static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev, - unsigned long event, + unsigned long event, bool nomaster, struct netlink_ext_ack *extack) { - if (netif_is_any_bridge_port(port_dev) || netif_is_lag_port(port_dev)) + if (!nomaster && (netif_is_any_bridge_port(port_dev) || + netif_is_lag_port(port_dev))) return 0; return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, @@ -8810,10 +9044,10 @@ static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev, } static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev, - unsigned long event, + unsigned long event, bool nomaster, struct netlink_ext_ack *extack) { - if (netif_is_bridge_port(lag_dev)) + if (!nomaster && netif_is_bridge_port(lag_dev)) return 0; return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, @@ -8822,6 +9056,7 @@ static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev, static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp, struct net_device *l3_dev, + int lower_pvid, unsigned long event, struct netlink_ext_ack *extack) { @@ -8829,6 +9064,7 @@ static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp, .dev = l3_dev, }; struct mlxsw_sp_rif *rif; + int err; switch (event) { case NETDEV_UP: @@ -8840,7 +9076,21 @@ static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp, NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported"); return -EOPNOTSUPP; } + err = br_vlan_get_pvid(l3_dev, &params.vid); + if (err) + return err; + if (!params.vid) + return 0; + } else if (is_vlan_dev(l3_dev)) { + params.vid = vlan_dev_vlan_id(l3_dev); + + /* If the VID matches PVID of the bridge below, the + * bridge owns the RIF for this VLAN. 
Don't do anything. + */ + if ((int)params.vid == lower_pvid) + return 0; } + rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack); if (IS_ERR(rif)) return PTR_ERR(rif); @@ -8856,24 +9106,32 @@ static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp, struct net_device *vlan_dev, - unsigned long event, + unsigned long event, bool nomaster, struct netlink_ext_ack *extack) { struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); u16 vid = vlan_dev_vlan_id(vlan_dev); + u16 lower_pvid; + int err; - if (netif_is_bridge_port(vlan_dev)) + if (!nomaster && netif_is_bridge_port(vlan_dev)) return 0; - if (mlxsw_sp_port_dev_check(real_dev)) + if (mlxsw_sp_port_dev_check(real_dev)) { return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev, event, vid, extack); - else if (netif_is_lag_master(real_dev)) + } else if (netif_is_lag_master(real_dev)) { return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event, vid, extack); - else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev)) - return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event, + } else if (netif_is_bridge_master(real_dev) && + br_vlan_enabled(real_dev)) { + err = br_vlan_get_pvid(real_dev, &lower_pvid); + if (err) + return err; + return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, + lower_pvid, event, extack); + } return 0; } @@ -8927,10 +9185,8 @@ static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp, int err; rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev); - if (!rif) { - NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); - return -EOPNOTSUPP; - } + if (!rif) + return 0; err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr, mlxsw_sp_fid_index(rif->fid), true); @@ -9000,19 +9256,21 @@ static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp, static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp, struct net_device *dev, - unsigned long event, + unsigned long event, bool nomaster, struct netlink_ext_ack *extack) { if (mlxsw_sp_port_dev_check(dev)) - return mlxsw_sp_inetaddr_port_event(dev, event, extack); + return mlxsw_sp_inetaddr_port_event(dev, event, nomaster, + extack); else if (netif_is_lag_master(dev)) - return mlxsw_sp_inetaddr_lag_event(dev, event, extack); + return mlxsw_sp_inetaddr_lag_event(dev, event, nomaster, + extack); else if (netif_is_bridge_master(dev)) - return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event, + return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, -1, event, extack); else if (is_vlan_dev(dev)) return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event, - extack); + nomaster, extack); else if (netif_is_macvlan(dev)) return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event, extack); @@ -9039,7 +9297,8 @@ static int mlxsw_sp_inetaddr_event(struct notifier_block *nb, if (!mlxsw_sp_rif_should_config(rif, dev, event)) goto out; - err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL); + err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, false, + NULL); out: mutex_unlock(&router->lock); return notifier_from_errno(err); @@ -9063,7 +9322,8 @@ static int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused, if (!mlxsw_sp_rif_should_config(rif, dev, event)) goto out; - err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false, + ivi->extack); out: mutex_unlock(&mlxsw_sp->router->lock); return notifier_from_errno(err); @@ 
-9073,6 +9333,7 @@ struct mlxsw_sp_inet6addr_event_work { struct work_struct work; struct mlxsw_sp *mlxsw_sp; struct net_device *dev; + netdevice_tracker dev_tracker; unsigned long event; }; @@ -9092,11 +9353,11 @@ static void mlxsw_sp_inet6addr_event_work(struct work_struct *work) if (!mlxsw_sp_rif_should_config(rif, dev, event)) goto out; - __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL); + __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false, NULL); out: mutex_unlock(&mlxsw_sp->router->lock); rtnl_unlock(); - dev_put(dev); + netdev_put(dev, &inet6addr_work->dev_tracker); kfree(inet6addr_work); } @@ -9122,7 +9383,7 @@ static int mlxsw_sp_inet6addr_event(struct notifier_block *nb, inet6addr_work->mlxsw_sp = router->mlxsw_sp; inet6addr_work->dev = dev; inet6addr_work->event = event; - dev_hold(dev); + netdev_hold(dev, &inet6addr_work->dev_tracker, GFP_ATOMIC); mlxsw_core_schedule_work(&inet6addr_work->work); return NOTIFY_DONE; @@ -9146,7 +9407,8 @@ static int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused, if (!mlxsw_sp_rif_should_config(rif, dev, event)) goto out; - err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack); + err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false, + i6vi->extack); out: mutex_unlock(&mlxsw_sp->router->lock); return notifier_from_errno(err); @@ -9466,10 +9728,11 @@ static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp, */ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); if (rif) - __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, + __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, false, extack); - return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack); + return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, false, + extack); } static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp, @@ -9480,7 +9743,7 @@ static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp, rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); if (!rif) return; - __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL); + __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, false, NULL); } static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) @@ -9523,6 +9786,116 @@ mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, return err; } +struct mlxsw_sp_router_replay_inetaddr_up { + struct mlxsw_sp *mlxsw_sp; + struct netlink_ext_ack *extack; + unsigned int done; + bool deslavement; +}; + +static int mlxsw_sp_router_replay_inetaddr_up(struct net_device *dev, + struct netdev_nested_priv *priv) +{ + struct mlxsw_sp_router_replay_inetaddr_up *ctx = priv->data; + bool nomaster = ctx->deslavement; + struct mlxsw_sp_crif *crif; + int err; + + if (mlxsw_sp_dev_addr_list_empty(dev)) + return 0; + + crif = mlxsw_sp_crif_lookup(ctx->mlxsw_sp->router, dev); + if (!crif || crif->rif) + return 0; + + if (!mlxsw_sp_rif_should_config(crif->rif, dev, NETDEV_UP)) + return 0; + + err = __mlxsw_sp_inetaddr_event(ctx->mlxsw_sp, dev, NETDEV_UP, + nomaster, ctx->extack); + if (err) + return err; + + ctx->done++; + return 0; +} + +static int mlxsw_sp_router_unreplay_inetaddr_up(struct net_device *dev, + struct netdev_nested_priv *priv) +{ + struct mlxsw_sp_router_replay_inetaddr_up *ctx = priv->data; + bool nomaster = ctx->deslavement; + struct mlxsw_sp_crif *crif; + + if (!ctx->done) + return 0; + + if (mlxsw_sp_dev_addr_list_empty(dev)) + return 0; + + crif = mlxsw_sp_crif_lookup(ctx->mlxsw_sp->router, dev); + if (!crif || !crif->rif) + return 0; + + /* We are rolling back 
NETDEV_UP, so ask for that. */ + if (!mlxsw_sp_rif_should_config(crif->rif, dev, NETDEV_UP)) + return 0; + + __mlxsw_sp_inetaddr_event(ctx->mlxsw_sp, dev, NETDEV_DOWN, nomaster, + NULL); + + ctx->done--; + return 0; +} + +int mlxsw_sp_netdevice_enslavement_replay(struct mlxsw_sp *mlxsw_sp, + struct net_device *upper_dev, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_router_replay_inetaddr_up ctx = { + .mlxsw_sp = mlxsw_sp, + .extack = extack, + .deslavement = false, + }; + struct netdev_nested_priv priv = { + .data = &ctx, + }; + int err; + + err = mlxsw_sp_router_replay_inetaddr_up(upper_dev, &priv); + if (err) + return err; + + err = netdev_walk_all_upper_dev_rcu(upper_dev, + mlxsw_sp_router_replay_inetaddr_up, + &priv); + if (err) + goto err_replay_up; + + return 0; + +err_replay_up: + netdev_walk_all_upper_dev_rcu(upper_dev, + mlxsw_sp_router_unreplay_inetaddr_up, + &priv); + mlxsw_sp_router_unreplay_inetaddr_up(upper_dev, &priv); + return err; +} + +void mlxsw_sp_netdevice_deslavement_replay(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev) +{ + struct mlxsw_sp_router_replay_inetaddr_up ctx = { + .mlxsw_sp = mlxsw_sp, + .deslavement = true, + }; + struct netdev_nested_priv priv = { + .data = &ctx, + }; + + mlxsw_sp_router_replay_inetaddr_up(dev, &priv); +} + static int mlxsw_sp_port_vid_router_join_existing(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, struct net_device *dev, @@ -9539,15 +9912,84 @@ mlxsw_sp_port_vid_router_join_existing(struct mlxsw_sp_port *mlxsw_sp_port, dev, extack); } +static void +mlxsw_sp_port_vid_router_leave(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, + struct net_device *dev) +{ + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; + + mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, + vid); + if (WARN_ON(!mlxsw_sp_port_vlan)) + return; + + __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); +} + static int __mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *lag_dev, struct netlink_ext_ack *extack) { u16 default_vid = MLXSW_SP_DEFAULT_VID; + struct net_device *upper_dev; + struct list_head *iter; + int done = 0; + u16 vid; + int err; - return mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port, - default_vid, lag_dev, - extack); + err = mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port, default_vid, + lag_dev, extack); + if (err) + return err; + + netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { + if (!is_vlan_dev(upper_dev)) + continue; + + vid = vlan_dev_vlan_id(upper_dev); + err = mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port, vid, + upper_dev, extack); + if (err) + goto err_router_join_dev; + + ++done; + } + + return 0; + +err_router_join_dev: + netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { + if (!is_vlan_dev(upper_dev)) + continue; + if (!done--) + break; + + vid = vlan_dev_vlan_id(upper_dev); + mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, vid, upper_dev); + } + + mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, default_vid, lag_dev); + return err; +} + +static void +__mlxsw_sp_router_port_leave_lag(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *lag_dev) +{ + u16 default_vid = MLXSW_SP_DEFAULT_VID; + struct net_device *upper_dev; + struct list_head *iter; + u16 vid; + + netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { + if (!is_vlan_dev(upper_dev)) + continue; + + vid = vlan_dev_vlan_id(upper_dev); + mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, vid, upper_dev); + } + + mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, default_vid, lag_dev); } 
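
__mlxsw_sp_router_port_join_lag() above uses a counted rollback: on failure it walks the VLAN uppers a second time and undoes exactly as many joins as had succeeded. The same idiom in a self-contained sketch (join_one()/leave_one() are placeholders for the per-item operations):

static int join_one(int i);
static void leave_one(int i);

static int join_all(int n)
{
        int done = 0;
        int err, i;

        for (i = 0; i < n; i++) {
                err = join_one(i);
                if (err)
                        goto err_join;
                ++done;
        }
        return 0;

err_join:
        for (i = 0; i < n; i++) {
                if (!done--)
                        break;          /* first item that never joined */
                leave_one(i);
        }
        return err;
}
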
int mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port, @@ -9563,6 +10005,14 @@ int mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port, return err; } +void mlxsw_sp_router_port_leave_lag(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *lag_dev) +{ + mutex_lock(&mlxsw_sp_port->mlxsw_sp->router->lock); + __mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev); + mutex_unlock(&mlxsw_sp_port->mlxsw_sp->router->lock); +} + static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb, unsigned long event, void *ptr) { @@ -9608,6 +10058,40 @@ out: return notifier_from_errno(err); } +struct mlxsw_sp_macvlan_replay { + struct mlxsw_sp *mlxsw_sp; + struct netlink_ext_ack *extack; +}; + +static int mlxsw_sp_macvlan_replay_upper(struct net_device *dev, + struct netdev_nested_priv *priv) +{ + const struct mlxsw_sp_macvlan_replay *rms = priv->data; + struct netlink_ext_ack *extack = rms->extack; + struct mlxsw_sp *mlxsw_sp = rms->mlxsw_sp; + + if (!netif_is_macvlan(dev)) + return 0; + + return mlxsw_sp_rif_macvlan_add(mlxsw_sp, dev, extack); +} + +static int mlxsw_sp_macvlan_replay(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_macvlan_replay rms = { + .mlxsw_sp = rif->mlxsw_sp, + .extack = extack, + }; + struct netdev_nested_priv priv = { + .data = &rms, + }; + + return netdev_walk_all_upper_dev_rcu(mlxsw_sp_rif_dev(rif), + mlxsw_sp_macvlan_replay_upper, + &priv); +} + static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev, struct netdev_nested_priv *priv) { @@ -9630,7 +10114,6 @@ static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif) if (!netif_is_macvlan_port(dev)) return 0; - netdev_warn(dev, "Router interface is deleted. Upper macvlans will not work\n"); return netdev_walk_all_upper_dev_rcu(dev, __mlxsw_sp_rif_macvlan_flush, &priv); } @@ -9688,6 +10171,10 @@ static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif, if (err) goto err_rif_subport_op; + err = mlxsw_sp_macvlan_replay(rif, extack); + if (err) + goto err_macvlan_replay; + err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr, mlxsw_sp_fid_index(rif->fid), true); if (err) @@ -9703,6 +10190,8 @@ err_fid_rif_set: mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr, mlxsw_sp_fid_index(rif->fid), false); err_rif_fdb_op: + mlxsw_sp_rif_macvlan_flush(rif); +err_macvlan_replay: mlxsw_sp_rif_subport_op(rif, false); err_rif_subport_op: mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, mac_profile); @@ -9724,6 +10213,7 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif) static struct mlxsw_sp_fid * mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif, + const struct mlxsw_sp_rif_params *params, struct netlink_ext_ack *extack) { return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index); @@ -9788,6 +10278,10 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif, if (err) goto err_fid_bc_flood_set; + err = mlxsw_sp_macvlan_replay(rif, extack); + if (err) + goto err_macvlan_replay; + err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr, mlxsw_sp_fid_index(rif->fid), true); if (err) @@ -9803,6 +10297,8 @@ err_fid_rif_set: mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr, mlxsw_sp_fid_index(rif->fid), false); err_rif_fdb_op: + mlxsw_sp_rif_macvlan_flush(rif); +err_macvlan_replay: mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, mlxsw_sp_router_port(mlxsw_sp), false); err_fid_bc_flood_set: @@ -9836,6 +10332,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif) static struct mlxsw_sp_fid * 
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif, + const struct mlxsw_sp_rif_params *params, struct netlink_ext_ack *extack) { int rif_ifindex = mlxsw_sp_rif_dev_ifindex(rif); @@ -9869,27 +10366,22 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = { static struct mlxsw_sp_fid * mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif, + const struct mlxsw_sp_rif_params *params, struct netlink_ext_ack *extack) { struct net_device *dev = mlxsw_sp_rif_dev(rif); struct net_device *br_dev; - u16 vid; - int err; + + if (WARN_ON(!params->vid)) + return ERR_PTR(-EINVAL); if (is_vlan_dev(dev)) { - vid = vlan_dev_vlan_id(dev); br_dev = vlan_dev_real_dev(dev); if (WARN_ON(!netif_is_bridge_master(br_dev))) return ERR_PTR(-EINVAL); - } else { - err = br_vlan_get_pvid(dev, &vid); - if (err < 0 || !vid) { - NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID"); - return ERR_PTR(-EINVAL); - } } - return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid); + return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, params->vid); } static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac) @@ -9954,6 +10446,10 @@ static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid, if (err) goto err_fid_bc_flood_set; + err = mlxsw_sp_macvlan_replay(rif, extack); + if (err) + goto err_macvlan_replay; + err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr, mlxsw_sp_fid_index(rif->fid), true); if (err) @@ -9969,6 +10465,8 @@ err_fid_rif_set: mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr, mlxsw_sp_fid_index(rif->fid), false); err_rif_fdb_op: + mlxsw_sp_rif_macvlan_flush(rif); +err_macvlan_replay: mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, mlxsw_sp_router_port(mlxsw_sp), false); err_fid_bc_flood_set: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 9a2669a08480..ed3b628caafe 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -171,8 +171,19 @@ int mlxsw_sp_ipip_ecn_encap_init(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp); struct net_device * mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev); +int mlxsw_sp_router_bridge_vlan_add(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev, + u16 new_vid, bool is_pvid, + struct netlink_ext_ack *extack); int mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *lag_dev, struct netlink_ext_ack *extack); +void mlxsw_sp_router_port_leave_lag(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *lag_dev); +int mlxsw_sp_netdevice_enslavement_replay(struct mlxsw_sp *mlxsw_sp, + struct net_device *upper_dev, + struct netlink_ext_ack *extack); +void mlxsw_sp_netdevice_deslavement_replay(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev); #endif /* _MLXSW_ROUTER_H_*/ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index b3472fb94617..af50ff9e5f26 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -31,7 +31,7 @@ struct mlxsw_sp_span { refcount_t policer_id_base_ref_count; atomic_t active_entries_count; int entries_count; - struct mlxsw_sp_span_entry entries[]; + struct mlxsw_sp_span_entry entries[] __counted_by(entries_count); }; struct mlxsw_sp_span_analyzed_port { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h index 82e711afb02b..c59b5f11f357 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h @@ -93,13 +93,8 @@ void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp); struct mlxsw_sp_span_entry * mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp, const struct net_device *to_dev); - void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_span_entry *span_entry); - -int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu); -void mlxsw_sp_span_speed_update_work(struct work_struct *work); - int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id, const struct mlxsw_sp_span_agent_parms *parms); void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index d88e62bc759f..6c749c148148 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -384,6 +384,91 @@ mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge, return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev); } +static int mlxsw_sp_port_obj_add(struct net_device *dev, const void *ctx, + const struct switchdev_obj *obj, + struct netlink_ext_ack *extack); +static int mlxsw_sp_port_obj_del(struct net_device *dev, const void *ctx, + const struct switchdev_obj *obj); + +struct mlxsw_sp_bridge_port_replay_switchdev_objs { + struct net_device *brport_dev; + struct mlxsw_sp_port *mlxsw_sp_port; + int done; +}; + +static int +mlxsw_sp_bridge_port_replay_switchdev_objs(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + struct switchdev_notifier_port_obj_info *port_obj_info = ptr; + struct netlink_ext_ack *extack = port_obj_info->info.extack; + struct mlxsw_sp_bridge_port_replay_switchdev_objs *rso; + int err = 0; + + rso = (void *)port_obj_info->info.ctx; + + if (event != SWITCHDEV_PORT_OBJ_ADD || + dev != rso->brport_dev) + goto out; + + /* When a port is joining the bridge through a LAG, there likely are + * VLANs configured on that LAG already. The replay will thus attempt to + * have the given port-vlans join the corresponding FIDs. But the LAG + * netdevice has already called the ndo_vlan_rx_add_vid NDO for its VLAN + * memberships, back before CHANGEUPPER was distributed and netdevice + * master set. So now before propagating the VLAN events further, we + * first need to kill the corresponding VID at the mlxsw_sp_port. + * + * Note that this doesn't need to be rolled back on failure -- if the + * replay fails, the enslavement is off, and the VIDs would be killed by + * LAG anyway as part of its rollback. 
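
The replay hooks below are ordinary switchdev blocking-notifier callbacks driven through switchdev_bridge_port_replay(), with driver state smuggled in via the notifier info's ctx pointer. The bare shape of such a callback (the hardware-programming step is elided):

static int my_replay_cb(struct notifier_block *nb, unsigned long event,
                        void *ptr)
{
        struct switchdev_notifier_port_obj_info *info = ptr;

        if (event != SWITCHDEV_PORT_OBJ_ADD)
                return NOTIFY_DONE;

        /* ... program info->obj into the hardware here ... */
        return notifier_from_errno(0);
}
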
+ */ + if (port_obj_info->obj->id == SWITCHDEV_OBJ_ID_PORT_VLAN) { + u16 vid = SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj)->vid; + + err = mlxsw_sp_port_kill_vid(rso->mlxsw_sp_port->dev, 0, vid); + if (err) + goto out; + } + + ++rso->done; + err = mlxsw_sp_port_obj_add(rso->mlxsw_sp_port->dev, NULL, + port_obj_info->obj, extack); + +out: + return notifier_from_errno(err); +} + +static struct notifier_block mlxsw_sp_bridge_port_replay_switchdev_objs_nb = { + .notifier_call = mlxsw_sp_bridge_port_replay_switchdev_objs, +}; + +static int +mlxsw_sp_bridge_port_unreplay_switchdev_objs(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + struct switchdev_notifier_port_obj_info *port_obj_info = ptr; + struct mlxsw_sp_bridge_port_replay_switchdev_objs *rso; + + rso = (void *)port_obj_info->info.ctx; + + if (event != SWITCHDEV_PORT_OBJ_ADD || + dev != rso->brport_dev) + return NOTIFY_DONE; + if (!rso->done--) + return NOTIFY_STOP; + + mlxsw_sp_port_obj_del(rso->mlxsw_sp_port->dev, NULL, + port_obj_info->obj); + return NOTIFY_DONE; +} + +static struct notifier_block mlxsw_sp_bridge_port_unreplay_switchdev_objs_nb = { + .notifier_call = mlxsw_sp_bridge_port_unreplay_switchdev_objs, +}; + static struct mlxsw_sp_bridge_port * mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device, struct net_device *brport_dev, @@ -405,7 +490,7 @@ mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device, bridge_port->system_port = mlxsw_sp_port->local_port; bridge_port->dev = brport_dev; bridge_port->bridge_device = bridge_device; - bridge_port->stp_state = BR_STATE_DISABLED; + bridge_port->stp_state = br_port_get_stp_state(brport_dev); bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC | BR_MCAST_FLOOD; INIT_LIST_HEAD(&bridge_port->vlans_list); @@ -1479,29 +1564,15 @@ err_port_vlan_set: } static int -mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp, - const struct net_device *br_dev, - const struct switchdev_obj_port_vlan *vlan) +mlxsw_sp_br_rif_pvid_change(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev, + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) { - u16 pvid; - - pvid = mlxsw_sp_rif_vid(mlxsw_sp, br_dev); - if (!pvid) - return 0; - - if (vlan->flags & BRIDGE_VLAN_INFO_PVID) { - if (vlan->vid != pvid) { - netdev_err(br_dev, "Can't change PVID, it's used by router interface\n"); - return -EBUSY; - } - } else { - if (vlan->vid == pvid) { - netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n"); - return -EBUSY; - } - } + bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; - return 0; + return mlxsw_sp_router_bridge_vlan_add(mlxsw_sp, br_dev, vlan->vid, + flag_pvid, extack); } static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, @@ -1518,8 +1589,8 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, int err = 0; if (br_vlan_enabled(orig_dev)) - err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp, - orig_dev, vlan); + err = mlxsw_sp_br_rif_pvid_change(mlxsw_sp, orig_dev, + vlan, extack); if (!err) err = -EOPNOTSUPP; return err; @@ -2365,6 +2436,33 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp, } static int +mlxsw_sp_bridge_port_replay(struct mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_port *mlxsw_sp_port, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_bridge_port_replay_switchdev_objs rso = { + .brport_dev = bridge_port->dev, + .mlxsw_sp_port 
= mlxsw_sp_port, + }; + struct notifier_block *nb; + int err; + + nb = &mlxsw_sp_bridge_port_replay_switchdev_objs_nb; + err = switchdev_bridge_port_replay(bridge_port->dev, mlxsw_sp_port->dev, + &rso, NULL, nb, extack); + if (err) + goto err_replay; + + return 0; + +err_replay: + nb = &mlxsw_sp_bridge_port_unreplay_switchdev_objs_nb; + switchdev_bridge_port_replay(bridge_port->dev, mlxsw_sp_port->dev, + &rso, NULL, nb, extack); + return err; +} + +static int mlxsw_sp_bridge_vlan_aware_port_join(struct mlxsw_sp_bridge_port *bridge_port, struct mlxsw_sp_port *mlxsw_sp_port, struct netlink_ext_ack *extack) @@ -2378,7 +2476,7 @@ mlxsw_sp_bridge_vlan_aware_port_join(struct mlxsw_sp_bridge_port *bridge_port, if (mlxsw_sp_port->default_vlan->fid) mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); - return 0; + return mlxsw_sp_bridge_port_replay(bridge_port, mlxsw_sp_port, extack); } static int @@ -2550,6 +2648,7 @@ mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device, struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; struct net_device *dev = bridge_port->dev; u16 vid; + int err; vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID; mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); @@ -2565,8 +2664,20 @@ mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device, if (mlxsw_sp_port_vlan->fid) mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); - return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port, - extack); + err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port, + extack); + if (err) + return err; + + err = mlxsw_sp_bridge_port_replay(bridge_port, mlxsw_sp_port, extack); + if (err) + goto err_replay; + + return 0; + +err_replay: + mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); + return err; } static void @@ -2783,8 +2894,15 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, if (err) goto err_port_join; + err = mlxsw_sp_netdevice_enslavement_replay(mlxsw_sp, br_dev, extack); + if (err) + goto err_replay; + return 0; +err_replay: + bridge_device->ops->port_leave(bridge_device, bridge_port, + mlxsw_sp_port); err_port_join: mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port); return err; @@ -2948,9 +3066,6 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, goto just_remove; } - if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid)) - goto just_remove; - mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid); if (!mlxsw_sp_port_vlan) { netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n"); @@ -3018,9 +3133,6 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp, goto just_remove; } - if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid)) - goto just_remove; - mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid); if (!mlxsw_sp_port_vlan) { netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n"); @@ -3262,6 +3374,7 @@ out: struct mlxsw_sp_switchdev_event_work { struct work_struct work; + netdevice_tracker dev_tracker; union { struct switchdev_notifier_fdb_info fdb_info; struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info; @@ -3418,8 +3531,8 @@ static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work) out: rtnl_unlock(); kfree(switchdev_work->fdb_info.addr); + netdev_put(dev, &switchdev_work->dev_tracker); kfree(switchdev_work); - dev_put(dev); } static void @@ -3430,7 +3543,6 @@ 
mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp, struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info; struct mlxsw_sp_bridge_device *bridge_device; struct net_device *dev = switchdev_work->dev; - u8 all_zeros_mac[ETH_ALEN] = { 0 }; enum mlxsw_sp_l3proto proto; union mlxsw_sp_l3addr addr; struct net_device *br_dev; @@ -3452,7 +3564,7 @@ mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip, &proto, &addr); - if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) { + if (is_zero_ether_addr(vxlan_fdb_info->eth_addr)) { err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr); if (err) { mlxsw_sp_fid_put(fid); @@ -3504,7 +3616,6 @@ mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_bridge_device *bridge_device; struct net_device *dev = switchdev_work->dev; struct net_device *br_dev = netdev_master_upper_dev_get(dev); - u8 all_zeros_mac[ETH_ALEN] = { 0 }; enum mlxsw_sp_l3proto proto; union mlxsw_sp_l3addr addr; struct mlxsw_sp_fid *fid; @@ -3525,7 +3636,7 @@ mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip, &proto, &addr); - if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) { + if (is_zero_ether_addr(vxlan_fdb_info->eth_addr)) { mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr); mlxsw_sp_fid_put(fid); return; @@ -3574,8 +3685,8 @@ static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work) out: rtnl_unlock(); + netdev_put(dev, &switchdev_work->dev_tracker); kfree(switchdev_work); - dev_put(dev); } static int @@ -3675,7 +3786,7 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, * upper device containig mlxsw_sp_port or just a * mlxsw_sp_port */ - dev_hold(dev); + netdev_hold(dev, &switchdev_work->dev_tracker, GFP_ATOMIC); break; case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE: @@ -3685,7 +3796,7 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, info); if (err) goto err_vxlan_work_prepare; - dev_hold(dev); + netdev_hold(dev, &switchdev_work->dev_tracker, GFP_ATOMIC); break; default: kfree(switchdev_work); diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index c11b118dc415..ddd87ef71caf 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -1228,7 +1228,7 @@ err_mem_region: return err; } -static int ks8842_remove(struct platform_device *pdev) +static void ks8842_remove(struct platform_device *pdev) { struct net_device *netdev = platform_get_drvdata(pdev); struct ks8842_adapter *adapter = netdev_priv(netdev); @@ -1239,7 +1239,6 @@ static int ks8842_remove(struct platform_device *pdev) iounmap(adapter->hw_addr); free_netdev(netdev); release_mem_region(iomem->start, resource_size(iomem)); - return 0; } @@ -1248,7 +1247,7 @@ static struct platform_driver ks8842_platform_driver = { .name = DRV_NAME, }, .probe = ks8842_probe, - .remove = ks8842_remove, + .remove_new = ks8842_remove, }; module_platform_driver(ks8842_platform_driver); diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h index fecd43754cea..e5ec0a363aff 100644 --- a/drivers/net/ethernet/micrel/ks8851.h +++ b/drivers/net/ethernet/micrel/ks8851.h @@ -350,6 +350,8 @@ union ks8851_tx_hdr { * @rxd: Space for receiving SPI data, in DMA-able space. * @txd: Space for transmitting SPI data, in DMA-able space. 
* @msg_enable: The message flags controlling driver output (see ethtool). + * @tx_space: Free space in the hardware TX buffer (cached copy of KS_TXMIR). + * @queued_len: Space required in hardware TX buffer for queued packets in txq. * @fid: Incrementing frame id tag. * @rc_ier: Cached copy of KS_IER. * @rc_ccr: Cached copy of KS_CCR. @@ -399,6 +401,7 @@ struct ks8851_net { struct work_struct rxctrl_work; struct sk_buff_head txq; + unsigned int queued_len; struct eeprom_93cx6 eeprom; struct regulator *vdd_reg; diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c index cfbc900d4aeb..0bf13b38b8f5 100644 --- a/drivers/net/ethernet/micrel/ks8851_common.c +++ b/drivers/net/ethernet/micrel/ks8851_common.c @@ -362,16 +362,18 @@ static irqreturn_t ks8851_irq(int irq, void *_ks) handled |= IRQ_RXPSI; if (status & IRQ_TXI) { - handled |= IRQ_TXI; + unsigned short tx_space = ks8851_rdreg16(ks, KS_TXMIR); - /* no lock here, tx queue should have been stopped */ + netif_dbg(ks, intr, ks->netdev, + "%s: txspace %d\n", __func__, tx_space); - /* update our idea of how much tx space is available to the - * system */ - ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR); + spin_lock(&ks->statelock); + ks->tx_space = tx_space; + if (netif_queue_stopped(ks->netdev)) + netif_wake_queue(ks->netdev); + spin_unlock(&ks->statelock); - netif_dbg(ks, intr, ks->netdev, - "%s: txspace %d\n", __func__, ks->tx_space); + handled |= IRQ_TXI; } if (status & IRQ_RXI) @@ -414,9 +416,6 @@ static irqreturn_t ks8851_irq(int irq, void *_ks) if (status & IRQ_LCI) mii_check_link(&ks->mii); - if (status & IRQ_TXI) - netif_wake_queue(ks->netdev); - return IRQ_HANDLED; } @@ -500,6 +499,7 @@ static int ks8851_net_open(struct net_device *dev) ks8851_wrreg16(ks, KS_ISR, ks->rc_ier); ks8851_wrreg16(ks, KS_IER, ks->rc_ier); + ks->queued_len = 0; netif_start_queue(ks->netdev); netif_dbg(ks, ifup, ks->netdev, "network device up\n"); diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c index 7f49042484bd..2a7f29854267 100644 --- a/drivers/net/ethernet/micrel/ks8851_par.c +++ b/drivers/net/ethernet/micrel/ks8851_par.c @@ -327,11 +327,9 @@ static int ks8851_probe_par(struct platform_device *pdev) return ks8851_probe_common(netdev, dev, msg_enable); } -static int ks8851_remove_par(struct platform_device *pdev) +static void ks8851_remove_par(struct platform_device *pdev) { ks8851_remove_common(&pdev->dev); - - return 0; } static const struct of_device_id ks8851_match_table[] = { @@ -347,7 +345,7 @@ static struct platform_driver ks8851_driver = { .pm = &ks8851_pm_ops, }, .probe = ks8851_probe_par, - .remove = ks8851_remove_par, + .remove_new = ks8851_remove_par, }; module_platform_driver(ks8851_driver); diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c index 70bc7253454f..88e26c120b48 100644 --- a/drivers/net/ethernet/micrel/ks8851_spi.c +++ b/drivers/net/ethernet/micrel/ks8851_spi.c @@ -287,6 +287,18 @@ static void ks8851_wrfifo_spi(struct ks8851_net *ks, struct sk_buff *txp, } /** + * calc_txlen - calculate size of message to send packet + * @len: Length of data + * + * Returns the size of the TXFIFO message needed to send + * this packet. 
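
calc_txlen() below adds what appears to be a 4-byte per-packet FIFO header and rounds the result up to a dword boundary. Worked values under that reading:

/* calc_txlen(60)   -> ALIGN(64, 4)   == 64    (already aligned) */
/* calc_txlen(61)   -> ALIGN(65, 4)   == 68                      */
/* calc_txlen(1514) -> ALIGN(1518, 4) == 1520                    */
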
+ */ +static unsigned int calc_txlen(unsigned int len) +{ + return ALIGN(len + 4, 4); +} + +/** * ks8851_rx_skb_spi - receive skbuff * @ks: The device state * @skb: The skbuff @@ -305,7 +317,9 @@ static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb) */ static void ks8851_tx_work(struct work_struct *work) { + unsigned int dequeued_len = 0; struct ks8851_net_spi *kss; + unsigned short tx_space; struct ks8851_net *ks; unsigned long flags; struct sk_buff *txb; @@ -322,6 +336,8 @@ static void ks8851_tx_work(struct work_struct *work) last = skb_queue_empty(&ks->txq); if (txb) { + dequeued_len += calc_txlen(txb->len); + ks8851_wrreg16_spi(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); ks8851_wrfifo_spi(ks, txb, last); @@ -332,6 +348,13 @@ static void ks8851_tx_work(struct work_struct *work) } } + tx_space = ks8851_rdreg16_spi(ks, KS_TXMIR); + + spin_lock(&ks->statelock); + ks->queued_len -= dequeued_len; + ks->tx_space = tx_space; + spin_unlock(&ks->statelock); + ks8851_unlock_spi(ks, &flags); } @@ -347,18 +370,6 @@ static void ks8851_flush_tx_work_spi(struct ks8851_net *ks) } /** - * calc_txlen - calculate size of message to send packet - * @len: Length of data - * - * Returns the size of the TXFIFO message needed to send - * this packet. - */ -static unsigned int calc_txlen(unsigned int len) -{ - return ALIGN(len + 4, 4); -} - -/** * ks8851_start_xmit_spi - transmit packet using SPI * @skb: The buffer to transmit * @dev: The device used to transmit the packet. @@ -386,16 +397,17 @@ static netdev_tx_t ks8851_start_xmit_spi(struct sk_buff *skb, spin_lock(&ks->statelock); - if (needed > ks->tx_space) { + if (ks->queued_len + needed > ks->tx_space) { netif_stop_queue(dev); ret = NETDEV_TX_BUSY; } else { - ks->tx_space -= needed; + ks->queued_len += needed; skb_queue_tail(&ks->txq, skb); } spin_unlock(&ks->statelock); - schedule_work(&kss->tx_work); + if (ret == NETDEV_TX_OK) + schedule_work(&kss->tx_work); return ret; } diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig index 329e374b9539..43ba71e82260 100644 --- a/drivers/net/ethernet/microchip/Kconfig +++ b/drivers/net/ethernet/microchip/Kconfig @@ -46,6 +46,7 @@ config LAN743X tristate "LAN743x support" depends on PCI depends on PTP_1588_CLOCK_OPTIONAL + select PHYLIB select FIXED_PHY select CRC16 select CRC32 diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index 2db5949b4c7e..6961cfc55fb9 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -1047,7 +1047,8 @@ static int lan743x_ethtool_get_ts_info(struct net_device *netdev, BIT(HWTSTAMP_TX_ON) | BIT(HWTSTAMP_TX_ONESTEP_SYNC); ts_info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | - BIT(HWTSTAMP_FILTER_ALL); + BIT(HWTSTAMP_FILTER_ALL) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); return 0; } diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index a36f6369f132..45e209a7d083 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -1466,9 +1466,15 @@ static void lan743x_phy_link_status_change(struct net_device *netdev) static void lan743x_phy_close(struct lan743x_adapter *adapter) { struct net_device *netdev = adapter->netdev; + struct phy_device *phydev = netdev->phydev; phy_stop(netdev->phydev); phy_disconnect(netdev->phydev); + + /* using phydev here as phy_disconnect NULLs netdev->phydev */ + if 
(phy_is_pseudo_fixed_link(phydev)) + fixed_phy_unregister(phydev); + } static void lan743x_phy_interface_select(struct lan743x_adapter *adapter) @@ -1515,7 +1521,7 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter) &fphy_status, NULL); if (IS_ERR(phydev)) { netdev_err(netdev, "No PHY/fixed_PHY found\n"); - return -EIO; + return PTR_ERR(phydev); } } else { goto return_error; @@ -1864,6 +1870,50 @@ static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx) return last_head - last_tail - 1; } +static void lan743x_rx_cfg_b_tstamp_config(struct lan743x_adapter *adapter, + int rx_ts_config) +{ + int channel_number; + int index; + u32 data; + + for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) { + channel_number = adapter->rx[index].channel_number; + data = lan743x_csr_read(adapter, RX_CFG_B(channel_number)); + data &= RX_CFG_B_TS_MASK_; + data |= rx_ts_config; + lan743x_csr_write(adapter, RX_CFG_B(channel_number), + data); + } +} + +int lan743x_rx_set_tstamp_mode(struct lan743x_adapter *adapter, + int rx_filter) +{ + u32 data; + + switch (rx_filter) { + case HWTSTAMP_FILTER_PTP_V2_EVENT: + lan743x_rx_cfg_b_tstamp_config(adapter, + RX_CFG_B_TS_DESCR_EN_); + data = lan743x_csr_read(adapter, PTP_RX_TS_CFG); + data |= PTP_RX_TS_CFG_EVENT_MSGS_; + lan743x_csr_write(adapter, PTP_RX_TS_CFG, data); + break; + case HWTSTAMP_FILTER_NONE: + lan743x_rx_cfg_b_tstamp_config(adapter, + RX_CFG_B_TS_NONE_); + break; + case HWTSTAMP_FILTER_ALL: + lan743x_rx_cfg_b_tstamp_config(adapter, + RX_CFG_B_TS_ALL_RX_); + break; + default: + return -ERANGE; + } + return 0; +} + void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx, bool enable_timestamping, bool enable_onestep_sync) @@ -2938,7 +2988,6 @@ static int lan743x_rx_open(struct lan743x_rx *rx) data |= RX_CFG_B_RX_PAD_2_; data &= ~RX_CFG_B_RX_RING_LEN_MASK_; data |= ((rx->ring_size) & RX_CFG_B_RX_RING_LEN_MASK_); - data |= RX_CFG_B_TS_ALL_RX_; if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) data |= RX_CFG_B_RDMABL_512_; diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index 52609fc13ad9..b648461787d2 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -522,6 +522,8 @@ (((u32)(rx_latency)) & 0x0000FFFF) #define PTP_CAP_INFO (0x0A60) #define PTP_CAP_INFO_TX_TS_CNT_GET_(reg_val) (((reg_val) & 0x00000070) >> 4) +#define PTP_RX_TS_CFG (0x0A68) +#define PTP_RX_TS_CFG_EVENT_MSGS_ GENMASK(3, 0) #define PTP_TX_MOD (0x0AA4) #define PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_ (0x10000000) @@ -657,6 +659,9 @@ #define RX_CFG_B(channel) (0xC44 + ((channel) << 6)) #define RX_CFG_B_TS_ALL_RX_ BIT(29) +#define RX_CFG_B_TS_DESCR_EN_ BIT(28) +#define RX_CFG_B_TS_NONE_ 0 +#define RX_CFG_B_TS_MASK_ (0xCFFFFFFF) #define RX_CFG_B_RX_PAD_MASK_ (0x03000000) #define RX_CFG_B_RX_PAD_0_ (0x00000000) #define RX_CFG_B_RX_PAD_2_ (0x02000000) @@ -991,6 +996,9 @@ struct lan743x_rx { struct sk_buff *skb_head, *skb_tail; }; +int lan743x_rx_set_tstamp_mode(struct lan743x_adapter *adapter, + int rx_filter); + /* SGMII Link Speed Duplex status */ enum lan743x_sgmii_lsd { POWER_DOWN = 0, diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c index 39e1066ecd5f..2f04bc77a118 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.c +++ b/drivers/net/ethernet/microchip/lan743x_ptp.c @@ -1493,6 +1493,10 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter) temp = lan743x_csr_read(adapter, PTP_TX_MOD2); temp |= 
PTP_TX_MOD2_TX_PTP_CLR_UDPV4_CHKSUM_; lan743x_csr_write(adapter, PTP_TX_MOD2, temp); + + /* Default Timestamping */ + lan743x_rx_set_tstamp_mode(adapter, HWTSTAMP_FILTER_NONE); + lan743x_ptp_enable(adapter); lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_1588_); lan743x_csr_write(adapter, PTP_INT_EN_SET, @@ -1653,6 +1657,9 @@ static void lan743x_ptp_disable(struct lan743x_adapter *adapter) { struct lan743x_ptp *ptp = &adapter->ptp; + /* Disable Timestamping */ + lan743x_rx_set_tstamp_mode(adapter, HWTSTAMP_FILTER_NONE); + mutex_lock(&ptp->command_lock); if (!lan743x_ptp_is_enabled(adapter)) { netif_warn(adapter, drv, adapter->netdev, @@ -1785,6 +1792,8 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) break; } + ret = lan743x_rx_set_tstamp_mode(adapter, config.rx_filter); + if (!ret) return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c index bd72fbc2220f..3960534ac2ad 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c @@ -2,6 +2,7 @@ #include <linux/bpf.h> #include <linux/filter.h> +#include <net/page_pool/helpers.h> #include "lan966x_main.h" diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index fbb0bb4594cd..2635ef8958c8 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -5,9 +5,10 @@ #include <linux/if_vlan.h> #include <linux/iopoll.h> #include <linux/ip.h> -#include <linux/of_platform.h> +#include <linux/of.h> #include <linux/of_net.h> #include <linux/phy/phy.h> +#include <linux/platform_device.h> #include <linux/reset.h> #include <net/addrconf.h> @@ -449,39 +450,46 @@ static int lan966x_port_get_parent_id(struct net_device *dev, return 0; } -static int lan966x_port_ioctl(struct net_device *dev, struct ifreq *ifr, - int cmd) +static int lan966x_port_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *cfg) +{ + struct lan966x_port *port = netdev_priv(dev); + + if (!port->lan966x->ptp) + return -EOPNOTSUPP; + + lan966x_ptp_hwtstamp_get(port, cfg); + + return 0; +} + +static int lan966x_port_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) { struct lan966x_port *port = netdev_priv(dev); int err; - if (cmd == SIOCSHWTSTAMP) { - err = lan966x_ptp_setup_traps(port, ifr); - if (err) - return err; - } + if (cfg->source != HWTSTAMP_SOURCE_NETDEV && + cfg->source != HWTSTAMP_SOURCE_PHYLIB) + return -EOPNOTSUPP; - if (!phy_has_hwtstamp(dev->phydev) && port->lan966x->ptp) { - switch (cmd) { - case SIOCSHWTSTAMP: - err = lan966x_ptp_hwtstamp_set(port, ifr); - if (err) - lan966x_ptp_del_traps(port); + err = lan966x_ptp_setup_traps(port, cfg); + if (err) + return err; + + if (cfg->source == HWTSTAMP_SOURCE_NETDEV) { + if (!port->lan966x->ptp) + return -EOPNOTSUPP; + err = lan966x_ptp_hwtstamp_set(port, cfg, extack); + if (err) { + lan966x_ptp_del_traps(port); return err; - case SIOCGHWTSTAMP: - return lan966x_ptp_hwtstamp_get(port, ifr); } } - if (!dev->phydev) - return -ENODEV; - - err = phy_mii_ioctl(dev->phydev, ifr, cmd); - if (err && cmd == SIOCSHWTSTAMP) - lan966x_ptp_del_traps(port); - - return err; + return 0; } static const struct net_device_ops lan966x_port_netdev_ops = { @@ -494,10 +502,12 @@ static const struct 
net_device_ops lan966x_port_netdev_ops = { .ndo_get_stats64 = lan966x_stats_get, .ndo_set_mac_address = lan966x_port_set_mac_address, .ndo_get_port_parent_id = lan966x_port_get_parent_id, - .ndo_eth_ioctl = lan966x_port_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_setup_tc = lan966x_tc_setup, .ndo_bpf = lan966x_xdp, .ndo_xdp_xmit = lan966x_xdp_xmit, + .ndo_hwtstamp_get = lan966x_port_hwtstamp_get, + .ndo_hwtstamp_set = lan966x_port_hwtstamp_set, }; bool lan966x_netdevice_check(const struct net_device *dev) @@ -661,7 +671,6 @@ static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args) skb = netdev_alloc_skb(dev, len); if (unlikely(!skb)) { netdev_err(dev, "Unable to allocate sk_buff\n"); - err = -ENOMEM; break; } buf_len = len - ETH_FCS_LEN; @@ -807,6 +816,7 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p, NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_TC; dev->hw_features |= NETIF_F_HW_TC; + dev->priv_flags |= IFF_SEE_ALL_HWTSTAMP_REQUESTS; dev->needed_headroom = IFH_LEN_BYTES; eth_hw_addr_gen(dev, lan966x->base_mac, p + 1); @@ -1108,8 +1118,8 @@ static int lan966x_probe(struct platform_device *pdev) /* set irq */ lan966x->xtr_irq = platform_get_irq_byname(pdev, "xtr"); - if (lan966x->xtr_irq <= 0) - return -EINVAL; + if (lan966x->xtr_irq < 0) + return lan966x->xtr_irq; err = devm_request_threaded_irq(&pdev->dev, lan966x->xtr_irq, NULL, lan966x_xtr_irq_handler, IRQF_ONESHOT, @@ -1250,7 +1260,7 @@ cleanup_ports: return err; } -static int lan966x_remove(struct platform_device *pdev) +static void lan966x_remove(struct platform_device *pdev) { struct lan966x *lan966x = platform_get_drvdata(pdev); @@ -1269,13 +1279,11 @@ static int lan966x_remove(struct platform_device *pdev) lan966x_ptp_deinit(lan966x); debugfs_remove_recursive(lan966x->debugfs_root); - - return 0; } static struct platform_driver lan966x_driver = { .probe = lan966x_probe, - .remove = lan966x_remove, + .remove_new = lan966x_remove, .driver = { .name = "lan966x-switch", .of_match_table = lan966x_match, diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h index 27f272831ea5..caa9e0533c96 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h @@ -10,10 +10,11 @@ #include <linux/phy.h> #include <linux/phylink.h> #include <linux/ptp_clock_kernel.h> -#include <net/page_pool.h> +#include <net/page_pool/types.h> #include <net/pkt_cls.h> #include <net/pkt_sched.h> #include <net/switchdev.h> +#include <net/xdp.h> #include <vcap_api.h> #include <vcap_api_client.h> @@ -298,7 +299,7 @@ struct lan966x_phc { struct ptp_clock *clock; struct ptp_clock_info info; struct ptp_pin_desc pins[LAN966X_PHC_PINS_NUM]; - struct hwtstamp_config hwtstamp_config; + struct kernel_hwtstamp_config hwtstamp_config; struct lan966x *lan966x; u8 index; }; @@ -578,8 +579,11 @@ void lan966x_mdb_restore_entries(struct lan966x *lan966x); int lan966x_ptp_init(struct lan966x *lan966x); void lan966x_ptp_deinit(struct lan966x *lan966x); -int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr); -int lan966x_ptp_hwtstamp_get(struct lan966x_port *port, struct ifreq *ifr); +int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack); +void lan966x_ptp_hwtstamp_get(struct lan966x_port *port, + struct kernel_hwtstamp_config *cfg); void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb, u64 src_port, u64 timestamp); int 
lan966x_ptp_txtstamp_request(struct lan966x_port *port, @@ -590,7 +594,8 @@ irqreturn_t lan966x_ptp_irq_handler(int irq, void *args); irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args); u32 lan966x_ptp_get_period_ps(void); int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts); -int lan966x_ptp_setup_traps(struct lan966x_port *port, struct ifreq *ifr); +int lan966x_ptp_setup_traps(struct lan966x_port *port, + struct kernel_hwtstamp_config *cfg); int lan966x_ptp_del_traps(struct lan966x_port *port); int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c index 266a21a2d124..63905bb5a63a 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c @@ -59,7 +59,7 @@ static int lan966x_ptp_add_trap(struct lan966x_port *port, int err; vrule = vcap_get_rule(lan966x->vcap_ctrl, rule_id); - if (vrule) { + if (!IS_ERR(vrule)) { u32 value, mask; /* Just modify the ingress port mask and exit */ @@ -106,7 +106,7 @@ static int lan966x_ptp_del_trap(struct lan966x_port *port, int err; vrule = vcap_get_rule(lan966x->vcap_ctrl, rule_id); - if (!vrule) + if (IS_ERR(vrule)) return -EEXIST; vcap_rule_get_key_u32(vrule, VCAP_KF_IF_IGR_PORT_MASK, &value, &mask); @@ -248,29 +248,23 @@ int lan966x_ptp_del_traps(struct lan966x_port *port) return err; } -int lan966x_ptp_setup_traps(struct lan966x_port *port, struct ifreq *ifr) +int lan966x_ptp_setup_traps(struct lan966x_port *port, + struct kernel_hwtstamp_config *cfg) { - struct hwtstamp_config cfg; - - if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) - return -EFAULT; - - if (cfg.rx_filter == HWTSTAMP_FILTER_NONE) + if (cfg->rx_filter == HWTSTAMP_FILTER_NONE) return lan966x_ptp_del_traps(port); else return lan966x_ptp_add_traps(port); } -int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr) +int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) { struct lan966x *lan966x = port->lan966x; - struct hwtstamp_config cfg; struct lan966x_phc *phc; - if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) - return -EFAULT; - - switch (cfg.tx_type) { + switch (cfg->tx_type) { case HWTSTAMP_TX_ON: port->ptp_tx_cmd = IFH_REW_OP_TWO_STEP_PTP; break; @@ -284,7 +278,7 @@ int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr) return -ERANGE; } - switch (cfg.rx_filter) { + switch (cfg->rx_filter) { case HWTSTAMP_FILTER_NONE: port->ptp_rx_cmd = false; break; @@ -303,7 +297,7 @@ int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: port->ptp_rx_cmd = true; - cfg.rx_filter = HWTSTAMP_FILTER_ALL; + cfg->rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE; @@ -312,20 +306,20 @@ int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr) /* Commit back the result & save it */ mutex_lock(&lan966x->ptp_lock); phc = &lan966x->phc[LAN966X_PHC_PORT]; - memcpy(&phc->hwtstamp_config, &cfg, sizeof(cfg)); + phc->hwtstamp_config = *cfg; mutex_unlock(&lan966x->ptp_lock); - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? 
-EFAULT : 0; + return 0; } -int lan966x_ptp_hwtstamp_get(struct lan966x_port *port, struct ifreq *ifr) +void lan966x_ptp_hwtstamp_get(struct lan966x_port *port, + struct kernel_hwtstamp_config *cfg) { struct lan966x *lan966x = port->lan966x; struct lan966x_phc *phc; phc = &lan966x->phc[LAN966X_PHC_PORT]; - return copy_to_user(ifr->ifr_data, &phc->hwtstamp_config, - sizeof(phc->hwtstamp_config)) ? -EFAULT : 0; + *cfg = phc->hwtstamp_config; } static int lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb) diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c index 96b3def6c474..d696cf9dbd19 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c @@ -75,7 +75,7 @@ lan966x_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st) goto out; } - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL); + st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL); return err; @@ -172,7 +172,7 @@ lan966x_tc_flower_handler_basic_usage(struct vcap_tc_flower_parse_usage *st) } } - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC); + st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_BASIC); return err; out: NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error"); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c index 01f3a3a41cdb..37d2584b48a7 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c @@ -1012,8 +1012,7 @@ static void sparx5_get_sset_strings(struct net_device *ndev, u32 sset, u8 *data) return; for (idx = 0; idx < sparx5->num_ethtool_stats; idx++) - strncpy(data + idx * ETH_GSTRING_LEN, - sparx5->stats_layout[idx], ETH_GSTRING_LEN); + ethtool_sprintf(&data, "%s", sparx5->stats_layout[idx]); } static void sparx5_get_sset_data(struct net_device *ndev, diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index dc9af480bfea..d1f7fc8b1b71 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -911,7 +911,7 @@ cleanup_pnode: return err; } -static int mchp_sparx5_remove(struct platform_device *pdev) +static void mchp_sparx5_remove(struct platform_device *pdev) { struct sparx5 *sparx5 = platform_get_drvdata(pdev); @@ -931,8 +931,6 @@ static int mchp_sparx5_remove(struct platform_device *pdev) /* Unregister netdevs */ sparx5_unregister_notifier_blocks(sparx5); destroy_workqueue(sparx5->mact_queue); - - return 0; } static const struct of_device_id mchp_sparx5_match[] = { @@ -943,7 +941,7 @@ MODULE_DEVICE_TABLE(of, mchp_sparx5_match); static struct platform_driver mchp_sparx5_driver = { .probe = mchp_sparx5_probe, - .remove = mchp_sparx5_remove, + .remove_new = mchp_sparx5_remove, .driver = { .name = "sparx5-switch", .of_match_table = mchp_sparx5_match, diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h index 62c85463b634..6f565c0c0c3d 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h @@ -205,7 +205,7 @@ enum sparx5_core_clockfreq { struct sparx5_phc { struct ptp_clock *clock; struct ptp_clock_info info; - struct hwtstamp_config hwtstamp_config; + struct kernel_hwtstamp_config hwtstamp_config; struct 
sparx5 *sparx5; u8 index; }; @@ -388,8 +388,11 @@ void sparx5_unregister_netdevs(struct sparx5 *sparx5); /* sparx5_ptp.c */ int sparx5_ptp_init(struct sparx5 *sparx5); void sparx5_ptp_deinit(struct sparx5 *sparx5); -int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr); -int sparx5_ptp_hwtstamp_get(struct sparx5_port *port, struct ifreq *ifr); +int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack); +void sparx5_ptp_hwtstamp_get(struct sparx5_port *port, + struct kernel_hwtstamp_config *cfg); void sparx5_ptp_rxtstamp(struct sparx5 *sparx5, struct sk_buff *skb, u64 timestamp); int sparx5_ptp_txtstamp_request(struct sparx5_port *port, @@ -411,7 +414,6 @@ enum sparx5_pgid_type { }; void sparx5_pgid_init(struct sparx5 *spx5); -int sparx5_pgid_alloc_glag(struct sparx5 *spx5, u16 *idx); int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx); int sparx5_pgid_free(struct sparx5 *spx5, u16 idx); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c index d078156581d5..705a004b324f 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c @@ -210,22 +210,31 @@ static int sparx5_get_port_parent_id(struct net_device *dev, return 0; } -static int sparx5_port_ioctl(struct net_device *dev, struct ifreq *ifr, - int cmd) +static int sparx5_port_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *cfg) { struct sparx5_port *sparx5_port = netdev_priv(dev); struct sparx5 *sparx5 = sparx5_port->sparx5; - if (!phy_has_hwtstamp(dev->phydev) && sparx5->ptp) { - switch (cmd) { - case SIOCSHWTSTAMP: - return sparx5_ptp_hwtstamp_set(sparx5_port, ifr); - case SIOCGHWTSTAMP: - return sparx5_ptp_hwtstamp_get(sparx5_port, ifr); - } - } + if (!sparx5->ptp) + return -EOPNOTSUPP; + + sparx5_ptp_hwtstamp_get(sparx5_port, cfg); + + return 0; +} + +static int sparx5_port_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) +{ + struct sparx5_port *sparx5_port = netdev_priv(dev); + struct sparx5 *sparx5 = sparx5_port->sparx5; + + if (!sparx5->ptp) + return -EOPNOTSUPP; - return phy_mii_ioctl(dev->phydev, ifr, cmd); + return sparx5_ptp_hwtstamp_set(sparx5_port, cfg, extack); } static const struct net_device_ops sparx5_port_netdev_ops = { @@ -238,8 +247,10 @@ static const struct net_device_ops sparx5_port_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_get_stats64 = sparx5_get_stats64, .ndo_get_port_parent_id = sparx5_get_port_parent_id, - .ndo_eth_ioctl = sparx5_port_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_setup_tc = sparx5_port_setup_tc, + .ndo_hwtstamp_get = sparx5_port_hwtstamp_get, + .ndo_hwtstamp_set = sparx5_port_hwtstamp_set, }; bool sparx5_netdevice_check(const struct net_device *dev) diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c index 0edb98cef7e4..5a932460db58 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c @@ -74,10 +74,11 @@ static u64 sparx5_ptp_get_nominal_value(struct sparx5 *sparx5) return res; } -int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr) +int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) { struct sparx5 *sparx5 = port->sparx5; - struct 
hwtstamp_config cfg; struct sparx5_phc *phc; /* For now don't allow to run ptp on ports that are part of a bridge, @@ -88,10 +89,7 @@ int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr) if (test_bit(port->portno, sparx5->bridge_mask)) return -EINVAL; - if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) - return -EFAULT; - - switch (cfg.tx_type) { + switch (cfg->tx_type) { case HWTSTAMP_TX_ON: port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP; break; @@ -105,7 +103,7 @@ int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr) return -ERANGE; } - switch (cfg.rx_filter) { + switch (cfg->rx_filter) { case HWTSTAMP_FILTER_NONE: break; case HWTSTAMP_FILTER_ALL: @@ -122,7 +120,7 @@ int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: - cfg.rx_filter = HWTSTAMP_FILTER_ALL; + cfg->rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE; @@ -131,20 +129,20 @@ int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr) /* Commit back the result & save it */ mutex_lock(&sparx5->ptp_lock); phc = &sparx5->phc[SPARX5_PHC_PORT]; - memcpy(&phc->hwtstamp_config, &cfg, sizeof(cfg)); + phc->hwtstamp_config = *cfg; mutex_unlock(&sparx5->ptp_lock); - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; + return 0; } -int sparx5_ptp_hwtstamp_get(struct sparx5_port *port, struct ifreq *ifr) +void sparx5_ptp_hwtstamp_get(struct sparx5_port *port, + struct kernel_hwtstamp_config *cfg) { struct sparx5 *sparx5 = port->sparx5; struct sparx5_phc *phc; phc = &sparx5->phc[SPARX5_PHC_PORT]; - return copy_to_user(ifr->ifr_data, &phc->hwtstamp_config, - sizeof(phc->hwtstamp_config)) ? -EFAULT : 0; + *cfg = phc->hwtstamp_config; } static void sparx5_ptp_classify(struct sparx5_port *port, struct sk_buff *skb, diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c index 3f87a5285a6d..523e0c470894 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c @@ -126,7 +126,7 @@ sparx5_tc_flower_handler_basic_usage(struct vcap_tc_flower_parse_usage *st) } } - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC); + st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_BASIC); return err; @@ -175,7 +175,7 @@ sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st) goto out; } - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL); + st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL); return err; @@ -1274,7 +1274,7 @@ static int sparx5_tc_free_rule_resources(struct net_device *ndev, int ret = 0; vrule = vcap_get_rule(vctrl, rule_id); - if (!vrule || IS_ERR(vrule)) + if (IS_ERR(vrule)) return -EINVAL; sparx5_tc_free_psfp_resources(sparx5, vrule); diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.c b/drivers/net/ethernet/microchip/vcap/vcap_api.c index a418ad8e8770..ef980e4e5bc2 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api.c +++ b/drivers/net/ethernet/microchip/vcap/vcap_api.c @@ -1021,18 +1021,32 @@ static struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri, list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list) { newckf = kmemdup(ckf, sizeof(*newckf), GFP_KERNEL); if (!newckf) - return ERR_PTR(-ENOMEM); + goto err; list_add_tail(&newckf->ctrl.list, &duprule->data.keyfields); } list_for_each_entry(caf, &ri->data.actionfields, ctrl.list) { newcaf = kmemdup(caf, 
sizeof(*newcaf), GFP_KERNEL); if (!newcaf) - return ERR_PTR(-ENOMEM); + goto err; list_add_tail(&newcaf->ctrl.list, &duprule->data.actionfields); } return duprule; + +err: + list_for_each_entry_safe(ckf, newckf, &duprule->data.keyfields, ctrl.list) { + list_del(&ckf->ctrl.list); + kfree(ckf); + } + + list_for_each_entry_safe(caf, newcaf, &duprule->data.actionfields, ctrl.list) { + list_del(&caf->ctrl.list); + kfree(caf); + } + + kfree(duprule); + return ERR_PTR(-ENOMEM); } static void vcap_apply_width(u8 *dst, int width, int bytes) @@ -2396,7 +2410,7 @@ struct vcap_rule *vcap_decode_rule(struct vcap_rule_internal *elem) ri = vcap_dup_rule(elem, elem->state == VCAP_RS_DISABLED); if (IS_ERR(ri)) - return ERR_PTR(PTR_ERR(ri)); + return ERR_CAST(ri); if (ri->state == VCAP_RS_DISABLED) goto out; @@ -2429,7 +2443,7 @@ struct vcap_rule *vcap_get_rule(struct vcap_control *vctrl, u32 id) elem = vcap_get_locked_rule(vctrl, id); if (!elem) - return NULL; + return ERR_PTR(-ENOENT); rule = vcap_decode_rule(elem); mutex_unlock(&elem->admin->lock); diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.h b/drivers/net/ethernet/microchip/vcap/vcap_api.h index 62db270f65af..9eccfa633c1a 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api.h +++ b/drivers/net/ethernet/microchip/vcap/vcap_api.h @@ -277,7 +277,4 @@ struct vcap_control { struct list_head list; /* list of vcap instances */ }; -/* Set client control interface on the API */ -int vcap_api_set_client(struct vcap_control *vctrl); - #endif /* __VCAP_API__ */ diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h index d9d1f7c9d762..88641508f885 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h +++ b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h @@ -226,9 +226,6 @@ int vcap_chain_offset(struct vcap_control *vctrl, int from_cid, int to_cid); bool vcap_is_next_lookup(struct vcap_control *vctrl, int cur_cid, int next_cid); /* Is this chain id the last lookup of all VCAPs */ bool vcap_is_last_chain(struct vcap_control *vctrl, int cid, bool ingress); -/* Provide all rules via a callback interface */ -int vcap_rule_iter(struct vcap_control *vctrl, - int (*callback)(void *, struct vcap_rule *), void *arg); /* Match a list of keys against the keysets available in a vcap type */ bool vcap_rule_find_keysets(struct vcap_rule *rule, struct vcap_keyset_list *matches); diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c index c2c3397c5898..59bfbda29bb3 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c +++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c @@ -300,7 +300,7 @@ static int vcap_show_admin(struct vcap_control *vctrl, vcap_show_admin_info(vctrl, admin, out); list_for_each_entry(elem, &admin->rules, list) { vrule = vcap_decode_rule(elem); - if (IS_ERR_OR_NULL(vrule)) { + if (IS_ERR(vrule)) { ret = PTR_ERR(vrule); break; } diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c index c07f25e791c7..fe4e166de8a0 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c +++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c @@ -243,10 +243,9 @@ static void vcap_test_api_init(struct vcap_admin *admin) } /* Helper function to create a rule of a specific size */ -static struct vcap_rule * -test_vcap_xn_rule_creator(struct kunit *test, int cid, enum vcap_user user, - u16 priority, - int id, 
int size, int expected_addr) +static void test_vcap_xn_rule_creator(struct kunit *test, int cid, + enum vcap_user user, u16 priority, + int id, int size, int expected_addr) { struct vcap_rule *rule; struct vcap_rule_internal *ri; @@ -311,7 +310,7 @@ test_vcap_xn_rule_creator(struct kunit *test, int cid, enum vcap_user user, ret = vcap_add_rule(rule); KUNIT_EXPECT_EQ(test, 0, ret); KUNIT_EXPECT_EQ(test, expected_addr, ri->addr); - return rule; + vcap_free_rule(rule); } /* Prepare testing rule deletion */ @@ -995,6 +994,16 @@ static void vcap_api_encode_rule_actionset_test(struct kunit *test) KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[11]); } +static void vcap_free_ckf(struct vcap_rule *rule) +{ + struct vcap_client_keyfield *ckf, *next_ckf; + + list_for_each_entry_safe(ckf, next_ckf, &rule->keyfields, ctrl.list) { + list_del(&ckf->ctrl.list); + kfree(ckf); + } +} + static void vcap_api_rule_add_keyvalue_test(struct kunit *test) { struct vcap_admin admin = { @@ -1027,6 +1036,7 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test) KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type); KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.value); KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.mask); + vcap_free_ckf(rule); INIT_LIST_HEAD(&rule->keyfields); ret = vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_1); @@ -1039,6 +1049,7 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test) KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type); KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.value); KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.mask); + vcap_free_ckf(rule); INIT_LIST_HEAD(&rule->keyfields); ret = vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, @@ -1052,6 +1063,7 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test) KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type); KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.value); KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.mask); + vcap_free_ckf(rule); INIT_LIST_HEAD(&rule->keyfields); ret = vcap_rule_add_key_u32(rule, VCAP_KF_TYPE, 0x98765432, 0xff00ffab); @@ -1064,6 +1076,7 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test) KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, kf->ctrl.type); KUNIT_EXPECT_EQ(test, 0x98765432, kf->data.u32.value); KUNIT_EXPECT_EQ(test, 0xff00ffab, kf->data.u32.mask); + vcap_free_ckf(rule); INIT_LIST_HEAD(&rule->keyfields); ret = vcap_rule_add_key_u128(rule, VCAP_KF_L3_IP6_SIP, &dip); @@ -1078,6 +1091,18 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test) KUNIT_EXPECT_EQ(test, dip.value[idx], kf->data.u128.value[idx]); for (idx = 0; idx < ARRAY_SIZE(dip.mask); ++idx) KUNIT_EXPECT_EQ(test, dip.mask[idx], kf->data.u128.mask[idx]); + vcap_free_ckf(rule); +} + +static void vcap_free_caf(struct vcap_rule *rule) +{ + struct vcap_client_actionfield *caf, *next_caf; + + list_for_each_entry_safe(caf, next_caf, + &rule->actionfields, ctrl.list) { + list_del(&caf->ctrl.list); + kfree(caf); + } } static void vcap_api_rule_add_actionvalue_test(struct kunit *test) @@ -1105,6 +1130,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test) KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action); KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type); KUNIT_EXPECT_EQ(test, 0x0, af->data.u1.value); + vcap_free_caf(rule); INIT_LIST_HEAD(&rule->actionfields); ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_1); @@ -1116,6 +1142,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test) KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action); 
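The vcap_free_ckf() and vcap_free_caf() helpers introduced above are needed because every vcap_rule_add_key_*() / vcap_rule_add_action_*() call allocates a client field and links it onto the rule's keyfields/actionfields list, and nothing else in these KUnit tests releases them; each checked field is therefore freed before the list head is re-initialised for the next check. A sketch of the allocating side, with the field layout inferred from the assertions in this file rather than copied from vcap_api.c:

	/* Illustrative only: shapes inferred from the KUNIT_EXPECT_EQ()
	 * checks in this file, not the literal vcap_api.c implementation.
	 */
	static int sketch_rule_add_key_bit(struct vcap_rule *rule,
					   enum vcap_key_field key,
					   enum vcap_bit val)
	{
		struct vcap_client_keyfield *ckf;

		ckf = kzalloc(sizeof(*ckf), GFP_KERNEL);
		if (!ckf)
			return -ENOMEM;

		ckf->ctrl.key = key;
		ckf->ctrl.type = VCAP_FIELD_BIT;
		ckf->data.u1.value = (val == VCAP_BIT_1);  /* 1 only for BIT_1 */
		ckf->data.u1.mask = (val != VCAP_BIT_ANY); /* 0 = don't care */

		/* This allocation is what vcap_free_ckf() later reclaims. */
		list_add_tail(&ckf->ctrl.list, &rule->keyfields);
		return 0;
	}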
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type); KUNIT_EXPECT_EQ(test, 0x1, af->data.u1.value); + vcap_free_caf(rule); INIT_LIST_HEAD(&rule->actionfields); ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_ANY); @@ -1127,6 +1154,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test) KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action); KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type); KUNIT_EXPECT_EQ(test, 0x0, af->data.u1.value); + vcap_free_caf(rule); INIT_LIST_HEAD(&rule->actionfields); ret = vcap_rule_add_action_u32(rule, VCAP_AF_TYPE, 0x98765432); @@ -1138,6 +1166,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test) KUNIT_EXPECT_EQ(test, VCAP_AF_TYPE, af->ctrl.action); KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, af->ctrl.type); KUNIT_EXPECT_EQ(test, 0x98765432, af->data.u32.value); + vcap_free_caf(rule); INIT_LIST_HEAD(&rule->actionfields); ret = vcap_rule_add_action_u32(rule, VCAP_AF_MASK_MODE, 0xaabbccdd); @@ -1149,6 +1178,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test) KUNIT_EXPECT_EQ(test, VCAP_AF_MASK_MODE, af->ctrl.action); KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, af->ctrl.type); KUNIT_EXPECT_EQ(test, 0xaabbccdd, af->data.u32.value); + vcap_free_caf(rule); } static void vcap_api_rule_find_keyset_basic_test(struct kunit *test) @@ -1408,6 +1438,10 @@ static void vcap_api_encode_rule_test(struct kunit *test) ret = list_empty(&is2_admin.rules); KUNIT_EXPECT_EQ(test, false, ret); KUNIT_EXPECT_EQ(test, 0, ret); + + vcap_enable_lookups(&test_vctrl, &test_netdev, 0, 0, + rule->cookie, false); + vcap_free_rule(rule); /* Check that the rule has been freed: tricky to access since this @@ -1418,6 +1452,8 @@ static void vcap_api_encode_rule_test(struct kunit *test) KUNIT_EXPECT_EQ(test, true, ret); ret = list_empty(&rule->actionfields); KUNIT_EXPECT_EQ(test, true, ret); + + vcap_del_rule(&test_vctrl, &test_netdev, id); } static void vcap_api_set_rule_counter_test(struct kunit *test) @@ -1561,6 +1597,11 @@ static void vcap_api_rule_insert_in_order_test(struct kunit *test) test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 20, 400, 6, 774); test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 30, 300, 3, 771); test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 40, 200, 2, 768); + + vcap_del_rule(&test_vctrl, &test_netdev, 200); + vcap_del_rule(&test_vctrl, &test_netdev, 300); + vcap_del_rule(&test_vctrl, &test_netdev, 400); + vcap_del_rule(&test_vctrl, &test_netdev, 500); } static void vcap_api_rule_insert_reverse_order_test(struct kunit *test) @@ -1619,6 +1660,11 @@ static void vcap_api_rule_insert_reverse_order_test(struct kunit *test) ++idx; } KUNIT_EXPECT_EQ(test, 768, admin.last_used_addr); + + vcap_del_rule(&test_vctrl, &test_netdev, 500); + vcap_del_rule(&test_vctrl, &test_netdev, 400); + vcap_del_rule(&test_vctrl, &test_netdev, 300); + vcap_del_rule(&test_vctrl, &test_netdev, 200); } static void vcap_api_rule_remove_at_end_test(struct kunit *test) @@ -1819,6 +1865,9 @@ static void vcap_api_rule_remove_in_front_test(struct kunit *test) KUNIT_EXPECT_EQ(test, 786, test_init_start); KUNIT_EXPECT_EQ(test, 8, test_init_count); KUNIT_EXPECT_EQ(test, 794, admin.last_used_addr); + + vcap_del_rule(&test_vctrl, &test_netdev, 200); + vcap_del_rule(&test_vctrl, &test_netdev, 300); } static struct kunit_case vcap_api_rule_remove_test_cases[] = { diff --git a/drivers/net/ethernet/microchip/vcap/vcap_tc.c b/drivers/net/ethernet/microchip/vcap/vcap_tc.c index 09abe7944af6..27e2dffb65e6 100644 --- 
a/drivers/net/ethernet/microchip/vcap/vcap_tc.c +++ b/drivers/net/ethernet/microchip/vcap/vcap_tc.c @@ -50,7 +50,7 @@ int vcap_tc_flower_handler_ethaddr_usage(struct vcap_tc_flower_parse_usage *st) goto out; } - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS); + st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS); return err; @@ -86,7 +86,7 @@ int vcap_tc_flower_handler_ipv4_usage(struct vcap_tc_flower_parse_usage *st) } } - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS); + st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS); return err; @@ -124,7 +124,7 @@ int vcap_tc_flower_handler_ipv6_usage(struct vcap_tc_flower_parse_usage *st) goto out; } } - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS); + st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS); return err; out: NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error"); @@ -158,7 +158,7 @@ int vcap_tc_flower_handler_portnum_usage(struct vcap_tc_flower_parse_usage *st) goto out; } - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS); + st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_PORTS); return err; @@ -201,7 +201,7 @@ int vcap_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st) goto out; } - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN); + st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN); return 0; out: @@ -238,7 +238,7 @@ int vcap_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st, if (mt.mask->vlan_tpid) st->tpid = be16_to_cpu(mt.key->vlan_tpid); - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN); + st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_VLAN); return 0; out: @@ -313,7 +313,7 @@ int vcap_tc_flower_handler_tcp_usage(struct vcap_tc_flower_parse_usage *st) goto out; } - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP); + st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_TCP); return err; @@ -376,7 +376,7 @@ int vcap_tc_flower_handler_arp_usage(struct vcap_tc_flower_parse_usage *st) goto out; } - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ARP); + st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_ARP); return 0; @@ -401,7 +401,7 @@ int vcap_tc_flower_handler_ip_usage(struct vcap_tc_flower_parse_usage *st) goto out; } - st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IP); + st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_IP); return err; diff --git a/drivers/net/ethernet/microchip/vcap/vcap_tc.h b/drivers/net/ethernet/microchip/vcap/vcap_tc.h index 071f892f9aa4..49b02d032906 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_tc.h +++ b/drivers/net/ethernet/microchip/vcap/vcap_tc.h @@ -14,7 +14,7 @@ struct vcap_tc_flower_parse_usage { u16 l3_proto; u8 l4_proto; u16 tpid; - unsigned int used_keys; + unsigned long long used_keys; }; int vcap_tc_flower_handler_ethaddr_usage(struct vcap_tc_flower_parse_usage *st); diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig index 090e6b983243..01eb7445ead9 100644 --- a/drivers/net/ethernet/microsoft/Kconfig +++ b/drivers/net/ethernet/microsoft/Kconfig @@ -20,6 +20,7 @@ config MICROSOFT_MANA depends on PCI_MSI && X86_64 depends on PCI_HYPERV select AUXILIARY_BUS + select PAGE_POOL help This driver supports Microsoft Azure Network Adapter (MANA). So far, the driver is only supported on X86_64. 
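The "select PAGE_POOL" just added for MICROSOFT_MANA pairs with the mana_en.c changes further down, which move RX buffer management from bare dev_alloc_page()/put_page() onto the page_pool API: each RX queue creates its own pool bound to its NAPI context, refills from it, recycles dropped buffers back into it, and hands delivered buffers to the stack with skb_mark_for_recycle() so they also return to the pool. A minimal sketch of that life cycle under the same assumptions as the patch (one pool per RX queue; the rxq_* names here are illustrative, not driver functions):

	#include <net/page_pool/helpers.h>

	/* Create one pool per RX queue, bound to the queue's NAPI context
	 * so pages can be recycled locklessly from softirq.
	 */
	static struct page_pool *rxq_pool_create(unsigned int pool_size, int nid,
						 struct napi_struct *napi)
	{
		struct page_pool_params pprm = {
			.pool_size = pool_size, /* e.g. RX_BUFFERS_PER_QUEUE */
			.nid = nid,             /* NUMA node of the device */
			.napi = napi,
		};

		return page_pool_create(&pprm); /* ERR_PTR() on failure */
	}

	/* Refill path: replaces dev_alloc_page(). */
	static struct page *rxq_alloc_page(struct page_pool *pool)
	{
		return page_pool_dev_alloc_pages(pool);
	}

	/* Drop path: give the page straight back instead of put_page(). */
	static void rxq_drop_page(struct page_pool *pool, struct page *page)
	{
		page_pool_put_full_page(pool, page, false);
	}

The XDP memory-model registration switches accordingly (MEM_TYPE_PAGE_SHARED to MEM_TYPE_PAGE_POOL), so frames dropped or redirected by XDP are recycled through the same per-queue pool.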
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 8f3f78b68592..6367de0c2c2e 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -106,6 +106,25 @@ static int mana_gd_query_max_resources(struct pci_dev *pdev) return 0; } +static int mana_gd_query_hwc_timeout(struct pci_dev *pdev, u32 *timeout_val) +{ + struct gdma_context *gc = pci_get_drvdata(pdev); + struct gdma_query_hwc_timeout_resp resp = {}; + struct gdma_query_hwc_timeout_req req = {}; + int err; + + mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_HWC_TIMEOUT, + sizeof(req), sizeof(resp)); + req.timeout_ms = *timeout_val; + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); + if (err || resp.hdr.status) + return err ? err : -EPROTO; + + *timeout_val = resp.timeout_ms; + + return 0; +} + static int mana_gd_detect_devices(struct pci_dev *pdev) { struct gdma_context *gc = pci_get_drvdata(pdev); @@ -300,8 +319,11 @@ static void mana_gd_ring_doorbell(struct gdma_context *gc, u32 db_index, void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue) { + /* Hardware Spec specifies that software client should set 0 for + * wqe_cnt for Receive Queues. This value is not used in Send Queues. + */ mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type, - queue->id, queue->head * GDMA_WQE_BU_SIZE, 1); + queue->id, queue->head * GDMA_WQE_BU_SIZE, 0); } void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit) @@ -879,8 +901,10 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev) struct gdma_context *gc = pci_get_drvdata(pdev); struct gdma_verify_ver_resp resp = {}; struct gdma_verify_ver_req req = {}; + struct hw_channel_context *hwc; int err; + hwc = gc->hwc.driver_data; mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION, sizeof(req), sizeof(resp)); @@ -907,7 +931,14 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev) err, resp.hdr.status); return err ? err : -EPROTO; } - + if (resp.pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG) { + err = mana_gd_query_hwc_timeout(pdev, &hwc->hwc_timeout); + if (err) { + dev_err(gc->dev, "Failed to set the hwc timeout %d\n", err); + return err; + } + dev_dbg(gc->dev, "set the hwc timeout to %u\n", hwc->hwc_timeout); + } return 0; } diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index 2bd1d74021f7..9d1cd3bfcf66 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -174,7 +174,25 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self, complete(&hwc->hwc_init_eqe_comp); break; + case GDMA_EQE_HWC_SOC_RECONFIG_DATA: + type_data.as_uint32 = event->details[0]; + type = type_data.type; + val = type_data.value; + + switch (type) { + case HWC_DATA_CFG_HWC_TIMEOUT: + hwc->hwc_timeout = val; + break; + + default: + dev_warn(hwc->dev, "Received unknown reconfig type %u\n", type); + break; + } + + break; + default: + dev_warn(hwc->dev, "Received unknown gdma event %u\n", event->type); /* Ignore unknown events, which should never happen. */ break; } @@ -696,6 +714,7 @@ int mana_hwc_create_channel(struct gdma_context *gc) gd->driver_data = hwc; hwc->gdma_dev = gd; hwc->dev = gc->dev; + hwc->hwc_timeout = HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS; /* HWC's instance number is always 0. 
*/ gd->dev_id.as_uint32 = 0; @@ -770,6 +789,8 @@ void mana_hwc_destroy_channel(struct gdma_context *gc) hwc->gdma_dev->doorbell = INVALID_DOORBELL; hwc->gdma_dev->pdid = INVALID_PDID; + hwc->hwc_timeout = 0; + kfree(hwc); gc->hwc.driver_data = NULL; gc->hwc.gdma_context = NULL; @@ -825,7 +846,8 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len, goto out; } - if (!wait_for_completion_timeout(&ctx->comp_event, 30 * HZ)) { + if (!wait_for_completion_timeout(&ctx->comp_event, + msecs_to_jiffies(hwc->hwc_timeout))) { dev_err(hwc->dev, "HWC: Request timed out!\n"); err = -ETIMEDOUT; goto out; diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index c2ad0921e893..fc3d2903a80f 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -12,6 +12,8 @@ #include <net/checksum.h> #include <net/ip6_checksum.h> +#include <net/page_pool/helpers.h> +#include <net/xdp.h> #include <net/mana/mana.h> #include <net/mana/mana_auxiliary.h> @@ -89,63 +91,137 @@ static unsigned int mana_checksum_info(struct sk_buff *skb) return 0; } +static void mana_add_sge(struct mana_tx_package *tp, struct mana_skb_head *ash, + int sg_i, dma_addr_t da, int sge_len, u32 gpa_mkey) +{ + ash->dma_handle[sg_i] = da; + ash->size[sg_i] = sge_len; + + tp->wqe_req.sgl[sg_i].address = da; + tp->wqe_req.sgl[sg_i].mem_key = gpa_mkey; + tp->wqe_req.sgl[sg_i].size = sge_len; +} + static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc, - struct mana_tx_package *tp) + struct mana_tx_package *tp, int gso_hs) { struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; + int hsg = 1; /* num of SGEs of linear part */ struct gdma_dev *gd = apc->ac->gdma_dev; + int skb_hlen = skb_headlen(skb); + int sge0_len, sge1_len = 0; struct gdma_context *gc; struct device *dev; skb_frag_t *frag; dma_addr_t da; + int sg_i; int i; gc = gd->gdma_context; dev = gc->dev; - da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); + if (gso_hs && gso_hs < skb_hlen) { + sge0_len = gso_hs; + sge1_len = skb_hlen - gso_hs; + } else { + sge0_len = skb_hlen; + } + + da = dma_map_single(dev, skb->data, sge0_len, DMA_TO_DEVICE); if (dma_mapping_error(dev, da)) return -ENOMEM; - ash->dma_handle[0] = da; - ash->size[0] = skb_headlen(skb); + mana_add_sge(tp, ash, 0, da, sge0_len, gd->gpa_mkey); - tp->wqe_req.sgl[0].address = ash->dma_handle[0]; - tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey; - tp->wqe_req.sgl[0].size = ash->size[0]; + if (sge1_len) { + sg_i = 1; + da = dma_map_single(dev, skb->data + sge0_len, sge1_len, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, da)) + goto frag_err; + + mana_add_sge(tp, ash, sg_i, da, sge1_len, gd->gpa_mkey); + hsg = 2; + } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + sg_i = hsg + i; + frag = &skb_shinfo(skb)->frags[i]; da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); - if (dma_mapping_error(dev, da)) goto frag_err; - ash->dma_handle[i + 1] = da; - ash->size[i + 1] = skb_frag_size(frag); - - tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1]; - tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey; - tp->wqe_req.sgl[i + 1].size = ash->size[i + 1]; + mana_add_sge(tp, ash, sg_i, da, skb_frag_size(frag), + gd->gpa_mkey); } return 0; frag_err: - for (i = i - 1; i >= 0; i--) - dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1], + for (i = sg_i - 1; i >= hsg; i--) + dma_unmap_page(dev, ash->dma_handle[i], ash->size[i], DMA_TO_DEVICE); - 
dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE); + for (i = hsg - 1; i >= 0; i--) + dma_unmap_single(dev, ash->dma_handle[i], ash->size[i], + DMA_TO_DEVICE); return -ENOMEM; } +/* Handle the case when GSO SKB linear length is too large. + * MANA NIC requires GSO packets to put only the packet header to SGE0. + * So, we need 2 SGEs for the skb linear part which contains more than the + * header. + * Return a positive value for the number of SGEs, or a negative value + * for an error. + */ +static int mana_fix_skb_head(struct net_device *ndev, struct sk_buff *skb, + int gso_hs) +{ + int num_sge = 1 + skb_shinfo(skb)->nr_frags; + int skb_hlen = skb_headlen(skb); + + if (gso_hs < skb_hlen) { + num_sge++; + } else if (gso_hs > skb_hlen) { + if (net_ratelimit()) + netdev_err(ndev, + "TX nonlinear head: hs:%d, skb_hlen:%d\n", + gso_hs, skb_hlen); + + return -EINVAL; + } + + return num_sge; +} + +/* Get the GSO packet's header size */ +static int mana_get_gso_hs(struct sk_buff *skb) +{ + int gso_hs; + + if (skb->encapsulation) { + gso_hs = skb_inner_tcp_all_headers(skb); + } else { + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + gso_hs = skb_transport_offset(skb) + + sizeof(struct udphdr); + } else { + gso_hs = skb_tcp_all_headers(skb); + } + } + + return gso_hs; +} + netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) { enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT; struct mana_port_context *apc = netdev_priv(ndev); + int gso_hs = 0; /* zero for non-GSO pkts */ u16 txq_idx = skb_get_queue_mapping(skb); struct gdma_dev *gd = apc->ac->gdma_dev; bool ipv4 = false, ipv6 = false; @@ -157,7 +233,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct mana_txq *txq; struct mana_cq *cq; int err, len; - u16 ihs; if (unlikely(!apc->port_is_up)) goto tx_drop; @@ -207,19 +282,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) pkg.wqe_req.client_data_unit = 0; pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags; - WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES); - - if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) { - pkg.wqe_req.sgl = pkg.sgl_array; - } else { - pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge, - sizeof(struct gdma_sge), - GFP_ATOMIC); - if (!pkg.sgl_ptr) - goto tx_drop_count; - - pkg.wqe_req.sgl = pkg.sgl_ptr; - } if (skb->protocol == htons(ETH_P_IP)) ipv4 = true; @@ -227,6 +289,26 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) ipv6 = true; if (skb_is_gso(skb)) { + int num_sge; + + gso_hs = mana_get_gso_hs(skb); + + num_sge = mana_fix_skb_head(ndev, skb, gso_hs); + if (num_sge > 0) + pkg.wqe_req.num_sge = num_sge; + else + goto tx_drop_count; + + u64_stats_update_begin(&tx_stats->syncp); + if (skb->encapsulation) { + tx_stats->tso_inner_packets++; + tx_stats->tso_inner_bytes += skb->len - gso_hs; + } else { + tx_stats->tso_packets++; + tx_stats->tso_bytes += skb->len - gso_hs; + } + u64_stats_update_end(&tx_stats->syncp); + pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4; pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6; @@ -250,28 +332,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); } - - if (skb->encapsulation) { - ihs = skb_inner_tcp_all_headers(skb); - u64_stats_update_begin(&tx_stats->syncp); - tx_stats->tso_inner_packets++; - tx_stats->tso_inner_bytes += skb->len - ihs; - u64_stats_update_end(&tx_stats->syncp); - } else { - if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { - 
ihs = skb_transport_offset(skb) + sizeof(struct udphdr); - } else { - ihs = skb_tcp_all_headers(skb); - if (ipv6_has_hopopt_jumbo(skb)) - ihs -= sizeof(struct hop_jumbo_hdr); - } - - u64_stats_update_begin(&tx_stats->syncp); - tx_stats->tso_packets++; - tx_stats->tso_bytes += skb->len - ihs; - u64_stats_update_end(&tx_stats->syncp); - } - } else if (skb->ip_summed == CHECKSUM_PARTIAL) { csum_type = mana_checksum_info(skb); @@ -294,11 +354,25 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) } else { /* Can't do offload of this type of checksum */ if (skb_checksum_help(skb)) - goto free_sgl_ptr; + goto tx_drop_count; } } - if (mana_map_skb(skb, apc, &pkg)) { + WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES); + + if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) { + pkg.wqe_req.sgl = pkg.sgl_array; + } else { + pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge, + sizeof(struct gdma_sge), + GFP_ATOMIC); + if (!pkg.sgl_ptr) + goto tx_drop_count; + + pkg.wqe_req.sgl = pkg.sgl_ptr; + } + + if (mana_map_skb(skb, apc, &pkg, gso_hs)) { u64_stats_update_begin(&tx_stats->syncp); tx_stats->mana_map_err++; u64_stats_update_end(&tx_stats->syncp); @@ -1256,11 +1330,16 @@ static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc) struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; struct device *dev = gc->dev; - int i; + int hsg, i; + + /* Number of SGEs of linear part */ + hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1; - dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE); + for (i = 0; i < hsg; i++) + dma_unmap_single(dev, ash->dma_handle[i], ash->size[i], + DMA_TO_DEVICE); - for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++) + for (i = hsg; i < skb_shinfo(skb)->nr_frags + hsg; i++) dma_unmap_page(dev, ash->dma_handle[i], ash->size[i], DMA_TO_DEVICE); } @@ -1315,19 +1394,23 @@ static void mana_poll_tx_cq(struct mana_cq *cq) case CQE_TX_VPORT_IDX_OUT_OF_RANGE: case CQE_TX_VPORT_DISABLED: case CQE_TX_VLAN_TAGGING_VIOLATION: - WARN_ONCE(1, "TX: CQE error %d: ignored.\n", - cqe_oob->cqe_hdr.cqe_type); + if (net_ratelimit()) + netdev_err(ndev, "TX: CQE error %d\n", + cqe_oob->cqe_hdr.cqe_type); + apc->eth_stats.tx_cqe_err++; break; default: - /* If the CQE type is unexpected, log an error, assert, - * and go through the error path. + /* If the CQE type is unknown, log an error, + * and still free the SKB, update tail, etc. 
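+ * Returning early instead, as the old WARN_ONCE() path did, would + * abandon the rest of the completion batch: those SKBs would never + * be freed and the queue tail never advanced, stalling the TX queue.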
*/ - WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n", - cqe_oob->cqe_hdr.cqe_type); + if (net_ratelimit()) + netdev_err(ndev, "TX: unknown CQE type %d\n", + cqe_oob->cqe_hdr.cqe_type); + apc->eth_stats.tx_cqe_unknown_type++; - return; + break; } if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num)) @@ -1387,8 +1470,8 @@ static void mana_post_pkt_rxq(struct mana_rxq *rxq) recv_buf_oob = &rxq->rx_oobs[curr_index]; - err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req, - &recv_buf_oob->wqe_inf); + err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req, + &recv_buf_oob->wqe_inf); if (WARN_ON_ONCE(err)) return; @@ -1415,8 +1498,8 @@ static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va, return skb; } -static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe, - struct mana_rxq *rxq) +static void mana_rx_skb(void *buf_va, bool from_pool, + struct mana_rxcomp_oob *cqe, struct mana_rxq *rxq) { struct mana_stats_rx *rx_stats = &rxq->stats; struct net_device *ndev = rxq->ndev; @@ -1449,6 +1532,9 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe, if (!skb) goto drop; + if (from_pool) + skb_mark_for_recycle(skb); + skb->dev = napi->dev; skb->protocol = eth_type_trans(skb, ndev); @@ -1499,9 +1585,14 @@ drop_xdp: u64_stats_update_end(&rx_stats->syncp); drop: - WARN_ON_ONCE(rxq->xdp_save_va); - /* Save for reuse */ - rxq->xdp_save_va = buf_va; + if (from_pool) { + page_pool_recycle_direct(rxq->page_pool, + virt_to_head_page(buf_va)); + } else { + WARN_ON_ONCE(rxq->xdp_save_va); + /* Save for reuse */ + rxq->xdp_save_va = buf_va; + } ++ndev->stats.rx_dropped; @@ -1509,11 +1600,13 @@ drop: } static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev, - dma_addr_t *da, bool is_napi) + dma_addr_t *da, bool *from_pool, bool is_napi) { struct page *page; void *va; + *from_pool = false; + /* Reuse XDP dropped page if available */ if (rxq->xdp_save_va) { va = rxq->xdp_save_va; @@ -1534,17 +1627,22 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev, return NULL; } } else { - page = dev_alloc_page(); + page = page_pool_dev_alloc_pages(rxq->page_pool); if (!page) return NULL; + *from_pool = true; va = page_to_virt(page); } *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize, DMA_FROM_DEVICE); if (dma_mapping_error(dev, *da)) { - put_page(virt_to_head_page(va)); + if (*from_pool) + page_pool_put_full_page(rxq->page_pool, page, false); + else + put_page(virt_to_head_page(va)); + return NULL; } @@ -1553,21 +1651,25 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev, /* Allocate frag for rx buffer, and save the old buf */ static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq, - struct mana_recv_buf_oob *rxoob, void **old_buf) + struct mana_recv_buf_oob *rxoob, void **old_buf, + bool *old_fp) { + bool from_pool; dma_addr_t da; void *va; - va = mana_get_rxfrag(rxq, dev, &da, true); + va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true); if (!va) return; dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize, DMA_FROM_DEVICE); *old_buf = rxoob->buf_va; + *old_fp = rxoob->from_pool; rxoob->buf_va = va; rxoob->sgl[0].address = da; + rxoob->from_pool = from_pool; } static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq, @@ -1581,6 +1683,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq, struct device *dev = gc->dev; void *old_buf = NULL; u32 curr, pktlen; + bool old_fp; apc = netdev_priv(ndev); @@ -1623,12 
+1726,12 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq, rxbuf_oob = &rxq->rx_oobs[curr]; WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1); - mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf); + mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf, &old_fp); /* Unsuccessful refill will have old_buf == NULL. * In this case, mana_rx_skb() will drop the packet. */ - mana_rx_skb(old_buf, oob, rxq); + mana_rx_skb(old_buf, old_fp, oob, rxq); drop: mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu); @@ -1658,6 +1761,12 @@ static void mana_poll_rx_cq(struct mana_cq *cq) mana_process_rx_cqe(rxq, cq, &comp[i]); } + if (comp_read > 0) { + struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context; + + mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq); + } + if (rxq->xdp_flush) xdp_do_flush(); } @@ -1882,6 +1991,7 @@ static void mana_destroy_rxq(struct mana_port_context *apc, struct mana_recv_buf_oob *rx_oob; struct device *dev = gc->dev; struct napi_struct *napi; + struct page *page; int i; if (!rxq) @@ -1914,10 +2024,18 @@ static void mana_destroy_rxq(struct mana_port_context *apc, dma_unmap_single(dev, rx_oob->sgl[0].address, rx_oob->sgl[0].size, DMA_FROM_DEVICE); - put_page(virt_to_head_page(rx_oob->buf_va)); + page = virt_to_head_page(rx_oob->buf_va); + + if (rx_oob->from_pool) + page_pool_put_full_page(rxq->page_pool, page, false); + else + put_page(page); + rx_oob->buf_va = NULL; } + page_pool_destroy(rxq->page_pool); + if (rxq->gdma_rq) mana_gd_destroy_queue(gc, rxq->gdma_rq); @@ -1928,18 +2046,20 @@ static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key, struct mana_rxq *rxq, struct device *dev) { struct mana_port_context *mpc = netdev_priv(rxq->ndev); + bool from_pool = false; dma_addr_t da; void *va; if (mpc->rxbufs_pre) va = mana_get_rxbuf_pre(rxq, &da); else - va = mana_get_rxfrag(rxq, dev, &da, false); + va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false); if (!va) return -ENOMEM; rx_oob->buf_va = va; + rx_oob->from_pool = from_pool; rx_oob->sgl[0].address = da; rx_oob->sgl[0].size = rxq->datasize; @@ -2009,6 +2129,26 @@ static int mana_push_wqe(struct mana_rxq *rxq) return 0; } +static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc) +{ + struct page_pool_params pprm = {}; + int ret; + + pprm.pool_size = RX_BUFFERS_PER_QUEUE; + pprm.nid = gc->numa_node; + pprm.napi = &rxq->rx_cq.napi; + + rxq->page_pool = page_pool_create(&pprm); + + if (IS_ERR(rxq->page_pool)) { + ret = PTR_ERR(rxq->page_pool); + rxq->page_pool = NULL; + return ret; + } + + return 0; +} + static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, u32 rxq_idx, struct mana_eq *eq, struct net_device *ndev) @@ -2038,6 +2178,13 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size, &rxq->headroom); + /* Create page pool for RX queue */ + err = mana_create_page_pool(rxq, gc); + if (err) { + netdev_err(ndev, "Create page pool err:%d\n", err); + goto out; + } + err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size); if (err) goto out; @@ -2109,8 +2256,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx, cq->napi.napi_id)); - WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, - MEM_TYPE_PAGE_SHARED, NULL)); + WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, + rxq->page_pool)); napi_enable(&cq->napi); @@ -2229,6 +2376,46 @@ int mana_config_rss(struct 
mana_port_context *apc, enum TRI_STATE rx, return 0; } +void mana_query_gf_stats(struct mana_port_context *apc) +{ + struct mana_query_gf_stat_resp resp = {}; + struct mana_query_gf_stat_req req = {}; + struct net_device *ndev = apc->ndev; + int err; + + mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT, + sizeof(req), sizeof(resp)); + req.req_stats = STATISTICS_FLAGS_HC_TX_BYTES | + STATISTICS_FLAGS_HC_TX_UCAST_PACKETS | + STATISTICS_FLAGS_HC_TX_UCAST_BYTES | + STATISTICS_FLAGS_HC_TX_MCAST_PACKETS | + STATISTICS_FLAGS_HC_TX_MCAST_BYTES | + STATISTICS_FLAGS_HC_TX_BCAST_PACKETS | + STATISTICS_FLAGS_HC_TX_BCAST_BYTES; + + err = mana_send_request(apc->ac, &req, sizeof(req), &resp, + sizeof(resp)); + if (err) { + netdev_err(ndev, "Failed to query GF stats: %d\n", err); + return; + } + err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_GF_STAT, + sizeof(resp)); + if (err || resp.hdr.status) { + netdev_err(ndev, "Failed to query GF stats: %d, 0x%x\n", err, + resp.hdr.status); + return; + } + + apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes; + apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts; + apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes; + apc->eth_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts; + apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes; + apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts; + apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes; +} + static int mana_init_port(struct net_device *ndev) { struct mana_port_context *apc = netdev_priv(ndev); @@ -2500,8 +2687,9 @@ static int mana_probe_port(struct mana_context *ac, int port_idx, ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; ndev->vlan_features = ndev->features; - ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | - NETDEV_XDP_ACT_NDO_XMIT; + xdp_set_features_flag(ndev, NETDEV_XDP_ACT_BASIC | + NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_NDO_XMIT); err = register_netdev(ndev); if (err) { diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c index 0dc78679f620..607150165ab4 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c +++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c @@ -13,6 +13,19 @@ static const struct { } mana_eth_stats[] = { {"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)}, {"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)}, + {"hc_tx_bytes", offsetof(struct mana_ethtool_stats, hc_tx_bytes)}, + {"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_stats, + hc_tx_ucast_pkts)}, + {"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_stats, + hc_tx_ucast_bytes)}, + {"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_stats, + hc_tx_bcast_pkts)}, + {"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_stats, + hc_tx_bcast_bytes)}, + {"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_stats, + hc_tx_mcast_pkts)}, + {"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_stats, + hc_tx_mcast_bytes)}, {"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)}, {"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats, tx_cqe_unknown_type)}, @@ -114,6 +127,8 @@ static void mana_get_ethtool_stats(struct net_device *ndev, if (!apc->port_is_up) return; + /* we call mana function to update stats from GDMA */ + mana_query_gf_stats(apc); for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++) data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset); diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c 
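
The MANA hunks above are a textbook page_pool conversion: create one pool per RX queue, allocate refill pages from it instead of dev_alloc_page(), register the queue's XDP memory model as MEM_TYPE_PAGE_POOL so XDP drops and skb freeing recycle into the pool, and mark delivered skbs with skb_mark_for_recycle(). Below is a minimal sketch of that life cycle; struct my_rxq and its helpers are illustrative names, not MANA's code (MANA additionally DMA-maps each buffer itself with dma_map_single(), since the pool is created without PP_FLAG_DMA_MAP):

	#include <net/page_pool/helpers.h>	/* page_pool API (<net/page_pool.h> before the 6.6 header split) */
	#include <net/xdp.h>
	#include <linux/skbuff.h>

	struct my_rxq {
		struct page_pool *pool;
		struct napi_struct napi;
		struct xdp_rxq_info xdp_rxq;
	};

	static int my_rxq_pool_init(struct my_rxq *rxq, int nid, u32 ring_size)
	{
		struct page_pool_params pprm = {
			.pool_size = ring_size,	  /* roughly one page per RX descriptor */
			.nid	   = nid,	  /* allocate close to the NIC */
			.napi	   = &rxq->napi,  /* enables lockless direct recycling */
		};

		rxq->pool = page_pool_create(&pprm);
		if (IS_ERR(rxq->pool))
			return PTR_ERR(rxq->pool);

		/* Let the XDP core return buffers to the pool on XDP_DROP etc. */
		return xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
						  MEM_TYPE_PAGE_POOL, rxq->pool);
	}

	static void my_rxq_rx_one(struct my_rxq *rxq, struct sk_buff *skb,
				  struct page *dropped)
	{
		if (skb) {
			/* Delivered packet: freeing the skb recycles its pages. */
			skb_mark_for_recycle(skb);
			napi_gro_receive(&rxq->napi, skb);
		} else if (dropped) {
			/* Drop inside NAPI context: recycle the page directly,
			 * without touching the page allocator.
			 */
			page_pool_recycle_direct(rxq->pool, dropped);
		}
	}

Refill then becomes page_pool_dev_alloc_pages(rxq->pool), and teardown is page_pool_destroy(rxq->pool) once the hardware queue is quiesced, which is the order mana_destroy_rxq() follows in the hunks above.
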
index 3da99b62797d..96dc69e7141f 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -558,7 +558,7 @@ irq_map_fail: return ret; } -static int moxart_remove(struct platform_device *pdev) +static void moxart_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); @@ -566,8 +566,6 @@ static int moxart_remove(struct platform_device *pdev) devm_free_irq(&pdev->dev, ndev->irq, ndev); moxart_mac_free_memory(ndev); free_netdev(ndev); - - return 0; } static const struct of_device_id moxart_mac_match[] = { @@ -578,7 +576,7 @@ MODULE_DEVICE_TABLE(of, moxart_mac_match); static struct platform_driver moxart_mac_driver = { .probe = moxart_mac_probe, - .remove = moxart_remove, + .remove_new = moxart_remove, .driver = { .name = "moxart-ethernet", .of_match_table = moxart_mac_match, diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index 87f2055c242c..e50be508c166 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -97,8 +97,6 @@ int ocelot_netdev_to_port(struct net_device *dev); int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, struct device_node *portnp); void ocelot_release_port(struct ocelot_port *ocelot_port); -int ocelot_devlink_init(struct ocelot *ocelot); -void ocelot_devlink_teardown(struct ocelot *ocelot); int ocelot_port_devlink_init(struct ocelot *ocelot, int port, enum devlink_port_flavour flavour); void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port); diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.c b/drivers/net/ethernet/mscc/ocelot_fdma.c index 83a3ce0c568e..312a46832154 100644 --- a/drivers/net/ethernet/mscc/ocelot_fdma.c +++ b/drivers/net/ethernet/mscc/ocelot_fdma.c @@ -12,7 +12,6 @@ #include <linux/dmapool.h> #include <linux/dsa/ocelot.h> #include <linux/netdevice.h> -#include <linux/of_platform.h> #include <linux/skbuff.h> #include "ocelot_fdma.h" diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index e0916afcddfb..33b438c6aec5 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -581,14 +581,14 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress, int ret; if (dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_META) | - BIT(FLOW_DISSECTOR_KEY_PORTS) | - BIT(FLOW_DISSECTOR_KEY_VLAN) | - BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) { + ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_META) | + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) { return -EOPNOTSUPP; } @@ -641,12 +641,12 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress, * then just bail out */ if ((dissector->used_keys & - (BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_CONTROL))) != - (BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_CONTROL))) + (BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL))) != + (BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + 
BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL))) return -EOPNOTSUPP; flow_rule_match_eth_addrs(rule, &match); diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c index 5c55197c7327..c018783757fb 100644 --- a/drivers/net/ethernet/mscc/ocelot_stats.c +++ b/drivers/net/ethernet/mscc/ocelot_stats.c @@ -582,10 +582,10 @@ static void ocelot_port_rmon_stats_cb(struct ocelot *ocelot, int port, void *pri rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_64]; rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_65_127]; rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_128_255]; - rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_128_255]; - rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_256_511]; - rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_512_1023]; - rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1024_1526]; + rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_256_511]; + rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_512_1023]; + rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_1024_1526]; + rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1527_MAX]; } static void ocelot_port_pmac_rmon_stats_cb(struct ocelot *ocelot, int port, @@ -610,10 +610,10 @@ static void ocelot_port_pmac_rmon_stats_cb(struct ocelot *ocelot, int port, rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_PMAC_64]; rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_PMAC_65_127]; rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_PMAC_128_255]; - rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_PMAC_128_255]; - rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_PMAC_256_511]; - rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_PMAC_512_1023]; - rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_PMAC_1024_1526]; + rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_PMAC_256_511]; + rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_PMAC_512_1023]; + rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_PMAC_1024_1526]; + rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_PMAC_1527_MAX]; } void ocelot_port_get_rmon_stats(struct ocelot *ocelot, int port, diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.h b/drivers/net/ethernet/mscc/ocelot_vcap.h index 523611ccc48f..6f546695faa5 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.h +++ b/drivers/net/ethernet/mscc/ocelot_vcap.h @@ -15,7 +15,6 @@ int ocelot_vcap_filter_stats_update(struct ocelot *ocelot, struct ocelot_vcap_filter *rule); -void ocelot_detect_vcap_constants(struct ocelot *ocelot); int ocelot_vcap_init(struct ocelot *ocelot); int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv, diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index 97e90e2869d4..993212c3a7da 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -10,8 +10,9 @@ #include <linux/of_net.h> #include <linux/netdevice.h> #include <linux/phylink.h> +#include <linux/of.h> #include <linux/of_mdio.h> -#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/mfd/syscon.h> #include <linux/skbuff.h> #include <net/switchdev.h> @@ -391,7 +392,7 @@ out_free_devlink: return err; } -static int mscc_ocelot_remove(struct platform_device *pdev) +static void mscc_ocelot_remove(struct platform_device *pdev) { struct ocelot *ocelot = platform_get_drvdata(pdev); @@ -407,13 +408,11 @@ static int mscc_ocelot_remove(struct platform_device *pdev) unregister_switchdev_notifier(&ocelot_switchdev_nb); unregister_netdevice_notifier(&ocelot_netdevice_nb); devlink_free(ocelot->devlink); - - return 0; } static struct platform_driver mscc_ocelot_driver = { .probe = mscc_ocelot_probe, - .remove = 
mscc_ocelot_remove, + .remove_new = mscc_ocelot_remove, .driver = { .name = "ocelot-switch", .of_match_table = mscc_ocelot_match, diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c index 3f371faeb6d0..2b6e097df28f 100644 --- a/drivers/net/ethernet/natsemi/jazzsonic.c +++ b/drivers/net/ethernet/natsemi/jazzsonic.c @@ -227,7 +227,7 @@ MODULE_ALIAS("platform:jazzsonic"); #include "sonic.c" -static int jazz_sonic_device_remove(struct platform_device *pdev) +static void jazz_sonic_device_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sonic_local* lp = netdev_priv(dev); @@ -237,13 +237,11 @@ static int jazz_sonic_device_remove(struct platform_device *pdev) lp->descriptors, lp->descriptors_laddr); release_mem_region(dev->base_addr, SONIC_MEM_SIZE); free_netdev(dev); - - return 0; } static struct platform_driver jazz_sonic_driver = { .probe = jazz_sonic_probe, - .remove = jazz_sonic_device_remove, + .remove_new = jazz_sonic_device_remove, .driver = { .name = jazz_sonic_string, }, diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c index b16f7c830f9b..2fc63860dbdb 100644 --- a/drivers/net/ethernet/natsemi/macsonic.c +++ b/drivers/net/ethernet/natsemi/macsonic.c @@ -532,7 +532,7 @@ MODULE_ALIAS("platform:macsonic"); #include "sonic.c" -static int mac_sonic_platform_remove(struct platform_device *pdev) +static void mac_sonic_platform_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sonic_local* lp = netdev_priv(dev); @@ -541,13 +541,11 @@ static int mac_sonic_platform_remove(struct platform_device *pdev) dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), lp->descriptors, lp->descriptors_laddr); free_netdev(dev); - - return 0; } static struct platform_driver mac_sonic_platform_driver = { .probe = mac_sonic_platform_probe, - .remove = mac_sonic_platform_remove, + .remove_new = mac_sonic_platform_remove, .driver = { .name = "macsonic", }, diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c index 52fef34d43f9..8943e7244310 100644 --- a/drivers/net/ethernet/natsemi/xtsonic.c +++ b/drivers/net/ethernet/natsemi/xtsonic.c @@ -249,7 +249,7 @@ MODULE_DESCRIPTION("Xtensa XT2000 SONIC ethernet driver"); #include "sonic.c" -static int xtsonic_device_remove(struct platform_device *pdev) +static void xtsonic_device_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sonic_local *lp = netdev_priv(dev); @@ -260,13 +260,11 @@ static int xtsonic_device_remove(struct platform_device *pdev) lp->descriptors, lp->descriptors_laddr); release_region (dev->base_addr, SONIC_MEM_SIZE); free_netdev(dev); - - return 0; } static struct platform_driver xtsonic_driver = { .probe = xtsonic_probe, - .remove = xtsonic_device_remove, + .remove_new = xtsonic_device_remove, .driver = { .name = xtsonic_string, }, diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 700c05fb05b9..61d8bfd12d5f 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -5091,13 +5091,10 @@ static void do_s2io_restore_unicast_mc(struct s2io_nic *sp) static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr) { int i; - u64 mac_addr = 0; + u64 mac_addr; struct config_param *config = &sp->config; - for (i = 0; i < ETH_ALEN; i++) { - mac_addr <<= 8; - mac_addr |= addr[i]; - } + 
mac_addr = ether_addr_to_u64(addr); if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY)) return SUCCESS; @@ -5220,7 +5217,7 @@ static int s2io_set_mac_addr(struct net_device *dev, void *p) static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr) { struct s2io_nic *sp = netdev_priv(dev); - register u64 mac_addr = 0, perm_addr = 0; + register u64 mac_addr, perm_addr; int i; u64 tmp64; struct config_param *config = &sp->config; @@ -5230,12 +5227,8 @@ static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr) * change on the device address registered with the OS. It will be * at offset 0. */ - for (i = 0; i < ETH_ALEN; i++) { - mac_addr <<= 8; - mac_addr |= addr[i]; - perm_addr <<= 8; - perm_addr |= sp->def_mac_addr[0].mac_addr[i]; - } + mac_addr = ether_addr_to_u64(addr); + perm_addr = ether_addr_to_u64(sp->def_mac_addr[0].mac_addr); /* check if the dev_addr is different than perm_addr */ if (mac_addr == perm_addr) diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c index b1f026b81dea..cc54faca2283 100644 --- a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c +++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c @@ -378,6 +378,34 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x, /* Encryption */ switch (x->props.ealgo) { case SADB_EALG_NONE: + /* The xfrm descriptor for CHACAH20_POLY1305 does not set the algorithm id, which + * is the default value SADB_EALG_NONE. In the branch of SADB_EALG_NONE, driver + * uses algorithm name to identify CHACAH20_POLY1305's algorithm. + */ + if (x->aead && !strcmp(x->aead->alg_name, "rfc7539esp(chacha20,poly1305)")) { + if (nn->pdev->device != PCI_DEVICE_ID_NFP3800) { + NL_SET_ERR_MSG_MOD(extack, + "Unsupported encryption algorithm for offload"); + return -EINVAL; + } + if (x->aead->alg_icv_len != 128) { + NL_SET_ERR_MSG_MOD(extack, + "ICV must be 128bit with CHACHA20_POLY1305"); + return -EINVAL; + } + + /* Aead->alg_key_len includes 32-bit salt */ + if (x->aead->alg_key_len - 32 != 256) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported CHACHA20 key length"); + return -EINVAL; + } + + /* The CHACHA20's mode is not configured */ + cfg->ctrl_word.hash = NFP_IPSEC_HASH_POLY1305_128; + cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_CHACHA20; + break; + } + fallthrough; case SADB_EALG_NULL: cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CBC; cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_NULL; @@ -427,6 +455,7 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x, } if (x->aead) { + int key_offset = 0; int salt_len = 4; key_len = DIV_ROUND_UP(x->aead->alg_key_len, BITS_PER_BYTE); @@ -437,9 +466,19 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x, return -EINVAL; } - for (i = 0; i < key_len / sizeof(cfg->ciph_key[0]) ; i++) - cfg->ciph_key[i] = get_unaligned_be32(x->aead->alg_key + - sizeof(cfg->ciph_key[0]) * i); + /* The CHACHA20's key order needs to be adjusted based on hardware design. 
+ * Other's key order: {K0, K1, K2, K3, K4, K5, K6, K7} + * CHACHA20's key order: {K4, K5, K6, K7, K0, K1, K2, K3} + */ + if (!strcmp(x->aead->alg_name, "rfc7539esp(chacha20,poly1305)")) + key_offset = key_len / sizeof(cfg->ciph_key[0]) >> 1; + + for (i = 0; i < key_len / sizeof(cfg->ciph_key[0]); i++) { + int index = (i + key_offset) % (key_len / sizeof(cfg->ciph_key[0])); + + cfg->ciph_key[index] = get_unaligned_be32(x->aead->alg_key + + sizeof(cfg->ciph_key[0]) * i); + } /* Load up the salt */ cfg->aesgcm_fields.salt = get_unaligned_be32(x->aead->alg_key + key_len); diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index f21cf1f40f98..153533cd8f08 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -210,6 +210,7 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb) unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb); struct nfp_flower_cmsg_merge_hint *msg; struct nfp_fl_payload *sub_flows[2]; + struct nfp_flower_priv *priv; int err, i, flow_cnt; msg = nfp_flower_cmsg_get_data(skb); @@ -228,14 +229,15 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb) return; } - rtnl_lock(); + priv = app->priv; + mutex_lock(&priv->nfp_fl_lock); for (i = 0; i < flow_cnt; i++) { u32 ctx = be32_to_cpu(msg->flow[i].host_ctx); sub_flows[i] = nfp_flower_get_fl_payload_from_ctx(app, ctx); if (!sub_flows[i]) { nfp_flower_cmsg_warn(app, "Invalid flow in merge hint\n"); - goto err_rtnl_unlock; + goto err_mutex_unlock; } } @@ -244,8 +246,8 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb) if (err == -ENOMEM) nfp_flower_cmsg_warn(app, "Flow merge memory fail.\n"); -err_rtnl_unlock: - rtnl_unlock(); +err_mutex_unlock: + mutex_unlock(&priv->nfp_fl_lock); } static void diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c index 73032173ac4e..2967bab72505 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c @@ -61,7 +61,7 @@ bool is_pre_ct_flow(struct flow_cls_offload *flow) struct flow_match_ct ct; int i; - if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) { + if (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CT)) { flow_rule_match_ct(rule, &ct); if (ct.key->ct_state) return false; @@ -94,7 +94,7 @@ bool is_post_ct_flow(struct flow_cls_offload *flow) struct flow_match_ct ct; int i; - if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) { + if (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CT)) { flow_rule_match_ct(rule, &ct); if (ct.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) return true; @@ -236,10 +236,11 @@ static bool nfp_ct_merge_check_cannot_skip(struct nfp_fl_ct_flow_entry *entry1, static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, struct nfp_fl_ct_flow_entry *entry2) { - unsigned int ovlp_keys = entry1->rule->match.dissector->used_keys & - entry2->rule->match.dissector->used_keys; + unsigned long long ovlp_keys; bool out, is_v6 = false; u8 ip_proto = 0; + ovlp_keys = entry1->rule->match.dissector->used_keys & + entry2->rule->match.dissector->used_keys; /* Temporary buffer for mangling keys, 64 is enough to cover max * struct size of key in various fields that may be mangled. 
* Supported fields to mangle: @@ -257,7 +258,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, /* Check the overlapped fields one by one, the unmasked part * should not conflict with each other. */ - if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) { + if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL)) { struct flow_match_control match1, match2; flow_rule_match_control(entry1->rule, &match1); @@ -267,7 +268,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, goto check_failed; } - if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) { + if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match1, match2; flow_rule_match_basic(entry1->rule, &match1); @@ -289,7 +290,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, * will be do merge check when do nft and post ct merge, * so skip this ip merge check here. */ - if ((ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS)) && + if ((ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS)) && nfp_ct_merge_check_cannot_skip(entry1, entry2)) { struct flow_match_ipv4_addrs match1, match2; @@ -311,7 +312,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, * will be do merge check when do nft and post ct merge, * so skip this ip merge check here. */ - if ((ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS)) && + if ((ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS)) && nfp_ct_merge_check_cannot_skip(entry1, entry2)) { struct flow_match_ipv6_addrs match1, match2; @@ -333,7 +334,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, * will be do merge check when do nft and post ct merge, * so skip this tport merge check here. */ - if ((ovlp_keys & BIT(FLOW_DISSECTOR_KEY_PORTS)) && + if ((ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_PORTS)) && nfp_ct_merge_check_cannot_skip(entry1, entry2)) { enum flow_action_mangle_base htype = FLOW_ACT_MANGLE_UNSPEC; struct flow_match_ports match1, match2; @@ -355,7 +356,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, goto check_failed; } - if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS)) { struct flow_match_eth_addrs match1, match2; flow_rule_match_eth_addrs(entry1->rule, &match1); @@ -371,7 +372,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, goto check_failed; } - if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_VLAN)) { + if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_VLAN)) { struct flow_match_vlan match1, match2; flow_rule_match_vlan(entry1->rule, &match1); @@ -381,7 +382,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, goto check_failed; } - if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_MPLS)) { + if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_MPLS)) { struct flow_match_mpls match1, match2; flow_rule_match_mpls(entry1->rule, &match1); @@ -391,7 +392,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, goto check_failed; } - if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_TCP)) { + if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_TCP)) { struct flow_match_tcp match1, match2; flow_rule_match_tcp(entry1->rule, &match1); @@ -401,7 +402,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, goto check_failed; } - if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IP)) { + if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_IP)) { struct flow_match_ip match1, match2; flow_rule_match_ip(entry1->rule, &match1); @@ -413,7 +414,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, goto check_failed; 
} - if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_KEYID)) { + if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID)) { struct flow_match_enc_keyid match1, match2; flow_rule_match_enc_keyid(entry1->rule, &match1); @@ -423,7 +424,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, goto check_failed; } - if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { + if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { struct flow_match_ipv4_addrs match1, match2; flow_rule_match_enc_ipv4_addrs(entry1->rule, &match1); @@ -433,7 +434,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, goto check_failed; } - if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) { + if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) { struct flow_match_ipv6_addrs match1, match2; flow_rule_match_enc_ipv6_addrs(entry1->rule, &match1); @@ -443,7 +444,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, goto check_failed; } - if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL)) { + if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL)) { struct flow_match_control match1, match2; flow_rule_match_enc_control(entry1->rule, &match1); @@ -453,7 +454,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, goto check_failed; } - if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IP)) { + if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP)) { struct flow_match_ip match1, match2; flow_rule_match_enc_ip(entry1->rule, &match1); @@ -463,7 +464,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, goto check_failed; } - if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_OPTS)) { + if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS)) { struct flow_match_enc_opts match1, match2; flow_rule_match_enc_opts(entry1->rule, &match1); @@ -589,7 +590,7 @@ static int nfp_ct_check_meta(struct nfp_fl_ct_flow_entry *post_ct_entry, int i; ct_met = get_flow_act(nft_entry->rule, FLOW_ACTION_CT_METADATA); - if (ct_met && (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT))) { + if (ct_met && (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CT))) { u32 *act_lbl; act_lbl = ct_met->ct_metadata.labels; @@ -2130,8 +2131,6 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl struct nfp_fl_ct_flow_entry *ct_entry; struct netlink_ext_ack *extack = NULL; - ASSERT_RTNL(); - extack = flow->common.extack; switch (flow->command) { case FLOW_CLS_REPLACE: @@ -2177,9 +2176,13 @@ int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb switch (type) { case TC_SETUP_CLSFLOWER: - rtnl_lock(); + while (!mutex_trylock(&zt->priv->nfp_fl_lock)) { + if (!zt->nft) /* avoid deadlock */ + return err; + msleep(20); + } err = nfp_fl_ct_offload_nft_flow(zt, flow); - rtnl_unlock(); + mutex_unlock(&zt->priv->nfp_fl_lock); break; default: return -EOPNOTSUPP; @@ -2207,6 +2210,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent) struct nfp_fl_ct_flow_entry *ct_entry; struct nfp_fl_ct_zone_entry *zt; struct rhashtable *m_table; + struct nf_flowtable *nft; if (!ct_map_ent) return -ENOENT; @@ -2225,8 +2229,12 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent) if (ct_map_ent->cookie > 0) kfree(ct_map_ent); - if (!zt->pre_ct_count) { - zt->nft = NULL; + if (!zt->pre_ct_count && zt->nft) { + nft = zt->nft; + zt->nft = NULL; /* avoid deadlock */ + nf_flow_table_offload_del_cb(nft, + nfp_fl_ct_handle_nft_flow, + zt); nfp_fl_ct_clean_nft_entries(zt); } break; diff --git 
a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 40372545148e..2b7c947ff4f2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -297,6 +297,7 @@ struct nfp_fl_internal_ports { * @predt_list: List to keep track of decap pretun flows * @neigh_table: Table to keep track of neighbor entries * @predt_lock: Lock to serialise predt/neigh table updates + * @nfp_fl_lock: Lock to protect the flow offload operation */ struct nfp_flower_priv { struct nfp_app *app; @@ -339,6 +340,7 @@ struct nfp_flower_priv { struct list_head predt_list; struct rhashtable neigh_table; spinlock_t predt_lock; /* Lock to serialise predt/neigh table updates */ + struct mutex nfp_fl_lock; /* Protect the flow operation */ }; /** diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 0f06ef6e24bf..80e4675582bf 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -528,6 +528,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, if (err) goto err_free_stats_ctx_table; + mutex_init(&priv->nfp_fl_lock); + err = rhashtable_init(&priv->ct_zone_table, &nfp_zone_table_params); if (err) goto err_free_merge_table; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 18328eb7f5c3..0aceef9fe582 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -24,43 +24,43 @@ FLOW_DIS_FIRST_FRAG) #define NFP_FLOWER_WHITELIST_DISSECTOR \ - (BIT(FLOW_DISSECTOR_KEY_CONTROL) | \ - BIT(FLOW_DISSECTOR_KEY_BASIC) | \ - BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \ - BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \ - BIT(FLOW_DISSECTOR_KEY_TCP) | \ - BIT(FLOW_DISSECTOR_KEY_PORTS) | \ - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \ - BIT(FLOW_DISSECTOR_KEY_VLAN) | \ - BIT(FLOW_DISSECTOR_KEY_CVLAN) | \ - BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \ - BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \ - BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \ - BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \ - BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \ - BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \ - BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \ - BIT(FLOW_DISSECTOR_KEY_MPLS) | \ - BIT(FLOW_DISSECTOR_KEY_CT) | \ - BIT(FLOW_DISSECTOR_KEY_META) | \ - BIT(FLOW_DISSECTOR_KEY_IP)) + (BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_TCP) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_CT) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_META) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_IP)) #define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \ - (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \ - BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \ - BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \ - BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \ - 
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \ - BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \ - BIT(FLOW_DISSECTOR_KEY_ENC_IP)) + (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP)) #define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \ - (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \ - BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) + (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) #define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R \ - (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \ - BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) + (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \ + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) #define NFP_FLOWER_MERGE_FIELDS \ (NFP_FLOWER_LAYER_PORT | \ @@ -1009,8 +1009,6 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app, u64 parent_ctx = 0; int err; - ASSERT_RTNL(); - if (sub_flow1 == sub_flow2 || nfp_flower_is_merge_flow(sub_flow1) || nfp_flower_is_merge_flow(sub_flow2)) @@ -1303,7 +1301,7 @@ static bool offload_pre_check(struct flow_cls_offload *flow) struct flow_dissector *dissector = rule->match.dissector; struct flow_match_ct ct; - if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) { + if (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CT)) { flow_rule_match_ct(rule, &ct); /* Allow special case where CT match is all 0 */ if (memchr_inv(ct.key, 0, sizeof(*ct.key))) @@ -1727,19 +1725,30 @@ static int nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, struct flow_cls_offload *flower) { + struct nfp_flower_priv *priv = app->priv; + int ret; + if (!eth_proto_is_802_3(flower->common.protocol)) return -EOPNOTSUPP; + mutex_lock(&priv->nfp_fl_lock); switch (flower->command) { case FLOW_CLS_REPLACE: - return nfp_flower_add_offload(app, netdev, flower); + ret = nfp_flower_add_offload(app, netdev, flower); + break; case FLOW_CLS_DESTROY: - return nfp_flower_del_offload(app, netdev, flower); + ret = nfp_flower_del_offload(app, netdev, flower); + break; case FLOW_CLS_STATS: - return nfp_flower_get_stats(app, netdev, flower); + ret = nfp_flower_get_stats(app, netdev, flower); + break; default: - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + break; } + mutex_unlock(&priv->nfp_fl_lock); + + return ret; } static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type, @@ -1778,6 +1787,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev, repr_priv = repr->app_priv; repr_priv->block_shared = f->block_shared; f->driver_block_list = &nfp_block_cb_list; + f->unlocked_driver_cb = true; switch (f->command) { case FLOW_BLOCK_BIND: @@ -1876,6 +1886,8 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, str nfp_flower_internal_port_can_offload(app, netdev))) return -EOPNOTSUPP; + f->unlocked_driver_cb = true; + switch (f->command) { case FLOW_BLOCK_BIND: cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev); diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c index 99052a925d9e..e7180b4793c7 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c @@ -523,25 +523,31 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev, { struct netlink_ext_ack *extack = flow->common.extack; struct 
nfp_flower_priv *fl_priv = app->priv; + int ret; if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload"); return -EOPNOTSUPP; } + mutex_lock(&fl_priv->nfp_fl_lock); switch (flow->command) { case TC_CLSMATCHALL_REPLACE: - return nfp_flower_install_rate_limiter(app, netdev, flow, - extack); + ret = nfp_flower_install_rate_limiter(app, netdev, flow, extack); + break; case TC_CLSMATCHALL_DESTROY: - return nfp_flower_remove_rate_limiter(app, netdev, flow, - extack); + ret = nfp_flower_remove_rate_limiter(app, netdev, flow, extack); + break; case TC_CLSMATCHALL_STATS: - return nfp_flower_stats_rate_limiter(app, netdev, flow, - extack); + ret = nfp_flower_stats_rate_limiter(app, netdev, flow, extack); + break; default: - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + break; } + mutex_unlock(&fl_priv->nfp_fl_lock); + + return ret; } /* Offload tc action, currently only for tc police */ diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 060a77f2265d..e522845c7c21 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -160,6 +160,18 @@ struct nfp_tun_mac_addr_offload { u8 addr[ETH_ALEN]; }; +/** + * struct nfp_neigh_update_work - update neighbour information to nfp + * @work: Work queue for writing neigh to the nfp + * @n: neighbour entry + * @app: Back pointer to app + */ +struct nfp_neigh_update_work { + struct work_struct work; + struct neighbour *n; + struct nfp_app *app; +}; + enum nfp_flower_mac_offload_cmd { NFP_TUNNEL_MAC_OFFLOAD_ADD = 0, NFP_TUNNEL_MAC_OFFLOAD_DEL = 1, @@ -607,38 +619,30 @@ err: nfp_flower_cmsg_warn(app, "Neighbour configuration failed.\n"); } -static int -nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, - void *ptr) +static void +nfp_tun_release_neigh_update_work(struct nfp_neigh_update_work *update_work) { - struct nfp_flower_priv *app_priv; - struct netevent_redirect *redir; - struct neighbour *n; + neigh_release(update_work->n); + kfree(update_work); +} + +static void nfp_tun_neigh_update(struct work_struct *work) +{ + struct nfp_neigh_update_work *update_work; struct nfp_app *app; + struct neighbour *n; bool neigh_invalid; int err; - switch (event) { - case NETEVENT_REDIRECT: - redir = (struct netevent_redirect *)ptr; - n = redir->neigh; - break; - case NETEVENT_NEIGH_UPDATE: - n = (struct neighbour *)ptr; - break; - default: - return NOTIFY_DONE; - } - - neigh_invalid = !(n->nud_state & NUD_VALID) || n->dead; - - app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb); - app = app_priv->app; + update_work = container_of(work, struct nfp_neigh_update_work, work); + app = update_work->app; + n = update_work->n; if (!nfp_flower_get_port_id_from_netdev(app, n->dev)) - return NOTIFY_DONE; + goto out; #if IS_ENABLED(CONFIG_INET) + neigh_invalid = !(n->nud_state & NUD_VALID) || n->dead; if (n->tbl->family == AF_INET6) { #if IS_ENABLED(CONFIG_IPV6) struct flowi6 flow6 = {}; @@ -655,13 +659,11 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, dst = ip6_dst_lookup_flow(dev_net(n->dev), NULL, &flow6, NULL); if (IS_ERR(dst)) - return NOTIFY_DONE; + goto out; dst_release(dst); } nfp_tun_write_neigh(n->dev, app, &flow6, n, true, false); -#else - return NOTIFY_DONE; #endif /* CONFIG_IPV6 */ } else { struct flowi4 flow4 = {}; @@ -678,17 +680,71 @@ 
nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, rt = ip_route_output_key(dev_net(n->dev), &flow4); err = PTR_ERR_OR_ZERO(rt); if (err) - return NOTIFY_DONE; + goto out; ip_rt_put(rt); } nfp_tun_write_neigh(n->dev, app, &flow4, n, false, false); } -#else - return NOTIFY_DONE; #endif /* CONFIG_INET */ +out: + nfp_tun_release_neigh_update_work(update_work); +} - return NOTIFY_OK; +static struct nfp_neigh_update_work * +nfp_tun_alloc_neigh_update_work(struct nfp_app *app, struct neighbour *n) +{ + struct nfp_neigh_update_work *update_work; + + update_work = kzalloc(sizeof(*update_work), GFP_ATOMIC); + if (!update_work) + return NULL; + + INIT_WORK(&update_work->work, nfp_tun_neigh_update); + neigh_hold(n); + update_work->n = n; + update_work->app = app; + + return update_work; +} + +static int +nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, + void *ptr) +{ + struct nfp_neigh_update_work *update_work; + struct nfp_flower_priv *app_priv; + struct netevent_redirect *redir; + struct neighbour *n; + struct nfp_app *app; + + switch (event) { + case NETEVENT_REDIRECT: + redir = (struct netevent_redirect *)ptr; + n = redir->neigh; + break; + case NETEVENT_NEIGH_UPDATE: + n = (struct neighbour *)ptr; + break; + default: + return NOTIFY_DONE; + } +#if IS_ENABLED(CONFIG_IPV6) + if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl) +#else + if (n->tbl != &arp_tbl) +#endif + return NOTIFY_DONE; + + app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb); + app = app_priv->app; + update_work = nfp_tun_alloc_neigh_update_work(app, n); + if (!update_work) + return NOTIFY_DONE; + + queue_work(system_highpri_wq, &update_work->work); + + return NOTIFY_DONE; } void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb) @@ -706,6 +762,7 @@ void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb) netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL); if (!netdev) goto fail_rcu_unlock; + dev_hold(netdev); flow.daddr = payload->ipv4_addr; flow.flowi4_proto = IPPROTO_UDP; @@ -725,13 +782,16 @@ void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb) ip_rt_put(rt); if (!n) goto fail_rcu_unlock; + rcu_read_unlock(); + nfp_tun_write_neigh(n->dev, app, &flow, n, false, true); neigh_release(n); - rcu_read_unlock(); + dev_put(netdev); return; fail_rcu_unlock: rcu_read_unlock(); + dev_put(netdev); nfp_flower_cmsg_warn(app, "Requested route not found.\n"); } @@ -749,6 +809,7 @@ void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb) netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL); if (!netdev) goto fail_rcu_unlock; + dev_hold(netdev); flow.daddr = payload->ipv6_addr; flow.flowi6_proto = IPPROTO_UDP; @@ -766,14 +827,16 @@ void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb) dst_release(dst); if (!n) goto fail_rcu_unlock; + rcu_read_unlock(); nfp_tun_write_neigh(n->dev, app, &flow, n, true, true); neigh_release(n); - rcu_read_unlock(); + dev_put(netdev); return; fail_rcu_unlock: rcu_read_unlock(); + dev_put(netdev); nfp_flower_cmsg_warn(app, "Requested IPv6 route not found.\n"); } diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c index 0cc026b0aefd..17381bfc15d7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c +++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c @@ -1070,7 +1070,7 @@ static int nfp_nfd3_rx(struct nfp_net_rx_ring *rx_ring, int budget) 
nfp_repr_inc_rx_stats(netdev, pkt_len); } - skb = build_skb(rxbuf->frag, true_bufsz); + skb = napi_build_skb(rxbuf->frag, true_bufsz); if (unlikely(!skb)) { nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); continue; diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c index 5d9db8c2a5b4..45be6954d5aa 100644 --- a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c +++ b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c @@ -256,7 +256,7 @@ nfp_nfd3_xsk_rx(struct nfp_net_rx_ring *rx_ring, int budget, nfp_net_xsk_rx_ring_fill_freelist(r_vec->rx_ring); if (xdp_redir) - xdp_do_flush_map(); + xdp_do_flush(); if (tx_ring->wr_ptr_add) nfp_net_tx_xmit_more_flush(tx_ring); diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c index 33b6d74adb4b..8d78c6faefa8 100644 --- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c +++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c @@ -1189,7 +1189,7 @@ static int nfp_nfdk_rx(struct nfp_net_rx_ring *rx_ring, int budget) nfp_repr_inc_rx_stats(netdev, pkt_len); } - skb = build_skb(rxbuf->frag, true_bufsz); + skb = napi_build_skb(rxbuf->frag, true_bufsz); if (unlikely(!skb)) { nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); continue; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 6b1fb5708434..de0a5d5ded30 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -924,7 +924,7 @@ static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr) */ static void nfp_net_clear_config_and_disable(struct nfp_net *nn) { - u32 new_ctrl, update; + u32 new_ctrl, new_ctrl_w1, update; unsigned int r; int err; @@ -937,14 +937,29 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn) if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG) new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG; - nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0); - nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0); + if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN)) { + nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0); + nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0); + } nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); err = nfp_net_reconfig(nn, update); if (err) nn_err(nn, "Could not disable device: %d\n", err); + if (nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN) { + new_ctrl_w1 = nn->dp.ctrl_w1; + new_ctrl_w1 &= ~NFP_NET_CFG_CTRL_FREELIST_EN; + nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0); + nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0); + + nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, new_ctrl_w1); + err = nfp_net_reconfig(nn, update); + if (err) + nn_err(nn, "Could not disable FREELIST_EN: %d\n", err); + nn->dp.ctrl_w1 = new_ctrl_w1; + } + for (r = 0; r < nn->dp.num_rx_rings; r++) { nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]); if (nfp_net_has_xsk_pool_slow(&nn->dp, nn->dp.rx_rings[r].idx)) @@ -964,11 +979,12 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn) */ static int nfp_net_set_config_and_enable(struct nfp_net *nn) { - u32 bufsz, new_ctrl, update = 0; + u32 bufsz, new_ctrl, new_ctrl_w1, update = 0; unsigned int r; int err; new_ctrl = nn->dp.ctrl; + new_ctrl_w1 = nn->dp.ctrl_w1; if (nn->dp.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) { nfp_net_rss_write_key(nn); @@ -1001,16 +1017,25 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn) bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA; nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz); - /* Enable device */ - new_ctrl |= 
NFP_NET_CFG_CTRL_ENABLE; + /* Enable device + * Step 1: Replace the CTRL_ENABLE by NFP_NET_CFG_CTRL_FREELIST_EN if + * FREELIST_EN exits. + */ + if (nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN) + new_ctrl_w1 |= NFP_NET_CFG_CTRL_FREELIST_EN; + else + new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; update |= NFP_NET_CFG_UPDATE_GEN; update |= NFP_NET_CFG_UPDATE_MSIX; update |= NFP_NET_CFG_UPDATE_RING; if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG) new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG; + /* Step 2: Send the configuration and write the freelist. + * - The freelist only need to be written once. + */ nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); - nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, nn->dp.ctrl_w1); + nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, new_ctrl_w1); err = nfp_net_reconfig(nn, update); if (err) { nfp_net_clear_config_and_disable(nn); @@ -1018,10 +1043,25 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn) } nn->dp.ctrl = new_ctrl; + nn->dp.ctrl_w1 = new_ctrl_w1; for (r = 0; r < nn->dp.num_rx_rings; r++) nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]); + /* Step 3: Do the NFP_NET_CFG_CTRL_ENABLE. Send the configuration. + */ + if (nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN) { + new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; + nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); + + err = nfp_net_reconfig(nn, update); + if (err) { + nfp_net_clear_config_and_disable(nn); + return err; + } + nn->dp.ctrl = new_ctrl; + } + return 0; } @@ -2068,9 +2108,6 @@ static int nfp_net_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, if (nla_type(attr) != IFLA_BRIDGE_MODE) continue; - if (nla_len(attr) < sizeof(mode)) - return -EINVAL; - new_ctrl = nn->dp.ctrl; mode = nla_get_u16(attr); if (mode == BRIDGE_MODE_VEPA) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index 669b9dccb6a9..3e63f6d6a563 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -268,6 +268,7 @@ #define NFP_NET_CFG_CTRL_PKT_TYPE (0x1 << 0) /* Pkttype offload */ #define NFP_NET_CFG_CTRL_IPSEC (0x1 << 1) /* IPsec offload */ #define NFP_NET_CFG_CTRL_MCAST_FILTER (0x1 << 2) /* Multicast Filter */ +#define NFP_NET_CFG_CTRL_FREELIST_EN (0x1 << 6) /* Freelist enable flag bit */ #define NFP_NET_CFG_CAP_WORD1 0x00a4 diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h index 48a74accbbd3..77bf4198dbde 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h @@ -18,7 +18,7 @@ struct nfp_port; */ struct nfp_reprs { unsigned int num_reprs; - struct net_device __rcu *reprs[]; + struct net_device __rcu *reprs[] __counted_by(num_reprs); }; /** diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h index 6e044ac04917..00264af13b49 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h @@ -241,7 +241,7 @@ struct nfp_eth_table { u64 link_modes_supp[2]; u64 link_modes_ad[2]; - } ports[]; + } ports[] __counted_by(count); }; struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c index ce7492a6a98f..279ea0b56955 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c 
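
The two __counted_by() hunks above annotate flexible-array members with the struct field that holds their element count; with CONFIG_FORTIFY_SOURCE or CONFIG_UBSAN_BOUNDS the compiler can then bounds-check every index into the array at run time. A minimal sketch of the pattern, using an illustrative struct demo_reprs rather than the real nfp structures:

	#include <linux/netdevice.h>
	#include <linux/overflow.h>	/* struct_size() */
	#include <linux/slab.h>

	struct demo_reprs {
		unsigned int num_reprs;
		struct net_device __rcu *reprs[] __counted_by(num_reprs);
	};

	static struct demo_reprs *demo_reprs_alloc(unsigned int num)
	{
		struct demo_reprs *d;

		/* struct_size() is sizeof(*d) + num * sizeof(d->reprs[0]),
		 * with integer-overflow checking folded in.
		 */
		d = kzalloc(struct_size(d, reprs, num), GFP_KERNEL);
		if (!d)
			return NULL;

		/* Assign the counter before any reprs[i] access: the bounds
		 * instrumentation reads num_reprs to validate each index.
		 */
		d->num_reprs = num;

		return d;
	}

The annotation costs nothing at run time when the sanitizers are off; it only records a relationship the allocation site already relies on, which is why tree-wide conversions like these two hunks are treated as safe cleanups.
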
@@ -159,7 +159,7 @@ nfp_resource_acquire(struct nfp_cpp *cpp, const char *name) if (!res) return ERR_PTR(-ENOMEM); - strncpy(res->name, name, NFP_RESOURCE_ENTRY_NAME_SZ); + strscpy(res->name, name, sizeof(res->name)); dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET, NFP_RESOURCE_TBL_BASE, diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index 0fd156286d4d..fa1f78b03cb2 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -7,11 +7,10 @@ #include <linux/etherdevice.h> #include <linux/module.h> #include <linux/netdevice.h> -#include <linux/of_address.h> +#include <linux/of.h> #include <linux/of_mdio.h> #include <linux/of_net.h> -#include <linux/of_platform.h> -#include <linux/of_irq.h> +#include <linux/platform_device.h> #include <linux/skbuff.h> #include <linux/phy.h> #include <linux/mii.h> @@ -684,7 +683,7 @@ static int nixge_poll(struct napi_struct *napi, int budget) if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) { /* If there's more, reschedule, but clear */ nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status); - napi_reschedule(napi); + napi_schedule(napi); } else { /* if not, turn on RX IRQs again ... */ cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); @@ -756,8 +755,7 @@ static irqreturn_t nixge_rx_irq(int irq, void *_ndev) cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr); - if (napi_schedule_prep(&priv->napi)) - __napi_schedule(&priv->napi); + napi_schedule(&priv->napi); goto out; } if (!(status & XAXIDMA_IRQ_ALL_MASK)) { @@ -1398,7 +1396,7 @@ free_netdev: return err; } -static int nixge_remove(struct platform_device *pdev) +static void nixge_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct nixge_priv *priv = netdev_priv(ndev); @@ -1413,13 +1411,11 @@ static int nixge_remove(struct platform_device *pdev) mdiobus_unregister(priv->mii_bus); free_netdev(ndev); - - return 0; } static struct platform_driver nixge_driver = { .probe = nixge_probe, - .remove = nixge_remove, + .remove_new = nixge_remove, .driver = { .name = "nixge", .of_match_table = nixge_dt_ids, diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 1a4a272f4c5c..dd3e58a1319c 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1417,7 +1417,7 @@ err_exit: return ret; } -static int lpc_eth_drv_remove(struct platform_device *pdev) +static void lpc_eth_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct netdata_local *pldat = netdev_priv(ndev); @@ -1436,8 +1436,6 @@ static int lpc_eth_drv_remove(struct platform_device *pdev) clk_disable_unprepare(pldat->clk); clk_put(pldat->clk); free_netdev(ndev); - - return 0; } #ifdef CONFIG_PM @@ -1505,7 +1503,7 @@ MODULE_DEVICE_TABLE(of, lpc_eth_match); static struct platform_driver lpc_eth_driver = { .probe = lpc_eth_drv_probe, - .remove = lpc_eth_drv_remove, + .remove_new = lpc_eth_drv_remove, #ifdef CONFIG_PM .suspend = lpc_eth_drv_suspend, .resume = lpc_eth_drv_resume, diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h index 602f4d45d529..2453a40f6ee8 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic.h +++ b/drivers/net/ethernet/pensando/ionic/ionic.h @@ -81,7 +81,6 @@ int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait); int ionic_dev_cmd_wait_nomsg(struct ionic *ionic, unsigned long 
max_wait); void ionic_dev_cmd_dev_err_print(struct ionic *ionic, u8 opcode, u8 status, int err); -int ionic_set_dma_mask(struct ionic *ionic); int ionic_setup(struct ionic *ionic); int ionic_identify(struct ionic *ionic); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c index ab7d217b98b3..d6ce113a4210 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c @@ -213,29 +213,18 @@ out: return ret; } -static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +static void ionic_clear_pci(struct ionic *ionic) { - struct device *dev = &pdev->dev; - struct ionic *ionic; - int num_vfs; - int err; - - ionic = ionic_devlink_alloc(dev); - if (!ionic) - return -ENOMEM; - - ionic->pdev = pdev; - ionic->dev = dev; - pci_set_drvdata(pdev, ionic); - mutex_init(&ionic->dev_cmd_lock); + ionic_unmap_bars(ionic); + pci_release_regions(ionic->pdev); + pci_disable_device(ionic->pdev); +} - /* Query system for DMA addressing limitation for the device. */ - err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(IONIC_ADDR_LEN)); - if (err) { - dev_err(dev, "Unable to obtain 64-bit DMA for consistent allocations, aborting. err=%d\n", - err); - goto err_out_clear_drvdata; - } +static int ionic_setup_one(struct ionic *ionic) +{ + struct pci_dev *pdev = ionic->pdev; + struct device *dev = ionic->dev; + int err; ionic_debugfs_add_dev(ionic); @@ -249,20 +238,19 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = pci_request_regions(pdev, IONIC_DRV_NAME); if (err) { dev_err(dev, "Cannot request PCI regions: %d, aborting\n", err); - goto err_out_pci_disable_device; + goto err_out_clear_pci; } - pcie_print_link_status(pdev); err = ionic_map_bars(ionic); if (err) - goto err_out_pci_release_regions; + goto err_out_clear_pci; /* Configure the device */ err = ionic_setup(ionic); if (err) { dev_err(dev, "Cannot setup device: %d, aborting\n", err); - goto err_out_unmap_bars; + goto err_out_clear_pci; } pci_set_master(pdev); @@ -279,24 +267,64 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_teardown; } - /* Configure the ports */ + /* Configure the port */ err = ionic_port_identify(ionic); if (err) { dev_err(dev, "Cannot identify port: %d, aborting\n", err); - goto err_out_reset; + goto err_out_teardown; } err = ionic_port_init(ionic); if (err) { dev_err(dev, "Cannot init port: %d, aborting\n", err); - goto err_out_reset; + goto err_out_teardown; + } + + return 0; + +err_out_teardown: + ionic_dev_teardown(ionic); +err_out_clear_pci: + ionic_clear_pci(ionic); +err_out_debugfs_del_dev: + ionic_debugfs_del_dev(ionic); + + return err; +} + +static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct device *dev = &pdev->dev; + struct ionic *ionic; + int num_vfs; + int err; + + ionic = ionic_devlink_alloc(dev); + if (!ionic) + return -ENOMEM; + + ionic->pdev = pdev; + ionic->dev = dev; + pci_set_drvdata(pdev, ionic); + mutex_init(&ionic->dev_cmd_lock); + + /* Query system for DMA addressing limitation for the device. */ + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(IONIC_ADDR_LEN)); + if (err) { + dev_err(dev, "Unable to obtain 64-bit DMA for consistent allocations, aborting. 
err=%d\n", + err); + goto err_out; } + err = ionic_setup_one(ionic); + if (err) + goto err_out; + /* Allocate and init the LIF */ err = ionic_lif_size(ionic); if (err) { dev_err(dev, "Cannot size LIF: %d, aborting\n", err); - goto err_out_port_reset; + goto err_out_pci; } err = ionic_lif_alloc(ionic); @@ -347,21 +375,10 @@ err_out_free_lifs: ionic->lif = NULL; err_out_free_irqs: ionic_bus_free_irq_vectors(ionic); -err_out_port_reset: - ionic_port_reset(ionic); -err_out_reset: - ionic_reset(ionic); -err_out_teardown: +err_out_pci: ionic_dev_teardown(ionic); -err_out_unmap_bars: - ionic_unmap_bars(ionic); -err_out_pci_release_regions: - pci_release_regions(pdev); -err_out_pci_disable_device: - pci_disable_device(pdev); -err_out_debugfs_del_dev: - ionic_debugfs_del_dev(ionic); -err_out_clear_drvdata: + ionic_clear_pci(ionic); +err_out: mutex_destroy(&ionic->dev_cmd_lock); ionic_devlink_free(ionic); @@ -386,20 +403,71 @@ static void ionic_remove(struct pci_dev *pdev) ionic_port_reset(ionic); ionic_reset(ionic); ionic_dev_teardown(ionic); - ionic_unmap_bars(ionic); - pci_release_regions(pdev); - pci_disable_device(pdev); + ionic_clear_pci(ionic); ionic_debugfs_del_dev(ionic); mutex_destroy(&ionic->dev_cmd_lock); ionic_devlink_free(ionic); } +static void ionic_reset_prepare(struct pci_dev *pdev) +{ + struct ionic *ionic = pci_get_drvdata(pdev); + struct ionic_lif *lif = ionic->lif; + + dev_dbg(ionic->dev, "%s: device stopping\n", __func__); + + del_timer_sync(&ionic->watchdog_timer); + cancel_work_sync(&lif->deferred.work); + + mutex_lock(&lif->queue_lock); + ionic_stop_queues_reconfig(lif); + ionic_txrx_free(lif); + ionic_lif_deinit(lif); + ionic_qcqs_free(lif); + mutex_unlock(&lif->queue_lock); + + ionic_dev_teardown(ionic); + ionic_clear_pci(ionic); + ionic_debugfs_del_dev(ionic); +} + +static void ionic_reset_done(struct pci_dev *pdev) +{ + struct ionic *ionic = pci_get_drvdata(pdev); + struct ionic_lif *lif = ionic->lif; + int err; + + err = ionic_setup_one(ionic); + if (err) + goto err_out; + + ionic_debugfs_add_sizes(ionic); + ionic_debugfs_add_lif(ionic->lif); + + err = ionic_restart_lif(lif); + if (err) + goto err_out; + + mod_timer(&ionic->watchdog_timer, jiffies + 1); + +err_out: + dev_dbg(ionic->dev, "%s: device recovery %s\n", + __func__, err ? 
"failed" : "done"); +} + +static const struct pci_error_handlers ionic_err_handler = { + /* FLR handling */ + .reset_prepare = ionic_reset_prepare, + .reset_done = ionic_reset_done, +}; + static struct pci_driver ionic_driver = { .name = IONIC_DRV_NAME, .id_table = ionic_id_table, .probe = ionic_probe, .remove = ionic_remove, .sriov_configure = ionic_sriov_configure, + .err_handler = &ionic_err_handler }; int ionic_bus_register_driver(void) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h index 0bea208bfba2..9b5463040075 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -7,6 +7,7 @@ #include <linux/atomic.h> #include <linux/mutex.h> #include <linux/workqueue.h> +#include <linux/skbuff.h> #include "ionic_if.h" #include "ionic_regs.h" @@ -187,6 +188,7 @@ typedef void (*ionic_desc_cb)(struct ionic_queue *q, struct ionic_desc_info *desc_info, struct ionic_cq_info *cq_info, void *cb_arg); +#define IONIC_MAX_BUF_LEN ((u16)-1) #define IONIC_PAGE_SIZE PAGE_SIZE #define IONIC_PAGE_SPLIT_SZ (PAGE_SIZE / 2) #define IONIC_PAGE_GFP_MASK (GFP_ATOMIC | __GFP_NOWARN |\ @@ -216,12 +218,12 @@ struct ionic_desc_info { }; unsigned int bytes; unsigned int nbufs; - struct ionic_buf_info bufs[IONIC_MAX_FRAGS]; + struct ionic_buf_info bufs[MAX_SKB_FRAGS + 1]; ionic_desc_cb cb; void *cb_arg; }; -#define IONIC_QUEUE_NAME_MAX_SZ 32 +#define IONIC_QUEUE_NAME_MAX_SZ 16 struct ionic_queue { struct device *dev; @@ -376,7 +378,6 @@ void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_ void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa); void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb, void *cb_arg); -void ionic_q_rewind(struct ionic_queue *q, struct ionic_desc_info *start); void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info, unsigned int stop_index); int ionic_heartbeat_check(struct ionic *ionic); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 432fb93aa801..bad919343180 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -49,24 +49,24 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif); static void ionic_dim_work(struct work_struct *work) { struct dim *dim = container_of(work, struct dim, work); + struct ionic_intr_info *intr; struct dim_cq_moder cur_moder; struct ionic_qcq *qcq; + struct ionic_lif *lif; u32 new_coal; cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix); qcq = container_of(dim, struct ionic_qcq, dim); - new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec); + lif = qcq->q.lif; + new_coal = ionic_coal_usec_to_hw(lif->ionic, cur_moder.usec); new_coal = new_coal ? 
new_coal : 1; - if (qcq->intr.dim_coal_hw != new_coal) { - unsigned int qi = qcq->cq.bound_q->index; - struct ionic_lif *lif = qcq->q.lif; - - qcq->intr.dim_coal_hw = new_coal; + intr = &qcq->intr; + if (intr->dim_coal_hw != new_coal) { + intr->dim_coal_hw = new_coal; ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, - lif->rxqcqs[qi]->intr.index, - qcq->intr.dim_coal_hw); + intr->index, intr->dim_coal_hw); } dim->state = DIM_START_MEASURE; @@ -434,7 +434,7 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq) } } -static void ionic_qcqs_free(struct ionic_lif *lif) +void ionic_qcqs_free(struct ionic_lif *lif) { struct device *dev = lif->ionic->dev; struct ionic_qcq *adminqcq; @@ -1754,7 +1754,7 @@ static int ionic_set_mac_address(struct net_device *netdev, void *sa) return ionic_lif_addr_add(netdev_priv(netdev), mac); } -static void ionic_stop_queues_reconfig(struct ionic_lif *lif) +void ionic_stop_queues_reconfig(struct ionic_lif *lif) { /* Stop and clean the queues before reconfiguration */ netif_device_detach(lif->netdev); @@ -2013,7 +2013,7 @@ static void ionic_txrx_deinit(struct ionic_lif *lif) } } -static void ionic_txrx_free(struct ionic_lif *lif) +void ionic_txrx_free(struct ionic_lif *lif) { unsigned int i; @@ -3275,27 +3275,11 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif) dev_info(ionic->dev, "FW Down: LIFs stopped\n"); } -static void ionic_lif_handle_fw_up(struct ionic_lif *lif) +int ionic_restart_lif(struct ionic_lif *lif) { struct ionic *ionic = lif->ionic; int err; - if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) - return; - - dev_info(ionic->dev, "FW Up: restarting LIFs\n"); - - ionic_init_devinfo(ionic); - err = ionic_identify(ionic); - if (err) - goto err_out; - err = ionic_port_identify(ionic); - if (err) - goto err_out; - err = ionic_port_init(ionic); - if (err) - goto err_out; - mutex_lock(&lif->queue_lock); if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state)) @@ -3331,12 +3315,8 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif) clear_bit(IONIC_LIF_F_FW_RESET, lif->state); ionic_link_status_check_request(lif, CAN_SLEEP); netif_device_attach(lif->netdev); - dev_info(ionic->dev, "FW Up: LIFs restarted\n"); - - /* restore the hardware timestamping queues */ - ionic_lif_hwstamp_replay(lif); - return; + return 0; err_txrx_free: ionic_txrx_free(lif); @@ -3346,6 +3326,46 @@ err_qcqs_free: ionic_qcqs_free(lif); err_unlock: mutex_unlock(&lif->queue_lock); + + return err; +} + +static void ionic_lif_handle_fw_up(struct ionic_lif *lif) +{ + struct ionic *ionic = lif->ionic; + int err; + + if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return; + + dev_info(ionic->dev, "FW Up: restarting LIFs\n"); + + /* This is a little different from what happens at + * probe time because the LIF already exists so we + * just need to reanimate it. 
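/* The ionic hunks above route both firmware recovery and PCI
 * function-level reset (FLR) through one shared teardown/restart path.
 * A minimal sketch of that pci_error_handlers pattern follows; the
 * demo_* names are hypothetical stand-ins, not the driver's helpers.
 */
#include <linux/pci.h>

static void demo_reset_prepare(struct pci_dev *pdev)
{
	struct demo_priv *priv = pci_get_drvdata(pdev);

	demo_stop_traffic(priv);	/* quiesce queues, timers, deferred work */
	demo_release_hw(priv);		/* unmap BARs, release PCI regions */
}

static void demo_reset_done(struct pci_dev *pdev)
{
	struct demo_priv *priv = pci_get_drvdata(pdev);

	/* rebuild in the same order probe would, report on failure */
	if (demo_init_hw(priv) || demo_restart_traffic(priv))
		dev_err(&pdev->dev, "recovery failed\n");
}

static const struct pci_error_handlers demo_err_handler = {
	.reset_prepare = demo_reset_prepare,
	.reset_done    = demo_reset_done,
};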
+ */ + ionic_init_devinfo(ionic); + err = ionic_identify(ionic); + if (err) + goto err_out; + err = ionic_port_identify(ionic); + if (err) + goto err_out; + err = ionic_port_init(ionic); + if (err) + goto err_out; + + err = ionic_restart_lif(lif); + if (err) + goto err_out; + + dev_info(ionic->dev, "FW Up: LIFs restarted\n"); + + /* restore the hardware timestamping queues */ + ionic_lif_hwstamp_replay(lif); + + return; + err_out: dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err); } @@ -3811,6 +3831,18 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif) qtype, qti->max_sg_elems); dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n", qtype, qti->sg_desc_stride); + + if (qti->max_sg_elems >= IONIC_MAX_FRAGS) { + qti->max_sg_elems = IONIC_MAX_FRAGS - 1; + dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to IONIC_MAX_FRAGS-1 %d\n", + qtype, qti->max_sg_elems); + } + + if (qti->max_sg_elems > MAX_SKB_FRAGS) { + qti->max_sg_elems = MAX_SKB_FRAGS; + dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to MAX_SKB_FRAGS %d\n", + qtype, qti->max_sg_elems); + } } } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index fd2ea670e7d8..457c24195ca6 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -325,6 +325,11 @@ void ionic_lif_deinit(struct ionic_lif *lif); int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr); int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr); +void ionic_stop_queues_reconfig(struct ionic_lif *lif); +void ionic_txrx_free(struct ionic_lif *lif); +void ionic_qcqs_free(struct ionic_lif *lif); +int ionic_restart_lif(struct ionic_lif *lif); + int ionic_lif_register(struct ionic_lif *lif); void ionic_lif_unregister(struct ionic_lif *lif); int ionic_lif_identify(struct ionic *ionic, u8 lif_type, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index 1dc79cecc5cc..835577392178 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -554,8 +554,8 @@ int ionic_identify(struct ionic *ionic) memset(ident, 0, sizeof(*ident)); ident->drv.os_type = cpu_to_le32(IONIC_OS_TYPE_LINUX); - strncpy(ident->drv.driver_ver_str, UTS_RELEASE, - sizeof(ident->drv.driver_ver_str) - 1); + strscpy(ident->drv.driver_ver_str, UTS_RELEASE, + sizeof(ident->drv.driver_ver_str)); mutex_lock(&ionic->dev_cmd_lock); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h index 87b2666f248b..ee9e99cd1b5e 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h @@ -43,7 +43,6 @@ struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif, const u8 struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif); void ionic_rx_filter_sync(struct ionic_lif *lif); int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode); -int ionic_rx_filters_need_sync(struct ionic_lif *lif); int ionic_lif_vlan_add(struct ionic_lif *lif, const u16 vid); int ionic_lif_vlan_del(struct ionic_lif *lif, const u16 vid); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 26798fc635db..ccc1b1d407e4 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c 
@@ -207,7 +207,8 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q, return NULL; } - frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset); + frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN, + IONIC_PAGE_SIZE - buf_info->page_offset)); len -= frag_len; dma_sync_single_for_cpu(dev, @@ -452,7 +453,8 @@ void ionic_rx_fill(struct ionic_queue *q) /* fill main descriptor - buf[0] */ desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset); - frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset); + frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN, + IONIC_PAGE_SIZE - buf_info->page_offset)); desc->len = cpu_to_le16(frag_len); remain_len -= frag_len; buf_info++; @@ -471,7 +473,9 @@ void ionic_rx_fill(struct ionic_queue *q) } sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset); - frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE - buf_info->page_offset); + frag_len = min_t(u16, remain_len, min_t(u32, IONIC_MAX_BUF_LEN, + IONIC_PAGE_SIZE - + buf_info->page_offset)); sg_elem->len = cpu_to_le16(frag_len); remain_len -= frag_len; buf_info++; @@ -1239,25 +1243,84 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb) static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb) { struct ionic_tx_stats *stats = q_to_tx_stats(q); + bool too_many_frags = false; + skb_frag_t *frag; + int desc_bufs; + int chunk_len; + int frag_rem; + int tso_rem; + int seg_rem; + bool encap; + int hdrlen; int ndescs; int err; /* Each desc is mss long max, so a descriptor for each gso_seg */ - if (skb_is_gso(skb)) + if (skb_is_gso(skb)) { ndescs = skb_shinfo(skb)->gso_segs; - else + } else { ndescs = 1; + if (skb_shinfo(skb)->nr_frags > q->max_sg_elems) { + too_many_frags = true; + goto linearize; + } + } - /* If non-TSO, just need 1 desc and nr_frags sg elems */ - if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems) + /* If non-TSO, or no frags to check, we're done */ + if (!skb_is_gso(skb) || !skb_shinfo(skb)->nr_frags) return ndescs; - /* Too many frags, so linearize */ - err = skb_linearize(skb); - if (err) - return err; + /* We need to scan the skb to be sure that none of the MTU sized + * packets in the TSO will require more sgs per descriptor than we + * can support. We loop through the frags, add up the lengths for + * a packet, and count the number of sgs used per packet. + */ + tso_rem = skb->len; + frag = skb_shinfo(skb)->frags; + encap = skb->encapsulation; + + /* start with just hdr in first part of first descriptor */ + if (encap) + hdrlen = skb_inner_tcp_all_headers(skb); + else + hdrlen = skb_tcp_all_headers(skb); + seg_rem = min_t(int, tso_rem, hdrlen + skb_shinfo(skb)->gso_size); + frag_rem = hdrlen; + + while (tso_rem > 0) { + desc_bufs = 0; + while (seg_rem > 0) { + desc_bufs++; + + /* We add the +1 because we can take buffers for one + * more than we have SGs: one for the initial desc data + * in addition to the SG segments that might follow. 
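/* A standalone model of the TSO segment scan added above, assuming the
 * skb's linear area holds exactly the protocol headers and frag[] lists
 * the payload fragment sizes; it returns true when some MTU-sized
 * segment would need more buffers than the queue supports, i.e. when
 * the driver must fall back to skb_linearize().
 */
#include <stdbool.h>

static int min_int(int a, int b) { return a < b ? a : b; }

static bool demo_tso_needs_linearize(const int *frag, int nr_frags,
				     int hdrlen, int gso_size, int max_sg)
{
	int tso_rem = hdrlen, frag_rem = hdrlen, fi = 0, seg_rem, i;

	for (i = 0; i < nr_frags; i++)
		tso_rem += frag[i];
	/* the first segment carries the headers plus one gso_size payload */
	seg_rem = min_int(tso_rem, hdrlen + gso_size);

	while (tso_rem > 0) {
		int desc_bufs = 0;

		while (seg_rem > 0) {
			int chunk;

			/* +1: one buffer rides in the descriptor itself */
			if (++desc_bufs > max_sg + 1)
				return true;
			if (frag_rem == 0 && fi < nr_frags)
				frag_rem = frag[fi++];
			chunk = min_int(frag_rem, seg_rem);
			frag_rem -= chunk;
			tso_rem -= chunk;
			seg_rem -= chunk;
		}
		seg_rem = min_int(tso_rem, gso_size);
	}

	return false;
}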
+ */ + if (desc_bufs > q->max_sg_elems + 1) { + too_many_frags = true; + goto linearize; + } + + if (frag_rem == 0) { + frag_rem = skb_frag_size(frag); + frag++; + } + chunk_len = min(frag_rem, seg_rem); + frag_rem -= chunk_len; + tso_rem -= chunk_len; + seg_rem -= chunk_len; + } + + seg_rem = min_t(int, tso_rem, skb_shinfo(skb)->gso_size); + } - stats->linearize++; +linearize: + if (too_many_frags) { + err = skb_linearize(skb); + if (err) + return err; + stats->linearize++; + } return ndescs; } diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 1d1e183d3a8b..ed24d6af7487 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -233,9 +233,7 @@ static int nx_set_dma_mask(struct netxen_adapter *adapter) cmask = DMA_BIT_MASK(32); if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { -#ifndef CONFIG_IA64 mask = DMA_BIT_MASK(35); -#endif } else { mask = DMA_BIT_MASK(39); cmask = mask; diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index d613095b78e0..1d719726f72b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -909,7 +909,6 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate); void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); -int qed_device_num_engines(struct qed_dev *cdev); void qed_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb, u8 *mac); diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 65e20693c549..33f4f58ee51c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -933,6 +933,7 @@ static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn) p_dma->virt_addr = NULL; } kfree(p_mngr->ilt_shadow); + p_mngr->ilt_shadow = NULL; } static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index cdcead614e9f..f67be4b8ad43 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -3204,8 +3204,8 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 
256 : 128; - strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN); - strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN); + memcpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN); + memcpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN); /* Dump memory header */ offset += qed_grc_dump_mem_hdr(p_hwfn, @@ -6359,8 +6359,7 @@ static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest) { const char *source_str = &((const char *)buf)[*offset]; - strncpy(dest, source_str, size); - dest[size - 1] = '\0'; + strscpy(dest, source_str, size); *offset += size; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c index be5cc8b79bd5..dad8e617c393 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c +++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c @@ -66,12 +66,12 @@ qed_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter, return err; } - err = devlink_fmsg_binary_pair_put(fmsg, "dump_data", - p_dbg_data_buf, dbg_data_buf_size); + devlink_fmsg_binary_pair_put(fmsg, "dump_data", p_dbg_data_buf, + dbg_data_buf_size); vfree(p_dbg_data_buf); - return err; + return 0; } static int diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 717a0b3f89bd..ab5ef254a748 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -113,7 +113,10 @@ static void qed_ll2b_complete_tx_packet(void *cxt, static int qed_ll2_alloc_buffer(struct qed_dev *cdev, u8 **data, dma_addr_t *phys_addr) { - *data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC); + size_t size = cdev->ll2->rx_size + NET_SKB_PAD + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + *data = kmalloc(size, GFP_ATOMIC); if (!(*data)) { DP_INFO(cdev, "Failed to allocate LL2 buffer data\n"); return -ENOMEM; @@ -2589,7 +2592,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) INIT_LIST_HEAD(&cdev->ll2->list); spin_lock_init(&cdev->ll2->lock); - cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN + + cdev->ll2->rx_size = PRM_DMA_PAD_BYTES_NUM + ETH_HLEN + L1_CACHE_BYTES + params->mtu; /* Allocate memory for LL2. 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 0bfc375161ed..a174c6fc626a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -110,9 +110,9 @@ struct qed_ll2_info { enum core_tx_dest tx_dest; u8 tx_stats_en; bool main_func_queue; + struct qed_ll2_cbs cbs; struct qed_ll2_rx_queue rx_queue; struct qed_ll2_tx_queue tx_queue; - struct qed_ll2_cbs cbs; }; extern const struct qed_ll2_ops qed_ll2_ops_pass; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 7b0e390c0b07..0e265ed1f501 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -60,7 +60,7 @@ static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status) #define QED_VF_CHANNEL_MSLEEP_ITERATIONS 10 #define QED_VF_CHANNEL_MSLEEP_DELAY 25 -static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) +static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done) { union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request; struct ustorm_trigger_vf_zone trigger; @@ -72,9 +72,6 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) /* output tlvs list */ qed_dp_tlv_list(p_hwfn, p_req); - /* need to add the END TLV to the message size */ - resp_size += sizeof(struct channel_list_end_tlv); - /* Send TLVs over HW channel */ memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone)); trigger.vf_pf_msg_valid = 1; @@ -172,7 +169,7 @@ static int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final) CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); resp = &p_iov->pf2vf_reply->default_resp; - rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS) rc = -EAGAIN; @@ -301,7 +298,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs)); /* send acquire request */ - rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); /* Re-try acquire in case of vf-pf hw channel timeout */ if (retry_cnt && rc == -EBUSY) { @@ -705,7 +702,7 @@ int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, sizeof(struct channel_list_end_tlv)); p_resp = &p_iov->pf2vf_reply->tunn_param_resp; - rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp)); + rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status); if (rc) goto exit; @@ -772,7 +769,7 @@ qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); resp = &p_iov->pf2vf_reply->queue_start; - rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; @@ -822,7 +819,7 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); resp = &p_iov->pf2vf_reply->default_resp; - rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; @@ -867,7 +864,7 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); resp = &p_iov->pf2vf_reply->queue_start; - rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; @@ -918,7 +915,7 @@ int qed_vf_pf_txq_stop(struct qed_hwfn 
*p_hwfn, struct qed_queue_cid *p_cid) CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); resp = &p_iov->pf2vf_reply->default_resp; - rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; @@ -968,7 +965,7 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); resp = &p_iov->pf2vf_reply->default_resp; - rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; @@ -997,7 +994,7 @@ int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn) qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); - rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; @@ -1075,12 +1072,10 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, struct vfpf_vport_update_tlv *req; struct pfvf_def_resp_tlv *resp; u8 update_rx, update_tx; - u32 resp_size = 0; u16 size, tlv; int rc; resp = &p_iov->pf2vf_reply->default_resp; - resp_size = sizeof(*resp); update_rx = p_params->update_vport_active_rx_flg; update_tx = p_params->update_vport_active_tx_flg; @@ -1096,7 +1091,6 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_VPORT_UPDATE_ACTIVATE, size); - resp_size += sizeof(struct pfvf_def_resp_tlv); if (update_rx) { p_act_tlv->update_rx = update_rx; @@ -1116,7 +1110,6 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size); - resp_size += sizeof(struct pfvf_def_resp_tlv); p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg; } @@ -1127,7 +1120,6 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, size = sizeof(struct vfpf_vport_update_mcast_bin_tlv); p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_VPORT_UPDATE_MCAST, size); - resp_size += sizeof(struct pfvf_def_resp_tlv); memcpy(p_mcast_tlv->bins, p_params->bins, sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); @@ -1142,7 +1134,6 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; size = sizeof(struct vfpf_vport_update_accept_param_tlv); p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size); - resp_size += sizeof(struct pfvf_def_resp_tlv); if (update_rx) { p_accept_tlv->update_rx_mode = update_rx; @@ -1166,7 +1157,6 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, p_rss_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_VPORT_UPDATE_RSS, size); - resp_size += sizeof(struct pfvf_def_resp_tlv); if (rss_params->update_rss_config) p_rss_tlv->update_rss_flags |= @@ -1203,7 +1193,6 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size); - resp_size += sizeof(struct pfvf_def_resp_tlv); p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan; p_any_vlan_tlv->update_accept_any_vlan_flg = p_params->update_accept_any_vlan_flg; @@ -1213,7 +1202,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); - rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size); + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; @@ -1245,7 +1234,7 @@ int qed_vf_pf_reset(struct qed_hwfn *p_hwfn) 
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); resp = &p_iov->pf2vf_reply->default_resp; - rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; @@ -1303,7 +1292,7 @@ int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); resp = &p_iov->pf2vf_reply->default_resp; - rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; @@ -1332,7 +1321,7 @@ int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn) qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); - rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; @@ -1364,7 +1353,7 @@ int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, sizeof(struct channel_list_end_tlv)); resp = &p_iov->pf2vf_reply->read_coal_resp; - rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; @@ -1402,7 +1391,7 @@ qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, sizeof(struct channel_list_end_tlv)); p_resp = &p_iov->pf2vf_reply->default_resp; - rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp)); + rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status); qed_vf_pf_req_end(p_hwfn, rc); return rc; } @@ -1433,7 +1422,7 @@ qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, sizeof(struct channel_list_end_tlv)); resp = &p_iov->pf2vf_reply->default_resp; - rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 4d83ceebdc49..042a75f34060 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -556,9 +556,6 @@ void qede_config_rx_mode(struct net_device *ndev); void qede_fill_rss_params(struct qede_dev *edev, struct qed_update_vport_rss_params *rss, u8 *update); -void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti); -void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti); - int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp); #ifdef CONFIG_DCB diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 95820cf1cd6c..b6b849a079ed 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -201,21 +201,6 @@ static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = { /* Forced speed capabilities maps */ -struct qede_forced_speed_map { - u32 speed; - __ETHTOOL_DECLARE_LINK_MODE_MASK(caps); - - const u32 *cap_arr; - u32 arr_size; -}; - -#define QEDE_FORCED_SPEED_MAP(value) \ -{ \ - .speed = SPEED_##value, \ - .cap_arr = qede_forced_speed_##value, \ - .arr_size = ARRAY_SIZE(qede_forced_speed_##value), \ -} - static const u32 qede_forced_speed_1000[] __initconst = { ETHTOOL_LINK_MODE_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, @@ -263,28 +248,21 @@ static const u32 qede_forced_speed_100000[] __initconst = { ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, }; -static struct qede_forced_speed_map qede_forced_speed_maps[] __ro_after_init = { - QEDE_FORCED_SPEED_MAP(1000), - QEDE_FORCED_SPEED_MAP(10000), - QEDE_FORCED_SPEED_MAP(20000), - 
QEDE_FORCED_SPEED_MAP(25000), - QEDE_FORCED_SPEED_MAP(40000), - QEDE_FORCED_SPEED_MAP(50000), - QEDE_FORCED_SPEED_MAP(100000), +static struct ethtool_forced_speed_map +qede_forced_speed_maps[] __ro_after_init = { + ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 1000), + ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 10000), + ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 20000), + ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 25000), + ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 40000), + ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 50000), + ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 100000), }; void __init qede_forced_speed_maps_init(void) { - struct qede_forced_speed_map *map; - u32 i; - - for (i = 0; i < ARRAY_SIZE(qede_forced_speed_maps); i++) { - map = qede_forced_speed_maps + i; - - linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps); - map->cap_arr = NULL; - map->arr_size = 0; - } + ethtool_forced_speed_maps_init(qede_forced_speed_maps, + ARRAY_SIZE(qede_forced_speed_maps)); } /* Ethtool callbacks */ @@ -564,8 +542,8 @@ static int qede_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { const struct ethtool_link_settings *base = &cmd->base; + const struct ethtool_forced_speed_map *map; struct qede_dev *edev = netdev_priv(dev); - const struct qede_forced_speed_map *map; struct qed_link_output current_link; struct qed_link_params params; u32 i; diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index 3010833ddde3..a5ac21a0ee33 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1827,12 +1827,12 @@ qede_parse_flow_attr(struct qede_dev *edev, __be16 proto, memset(tuple, 0, sizeof(*tuple)); if (dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_PORTS))) { - DP_NOTICE(edev, "Unsupported key set:0x%x\n", + ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) { + DP_NOTICE(edev, "Unsupported key set:0x%llx\n", dissector->used_keys); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c index 802ef81493e0..e4bc18009d08 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c @@ -8,7 +8,9 @@ #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/acpi.h> +#include <linux/of.h> #include <linux/of_device.h> +#include <linux/of_platform.h> #include "emac.h" #include "emac-mac.h" #include "emac-sgmii.h" diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index eaa50050aa0b..3270df72541b 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -11,7 +11,6 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/of_net.h> -#include <linux/of_device.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/acpi.h> @@ -719,7 +718,7 @@ err_undo_netdev: return ret; } -static int emac_remove(struct platform_device *pdev) +static void emac_remove(struct platform_device *pdev) { struct net_device *netdev = dev_get_drvdata(&pdev->dev); struct emac_adapter *adpt = netdev_priv(netdev); @@ 
-743,8 +742,6 @@ static int emac_remove(struct platform_device *pdev) iounmap(adpt->phy.base); free_netdev(netdev); - - return 0; } static void emac_shutdown(struct platform_device *pdev) @@ -763,7 +760,7 @@ static void emac_shutdown(struct platform_device *pdev) static struct platform_driver emac_platform_driver = { .probe = emac_probe, - .remove = emac_remove, + .remove_new = emac_remove, .driver = { .name = "qcom-emac", .of_match_table = emac_dt_match, diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c index 6f2fa2a42770..1822f2ad8f0d 100644 --- a/drivers/net/ethernet/qualcomm/qca_debug.c +++ b/drivers/net/ethernet/qualcomm/qca_debug.c @@ -30,6 +30,8 @@ #define QCASPI_MAX_REGS 0x20 +#define QCASPI_RX_MAX_FRAMES 4 + static const u16 qcaspi_spi_regs[] = { SPI_REG_BFR_SIZE, SPI_REG_WRBUF_SPC_AVA, @@ -252,9 +254,9 @@ qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring, { struct qcaspi *qca = netdev_priv(dev); - ring->rx_max_pending = 4; + ring->rx_max_pending = QCASPI_RX_MAX_FRAMES; ring->tx_max_pending = TX_RING_MAX_LEN; - ring->rx_pending = 4; + ring->rx_pending = QCASPI_RX_MAX_FRAMES; ring->tx_pending = qca->txr.count; } @@ -263,22 +265,21 @@ qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { - const struct net_device_ops *ops = dev->netdev_ops; struct qcaspi *qca = netdev_priv(dev); - if ((ring->rx_pending) || + if (ring->rx_pending != QCASPI_RX_MAX_FRAMES || (ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - if (netif_running(dev)) - ops->ndo_stop(dev); + if (qca->spi_thread) + kthread_park(qca->spi_thread); qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN); qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN); - if (netif_running(dev)) - ops->ndo_open(dev); + if (qca->spi_thread) + kthread_unpark(qca->spi_thread); return 0; } diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 4a1b94e5a8ea..5f3c11fb3fa2 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -35,7 +35,6 @@ #include <linux/moduleparam.h> #include <linux/netdevice.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_net.h> #include <linux/sched.h> #include <linux/skbuff.h> @@ -581,6 +580,18 @@ qcaspi_spi_thread(void *data) netdev_info(qca->net_dev, "SPI thread created\n"); while (!kthread_should_stop()) { set_current_state(TASK_INTERRUPTIBLE); + if (kthread_should_park()) { + netif_tx_disable(qca->net_dev); + netif_carrier_off(qca->net_dev); + qcaspi_flush_tx_ring(qca); + kthread_parkme(); + if (qca->sync == QCASPI_SYNC_READY) { + netif_carrier_on(qca->net_dev); + netif_wake_queue(qca->net_dev); + } + continue; + } + if ((qca->intr_req == qca->intr_svc) && !qca->txr.skb[qca->txr.head]) schedule(); @@ -609,11 +620,17 @@ qcaspi_spi_thread(void *data) if (intr_cause & SPI_INT_CPU_ON) { qcaspi_qca7k_sync(qca, QCASPI_EVENT_CPUON); + /* Frame decoding in progress */ + if (qca->frm_handle.state != qca->frm_handle.init) + qca->net_dev->stats.rx_dropped++; + + qcafrm_fsm_init_spi(&qca->frm_handle); + qca->stats.device_reset++; + /* not synced. 
*/ if (qca->sync != QCASPI_SYNC_READY) continue; - qca->stats.device_reset++; netif_wake_queue(qca->net_dev); netif_carrier_on(qca->net_dev); } diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c index 26646cb6a20a..9adec91f35e9 100644 --- a/drivers/net/ethernet/qualcomm/qca_uart.c +++ b/drivers/net/ethernet/qualcomm/qca_uart.c @@ -32,7 +32,6 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_net.h> #include <linux/sched.h> #include <linux/serdev.h> @@ -404,7 +403,7 @@ static struct serdev_device_driver qca_uart_driver = { .remove = qca_uart_remove, .driver = { .name = QCAUART_DRV_NAME, - .of_match_table = of_match_ptr(qca_uart_of_match), + .of_match_table = qca_uart_of_match, }, }; diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 5eb50b265c0b..bb787a52bc75 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -196,6 +196,7 @@ enum rtl_registers { /* No threshold before first PCI xfer */ #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT) #define RX_EARLY_OFF (1 << 11) +#define RX_PAUSE_SLOT_ON (1 << 11) /* 8125b and later */ #define RXCFG_DMA_SHIFT 8 /* Unlimited maximum PCI burst. */ #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT) @@ -579,6 +580,7 @@ struct rtl8169_tc_offsets { enum rtl_flag { RTL_FLAG_TASK_ENABLED = 0, RTL_FLAG_TASK_RESET_PENDING, + RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, RTL_FLAG_TASK_TX_TIMEOUT, RTL_FLAG_MAX }; @@ -624,6 +626,7 @@ struct rtl8169_private { unsigned supports_gmii:1; unsigned aspm_manageable:1; + unsigned dash_enabled:1; dma_addr_t counters_phys_addr; struct rtl8169_counters *counters; struct rtl8169_tc_offsets tc_offset; @@ -1253,14 +1256,26 @@ static bool r8168ep_check_dash(struct rtl8169_private *tp) return r8168ep_ocp_read(tp, 0x128) & BIT(0); } -static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp) +static bool rtl_dash_is_enabled(struct rtl8169_private *tp) +{ + switch (tp->dash_type) { + case RTL_DASH_DP: + return r8168dp_check_dash(tp); + case RTL_DASH_EP: + return r8168ep_check_dash(tp); + default: + return false; + } +} + +static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp) { switch (tp->mac_version) { case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: - return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE; + return RTL_DASH_DP; case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53: - return r8168ep_check_dash(tp) ? RTL_DASH_EP : RTL_DASH_NONE; + return RTL_DASH_EP; default: return RTL_DASH_NONE; } @@ -1453,7 +1468,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) device_set_wakeup_enable(tp_to_dev(tp), wolopts); - if (tp->dash_type == RTL_DASH_NONE) { + if (!tp->dash_enabled) { rtl_set_d3_pll_down(tp, !wolopts); tp->dev->wol_enabled = wolopts ? 1 : 0; } @@ -2292,9 +2307,13 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53: RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); break; - case RTL_GIGA_MAC_VER_61 ... 
RTL_GIGA_MAC_VER_63: + case RTL_GIGA_MAC_VER_61: RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST); break; + case RTL_GIGA_MAC_VER_63: + RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST | + RX_PAUSE_SLOT_ON); + break; default: RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST); break; @@ -2512,7 +2531,7 @@ static void rtl_wol_enable_rx(struct rtl8169_private *tp) static void rtl_prepare_power_down(struct rtl8169_private *tp) { - if (tp->dash_type != RTL_DASH_NONE) + if (tp->dash_enabled) return; if (tp->mac_version == RTL_GIGA_MAC_VER_32 || @@ -2582,6 +2601,8 @@ static void rtl_set_rx_mode(struct net_device *dev) if (dev->flags & IFF_PROMISC) { rx_mode |= AcceptAllPhys; + } else if (!(dev->flags & IFF_MULTICAST)) { + rx_mode &= ~AcceptMulticast; } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT || dev->flags & IFF_ALLMULTI || tp->mac_version == RTL_GIGA_MAC_VER_35) { @@ -4364,7 +4385,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, unsigned int entry = dirty_tx % NUM_TX_DESC; u32 status; - status = le32_to_cpu(tp->TxDescArray[entry].opts1); + status = le32_to_cpu(READ_ONCE(tp->TxDescArray[entry].opts1)); if (status & DescOwn) break; @@ -4394,7 +4415,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, * If skb is NULL then we come here again once a tx irq is * triggered after the last fragment is marked transmitted. */ - if (tp->cur_tx != dirty_tx && skb) + if (READ_ONCE(tp->cur_tx) != dirty_tx && skb) rtl8169_doorbell(tp); } } @@ -4427,7 +4448,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget dma_addr_t addr; u32 status; - status = le32_to_cpu(desc->opts1); + status = le32_to_cpu(READ_ONCE(desc->opts1)); if (status & DescOwn) break; @@ -4567,6 +4588,8 @@ static void rtl_task(struct work_struct *work) reset: rtl_reset_work(tp); netif_wake_queue(tp->dev); + } else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) { + rtl_reset_work(tp); } out_unlock: rtnl_unlock(); @@ -4596,7 +4619,11 @@ static void r8169_phylink_handler(struct net_device *ndev) if (netif_carrier_ok(ndev)) { rtl_link_chg_patch(tp); pm_request_resume(d); + netif_wake_queue(tp->dev); } else { + /* In few cases rx is broken after link-down otherwise */ + if (rtl_is_8125(tp)) + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE); pm_runtime_idle(d); } @@ -4640,10 +4667,16 @@ static void rtl8169_down(struct rtl8169_private *tp) rtl8169_cleanup(tp); rtl_disable_exit_l1(tp); rtl_prepare_power_down(tp); + + if (tp->dash_type != RTL_DASH_NONE) + rtl8168_driver_stop(tp); } static void rtl8169_up(struct rtl8169_private *tp) { + if (tp->dash_type != RTL_DASH_NONE) + rtl8168_driver_start(tp); + pci_set_master(tp->pci_dev); phy_init_hw(tp->phydev); phy_resume(tp->phydev); @@ -4666,7 +4699,7 @@ static int rtl8169_close(struct net_device *dev) rtl8169_down(tp); rtl8169_rx_clear(tp); - cancel_work_sync(&tp->wk.work); + cancel_work(&tp->wk.work); free_irq(tp->irq, tp); @@ -4861,7 +4894,7 @@ static int rtl8169_runtime_idle(struct device *device) { struct rtl8169_private *tp = dev_get_drvdata(device); - if (tp->dash_type != RTL_DASH_NONE) + if (tp->dash_enabled) return -EBUSY; if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev)) @@ -4887,8 +4920,7 @@ static void rtl_shutdown(struct pci_dev *pdev) /* Restore original MAC address */ rtl_rar_set(tp, tp->dev->perm_addr); - if (system_state == SYSTEM_POWER_OFF && - tp->dash_type == RTL_DASH_NONE) { + if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled) { 
pci_wake_from_d3(pdev, tp->saved_wolopts); pci_set_power_state(pdev, PCI_D3hot); } @@ -4901,6 +4933,8 @@ static void rtl_remove_one(struct pci_dev *pdev) if (pci_dev_run_wake(pdev)) pm_runtime_get_noresume(&pdev->dev); + cancel_work_sync(&tp->wk.work); + unregister_netdev(tp->dev); if (tp->dash_type != RTL_DASH_NONE) @@ -5239,18 +5273,15 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Disable ASPM L1 as that cause random device stop working * problems as well as full system hangs for some PCIe devices users. - * Chips from RTL8168h partially have issues with L1.2, but seem - * to work fine with L1 and L1.1. */ if (rtl_aspm_is_safe(tp)) rc = 0; - else if (tp->mac_version >= RTL_GIGA_MAC_VER_46) - rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2); else rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1); tp->aspm_manageable = !rc; - tp->dash_type = rtl_check_dash(tp); + tp->dash_type = rtl_get_dash_type(tp); + tp->dash_enabled = rtl_dash_is_enabled(tp); tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK; @@ -5321,7 +5352,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* configure chip for default features */ rtl8169_set_features(dev, dev->features); - if (tp->dash_type == RTL_DASH_NONE) { + if (!tp->dash_enabled) { rtl_set_d3_pll_down(tp, true); } else { rtl_set_d3_pll_down(tp, false); @@ -5361,7 +5392,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) "ok" : "ko"); if (tp->dash_type != RTL_DASH_NONE) { - netdev_info(dev, "DASH enabled\n"); + netdev_info(dev, "DASH %s\n", + tp->dash_enabled ? "enabled" : "disabled"); rtl8168_driver_start(tp); } diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index 3ceb57408ed0..8ef5b0241e64 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 # -# Renesas device configuration +# Renesas network device configuration # config NET_VENDOR_RENESAS @@ -25,9 +25,6 @@ config SH_ETH select PHYLIB help Renesas SuperH Ethernet device driver. - This driver supporting CPUs are: - - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757, - R8A7740, R8A774x, R8A777x and R8A779x. config RAVB tristate "Renesas Ethernet AVB support" @@ -39,8 +36,6 @@ config RAVB select PHYLIB help Renesas Ethernet AVB device driver. - This driver supports the following SoCs: - - R8A779x. config RENESAS_ETHER_SWITCH tristate "Renesas Ethernet Switch support" @@ -51,7 +46,5 @@ config RENESAS_ETHER_SWITCH select PHYLINK help Renesas Ethernet Switch device driver. - This driver supports the following SoCs: - - R8A779Fx. endif # NET_VENDOR_RENESAS diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile index 592005893464..e8fd85b5fe8f 100644 --- a/drivers/net/ethernet/renesas/Makefile +++ b/drivers/net/ethernet/renesas/Makefile @@ -1,14 +1,12 @@ # SPDX-License-Identifier: GPL-2.0 # -# Makefile for the Renesas device drivers. 
+# Makefile for the Renesas network device drivers # obj-$(CONFIG_SH_ETH) += sh_eth.o ravb-objs := ravb_main.o ravb_ptp.o - obj-$(CONFIG_RAVB) += ravb.o rswitch_drv-objs := rswitch.o rcar_gen4_ptp.o - obj-$(CONFIG_RENESAS_ETHER_SWITCH) += rswitch_drv.o diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 4d6b3b7d6abb..664eda4b5a11 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -21,10 +21,9 @@ #include <linux/module.h> #include <linux/net_tstamp.h> #include <linux/of.h> -#include <linux/of_device.h> -#include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_net.h> +#include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/spinlock.h> @@ -516,6 +515,15 @@ static void ravb_emac_init_gbeth(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + if (priv->phy_interface == PHY_INTERFACE_MODE_MII) { + ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35); + ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0); + } else { + ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_RGMII, CXR35); + ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, + CXR31_SEL_LINK0); + } + /* Receive frame limit set register */ ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR); @@ -538,14 +546,6 @@ static void ravb_emac_init_gbeth(struct net_device *ndev) /* E-MAC interrupt enable register */ ravb_write(ndev, ECSIPR_ICDIP, ECSIPR); - - if (priv->phy_interface == PHY_INTERFACE_MODE_MII) { - ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0); - ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35); - } else { - ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, - CXR31_SEL_LINK0); - } } static void ravb_emac_init_rcar(struct net_device *ndev) @@ -1812,19 +1812,20 @@ static int ravb_open(struct net_device *ndev) if (info->gptp) ravb_ptp_init(ndev, priv->pdev); - netif_tx_start_all_queues(ndev); - /* PHY control start */ error = ravb_phy_start(ndev); if (error) goto out_ptp_stop; + netif_tx_start_all_queues(ndev); + return 0; out_ptp_stop: /* Stop PTP Clock driver */ if (info->gptp) ravb_ptp_stop(ndev); + ravb_stop_dma(ndev); out_free_irq_mgmta: if (!info->multi_irqs) goto out_free_irq; @@ -1875,6 +1876,12 @@ static void ravb_tx_timeout_work(struct work_struct *work) struct net_device *ndev = priv->ndev; int error; + if (!rtnl_trylock()) { + usleep_range(1000, 2000); + schedule_work(&priv->work); + return; + } + netif_tx_stop_all_queues(ndev); /* Stop PTP Clock driver */ @@ -1908,7 +1915,7 @@ static void ravb_tx_timeout_work(struct work_struct *work) */ netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n", __func__, error); - return; + goto out_unlock; } ravb_emac_init(ndev); @@ -1918,6 +1925,9 @@ out: ravb_ptp_init(ndev, priv->pdev); netif_tx_start_all_queues(ndev); + +out_unlock: + rtnl_unlock(); } /* Packet transmit function for Ethernet AVB */ @@ -2168,6 +2178,8 @@ static int ravb_close(struct net_device *ndev) of_phy_deregister_fixed_link(np); } + cancel_work_sync(&priv->work); + if (info->multi_irqs) { free_irq(priv->tx_irqs[RAVB_NC], ndev); free_irq(priv->rx_irqs[RAVB_NC], ndev); @@ -2644,9 +2656,14 @@ static int ravb_probe(struct platform_device *pdev) ndev->features = info->net_features; ndev->hw_features = info->net_hw_features; - reset_control_deassert(rstc); + error = reset_control_deassert(rstc); + if (error) + goto out_free_netdev; + pm_runtime_enable(&pdev->dev); - 
pm_runtime_get_sync(&pdev->dev); + error = pm_runtime_resume_and_get(&pdev->dev); + if (error < 0) + goto out_rpm_disable; if (info->multi_irqs) { if (info->err_mgmt_irqs) @@ -2871,43 +2888,46 @@ out_disable_gptp_clk: out_disable_refclk: clk_disable_unprepare(priv->refclk); out_release: - free_netdev(ndev); - pm_runtime_put(&pdev->dev); +out_rpm_disable: pm_runtime_disable(&pdev->dev); reset_control_assert(rstc); +out_free_netdev: + free_netdev(ndev); return error; } -static int ravb_remove(struct platform_device *pdev) +static void ravb_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; + unregister_netdev(ndev); + if (info->nc_queues) + netif_napi_del(&priv->napi[RAVB_NC]); + netif_napi_del(&priv->napi[RAVB_BE]); + + ravb_mdio_release(priv); + /* Stop PTP Clock driver */ if (info->ccc_gac) ravb_ptp_stop(ndev); - clk_disable_unprepare(priv->gptp_clk); - clk_disable_unprepare(priv->refclk); - dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, priv->desc_bat_dma); + /* Set reset mode */ ravb_write(ndev, CCC_OPC_RESET, CCC); - unregister_netdev(ndev); - if (info->nc_queues) - netif_napi_del(&priv->napi[RAVB_NC]); - netif_napi_del(&priv->napi[RAVB_BE]); - ravb_mdio_release(priv); + + clk_disable_unprepare(priv->gptp_clk); + clk_disable_unprepare(priv->refclk); + pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); reset_control_assert(priv->rstc); free_netdev(ndev); platform_set_drvdata(pdev, NULL); - - return 0; } static int ravb_wol_setup(struct net_device *ndev) @@ -3045,7 +3065,7 @@ static const struct dev_pm_ops ravb_dev_pm_ops = { static struct platform_driver ravb_driver = { .probe = ravb_probe, - .remove = ravb_remove, + .remove_new = ravb_remove, .driver = { .name = "ravb", .pm = &ravb_dev_pm_ops, diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index 4e412ac0965a..e77c6ff93d81 100644 --- a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -4,6 +4,7 @@ * Copyright (C) 2022 Renesas Electronics Corporation */ +#include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/etherdevice.h> @@ -12,15 +13,16 @@ #include <linux/module.h> #include <linux/net_tstamp.h> #include <linux/of.h> -#include <linux/of_device.h> -#include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/rtnetlink.h> #include <linux/slab.h> #include <linux/spinlock.h> +#include <linux/sys_soc.h> #include "rswitch.h" @@ -799,6 +801,7 @@ static int rswitch_poll(struct napi_struct *napi, int budget) struct net_device *ndev = napi->dev; struct rswitch_private *priv; struct rswitch_device *rdev; + unsigned long flags; int quota = budget; rdev = netdev_priv(ndev); @@ -816,10 +819,12 @@ retry: netif_wake_subqueue(ndev, 0); - napi_complete(napi); - - rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true); - rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true); + if (napi_complete_done(napi, budget - quota)) { + spin_lock_irqsave(&priv->lock, flags); + rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true); + rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true); + spin_unlock_irqrestore(&priv->lock, flags); + } out: return budget - quota; @@ -835,8 +840,10 @@ static void 
rswitch_queue_interrupt(struct net_device *ndev) struct rswitch_device *rdev = netdev_priv(ndev); if (napi_schedule_prep(&rdev->napi)) { + spin_lock(&rdev->priv->lock); rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false); rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false); + spin_unlock(&rdev->priv->lock); __napi_schedule(&rdev->napi); } } @@ -1044,7 +1051,7 @@ static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac) static void rswitch_etha_enable_mii(struct rswitch_etha *etha) { rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK, - MPIC_PSMCS(0x05) | MPIC_PSMHT(0x06)); + MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06)); rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45); } @@ -1244,22 +1251,31 @@ static void rswitch_adjust_link(struct net_device *ndev) struct rswitch_device *rdev = netdev_priv(ndev); struct phy_device *phydev = ndev->phydev; - /* Current hardware has a restriction not to change speed at runtime */ if (phydev->link != rdev->etha->link) { phy_print_status(phydev); if (phydev->link) phy_power_on(rdev->serdes); - else + else if (rdev->serdes->power_count) phy_power_off(rdev->serdes); rdev->etha->link = phydev->link; + + if (!rdev->priv->etha_no_runtime_change && + phydev->speed != rdev->etha->speed) { + rdev->etha->speed = phydev->speed; + + rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr); + phy_set_speed(rdev->serdes, rdev->etha->speed); + } } } static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev, struct phy_device *phydev) { - /* Current hardware has a restriction not to change speed at runtime */ + if (!rdev->priv->etha_no_runtime_change) + return; + switch (rdev->etha->speed) { case SPEED_2500: phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT); @@ -1300,6 +1316,7 @@ static int rswitch_phy_device_init(struct rswitch_device *rdev) if (!phydev) goto out; __set_bit(rdev->etha->phy_interface, phydev->host_interfaces); + phydev->mac_managed_pm = true; phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0, rdev->etha->phy_interface); @@ -1348,7 +1365,8 @@ static int rswitch_ether_port_init_one(struct rswitch_device *rdev) err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr); if (err < 0) return err; - rdev->etha->operated = true; + if (rdev->priv->etha_no_runtime_change) + rdev->etha->operated = true; } err = rswitch_mii_register(rdev); @@ -1389,7 +1407,8 @@ static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev) static int rswitch_ether_port_init_all(struct rswitch_private *priv) { - int i, err; + unsigned int i; + int err; rswitch_for_each_enabled_port(priv, i) { err = rswitch_ether_port_init_one(priv->rdev[i]); @@ -1430,14 +1449,17 @@ static void rswitch_ether_port_deinit_all(struct rswitch_private *priv) static int rswitch_open(struct net_device *ndev) { struct rswitch_device *rdev = netdev_priv(ndev); + unsigned long flags; phy_start(ndev->phydev); napi_enable(&rdev->napi); netif_start_queue(ndev); + spin_lock_irqsave(&rdev->priv->lock, flags); rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true); rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true); + spin_unlock_irqrestore(&rdev->priv->lock, flags); if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE); @@ -1451,6 +1473,7 @@ static int rswitch_stop(struct net_device *ndev) { struct rswitch_device *rdev = netdev_priv(ndev); struct rswitch_gwca_ts_info *ts_info, *ts_info2; + unsigned long flags; 
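/* The rswitch hunks above convert the poll loop to napi_complete_done()
 * and guard the interrupt-enable registers with a spinlock. A minimal
 * sketch of that completion pattern, with hypothetical demo_* names and
 * a pared-down private struct standing in for the driver's own:
 */
#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct demo_priv {
	struct napi_struct napi;
	spinlock_t lock;	/* protects interrupt-enable registers */
};

static int demo_process_rx(struct demo_priv *priv, int budget);
static void demo_enable_queue_irqs(struct demo_priv *priv);

static int demo_poll(struct napi_struct *napi, int budget)
{
	struct demo_priv *priv = container_of(napi, struct demo_priv, napi);
	int done = demo_process_rx(priv, budget);

	/* re-enable queue IRQs only once NAPI agrees polling is finished */
	if (done < budget && napi_complete_done(napi, done)) {
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);
		demo_enable_queue_irqs(priv);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	return done;
}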
netif_tx_stop_all_queues(ndev); bitmap_clear(rdev->priv->opened_ports, rdev->port, 1); @@ -1466,8 +1489,10 @@ static int rswitch_stop(struct net_device *ndev) kfree(ts_info); } + spin_lock_irqsave(&rdev->priv->lock, flags); rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false); rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false); + spin_unlock_irqrestore(&rdev->priv->lock, flags); phy_stop(ndev->phydev); napi_disable(&rdev->napi); @@ -1479,8 +1504,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd { struct rswitch_device *rdev = netdev_priv(ndev); struct rswitch_gwca_queue *gq = rdev->tx_queue; + netdev_tx_t ret = NETDEV_TX_OK; struct rswitch_ext_desc *desc; - int ret = NETDEV_TX_OK; dma_addr_t dma_addr; if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) { @@ -1492,10 +1517,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd return ret; dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE); - if (dma_mapping_error(ndev->dev.parent, dma_addr)) { - dev_kfree_skb_any(skb); - return ret; - } + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto err_kfree; gq->skbs[gq->cur] = skb; desc = &gq->tx_ring[gq->cur]; @@ -1508,10 +1531,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd struct rswitch_gwca_ts_info *ts_info; ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC); - if (!ts_info) { - dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE); - return -ENOMEM; - } + if (!ts_info) + goto err_unmap; skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; rdev->ts_tag++; @@ -1534,6 +1555,14 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32)); return ret; + +err_unmap: + dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE); + +err_kfree: + dev_kfree_skb_any(skb); + + return ret; } static struct net_device_stats *rswitch_get_stats(struct net_device *ndev) @@ -1654,6 +1683,8 @@ static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info * static const struct ethtool_ops rswitch_ethtool_ops = { .get_ts_info = rswitch_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static const struct of_device_id renesas_eth_sw_of_table[] = { @@ -1670,6 +1701,12 @@ static void rswitch_etha_init(struct rswitch_private *priv, int index) etha->index = index; etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE; etha->coma_addr = priv->addr; + + /* MPIC.PSMCS = (clk [MHz] / (MDC frequency [MHz] * 2) - 1. + * Calculating PSMCS value as MDC frequency = 2.5MHz. So, multiply + * both the numerator and the denominator by 10. 
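/* Worked example for the PSMCS divider above, assuming a hypothetical
 * 100 MHz peripheral clock: 100000000 / 100000 = 1000 (the clock in
 * 0.1 MHz units), 1000 / (25 * 2) = 20, minus 1 gives PSMCS = 19 --
 * which matches evaluating 100 MHz / (2.5 MHz * 2) - 1 directly.
 */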
+ */ + etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1; } static int rswitch_device_alloc(struct rswitch_private *priv, int index) @@ -1756,7 +1793,8 @@ static void rswitch_device_free(struct rswitch_private *priv, int index) static int rswitch_init(struct rswitch_private *priv) { - int i, err; + unsigned int i; + int err; for (i = 0; i < RSWITCH_NUM_PORTS; i++) rswitch_etha_init(priv, i); @@ -1786,7 +1824,7 @@ static int rswitch_init(struct rswitch_private *priv) for (i = 0; i < RSWITCH_NUM_PORTS; i++) { err = rswitch_device_alloc(priv, i); if (err < 0) { - for (i--; i >= 0; i--) + for (; i-- > 0; ) rswitch_device_free(priv, i); goto err_device_alloc; } @@ -1854,8 +1892,14 @@ err_ts_queue_alloc: return err; } +static const struct soc_device_attribute rswitch_soc_no_speed_change[] = { + { .soc_id = "r8a779f0", .revision = "ES1.0" }, + { /* Sentinel */ } +}; + static int renesas_eth_sw_probe(struct platform_device *pdev) { + const struct soc_device_attribute *attr; struct rswitch_private *priv; struct resource *res; int ret; @@ -1869,6 +1913,15 @@ static int renesas_eth_sw_probe(struct platform_device *pdev) priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; + spin_lock_init(&priv->lock); + + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + + attr = soc_device_match(rswitch_soc_no_speed_change); + if (attr) + priv->etha_no_runtime_change = true; priv->ptp_priv = rcar_gen4_ptp_alloc(pdev); if (!priv->ptp_priv) @@ -1914,27 +1967,29 @@ static int renesas_eth_sw_probe(struct platform_device *pdev) static void rswitch_deinit(struct rswitch_private *priv) { - int i; + unsigned int i; rswitch_gwca_hw_deinit(priv); rcar_gen4_ptp_unregister(priv->ptp_priv); - for (i = 0; i < RSWITCH_NUM_PORTS; i++) { + rswitch_for_each_enabled_port(priv, i) { struct rswitch_device *rdev = priv->rdev[i]; - phy_exit(priv->rdev[i]->serdes); - rswitch_ether_port_deinit_one(rdev); unregister_netdev(rdev->ndev); - rswitch_device_free(priv, i); + rswitch_ether_port_deinit_one(rdev); + phy_exit(priv->rdev[i]->serdes); } + for (i = 0; i < RSWITCH_NUM_PORTS; i++) + rswitch_device_free(priv, i); + rswitch_gwca_ts_queue_free(priv); rswitch_gwca_linkfix_free(priv); rswitch_clock_disable(priv); } -static int renesas_eth_sw_remove(struct platform_device *pdev) +static void renesas_eth_sw_remove(struct platform_device *pdev) { struct rswitch_private *priv = platform_get_drvdata(pdev); @@ -1944,15 +1999,54 @@ static int renesas_eth_sw_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); platform_set_drvdata(pdev, NULL); +} + +static int renesas_eth_sw_suspend(struct device *dev) +{ + struct rswitch_private *priv = dev_get_drvdata(dev); + struct net_device *ndev; + unsigned int i; + + rswitch_for_each_enabled_port(priv, i) { + ndev = priv->rdev[i]->ndev; + if (netif_running(ndev)) { + netif_device_detach(ndev); + rswitch_stop(ndev); + } + if (priv->rdev[i]->serdes->init_count) + phy_exit(priv->rdev[i]->serdes); + } + + return 0; +} + +static int renesas_eth_sw_resume(struct device *dev) +{ + struct rswitch_private *priv = dev_get_drvdata(dev); + struct net_device *ndev; + unsigned int i; + + rswitch_for_each_enabled_port(priv, i) { + phy_init(priv->rdev[i]->serdes); + ndev = priv->rdev[i]->ndev; + if (netif_running(ndev)) { + rswitch_open(ndev); + netif_device_attach(ndev); + } + } return 0; } +static DEFINE_SIMPLE_DEV_PM_OPS(renesas_eth_sw_pm_ops, renesas_eth_sw_suspend, + renesas_eth_sw_resume); + static struct 
platform_driver renesas_eth_sw_driver_platform = { .probe = renesas_eth_sw_probe, - .remove = renesas_eth_sw_remove, + .remove_new = renesas_eth_sw_remove, .driver = { .name = "renesas_eth_sw", + .pm = pm_sleep_ptr(&renesas_eth_sw_pm_ops), .of_match_table = renesas_eth_sw_of_table, } }; diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h index bb9ed971a97c..27c9d3872c0e 100644 --- a/drivers/net/ethernet/renesas/rswitch.h +++ b/drivers/net/ethernet/renesas/rswitch.h @@ -20,7 +20,7 @@ else #define rswitch_for_each_enabled_port_continue_reverse(priv, i) \ - for (i--; i >= 0; i--) \ + for (; i-- > 0; ) \ if (priv->rdev[i]->disabled) \ continue; \ else @@ -915,6 +915,7 @@ struct rswitch_etha { bool external_phy; struct mii_bus *mii; phy_interface_t phy_interface; + u32 psmcs; u8 mac_addr[MAX_ADDR_LEN]; int link; int speed; @@ -1011,6 +1012,10 @@ struct rswitch_private { struct rswitch_etha etha[RSWITCH_NUM_PORTS]; struct rswitch_mfwd mfwd; + spinlock_t lock; /* lock interrupt registers' control */ + struct clk *clk; + + bool etha_no_runtime_change; bool gwca_halt; }; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index d8ec729825be..475e1e8c1d35 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -19,8 +19,6 @@ #include <linux/mdio-bitbang.h> #include <linux/netdevice.h> #include <linux/of.h> -#include <linux/of_device.h> -#include <linux/of_irq.h> #include <linux/of_net.h> #include <linux/phy.h> #include <linux/cache.h> @@ -3433,7 +3431,7 @@ out_release: return ret; } -static int sh_eth_drv_remove(struct platform_device *pdev) +static void sh_eth_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct sh_eth_private *mdp = netdev_priv(ndev); @@ -3443,8 +3441,6 @@ static int sh_eth_drv_remove(struct platform_device *pdev) sh_mdio_release(mdp); pm_runtime_disable(&pdev->dev); free_netdev(ndev); - - return 0; } #ifdef CONFIG_PM @@ -3564,7 +3560,7 @@ MODULE_DEVICE_TABLE(platform, sh_eth_id_table); static struct platform_driver sh_eth_driver = { .probe = sh_eth_drv_probe, - .remove = sh_eth_drv_remove, + .remove_new = sh_eth_drv_remove, .id_table = sh_eth_id_table, .driver = { .name = CARDNAME, diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c index fb59ff94509a..e6e130dbe1de 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c @@ -169,13 +169,11 @@ err_out: * Description: this function calls the main to free the net resources * and calls the platforms hook and release the resources (e.g. mem). 
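[Editor's note] Several hunks above change reverse iteration from for (i--; i >= 0; i--) to for (; i-- > 0; ) alongside the int to unsigned int conversion of the loop counter, both in rswitch_init()'s unwind path and in the rswitch_for_each_enabled_port_continue_reverse() macro. The reason is that an unsigned i satisfies i >= 0 forever, so the old form never terminates; testing i-- > 0 evaluates before the wrap and stops cleanly after visiting i-1 down to 0. A small standalone demonstration:

/* Why the reverse-iteration idiom changed when `i` became unsigned. */
#include <stdio.h>

int main(void)
{
        unsigned int i = 3;     /* e.g. number of ports already set up */

        /* WRONG for unsigned types: `i >= 0` is always true, so
         * `for (i--; i >= 0; i--)` wraps past 0 and never terminates.
         */

        /* Correct: test-then-decrement visits 2, 1, 0, then exits. */
        for (; i-- > 0; )
                printf("unwinding index %u\n", i);

        return 0;
}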
*/ -static int sxgbe_platform_remove(struct platform_device *pdev) +static void sxgbe_platform_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); sxgbe_drv_remove(ndev); - - return 0; } #ifdef CONFIG_PM @@ -226,7 +224,7 @@ MODULE_DEVICE_TABLE(of, sxgbe_dt_ids); static struct platform_driver sxgbe_platform_driver = { .probe = sxgbe_platform_probe, - .remove = sxgbe_platform_remove, + .remove_new = sxgbe_platform_remove, .driver = { .name = SXGBE_RESOURCE_NAME, .pm = &sxgbe_platform_pm_ops, diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index 96065dfc747b..76356dadf233 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -819,7 +819,7 @@ err_out: return err; } -static int sgiseeq_remove(struct platform_device *pdev) +static void sgiseeq_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sgiseeq_private *sp = netdev_priv(dev); @@ -828,13 +828,11 @@ static int sgiseeq_remove(struct platform_device *pdev) dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings, sp->srings_dma, DMA_BIDIRECTIONAL); free_netdev(dev); - - return 0; } static struct platform_driver sgiseeq_driver = { .probe = sgiseeq_probe, - .remove = sgiseeq_remove, + .remove_new = sgiseeq_remove, .driver = { .name = "sgiseeq", } diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile index 16293b58e0a8..8f446b9bd5ee 100644 --- a/drivers/net/ethernet/sfc/Makefile +++ b/drivers/net/ethernet/sfc/Makefile @@ -11,7 +11,7 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \ sfc-$(CONFIG_SFC_MTD) += mtd.o sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \ mae.o tc.o tc_bindings.o tc_counters.o \ - tc_encap_actions.o + tc_encap_actions.o tc_conntrack.o obj-$(CONFIG_SFC) += sfc.o diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h index 1f981dfe4bdc..89665fc9b8d0 100644 --- a/drivers/net/ethernet/sfc/bitfield.h +++ b/drivers/net/ethernet/sfc/bitfield.h @@ -26,6 +26,8 @@ /* Lowest bit numbers and widths */ #define EFX_DUMMY_FIELD_LBN 0 #define EFX_DUMMY_FIELD_WIDTH 0 +#define EFX_BYTE_0_LBN 0 +#define EFX_BYTE_0_WIDTH 8 #define EFX_WORD_0_LBN 0 #define EFX_WORD_0_WIDTH 16 #define EFX_WORD_1_LBN 16 diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 8c019f382a7f..6dfa062feebc 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -2209,7 +2209,7 @@ static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue) /* low two bits of label are what we want for type */ BUILD_BUG_ON((EFX_TXQ_TYPE_OUTER_CSUM | EFX_TXQ_TYPE_INNER_CSUM) != 3); tx_queue->type = tx_queue->label & 3; - return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf, + return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd, (tx_queue->ptr_mask + 1) * sizeof(efx_qword_t), GFP_KERNEL); @@ -4267,8 +4267,6 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .sriov_init = efx_ef10_sriov_init, .sriov_fini = efx_ef10_sriov_fini, .sriov_wanted = efx_ef10_sriov_wanted, - .sriov_reset = efx_ef10_sriov_reset, - .sriov_flr = efx_ef10_sriov_flr, .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac, .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan, .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk, diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 35d8e9811998..6da06931187d 100644 --- 
a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -224,7 +224,7 @@ int efx_ef100_init_datapath_caps(struct efx_nic *efx) static int ef100_ev_probe(struct efx_channel *channel) { /* Allocate an extra descriptor for the QMDA status completion entry */ - return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf, + return efx_nic_alloc_buffer(channel->efx, &channel->eventq, (channel->eventq_mask + 2) * sizeof(efx_qword_t), GFP_KERNEL); diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c index 849e5555bd12..e6b6be549581 100644 --- a/drivers/net/ethernet/sfc/ef100_tx.c +++ b/drivers/net/ethernet/sfc/ef100_tx.c @@ -23,7 +23,7 @@ int ef100_tx_probe(struct efx_tx_queue *tx_queue) { /* Allocate an extra descriptor for the QMDA status completion entry */ - return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf, + return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd, (tx_queue->ptr_mask + 2) * sizeof(efx_oword_t), GFP_KERNEL); @@ -101,8 +101,8 @@ static bool ef100_tx_can_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb) static efx_oword_t *ef100_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) { - if (likely(tx_queue->txd.buf.addr)) - return ((efx_oword_t *)tx_queue->txd.buf.addr) + index; + if (likely(tx_queue->txd.addr)) + return ((efx_oword_t *)tx_queue->txd.addr) + index; else return NULL; } diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h index 3c703ca878b0..be419c9c5dec 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.h +++ b/drivers/net/ethernet/sfc/ef10_sriov.h @@ -35,9 +35,7 @@ static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs); int efx_ef10_sriov_init(struct efx_nic *efx); -static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {} void efx_ef10_sriov_fini(struct efx_nic *efx); -static inline void efx_ef10_sriov_flr(struct efx_nic *efx, unsigned vf_i) {} int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf, const u8 *mac); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index d670a319b379..19f4b4d0b851 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -605,7 +605,6 @@ static const struct net_device_ops efx_netdev_ops = { #endif .ndo_get_phys_port_id = efx_get_phys_port_id, .ndo_get_phys_port_name = efx_get_phys_port_name, - .ndo_setup_tc = efx_setup_tc, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = efx_filter_rfs, #endif diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 4239c7ece123..48d3623735ba 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -30,8 +30,6 @@ static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct tx_queue, skb); } void efx_xmit_done_single(struct efx_tx_queue *tx_queue); -int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, - void *type_data); extern unsigned int efx_piobuf_size; /* RX */ diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index 41b33a75333c..c9e17a8208a9 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -713,9 +713,6 @@ int efx_probe_channels(struct efx_nic *efx) struct efx_channel *channel; int rc; - /* Restart special buffer allocation */ - efx->next_buffer_table = 0; - /* Probe channels in reverse, so that any 'extra' channels * use the start of the buffer table. 
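[Editor's note] The two ef100 probe hunks above size ring allocations as (ptr_mask + 2) entries, with the in-tree comment explaining that the extra descriptor holds the QMDA status completion entry. Assuming ptr_mask is the usual power-of-two ring size minus one (so ptr_mask + 1 covers the ring itself), a standalone illustration of the mask arithmetic:

/* Illustrative only: ring sizing from a wrap mask, as assumed above. */
#include <stdio.h>

int main(void)
{
        unsigned int entries = 512;             /* example power-of-two ring */
        unsigned int ptr_mask = entries - 1;    /* index wrap mask */

        /* Descriptor indices wrap cheaply with the mask... */
        unsigned int idx = (entries + 3) & ptr_mask;    /* == 3 */

        /* ...and allocating (ptr_mask + 2) reserves one slot beyond the
         * ring proper for the status completion entry.
         */
        printf("wrapped index %u, alloc %u descriptors\n", idx, ptr_mask + 2);
        return 0;
}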
This allows the traffic * channels to be resized without moving them or wasting the @@ -849,36 +846,14 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel, *ptp_channel = efx_ptp_channel(efx); struct efx_ptp_data *ptp_data = efx->ptp_data; - unsigned int i, next_buffer_table = 0; u32 old_rxq_entries, old_txq_entries; + unsigned int i; int rc, rc2; rc = efx_check_disabled(efx); if (rc) return rc; - /* Not all channels should be reallocated. We must avoid - * reallocating their buffer table entries. - */ - efx_for_each_channel(channel, efx) { - struct efx_rx_queue *rx_queue; - struct efx_tx_queue *tx_queue; - - if (channel->type->copy) - continue; - next_buffer_table = max(next_buffer_table, - channel->eventq.index + - channel->eventq.entries); - efx_for_each_channel_rx_queue(rx_queue, channel) - next_buffer_table = max(next_buffer_table, - rx_queue->rxd.index + - rx_queue->rxd.entries); - efx_for_each_channel_tx_queue(tx_queue, channel) - next_buffer_table = max(next_buffer_table, - tx_queue->txd.index + - tx_queue->txd.entries); - } - efx_device_detach_sync(efx); efx_stop_all(efx); efx_soft_disable_interrupts(efx); @@ -904,9 +879,6 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) for (i = 0; i < efx->n_channels; i++) swap(efx->channel[i], other_channel[i]); - /* Restart buffer table allocation */ - efx->next_buffer_table = next_buffer_table; - for (i = 0; i < efx->n_channels; i++) { channel = efx->channel[i]; if (!channel->type->copy) @@ -1288,7 +1260,7 @@ static int efx_poll(struct napi_struct *napi, int budget) spent = efx_process_channel(channel, budget); - xdp_do_flush_map(); + xdp_do_flush(); if (spent < budget) { if (efx_channel_has_rx_queue(channel) && diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index 361687de308d..175bd9cdfdac 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -35,11 +35,6 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value"); /* This is the time (in jiffies) between invocations of the hardware * monitor. - * On Falcon-based NICs, this will: - * - Check the on-board hardware monitor; - * - Poll the link state and reconfigure the hardware as necessary. - * On Siena-based NICs for power systems with EEH support, this will give EEH a - * chance to start. */ static unsigned int efx_monitor_interval = 1 * HZ; @@ -785,8 +780,6 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) mutex_unlock(&efx->rss_lock); efx->type->filter_table_restore(efx); up_write(&efx->filter_sem); - if (efx->type->sriov_reset) - efx->type->sriov_reset(efx); mutex_unlock(&efx->mac_lock); diff --git a/drivers/net/ethernet/sfc/farch_regs.h b/drivers/net/ethernet/sfc/farch_regs.h deleted file mode 100644 index d138be423e63..000000000000 --- a/drivers/net/ethernet/sfc/farch_regs.h +++ /dev/null @@ -1,2929 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/**************************************************************************** - * Driver for Solarflare network controllers and boards - * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2012 Solarflare Communications Inc. 
- */ - -#ifndef EFX_FARCH_REGS_H -#define EFX_FARCH_REGS_H - -/* - * Falcon hardware architecture definitions have a name prefix following - * the format: - * - * F<type>_<min-rev><max-rev>_ - * - * The following <type> strings are used: - * - * MMIO register MC register Host memory structure - * ------------------------------------------------------------- - * Address R MCR - * Bitfield RF MCRF SF - * Enumerator FE MCFE SE - * - * <min-rev> is the first revision to which the definition applies: - * - * A: Falcon A1 (SFC4000AB) - * B: Falcon B0 (SFC4000BA) - * C: Siena A0 (SFL9021AA) - * - * If the definition has been changed or removed in later revisions - * then <max-rev> is the last revision to which the definition applies; - * otherwise it is "Z". - */ - -/************************************************************************** - * - * Falcon/Siena registers and descriptors - * - ************************************************************************** - */ - -/* ADR_REGION_REG: Address region register */ -#define FR_AZ_ADR_REGION 0x00000000 -#define FRF_AZ_ADR_REGION3_LBN 96 -#define FRF_AZ_ADR_REGION3_WIDTH 18 -#define FRF_AZ_ADR_REGION2_LBN 64 -#define FRF_AZ_ADR_REGION2_WIDTH 18 -#define FRF_AZ_ADR_REGION1_LBN 32 -#define FRF_AZ_ADR_REGION1_WIDTH 18 -#define FRF_AZ_ADR_REGION0_LBN 0 -#define FRF_AZ_ADR_REGION0_WIDTH 18 - -/* INT_EN_REG_KER: Kernel driver Interrupt enable register */ -#define FR_AZ_INT_EN_KER 0x00000010 -#define FRF_AZ_KER_INT_LEVE_SEL_LBN 8 -#define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6 -#define FRF_AZ_KER_INT_CHAR_LBN 4 -#define FRF_AZ_KER_INT_CHAR_WIDTH 1 -#define FRF_AZ_KER_INT_KER_LBN 3 -#define FRF_AZ_KER_INT_KER_WIDTH 1 -#define FRF_AZ_DRV_INT_EN_KER_LBN 0 -#define FRF_AZ_DRV_INT_EN_KER_WIDTH 1 - -/* INT_EN_REG_CHAR: Char Driver interrupt enable register */ -#define FR_BZ_INT_EN_CHAR 0x00000020 -#define FRF_BZ_CHAR_INT_LEVE_SEL_LBN 8 -#define FRF_BZ_CHAR_INT_LEVE_SEL_WIDTH 6 -#define FRF_BZ_CHAR_INT_CHAR_LBN 4 -#define FRF_BZ_CHAR_INT_CHAR_WIDTH 1 -#define FRF_BZ_CHAR_INT_KER_LBN 3 -#define FRF_BZ_CHAR_INT_KER_WIDTH 1 -#define FRF_BZ_DRV_INT_EN_CHAR_LBN 0 -#define FRF_BZ_DRV_INT_EN_CHAR_WIDTH 1 - -/* INT_ADR_REG_KER: Interrupt host address for Kernel driver */ -#define FR_AZ_INT_ADR_KER 0x00000030 -#define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64 -#define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1 -#define FRF_AZ_INT_ADR_KER_LBN 0 -#define FRF_AZ_INT_ADR_KER_WIDTH 64 - -/* INT_ADR_REG_CHAR: Interrupt host address for Char driver */ -#define FR_BZ_INT_ADR_CHAR 0x00000040 -#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_LBN 64 -#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1 -#define FRF_BZ_INT_ADR_CHAR_LBN 0 -#define FRF_BZ_INT_ADR_CHAR_WIDTH 64 - -/* INT_ACK_KER: Kernel interrupt acknowledge register */ -#define FR_AA_INT_ACK_KER 0x00000050 -#define FRF_AA_INT_ACK_KER_FIELD_LBN 0 -#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32 - -/* INT_ISR0_REG: Function 0 Interrupt Acknowledge Status register */ -#define FR_BZ_INT_ISR0 0x00000090 -#define FRF_BZ_INT_ISR_REG_LBN 0 -#define FRF_BZ_INT_ISR_REG_WIDTH 64 - -/* HW_INIT_REG: Hardware initialization register */ -#define FR_AZ_HW_INIT 0x000000c0 -#define FRF_BB_BDMRD_CPLF_FULL_LBN 124 -#define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1 -#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121 -#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3 -#define FRF_CZ_TX_MRG_TAGS_LBN 120 -#define FRF_CZ_TX_MRG_TAGS_WIDTH 1 -#define FRF_AB_TRGT_MASK_ALL_LBN 100 -#define FRF_AB_TRGT_MASK_ALL_WIDTH 1 -#define FRF_AZ_DOORBELL_DROP_LBN 92 -#define FRF_AZ_DOORBELL_DROP_WIDTH 8 -#define 
FRF_AB_TX_RREQ_MASK_EN_LBN 76 -#define FRF_AB_TX_RREQ_MASK_EN_WIDTH 1 -#define FRF_AB_PE_EIDLE_DIS_LBN 75 -#define FRF_AB_PE_EIDLE_DIS_WIDTH 1 -#define FRF_AA_FC_BLOCKING_EN_LBN 45 -#define FRF_AA_FC_BLOCKING_EN_WIDTH 1 -#define FRF_BZ_B2B_REQ_EN_LBN 45 -#define FRF_BZ_B2B_REQ_EN_WIDTH 1 -#define FRF_AA_B2B_REQ_EN_LBN 44 -#define FRF_AA_B2B_REQ_EN_WIDTH 1 -#define FRF_BB_FC_BLOCKING_EN_LBN 44 -#define FRF_BB_FC_BLOCKING_EN_WIDTH 1 -#define FRF_AZ_POST_WR_MASK_LBN 40 -#define FRF_AZ_POST_WR_MASK_WIDTH 4 -#define FRF_AZ_TLP_TC_LBN 34 -#define FRF_AZ_TLP_TC_WIDTH 3 -#define FRF_AZ_TLP_ATTR_LBN 32 -#define FRF_AZ_TLP_ATTR_WIDTH 2 -#define FRF_AB_INTB_VEC_LBN 24 -#define FRF_AB_INTB_VEC_WIDTH 5 -#define FRF_AB_INTA_VEC_LBN 16 -#define FRF_AB_INTA_VEC_WIDTH 5 -#define FRF_AZ_WD_TIMER_LBN 8 -#define FRF_AZ_WD_TIMER_WIDTH 8 -#define FRF_AZ_US_DISABLE_LBN 5 -#define FRF_AZ_US_DISABLE_WIDTH 1 -#define FRF_AZ_TLP_EP_LBN 4 -#define FRF_AZ_TLP_EP_WIDTH 1 -#define FRF_AZ_ATTR_SEL_LBN 3 -#define FRF_AZ_ATTR_SEL_WIDTH 1 -#define FRF_AZ_TD_SEL_LBN 1 -#define FRF_AZ_TD_SEL_WIDTH 1 -#define FRF_AZ_TLP_TD_LBN 0 -#define FRF_AZ_TLP_TD_WIDTH 1 - -/* EE_SPI_HCMD_REG: SPI host command register */ -#define FR_AB_EE_SPI_HCMD 0x00000100 -#define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31 -#define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1 -#define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28 -#define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1 -#define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24 -#define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1 -#define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16 -#define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5 -#define FRF_AB_EE_SPI_HCMD_READ_LBN 15 -#define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1 -#define FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12 -#define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2 -#define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8 -#define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2 -#define FRF_AB_EE_SPI_HCMD_ENC_LBN 0 -#define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8 - -/* USR_EV_CFG: User Level Event Configuration register */ -#define FR_CZ_USR_EV_CFG 0x00000100 -#define FRF_CZ_USREV_DIS_LBN 16 -#define FRF_CZ_USREV_DIS_WIDTH 1 -#define FRF_CZ_DFLT_EVQ_LBN 0 -#define FRF_CZ_DFLT_EVQ_WIDTH 10 - -/* EE_SPI_HADR_REG: SPI host address register */ -#define FR_AB_EE_SPI_HADR 0x00000110 -#define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24 -#define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8 -#define FRF_AB_EE_SPI_HADR_ADR_LBN 0 -#define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24 - -/* EE_SPI_HDATA_REG: SPI host data register */ -#define FR_AB_EE_SPI_HDATA 0x00000120 -#define FRF_AB_EE_SPI_HDATA3_LBN 96 -#define FRF_AB_EE_SPI_HDATA3_WIDTH 32 -#define FRF_AB_EE_SPI_HDATA2_LBN 64 -#define FRF_AB_EE_SPI_HDATA2_WIDTH 32 -#define FRF_AB_EE_SPI_HDATA1_LBN 32 -#define FRF_AB_EE_SPI_HDATA1_WIDTH 32 -#define FRF_AB_EE_SPI_HDATA0_LBN 0 -#define FRF_AB_EE_SPI_HDATA0_WIDTH 32 - -/* EE_BASE_PAGE_REG: Expansion ROM base mirror register */ -#define FR_AB_EE_BASE_PAGE 0x00000130 -#define FRF_AB_EE_EXPROM_MASK_LBN 16 -#define FRF_AB_EE_EXPROM_MASK_WIDTH 13 -#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0 -#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13 - -/* EE_VPD_CFG0_REG: SPI/VPD configuration register 0 */ -#define FR_AB_EE_VPD_CFG0 0x00000140 -#define FRF_AB_EE_SF_FASTRD_EN_LBN 127 -#define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1 -#define FRF_AB_EE_SF_CLOCK_DIV_LBN 120 -#define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7 -#define FRF_AB_EE_VPD_WIP_POLL_LBN 119 -#define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1 -#define FRF_AB_EE_EE_CLOCK_DIV_LBN 112 -#define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7 -#define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96 -#define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16 
-#define FRF_AB_EE_VPDW_LENGTH_LBN 80 -#define FRF_AB_EE_VPDW_LENGTH_WIDTH 15 -#define FRF_AB_EE_VPDW_BASE_LBN 64 -#define FRF_AB_EE_VPDW_BASE_WIDTH 15 -#define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56 -#define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8 -#define FRF_AB_EE_VPD_BASE_LBN 32 -#define FRF_AB_EE_VPD_BASE_WIDTH 24 -#define FRF_AB_EE_VPD_LENGTH_LBN 16 -#define FRF_AB_EE_VPD_LENGTH_WIDTH 15 -#define FRF_AB_EE_VPD_AD_SIZE_LBN 8 -#define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5 -#define FRF_AB_EE_VPD_ACCESS_ON_LBN 5 -#define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1 -#define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4 -#define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1 -#define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2 -#define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1 -#define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1 -#define FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1 -#define FRF_AB_EE_VPD_EN_LBN 0 -#define FRF_AB_EE_VPD_EN_WIDTH 1 - -/* EE_VPD_SW_CNTL_REG: VPD access SW control register */ -#define FR_AB_EE_VPD_SW_CNTL 0x00000150 -#define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31 -#define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1 -#define FRF_AB_EE_VPD_CYC_WRITE_LBN 28 -#define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1 -#define FRF_AB_EE_VPD_CYC_ADR_LBN 0 -#define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15 - -/* EE_VPD_SW_DATA_REG: VPD access SW data register */ -#define FR_AB_EE_VPD_SW_DATA 0x00000160 -#define FRF_AB_EE_VPD_CYC_DAT_LBN 0 -#define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32 - -/* PBMX_DBG_IADDR_REG: Capture Module address register */ -#define FR_CZ_PBMX_DBG_IADDR 0x000001f0 -#define FRF_CZ_PBMX_DBG_IADDR_LBN 0 -#define FRF_CZ_PBMX_DBG_IADDR_WIDTH 32 - -/* PCIE_CORE_INDIRECT_REG: Indirect Access to PCIE Core registers */ -#define FR_BB_PCIE_CORE_INDIRECT 0x000001f0 -#define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32 -#define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32 -#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15 -#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1 -#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0 -#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12 - -/* PBMX_DBG_IDATA_REG: Capture Module data register */ -#define FR_CZ_PBMX_DBG_IDATA 0x000001f8 -#define FRF_CZ_PBMX_DBG_IDATA_LBN 0 -#define FRF_CZ_PBMX_DBG_IDATA_WIDTH 64 - -/* NIC_STAT_REG: NIC status register */ -#define FR_AB_NIC_STAT 0x00000200 -#define FRF_BB_AER_DIS_LBN 34 -#define FRF_BB_AER_DIS_WIDTH 1 -#define FRF_BB_EE_STRAP_EN_LBN 31 -#define FRF_BB_EE_STRAP_EN_WIDTH 1 -#define FRF_BB_EE_STRAP_LBN 24 -#define FRF_BB_EE_STRAP_WIDTH 4 -#define FRF_BB_REVISION_ID_LBN 17 -#define FRF_BB_REVISION_ID_WIDTH 7 -#define FRF_AB_ONCHIP_SRAM_LBN 16 -#define FRF_AB_ONCHIP_SRAM_WIDTH 1 -#define FRF_AB_SF_PRST_LBN 9 -#define FRF_AB_SF_PRST_WIDTH 1 -#define FRF_AB_EE_PRST_LBN 8 -#define FRF_AB_EE_PRST_WIDTH 1 -#define FRF_AB_ATE_MODE_LBN 3 -#define FRF_AB_ATE_MODE_WIDTH 1 -#define FRF_AB_STRAP_PINS_LBN 0 -#define FRF_AB_STRAP_PINS_WIDTH 3 - -/* GPIO_CTL_REG: GPIO control register */ -#define FR_AB_GPIO_CTL 0x00000210 -#define FRF_AB_GPIO_OUT3_LBN 112 -#define FRF_AB_GPIO_OUT3_WIDTH 16 -#define FRF_AB_GPIO_IN3_LBN 104 -#define FRF_AB_GPIO_IN3_WIDTH 8 -#define FRF_AB_GPIO_PWRUP_VALUE3_LBN 96 -#define FRF_AB_GPIO_PWRUP_VALUE3_WIDTH 8 -#define FRF_AB_GPIO_OUT2_LBN 80 -#define FRF_AB_GPIO_OUT2_WIDTH 16 -#define FRF_AB_GPIO_IN2_LBN 72 -#define FRF_AB_GPIO_IN2_WIDTH 8 -#define FRF_AB_GPIO_PWRUP_VALUE2_LBN 64 -#define FRF_AB_GPIO_PWRUP_VALUE2_WIDTH 8 -#define FRF_AB_GPIO15_OEN_LBN 63 -#define FRF_AB_GPIO15_OEN_WIDTH 1 -#define FRF_AB_GPIO14_OEN_LBN 62 -#define FRF_AB_GPIO14_OEN_WIDTH 1 -#define FRF_AB_GPIO13_OEN_LBN 61 -#define FRF_AB_GPIO13_OEN_WIDTH 1 -#define 
FRF_AB_GPIO12_OEN_LBN 60 -#define FRF_AB_GPIO12_OEN_WIDTH 1 -#define FRF_AB_GPIO11_OEN_LBN 59 -#define FRF_AB_GPIO11_OEN_WIDTH 1 -#define FRF_AB_GPIO10_OEN_LBN 58 -#define FRF_AB_GPIO10_OEN_WIDTH 1 -#define FRF_AB_GPIO9_OEN_LBN 57 -#define FRF_AB_GPIO9_OEN_WIDTH 1 -#define FRF_AB_GPIO8_OEN_LBN 56 -#define FRF_AB_GPIO8_OEN_WIDTH 1 -#define FRF_AB_GPIO15_OUT_LBN 55 -#define FRF_AB_GPIO15_OUT_WIDTH 1 -#define FRF_AB_GPIO14_OUT_LBN 54 -#define FRF_AB_GPIO14_OUT_WIDTH 1 -#define FRF_AB_GPIO13_OUT_LBN 53 -#define FRF_AB_GPIO13_OUT_WIDTH 1 -#define FRF_AB_GPIO12_OUT_LBN 52 -#define FRF_AB_GPIO12_OUT_WIDTH 1 -#define FRF_AB_GPIO11_OUT_LBN 51 -#define FRF_AB_GPIO11_OUT_WIDTH 1 -#define FRF_AB_GPIO10_OUT_LBN 50 -#define FRF_AB_GPIO10_OUT_WIDTH 1 -#define FRF_AB_GPIO9_OUT_LBN 49 -#define FRF_AB_GPIO9_OUT_WIDTH 1 -#define FRF_AB_GPIO8_OUT_LBN 48 -#define FRF_AB_GPIO8_OUT_WIDTH 1 -#define FRF_AB_GPIO15_IN_LBN 47 -#define FRF_AB_GPIO15_IN_WIDTH 1 -#define FRF_AB_GPIO14_IN_LBN 46 -#define FRF_AB_GPIO14_IN_WIDTH 1 -#define FRF_AB_GPIO13_IN_LBN 45 -#define FRF_AB_GPIO13_IN_WIDTH 1 -#define FRF_AB_GPIO12_IN_LBN 44 -#define FRF_AB_GPIO12_IN_WIDTH 1 -#define FRF_AB_GPIO11_IN_LBN 43 -#define FRF_AB_GPIO11_IN_WIDTH 1 -#define FRF_AB_GPIO10_IN_LBN 42 -#define FRF_AB_GPIO10_IN_WIDTH 1 -#define FRF_AB_GPIO9_IN_LBN 41 -#define FRF_AB_GPIO9_IN_WIDTH 1 -#define FRF_AB_GPIO8_IN_LBN 40 -#define FRF_AB_GPIO8_IN_WIDTH 1 -#define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39 -#define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38 -#define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37 -#define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36 -#define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35 -#define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34 -#define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33 -#define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32 -#define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_CLK156_OUT_EN_LBN 31 -#define FRF_AB_CLK156_OUT_EN_WIDTH 1 -#define FRF_AB_USE_NIC_CLK_LBN 30 -#define FRF_AB_USE_NIC_CLK_WIDTH 1 -#define FRF_AB_GPIO5_OEN_LBN 29 -#define FRF_AB_GPIO5_OEN_WIDTH 1 -#define FRF_AB_GPIO4_OEN_LBN 28 -#define FRF_AB_GPIO4_OEN_WIDTH 1 -#define FRF_AB_GPIO3_OEN_LBN 27 -#define FRF_AB_GPIO3_OEN_WIDTH 1 -#define FRF_AB_GPIO2_OEN_LBN 26 -#define FRF_AB_GPIO2_OEN_WIDTH 1 -#define FRF_AB_GPIO1_OEN_LBN 25 -#define FRF_AB_GPIO1_OEN_WIDTH 1 -#define FRF_AB_GPIO0_OEN_LBN 24 -#define FRF_AB_GPIO0_OEN_WIDTH 1 -#define FRF_AB_GPIO7_OUT_LBN 23 -#define FRF_AB_GPIO7_OUT_WIDTH 1 -#define FRF_AB_GPIO6_OUT_LBN 22 -#define FRF_AB_GPIO6_OUT_WIDTH 1 -#define FRF_AB_GPIO5_OUT_LBN 21 -#define FRF_AB_GPIO5_OUT_WIDTH 1 -#define FRF_AB_GPIO4_OUT_LBN 20 -#define FRF_AB_GPIO4_OUT_WIDTH 1 -#define FRF_AB_GPIO3_OUT_LBN 19 -#define FRF_AB_GPIO3_OUT_WIDTH 1 -#define FRF_AB_GPIO2_OUT_LBN 18 -#define FRF_AB_GPIO2_OUT_WIDTH 1 -#define FRF_AB_GPIO1_OUT_LBN 17 -#define FRF_AB_GPIO1_OUT_WIDTH 1 -#define FRF_AB_GPIO0_OUT_LBN 16 -#define FRF_AB_GPIO0_OUT_WIDTH 1 -#define FRF_AB_GPIO7_IN_LBN 15 -#define FRF_AB_GPIO7_IN_WIDTH 1 -#define FRF_AB_GPIO6_IN_LBN 14 -#define FRF_AB_GPIO6_IN_WIDTH 1 -#define FRF_AB_GPIO5_IN_LBN 13 -#define FRF_AB_GPIO5_IN_WIDTH 1 -#define FRF_AB_GPIO4_IN_LBN 12 -#define FRF_AB_GPIO4_IN_WIDTH 1 -#define FRF_AB_GPIO3_IN_LBN 11 -#define FRF_AB_GPIO3_IN_WIDTH 1 -#define FRF_AB_GPIO2_IN_LBN 10 -#define 
FRF_AB_GPIO2_IN_WIDTH 1 -#define FRF_AB_GPIO1_IN_LBN 9 -#define FRF_AB_GPIO1_IN_WIDTH 1 -#define FRF_AB_GPIO0_IN_LBN 8 -#define FRF_AB_GPIO0_IN_WIDTH 1 -#define FRF_AB_GPIO7_PWRUP_VALUE_LBN 7 -#define FRF_AB_GPIO7_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO6_PWRUP_VALUE_LBN 6 -#define FRF_AB_GPIO6_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5 -#define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4 -#define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3 -#define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2 -#define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1 -#define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1 -#define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0 -#define FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1 - -/* GLB_CTL_REG: Global control register */ -#define FR_AB_GLB_CTL 0x00000220 -#define FRF_AB_EXT_PHY_RST_CTL_LBN 63 -#define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1 -#define FRF_AB_XAUI_SD_RST_CTL_LBN 62 -#define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1 -#define FRF_AB_PCIE_SD_RST_CTL_LBN 61 -#define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1 -#define FRF_AA_PCIX_RST_CTL_LBN 60 -#define FRF_AA_PCIX_RST_CTL_WIDTH 1 -#define FRF_BB_BIU_RST_CTL_LBN 60 -#define FRF_BB_BIU_RST_CTL_WIDTH 1 -#define FRF_AB_PCIE_STKY_RST_CTL_LBN 59 -#define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1 -#define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58 -#define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1 -#define FRF_AB_PCIE_CORE_RST_CTL_LBN 57 -#define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1 -#define FRF_AB_XGRX_RST_CTL_LBN 56 -#define FRF_AB_XGRX_RST_CTL_WIDTH 1 -#define FRF_AB_XGTX_RST_CTL_LBN 55 -#define FRF_AB_XGTX_RST_CTL_WIDTH 1 -#define FRF_AB_EM_RST_CTL_LBN 54 -#define FRF_AB_EM_RST_CTL_WIDTH 1 -#define FRF_AB_EV_RST_CTL_LBN 53 -#define FRF_AB_EV_RST_CTL_WIDTH 1 -#define FRF_AB_SR_RST_CTL_LBN 52 -#define FRF_AB_SR_RST_CTL_WIDTH 1 -#define FRF_AB_RX_RST_CTL_LBN 51 -#define FRF_AB_RX_RST_CTL_WIDTH 1 -#define FRF_AB_TX_RST_CTL_LBN 50 -#define FRF_AB_TX_RST_CTL_WIDTH 1 -#define FRF_AB_EE_RST_CTL_LBN 49 -#define FRF_AB_EE_RST_CTL_WIDTH 1 -#define FRF_AB_CS_RST_CTL_LBN 48 -#define FRF_AB_CS_RST_CTL_WIDTH 1 -#define FRF_AB_HOT_RST_CTL_LBN 40 -#define FRF_AB_HOT_RST_CTL_WIDTH 2 -#define FRF_AB_RST_EXT_PHY_LBN 31 -#define FRF_AB_RST_EXT_PHY_WIDTH 1 -#define FRF_AB_RST_XAUI_SD_LBN 30 -#define FRF_AB_RST_XAUI_SD_WIDTH 1 -#define FRF_AB_RST_PCIE_SD_LBN 29 -#define FRF_AB_RST_PCIE_SD_WIDTH 1 -#define FRF_AA_RST_PCIX_LBN 28 -#define FRF_AA_RST_PCIX_WIDTH 1 -#define FRF_BB_RST_BIU_LBN 28 -#define FRF_BB_RST_BIU_WIDTH 1 -#define FRF_AB_RST_PCIE_STKY_LBN 27 -#define FRF_AB_RST_PCIE_STKY_WIDTH 1 -#define FRF_AB_RST_PCIE_NSTKY_LBN 26 -#define FRF_AB_RST_PCIE_NSTKY_WIDTH 1 -#define FRF_AB_RST_PCIE_CORE_LBN 25 -#define FRF_AB_RST_PCIE_CORE_WIDTH 1 -#define FRF_AB_RST_XGRX_LBN 24 -#define FRF_AB_RST_XGRX_WIDTH 1 -#define FRF_AB_RST_XGTX_LBN 23 -#define FRF_AB_RST_XGTX_WIDTH 1 -#define FRF_AB_RST_EM_LBN 22 -#define FRF_AB_RST_EM_WIDTH 1 -#define FRF_AB_RST_EV_LBN 21 -#define FRF_AB_RST_EV_WIDTH 1 -#define FRF_AB_RST_SR_LBN 20 -#define FRF_AB_RST_SR_WIDTH 1 -#define FRF_AB_RST_RX_LBN 19 -#define FRF_AB_RST_RX_WIDTH 1 -#define FRF_AB_RST_TX_LBN 18 -#define FRF_AB_RST_TX_WIDTH 1 -#define FRF_AB_RST_SF_LBN 17 -#define FRF_AB_RST_SF_WIDTH 1 -#define FRF_AB_RST_CS_LBN 16 -#define FRF_AB_RST_CS_WIDTH 1 -#define FRF_AB_INT_RST_DUR_LBN 4 -#define FRF_AB_INT_RST_DUR_WIDTH 3 -#define FRF_AB_EXT_PHY_RST_DUR_LBN 1 -#define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3 -#define FFE_AB_EXT_PHY_RST_DUR_10240US 7 -#define 
FFE_AB_EXT_PHY_RST_DUR_5120US 6 -#define FFE_AB_EXT_PHY_RST_DUR_2560US 5 -#define FFE_AB_EXT_PHY_RST_DUR_1280US 4 -#define FFE_AB_EXT_PHY_RST_DUR_640US 3 -#define FFE_AB_EXT_PHY_RST_DUR_320US 2 -#define FFE_AB_EXT_PHY_RST_DUR_160US 1 -#define FFE_AB_EXT_PHY_RST_DUR_80US 0 -#define FRF_AB_SWRST_LBN 0 -#define FRF_AB_SWRST_WIDTH 1 - -/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */ -#define FR_AZ_FATAL_INTR_KER 0x00000230 -#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44 -#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1 -#define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43 -#define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1 -#define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43 -#define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1 -#define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42 -#define FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1 -#define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41 -#define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1 -#define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40 -#define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1 -#define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39 -#define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1 -#define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38 -#define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1 -#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37 -#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1 -#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36 -#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1 -#define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35 -#define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1 -#define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34 -#define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1 -#define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33 -#define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1 -#define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32 -#define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1 -#define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12 -#define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1 -#define FRF_AB_PCI_BUSERR_INT_KER_LBN 11 -#define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1 -#define FRF_CZ_MBU_PERR_INT_KER_LBN 11 -#define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1 -#define FRF_AZ_SRAM_OOB_INT_KER_LBN 10 -#define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1 -#define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9 -#define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1 -#define FRF_AZ_MEM_PERR_INT_KER_LBN 8 -#define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1 -#define FRF_AZ_RBUF_OWN_INT_KER_LBN 7 -#define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1 -#define FRF_AZ_TBUF_OWN_INT_KER_LBN 6 -#define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1 -#define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5 -#define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1 -#define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4 -#define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1 -#define FRF_AZ_EVQ_OWN_INT_KER_LBN 3 -#define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1 -#define FRF_AZ_EVF_OFLO_INT_KER_LBN 2 -#define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1 -#define FRF_AZ_ILL_ADR_INT_KER_LBN 1 -#define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1 -#define FRF_AZ_SRM_PERR_INT_KER_LBN 0 -#define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1 - -/* FATAL_INTR_REG_CHAR: Fatal interrupt register for Char */ -#define FR_BZ_FATAL_INTR_CHAR 0x00000240 -#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44 -#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1 -#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_LBN 43 -#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1 -#define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43 -#define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1 -#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_LBN 42 -#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1 -#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_LBN 41 -#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1 -#define FRF_BZ_MEM_PERR_INT_CHAR_EN_LBN 40 -#define FRF_BZ_MEM_PERR_INT_CHAR_EN_WIDTH 1 -#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_LBN 39 -#define 
FRF_BZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1 -#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_LBN 38 -#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1 -#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37 -#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1 -#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36 -#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1 -#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_LBN 35 -#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1 -#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_LBN 34 -#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1 -#define FRF_BZ_ILL_ADR_INT_CHAR_EN_LBN 33 -#define FRF_BZ_ILL_ADR_INT_CHAR_EN_WIDTH 1 -#define FRF_BZ_SRM_PERR_INT_CHAR_EN_LBN 32 -#define FRF_BZ_SRM_PERR_INT_CHAR_EN_WIDTH 1 -#define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12 -#define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1 -#define FRF_BB_PCI_BUSERR_INT_CHAR_LBN 11 -#define FRF_BB_PCI_BUSERR_INT_CHAR_WIDTH 1 -#define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11 -#define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1 -#define FRF_BZ_SRAM_OOB_INT_CHAR_LBN 10 -#define FRF_BZ_SRAM_OOB_INT_CHAR_WIDTH 1 -#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_LBN 9 -#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1 -#define FRF_BZ_MEM_PERR_INT_CHAR_LBN 8 -#define FRF_BZ_MEM_PERR_INT_CHAR_WIDTH 1 -#define FRF_BZ_RBUF_OWN_INT_CHAR_LBN 7 -#define FRF_BZ_RBUF_OWN_INT_CHAR_WIDTH 1 -#define FRF_BZ_TBUF_OWN_INT_CHAR_LBN 6 -#define FRF_BZ_TBUF_OWN_INT_CHAR_WIDTH 1 -#define FRF_BZ_RDESCQ_OWN_INT_CHAR_LBN 5 -#define FRF_BZ_RDESCQ_OWN_INT_CHAR_WIDTH 1 -#define FRF_BZ_TDESCQ_OWN_INT_CHAR_LBN 4 -#define FRF_BZ_TDESCQ_OWN_INT_CHAR_WIDTH 1 -#define FRF_BZ_EVQ_OWN_INT_CHAR_LBN 3 -#define FRF_BZ_EVQ_OWN_INT_CHAR_WIDTH 1 -#define FRF_BZ_EVF_OFLO_INT_CHAR_LBN 2 -#define FRF_BZ_EVF_OFLO_INT_CHAR_WIDTH 1 -#define FRF_BZ_ILL_ADR_INT_CHAR_LBN 1 -#define FRF_BZ_ILL_ADR_INT_CHAR_WIDTH 1 -#define FRF_BZ_SRM_PERR_INT_CHAR_LBN 0 -#define FRF_BZ_SRM_PERR_INT_CHAR_WIDTH 1 - -/* DP_CTRL_REG: Datapath control register */ -#define FR_BZ_DP_CTRL 0x00000250 -#define FRF_BZ_FLS_EVQ_ID_LBN 0 -#define FRF_BZ_FLS_EVQ_ID_WIDTH 12 - -/* MEM_STAT_REG: Memory status register */ -#define FR_AZ_MEM_STAT 0x00000260 -#define FRF_AB_MEM_PERR_VEC_LBN 53 -#define FRF_AB_MEM_PERR_VEC_WIDTH 38 -#define FRF_AB_MBIST_CORR_LBN 38 -#define FRF_AB_MBIST_CORR_WIDTH 15 -#define FRF_AB_MBIST_ERR_LBN 0 -#define FRF_AB_MBIST_ERR_WIDTH 40 -#define FRF_CZ_MEM_PERR_VEC_LBN 0 -#define FRF_CZ_MEM_PERR_VEC_WIDTH 35 - -/* CS_DEBUG_REG: Debug register */ -#define FR_AZ_CS_DEBUG 0x00000270 -#define FRF_AB_GLB_DEBUG2_SEL_LBN 50 -#define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3 -#define FRF_AB_DEBUG_BLK_SEL2_LBN 47 -#define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3 -#define FRF_AB_DEBUG_BLK_SEL1_LBN 44 -#define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3 -#define FRF_AB_DEBUG_BLK_SEL0_LBN 41 -#define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3 -#define FRF_CZ_CS_PORT_NUM_LBN 40 -#define FRF_CZ_CS_PORT_NUM_WIDTH 2 -#define FRF_AB_MISC_DEBUG_ADDR_LBN 36 -#define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5 -#define FRF_AB_SERDES_DEBUG_ADDR_LBN 31 -#define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5 -#define FRF_CZ_CS_PORT_FPE_LBN 1 -#define FRF_CZ_CS_PORT_FPE_WIDTH 35 -#define FRF_AB_EM_DEBUG_ADDR_LBN 26 -#define FRF_AB_EM_DEBUG_ADDR_WIDTH 5 -#define FRF_AB_SR_DEBUG_ADDR_LBN 21 -#define FRF_AB_SR_DEBUG_ADDR_WIDTH 5 -#define FRF_AB_EV_DEBUG_ADDR_LBN 16 -#define FRF_AB_EV_DEBUG_ADDR_WIDTH 5 -#define FRF_AB_RX_DEBUG_ADDR_LBN 11 -#define FRF_AB_RX_DEBUG_ADDR_WIDTH 5 -#define FRF_AB_TX_DEBUG_ADDR_LBN 6 -#define FRF_AB_TX_DEBUG_ADDR_WIDTH 5 -#define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1 -#define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5 -#define FRF_AZ_CS_DEBUG_EN_LBN 0 -#define 
FRF_AZ_CS_DEBUG_EN_WIDTH 1 - -/* DRIVER_REG: Driver scratch register [0-7] */ -#define FR_AZ_DRIVER 0x00000280 -#define FR_AZ_DRIVER_STEP 16 -#define FR_AZ_DRIVER_ROWS 8 -#define FRF_AZ_DRIVER_DW0_LBN 0 -#define FRF_AZ_DRIVER_DW0_WIDTH 32 - -/* ALTERA_BUILD_REG: Altera build register */ -#define FR_AZ_ALTERA_BUILD 0x00000300 -#define FRF_AZ_ALTERA_BUILD_VER_LBN 0 -#define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32 - -/* CSR_SPARE_REG: Spare register */ -#define FR_AZ_CSR_SPARE 0x00000310 -#define FRF_AB_MEM_PERR_EN_LBN 64 -#define FRF_AB_MEM_PERR_EN_WIDTH 38 -#define FRF_CZ_MEM_PERR_EN_LBN 64 -#define FRF_CZ_MEM_PERR_EN_WIDTH 35 -#define FRF_AB_MEM_PERR_EN_TX_DATA_LBN 72 -#define FRF_AB_MEM_PERR_EN_TX_DATA_WIDTH 2 -#define FRF_AZ_CSR_SPARE_BITS_LBN 0 -#define FRF_AZ_CSR_SPARE_BITS_WIDTH 32 - -/* PCIE_SD_CTL0123_REG: PCIE SerDes control register 0 to 3 */ -#define FR_AB_PCIE_SD_CTL0123 0x00000320 -#define FRF_AB_PCIE_TESTSIG_H_LBN 96 -#define FRF_AB_PCIE_TESTSIG_H_WIDTH 19 -#define FRF_AB_PCIE_TESTSIG_L_LBN 64 -#define FRF_AB_PCIE_TESTSIG_L_WIDTH 19 -#define FRF_AB_PCIE_OFFSET_LBN 56 -#define FRF_AB_PCIE_OFFSET_WIDTH 8 -#define FRF_AB_PCIE_OFFSETEN_H_LBN 55 -#define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1 -#define FRF_AB_PCIE_OFFSETEN_L_LBN 54 -#define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1 -#define FRF_AB_PCIE_HIVMODE_H_LBN 53 -#define FRF_AB_PCIE_HIVMODE_H_WIDTH 1 -#define FRF_AB_PCIE_HIVMODE_L_LBN 52 -#define FRF_AB_PCIE_HIVMODE_L_WIDTH 1 -#define FRF_AB_PCIE_PARRESET_H_LBN 51 -#define FRF_AB_PCIE_PARRESET_H_WIDTH 1 -#define FRF_AB_PCIE_PARRESET_L_LBN 50 -#define FRF_AB_PCIE_PARRESET_L_WIDTH 1 -#define FRF_AB_PCIE_LPBKWDRV_H_LBN 49 -#define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1 -#define FRF_AB_PCIE_LPBKWDRV_L_LBN 48 -#define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1 -#define FRF_AB_PCIE_LPBK_LBN 40 -#define FRF_AB_PCIE_LPBK_WIDTH 8 -#define FRF_AB_PCIE_PARLPBK_LBN 32 -#define FRF_AB_PCIE_PARLPBK_WIDTH 8 -#define FRF_AB_PCIE_RXTERMADJ_H_LBN 30 -#define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2 -#define FRF_AB_PCIE_RXTERMADJ_L_LBN 28 -#define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2 -#define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3 -#define FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2 -#define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1 -#define FFE_AB_PCIE_RXTERMADJ_NOMNL 0 -#define FRF_AB_PCIE_TXTERMADJ_H_LBN 26 -#define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2 -#define FRF_AB_PCIE_TXTERMADJ_L_LBN 24 -#define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2 -#define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3 -#define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2 -#define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1 -#define FFE_AB_PCIE_TXTERMADJ_NOMNL 0 -#define FRF_AB_PCIE_RXEQCTL_H_LBN 18 -#define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2 -#define FRF_AB_PCIE_RXEQCTL_L_LBN 16 -#define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2 -#define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3 -#define FFE_AB_PCIE_RXEQCTL_OFF 2 -#define FFE_AB_PCIE_RXEQCTL_MIN 1 -#define FFE_AB_PCIE_RXEQCTL_MAX 0 -#define FRF_AB_PCIE_HIDRV_LBN 8 -#define FRF_AB_PCIE_HIDRV_WIDTH 8 -#define FRF_AB_PCIE_LODRV_LBN 0 -#define FRF_AB_PCIE_LODRV_WIDTH 8 - -/* PCIE_SD_CTL45_REG: PCIE SerDes control register 4 and 5 */ -#define FR_AB_PCIE_SD_CTL45 0x00000330 -#define FRF_AB_PCIE_DTX7_LBN 60 -#define FRF_AB_PCIE_DTX7_WIDTH 4 -#define FRF_AB_PCIE_DTX6_LBN 56 -#define FRF_AB_PCIE_DTX6_WIDTH 4 -#define FRF_AB_PCIE_DTX5_LBN 52 -#define FRF_AB_PCIE_DTX5_WIDTH 4 -#define FRF_AB_PCIE_DTX4_LBN 48 -#define FRF_AB_PCIE_DTX4_WIDTH 4 -#define FRF_AB_PCIE_DTX3_LBN 44 -#define FRF_AB_PCIE_DTX3_WIDTH 4 -#define FRF_AB_PCIE_DTX2_LBN 40 -#define FRF_AB_PCIE_DTX2_WIDTH 4 -#define FRF_AB_PCIE_DTX1_LBN 36 -#define 
FRF_AB_PCIE_DTX1_WIDTH 4 -#define FRF_AB_PCIE_DTX0_LBN 32 -#define FRF_AB_PCIE_DTX0_WIDTH 4 -#define FRF_AB_PCIE_DEQ7_LBN 28 -#define FRF_AB_PCIE_DEQ7_WIDTH 4 -#define FRF_AB_PCIE_DEQ6_LBN 24 -#define FRF_AB_PCIE_DEQ6_WIDTH 4 -#define FRF_AB_PCIE_DEQ5_LBN 20 -#define FRF_AB_PCIE_DEQ5_WIDTH 4 -#define FRF_AB_PCIE_DEQ4_LBN 16 -#define FRF_AB_PCIE_DEQ4_WIDTH 4 -#define FRF_AB_PCIE_DEQ3_LBN 12 -#define FRF_AB_PCIE_DEQ3_WIDTH 4 -#define FRF_AB_PCIE_DEQ2_LBN 8 -#define FRF_AB_PCIE_DEQ2_WIDTH 4 -#define FRF_AB_PCIE_DEQ1_LBN 4 -#define FRF_AB_PCIE_DEQ1_WIDTH 4 -#define FRF_AB_PCIE_DEQ0_LBN 0 -#define FRF_AB_PCIE_DEQ0_WIDTH 4 - -/* PCIE_PCS_CTL_STAT_REG: PCIE PCS control and status register */ -#define FR_AB_PCIE_PCS_CTL_STAT 0x00000340 -#define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52 -#define FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4 -#define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48 -#define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4 -#define FRF_AB_PCIE_PRBSERR_LBN 40 -#define FRF_AB_PCIE_PRBSERR_WIDTH 8 -#define FRF_AB_PCIE_PRBSERRH0_LBN 32 -#define FRF_AB_PCIE_PRBSERRH0_WIDTH 8 -#define FRF_AB_PCIE_FASTINIT_H_LBN 15 -#define FRF_AB_PCIE_FASTINIT_H_WIDTH 1 -#define FRF_AB_PCIE_FASTINIT_L_LBN 14 -#define FRF_AB_PCIE_FASTINIT_L_WIDTH 1 -#define FRF_AB_PCIE_CTCDISABLE_H_LBN 13 -#define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1 -#define FRF_AB_PCIE_CTCDISABLE_L_LBN 12 -#define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1 -#define FRF_AB_PCIE_PRBSSYNC_H_LBN 11 -#define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1 -#define FRF_AB_PCIE_PRBSSYNC_L_LBN 10 -#define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1 -#define FRF_AB_PCIE_PRBSERRACK_H_LBN 9 -#define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1 -#define FRF_AB_PCIE_PRBSERRACK_L_LBN 8 -#define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1 -#define FRF_AB_PCIE_PRBSSEL_LBN 0 -#define FRF_AB_PCIE_PRBSSEL_WIDTH 8 - -/* DEBUG_DATA_OUT_REG: Live Debug and Debug 2 out ports */ -#define FR_BB_DEBUG_DATA_OUT 0x00000350 -#define FRF_BB_DEBUG2_PORT_LBN 25 -#define FRF_BB_DEBUG2_PORT_WIDTH 15 -#define FRF_BB_DEBUG1_PORT_LBN 0 -#define FRF_BB_DEBUG1_PORT_WIDTH 25 - -/* EVQ_RPTR_REGP0: Event queue read pointer register */ -#define FR_BZ_EVQ_RPTR_P0 0x00000400 -#define FR_BZ_EVQ_RPTR_P0_STEP 8192 -#define FR_BZ_EVQ_RPTR_P0_ROWS 1024 -/* EVQ_RPTR_REG_KER: Event queue read pointer register */ -#define FR_AA_EVQ_RPTR_KER 0x00011b00 -#define FR_AA_EVQ_RPTR_KER_STEP 4 -#define FR_AA_EVQ_RPTR_KER_ROWS 4 -/* EVQ_RPTR_REG: Event queue read pointer register */ -#define FR_BZ_EVQ_RPTR 0x00fa0000 -#define FR_BZ_EVQ_RPTR_STEP 16 -#define FR_BB_EVQ_RPTR_ROWS 4096 -#define FR_CZ_EVQ_RPTR_ROWS 1024 -/* EVQ_RPTR_REGP123: Event queue read pointer register */ -#define FR_BB_EVQ_RPTR_P123 0x01000400 -#define FR_BB_EVQ_RPTR_P123_STEP 8192 -#define FR_BB_EVQ_RPTR_P123_ROWS 3072 -#define FRF_AZ_EVQ_RPTR_VLD_LBN 15 -#define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1 -#define FRF_AZ_EVQ_RPTR_LBN 0 -#define FRF_AZ_EVQ_RPTR_WIDTH 15 - -/* TIMER_COMMAND_REGP0: Timer Command Registers */ -#define FR_BZ_TIMER_COMMAND_P0 0x00000420 -#define FR_BZ_TIMER_COMMAND_P0_STEP 8192 -#define FR_BZ_TIMER_COMMAND_P0_ROWS 1024 -/* TIMER_COMMAND_REG_KER: Timer Command Registers */ -#define FR_AA_TIMER_COMMAND_KER 0x00000420 -#define FR_AA_TIMER_COMMAND_KER_STEP 8192 -#define FR_AA_TIMER_COMMAND_KER_ROWS 4 -/* TIMER_COMMAND_REGP123: Timer Command Registers */ -#define FR_BB_TIMER_COMMAND_P123 0x01000420 -#define FR_BB_TIMER_COMMAND_P123_STEP 8192 -#define FR_BB_TIMER_COMMAND_P123_ROWS 3072 -#define FRF_CZ_TC_TIMER_MODE_LBN 14 -#define FRF_CZ_TC_TIMER_MODE_WIDTH 2 -#define FRF_AB_TC_TIMER_MODE_LBN 12 -#define 
FRF_AB_TC_TIMER_MODE_WIDTH 2 -#define FRF_CZ_TC_TIMER_VAL_LBN 0 -#define FRF_CZ_TC_TIMER_VAL_WIDTH 14 -#define FRF_AB_TC_TIMER_VAL_LBN 0 -#define FRF_AB_TC_TIMER_VAL_WIDTH 12 - -/* DRV_EV_REG: Driver generated event register */ -#define FR_AZ_DRV_EV 0x00000440 -#define FRF_AZ_DRV_EV_QID_LBN 64 -#define FRF_AZ_DRV_EV_QID_WIDTH 12 -#define FRF_AZ_DRV_EV_DATA_LBN 0 -#define FRF_AZ_DRV_EV_DATA_WIDTH 64 - -/* EVQ_CTL_REG: Event queue control register */ -#define FR_AZ_EVQ_CTL 0x00000450 -#define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15 -#define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10 -#define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15 -#define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6 -#define FRF_AZ_EVQ_OWNERR_CTL_LBN 14 -#define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1 -#define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7 -#define FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7 -#define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0 -#define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7 - -/* EVQ_CNT1_REG: Event counter 1 register */ -#define FR_AZ_EVQ_CNT1 0x00000460 -#define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120 -#define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7 -#define FRF_AZ_EVQ_CNT_TOBIU_LBN 100 -#define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20 -#define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80 -#define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20 -#define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60 -#define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20 -#define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40 -#define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20 -#define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20 -#define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20 -#define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0 -#define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20 - -/* EVQ_CNT2_REG: Event counter 2 register */ -#define FR_AZ_EVQ_CNT2 0x00000470 -#define FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104 -#define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20 -#define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84 -#define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20 -#define FRF_AZ_EVQ_RDY_CNT_LBN 80 -#define FRF_AZ_EVQ_RDY_CNT_WIDTH 4 -#define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60 -#define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20 -#define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40 -#define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20 -#define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20 -#define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20 -#define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0 -#define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20 - -/* USR_EV_REG: Event mailbox register */ -#define FR_CZ_USR_EV 0x00000540 -#define FR_CZ_USR_EV_STEP 8192 -#define FR_CZ_USR_EV_ROWS 1024 -#define FRF_CZ_USR_EV_DATA_LBN 0 -#define FRF_CZ_USR_EV_DATA_WIDTH 32 - -/* BUF_TBL_CFG_REG: Buffer table configuration register */ -#define FR_AZ_BUF_TBL_CFG 0x00000600 -#define FRF_AZ_BUF_TBL_MODE_LBN 3 -#define FRF_AZ_BUF_TBL_MODE_WIDTH 1 - -/* SRM_RX_DC_CFG_REG: SRAM receive descriptor cache configuration register */ -#define FR_AZ_SRM_RX_DC_CFG 0x00000610 -#define FRF_AZ_SRM_CLK_TMP_EN_LBN 21 -#define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1 -#define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0 -#define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21 - -/* SRM_TX_DC_CFG_REG: SRAM transmit descriptor cache configuration register */ -#define FR_AZ_SRM_TX_DC_CFG 0x00000620 -#define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0 -#define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21 - -/* SRM_CFG_REG: SRAM configuration register */ -#define FR_AZ_SRM_CFG 0x00000630 -#define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5 -#define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1 -#define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4 -#define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1 -#define FRF_AZ_SRM_INIT_EN_LBN 3 -#define FRF_AZ_SRM_INIT_EN_WIDTH 1 -#define FRF_AZ_SRM_NUM_BANK_LBN 2 -#define FRF_AZ_SRM_NUM_BANK_WIDTH 1 -#define FRF_AZ_SRM_BANK_SIZE_LBN 0 -#define FRF_AZ_SRM_BANK_SIZE_WIDTH 2 - -/* BUF_TBL_UPD_REG: Buffer table update register */ 
-#define FR_AZ_BUF_TBL_UPD 0x00000650 -#define FRF_AZ_BUF_UPD_CMD_LBN 63 -#define FRF_AZ_BUF_UPD_CMD_WIDTH 1 -#define FRF_AZ_BUF_CLR_CMD_LBN 62 -#define FRF_AZ_BUF_CLR_CMD_WIDTH 1 -#define FRF_AZ_BUF_CLR_END_ID_LBN 32 -#define FRF_AZ_BUF_CLR_END_ID_WIDTH 20 -#define FRF_AZ_BUF_CLR_START_ID_LBN 0 -#define FRF_AZ_BUF_CLR_START_ID_WIDTH 20 - -/* SRM_UPD_EVQ_REG: Buffer table update register */ -#define FR_AZ_SRM_UPD_EVQ 0x00000660 -#define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0 -#define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12 - -/* SRAM_PARITY_REG: SRAM parity register. */ -#define FR_AZ_SRAM_PARITY 0x00000670 -#define FRF_CZ_BYPASS_ECC_LBN 3 -#define FRF_CZ_BYPASS_ECC_WIDTH 1 -#define FRF_CZ_SEC_INT_LBN 2 -#define FRF_CZ_SEC_INT_WIDTH 1 -#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1 -#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1 -#define FRF_AB_FORCE_SRAM_PERR_LBN 0 -#define FRF_AB_FORCE_SRAM_PERR_WIDTH 1 -#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0 -#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1 - -/* RX_CFG_REG: Receive configuration register */ -#define FR_AZ_RX_CFG 0x00000800 -#define FRF_CZ_RX_MIN_KBUF_SIZE_LBN 72 -#define FRF_CZ_RX_MIN_KBUF_SIZE_WIDTH 14 -#define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71 -#define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1 -#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62 -#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9 -#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53 -#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9 -#define FRF_CZ_RX_PRE_RFF_IPG_LBN 49 -#define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4 -#define FRF_BZ_RX_TCP_SUP_LBN 48 -#define FRF_BZ_RX_TCP_SUP_WIDTH 1 -#define FRF_BZ_RX_INGR_EN_LBN 47 -#define FRF_BZ_RX_INGR_EN_WIDTH 1 -#define FRF_BZ_RX_IP_HASH_LBN 46 -#define FRF_BZ_RX_IP_HASH_WIDTH 1 -#define FRF_BZ_RX_HASH_ALG_LBN 45 -#define FRF_BZ_RX_HASH_ALG_WIDTH 1 -#define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44 -#define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1 -#define FRF_BZ_RX_DESC_PUSH_EN_LBN 43 -#define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1 -#define FRF_BZ_RX_RDW_PATCH_EN_LBN 42 -#define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1 -#define FRF_BB_RX_PCI_BURST_SIZE_LBN 39 -#define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3 -#define FRF_BZ_RX_OWNERR_CTL_LBN 38 -#define FRF_BZ_RX_OWNERR_CTL_WIDTH 1 -#define FRF_BZ_RX_XON_TX_TH_LBN 33 -#define FRF_BZ_RX_XON_TX_TH_WIDTH 5 -#define FRF_AA_RX_DESC_PUSH_EN_LBN 35 -#define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1 -#define FRF_AA_RX_RDW_PATCH_EN_LBN 34 -#define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1 -#define FRF_AA_RX_PCI_BURST_SIZE_LBN 31 -#define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3 -#define FRF_BZ_RX_XOFF_TX_TH_LBN 28 -#define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5 -#define FRF_AA_RX_OWNERR_CTL_LBN 30 -#define FRF_AA_RX_OWNERR_CTL_WIDTH 1 -#define FRF_AA_RX_XON_TX_TH_LBN 25 -#define FRF_AA_RX_XON_TX_TH_WIDTH 5 -#define FRF_BZ_RX_USR_BUF_SIZE_LBN 19 -#define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9 -#define FRF_AA_RX_XOFF_TX_TH_LBN 20 -#define FRF_AA_RX_XOFF_TX_TH_WIDTH 5 -#define FRF_AA_RX_USR_BUF_SIZE_LBN 11 -#define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9 -#define FRF_BZ_RX_XON_MAC_TH_LBN 10 -#define FRF_BZ_RX_XON_MAC_TH_WIDTH 9 -#define FRF_AA_RX_XON_MAC_TH_LBN 6 -#define FRF_AA_RX_XON_MAC_TH_WIDTH 5 -#define FRF_BZ_RX_XOFF_MAC_TH_LBN 1 -#define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9 -#define FRF_AA_RX_XOFF_MAC_TH_LBN 1 -#define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5 -#define FRF_AZ_RX_XOFF_MAC_EN_LBN 0 -#define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1 - -/* RX_FILTER_CTL_REG: Receive filter control registers */ -#define FR_BZ_RX_FILTER_CTL 0x00000810 -#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94 -#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8 -#define 
FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86 -#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8 -#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85 -#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1 -#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69 -#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16 -#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57 -#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12 -#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56 -#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1 -#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55 -#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1 -#define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43 -#define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12 -#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42 -#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1 -#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41 -#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1 -#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40 -#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1 -#define FRF_BZ_UDP_FULL_SRCH_LIMIT_LBN 32 -#define FRF_BZ_UDP_FULL_SRCH_LIMIT_WIDTH 8 -#define FRF_BZ_NUM_KER_LBN 24 -#define FRF_BZ_NUM_KER_WIDTH 2 -#define FRF_BZ_UDP_WILD_SRCH_LIMIT_LBN 16 -#define FRF_BZ_UDP_WILD_SRCH_LIMIT_WIDTH 8 -#define FRF_BZ_TCP_WILD_SRCH_LIMIT_LBN 8 -#define FRF_BZ_TCP_WILD_SRCH_LIMIT_WIDTH 8 -#define FRF_BZ_TCP_FULL_SRCH_LIMIT_LBN 0 -#define FRF_BZ_TCP_FULL_SRCH_LIMIT_WIDTH 8 - -/* RX_FLUSH_DESCQ_REG: Receive flush descriptor queue register */ -#define FR_AZ_RX_FLUSH_DESCQ 0x00000820 -#define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24 -#define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1 -#define FRF_AZ_RX_FLUSH_DESCQ_LBN 0 -#define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12 - -/* RX_DESC_UPD_REGP0: Receive descriptor update register. */ -#define FR_BZ_RX_DESC_UPD_P0 0x00000830 -#define FR_BZ_RX_DESC_UPD_P0_STEP 8192 -#define FR_BZ_RX_DESC_UPD_P0_ROWS 1024 -/* RX_DESC_UPD_REG_KER: Receive descriptor update register. */ -#define FR_AA_RX_DESC_UPD_KER 0x00000830 -#define FR_AA_RX_DESC_UPD_KER_STEP 8192 -#define FR_AA_RX_DESC_UPD_KER_ROWS 4 -/* RX_DESC_UPD_REGP123: Receive descriptor update register. 
*/ -#define FR_BB_RX_DESC_UPD_P123 0x01000830 -#define FR_BB_RX_DESC_UPD_P123_STEP 8192 -#define FR_BB_RX_DESC_UPD_P123_ROWS 3072 -#define FRF_AZ_RX_DESC_WPTR_LBN 96 -#define FRF_AZ_RX_DESC_WPTR_WIDTH 12 -#define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95 -#define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1 -#define FRF_AZ_RX_DESC_LBN 0 -#define FRF_AZ_RX_DESC_WIDTH 64 - -/* RX_DC_CFG_REG: Receive descriptor cache configuration register */ -#define FR_AZ_RX_DC_CFG 0x00000840 -#define FRF_AB_RX_MAX_PF_LBN 2 -#define FRF_AB_RX_MAX_PF_WIDTH 2 -#define FRF_AZ_RX_DC_SIZE_LBN 0 -#define FRF_AZ_RX_DC_SIZE_WIDTH 2 -#define FFE_AZ_RX_DC_SIZE_64 3 -#define FFE_AZ_RX_DC_SIZE_32 2 -#define FFE_AZ_RX_DC_SIZE_16 1 -#define FFE_AZ_RX_DC_SIZE_8 0 - -/* RX_DC_PF_WM_REG: Receive descriptor cache pre-fetch watermark register */ -#define FR_AZ_RX_DC_PF_WM 0x00000850 -#define FRF_AZ_RX_DC_PF_HWM_LBN 6 -#define FRF_AZ_RX_DC_PF_HWM_WIDTH 6 -#define FRF_AZ_RX_DC_PF_LWM_LBN 0 -#define FRF_AZ_RX_DC_PF_LWM_WIDTH 6 - -/* RX_RSS_TKEY_REG: RSS Toeplitz hash key */ -#define FR_BZ_RX_RSS_TKEY 0x00000860 -#define FRF_BZ_RX_RSS_TKEY_HI_LBN 64 -#define FRF_BZ_RX_RSS_TKEY_HI_WIDTH 64 -#define FRF_BZ_RX_RSS_TKEY_LO_LBN 0 -#define FRF_BZ_RX_RSS_TKEY_LO_WIDTH 64 - -/* RX_NODESC_DROP_REG: Receive dropped packet counter register */ -#define FR_AZ_RX_NODESC_DROP 0x00000880 -#define FRF_CZ_RX_NODESC_DROP_CNT_LBN 0 -#define FRF_CZ_RX_NODESC_DROP_CNT_WIDTH 32 -#define FRF_AB_RX_NODESC_DROP_CNT_LBN 0 -#define FRF_AB_RX_NODESC_DROP_CNT_WIDTH 16 - -/* RX_SELF_RST_REG: Receive self reset register */ -#define FR_AA_RX_SELF_RST 0x00000890 -#define FRF_AA_RX_ISCSI_DIS_LBN 17 -#define FRF_AA_RX_ISCSI_DIS_WIDTH 1 -#define FRF_AA_RX_SW_RST_REG_LBN 16 -#define FRF_AA_RX_SW_RST_REG_WIDTH 1 -#define FRF_AA_RX_NODESC_WAIT_DIS_LBN 9 -#define FRF_AA_RX_NODESC_WAIT_DIS_WIDTH 1 -#define FRF_AA_RX_SELF_RST_EN_LBN 8 -#define FRF_AA_RX_SELF_RST_EN_WIDTH 1 -#define FRF_AA_RX_MAX_PF_LAT_LBN 4 -#define FRF_AA_RX_MAX_PF_LAT_WIDTH 4 -#define FRF_AA_RX_MAX_LU_LAT_LBN 0 -#define FRF_AA_RX_MAX_LU_LAT_WIDTH 4 - -/* RX_DEBUG_REG: undocumented register */ -#define FR_AZ_RX_DEBUG 0x000008a0 -#define FRF_AZ_RX_DEBUG_LBN 0 -#define FRF_AZ_RX_DEBUG_WIDTH 64 - -/* RX_PUSH_DROP_REG: Receive descriptor push dropped counter register */ -#define FR_AZ_RX_PUSH_DROP 0x000008b0 -#define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0 -#define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32 - -/* RX_RSS_IPV6_REG1: IPv6 RSS Toeplitz hash key low bytes */ -#define FR_CZ_RX_RSS_IPV6_REG1 0x000008d0 -#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0 -#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128 - -/* RX_RSS_IPV6_REG2: IPv6 RSS Toeplitz hash key middle bytes */ -#define FR_CZ_RX_RSS_IPV6_REG2 0x000008e0 -#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0 -#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128 - -/* RX_RSS_IPV6_REG3: IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings */ -#define FR_CZ_RX_RSS_IPV6_REG3 0x000008f0 -#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66 -#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1 -#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65 -#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1 -#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64 -#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1 -#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0 -#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64 - -/* TX_FLUSH_DESCQ_REG: Transmit flush descriptor queue register */ -#define FR_AZ_TX_FLUSH_DESCQ 0x00000a00 -#define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12 -#define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1 -#define FRF_AZ_TX_FLUSH_DESCQ_LBN 0 -#define 
FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12 - -/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */ -#define FR_BZ_TX_DESC_UPD_P0 0x00000a10 -#define FR_BZ_TX_DESC_UPD_P0_STEP 8192 -#define FR_BZ_TX_DESC_UPD_P0_ROWS 1024 -/* TX_DESC_UPD_REG_KER: Transmit descriptor update register. */ -#define FR_AA_TX_DESC_UPD_KER 0x00000a10 -#define FR_AA_TX_DESC_UPD_KER_STEP 8192 -#define FR_AA_TX_DESC_UPD_KER_ROWS 8 -/* TX_DESC_UPD_REGP123: Transmit descriptor update register. */ -#define FR_BB_TX_DESC_UPD_P123 0x01000a10 -#define FR_BB_TX_DESC_UPD_P123_STEP 8192 -#define FR_BB_TX_DESC_UPD_P123_ROWS 3072 -#define FRF_AZ_TX_DESC_WPTR_LBN 96 -#define FRF_AZ_TX_DESC_WPTR_WIDTH 12 -#define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95 -#define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1 -#define FRF_AZ_TX_DESC_LBN 0 -#define FRF_AZ_TX_DESC_WIDTH 95 - -/* TX_DC_CFG_REG: Transmit descriptor cache configuration register */ -#define FR_AZ_TX_DC_CFG 0x00000a20 -#define FRF_AZ_TX_DC_SIZE_LBN 0 -#define FRF_AZ_TX_DC_SIZE_WIDTH 2 -#define FFE_AZ_TX_DC_SIZE_32 2 -#define FFE_AZ_TX_DC_SIZE_16 1 -#define FFE_AZ_TX_DC_SIZE_8 0 - -/* TX_CHKSM_CFG_REG: Transmit checksum configuration register */ -#define FR_AA_TX_CHKSM_CFG 0x00000a30 -#define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96 -#define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32 -#define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64 -#define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32 -#define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32 -#define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32 -#define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0 -#define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32 - -/* TX_CFG_REG: Transmit configuration register */ -#define FR_AZ_TX_CFG 0x00000a50 -#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114 -#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8 -#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113 -#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1 -#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105 -#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8 -#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97 -#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8 -#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89 -#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8 -#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81 -#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8 -#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73 -#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8 -#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65 -#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8 -#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64 -#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1 -#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48 -#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16 -#define FRF_CZ_TX_FILTER_EN_BIT_LBN 47 -#define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1 -#define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16 -#define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15 -#define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5 -#define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1 -#define FRF_AZ_TX_P1_PRI_EN_LBN 4 -#define FRF_AZ_TX_P1_PRI_EN_WIDTH 1 -#define FRF_AZ_TX_OWNERR_CTL_LBN 2 -#define FRF_AZ_TX_OWNERR_CTL_WIDTH 1 -#define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1 -#define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1 -#define FRF_AZ_TX_IP_ID_REP_EN_LBN 0 -#define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1 - -/* TX_PUSH_DROP_REG: Transmit push dropped register */ -#define FR_AZ_TX_PUSH_DROP 0x00000a60 -#define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0 -#define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32 - -/* TX_RESERVED_REG: Transmit configuration register */ -#define 
FR_AZ_TX_RESERVED 0x00000a80 -#define FRF_AZ_TX_EVT_CNT_LBN 121 -#define FRF_AZ_TX_EVT_CNT_WIDTH 7 -#define FRF_AZ_TX_PREF_AGE_CNT_LBN 119 -#define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2 -#define FRF_AZ_TX_RD_COMP_TMR_LBN 96 -#define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23 -#define FRF_AZ_TX_PUSH_EN_LBN 89 -#define FRF_AZ_TX_PUSH_EN_WIDTH 1 -#define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88 -#define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1 -#define FRF_AZ_TX_D_FF_FULL_P0_LBN 85 -#define FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1 -#define FRF_AZ_TX_DMAR_ST_P0_LBN 81 -#define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1 -#define FRF_AZ_TX_DMAQ_ST_LBN 78 -#define FRF_AZ_TX_DMAQ_ST_WIDTH 1 -#define FRF_AZ_TX_RX_SPACER_LBN 64 -#define FRF_AZ_TX_RX_SPACER_WIDTH 8 -#define FRF_AZ_TX_DROP_ABORT_EN_LBN 60 -#define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1 -#define FRF_AZ_TX_SOFT_EVT_EN_LBN 59 -#define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1 -#define FRF_AZ_TX_PS_EVT_DIS_LBN 58 -#define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1 -#define FRF_AZ_TX_RX_SPACER_EN_LBN 57 -#define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1 -#define FRF_AZ_TX_XP_TIMER_LBN 52 -#define FRF_AZ_TX_XP_TIMER_WIDTH 5 -#define FRF_AZ_TX_PREF_SPACER_LBN 44 -#define FRF_AZ_TX_PREF_SPACER_WIDTH 8 -#define FRF_AZ_TX_PREF_WD_TMR_LBN 22 -#define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22 -#define FRF_AZ_TX_ONLY1TAG_LBN 21 -#define FRF_AZ_TX_ONLY1TAG_WIDTH 1 -#define FRF_AZ_TX_PREF_THRESHOLD_LBN 19 -#define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2 -#define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18 -#define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1 -#define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17 -#define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1 -#define FRF_AA_TX_DMA_FF_THR_LBN 16 -#define FRF_AA_TX_DMA_FF_THR_WIDTH 1 -#define FRF_AZ_TX_DMA_SPACER_LBN 8 -#define FRF_AZ_TX_DMA_SPACER_WIDTH 8 -#define FRF_AA_TX_TCP_DIS_LBN 7 -#define FRF_AA_TX_TCP_DIS_WIDTH 1 -#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7 -#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1 -#define FRF_AA_TX_IP_DIS_LBN 6 -#define FRF_AA_TX_IP_DIS_WIDTH 1 -#define FRF_AZ_TX_MAX_CPL_LBN 2 -#define FRF_AZ_TX_MAX_CPL_WIDTH 2 -#define FFE_AZ_TX_MAX_CPL_16 3 -#define FFE_AZ_TX_MAX_CPL_8 2 -#define FFE_AZ_TX_MAX_CPL_4 1 -#define FFE_AZ_TX_MAX_CPL_NOLIMIT 0 -#define FRF_AZ_TX_MAX_PREF_LBN 0 -#define FRF_AZ_TX_MAX_PREF_WIDTH 2 -#define FFE_AZ_TX_MAX_PREF_32 3 -#define FFE_AZ_TX_MAX_PREF_16 2 -#define FFE_AZ_TX_MAX_PREF_8 1 -#define FFE_AZ_TX_MAX_PREF_OFF 0 - -/* TX_PACE_REG: Transmit pace control register */ -#define FR_BZ_TX_PACE 0x00000a90 -#define FRF_BZ_TX_PACE_SB_NOT_AF_LBN 19 -#define FRF_BZ_TX_PACE_SB_NOT_AF_WIDTH 10 -#define FRF_BZ_TX_PACE_SB_AF_LBN 9 -#define FRF_BZ_TX_PACE_SB_AF_WIDTH 10 -#define FRF_BZ_TX_PACE_FB_BASE_LBN 5 -#define FRF_BZ_TX_PACE_FB_BASE_WIDTH 4 -#define FRF_BZ_TX_PACE_BIN_TH_LBN 0 -#define FRF_BZ_TX_PACE_BIN_TH_WIDTH 5 - -/* TX_PACE_DROP_QID_REG: PACE Drop QID Counter */ -#define FR_BZ_TX_PACE_DROP_QID 0x00000aa0 -#define FRF_BZ_TX_PACE_QID_DRP_CNT_LBN 0 -#define FRF_BZ_TX_PACE_QID_DRP_CNT_WIDTH 16 - -/* TX_VLAN_REG: Transmit VLAN tag register */ -#define FR_BB_TX_VLAN 0x00000ae0 -#define FRF_BB_TX_VLAN_EN_LBN 127 -#define FRF_BB_TX_VLAN_EN_WIDTH 1 -#define FRF_BB_TX_VLAN7_PORT1_EN_LBN 125 -#define FRF_BB_TX_VLAN7_PORT1_EN_WIDTH 1 -#define FRF_BB_TX_VLAN7_PORT0_EN_LBN 124 -#define FRF_BB_TX_VLAN7_PORT0_EN_WIDTH 1 -#define FRF_BB_TX_VLAN7_LBN 112 -#define FRF_BB_TX_VLAN7_WIDTH 12 -#define FRF_BB_TX_VLAN6_PORT1_EN_LBN 109 -#define FRF_BB_TX_VLAN6_PORT1_EN_WIDTH 1 -#define FRF_BB_TX_VLAN6_PORT0_EN_LBN 108 -#define FRF_BB_TX_VLAN6_PORT0_EN_WIDTH 1 -#define FRF_BB_TX_VLAN6_LBN 96 -#define FRF_BB_TX_VLAN6_WIDTH 12 -#define 
FRF_BB_TX_VLAN5_PORT1_EN_LBN 93 -#define FRF_BB_TX_VLAN5_PORT1_EN_WIDTH 1 -#define FRF_BB_TX_VLAN5_PORT0_EN_LBN 92 -#define FRF_BB_TX_VLAN5_PORT0_EN_WIDTH 1 -#define FRF_BB_TX_VLAN5_LBN 80 -#define FRF_BB_TX_VLAN5_WIDTH 12 -#define FRF_BB_TX_VLAN4_PORT1_EN_LBN 77 -#define FRF_BB_TX_VLAN4_PORT1_EN_WIDTH 1 -#define FRF_BB_TX_VLAN4_PORT0_EN_LBN 76 -#define FRF_BB_TX_VLAN4_PORT0_EN_WIDTH 1 -#define FRF_BB_TX_VLAN4_LBN 64 -#define FRF_BB_TX_VLAN4_WIDTH 12 -#define FRF_BB_TX_VLAN3_PORT1_EN_LBN 61 -#define FRF_BB_TX_VLAN3_PORT1_EN_WIDTH 1 -#define FRF_BB_TX_VLAN3_PORT0_EN_LBN 60 -#define FRF_BB_TX_VLAN3_PORT0_EN_WIDTH 1 -#define FRF_BB_TX_VLAN3_LBN 48 -#define FRF_BB_TX_VLAN3_WIDTH 12 -#define FRF_BB_TX_VLAN2_PORT1_EN_LBN 45 -#define FRF_BB_TX_VLAN2_PORT1_EN_WIDTH 1 -#define FRF_BB_TX_VLAN2_PORT0_EN_LBN 44 -#define FRF_BB_TX_VLAN2_PORT0_EN_WIDTH 1 -#define FRF_BB_TX_VLAN2_LBN 32 -#define FRF_BB_TX_VLAN2_WIDTH 12 -#define FRF_BB_TX_VLAN1_PORT1_EN_LBN 29 -#define FRF_BB_TX_VLAN1_PORT1_EN_WIDTH 1 -#define FRF_BB_TX_VLAN1_PORT0_EN_LBN 28 -#define FRF_BB_TX_VLAN1_PORT0_EN_WIDTH 1 -#define FRF_BB_TX_VLAN1_LBN 16 -#define FRF_BB_TX_VLAN1_WIDTH 12 -#define FRF_BB_TX_VLAN0_PORT1_EN_LBN 13 -#define FRF_BB_TX_VLAN0_PORT1_EN_WIDTH 1 -#define FRF_BB_TX_VLAN0_PORT0_EN_LBN 12 -#define FRF_BB_TX_VLAN0_PORT0_EN_WIDTH 1 -#define FRF_BB_TX_VLAN0_LBN 0 -#define FRF_BB_TX_VLAN0_WIDTH 12 - -/* TX_IPFIL_PORTEN_REG: Transmit filter control register */ -#define FR_BZ_TX_IPFIL_PORTEN 0x00000af0 -#define FRF_BZ_TX_MADR0_FIL_EN_LBN 64 -#define FRF_BZ_TX_MADR0_FIL_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL31_PORT_EN_LBN 62 -#define FRF_BB_TX_IPFIL31_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL30_PORT_EN_LBN 60 -#define FRF_BB_TX_IPFIL30_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL29_PORT_EN_LBN 58 -#define FRF_BB_TX_IPFIL29_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL28_PORT_EN_LBN 56 -#define FRF_BB_TX_IPFIL28_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL27_PORT_EN_LBN 54 -#define FRF_BB_TX_IPFIL27_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL26_PORT_EN_LBN 52 -#define FRF_BB_TX_IPFIL26_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL25_PORT_EN_LBN 50 -#define FRF_BB_TX_IPFIL25_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL24_PORT_EN_LBN 48 -#define FRF_BB_TX_IPFIL24_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL23_PORT_EN_LBN 46 -#define FRF_BB_TX_IPFIL23_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL22_PORT_EN_LBN 44 -#define FRF_BB_TX_IPFIL22_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL21_PORT_EN_LBN 42 -#define FRF_BB_TX_IPFIL21_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL20_PORT_EN_LBN 40 -#define FRF_BB_TX_IPFIL20_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL19_PORT_EN_LBN 38 -#define FRF_BB_TX_IPFIL19_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL18_PORT_EN_LBN 36 -#define FRF_BB_TX_IPFIL18_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL17_PORT_EN_LBN 34 -#define FRF_BB_TX_IPFIL17_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL16_PORT_EN_LBN 32 -#define FRF_BB_TX_IPFIL16_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL15_PORT_EN_LBN 30 -#define FRF_BB_TX_IPFIL15_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL14_PORT_EN_LBN 28 -#define FRF_BB_TX_IPFIL14_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL13_PORT_EN_LBN 26 -#define FRF_BB_TX_IPFIL13_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL12_PORT_EN_LBN 24 -#define FRF_BB_TX_IPFIL12_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL11_PORT_EN_LBN 22 -#define FRF_BB_TX_IPFIL11_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL10_PORT_EN_LBN 20 -#define FRF_BB_TX_IPFIL10_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL9_PORT_EN_LBN 18 -#define FRF_BB_TX_IPFIL9_PORT_EN_WIDTH 1 -#define 
FRF_BB_TX_IPFIL8_PORT_EN_LBN 16 -#define FRF_BB_TX_IPFIL8_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL7_PORT_EN_LBN 14 -#define FRF_BB_TX_IPFIL7_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL6_PORT_EN_LBN 12 -#define FRF_BB_TX_IPFIL6_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL5_PORT_EN_LBN 10 -#define FRF_BB_TX_IPFIL5_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL4_PORT_EN_LBN 8 -#define FRF_BB_TX_IPFIL4_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL3_PORT_EN_LBN 6 -#define FRF_BB_TX_IPFIL3_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL2_PORT_EN_LBN 4 -#define FRF_BB_TX_IPFIL2_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL1_PORT_EN_LBN 2 -#define FRF_BB_TX_IPFIL1_PORT_EN_WIDTH 1 -#define FRF_BB_TX_IPFIL0_PORT_EN_LBN 0 -#define FRF_BB_TX_IPFIL0_PORT_EN_WIDTH 1 - -/* TX_IPFIL_TBL: Transmit IP source address filter table */ -#define FR_BB_TX_IPFIL_TBL 0x00000b00 -#define FR_BB_TX_IPFIL_TBL_STEP 16 -#define FR_BB_TX_IPFIL_TBL_ROWS 16 -#define FRF_BB_TX_IPFIL_MASK_1_LBN 96 -#define FRF_BB_TX_IPFIL_MASK_1_WIDTH 32 -#define FRF_BB_TX_IP_SRC_ADR_1_LBN 64 -#define FRF_BB_TX_IP_SRC_ADR_1_WIDTH 32 -#define FRF_BB_TX_IPFIL_MASK_0_LBN 32 -#define FRF_BB_TX_IPFIL_MASK_0_WIDTH 32 -#define FRF_BB_TX_IP_SRC_ADR_0_LBN 0 -#define FRF_BB_TX_IP_SRC_ADR_0_WIDTH 32 - -/* MD_TXD_REG: PHY management transmit data register */ -#define FR_AB_MD_TXD 0x00000c00 -#define FRF_AB_MD_TXD_LBN 0 -#define FRF_AB_MD_TXD_WIDTH 16 - -/* MD_RXD_REG: PHY management receive data register */ -#define FR_AB_MD_RXD 0x00000c10 -#define FRF_AB_MD_RXD_LBN 0 -#define FRF_AB_MD_RXD_WIDTH 16 - -/* MD_CS_REG: PHY management configuration & status register */ -#define FR_AB_MD_CS 0x00000c20 -#define FRF_AB_MD_RD_EN_CMD_LBN 15 -#define FRF_AB_MD_RD_EN_CMD_WIDTH 1 -#define FRF_AB_MD_WR_EN_CMD_LBN 14 -#define FRF_AB_MD_WR_EN_CMD_WIDTH 1 -#define FRF_AB_MD_ADDR_CMD_LBN 13 -#define FRF_AB_MD_ADDR_CMD_WIDTH 1 -#define FRF_AB_MD_PT_LBN 7 -#define FRF_AB_MD_PT_WIDTH 3 -#define FRF_AB_MD_PL_LBN 6 -#define FRF_AB_MD_PL_WIDTH 1 -#define FRF_AB_MD_INT_CLR_LBN 5 -#define FRF_AB_MD_INT_CLR_WIDTH 1 -#define FRF_AB_MD_GC_LBN 4 -#define FRF_AB_MD_GC_WIDTH 1 -#define FRF_AB_MD_PRSP_LBN 3 -#define FRF_AB_MD_PRSP_WIDTH 1 -#define FRF_AB_MD_RIC_LBN 2 -#define FRF_AB_MD_RIC_WIDTH 1 -#define FRF_AB_MD_RDC_LBN 1 -#define FRF_AB_MD_RDC_WIDTH 1 -#define FRF_AB_MD_WRC_LBN 0 -#define FRF_AB_MD_WRC_WIDTH 1 - -/* MD_PHY_ADR_REG: PHY management PHY address register */ -#define FR_AB_MD_PHY_ADR 0x00000c30 -#define FRF_AB_MD_PHY_ADR_LBN 0 -#define FRF_AB_MD_PHY_ADR_WIDTH 16 - -/* MD_ID_REG: PHY management ID register */ -#define FR_AB_MD_ID 0x00000c40 -#define FRF_AB_MD_PRT_ADR_LBN 11 -#define FRF_AB_MD_PRT_ADR_WIDTH 5 -#define FRF_AB_MD_DEV_ADR_LBN 6 -#define FRF_AB_MD_DEV_ADR_WIDTH 5 - -/* MD_STAT_REG: PHY management status & mask register */ -#define FR_AB_MD_STAT 0x00000c50 -#define FRF_AB_MD_PINT_LBN 4 -#define FRF_AB_MD_PINT_WIDTH 1 -#define FRF_AB_MD_DONE_LBN 3 -#define FRF_AB_MD_DONE_WIDTH 1 -#define FRF_AB_MD_BSERR_LBN 2 -#define FRF_AB_MD_BSERR_WIDTH 1 -#define FRF_AB_MD_LNFL_LBN 1 -#define FRF_AB_MD_LNFL_WIDTH 1 -#define FRF_AB_MD_BSY_LBN 0 -#define FRF_AB_MD_BSY_WIDTH 1 - -/* MAC_STAT_DMA_REG: Port MAC statistical counter DMA register */ -#define FR_AB_MAC_STAT_DMA 0x00000c60 -#define FRF_AB_MAC_STAT_DMA_CMD_LBN 48 -#define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1 -#define FRF_AB_MAC_STAT_DMA_ADR_LBN 0 -#define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48 - -/* MAC_CTRL_REG: Port MAC control register */ -#define FR_AB_MAC_CTRL 0x00000c80 -#define FRF_AB_MAC_XOFF_VAL_LBN 16 -#define FRF_AB_MAC_XOFF_VAL_WIDTH 16 -#define 
FRF_BB_TXFIFO_DRAIN_EN_LBN 7 -#define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1 -#define FRF_AB_MAC_XG_DISTXCRC_LBN 5 -#define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1 -#define FRF_AB_MAC_BCAD_ACPT_LBN 4 -#define FRF_AB_MAC_BCAD_ACPT_WIDTH 1 -#define FRF_AB_MAC_UC_PROM_LBN 3 -#define FRF_AB_MAC_UC_PROM_WIDTH 1 -#define FRF_AB_MAC_LINK_STATUS_LBN 2 -#define FRF_AB_MAC_LINK_STATUS_WIDTH 1 -#define FRF_AB_MAC_SPEED_LBN 0 -#define FRF_AB_MAC_SPEED_WIDTH 2 -#define FFE_AB_MAC_SPEED_10G 3 -#define FFE_AB_MAC_SPEED_1G 2 -#define FFE_AB_MAC_SPEED_100M 1 -#define FFE_AB_MAC_SPEED_10M 0 - -/* GEN_MODE_REG: General Purpose mode register (external interrupt mask) */ -#define FR_BB_GEN_MODE 0x00000c90 -#define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3 -#define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1 -#define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2 -#define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1 -#define FRF_BB_XFP_PHY_INT_MASK_LBN 1 -#define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1 -#define FRF_BB_XG_PHY_INT_MASK_LBN 0 -#define FRF_BB_XG_PHY_INT_MASK_WIDTH 1 - -/* MAC_MC_HASH_REG0: Multicast address hash table */ -#define FR_AB_MAC_MC_HASH_REG0 0x00000ca0 -#define FRF_AB_MAC_MCAST_HASH0_LBN 0 -#define FRF_AB_MAC_MCAST_HASH0_WIDTH 128 - -/* MAC_MC_HASH_REG1: Multicast address hash table */ -#define FR_AB_MAC_MC_HASH_REG1 0x00000cb0 -#define FRF_AB_MAC_MCAST_HASH1_LBN 0 -#define FRF_AB_MAC_MCAST_HASH1_WIDTH 128 - -/* GM_CFG1_REG: GMAC configuration register 1 */ -#define FR_AB_GM_CFG1 0x00000e00 -#define FRF_AB_GM_SW_RST_LBN 31 -#define FRF_AB_GM_SW_RST_WIDTH 1 -#define FRF_AB_GM_SIM_RST_LBN 30 -#define FRF_AB_GM_SIM_RST_WIDTH 1 -#define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19 -#define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1 -#define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18 -#define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1 -#define FRF_AB_GM_RST_RX_FUNC_LBN 17 -#define FRF_AB_GM_RST_RX_FUNC_WIDTH 1 -#define FRF_AB_GM_RST_TX_FUNC_LBN 16 -#define FRF_AB_GM_RST_TX_FUNC_WIDTH 1 -#define FRF_AB_GM_LOOP_LBN 8 -#define FRF_AB_GM_LOOP_WIDTH 1 -#define FRF_AB_GM_RX_FC_EN_LBN 5 -#define FRF_AB_GM_RX_FC_EN_WIDTH 1 -#define FRF_AB_GM_TX_FC_EN_LBN 4 -#define FRF_AB_GM_TX_FC_EN_WIDTH 1 -#define FRF_AB_GM_SYNC_RXEN_LBN 3 -#define FRF_AB_GM_SYNC_RXEN_WIDTH 1 -#define FRF_AB_GM_RX_EN_LBN 2 -#define FRF_AB_GM_RX_EN_WIDTH 1 -#define FRF_AB_GM_SYNC_TXEN_LBN 1 -#define FRF_AB_GM_SYNC_TXEN_WIDTH 1 -#define FRF_AB_GM_TX_EN_LBN 0 -#define FRF_AB_GM_TX_EN_WIDTH 1 - -/* GM_CFG2_REG: GMAC configuration register 2 */ -#define FR_AB_GM_CFG2 0x00000e10 -#define FRF_AB_GM_PAMBL_LEN_LBN 12 -#define FRF_AB_GM_PAMBL_LEN_WIDTH 4 -#define FRF_AB_GM_IF_MODE_LBN 8 -#define FRF_AB_GM_IF_MODE_WIDTH 2 -#define FFE_AB_IF_MODE_BYTE_MODE 2 -#define FFE_AB_IF_MODE_NIBBLE_MODE 1 -#define FRF_AB_GM_HUGE_FRM_EN_LBN 5 -#define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1 -#define FRF_AB_GM_LEN_CHK_LBN 4 -#define FRF_AB_GM_LEN_CHK_WIDTH 1 -#define FRF_AB_GM_PAD_CRC_EN_LBN 2 -#define FRF_AB_GM_PAD_CRC_EN_WIDTH 1 -#define FRF_AB_GM_CRC_EN_LBN 1 -#define FRF_AB_GM_CRC_EN_WIDTH 1 -#define FRF_AB_GM_FD_LBN 0 -#define FRF_AB_GM_FD_WIDTH 1 - -/* GM_IPG_REG: GMAC IPG register */ -#define FR_AB_GM_IPG 0x00000e20 -#define FRF_AB_GM_NONB2B_IPG1_LBN 24 -#define FRF_AB_GM_NONB2B_IPG1_WIDTH 7 -#define FRF_AB_GM_NONB2B_IPG2_LBN 16 -#define FRF_AB_GM_NONB2B_IPG2_WIDTH 7 -#define FRF_AB_GM_MIN_IPG_ENF_LBN 8 -#define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8 -#define FRF_AB_GM_B2B_IPG_LBN 0 -#define FRF_AB_GM_B2B_IPG_WIDTH 7 - -/* GM_HD_REG: GMAC half duplex register */ -#define FR_AB_GM_HD 0x00000e30 -#define FRF_AB_GM_ALT_BOFF_VAL_LBN 20 -#define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4 
-#define FRF_AB_GM_ALT_BOFF_EN_LBN 19 -#define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1 -#define FRF_AB_GM_BP_NO_BOFF_LBN 18 -#define FRF_AB_GM_BP_NO_BOFF_WIDTH 1 -#define FRF_AB_GM_DIS_BOFF_LBN 17 -#define FRF_AB_GM_DIS_BOFF_WIDTH 1 -#define FRF_AB_GM_EXDEF_TX_EN_LBN 16 -#define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1 -#define FRF_AB_GM_RTRY_LIMIT_LBN 12 -#define FRF_AB_GM_RTRY_LIMIT_WIDTH 4 -#define FRF_AB_GM_COL_WIN_LBN 0 -#define FRF_AB_GM_COL_WIN_WIDTH 10 - -/* GM_MAX_FLEN_REG: GMAC maximum frame length register */ -#define FR_AB_GM_MAX_FLEN 0x00000e40 -#define FRF_AB_GM_MAX_FLEN_LBN 0 -#define FRF_AB_GM_MAX_FLEN_WIDTH 16 - -/* GM_TEST_REG: GMAC test register */ -#define FR_AB_GM_TEST 0x00000e70 -#define FRF_AB_GM_MAX_BOFF_LBN 3 -#define FRF_AB_GM_MAX_BOFF_WIDTH 1 -#define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2 -#define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1 -#define FRF_AB_GM_TEST_PAUSE_LBN 1 -#define FRF_AB_GM_TEST_PAUSE_WIDTH 1 -#define FRF_AB_GM_SHORT_SLOT_LBN 0 -#define FRF_AB_GM_SHORT_SLOT_WIDTH 1 - -/* GM_ADR1_REG: GMAC station address register 1 */ -#define FR_AB_GM_ADR1 0x00000f00 -#define FRF_AB_GM_ADR_B0_LBN 24 -#define FRF_AB_GM_ADR_B0_WIDTH 8 -#define FRF_AB_GM_ADR_B1_LBN 16 -#define FRF_AB_GM_ADR_B1_WIDTH 8 -#define FRF_AB_GM_ADR_B2_LBN 8 -#define FRF_AB_GM_ADR_B2_WIDTH 8 -#define FRF_AB_GM_ADR_B3_LBN 0 -#define FRF_AB_GM_ADR_B3_WIDTH 8 - -/* GM_ADR2_REG: GMAC station address register 2 */ -#define FR_AB_GM_ADR2 0x00000f10 -#define FRF_AB_GM_ADR_B4_LBN 24 -#define FRF_AB_GM_ADR_B4_WIDTH 8 -#define FRF_AB_GM_ADR_B5_LBN 16 -#define FRF_AB_GM_ADR_B5_WIDTH 8 - -/* GMF_CFG0_REG: GMAC FIFO configuration register 0 */ -#define FR_AB_GMF_CFG0 0x00000f20 -#define FRF_AB_GMF_FTFENRPLY_LBN 20 -#define FRF_AB_GMF_FTFENRPLY_WIDTH 1 -#define FRF_AB_GMF_STFENRPLY_LBN 19 -#define FRF_AB_GMF_STFENRPLY_WIDTH 1 -#define FRF_AB_GMF_FRFENRPLY_LBN 18 -#define FRF_AB_GMF_FRFENRPLY_WIDTH 1 -#define FRF_AB_GMF_SRFENRPLY_LBN 17 -#define FRF_AB_GMF_SRFENRPLY_WIDTH 1 -#define FRF_AB_GMF_WTMENRPLY_LBN 16 -#define FRF_AB_GMF_WTMENRPLY_WIDTH 1 -#define FRF_AB_GMF_FTFENREQ_LBN 12 -#define FRF_AB_GMF_FTFENREQ_WIDTH 1 -#define FRF_AB_GMF_STFENREQ_LBN 11 -#define FRF_AB_GMF_STFENREQ_WIDTH 1 -#define FRF_AB_GMF_FRFENREQ_LBN 10 -#define FRF_AB_GMF_FRFENREQ_WIDTH 1 -#define FRF_AB_GMF_SRFENREQ_LBN 9 -#define FRF_AB_GMF_SRFENREQ_WIDTH 1 -#define FRF_AB_GMF_WTMENREQ_LBN 8 -#define FRF_AB_GMF_WTMENREQ_WIDTH 1 -#define FRF_AB_GMF_HSTRSTFT_LBN 4 -#define FRF_AB_GMF_HSTRSTFT_WIDTH 1 -#define FRF_AB_GMF_HSTRSTST_LBN 3 -#define FRF_AB_GMF_HSTRSTST_WIDTH 1 -#define FRF_AB_GMF_HSTRSTFR_LBN 2 -#define FRF_AB_GMF_HSTRSTFR_WIDTH 1 -#define FRF_AB_GMF_HSTRSTSR_LBN 1 -#define FRF_AB_GMF_HSTRSTSR_WIDTH 1 -#define FRF_AB_GMF_HSTRSTWT_LBN 0 -#define FRF_AB_GMF_HSTRSTWT_WIDTH 1 - -/* GMF_CFG1_REG: GMAC FIFO configuration register 1 */ -#define FR_AB_GMF_CFG1 0x00000f30 -#define FRF_AB_GMF_CFGFRTH_LBN 16 -#define FRF_AB_GMF_CFGFRTH_WIDTH 5 -#define FRF_AB_GMF_CFGXOFFRTX_LBN 0 -#define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16 - -/* GMF_CFG2_REG: GMAC FIFO configuration register 2 */ -#define FR_AB_GMF_CFG2 0x00000f40 -#define FRF_AB_GMF_CFGHWM_LBN 16 -#define FRF_AB_GMF_CFGHWM_WIDTH 6 -#define FRF_AB_GMF_CFGLWM_LBN 0 -#define FRF_AB_GMF_CFGLWM_WIDTH 6 - -/* GMF_CFG3_REG: GMAC FIFO configuration register 3 */ -#define FR_AB_GMF_CFG3 0x00000f50 -#define FRF_AB_GMF_CFGHWMFT_LBN 16 -#define FRF_AB_GMF_CFGHWMFT_WIDTH 6 -#define FRF_AB_GMF_CFGFTTH_LBN 0 -#define FRF_AB_GMF_CFGFTTH_WIDTH 6 - -/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */ -#define FR_AB_GMF_CFG4 
0x00000f60 -#define FRF_AB_GMF_HSTFLTRFRM_LBN 0 -#define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18 - -/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */ -#define FR_AB_GMF_CFG5 0x00000f70 -#define FRF_AB_GMF_CFGHDPLX_LBN 22 -#define FRF_AB_GMF_CFGHDPLX_WIDTH 1 -#define FRF_AB_GMF_SRFULL_LBN 21 -#define FRF_AB_GMF_SRFULL_WIDTH 1 -#define FRF_AB_GMF_HSTSRFULLCLR_LBN 20 -#define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1 -#define FRF_AB_GMF_CFGBYTMODE_LBN 19 -#define FRF_AB_GMF_CFGBYTMODE_WIDTH 1 -#define FRF_AB_GMF_HSTDRPLT64_LBN 18 -#define FRF_AB_GMF_HSTDRPLT64_WIDTH 1 -#define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0 -#define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18 - -/* TX_SRC_MAC_TBL: Transmit IP source address filter table */ -#define FR_BB_TX_SRC_MAC_TBL 0x00001000 -#define FR_BB_TX_SRC_MAC_TBL_STEP 16 -#define FR_BB_TX_SRC_MAC_TBL_ROWS 16 -#define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64 -#define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48 -#define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0 -#define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48 - -/* TX_SRC_MAC_CTL_REG: Transmit MAC source address filter control */ -#define FR_BB_TX_SRC_MAC_CTL 0x00001100 -#define FRF_BB_TX_SRC_DROP_CTR_LBN 16 -#define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16 -#define FRF_BB_TX_SRC_FLTR_EN_LBN 15 -#define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1 -#define FRF_BB_TX_DROP_CTR_CLR_LBN 12 -#define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1 -#define FRF_BB_TX_MAC_QID_SEL_LBN 0 -#define FRF_BB_TX_MAC_QID_SEL_WIDTH 3 - -/* XM_ADR_LO_REG: XGMAC address register low */ -#define FR_AB_XM_ADR_LO 0x00001200 -#define FRF_AB_XM_ADR_LO_LBN 0 -#define FRF_AB_XM_ADR_LO_WIDTH 32 - -/* XM_ADR_HI_REG: XGMAC address register high */ -#define FR_AB_XM_ADR_HI 0x00001210 -#define FRF_AB_XM_ADR_HI_LBN 0 -#define FRF_AB_XM_ADR_HI_WIDTH 16 - -/* XM_GLB_CFG_REG: XGMAC global configuration */ -#define FR_AB_XM_GLB_CFG 0x00001220 -#define FRF_AB_XM_RMTFLT_GEN_LBN 17 -#define FRF_AB_XM_RMTFLT_GEN_WIDTH 1 -#define FRF_AB_XM_DEBUG_MODE_LBN 16 -#define FRF_AB_XM_DEBUG_MODE_WIDTH 1 -#define FRF_AB_XM_RX_STAT_EN_LBN 11 -#define FRF_AB_XM_RX_STAT_EN_WIDTH 1 -#define FRF_AB_XM_TX_STAT_EN_LBN 10 -#define FRF_AB_XM_TX_STAT_EN_WIDTH 1 -#define FRF_AB_XM_RX_JUMBO_MODE_LBN 6 -#define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1 -#define FRF_AB_XM_WAN_MODE_LBN 5 -#define FRF_AB_XM_WAN_MODE_WIDTH 1 -#define FRF_AB_XM_INTCLR_MODE_LBN 3 -#define FRF_AB_XM_INTCLR_MODE_WIDTH 1 -#define FRF_AB_XM_CORE_RST_LBN 0 -#define FRF_AB_XM_CORE_RST_WIDTH 1 - -/* XM_TX_CFG_REG: XGMAC transmit configuration */ -#define FR_AB_XM_TX_CFG 0x00001230 -#define FRF_AB_XM_TX_PROG_LBN 24 -#define FRF_AB_XM_TX_PROG_WIDTH 1 -#define FRF_AB_XM_IPG_LBN 16 -#define FRF_AB_XM_IPG_WIDTH 4 -#define FRF_AB_XM_FCNTL_LBN 10 -#define FRF_AB_XM_FCNTL_WIDTH 1 -#define FRF_AB_XM_TXCRC_LBN 8 -#define FRF_AB_XM_TXCRC_WIDTH 1 -#define FRF_AB_XM_EDRC_LBN 6 -#define FRF_AB_XM_EDRC_WIDTH 1 -#define FRF_AB_XM_AUTO_PAD_LBN 5 -#define FRF_AB_XM_AUTO_PAD_WIDTH 1 -#define FRF_AB_XM_TX_PRMBL_LBN 2 -#define FRF_AB_XM_TX_PRMBL_WIDTH 1 -#define FRF_AB_XM_TXEN_LBN 1 -#define FRF_AB_XM_TXEN_WIDTH 1 -#define FRF_AB_XM_TX_RST_LBN 0 -#define FRF_AB_XM_TX_RST_WIDTH 1 - -/* XM_RX_CFG_REG: XGMAC receive configuration */ -#define FR_AB_XM_RX_CFG 0x00001240 -#define FRF_AB_XM_PASS_LENERR_LBN 26 -#define FRF_AB_XM_PASS_LENERR_WIDTH 1 -#define FRF_AB_XM_PASS_CRC_ERR_LBN 25 -#define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1 -#define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24 -#define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1 -#define FRF_AB_XM_REJ_BCAST_LBN 20 -#define FRF_AB_XM_REJ_BCAST_WIDTH 1 -#define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11 -#define 
FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1 -#define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9 -#define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1 -#define FRF_AB_XM_AUTO_DEPAD_LBN 8 -#define FRF_AB_XM_AUTO_DEPAD_WIDTH 1 -#define FRF_AB_XM_RXCRC_LBN 3 -#define FRF_AB_XM_RXCRC_WIDTH 1 -#define FRF_AB_XM_RX_PRMBL_LBN 2 -#define FRF_AB_XM_RX_PRMBL_WIDTH 1 -#define FRF_AB_XM_RXEN_LBN 1 -#define FRF_AB_XM_RXEN_WIDTH 1 -#define FRF_AB_XM_RX_RST_LBN 0 -#define FRF_AB_XM_RX_RST_WIDTH 1 - -/* XM_MGT_INT_MASK: documentation to be written for sum_XM_MGT_INT_MASK */ -#define FR_AB_XM_MGT_INT_MASK 0x00001250 -#define FRF_AB_XM_MSK_STA_INTR_LBN 16 -#define FRF_AB_XM_MSK_STA_INTR_WIDTH 1 -#define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9 -#define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1 -#define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8 -#define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1 -#define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2 -#define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1 -#define FRF_AB_XM_MSK_RMTFLT_LBN 1 -#define FRF_AB_XM_MSK_RMTFLT_WIDTH 1 -#define FRF_AB_XM_MSK_LCLFLT_LBN 0 -#define FRF_AB_XM_MSK_LCLFLT_WIDTH 1 - -/* XM_FC_REG: XGMAC flow control register */ -#define FR_AB_XM_FC 0x00001270 -#define FRF_AB_XM_PAUSE_TIME_LBN 16 -#define FRF_AB_XM_PAUSE_TIME_WIDTH 16 -#define FRF_AB_XM_RX_MAC_STAT_LBN 11 -#define FRF_AB_XM_RX_MAC_STAT_WIDTH 1 -#define FRF_AB_XM_TX_MAC_STAT_LBN 10 -#define FRF_AB_XM_TX_MAC_STAT_WIDTH 1 -#define FRF_AB_XM_MCNTL_PASS_LBN 8 -#define FRF_AB_XM_MCNTL_PASS_WIDTH 2 -#define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6 -#define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1 -#define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5 -#define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1 -#define FRF_AB_XM_ZPAUSE_LBN 2 -#define FRF_AB_XM_ZPAUSE_WIDTH 1 -#define FRF_AB_XM_XMIT_PAUSE_LBN 1 -#define FRF_AB_XM_XMIT_PAUSE_WIDTH 1 -#define FRF_AB_XM_DIS_FCNTL_LBN 0 -#define FRF_AB_XM_DIS_FCNTL_WIDTH 1 - -/* XM_PAUSE_TIME_REG: XGMAC pause time register */ -#define FR_AB_XM_PAUSE_TIME 0x00001290 -#define FRF_AB_XM_TX_PAUSE_CNT_LBN 16 -#define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16 -#define FRF_AB_XM_RX_PAUSE_CNT_LBN 0 -#define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16 - -/* XM_TX_PARAM_REG: XGMAC transmit parameter register */ -#define FR_AB_XM_TX_PARAM 0x000012d0 -#define FRF_AB_XM_TX_JUMBO_MODE_LBN 31 -#define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1 -#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19 -#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11 -#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16 -#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3 -#define FRF_AB_XM_PAD_CHAR_LBN 0 -#define FRF_AB_XM_PAD_CHAR_WIDTH 8 - -/* XM_RX_PARAM_REG: XGMAC receive parameter register */ -#define FR_AB_XM_RX_PARAM 0x000012e0 -#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3 -#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11 -#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0 -#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3 - -/* XM_MGT_INT_MSK_REG: XGMAC management interrupt mask register */ -#define FR_AB_XM_MGT_INT_MSK 0x000012f0 -#define FRF_AB_XM_STAT_CNTR_OF_LBN 9 -#define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1 -#define FRF_AB_XM_STAT_CNTR_HF_LBN 8 -#define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1 -#define FRF_AB_XM_PRMBLE_ERR_LBN 2 -#define FRF_AB_XM_PRMBLE_ERR_WIDTH 1 -#define FRF_AB_XM_RMTFLT_LBN 1 -#define FRF_AB_XM_RMTFLT_WIDTH 1 -#define FRF_AB_XM_LCLFLT_LBN 0 -#define FRF_AB_XM_LCLFLT_WIDTH 1 - -/* XX_PWR_RST_REG: XGXS/XAUI powerdown/reset register */ -#define FR_AB_XX_PWR_RST 0x00001300 -#define FRF_AB_XX_PWRDND_SIG_LBN 31 -#define FRF_AB_XX_PWRDND_SIG_WIDTH 1 -#define FRF_AB_XX_PWRDNC_SIG_LBN 30 -#define FRF_AB_XX_PWRDNC_SIG_WIDTH 1 -#define FRF_AB_XX_PWRDNB_SIG_LBN 29 -#define 
FRF_AB_XX_PWRDNB_SIG_WIDTH 1 -#define FRF_AB_XX_PWRDNA_SIG_LBN 28 -#define FRF_AB_XX_PWRDNA_SIG_WIDTH 1 -#define FRF_AB_XX_SIM_MODE_LBN 27 -#define FRF_AB_XX_SIM_MODE_WIDTH 1 -#define FRF_AB_XX_RSTPLLCD_SIG_LBN 25 -#define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1 -#define FRF_AB_XX_RSTPLLAB_SIG_LBN 24 -#define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1 -#define FRF_AB_XX_RESETD_SIG_LBN 23 -#define FRF_AB_XX_RESETD_SIG_WIDTH 1 -#define FRF_AB_XX_RESETC_SIG_LBN 22 -#define FRF_AB_XX_RESETC_SIG_WIDTH 1 -#define FRF_AB_XX_RESETB_SIG_LBN 21 -#define FRF_AB_XX_RESETB_SIG_WIDTH 1 -#define FRF_AB_XX_RESETA_SIG_LBN 20 -#define FRF_AB_XX_RESETA_SIG_WIDTH 1 -#define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18 -#define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1 -#define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17 -#define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1 -#define FRF_AB_XX_SD_RST_ACT_LBN 16 -#define FRF_AB_XX_SD_RST_ACT_WIDTH 1 -#define FRF_AB_XX_PWRDND_EN_LBN 15 -#define FRF_AB_XX_PWRDND_EN_WIDTH 1 -#define FRF_AB_XX_PWRDNC_EN_LBN 14 -#define FRF_AB_XX_PWRDNC_EN_WIDTH 1 -#define FRF_AB_XX_PWRDNB_EN_LBN 13 -#define FRF_AB_XX_PWRDNB_EN_WIDTH 1 -#define FRF_AB_XX_PWRDNA_EN_LBN 12 -#define FRF_AB_XX_PWRDNA_EN_WIDTH 1 -#define FRF_AB_XX_RSTPLLCD_EN_LBN 9 -#define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1 -#define FRF_AB_XX_RSTPLLAB_EN_LBN 8 -#define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1 -#define FRF_AB_XX_RESETD_EN_LBN 7 -#define FRF_AB_XX_RESETD_EN_WIDTH 1 -#define FRF_AB_XX_RESETC_EN_LBN 6 -#define FRF_AB_XX_RESETC_EN_WIDTH 1 -#define FRF_AB_XX_RESETB_EN_LBN 5 -#define FRF_AB_XX_RESETB_EN_WIDTH 1 -#define FRF_AB_XX_RESETA_EN_LBN 4 -#define FRF_AB_XX_RESETA_EN_WIDTH 1 -#define FRF_AB_XX_RSTXGXSRX_EN_LBN 2 -#define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1 -#define FRF_AB_XX_RSTXGXSTX_EN_LBN 1 -#define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1 -#define FRF_AB_XX_RST_XX_EN_LBN 0 -#define FRF_AB_XX_RST_XX_EN_WIDTH 1 - -/* XX_SD_CTL_REG: XGXS/XAUI powerdown/reset control register */ -#define FR_AB_XX_SD_CTL 0x00001310 -#define FRF_AB_XX_TERMADJ1_LBN 17 -#define FRF_AB_XX_TERMADJ1_WIDTH 1 -#define FRF_AB_XX_TERMADJ0_LBN 16 -#define FRF_AB_XX_TERMADJ0_WIDTH 1 -#define FRF_AB_XX_HIDRVD_LBN 15 -#define FRF_AB_XX_HIDRVD_WIDTH 1 -#define FRF_AB_XX_LODRVD_LBN 14 -#define FRF_AB_XX_LODRVD_WIDTH 1 -#define FRF_AB_XX_HIDRVC_LBN 13 -#define FRF_AB_XX_HIDRVC_WIDTH 1 -#define FRF_AB_XX_LODRVC_LBN 12 -#define FRF_AB_XX_LODRVC_WIDTH 1 -#define FRF_AB_XX_HIDRVB_LBN 11 -#define FRF_AB_XX_HIDRVB_WIDTH 1 -#define FRF_AB_XX_LODRVB_LBN 10 -#define FRF_AB_XX_LODRVB_WIDTH 1 -#define FRF_AB_XX_HIDRVA_LBN 9 -#define FRF_AB_XX_HIDRVA_WIDTH 1 -#define FRF_AB_XX_LODRVA_LBN 8 -#define FRF_AB_XX_LODRVA_WIDTH 1 -#define FRF_AB_XX_LPBKD_LBN 3 -#define FRF_AB_XX_LPBKD_WIDTH 1 -#define FRF_AB_XX_LPBKC_LBN 2 -#define FRF_AB_XX_LPBKC_WIDTH 1 -#define FRF_AB_XX_LPBKB_LBN 1 -#define FRF_AB_XX_LPBKB_WIDTH 1 -#define FRF_AB_XX_LPBKA_LBN 0 -#define FRF_AB_XX_LPBKA_WIDTH 1 - -/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */ -#define FR_AB_XX_TXDRV_CTL 0x00001320 -#define FRF_AB_XX_DEQD_LBN 28 -#define FRF_AB_XX_DEQD_WIDTH 4 -#define FRF_AB_XX_DEQC_LBN 24 -#define FRF_AB_XX_DEQC_WIDTH 4 -#define FRF_AB_XX_DEQB_LBN 20 -#define FRF_AB_XX_DEQB_WIDTH 4 -#define FRF_AB_XX_DEQA_LBN 16 -#define FRF_AB_XX_DEQA_WIDTH 4 -#define FRF_AB_XX_DTXD_LBN 12 -#define FRF_AB_XX_DTXD_WIDTH 4 -#define FRF_AB_XX_DTXC_LBN 8 -#define FRF_AB_XX_DTXC_WIDTH 4 -#define FRF_AB_XX_DTXB_LBN 4 -#define FRF_AB_XX_DTXB_WIDTH 4 -#define FRF_AB_XX_DTXA_LBN 0 -#define FRF_AB_XX_DTXA_WIDTH 4 - -/* XX_PRBS_CTL_REG: documentation to be written for sum_XX_PRBS_CTL_REG */ 
-#define FR_AB_XX_PRBS_CTL 0x00001330 -#define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30 -#define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2 -#define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29 -#define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1 -#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28 -#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1 -#define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26 -#define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2 -#define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25 -#define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1 -#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24 -#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1 -#define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22 -#define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2 -#define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21 -#define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1 -#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20 -#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1 -#define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18 -#define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2 -#define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17 -#define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1 -#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16 -#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1 -#define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14 -#define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2 -#define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13 -#define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1 -#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12 -#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1 -#define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10 -#define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2 -#define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9 -#define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1 -#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8 -#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1 -#define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6 -#define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2 -#define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5 -#define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1 -#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4 -#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1 -#define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2 -#define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2 -#define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1 -#define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1 -#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0 -#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1 - -/* XX_PRBS_CHK_REG: documentation to be written for sum_XX_PRBS_CHK_REG */ -#define FR_AB_XX_PRBS_CHK 0x00001340 -#define FRF_AB_XX_REV_LB_EN_LBN 16 -#define FRF_AB_XX_REV_LB_EN_WIDTH 1 -#define FRF_AB_XX_CH3_DEG_DET_LBN 15 -#define FRF_AB_XX_CH3_DEG_DET_WIDTH 1 -#define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14 -#define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1 -#define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13 -#define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1 -#define FRF_AB_XX_CH3_ERR_CHK_LBN 12 -#define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1 -#define FRF_AB_XX_CH2_DEG_DET_LBN 11 -#define FRF_AB_XX_CH2_DEG_DET_WIDTH 1 -#define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10 -#define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1 -#define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9 -#define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1 -#define FRF_AB_XX_CH2_ERR_CHK_LBN 8 -#define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1 -#define FRF_AB_XX_CH1_DEG_DET_LBN 7 -#define FRF_AB_XX_CH1_DEG_DET_WIDTH 1 -#define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6 -#define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1 -#define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5 -#define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1 -#define FRF_AB_XX_CH1_ERR_CHK_LBN 4 -#define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1 -#define FRF_AB_XX_CH0_DEG_DET_LBN 3 -#define FRF_AB_XX_CH0_DEG_DET_WIDTH 1 -#define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2 -#define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1 -#define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1 -#define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1 -#define FRF_AB_XX_CH0_ERR_CHK_LBN 0 -#define 
FRF_AB_XX_CH0_ERR_CHK_WIDTH 1 - -/* XX_PRBS_ERR_REG: documentation to be written for sum_XX_PRBS_ERR_REG */ -#define FR_AB_XX_PRBS_ERR 0x00001350 -#define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24 -#define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8 -#define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16 -#define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8 -#define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8 -#define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8 -#define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0 -#define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8 - -/* XX_CORE_STAT_REG: XAUI XGXS core status register */ -#define FR_AB_XX_CORE_STAT 0x00001360 -#define FRF_AB_XX_FORCE_SIG3_LBN 31 -#define FRF_AB_XX_FORCE_SIG3_WIDTH 1 -#define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30 -#define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1 -#define FRF_AB_XX_FORCE_SIG2_LBN 29 -#define FRF_AB_XX_FORCE_SIG2_WIDTH 1 -#define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28 -#define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1 -#define FRF_AB_XX_FORCE_SIG1_LBN 27 -#define FRF_AB_XX_FORCE_SIG1_WIDTH 1 -#define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26 -#define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1 -#define FRF_AB_XX_FORCE_SIG0_LBN 25 -#define FRF_AB_XX_FORCE_SIG0_WIDTH 1 -#define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24 -#define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1 -#define FRF_AB_XX_XGXS_LB_EN_LBN 23 -#define FRF_AB_XX_XGXS_LB_EN_WIDTH 1 -#define FRF_AB_XX_XGMII_LB_EN_LBN 22 -#define FRF_AB_XX_XGMII_LB_EN_WIDTH 1 -#define FRF_AB_XX_MATCH_FAULT_LBN 21 -#define FRF_AB_XX_MATCH_FAULT_WIDTH 1 -#define FRF_AB_XX_ALIGN_DONE_LBN 20 -#define FRF_AB_XX_ALIGN_DONE_WIDTH 1 -#define FRF_AB_XX_SYNC_STAT3_LBN 19 -#define FRF_AB_XX_SYNC_STAT3_WIDTH 1 -#define FRF_AB_XX_SYNC_STAT2_LBN 18 -#define FRF_AB_XX_SYNC_STAT2_WIDTH 1 -#define FRF_AB_XX_SYNC_STAT1_LBN 17 -#define FRF_AB_XX_SYNC_STAT1_WIDTH 1 -#define FRF_AB_XX_SYNC_STAT0_LBN 16 -#define FRF_AB_XX_SYNC_STAT0_WIDTH 1 -#define FRF_AB_XX_COMMA_DET_CH3_LBN 15 -#define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1 -#define FRF_AB_XX_COMMA_DET_CH2_LBN 14 -#define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1 -#define FRF_AB_XX_COMMA_DET_CH1_LBN 13 -#define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1 -#define FRF_AB_XX_COMMA_DET_CH0_LBN 12 -#define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1 -#define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11 -#define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1 -#define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10 -#define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1 -#define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9 -#define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1 -#define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8 -#define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1 -#define FRF_AB_XX_CHAR_ERR_CH3_LBN 7 -#define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1 -#define FRF_AB_XX_CHAR_ERR_CH2_LBN 6 -#define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1 -#define FRF_AB_XX_CHAR_ERR_CH1_LBN 5 -#define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1 -#define FRF_AB_XX_CHAR_ERR_CH0_LBN 4 -#define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1 -#define FRF_AB_XX_DISPERR_CH3_LBN 3 -#define FRF_AB_XX_DISPERR_CH3_WIDTH 1 -#define FRF_AB_XX_DISPERR_CH2_LBN 2 -#define FRF_AB_XX_DISPERR_CH2_WIDTH 1 -#define FRF_AB_XX_DISPERR_CH1_LBN 1 -#define FRF_AB_XX_DISPERR_CH1_WIDTH 1 -#define FRF_AB_XX_DISPERR_CH0_LBN 0 -#define FRF_AB_XX_DISPERR_CH0_WIDTH 1 - -/* RX_DESC_PTR_TBL_KER: Receive descriptor pointer table */ -#define FR_AA_RX_DESC_PTR_TBL_KER 0x00011800 -#define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16 -#define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4 -/* RX_DESC_PTR_TBL: Receive descriptor pointer table */ -#define FR_BZ_RX_DESC_PTR_TBL 0x00f40000 -#define FR_BZ_RX_DESC_PTR_TBL_STEP 16 -#define FR_BB_RX_DESC_PTR_TBL_ROWS 4096 -#define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024 -#define FRF_CZ_RX_HDR_SPLIT_LBN 90 -#define 
FRF_CZ_RX_HDR_SPLIT_WIDTH 1 -#define FRF_AA_RX_RESET_LBN 89 -#define FRF_AA_RX_RESET_WIDTH 1 -#define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88 -#define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1 -#define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87 -#define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1 -#define FRF_AZ_RX_DESC_PREF_ACT_LBN 86 -#define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1 -#define FRF_AZ_RX_DC_HW_RPTR_LBN 80 -#define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6 -#define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68 -#define FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12 -#define FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56 -#define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12 -#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36 -#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20 -#define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24 -#define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12 -#define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10 -#define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14 -#define FRF_AZ_RX_DESCQ_LABEL_LBN 5 -#define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5 -#define FRF_AZ_RX_DESCQ_SIZE_LBN 3 -#define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2 -#define FFE_AZ_RX_DESCQ_SIZE_4K 3 -#define FFE_AZ_RX_DESCQ_SIZE_2K 2 -#define FFE_AZ_RX_DESCQ_SIZE_1K 1 -#define FFE_AZ_RX_DESCQ_SIZE_512 0 -#define FRF_AZ_RX_DESCQ_TYPE_LBN 2 -#define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1 -#define FRF_AZ_RX_DESCQ_JUMBO_LBN 1 -#define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1 -#define FRF_AZ_RX_DESCQ_EN_LBN 0 -#define FRF_AZ_RX_DESCQ_EN_WIDTH 1 - -/* TX_DESC_PTR_TBL_KER: Transmit descriptor pointer */ -#define FR_AA_TX_DESC_PTR_TBL_KER 0x00011900 -#define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16 -#define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8 -/* TX_DESC_PTR_TBL: Transmit descriptor pointer */ -#define FR_BZ_TX_DESC_PTR_TBL 0x00f50000 -#define FR_BZ_TX_DESC_PTR_TBL_STEP 16 -#define FR_BB_TX_DESC_PTR_TBL_ROWS 4096 -#define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024 -#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94 -#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2 -#define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93 -#define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1 -#define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92 -#define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1 -#define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91 -#define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1 -#define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90 -#define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1 -#define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89 -#define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1 -#define FRF_AZ_TX_DESCQ_EN_LBN 88 -#define FRF_AZ_TX_DESCQ_EN_WIDTH 1 -#define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87 -#define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1 -#define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86 -#define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1 -#define FRF_AZ_TX_DC_HW_RPTR_LBN 80 -#define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6 -#define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68 -#define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12 -#define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56 -#define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12 -#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36 -#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20 -#define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24 -#define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12 -#define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10 -#define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14 -#define FRF_AZ_TX_DESCQ_LABEL_LBN 5 -#define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5 -#define FRF_AZ_TX_DESCQ_SIZE_LBN 3 -#define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2 -#define FFE_AZ_TX_DESCQ_SIZE_4K 3 -#define FFE_AZ_TX_DESCQ_SIZE_2K 2 -#define FFE_AZ_TX_DESCQ_SIZE_1K 1 -#define FFE_AZ_TX_DESCQ_SIZE_512 0 -#define FRF_AZ_TX_DESCQ_TYPE_LBN 1 -#define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2 -#define FRF_AZ_TX_DESCQ_FLUSH_LBN 0 -#define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1 - -/* EVQ_PTR_TBL_KER: Event queue pointer table */ -#define FR_AA_EVQ_PTR_TBL_KER 0x00011a00 -#define FR_AA_EVQ_PTR_TBL_KER_STEP 16 -#define 
FR_AA_EVQ_PTR_TBL_KER_ROWS 4 -/* EVQ_PTR_TBL: Event queue pointer table */ -#define FR_BZ_EVQ_PTR_TBL 0x00f60000 -#define FR_BZ_EVQ_PTR_TBL_STEP 16 -#define FR_CZ_EVQ_PTR_TBL_ROWS 1024 -#define FR_BB_EVQ_PTR_TBL_ROWS 4096 -#define FRF_BZ_EVQ_RPTR_IGN_LBN 40 -#define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1 -#define FRF_AB_EVQ_WKUP_OR_INT_EN_LBN 39 -#define FRF_AB_EVQ_WKUP_OR_INT_EN_WIDTH 1 -#define FRF_CZ_EVQ_DOS_PROTECT_EN_LBN 39 -#define FRF_CZ_EVQ_DOS_PROTECT_EN_WIDTH 1 -#define FRF_AZ_EVQ_NXT_WPTR_LBN 24 -#define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15 -#define FRF_AZ_EVQ_EN_LBN 23 -#define FRF_AZ_EVQ_EN_WIDTH 1 -#define FRF_AZ_EVQ_SIZE_LBN 20 -#define FRF_AZ_EVQ_SIZE_WIDTH 3 -#define FFE_AZ_EVQ_SIZE_32K 6 -#define FFE_AZ_EVQ_SIZE_16K 5 -#define FFE_AZ_EVQ_SIZE_8K 4 -#define FFE_AZ_EVQ_SIZE_4K 3 -#define FFE_AZ_EVQ_SIZE_2K 2 -#define FFE_AZ_EVQ_SIZE_1K 1 -#define FFE_AZ_EVQ_SIZE_512 0 -#define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0 -#define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20 - -/* BUF_HALF_TBL_KER: Buffer table in half buffer table mode direct access by driver */ -#define FR_AA_BUF_HALF_TBL_KER 0x00018000 -#define FR_AA_BUF_HALF_TBL_KER_STEP 8 -#define FR_AA_BUF_HALF_TBL_KER_ROWS 4096 -/* BUF_HALF_TBL: Buffer table in half buffer table mode direct access by driver */ -#define FR_BZ_BUF_HALF_TBL 0x00800000 -#define FR_BZ_BUF_HALF_TBL_STEP 8 -#define FR_CZ_BUF_HALF_TBL_ROWS 147456 -#define FR_BB_BUF_HALF_TBL_ROWS 524288 -#define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44 -#define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20 -#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32 -#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12 -#define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12 -#define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20 -#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0 -#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12 - -/* BUF_FULL_TBL_KER: Buffer table in full buffer table mode direct access by driver */ -#define FR_AA_BUF_FULL_TBL_KER 0x00018000 -#define FR_AA_BUF_FULL_TBL_KER_STEP 8 -#define FR_AA_BUF_FULL_TBL_KER_ROWS 4096 -/* BUF_FULL_TBL: Buffer table in full buffer table mode direct access by driver */ -#define FR_BZ_BUF_FULL_TBL 0x00800000 -#define FR_BZ_BUF_FULL_TBL_STEP 8 -#define FR_CZ_BUF_FULL_TBL_ROWS 147456 -#define FR_BB_BUF_FULL_TBL_ROWS 917504 -#define FRF_AZ_BUF_FULL_UNUSED_LBN 51 -#define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13 -#define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50 -#define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1 -#define FRF_AZ_BUF_ADR_REGION_LBN 48 -#define FRF_AZ_BUF_ADR_REGION_WIDTH 2 -#define FFE_AZ_BUF_ADR_REGN3 3 -#define FFE_AZ_BUF_ADR_REGN2 2 -#define FFE_AZ_BUF_ADR_REGN1 1 -#define FFE_AZ_BUF_ADR_REGN0 0 -#define FRF_AZ_BUF_ADR_FBUF_LBN 14 -#define FRF_AZ_BUF_ADR_FBUF_WIDTH 34 -#define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0 -#define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14 - -/* RX_FILTER_TBL0: TCP/IPv4 Receive filter table */ -#define FR_BZ_RX_FILTER_TBL0 0x00f00000 -#define FR_BZ_RX_FILTER_TBL0_STEP 32 -#define FR_BZ_RX_FILTER_TBL0_ROWS 8192 -/* RX_FILTER_TBL1: TCP/IPv4 Receive filter table */ -#define FR_BB_RX_FILTER_TBL1 0x00f00010 -#define FR_BB_RX_FILTER_TBL1_STEP 32 -#define FR_BB_RX_FILTER_TBL1_ROWS 8192 -#define FRF_BZ_RSS_EN_LBN 110 -#define FRF_BZ_RSS_EN_WIDTH 1 -#define FRF_BZ_SCATTER_EN_LBN 109 -#define FRF_BZ_SCATTER_EN_WIDTH 1 -#define FRF_BZ_TCP_UDP_LBN 108 -#define FRF_BZ_TCP_UDP_WIDTH 1 -#define FRF_BZ_RXQ_ID_LBN 96 -#define FRF_BZ_RXQ_ID_WIDTH 12 -#define FRF_BZ_DEST_IP_LBN 64 -#define FRF_BZ_DEST_IP_WIDTH 32 -#define FRF_BZ_DEST_PORT_TCP_LBN 48 -#define FRF_BZ_DEST_PORT_TCP_WIDTH 16 -#define FRF_BZ_SRC_IP_LBN 16 -#define FRF_BZ_SRC_IP_WIDTH 32 -#define 
FRF_BZ_SRC_TCP_DEST_UDP_LBN 0 -#define FRF_BZ_SRC_TCP_DEST_UDP_WIDTH 16 - -/* RX_MAC_FILTER_TBL0: Receive Ethernet filter table */ -#define FR_CZ_RX_MAC_FILTER_TBL0 0x00f00010 -#define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32 -#define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512 -#define FRF_CZ_RMFT_RSS_EN_LBN 75 -#define FRF_CZ_RMFT_RSS_EN_WIDTH 1 -#define FRF_CZ_RMFT_SCATTER_EN_LBN 74 -#define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1 -#define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73 -#define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1 -#define FRF_CZ_RMFT_RXQ_ID_LBN 61 -#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12 -#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60 -#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1 -#define FRF_CZ_RMFT_DEST_MAC_LBN 12 -#define FRF_CZ_RMFT_DEST_MAC_WIDTH 48 -#define FRF_CZ_RMFT_VLAN_ID_LBN 0 -#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12 - -/* TIMER_TBL: Timer table */ -#define FR_BZ_TIMER_TBL 0x00f70000 -#define FR_BZ_TIMER_TBL_STEP 16 -#define FR_CZ_TIMER_TBL_ROWS 1024 -#define FR_BB_TIMER_TBL_ROWS 4096 -#define FRF_CZ_TIMER_Q_EN_LBN 33 -#define FRF_CZ_TIMER_Q_EN_WIDTH 1 -#define FRF_CZ_INT_ARMD_LBN 32 -#define FRF_CZ_INT_ARMD_WIDTH 1 -#define FRF_CZ_INT_PEND_LBN 31 -#define FRF_CZ_INT_PEND_WIDTH 1 -#define FRF_CZ_HOST_NOTIFY_MODE_LBN 30 -#define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1 -#define FRF_CZ_RELOAD_TIMER_VAL_LBN 16 -#define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14 -#define FRF_CZ_TIMER_MODE_LBN 14 -#define FRF_CZ_TIMER_MODE_WIDTH 2 -#define FFE_CZ_TIMER_MODE_INT_HLDOFF 3 -#define FFE_CZ_TIMER_MODE_TRIG_START 2 -#define FFE_CZ_TIMER_MODE_IMMED_START 1 -#define FFE_CZ_TIMER_MODE_DIS 0 -#define FRF_BB_TIMER_MODE_LBN 12 -#define FRF_BB_TIMER_MODE_WIDTH 2 -#define FFE_BB_TIMER_MODE_INT_HLDOFF 2 -#define FFE_BB_TIMER_MODE_TRIG_START 2 -#define FFE_BB_TIMER_MODE_IMMED_START 1 -#define FFE_BB_TIMER_MODE_DIS 0 -#define FRF_CZ_TIMER_VAL_LBN 0 -#define FRF_CZ_TIMER_VAL_WIDTH 14 -#define FRF_BB_TIMER_VAL_LBN 0 -#define FRF_BB_TIMER_VAL_WIDTH 12 - -/* TX_PACE_TBL: Transmit pacing table */ -#define FR_BZ_TX_PACE_TBL 0x00f80000 -#define FR_BZ_TX_PACE_TBL_STEP 16 -#define FR_CZ_TX_PACE_TBL_ROWS 1024 -#define FR_BB_TX_PACE_TBL_ROWS 4096 -#define FRF_BZ_TX_PACE_LBN 0 -#define FRF_BZ_TX_PACE_WIDTH 5 - -/* RX_INDIRECTION_TBL: RX Indirection Table */ -#define FR_BZ_RX_INDIRECTION_TBL 0x00fb0000 -#define FR_BZ_RX_INDIRECTION_TBL_STEP 16 -#define FR_BZ_RX_INDIRECTION_TBL_ROWS 128 -#define FRF_BZ_IT_QUEUE_LBN 0 -#define FRF_BZ_IT_QUEUE_WIDTH 6 - -/* TX_FILTER_TBL0: TCP/IPv4 Transmit filter table */ -#define FR_CZ_TX_FILTER_TBL0 0x00fc0000 -#define FR_CZ_TX_FILTER_TBL0_STEP 16 -#define FR_CZ_TX_FILTER_TBL0_ROWS 8192 -#define FRF_CZ_TIFT_TCP_UDP_LBN 108 -#define FRF_CZ_TIFT_TCP_UDP_WIDTH 1 -#define FRF_CZ_TIFT_TXQ_ID_LBN 96 -#define FRF_CZ_TIFT_TXQ_ID_WIDTH 12 -#define FRF_CZ_TIFT_DEST_IP_LBN 64 -#define FRF_CZ_TIFT_DEST_IP_WIDTH 32 -#define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48 -#define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16 -#define FRF_CZ_TIFT_SRC_IP_LBN 16 -#define FRF_CZ_TIFT_SRC_IP_WIDTH 32 -#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0 -#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16 - -/* TX_MAC_FILTER_TBL0: Transmit Ethernet filter table */ -#define FR_CZ_TX_MAC_FILTER_TBL0 0x00fe0000 -#define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16 -#define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512 -#define FRF_CZ_TMFT_TXQ_ID_LBN 61 -#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12 -#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60 -#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1 -#define FRF_CZ_TMFT_SRC_MAC_LBN 12 -#define FRF_CZ_TMFT_SRC_MAC_WIDTH 48 -#define FRF_CZ_TMFT_VLAN_ID_LBN 0 -#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12 - 
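[Note on the table definitions above: each FR_* table is a register array described by a base address plus a *_STEP byte stride and a per-revision *_ROWS count, so row i lives at BASE + i * STEP. A minimal C sketch of that addressing convention, using the TX_MAC_FILTER_TBL0 values just defined; tx_mac_filter_addr is a hypothetical illustrative helper, not a function from the sfc driver:

    #include <assert.h>
    #include <stdint.h>

    /* Byte address of row 'index' of TX_MAC_FILTER_TBL0 on C..Z
     * revisions, following the BASE + index * STEP convention the
     * FR_* table definitions imply. */
    static inline uint32_t tx_mac_filter_addr(uint32_t index)
    {
            assert(index < 512);    /* FR_CZ_TX_MAC_FILTER_TBL0_ROWS */
            return 0x00fe0000 +     /* FR_CZ_TX_MAC_FILTER_TBL0 */
                   index * 16;      /* FR_CZ_TX_MAC_FILTER_TBL0_STEP */
    }
]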
-/* MC_TREG_SMEM: MC Shared Memory */ -#define FR_CZ_MC_TREG_SMEM 0x00ff0000 -#define FR_CZ_MC_TREG_SMEM_STEP 4 -#define FR_CZ_MC_TREG_SMEM_ROWS 512 -#define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0 -#define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32 - -/* MSIX_VECTOR_TABLE: MSIX Vector Table */ -#define FR_BB_MSIX_VECTOR_TABLE 0x00ff0000 -#define FR_BZ_MSIX_VECTOR_TABLE_STEP 16 -#define FR_BB_MSIX_VECTOR_TABLE_ROWS 64 -/* MSIX_VECTOR_TABLE: MSIX Vector Table */ -#define FR_CZ_MSIX_VECTOR_TABLE 0x00000000 -/* FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */ -#define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024 -#define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97 -#define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31 -#define FRF_BZ_MSIX_VECTOR_MASK_LBN 96 -#define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1 -#define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64 -#define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32 -#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32 -#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32 -#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0 -#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32 - -/* MSIX_PBA_TABLE: MSIX Pending Bit Array */ -#define FR_BB_MSIX_PBA_TABLE 0x00ff2000 -#define FR_BZ_MSIX_PBA_TABLE_STEP 4 -#define FR_BB_MSIX_PBA_TABLE_ROWS 2 -/* MSIX_PBA_TABLE: MSIX Pending Bit Array */ -#define FR_CZ_MSIX_PBA_TABLE 0x00008000 -/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */ -#define FR_CZ_MSIX_PBA_TABLE_ROWS 32 -#define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0 -#define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32 - -/* SRM_DBG_REG: SRAM debug access */ -#define FR_BZ_SRM_DBG 0x03000000 -#define FR_BZ_SRM_DBG_STEP 8 -#define FR_CZ_SRM_DBG_ROWS 262144 -#define FR_BB_SRM_DBG_ROWS 2097152 -#define FRF_BZ_SRM_DBG_LBN 0 -#define FRF_BZ_SRM_DBG_WIDTH 64 - -/* TB_MSIX_PBA_TABLE: MSIX Pending Bit Array */ -#define FR_CZ_TB_MSIX_PBA_TABLE 0x00008000 -#define FR_CZ_TB_MSIX_PBA_TABLE_STEP 4 -#define FR_CZ_TB_MSIX_PBA_TABLE_ROWS 1024 -#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_LBN 0 -#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_WIDTH 32 - -/* DRIVER_EV */ -#define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56 -#define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4 -#define FSE_BZ_TX_DSC_ERROR_EV 15 -#define FSE_BZ_RX_DSC_ERROR_EV 14 -#define FSE_AA_RX_RECOVER_EV 11 -#define FSE_AZ_TIMER_EV 10 -#define FSE_AZ_TX_PKT_NON_TCP_UDP 9 -#define FSE_AZ_WAKE_UP_EV 6 -#define FSE_AZ_SRM_UPD_DONE_EV 5 -#define FSE_AB_EVQ_NOT_EN_EV 3 -#define FSE_AZ_EVQ_INIT_DONE_EV 2 -#define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1 -#define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0 -#define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0 -#define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14 - -/* EVENT_ENTRY */ -#define FSF_AZ_EV_CODE_LBN 60 -#define FSF_AZ_EV_CODE_WIDTH 4 -#define FSE_CZ_EV_CODE_MCDI_EV 12 -#define FSE_CZ_EV_CODE_USER_EV 8 -#define FSE_AZ_EV_CODE_DRV_GEN_EV 7 -#define FSE_AZ_EV_CODE_GLOBAL_EV 6 -#define FSE_AZ_EV_CODE_DRIVER_EV 5 -#define FSE_AZ_EV_CODE_TX_EV 2 -#define FSE_AZ_EV_CODE_RX_EV 0 -#define FSF_AZ_EV_DATA_LBN 0 -#define FSF_AZ_EV_DATA_WIDTH 60 - -/* GLOBAL_EV */ -#define FSF_BB_GLB_EV_RX_RECOVERY_LBN 12 -#define FSF_BB_GLB_EV_RX_RECOVERY_WIDTH 1 -#define FSF_AA_GLB_EV_RX_RECOVERY_LBN 11 -#define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1 -#define FSF_BB_GLB_EV_XG_MGT_INTR_LBN 11 -#define FSF_BB_GLB_EV_XG_MGT_INTR_WIDTH 1 -#define FSF_AB_GLB_EV_XFP_PHY0_INTR_LBN 10 -#define FSF_AB_GLB_EV_XFP_PHY0_INTR_WIDTH 1 -#define FSF_AB_GLB_EV_XG_PHY0_INTR_LBN 9 -#define FSF_AB_GLB_EV_XG_PHY0_INTR_WIDTH 1 -#define FSF_AB_GLB_EV_G_PHY0_INTR_LBN 7 -#define FSF_AB_GLB_EV_G_PHY0_INTR_WIDTH 1 - -/* LEGACY_INT_VEC */ -#define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64 -#define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1 -#define 
FSF_AZ_NET_IVEC_INT_Q_LBN 40 -#define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4 -#define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32 -#define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1 -#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1 -#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1 -#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0 -#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1 - -/* MC_XGMAC_FLTR_RULE_DEF */ -#define FSF_CZ_MC_XFRC_MODE_LBN 416 -#define FSF_CZ_MC_XFRC_MODE_WIDTH 1 -#define FSE_CZ_MC_XFRC_MODE_LAYERED 1 -#define FSE_CZ_MC_XFRC_MODE_SIMPLE 0 -#define FSF_CZ_MC_XFRC_HASH_LBN 384 -#define FSF_CZ_MC_XFRC_HASH_WIDTH 32 -#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_LBN 256 -#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_WIDTH 128 -#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_LBN 128 -#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_WIDTH 128 -#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_LBN 0 -#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_WIDTH 128 - -/* RX_EV */ -#define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58 -#define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1 -#define FSF_CZ_RX_EV_IPV6_PKT_LBN 57 -#define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1 -#define FSF_AZ_RX_EV_PKT_OK_LBN 56 -#define FSF_AZ_RX_EV_PKT_OK_WIDTH 1 -#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55 -#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1 -#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54 -#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1 -#define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53 -#define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1 -#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52 -#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1 -#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51 -#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1 -#define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50 -#define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1 -#define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49 -#define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1 -#define FSF_AA_RX_EV_DRIB_NIB_LBN 49 -#define FSF_AA_RX_EV_DRIB_NIB_WIDTH 1 -#define FSF_AZ_RX_EV_TOBE_DISC_LBN 47 -#define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1 -#define FSF_AZ_RX_EV_PKT_TYPE_LBN 44 -#define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3 -#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5 -#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4 -#define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3 -#define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2 -#define FSE_AZ_RX_EV_PKT_TYPE_LLC 1 -#define FSE_AZ_RX_EV_PKT_TYPE_ETH 0 -#define FSF_AZ_RX_EV_HDR_TYPE_LBN 42 -#define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2 -#define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3 -#define FSE_AB_RX_EV_HDR_TYPE_IPV4_OTHER 2 -#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2 -#define FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP 1 -#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1 -#define FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP 0 -#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0 -#define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41 -#define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1 -#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40 -#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1 -#define FSF_AZ_RX_EV_MCAST_PKT_LBN 39 -#define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1 -#define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37 -#define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1 -#define FSF_AZ_RX_EV_Q_LABEL_LBN 32 -#define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5 -#define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31 -#define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1 -#define FSF_AZ_RX_EV_PORT_LBN 30 -#define FSF_AZ_RX_EV_PORT_WIDTH 1 -#define FSF_AZ_RX_EV_BYTE_CNT_LBN 16 -#define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14 -#define FSF_AZ_RX_EV_SOP_LBN 15 -#define FSF_AZ_RX_EV_SOP_WIDTH 1 -#define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14 -#define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1 -#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13 -#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1 -#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12 
-#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1 -#define FSF_AZ_RX_EV_DESC_PTR_LBN 0 -#define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12 - -/* RX_KER_DESC */ -#define FSF_AZ_RX_KER_BUF_SIZE_LBN 48 -#define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14 -#define FSF_AZ_RX_KER_BUF_REGION_LBN 46 -#define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2 -#define FSF_AZ_RX_KER_BUF_ADDR_LBN 0 -#define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46 - -/* RX_USER_DESC */ -#define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20 -#define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12 -#define FSF_AZ_RX_USER_BUF_ID_LBN 0 -#define FSF_AZ_RX_USER_BUF_ID_WIDTH 20 - -/* TX_EV */ -#define FSF_AZ_TX_EV_PKT_ERR_LBN 38 -#define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1 -#define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37 -#define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1 -#define FSF_AZ_TX_EV_Q_LABEL_LBN 32 -#define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5 -#define FSF_AZ_TX_EV_PORT_LBN 16 -#define FSF_AZ_TX_EV_PORT_WIDTH 1 -#define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15 -#define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1 -#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14 -#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1 -#define FSF_AZ_TX_EV_COMP_LBN 12 -#define FSF_AZ_TX_EV_COMP_WIDTH 1 -#define FSF_AZ_TX_EV_DESC_PTR_LBN 0 -#define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12 - -/* TX_KER_DESC */ -#define FSF_AZ_TX_KER_CONT_LBN 62 -#define FSF_AZ_TX_KER_CONT_WIDTH 1 -#define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48 -#define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14 -#define FSF_AZ_TX_KER_BUF_REGION_LBN 46 -#define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2 -#define FSF_AZ_TX_KER_BUF_ADDR_LBN 0 -#define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46 - -/* TX_USER_DESC */ -#define FSF_AZ_TX_USER_SW_EV_EN_LBN 48 -#define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1 -#define FSF_AZ_TX_USER_CONT_LBN 46 -#define FSF_AZ_TX_USER_CONT_WIDTH 1 -#define FSF_AZ_TX_USER_BYTE_CNT_LBN 33 -#define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13 -#define FSF_AZ_TX_USER_BUF_ID_LBN 13 -#define FSF_AZ_TX_USER_BUF_ID_WIDTH 20 -#define FSF_AZ_TX_USER_BYTE_OFS_LBN 0 -#define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13 - -/* USER_EV */ -#define FSF_CZ_USER_QID_LBN 32 -#define FSF_CZ_USER_QID_WIDTH 10 -#define FSF_CZ_USER_EV_REG_VALUE_LBN 0 -#define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32 - -/************************************************************************** - * - * Falcon B0 PCIe core indirect registers - * - ************************************************************************** - */ - -#define FPCR_BB_PCIE_DEVICE_CTRL_STAT 0x68 - -#define FPCR_BB_PCIE_LINK_CTRL_STAT 0x70 - -#define FPCR_BB_ACK_RPL_TIMER 0x700 -#define FPCRF_BB_ACK_TL_LBN 0 -#define FPCRF_BB_ACK_TL_WIDTH 16 -#define FPCRF_BB_RPL_TL_LBN 16 -#define FPCRF_BB_RPL_TL_WIDTH 16 - -#define FPCR_BB_ACK_FREQ 0x70C -#define FPCRF_BB_ACK_FREQ_LBN 0 -#define FPCRF_BB_ACK_FREQ_WIDTH 7 - -/************************************************************************** - * - * Pseudo-registers and fields - * - ************************************************************************** - */ - -/* Interrupt acknowledge work-around register (A0/A1 only) */ -#define FR_AA_WORK_AROUND_BROKEN_PCI_READS 0x0070 - -/* EE_SPI_HCMD_REG: SPI host command register */ -/* Values for the EE_SPI_HCMD_SF_SEL register field */ -#define FFE_AB_SPI_DEVICE_EEPROM 0 -#define FFE_AB_SPI_DEVICE_FLASH 1 - -/* NIC_STAT_REG: NIC status register */ -#define FRF_AB_STRAP_10G_LBN 2 -#define FRF_AB_STRAP_10G_WIDTH 1 -#define FRF_AA_STRAP_PCIE_LBN 0 -#define FRF_AA_STRAP_PCIE_WIDTH 1 - -/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */ -#define FRF_AZ_FATAL_INTR_LBN 0 -#define FRF_AZ_FATAL_INTR_WIDTH 12 - -/* SRM_CFG_REG: SRAM configuration 
register */ -/* We treat the number of SRAM banks and bank size as a single field */ -#define FRF_AZ_SRM_NB_SZ_LBN FRF_AZ_SRM_BANK_SIZE_LBN -#define FRF_AZ_SRM_NB_SZ_WIDTH \ - (FRF_AZ_SRM_BANK_SIZE_WIDTH + FRF_AZ_SRM_NUM_BANK_WIDTH) -#define FFE_AB_SRM_NB1_SZ2M 0 -#define FFE_AB_SRM_NB1_SZ4M 1 -#define FFE_AB_SRM_NB1_SZ8M 2 -#define FFE_AB_SRM_NB_SZ_DEF 3 -#define FFE_AB_SRM_NB2_SZ4M 4 -#define FFE_AB_SRM_NB2_SZ8M 5 -#define FFE_AB_SRM_NB2_SZ16M 6 -#define FFE_AB_SRM_NB_SZ_RES 7 - -/* RX_DESC_UPD_REGP0: Receive descriptor update register. */ -/* We write just the last dword of these registers */ -#define FR_AZ_RX_DESC_UPD_DWORD_P0 \ - (BUILD_BUG_ON_ZERO(FR_AA_RX_DESC_UPD_KER != FR_BZ_RX_DESC_UPD_P0) + \ - FR_BZ_RX_DESC_UPD_P0 + 3 * 4) -#define FRF_AZ_RX_DESC_WPTR_DWORD_LBN (FRF_AZ_RX_DESC_WPTR_LBN - 3 * 32) -#define FRF_AZ_RX_DESC_WPTR_DWORD_WIDTH FRF_AZ_RX_DESC_WPTR_WIDTH - -/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */ -#define FR_AZ_TX_DESC_UPD_DWORD_P0 \ - (BUILD_BUG_ON_ZERO(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0) + \ - FR_BZ_TX_DESC_UPD_P0 + 3 * 4) -#define FRF_AZ_TX_DESC_WPTR_DWORD_LBN (FRF_AZ_TX_DESC_WPTR_LBN - 3 * 32) -#define FRF_AZ_TX_DESC_WPTR_DWORD_WIDTH FRF_AZ_TX_DESC_WPTR_WIDTH - -/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */ -#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_LBN 12 -#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_WIDTH 1 - -/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */ -#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_LBN 12 -#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1 - -/* XM_TX_PARAM_REG: XGMAC transmit parameter register */ -#define FRF_AB_XM_MAX_TX_FRM_SIZE_LBN FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN -#define FRF_AB_XM_MAX_TX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH + \ - FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH) - -/* XM_RX_PARAM_REG: XGMAC receive parameter register */ -#define FRF_AB_XM_MAX_RX_FRM_SIZE_LBN FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN -#define FRF_AB_XM_MAX_RX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH + \ - FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH) - -/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */ -/* Default values */ -#define FFE_AB_XX_TXDRV_DEQ_DEF 0xe /* deq=.6 */ -#define FFE_AB_XX_TXDRV_DTX_DEF 0x5 /* 1.25 */ -#define FFE_AB_XX_SD_CTL_DRV_DEF 0 /* 20mA */ - -/* XX_CORE_STAT_REG: XAUI XGXS core status register */ -/* XGXS all-lanes status fields */ -#define FRF_AB_XX_SYNC_STAT_LBN FRF_AB_XX_SYNC_STAT0_LBN -#define FRF_AB_XX_SYNC_STAT_WIDTH 4 -#define FRF_AB_XX_COMMA_DET_LBN FRF_AB_XX_COMMA_DET_CH0_LBN -#define FRF_AB_XX_COMMA_DET_WIDTH 4 -#define FRF_AB_XX_CHAR_ERR_LBN FRF_AB_XX_CHAR_ERR_CH0_LBN -#define FRF_AB_XX_CHAR_ERR_WIDTH 4 -#define FRF_AB_XX_DISPERR_LBN FRF_AB_XX_DISPERR_CH0_LBN -#define FRF_AB_XX_DISPERR_WIDTH 4 -#define FFE_AB_XX_STAT_ALL_LANES 0xf -#define FRF_AB_XX_FORCE_SIG_LBN FRF_AB_XX_FORCE_SIG0_VAL_LBN -#define FRF_AB_XX_FORCE_SIG_WIDTH 8 -#define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff - -/* RX_MAC_FILTER_TBL0 */ -/* RMFT_DEST_MAC is wider than 32 bits */ -#define FRF_CZ_RMFT_DEST_MAC_LO_LBN FRF_CZ_RMFT_DEST_MAC_LBN -#define FRF_CZ_RMFT_DEST_MAC_LO_WIDTH 32 -#define FRF_CZ_RMFT_DEST_MAC_HI_LBN (FRF_CZ_RMFT_DEST_MAC_LBN + 32) -#define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH (FRF_CZ_RMFT_DEST_MAC_WIDTH - 32) - -/* TX_MAC_FILTER_TBL0 */ -/* TMFT_SRC_MAC is wider than 32 bits */ -#define FRF_CZ_TMFT_SRC_MAC_LO_LBN FRF_CZ_TMFT_SRC_MAC_LBN -#define FRF_CZ_TMFT_SRC_MAC_LO_WIDTH 32 -#define FRF_CZ_TMFT_SRC_MAC_HI_LBN (FRF_CZ_TMFT_SRC_MAC_LBN + 32) -#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH (FRF_CZ_TMFT_SRC_MAC_WIDTH - 32) - 
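These _LBN (lowest bit number) / _WIDTH pairs are consumed by the sfc bitfield helpers in drivers/net/ethernet/sfc/bitfield.h, which token-paste the suffixes onto the field name to extract or build register fields. A minimal sketch of how the farch code read an RX_EV field defined above; example_rx_ev_byte_cnt is a hypothetical name used only to illustrate the convention:

	/* Extract RX_EV_BYTE_CNT from a 64-bit event descriptor: this
	 * expands to read FSF_AZ_RX_EV_BYTE_CNT_WIDTH bits starting at
	 * bit FSF_AZ_RX_EV_BYTE_CNT_LBN.
	 */
	static unsigned int example_rx_ev_byte_cnt(const efx_qword_t *event)
	{
		return EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	}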
-/* TX_PACE_TBL */ -/* Values >20 are documented as reserved, but will result in a queue going - * into the fast bin with a pace value of zero. */ -#define FFE_BZ_TX_PACE_OFF 0 -#define FFE_BZ_TX_PACE_RESERVED 21 - -/* DRIVER_EV */ -/* Sub-fields of an RX flush completion event */ -#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12 -#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1 -#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0 -#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12 - -/* EVENT_ENTRY */ -/* Magic number field for event test */ -#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0 -#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32 - -/* RX packet prefix */ -#define FS_BZ_RX_PREFIX_HASH_OFST 12 -#define FS_BZ_RX_PREFIX_SIZE 16 - -#endif /* EFX_FARCH_REGS_H */ diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h index 5f201a547e5b..0d45900afa76 100644 --- a/drivers/net/ethernet/sfc/filter.h +++ b/drivers/net/ethernet/sfc/filter.h @@ -30,13 +30,6 @@ * * Only some combinations are supported, depending on NIC type: * - * - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or - * local 2-tuple (only implemented for Falcon B0) - * - * - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple - * or local 2-tuple, or local MAC with or without outer VID, and RX - * default filters - * * - Huntington supports filter matching controlled by firmware, potentially * using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit, * with or without outer and inner VID diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h index 30439cc83a89..4cc7b501135f 100644 --- a/drivers/net/ethernet/sfc/io.h +++ b/drivers/net/ethernet/sfc/io.h @@ -17,46 +17,22 @@ * ************************************************************************** * - * Notes on locking strategy for the Falcon architecture: - * - * Many CSRs are very wide and cannot be read or written atomically. - * Writes from the host are buffered by the Bus Interface Unit (BIU) - * up to 128 bits. Whenever the host writes part of such a register, - * the BIU collects the written value and does not write to the - * underlying register until all 4 dwords have been written. A - * similar buffering scheme applies to host access to the NIC's 64-bit - * SRAM. - * - * Writes to different CSRs and 64-bit SRAM words must be serialised, - * since interleaved access can result in lost writes. We use - * efx_nic::biu_lock for this. - * - * We also serialise reads from 128-bit CSRs and SRAM with the same - * spinlock. This may not be necessary, but it doesn't really matter - * as there are no such reads on the fast path. + * The EF10 architecture exposes very few registers to the host and + * most of them are only 32 bits wide. The only exceptions are the MC + * doorbell register pair, which has its own latching, and + * TX_DESC_UPD. * - * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are - * 128-bit but are special-cased in the BIU to avoid the need for - * locking in the host: + * The TX_DESC_UPD DMA descriptor pointer is 128-bits but is a special + * case in the BIU to avoid the need for locking in the host: * - * - They are write-only. - * - The semantics of writing to these registers are such that + * - It is write-only. + * - The semantics of writing to this register is such that * replacing the low 96 bits with zero does not affect functionality. - * - If the host writes to the last dword address of such a register + * - If the host writes to the last dword address of the register * (i.e. 
the high 32 bits) the underlying register will always be * written. If the collector and the current write together do not * provide values for all 128 bits of the register, the low 96 bits * will be written as zero. - * - If the host writes to the address of any other part of such a - * register while the collector already holds values for some other - * register, the write is discarded and the collector maintains its - * current state. - * - * The EF10 architecture exposes very few registers to the host and - * most of them are only 32 bits wide. The only exceptions are the MC - * doorbell register pair, which has its own latching, and - * TX_DESC_UPD, which works in a similar way to the Falcon - * architecture. */ #if BITS_PER_LONG == 64 @@ -70,7 +46,7 @@ */ #ifdef CONFIG_X86_64 /* PIO is a win only if write-combining is possible */ -#ifdef ARCH_HAS_IOREMAP_WC +#ifdef ioremap_wc #define EFX_USE_PIO 1 #endif #endif @@ -125,27 +101,6 @@ static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value, spin_unlock_irqrestore(&efx->biu_lock, flags); } -/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */ -static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, - const efx_qword_t *value, unsigned int index) -{ - unsigned int addr = index * sizeof(*value); - unsigned long flags __attribute__ ((unused)); - - netif_vdbg(efx, hw, efx->net_dev, - "writing SRAM address %x with " EFX_QWORD_FMT "\n", - addr, EFX_QWORD_VAL(*value)); - - spin_lock_irqsave(&efx->biu_lock, flags); -#ifdef EFX_USE_QWORD_IO - __raw_writeq((__force u64)value->u64[0], membase + addr); -#else - __raw_writel((__force u32)value->u32[0], membase + addr); - __raw_writel((__force u32)value->u32[1], membase + addr + 4); -#endif - spin_unlock_irqrestore(&efx->biu_lock, flags); -} - /* Write a 32-bit CSR or the last dword of a special 128-bit CSR */ static inline void efx_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned int reg) @@ -176,27 +131,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value, EFX_OWORD_VAL(*value)); } -/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. 
*/ -static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase, - efx_qword_t *value, unsigned int index) -{ - unsigned int addr = index * sizeof(*value); - unsigned long flags __attribute__ ((unused)); - - spin_lock_irqsave(&efx->biu_lock, flags); -#ifdef EFX_USE_QWORD_IO - value->u64[0] = (__force __le64)__raw_readq(membase + addr); -#else - value->u32[0] = (__force __le32)__raw_readl(membase + addr); - value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); -#endif - spin_unlock_irqrestore(&efx->biu_lock, flags); - - netif_vdbg(efx, hw, efx->net_dev, - "read from SRAM address %x, got "EFX_QWORD_FMT"\n", - addr, EFX_QWORD_VAL(*value)); -} - /* Read a 32-bit CSR or SRAM */ static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value, unsigned int reg) diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c index 0cab508f2f9d..10709d828a63 100644 --- a/drivers/net/ethernet/sfc/mae.c +++ b/drivers/net/ethernet/sfc/mae.c @@ -16,6 +16,7 @@ #include "mcdi_pcol.h" #include "mcdi_pcol_mae.h" #include "tc_encap_actions.h" +#include "tc_conntrack.h" int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label) { @@ -227,6 +228,256 @@ void efx_mae_counters_grant_credits(struct work_struct *work) rx_queue->granted_count += credits; } +static int efx_mae_table_get_desc(struct efx_nic *efx, + struct efx_tc_table_desc *desc, + u32 table_id) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_TABLE_DESCRIPTOR_OUT_LEN(16)); + MCDI_DECLARE_BUF(inbuf, MC_CMD_TABLE_DESCRIPTOR_IN_LEN); + unsigned int offset = 0, i; + size_t outlen; + int rc; + + memset(desc, 0, sizeof(*desc)); + + MCDI_SET_DWORD(inbuf, TABLE_DESCRIPTOR_IN_TABLE_ID, table_id); +more: + MCDI_SET_DWORD(inbuf, TABLE_DESCRIPTOR_IN_FIRST_FIELDS_INDEX, offset); + rc = efx_mcdi_rpc(efx, MC_CMD_TABLE_DESCRIPTOR, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + if (outlen < MC_CMD_TABLE_DESCRIPTOR_OUT_LEN(1)) { + rc = -EIO; + goto fail; + } + if (!offset) { /* first iteration: get metadata */ + desc->type = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_TYPE); + desc->key_width = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_KEY_WIDTH); + desc->resp_width = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_RESP_WIDTH); + desc->n_keys = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_N_KEY_FIELDS); + desc->n_resps = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_N_RESP_FIELDS); + desc->n_prios = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_N_PRIORITIES); + desc->flags = MCDI_BYTE(outbuf, TABLE_DESCRIPTOR_OUT_FLAGS); + rc = -EOPNOTSUPP; + if (desc->flags) + goto fail; + desc->scheme = MCDI_BYTE(outbuf, TABLE_DESCRIPTOR_OUT_SCHEME); + if (desc->scheme) + goto fail; + rc = -ENOMEM; + desc->keys = kcalloc(desc->n_keys, + sizeof(struct efx_tc_table_field_fmt), + GFP_KERNEL); + if (!desc->keys) + goto fail; + desc->resps = kcalloc(desc->n_resps, + sizeof(struct efx_tc_table_field_fmt), + GFP_KERNEL); + if (!desc->resps) + goto fail; + } + /* FW could have returned more than the 16 field_descrs we + * made room for in our outbuf + */ + outlen = min(outlen, sizeof(outbuf)); + for (i = 0; i + offset < desc->n_keys + desc->n_resps; i++) { + struct efx_tc_table_field_fmt *field; + MCDI_DECLARE_STRUCT_PTR(fdesc); + + if (outlen < MC_CMD_TABLE_DESCRIPTOR_OUT_LEN(i + 1)) { + offset += i; + goto more; + } + if (i + offset < desc->n_keys) + field = desc->keys + i + offset; + else + field = desc->resps + (i + offset - desc->n_keys); + fdesc = MCDI_ARRAY_STRUCT_PTR(outbuf, + TABLE_DESCRIPTOR_OUT_FIELDS, i); + field->field_id = 
MCDI_STRUCT_WORD(fdesc, + TABLE_FIELD_DESCR_FIELD_ID); + field->lbn = MCDI_STRUCT_WORD(fdesc, TABLE_FIELD_DESCR_LBN); + field->width = MCDI_STRUCT_WORD(fdesc, TABLE_FIELD_DESCR_WIDTH); + field->masking = MCDI_STRUCT_BYTE(fdesc, TABLE_FIELD_DESCR_MASK_TYPE); + field->scheme = MCDI_STRUCT_BYTE(fdesc, TABLE_FIELD_DESCR_SCHEME); + } + return 0; + +fail: + kfree(desc->keys); + kfree(desc->resps); + return rc; +} + +static int efx_mae_table_hook_find(u16 n_fields, + struct efx_tc_table_field_fmt *fields, + u16 field_id) +{ + unsigned int i; + + for (i = 0; i < n_fields; i++) { + if (fields[i].field_id == field_id) + return i; + } + return -EPROTO; +} + +#define TABLE_FIND_KEY(_desc, _id) \ + efx_mae_table_hook_find((_desc)->n_keys, (_desc)->keys, _id) +#define TABLE_FIND_RESP(_desc, _id) \ + efx_mae_table_hook_find((_desc)->n_resps, (_desc)->resps, _id) + +#define TABLE_HOOK_KEY(_meta, _name, _mcdi_name) ({ \ + int _rc = TABLE_FIND_KEY(&_meta->desc, TABLE_FIELD_ID_##_mcdi_name); \ + \ + if (_rc > U8_MAX) \ + _rc = -EOPNOTSUPP; \ + if (_rc >= 0) { \ + _meta->keys._name##_idx = _rc; \ + _rc = 0; \ + } \ + _rc; \ +}) +#define TABLE_HOOK_RESP(_meta, _name, _mcdi_name) ({ \ + int _rc = TABLE_FIND_RESP(&_meta->desc, TABLE_FIELD_ID_##_mcdi_name); \ + \ + if (_rc > U8_MAX) \ + _rc = -EOPNOTSUPP; \ + if (_rc >= 0) { \ + _meta->resps._name##_idx = _rc; \ + _rc = 0; \ + } \ + _rc; \ +}) + +static int efx_mae_table_hook_ct(struct efx_nic *efx, + struct efx_tc_table_ct *meta_ct) +{ + int rc; + + rc = TABLE_HOOK_KEY(meta_ct, eth_proto, ETHER_TYPE); + if (rc) + return rc; + rc = TABLE_HOOK_KEY(meta_ct, ip_proto, IP_PROTO); + if (rc) + return rc; + rc = TABLE_HOOK_KEY(meta_ct, src_ip, SRC_IP); + if (rc) + return rc; + rc = TABLE_HOOK_KEY(meta_ct, dst_ip, DST_IP); + if (rc) + return rc; + rc = TABLE_HOOK_KEY(meta_ct, l4_sport, SRC_PORT); + if (rc) + return rc; + rc = TABLE_HOOK_KEY(meta_ct, l4_dport, DST_PORT); + if (rc) + return rc; + rc = TABLE_HOOK_KEY(meta_ct, zone, DOMAIN); + if (rc) + return rc; + rc = TABLE_HOOK_RESP(meta_ct, dnat, NAT_DIR); + if (rc) + return rc; + rc = TABLE_HOOK_RESP(meta_ct, nat_ip, NAT_IP); + if (rc) + return rc; + rc = TABLE_HOOK_RESP(meta_ct, l4_natport, NAT_PORT); + if (rc) + return rc; + rc = TABLE_HOOK_RESP(meta_ct, mark, CT_MARK); + if (rc) + return rc; + rc = TABLE_HOOK_RESP(meta_ct, counter_id, COUNTER_ID); + if (rc) + return rc; + meta_ct->hooked = true; + return 0; +} + +static void efx_mae_table_free_desc(struct efx_tc_table_desc *desc) +{ + kfree(desc->keys); + kfree(desc->resps); + memset(desc, 0, sizeof(*desc)); +} + +static bool efx_mae_check_table_exists(struct efx_nic *efx, u32 tbl_req) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_TABLE_LIST_OUT_LEN(16)); + MCDI_DECLARE_BUF(inbuf, MC_CMD_TABLE_LIST_IN_LEN); + u32 tbl_id, tbl_total, tbl_cnt, pos = 0; + size_t outlen, msg_max; + bool ct_tbl = false; + int rc, idx; + + msg_max = sizeof(outbuf); + efx->tc->meta_ct.hooked = false; +more: + memset(outbuf, 0, sizeof(*outbuf)); + MCDI_SET_DWORD(inbuf, TABLE_LIST_IN_FIRST_TABLE_ID_INDEX, pos); + rc = efx_mcdi_rpc(efx, MC_CMD_TABLE_LIST, inbuf, sizeof(inbuf), outbuf, + msg_max, &outlen); + if (rc) + return false; + + if (outlen < MC_CMD_TABLE_LIST_OUT_LEN(1)) + return false; + + tbl_total = MCDI_DWORD(outbuf, TABLE_LIST_OUT_N_TABLES); + tbl_cnt = MC_CMD_TABLE_LIST_OUT_TABLE_ID_NUM(min(outlen, msg_max)); + + for (idx = 0; idx < tbl_cnt; idx++) { + tbl_id = MCDI_ARRAY_DWORD(outbuf, TABLE_LIST_OUT_TABLE_ID, idx); + if (tbl_id == tbl_req) { + ct_tbl = true; + break; + } + } + + pos += 
tbl_cnt; + if (!ct_tbl && pos < tbl_total) + goto more; + + return ct_tbl; +} + +int efx_mae_get_tables(struct efx_nic *efx) +{ + int rc; + + efx->tc->meta_ct.hooked = false; + if (efx_mae_check_table_exists(efx, TABLE_ID_CONNTRACK_TABLE)) { + rc = efx_mae_table_get_desc(efx, &efx->tc->meta_ct.desc, + TABLE_ID_CONNTRACK_TABLE); + if (rc) { + pci_info(efx->pci_dev, + "FW does not support conntrack desc rc %d\n", + rc); + return 0; + } + + rc = efx_mae_table_hook_ct(efx, &efx->tc->meta_ct); + if (rc) { + pci_info(efx->pci_dev, + "FW does not support conntrack hook rc %d\n", + rc); + return 0; + } + } else { + pci_info(efx->pci_dev, + "FW does not support conntrack table\n"); + } + return 0; +} + +void efx_mae_free_tables(struct efx_nic *efx) +{ + efx_mae_table_free_desc(&efx->tc->meta_ct.desc); + efx->tc->meta_ct.hooked = false; +} + static int efx_mae_get_basic_caps(struct efx_nic *efx, struct mae_caps *caps) { MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_GET_CAPS_OUT_LEN); @@ -444,8 +695,13 @@ int efx_mae_match_check_caps(struct efx_nic *efx, CHECK(L4_SPORT, l4_sport) || CHECK(L4_DPORT, l4_dport) || CHECK(TCP_FLAGS, tcp_flags) || + CHECK_BIT(TCP_SYN_FIN_RST, tcp_syn_fin_rst) || CHECK_BIT(IS_IP_FRAG, ip_frag) || CHECK_BIT(IP_FIRST_FRAG, ip_firstfrag) || + CHECK_BIT(DO_CT, ct_state_trk) || + CHECK_BIT(CT_HIT, ct_state_est) || + CHECK(CT_MARK, ct_mark) || + CHECK(CT_DOMAIN, ct_zone) || CHECK(RECIRC_ID, recirc_id)) return rc; /* Matches on outer fields are done in a separate hardware table, @@ -471,6 +727,90 @@ int efx_mae_match_check_caps(struct efx_nic *efx, } return 0; } + +/* Checks for match fields not supported in LHS Outer Rules */ +#define UNSUPPORTED(_field) ({ \ + enum mask_type typ = classify_mask((const u8 *)&mask->_field, \ + sizeof(mask->_field)); \ + \ + if (typ != MASK_ZEROES) { \ + NL_SET_ERR_MSG_MOD(extack, "Unsupported match field " #_field);\ + rc = -EOPNOTSUPP; \ + } \ + rc; \ +}) +#define UNSUPPORTED_BIT(_field) ({ \ + if (mask->_field) { \ + NL_SET_ERR_MSG_MOD(extack, "Unsupported match field " #_field);\ + rc = -EOPNOTSUPP; \ + } \ + rc; \ +}) + +/* LHS rules are (normally) inserted in the Outer Rule table, which means + * they use ENC_ fields in hardware to match regular (not enc_) fields from + * &struct efx_tc_match_fields. 
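+ *
+ * An LHS rule covers the lookup stage of a match: it can trigger a
+ * conntrack lookup and/or set a recirculation ID, and the resulting CT
+ * state, CT mark/domain and recirc ID are then matched by ordinary
+ * action rules (see the DO_CT/CT_HIT/RECIRC_ID handling in
+ * efx_mae_populate_match_criteria() below).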
+ */
+int efx_mae_match_check_caps_lhs(struct efx_nic *efx,
+				 const struct efx_tc_match_fields *mask,
+				 struct netlink_ext_ack *extack)
+{
+	const u8 *supported_fields = efx->tc->caps->outer_rule_fields;
+	__be32 ingress_port = cpu_to_be32(mask->ingress_port);
+	enum mask_type ingress_port_mask_type;
+	int rc;
+
+	/* Check for _PREFIX assumes big-endian, so we need to convert */
+	ingress_port_mask_type = classify_mask((const u8 *)&ingress_port,
+					       sizeof(ingress_port));
+	rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_INGRESS_PORT],
+					 ingress_port_mask_type);
+	if (rc) {
+		NL_SET_ERR_MSG_FMT_MOD(extack, "No support for %s mask in field %s\n",
+				       mask_type_name(ingress_port_mask_type),
+				       "ingress_port");
+		return rc;
+	}
+	if (CHECK(ENC_ETHER_TYPE, eth_proto) ||
+	    CHECK(ENC_VLAN0_TCI, vlan_tci[0]) ||
+	    CHECK(ENC_VLAN0_PROTO, vlan_proto[0]) ||
+	    CHECK(ENC_VLAN1_TCI, vlan_tci[1]) ||
+	    CHECK(ENC_VLAN1_PROTO, vlan_proto[1]) ||
+	    CHECK(ENC_ETH_SADDR, eth_saddr) ||
+	    CHECK(ENC_ETH_DADDR, eth_daddr) ||
+	    CHECK(ENC_IP_PROTO, ip_proto) ||
+	    CHECK(ENC_IP_TOS, ip_tos) ||
+	    CHECK(ENC_IP_TTL, ip_ttl) ||
+	    CHECK_BIT(ENC_IP_FRAG, ip_frag) ||
+	    UNSUPPORTED_BIT(ip_firstfrag) ||
+	    CHECK(ENC_SRC_IP4, src_ip) ||
+	    CHECK(ENC_DST_IP4, dst_ip) ||
+#ifdef CONFIG_IPV6
+	    CHECK(ENC_SRC_IP6, src_ip6) ||
+	    CHECK(ENC_DST_IP6, dst_ip6) ||
+#endif
+	    CHECK(ENC_L4_SPORT, l4_sport) ||
+	    CHECK(ENC_L4_DPORT, l4_dport) ||
+	    UNSUPPORTED(tcp_flags) ||
+	    CHECK_BIT(TCP_SYN_FIN_RST, tcp_syn_fin_rst))
+		return rc;
+	if (efx_tc_match_is_encap(mask)) {
+		/* can't happen; disallowed for local rules, translated
+		 * for foreign rules.
+		 */
+		NL_SET_ERR_MSG_MOD(extack, "Unexpected encap match in LHS rule");
+		return -EOPNOTSUPP;
+	}
+	if (UNSUPPORTED(enc_keyid) ||
+	    /* Can't filter on conntrack in LHS rules */
+	    UNSUPPORTED_BIT(ct_state_trk) ||
+	    UNSUPPORTED_BIT(ct_state_est) ||
+	    UNSUPPORTED(ct_mark) ||
+	    UNSUPPORTED(recirc_id))
+		return rc;
+	return 0;
+}
+#undef UNSUPPORTED
 #undef CHECK_BIT
 #undef CHECK
@@ -879,6 +1219,71 @@ fail:
 	return rc;
 }
 
+/**
+ * efx_mae_allocate_pedit_mac() - allocate pedit MAC address in HW.
+ * @efx: NIC we're installing a pedit MAC address on
+ * @ped: pedit MAC action to be installed
+ *
+ * Attempts to install @ped in HW and populates its id with an index of this
+ * entry in the firmware MAC address table on success.
+ *
+ * Return: negative value on error, 0 on success.
+ */
+int efx_mae_allocate_pedit_mac(struct efx_nic *efx,
+			       struct efx_tc_mac_pedit_action *ped)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_LEN);
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MAC_ADDR_ALLOC_IN_LEN);
+	size_t outlen;
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_MAE_MAC_ADDR_ALLOC_IN_MAC_ADDR_LEN !=
+		     sizeof(ped->h_addr));
+	memcpy(MCDI_PTR(inbuf, MAE_MAC_ADDR_ALLOC_IN_MAC_ADDR), ped->h_addr,
+	       sizeof(ped->h_addr));
+	rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MAC_ADDR_ALLOC, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	if (outlen < sizeof(outbuf))
+		return -EIO;
+	ped->fw_id = MCDI_DWORD(outbuf, MAE_MAC_ADDR_ALLOC_OUT_MAC_ID);
+	return 0;
+}
+
+/**
+ * efx_mae_free_pedit_mac() - free pedit MAC address in HW.
+ * @efx: NIC we're freeing a pedit MAC address on
+ * @ped: pedit MAC action that needs to be freed
+ *
+ * Frees @ped in HW, checks that firmware did not free a different one and
+ * clears the id (which denotes the index of the entry in the MAC address
+ * table).
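+ *
+ * Note that this returns void: an MCDI failure is not reported to the
+ * caller, and a freed ID that does not match the requested one only
+ * raises a WARN_ON.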
+ */ +void efx_mae_free_pedit_mac(struct efx_nic *efx, + struct efx_tc_mac_pedit_action *ped) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MAC_ADDR_FREE_OUT_LEN(1)); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MAC_ADDR_FREE_IN_LEN(1)); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, MAE_MAC_ADDR_FREE_IN_MAC_ID, ped->fw_id); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MAC_ADDR_FREE, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); + if (rc || outlen < sizeof(outbuf)) + return; + /* FW freed a different ID than we asked for, should also never happen. + * Warn because it means we've now got a different idea to the FW of + * what MAC addresses exist, which could cause mayhem later. + */ + if (WARN_ON(MCDI_DWORD(outbuf, MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID) != ped->fw_id)) + return; + /* We're probably about to free @ped, but let's just make sure its + * fw_id is blatted so that it won't look valid if it leaks out. + */ + ped->fw_id = MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL; +} + int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act) { MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN); @@ -886,15 +1291,28 @@ int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act) size_t outlen; int rc; - MCDI_POPULATE_DWORD_3(inbuf, MAE_ACTION_SET_ALLOC_IN_FLAGS, + MCDI_POPULATE_DWORD_5(inbuf, MAE_ACTION_SET_ALLOC_IN_FLAGS, MAE_ACTION_SET_ALLOC_IN_VLAN_PUSH, act->vlan_push, MAE_ACTION_SET_ALLOC_IN_VLAN_POP, act->vlan_pop, - MAE_ACTION_SET_ALLOC_IN_DECAP, act->decap); + MAE_ACTION_SET_ALLOC_IN_DECAP, act->decap, + MAE_ACTION_SET_ALLOC_IN_DO_NAT, act->do_nat, + MAE_ACTION_SET_ALLOC_IN_DO_DECR_IP_TTL, + act->do_ttl_dec); + + if (act->src_mac) + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID, + act->src_mac->fw_id); + else + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID, + MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL); + + if (act->dst_mac) + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID, + act->dst_mac->fw_id); + else + MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID, + MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL); - MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID, - MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL); - MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID, - MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL); if (act->count && !WARN_ON(!act->count->cnt)) MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_ID, act->count->cnt->fw_id); @@ -1153,6 +1571,520 @@ int efx_mae_unregister_encap_match(struct efx_nic *efx, return 0; } +static int efx_mae_populate_lhs_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit), + const struct efx_tc_match *match) +{ + if (match->mask.ingress_port) { + if (~match->mask.ingress_port) + return -EOPNOTSUPP; + MCDI_STRUCT_SET_DWORD(match_crit, + MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR, + match->value.ingress_port); + } + MCDI_STRUCT_SET_DWORD(match_crit, MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_MASK, + match->mask.ingress_port); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE, + match->value.eth_proto); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK, + match->mask.eth_proto); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE, + match->value.vlan_tci[0]); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_MASK, + match->mask.vlan_tci[0]); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE, + match->value.vlan_proto[0]); + 
MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_MASK, + match->mask.vlan_proto[0]); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE, + match->value.vlan_tci[1]); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_MASK, + match->mask.vlan_tci[1]); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE, + match->value.vlan_proto[1]); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_MASK, + match->mask.vlan_proto[1]); + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE), + match->value.eth_saddr, ETH_ALEN); + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_MASK), + match->mask.eth_saddr, ETH_ALEN); + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE), + match->value.eth_daddr, ETH_ALEN); + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_MASK), + match->mask.eth_daddr, ETH_ALEN); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO, + match->value.ip_proto); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_MASK, + match->mask.ip_proto); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_TOS, + match->value.ip_tos); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_MASK, + match->mask.ip_tos); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_TTL, + match->value.ip_ttl); + MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_MASK, + match->mask.ip_ttl); + MCDI_STRUCT_POPULATE_BYTE_1(match_crit, + MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS, + MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG, + match->value.ip_frag); + MCDI_STRUCT_POPULATE_BYTE_1(match_crit, + MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_MASK, + MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_MASK, + match->mask.ip_frag); + MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE, + match->value.src_ip); + MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_MASK, + match->mask.src_ip); + MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE, + match->value.dst_ip); + MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_MASK, + match->mask.dst_ip); +#ifdef CONFIG_IPV6 + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE), + &match->value.src_ip6, sizeof(struct in6_addr)); + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_MASK), + &match->mask.src_ip6, sizeof(struct in6_addr)); + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE), + &match->value.dst_ip6, sizeof(struct in6_addr)); + memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_MASK), + &match->mask.dst_ip6, sizeof(struct in6_addr)); +#endif + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE, + match->value.l4_sport); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_MASK, + match->mask.l4_sport); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE, + match->value.l4_dport); + MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_MASK, + match->mask.l4_dport); + /* No enc-keys in LHS rules. Caps check should have caught this; any + * enc-keys from an fLHS should have been translated to regular keys + * and any EM should be a pseudo (we're an OR so can't have a direct + * EM with another OR). 
+ */ + if (WARN_ON_ONCE(match->encap && !match->encap->type)) + return -EOPNOTSUPP; + if (WARN_ON_ONCE(match->mask.enc_src_ip)) + return -EOPNOTSUPP; + if (WARN_ON_ONCE(match->mask.enc_dst_ip)) + return -EOPNOTSUPP; +#ifdef CONFIG_IPV6 + if (WARN_ON_ONCE(!ipv6_addr_any(&match->mask.enc_src_ip6))) + return -EOPNOTSUPP; + if (WARN_ON_ONCE(!ipv6_addr_any(&match->mask.enc_dst_ip6))) + return -EOPNOTSUPP; +#endif + if (WARN_ON_ONCE(match->mask.enc_ip_tos)) + return -EOPNOTSUPP; + if (WARN_ON_ONCE(match->mask.enc_ip_ttl)) + return -EOPNOTSUPP; + if (WARN_ON_ONCE(match->mask.enc_sport)) + return -EOPNOTSUPP; + if (WARN_ON_ONCE(match->mask.enc_dport)) + return -EOPNOTSUPP; + if (WARN_ON_ONCE(match->mask.enc_keyid)) + return -EOPNOTSUPP; + return 0; +} + +static int efx_mae_insert_lhs_outer_rule(struct efx_nic *efx, + struct efx_tc_lhs_rule *rule, u32 prio) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_OUTER_RULE_INSERT_IN_LEN(MAE_ENC_FIELD_PAIRS_LEN)); + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_OUTER_RULE_INSERT_OUT_LEN); + MCDI_DECLARE_STRUCT_PTR(match_crit); + const struct efx_tc_lhs_action *act; + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_PRIO, prio); + /* match */ + match_crit = _MCDI_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA); + rc = efx_mae_populate_lhs_match_criteria(match_crit, &rule->match); + if (rc) + return rc; + + /* action */ + act = &rule->lhs_act; + rc = efx_mae_encap_type_to_mae_type(act->tun_type); + if (rc < 0) + return rc; + MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE, rc); + /* We always inhibit CT lookup on TCP_INTERESTING_FLAGS, since the + * SW path needs to process the packet to update the conntrack tables + * on connection establishment (SYN) or termination (FIN, RST). + */ + MCDI_POPULATE_DWORD_6(inbuf, MAE_OUTER_RULE_INSERT_IN_LOOKUP_CONTROL, + MAE_OUTER_RULE_INSERT_IN_DO_CT, !!act->zone, + MAE_OUTER_RULE_INSERT_IN_CT_TCP_FLAGS_INHIBIT, 1, + MAE_OUTER_RULE_INSERT_IN_CT_DOMAIN, + act->zone ? act->zone->zone : 0, + MAE_OUTER_RULE_INSERT_IN_CT_VNI_MODE, + MAE_CT_VNI_MODE_ZERO, + MAE_OUTER_RULE_INSERT_IN_DO_COUNT, !!act->count, + MAE_OUTER_RULE_INSERT_IN_RECIRC_ID, + act->rid ? 
act->rid->fw_id : 0); + if (act->count) + MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_COUNTER_ID, + act->count->cnt->fw_id); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_OUTER_RULE_INSERT, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + rule->fw_id = MCDI_DWORD(outbuf, MAE_OUTER_RULE_INSERT_OUT_OR_ID); + return 0; +} + +static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit), + const struct efx_tc_match *match); + +static int efx_mae_insert_lhs_action_rule(struct efx_nic *efx, + struct efx_tc_lhs_rule *rule, + u32 prio) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_RULE_INSERT_IN_LEN(MAE_FIELD_MASK_VALUE_PAIRS_V2_LEN)); + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_RULE_INSERT_OUT_LEN); + struct efx_tc_lhs_action *act = &rule->lhs_act; + MCDI_DECLARE_STRUCT_PTR(match_crit); + MCDI_DECLARE_STRUCT_PTR(response); + size_t outlen; + int rc; + + match_crit = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA); + response = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_RESPONSE); + MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID, + MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL); + MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID, + MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL); + EFX_POPULATE_DWORD_5(*_MCDI_STRUCT_DWORD(response, MAE_ACTION_RULE_RESPONSE_LOOKUP_CONTROL), + MAE_ACTION_RULE_RESPONSE_DO_CT, !!act->zone, + MAE_ACTION_RULE_RESPONSE_DO_RECIRC, + act->rid && !act->zone, + MAE_ACTION_RULE_RESPONSE_CT_VNI_MODE, + MAE_CT_VNI_MODE_ZERO, + MAE_ACTION_RULE_RESPONSE_RECIRC_ID, + act->rid ? act->rid->fw_id : 0, + MAE_ACTION_RULE_RESPONSE_CT_DOMAIN, + act->zone ? act->zone->zone : 0); + MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_COUNTER_ID, + act->count ? act->count->cnt->fw_id : + MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL); + MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_PRIO, prio); + rc = efx_mae_populate_match_criteria(match_crit, &rule->match); + if (rc) + return rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_INSERT, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + rule->fw_id = MCDI_DWORD(outbuf, MAE_ACTION_RULE_INSERT_OUT_AR_ID); + return 0; +} + +int efx_mae_insert_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule, + u32 prio) +{ + if (rule->is_ar) + return efx_mae_insert_lhs_action_rule(efx, rule, prio); + return efx_mae_insert_lhs_outer_rule(efx, rule, prio); +} + +static int efx_mae_remove_lhs_outer_rule(struct efx_nic *efx, + struct efx_tc_lhs_rule *rule) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LEN(1)); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LEN(1)); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_REMOVE_IN_OR_ID, rule->fw_id); + rc = efx_mcdi_rpc(efx, MC_CMD_MAE_OUTER_RULE_REMOVE, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + /* FW freed a different ID than we asked for, should also never happen. + * Warn because it means we've now got a different idea to the FW of + * what encap_mds exist, which could cause mayhem later. + */ + if (WARN_ON(MCDI_DWORD(outbuf, MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID) != rule->fw_id)) + return -EIO; + /* We're probably about to free @rule, but let's just make sure its + * fw_id is blatted so that it won't look valid if it leaks out. 
+ */ + rule->fw_id = MC_CMD_MAE_OUTER_RULE_INSERT_OUT_OUTER_RULE_ID_NULL; + return 0; +} + +int efx_mae_remove_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule) +{ + if (rule->is_ar) + return efx_mae_delete_rule(efx, rule->fw_id); + return efx_mae_remove_lhs_outer_rule(efx, rule); +} + +/* Populating is done by taking each byte of @value in turn and storing + * it in the appropriate bits of @row. @value must be big-endian; we + * convert it to little-endianness as we go. + */ +static int efx_mae_table_populate(struct efx_tc_table_field_fmt field, + __le32 *row, size_t row_bits, + void *value, size_t value_size) +{ + unsigned int i; + + /* For now only scheme 0 is supported for any field, so we check here + * (rather than, say, in calling code, which knows the semantics and + * could in principle encode for other schemes). + */ + if (field.scheme) + return -EOPNOTSUPP; + if (DIV_ROUND_UP(field.width, 8) != value_size) + return -EINVAL; + if (field.lbn + field.width > row_bits) + return -EINVAL; + for (i = 0; i < value_size; i++) { + unsigned int bn = field.lbn + i * 8; + unsigned int wn = bn / 32; + u64 v; + + v = ((u8 *)value)[value_size - i - 1]; + v <<= (bn % 32); + row[wn] |= cpu_to_le32(v & 0xffffffff); + if (wn * 32 < row_bits) + row[wn + 1] |= cpu_to_le32(v >> 32); + } + return 0; +} + +static int efx_mae_table_populate_bool(struct efx_tc_table_field_fmt field, + __le32 *row, size_t row_bits, bool value) +{ + u8 v = value ? 1 : 0; + + if (field.width != 1) + return -EINVAL; + return efx_mae_table_populate(field, row, row_bits, &v, 1); +} + +static int efx_mae_table_populate_ipv4(struct efx_tc_table_field_fmt field, + __le32 *row, size_t row_bits, __be32 value) +{ + /* IPv4 is placed in the first 4 bytes of an IPv6-sized field */ + struct in6_addr v = {}; + + if (field.width != 128) + return -EINVAL; + v.s6_addr32[0] = value; + return efx_mae_table_populate(field, row, row_bits, &v, sizeof(v)); +} + +static int efx_mae_table_populate_u24(struct efx_tc_table_field_fmt field, + __le32 *row, size_t row_bits, u32 value) +{ + __be32 v = cpu_to_be32(value); + + /* We adjust value_size here since just 3 bytes will be copied, and + * the pointer to the value is set discarding the first byte which is + * the most significant byte for a big-endian 4-bytes value. + */ + return efx_mae_table_populate(field, row, row_bits, ((void *)&v) + 1, + sizeof(v) - 1); +} + +#define _TABLE_POPULATE(dst, dw, _field, _value) ({ \ + typeof(_value) _v = _value; \ + \ + (_field.width == sizeof(_value) * 8) ? 
\ + efx_mae_table_populate(_field, dst, dw, &_v, \ + sizeof(_v)) : -EINVAL; \ +}) +#define TABLE_POPULATE_KEY_IPV4(dst, _table, _field, _value) \ + efx_mae_table_populate_ipv4(efx->tc->meta_##_table.desc.keys \ + [efx->tc->meta_##_table.keys._field##_idx],\ + dst, efx->tc->meta_##_table.desc.key_width,\ + _value) +#define TABLE_POPULATE_KEY(dst, _table, _field, _value) \ + _TABLE_POPULATE(dst, efx->tc->meta_##_table.desc.key_width, \ + efx->tc->meta_##_table.desc.keys \ + [efx->tc->meta_##_table.keys._field##_idx], \ + _value) + +#define TABLE_POPULATE_RESP_BOOL(dst, _table, _field, _value) \ + efx_mae_table_populate_bool(efx->tc->meta_##_table.desc.resps \ + [efx->tc->meta_##_table.resps._field##_idx],\ + dst, efx->tc->meta_##_table.desc.resp_width,\ + _value) +#define TABLE_POPULATE_RESP(dst, _table, _field, _value) \ + _TABLE_POPULATE(dst, efx->tc->meta_##_table.desc.resp_width, \ + efx->tc->meta_##_table.desc.resps \ + [efx->tc->meta_##_table.resps._field##_idx], \ + _value) + +#define TABLE_POPULATE_RESP_U24(dst, _table, _field, _value) \ + efx_mae_table_populate_u24(efx->tc->meta_##_table.desc.resps \ + [efx->tc->meta_##_table.resps._field##_idx],\ + dst, efx->tc->meta_##_table.desc.resp_width,\ + _value) + +static int efx_mae_populate_ct_key(struct efx_nic *efx, __le32 *key, size_t kw, + struct efx_tc_ct_entry *conn) +{ + bool ipv6 = conn->eth_proto == htons(ETH_P_IPV6); + int rc; + + rc = TABLE_POPULATE_KEY(key, ct, eth_proto, conn->eth_proto); + if (rc) + return rc; + rc = TABLE_POPULATE_KEY(key, ct, ip_proto, conn->ip_proto); + if (rc) + return rc; + if (ipv6) + rc = TABLE_POPULATE_KEY(key, ct, src_ip, conn->src_ip6); + else + rc = TABLE_POPULATE_KEY_IPV4(key, ct, src_ip, conn->src_ip); + if (rc) + return rc; + if (ipv6) + rc = TABLE_POPULATE_KEY(key, ct, dst_ip, conn->dst_ip6); + else + rc = TABLE_POPULATE_KEY_IPV4(key, ct, dst_ip, conn->dst_ip); + if (rc) + return rc; + rc = TABLE_POPULATE_KEY(key, ct, l4_sport, conn->l4_sport); + if (rc) + return rc; + rc = TABLE_POPULATE_KEY(key, ct, l4_dport, conn->l4_dport); + if (rc) + return rc; + return TABLE_POPULATE_KEY(key, ct, zone, cpu_to_be16(conn->zone->zone)); +} + +int efx_mae_insert_ct(struct efx_nic *efx, struct efx_tc_ct_entry *conn) +{ + bool ipv6 = conn->eth_proto == htons(ETH_P_IPV6); + __le32 *key = NULL, *resp = NULL; + size_t inlen, kw, rw; + efx_dword_t *inbuf; + int rc = -ENOMEM; + + /* Check table access is supported */ + if (!efx->tc->meta_ct.hooked) + return -EOPNOTSUPP; + + /* key/resp widths are in bits; convert to dwords for IN_LEN */ + kw = DIV_ROUND_UP(efx->tc->meta_ct.desc.key_width, 32); + rw = DIV_ROUND_UP(efx->tc->meta_ct.desc.resp_width, 32); + BUILD_BUG_ON(sizeof(__le32) != MC_CMD_TABLE_INSERT_IN_DATA_LEN); + inlen = MC_CMD_TABLE_INSERT_IN_LEN(kw + rw); + if (inlen > MC_CMD_TABLE_INSERT_IN_LENMAX_MCDI2) + return -E2BIG; + inbuf = kzalloc(inlen, GFP_KERNEL); + if (!inbuf) + return -ENOMEM; + + key = kcalloc(kw, sizeof(__le32), GFP_KERNEL); + if (!key) + goto out_free; + resp = kcalloc(rw, sizeof(__le32), GFP_KERNEL); + if (!resp) + goto out_free; + + rc = efx_mae_populate_ct_key(efx, key, kw, conn); + if (rc) + goto out_free; + + rc = TABLE_POPULATE_RESP_BOOL(resp, ct, dnat, conn->dnat); + if (rc) + goto out_free; + /* No support in hw for IPv6 NAT; field is only 32 bits */ + if (!ipv6) + rc = TABLE_POPULATE_RESP(resp, ct, nat_ip, conn->nat_ip); + if (rc) + goto out_free; + rc = TABLE_POPULATE_RESP(resp, ct, l4_natport, conn->l4_natport); + if (rc) + goto out_free; + rc = TABLE_POPULATE_RESP(resp, ct, 
mark, cpu_to_be32(conn->mark)); + if (rc) + goto out_free; + rc = TABLE_POPULATE_RESP_U24(resp, ct, counter_id, conn->cnt->fw_id); + if (rc) + goto out_free; + + MCDI_SET_DWORD(inbuf, TABLE_INSERT_IN_TABLE_ID, TABLE_ID_CONNTRACK_TABLE); + MCDI_SET_WORD(inbuf, TABLE_INSERT_IN_KEY_WIDTH, + efx->tc->meta_ct.desc.key_width); + /* MASK_WIDTH is zero as CT is a BCAM */ + MCDI_SET_WORD(inbuf, TABLE_INSERT_IN_RESP_WIDTH, + efx->tc->meta_ct.desc.resp_width); + memcpy(MCDI_PTR(inbuf, TABLE_INSERT_IN_DATA), key, kw * sizeof(__le32)); + memcpy(MCDI_PTR(inbuf, TABLE_INSERT_IN_DATA) + kw * sizeof(__le32), + resp, rw * sizeof(__le32)); + + BUILD_BUG_ON(MC_CMD_TABLE_INSERT_OUT_LEN); + + rc = efx_mcdi_rpc(efx, MC_CMD_TABLE_INSERT, inbuf, inlen, NULL, 0, NULL); + +out_free: + kfree(resp); + kfree(key); + kfree(inbuf); + return rc; +} + +int efx_mae_remove_ct(struct efx_nic *efx, struct efx_tc_ct_entry *conn) +{ + __le32 *key = NULL; + efx_dword_t *inbuf; + size_t inlen, kw; + int rc = -ENOMEM; + + /* Check table access is supported */ + if (!efx->tc->meta_ct.hooked) + return -EOPNOTSUPP; + + /* key width is in bits; convert to dwords for IN_LEN */ + kw = DIV_ROUND_UP(efx->tc->meta_ct.desc.key_width, 32); + BUILD_BUG_ON(sizeof(__le32) != MC_CMD_TABLE_DELETE_IN_DATA_LEN); + inlen = MC_CMD_TABLE_DELETE_IN_LEN(kw); + if (inlen > MC_CMD_TABLE_DELETE_IN_LENMAX_MCDI2) + return -E2BIG; + inbuf = kzalloc(inlen, GFP_KERNEL); + if (!inbuf) + return -ENOMEM; + + key = kcalloc(kw, sizeof(__le32), GFP_KERNEL); + if (!key) + goto out_free; + + rc = efx_mae_populate_ct_key(efx, key, kw, conn); + if (rc) + goto out_free; + + MCDI_SET_DWORD(inbuf, TABLE_DELETE_IN_TABLE_ID, TABLE_ID_CONNTRACK_TABLE); + MCDI_SET_WORD(inbuf, TABLE_DELETE_IN_KEY_WIDTH, + efx->tc->meta_ct.desc.key_width); + /* MASK_WIDTH is zero as CT is a BCAM */ + /* RESP_WIDTH is zero for DELETE */ + memcpy(MCDI_PTR(inbuf, TABLE_DELETE_IN_DATA), key, kw * sizeof(__le32)); + + BUILD_BUG_ON(MC_CMD_TABLE_DELETE_OUT_LEN); + + rc = efx_mcdi_rpc(efx, MC_CMD_TABLE_DELETE, inbuf, inlen, NULL, 0, NULL); + +out_free: + kfree(key); + kfree(inbuf); + return rc; +} + static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit), const struct efx_tc_match *match) { @@ -1165,20 +2097,40 @@ static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit), } MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK, match->mask.ingress_port); - EFX_POPULATE_DWORD_2(*_MCDI_STRUCT_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS), + EFX_POPULATE_DWORD_5(*_MCDI_STRUCT_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS), + MAE_FIELD_MASK_VALUE_PAIRS_V2_DO_CT, + match->value.ct_state_trk, + MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_HIT, + match->value.ct_state_est, MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG, match->value.ip_frag, MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG, - match->value.ip_firstfrag); - EFX_POPULATE_DWORD_2(*_MCDI_STRUCT_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK), + match->value.ip_firstfrag, + MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_SYN_FIN_RST, + match->value.tcp_syn_fin_rst); + EFX_POPULATE_DWORD_5(*_MCDI_STRUCT_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK), + MAE_FIELD_MASK_VALUE_PAIRS_V2_DO_CT, + match->mask.ct_state_trk, + MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_HIT, + match->mask.ct_state_est, MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG, match->mask.ip_frag, MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG, - match->mask.ip_firstfrag); + match->mask.ip_firstfrag, + 
MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_SYN_FIN_RST, + match->mask.tcp_syn_fin_rst); MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID, match->value.recirc_id); MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK, match->mask.recirc_id); + MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK, + match->value.ct_mark); + MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_MASK, + match->mask.ct_mark); + MCDI_STRUCT_SET_WORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN, + match->value.ct_zone); + MCDI_STRUCT_SET_WORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_MASK, + match->mask.ct_zone); MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE, match->value.eth_proto); MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_MASK, diff --git a/drivers/net/ethernet/sfc/mae.h b/drivers/net/ethernet/sfc/mae.h index 24abfe509690..8df30bc4f3ba 100644 --- a/drivers/net/ethernet/sfc/mae.h +++ b/drivers/net/ethernet/sfc/mae.h @@ -66,6 +66,9 @@ int efx_mae_start_counters(struct efx_nic *efx, struct efx_rx_queue *rx_queue); int efx_mae_stop_counters(struct efx_nic *efx, struct efx_rx_queue *rx_queue); void efx_mae_counters_grant_credits(struct work_struct *work); +int efx_mae_get_tables(struct efx_nic *efx); +void efx_mae_free_tables(struct efx_nic *efx); + #define MAE_NUM_FIELDS (MAE_FIELD_ENC_VNET_ID + 1) struct mae_caps { @@ -81,6 +84,9 @@ int efx_mae_get_caps(struct efx_nic *efx, struct mae_caps *caps); int efx_mae_match_check_caps(struct efx_nic *efx, const struct efx_tc_match_fields *mask, struct netlink_ext_ack *extack); +int efx_mae_match_check_caps_lhs(struct efx_nic *efx, + const struct efx_tc_match_fields *mask, + struct netlink_ext_ack *extack); int efx_mae_check_encap_match_caps(struct efx_nic *efx, bool ipv6, u8 ip_tos_mask, __be16 udp_sport_mask, struct netlink_ext_ack *extack); @@ -97,6 +103,10 @@ int efx_mae_update_encap_md(struct efx_nic *efx, int efx_mae_free_encap_md(struct efx_nic *efx, struct efx_tc_encap_action *encap); +int efx_mae_allocate_pedit_mac(struct efx_nic *efx, + struct efx_tc_mac_pedit_action *ped); +void efx_mae_free_pedit_mac(struct efx_nic *efx, + struct efx_tc_mac_pedit_action *ped); int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act); int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id); @@ -109,6 +119,12 @@ int efx_mae_register_encap_match(struct efx_nic *efx, struct efx_tc_encap_match *encap); int efx_mae_unregister_encap_match(struct efx_nic *efx, struct efx_tc_encap_match *encap); +int efx_mae_insert_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule, + u32 prio); +int efx_mae_remove_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule); +struct efx_tc_ct_entry; /* see tc_conntrack.h */ +int efx_mae_insert_ct(struct efx_nic *efx, struct efx_tc_ct_entry *conn); +int efx_mae_remove_ct(struct efx_nic *efx, struct efx_tc_ct_entry *conn); int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match, u32 prio, u32 acts_id, u32 *id); diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index a7f2c31071e8..76578502226e 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -10,7 +10,6 @@ #include "net_driver.h" #include "nic.h" #include "io.h" -#include "farch_regs.h" #include "mcdi_pcol.h" /************************************************************************** @@ -1353,12 +1352,6 @@ void 
efx_mcdi_process_event(struct efx_channel *channel, case MCDI_EVENT_CODE_MAC_STATS_DMA: /* MAC stats are gather lazily. We can ignore this. */ break; - case MCDI_EVENT_CODE_FLR: - if (efx->type->sriov_flr) - efx->type->sriov_flr(efx, - MCDI_EVENT_FIELD(*event, FLR_VF)); - break; - case MCDI_EVENT_CODE_PTP_RX: case MCDI_EVENT_CODE_PTP_FAULT: case MCDI_EVENT_CODE_PTP_PPS: efx_ptp_event(efx, event); @@ -2212,10 +2205,9 @@ int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type, goto out_free; } - strncpy(desc, + strscpy(desc, MCDI_PTR(outbuf, NVRAM_METADATA_OUT_DESCRIPTION), MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(outlen)); - desc[MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(outlen)] = '\0'; } else { desc[0] = '\0'; } diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 454e9d51a4c2..ea612c619874 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -218,14 +218,28 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); BUILD_BUG_ON(_field ## _LEN != 1); \ *(u8 *)MCDI_STRUCT_PTR(_buf, _field) = _value; \ } while (0) +#define MCDI_STRUCT_POPULATE_BYTE_1(_buf, _field, _name, _value) do { \ + efx_dword_t _temp; \ + EFX_POPULATE_DWORD_1(_temp, _name, _value); \ + MCDI_STRUCT_SET_BYTE(_buf, _field, \ + EFX_DWORD_FIELD(_temp, EFX_BYTE_0)); \ + } while (0) #define MCDI_BYTE(_buf, _field) \ ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \ *MCDI_PTR(_buf, _field)) +#define MCDI_STRUCT_BYTE(_buf, _field) \ + ((void)BUILD_BUG_ON_ZERO(_field ## _LEN != 1), \ + *MCDI_STRUCT_PTR(_buf, _field)) #define MCDI_SET_WORD(_buf, _field, _value) do { \ BUILD_BUG_ON(MC_CMD_ ## _field ## _LEN != 2); \ BUILD_BUG_ON(MC_CMD_ ## _field ## _OFST & 1); \ *(__force __le16 *)MCDI_PTR(_buf, _field) = cpu_to_le16(_value);\ } while (0) +#define MCDI_STRUCT_SET_WORD(_buf, _field, _value) do { \ + BUILD_BUG_ON(_field ## _LEN != 2); \ + BUILD_BUG_ON(_field ## _OFST & 1); \ + *(__force __le16 *)MCDI_STRUCT_PTR(_buf, _field) = cpu_to_le16(_value);\ + } while (0) #define MCDI_WORD(_buf, _field) \ ((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \ le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field))) diff --git a/drivers/net/ethernet/sfc/mcdi_functions.c b/drivers/net/ethernet/sfc/mcdi_functions.c index d3e6d8239f5c..ff8424167384 100644 --- a/drivers/net/ethernet/sfc/mcdi_functions.c +++ b/drivers/net/ethernet/sfc/mcdi_functions.c @@ -62,7 +62,7 @@ int efx_mcdi_alloc_vis(struct efx_nic *efx, unsigned int min_vis, int efx_mcdi_ev_probe(struct efx_channel *channel) { - return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf, + return efx_nic_alloc_buffer(channel->efx, &channel->eventq, (channel->eventq_mask + 1) * sizeof(efx_qword_t), GFP_KERNEL); @@ -74,14 +74,14 @@ int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2) MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 / EFX_BUF_SIZE)); MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN); - size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE; + size_t entries = channel->eventq.len / EFX_BUF_SIZE; struct efx_nic *efx = channel->efx; size_t inlen, outlen; dma_addr_t dma_addr; int rc, i; /* Fill event queue with all ones (i.e. 
empty events) */ - memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); + memset(channel->eventq.addr, 0xff, channel->eventq.len); MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1); MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel); @@ -112,7 +112,7 @@ int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2) INIT_EVQ_IN_FLAG_CUT_THRU, v1_cut_thru); } - dma_addr = channel->eventq.buf.dma_addr; + dma_addr = channel->eventq.dma_addr; for (i = 0; i < entries; ++i) { MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr); dma_addr += EFX_BUF_SIZE; @@ -134,7 +134,7 @@ int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2) void efx_mcdi_ev_remove(struct efx_channel *channel) { - efx_nic_free_buffer(channel->efx, &channel->eventq.buf); + efx_nic_free_buffer(channel->efx, &channel->eventq); } void efx_mcdi_ev_fini(struct efx_channel *channel) @@ -166,7 +166,7 @@ int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue) EFX_BUF_SIZE)); bool csum_offload = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM; bool inner_csum = tx_queue->type & EFX_TXQ_TYPE_INNER_CSUM; - size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE; + size_t entries = tx_queue->txd.len / EFX_BUF_SIZE; struct efx_channel *channel = tx_queue->channel; struct efx_nic *efx = tx_queue->efx; dma_addr_t dma_addr; @@ -182,7 +182,7 @@ int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue) MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0); MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, efx->vport_id); - dma_addr = tx_queue->txd.buf.dma_addr; + dma_addr = tx_queue->txd.dma_addr; netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n", tx_queue->queue, entries, (u64)dma_addr); @@ -240,7 +240,7 @@ fail: void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue) { - efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf); + efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd); } void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue) @@ -269,7 +269,7 @@ fail: int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue) { - return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf, + return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd, (rx_queue->ptr_mask + 1) * sizeof(efx_qword_t), GFP_KERNEL); @@ -278,7 +278,7 @@ int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue) void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue) { struct efx_channel *channel = efx_rx_queue_channel(rx_queue); - size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE; + size_t entries = rx_queue->rxd.len / EFX_BUF_SIZE; MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_RXQ_V4_IN_LEN); struct efx_nic *efx = rx_queue->efx; unsigned int buffer_size; @@ -306,7 +306,7 @@ void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue) MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, efx->vport_id); MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES, buffer_size); - dma_addr = rx_queue->rxd.buf.dma_addr; + dma_addr = rx_queue->rxd.dma_addr; netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. 
%zu entries (%llx)\n", efx_rx_queue_index(rx_queue), entries, (u64)dma_addr); @@ -325,7 +325,7 @@ void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue) void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue) { - efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf); + efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd); } void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue) diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c index 0ab14f3d01d4..76ea26722ca4 100644 --- a/drivers/net/ethernet/sfc/mcdi_port_common.c +++ b/drivers/net/ethernet/sfc/mcdi_port_common.c @@ -1106,11 +1106,6 @@ int efx_mcdi_set_mac(struct efx_nic *efx) MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU, efx_calc_mac_mtu(efx)); MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0); - - /* Set simple MAC filter for Siena */ - MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT, - SET_MAC_IN_REJECT_UNCST, efx->unicast_filter); - MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_FLAGS, SET_MAC_IN_FLAG_INCLUDE_FCS, !!(efx->net_dev->features & NETIF_F_RXFCS)); diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index a7a22b019794..27d86e90a3bb 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -67,9 +67,7 @@ #define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS) #define EFX_TXQ_TYPE_OUTER_CSUM 1 /* Outer checksum offload */ #define EFX_TXQ_TYPE_INNER_CSUM 2 /* Inner checksum offload */ -#define EFX_TXQ_TYPE_HIGHPRI 4 /* High-priority (for TC) */ -#define EFX_TXQ_TYPES 8 -/* HIGHPRI is Siena-only, and INNER_CSUM is EF10, so no need for both */ +#define EFX_TXQ_TYPES 4 #define EFX_MAX_TXQ_PER_CHANNEL 4 #define EFX_MAX_TX_QUEUES (EFX_MAX_TXQ_PER_CHANNEL * EFX_MAX_CHANNELS) @@ -125,26 +123,6 @@ struct efx_buffer { }; /** - * struct efx_special_buffer - DMA buffer entered into buffer table - * @buf: Standard &struct efx_buffer - * @index: Buffer index within controller;s buffer table - * @entries: Number of buffer table entries - * - * The NIC has a buffer table that maps buffers of size %EFX_BUF_SIZE. - * Event and descriptor rings are addressed via one or more buffer - * table entries (and so can be physically non-contiguous, although we - * currently do not take advantage of that). On Falcon and Siena we - * have to take care of allocating and initialising the entries - * ourselves. On later hardware this is managed by the firmware and - * @index and @entries are left as 0. - */ -struct efx_special_buffer { - struct efx_buffer buf; - unsigned int index; - unsigned int entries; -}; - -/** * struct efx_tx_buffer - buffer state for a TX descriptor * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be * freed when descriptor completes @@ -237,7 +215,7 @@ struct efx_tx_buffer { * Normally this will equal @write_count, but as option descriptors * don't produce completion events, they won't update this. * Filled in iff @efx->type->option_descriptors; only used for PIO. - * Thus, this is written and used on EF10, and neither on farch. + * Thus, this is only written and used on EF10. * @old_read_count: The value of read_count when last checked. * This is here for performance reasons. 
The xmit path will * only get the up-to-date value of read_count if this @@ -270,7 +248,7 @@ struct efx_tx_queue { struct netdev_queue *core_txq; struct efx_tx_buffer *buffer; struct efx_buffer *cb_page; - struct efx_special_buffer txd; + struct efx_buffer txd; unsigned int ptr_mask; void __iomem *piobuf; unsigned int piobuf_offset; @@ -399,7 +377,7 @@ struct efx_rx_queue { struct efx_nic *efx; int core_index; struct efx_rx_buffer *buffer; - struct efx_special_buffer rxd; + struct efx_buffer rxd; unsigned int ptr_mask; bool refill_enabled; bool flush_pending; @@ -515,7 +493,7 @@ struct efx_channel { #ifdef CONFIG_NET_RX_BUSY_POLL unsigned long busy_poll_state; #endif - struct efx_special_buffer eventq; + struct efx_buffer eventq; unsigned int eventq_mask; unsigned int eventq_read_ptr; int event_test_cpu; @@ -754,18 +732,6 @@ struct efx_hw_stat_desc { u16 offset; }; -/* Number of bits used in a multicast filter hash address */ -#define EFX_MCAST_HASH_BITS 8 - -/* Number of (single-bit) entries in a multicast filter hash */ -#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS) - -/* An Efx multicast filter hash */ -union efx_multicast_hash { - u8 byte[EFX_MCAST_HASH_ENTRIES / 8]; - efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8]; -}; - struct vfdi_status; /* The reserved RSS context value */ @@ -895,7 +861,6 @@ struct efx_mae; * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches * @sram_lim_qw: Qword address limit of SRAM - * @next_buffer_table: First available buffer table id * @n_channels: Number of channels in use * @n_rx_channels: Number of channels used for RX (= number of RX queues) * @n_tx_channels: Number of channels used for TX @@ -957,10 +922,6 @@ struct efx_mae; * see &enum ethtool_fec_config_bits. * @link_state: Current state of the link * @n_link_state_changes: Number of times the link has changed state - * @unicast_filter: Flag for Falcon-arch simple unicast filter. - * Protected by @mac_lock. - * @multicast_hash: Multicast hash table for Falcon-arch. - * Protected by @mac_lock. * @wanted_fc: Wanted flow control flags * @fc_disable: When non-zero flow control is disabled. Typically used to * ensure that network back pressure doesn't delay dma queue flushes. @@ -1064,7 +1025,6 @@ struct efx_nic { unsigned tx_dc_base; unsigned rx_dc_base; unsigned sram_lim_qw; - unsigned next_buffer_table; unsigned int max_channels; unsigned int max_vis; @@ -1139,8 +1099,6 @@ struct efx_nic { struct efx_link_state link_state; unsigned int n_link_state_changes; - bool unicast_filter; - union efx_multicast_hash multicast_hash; u8 wanted_fc; unsigned fc_disable; @@ -1263,10 +1221,6 @@ struct efx_udp_tunnel { * @remove_port: Free resources allocated by probe_port() * @handle_global_event: Handle a "global" event (may be %NULL) * @fini_dmaq: Flush and finalise DMA queues (RX and TX queues) - * @prepare_flush: Prepare the hardware for flushing the DMA queues - * (for Falcon architecture) - * @finish_flush: Clean up after flushing the DMA queues (for Falcon - * architecture) * @prepare_flr: Prepare for an FLR * @finish_flr: Clean up after an FLR * @describe_stats: Describe statistics for ethtool @@ -1288,8 +1242,7 @@ struct efx_udp_tunnel { * @set_wol: Push WoL configuration to the NIC * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) * @get_fec_stats: Get standard FEC statistics. - * @test_chip: Test registers. 
May use efx_farch_test_registers(), and is - * expected to reset the NIC. + * @test_chip: Test registers. This is expected to reset the NIC. * @test_nvram: Test validity of NVRAM contents * @mcdi_request: Send an MCDI request with the given header and SDU. * The SDU length may be any value from 0 up to the protocol- @@ -1414,8 +1367,6 @@ struct efx_nic_type { void (*remove_port)(struct efx_nic *efx); bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *); int (*fini_dmaq)(struct efx_nic *efx); - void (*prepare_flush)(struct efx_nic *efx); - void (*finish_flush)(struct efx_nic *efx); void (*prepare_flr)(struct efx_nic *efx); void (*finish_flr)(struct efx_nic *efx); size_t (*describe_stats)(struct efx_nic *efx, u8 *names); @@ -1531,8 +1482,6 @@ struct efx_nic_type { int (*sriov_init)(struct efx_nic *efx); void (*sriov_fini)(struct efx_nic *efx); bool (*sriov_wanted)(struct efx_nic *efx); - void (*sriov_reset)(struct efx_nic *efx); - void (*sriov_flr)(struct efx_nic *efx, unsigned vf_i); int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, const u8 *mac); int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan, u8 qos); diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index 63e2394382bb..a33ed473cc8a 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -17,7 +17,6 @@ #include "efx.h" #include "nic.h" #include "ef10_regs.h" -#include "farch_regs.h" #include "io.h" #include "workarounds.h" #include "mcdi_pcol.h" @@ -172,10 +171,6 @@ void efx_nic_fini_interrupt(struct efx_nic *efx) /* Register dump */ -#define REGISTER_REVISION_FA 1 -#define REGISTER_REVISION_FB 2 -#define REGISTER_REVISION_FC 3 -#define REGISTER_REVISION_FZ 3 /* last Falcon arch revision */ #define REGISTER_REVISION_ED 4 #define REGISTER_REVISION_EZ 4 /* latest EF10 revision */ @@ -189,117 +184,9 @@ struct efx_nic_reg { REGISTER_REVISION_ ## arch ## min_rev, \ REGISTER_REVISION_ ## arch ## max_rev \ } -#define REGISTER_AA(name) REGISTER(name, F, A, A) -#define REGISTER_AB(name) REGISTER(name, F, A, B) -#define REGISTER_AZ(name) REGISTER(name, F, A, Z) -#define REGISTER_BB(name) REGISTER(name, F, B, B) -#define REGISTER_BZ(name) REGISTER(name, F, B, Z) -#define REGISTER_CZ(name) REGISTER(name, F, C, Z) #define REGISTER_DZ(name) REGISTER(name, E, D, Z) static const struct efx_nic_reg efx_nic_regs[] = { - REGISTER_AZ(ADR_REGION), - REGISTER_AZ(INT_EN_KER), - REGISTER_BZ(INT_EN_CHAR), - REGISTER_AZ(INT_ADR_KER), - REGISTER_BZ(INT_ADR_CHAR), - /* INT_ACK_KER is WO */ - /* INT_ISR0 is RC */ - REGISTER_AZ(HW_INIT), - REGISTER_CZ(USR_EV_CFG), - REGISTER_AB(EE_SPI_HCMD), - REGISTER_AB(EE_SPI_HADR), - REGISTER_AB(EE_SPI_HDATA), - REGISTER_AB(EE_BASE_PAGE), - REGISTER_AB(EE_VPD_CFG0), - /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */ - /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */ - /* PCIE_CORE_INDIRECT is indirect */ - REGISTER_AB(NIC_STAT), - REGISTER_AB(GPIO_CTL), - REGISTER_AB(GLB_CTL), - /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */ - REGISTER_BZ(DP_CTRL), - REGISTER_AZ(MEM_STAT), - REGISTER_AZ(CS_DEBUG), - REGISTER_AZ(ALTERA_BUILD), - REGISTER_AZ(CSR_SPARE), - REGISTER_AB(PCIE_SD_CTL0123), - REGISTER_AB(PCIE_SD_CTL45), - REGISTER_AB(PCIE_PCS_CTL_STAT), - /* DEBUG_DATA_OUT is not used */ - /* DRV_EV is WO */ - REGISTER_AZ(EVQ_CTL), - REGISTER_AZ(EVQ_CNT1), - REGISTER_AZ(EVQ_CNT2), - REGISTER_AZ(BUF_TBL_CFG), - REGISTER_AZ(SRM_RX_DC_CFG), - REGISTER_AZ(SRM_TX_DC_CFG), - REGISTER_AZ(SRM_CFG), - /* BUF_TBL_UPD is WO */ - 
REGISTER_AZ(SRM_UPD_EVQ), - REGISTER_AZ(SRAM_PARITY), - REGISTER_AZ(RX_CFG), - REGISTER_BZ(RX_FILTER_CTL), - /* RX_FLUSH_DESCQ is WO */ - REGISTER_AZ(RX_DC_CFG), - REGISTER_AZ(RX_DC_PF_WM), - REGISTER_BZ(RX_RSS_TKEY), - /* RX_NODESC_DROP is RC */ - REGISTER_AA(RX_SELF_RST), - /* RX_DEBUG, RX_PUSH_DROP are not used */ - REGISTER_CZ(RX_RSS_IPV6_REG1), - REGISTER_CZ(RX_RSS_IPV6_REG2), - REGISTER_CZ(RX_RSS_IPV6_REG3), - /* TX_FLUSH_DESCQ is WO */ - REGISTER_AZ(TX_DC_CFG), - REGISTER_AA(TX_CHKSM_CFG), - REGISTER_AZ(TX_CFG), - /* TX_PUSH_DROP is not used */ - REGISTER_AZ(TX_RESERVED), - REGISTER_BZ(TX_PACE), - /* TX_PACE_DROP_QID is RC */ - REGISTER_BB(TX_VLAN), - REGISTER_BZ(TX_IPFIL_PORTEN), - REGISTER_AB(MD_TXD), - REGISTER_AB(MD_RXD), - REGISTER_AB(MD_CS), - REGISTER_AB(MD_PHY_ADR), - REGISTER_AB(MD_ID), - /* MD_STAT is RC */ - REGISTER_AB(MAC_STAT_DMA), - REGISTER_AB(MAC_CTRL), - REGISTER_BB(GEN_MODE), - REGISTER_AB(MAC_MC_HASH_REG0), - REGISTER_AB(MAC_MC_HASH_REG1), - REGISTER_AB(GM_CFG1), - REGISTER_AB(GM_CFG2), - /* GM_IPG and GM_HD are not used */ - REGISTER_AB(GM_MAX_FLEN), - /* GM_TEST is not used */ - REGISTER_AB(GM_ADR1), - REGISTER_AB(GM_ADR2), - REGISTER_AB(GMF_CFG0), - REGISTER_AB(GMF_CFG1), - REGISTER_AB(GMF_CFG2), - REGISTER_AB(GMF_CFG3), - REGISTER_AB(GMF_CFG4), - REGISTER_AB(GMF_CFG5), - REGISTER_BB(TX_SRC_MAC_CTL), - REGISTER_AB(XM_ADR_LO), - REGISTER_AB(XM_ADR_HI), - REGISTER_AB(XM_GLB_CFG), - REGISTER_AB(XM_TX_CFG), - REGISTER_AB(XM_RX_CFG), - REGISTER_AB(XM_MGT_INT_MASK), - REGISTER_AB(XM_FC), - REGISTER_AB(XM_PAUSE_TIME), - REGISTER_AB(XM_TX_PARAM), - REGISTER_AB(XM_RX_PARAM), - /* XM_MGT_INT_MSK (note no 'A') is RC */ - REGISTER_AB(XX_PWR_RST), - REGISTER_AB(XX_SD_CTL), - REGISTER_AB(XX_TXDRV_CTL), /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */ /* XX_CORE_STAT is partly RC */ REGISTER_DZ(BIU_HW_REV_ID), @@ -325,49 +212,9 @@ struct efx_nic_reg_table { arch, min_rev, max_rev, \ arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \ arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS) -#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A) -#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z) -#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B) -#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z) -#define REGISTER_TABLE_BB_CZ(name) \ - REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B, \ - FR_BZ_ ## name ## _STEP, \ - FR_BB_ ## name ## _ROWS), \ - REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z, \ - FR_BZ_ ## name ## _STEP, \ - FR_CZ_ ## name ## _ROWS) -#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z) #define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z) static const struct efx_nic_reg_table efx_nic_reg_tables[] = { - /* DRIVER is not used */ - /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */ - REGISTER_TABLE_BB(TX_IPFIL_TBL), - REGISTER_TABLE_BB(TX_SRC_MAC_TBL), - REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER), - REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL), - REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER), - REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL), - REGISTER_TABLE_AA(EVQ_PTR_TBL_KER), - REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL), - /* We can't reasonably read all of the buffer table (up to 8MB!). - * However this driver will only use a few entries. Reading - * 1K entries allows for some expansion of queue count and - * size before we need to change the version. 
*/ - REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER, - F, A, A, 8, 1024), - REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL, - F, B, Z, 8, 1024), - REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0), - REGISTER_TABLE_BB_CZ(TIMER_TBL), - REGISTER_TABLE_BB_CZ(TX_PACE_TBL), - REGISTER_TABLE_BZ(RX_INDIRECTION_TBL), - /* TX_FILTER_TBL0 is huge and not used by this driver */ - REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0), - REGISTER_TABLE_CZ(MC_TREG_SMEM), - /* MSIX_PBA_TABLE is not mapped */ - /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */ - REGISTER_TABLE_BZ(RX_FILTER_TBL0), REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS), }; @@ -425,11 +272,6 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf) case 4: /* 32-bit SRAM */ efx_readd(efx, buf, table->offset + 4 * i); break; - case 8: /* 64-bit SRAM */ - efx_sram_readq(efx, - efx->membase + table->offset, - buf, i); - break; case 16: /* 128-bit-readable register */ efx_reado_table(efx, buf, table->offset, i); break; diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 251868235ae4..1db64fc6e909 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -11,8 +11,6 @@ #include "nic_common.h" #include "efx.h" -u32 efx_farch_fpga_ver(struct efx_nic *efx); - enum { PHY_TYPE_NONE = 0, PHY_TYPE_TXC43128 = 1, @@ -26,97 +24,6 @@ enum { }; enum { - SIENA_STAT_tx_bytes = GENERIC_STAT_COUNT, - SIENA_STAT_tx_good_bytes, - SIENA_STAT_tx_bad_bytes, - SIENA_STAT_tx_packets, - SIENA_STAT_tx_bad, - SIENA_STAT_tx_pause, - SIENA_STAT_tx_control, - SIENA_STAT_tx_unicast, - SIENA_STAT_tx_multicast, - SIENA_STAT_tx_broadcast, - SIENA_STAT_tx_lt64, - SIENA_STAT_tx_64, - SIENA_STAT_tx_65_to_127, - SIENA_STAT_tx_128_to_255, - SIENA_STAT_tx_256_to_511, - SIENA_STAT_tx_512_to_1023, - SIENA_STAT_tx_1024_to_15xx, - SIENA_STAT_tx_15xx_to_jumbo, - SIENA_STAT_tx_gtjumbo, - SIENA_STAT_tx_collision, - SIENA_STAT_tx_single_collision, - SIENA_STAT_tx_multiple_collision, - SIENA_STAT_tx_excessive_collision, - SIENA_STAT_tx_deferred, - SIENA_STAT_tx_late_collision, - SIENA_STAT_tx_excessive_deferred, - SIENA_STAT_tx_non_tcpudp, - SIENA_STAT_tx_mac_src_error, - SIENA_STAT_tx_ip_src_error, - SIENA_STAT_rx_bytes, - SIENA_STAT_rx_good_bytes, - SIENA_STAT_rx_bad_bytes, - SIENA_STAT_rx_packets, - SIENA_STAT_rx_good, - SIENA_STAT_rx_bad, - SIENA_STAT_rx_pause, - SIENA_STAT_rx_control, - SIENA_STAT_rx_unicast, - SIENA_STAT_rx_multicast, - SIENA_STAT_rx_broadcast, - SIENA_STAT_rx_lt64, - SIENA_STAT_rx_64, - SIENA_STAT_rx_65_to_127, - SIENA_STAT_rx_128_to_255, - SIENA_STAT_rx_256_to_511, - SIENA_STAT_rx_512_to_1023, - SIENA_STAT_rx_1024_to_15xx, - SIENA_STAT_rx_15xx_to_jumbo, - SIENA_STAT_rx_gtjumbo, - SIENA_STAT_rx_bad_gtjumbo, - SIENA_STAT_rx_overflow, - SIENA_STAT_rx_false_carrier, - SIENA_STAT_rx_symbol_error, - SIENA_STAT_rx_align_error, - SIENA_STAT_rx_length_error, - SIENA_STAT_rx_internal_error, - SIENA_STAT_rx_nodesc_drop_cnt, - SIENA_STAT_COUNT -}; - -/** - * struct siena_nic_data - Siena NIC state - * @efx: Pointer back to main interface structure - * @wol_filter_id: Wake-on-LAN packet filter id - * @stats: Hardware statistics - * @vf: Array of &struct siena_vf objects - * @vf_buftbl_base: The zeroth buffer table index used to back VF queues. - * @vfdi_status: Common VFDI status page to be dmad to VF address space. - * @local_addr_list: List of local addresses. Protected by %local_lock. - * @local_page_list: List of DMA addressable pages used to broadcast - * %local_addr_list. Protected by %local_lock. 
- * @local_lock: Mutex protecting %local_addr_list and %local_page_list. - * @peer_work: Work item to broadcast peer addresses to VMs. - */ -struct siena_nic_data { - struct efx_nic *efx; - int wol_filter_id; - u64 stats[SIENA_STAT_COUNT]; -#ifdef CONFIG_SFC_SRIOV - struct siena_vf *vf; - struct efx_channel *vfdi_channel; - unsigned vf_buftbl_base; - struct efx_buffer vfdi_status; - struct list_head local_addr_list; - struct list_head local_page_list; - struct mutex local_lock; - struct work_struct peer_work; -#endif -}; - -enum { EF10_STAT_port_tx_bytes = GENERIC_STAT_COUNT, EF10_STAT_port_tx_packets, EF10_STAT_port_tx_pause, @@ -304,89 +211,4 @@ int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb, extern const struct efx_nic_type efx_hunt_a0_nic_type; extern const struct efx_nic_type efx_hunt_a0_vf_nic_type; -int falcon_probe_board(struct efx_nic *efx, u16 revision_info); - -/* Falcon/Siena queue operations */ -int efx_farch_tx_probe(struct efx_tx_queue *tx_queue); -void efx_farch_tx_init(struct efx_tx_queue *tx_queue); -void efx_farch_tx_fini(struct efx_tx_queue *tx_queue); -void efx_farch_tx_remove(struct efx_tx_queue *tx_queue); -void efx_farch_tx_write(struct efx_tx_queue *tx_queue); -unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue, - dma_addr_t dma_addr, unsigned int len); -int efx_farch_rx_probe(struct efx_rx_queue *rx_queue); -void efx_farch_rx_init(struct efx_rx_queue *rx_queue); -void efx_farch_rx_fini(struct efx_rx_queue *rx_queue); -void efx_farch_rx_remove(struct efx_rx_queue *rx_queue); -void efx_farch_rx_write(struct efx_rx_queue *rx_queue); -void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue); -int efx_farch_ev_probe(struct efx_channel *channel); -int efx_farch_ev_init(struct efx_channel *channel); -void efx_farch_ev_fini(struct efx_channel *channel); -void efx_farch_ev_remove(struct efx_channel *channel); -int efx_farch_ev_process(struct efx_channel *channel, int quota); -void efx_farch_ev_read_ack(struct efx_channel *channel); -void efx_farch_ev_test_generate(struct efx_channel *channel); - -/* Falcon/Siena filter operations */ -int efx_farch_filter_table_probe(struct efx_nic *efx); -void efx_farch_filter_table_restore(struct efx_nic *efx); -void efx_farch_filter_table_remove(struct efx_nic *efx); -void efx_farch_filter_update_rx_scatter(struct efx_nic *efx); -s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec, - bool replace); -int efx_farch_filter_remove_safe(struct efx_nic *efx, - enum efx_filter_priority priority, - u32 filter_id); -int efx_farch_filter_get_safe(struct efx_nic *efx, - enum efx_filter_priority priority, u32 filter_id, - struct efx_filter_spec *); -int efx_farch_filter_clear_rx(struct efx_nic *efx, - enum efx_filter_priority priority); -u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, - enum efx_filter_priority priority); -u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx); -s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx, - enum efx_filter_priority priority, u32 *buf, - u32 size); -#ifdef CONFIG_RFS_ACCEL -bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, - unsigned int index); -#endif -void efx_farch_filter_sync_rx_mode(struct efx_nic *efx); - -/* Falcon/Siena interrupts */ -void efx_farch_irq_enable_master(struct efx_nic *efx); -int efx_farch_irq_test_generate(struct efx_nic *efx); -void efx_farch_irq_disable_master(struct efx_nic *efx); -irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id); -irqreturn_t 
efx_farch_legacy_interrupt(int irq, void *dev_id); -irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx); - -/* Global Resources */ -void siena_prepare_flush(struct efx_nic *efx); -int efx_farch_fini_dmaq(struct efx_nic *efx); -void efx_farch_finish_flr(struct efx_nic *efx); -void siena_finish_flush(struct efx_nic *efx); -void falcon_start_nic_stats(struct efx_nic *efx); -void falcon_stop_nic_stats(struct efx_nic *efx); -int falcon_reset_xaui(struct efx_nic *efx); -void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); -void efx_farch_init_common(struct efx_nic *efx); -void efx_farch_rx_push_indir_table(struct efx_nic *efx); -void efx_farch_rx_pull_indir_table(struct efx_nic *efx); - -/* Tests */ -struct efx_farch_register_test { - unsigned address; - efx_oword_t mask; -}; - -int efx_farch_test_registers(struct efx_nic *efx, - const struct efx_farch_register_test *regs, - size_t n_regs); - -void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq, - efx_qword_t *event); - #endif /* EFX_NIC_H */ diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h index 0cef35c0c559..466df5348b29 100644 --- a/drivers/net/ethernet/sfc/nic_common.h +++ b/drivers/net/ethernet/sfc/nic_common.h @@ -15,11 +15,10 @@ #include "ptp.h" enum { - /* Revisions 0-2 were Falcon A0, A1 and B0 respectively. + /* Revisions 0-3 were Falcon A0, A1, B0 and Siena respectively. * They are not supported by this driver but these revision numbers * form part of the ethtool API for register dumping. */ - EFX_REV_SIENA_A0 = 3, EFX_REV_HUNT_A0 = 4, EFX_REV_EF100 = 5, }; @@ -33,7 +32,7 @@ static inline int efx_nic_rev(struct efx_nic *efx) static inline efx_qword_t *efx_event(struct efx_channel *channel, unsigned int index) { - return ((efx_qword_t *) (channel->eventq.buf.addr)) + + return ((efx_qword_t *)(channel->eventq.addr)) + (index & channel->eventq_mask); } @@ -59,7 +58,7 @@ static inline int efx_event_present(efx_qword_t *event) static inline efx_qword_t * efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) { - return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index; + return ((efx_qword_t *)(tx_queue->txd.addr)) + index; } /* Report whether this TX queue would be empty for the given write_count. @@ -80,9 +79,7 @@ int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb, /* Decide whether to push a TX descriptor to the NIC vs merely writing * the doorbell. This can reduce latency when we are adding a single - * descriptor to an empty queue, but is otherwise pointless. Further, - * Falcon and Siena have hardware bugs (SF bug 33851) that may be - * triggered if we don't check this. + * descriptor to an empty queue, but is otherwise pointless. * We use the write_count used for the last doorbell push, to get the * NIC's view of the tx queue. 
*/ @@ -99,7 +96,7 @@ static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue, static inline efx_qword_t * efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) { - return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index; + return ((efx_qword_t *)(rx_queue->rxd.addr)) + index; } /* Alignment of PCIe DMA boundaries (4KB) */ diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 0c40571133cb..b04fdbb8aece 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -43,7 +43,6 @@ #include "mcdi.h" #include "mcdi_pcol.h" #include "io.h" -#include "farch_regs.h" #include "tx.h" #include "nic.h" /* indirectly includes ptp.h */ #include "efx_channels.h" @@ -87,9 +86,6 @@ #define PTP_V1_VERSION_LENGTH 2 #define PTP_V1_VERSION_OFFSET 28 -#define PTP_V1_UUID_LENGTH 6 -#define PTP_V1_UUID_OFFSET 50 - #define PTP_V1_SEQUENCE_LENGTH 2 #define PTP_V1_SEQUENCE_OFFSET 58 @@ -101,17 +97,6 @@ #define PTP_V2_VERSION_LENGTH 1 #define PTP_V2_VERSION_OFFSET 29 -#define PTP_V2_UUID_LENGTH 8 -#define PTP_V2_UUID_OFFSET 48 - -/* Although PTP V2 UUIDs are comprised a ClockIdentity (8) and PortNumber (2), - * the MC only captures the last six bytes of the clock identity. These values - * reflect those, not the ones used in the standard. The standard permits - * mapping of V1 UUIDs to V2 UUIDs with these same values. - */ -#define PTP_V2_MC_UUID_LENGTH 6 -#define PTP_V2_MC_UUID_OFFSET 50 - #define PTP_V2_SEQUENCE_LENGTH 2 #define PTP_V2_SEQUENCE_OFFSET 58 @@ -123,11 +108,17 @@ #define PTP_MIN_LENGTH 63 #define PTP_ADDR_IPV4 0xe0000181 /* 224.0.1.129 */ -#define PTP_ADDR_IPV6 {0xff, 0x0e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ - 0, 0x01, 0x81} /* ff0e::181 */ + +/* ff0e::181 */ +static const struct in6_addr ptp_addr_ipv6 = { { { + 0xff, 0x0e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01, 0x81 } } }; + +/* 01-1B-19-00-00-00 */ +static const u8 ptp_addr_ether[ETH_ALEN] __aligned(2) = { + 0x01, 0x1b, 0x19, 0x00, 0x00, 0x00 }; + #define PTP_EVENT_PORT 319 #define PTP_GENERAL_PORT 320 -#define PTP_ADDR_ETHER {0x01, 0x1b, 0x19, 0, 0, 0} /* 01-1B-19-00-00-00 */ /* Annoyingly the format of the version numbers are different between * versions 1 and 2 so it isn't possible to simply look for 1 or 2. @@ -167,14 +158,12 @@ enum ptp_packet_state { /** * struct efx_ptp_match - Matching structure, stored in sk_buff's cb area. - * @words: UUID and (partial) sequence number * @expiry: Time after which the packet should be delivered irrespective of * event arrival. * @state: The state of the packet - whether it is ready for processing or * whether that is of no interest. 
*/ struct efx_ptp_match { - u32 words[DIV_ROUND_UP(PTP_V1_UUID_LENGTH, 4)]; unsigned long expiry; enum ptp_packet_state state; }; @@ -236,15 +225,9 @@ struct efx_ptp_rxfilter { /** * struct efx_ptp_data - Precision Time Protocol (PTP) state * @efx: The NIC context - * @channel: The PTP channel (Siena only) - * @rx_ts_inline: Flag for whether RX timestamps are inline (else they are - * separate events) + * @channel: The PTP channel (for Medford and Medford2) * @rxq: Receive SKB queue (awaiting timestamps) * @txq: Transmit SKB queue - * @evt_list: List of MC receive events awaiting packets - * @evt_free_list: List of free events - * @evt_lock: Lock for manipulating evt_list and evt_free_list - * @rx_evts: Instantiated events (on evt_list and evt_free_list) * @workwq: Work queue for processing pending PTP operations * @work: Work task * @cleanup_work: Work task for periodic cleanup @@ -310,13 +293,8 @@ struct efx_ptp_rxfilter { struct efx_ptp_data { struct efx_nic *efx; struct efx_channel *channel; - bool rx_ts_inline; struct sk_buff_head rxq; struct sk_buff_head txq; - struct list_head evt_list; - struct list_head evt_free_list; - spinlock_t evt_lock; - struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS]; struct workqueue_struct *workwq; struct work_struct work; struct delayed_work cleanup_work; @@ -465,25 +443,6 @@ size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats) return PTP_STAT_COUNT; } -/* For Siena platforms NIC time is s and ns */ -static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor) -{ - struct timespec64 ts = ns_to_timespec64(ns); - *nic_major = (u32)ts.tv_sec; - *nic_minor = ts.tv_nsec; -} - -static ktime_t efx_ptp_s_ns_to_ktime_correction(u32 nic_major, u32 nic_minor, - s32 correction) -{ - ktime_t kt = ktime_set(nic_major, nic_minor); - if (correction >= 0) - kt = ktime_add_ns(kt, (u64)correction); - else - kt = ktime_sub_ns(kt, (u64)-correction); - return kt; -} - /* To convert from s27 format to ns we multiply then divide by a power of 2. * For the conversion from ns to s27, the operation is also converted to a * multiply and shift. 
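The s27 sub-second format kept by the conversion above stores the fractional second in units of 2^-27 s, so both directions of the conversion come down to a 64-bit multiply followed by a shift or divide; the deleted efx_ptp_ns_to_s_ns()/efx_ptp_s_ns_to_ktime_correction() pair was the plain seconds+nanoseconds equivalent used only by the now-removed Siena support. A minimal sketch of the s27 arithmetic follows; the constant and function names are illustrative, not the driver's own:

/* Illustrative sketch of the s27 <-> nanoseconds conversion described
 * above; not part of the patch. */
#include <stdint.h>

#define NSEC_PER_SEC 1000000000u	/* nanoseconds per second */

static uint32_t ns_to_s27(uint32_t ns)
{
	/* minor = ns * 2^27 / 10^9, widened to 64 bits to avoid overflow */
	return (uint32_t)(((uint64_t)ns << 27) / NSEC_PER_SEC);
}

static uint32_t s27_to_ns(uint32_t minor)
{
	/* ns = minor * 10^9 / 2^27; per the comment above, the driver folds
	 * the divide into a multiply-and-shift; the plain form is kept here
	 * for clarity */
	return (uint32_t)(((uint64_t)minor * NSEC_PER_SEC) >> 27);
}

The deleted correction helper also shows why the correction is applied with separate ktime_add_ns()/ktime_sub_ns() calls: the firmware-supplied correction is signed and may be negative.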
@@ -697,12 +656,6 @@ static int efx_ptp_get_attributes(struct efx_nic *efx) ptp->nic_time.minor_max = 1 << 27; ptp->nic_time.sync_event_minor_shift = 19; break; - case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS: - ptp->ns_to_nic_time = efx_ptp_ns_to_s_ns; - ptp->nic_to_kernel_time = efx_ptp_s_ns_to_ktime_correction; - ptp->nic_time.minor_max = 1000000000; - ptp->nic_time.sync_event_minor_shift = 22; - break; case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS: ptp->ns_to_nic_time = efx_ptp_ns_to_s_qns; ptp->nic_to_kernel_time = efx_ptp_s_qns_to_ktime_correction; @@ -1217,76 +1170,6 @@ fail: return; } -static void efx_ptp_drop_time_expired_events(struct efx_nic *efx) -{ - struct efx_ptp_data *ptp = efx->ptp_data; - struct list_head *cursor; - struct list_head *next; - - if (ptp->rx_ts_inline) - return; - - /* Drop time-expired events */ - spin_lock_bh(&ptp->evt_lock); - list_for_each_safe(cursor, next, &ptp->evt_list) { - struct efx_ptp_event_rx *evt; - - evt = list_entry(cursor, struct efx_ptp_event_rx, - link); - if (time_after(jiffies, evt->expiry)) { - list_move(&evt->link, &ptp->evt_free_list); - netif_warn(efx, hw, efx->net_dev, - "PTP rx event dropped\n"); - } - } - spin_unlock_bh(&ptp->evt_lock); -} - -static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx, - struct sk_buff *skb) -{ - struct efx_ptp_data *ptp = efx->ptp_data; - bool evts_waiting; - struct list_head *cursor; - struct list_head *next; - struct efx_ptp_match *match; - enum ptp_packet_state rc = PTP_PACKET_STATE_UNMATCHED; - - WARN_ON_ONCE(ptp->rx_ts_inline); - - spin_lock_bh(&ptp->evt_lock); - evts_waiting = !list_empty(&ptp->evt_list); - spin_unlock_bh(&ptp->evt_lock); - - if (!evts_waiting) - return PTP_PACKET_STATE_UNMATCHED; - - match = (struct efx_ptp_match *)skb->cb; - /* Look for a matching timestamp in the event queue */ - spin_lock_bh(&ptp->evt_lock); - list_for_each_safe(cursor, next, &ptp->evt_list) { - struct efx_ptp_event_rx *evt; - - evt = list_entry(cursor, struct efx_ptp_event_rx, link); - if ((evt->seq0 == match->words[0]) && - (evt->seq1 == match->words[1])) { - struct skb_shared_hwtstamps *timestamps; - - /* Match - add in hardware timestamp */ - timestamps = skb_hwtstamps(skb); - timestamps->hwtstamp = evt->hwtimestamp; - - match->state = PTP_PACKET_STATE_MATCHED; - rc = PTP_PACKET_STATE_MATCHED; - list_move(&evt->link, &ptp->evt_free_list); - break; - } - } - spin_unlock_bh(&ptp->evt_lock); - - return rc; -} - /* Process any queued receive events and corresponding packets * * q is returned with all the packets that are ready for delivery. 
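Both the event-expiry scan deleted above and the surviving check in efx_ptp_process_events() in the next hunk lean on the kernel's wraparound-safe jiffies comparison: time_after() evaluates the difference in signed arithmetic, so it stays correct across counter wraparound as long as the two timestamps are less than half the counter range apart. A self-contained sketch of the idiom (the kernel's own time_after() in <linux/jiffies.h> is the same comparison plus type checking):

/* Demonstrates why the signed-difference comparison survives wraparound
 * where a plain '>' does not. */
#include <stdio.h>

#define time_after(a, b)  ((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long expiry = (unsigned long)-5;  /* just before wraparound */
	unsigned long now = 10;                    /* just after wraparound */

	/* A naive 'now > expiry' would claim the deadline is far away;
	 * the signed difference correctly reports it as expired. */
	printf("expired: %d\n", time_after(now, expiry));  /* prints 1 */
	return 0;
}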
@@ -1302,9 +1185,6 @@ static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q) match = (struct efx_ptp_match *)skb->cb; if (match->state == PTP_PACKET_STATE_MATCH_UNWANTED) { __skb_queue_tail(q, skb); - } else if (efx_ptp_match_rx(efx, skb) == - PTP_PACKET_STATE_MATCHED) { - __skb_queue_tail(q, skb); } else if (time_after(jiffies, match->expiry)) { match->state = PTP_PACKET_STATE_TIMED_OUT; ++ptp->rx_no_timestamp; @@ -1422,7 +1302,7 @@ static int efx_ptp_insert_ipv4_filter(struct efx_nic *efx, static int efx_ptp_insert_ipv6_filter(struct efx_nic *efx, struct list_head *filter_list, - struct in6_addr *addr, u16 port, + const struct in6_addr *addr, u16 port, unsigned long expiry) { struct efx_filter_spec spec; @@ -1435,11 +1315,10 @@ static int efx_ptp_insert_ipv6_filter(struct efx_nic *efx, static int efx_ptp_insert_eth_multicast_filter(struct efx_nic *efx) { struct efx_ptp_data *ptp = efx->ptp_data; - const u8 addr[ETH_ALEN] = PTP_ADDR_ETHER; struct efx_filter_spec spec; efx_ptp_init_filter(efx, &spec); - efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, addr); + efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, ptp_addr_ether); spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; spec.ether_type = htons(ETH_P_1588); return efx_ptp_insert_filter(efx, &ptp->rxfilters_mcast, &spec, 0); @@ -1472,20 +1351,20 @@ static int efx_ptp_insert_multicast_filters(struct efx_nic *efx) * PTP over IPv6 and Ethernet */ if (efx_ptp_use_mac_tx_timestamps(efx)) { - struct in6_addr ipv6_addr = {{PTP_ADDR_IPV6}}; - rc = efx_ptp_insert_ipv6_filter(efx, &ptp->rxfilters_mcast, - &ipv6_addr, PTP_EVENT_PORT, 0); + &ptp_addr_ipv6, PTP_EVENT_PORT, 0); if (rc < 0) goto fail; rc = efx_ptp_insert_ipv6_filter(efx, &ptp->rxfilters_mcast, - &ipv6_addr, PTP_GENERAL_PORT, 0); + &ptp_addr_ipv6, PTP_GENERAL_PORT, 0); if (rc < 0) goto fail; rc = efx_ptp_insert_eth_multicast_filter(efx); - if (rc < 0) + + /* Not all firmware variants support this filter */ + if (rc < 0 && rc != -EPROTONOSUPPORT) goto fail; } @@ -1503,9 +1382,7 @@ static bool efx_ptp_valid_unicast_event_pkt(struct sk_buff *skb) ip_hdr(skb)->protocol == IPPROTO_UDP && udp_hdr(skb)->source == htons(PTP_EVENT_PORT); } else if (skb->protocol == htons(ETH_P_IPV6)) { - struct in6_addr mcast_addr = {{PTP_ADDR_IPV6}}; - - return !ipv6_addr_equal(&ipv6_hdr(skb)->daddr, &mcast_addr) && + return !ipv6_addr_equal(&ipv6_hdr(skb)->daddr, &ptp_addr_ipv6) && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP && udp_hdr(skb)->source == htons(PTP_EVENT_PORT); } @@ -1581,8 +1458,6 @@ fail: static int efx_ptp_stop(struct efx_nic *efx) { struct efx_ptp_data *ptp = efx->ptp_data; - struct list_head *cursor; - struct list_head *next; int rc; if (ptp == NULL) @@ -1597,13 +1472,6 @@ static int efx_ptp_stop(struct efx_nic *efx) efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq); skb_queue_purge(&efx->ptp_data->txq); - /* Drop any pending receive events */ - spin_lock_bh(&efx->ptp_data->evt_lock); - list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) { - list_move(cursor, &efx->ptp_data->evt_free_list); - } - spin_unlock_bh(&efx->ptp_data->evt_lock); - return rc; } @@ -1643,8 +1511,6 @@ static void efx_ptp_worker(struct work_struct *work) return; } - efx_ptp_drop_time_expired_events(efx); - __skb_queue_head_init(&tempq); efx_ptp_process_events(efx, &tempq); @@ -1693,7 +1559,6 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel) { struct efx_ptp_data *ptp; int rc = 0; - unsigned int pos; if (efx->ptp_data) { efx->ptp_data->channel = channel; @@ -1707,7 
+1572,6 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel) ptp->efx = efx; ptp->channel = channel; - ptp->rx_ts_inline = efx_nic_rev(efx) >= EFX_REV_HUNT_A0; rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL); if (rc != 0) @@ -1734,12 +1598,6 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel) ptp->config.flags = 0; ptp->config.tx_type = HWTSTAMP_TX_OFF; ptp->config.rx_filter = HWTSTAMP_FILTER_NONE; - INIT_LIST_HEAD(&ptp->evt_list); - INIT_LIST_HEAD(&ptp->evt_free_list); - spin_lock_init(&ptp->evt_lock); - for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++) - list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list); - INIT_LIST_HEAD(&ptp->rxfilters_mcast); INIT_LIST_HEAD(&ptp->rxfilters_ucast); @@ -1879,7 +1737,6 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) struct efx_nic *efx = channel->efx; struct efx_ptp_data *ptp = efx->ptp_data; struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb; - u8 *match_data_012, *match_data_345; unsigned int version; u8 *data; @@ -1895,12 +1752,6 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) if (version != PTP_VERSION_V1) { return false; } - - /* PTP V1 uses all six bytes of the UUID to match the packet - * to the timestamp - */ - match_data_012 = data + PTP_V1_UUID_OFFSET; - match_data_345 = data + PTP_V1_UUID_OFFSET + 3; } else { if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) { return false; @@ -1910,21 +1761,6 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) { return false; } - - /* The original V2 implementation uses bytes 2-7 of - * the UUID to match the packet to the timestamp. This - * discards two of the bytes of the MAC address used - * to create the UUID (SF bug 33070). The PTP V2 - * enhanced mode fixes this issue and uses bytes 0-2 - * and byte 5-7 of the UUID. - */ - match_data_345 = data + PTP_V2_UUID_OFFSET + 5; - if (ptp->mode == MC_CMD_PTP_MODE_V2) { - match_data_012 = data + PTP_V2_UUID_OFFSET + 2; - } else { - match_data_012 = data + PTP_V2_UUID_OFFSET + 0; - BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED); - } } /* Does this packet require timestamping? */ @@ -1936,17 +1772,6 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) */ BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET); BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH); - - /* Extract UUID/Sequence information */ - match->words[0] = (match_data_012[0] | - (match_data_012[1] << 8) | - (match_data_012[2] << 16) | - (match_data_345[0] << 24)); - match->words[1] = (match_data_345[1] | - (match_data_345[2] << 8) | - (data[PTP_V1_SEQUENCE_OFFSET + - PTP_V1_SEQUENCE_LENGTH - 1] << - 16)); } else { match->state = PTP_PACKET_STATE_MATCH_UNWANTED; } @@ -2110,50 +1935,6 @@ static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len) queue_work(ptp->workwq, &ptp->work); } -/* Process a completed receive event. Put it on the event queue and - * start worker thread. This is required because event and their - * correspoding packets may come in either order. 
- */ -static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp) -{ - struct efx_ptp_event_rx *evt = NULL; - - if (WARN_ON_ONCE(ptp->rx_ts_inline)) - return; - - if (ptp->evt_frag_idx != 3) { - ptp_event_failure(efx, 3); - return; - } - - spin_lock_bh(&ptp->evt_lock); - if (!list_empty(&ptp->evt_free_list)) { - evt = list_first_entry(&ptp->evt_free_list, - struct efx_ptp_event_rx, link); - list_del(&evt->link); - - evt->seq0 = EFX_QWORD_FIELD(ptp->evt_frags[2], MCDI_EVENT_DATA); - evt->seq1 = (EFX_QWORD_FIELD(ptp->evt_frags[2], - MCDI_EVENT_SRC) | - (EFX_QWORD_FIELD(ptp->evt_frags[1], - MCDI_EVENT_SRC) << 8) | - (EFX_QWORD_FIELD(ptp->evt_frags[0], - MCDI_EVENT_SRC) << 16)); - evt->hwtimestamp = efx->ptp_data->nic_to_kernel_time( - EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA), - EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA), - ptp->ts_corrections.ptp_rx); - evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS); - list_add_tail(&evt->link, &ptp->evt_list); - - queue_work(ptp->workwq, &ptp->work); - } else if (net_ratelimit()) { - /* Log a rate-limited warning message. */ - netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n"); - } - spin_unlock_bh(&ptp->evt_lock); -} - static void ptp_event_fault(struct efx_nic *efx, struct efx_ptp_data *ptp) { int code = EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA); @@ -2200,9 +1981,6 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) if (!MCDI_EVENT_FIELD(*ev, CONT)) { /* Process resulting event */ switch (code) { - case MCDI_EVENT_CODE_PTP_RX: - ptp_event_rx(efx, ptp); - break; case MCDI_EVENT_CODE_PTP_FAULT: ptp_event_fault(efx, ptp); break; diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 2375cef577e4..f77a2d3ef37e 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -359,26 +359,36 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, /* Handle a received packet. Second half: Touches packet payload. */ void __efx_rx_packet(struct efx_channel *channel) { + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); struct efx_nic *efx = channel->efx; struct efx_rx_buffer *rx_buf = - efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index); + efx_rx_buffer(rx_queue, channel->rx_pkt_index); u8 *eh = efx_rx_buf_va(rx_buf); /* Read length from the prefix if necessary. This already * excludes the length of the prefix itself. */ - if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN) + if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN) { rx_buf->len = le16_to_cpup((__le16 *) (eh + efx->rx_packet_len_offset)); + /* A known issue may prevent this being filled in; + * if that happens, just drop the packet. + * Must do that in the driver since passing a zero-length + * packet up to the stack may cause a crash. 
+ */ + if (unlikely(!rx_buf->len)) { + efx_free_rx_buffers(rx_queue, rx_buf, + channel->rx_pkt_n_frags); + channel->n_rx_frm_trunc++; + goto out; + } + } /* If we're in loopback test, then pass the packet directly to the * loopback layer, and free the rx_buf here */ if (unlikely(efx->loopback_selftest)) { - struct efx_rx_queue *rx_queue; - efx_loopback_rx_packet(efx, eh, rx_buf->len); - rx_queue = efx_channel_get_rx_queue(channel); efx_free_rx_buffers(rx_queue, rx_buf, channel->rx_pkt_n_frags); goto out; diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c index 563c1e317ce9..894fad0bb5ea 100644 --- a/drivers/net/ethernet/sfc/selftest.c +++ b/drivers/net/ethernet/sfc/selftest.c @@ -38,8 +38,7 @@ /* * Loopback test packet structure * - * The self-test should stress every RSS vector, and unfortunately - * Falcon only performs RSS on TCP/UDP packets. + * The self-test should stress every RSS vector. */ struct efx_loopback_payload { char pad[2]; /* Ensures ip is 4-byte aligned */ @@ -584,10 +583,6 @@ efx_test_loopback(struct efx_tx_queue *tx_queue, return 0; } -/* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but - * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it - * to delay and retry. Therefore, it's safer to just poll directly. Wait - * for link up and any faults to dissipate. */ static int efx_wait_for_link(struct efx_nic *efx) { struct efx_link_state *link_state = &efx->link_state; diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c index 1776f7f8a7a9..a7346e965bfe 100644 --- a/drivers/net/ethernet/sfc/siena/efx_channels.c +++ b/drivers/net/ethernet/sfc/siena/efx_channels.c @@ -1285,7 +1285,7 @@ static int efx_poll(struct napi_struct *napi, int budget) spent = efx_process_channel(channel, budget); - xdp_do_flush_map(); + xdp_do_flush(); if (spent < budget) { if (efx_channel_has_rx_queue(channel) && diff --git a/drivers/net/ethernet/sfc/siena/io.h b/drivers/net/ethernet/sfc/siena/io.h index 30439cc83a89..07f99ad14bf3 100644 --- a/drivers/net/ethernet/sfc/siena/io.h +++ b/drivers/net/ethernet/sfc/siena/io.h @@ -70,7 +70,7 @@ */ #ifdef CONFIG_X86_64 /* PIO is a win only if write-combining is possible */ -#ifdef ARCH_HAS_IOREMAP_WC +#ifdef ioremap_wc #define EFX_USE_PIO 1 #endif #endif diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c index fe268b6c1cac..82e8891a619a 100644 --- a/drivers/net/ethernet/sfc/tc.c +++ b/drivers/net/ethernet/sfc/tc.c @@ -12,9 +12,11 @@ #include <net/pkt_cls.h> #include <net/vxlan.h> #include <net/geneve.h> +#include <net/tc_act/tc_ct.h> #include "tc.h" #include "tc_bindings.h" #include "tc_encap_actions.h" +#include "tc_conntrack.h" #include "mae.h" #include "ef100_rep.h" #include "efx.h" @@ -29,6 +31,9 @@ enum efx_encap_type efx_tc_indr_netdev_type(struct net_device *net_dev) return EFX_ENCAP_TYPE_NONE; } +#define EFX_TC_HDR_TYPE_TTL_MASK ((u32)0xff) +/* Hoplimit is stored in the most significant byte in the pedit ipv6 header action */ +#define EFX_TC_HDR_TYPE_HLIMIT_MASK ~((u32)0xff000000) #define EFX_EFV_PF NULL /* Look up the representor information (efv) for a device. 
* May return NULL for the PF (us), or an error pointer for a device that @@ -84,6 +89,12 @@ s64 efx_tc_flower_external_mport(struct efx_nic *efx, struct efx_rep *efv) return mport; } +static const struct rhashtable_params efx_tc_mac_ht_params = { + .key_len = offsetofend(struct efx_tc_mac_pedit_action, h_addr), + .key_offset = 0, + .head_offset = offsetof(struct efx_tc_mac_pedit_action, linkage), +}; + static const struct rhashtable_params efx_tc_encap_match_ht_params = { .key_len = offsetof(struct efx_tc_encap_match, linkage), .key_offset = 0, @@ -96,6 +107,70 @@ static const struct rhashtable_params efx_tc_match_action_ht_params = { .head_offset = offsetof(struct efx_tc_flow_rule, linkage), }; +static const struct rhashtable_params efx_tc_lhs_rule_ht_params = { + .key_len = sizeof(unsigned long), + .key_offset = offsetof(struct efx_tc_lhs_rule, cookie), + .head_offset = offsetof(struct efx_tc_lhs_rule, linkage), +}; + +static const struct rhashtable_params efx_tc_recirc_ht_params = { + .key_len = offsetof(struct efx_tc_recirc_id, linkage), + .key_offset = 0, + .head_offset = offsetof(struct efx_tc_recirc_id, linkage), +}; + +static struct efx_tc_mac_pedit_action *efx_tc_flower_get_mac(struct efx_nic *efx, + unsigned char h_addr[ETH_ALEN], + struct netlink_ext_ack *extack) +{ + struct efx_tc_mac_pedit_action *ped, *old; + int rc; + + ped = kzalloc(sizeof(*ped), GFP_USER); + if (!ped) + return ERR_PTR(-ENOMEM); + memcpy(ped->h_addr, h_addr, ETH_ALEN); + old = rhashtable_lookup_get_insert_fast(&efx->tc->mac_ht, + &ped->linkage, + efx_tc_mac_ht_params); + if (old) { + /* don't need our new entry */ + kfree(ped); + if (IS_ERR(old)) /* oh dear, it's actually an error */ + return ERR_CAST(old); + if (!refcount_inc_not_zero(&old->ref)) + return ERR_PTR(-EAGAIN); + /* existing entry found, ref taken */ + return old; + } + + rc = efx_mae_allocate_pedit_mac(efx, ped); + if (rc < 0) { + NL_SET_ERR_MSG_MOD(extack, "Failed to store pedit MAC address in hw"); + goto out_remove; + } + + /* ref and return */ + refcount_set(&ped->ref, 1); + return ped; +out_remove: + rhashtable_remove_fast(&efx->tc->mac_ht, &ped->linkage, + efx_tc_mac_ht_params); + kfree(ped); + return ERR_PTR(rc); +} + +static void efx_tc_flower_put_mac(struct efx_nic *efx, + struct efx_tc_mac_pedit_action *ped) +{ + if (!refcount_dec_and_test(&ped->ref)) + return; /* still in use */ + rhashtable_remove_fast(&efx->tc->mac_ht, &ped->linkage, + efx_tc_mac_ht_params); + efx_mae_free_pedit_mac(efx, ped); + kfree(ped); +} + static void efx_tc_free_action_set(struct efx_nic *efx, struct efx_tc_action_set *act, bool in_hw) { @@ -121,6 +196,10 @@ static void efx_tc_free_action_set(struct efx_nic *efx, list_del(&act->encap_user); efx_tc_flower_release_encap_md(efx, act->encap_md); } + if (act->src_mac) + efx_tc_flower_put_mac(efx, act->src_mac); + if (act->dst_mac) + efx_tc_flower_put_mac(efx, act->dst_mac); kfree(act); } @@ -201,23 +280,24 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx, } } if (dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_VLAN) | - BIT(FLOW_DISSECTOR_KEY_CVLAN) | - BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_PORTS) | - BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | - BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_ENC_IP) | - BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | - BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | 
- BIT(FLOW_DISSECTOR_KEY_TCP) | - BIT(FLOW_DISSECTOR_KEY_IP))) { - NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported flower keys %#x", + ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | + BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_CT) | + BIT_ULL(FLOW_DISSECTOR_KEY_TCP) | + BIT_ULL(FLOW_DISSECTOR_KEY_IP))) { + NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported flower keys %#llx", dissector->used_keys); return -EOPNOTSUPP; } @@ -228,12 +308,13 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx, !(match->value.eth_proto == htons(ETH_P_IP) || match->value.eth_proto == htons(ETH_P_IPV6))) if (dissector->used_keys & - (BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_PORTS) | - BIT(FLOW_DISSECTOR_KEY_IP) | - BIT(FLOW_DISSECTOR_KEY_TCP))) { - NL_SET_ERR_MSG_FMT_MOD(extack, "L3/L4 flower keys %#x require protocol ipv[46]", + (BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IP) | + BIT_ULL(FLOW_DISSECTOR_KEY_TCP))) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "L3/L4 flower keys %#llx require protocol ipv[46]", dissector->used_keys); return -EINVAL; } @@ -281,9 +362,10 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx, if ((match->value.ip_proto != IPPROTO_UDP && match->value.ip_proto != IPPROTO_TCP) || !IS_ALL_ONES(match->mask.ip_proto)) if (dissector->used_keys & - (BIT(FLOW_DISSECTOR_KEY_PORTS) | - BIT(FLOW_DISSECTOR_KEY_TCP))) { - NL_SET_ERR_MSG_FMT_MOD(extack, "L4 flower keys %#x require ipproto udp or tcp", + (BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_TCP))) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "L4 flower keys %#llx require ipproto udp or tcp", dissector->used_keys); return -EINVAL; } @@ -344,15 +426,41 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx, MAP_ENC_KEY_AND_MASK(PORTS, ports, enc_ports, dst, enc_dport); MAP_ENC_KEY_AND_MASK(KEYID, enc_keyid, enc_keyid, keyid, enc_keyid); } else if (dissector->used_keys & - (BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | - BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_ENC_IP) | - BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))) { - NL_SET_ERR_MSG_FMT_MOD(extack, "Flower enc keys require enc_control (keys: %#x)", + (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) | + BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS))) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "Flower enc keys require enc_control (keys: %#llx)", dissector->used_keys); return -EOPNOTSUPP; } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT)) { + struct flow_match_ct fm; + + flow_rule_match_ct(rule, &fm); + match->value.ct_state_trk = !!(fm.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED); + match->mask.ct_state_trk = !!(fm.mask->ct_state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED); + match->value.ct_state_est = !!(fm.key->ct_state & 
TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED); + match->mask.ct_state_est = !!(fm.mask->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED); + if (fm.mask->ct_state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED | + TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED)) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "Unsupported ct_state match %#x", + fm.mask->ct_state); + return -EOPNOTSUPP; + } + match->value.ct_mark = fm.key->ct_mark; + match->mask.ct_mark = fm.mask->ct_mark; + match->value.ct_zone = fm.key->ct_zone; + match->mask.ct_zone = fm.mask->ct_zone; + + if (memchr_inv(fm.mask->ct_labels, 0, sizeof(fm.mask->ct_labels))) { + NL_SET_ERR_MSG_MOD(extack, "Matching on ct_label not supported"); + return -EOPNOTSUPP; + } + } return 0; } @@ -496,6 +604,8 @@ static int efx_tc_flower_record_encap_match(struct efx_nic *efx, kfree(encap); if (pseudo) /* don't need our new pseudo either */ efx_tc_flower_release_encap_match(efx, pseudo); + if (IS_ERR(old)) /* oh dear, it's actually an error */ + return PTR_ERR(old); /* check old and new em_types are compatible */ switch (old->type) { case EFX_TC_EM_DIRECT: @@ -519,19 +629,28 @@ static int efx_tc_flower_record_encap_match(struct efx_nic *efx, } if (child_ip_tos_mask != old->child_ip_tos_mask) { NL_SET_ERR_MSG_FMT_MOD(extack, - "Pseudo encap match for TOS mask %#04x conflicts with existing pseudo(MASK) entry for TOS mask %#04x", + "Pseudo encap match for TOS mask %#04x conflicts with existing mask %#04x", child_ip_tos_mask, old->child_ip_tos_mask); return -EEXIST; } if (child_udp_sport_mask != old->child_udp_sport_mask) { NL_SET_ERR_MSG_FMT_MOD(extack, - "Pseudo encap match for UDP src port mask %#x conflicts with existing pseudo(MASK) entry for mask %#x", + "Pseudo encap match for UDP src port mask %#x conflicts with existing mask %#x", child_udp_sport_mask, old->child_udp_sport_mask); return -EEXIST; } break; + case EFX_TC_EM_PSEUDO_OR: + /* old EM corresponds to an OR that has to be unique + * (it must not overlap with any other OR, whether + * direct-EM or pseudo). + */ + NL_SET_ERR_MSG_FMT_MOD(extack, + "%s encap match conflicts with existing pseudo(OR) entry", + em_type ? "Pseudo" : "Direct"); + return -EEXIST; default: /* Unrecognised pseudo-type. Just say no */ NL_SET_ERR_MSG_FMT_MOD(extack, "%s encap match conflicts with existing pseudo(%d) entry", @@ -572,12 +691,67 @@ fail_pseudo: return rc; } +static struct efx_tc_recirc_id *efx_tc_get_recirc_id(struct efx_nic *efx, + u32 chain_index, + struct net_device *net_dev) +{ + struct efx_tc_recirc_id *rid, *old; + int rc; + + rid = kzalloc(sizeof(*rid), GFP_USER); + if (!rid) + return ERR_PTR(-ENOMEM); + rid->chain_index = chain_index; + /* We don't take a reference here, because it's implied - if there's + * a rule on the net_dev that's been offloaded to us, then the net_dev + * can't go away until the rule has been deoffloaded. 
+ */ + rid->net_dev = net_dev; + old = rhashtable_lookup_get_insert_fast(&efx->tc->recirc_ht, + &rid->linkage, + efx_tc_recirc_ht_params); + if (old) { + /* don't need our new entry */ + kfree(rid); + if (IS_ERR(old)) /* oh dear, it's actually an error */ + return ERR_CAST(old); + if (!refcount_inc_not_zero(&old->ref)) + return ERR_PTR(-EAGAIN); + /* existing entry found */ + rid = old; + } else { + rc = ida_alloc_range(&efx->tc->recirc_ida, 1, U8_MAX, GFP_USER); + if (rc < 0) { + rhashtable_remove_fast(&efx->tc->recirc_ht, + &rid->linkage, + efx_tc_recirc_ht_params); + kfree(rid); + return ERR_PTR(rc); + } + rid->fw_id = rc; + refcount_set(&rid->ref, 1); + } + return rid; +} + +static void efx_tc_put_recirc_id(struct efx_nic *efx, struct efx_tc_recirc_id *rid) +{ + if (!refcount_dec_and_test(&rid->ref)) + return; /* still in use */ + rhashtable_remove_fast(&efx->tc->recirc_ht, &rid->linkage, + efx_tc_recirc_ht_params); + ida_free(&efx->tc->recirc_ida, rid->fw_id); + kfree(rid); +} + static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rule) { efx_mae_delete_rule(efx, rule->fw_id); /* Release entries in subsidiary tables */ efx_tc_free_action_set_list(efx, &rule->acts, true); + if (rule->match.rid) + efx_tc_put_recirc_id(efx, rule->match.rid); if (rule->match.encap) efx_tc_flower_release_encap_match(efx, rule->match.encap); rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; @@ -601,6 +775,8 @@ static const char *efx_tc_encap_type_name(enum efx_encap_type typ) /* For details of action order constraints refer to SF-123102-TC-1§12.6.1 */ enum efx_tc_action_order { EFX_TC_AO_DECAP, + EFX_TC_AO_DEC_TTL, + EFX_TC_AO_PEDIT_MAC_ADDRS, EFX_TC_AO_VLAN_POP, EFX_TC_AO_VLAN_PUSH, EFX_TC_AO_COUNT, @@ -615,6 +791,15 @@ static bool efx_tc_flower_action_order_ok(const struct efx_tc_action_set *act, case EFX_TC_AO_DECAP: if (act->decap) return false; + /* PEDIT_MAC_ADDRS must not happen before DECAP, though it + * can wait until much later + */ + if (act->dst_mac || act->src_mac) + return false; + + /* Decrementing ttl must not happen before DECAP */ + if (act->do_ttl_dec) + return false; fallthrough; case EFX_TC_AO_VLAN_POP: if (act->vlan_pop >= 2) @@ -634,12 +819,17 @@ static bool efx_tc_flower_action_order_ok(const struct efx_tc_action_set *act, if (act->count) return false; fallthrough; + case EFX_TC_AO_PEDIT_MAC_ADDRS: case EFX_TC_AO_ENCAP: if (act->encap_md) return false; fallthrough; case EFX_TC_AO_DELIVER: return !act->deliver; + case EFX_TC_AO_DEC_TTL: + if (act->encap_md) + return false; + return !act->do_ttl_dec; default: /* Bad caller. Whatever they wanted to do, say they can't. */ WARN_ON_ONCE(1); @@ -647,6 +837,838 @@ static bool efx_tc_flower_action_order_ok(const struct efx_tc_action_set *act, } } +/** + * DOC: TC conntrack sequences + * + * The MAE hardware can handle at most two rounds of action rule matching, + * consequently we support conntrack through the notion of a "left-hand side + * rule". This is a rule which typically contains only the actions "ct" and + * "goto chain N", and corresponds to one or more "right-hand side rules" in + * chain N, which typically match on +trk+est, and may perform ct(nat) actions. + * RHS rules go in the Action Rule table as normal but with a nonzero recirc_id + * (the hardware equivalent of chain_index), while LHS rules may go in either + * the Action Rule or the Outer Rule table, the latter being preferred for + * performance reasons, and set both DO_CT and a recirc_id in their response. 
+ * + * Besides the RHS rules, there are often also similar rules matching on + * +trk+new which perform the ct(commit) action. These are not offloaded. + */ + +static bool efx_tc_rule_is_lhs_rule(struct flow_rule *fr, + struct efx_tc_match *match) +{ + const struct flow_action_entry *fa; + int i; + + flow_action_for_each(i, fa, &fr->action) { + switch (fa->id) { + case FLOW_ACTION_GOTO: + return true; + case FLOW_ACTION_CT: + /* If rule is -trk, or doesn't mention trk at all, then + * a CT action implies a conntrack lookup (hence it's an + * LHS rule). If rule is +trk, then a CT action could + * just be ct(nat) or even ct(commit) (though the latter + * can't be offloaded). + */ + if (!match->mask.ct_state_trk || !match->value.ct_state_trk) + return true; + break; + default: + break; + } + } + return false; +} + +/* A foreign LHS rule has matches on enc_ keys at the TC layer (including an + * implied match on enc_ip_proto UDP). Translate these into non-enc_ keys, + * so that we can use the same MAE machinery as local LHS rules (and so that + * the lhs_rules entries have uniform semantics). It may seem odd to do it + * this way round, given that the corresponding fields in the MAE MCDIs are + * all ENC_, but (a) we don't have enc_L2 or enc_ip_proto in struct + * efx_tc_match_fields and (b) semantically an LHS rule doesn't have inner + * fields so it's just matching on *the* header rather than the outer header. + * Make sure that the non-enc_ keys were not already being matched on, as that + * would imply a rule that needed a triple lookup. (Hardware can do that, + * with OR-AR-CT-AR, but it halves packet rate so we avoid it where possible; + * see efx_tc_flower_flhs_needs_ar().) + */ +static int efx_tc_flower_translate_flhs_match(struct efx_tc_match *match) +{ + int rc = 0; + +#define COPY_MASK_AND_VALUE(_key, _ekey) ({ \ + if (match->mask._key) { \ + rc = -EOPNOTSUPP; \ + } else { \ + match->mask._key = match->mask._ekey; \ + match->mask._ekey = 0; \ + match->value._key = match->value._ekey; \ + match->value._ekey = 0; \ + } \ + rc; \ +}) +#define COPY_FROM_ENC(_key) COPY_MASK_AND_VALUE(_key, enc_##_key) + if (match->mask.ip_proto) + return -EOPNOTSUPP; + match->mask.ip_proto = ~0; + match->value.ip_proto = IPPROTO_UDP; + if (COPY_FROM_ENC(src_ip) || COPY_FROM_ENC(dst_ip)) + return rc; +#ifdef CONFIG_IPV6 + if (!ipv6_addr_any(&match->mask.src_ip6)) + return -EOPNOTSUPP; + match->mask.src_ip6 = match->mask.enc_src_ip6; + memset(&match->mask.enc_src_ip6, 0, sizeof(struct in6_addr)); + if (!ipv6_addr_any(&match->mask.dst_ip6)) + return -EOPNOTSUPP; + match->mask.dst_ip6 = match->mask.enc_dst_ip6; + memset(&match->mask.enc_dst_ip6, 0, sizeof(struct in6_addr)); +#endif + if (COPY_FROM_ENC(ip_tos) || COPY_FROM_ENC(ip_ttl)) + return rc; + /* should really copy enc_ip_frag but we don't have that in + * parse_match yet + */ + if (COPY_MASK_AND_VALUE(l4_sport, enc_sport) || + COPY_MASK_AND_VALUE(l4_dport, enc_dport)) + return rc; + return 0; +#undef COPY_FROM_ENC +#undef COPY_MASK_AND_VALUE +} + +/* If a foreign LHS rule wants to match on keys that are only available after + * encap header identification and parsing, then it can't be done in the Outer + * Rule lookup, because that lookup determines the encap type used to parse + * beyond the outer headers. Thus, such rules must use the OR-AR-CT-AR lookup + * sequence, with an EM (struct efx_tc_encap_match) in the OR step. + * Return true iff the passed match requires this. 
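+ *
+ * For example (hypothetical rules): a decap rule matching only on outer
+ * enc_src_ip/enc_dst_ip and the tunnel UDP ports can be translated and
+ * handled entirely in the Outer Rule lookup, whereas one that also
+ * matches on enc_key_id (VNI) or on any inner-header field (addresses,
+ * ip_proto, L4 ports, ...) must take the OR-AR-CT-AR path.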
+ */ +static bool efx_tc_flower_flhs_needs_ar(struct efx_tc_match *match) +{ + /* matches on inner-header keys can't be done in OR */ + return match->mask.eth_proto || + match->mask.vlan_tci[0] || match->mask.vlan_tci[1] || + match->mask.vlan_proto[0] || match->mask.vlan_proto[1] || + memchr_inv(match->mask.eth_saddr, 0, ETH_ALEN) || + memchr_inv(match->mask.eth_daddr, 0, ETH_ALEN) || + match->mask.ip_proto || + match->mask.ip_tos || match->mask.ip_ttl || + match->mask.src_ip || match->mask.dst_ip || +#ifdef CONFIG_IPV6 + !ipv6_addr_any(&match->mask.src_ip6) || + !ipv6_addr_any(&match->mask.dst_ip6) || +#endif + match->mask.ip_frag || match->mask.ip_firstfrag || + match->mask.l4_sport || match->mask.l4_dport || + match->mask.tcp_flags || + /* nor can VNI */ + match->mask.enc_keyid; +} + +static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx, + struct flow_cls_offload *tc, + struct flow_rule *fr, + struct net_device *net_dev, + struct efx_tc_lhs_rule *rule) + +{ + struct netlink_ext_ack *extack = tc->common.extack; + struct efx_tc_lhs_action *act = &rule->lhs_act; + const struct flow_action_entry *fa; + enum efx_tc_counter_type ctype; + bool pipe = true; + int i; + + ctype = rule->is_ar ? EFX_TC_COUNTER_TYPE_AR : EFX_TC_COUNTER_TYPE_OR; + + flow_action_for_each(i, fa, &fr->action) { + struct efx_tc_ct_zone *ct_zone; + struct efx_tc_recirc_id *rid; + + if (!pipe) { + /* more actions after a non-pipe action */ + NL_SET_ERR_MSG_MOD(extack, "Action follows non-pipe action"); + return -EINVAL; + } + switch (fa->id) { + case FLOW_ACTION_GOTO: + if (!fa->chain_index) { + NL_SET_ERR_MSG_MOD(extack, "Can't goto chain 0, no looping in hw"); + return -EOPNOTSUPP; + } + rid = efx_tc_get_recirc_id(efx, fa->chain_index, + net_dev); + if (IS_ERR(rid)) { + NL_SET_ERR_MSG_MOD(extack, "Failed to allocate a hardware recirculation ID for this chain_index"); + return PTR_ERR(rid); + } + act->rid = rid; + if (fa->hw_stats) { + struct efx_tc_counter_index *cnt; + + if (!(fa->hw_stats & FLOW_ACTION_HW_STATS_DELAYED)) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "hw_stats_type %u not supported (only 'delayed')", + fa->hw_stats); + return -EOPNOTSUPP; + } + cnt = efx_tc_flower_get_counter_index(efx, tc->cookie, + ctype); + if (IS_ERR(cnt)) { + NL_SET_ERR_MSG_MOD(extack, "Failed to obtain a counter"); + return PTR_ERR(cnt); + } + WARN_ON(act->count); /* can't happen */ + act->count = cnt; + } + pipe = false; + break; + case FLOW_ACTION_CT: + if (act->zone) { + NL_SET_ERR_MSG_MOD(extack, "Can't offload multiple ct actions"); + return -EOPNOTSUPP; + } + if (fa->ct.action & (TCA_CT_ACT_COMMIT | + TCA_CT_ACT_FORCE)) { + NL_SET_ERR_MSG_MOD(extack, "Can't offload ct commit/force"); + return -EOPNOTSUPP; + } + if (fa->ct.action & TCA_CT_ACT_CLEAR) { + NL_SET_ERR_MSG_MOD(extack, "Can't clear ct in LHS rule"); + return -EOPNOTSUPP; + } + if (fa->ct.action & (TCA_CT_ACT_NAT | + TCA_CT_ACT_NAT_SRC | + TCA_CT_ACT_NAT_DST)) { + NL_SET_ERR_MSG_MOD(extack, "Can't perform NAT in LHS rule - packet isn't conntracked yet"); + return -EOPNOTSUPP; + } + if (fa->ct.action) { + NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled ct.action %u for LHS rule\n", + fa->ct.action); + return -EOPNOTSUPP; + } + ct_zone = efx_tc_ct_register_zone(efx, fa->ct.zone, + fa->ct.flow_table); + if (IS_ERR(ct_zone)) { + NL_SET_ERR_MSG_MOD(extack, "Failed to register for CT updates"); + return PTR_ERR(ct_zone); + } + act->zone = ct_zone; + break; + default: + NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u for LHS rule\n", + fa->id); + return -EOPNOTSUPP; + } + 
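Sketch of the invariant enforced by the "pipe" flag in the action loop above: an LHS rule is zero or more pipe actions (such as ct) terminated by exactly one goto, with nothing after it. This is a standalone userspace C model with invented demo_* names, not the driver code:

#include <stdbool.h>
#include <stdio.h>

enum demo_action { DEMO_ACT_CT, DEMO_ACT_GOTO };

/* Valid LHS action list: zero or more pipe actions (e.g. ct), then
 * exactly one terminal goto, and nothing following it.
 */
static bool demo_lhs_actions_ok(const enum demo_action *acts, int n)
{
	bool pipe = true;
	int i;

	for (i = 0; i < n; i++) {
		if (!pipe)		/* action follows a non-pipe action */
			return false;
		if (acts[i] == DEMO_ACT_GOTO)
			pipe = false;	/* goto terminates the rule */
	}
	return !pipe;			/* still "pipe" means missing goto */
}

int main(void)
{
	enum demo_action good[] = { DEMO_ACT_CT, DEMO_ACT_GOTO };
	enum demo_action bad[]  = { DEMO_ACT_GOTO, DEMO_ACT_CT };

	printf("%d %d\n", demo_lhs_actions_ok(good, 2),
	       demo_lhs_actions_ok(bad, 2));	/* prints "1 0" */
	return 0;
}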
} + + if (pipe) { + NL_SET_ERR_MSG_MOD(extack, "Missing goto chain in LHS rule"); + return -EOPNOTSUPP; + } + return 0; +} + +static void efx_tc_flower_release_lhs_actions(struct efx_nic *efx, + struct efx_tc_lhs_action *act) +{ + if (act->rid) + efx_tc_put_recirc_id(efx, act->rid); + if (act->zone) + efx_tc_ct_unregister_zone(efx, act->zone); + if (act->count) + efx_tc_flower_put_counter_index(efx, act->count); +} + +/** + * struct efx_tc_mangler_state - accumulates 32-bit pedits into fields + * + * @dst_mac_32: dst_mac[0:3] has been populated + * @dst_mac_16: dst_mac[4:5] has been populated + * @src_mac_16: src_mac[0:1] has been populated + * @src_mac_32: src_mac[2:5] has been populated + * @dst_mac: h_dest field of ethhdr + * @src_mac: h_source field of ethhdr + * + * Since FLOW_ACTION_MANGLE comes in 32-bit chunks that do not + * necessarily equate to whole fields of the packet header, this + * structure is used to hold the cumulative effect of the partial + * field pedits that have been processed so far. + */ +struct efx_tc_mangler_state { + u8 dst_mac_32:1; /* eth->h_dest[0:3] */ + u8 dst_mac_16:1; /* eth->h_dest[4:5] */ + u8 src_mac_16:1; /* eth->h_source[0:1] */ + u8 src_mac_32:1; /* eth->h_source[2:5] */ + unsigned char dst_mac[ETH_ALEN]; + unsigned char src_mac[ETH_ALEN]; +}; + +/** efx_tc_complete_mac_mangle() - pull complete field pedits out of @mung + * @efx: NIC we're installing a flow rule on + * @act: action set (cursor) to update + * @mung: accumulated partial mangles + * @extack: netlink extended ack for reporting errors + * + * Check @mung to find any combinations of partial mangles that can be + * combined into a complete packet field edit, add that edit to @act, + * and consume the partial mangles from @mung. + */ + +static int efx_tc_complete_mac_mangle(struct efx_nic *efx, + struct efx_tc_action_set *act, + struct efx_tc_mangler_state *mung, + struct netlink_ext_ack *extack) +{ + struct efx_tc_mac_pedit_action *ped; + + if (mung->dst_mac_32 && mung->dst_mac_16) { + ped = efx_tc_flower_get_mac(efx, mung->dst_mac, extack); + if (IS_ERR(ped)) + return PTR_ERR(ped); + + /* Check that we have not already populated dst_mac */ + if (act->dst_mac) + efx_tc_flower_put_mac(efx, act->dst_mac); + + act->dst_mac = ped; + + /* consume the incomplete state */ + mung->dst_mac_32 = 0; + mung->dst_mac_16 = 0; + } + if (mung->src_mac_16 && mung->src_mac_32) { + ped = efx_tc_flower_get_mac(efx, mung->src_mac, extack); + if (IS_ERR(ped)) + return PTR_ERR(ped); + + /* Check that we have not already populated src_mac */ + if (act->src_mac) + efx_tc_flower_put_mac(efx, act->src_mac); + + act->src_mac = ped; + + /* consume the incomplete state */ + mung->src_mac_32 = 0; + mung->src_mac_16 = 0; + } + return 0; +} + +static int efx_tc_pedit_add(struct efx_nic *efx, struct efx_tc_action_set *act, + const struct flow_action_entry *fa, + struct netlink_ext_ack *extack) +{ + switch (fa->mangle.htype) { + case FLOW_ACT_MANGLE_HDR_TYPE_IP4: + switch (fa->mangle.offset) { + case offsetof(struct iphdr, ttl): + /* check that pedit applies to ttl only */ + if (fa->mangle.mask != ~EFX_TC_HDR_TYPE_TTL_MASK) + break; + + /* Adding 0xff is equivalent to decrementing the ttl. + * Other added values are not supported. 
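+ * (The 8-bit add wraps: e.g. ttl 64 + 0xff = 0x13f, truncated to eight
+ * bits gives 63, i.e. a decrement by one.)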
+ */ + if ((fa->mangle.val & EFX_TC_HDR_TYPE_TTL_MASK) != U8_MAX) + break; + + /* check that we do not decrement ttl twice */ + if (!efx_tc_flower_action_order_ok(act, + EFX_TC_AO_DEC_TTL)) { + NL_SET_ERR_MSG_MOD(extack, "multiple dec ttl are not supported"); + return -EOPNOTSUPP; + } + act->do_ttl_dec = 1; + return 0; + default: + break; + } + break; + case FLOW_ACT_MANGLE_HDR_TYPE_IP6: + switch (fa->mangle.offset) { + case round_down(offsetof(struct ipv6hdr, hop_limit), 4): + /* check that pedit applies to hoplimit only */ + if (fa->mangle.mask != EFX_TC_HDR_TYPE_HLIMIT_MASK) + break; + + /* Adding 0xff is equivalent to decrementing the hoplimit. + * Other added values are not supported. + */ + if ((fa->mangle.val >> 24) != U8_MAX) + break; + + /* check that we do not decrement hoplimit twice */ + if (!efx_tc_flower_action_order_ok(act, + EFX_TC_AO_DEC_TTL)) { + NL_SET_ERR_MSG_MOD(extack, "multiple dec ttl are not supported"); + return -EOPNOTSUPP; + } + act->do_ttl_dec = 1; + return 0; + default: + break; + } + break; + default: + break; + } + + NL_SET_ERR_MSG_FMT_MOD(extack, + "ttl add action type %x %x %x/%x is not supported", + fa->mangle.htype, fa->mangle.offset, + fa->mangle.val, fa->mangle.mask); + return -EOPNOTSUPP; +} + +/** + * efx_tc_mangle() - handle a single 32-bit (or less) pedit + * @efx: NIC we're installing a flow rule on + * @act: action set (cursor) to update + * @fa: FLOW_ACTION_MANGLE action metadata + * @mung: accumulator for partial mangles + * @extack: netlink extended ack for reporting errors + * @match: original match used along with the mangle action + * + * Identify the fields written by a FLOW_ACTION_MANGLE, and record + * the partial mangle state in @mung. If this mangle completes an + * earlier partial mangle, consume and apply to @act by calling + * efx_tc_complete_mac_mangle(). 
+ */ + +static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act, + const struct flow_action_entry *fa, + struct efx_tc_mangler_state *mung, + struct netlink_ext_ack *extack, + struct efx_tc_match *match) +{ + __le32 mac32; + __le16 mac16; + u8 tr_ttl; + + switch (fa->mangle.htype) { + case FLOW_ACT_MANGLE_HDR_TYPE_ETH: + BUILD_BUG_ON(offsetof(struct ethhdr, h_dest) != 0); + BUILD_BUG_ON(offsetof(struct ethhdr, h_source) != 6); + if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_PEDIT_MAC_ADDRS)) { + NL_SET_ERR_MSG_MOD(extack, + "Pedit mangle mac action violates action order"); + return -EOPNOTSUPP; + } + switch (fa->mangle.offset) { + case 0: + if (fa->mangle.mask) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "mask (%#x) of eth.dst32 mangle is not supported", + fa->mangle.mask); + return -EOPNOTSUPP; + } + /* Ethernet address is little-endian */ + mac32 = cpu_to_le32(fa->mangle.val); + memcpy(mung->dst_mac, &mac32, sizeof(mac32)); + mung->dst_mac_32 = 1; + return efx_tc_complete_mac_mangle(efx, act, mung, extack); + case 4: + if (fa->mangle.mask == 0xffff) { + mac16 = cpu_to_le16(fa->mangle.val >> 16); + memcpy(mung->src_mac, &mac16, sizeof(mac16)); + mung->src_mac_16 = 1; + } else if (fa->mangle.mask == 0xffff0000) { + mac16 = cpu_to_le16((u16)fa->mangle.val); + memcpy(mung->dst_mac + 4, &mac16, sizeof(mac16)); + mung->dst_mac_16 = 1; + } else { + NL_SET_ERR_MSG_FMT_MOD(extack, + "mask (%#x) of eth+4 mangle is not high or low 16b", + fa->mangle.mask); + return -EOPNOTSUPP; + } + return efx_tc_complete_mac_mangle(efx, act, mung, extack); + case 8: + if (fa->mangle.mask) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "mask (%#x) of eth.src32 mangle is not supported", + fa->mangle.mask); + return -EOPNOTSUPP; + } + mac32 = cpu_to_le32(fa->mangle.val); + memcpy(mung->src_mac + 2, &mac32, sizeof(mac32)); + mung->src_mac_32 = 1; + return efx_tc_complete_mac_mangle(efx, act, mung, extack); + default: + NL_SET_ERR_MSG_FMT_MOD(extack, "mangle eth+%u %x/%x is not supported", + fa->mangle.offset, fa->mangle.val, fa->mangle.mask); + return -EOPNOTSUPP; + } + break; + case FLOW_ACT_MANGLE_HDR_TYPE_IP4: + switch (fa->mangle.offset) { + case offsetof(struct iphdr, ttl): + /* we currently only support pedit IP4 when it applies + * to TTL and then only when it can be achieved with a + * decrement ttl action + */ + + /* check that pedit applies to ttl only */ + if (fa->mangle.mask != ~EFX_TC_HDR_TYPE_TTL_MASK) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "mask (%#x) out of range, only support mangle action on ipv4.ttl", + fa->mangle.mask); + return -EOPNOTSUPP; + } + + /* we can only convert to a dec ttl when we have an + * exact match on the ttl field + */ + if (match->mask.ip_ttl != U8_MAX) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "only support mangle ttl when we have an exact match, current mask (%#x)", + match->mask.ip_ttl); + return -EOPNOTSUPP; + } + + /* check that we don't try to decrement 0, which equates + * to setting the ttl to 0xff + */ + if (match->value.ip_ttl == 0) { + NL_SET_ERR_MSG_MOD(extack, + "decrement ttl past 0 is not supported"); + return -EOPNOTSUPP; + } + + /* check that we do not decrement ttl twice */ + if (!efx_tc_flower_action_order_ok(act, + EFX_TC_AO_DEC_TTL)) { + NL_SET_ERR_MSG_MOD(extack, + "multiple dec ttl is not supported"); + return -EOPNOTSUPP; + } + + /* check pedit can be achieved with decrement action */ + tr_ttl = match->value.ip_ttl - 1; + if ((fa->mangle.val & EFX_TC_HDR_TYPE_TTL_MASK) == tr_ttl) { + act->do_ttl_dec = 1; + return 0; + } + + fallthrough; + default: + 
NL_SET_ERR_MSG_FMT_MOD(extack, + "only support mangle on the ttl field (offset is %u)", + fa->mangle.offset); + return -EOPNOTSUPP; + } + break; + case FLOW_ACT_MANGLE_HDR_TYPE_IP6: + switch (fa->mangle.offset) { + case round_down(offsetof(struct ipv6hdr, hop_limit), 4): + /* we currently only support pedit IP6 when it applies + * to the hoplimit and then only when it can be achieved + * with a decrement hoplimit action + */ + + /* check that pedit applies to ttl only */ + if (fa->mangle.mask != EFX_TC_HDR_TYPE_HLIMIT_MASK) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "mask (%#x) out of range, only support mangle action on ipv6.hop_limit", + fa->mangle.mask); + + return -EOPNOTSUPP; + } + + /* we can only convert to a dec ttl when we have an + * exact match on the ttl field + */ + if (match->mask.ip_ttl != U8_MAX) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "only support hop_limit when we have an exact match, current mask (%#x)", + match->mask.ip_ttl); + return -EOPNOTSUPP; + } + + /* check that we don't try to decrement 0, which equates + * to setting the ttl to 0xff + */ + if (match->value.ip_ttl == 0) { + NL_SET_ERR_MSG_MOD(extack, + "decrementing hop_limit past 0 is not supported"); + return -EOPNOTSUPP; + } + + /* check that we do not decrement hoplimit twice */ + if (!efx_tc_flower_action_order_ok(act, + EFX_TC_AO_DEC_TTL)) { + NL_SET_ERR_MSG_MOD(extack, + "multiple dec ttl is not supported"); + return -EOPNOTSUPP; + } + + /* check pedit can be achieved with decrement action */ + tr_ttl = match->value.ip_ttl - 1; + if ((fa->mangle.val >> 24) == tr_ttl) { + act->do_ttl_dec = 1; + return 0; + } + + fallthrough; + default: + NL_SET_ERR_MSG_FMT_MOD(extack, + "only support mangle on the hop_limit field"); + return -EOPNOTSUPP; + } + default: + NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled mangle htype %u for action rule", + fa->mangle.htype); + return -EOPNOTSUPP; + } + return 0; +} + +/** + * efx_tc_incomplete_mangle() - check for leftover partial pedits + * @mung: accumulator for partial mangles + * @extack: netlink extended ack for reporting errors + * + * Since the MAE can only overwrite whole fields, any partial + * field mangle left over on reaching packet delivery (mirred or + * end of TC actions) cannot be offloaded. Check for any such + * and reject them with -%EOPNOTSUPP. + */ + +static int efx_tc_incomplete_mangle(struct efx_tc_mangler_state *mung, + struct netlink_ext_ack *extack) +{ + if (mung->dst_mac_32 || mung->dst_mac_16) { + NL_SET_ERR_MSG_MOD(extack, "Incomplete pedit of destination MAC address"); + return -EOPNOTSUPP; + } + if (mung->src_mac_16 || mung->src_mac_32) { + NL_SET_ERR_MSG_MOD(extack, "Incomplete pedit of source MAC address"); + return -EOPNOTSUPP; + } + return 0; +} + +static int efx_tc_flower_replace_foreign_lhs_ar(struct efx_nic *efx, + struct flow_cls_offload *tc, + struct flow_rule *fr, + struct efx_tc_match *match, + struct net_device *net_dev) +{ + struct netlink_ext_ack *extack = tc->common.extack; + struct efx_tc_lhs_rule *rule, *old; + enum efx_encap_type type; + int rc; + + type = efx_tc_indr_netdev_type(net_dev); + if (type == EFX_ENCAP_TYPE_NONE) { + NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device"); + return -EOPNOTSUPP; + } + + rc = efx_mae_check_encap_type_supported(efx, type); + if (rc) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "Firmware reports no support for %s encap match", + efx_tc_encap_type_name(type)); + return rc; + } + /* This is an Action Rule, so it needs a separate Encap Match in the + * Outer Rule table. Insert that now. 
+ */ + rc = efx_tc_flower_record_encap_match(efx, match, type, + EFX_TC_EM_DIRECT, 0, 0, extack); + if (rc) + return rc; + + match->mask.recirc_id = 0xff; + if (match->mask.ct_state_trk && match->value.ct_state_trk) { + NL_SET_ERR_MSG_MOD(extack, "LHS rule can never match +trk"); + rc = -EOPNOTSUPP; + goto release_encap_match; + } + /* LHS rules are always -trk, so we don't need to match on that */ + match->mask.ct_state_trk = 0; + match->value.ct_state_trk = 0; + /* We must inhibit match on TCP SYN/FIN/RST, so that SW can see + * the packet and update the conntrack table. + * Outer Rules will do that with CT_TCP_FLAGS_INHIBIT, but Action + * Rules don't have that; instead they support matching on + * TCP_SYN_FIN_RST (aka TCP_INTERESTING_FLAGS), so use that. + * This is only strictly needed if there will be a DO_CT action, + * which we don't know yet, but typically there will be and it's + * simpler not to bother checking here. + */ + match->mask.tcp_syn_fin_rst = true; + + rc = efx_mae_match_check_caps(efx, &match->mask, extack); + if (rc) + goto release_encap_match; + + rule = kzalloc(sizeof(*rule), GFP_USER); + if (!rule) { + rc = -ENOMEM; + goto release_encap_match; + } + rule->cookie = tc->cookie; + rule->is_ar = true; + old = rhashtable_lookup_get_insert_fast(&efx->tc->lhs_rule_ht, + &rule->linkage, + efx_tc_lhs_rule_ht_params); + if (old) { + netif_dbg(efx, drv, efx->net_dev, + "Already offloaded rule (cookie %lx)\n", tc->cookie); + rc = -EEXIST; + NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded"); + goto release; + } + + /* Parse actions */ + rc = efx_tc_flower_handle_lhs_actions(efx, tc, fr, net_dev, rule); + if (rc) + goto release; + + rule->match = *match; + rule->lhs_act.tun_type = type; + + rc = efx_mae_insert_lhs_rule(efx, rule, EFX_TC_PRIO_TC); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Failed to insert rule in hw"); + goto release; + } + netif_dbg(efx, drv, efx->net_dev, + "Successfully parsed lhs rule (cookie %lx)\n", + tc->cookie); + return 0; + +release: + efx_tc_flower_release_lhs_actions(efx, &rule->lhs_act); + if (!old) + rhashtable_remove_fast(&efx->tc->lhs_rule_ht, &rule->linkage, + efx_tc_lhs_rule_ht_params); + kfree(rule); +release_encap_match: + if (match->encap) + efx_tc_flower_release_encap_match(efx, match->encap); + return rc; +} + +static int efx_tc_flower_replace_foreign_lhs(struct efx_nic *efx, + struct flow_cls_offload *tc, + struct flow_rule *fr, + struct efx_tc_match *match, + struct net_device *net_dev) +{ + struct netlink_ext_ack *extack = tc->common.extack; + struct efx_tc_lhs_rule *rule, *old; + enum efx_encap_type type; + int rc; + + if (tc->common.chain_index) { + NL_SET_ERR_MSG_MOD(extack, "LHS rule only allowed in chain 0"); + return -EOPNOTSUPP; + } + + if (!efx_tc_match_is_encap(&match->mask)) { + /* This is not a tunnel decap rule, ignore it */ + netif_dbg(efx, drv, efx->net_dev, "Ignoring foreign LHS filter without encap match\n"); + return -EOPNOTSUPP; + } + + if (efx_tc_flower_flhs_needs_ar(match)) + return efx_tc_flower_replace_foreign_lhs_ar(efx, tc, fr, match, + net_dev); + + type = efx_tc_indr_netdev_type(net_dev); + if (type == EFX_ENCAP_TYPE_NONE) { + NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device\n"); + return -EOPNOTSUPP; + } + + rc = efx_mae_check_encap_type_supported(efx, type); + if (rc) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "Firmware reports no support for %s encap match", + efx_tc_encap_type_name(type)); + return rc; + } + /* Reserve the outer tuple with a pseudo Encap Match */ + rc = 
efx_tc_flower_record_encap_match(efx, match, type, + EFX_TC_EM_PSEUDO_OR, 0, 0, + extack); + if (rc) + return rc; + + if (match->mask.ct_state_trk && match->value.ct_state_trk) { + NL_SET_ERR_MSG_MOD(extack, "LHS rule can never match +trk"); + rc = -EOPNOTSUPP; + goto release_encap_match; + } + /* LHS rules are always -trk, so we don't need to match on that */ + match->mask.ct_state_trk = 0; + match->value.ct_state_trk = 0; + + rc = efx_tc_flower_translate_flhs_match(match); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "LHS rule cannot match on inner fields"); + goto release_encap_match; + } + + rc = efx_mae_match_check_caps_lhs(efx, &match->mask, extack); + if (rc) + goto release_encap_match; + + rule = kzalloc(sizeof(*rule), GFP_USER); + if (!rule) { + rc = -ENOMEM; + goto release_encap_match; + } + rule->cookie = tc->cookie; + old = rhashtable_lookup_get_insert_fast(&efx->tc->lhs_rule_ht, + &rule->linkage, + efx_tc_lhs_rule_ht_params); + if (old) { + netif_dbg(efx, drv, efx->net_dev, + "Already offloaded rule (cookie %lx)\n", tc->cookie); + rc = -EEXIST; + NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded"); + goto release; + } + + /* Parse actions */ + rc = efx_tc_flower_handle_lhs_actions(efx, tc, fr, net_dev, rule); + if (rc) + goto release; + + rule->match = *match; + rule->lhs_act.tun_type = type; + + rc = efx_mae_insert_lhs_rule(efx, rule, EFX_TC_PRIO_TC); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Failed to insert rule in hw"); + goto release; + } + netif_dbg(efx, drv, efx->net_dev, + "Successfully parsed lhs rule (cookie %lx)\n", + tc->cookie); + return 0; + +release: + efx_tc_flower_release_lhs_actions(efx, &rule->lhs_act); + if (!old) + rhashtable_remove_fast(&efx->tc->lhs_rule_ht, &rule->linkage, + efx_tc_lhs_rule_ht_params); + kfree(rule); +release_encap_match: + if (match->encap) + efx_tc_flower_release_encap_match(efx, match->encap); + return rc; +} + static int efx_tc_flower_replace_foreign(struct efx_nic *efx, struct net_device *net_dev, struct flow_cls_offload *tc) @@ -664,7 +1686,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx, /* Parse match */ memset(&match, 0, sizeof(match)); - rc = efx_tc_flower_parse_match(efx, fr, &match, NULL); + rc = efx_tc_flower_parse_match(efx, fr, &match, extack); if (rc) return rc; /* The rule as given to us doesn't specify a source netdevice. @@ -680,12 +1702,46 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx, match.value.ingress_port = rc; match.mask.ingress_port = ~0; + if (efx_tc_rule_is_lhs_rule(fr, &match)) + return efx_tc_flower_replace_foreign_lhs(efx, tc, fr, &match, + net_dev); + if (tc->common.chain_index) { - NL_SET_ERR_MSG_MOD(extack, "No support for nonzero chain_index"); - return -EOPNOTSUPP; + struct efx_tc_recirc_id *rid; + + rid = efx_tc_get_recirc_id(efx, tc->common.chain_index, net_dev); + if (IS_ERR(rid)) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "Failed to allocate a hardware recirculation ID for chain_index %u", + tc->common.chain_index); + return PTR_ERR(rid); + } + match.rid = rid; + match.value.recirc_id = rid->fw_id; } match.mask.recirc_id = 0xff; + /* AR table can't match on DO_CT (+trk). But a commonly used pattern is + * +trk+est, which is strictly implied by +est, so rewrite it to that. + */ + if (match.mask.ct_state_trk && match.value.ct_state_trk && + match.mask.ct_state_est && match.value.ct_state_est) + match.mask.ct_state_trk = 0; + /* Thanks to CT_TCP_FLAGS_INHIBIT, packets with interesting flags could + * match +trk-est (CT_HIT=0) despite being on an established connection. 
+ * So make -est imply -tcp_syn_fin_rst match to ensure these packets + * still hit the software path. + */ + if (match.mask.ct_state_est && !match.value.ct_state_est) { + if (match.value.tcp_syn_fin_rst) { + /* Can't offload this combination */ + NL_SET_ERR_MSG_MOD(extack, "TCP flags and -est conflict for offload"); + rc = -EOPNOTSUPP; + goto release; + } + match.mask.tcp_syn_fin_rst = true; + } + flow_action_for_each(i, fa, &fr->action) { switch (fa->id) { case FLOW_ACTION_REDIRECT: @@ -702,12 +1758,13 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx, if (!found) { /* We don't care. */ netif_dbg(efx, drv, efx->net_dev, "Ignoring foreign filter that doesn't egdev us\n"); - return -EOPNOTSUPP; + rc = -EOPNOTSUPP; + goto release; } - rc = efx_mae_match_check_caps(efx, &match.mask, NULL); + rc = efx_mae_match_check_caps(efx, &match.mask, extack); if (rc) - return rc; + goto release; if (efx_tc_match_is_encap(&match.mask)) { enum efx_encap_type type; @@ -716,7 +1773,8 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx, if (type == EFX_ENCAP_TYPE_NONE) { NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device"); - return -EOPNOTSUPP; + rc = -EOPNOTSUPP; + goto release; } rc = efx_mae_check_encap_type_supported(efx, type); @@ -724,37 +1782,41 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx, NL_SET_ERR_MSG_FMT_MOD(extack, "Firmware reports no support for %s encap match", efx_tc_encap_type_name(type)); - return rc; + goto release; } rc = efx_tc_flower_record_encap_match(efx, &match, type, EFX_TC_EM_DIRECT, 0, 0, extack); if (rc) - return rc; - } else { + goto release; + } else if (!tc->common.chain_index) { /* This is not a tunnel decap rule, ignore it */ netif_dbg(efx, drv, efx->net_dev, "Ignoring foreign filter without encap match\n"); - return -EOPNOTSUPP; + rc = -EOPNOTSUPP; + goto release; } rule = kzalloc(sizeof(*rule), GFP_USER); if (!rule) { rc = -ENOMEM; - goto out_free; + goto release; } INIT_LIST_HEAD(&rule->acts.list); rule->cookie = tc->cookie; old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht, &rule->linkage, efx_tc_match_action_ht_params); - if (old) { + if (IS_ERR(old)) { + rc = PTR_ERR(old); + goto release; + } else if (old) { netif_dbg(efx, drv, efx->net_dev, "Ignoring already-offloaded rule (cookie %lx)\n", tc->cookie); rc = -EEXIST; - goto out_free; + goto release; } act = kzalloc(sizeof(*act), GFP_USER); @@ -788,6 +1850,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx, goto release; } if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_COUNT)) { + NL_SET_ERR_MSG_MOD(extack, "Count action violates action order (can't happen)"); rc = -EOPNOTSUPP; goto release; } @@ -912,21 +1975,98 @@ release: /* We failed to insert the rule, so free up any entries we created in * subsidiary tables. 
*/ + if (match.rid) + efx_tc_put_recirc_id(efx, match.rid); if (act) efx_tc_free_action_set(efx, act, false); if (rule) { - rhashtable_remove_fast(&efx->tc->match_action_ht, - &rule->linkage, - efx_tc_match_action_ht_params); + if (!old) + rhashtable_remove_fast(&efx->tc->match_action_ht, + &rule->linkage, + efx_tc_match_action_ht_params); efx_tc_free_action_set_list(efx, &rule->acts, false); } -out_free: kfree(rule); if (match.encap) efx_tc_flower_release_encap_match(efx, match.encap); return rc; } +static int efx_tc_flower_replace_lhs(struct efx_nic *efx, + struct flow_cls_offload *tc, + struct flow_rule *fr, + struct efx_tc_match *match, + struct efx_rep *efv, + struct net_device *net_dev) +{ + struct netlink_ext_ack *extack = tc->common.extack; + struct efx_tc_lhs_rule *rule, *old; + int rc; + + if (tc->common.chain_index) { + NL_SET_ERR_MSG_MOD(extack, "LHS rule only allowed in chain 0"); + return -EOPNOTSUPP; + } + + if (match->mask.ct_state_trk && match->value.ct_state_trk) { + NL_SET_ERR_MSG_MOD(extack, "LHS rule can never match +trk"); + return -EOPNOTSUPP; + } + /* LHS rules are always -trk, so we don't need to match on that */ + match->mask.ct_state_trk = 0; + match->value.ct_state_trk = 0; + + rc = efx_mae_match_check_caps_lhs(efx, &match->mask, extack); + if (rc) + return rc; + + rule = kzalloc(sizeof(*rule), GFP_USER); + if (!rule) + return -ENOMEM; + rule->cookie = tc->cookie; + old = rhashtable_lookup_get_insert_fast(&efx->tc->lhs_rule_ht, + &rule->linkage, + efx_tc_lhs_rule_ht_params); + if (IS_ERR(old)) { + rc = PTR_ERR(old); + goto release; + } else if (old) { + netif_dbg(efx, drv, efx->net_dev, + "Already offloaded rule (cookie %lx)\n", tc->cookie); + rc = -EEXIST; + NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded"); + goto release; + } + + /* Parse actions */ + /* See note in efx_tc_flower_replace() regarding passed net_dev + * (used for efx_tc_get_recirc_id()). + */ + rc = efx_tc_flower_handle_lhs_actions(efx, tc, fr, efx->net_dev, rule); + if (rc) + goto release; + + rule->match = *match; + + rc = efx_mae_insert_lhs_rule(efx, rule, EFX_TC_PRIO_TC); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Failed to insert rule in hw"); + goto release; + } + netif_dbg(efx, drv, efx->net_dev, + "Successfully parsed lhs rule (cookie %lx)\n", + tc->cookie); + return 0; + +release: + efx_tc_flower_release_lhs_actions(efx, &rule->lhs_act); + if (!old) + rhashtable_remove_fast(&efx->tc->lhs_rule_ht, &rule->linkage, + efx_tc_lhs_rule_ht_params); + kfree(rule); + return rc; +} + static int efx_tc_flower_replace(struct efx_nic *efx, struct net_device *net_dev, struct flow_cls_offload *tc, @@ -936,6 +2076,7 @@ static int efx_tc_flower_replace(struct efx_nic *efx, struct netlink_ext_ack *extack = tc->common.extack; const struct ip_tunnel_info *encap_info = NULL; struct efx_tc_flow_rule *rule = NULL, *old; + struct efx_tc_mangler_state mung = {}; struct efx_tc_action_set *act = NULL; const struct flow_action_entry *fa; struct efx_rep *from_efv, *to_efv; @@ -982,30 +2123,83 @@ static int efx_tc_flower_replace(struct efx_nic *efx, return -EOPNOTSUPP; } + if (efx_tc_rule_is_lhs_rule(fr, &match)) + return efx_tc_flower_replace_lhs(efx, tc, fr, &match, efv, + net_dev); + + /* chain_index 0 is always recirc_id 0 (and does not appear in recirc_ht). + * Conveniently, match.rid == NULL and match.value.recirc_id == 0 owing + * to the initial memset(), so we don't need to do anything in that case. 
+ */ if (tc->common.chain_index) { - NL_SET_ERR_MSG_MOD(extack, "No support for nonzero chain_index"); - return -EOPNOTSUPP; + struct efx_tc_recirc_id *rid; + + /* Note regarding passed net_dev: + * VFreps and PF can share chain namespace, as they have + * distinct ingress_mports. So we don't need to burn an + * extra recirc_id if both use the same chain_index. + * (Strictly speaking, we could give each VFrep its own + * recirc_id namespace that doesn't take IDs away from the + * PF, but that would require a bunch of additional IDAs - + * one for each representor - and that's not likely to be + * the main cause of recirc_id exhaustion anyway.) + */ + rid = efx_tc_get_recirc_id(efx, tc->common.chain_index, + efx->net_dev); + if (IS_ERR(rid)) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "Failed to allocate a hardware recirculation ID for chain_index %u", + tc->common.chain_index); + return PTR_ERR(rid); + } + match.rid = rid; + match.value.recirc_id = rid->fw_id; } match.mask.recirc_id = 0xff; + /* AR table can't match on DO_CT (+trk). But a commonly used pattern is + * +trk+est, which is strictly implied by +est, so rewrite it to that. + */ + if (match.mask.ct_state_trk && match.value.ct_state_trk && + match.mask.ct_state_est && match.value.ct_state_est) + match.mask.ct_state_trk = 0; + /* Thanks to CT_TCP_FLAGS_INHIBIT, packets with interesting flags could + * match +trk-est (CT_HIT=0) despite being on an established connection. + * So make -est imply -tcp_syn_fin_rst match to ensure these packets + * still hit the software path. + */ + if (match.mask.ct_state_est && !match.value.ct_state_est) { + if (match.value.tcp_syn_fin_rst) { + /* Can't offload this combination */ + rc = -EOPNOTSUPP; + goto release; + } + match.mask.tcp_syn_fin_rst = true; + } + rc = efx_mae_match_check_caps(efx, &match.mask, extack); if (rc) - return rc; + goto release; rule = kzalloc(sizeof(*rule), GFP_USER); - if (!rule) - return -ENOMEM; + if (!rule) { + rc = -ENOMEM; + goto release; + } INIT_LIST_HEAD(&rule->acts.list); rule->cookie = tc->cookie; old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht, &rule->linkage, efx_tc_match_action_ht_params); - if (old) { + if (IS_ERR(old)) { + rc = PTR_ERR(old); + goto release; + } else if (old) { netif_dbg(efx, drv, efx->net_dev, "Already offloaded rule (cookie %lx)\n", tc->cookie); NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded"); - kfree(rule); - return -EEXIST; + rc = -EEXIST; + goto release; } /* Parse actions */ @@ -1222,6 +2416,16 @@ static int efx_tc_flower_replace(struct efx_nic *efx, act->vlan_proto[act->vlan_push] = fa->vlan.proto; act->vlan_push++; break; + case FLOW_ACTION_ADD: + rc = efx_tc_pedit_add(efx, act, fa, extack); + if (rc < 0) + goto release; + break; + case FLOW_ACTION_MANGLE: + rc = efx_tc_mangle(efx, act, fa, &mung, extack, &match); + if (rc < 0) + goto release; + break; case FLOW_ACTION_TUNNEL_ENCAP: if (encap_info) { /* Can't specify encap multiple times. 
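The FLOW_ACTION_MANGLE handling above feeds 32-bit pedit chunks into struct efx_tc_mangler_state until a whole MAC address is known, at which point a single hardware edit is emitted. A standalone C model of that accumulation for the destination MAC, with invented demo_* names and byte values chosen purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* 32-bit pedit chunks at ethhdr offsets 0 and 4 are accumulated until
 * the full 6-byte destination MAC is known.
 */
struct demo_mangler {
	unsigned int dst_lo:1;	/* dst[0..3] populated */
	unsigned int dst_hi:1;	/* dst[4..5] populated */
	uint8_t dst[6];
};

/* offset 0, mask 0: all four bytes written; packet byte order is the
 * little-endian layout of val (cf. cpu_to_le32() in efx_tc_mangle())
 */
static void demo_mangle_off0(struct demo_mangler *m, uint32_t val)
{
	int i;

	for (i = 0; i < 4; i++)
		m->dst[i] = (val >> (8 * i)) & 0xff;
	m->dst_lo = 1;
}

/* offset 4, mask 0xffff0000: only the low half written, giving dst[4..5] */
static void demo_mangle_off4(struct demo_mangler *m, uint32_t val)
{
	m->dst[4] = val & 0xff;
	m->dst[5] = (val >> 8) & 0xff;
	m->dst_hi = 1;
}

static int demo_mangle_complete(const struct demo_mangler *m)
{
	return m->dst_lo && m->dst_hi;	/* both halves seen: one HW edit */
}

int main(void)
{
	struct demo_mangler m = { 0 };

	demo_mangle_off0(&m, 0x33221102);	/* bytes 02:11:22:33 */
	demo_mangle_off4(&m, 0x5544);		/* bytes 44:55 */
	if (demo_mangle_complete(&m))
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       m.dst[0], m.dst[1], m.dst[2],
		       m.dst[3], m.dst[4], m.dst[5]);
	return 0;	/* prints 02:11:22:33:44:55 */
}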
@@ -1253,6 +2457,14 @@ static int efx_tc_flower_replace(struct efx_nic *efx, NL_SET_ERR_MSG_MOD(extack, "Cannot offload tunnel decap action without tunnel device"); rc = -EOPNOTSUPP; goto release; + case FLOW_ACTION_CT: + if (fa->ct.action != TCA_CT_ACT_NAT) { + rc = -EOPNOTSUPP; + NL_SET_ERR_MSG_FMT_MOD(extack, "Can only offload CT 'nat' action in RHS rules, not %d", fa->ct.action); + goto release; + } + act->do_nat = 1; + break; default: NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u", fa->id); @@ -1261,6 +2473,9 @@ static int efx_tc_flower_replace(struct efx_nic *efx, } } + rc = efx_tc_incomplete_mangle(&mung, extack); + if (rc < 0) + goto release; if (act) { /* Not shot/redirected, so deliver to default dest */ if (from_efv == EFX_EFV_PF) @@ -1323,12 +2538,15 @@ release: /* We failed to insert the rule, so free up any entries we created in * subsidiary tables. */ + if (match.rid) + efx_tc_put_recirc_id(efx, match.rid); if (act) efx_tc_free_action_set(efx, act, false); if (rule) { - rhashtable_remove_fast(&efx->tc->match_action_ht, - &rule->linkage, - efx_tc_match_action_ht_params); + if (!old) + rhashtable_remove_fast(&efx->tc->match_action_ht, + &rule->linkage, + efx_tc_match_action_ht_params); efx_tc_free_action_set_list(efx, &rule->acts, false); } kfree(rule); @@ -1340,8 +2558,26 @@ static int efx_tc_flower_destroy(struct efx_nic *efx, struct flow_cls_offload *tc) { struct netlink_ext_ack *extack = tc->common.extack; + struct efx_tc_lhs_rule *lhs_rule; struct efx_tc_flow_rule *rule; + lhs_rule = rhashtable_lookup_fast(&efx->tc->lhs_rule_ht, &tc->cookie, + efx_tc_lhs_rule_ht_params); + if (lhs_rule) { + /* Remove it from HW */ + efx_mae_remove_lhs_rule(efx, lhs_rule); + /* Delete it from SW */ + efx_tc_flower_release_lhs_actions(efx, &lhs_rule->lhs_act); + rhashtable_remove_fast(&efx->tc->lhs_rule_ht, &lhs_rule->linkage, + efx_tc_lhs_rule_ht_params); + if (lhs_rule->match.encap) + efx_tc_flower_release_encap_match(efx, lhs_rule->match.encap); + netif_dbg(efx, drv, efx->net_dev, "Removed (lhs) filter %lx\n", + lhs_rule->cookie); + kfree(lhs_rule); + return 0; + } + rule = rhashtable_lookup_fast(&efx->tc->match_action_ht, &tc->cookie, efx_tc_match_action_ht_params); if (!rule) { @@ -1657,11 +2893,17 @@ int efx_init_tc(struct efx_nic *efx) rc = efx_tc_configure_fallback_acts_reps(efx); if (rc) return rc; - rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx); + rc = efx_mae_get_tables(efx); if (rc) return rc; + rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx); + if (rc) + goto out_free; efx->tc->up = true; return 0; +out_free: + efx_mae_free_tables(efx); + return rc; } void efx_fini_tc(struct efx_nic *efx) @@ -1677,6 +2919,7 @@ void efx_fini_tc(struct efx_nic *efx) efx_tc_deconfigure_fallback_acts(efx, &efx->tc->facts.pf); efx_tc_deconfigure_fallback_acts(efx, &efx->tc->facts.reps); efx->tc->up = false; + efx_mae_free_tables(efx); } /* At teardown time, all TC filter rules (and thus all resources they created) @@ -1691,6 +2934,42 @@ static void efx_tc_encap_match_free(void *ptr, void *__unused) kfree(encap); } +static void efx_tc_recirc_free(void *ptr, void *arg) +{ + struct efx_tc_recirc_id *rid = ptr; + struct efx_nic *efx = arg; + + WARN_ON(refcount_read(&rid->ref)); + ida_free(&efx->tc->recirc_ida, rid->fw_id); + kfree(rid); +} + +static void efx_tc_lhs_free(void *ptr, void *arg) +{ + struct efx_tc_lhs_rule *rule = ptr; + struct efx_nic *efx = arg; + + netif_err(efx, drv, efx->net_dev, + "tc lhs_rule %lx still present at teardown, removing\n", + rule->cookie); + + if 
(rule->lhs_act.zone) + efx_tc_ct_unregister_zone(efx, rule->lhs_act.zone); + if (rule->lhs_act.count) + efx_tc_flower_put_counter_index(efx, rule->lhs_act.count); + efx_mae_remove_lhs_rule(efx, rule); + + kfree(rule); +} + +static void efx_tc_mac_free(void *ptr, void *__unused) +{ + struct efx_tc_mac_pedit_action *ped = ptr; + + WARN_ON(refcount_read(&ped->ref)); + kfree(ped); +} + static void efx_tc_flow_free(void *ptr, void *arg) { struct efx_tc_flow_rule *rule = ptr; @@ -1731,12 +3010,25 @@ int efx_init_struct_tc(struct efx_nic *efx) rc = efx_tc_init_counters(efx); if (rc < 0) goto fail_counters; + rc = rhashtable_init(&efx->tc->mac_ht, &efx_tc_mac_ht_params); + if (rc < 0) + goto fail_mac_ht; rc = rhashtable_init(&efx->tc->encap_match_ht, &efx_tc_encap_match_ht_params); if (rc < 0) goto fail_encap_match_ht; rc = rhashtable_init(&efx->tc->match_action_ht, &efx_tc_match_action_ht_params); if (rc < 0) goto fail_match_action_ht; + rc = rhashtable_init(&efx->tc->lhs_rule_ht, &efx_tc_lhs_rule_ht_params); + if (rc < 0) + goto fail_lhs_rule_ht; + rc = efx_tc_init_conntrack(efx); + if (rc < 0) + goto fail_conntrack; + rc = rhashtable_init(&efx->tc->recirc_ht, &efx_tc_recirc_ht_params); + if (rc < 0) + goto fail_recirc_ht; + ida_init(&efx->tc->recirc_ida); efx->tc->reps_filter_uc = -1; efx->tc->reps_filter_mc = -1; INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list); @@ -1749,9 +3041,17 @@ int efx_init_struct_tc(struct efx_nic *efx) efx->tc->facts.reps.fw_id = MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL; efx->extra_channel_type[EFX_EXTRA_CHANNEL_TC] = &efx_tc_channel_type; return 0; +fail_recirc_ht: + efx_tc_destroy_conntrack(efx); +fail_conntrack: + rhashtable_destroy(&efx->tc->lhs_rule_ht); +fail_lhs_rule_ht: + rhashtable_destroy(&efx->tc->match_action_ht); fail_match_action_ht: rhashtable_destroy(&efx->tc->encap_match_ht); fail_encap_match_ht: + rhashtable_destroy(&efx->tc->mac_ht); +fail_mac_ht: efx_tc_destroy_counters(efx); fail_counters: efx_tc_destroy_encap_actions(efx); @@ -1778,10 +3078,16 @@ void efx_fini_struct_tc(struct efx_nic *efx) MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL); EFX_WARN_ON_PARANOID(efx->tc->facts.reps.fw_id != MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL); + rhashtable_free_and_destroy(&efx->tc->lhs_rule_ht, efx_tc_lhs_free, efx); rhashtable_free_and_destroy(&efx->tc->match_action_ht, efx_tc_flow_free, efx); rhashtable_free_and_destroy(&efx->tc->encap_match_ht, efx_tc_encap_match_free, NULL); + efx_tc_fini_conntrack(efx); + rhashtable_free_and_destroy(&efx->tc->recirc_ht, efx_tc_recirc_free, efx); + WARN_ON(!ida_is_empty(&efx->tc->recirc_ida)); + ida_destroy(&efx->tc->recirc_ida); + rhashtable_free_and_destroy(&efx->tc->mac_ht, efx_tc_mac_free, NULL); efx_tc_fini_counters(efx); efx_tc_fini_encap_actions(efx); mutex_unlock(&efx->tc->mutex); diff --git a/drivers/net/ethernet/sfc/tc.h b/drivers/net/ethernet/sfc/tc.h index 1549c3df43bb..7b5190078bee 100644 --- a/drivers/net/ethernet/sfc/tc.h +++ b/drivers/net/ethernet/sfc/tc.h @@ -18,36 +18,78 @@ #define IS_ALL_ONES(v) (!(typeof (v))~(v)) -#ifdef CONFIG_IPV6 +/** + * struct efx_tc_mac_pedit_action - mac pedit action fields + * + * @h_addr: mac address field of ethernet header + * @linkage: rhashtable reference + * @ref: reference count + * @fw_id: index of this entry in firmware MAC address table + * + * MAC address edits are indirected through a table in the hardware + */ +struct efx_tc_mac_pedit_action { + u8 h_addr[ETH_ALEN]; + struct rhash_head linkage; + refcount_t ref; + u32 fw_id; 
/* index of this entry in firmware MAC address table */ +}; + static inline bool efx_ipv6_addr_all_ones(struct in6_addr *addr) { return !memchr_inv(addr, 0xff, sizeof(*addr)); } -#endif struct efx_tc_encap_action; /* see tc_encap_actions.h */ +/** + * struct efx_tc_action_set - collection of tc action fields + * + * @vlan_push: the number of vlan headers to push + * @vlan_pop: the number of vlan headers to pop + * @decap: used to indicate a tunnel header decapsulation should take place + * @do_nat: perform NAT/NPT with values returned by conntrack match + * @do_ttl_dec: used to indicate IP TTL / Hop Limit should be decremented + * @deliver: used to indicate a deliver action should take place + * @vlan_tci: tci fields for vlan push actions + * @vlan_proto: ethernet types for vlan push actions + * @count: counter mapping + * @encap_md: encap entry in tc_encap_ht table + * @encap_user: linked list of encap users (encap_md->users) + * @user: owning action-set-list. Only populated if @encap_md is; used by efx_tc_update_encap() fallback handling + * @count_user: linked list of counter users (counter->users) + * @dest_mport: destination mport + * @src_mac: source mac entry in tc_mac_ht table + * @dst_mac: destination mac entry in tc_mac_ht table + * @fw_id: index of this entry in firmware actions table + * @list: linked list of tc actions + * + */ struct efx_tc_action_set { u16 vlan_push:2; u16 vlan_pop:2; u16 decap:1; + u16 do_nat:1; + u16 do_ttl_dec:1; u16 deliver:1; - __be16 vlan_tci[2]; /* TCIs for vlan_push */ - __be16 vlan_proto[2]; /* Ethertypes for vlan_push */ + __be16 vlan_tci[2]; + __be16 vlan_proto[2]; struct efx_tc_counter_index *count; - struct efx_tc_encap_action *encap_md; /* entry in tc_encap_ht table */ - struct list_head encap_user; /* entry on encap_md->users list */ - struct efx_tc_action_set_list *user; /* Only populated if encap_md */ - struct list_head count_user; /* entry on counter->users list, if encap */ + struct efx_tc_encap_action *encap_md; + struct list_head encap_user; + struct efx_tc_action_set_list *user; + struct list_head count_user; u32 dest_mport; - u32 fw_id; /* index of this entry in firmware actions table */ + struct efx_tc_mac_pedit_action *src_mac; + struct efx_tc_mac_pedit_action *dst_mac; + u32 fw_id; struct list_head list; }; struct efx_tc_match_fields { /* L1 */ u32 ingress_port; - u8 recirc_id; + u8 recirc_id; /* mapped from (u32) TC chain_index to smaller space */ /* L2 (inner when encap) */ __be16 eth_proto; __be16 vlan_tci[2], vlan_proto[2]; @@ -62,6 +104,7 @@ struct efx_tc_match_fields { /* L4 */ __be16 l4_sport, l4_dport; /* Ports (UDP, TCP) */ __be16 tcp_flags; + bool tcp_syn_fin_rst; /* true if ANY of SYN/FIN/RST are set */ /* Encap. The following are *outer* fields. Note that there are no * outer eth (L2) fields; this is because TC doesn't have them. */ @@ -70,6 +113,10 @@ struct efx_tc_match_fields { u8 enc_ip_tos, enc_ip_ttl; __be16 enc_sport, enc_dport; __be32 enc_keyid; /* e.g. VNI, VSID */ + /* Conntrack. */ + u16 ct_state_trk:1, ct_state_est:1; + u32 ct_mark; + u16 ct_zone; }; static inline bool efx_tc_match_is_encap(const struct efx_tc_match_fields *mask) @@ -95,10 +142,14 @@ static inline bool efx_tc_match_is_encap(const struct efx_tc_match_fields *mask) * The pseudo encap match may be referenced again by an encap match * with different values for these fields, but all masks must match the * first (stored in our child_* fields). + * @EFX_TC_EM_PSEUDO_OR: registered by an fLHS rule that fits in the OR + * table. 
The &struct efx_tc_lhs_rule already holds the HW OR entry. + * Only one reference to this encap match may exist. */ enum efx_tc_em_pseudo_type { EFX_TC_EM_DIRECT, EFX_TC_EM_PSEUDO_MASK, + EFX_TC_EM_PSEUDO_OR, }; struct efx_tc_encap_match { @@ -117,10 +168,19 @@ struct efx_tc_encap_match { struct efx_tc_encap_match *pseudo; /* Referenced pseudo EM if needed */ }; +struct efx_tc_recirc_id { + u32 chain_index; + struct net_device *net_dev; + struct rhash_head linkage; + refcount_t ref; + u8 fw_id; /* index allocated for use in the MAE */ +}; + struct efx_tc_match { struct efx_tc_match_fields value; struct efx_tc_match_fields mask; struct efx_tc_encap_match *encap; + struct efx_tc_recirc_id *rid; }; struct efx_tc_action_set_list { @@ -128,6 +188,13 @@ struct efx_tc_action_set_list { u32 fw_id; }; +struct efx_tc_lhs_action { + enum efx_encap_type tun_type; + struct efx_tc_recirc_id *rid; + struct efx_tc_ct_zone *zone; + struct efx_tc_counter_index *count; +}; + struct efx_tc_flow_rule { unsigned long cookie; struct rhash_head linkage; @@ -137,12 +204,63 @@ struct efx_tc_flow_rule { u32 fw_id; }; +struct efx_tc_lhs_rule { + unsigned long cookie; + struct efx_tc_match match; + struct efx_tc_lhs_action lhs_act; + struct rhash_head linkage; + u32 fw_id; + bool is_ar; /* Action Rule (for OR-AR-CT-AR sequence) */ +}; + enum efx_tc_rule_prios { EFX_TC_PRIO_TC, /* Rule inserted by TC */ EFX_TC_PRIO_DFLT, /* Default switch rule; one of efx_tc_default_rules */ EFX_TC_PRIO__NUM }; +struct efx_tc_table_field_fmt { + u16 field_id; + u16 lbn; + u16 width; + u8 masking; + u8 scheme; +}; + +struct efx_tc_table_desc { + u16 type; + u16 key_width; + u16 resp_width; + u16 n_keys; + u16 n_resps; + u16 n_prios; + u8 flags; + u8 scheme; + struct efx_tc_table_field_fmt *keys; + struct efx_tc_table_field_fmt *resps; +}; + +struct efx_tc_table_ct { /* TABLE_ID_CONNTRACK_TABLE */ + struct efx_tc_table_desc desc; + bool hooked; + struct { /* indices of named fields within @desc.keys */ + u8 eth_proto_idx; + u8 ip_proto_idx; + u8 src_ip_idx; /* either v4 or v6 */ + u8 dst_ip_idx; + u8 l4_sport_idx; + u8 l4_dport_idx; + u8 zone_idx; /* for TABLE_FIELD_ID_DOMAIN */ + } keys; + struct { /* indices of named fields within @desc.resps */ + u8 dnat_idx; + u8 nat_ip_idx; + u8 l4_natport_idx; + u8 mark_idx; + u8 counter_id_idx; + } resps; +}; + /** * struct efx_tc_state - control plane data for TC offload * @@ -152,9 +270,16 @@ enum efx_tc_rule_prios { * @counter_ht: Hashtable of TC counters (FW IDs and counter values) * @counter_id_ht: Hashtable mapping TC counter cookies to counters * @encap_ht: Hashtable of TC encap actions + * @mac_ht: Hashtable of MAC address entries (for pedits) * @encap_match_ht: Hashtable of TC encap matches * @match_action_ht: Hashtable of TC match-action rules + * @lhs_rule_ht: Hashtable of TC left-hand (act ct & goto chain) rules + * @ct_zone_ht: Hashtable of TC conntrack flowtable bindings + * @ct_ht: Hashtable of TC conntrack flow entries * @neigh_ht: Hashtable of neighbour watches (&struct efx_neigh_binder) + * @recirc_ht: Hashtable of recirculation ID mappings (&struct efx_tc_recirc_id) + * @recirc_ida: Recirculation ID allocator + * @meta_ct: MAE table layout for conntrack table * @reps_mport_id: MAE port allocated for representor RX * @reps_filter_uc: VNIC filter for representor unicast RX (promisc) * @reps_filter_mc: VNIC filter for representor multicast RX (allmulti) @@ -183,9 +308,16 @@ struct efx_tc_state { struct rhashtable counter_ht; struct rhashtable counter_id_ht; struct rhashtable 
encap_ht; + struct rhashtable mac_ht; struct rhashtable encap_match_ht; struct rhashtable match_action_ht; + struct rhashtable lhs_rule_ht; + struct rhashtable ct_zone_ht; + struct rhashtable ct_ht; struct rhashtable neigh_ht; + struct rhashtable recirc_ht; + struct ida recirc_ida; + struct efx_tc_table_ct meta_ct; u32 reps_mport_id, reps_mport_vport_id; s32 reps_filter_uc, reps_filter_mc; bool flush_counters; diff --git a/drivers/net/ethernet/sfc/tc_conntrack.c b/drivers/net/ethernet/sfc/tc_conntrack.c new file mode 100644 index 000000000000..d90206f27161 --- /dev/null +++ b/drivers/net/ethernet/sfc/tc_conntrack.c @@ -0,0 +1,625 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2023, Advanced Micro Devices, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include "tc_conntrack.h" +#include "tc.h" +#include "mae.h" + +static int efx_tc_flow_block(enum tc_setup_type type, void *type_data, + void *cb_priv); + +static const struct rhashtable_params efx_tc_ct_zone_ht_params = { + .key_len = offsetof(struct efx_tc_ct_zone, linkage), + .key_offset = 0, + .head_offset = offsetof(struct efx_tc_ct_zone, linkage), +}; + +static const struct rhashtable_params efx_tc_ct_ht_params = { + .key_len = offsetof(struct efx_tc_ct_entry, linkage), + .key_offset = 0, + .head_offset = offsetof(struct efx_tc_ct_entry, linkage), +}; + +static void efx_tc_ct_zone_free(void *ptr, void *arg) +{ + struct efx_tc_ct_zone *zone = ptr; + struct efx_nic *efx = zone->efx; + + netif_err(efx, drv, efx->net_dev, + "tc ct_zone %u still present at teardown, removing\n", + zone->zone); + + nf_flow_table_offload_del_cb(zone->nf_ft, efx_tc_flow_block, zone); + kfree(zone); +} + +static void efx_tc_ct_free(void *ptr, void *arg) +{ + struct efx_tc_ct_entry *conn = ptr; + struct efx_nic *efx = arg; + + netif_err(efx, drv, efx->net_dev, + "tc ct_entry %lx still present at teardown\n", + conn->cookie); + + /* We can release the counter, but we can't remove the CT itself + * from hardware because the table meta is already gone. + */ + efx_tc_flower_release_counter(efx, conn->cnt); + kfree(conn); +} + +int efx_tc_init_conntrack(struct efx_nic *efx) +{ + int rc; + + rc = rhashtable_init(&efx->tc->ct_zone_ht, &efx_tc_ct_zone_ht_params); + if (rc < 0) + goto fail_ct_zone_ht; + rc = rhashtable_init(&efx->tc->ct_ht, &efx_tc_ct_ht_params); + if (rc < 0) + goto fail_ct_ht; + return 0; +fail_ct_ht: + rhashtable_destroy(&efx->tc->ct_zone_ht); +fail_ct_zone_ht: + return rc; +} + +/* Only call this in init failure teardown. + * Normal exit should fini instead as there may be entries in the table. 
+ */ +void efx_tc_destroy_conntrack(struct efx_nic *efx) +{ + rhashtable_destroy(&efx->tc->ct_ht); + rhashtable_destroy(&efx->tc->ct_zone_ht); +} + +void efx_tc_fini_conntrack(struct efx_nic *efx) +{ + rhashtable_free_and_destroy(&efx->tc->ct_zone_ht, efx_tc_ct_zone_free, NULL); + rhashtable_free_and_destroy(&efx->tc->ct_ht, efx_tc_ct_free, efx); +} + +#define EFX_NF_TCP_FLAG(flg) cpu_to_be16(be32_to_cpu(TCP_FLAG_##flg) >> 16) + +static int efx_tc_ct_parse_match(struct efx_nic *efx, struct flow_rule *fr, + struct efx_tc_ct_entry *conn) +{ + struct flow_dissector *dissector = fr->match.dissector; + unsigned char ipv = 0; + bool tcp = false; + + if (flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control fm; + + flow_rule_match_control(fr, &fm); + if (IS_ALL_ONES(fm.mask->addr_type)) + switch (fm.key->addr_type) { + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + ipv = 4; + break; + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: + ipv = 6; + break; + default: + break; + } + } + + if (!ipv) { + netif_dbg(efx, drv, efx->net_dev, + "Conntrack missing ipv specification\n"); + return -EOPNOTSUPP; + } + + if (dissector->used_keys & + ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_TCP) | + BIT_ULL(FLOW_DISSECTOR_KEY_META))) { + netif_dbg(efx, drv, efx->net_dev, + "Unsupported conntrack keys %#llx\n", + dissector->used_keys); + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic fm; + + flow_rule_match_basic(fr, &fm); + if (!IS_ALL_ONES(fm.mask->n_proto)) { + netif_dbg(efx, drv, efx->net_dev, + "Conntrack eth_proto is not exact-match; mask %04x\n", + ntohs(fm.mask->n_proto)); + return -EOPNOTSUPP; + } + conn->eth_proto = fm.key->n_proto; + if (conn->eth_proto != (ipv == 4 ? 
htons(ETH_P_IP) + : htons(ETH_P_IPV6))) { + netif_dbg(efx, drv, efx->net_dev, + "Conntrack eth_proto is not IPv%u, is %04x\n", + ipv, ntohs(conn->eth_proto)); + return -EOPNOTSUPP; + } + if (!IS_ALL_ONES(fm.mask->ip_proto)) { + netif_dbg(efx, drv, efx->net_dev, + "Conntrack ip_proto is not exact-match; mask %02x\n", + fm.mask->ip_proto); + return -EOPNOTSUPP; + } + conn->ip_proto = fm.key->ip_proto; + switch (conn->ip_proto) { + case IPPROTO_TCP: + tcp = true; + break; + case IPPROTO_UDP: + break; + default: + netif_dbg(efx, drv, efx->net_dev, + "Conntrack ip_proto not TCP or UDP, is %02x\n", + conn->ip_proto); + return -EOPNOTSUPP; + } + } else { + netif_dbg(efx, drv, efx->net_dev, + "Conntrack missing eth_proto, ip_proto\n"); + return -EOPNOTSUPP; + } + + if (ipv == 4 && flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + struct flow_match_ipv4_addrs fm; + + flow_rule_match_ipv4_addrs(fr, &fm); + if (!IS_ALL_ONES(fm.mask->src)) { + netif_dbg(efx, drv, efx->net_dev, + "Conntrack ipv4.src is not exact-match; mask %08x\n", + ntohl(fm.mask->src)); + return -EOPNOTSUPP; + } + conn->src_ip = fm.key->src; + if (!IS_ALL_ONES(fm.mask->dst)) { + netif_dbg(efx, drv, efx->net_dev, + "Conntrack ipv4.dst is not exact-match; mask %08x\n", + ntohl(fm.mask->dst)); + return -EOPNOTSUPP; + } + conn->dst_ip = fm.key->dst; + } else if (ipv == 6 && flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + struct flow_match_ipv6_addrs fm; + + flow_rule_match_ipv6_addrs(fr, &fm); + if (!efx_ipv6_addr_all_ones(&fm.mask->src)) { + netif_dbg(efx, drv, efx->net_dev, + "Conntrack ipv6.src is not exact-match; mask %pI6\n", + &fm.mask->src); + return -EOPNOTSUPP; + } + conn->src_ip6 = fm.key->src; + if (!efx_ipv6_addr_all_ones(&fm.mask->dst)) { + netif_dbg(efx, drv, efx->net_dev, + "Conntrack ipv6.dst is not exact-match; mask %pI6\n", + &fm.mask->dst); + return -EOPNOTSUPP; + } + conn->dst_ip6 = fm.key->dst; + } else { + netif_dbg(efx, drv, efx->net_dev, + "Conntrack missing IPv%u addrs\n", ipv); + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports fm; + + flow_rule_match_ports(fr, &fm); + if (!IS_ALL_ONES(fm.mask->src)) { + netif_dbg(efx, drv, efx->net_dev, + "Conntrack ports.src is not exact-match; mask %04x\n", + ntohs(fm.mask->src)); + return -EOPNOTSUPP; + } + conn->l4_sport = fm.key->src; + if (!IS_ALL_ONES(fm.mask->dst)) { + netif_dbg(efx, drv, efx->net_dev, + "Conntrack ports.dst is not exact-match; mask %04x\n", + ntohs(fm.mask->dst)); + return -EOPNOTSUPP; + } + conn->l4_dport = fm.key->dst; + } else { + netif_dbg(efx, drv, efx->net_dev, "Conntrack missing L4 ports\n"); + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_TCP)) { + __be16 tcp_interesting_flags; + struct flow_match_tcp fm; + + if (!tcp) { + netif_dbg(efx, drv, efx->net_dev, + "Conntrack matching on TCP keys but ipproto is not tcp\n"); + return -EOPNOTSUPP; + } + flow_rule_match_tcp(fr, &fm); + tcp_interesting_flags = EFX_NF_TCP_FLAG(SYN) | + EFX_NF_TCP_FLAG(RST) | + EFX_NF_TCP_FLAG(FIN); + /* If any of the tcp_interesting_flags is set, we always + * inhibit CT lookup in LHS (so SW can update CT table). 
+ */ + if (fm.key->flags & tcp_interesting_flags) { + netif_dbg(efx, drv, efx->net_dev, + "Unsupported conntrack tcp.flags %04x/%04x\n", + ntohs(fm.key->flags), ntohs(fm.mask->flags)); + return -EOPNOTSUPP; + } + /* Other TCP flags cannot be filtered at CT */ + if (fm.mask->flags & ~tcp_interesting_flags) { + netif_dbg(efx, drv, efx->net_dev, + "Unsupported conntrack tcp.flags %04x/%04x\n", + ntohs(fm.key->flags), ntohs(fm.mask->flags)); + return -EOPNOTSUPP; + } + } + + return 0; +} + +/** + * struct efx_tc_ct_mangler_state - tracks which fields have been pedited + * + * @ipv4: IP source or destination addr has been set + * @tcpudp: TCP/UDP source or destination port has been set + */ +struct efx_tc_ct_mangler_state { + u8 ipv4:1; + u8 tcpudp:1; +}; + +static int efx_tc_ct_mangle(struct efx_nic *efx, struct efx_tc_ct_entry *conn, + const struct flow_action_entry *fa, + struct efx_tc_ct_mangler_state *mung) +{ + /* Is this the first mangle we've processed for this rule? */ + bool first = !(mung->ipv4 || mung->tcpudp); + bool dnat = false; + + switch (fa->mangle.htype) { + case FLOW_ACT_MANGLE_HDR_TYPE_IP4: + switch (fa->mangle.offset) { + case offsetof(struct iphdr, daddr): + dnat = true; + fallthrough; + case offsetof(struct iphdr, saddr): + if (fa->mangle.mask) + return -EOPNOTSUPP; + conn->nat_ip = htonl(fa->mangle.val); + mung->ipv4 = 1; + break; + default: + return -EOPNOTSUPP; + } + break; + case FLOW_ACT_MANGLE_HDR_TYPE_TCP: + case FLOW_ACT_MANGLE_HDR_TYPE_UDP: + /* Both struct tcphdr and struct udphdr start with + * __be16 source; + * __be16 dest; + * so we can use the same code for both. + */ + switch (fa->mangle.offset) { + case offsetof(struct tcphdr, dest): + BUILD_BUG_ON(offsetof(struct tcphdr, dest) != + offsetof(struct udphdr, dest)); + dnat = true; + fallthrough; + case offsetof(struct tcphdr, source): + BUILD_BUG_ON(offsetof(struct tcphdr, source) != + offsetof(struct udphdr, source)); + if (~fa->mangle.mask != 0xffff) + return -EOPNOTSUPP; + conn->l4_natport = htons(fa->mangle.val); + mung->tcpudp = 1; + break; + default: + return -EOPNOTSUPP; + } + break; + default: + return -EOPNOTSUPP; + } + /* first mangle tells us whether this is SNAT or DNAT; + * subsequent mangles must match that + */ + if (first) + conn->dnat = dnat; + else if (conn->dnat != dnat) + return -EOPNOTSUPP; + return 0; +} + +static int efx_tc_ct_replace(struct efx_tc_ct_zone *ct_zone, + struct flow_cls_offload *tc) +{ + struct flow_rule *fr = flow_cls_offload_flow_rule(tc); + struct efx_tc_ct_mangler_state mung = {}; + struct efx_tc_ct_entry *conn, *old; + struct efx_nic *efx = ct_zone->efx; + const struct flow_action_entry *fa; + struct efx_tc_counter *cnt; + int rc, i; + + if (WARN_ON(!efx->tc)) + return -ENETDOWN; + if (WARN_ON(!efx->tc->up)) + return -ENETDOWN; + + conn = kzalloc(sizeof(*conn), GFP_USER); + if (!conn) + return -ENOMEM; + conn->cookie = tc->cookie; + old = rhashtable_lookup_get_insert_fast(&efx->tc->ct_ht, + &conn->linkage, + efx_tc_ct_ht_params); + if (IS_ERR(old)) { + rc = PTR_ERR(old); + goto release; + } else if (old) { + netif_dbg(efx, drv, efx->net_dev, + "Already offloaded conntrack (cookie %lx)\n", tc->cookie); + rc = -EEXIST; + goto release; + } + + /* Parse match */ + conn->zone = ct_zone; + rc = efx_tc_ct_parse_match(efx, fr, conn); + if (rc) + goto release; + + /* Parse actions */ + flow_action_for_each(i, fa, &fr->action) { + switch (fa->id) { + case FLOW_ACTION_CT_METADATA: + conn->mark = fa->ct_metadata.mark; + if (memchr_inv(fa->ct_metadata.labels, 0, 
sizeof(fa->ct_metadata.labels))) { + netif_dbg(efx, drv, efx->net_dev, + "Setting CT label not supported\n"); + rc = -EOPNOTSUPP; + goto release; + } + break; + case FLOW_ACTION_MANGLE: + if (conn->eth_proto != htons(ETH_P_IP)) { + netif_dbg(efx, drv, efx->net_dev, + "NAT only supported for IPv4\n"); + rc = -EOPNOTSUPP; + goto release; + } + rc = efx_tc_ct_mangle(efx, conn, fa, &mung); + if (rc) + goto release; + break; + default: + netif_dbg(efx, drv, efx->net_dev, + "Unhandled action %u for conntrack\n", fa->id); + rc = -EOPNOTSUPP; + goto release; + } + } + + /* fill in defaults for unmangled values */ + if (!mung.ipv4) + conn->nat_ip = conn->dnat ? conn->dst_ip : conn->src_ip; + if (!mung.tcpudp) + conn->l4_natport = conn->dnat ? conn->l4_dport : conn->l4_sport; + + cnt = efx_tc_flower_allocate_counter(efx, EFX_TC_COUNTER_TYPE_CT); + if (IS_ERR(cnt)) { + rc = PTR_ERR(cnt); + goto release; + } + conn->cnt = cnt; + + rc = efx_mae_insert_ct(efx, conn); + if (rc) { + netif_dbg(efx, drv, efx->net_dev, + "Failed to insert conntrack, %d\n", rc); + goto release; + } + mutex_lock(&ct_zone->mutex); + list_add_tail(&conn->list, &ct_zone->cts); + mutex_unlock(&ct_zone->mutex); + return 0; +release: + if (conn->cnt) + efx_tc_flower_release_counter(efx, conn->cnt); + if (!old) + rhashtable_remove_fast(&efx->tc->ct_ht, &conn->linkage, + efx_tc_ct_ht_params); + kfree(conn); + return rc; +} + +/* Caller must follow with efx_tc_ct_remove_finish() after RCU grace period! */ +static void efx_tc_ct_remove(struct efx_nic *efx, struct efx_tc_ct_entry *conn) +{ + int rc; + + /* Remove it from HW */ + rc = efx_mae_remove_ct(efx, conn); + /* Delete it from SW */ + rhashtable_remove_fast(&efx->tc->ct_ht, &conn->linkage, + efx_tc_ct_ht_params); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "Failed to remove conntrack %lx from hw, rc %d\n", + conn->cookie, rc); + } else { + netif_dbg(efx, drv, efx->net_dev, "Removed conntrack %lx\n", + conn->cookie); + } +} + +static void efx_tc_ct_remove_finish(struct efx_nic *efx, struct efx_tc_ct_entry *conn) +{ + /* Remove related CT counter. This is delayed after the conn object we + * are working with has been successfully removed. This protects the + * counter from being used-after-free inside efx_tc_ct_stats. 
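[Editorial note: the comment above describes a two-phase teardown — efx_tc_ct_remove() unpublishes the entry, and only after an RCU grace period does efx_tc_ct_remove_finish() release the counter and free the memory. A generic sketch of this deferred-free shape, with hypothetical names standing in for the driver's types:

	#include <linux/rcupdate.h>
	#include <linux/rhashtable.h>
	#include <linux/slab.h>

	struct entry {			/* hypothetical; looked up under RCU */
		struct rhash_head linkage;
	};

	static void entry_destroy(struct rhashtable *ht, struct entry *e,
				  const struct rhashtable_params params)
	{
		/* Phase 1: unpublish so new RCU readers cannot find it. */
		rhashtable_remove_fast(ht, &e->linkage, params);
		/* Let every reader that may already hold 'e' drain out. */
		synchronize_rcu();
		/* Phase 2: no references remain; safe to free. */
		kfree(e);
	}
]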
+ */ + efx_tc_flower_release_counter(efx, conn->cnt); + kfree(conn); +} + +static int efx_tc_ct_destroy(struct efx_tc_ct_zone *ct_zone, + struct flow_cls_offload *tc) +{ + struct efx_nic *efx = ct_zone->efx; + struct efx_tc_ct_entry *conn; + + conn = rhashtable_lookup_fast(&efx->tc->ct_ht, &tc->cookie, + efx_tc_ct_ht_params); + if (!conn) { + netif_warn(efx, drv, efx->net_dev, + "Conntrack %lx not found to remove\n", tc->cookie); + return -ENOENT; + } + + mutex_lock(&ct_zone->mutex); + list_del(&conn->list); + efx_tc_ct_remove(efx, conn); + mutex_unlock(&ct_zone->mutex); + synchronize_rcu(); + efx_tc_ct_remove_finish(efx, conn); + return 0; +} + +static int efx_tc_ct_stats(struct efx_tc_ct_zone *ct_zone, + struct flow_cls_offload *tc) +{ + struct efx_nic *efx = ct_zone->efx; + struct efx_tc_ct_entry *conn; + struct efx_tc_counter *cnt; + + rcu_read_lock(); + conn = rhashtable_lookup_fast(&efx->tc->ct_ht, &tc->cookie, + efx_tc_ct_ht_params); + if (!conn) { + netif_warn(efx, drv, efx->net_dev, + "Conntrack %lx not found for stats\n", tc->cookie); + rcu_read_unlock(); + return -ENOENT; + } + + cnt = conn->cnt; + spin_lock_bh(&cnt->lock); + /* Report only last use */ + flow_stats_update(&tc->stats, 0, 0, 0, cnt->touched, + FLOW_ACTION_HW_STATS_DELAYED); + spin_unlock_bh(&cnt->lock); + rcu_read_unlock(); + + return 0; +} + +static int efx_tc_flow_block(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct flow_cls_offload *tcb = type_data; + struct efx_tc_ct_zone *ct_zone = cb_priv; + + if (type != TC_SETUP_CLSFLOWER) + return -EOPNOTSUPP; + + switch (tcb->command) { + case FLOW_CLS_REPLACE: + return efx_tc_ct_replace(ct_zone, tcb); + case FLOW_CLS_DESTROY: + return efx_tc_ct_destroy(ct_zone, tcb); + case FLOW_CLS_STATS: + return efx_tc_ct_stats(ct_zone, tcb); + default: + break; + } + + return -EOPNOTSUPP; +} + +struct efx_tc_ct_zone *efx_tc_ct_register_zone(struct efx_nic *efx, u16 zone, + struct nf_flowtable *ct_ft) +{ + struct efx_tc_ct_zone *ct_zone, *old; + int rc; + + ct_zone = kzalloc(sizeof(*ct_zone), GFP_USER); + if (!ct_zone) + return ERR_PTR(-ENOMEM); + ct_zone->zone = zone; + old = rhashtable_lookup_get_insert_fast(&efx->tc->ct_zone_ht, + &ct_zone->linkage, + efx_tc_ct_zone_ht_params); + if (old) { + /* don't need our new entry */ + kfree(ct_zone); + if (IS_ERR(old)) /* oh dear, it's actually an error */ + return ERR_CAST(old); + if (!refcount_inc_not_zero(&old->ref)) + return ERR_PTR(-EAGAIN); + /* existing entry found */ + WARN_ON_ONCE(old->nf_ft != ct_ft); + netif_dbg(efx, drv, efx->net_dev, + "Found existing ct_zone for %u\n", zone); + return old; + } + ct_zone->nf_ft = ct_ft; + ct_zone->efx = efx; + INIT_LIST_HEAD(&ct_zone->cts); + mutex_init(&ct_zone->mutex); + rc = nf_flow_table_offload_add_cb(ct_ft, efx_tc_flow_block, ct_zone); + netif_dbg(efx, drv, efx->net_dev, "Adding new ct_zone for %u, rc %d\n", + zone, rc); + if (rc < 0) + goto fail; + refcount_set(&ct_zone->ref, 1); + return ct_zone; +fail: + rhashtable_remove_fast(&efx->tc->ct_zone_ht, &ct_zone->linkage, + efx_tc_ct_zone_ht_params); + kfree(ct_zone); + return ERR_PTR(rc); +} + +void efx_tc_ct_unregister_zone(struct efx_nic *efx, + struct efx_tc_ct_zone *ct_zone) +{ + struct efx_tc_ct_entry *conn, *next; + + if (!refcount_dec_and_test(&ct_zone->ref)) + return; /* still in use */ + nf_flow_table_offload_del_cb(ct_zone->nf_ft, efx_tc_flow_block, ct_zone); + rhashtable_remove_fast(&efx->tc->ct_zone_ht, &ct_zone->linkage, + efx_tc_ct_zone_ht_params); + mutex_lock(&ct_zone->mutex); + 
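[Editorial note: efx_tc_ct_register_zone() above — like the tc_counters and tc_encap_actions hunks later in this diff — uses a get-or-insert idiom around rhashtable_lookup_get_insert_fast(): optimistically insert a new entry, and if a live one already exists, drop the new entry and take a reference on the old. A condensed sketch with placeholder names; the real code also registers the flow-block callback before publishing the refcount:

	#include <linux/err.h>
	#include <linux/refcount.h>
	#include <linux/rhashtable.h>
	#include <linux/slab.h>

	struct zone_entry {		/* placeholder for efx_tc_ct_zone */
		u16 zone;
		struct rhash_head linkage;
		refcount_t ref;
	};

	static struct zone_entry *zone_get(struct rhashtable *ht,
					   const struct rhashtable_params params,
					   u16 zone)
	{
		struct zone_entry *new, *old;

		new = kzalloc(sizeof(*new), GFP_USER);
		if (!new)
			return ERR_PTR(-ENOMEM);
		new->zone = zone;
		old = rhashtable_lookup_get_insert_fast(ht, &new->linkage, params);
		if (old) {
			kfree(new);		/* lost the race, or lookup error */
			if (IS_ERR(old))
				return ERR_CAST(old);
			if (!refcount_inc_not_zero(&old->ref))
				return ERR_PTR(-EAGAIN); /* caught mid-teardown */
			return old;		/* reuse existing, ref taken */
		}
		refcount_set(&new->ref, 1);
		return new;
	}
]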
list_for_each_entry(conn, &ct_zone->cts, list) + efx_tc_ct_remove(efx, conn); + synchronize_rcu(); + /* need to use _safe because efx_tc_ct_remove_finish() frees conn */ + list_for_each_entry_safe(conn, next, &ct_zone->cts, list) + efx_tc_ct_remove_finish(efx, conn); + mutex_unlock(&ct_zone->mutex); + mutex_destroy(&ct_zone->mutex); + netif_dbg(efx, drv, efx->net_dev, "Removed ct_zone for %u\n", + ct_zone->zone); + kfree(ct_zone); +} diff --git a/drivers/net/ethernet/sfc/tc_conntrack.h b/drivers/net/ethernet/sfc/tc_conntrack.h new file mode 100644 index 000000000000..e75c8eb1965d --- /dev/null +++ b/drivers/net/ethernet/sfc/tc_conntrack.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2023, Advanced Micro Devices, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_TC_CONNTRACK_H +#define EFX_TC_CONNTRACK_H +#include "net_driver.h" + +#if IS_ENABLED(CONFIG_SFC_SRIOV) +#include <linux/refcount.h> +#include <net/netfilter/nf_flow_table.h> + +struct efx_tc_ct_zone { + u16 zone; + struct rhash_head linkage; + refcount_t ref; + struct nf_flowtable *nf_ft; + struct efx_nic *efx; + struct mutex mutex; /* protects cts list */ + struct list_head cts; /* list of efx_tc_ct_entry in this zone */ +}; + +/* create/uncreate/teardown hashtables */ +int efx_tc_init_conntrack(struct efx_nic *efx); +void efx_tc_destroy_conntrack(struct efx_nic *efx); +void efx_tc_fini_conntrack(struct efx_nic *efx); + +struct efx_tc_ct_zone *efx_tc_ct_register_zone(struct efx_nic *efx, u16 zone, + struct nf_flowtable *ct_ft); +void efx_tc_ct_unregister_zone(struct efx_nic *efx, + struct efx_tc_ct_zone *ct_zone); + +struct efx_tc_ct_entry { + unsigned long cookie; + struct rhash_head linkage; + __be16 eth_proto; + u8 ip_proto; + bool dnat; + __be32 src_ip, dst_ip, nat_ip; + struct in6_addr src_ip6, dst_ip6; + __be16 l4_sport, l4_dport, l4_natport; /* Ports (UDP, TCP) */ + struct efx_tc_ct_zone *zone; + u32 mark; + struct efx_tc_counter *cnt; + struct list_head list; /* entry on zone->cts */ +}; + +#endif /* CONFIG_SFC_SRIOV */ +#endif /* EFX_TC_CONNTRACK_H */ diff --git a/drivers/net/ethernet/sfc/tc_counters.c b/drivers/net/ethernet/sfc/tc_counters.c index 979f49058a0c..c44088424323 100644 --- a/drivers/net/ethernet/sfc/tc_counters.c +++ b/drivers/net/ethernet/sfc/tc_counters.c @@ -129,8 +129,8 @@ static void efx_tc_counter_work(struct work_struct *work) /* Counter allocation */ -static struct efx_tc_counter *efx_tc_flower_allocate_counter(struct efx_nic *efx, - int type) +struct efx_tc_counter *efx_tc_flower_allocate_counter(struct efx_nic *efx, + int type) { struct efx_tc_counter *cnt; int rc, rc2; @@ -169,8 +169,8 @@ fail1: return ERR_PTR(rc > 0 ? 
-EIO : rc); } -static void efx_tc_flower_release_counter(struct efx_nic *efx, - struct efx_tc_counter *cnt) +void efx_tc_flower_release_counter(struct efx_nic *efx, + struct efx_tc_counter *cnt) { int rc; @@ -236,6 +236,8 @@ struct efx_tc_counter_index *efx_tc_flower_get_counter_index( if (old) { /* don't need our new entry */ kfree(ctr); + if (IS_ERR(old)) /* oh dear, it's actually an error */ + return ERR_CAST(old); if (!refcount_inc_not_zero(&old->ref)) return ERR_PTR(-EAGAIN); /* existing entry found */ diff --git a/drivers/net/ethernet/sfc/tc_counters.h b/drivers/net/ethernet/sfc/tc_counters.h index 41e57f34b763..f18d71c13600 100644 --- a/drivers/net/ethernet/sfc/tc_counters.h +++ b/drivers/net/ethernet/sfc/tc_counters.h @@ -49,6 +49,10 @@ int efx_tc_init_counters(struct efx_nic *efx); void efx_tc_destroy_counters(struct efx_nic *efx); void efx_tc_fini_counters(struct efx_nic *efx); +struct efx_tc_counter *efx_tc_flower_allocate_counter(struct efx_nic *efx, + int type); +void efx_tc_flower_release_counter(struct efx_nic *efx, + struct efx_tc_counter *cnt); struct efx_tc_counter_index *efx_tc_flower_get_counter_index( struct efx_nic *efx, unsigned long cookie, enum efx_tc_counter_type type); diff --git a/drivers/net/ethernet/sfc/tc_encap_actions.c b/drivers/net/ethernet/sfc/tc_encap_actions.c index 7e8bcdb222ad..87443f9dfd22 100644 --- a/drivers/net/ethernet/sfc/tc_encap_actions.c +++ b/drivers/net/ethernet/sfc/tc_encap_actions.c @@ -132,6 +132,8 @@ static int efx_bind_neigh(struct efx_nic *efx, /* don't need our new entry */ put_net_track(neigh->net, &neigh->ns_tracker); kfree(neigh); + if (IS_ERR(old)) /* oh dear, it's actually an error */ + return PTR_ERR(old); if (!refcount_inc_not_zero(&old->ref)) return -EAGAIN; /* existing entry found, ref taken */ @@ -640,6 +642,8 @@ struct efx_tc_encap_action *efx_tc_flower_create_encap_md( if (old) { /* don't need our new entry */ kfree(encap); + if (IS_ERR(old)) /* oh dear, it's actually an error */ + return ERR_CAST(old); if (!refcount_inc_not_zero(&old->ref)) return ERR_PTR(-EAGAIN); /* existing entry found, ref taken */ diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 4ed4082836a9..fe2d476028e7 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -517,13 +517,8 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, unsigned index, type; EFX_WARN_ON_PARANOID(!netif_device_present(net_dev)); - index = skb_get_queue_mapping(skb); type = efx_tx_csum_type_skb(skb); - if (index >= efx->n_tx_channels) { - index -= efx->n_tx_channels; - type |= EFX_TXQ_TYPE_HIGHPRI; - } /* PTP "event" packet */ if (unlikely(efx_xmit_with_hwtstamp(skb)) && @@ -603,43 +598,5 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) /* Must be inverse of queue lookup in efx_hard_start_xmit() */ tx_queue->core_txq = netdev_get_tx_queue(efx->net_dev, - tx_queue->channel->channel + - ((tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ? 
- efx->n_tx_channels : 0)); -} - -int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, - void *type_data) -{ - struct efx_nic *efx = efx_netdev_priv(net_dev); - struct tc_mqprio_qopt *mqprio = type_data; - unsigned tc, num_tc; - - if (type != TC_SETUP_QDISC_MQPRIO) - return -EOPNOTSUPP; - - /* Only Siena supported highpri queues */ - if (efx_nic_rev(efx) > EFX_REV_SIENA_A0) - return -EOPNOTSUPP; - - num_tc = mqprio->num_tc; - - if (num_tc > EFX_MAX_TX_TC) - return -EINVAL; - - mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - - if (num_tc == net_dev->num_tc) - return 0; - - for (tc = 0; tc < num_tc; tc++) { - net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels; - net_dev->tc_to_txq[tc].count = efx->n_tx_channels; - } - - net_dev->num_tc = num_tc; - - return netif_set_real_num_tx_queues(net_dev, - max_t(int, num_tc, 1) * - efx->n_tx_channels); + tx_queue->channel->channel); } diff --git a/drivers/net/ethernet/sfc/tx_tso.c b/drivers/net/ethernet/sfc/tx_tso.c index d381d8164f07..64a6768f75ea 100644 --- a/drivers/net/ethernet/sfc/tx_tso.c +++ b/drivers/net/ethernet/sfc/tx_tso.c @@ -85,7 +85,7 @@ static inline void prefetch_ptr(struct efx_tx_queue *tx_queue) prefetch(ptr); prefetch(ptr + 0x80); - ptr = (char *) (((efx_qword_t *)tx_queue->txd.buf.addr) + insert_ptr); + ptr = (char *)(((efx_qword_t *)tx_queue->txd.addr) + insert_ptr); prefetch(ptr); prefetch(ptr + 0x80); } diff --git a/drivers/net/ethernet/sfc/vfdi.h b/drivers/net/ethernet/sfc/vfdi.h deleted file mode 100644 index 480b872eb4d1..000000000000 --- a/drivers/net/ethernet/sfc/vfdi.h +++ /dev/null @@ -1,252 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/**************************************************************************** - * Driver for Solarflare network controllers and boards - * Copyright 2010-2012 Solarflare Communications Inc. - */ -#ifndef _VFDI_H -#define _VFDI_H - -/** - * DOC: Virtual Function Driver Interface - * - * This file contains software structures used to form a two way - * communication channel between the VF driver and the PF driver, - * named Virtual Function Driver Interface (VFDI). - * - * For the purposes of VFDI, a page is a memory region with size and - * alignment of 4K. All addresses are DMA addresses to be used within - * the domain of the relevant VF. - * - * The only hardware-defined channels for a VF driver to communicate - * with the PF driver are the event mailboxes (%FR_CZ_USR_EV - * registers). Writing to these registers generates an event with - * EV_CODE = EV_CODE_USR_EV, USER_QID set to the index of the mailbox - * and USER_EV_REG_VALUE set to the value written. The PF driver may - * direct or disable delivery of these events by setting - * %FR_CZ_USR_EV_CFG. - * - * The PF driver can send arbitrary events to arbitrary event queues. - * However, for consistency, VFDI events from the PF are defined to - * follow the same form and be sent to the first event queue assigned - * to the VF while that queue is enabled by the VF driver. - * - * The general form of the variable bits of VFDI events is: - * - * 0 16 24 31 - * | DATA | TYPE | SEQ | - * - * SEQ is a sequence number which should be incremented by 1 (modulo - * 256) for each event. The sequence numbers used in each direction - * are independent. 
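[Editorial note: the removed VFDI documentation goes on below to define the request scheme — the VF hands the PF the 64-bit DMA address of a struct vfdi_req as four consecutive events, 16 address bits per event, each tagged with a type and a running sequence number. As a concrete illustration of that encoding (the helper is hypothetical; field positions follow the VFDI_EV_* definitions being deleted):

	#include <linux/types.h>

	#define VFDI_EV_SEQ_LBN		24	/* 8-bit sequence number */
	#define VFDI_EV_TYPE_LBN	16	/* 8-bit event type */
	#define VFDI_EV_TYPE_REQ_WORD0	0	/* WORD1..WORD3 follow */

	/* Illustrative only: derive the four event words that would carry
	 * the request address 'addr' starting at sequence number 'seq'.
	 */
	static void vfdi_encode_req(u64 addr, u8 seq, u32 ev[4])
	{
		int w;

		for (w = 0; w < 4; w++)
			ev[w] = ((u32)(u8)(seq + w) << VFDI_EV_SEQ_LBN) |
				((u32)(VFDI_EV_TYPE_REQ_WORD0 + w) << VFDI_EV_TYPE_LBN) |
				(u32)((addr >> (16 * w)) & 0xffff);
		/* the (u8) cast keeps SEQ arithmetic modulo 256, as specified */
	}
]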
- * - * The VF submits requests of type &struct vfdi_req by sending the - * address of the request (ADDR) in a series of 4 events: - * - * 0 16 24 31 - * | ADDR[0:15] | VFDI_EV_TYPE_REQ_WORD0 | SEQ | - * | ADDR[16:31] | VFDI_EV_TYPE_REQ_WORD1 | SEQ+1 | - * | ADDR[32:47] | VFDI_EV_TYPE_REQ_WORD2 | SEQ+2 | - * | ADDR[48:63] | VFDI_EV_TYPE_REQ_WORD3 | SEQ+3 | - * - * The address must be page-aligned. After receiving such a valid - * series of events, the PF driver will attempt to read the request - * and write a response to the same address. In case of an invalid - * sequence of events or a DMA error, there will be no response. - * - * The VF driver may request that the PF driver writes status - * information into its domain asynchronously. After writing the - * status, the PF driver will send an event of the form: - * - * 0 16 24 31 - * | reserved | VFDI_EV_TYPE_STATUS | SEQ | - * - * In case the VF must be reset for any reason, the PF driver will - * send an event of the form: - * - * 0 16 24 31 - * | reserved | VFDI_EV_TYPE_RESET | SEQ | - * - * It is then the responsibility of the VF driver to request - * reinitialisation of its queues. - */ -#define VFDI_EV_SEQ_LBN 24 -#define VFDI_EV_SEQ_WIDTH 8 -#define VFDI_EV_TYPE_LBN 16 -#define VFDI_EV_TYPE_WIDTH 8 -#define VFDI_EV_TYPE_REQ_WORD0 0 -#define VFDI_EV_TYPE_REQ_WORD1 1 -#define VFDI_EV_TYPE_REQ_WORD2 2 -#define VFDI_EV_TYPE_REQ_WORD3 3 -#define VFDI_EV_TYPE_STATUS 4 -#define VFDI_EV_TYPE_RESET 5 -#define VFDI_EV_DATA_LBN 0 -#define VFDI_EV_DATA_WIDTH 16 - -struct vfdi_endpoint { - u8 mac_addr[ETH_ALEN]; - __be16 tci; -}; - -/** - * enum vfdi_op - VFDI operation enumeration - * @VFDI_OP_RESPONSE: Indicates a response to the request. - * @VFDI_OP_INIT_EVQ: Initialize SRAM entries and initialize an EVQ. - * @VFDI_OP_INIT_RXQ: Initialize SRAM entries and initialize an RXQ. - * @VFDI_OP_INIT_TXQ: Initialize SRAM entries and initialize a TXQ. - * @VFDI_OP_FINI_ALL_QUEUES: Flush all queues, finalize all queues, then - * finalize the SRAM entries. - * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targeting the given RXQ. - * @VFDI_OP_REMOVE_ALL_FILTERS: Remove all filters. - * @VFDI_OP_SET_STATUS_PAGE: Set the DMA page(s) used for status updates - * from PF and write the initial status. - * @VFDI_OP_CLEAR_STATUS_PAGE: Clear the DMA page(s) used for status - * updates from PF. - */ -enum vfdi_op { - VFDI_OP_RESPONSE = 0, - VFDI_OP_INIT_EVQ = 1, - VFDI_OP_INIT_RXQ = 2, - VFDI_OP_INIT_TXQ = 3, - VFDI_OP_FINI_ALL_QUEUES = 4, - VFDI_OP_INSERT_FILTER = 5, - VFDI_OP_REMOVE_ALL_FILTERS = 6, - VFDI_OP_SET_STATUS_PAGE = 7, - VFDI_OP_CLEAR_STATUS_PAGE = 8, - VFDI_OP_LIMIT, -}; - -/* Response codes for VFDI operations. Other values may be used in future. */ -#define VFDI_RC_SUCCESS 0 -#define VFDI_RC_ENOMEM (-12) -#define VFDI_RC_EINVAL (-22) -#define VFDI_RC_EOPNOTSUPP (-95) -#define VFDI_RC_ETIMEDOUT (-110) - -/** - * struct vfdi_req - Request from VF driver to PF driver - * @op: Operation code or response indicator, taken from &enum vfdi_op. - * @rc: Response code. Set to 0 on success or a negative error code on failure. - * @u.init_evq.index: Index of event queue to create. - * @u.init_evq.buf_count: Number of 4k buffers backing event queue. - * @u.init_evq.addr: Array of length %u.init_evq.buf_count containing DMA - * address of each page backing the event queue. - * @u.init_rxq.index: Index of receive queue to create. - * @u.init_rxq.buf_count: Number of 4k buffers backing receive queue. 
- * @u.init_rxq.evq: Instance of event queue to target receive events at. - * @u.init_rxq.label: Label used in receive events. - * @u.init_rxq.flags: Unused. - * @u.init_rxq.addr: Array of length %u.init_rxq.buf_count containing DMA - * address of each page backing the receive queue. - * @u.init_txq.index: Index of transmit queue to create. - * @u.init_txq.buf_count: Number of 4k buffers backing transmit queue. - * @u.init_txq.evq: Instance of event queue to target transmit completion - * events at. - * @u.init_txq.label: Label used in transmit completion events. - * @u.init_txq.flags: Checksum offload flags. - * @u.init_txq.addr: Array of length %u.init_txq.buf_count containing DMA - * address of each page backing the transmit queue. - * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targeting - * all traffic at this receive queue. - * @u.mac_filter.flags: MAC filter flags. - * @u.set_status_page.dma_addr: Base address for the &struct vfdi_status. - * This address must be page-aligned and the PF may write up to a - * whole page (allowing for extension of the structure). - * @u.set_status_page.peer_page_count: Number of additional pages the VF - * has provided into which peer addresses may be DMAd. - * @u.set_status_page.peer_page_addr: Array of DMA addresses of pages. - * If the number of peers exceeds 256, then the VF must provide - * additional pages in this array. The PF will then DMA up to - * 512 vfdi_endpoint structures into each page. These addresses - * must be page-aligned. - */ -struct vfdi_req { - u32 op; - u32 reserved1; - s32 rc; - u32 reserved2; - union { - struct { - u32 index; - u32 buf_count; - u64 addr[]; - } init_evq; - struct { - u32 index; - u32 buf_count; - u32 evq; - u32 label; - u32 flags; -#define VFDI_RXQ_FLAG_SCATTER_EN 1 - u32 reserved; - u64 addr[]; - } init_rxq; - struct { - u32 index; - u32 buf_count; - u32 evq; - u32 label; - u32 flags; -#define VFDI_TXQ_FLAG_IP_CSUM_DIS 1 -#define VFDI_TXQ_FLAG_TCPUDP_CSUM_DIS 2 - u32 reserved; - u64 addr[]; - } init_txq; - struct { - u32 rxq; - u32 flags; -#define VFDI_MAC_FILTER_FLAG_RSS 1 -#define VFDI_MAC_FILTER_FLAG_SCATTER 2 - } mac_filter; - struct { - u64 dma_addr; - u64 peer_page_count; - u64 peer_page_addr[]; - } set_status_page; - } u; -}; - -/** - * struct vfdi_status - Status provided by PF driver to VF driver - * @generation_start: A generation count DMA'd to VF *before* the - * rest of the structure. - * @generation_end: A generation count DMA'd to VF *after* the - * rest of the structure. - * @version: Version of this structure; currently set to 1. Later - * versions must either be layout-compatible or only be sent to VFs - * that specifically request them. - * @length: Total length of this structure including embedded tables - * @vi_scale: log2 the number of VIs available on this VF. This quantity - * is used by the hardware for register decoding. - * @max_tx_channels: The maximum number of transmit queues the VF can use. - * @rss_rxq_count: The number of receive queues present in the shared RSS - * indirection table. - * @peer_count: Total number of peers in the complete peer list. If larger - * than ARRAY_SIZE(%peers), then the VF must provide sufficient - * additional pages each of which is filled with vfdi_endpoint structures. - * @local: The MAC address and outer VLAN tag of *this* VF - * @peers: Table of peer addresses. The @tci fields in these structures - * are currently unused and must be ignored. Additional peers are - * written into any additional pages provided by the VF. 
- * @timer_quantum_ns: Timer quantum (nominal period between timer ticks) - * for interrupt moderation timers, in nanoseconds. This member is only - * present if @length is sufficiently large. - */ -struct vfdi_status { - u32 generation_start; - u32 generation_end; - u32 version; - u32 length; - u8 vi_scale; - u8 max_tx_channels; - u8 rss_rxq_count; - u8 reserved1; - u16 peer_count; - u16 reserved2; - struct vfdi_endpoint local; - struct vfdi_endpoint peers[256]; - - /* Members below here extend version 1 of this structure */ - u32 timer_quantum_ns; -}; - -#endif diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h index 815be2d20c4b..e10e7f84958d 100644 --- a/drivers/net/ethernet/sfc/workarounds.h +++ b/drivers/net/ethernet/sfc/workarounds.h @@ -12,14 +12,7 @@ * Bug numbers are from Solarflare's Bugzilla. */ -#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0) #define EFX_WORKAROUND_EF10(efx) (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) -#define EFX_WORKAROUND_10G(efx) 1 - -/* Bit-bashed I2C reads cause performance drop */ -#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G -/* Legacy interrupt storm when interrupt fifo fills */ -#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA /* Lockup when writing event block registers at gen2/gen3 */ #define EFX_EF10_WORKAROUND_35388(efx) \ diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 8fc3f5272fa7..98d0b561a057 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -962,7 +962,7 @@ out_free: return err; } -static int ioc3eth_remove(struct platform_device *pdev) +static void ioc3eth_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct ioc3_private *ip = netdev_priv(dev); @@ -973,8 +973,6 @@ static int ioc3eth_remove(struct platform_device *pdev) unregister_netdev(dev); del_timer_sync(&ip->ioc3_timer); free_netdev(dev); - - return 0; } @@ -1275,7 +1273,7 @@ static void ioc3_set_multicast_list(struct net_device *dev) static struct platform_driver ioc3eth_driver = { .probe = ioc3eth_probe, - .remove = ioc3eth_remove, + .remove_new = ioc3eth_remove, .driver = { .name = "ioc3-eth", } diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index 6d850ea2b94c..18b6f93d875e 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c @@ -854,19 +854,17 @@ static int meth_probe(struct platform_device *pdev) return 0; } -static int meth_remove(struct platform_device *pdev) +static void meth_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); unregister_netdev(dev); free_netdev(dev); - - return 0; } static struct platform_driver meth_driver = { .probe = meth_probe, - .remove = meth_remove, + .remove_new = meth_remove, .driver = { .name = "meth", } diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 032eccf8eb42..758347616535 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -2411,7 +2411,7 @@ static int smc_drv_probe(struct platform_device *pdev) return ret; } -static int smc_drv_remove(struct platform_device *pdev) +static void smc_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct smc_local *lp = netdev_priv(ndev); @@ -2436,8 +2436,6 @@ static int smc_drv_remove(struct platform_device *pdev) release_mem_region(res->start, SMC_IO_EXTENT); free_netdev(ndev); - - 
return 0; } static int smc_drv_suspend(struct device *dev) @@ -2480,7 +2478,7 @@ static const struct dev_pm_ops smc_drv_pm_ops = { static struct platform_driver smc_driver = { .probe = smc_drv_probe, - .remove = smc_drv_remove, + .remove_new = smc_drv_remove, .driver = { .name = CARDNAME, .pm = &smc_drv_pm_ops, diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index c521ea8f94f2..46eee747c699 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h @@ -114,25 +114,6 @@ static inline void _SMC_outw_align4(u16 val, void __iomem *ioaddr, int reg, (lp)->cfg.pxa_u16_align4) -#elif defined(CONFIG_SH_SH4202_MICRODEV) - -#define SMC_CAN_USE_8BIT 0 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 0 - -#define SMC_inb(a, r) inb((a) + (r) - 0xa0000000) -#define SMC_inw(a, r) inw((a) + (r) - 0xa0000000) -#define SMC_inl(a, r) inl((a) + (r) - 0xa0000000) -#define SMC_outb(v, a, r) outb(v, (a) + (r) - 0xa0000000) -#define SMC_outw(lp, v, a, r) outw(v, (a) + (r) - 0xa0000000) -#define SMC_outl(v, a, r) outl(v, (a) + (r) - 0xa0000000) -#define SMC_insl(a, r, p, l) insl((a) + (r) - 0xa0000000, p, l) -#define SMC_outsl(a, r, p, l) outsl((a) + (r) - 0xa0000000, p, l) -#define SMC_insw(a, r, p, l) insw((a) + (r) - 0xa0000000, p, l) -#define SMC_outsw(a, r, p, l) outsw((a) + (r) - 0xa0000000, p, l) - -#define SMC_IRQ_FLAGS (0) - #elif defined(CONFIG_ATARI) #define SMC_CAN_USE_8BIT 1 diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 174dc8908b72..31cb7d0166f0 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -43,7 +43,6 @@ #include <linux/smsc911x.h> #include <linux/device.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/of_net.h> #include <linux/acpi.h> @@ -552,7 +551,7 @@ static void smsc911x_mac_write(struct smsc911x_data *pdata, /* Get a phy register */ static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx) { - struct smsc911x_data *pdata = (struct smsc911x_data *)bus->priv; + struct smsc911x_data *pdata = bus->priv; unsigned long flags; unsigned int addr; int i, reg; @@ -591,7 +590,7 @@ out: static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx, u16 val) { - struct smsc911x_data *pdata = (struct smsc911x_data *)bus->priv; + struct smsc911x_data *pdata = bus->priv; unsigned long flags; unsigned int addr; int i, reg; @@ -2315,7 +2314,7 @@ static int smsc911x_init(struct net_device *dev) return 0; } -static int smsc911x_drv_remove(struct platform_device *pdev) +static void smsc911x_drv_remove(struct platform_device *pdev) { struct net_device *dev; struct smsc911x_data *pdata; @@ -2349,8 +2348,6 @@ static int smsc911x_drv_remove(struct platform_device *pdev) free_netdev(dev); pm_runtime_disable(&pdev->dev); - - return 0; } /* standard register acces */ @@ -2669,7 +2666,7 @@ MODULE_DEVICE_TABLE(acpi, smsc911x_acpi_match); static struct platform_driver smsc911x_driver = { .probe = smsc911x_drv_probe, - .remove = smsc911x_drv_remove, + .remove_new = smsc911x_drv_remove, .driver = { .name = SMSC_CHIPNAME, .pm = SMSC911X_PM_OPS, diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index 71fbb358bb7d..e1c4a11c1f18 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -102,7 +102,7 @@ static inline void smsc9420_pci_flush_write(struct smsc9420_pdata *pd) static int smsc9420_mii_read(struct 
mii_bus *bus, int phyaddr, int regidx) { - struct smsc9420_pdata *pd = (struct smsc9420_pdata *)bus->priv; + struct smsc9420_pdata *pd = bus->priv; unsigned long flags; u32 addr; int i, reg = -EIO; @@ -140,7 +140,7 @@ out: static int smsc9420_mii_write(struct mii_bus *bus, int phyaddr, int regidx, u16 val) { - struct smsc9420_pdata *pd = (struct smsc9420_pdata *)bus->priv; + struct smsc9420_pdata *pd = bus->priv; unsigned long flags; u32 addr; int i, reg = -EIO; @@ -1144,8 +1144,7 @@ static int smsc9420_mii_init(struct net_device *dev) goto err_out_1; } pd->mii_bus->name = DRV_MDIONAME; - snprintf(pd->mii_bus->id, MII_BUS_ID_SIZE, "%x", - (pd->pdev->bus->number << 8) | pd->pdev->devfn); + snprintf(pd->mii_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(pd->pdev)); pd->mii_bus->priv = pd; pd->mii_bus->read = smsc9420_mii_read; pd->mii_bus->write = smsc9420_mii_write; diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 0dcd6a568b06..0891e9e49ecb 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -15,7 +15,7 @@ #include <linux/bpf_trace.h> #include <net/tcp.h> -#include <net/page_pool.h> +#include <net/page_pool/helpers.h> #include <net/ip6_checksum.h> #define NETSEC_REG_SOFT_RST 0x104 @@ -780,7 +780,7 @@ static void netsec_finalize_xdp_rx(struct netsec_priv *priv, u32 xdp_res, u16 pkts) { if (xdp_res & NETSEC_XDP_REDIR) - xdp_do_flush_map(); + xdp_do_flush(); if (xdp_res & NETSEC_XDP_TX) netsec_xdp_ring_tx_db(priv, pkts); @@ -2150,7 +2150,7 @@ free_ndev: return ret; } -static int netsec_remove(struct platform_device *pdev) +static void netsec_remove(struct platform_device *pdev) { struct netsec_priv *priv = platform_get_drvdata(pdev); @@ -2162,8 +2162,6 @@ static int netsec_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); free_netdev(priv->ndev); - - return 0; } #ifdef CONFIG_PM @@ -2211,7 +2209,7 @@ MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids); static struct platform_driver netsec_driver = { .probe = netsec_probe, - .remove = netsec_remove, + .remove_new = netsec_remove, .driver = { .name = "netsec", .pm = &netsec_pm_ops, diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index 492c39c08af1..eed24e67c5a6 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -15,10 +15,11 @@ #include <linux/mii.h> #include <linux/module.h> #include <linux/netdevice.h> +#include <linux/of.h> #include <linux/of_net.h> #include <linux/of_mdio.h> -#include <linux/of_platform.h> #include <linux/phy.h> +#include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/reset.h> #include <linux/types.h> @@ -1718,7 +1719,7 @@ out_del_napi: return ret; } -static int ave_remove(struct platform_device *pdev) +static void ave_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct ave_private *priv = netdev_priv(ndev); @@ -1726,8 +1727,6 @@ static int ave_remove(struct platform_device *pdev) unregister_netdev(ndev); netif_napi_del(&priv->napi_rx); netif_napi_del(&priv->napi_tx); - - return 0; } #ifdef CONFIG_PM_SLEEP @@ -1975,7 +1974,7 @@ MODULE_DEVICE_TABLE(of, of_ave_match); static struct platform_driver ave_driver = { .probe = ave_probe, - .remove = ave_remove, + .remove_new = ave_remove, .driver = { .name = "ave", .pm = AVE_PM_OPS, diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 
5583f0b055ec..85dcda51df05 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -121,17 +121,6 @@ config DWMAC_MESON the stmmac device driver. This driver is used for Meson6, Meson8, Meson8b and GXBB SoCs. -config DWMAC_OXNAS - tristate "Oxford Semiconductor OXNAS dwmac support" - default ARCH_OXNAS - depends on OF && COMMON_CLK && (ARCH_OXNAS || COMPILE_TEST) - select MFD_SYSCON - help - Support for Ethernet controller on Oxford Semiconductor OXNAS SoCs. - - This selects the Oxford Semiconductor OXNASSoC glue layer support for - the stmmac device driver. This driver is used for OX820. - config DWMAC_QCOM_ETHQOS tristate "Qualcomm ETHQOS support" default ARCH_QCOM @@ -250,6 +239,17 @@ config DWMAC_INTEL_PLAT the stmmac device driver. This driver is used for the Intel Keem Bay SoC. +config DWMAC_LOONGSON1 + tristate "Loongson1 GMAC support" + default MACH_LOONGSON32 + depends on OF && (MACH_LOONGSON32 || COMPILE_TEST) + help + Support for ethernet controller on Loongson1 SoC. + + This selects Loongson1 SoC glue layer support for the stmmac + device driver. This driver is used for Loongson1-based boards + like Loongson LS1B/LS1C. + config DWMAC_TEGRA tristate "NVIDIA Tegra MGBE support" depends on ARCH_TEGRA || COMPILE_TEST @@ -280,7 +280,7 @@ config DWMAC_INTEL config DWMAC_LOONGSON tristate "Loongson PCI DWMAC support" default MACH_LOONGSON64 - depends on STMMAC_ETH && PCI + depends on (MACH_LOONGSON64 || COMPILE_TEST) && STMMAC_ETH && PCI depends on COMMON_CLK help This selects the LOONGSON PCI bus support for the stmmac driver, diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 7dd3d388068b..80e598bd4255 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -19,7 +19,6 @@ obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o obj-$(CONFIG_DWMAC_MEDIATEK) += dwmac-mediatek.o obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o -obj-$(CONFIG_DWMAC_OXNAS) += dwmac-oxnas.o obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o @@ -30,6 +29,7 @@ obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o obj-$(CONFIG_DWMAC_INTEL_PLAT) += dwmac-intel-plat.o +obj-$(CONFIG_DWMAC_LOONGSON1) += dwmac-loongson1.o obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o obj-$(CONFIG_DWMAC_IMX8) += dwmac-imx.o obj-$(CONFIG_DWMAC_TEGRA) += dwmac-tegra.o diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 16e67c18b6f7..e3f650e88f82 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -35,6 +35,7 @@ #define DWMAC_CORE_5_10 0x51 #define DWMAC_CORE_5_20 0x52 #define DWXGMAC_CORE_2_10 0x21 +#define DWXGMAC_CORE_2_20 0x22 #define DWXLGMAC_CORE_2_00 0x20 /* Device ID */ @@ -59,14 +60,26 @@ /* #define FRAME_FILTER_DEBUG */ struct stmmac_txq_stats { - unsigned long tx_pkt_n; - unsigned long tx_normal_irq_n; -}; + u64 tx_bytes; + u64 tx_packets; + u64 tx_pkt_n; + u64 tx_normal_irq_n; + u64 napi_poll; + u64 tx_clean; + u64 tx_set_ic_bit; + u64 tx_tso_frames; + u64 tx_tso_nfrags; + struct u64_stats_sync syncp; +} ____cacheline_aligned_in_smp; struct stmmac_rxq_stats { - unsigned long rx_pkt_n; - unsigned long 
rx_normal_irq_n; -}; + u64 rx_bytes; + u64 rx_packets; + u64 rx_pkt_n; + u64 rx_normal_irq_n; + u64 napi_poll; + struct u64_stats_sync syncp; +} ____cacheline_aligned_in_smp; /* Extra statistic and debug information exposed by ethtool */ struct stmmac_extra_stats { @@ -81,6 +94,7 @@ struct stmmac_extra_stats { unsigned long tx_frame_flushed; unsigned long tx_payload_error; unsigned long tx_ip_header_error; + unsigned long tx_collision; /* Receive errors */ unsigned long rx_desc; unsigned long sa_filter_fail; @@ -113,14 +127,6 @@ struct stmmac_extra_stats { /* Tx/Rx IRQ Events */ unsigned long rx_early_irq; unsigned long threshold; - unsigned long tx_pkt_n; - unsigned long rx_pkt_n; - unsigned long normal_irq_n; - unsigned long rx_normal_irq_n; - unsigned long napi_poll; - unsigned long tx_normal_irq_n; - unsigned long tx_clean; - unsigned long tx_set_ic_bit; unsigned long irq_receive_pmt_irq_n; /* MMC info */ unsigned long mmc_tx_irq_n; @@ -190,9 +196,6 @@ struct stmmac_extra_stats { unsigned long mtl_rx_fifo_ctrl_active; unsigned long mac_rx_frame_ctrl_fifo; unsigned long mac_gmii_rx_proto_engine; - /* TSO */ - unsigned long tx_tso_frames; - unsigned long tx_tso_nfrags; /* EST */ unsigned long mtl_est_cgce; unsigned long mtl_est_hlbs; @@ -202,6 +205,10 @@ struct stmmac_extra_stats { /* per queue statistics */ struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES]; struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES]; + unsigned long rx_dropped; + unsigned long rx_errors; + unsigned long tx_dropped; + unsigned long tx_errors; }; /* Safety Feature statistics exposed by ethtool */ @@ -286,7 +293,7 @@ struct stmmac_safety_stats { #define MIN_DMA_RIWT 0x10 #define DEF_DMA_RIWT 0xa0 /* Tx coalesce parameters */ -#define STMMAC_COAL_TX_TIMER 1000 +#define STMMAC_COAL_TX_TIMER 5000 #define STMMAC_MAX_COAL_TX_TICK 100000 #define STMMAC_TX_MAX_FRAMES 256 #define STMMAC_TX_FRAMES 25 @@ -406,6 +413,18 @@ struct dma_features { unsigned int number_tx_queues; /* PPS output */ unsigned int pps_out_num; + /* Number of Traffic Classes */ + unsigned int numtc; + /* DCB Feature Enable */ + unsigned int dcben; + /* IEEE 1588 High Word Register Enable */ + unsigned int advthword; + /* PTP Offload Enable */ + unsigned int ptoen; + /* One-Step Timestamping Enable */ + unsigned int osten; + /* Priority-Based Flow Control Enable */ + unsigned int pfcen; /* Alternate (enhanced) DESC mode */ unsigned int enh_desc; /* TX and RX FIFO sizes */ @@ -426,14 +445,40 @@ struct dma_features { unsigned int dvlan; unsigned int l3l4fnum; unsigned int arpoffsel; + /* One Step for PTP over UDP/IP Feature Enable */ + unsigned int pou_ost_en; + /* Tx Timestamp FIFO Depth */ + unsigned int ttsfd; + /* Queue/Channel-Based VLAN tag insertion on Tx */ + unsigned int cbtisel; + /* Supported Parallel Instruction Processor Engines */ + unsigned int frppipe_num; + /* Number of Extended VLAN Tag Filters */ + unsigned int nrvf_num; /* TSN Features */ unsigned int estwid; unsigned int estdep; unsigned int estsel; unsigned int fpesel; unsigned int tbssel; + /* Number of DMA channels enabled for TBS */ + unsigned int tbs_ch_num; + /* Per-Stream Filtering Enable */ + unsigned int sgfsel; /* Numbers of Auxiliary Snapshot Inputs */ unsigned int aux_snapshot_n; + /* Timestamp System Time Source */ + unsigned int tssrc; + /* Enhanced DMA Enable */ + unsigned int edma; + /* Different Descriptor Cache Enable */ + unsigned int ediffc; + /* VxLAN/NVGRE Enable */ + unsigned int vxn; + /* Debug Memory Interface Enable */ + unsigned int dbgmem; + /* Number 
of Policing Counters */ + unsigned int pcsel; }; /* RX Buffer size must be multiple of 4/8/16 bytes */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c index 58a7f08e8d78..643ee6d8d4dd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c @@ -115,7 +115,7 @@ static int anarion_dwmac_probe(struct platform_device *pdev) if (IS_ERR(gmac)) return PTR_ERR(gmac); - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); @@ -124,13 +124,7 @@ static int anarion_dwmac_probe(struct platform_device *pdev) anarion_gmac_init(pdev, gmac); plat_dat->bsp_priv = gmac; - ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (ret) { - stmmac_remove_config_dt(pdev, plat_dat); - return ret; - } - - return 0; + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); } static const struct of_device_id anarion_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index 9f88530c5e8c..ec924c6c76c6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -14,7 +14,7 @@ #include <linux/iopoll.h> #include <linux/ioport.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/of_net.h> #include <linux/mfd/syscon.h> #include <linux/platform_device.h> @@ -113,7 +113,7 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev, /* dwc-qos needs GMAC4, AAL, TSO and PMT */ plat_dat->has_gmac4 = 1; plat_dat->dma_cfg->aal = 1; - plat_dat->tso_en = 1; + plat_dat->flags |= STMMAC_FLAG_TSO_EN; plat_dat->pmt = 1; return 0; @@ -178,7 +178,7 @@ static void dwc_qos_remove(struct platform_device *pdev) #define AUTO_CAL_STATUS 0x880c #define AUTO_CAL_STATUS_ACTIVE BIT(31) -static void tegra_eqos_fix_speed(void *priv, unsigned int speed) +static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mode) { struct tegra_eqos *eqos = priv; unsigned long rate = 125000000; @@ -359,7 +359,7 @@ bypass_clk_reset_gpio: data->fix_mac_speed = tegra_eqos_fix_speed; data->init = tegra_eqos_init; data->bsp_priv = eqos; - data->sph_disable = 1; + data->flags |= STMMAC_FLAG_SPH_DISABLE; err = tegra_eqos_init(pdev, eqos); if (err < 0) @@ -435,15 +435,14 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev) if (IS_ERR(stmmac_res.addr)) return PTR_ERR(stmmac_res.addr); - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); ret = data->probe(pdev, plat_dat, &stmmac_res); if (ret < 0) { dev_err_probe(&pdev->dev, ret, "failed to probe subdriver\n"); - - goto remove_config; + return ret; } ret = dwc_eth_dwmac_config_dt(pdev, plat_dat); @@ -458,25 +457,17 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev) remove: data->remove(pdev); -remove_config: - stmmac_remove_config_dt(pdev, plat_dat); return ret; } static void dwc_eth_dwmac_remove(struct platform_device *pdev) { - struct net_device *ndev = platform_get_drvdata(pdev); - struct stmmac_priv *priv = netdev_priv(ndev); - const struct dwc_eth_dwmac_data *data; - - data = device_get_match_data(&pdev->dev); + const struct dwc_eth_dwmac_data *data = device_get_match_data(&pdev->dev); 
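[Editorial note: the stmmac glue-driver hunks around here (anarion above; dwc-qos, generic, imx, ingenic, and intel-plat below) all make the same conversion — stmmac_probe_config_dt() plus hand-rolled stmmac_remove_config_dt() unwind labels become the devres-managed devm_stmmac_probe_config_dt(), so error paths reduce to plain returns. A minimal sketch of the resulting probe shape; the glue-driver name is hypothetical, the stmmac entry points are the ones used in these hunks:

	static int foo_dwmac_probe(struct platform_device *pdev)
	{
		struct plat_stmmacenet_data *plat_dat;
		struct stmmac_resources stmmac_res;
		int ret;

		ret = stmmac_get_platform_resources(pdev, &stmmac_res);
		if (ret)
			return ret;

		/* Devres-managed: released automatically on probe failure or
		 * unbind, so no stmmac_remove_config_dt() labels are needed.
		 */
		plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
		if (IS_ERR(plat_dat))
			return PTR_ERR(plat_dat);

		/* ... glue-specific plat_dat setup goes here ... */

		return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
	}
]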
stmmac_dvr_remove(&pdev->dev); data->remove(pdev); - - stmmac_remove_config_dt(pdev, priv->plat); } static const struct of_device_id dwc_eth_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c index 20fc455b3337..598eff926815 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c @@ -27,7 +27,7 @@ static int dwmac_generic_probe(struct platform_device *pdev) return ret; if (pdev->dev.of_node) { - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) { dev_err(&pdev->dev, "dt configuration failed\n"); return PTR_ERR(plat_dat); @@ -46,17 +46,7 @@ static int dwmac_generic_probe(struct platform_device *pdev) plat_dat->unicast_filter_entries = 1; } - ret = stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res); - if (ret) - goto err_remove_config_dt; - - return 0; - -err_remove_config_dt: - if (pdev->dev.of_node) - stmmac_remove_config_dt(pdev, plat_dat); - - return ret; + return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res); } static const struct of_device_id dwmac_generic_match[] = { @@ -77,7 +67,6 @@ MODULE_DEVICE_TABLE(of, dwmac_generic_match); static struct platform_driver dwmac_generic_driver = { .probe = dwmac_generic_probe, - .remove_new = stmmac_pltfr_remove, .driver = { .name = STMMAC_RESOURCE_NAME, .pm = &stmmac_pltfr_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c index b9378a63f0e8..8f730ada71f9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c @@ -12,7 +12,6 @@ #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_net.h> #include <linux/phy.h> #include <linux/platform_device.h> @@ -32,6 +31,7 @@ #define GPR_ENET_QOS_RGMII_EN (0x1 << 21) #define MX93_GPR_ENET_QOS_INTF_MODE_MASK GENMASK(3, 0) +#define MX93_GPR_ENET_QOS_INTF_MASK GENMASK(3, 1) #define MX93_GPR_ENET_QOS_INTF_SEL_MII (0x0 << 1) #define MX93_GPR_ENET_QOS_INTF_SEL_RMII (0x4 << 1) #define MX93_GPR_ENET_QOS_INTF_SEL_RGMII (0x1 << 1) @@ -40,13 +40,16 @@ #define DMA_BUS_MODE 0x00001000 #define DMA_BUS_MODE_SFT_RESET (0x1 << 0) #define RMII_RESET_SPEED (0x3 << 14) +#define CTRL_SPEED_MASK GENMASK(15, 14) struct imx_dwmac_ops { u32 addr_width; + u32 flags; bool mac_rgmii_txclk_auto_adj; int (*fix_soc_reset)(void *priv, void __iomem *ioaddr); int (*set_intf_mode)(struct plat_stmmacenet_data *plat_dat); + void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode); }; struct imx_priv_data { @@ -56,6 +59,7 @@ struct imx_priv_data { struct regmap *intf_regmap; u32 intf_reg_off; bool rmii_refclk_ext; + void __iomem *base_addr; const struct imx_dwmac_ops *ops; struct plat_stmmacenet_data *plat_dat; @@ -66,7 +70,7 @@ static int imx8mp_set_intf_mode(struct plat_stmmacenet_data *plat_dat) struct imx_priv_data *dwmac = plat_dat->bsp_priv; int val; - switch (plat_dat->interface) { + switch (plat_dat->mac_interface) { case PHY_INTERFACE_MODE_MII: val = GPR_ENET_QOS_INTF_SEL_MII; break; @@ -83,7 +87,7 @@ static int imx8mp_set_intf_mode(struct plat_stmmacenet_data *plat_dat) break; default: pr_debug("imx dwmac doesn't support %d interface\n", - plat_dat->interface); + plat_dat->mac_interface); return -EINVAL; } @@ -106,7 +110,7 @@ static int imx93_set_intf_mode(struct 
plat_stmmacenet_data *plat_dat) struct imx_priv_data *dwmac = plat_dat->bsp_priv; int val; - switch (plat_dat->interface) { + switch (plat_dat->mac_interface) { case PHY_INTERFACE_MODE_MII: val = MX93_GPR_ENET_QOS_INTF_SEL_MII; break; @@ -121,7 +125,7 @@ static int imx93_set_intf_mode(struct plat_stmmacenet_data *plat_dat) break; default: dev_dbg(dwmac->dev, "imx dwmac doesn't support %d interface\n", - plat_dat->interface); + plat_dat->mac_interface); return -EINVAL; } @@ -178,7 +182,7 @@ static void imx_dwmac_exit(struct platform_device *pdev, void *priv) /* nothing to do now */ } -static void imx_dwmac_fix_speed(void *priv, unsigned int speed) +static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode) { struct plat_stmmacenet_data *plat_dat; struct imx_priv_data *dwmac = priv; @@ -188,8 +192,8 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed) plat_dat = dwmac->plat_dat; if (dwmac->ops->mac_rgmii_txclk_auto_adj || - (plat_dat->interface == PHY_INTERFACE_MODE_RMII) || - (plat_dat->interface == PHY_INTERFACE_MODE_MII)) + (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) || + (plat_dat->mac_interface == PHY_INTERFACE_MODE_MII)) return; switch (speed) { @@ -212,6 +216,41 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed) dev_err(dwmac->dev, "failed to set tx rate %lu\n", rate); } +static void imx93_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode) +{ + struct imx_priv_data *dwmac = priv; + unsigned int iface; + int ctrl, old_ctrl; + + imx_dwmac_fix_speed(priv, speed, mode); + + if (!dwmac || mode != MLO_AN_FIXED) + return; + + if (regmap_read(dwmac->intf_regmap, dwmac->intf_reg_off, &iface)) + return; + + iface &= MX93_GPR_ENET_QOS_INTF_MASK; + if (iface != MX93_GPR_ENET_QOS_INTF_SEL_RGMII) + return; + + old_ctrl = readl(dwmac->base_addr + MAC_CTRL_REG); + ctrl = old_ctrl & ~CTRL_SPEED_MASK; + regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off, + MX93_GPR_ENET_QOS_INTF_MODE_MASK, 0); + writel(ctrl, dwmac->base_addr + MAC_CTRL_REG); + + /* Ensure the settings for CTRL are applied. 
*/ + readl(dwmac->base_addr + MAC_CTRL_REG); + + usleep_range(10, 20); + iface |= MX93_GPR_ENET_QOS_CLK_GEN_EN; + regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off, + MX93_GPR_ENET_QOS_INTF_MODE_MASK, iface); + + writel(old_ctrl, dwmac->base_addr + MAC_CTRL_REG); +} + static int imx_dwmac_mx93_reset(void *priv, void __iomem *ioaddr) { struct plat_stmmacenet_data *plat_dat = priv; @@ -221,7 +260,7 @@ static int imx_dwmac_mx93_reset(void *priv, void __iomem *ioaddr) value |= DMA_BUS_MODE_SFT_RESET; writel(value, ioaddr + DMA_BUS_MODE); - if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) { + if (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) { usleep_range(100, 200); writel(RMII_RESET_SPEED, ioaddr + MAC_CTRL_REG); } @@ -292,15 +331,14 @@ static int imx_dwmac_probe(struct platform_device *pdev) if (!dwmac) return -ENOMEM; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); data = of_device_get_match_data(&pdev->dev); if (!data) { dev_err(&pdev->dev, "failed to get match data\n"); - ret = -EINVAL; - goto err_match_data; + return -EINVAL; } dwmac->ops = data; @@ -309,9 +347,12 @@ static int imx_dwmac_probe(struct platform_device *pdev) ret = imx_dwmac_parse_dt(dwmac, &pdev->dev); if (ret) { dev_err(&pdev->dev, "failed to parse OF data\n"); - goto err_parse_dt; + return ret; } + if (data->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) + plat_dat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY; + plat_dat->host_dma_width = dwmac->ops->addr_width; plat_dat->init = imx_dwmac_init; plat_dat->exit = imx_dwmac_exit; @@ -319,15 +360,18 @@ static int imx_dwmac_probe(struct platform_device *pdev) plat_dat->fix_mac_speed = imx_dwmac_fix_speed; plat_dat->bsp_priv = dwmac; dwmac->plat_dat = plat_dat; + dwmac->base_addr = stmmac_res.addr; ret = imx_dwmac_clks_config(dwmac, true); if (ret) - goto err_clks_config; + return ret; ret = imx_dwmac_init(pdev, dwmac); if (ret) goto err_dwmac_init; + if (dwmac->ops->fix_mac_speed) + plat_dat->fix_mac_speed = dwmac->ops->fix_mac_speed; dwmac->plat_dat->fix_soc_reset = dwmac->ops->fix_soc_reset; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); @@ -340,10 +384,6 @@ err_drv_probe: imx_dwmac_exit(pdev, plat_dat->bsp_priv); err_dwmac_init: imx_dwmac_clks_config(dwmac, false); -err_clks_config: -err_parse_dt: -err_match_data: - stmmac_remove_config_dt(pdev, plat_dat); return ret; } @@ -351,6 +391,7 @@ static struct imx_dwmac_ops imx8mp_dwmac_data = { .addr_width = 34, .mac_rgmii_txclk_auto_adj = false, .set_intf_mode = imx8mp_set_intf_mode, + .flags = STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY, }; static struct imx_dwmac_ops imx8dxl_dwmac_data = { @@ -364,6 +405,7 @@ static struct imx_dwmac_ops imx93_dwmac_data = { .mac_rgmii_txclk_auto_adj = true, .set_intf_mode = imx93_set_intf_mode, .fix_soc_reset = imx_dwmac_mx93_reset, + .fix_mac_speed = imx93_dwmac_fix_speed, }; static const struct of_device_id imx_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c index 8063ba1c3ce8..19c93b998fb3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c @@ -11,7 +11,6 @@ #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_net.h> #include <linux/phy.h> #include <linux/platform_device.h> @@ -90,7 +89,7 @@ static int 
jz4775_mac_set_mode(struct plat_stmmacenet_data *plat_dat) struct ingenic_mac *mac = plat_dat->bsp_priv; unsigned int val; - switch (plat_dat->interface) { + switch (plat_dat->mac_interface) { case PHY_INTERFACE_MODE_MII: val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) | FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_MII); @@ -119,7 +118,7 @@ static int jz4775_mac_set_mode(struct plat_stmmacenet_data *plat_dat) break; default: - dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface); + dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface); return -EINVAL; } @@ -131,13 +130,13 @@ static int x1000_mac_set_mode(struct plat_stmmacenet_data *plat_dat) { struct ingenic_mac *mac = plat_dat->bsp_priv; - switch (plat_dat->interface) { + switch (plat_dat->mac_interface) { case PHY_INTERFACE_MODE_RMII: dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n"); break; default: - dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface); + dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface); return -EINVAL; } @@ -150,14 +149,14 @@ static int x1600_mac_set_mode(struct plat_stmmacenet_data *plat_dat) struct ingenic_mac *mac = plat_dat->bsp_priv; unsigned int val; - switch (plat_dat->interface) { + switch (plat_dat->mac_interface) { case PHY_INTERFACE_MODE_RMII: val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII); dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n"); break; default: - dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface); + dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface); return -EINVAL; } @@ -170,7 +169,7 @@ static int x1830_mac_set_mode(struct plat_stmmacenet_data *plat_dat) struct ingenic_mac *mac = plat_dat->bsp_priv; unsigned int val; - switch (plat_dat->interface) { + switch (plat_dat->mac_interface) { case PHY_INTERFACE_MODE_RMII: val = FIELD_PREP(MACPHYC_MODE_SEL_MASK, MACPHYC_MODE_SEL_RMII) | FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII); @@ -178,7 +177,7 @@ static int x1830_mac_set_mode(struct plat_stmmacenet_data *plat_dat) break; default: - dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface); + dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface); return -EINVAL; } @@ -191,7 +190,7 @@ static int x2000_mac_set_mode(struct plat_stmmacenet_data *plat_dat) struct ingenic_mac *mac = plat_dat->bsp_priv; unsigned int val; - switch (plat_dat->interface) { + switch (plat_dat->mac_interface) { case PHY_INTERFACE_MODE_RMII: val = FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN) | FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN) | @@ -221,7 +220,7 @@ static int x2000_mac_set_mode(struct plat_stmmacenet_data *plat_dat) break; default: - dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface); + dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface); return -EINVAL; } @@ -242,29 +241,25 @@ static int ingenic_mac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); mac = devm_kzalloc(&pdev->dev, sizeof(*mac), GFP_KERNEL); - if (!mac) { - ret = -ENOMEM; - goto err_remove_config_dt; - } + if (!mac) + return -ENOMEM; data = of_device_get_match_data(&pdev->dev); if (!data) { dev_err(&pdev->dev, "No of match data provided\n"); - ret = -EINVAL; - goto err_remove_config_dt; + return 
-EINVAL; } /* Get MAC PHY control register */ mac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "mode-reg"); if (IS_ERR(mac->regmap)) { dev_err(&pdev->dev, "%s: Failed to get syscon regmap\n", __func__); - ret = PTR_ERR(mac->regmap); - goto err_remove_config_dt; + return PTR_ERR(mac->regmap); } if (!of_property_read_u32(pdev->dev.of_node, "tx-clk-delay-ps", &tx_delay_ps)) { @@ -273,8 +268,7 @@ static int ingenic_mac_probe(struct platform_device *pdev) mac->tx_delay = tx_delay_ps * 1000; } else { dev_err(&pdev->dev, "Invalid TX clock delay: %dps\n", tx_delay_ps); - ret = -EINVAL; - goto err_remove_config_dt; + return -EINVAL; } } @@ -284,8 +278,7 @@ static int ingenic_mac_probe(struct platform_device *pdev) mac->rx_delay = rx_delay_ps * 1000; } else { dev_err(&pdev->dev, "Invalid RX clock delay: %dps\n", rx_delay_ps); - ret = -EINVAL; - goto err_remove_config_dt; + return -EINVAL; } } @@ -296,18 +289,9 @@ static int ingenic_mac_probe(struct platform_device *pdev) ret = ingenic_mac_init(plat_dat); if (ret) - goto err_remove_config_dt; - - ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (ret) - goto err_remove_config_dt; - - return 0; - -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); + return ret; - return ret; + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c index a5e639ab0b9e..d68f0c4e7835 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c @@ -7,8 +7,8 @@ #include <linux/ethtool.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/stmmac.h> #include "dwmac4.h" @@ -22,13 +22,13 @@ struct intel_dwmac { }; struct intel_dwmac_data { - void (*fix_mac_speed)(void *priv, unsigned int speed); + void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode); unsigned long ptp_ref_clk_rate; unsigned long tx_clk_rate; bool tx_clk_en; }; -static void kmb_eth_fix_mac_speed(void *priv, unsigned int speed) +static void kmb_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode) { struct intel_dwmac *dwmac = priv; unsigned long rate; @@ -76,7 +76,6 @@ static int intel_eth_plat_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; struct stmmac_resources stmmac_res; - const struct of_device_id *match; struct intel_dwmac *dwmac; unsigned long rate; int ret; @@ -85,35 +84,29 @@ static int intel_eth_plat_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) { dev_err(&pdev->dev, "dt configuration failed\n"); return PTR_ERR(plat_dat); } dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) { - ret = -ENOMEM; - goto err_remove_config_dt; - } + if (!dwmac) + return -ENOMEM; dwmac->dev = &pdev->dev; dwmac->tx_clk = NULL; - match = of_match_device(intel_eth_plat_match, &pdev->dev); - if (match && match->data) { - dwmac->data = (const struct intel_dwmac_data *)match->data; - + dwmac->data = device_get_match_data(&pdev->dev); + if (dwmac->data) { if (dwmac->data->fix_mac_speed) plat_dat->fix_mac_speed = dwmac->data->fix_mac_speed; /* Enable TX clock */ if (dwmac->data->tx_clk_en) { dwmac->tx_clk = 
devm_clk_get(&pdev->dev, "tx_clk"); - if (IS_ERR(dwmac->tx_clk)) { - ret = PTR_ERR(dwmac->tx_clk); - goto err_remove_config_dt; - } + if (IS_ERR(dwmac->tx_clk)) + return PTR_ERR(dwmac->tx_clk); clk_prepare_enable(dwmac->tx_clk); @@ -126,7 +119,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Failed to set tx_clk\n"); - goto err_remove_config_dt; + return ret; } } } @@ -140,7 +133,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Failed to set clk_ptp_ref\n"); - goto err_remove_config_dt; + return ret; } } } @@ -158,15 +151,10 @@ static int intel_eth_plat_probe(struct platform_device *pdev) ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) { clk_disable_unprepare(dwmac->tx_clk); - goto err_remove_config_dt; + return ret; } return 0; - -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); - - return ret; } static void intel_eth_plat_remove(struct platform_device *pdev) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index ab9f876b6df7..60283543ffc8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -257,9 +257,8 @@ static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data) /* Program PTP Clock Frequency for different variant of * Intel mGBE that has slightly different GPO mapping */ -static void intel_mgbe_ptp_clk_freq_config(void *npriv) +static void intel_mgbe_ptp_clk_freq_config(struct stmmac_priv *priv) { - struct stmmac_priv *priv = (struct stmmac_priv *)npriv; struct intel_priv_data *intel_priv; u32 gpio_value; @@ -326,10 +325,10 @@ static int intel_crosststamp(ktime_t *device, /* Both internal crosstimestamping and external triggered event * timestamping cannot be run concurrently. */ - if (priv->plat->ext_snapshot_en) + if (priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN) return -EBUSY; - priv->plat->int_snapshot_en = 1; + priv->plat->flags |= STMMAC_FLAG_INT_SNAPSHOT_EN; mutex_lock(&priv->aux_ts_lock); /* Enable Internal snapshot trigger */ @@ -350,7 +349,7 @@ static int intel_crosststamp(ktime_t *device, break; default: mutex_unlock(&priv->aux_ts_lock); - priv->plat->int_snapshot_en = 0; + priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN; return -EINVAL; } writel(acr_value, ptpaddr + PTP_ACR); @@ -376,7 +375,7 @@ static int intel_crosststamp(ktime_t *device, if (!wait_event_interruptible_timeout(priv->tstamp_busy_wait, stmmac_cross_ts_isr(priv), HZ / 100)) { - priv->plat->int_snapshot_en = 0; + priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN; return -ETIMEDOUT; } @@ -395,7 +394,7 @@ static int intel_crosststamp(ktime_t *device, } system->cycles *= intel_priv->crossts_adj; - priv->plat->int_snapshot_en = 0; + priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN; return 0; } @@ -458,8 +457,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, plat->has_gmac = 0; plat->has_gmac4 = 1; plat->force_sf_dma_mode = 0; - plat->tso_en = 1; - plat->sph_disable = 1; + plat->flags |= (STMMAC_FLAG_TSO_EN | STMMAC_FLAG_SPH_DISABLE); /* Multiplying factor to the clk_eee_i clock time * period to make it closer to 100 ns. 
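The Intel hunks above also show the other series-wide cleanup: single-purpose bool/int fields in struct plat_stmmacenet_data (tso_en, sph_disable, int_snapshot_en, ext_snapshot_en, and more below) are folded into one plat->flags word tested with STMMAC_FLAG_* bits. The idiom, taken directly from the crosststamp path above:

        /* probe time: declare capabilities in one word */
        plat->flags |= (STMMAC_FLAG_TSO_EN | STMMAC_FLAG_SPH_DISABLE);

        /* runtime: mutual exclusion between the two snapshot modes */
        if (priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN)
                return -EBUSY;
        priv->plat->flags |= STMMAC_FLAG_INT_SNAPSHOT_EN;
        /* ... trigger and read the internal snapshot ... */
        priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;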
This value @@ -561,7 +559,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, /* Set the maxmtu to a default of JUMBO_LEN */ plat->maxmtu = JUMBO_LEN; - plat->vlan_fail_q_en = true; + plat->flags |= STMMAC_FLAG_VLAN_FAIL_Q_EN; /* Use the last Rx queue */ plat->vlan_fail_q = plat->rx_queues_to_use - 1; @@ -607,10 +605,9 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR; plat->int_snapshot_num = AUX_SNAPSHOT1; - plat->ext_snapshot_num = AUX_SNAPSHOT0; plat->crosststamp = intel_crosststamp; - plat->int_snapshot_en = 0; + plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN; /* Setup MSI vector offset specific to Intel mGbE controller */ plat->msi_mac_vec = 29; @@ -628,7 +625,8 @@ static int ehl_common_data(struct pci_dev *pdev, { plat->rx_queues_to_use = 8; plat->tx_queues_to_use = 8; - plat->use_phy_wol = 1; + plat->flags |= STMMAC_FLAG_USE_PHY_WOL; + plat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY; plat->safety_feat_cfg->tsoee = 1; plat->safety_feat_cfg->mrxpee = 1; @@ -954,7 +952,7 @@ static int stmmac_config_single_msi(struct pci_dev *pdev, res->irq = pci_irq_vector(pdev, 0); res->wol_irq = res->irq; - plat->multi_msi_en = 0; + plat->flags &= ~STMMAC_FLAG_MULTI_MSI_EN; dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n", __func__); @@ -1006,7 +1004,7 @@ static int stmmac_config_multi_msi(struct pci_dev *pdev, if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX) res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec); - plat->multi_msi_en = 1; + plat->flags |= STMMAC_FLAG_MULTI_MSI_EN; dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__); return 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index e39406df8516..281687d7083b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -257,7 +257,7 @@ static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac) return PTR_ERR_OR_ZERO(gmac->qsgmii_csr); } -static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed) +static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode) { struct ipq806x_gmac *gmac = priv; @@ -384,22 +384,20 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) if (val) return val; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL); - if (!gmac) { - err = -ENOMEM; - goto err_remove_config_dt; - } + if (!gmac) + return -ENOMEM; gmac->pdev = pdev; err = ipq806x_gmac_of_parse(gmac); if (err) { dev_err(dev, "device tree parsing error\n"); - goto err_remove_config_dt; + return err; } regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL, @@ -459,11 +457,11 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) { err = ipq806x_gmac_configure_qsgmii_params(gmac); if (err) - goto err_remove_config_dt; + return err; err = ipq806x_gmac_configure_qsgmii_pcs_speed(gmac); if (err) - goto err_remove_config_dt; + return err; } plat_dat->has_gmac = true; @@ -473,21 +471,12 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) plat_dat->tx_fifo_size = 8192; plat_dat->rx_fifo_size = 8192; - err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (err) - goto err_remove_config_dt; - - return 0; + return 
stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); err_unsupported_phy: dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", phy_modes(gmac->phy_mode)); - err = -EINVAL; - -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); - - return err; + return -EINVAL; } static const struct of_device_id ipq806x_gmac_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index a25c187d3185..9e40c28d453a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -59,26 +59,19 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id return -ENODEV; } - if (!of_device_is_compatible(np, "loongson, pci-gmac")) { - pr_info("dwmac_loongson_pci: Incompatible OF node\n"); - return -ENODEV; - } - plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); if (!plat) return -ENOMEM; + plat->mdio_bus_data = devm_kzalloc(&pdev->dev, + sizeof(*plat->mdio_bus_data), + GFP_KERNEL); + if (!plat->mdio_bus_data) + return -ENOMEM; + plat->mdio_node = of_get_child_by_name(np, "mdio"); if (plat->mdio_node) { dev_info(&pdev->dev, "Found MDIO subnode\n"); - - plat->mdio_bus_data = devm_kzalloc(&pdev->dev, - sizeof(*plat->mdio_bus_data), - GFP_KERNEL); - if (!plat->mdio_bus_data) { - ret = -ENOMEM; - goto err_put_node; - } plat->mdio_bus_data->needs_reset = true; } @@ -117,7 +110,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id } plat->phy_interface = phy_mode; - plat->interface = PHY_INTERFACE_MODE_GMII; + plat->mac_interface = PHY_INTERFACE_MODE_GMII; pci_set_master(pdev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c new file mode 100644 index 000000000000..3e86810717d3 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Loongson-1 DWMAC glue layer + * + * Copyright (C) 2011-2023 Keguang Zhang <keguang.zhang@gmail.com> + */ + +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/phy.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include "stmmac.h" +#include "stmmac_platform.h" + +#define LS1B_GMAC0_BASE (0x1fe10000) +#define LS1B_GMAC1_BASE (0x1fe20000) + +/* Loongson-1 SYSCON Registers */ +#define LS1X_SYSCON0 (0x0) +#define LS1X_SYSCON1 (0x4) + +/* Loongson-1B SYSCON Register Bits */ +#define GMAC1_USE_UART1 BIT(4) +#define GMAC1_USE_UART0 BIT(3) + +#define GMAC1_SHUT BIT(13) +#define GMAC0_SHUT BIT(12) + +#define GMAC1_USE_TXCLK BIT(3) +#define GMAC0_USE_TXCLK BIT(2) +#define GMAC1_USE_PWM23 BIT(1) +#define GMAC0_USE_PWM01 BIT(0) + +/* Loongson-1C SYSCON Register Bits */ +#define GMAC_SHUT BIT(6) + +#define PHY_INTF_SELI GENMASK(30, 28) +#define PHY_INTF_MII FIELD_PREP(PHY_INTF_SELI, 0) +#define PHY_INTF_RMII FIELD_PREP(PHY_INTF_SELI, 4) + +struct ls1x_dwmac { + struct plat_stmmacenet_data *plat_dat; + struct regmap *regmap; +}; + +static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv) +{ + struct ls1x_dwmac *dwmac = priv; + struct plat_stmmacenet_data *plat = dwmac->plat_dat; + struct regmap *regmap = dwmac->regmap; + struct resource *res; + unsigned long reg_base; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "Could not get IO_MEM resources\n"); + return -EINVAL; + } + reg_base = 
(unsigned long)res->start; + + if (reg_base == LS1B_GMAC0_BASE) { + switch (plat->phy_interface) { + case PHY_INTERFACE_MODE_RGMII_ID: + regmap_update_bits(regmap, LS1X_SYSCON0, + GMAC0_USE_TXCLK | GMAC0_USE_PWM01, + 0); + break; + case PHY_INTERFACE_MODE_MII: + regmap_update_bits(regmap, LS1X_SYSCON0, + GMAC0_USE_TXCLK | GMAC0_USE_PWM01, + GMAC0_USE_TXCLK | GMAC0_USE_PWM01); + break; + default: + dev_err(&pdev->dev, "Unsupported PHY mode %u\n", + plat->phy_interface); + return -EOPNOTSUPP; + } + + regmap_update_bits(regmap, LS1X_SYSCON0, GMAC0_SHUT, 0); + } else if (reg_base == LS1B_GMAC1_BASE) { + regmap_update_bits(regmap, LS1X_SYSCON0, + GMAC1_USE_UART1 | GMAC1_USE_UART0, + GMAC1_USE_UART1 | GMAC1_USE_UART0); + + switch (plat->phy_interface) { + case PHY_INTERFACE_MODE_RGMII_ID: + regmap_update_bits(regmap, LS1X_SYSCON1, + GMAC1_USE_TXCLK | GMAC1_USE_PWM23, + 0); + + break; + case PHY_INTERFACE_MODE_MII: + regmap_update_bits(regmap, LS1X_SYSCON1, + GMAC1_USE_TXCLK | GMAC1_USE_PWM23, + GMAC1_USE_TXCLK | GMAC1_USE_PWM23); + break; + default: + dev_err(&pdev->dev, "Unsupported PHY mode %u\n", + plat->phy_interface); + return -EOPNOTSUPP; + } + + regmap_update_bits(regmap, LS1X_SYSCON1, GMAC1_SHUT, 0); + } else { + dev_err(&pdev->dev, "Invalid Ethernet MAC base address %lx", + reg_base); + return -EINVAL; + } + + return 0; +} + +static int ls1c_dwmac_syscon_init(struct platform_device *pdev, void *priv) +{ + struct ls1x_dwmac *dwmac = priv; + struct plat_stmmacenet_data *plat = dwmac->plat_dat; + struct regmap *regmap = dwmac->regmap; + + switch (plat->phy_interface) { + case PHY_INTERFACE_MODE_MII: + regmap_update_bits(regmap, LS1X_SYSCON1, PHY_INTF_SELI, + PHY_INTF_MII); + break; + case PHY_INTERFACE_MODE_RMII: + regmap_update_bits(regmap, LS1X_SYSCON1, PHY_INTF_SELI, + PHY_INTF_RMII); + break; + default: + dev_err(&pdev->dev, "Unsupported PHY-mode %u\n", + plat->phy_interface); + return -EOPNOTSUPP; + } + + regmap_update_bits(regmap, LS1X_SYSCON0, GMAC0_SHUT, 0); + + return 0; +} + +static int ls1x_dwmac_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + struct regmap *regmap; + struct ls1x_dwmac *dwmac; + int (*init)(struct platform_device *pdev, void *priv); + int ret; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + /* Probe syscon */ + regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "loongson,ls1-syscon"); + if (IS_ERR(regmap)) + return dev_err_probe(&pdev->dev, PTR_ERR(regmap), + "Unable to find syscon\n"); + + init = of_device_get_match_data(&pdev->dev); + if (!init) { + dev_err(&pdev->dev, "No of match data provided\n"); + return -EINVAL; + } + + dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); + if (!dwmac) + return -ENOMEM; + + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) + return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat), + "dt configuration failed\n"); + + plat_dat->bsp_priv = dwmac; + plat_dat->init = init; + dwmac->plat_dat = plat_dat; + dwmac->regmap = regmap; + + return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res); +} + +static const struct of_device_id ls1x_dwmac_match[] = { + { + .compatible = "loongson,ls1b-gmac", + .data = &ls1b_dwmac_syscon_init, + }, + { + .compatible = "loongson,ls1c-emac", + .data = &ls1c_dwmac_syscon_init, + }, + { } +}; +MODULE_DEVICE_TABLE(of, ls1x_dwmac_match); + +static struct platform_driver ls1x_dwmac_driver = { + .probe = ls1x_dwmac_probe, + .driver = { + 
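ls1b_dwmac_syscon_init() above is worth noting: the LS1B has two GMAC instances with different syscon bits, and since both share one init callback the code tells them apart by MMIO base address. A condensed sketch of that dispatch, with error handling dropped and the macros as defined at the top of dwmac-loongson1.c:

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res->start == LS1B_GMAC0_BASE) {
                /* GMAC0: route clocks/pins, then clear the shut-down bit */
                regmap_update_bits(regmap, LS1X_SYSCON0, GMAC0_SHUT, 0);
        } else if (res->start == LS1B_GMAC1_BASE) {
                /* GMAC1 additionally claims the UART0/UART1 pins */
                regmap_update_bits(regmap, LS1X_SYSCON0,
                                   GMAC1_USE_UART1 | GMAC1_USE_UART0,
                                   GMAC1_USE_UART1 | GMAC1_USE_UART0);
                regmap_update_bits(regmap, LS1X_SYSCON1, GMAC1_SHUT, 0);
        }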
.name = "loongson1-dwmac", + .of_match_table = ls1x_dwmac_match, + }, +}; +module_platform_driver(ls1x_dwmac_driver); + +MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>"); +MODULE_DESCRIPTION("Loongson-1 DWMAC glue layer"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c index 18e84ba693a6..4c810d8f5bea 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c @@ -37,7 +37,7 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); @@ -46,33 +46,22 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev) reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg"); if (IS_ERR(reg)) { dev_err(&pdev->dev, "syscon lookup failed\n"); - ret = PTR_ERR(reg); - goto err_remove_config_dt; + return PTR_ERR(reg); } - if (plat_dat->interface == PHY_INTERFACE_MODE_MII) { + if (plat_dat->mac_interface == PHY_INTERFACE_MODE_MII) { ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII; - } else if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) { + } else if (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) { ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII; } else { dev_err(&pdev->dev, "Only MII and RMII mode supported\n"); - ret = -EINVAL; - goto err_remove_config_dt; + return -EINVAL; } regmap_update_bits(reg, LPC18XX_CREG_CREG6, LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode); - ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (ret) - goto err_remove_config_dt; - - return 0; - -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); - - return ret; + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); } static const struct of_device_id lpc18xx_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c index 73c1dfa7ecb1..2a9132d6d743 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c @@ -7,8 +7,8 @@ #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_net.h> +#include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/stmmac.h> @@ -587,8 +587,11 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev, { int i; - plat->interface = priv_plat->phy_mode; - plat->use_phy_wol = priv_plat->mac_wol ? 
0 : 1; + plat->mac_interface = priv_plat->phy_mode; + if (priv_plat->mac_wol) + plat->flags |= STMMAC_FLAG_USE_PHY_WOL; + else + plat->flags &= ~STMMAC_FLAG_USE_PHY_WOL; plat->riwt_off = 1; plat->maxmtu = ETH_DATA_LEN; plat->host_dma_width = priv_plat->variant->dma_bit_mask; @@ -653,7 +656,7 @@ static int mediatek_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); @@ -662,7 +665,7 @@ static int mediatek_dwmac_probe(struct platform_device *pdev) ret = mediatek_dwmac_clks_config(priv_plat, true); if (ret) - goto err_remove_config_dt; + return ret; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) @@ -672,8 +675,6 @@ static int mediatek_dwmac_probe(struct platform_device *pdev) err_drv_probe: mediatek_dwmac_clks_config(priv_plat, false); -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c index 7aa5e6bc04eb..a16bfa9089ea 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c @@ -22,7 +22,7 @@ struct meson_dwmac { void __iomem *reg; }; -static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed) +static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode) { struct meson_dwmac *dwmac = priv; unsigned int val; @@ -52,35 +52,22 @@ static int meson6_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) { - ret = -ENOMEM; - goto err_remove_config_dt; - } + if (!dwmac) + return -ENOMEM; dwmac->reg = devm_platform_ioremap_resource(pdev, 1); - if (IS_ERR(dwmac->reg)) { - ret = PTR_ERR(dwmac->reg); - goto err_remove_config_dt; - } + if (IS_ERR(dwmac->reg)) + return PTR_ERR(dwmac->reg); plat_dat->bsp_priv = dwmac; plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed; - ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (ret) - goto err_remove_config_dt; - - return 0; - -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); - - return ret; + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); } static const struct of_device_id meson6_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 92b16048f91c..b23944aa344e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -13,7 +13,7 @@ #include <linux/io.h> #include <linux/ioport.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/of_net.h> #include <linux/mfd/syscon.h> #include <linux/platform_device.h> @@ -400,33 +400,27 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) { - ret = -ENOMEM; - goto err_remove_config_dt; - } + if (!dwmac) + return -ENOMEM; 
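The third recurring change: every fix_mac_speed() callback in this section (kmb, ipq806x, meson6 above; rk, socfpga, starfive, sti, sun7i and visconti below) grows a third mode argument, and callers with nothing to convey pass 0, as the sti hunk's fix_retime_src(dwmac, dwmac->speed, 0) shows. A hypothetical adapted callback — the struct name and clock rates are illustrative, not from the diff:

static void example_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
{
        struct example_dwmac *dwmac = priv;     /* hypothetical glue struct */
        unsigned long rate;

        switch (speed) {
        case SPEED_1000:
                rate = 125000000;
                break;
        case SPEED_100:
                rate = 25000000;
                break;
        default:
                return;
        }

        /* most glue drivers ignore mode for now; it identifies the MAC
         * operating mode for callers that need to distinguish it */
        clk_set_rate(dwmac->tx_clk, rate);
}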
dwmac->data = (const struct meson8b_dwmac_data *) of_device_get_match_data(&pdev->dev); - if (!dwmac->data) { - ret = -EINVAL; - goto err_remove_config_dt; - } + if (!dwmac->data) + return -EINVAL; dwmac->regs = devm_platform_ioremap_resource(pdev, 1); - if (IS_ERR(dwmac->regs)) { - ret = PTR_ERR(dwmac->regs); - goto err_remove_config_dt; - } + if (IS_ERR(dwmac->regs)) + return PTR_ERR(dwmac->regs); dwmac->dev = &pdev->dev; ret = of_get_phy_mode(pdev->dev.of_node, &dwmac->phy_mode); if (ret) { dev_err(&pdev->dev, "missing phy-mode property\n"); - goto err_remove_config_dt; + return ret; } /* use 2ns as fallback since this value was previously hardcoded */ @@ -448,53 +442,40 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) if (dwmac->rx_delay_ps > 3000 || dwmac->rx_delay_ps % 200) { dev_err(dwmac->dev, "The RGMII RX delay range is 0..3000ps in 200ps steps"); - ret = -EINVAL; - goto err_remove_config_dt; + return -EINVAL; } } else { if (dwmac->rx_delay_ps != 0 && dwmac->rx_delay_ps != 2000) { dev_err(dwmac->dev, "The only allowed RGMII RX delays values are: 0ps, 2000ps"); - ret = -EINVAL; - goto err_remove_config_dt; + return -EINVAL; } } dwmac->timing_adj_clk = devm_clk_get_optional(dwmac->dev, "timing-adjustment"); - if (IS_ERR(dwmac->timing_adj_clk)) { - ret = PTR_ERR(dwmac->timing_adj_clk); - goto err_remove_config_dt; - } + if (IS_ERR(dwmac->timing_adj_clk)) + return PTR_ERR(dwmac->timing_adj_clk); ret = meson8b_init_rgmii_delays(dwmac); if (ret) - goto err_remove_config_dt; + return ret; ret = meson8b_init_rgmii_tx_clk(dwmac); if (ret) - goto err_remove_config_dt; + return ret; ret = dwmac->data->set_phy_mode(dwmac); if (ret) - goto err_remove_config_dt; + return ret; ret = meson8b_init_prg_eth(dwmac); if (ret) - goto err_remove_config_dt; + return ret; plat_dat->bsp_priv = dwmac; - ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (ret) - goto err_remove_config_dt; - - return 0; - -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); - - return ret; + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); } static const struct meson8b_dwmac_data meson8b_dwmac_data = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c deleted file mode 100644 index 42954020de2c..000000000000 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c +++ /dev/null @@ -1,245 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Oxford Semiconductor OXNAS DWMAC glue layer - * - * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com> - * Copyright (C) 2014 Daniel Golle <daniel@makrotopia.org> - * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com> - * Copyright (C) 2012 John Crispin <blogic@openwrt.org> - */ - -#include <linux/device.h> -#include <linux/io.h> -#include <linux/module.h> -#include <linux/of.h> -#include <linux/of_device.h> -#include <linux/platform_device.h> -#include <linux/regmap.h> -#include <linux/mfd/syscon.h> -#include <linux/stmmac.h> - -#include "stmmac_platform.h" - -/* System Control regmap offsets */ -#define OXNAS_DWMAC_CTRL_REGOFFSET 0x78 -#define OXNAS_DWMAC_DELAY_REGOFFSET 0x100 - -/* Control Register */ -#define DWMAC_CKEN_RX_IN 14 -#define DWMAC_CKEN_RXN_OUT 13 -#define DWMAC_CKEN_RX_OUT 12 -#define DWMAC_CKEN_TX_IN 10 -#define DWMAC_CKEN_TXN_OUT 9 -#define DWMAC_CKEN_TX_OUT 8 -#define DWMAC_RX_SOURCE 7 -#define DWMAC_TX_SOURCE 6 -#define DWMAC_LOW_TX_SOURCE 4 -#define DWMAC_AUTO_TX_SOURCE 3 -#define DWMAC_RGMII 2 -#define DWMAC_SIMPLE_MUX 1 -#define 
DWMAC_CKEN_GTX 0 - -/* Delay register */ -#define DWMAC_TX_VARDELAY_SHIFT 0 -#define DWMAC_TXN_VARDELAY_SHIFT 8 -#define DWMAC_RX_VARDELAY_SHIFT 16 -#define DWMAC_RXN_VARDELAY_SHIFT 24 -#define DWMAC_TX_VARDELAY(d) ((d) << DWMAC_TX_VARDELAY_SHIFT) -#define DWMAC_TXN_VARDELAY(d) ((d) << DWMAC_TXN_VARDELAY_SHIFT) -#define DWMAC_RX_VARDELAY(d) ((d) << DWMAC_RX_VARDELAY_SHIFT) -#define DWMAC_RXN_VARDELAY(d) ((d) << DWMAC_RXN_VARDELAY_SHIFT) - -struct oxnas_dwmac; - -struct oxnas_dwmac_data { - int (*setup)(struct oxnas_dwmac *dwmac); -}; - -struct oxnas_dwmac { - struct device *dev; - struct clk *clk; - struct regmap *regmap; - const struct oxnas_dwmac_data *data; -}; - -static int oxnas_dwmac_setup_ox810se(struct oxnas_dwmac *dwmac) -{ - unsigned int value; - int ret; - - ret = regmap_read(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, &value); - if (ret < 0) - return ret; - - /* Enable GMII_GTXCLK to follow GMII_REFCLK, required for gigabit PHY */ - value |= BIT(DWMAC_CKEN_GTX) | - /* Use simple mux for 25/125 Mhz clock switching */ - BIT(DWMAC_SIMPLE_MUX); - - regmap_write(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, value); - - return 0; -} - -static int oxnas_dwmac_setup_ox820(struct oxnas_dwmac *dwmac) -{ - unsigned int value; - int ret; - - ret = regmap_read(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, &value); - if (ret < 0) - return ret; - - /* Enable GMII_GTXCLK to follow GMII_REFCLK, required for gigabit PHY */ - value |= BIT(DWMAC_CKEN_GTX) | - /* Use simple mux for 25/125 Mhz clock switching */ - BIT(DWMAC_SIMPLE_MUX) | - /* set auto switch tx clock source */ - BIT(DWMAC_AUTO_TX_SOURCE) | - /* enable tx & rx vardelay */ - BIT(DWMAC_CKEN_TX_OUT) | - BIT(DWMAC_CKEN_TXN_OUT) | - BIT(DWMAC_CKEN_TX_IN) | - BIT(DWMAC_CKEN_RX_OUT) | - BIT(DWMAC_CKEN_RXN_OUT) | - BIT(DWMAC_CKEN_RX_IN); - regmap_write(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, value); - - /* set tx & rx vardelay */ - value = DWMAC_TX_VARDELAY(4) | - DWMAC_TXN_VARDELAY(2) | - DWMAC_RX_VARDELAY(10) | - DWMAC_RXN_VARDELAY(8); - regmap_write(dwmac->regmap, OXNAS_DWMAC_DELAY_REGOFFSET, value); - - return 0; -} - -static int oxnas_dwmac_init(struct platform_device *pdev, void *priv) -{ - struct oxnas_dwmac *dwmac = priv; - int ret; - - /* Reset HW here before changing the glue configuration */ - ret = device_reset(dwmac->dev); - if (ret) - return ret; - - ret = clk_prepare_enable(dwmac->clk); - if (ret) - return ret; - - ret = dwmac->data->setup(dwmac); - if (ret) - clk_disable_unprepare(dwmac->clk); - - return ret; -} - -static void oxnas_dwmac_exit(struct platform_device *pdev, void *priv) -{ - struct oxnas_dwmac *dwmac = priv; - - clk_disable_unprepare(dwmac->clk); -} - -static int oxnas_dwmac_probe(struct platform_device *pdev) -{ - struct plat_stmmacenet_data *plat_dat; - struct stmmac_resources stmmac_res; - struct oxnas_dwmac *dwmac; - int ret; - - ret = stmmac_get_platform_resources(pdev, &stmmac_res); - if (ret) - return ret; - - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); - if (IS_ERR(plat_dat)) - return PTR_ERR(plat_dat); - - dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) { - ret = -ENOMEM; - goto err_remove_config_dt; - } - - dwmac->data = (const struct oxnas_dwmac_data *)of_device_get_match_data(&pdev->dev); - if (!dwmac->data) { - ret = -EINVAL; - goto err_remove_config_dt; - } - - dwmac->dev = &pdev->dev; - plat_dat->bsp_priv = dwmac; - plat_dat->init = oxnas_dwmac_init; - plat_dat->exit = oxnas_dwmac_exit; - - dwmac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, - 
"oxsemi,sys-ctrl"); - if (IS_ERR(dwmac->regmap)) { - dev_err(&pdev->dev, "failed to have sysctrl regmap\n"); - ret = PTR_ERR(dwmac->regmap); - goto err_remove_config_dt; - } - - dwmac->clk = devm_clk_get(&pdev->dev, "gmac"); - if (IS_ERR(dwmac->clk)) { - ret = PTR_ERR(dwmac->clk); - goto err_remove_config_dt; - } - - ret = oxnas_dwmac_init(pdev, plat_dat->bsp_priv); - if (ret) - goto err_remove_config_dt; - - ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (ret) - goto err_dwmac_exit; - - - return 0; - -err_dwmac_exit: - oxnas_dwmac_exit(pdev, plat_dat->bsp_priv); -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); - - return ret; -} - -static const struct oxnas_dwmac_data ox810se_dwmac_data = { - .setup = oxnas_dwmac_setup_ox810se, -}; - -static const struct oxnas_dwmac_data ox820_dwmac_data = { - .setup = oxnas_dwmac_setup_ox820, -}; - -static const struct of_device_id oxnas_dwmac_match[] = { - { - .compatible = "oxsemi,ox810se-dwmac", - .data = &ox810se_dwmac_data, - }, - { - .compatible = "oxsemi,ox820-dwmac", - .data = &ox820_dwmac_data, - }, - { } -}; -MODULE_DEVICE_TABLE(of, oxnas_dwmac_match); - -static struct platform_driver oxnas_dwmac_driver = { - .probe = oxnas_dwmac_probe, - .remove_new = stmmac_pltfr_remove, - .driver = { - .name = "oxnas-dwmac", - .pm = &stmmac_pltfr_pm_ops, - .of_match_table = oxnas_dwmac_match, - }, -}; -module_platform_driver(oxnas_dwmac_driver); - -MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>"); -MODULE_DESCRIPTION("Oxford Semiconductor OXNAS DWMAC glue layer"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index e62940414e54..31631e3f89d0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -3,11 +3,10 @@ #include <linux/module.h> #include <linux/of.h> -#include <linux/of_device.h> +#include <linux/of_net.h> #include <linux/platform_device.h> #include <linux/phy.h> #include <linux/phy/phy.h> -#include <linux/property.h> #include "stmmac.h" #include "stmmac_platform.h" @@ -35,6 +34,7 @@ #define RGMII_CONFIG_LOOPBACK_EN BIT(2) #define RGMII_CONFIG_PROG_SWAP BIT(1) #define RGMII_CONFIG_DDR_MODE BIT(0) +#define RGMII_CONFIG_SGMII_CLK_DVDR GENMASK(18, 10) /* SDCC_HC_REG_DLL_CONFIG fields */ #define SDCC_DLL_CONFIG_DLL_RST BIT(30) @@ -79,6 +79,8 @@ #define ETHQOS_MAC_CTRL_SPEED_MODE BIT(14) #define ETHQOS_MAC_CTRL_PORT_SEL BIT(15) +#define SGMII_10M_RX_CLK_DVDR 0x31 + struct ethqos_emac_por { unsigned int offset; unsigned int value; @@ -104,7 +106,7 @@ struct qcom_ethqos { struct clk *link_clk; struct phy *serdes_phy; unsigned int speed; - int phy_mode; + phy_interface_t phy_mode; const struct ethqos_emac_por *por; unsigned int num_por; @@ -599,6 +601,9 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos) return 0; } +/* On interface toggle MAC registers gets reset. 
+ * Configure MAC block for SGMII on ethernet phy link up + */ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos) { int val; @@ -618,6 +623,10 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos) case SPEED_10: val |= ETHQOS_MAC_CTRL_PORT_SEL; val &= ~ETHQOS_MAC_CTRL_SPEED_MODE; + rgmii_updatel(ethqos, RGMII_CONFIG_SGMII_CLK_DVDR, + FIELD_PREP(RGMII_CONFIG_SGMII_CLK_DVDR, + SGMII_10M_RX_CLK_DVDR), + RGMII_IO_MACRO_CONFIG); break; } @@ -631,7 +640,7 @@ static int ethqos_configure(struct qcom_ethqos *ethqos) return ethqos->configure_func(ethqos); } -static void ethqos_fix_mac_speed(void *priv, unsigned int speed) +static void ethqos_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode) { struct qcom_ethqos *ethqos = priv; @@ -694,6 +703,23 @@ static void ethqos_clks_disable(void *data) ethqos_clks_config(data, false); } +static void ethqos_ptp_clk_freq_config(struct stmmac_priv *priv) +{ + struct plat_stmmacenet_data *plat_dat = priv->plat; + int err; + + if (!plat_dat->clk_ptp_ref) + return; + + /* Max the PTP ref clock out to get the best resolution possible */ + err = clk_set_rate(plat_dat->clk_ptp_ref, ULONG_MAX); + if (err) + netdev_err(priv->dev, "Failed to max out clk_ptp_ref: %d\n", err); + plat_dat->clk_ptp_rate = clk_get_rate(plat_dat->clk_ptp_ref); + + netdev_dbg(priv->dev, "PTP rate %d\n", plat_dat->clk_ptp_rate); +} + static int qcom_ethqos_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -706,12 +732,13 @@ static int qcom_ethqos_probe(struct platform_device *pdev) ret = stmmac_get_platform_resources(pdev, &stmmac_res); if (ret) - return ret; + return dev_err_probe(dev, ret, + "Failed to get platform resources\n"); plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) { - dev_err(dev, "dt configuration failed\n"); - return PTR_ERR(plat_dat); + return dev_err_probe(dev, PTR_ERR(plat_dat), + "dt configuration failed\n"); } plat_dat->clks_config = ethqos_clks_config; @@ -720,7 +747,9 @@ static int qcom_ethqos_probe(struct platform_device *pdev) if (!ethqos) return -ENOMEM; - ethqos->phy_mode = device_get_phy_mode(dev); + ret = of_get_phy_mode(np, ðqos->phy_mode); + if (ret) + return dev_err_probe(dev, ret, "Failed to get phy mode\n"); switch (ethqos->phy_mode) { case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: @@ -731,16 +760,17 @@ static int qcom_ethqos_probe(struct platform_device *pdev) case PHY_INTERFACE_MODE_SGMII: ethqos->configure_func = ethqos_configure_sgmii; break; - case -ENODEV: - return -ENODEV; default: + dev_err(dev, "Unsupported phy mode %s\n", + phy_modes(ethqos->phy_mode)); return -EINVAL; } ethqos->pdev = pdev; ethqos->rgmii_base = devm_platform_ioremap_resource_byname(pdev, "rgmii"); if (IS_ERR(ethqos->rgmii_base)) - return PTR_ERR(ethqos->rgmii_base); + return dev_err_probe(dev, PTR_ERR(ethqos->rgmii_base), + "Failed to map rgmii resource\n"); ethqos->mac_base = stmmac_res.addr; @@ -752,7 +782,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev) ethqos->link_clk = devm_clk_get(dev, data->link_clk_name ?: "rgmii"); if (IS_ERR(ethqos->link_clk)) - return PTR_ERR(ethqos->link_clk); + return dev_err_probe(dev, PTR_ERR(ethqos->link_clk), + "Failed to get link_clk\n"); ret = ethqos_clks_config(ethqos, true); if (ret) @@ -764,7 +795,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev) ethqos->serdes_phy = devm_phy_optional_get(dev, "serdes"); if (IS_ERR(ethqos->serdes_phy)) - return PTR_ERR(ethqos->serdes_phy); + return 
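The new ethqos_ptp_clk_freq_config() above relies on a handy clk API idiom: passing ULONG_MAX to clk_set_rate() asks the clock tree for the highest rate it can grant, and clk_get_rate() then reports what was actually achieved — here used to maximize PTP timestamp resolution. The core of it:

        int err;

        /* Max the PTP ref clock out to get the best resolution possible */
        err = clk_set_rate(plat_dat->clk_ptp_ref, ULONG_MAX);
        if (err)
                netdev_err(priv->dev, "Failed to max out clk_ptp_ref: %d\n", err);
        plat_dat->clk_ptp_rate = clk_get_rate(plat_dat->clk_ptp_ref);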
dev_err_probe(dev, PTR_ERR(ethqos->serdes_phy), + "Failed to get serdes phy\n"); ethqos->speed = SPEED_1000; ethqos_update_link_clk(ethqos, SPEED_1000); @@ -773,14 +805,17 @@ static int qcom_ethqos_probe(struct platform_device *pdev) plat_dat->bsp_priv = ethqos; plat_dat->fix_mac_speed = ethqos_fix_mac_speed; plat_dat->dump_debug_regs = rgmii_dump; + plat_dat->ptp_clk_freq_config = ethqos_ptp_clk_freq_config; plat_dat->has_gmac4 = 1; if (ethqos->has_emac_ge_3) plat_dat->dwmac4_addrs = &data->dwmac4_addrs; plat_dat->pmt = 1; - plat_dat->tso_en = of_property_read_bool(np, "snps,tso"); + if (of_property_read_bool(np, "snps,tso")) + plat_dat->flags |= STMMAC_FLAG_TSO_EN; if (of_device_is_compatible(np, "qcom,qcs404-ethqos")) - plat_dat->rx_clk_runs_in_lpi = 1; - plat_dat->has_integrated_pcs = data->has_integrated_pcs; + plat_dat->flags |= STMMAC_FLAG_RX_CLK_RUNS_IN_LPI; + if (data->has_integrated_pcs) + plat_dat->flags |= STMMAC_FLAG_HAS_INTEGRATED_PCS; if (ethqos->serdes_phy) { plat_dat->serdes_powerup = qcom_ethqos_serdes_powerup; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index d81591b470a2..382e8de1255d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -14,8 +14,8 @@ #include <linux/of_net.h> #include <linux/gpio.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/of_gpio.h> -#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #include <linux/delay.h> @@ -1785,7 +1785,7 @@ static void rk_gmac_powerdown(struct rk_priv_data *gmac) gmac_clk_enable(gmac, false); } -static void rk_fix_speed(void *priv, unsigned int speed) +static void rk_fix_speed(void *priv, unsigned int speed, unsigned int mode) { struct rk_priv_data *bsp_priv = priv; struct device *dev = &bsp_priv->pdev->dev; @@ -1824,7 +1824,7 @@ static int rk_gmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); @@ -1836,18 +1836,16 @@ static int rk_gmac_probe(struct platform_device *pdev) plat_dat->fix_mac_speed = rk_fix_speed; plat_dat->bsp_priv = rk_gmac_setup(pdev, plat_dat, data); - if (IS_ERR(plat_dat->bsp_priv)) { - ret = PTR_ERR(plat_dat->bsp_priv); - goto err_remove_config_dt; - } + if (IS_ERR(plat_dat->bsp_priv)) + return PTR_ERR(plat_dat->bsp_priv); ret = rk_gmac_clk_init(plat_dat); if (ret) - goto err_remove_config_dt; + return ret; ret = rk_gmac_powerup(plat_dat->bsp_priv); if (ret) - goto err_remove_config_dt; + return ret; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) @@ -1857,8 +1855,6 @@ static int rk_gmac_probe(struct platform_device *pdev) err_gmac_powerdown: rk_gmac_powerdown(plat_dat->bsp_priv); -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 6267bcb60206..ba2ce776bd4d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -61,7 +61,7 @@ struct socfpga_dwmac { struct mdio_device *pcs_mdiodev; }; -static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed) +static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode) { struct socfpga_dwmac *dwmac = (struct 
socfpga_dwmac *)priv; void __iomem *splitter_base = dwmac->splitter_base; @@ -236,7 +236,7 @@ static int socfpga_get_plat_phymode(struct socfpga_dwmac *dwmac) struct net_device *ndev = dev_get_drvdata(dwmac->dev); struct stmmac_priv *priv = netdev_priv(ndev); - return priv->plat->interface; + return priv->plat->mac_interface; } static void socfpga_sgmii_config(struct socfpga_dwmac *dwmac, bool enable) @@ -400,21 +400,19 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) { - ret = -ENOMEM; - goto err_remove_config_dt; - } + if (!dwmac) + return -ENOMEM; dwmac->stmmac_ocp_rst = devm_reset_control_get_optional(dev, "stmmaceth-ocp"); if (IS_ERR(dwmac->stmmac_ocp_rst)) { ret = PTR_ERR(dwmac->stmmac_ocp_rst); dev_err(dev, "error getting reset control of ocp %d\n", ret); - goto err_remove_config_dt; + return ret; } reset_control_deassert(dwmac->stmmac_ocp_rst); @@ -422,7 +420,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) ret = socfpga_dwmac_parse_data(dwmac, dev); if (ret) { dev_err(dev, "Unable to parse OF data\n"); - goto err_remove_config_dt; + return ret; } dwmac->ops = ops; @@ -431,7 +429,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) - goto err_remove_config_dt; + return ret; ndev = platform_get_drvdata(pdev); stpriv = netdev_priv(ndev); @@ -492,8 +490,6 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) err_dvr_remove: stmmac_dvr_remove(&pdev->dev); -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c index d3a39d2fb3a9..5d630affb4d1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c @@ -7,8 +7,10 @@ * */ +#include <linux/mod_devicetable.h> +#include <linux/platform_device.h> +#include <linux/property.h> #include <linux/mfd/syscon.h> -#include <linux/of_device.h> #include <linux/regmap.h> #include "stmmac_platform.h" @@ -22,7 +24,7 @@ struct starfive_dwmac { struct clk *clk_tx; }; -static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed) +static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode) { struct starfive_dwmac *dwmac = priv; unsigned long rate; @@ -58,7 +60,7 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat) unsigned int mode; int err; - switch (plat_dat->interface) { + switch (plat_dat->mac_interface) { case PHY_INTERFACE_MODE_RMII: mode = STARFIVE_DWMAC_PHY_INFT_RMII; break; @@ -70,7 +72,7 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat) default: dev_err(dwmac->dev, "unsupported interface %d\n", - plat_dat->interface); + plat_dat->mac_interface); return -EINVAL; } @@ -103,7 +105,7 @@ static int starfive_dwmac_probe(struct platform_device *pdev) return dev_err_probe(&pdev->dev, err, "failed to get resources\n"); - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat), "dt configuration failed\n"); @@ -139,13 +141,7 @@ static int 
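The qcom and starfive probes above also standardize error reporting on dev_err_probe(), which logs the message, records it for the devices_deferred debugfs entry when the error is -EPROBE_DEFER (staying silent in the log in that case), and returns the error code in a single expression:

        ethqos->link_clk = devm_clk_get(dev, data->link_clk_name ?: "rgmii");
        if (IS_ERR(ethqos->link_clk))
                return dev_err_probe(dev, PTR_ERR(ethqos->link_clk),
                                     "Failed to get link_clk\n");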
starfive_dwmac_probe(struct platform_device *pdev) if (err) return err; - err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (err) { - stmmac_remove_config_dt(pdev, plat_dat); - return err; - } - - return 0; + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); } static const struct of_device_id starfive_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index dcbb17c4f07a..4445cddc4cbe 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -17,7 +17,6 @@ #include <linux/regmap.h> #include <linux/clk.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_net.h> #include "stmmac_platform.h" @@ -104,11 +103,11 @@ struct sti_dwmac { struct regmap *regmap; bool gmac_en; u32 speed; - void (*fix_retime_src)(void *priv, unsigned int speed); + void (*fix_retime_src)(void *priv, unsigned int speed, unsigned int mode); }; struct sti_dwmac_of_data { - void (*fix_retime_src)(void *priv, unsigned int speed); + void (*fix_retime_src)(void *priv, unsigned int speed, unsigned int mode); }; static u32 phy_intf_sels[] = { @@ -136,7 +135,7 @@ static u32 stih4xx_tx_retime_val[] = { | STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK, }; -static void stih4xx_fix_retime_src(void *priv, u32 spd) +static void stih4xx_fix_retime_src(void *priv, u32 spd, unsigned int mode) { struct sti_dwmac *dwmac = priv; u32 src = dwmac->tx_retime_src; @@ -188,7 +187,7 @@ static int sti_dwmac_set_mode(struct sti_dwmac *dwmac) val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII; regmap_update_bits(regmap, reg, ENMII_MASK, val); - dwmac->fix_retime_src(dwmac, dwmac->speed); + dwmac->fix_retime_src(dwmac, dwmac->speed, 0); return 0; } @@ -274,20 +273,18 @@ static int sti_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) { - ret = -ENOMEM; - goto err_remove_config_dt; - } + if (!dwmac) + return -ENOMEM; ret = sti_dwmac_parse_data(dwmac, pdev); if (ret) { dev_err(&pdev->dev, "Unable to parse OF data\n"); - goto err_remove_config_dt; + return ret; } dwmac->fix_retime_src = data->fix_retime_src; @@ -297,7 +294,7 @@ static int sti_dwmac_probe(struct platform_device *pdev) ret = clk_prepare_enable(dwmac->clk); if (ret) - goto err_remove_config_dt; + return ret; ret = sti_dwmac_set_mode(dwmac); if (ret) @@ -311,8 +308,6 @@ static int sti_dwmac_probe(struct platform_device *pdev) disable_clk: clk_disable_unprepare(dwmac->clk); -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index bdb4de59a672..c92dfc4ecf57 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -11,7 +11,6 @@ #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_net.h> #include <linux/phy.h> #include <linux/platform_device.h> @@ -99,69 +98,63 @@ struct stm32_dwmac { struct stm32_ops { int (*set_mode)(struct plat_stmmacenet_data *plat_dat); - int (*clk_prepare)(struct stm32_dwmac *dwmac, bool prepare); int (*suspend)(struct stm32_dwmac *dwmac); void (*resume)(struct 
stm32_dwmac *dwmac); int (*parse_data)(struct stm32_dwmac *dwmac, struct device *dev); u32 syscfg_eth_mask; + bool clk_rx_enable_in_suspend; }; -static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat) +static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume) { - struct stm32_dwmac *dwmac = plat_dat->bsp_priv; int ret; - if (dwmac->ops->set_mode) { - ret = dwmac->ops->set_mode(plat_dat); - if (ret) - return ret; - } - ret = clk_prepare_enable(dwmac->clk_tx); if (ret) - return ret; + goto err_clk_tx; - if (!dwmac->dev->power.is_suspended) { + if (!dwmac->ops->clk_rx_enable_in_suspend || !resume) { ret = clk_prepare_enable(dwmac->clk_rx); - if (ret) { - clk_disable_unprepare(dwmac->clk_tx); - return ret; - } + if (ret) + goto err_clk_rx; } - if (dwmac->ops->clk_prepare) { - ret = dwmac->ops->clk_prepare(dwmac, true); - if (ret) { - clk_disable_unprepare(dwmac->clk_rx); - clk_disable_unprepare(dwmac->clk_tx); - } + ret = clk_prepare_enable(dwmac->syscfg_clk); + if (ret) + goto err_syscfg_clk; + + if (dwmac->enable_eth_ck) { + ret = clk_prepare_enable(dwmac->clk_eth_ck); + if (ret) + goto err_clk_eth_ck; } return ret; + +err_clk_eth_ck: + clk_disable_unprepare(dwmac->syscfg_clk); +err_syscfg_clk: + if (!dwmac->ops->clk_rx_enable_in_suspend || !resume) + clk_disable_unprepare(dwmac->clk_rx); +err_clk_rx: + clk_disable_unprepare(dwmac->clk_tx); +err_clk_tx: + return ret; } -static int stm32mp1_clk_prepare(struct stm32_dwmac *dwmac, bool prepare) +static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat, bool resume) { - int ret = 0; + struct stm32_dwmac *dwmac = plat_dat->bsp_priv; + int ret; - if (prepare) { - ret = clk_prepare_enable(dwmac->syscfg_clk); + if (dwmac->ops->set_mode) { + ret = dwmac->ops->set_mode(plat_dat); if (ret) return ret; - if (dwmac->enable_eth_ck) { - ret = clk_prepare_enable(dwmac->clk_eth_ck); - if (ret) { - clk_disable_unprepare(dwmac->syscfg_clk); - return ret; - } - } - } else { - clk_disable_unprepare(dwmac->syscfg_clk); - if (dwmac->enable_eth_ck) - clk_disable_unprepare(dwmac->clk_eth_ck); } - return ret; + + return stm32_dwmac_clk_enable(dwmac, resume); } static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat) @@ -172,7 +165,7 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat) clk_rate = clk_get_rate(dwmac->clk_eth_ck); dwmac->enable_eth_ck = false; - switch (plat_dat->interface) { + switch (plat_dat->mac_interface) { case PHY_INTERFACE_MODE_MII: if (clk_rate == ETH_CK_F_25M && dwmac->ext_phyclk) dwmac->enable_eth_ck = true; @@ -211,7 +204,7 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat) break; default: pr_debug("SYSCFG init : Do not manage %d interface\n", - plat_dat->interface); + plat_dat->mac_interface); /* Do not manage others interfaces */ return -EINVAL; } @@ -231,7 +224,7 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat) u32 reg = dwmac->mode_reg; int val; - switch (plat_dat->interface) { + switch (plat_dat->mac_interface) { case PHY_INTERFACE_MODE_MII: val = SYSCFG_MCU_ETH_SEL_MII; pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n"); @@ -242,7 +235,7 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat) break; default: pr_debug("SYSCFG init : Do not manage %d interface\n", - plat_dat->interface); + plat_dat->mac_interface); /* Do not manage others interfaces */ return -EINVAL; } @@ -251,13 +244,15 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat) dwmac->ops->syscfg_eth_mask, val << 23); } -static void 
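stm32_dwmac_clk_enable() above absorbs the old stm32mp1_clk_prepare() helper into a single goto-based unwind ladder — the standard kernel idiom for enabling several clocks, where each failure point releases exactly what was already acquired. Its skeleton, with the conditional clk_rx and eth_ck handling elided:

        ret = clk_prepare_enable(dwmac->clk_tx);
        if (ret)
                goto err_clk_tx;

        ret = clk_prepare_enable(dwmac->clk_rx);
        if (ret)
                goto err_clk_rx;

        ret = clk_prepare_enable(dwmac->syscfg_clk);
        if (ret)
                goto err_syscfg_clk;

        return 0;

err_syscfg_clk:
        clk_disable_unprepare(dwmac->clk_rx);
err_clk_rx:
        clk_disable_unprepare(dwmac->clk_tx);
err_clk_tx:
        return ret;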
stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac) +static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac, bool suspend) { clk_disable_unprepare(dwmac->clk_tx); - clk_disable_unprepare(dwmac->clk_rx); + if (!dwmac->ops->clk_rx_enable_in_suspend || !suspend) + clk_disable_unprepare(dwmac->clk_rx); - if (dwmac->ops->clk_prepare) - dwmac->ops->clk_prepare(dwmac, false); + clk_disable_unprepare(dwmac->syscfg_clk); + if (dwmac->enable_eth_ck) + clk_disable_unprepare(dwmac->clk_eth_ck); } static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac, @@ -371,21 +366,18 @@ static int stm32_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) { - ret = -ENOMEM; - goto err_remove_config_dt; - } + if (!dwmac) + return -ENOMEM; data = of_device_get_match_data(&pdev->dev); if (!data) { dev_err(&pdev->dev, "no of match data provided\n"); - ret = -EINVAL; - goto err_remove_config_dt; + return -EINVAL; } dwmac->ops = data; @@ -394,14 +386,14 @@ static int stm32_dwmac_probe(struct platform_device *pdev) ret = stm32_dwmac_parse_data(dwmac, &pdev->dev); if (ret) { dev_err(&pdev->dev, "Unable to parse OF data\n"); - goto err_remove_config_dt; + return ret; } plat_dat->bsp_priv = dwmac; - ret = stm32_dwmac_init(plat_dat); + ret = stm32_dwmac_init(plat_dat, false); if (ret) - goto err_remove_config_dt; + return ret; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) @@ -410,9 +402,7 @@ static int stm32_dwmac_probe(struct platform_device *pdev) return 0; err_clk_disable: - stm32_dwmac_clk_disable(dwmac); -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); + stm32_dwmac_clk_disable(dwmac, false); return ret; } @@ -425,7 +415,7 @@ static void stm32_dwmac_remove(struct platform_device *pdev) stmmac_dvr_remove(&pdev->dev); - stm32_dwmac_clk_disable(priv->plat->bsp_priv); + stm32_dwmac_clk_disable(dwmac, false); if (dwmac->irq_pwr_wakeup >= 0) { dev_pm_clear_wake_irq(&pdev->dev); @@ -435,18 +425,7 @@ static void stm32_dwmac_remove(struct platform_device *pdev) static int stm32mp1_suspend(struct stm32_dwmac *dwmac) { - int ret = 0; - - ret = clk_prepare_enable(dwmac->clk_ethstp); - if (ret) - return ret; - - clk_disable_unprepare(dwmac->clk_tx); - clk_disable_unprepare(dwmac->syscfg_clk); - if (dwmac->enable_eth_ck) - clk_disable_unprepare(dwmac->clk_eth_ck); - - return ret; + return clk_prepare_enable(dwmac->clk_ethstp); } static void stm32mp1_resume(struct stm32_dwmac *dwmac) @@ -454,14 +433,6 @@ static void stm32mp1_resume(struct stm32_dwmac *dwmac) clk_disable_unprepare(dwmac->clk_ethstp); } -static int stm32mcu_suspend(struct stm32_dwmac *dwmac) -{ - clk_disable_unprepare(dwmac->clk_tx); - clk_disable_unprepare(dwmac->clk_rx); - - return 0; -} - #ifdef CONFIG_PM_SLEEP static int stm32_dwmac_suspend(struct device *dev) { @@ -472,6 +443,10 @@ static int stm32_dwmac_suspend(struct device *dev) int ret; ret = stmmac_suspend(dev); + if (ret) + return ret; + + stm32_dwmac_clk_disable(dwmac, true); if (dwmac->ops->suspend) ret = dwmac->ops->suspend(dwmac); @@ -489,7 +464,7 @@ static int stm32_dwmac_resume(struct device *dev) if (dwmac->ops->resume) dwmac->ops->resume(dwmac); - ret = stm32_dwmac_init(priv->plat); + ret = stm32_dwmac_init(priv->plat, true); if (ret) return ret; @@ -504,17 +479,16 @@ static 
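The reworked stm32 suspend path above follows from that refactor: stmmac_suspend() runs first, then the glue clocks are dropped with suspend=true so clk_rx can stay running on SoCs whose ops set clk_rx_enable_in_suspend (the MP1 data below does); resume re-runs stm32_dwmac_init() with resume=true to restore the same set. The suspend half:

        ret = stmmac_suspend(dev);
        if (ret)
                return ret;

        /* suspend=true: clk_rx stays on when clk_rx_enable_in_suspend is set */
        stm32_dwmac_clk_disable(dwmac, true);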
SIMPLE_DEV_PM_OPS(stm32_dwmac_pm_ops, static struct stm32_ops stm32mcu_dwmac_data = { .set_mode = stm32mcu_set_mode, - .suspend = stm32mcu_suspend, .syscfg_eth_mask = SYSCFG_MCU_ETH_MASK }; static struct stm32_ops stm32mp1_dwmac_data = { .set_mode = stm32mp1_set_mode, - .clk_prepare = stm32mp1_clk_prepare, .suspend = stm32mp1_suspend, .resume = stm32mp1_resume, .parse_data = stm32mp1_parse_data, - .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK + .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK, + .clk_rx_enable_in_suspend = true }; static const struct of_device_id stm32_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 1e714380d125..137741b94122 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -11,9 +11,10 @@ #include <linux/mdio-mux.h> #include <linux/mfd/syscon.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/of_mdio.h> #include <linux/of_net.h> +#include <linux/of_platform.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> @@ -440,8 +441,10 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv, struct stmmac_extra_stats *x, u32 chan, u32 dir) { - u32 v; + struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; int ret = 0; + u32 v; v = readl(ioaddr + EMAC_INT_STA); @@ -452,7 +455,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv, if (v & EMAC_TX_INT) { ret |= handle_tx; - x->tx_normal_irq_n++; + u64_stats_update_begin(&txq_stats->syncp); + txq_stats->tx_normal_irq_n++; + u64_stats_update_end(&txq_stats->syncp); } if (v & EMAC_TX_DMA_STOP_INT) @@ -474,7 +479,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv, if (v & EMAC_RX_INT) { ret |= handle_rx; - x->rx_normal_irq_n++; + u64_stats_update_begin(&rxq_stats->syncp); + rxq_stats->rx_normal_irq_n++; + u64_stats_update_end(&rxq_stats->syncp); } if (v & EMAC_RX_BUF_UA_INT) @@ -1009,7 +1016,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev, if (gmac->variant->support_rmii) reg &= ~SYSCON_RMII_EN; - switch (plat->interface) { + switch (plat->mac_interface) { case PHY_INTERFACE_MODE_MII: /* default */ break; @@ -1024,7 +1031,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev, break; default: dev_err(dev, "Unsupported interface mode: %s", - phy_modes(plat->interface)); + phy_modes(plat->mac_interface)); return -EINVAL; } @@ -1217,17 +1224,17 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) if (ret) return -EINVAL; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); /* platform data specifying hardware features and callbacks. * hardware features were copied from Allwinner drivers. 
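The sun8i interrupt hunk below replaces the shared extra-stats counters with per-queue ones bumped inside a u64_stats section, so 64-bit counters stay tear-free on 32-bit SoCs where a plain 64-bit increment is not atomic. The TX side of the pattern:

        struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];

        u64_stats_update_begin(&txq_stats->syncp);
        txq_stats->tx_normal_irq_n++;
        u64_stats_update_end(&txq_stats->syncp);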
*/ - plat_dat->interface = interface; + plat_dat->mac_interface = interface; plat_dat->rx_coe = STMMAC_RX_COE_TYPE2; plat_dat->tx_coe = 1; - plat_dat->has_sun8i = true; + plat_dat->flags |= STMMAC_FLAG_HAS_SUN8I; plat_dat->bsp_priv = gmac; plat_dat->init = sun8i_dwmac_init; plat_dat->exit = sun8i_dwmac_exit; @@ -1237,7 +1244,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat); if (ret) - goto dwmac_deconfig; + return ret; ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv); if (ret) @@ -1288,8 +1295,6 @@ dwmac_exit: sun8i_dwmac_exit(pdev, gmac); dwmac_syscon: sun8i_dwmac_unset_syscon(gmac); -dwmac_deconfig: - stmmac_remove_config_dt(pdev, plat_dat); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index 50963e91c347..2653a9f0958c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -72,7 +72,7 @@ static void sun7i_gmac_exit(struct platform_device *pdev, void *priv) regulator_disable(gmac->regulator); } -static void sun7i_fix_speed(void *priv, unsigned int speed) +static void sun7i_fix_speed(void *priv, unsigned int speed, unsigned int mode) { struct sunxi_priv_data *gmac = priv; @@ -108,36 +108,31 @@ static int sun7i_gmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL); - if (!gmac) { - ret = -ENOMEM; - goto err_remove_config_dt; - } + if (!gmac) + return -ENOMEM; ret = of_get_phy_mode(dev->of_node, &gmac->interface); if (ret && ret != -ENODEV) { dev_err(dev, "Can't get phy-mode\n"); - goto err_remove_config_dt; + return ret; } gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx"); if (IS_ERR(gmac->tx_clk)) { dev_err(dev, "could not get tx clock\n"); - ret = PTR_ERR(gmac->tx_clk); - goto err_remove_config_dt; + return PTR_ERR(gmac->tx_clk); } /* Optional regulator for PHY */ gmac->regulator = devm_regulator_get_optional(dev, "phy"); if (IS_ERR(gmac->regulator)) { - if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER) { - ret = -EPROBE_DEFER; - goto err_remove_config_dt; - } + if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER) + return -EPROBE_DEFER; dev_info(dev, "no regulator found\n"); gmac->regulator = NULL; } @@ -155,7 +150,7 @@ static int sun7i_gmac_probe(struct platform_device *pdev) ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv); if (ret) - goto err_remove_config_dt; + return ret; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) @@ -165,8 +160,6 @@ static int sun7i_gmac_probe(struct platform_device *pdev) err_gmac_exit: sun7i_gmac_exit(pdev, plat_dat->bsp_priv); -err_remove_config_dt: - stmmac_remove_config_dt(pdev, plat_dat); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c index fbb0ccf84afc..362f85136c3e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only #include <linux/platform_device.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/module.h> #include <linux/stmmac.h> #include <linux/clk.h> @@ -284,14 +284,14 @@ static int tegra_mgbe_probe(struct platform_device *pdev) if (err < 0) goto disable_clks; - plat = 
stmmac_probe_config_dt(pdev, res.mac); + plat = devm_stmmac_probe_config_dt(pdev, res.mac); if (IS_ERR(plat)) { err = PTR_ERR(plat); goto disable_clks; } plat->has_xgmac = 1; - plat->tso_en = 1; + plat->flags |= STMMAC_FLAG_TSO_EN; plat->pmt = 1; plat->bsp_priv = mgbe; @@ -303,7 +303,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev) GFP_KERNEL); if (!plat->mdio_bus_data) { err = -ENOMEM; - goto remove; + goto disable_clks; } } @@ -321,7 +321,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev) 500, 500 * 2000); if (err < 0) { dev_err(mgbe->dev, "timeout waiting for TX lane to become enabled\n"); - goto remove; + goto disable_clks; } plat->serdes_powerup = mgbe_uphy_lane_bringup_serdes_up; @@ -338,16 +338,14 @@ static int tegra_mgbe_probe(struct platform_device *pdev) /* Program SID */ writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL); - plat->serdes_up_after_phy_linkup = 1; + plat->flags |= STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP; err = stmmac_dvr_probe(&pdev->dev, plat, &res); if (err < 0) - goto remove; + goto disable_clks; return 0; -remove: - stmmac_remove_config_dt(pdev, plat); disable_clks: clk_bulk_disable_unprepare(ARRAY_SIZE(mgbe_clks), mgbe->clks); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c index acbb284be174..a5a5cfa989c6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c @@ -6,7 +6,8 @@ */ #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/platform_device.h> #include <linux/of_net.h> #include <linux/stmmac.h> @@ -53,7 +54,7 @@ struct visconti_eth { spinlock_t lock; /* lock to protect register update */ }; -static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed) +static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode) { struct visconti_eth *dwmac = priv; struct net_device *netdev = dev_get_drvdata(dwmac->dev); @@ -219,15 +220,13 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac); + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) { - ret = -ENOMEM; - goto remove_config; - } + if (!dwmac) + return -ENOMEM; spin_lock_init(&dwmac->lock); dwmac->reg = stmmac_res.addr; @@ -237,7 +236,7 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev) ret = visconti_eth_clock_probe(pdev, plat_dat); if (ret) - goto remove_config; + return ret; visconti_eth_init_hw(pdev, plat_dat); @@ -251,22 +250,14 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev) remove: visconti_eth_clock_remove(pdev); -remove_config: - stmmac_remove_config_dt(pdev, plat_dat); return ret; } static void visconti_eth_dwmac_remove(struct platform_device *pdev) { - struct net_device *ndev = platform_get_drvdata(pdev); - struct stmmac_priv *priv = netdev_priv(ndev); - stmmac_pltfr_remove(pdev); - visconti_eth_clock_remove(pdev); - - stmmac_remove_config_dt(pdev, priv->plat); } static const struct of_device_id visconti_eth_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index 1c32b1788f02..dea270f60cc3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c @@ -82,29 +82,24 @@ static void dwmac100_dump_dma_regs(struct stmmac_priv *priv, } /* DMA controller has two counters to track the number of the missed frames. */ -static void dwmac100_dma_diagnostic_fr(struct net_device_stats *stats, - struct stmmac_extra_stats *x, +static void dwmac100_dma_diagnostic_fr(struct stmmac_extra_stats *x, void __iomem *ioaddr) { u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR); if (unlikely(csr8)) { if (csr8 & DMA_MISSED_FRAME_OVE) { - stats->rx_over_errors += 0x800; x->rx_overflow_cntr += 0x800; } else { unsigned int ove_cntr; ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17); - stats->rx_over_errors += ove_cntr; x->rx_overflow_cntr += ove_cntr; } if (csr8 & DMA_MISSED_FRAME_OVE_M) { - stats->rx_missed_errors += 0xffff; x->rx_missed_cntr += 0xffff; } else { unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR); - stats->rx_missed_errors += miss_f; x->rx_missed_cntr += miss_f; } } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 03b1c5a97826..c6ff1fa0e04d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -68,6 +68,11 @@ static void dwmac4_core_init(struct mac_device_info *hw, init_waitqueue_head(&priv->tstamp_busy_wait); } +static void dwmac4_phylink_get_caps(struct stmmac_priv *priv) +{ + priv->phylink_config.mac_capabilities |= MAC_2500FD; +} + static void dwmac4_rx_queue_enable(struct mac_device_info *hw, u8 mode, u32 queue) { @@ -1131,6 +1136,7 @@ static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no, const struct stmmac_ops dwmac4_ops = { .core_init = dwmac4_core_init, + .phylink_get_caps = dwmac4_phylink_get_caps, .set_mac = stmmac_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, .rx_queue_enable = dwmac4_rx_queue_enable, @@ -1173,6 +1179,7 @@ const struct stmmac_ops dwmac4_ops = { const struct stmmac_ops dwmac410_ops = { .core_init = dwmac4_core_init, + .phylink_get_caps = dwmac4_phylink_get_caps, .set_mac = stmmac_dwmac4_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, .rx_queue_enable = dwmac4_rx_queue_enable, @@ -1221,6 +1228,7 @@ const struct stmmac_ops dwmac410_ops = { const struct stmmac_ops dwmac510_ops = { .core_init = dwmac4_core_init, + .phylink_get_caps = dwmac4_phylink_get_caps, .set_mac = stmmac_dwmac4_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, .rx_queue_enable = dwmac4_rx_queue_enable, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 6a011d8633e8..89a14084c611 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -13,8 +13,7 @@ #include "dwmac4.h" #include "dwmac4_descs.h" -static int dwmac4_wrback_get_tx_status(struct net_device_stats *stats, - struct stmmac_extra_stats *x, +static int dwmac4_wrback_get_tx_status(struct stmmac_extra_stats *x, struct dma_desc *p, void __iomem *ioaddr) { @@ -40,15 +39,13 @@ static int dwmac4_wrback_get_tx_status(struct net_device_stats *stats, x->tx_frame_flushed++; if (unlikely(tdes3 & TDES3_LOSS_CARRIER)) { x->tx_losscarrier++; - stats->tx_carrier_errors++; } if (unlikely(tdes3 & TDES3_NO_CARRIER)) { x->tx_carrier++; - stats->tx_carrier_errors++; } if (unlikely((tdes3 & TDES3_LATE_COLLISION) || (tdes3 & TDES3_EXCESSIVE_COLLISION))) - stats->collisions += + x->tx_collision += (tdes3 & TDES3_COLLISION_COUNT_MASK) >> TDES3_COLLISION_COUNT_SHIFT; @@ -73,8 +70,7 @@ 
static int dwmac4_wrback_get_tx_status(struct net_device_stats *stats, return ret; } -static int dwmac4_wrback_get_rx_status(struct net_device_stats *stats, - struct stmmac_extra_stats *x, +static int dwmac4_wrback_get_rx_status(struct stmmac_extra_stats *x, struct dma_desc *p) { unsigned int rdes1 = le32_to_cpu(p->des1); @@ -93,7 +89,7 @@ static int dwmac4_wrback_get_rx_status(struct net_device_stats *stats, if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) { if (unlikely(rdes3 & RDES3_GIANT_PACKET)) - stats->rx_length_errors++; + x->rx_length++; if (unlikely(rdes3 & RDES3_OVERFLOW_ERROR)) x->rx_gmac_overflow++; @@ -103,10 +99,8 @@ static int dwmac4_wrback_get_rx_status(struct net_device_stats *stats, if (unlikely(rdes3 & RDES3_RECEIVE_ERROR)) x->rx_mii++; - if (unlikely(rdes3 & RDES3_CRC_ERROR)) { + if (unlikely(rdes3 & RDES3_CRC_ERROR)) x->rx_crc_errors++; - stats->rx_crc_errors++; - } if (unlikely(rdes3 & RDES3_DRIBBLE_ERROR)) x->dribbling_bit++; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c index 03ceb6a94073..9470d3fd2ded 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c @@ -171,6 +171,8 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs; u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, chan)); u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan)); + struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; int ret = 0; if (dir == DMA_DIR_RX) @@ -198,18 +200,19 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, } } /* TX/RX NORMAL interrupts */ - if (likely(intr_status & DMA_CHAN_STATUS_NIS)) - x->normal_irq_n++; if (likely(intr_status & DMA_CHAN_STATUS_RI)) { - x->rx_normal_irq_n++; - x->rxq_stats[chan].rx_normal_irq_n++; + u64_stats_update_begin(&rxq_stats->syncp); + rxq_stats->rx_normal_irq_n++; + u64_stats_update_end(&rxq_stats->syncp); ret |= handle_rx; } if (likely(intr_status & DMA_CHAN_STATUS_TI)) { - x->tx_normal_irq_n++; - x->txq_stats[chan].tx_normal_irq_n++; + u64_stats_update_begin(&txq_stats->syncp); + txq_stats->tx_normal_irq_n++; + u64_stats_update_end(&txq_stats->syncp); ret |= handle_tx; } + if (unlikely(intr_status & DMA_CHAN_STATUS_TBU)) ret |= handle_tx; if (unlikely(intr_status & DMA_CHAN_STATUS_ERI)) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c index e95d35f1e5a0..8fd167501fa0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c @@ -710,28 +710,22 @@ void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev, } } -void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq, +void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, + u32 num_txq, u32 num_rxq, bool enable) { u32 value; - if (!enable) { - value = readl(ioaddr + MAC_FPE_CTRL_STS); - - value &= ~EFPE; - - writel(value, ioaddr + MAC_FPE_CTRL_STS); - return; + if (enable) { + cfg->fpe_csr = EFPE; + value = readl(ioaddr + GMAC_RXQ_CTRL1); + value &= ~GMAC_RXQCTRL_FPRQ; + value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT; + writel(value, ioaddr + GMAC_RXQ_CTRL1); + } else { + cfg->fpe_csr = 0; } - - value = readl(ioaddr + GMAC_RXQ_CTRL1); - value &= ~GMAC_RXQCTRL_FPRQ; - value |= (num_rxq - 1) << 
GMAC_RXQCTRL_FPRQ_SHIFT; - writel(value, ioaddr + GMAC_RXQ_CTRL1); - - value = readl(ioaddr + MAC_FPE_CTRL_STS); - value |= EFPE; - writel(value, ioaddr + MAC_FPE_CTRL_STS); + writel(cfg->fpe_csr, ioaddr + MAC_FPE_CTRL_STS); } int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev) @@ -741,6 +735,9 @@ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev) status = FPE_EVENT_UNKNOWN; + /* Reads from the MAC_FPE_CTRL_STS register should only be performed + * here, since the status flags of MAC_FPE_CTRL_STS are "clear on read" + */ value = readl(ioaddr + MAC_FPE_CTRL_STS); if (value & TRSP) { @@ -766,19 +763,15 @@ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev) return status; } -void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, enum stmmac_mpacket_type type) +void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, + enum stmmac_mpacket_type type) { - u32 value; + u32 value = cfg->fpe_csr; - value = readl(ioaddr + MAC_FPE_CTRL_STS); - - if (type == MPACKET_VERIFY) { - value &= ~SRSP; + if (type == MPACKET_VERIFY) value |= SVER; - } else { - value &= ~SVER; + else if (type == MPACKET_RESPONSE) value |= SRSP; - } writel(value, ioaddr + MAC_FPE_CTRL_STS); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h index 53c138d0ff48..34e620790eb3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h @@ -153,9 +153,11 @@ int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg, unsigned int ptp_rate); void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev, struct stmmac_extra_stats *x, u32 txqcnt); -void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq, +void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, + u32 num_txq, u32 num_rxq, bool enable); void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, + struct stmmac_fpe_cfg *cfg, enum stmmac_mpacket_type type); int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index 0b6f999a8305..7907d62d3437 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -10,6 +10,7 @@ #include <linux/iopoll.h> #include "common.h" #include "dwmac_dma.h" +#include "stmmac.h" #define GMAC_HI_REG_AE 0x80000000 @@ -161,6 +162,8 @@ static void show_rx_process_state(unsigned int status) int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, struct stmmac_extra_stats *x, u32 chan, u32 dir) { + struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; int ret = 0; /* read the status register (CSR5) */ u32 intr_status = readl(ioaddr + DMA_STATUS); @@ -208,17 +211,20 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, } /* TX/RX NORMAL interrupts */ if (likely(intr_status & DMA_STATUS_NIS)) { - x->normal_irq_n++; if (likely(intr_status & DMA_STATUS_RI)) { u32 value = readl(ioaddr + DMA_INTR_ENA); /* to schedule NAPI on real RIE event. 
*/ if (likely(value & DMA_INTR_ENA_RIE)) { - x->rx_normal_irq_n++; + u64_stats_update_begin(&rxq_stats->syncp); + rxq_stats->rx_normal_irq_n++; + u64_stats_update_end(&rxq_stats->syncp); ret |= handle_rx; } } if (likely(intr_status & DMA_STATUS_TI)) { - x->tx_normal_irq_n++; + u64_stats_update_begin(&txq_stats->syncp); + txq_stats->tx_normal_irq_n++; + u64_stats_update_end(&txq_stats->syncp); ret |= handle_tx; } if (unlikely(intr_status & DMA_STATUS_ERI)) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index 1913385df685..a4e8b498dea9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -74,8 +74,20 @@ #define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2) #define XGMAC_RXQEN_SHIFT(x) ((x) * 2) #define XGMAC_RXQ_CTRL1 0x000000a4 +#define XGMAC_AVCPQ GENMASK(31, 28) +#define XGMAC_AVCPQ_SHIFT 28 +#define XGMAC_PTPQ GENMASK(27, 24) +#define XGMAC_PTPQ_SHIFT 24 +#define XGMAC_TACPQE BIT(23) +#define XGMAC_DCBCPQ GENMASK(19, 16) +#define XGMAC_DCBCPQ_SHIFT 16 +#define XGMAC_MCBCQEN BIT(15) +#define XGMAC_MCBCQ GENMASK(11, 8) +#define XGMAC_MCBCQ_SHIFT 8 #define XGMAC_RQ GENMASK(7, 4) #define XGMAC_RQ_SHIFT 4 +#define XGMAC_UPQ GENMASK(3, 0) +#define XGMAC_UPQ_SHIFT 0 #define XGMAC_RXQ_CTRL2 0x000000a8 #define XGMAC_RXQ_CTRL3 0x000000ac #define XGMAC_PSRQ(x) GENMASK((x) * 8 + 7, (x) * 8) @@ -110,7 +122,12 @@ #define XGMAC_TLPIEN BIT(0) #define XGMAC_LPI_TIMER_CTRL 0x000000d4 #define XGMAC_HW_FEATURE0 0x0000011c +#define XGMAC_HWFEAT_EDMA BIT(31) +#define XGMAC_HWFEAT_EDIFFC BIT(30) +#define XGMAC_HWFEAT_VXN BIT(29) #define XGMAC_HWFEAT_SAVLANINS BIT(27) +#define XGMAC_HWFEAT_TSSTSSEL GENMASK(26, 25) +#define XGMAC_HWFEAT_ADDMACADRSEL GENMASK(22, 18) #define XGMAC_HWFEAT_RXCOESEL BIT(16) #define XGMAC_HWFEAT_TXCOESEL BIT(14) #define XGMAC_HWFEAT_EEESEL BIT(13) @@ -121,34 +138,54 @@ #define XGMAC_HWFEAT_MMCSEL BIT(8) #define XGMAC_HWFEAT_MGKSEL BIT(7) #define XGMAC_HWFEAT_RWKSEL BIT(6) +#define XGMAC_HWFEAT_SMASEL BIT(5) #define XGMAC_HWFEAT_VLHASH BIT(4) +#define XGMAC_HWFEAT_HDSEL BIT(3) #define XGMAC_HWFEAT_GMIISEL BIT(1) #define XGMAC_HW_FEATURE1 0x00000120 #define XGMAC_HWFEAT_L3L4FNUM GENMASK(30, 27) #define XGMAC_HWFEAT_HASHTBLSZ GENMASK(25, 24) +#define XGMAC_HWFEAT_NUMTC GENMASK(23, 21) #define XGMAC_HWFEAT_RSSEN BIT(20) +#define XGMAC_HWFEAT_DBGMEMA BIT(19) #define XGMAC_HWFEAT_TSOEN BIT(18) #define XGMAC_HWFEAT_SPHEN BIT(17) +#define XGMAC_HWFEAT_DCBEN BIT(16) #define XGMAC_HWFEAT_ADDR64 GENMASK(15, 14) +#define XGMAC_HWFEAT_ADVTHWORD BIT(13) +#define XGMAC_HWFEAT_PTOEN BIT(12) +#define XGMAC_HWFEAT_OSTEN BIT(11) #define XGMAC_HWFEAT_TXFIFOSIZE GENMASK(10, 6) +#define XGMAC_HWFEAT_PFCEN BIT(5) #define XGMAC_HWFEAT_RXFIFOSIZE GENMASK(4, 0) #define XGMAC_HW_FEATURE2 0x00000124 +#define XGMAC_HWFEAT_AUXSNAPNUM GENMASK(30, 28) #define XGMAC_HWFEAT_PPSOUTNUM GENMASK(26, 24) #define XGMAC_HWFEAT_TXCHCNT GENMASK(21, 18) #define XGMAC_HWFEAT_RXCHCNT GENMASK(15, 12) #define XGMAC_HWFEAT_TXQCNT GENMASK(9, 6) #define XGMAC_HWFEAT_RXQCNT GENMASK(3, 0) #define XGMAC_HW_FEATURE3 0x00000128 +#define XGMAC_HWFEAT_TBSCH GENMASK(31, 28) #define XGMAC_HWFEAT_TBSSEL BIT(27) #define XGMAC_HWFEAT_FPESEL BIT(26) +#define XGMAC_HWFEAT_SGFSEL BIT(25) #define XGMAC_HWFEAT_ESTWID GENMASK(24, 23) #define XGMAC_HWFEAT_ESTDEP GENMASK(22, 20) #define XGMAC_HWFEAT_ESTSEL BIT(19) +#define XGMAC_HWFEAT_TTSFD GENMASK(18, 16) #define XGMAC_HWFEAT_ASP GENMASK(15, 14) #define XGMAC_HWFEAT_DVLAN BIT(13) 
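The dwmac5 FPE rework above threads a struct stmmac_fpe_cfg through fpe_configure() and fpe_send_mpacket() so both write MAC_FPE_CTRL_STS from a cached fpe_csr value instead of read-modify-writing it: as the new comment notes, the status flags in that register are clear-on-read, so any stray readl() outside fpe_irq_status() would silently consume pending events. A minimal sketch of that shadow-register idiom (register offset and bit names below are hypothetical):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

#define DEMO_FPE_CTRL_STS	0x234	/* hypothetical offset */
#define DEMO_EFPE		BIT(0)	/* enable bit in the control half */

struct demo_fpe_cfg {
	u32 fpe_csr;	/* software shadow of the writable control bits */
};

static void demo_fpe_set_enable(void __iomem *ioaddr,
				struct demo_fpe_cfg *cfg, bool enable)
{
	/* no readl() here: the status half of the register is
	 * clear-on-read, so reading it outside the IRQ handler
	 * would discard events the handler is waiting for */
	cfg->fpe_csr = enable ? DEMO_EFPE : 0;
	writel(cfg->fpe_csr, ioaddr + DEMO_FPE_CTRL_STS);
}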
#define XGMAC_HWFEAT_FRPES GENMASK(12, 11) #define XGMAC_HWFEAT_FRPPB GENMASK(10, 9) +#define XGMAC_HWFEAT_POUOST BIT(8) +#define XGMAC_HWFEAT_FRPPIPE GENMASK(7, 5) +#define XGMAC_HWFEAT_CBTISEL BIT(4) #define XGMAC_HWFEAT_FRPSEL BIT(3) +#define XGMAC_HWFEAT_NRVF GENMASK(2, 0) +#define XGMAC_HW_FEATURE4 0x0000012c +#define XGMAC_HWFEAT_EASP BIT(4) +#define XGMAC_HWFEAT_PCSEL GENMASK(1, 0) #define XGMAC_MAC_DPP_FSM_INT_STATUS 0x00000150 #define XGMAC_MAC_FSM_CONTROL 0x00000158 #define XGMAC_PRTYEN BIT(1) @@ -165,7 +202,7 @@ #define XGMAC_DCS_SHIFT 16 #define XGMAC_ADDRx_LOW(x) (0x00000304 + (x) * 0x8) #define XGMAC_L3L4_ADDR_CTRL 0x00000c00 -#define XGMAC_IDDR GENMASK(15, 8) +#define XGMAC_IDDR GENMASK(16, 8) #define XGMAC_IDDR_SHIFT 8 #define XGMAC_IDDR_FNUM 4 #define XGMAC_TT BIT(1) @@ -222,7 +259,7 @@ ((val) << XGMAC_PPS_MINIDX(x)) #define XGMAC_PPSCMD_START 0x2 #define XGMAC_PPSCMD_STOP 0x5 -#define XGMAC_PPSEN0 BIT(4) +#define XGMAC_PPSENx(x) BIT(4 + (x) * 8) #define XGMAC_PPSx_TARGET_TIME_SEC(x) (0x00000d80 + (x) * 0x10) #define XGMAC_PPSx_TARGET_TIME_NSEC(x) (0x00000d84 + (x) * 0x10) #define XGMAC_TRGTBUSY0 BIT(31) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index a0c2ef8bb0ac..a74e71db79f9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -47,6 +47,14 @@ static void dwxgmac2_core_init(struct mac_device_info *hw, writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN); } +static void xgmac_phylink_get_caps(struct stmmac_priv *priv) +{ + priv->phylink_config.mac_capabilities |= MAC_2500FD | MAC_5000FD | + MAC_10000FD | MAC_25000FD | + MAC_40000FD | MAC_50000FD | + MAC_100000FD; +} + static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable) { u32 tx = readl(ioaddr + XGMAC_TX_CONFIG); @@ -127,6 +135,36 @@ static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio, writel(value, ioaddr + reg); } +static void dwxgmac2_rx_queue_routing(struct mac_device_info *hw, + u8 packet, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + static const struct stmmac_rx_routing dwxgmac2_route_possibilities[] = { + { XGMAC_AVCPQ, XGMAC_AVCPQ_SHIFT }, + { XGMAC_PTPQ, XGMAC_PTPQ_SHIFT }, + { XGMAC_DCBCPQ, XGMAC_DCBCPQ_SHIFT }, + { XGMAC_UPQ, XGMAC_UPQ_SHIFT }, + { XGMAC_MCBCQ, XGMAC_MCBCQ_SHIFT }, + }; + + value = readl(ioaddr + XGMAC_RXQ_CTRL1); + + /* routing configuration */ + value &= ~dwxgmac2_route_possibilities[packet - 1].reg_mask; + value |= (queue << dwxgmac2_route_possibilities[packet - 1].reg_shift) & + dwxgmac2_route_possibilities[packet - 1].reg_mask; + + /* some packets require extra ops */ + if (packet == PACKET_AVCPQ) + value |= FIELD_PREP(XGMAC_TACPQE, 1); + else if (packet == PACKET_MCBCQ) + value |= FIELD_PREP(XGMAC_MCBCQEN, 1); + + writel(value, ioaddr + XGMAC_RXQ_CTRL1); +} + static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw, u32 rx_alg) { @@ -831,8 +869,10 @@ dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp, value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */ writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE); - /* Only ECC Protection for External Memory feature is selected */ - if (asp <= 0x1) + /* 0x2: Without ECC or Parity Ports on External Application Interface + * 0x4: Only ECC Protection for External Memory feature is selected + */ + if (asp == 0x2 || asp == 0x4) return 0; /* 4. 
Enable Parity and Timeout for FSM */ @@ -1138,7 +1178,19 @@ static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index, val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START); val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START); - val |= XGMAC_PPSEN0; + + /* XGMAC Core has 4 PPS outputs at most. + * + * Prior XGMAC Core 3.20, Fixed mode or Flexible mode are selectable for + * PPS0 only via PPSEN0. PPS{1,2,3} are in Flexible mode by default, + * and can not be switched to Fixed mode, since PPSEN{1,2,3} are + * read-only reserved to 0. + * But we always set PPSEN{1,2,3} do not make things worse ;-) + * + * From XGMAC Core 3.20 and later, PPSEN{0,1,2,3} are writable and must + * be set, or the PPS outputs stay in Fixed PPS mode by default. + */ + val |= XGMAC_PPSENx(index); writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index)); @@ -1432,7 +1484,8 @@ static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg, return 0; } -static void dwxgmac3_fpe_configure(void __iomem *ioaddr, u32 num_txq, +static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, + u32 num_txq, u32 num_rxq, bool enable) { u32 value; @@ -1458,12 +1511,13 @@ static void dwxgmac3_fpe_configure(void __iomem *ioaddr, u32 num_txq, const struct stmmac_ops dwxgmac210_ops = { .core_init = dwxgmac2_core_init, + .phylink_get_caps = xgmac_phylink_get_caps, .set_mac = dwxgmac2_set_mac, .rx_ipc = dwxgmac2_rx_ipc, .rx_queue_enable = dwxgmac2_rx_queue_enable, .rx_queue_prio = dwxgmac2_rx_queue_prio, .tx_queue_prio = dwxgmac2_tx_queue_prio, - .rx_queue_routing = NULL, + .rx_queue_routing = dwxgmac2_rx_queue_routing, .prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms, .prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms, .set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight, @@ -1519,12 +1573,13 @@ static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode, const struct stmmac_ops dwxlgmac2_ops = { .core_init = dwxgmac2_core_init, + .phylink_get_caps = xgmac_phylink_get_caps, .set_mac = dwxgmac2_set_mac, .rx_ipc = dwxgmac2_rx_ipc, .rx_queue_enable = dwxlgmac2_rx_queue_enable, .rx_queue_prio = dwxgmac2_rx_queue_prio, .tx_queue_prio = dwxgmac2_tx_queue_prio, - .rx_queue_routing = NULL, + .rx_queue_routing = dwxgmac2_rx_queue_routing, .prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms, .prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms, .set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c index 13c347ee8be9..fc82862a612c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c @@ -8,8 +8,7 @@ #include "common.h" #include "dwxgmac2.h" -static int dwxgmac2_get_tx_status(struct net_device_stats *stats, - struct stmmac_extra_stats *x, +static int dwxgmac2_get_tx_status(struct stmmac_extra_stats *x, struct dma_desc *p, void __iomem *ioaddr) { unsigned int tdes3 = le32_to_cpu(p->des3); @@ -23,8 +22,7 @@ static int dwxgmac2_get_tx_status(struct net_device_stats *stats, return ret; } -static int dwxgmac2_get_rx_status(struct net_device_stats *stats, - struct stmmac_extra_stats *x, +static int dwxgmac2_get_rx_status(struct stmmac_extra_stats *x, struct dma_desc *p) { unsigned int rdes3 = le32_to_cpu(p->des3); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 
070bd912580b..3cde695fec91 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -337,6 +337,8 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv, struct stmmac_extra_stats *x, u32 chan, u32 dir) { + struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan)); u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); int ret = 0; @@ -364,16 +366,16 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv, /* TX/RX NORMAL interrupts */ if (likely(intr_status & XGMAC_NIS)) { - x->normal_irq_n++; - if (likely(intr_status & XGMAC_RI)) { - x->rx_normal_irq_n++; - x->rxq_stats[chan].rx_normal_irq_n++; + u64_stats_update_begin(&rxq_stats->syncp); + rxq_stats->rx_normal_irq_n++; + u64_stats_update_end(&rxq_stats->syncp); ret |= handle_rx; } if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) { - x->tx_normal_irq_n++; - x->txq_stats[chan].tx_normal_irq_n++; + u64_stats_update_begin(&txq_stats->syncp); + txq_stats->tx_normal_irq_n++; + u64_stats_update_end(&txq_stats->syncp); ret |= handle_tx; } } @@ -389,9 +391,14 @@ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr, { u32 hw_cap; - /* MAC HW feature 0 */ + /* MAC HW feature 0 */ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0); + dma_cap->edma = (hw_cap & XGMAC_HWFEAT_EDMA) >> 31; + dma_cap->ediffc = (hw_cap & XGMAC_HWFEAT_EDIFFC) >> 30; + dma_cap->vxn = (hw_cap & XGMAC_HWFEAT_VXN) >> 29; dma_cap->vlins = (hw_cap & XGMAC_HWFEAT_SAVLANINS) >> 27; + dma_cap->tssrc = (hw_cap & XGMAC_HWFEAT_TSSTSSEL) >> 25; + dma_cap->multi_addr = (hw_cap & XGMAC_HWFEAT_ADDMACADRSEL) >> 18; dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16; dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14; dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13; @@ -402,16 +409,31 @@ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr, dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8; dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7; dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6; + dma_cap->sma_mdio = (hw_cap & XGMAC_HWFEAT_SMASEL) >> 5; dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4; + dma_cap->half_duplex = (hw_cap & XGMAC_HWFEAT_HDSEL) >> 3; dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1; /* MAC HW feature 1 */ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1); dma_cap->l3l4fnum = (hw_cap & XGMAC_HWFEAT_L3L4FNUM) >> 27; + /* If L3L4FNUM < 8, then the number of L3L4 filters supported by + * XGMAC is equal to L3L4FNUM. From L3L4FNUM >= 8 the number of + * L3L4 filters goes on like 8, 16, 32, ... Current maximum of + * L3L4FNUM = 10. 
+ */ + if (dma_cap->l3l4fnum >= 8 && dma_cap->l3l4fnum <= 10) + dma_cap->l3l4fnum = 8 << (dma_cap->l3l4fnum - 8); + else if (dma_cap->l3l4fnum > 10) + dma_cap->l3l4fnum = 32; + dma_cap->hash_tb_sz = (hw_cap & XGMAC_HWFEAT_HASHTBLSZ) >> 24; + dma_cap->numtc = ((hw_cap & XGMAC_HWFEAT_NUMTC) >> 21) + 1; dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20; + dma_cap->dbgmem = (hw_cap & XGMAC_HWFEAT_DBGMEMA) >> 19; dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18; dma_cap->sphen = (hw_cap & XGMAC_HWFEAT_SPHEN) >> 17; + dma_cap->dcben = (hw_cap & XGMAC_HWFEAT_DCBEN) >> 16; dma_cap->addr64 = (hw_cap & XGMAC_HWFEAT_ADDR64) >> 14; switch (dma_cap->addr64) { @@ -429,13 +451,18 @@ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr, break; } + dma_cap->advthword = (hw_cap & XGMAC_HWFEAT_ADVTHWORD) >> 13; + dma_cap->ptoen = (hw_cap & XGMAC_HWFEAT_PTOEN) >> 12; + dma_cap->osten = (hw_cap & XGMAC_HWFEAT_OSTEN) >> 11; dma_cap->tx_fifo_size = 128 << ((hw_cap & XGMAC_HWFEAT_TXFIFOSIZE) >> 6); + dma_cap->pfcen = (hw_cap & XGMAC_HWFEAT_PFCEN) >> 5; dma_cap->rx_fifo_size = 128 << ((hw_cap & XGMAC_HWFEAT_RXFIFOSIZE) >> 0); /* MAC HW feature 2 */ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE2); + dma_cap->aux_snapshot_n = (hw_cap & XGMAC_HWFEAT_AUXSNAPNUM) >> 28; dma_cap->pps_out_num = (hw_cap & XGMAC_HWFEAT_PPSOUTNUM) >> 24; dma_cap->number_tx_channel = ((hw_cap & XGMAC_HWFEAT_TXCHCNT) >> 18) + 1; @@ -448,16 +475,28 @@ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr, /* MAC HW feature 3 */ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE3); + dma_cap->tbs_ch_num = ((hw_cap & XGMAC_HWFEAT_TBSCH) >> 28) + 1; dma_cap->tbssel = (hw_cap & XGMAC_HWFEAT_TBSSEL) >> 27; dma_cap->fpesel = (hw_cap & XGMAC_HWFEAT_FPESEL) >> 26; + dma_cap->sgfsel = (hw_cap & XGMAC_HWFEAT_SGFSEL) >> 25; dma_cap->estwid = (hw_cap & XGMAC_HWFEAT_ESTWID) >> 23; dma_cap->estdep = (hw_cap & XGMAC_HWFEAT_ESTDEP) >> 20; dma_cap->estsel = (hw_cap & XGMAC_HWFEAT_ESTSEL) >> 19; + dma_cap->ttsfd = (hw_cap & XGMAC_HWFEAT_TTSFD) >> 16; dma_cap->asp = (hw_cap & XGMAC_HWFEAT_ASP) >> 14; dma_cap->dvlan = (hw_cap & XGMAC_HWFEAT_DVLAN) >> 13; dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11; dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9; + dma_cap->pou_ost_en = (hw_cap & XGMAC_HWFEAT_POUOST) >> 8; + dma_cap->frppipe_num = ((hw_cap & XGMAC_HWFEAT_FRPPIPE) >> 5) + 1; + dma_cap->cbtisel = (hw_cap & XGMAC_HWFEAT_CBTISEL) >> 4; dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3; + dma_cap->nrvf_num = (hw_cap & XGMAC_HWFEAT_NRVF) >> 0; + + /* MAC HW feature 4 */ + hw_cap = readl(ioaddr + XGMAC_HW_FEATURE4); + dma_cap->asp |= (hw_cap & XGMAC_HWFEAT_EASP) >> 2; + dma_cap->pcsel = (hw_cap & XGMAC_HWFEAT_PCSEL) >> 0; return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index a91d8f13a931..937b7a0466fc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -12,8 +12,7 @@ #include "common.h" #include "descs_com.h" -static int enh_desc_get_tx_status(struct net_device_stats *stats, - struct stmmac_extra_stats *x, +static int enh_desc_get_tx_status(struct stmmac_extra_stats *x, struct dma_desc *p, void __iomem *ioaddr) { unsigned int tdes0 = le32_to_cpu(p->des0); @@ -38,15 +37,13 @@ static int enh_desc_get_tx_status(struct net_device_stats *stats, if (unlikely(tdes0 & ETDES0_LOSS_CARRIER)) { x->tx_losscarrier++; - stats->tx_carrier_errors++; } if (unlikely(tdes0 & ETDES0_NO_CARRIER)) { x->tx_carrier++; - 
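The L3L4FNUM decode added to dwxgmac2_get_hw_feature() earlier in this hunk is easiest to check with concrete values: raw fields 0-7 are the filter count itself, 8-10 encode 8 << (raw - 8), and anything larger clamps to the documented maximum. A standalone restatement of that mapping (helper name is made up):

/* raw 0..7 -> 0..7, 8 -> 8, 9 -> 16, 10 -> 32, >10 -> 32 */
static unsigned int demo_decode_l3l4fnum(unsigned int raw)
{
	if (raw >= 8 && raw <= 10)
		return 8 << (raw - 8);
	if (raw > 10)
		return 32;	/* clamp to the current hardware maximum */
	return raw;
}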
stats->tx_carrier_errors++; } if (unlikely((tdes0 & ETDES0_LATE_COLLISION) || (tdes0 & ETDES0_EXCESSIVE_COLLISIONS))) - stats->collisions += + x->tx_collision += (tdes0 & ETDES0_COLLISION_COUNT_MASK) >> 3; if (unlikely(tdes0 & ETDES0_EXCESSIVE_DEFERRAL)) @@ -117,8 +114,7 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err) return ret; } -static void enh_desc_get_ext_status(struct net_device_stats *stats, - struct stmmac_extra_stats *x, +static void enh_desc_get_ext_status(struct stmmac_extra_stats *x, struct dma_extended_desc *p) { unsigned int rdes0 = le32_to_cpu(p->basic.des0); @@ -182,8 +178,7 @@ static void enh_desc_get_ext_status(struct net_device_stats *stats, } } -static int enh_desc_get_rx_status(struct net_device_stats *stats, - struct stmmac_extra_stats *x, +static int enh_desc_get_rx_status(struct stmmac_extra_stats *x, struct dma_desc *p) { unsigned int rdes0 = le32_to_cpu(p->des0); @@ -193,14 +188,14 @@ static int enh_desc_get_rx_status(struct net_device_stats *stats, return dma_own; if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { - stats->rx_length_errors++; + x->rx_length++; return discard_frame; } if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) { if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) { x->rx_desc++; - stats->rx_length_errors++; + x->rx_length++; } if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR)) x->rx_gmac_overflow++; @@ -209,7 +204,7 @@ static int enh_desc_get_rx_status(struct net_device_stats *stats, pr_err("\tIPC Csum Error/Giant frame\n"); if (unlikely(rdes0 & RDES0_COLLISION)) - stats->collisions++; + x->rx_collision++; if (unlikely(rdes0 & RDES0_RECEIVE_WATCHDOG)) x->rx_watchdog++; @@ -218,7 +213,6 @@ static int enh_desc_get_rx_status(struct net_device_stats *stats, if (unlikely(rdes0 & RDES0_CRC_ERROR)) { x->rx_crc_errors++; - stats->rx_crc_errors++; } ret = discard_frame; } diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 6ee7cf07cfd7..68aa2d5ca6e5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -57,8 +57,7 @@ struct stmmac_desc_ops { /* Last tx segment reports the transmit status */ int (*get_tx_ls)(struct dma_desc *p); /* Return the transmit status looking at the TDES1 */ - int (*tx_status)(struct net_device_stats *stats, - struct stmmac_extra_stats *x, + int (*tx_status)(struct stmmac_extra_stats *x, struct dma_desc *p, void __iomem *ioaddr); /* Get the buffer size from the descriptor */ int (*get_tx_len)(struct dma_desc *p); @@ -67,11 +66,9 @@ struct stmmac_desc_ops { /* Get the receive frame size */ int (*get_rx_frame_len)(struct dma_desc *p, int rx_coe_type); /* Return the reception status looking at the RDES1 */ - int (*rx_status)(struct net_device_stats *stats, - struct stmmac_extra_stats *x, + int (*rx_status)(struct stmmac_extra_stats *x, struct dma_desc *p); - void (*rx_extended_status)(struct net_device_stats *stats, - struct stmmac_extra_stats *x, + void (*rx_extended_status)(struct stmmac_extra_stats *x, struct dma_extended_desc *p); /* Set tx timestamp enable bit */ void (*enable_tx_timestamp) (struct dma_desc *p); @@ -191,8 +188,7 @@ struct stmmac_dma_ops { void (*dma_tx_mode)(struct stmmac_priv *priv, void __iomem *ioaddr, int mode, u32 channel, int fifosz, u8 qmode); /* To track extra statistic (if supported) */ - void (*dma_diagnostic_fr)(struct net_device_stats *stats, - struct stmmac_extra_stats *x, + void (*dma_diagnostic_fr)(struct stmmac_extra_stats *x, void __iomem *ioaddr); void (*enable_dma_transmission) 
(void __iomem *ioaddr); void (*enable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr, @@ -304,6 +300,8 @@ struct stmmac_est; struct stmmac_ops { /* MAC core initialization */ void (*core_init)(struct mac_device_info *hw, struct net_device *dev); + /* Get phylink capabilities */ + void (*phylink_get_caps)(struct stmmac_priv *priv); /* Enable the MAC RX/TX */ void (*set_mac)(void __iomem *ioaddr, bool enable); /* Enable and verify that the IPC module is supported */ @@ -414,15 +412,19 @@ struct stmmac_ops { unsigned int ptp_rate); void (*est_irq_status)(void __iomem *ioaddr, struct net_device *dev, struct stmmac_extra_stats *x, u32 txqcnt); - void (*fpe_configure)(void __iomem *ioaddr, u32 num_txq, u32 num_rxq, + void (*fpe_configure)(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg, + u32 num_txq, u32 num_rxq, bool enable); void (*fpe_send_mpacket)(void __iomem *ioaddr, + struct stmmac_fpe_cfg *cfg, enum stmmac_mpacket_type type); int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev); }; #define stmmac_core_init(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, core_init, __args) +#define stmmac_mac_phylink_get_caps(__priv) \ + stmmac_do_void_callback(__priv, mac, phylink_get_caps, __priv) #define stmmac_mac_set(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, set_mac, __args) #define stmmac_rx_ipc(__priv, __args...) \ @@ -536,6 +538,7 @@ struct stmmac_hwtimestamp { void (*get_systime) (void __iomem *ioaddr, u64 *systime); void (*get_ptptime)(void __iomem *ioaddr, u64 *ptp_time); void (*timestamp_interrupt)(struct stmmac_priv *priv); + void (*hwtstamp_correct_latency)(struct stmmac_priv *priv); }; #define stmmac_config_hw_tstamping(__priv, __args...) \ @@ -554,6 +557,8 @@ struct stmmac_hwtimestamp { stmmac_do_void_callback(__priv, ptp, get_ptptime, __args) #define stmmac_timestamp_interrupt(__priv, __args...) \ stmmac_do_void_callback(__priv, ptp, timestamp_interrupt, __args) +#define stmmac_hwtstamp_correct_latency(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, ptp, hwtstamp_correct_latency, __args) struct stmmac_tx_queue; struct stmmac_rx_queue; diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index ea4910ae0921..6a7c1d325c46 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c @@ -177,8 +177,10 @@ #define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4 #define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc +#define MMC_XGMAC_TX_FPE_INTR_MASK 0x204 #define MMC_XGMAC_TX_FPE_FRAG 0x208 #define MMC_XGMAC_TX_HOLD_REQ 0x20c +#define MMC_XGMAC_RX_FPE_INTR_MASK 0x224 #define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228 #define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c #define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230 @@ -352,6 +354,8 @@ static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr) { writel(0x0, mmcaddr + MMC_RX_INTR_MASK); writel(0x0, mmcaddr + MMC_TX_INTR_MASK); + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_TX_FPE_INTR_MASK); + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_FPE_INTR_MASK); writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK); } diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index 350e6670a576..68a7cfcb1d8f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -12,8 +12,7 @@ #include "common.h" #include "descs_com.h" -static int ndesc_get_tx_status(struct net_device_stats *stats, - struct stmmac_extra_stats *x, +static int ndesc_get_tx_status(struct stmmac_extra_stats *x, struct dma_desc *p, void __iomem *ioaddr) { unsigned int tdes0 = le32_to_cpu(p->des0); @@ -31,15 +30,12 @@ static int ndesc_get_tx_status(struct net_device_stats *stats, if (unlikely(tdes0 & TDES0_ERROR_SUMMARY)) { if (unlikely(tdes0 & TDES0_UNDERFLOW_ERROR)) { x->tx_underflow++; - stats->tx_fifo_errors++; } if (unlikely(tdes0 & TDES0_NO_CARRIER)) { x->tx_carrier++; - stats->tx_carrier_errors++; } if (unlikely(tdes0 & TDES0_LOSS_CARRIER)) { x->tx_losscarrier++; - stats->tx_carrier_errors++; } if (unlikely((tdes0 & TDES0_EXCESSIVE_DEFERRAL) || (tdes0 & TDES0_EXCESSIVE_COLLISIONS) || @@ -47,7 +43,7 @@ static int ndesc_get_tx_status(struct net_device_stats *stats, unsigned int collisions; collisions = (tdes0 & TDES0_COLLISION_COUNT_MASK) >> 3; - stats->collisions += collisions; + x->tx_collision += collisions; } ret = tx_err; } @@ -70,8 +66,7 @@ static int ndesc_get_tx_len(struct dma_desc *p) * and, if required, updates the multicast statistics. * In case of success, it returns good_frame because the GMAC device * is supposed to be able to compute the csum in HW. 
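The hwif.h hunk above wires the new hwtstamp_correct_latency hook into the same stmmac_do_void_callback() dispatch used by every other operation, so callers never NULL-check the per-core ops table by hand. A simplified model of that indirection (the demo_* names are illustrative, not the real macros, which carry extra bookkeeping):

struct demo_hwtimestamp {
	void (*correct_latency)(void *priv);
};

struct demo_priv {
	const struct demo_hwtimestamp *ptp;
};

/* pastes the module/callback names at compile time and is a
 * no-op when the core does not provide the hook */
#define demo_do_void_callback(__priv, __module, __cname, __args...)	\
do {									\
	if ((__priv)->__module && (__priv)->__module->__cname)		\
		(__priv)->__module->__cname(__args);			\
} while (0)

#define demo_hwtstamp_correct_latency(__priv) \
	demo_do_void_callback(__priv, ptp, correct_latency, __priv)

That no-op behaviour is what lets stmmac_main.c invoke the latency correction unconditionally behind a single feature-flag check.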
*/ -static int ndesc_get_rx_status(struct net_device_stats *stats, - struct stmmac_extra_stats *x, +static int ndesc_get_rx_status(struct stmmac_extra_stats *x, struct dma_desc *p) { int ret = good_frame; @@ -81,7 +76,7 @@ static int ndesc_get_rx_status(struct net_device_stats *stats, return dma_own; if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { - stats->rx_length_errors++; + x->rx_length++; return discard_frame; } @@ -96,11 +91,9 @@ static int ndesc_get_rx_status(struct net_device_stats *stats, x->ipc_csum_error++; if (unlikely(rdes0 & RDES0_COLLISION)) { x->rx_collision++; - stats->collisions++; } if (unlikely(rdes0 & RDES0_CRC_ERROR)) { x->rx_crc_errors++; - stats->rx_crc_errors++; } ret = discard_frame; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 07ea5ab0a60b..cd7a9768de5f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -21,7 +21,8 @@ #include <linux/ptp_clock_kernel.h> #include <linux/net_tstamp.h> #include <linux/reset.h> -#include <net/page_pool.h> +#include <net/page_pool/types.h> +#include <net/xdp.h> #include <uapi/linux/bpf.h> struct stmmac_resources { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 2ae73ab842d4..f628411ae4ae 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -89,14 +89,6 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { /* Tx/Rx IRQ Events */ STMMAC_STAT(rx_early_irq), STMMAC_STAT(threshold), - STMMAC_STAT(tx_pkt_n), - STMMAC_STAT(rx_pkt_n), - STMMAC_STAT(normal_irq_n), - STMMAC_STAT(rx_normal_irq_n), - STMMAC_STAT(napi_poll), - STMMAC_STAT(tx_normal_irq_n), - STMMAC_STAT(tx_clean), - STMMAC_STAT(tx_set_ic_bit), STMMAC_STAT(irq_receive_pmt_irq_n), /* MMC info */ STMMAC_STAT(mmc_tx_irq_n), @@ -163,9 +155,6 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { STMMAC_STAT(mtl_rx_fifo_ctrl_active), STMMAC_STAT(mac_rx_frame_ctrl_fifo), STMMAC_STAT(mac_gmii_rx_proto_engine), - /* TSO */ - STMMAC_STAT(tx_tso_frames), - STMMAC_STAT(tx_tso_nfrags), /* EST */ STMMAC_STAT(mtl_est_cgce), STMMAC_STAT(mtl_est_hlbs), @@ -175,6 +164,23 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { }; #define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) +/* statistics collected in queue which will be summed up for all TX or RX + * queues, or summed up for both TX and RX queues(napi_poll, normal_irq_n). 
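All of the per-queue counters introduced across these hunks follow the same u64_stats_sync discipline: the hot path bumps the counter inside an update section, and the ethtool path loops on fetch_begin/fetch_retry until it obtains a torn-free snapshot (a seqcount on 32-bit, effectively free on 64-bit). A compact sketch of both sides (struct and function names are illustrative; the syncp must be initialised once with u64_stats_init()):

#include <linux/u64_stats_sync.h>

struct demo_txq_stats {
	u64 tx_pkt_n;
	struct u64_stats_sync syncp;
};

/* writer side, e.g. IRQ or NAPI context */
static void demo_count_tx(struct demo_txq_stats *s, u64 pkts)
{
	u64_stats_update_begin(&s->syncp);
	s->tx_pkt_n += pkts;
	u64_stats_update_end(&s->syncp);
}

/* reader side, e.g. ethtool: retry until the snapshot is consistent */
static u64 demo_read_tx(struct demo_txq_stats *s)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		val = s->tx_pkt_n;
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return val;
}

This is exactly the shape of the snapshot loops in stmmac_get_per_qstats() and stmmac_get_ethtool_stats() below, which copy a whole per-queue struct per retry instead of a single field.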
+ */ +static const char stmmac_qstats_string[][ETH_GSTRING_LEN] = { + "rx_pkt_n", + "rx_normal_irq_n", + "tx_pkt_n", + "tx_normal_irq_n", + "tx_clean", + "tx_set_ic_bit", + "tx_tso_frames", + "tx_tso_nfrags", + "normal_irq_n", + "napi_poll", +}; +#define STMMAC_QSTATS ARRAY_SIZE(stmmac_qstats_string) + /* HW MAC Management counters (if supported) */ #define STMMAC_MMC_STAT(m) \ { #m, sizeof_field(struct stmmac_counters, m), \ @@ -535,23 +541,44 @@ static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data) { u32 tx_cnt = priv->plat->tx_queues_to_use; u32 rx_cnt = priv->plat->rx_queues_to_use; + unsigned int start; int q, stat; + u64 *pos; char *p; + pos = data; for (q = 0; q < tx_cnt; q++) { - p = (char *)priv + offsetof(struct stmmac_priv, - xstats.txq_stats[q].tx_pkt_n); + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q]; + struct stmmac_txq_stats snapshot; + + data = pos; + do { + start = u64_stats_fetch_begin(&txq_stats->syncp); + snapshot = *txq_stats; + } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); + + p = (char *)&snapshot + offsetof(struct stmmac_txq_stats, tx_pkt_n); for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) { - *data++ = (*(unsigned long *)p); - p += sizeof(unsigned long); + *data++ += (*(u64 *)p); + p += sizeof(u64); } } + + pos = data; for (q = 0; q < rx_cnt; q++) { - p = (char *)priv + offsetof(struct stmmac_priv, - xstats.rxq_stats[q].rx_pkt_n); + struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q]; + struct stmmac_rxq_stats snapshot; + + data = pos; + do { + start = u64_stats_fetch_begin(&rxq_stats->syncp); + snapshot = *rxq_stats; + } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); + + p = (char *)&snapshot + offsetof(struct stmmac_rxq_stats, rx_pkt_n); for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) { - *data++ = (*(unsigned long *)p); - p += sizeof(unsigned long); + *data++ += (*(u64 *)p); + p += sizeof(u64); } } } @@ -562,8 +589,10 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, struct stmmac_priv *priv = netdev_priv(dev); u32 rx_queues_count = priv->plat->rx_queues_to_use; u32 tx_queues_count = priv->plat->tx_queues_to_use; + u64 napi_poll = 0, normal_irq_n = 0; + int i, j = 0, pos, ret; unsigned long count; - int i, j = 0, ret; + unsigned int start; if (priv->dma_cap.asp) { for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) { @@ -574,8 +603,7 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, } /* Update the DMA HW counters for dwmac10/100 */ - ret = stmmac_dma_diagnostic_fr(priv, &dev->stats, (void *) &priv->xstats, - priv->ioaddr); + ret = stmmac_dma_diagnostic_fr(priv, &priv->xstats, priv->ioaddr); if (ret) { /* If supported, for new GMAC chips expose the MMC counters */ if (priv->dma_cap.rmon) { @@ -606,6 +634,48 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, data[j++] = (stmmac_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? 
(*(u64 *)p) : (*(u32 *)p); } + + pos = j; + for (i = 0; i < rx_queues_count; i++) { + struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[i]; + struct stmmac_rxq_stats snapshot; + + j = pos; + do { + start = u64_stats_fetch_begin(&rxq_stats->syncp); + snapshot = *rxq_stats; + } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); + + data[j++] += snapshot.rx_pkt_n; + data[j++] += snapshot.rx_normal_irq_n; + normal_irq_n += snapshot.rx_normal_irq_n; + napi_poll += snapshot.napi_poll; + } + + pos = j; + for (i = 0; i < tx_queues_count; i++) { + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[i]; + struct stmmac_txq_stats snapshot; + + j = pos; + do { + start = u64_stats_fetch_begin(&txq_stats->syncp); + snapshot = *txq_stats; + } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); + + data[j++] += snapshot.tx_pkt_n; + data[j++] += snapshot.tx_normal_irq_n; + normal_irq_n += snapshot.tx_normal_irq_n; + data[j++] += snapshot.tx_clean; + data[j++] += snapshot.tx_set_ic_bit; + data[j++] += snapshot.tx_tso_frames; + data[j++] += snapshot.tx_tso_nfrags; + napi_poll += snapshot.napi_poll; + } + normal_irq_n += priv->xstats.rx_early_irq; + data[j++] = normal_irq_n; + data[j++] = napi_poll; + stmmac_get_per_qstats(priv, &data[j]); } @@ -618,7 +688,7 @@ static int stmmac_get_sset_count(struct net_device *netdev, int sset) switch (sset) { case ETH_SS_STATS: - len = STMMAC_STATS_LEN + + len = STMMAC_STATS_LEN + STMMAC_QSTATS + STMMAC_TXQ_STATS * tx_cnt + STMMAC_RXQ_STATS * rx_cnt; @@ -691,8 +761,11 @@ static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data) p += ETH_GSTRING_LEN; } for (i = 0; i < STMMAC_STATS_LEN; i++) { - memcpy(p, stmmac_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); + memcpy(p, stmmac_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < STMMAC_QSTATS; i++) { + memcpy(p, stmmac_qstats_string[i], ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } stmmac_get_qstats_string(priv, p); @@ -908,7 +981,7 @@ static int __stmmac_set_coalesce(struct net_device *dev, else if (queue >= max_cnt) return -EINVAL; - if (priv->use_riwt && (ec->rx_coalesce_usecs > 0)) { + if (priv->use_riwt) { rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv); if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT)) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 8b50f03056b7..f05bd757dfe5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -60,6 +60,48 @@ static void config_sub_second_increment(void __iomem *ioaddr, *ssinc = data; } +static void hwtstamp_correct_latency(struct stmmac_priv *priv) +{ + void __iomem *ioaddr = priv->ptpaddr; + u32 reg_tsic, reg_tsicsns; + u32 reg_tsec, reg_tsecsns; + u64 scaled_ns; + u32 val; + + /* MAC-internal ingress latency */ + scaled_ns = readl(ioaddr + PTP_TS_INGR_LAT); + + /* See section 11.7.2.5.3.1 "Ingress Correction" on page 4001 of + * i.MX8MP Applications Processor Reference Manual Rev. 
1, 06/2021 + */ + val = readl(ioaddr + PTP_TCR); + if (val & PTP_TCR_TSCTRLSSR) + /* nanoseconds field is in decimal format with granularity of 1ns/bit */ + scaled_ns = ((u64)NSEC_PER_SEC << 16) - scaled_ns; + else + /* nanoseconds field is in binary format with granularity of ~0.466ns/bit */ + scaled_ns = ((1ULL << 31) << 16) - + DIV_U64_ROUND_CLOSEST(scaled_ns * PSEC_PER_NSEC, 466U); + + reg_tsic = scaled_ns >> 16; + reg_tsicsns = scaled_ns & 0xff00; + + /* set bit 31 for 2's compliment */ + reg_tsic |= BIT(31); + + writel(reg_tsic, ioaddr + PTP_TS_INGR_CORR_NS); + writel(reg_tsicsns, ioaddr + PTP_TS_INGR_CORR_SNS); + + /* MAC-internal egress latency */ + scaled_ns = readl(ioaddr + PTP_TS_EGR_LAT); + + reg_tsec = scaled_ns >> 16; + reg_tsecsns = scaled_ns & 0xff00; + + writel(reg_tsec, ioaddr + PTP_TS_EGR_CORR_NS); + writel(reg_tsecsns, ioaddr + PTP_TS_EGR_CORR_SNS); +} + static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec) { u32 value; @@ -180,7 +222,7 @@ static void timestamp_interrupt(struct stmmac_priv *priv) u64 ptp_time; int i; - if (priv->plat->int_snapshot_en) { + if (priv->plat->flags & STMMAC_FLAG_INT_SNAPSHOT_EN) { wake_up(&priv->tstamp_busy_wait); return; } @@ -195,7 +237,7 @@ static void timestamp_interrupt(struct stmmac_priv *priv) */ ts_status = readl(priv->ioaddr + GMAC_TIMESTAMP_STATUS); - if (!priv->plat->ext_snapshot_en) + if (!(priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN)) return; num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >> @@ -221,4 +263,5 @@ const struct stmmac_hwtimestamp stmmac_ptp = { .get_systime = get_systime, .get_ptptime = get_ptptime, .timestamp_interrupt = timestamp_interrupt, + .hwtstamp_correct_latency = hwtstamp_correct_latency, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 4727f7be4f86..37e64283f910 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -39,6 +39,7 @@ #include <linux/phylink.h> #include <linux/udp.h> #include <linux/bpf_trace.h> +#include <net/page_pool/helpers.h> #include <net/pkt_cls.h> #include <net/xdp_sock_drv.h> #include "stmmac_ptp.h" @@ -325,7 +326,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) priv->clk_csr = STMMAC_CSR_250_300M; } - if (priv->plat->has_sun8i) { + if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) { if (clk_rate > 160000000) priv->clk_csr = 0x03; else if (clk_rate > 80000000) @@ -421,7 +422,7 @@ static int stmmac_enable_eee_mode(struct stmmac_priv *priv) /* Check and enter in LPI mode */ if (!priv->tx_path_in_lpi_mode) stmmac_set_eee_mode(priv, priv->hw, - priv->plat->en_tx_lpi_clockgating); + priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING); return 0; } @@ -909,6 +910,9 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) priv->hwts_tx_en = 0; priv->hwts_rx_en = 0; + if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) + stmmac_hwtstamp_correct_latency(priv, priv); + return 0; } @@ -960,7 +964,8 @@ static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up) bool *hs_enable = &fpe_cfg->hs_enable; if (is_up && *hs_enable) { - stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY); + stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg, + MPACKET_VERIFY); } else { *lo_state = FPE_STATE_OFF; *lp_state = FPE_STATE_OFF; @@ -991,7 +996,8 @@ static void stmmac_mac_link_up(struct phylink_config *config, struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); u32 old_ctrl, ctrl; - if 
(priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup) + if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && + priv->plat->serdes_powerup) priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv); old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG); @@ -1059,7 +1065,7 @@ static void stmmac_mac_link_up(struct phylink_config *config, priv->speed = speed; if (priv->plat->fix_mac_speed) - priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed); + priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode); if (!duplex) ctrl &= ~priv->hw->link.duplex; @@ -1084,7 +1090,8 @@ static void stmmac_mac_link_up(struct phylink_config *config, stmmac_mac_set(priv, priv->ioaddr, true); if (phy && priv->dma_cap.eee) { priv->eee_active = - phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0; + phy_init_eee(phy, !(priv->plat->flags & + STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0; priv->eee_enabled = stmmac_eee_init(priv); priv->tx_lpi_enabled = priv->eee_enabled; stmmac_set_eee_pls(priv, priv->hw, true); @@ -1092,6 +1099,9 @@ static void stmmac_mac_link_up(struct phylink_config *config, if (priv->dma_cap.fpesel) stmmac_fpe_link_state_handle(priv, true); + + if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) + stmmac_hwtstamp_correct_latency(priv, priv); } static const struct phylink_mac_ops stmmac_phylink_mac_ops = { @@ -1110,7 +1120,7 @@ static const struct phylink_mac_ops stmmac_phylink_mac_ops = { */ static void stmmac_check_pcs_mode(struct stmmac_priv *priv) { - int interface = priv->plat->interface; + int interface = priv->plat->mac_interface; if (priv->dma_cap.pcs) { if ((interface == PHY_INTERFACE_MODE_RGMII) || @@ -1144,7 +1154,7 @@ static int stmmac_init_phy(struct net_device *dev) if (!phylink_expects_phy(priv->phylink)) return 0; - fwnode = of_fwnode_handle(priv->plat->phylink_node); + fwnode = priv->plat->port_node; if (!fwnode) fwnode = dev_fwnode(priv->device); @@ -1188,24 +1198,37 @@ static int stmmac_init_phy(struct net_device *dev) return ret; } +static void stmmac_set_half_duplex(struct stmmac_priv *priv) +{ + /* Half-Duplex can only work with single tx queue */ + if (priv->plat->tx_queues_to_use > 1) + priv->phylink_config.mac_capabilities &= + ~(MAC_10HD | MAC_100HD | MAC_1000HD); + else + priv->phylink_config.mac_capabilities |= + (MAC_10HD | MAC_100HD | MAC_1000HD); +} + static int stmmac_phy_setup(struct stmmac_priv *priv) { - struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; - struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node); - int max_speed = priv->plat->max_speed; + struct stmmac_mdio_bus_data *mdio_bus_data; int mode = priv->plat->phy_interface; + struct fwnode_handle *fwnode; struct phylink *phylink; + int max_speed; priv->phylink_config.dev = &priv->dev->dev; priv->phylink_config.type = PHYLINK_NETDEV; - if (priv->plat->mdio_bus_data) + priv->phylink_config.mac_managed_pm = true; + + mdio_bus_data = priv->plat->mdio_bus_data; + if (mdio_bus_data) priv->phylink_config.ovr_an_inband = mdio_bus_data->xpcs_an_inband; - if (!fwnode) - fwnode = dev_fwnode(priv->device); - - /* Set the platform/firmware specified interface mode */ + /* Set the platform/firmware specified interface mode. Note, phylink + * deals with the PHY interface mode, not the MAC interface mode. + */ __set_bit(mode, priv->phylink_config.supported_interfaces); /* If we have an xpcs, it defines which PHY interfaces are supported. 
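The stmmac_phy_setup() rewrite that follows replaces the per-variant if/else speed ladder with two pieces: each core advertises its extra capabilities through the new phylink_get_caps op, and phylink_limit_mac_speed() then clamps the result to plat->max_speed. A sketch of what a variant hook contributes (modelled on dwmac4_phylink_get_caps() above, with a hypothetical demo name):

/* assumes the stmmac_priv/phylink_config layout used in this driver */
static void demo_phylink_get_caps(struct stmmac_priv *priv)
{
	/* common code already set pause plus MAC_10FD | MAC_100FD |
	 * MAC_1000FD; a core only ORs in what it supports on top */
	priv->phylink_config.mac_capabilities |= MAC_2500FD;
}

Keeping the base capabilities in common code and the deltas in the cores is what lets the xgmac/xlgmac table grow to 100G without another copy of the max_speed comparisons.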
*/ @@ -1214,36 +1237,21 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) priv->phylink_config.supported_interfaces); priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | - MAC_10 | MAC_100; - - if (!max_speed || max_speed >= 1000) - priv->phylink_config.mac_capabilities |= MAC_1000; - - if (priv->plat->has_gmac4) { - if (!max_speed || max_speed >= 2500) - priv->phylink_config.mac_capabilities |= MAC_2500FD; - } else if (priv->plat->has_xgmac) { - if (!max_speed || max_speed >= 2500) - priv->phylink_config.mac_capabilities |= MAC_2500FD; - if (!max_speed || max_speed >= 5000) - priv->phylink_config.mac_capabilities |= MAC_5000FD; - if (!max_speed || max_speed >= 10000) - priv->phylink_config.mac_capabilities |= MAC_10000FD; - if (!max_speed || max_speed >= 25000) - priv->phylink_config.mac_capabilities |= MAC_25000FD; - if (!max_speed || max_speed >= 40000) - priv->phylink_config.mac_capabilities |= MAC_40000FD; - if (!max_speed || max_speed >= 50000) - priv->phylink_config.mac_capabilities |= MAC_50000FD; - if (!max_speed || max_speed >= 100000) - priv->phylink_config.mac_capabilities |= MAC_100000FD; - } - - /* Half-Duplex can only work with single queue */ - if (priv->plat->tx_queues_to_use > 1) - priv->phylink_config.mac_capabilities &= - ~(MAC_10HD | MAC_100HD | MAC_1000HD); - priv->phylink_config.mac_managed_pm = true; + MAC_10FD | MAC_100FD | + MAC_1000FD; + + stmmac_set_half_duplex(priv); + + /* Get the MAC specific capabilities */ + stmmac_mac_phylink_get_caps(priv); + + max_speed = priv->plat->max_speed; + if (max_speed) + phylink_limit_mac_speed(&priv->phylink_config, max_speed); + + fwnode = priv->plat->port_node; + if (!fwnode) + fwnode = dev_fwnode(priv->device); phylink = phylink_create(&priv->phylink_config, fwnode, mode, &stmmac_phylink_mac_ops); @@ -2427,11 +2435,14 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) { struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; struct xsk_buff_pool *pool = tx_q->xsk_pool; unsigned int entry = tx_q->cur_tx; struct dma_desc *tx_desc = NULL; struct xdp_desc xdp_desc; bool work_done = true; + u32 tx_set_ic_bit = 0; + unsigned long flags; /* Avoids TX time-out as we are sharing with slow path */ txq_trans_cond_update(nq); @@ -2492,7 +2503,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) if (set_ic) { tx_q->tx_count_frames = 0; stmmac_set_tx_ic(priv, tx_desc); - priv->xstats.tx_set_ic_bit++; + tx_set_ic_bit++; } stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len, @@ -2504,6 +2515,9 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); entry = tx_q->cur_tx; } + flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); + txq_stats->tx_set_ic_bit += tx_set_ic_bit; + u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); if (tx_desc) { stmmac_flush_tx_descriptors(priv, queue); @@ -2538,18 +2552,23 @@ static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan) * @priv: driver private structure * @budget: napi budget limiting this functions packet handling * @queue: TX queue index + * @pending_packets: signal to arm the TX coal timer * Description: it reclaims the transmit resources after transmission completes. 
+ * If some packets still need to be handled, due to TX coalesce, set + pending_packets to true to make NAPI arm the TX coal timer. */ -static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) +static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue, + bool *pending_packets) { struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; unsigned int bytes_compl = 0, pkts_compl = 0; unsigned int entry, xmits = 0, count = 0; + u32 tx_packets = 0, tx_errors = 0; + unsigned long flags; __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); - priv->xstats.tx_clean++; - tx_q->xsk_frames_done = 0; entry = tx_q->dirty_tx; @@ -2580,8 +2599,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) else p = tx_q->dma_tx + entry; - status = stmmac_tx_status(priv, &priv->dev->stats, - &priv->xstats, p, priv->ioaddr); + status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr); /* Check if the descriptor is owned by the DMA */ if (unlikely(status & tx_dma_own)) break; @@ -2597,13 +2615,11 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) if (likely(!(status & tx_not_ls))) { /* ... verify the status error condition */ if (unlikely(status & tx_err)) { - priv->dev->stats.tx_errors++; + tx_errors++; if (unlikely(status & tx_err_bump_tc)) stmmac_bump_dma_threshold(priv, queue); } else { - priv->dev->stats.tx_packets++; - priv->xstats.tx_pkt_n++; - priv->xstats.txq_stats[queue].tx_pkt_n++; + tx_packets++; } if (skb) stmmac_get_tx_hwtstamp(priv, p, skb); @@ -2703,9 +2719,15 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) /* We still have pending packets, let's call for a new scheduling */ if (tx_q->dirty_tx != tx_q->cur_tx) - hrtimer_start(&tx_q->txtimer, - STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), - HRTIMER_MODE_REL); + *pending_packets = true; + + flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); + txq_stats->tx_packets += tx_packets; + txq_stats->tx_pkt_n += tx_packets; + txq_stats->tx_clean++; + u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + + priv->xstats.tx_errors += tx_errors; __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); @@ -2734,7 +2756,7 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) tx_q->dma_tx_phy, chan); stmmac_start_tx_dma(priv, chan); - priv->dev->stats.tx_errors++; + priv->xstats.tx_errors++; netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); } @@ -2986,10 +3008,26 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) { struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + u32 tx_coal_timer = priv->tx_coal_timer[queue]; + struct stmmac_channel *ch; + struct napi_struct *napi; + + if (!tx_coal_timer) + return; + + ch = &priv->channel[tx_q->queue_index]; + napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; - hrtimer_start(&tx_q->txtimer, - STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), - HRTIMER_MODE_REL); + /* Arm timer only if napi is not already scheduled. * Try to cancel any timer if napi is scheduled, the timer will be armed * again in the next scheduled napi.
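The per-queue counters introduced here follow the kernel's u64_stats_sync pattern: writers bracket their updates so that 32-bit readers (the ndo_get_stats64 fetch/retry loop added later in this patch) see consistent 64-bit values, the seqcount compiles away on 64-bit builds, and every syncp must be initialised before use (see the u64_stats_init() loops added in stmmac_dvr_probe()). A minimal sketch of the writer side, with abbreviated names rather than the driver's:

    struct txq_stats {
        u64 tx_packets;
        u64 tx_bytes;
        struct u64_stats_sync syncp; /* guards the counters above */
    };

    static void txq_stats_add(struct txq_stats *s, u64 pkts, u64 bytes)
    {
        unsigned long flags;

        /* irqsave variant: callers may run in softirq or hardirq context */
        flags = u64_stats_update_begin_irqsave(&s->syncp);
        s->tx_packets += pkts;
        s->tx_bytes += bytes;
        u64_stats_update_end_irqrestore(&s->syncp, flags);
    }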
+ */ + if (unlikely(!napi_is_scheduled(napi))) + hrtimer_start(&tx_q->txtimer, + STMMAC_COAL_TIMER(tx_coal_timer), + HRTIMER_MODE_REL); + else + hrtimer_try_to_cancel(&tx_q->txtimer); } /** @@ -3710,7 +3748,7 @@ static int stmmac_request_irq(struct net_device *dev) int ret; /* Request the IRQ lines */ - if (priv->plat->multi_msi_en) + if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) ret = stmmac_request_irq_multi_msi(dev); else ret = stmmac_request_irq_single(dev); @@ -3827,10 +3865,6 @@ static int __stmmac_open(struct net_device *dev, } } - /* Extra statistics */ - memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); - priv->xstats.threshold = tc; - priv->rx_copybreak = STMMAC_RX_COPYBREAK; buf_sz = dma_conf->dma_buf_sz; @@ -3838,7 +3872,8 @@ static int __stmmac_open(struct net_device *dev, stmmac_reset_queues_param(priv); - if (!priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup) { + if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && + priv->plat->serdes_powerup) { ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv); if (ret < 0) { netdev_err(priv->dev, "%s: Serdes powerup failed\n", @@ -4106,15 +4141,18 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) int nfrags = skb_shinfo(skb)->nr_frags; u32 queue = skb_get_queue_mapping(skb); unsigned int first_entry, tx_packets; + struct stmmac_txq_stats *txq_stats; int tmp_pay_len = 0, first_tx; struct stmmac_tx_queue *tx_q; bool has_vlan, set_ic; u8 proto_hdr_len, hdr; + unsigned long flags; u32 pay_len, mss; dma_addr_t des; int i; tx_q = &priv->dma_conf.tx_queue[queue]; + txq_stats = &priv->xstats.txq_stats[queue]; first_tx = tx_q->cur_tx; /* Compute header lengths */ @@ -4258,7 +4296,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_count_frames = 0; stmmac_set_tx_ic(priv, desc); - priv->xstats.tx_set_ic_bit++; } /* We've used all descriptors we need for this skb, however, @@ -4274,9 +4311,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); } - dev->stats.tx_bytes += skb->len; - priv->xstats.tx_tso_frames++; - priv->xstats.tx_tso_nfrags += nfrags; + flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); + txq_stats->tx_bytes += skb->len; + txq_stats->tx_tso_frames++; + txq_stats->tx_tso_nfrags += nfrags; + if (set_ic) + txq_stats->tx_set_ic_bit++; + u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); if (priv->sarc_type) stmmac_set_desc_sarc(priv, first, priv->sarc_type); @@ -4326,7 +4367,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) dma_map_err: dev_err(priv->device, "Tx dma map failed\n"); dev_kfree_skb(skb); - priv->dev->stats.tx_dropped++; + priv->xstats.tx_dropped++; return NETDEV_TX_OK; } @@ -4347,14 +4388,17 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) u32 queue = skb_get_queue_mapping(skb); int nfrags = skb_shinfo(skb)->nr_frags; int gso = skb_shinfo(skb)->gso_type; + struct stmmac_txq_stats *txq_stats; struct dma_edesc *tbs_desc = NULL; struct dma_desc *desc, *first; struct stmmac_tx_queue *tx_q; bool has_vlan, set_ic; int entry, first_tx; + unsigned long flags; dma_addr_t des; tx_q = &priv->dma_conf.tx_queue[queue]; + txq_stats = &priv->xstats.txq_stats[queue]; first_tx = tx_q->cur_tx; if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) @@ -4388,6 +4432,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device 
*dev) WARN_ON(tx_q->tx_skbuff[first_entry]); csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); + /* DWMAC IPs can be synthesized to support tx coe only for a few tx + * queues. In that case, checksum offloading for those queues that don't + * support tx coe needs to fall back to software checksum calculation. + */ + if (csum_insertion && + priv->plat->tx_queues_cfg[queue].coe_unsupported) { + if (unlikely(skb_checksum_help(skb))) + goto dma_map_err; + csum_insertion = !csum_insertion; + } if (likely(priv->extend_desc)) desc = (struct dma_desc *)(tx_q->dma_etx + entry); @@ -4480,7 +4534,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_count_frames = 0; stmmac_set_tx_ic(priv, desc); - priv->xstats.tx_set_ic_bit++; } /* We've used all descriptors we need for this skb, however, @@ -4507,7 +4560,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); } - dev->stats.tx_bytes += skb->len; + flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); + txq_stats->tx_bytes += skb->len; + if (set_ic) + txq_stats->tx_set_ic_bit++; + u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); if (priv->sarc_type) stmmac_set_desc_sarc(priv, first, priv->sarc_type); @@ -4569,7 +4626,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) dma_map_err: netdev_err(priv->dev, "Tx DMA map failed\n"); dev_kfree_skb(skb); - priv->dev->stats.tx_dropped++; + priv->xstats.tx_dropped++; return NETDEV_TX_OK; } @@ -4714,6 +4771,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, struct xdp_frame *xdpf, bool dma_map) { + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; unsigned int entry = tx_q->cur_tx; struct dma_desc *tx_desc; @@ -4770,9 +4828,12 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, set_ic = false; if (set_ic) { + unsigned long flags; tx_q->tx_count_frames = 0; stmmac_set_tx_ic(priv, tx_desc); - priv->xstats.tx_set_ic_bit++; + flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); + txq_stats->tx_set_ic_bit++; + u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); } stmmac_enable_dma_transmission(priv, priv->ioaddr); @@ -4917,16 +4978,18 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, struct dma_desc *p, struct dma_desc *np, struct xdp_buff *xdp) { + struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; struct stmmac_channel *ch = &priv->channel[queue]; unsigned int len = xdp->data_end - xdp->data; enum pkt_hash_types hash_type; int coe = priv->hw->rx_csum; + unsigned long flags; struct sk_buff *skb; u32 hash; skb = stmmac_construct_skb_zc(ch, xdp); if (!skb) { - priv->dev->stats.rx_dropped++; + priv->xstats.rx_dropped++; return; } @@ -4945,8 +5008,10 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, skb_record_rx_queue(skb, queue); napi_gro_receive(&ch->rxtx_napi, skb); - priv->dev->stats.rx_packets++; - priv->dev->stats.rx_bytes += len; + flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); + rxq_stats->rx_pkt_n++; + rxq_stats->rx_bytes += len; + u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); } static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) @@ -5019,13 +5084,16 @@ static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff
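The coe_unsupported fallback above relies on skb_checksum_help() resolving CHECKSUM_PARTIAL in software before the descriptor is programmed. A condensed sketch of the pattern, where queue_has_tx_coe() and the drop label are hypothetical stand-ins for the per-queue flag and error path:

    if (skb->ip_summed == CHECKSUM_PARTIAL && !queue_has_tx_coe(queue)) {
        if (skb_checksum_help(skb)) /* returns 0 on success */
            goto drop;              /* hypothetical error label */
        /* skb->ip_summed is now CHECKSUM_NONE, so the descriptor is
         * set up without the checksum-insertion flags. */
    }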
*xdp) static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) { + struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; unsigned int count = 0, error = 0, len = 0; int dirty = stmmac_rx_dirty(priv, queue); unsigned int next_entry = rx_q->cur_rx; + u32 rx_errors = 0, rx_dropped = 0; unsigned int desc_size; struct bpf_prog *prog; bool failure = false; + unsigned long flags; int xdp_status = 0; int status = 0; @@ -5081,8 +5149,7 @@ read_again: p = rx_q->dma_rx + entry; /* read the status of the incoming frame */ - status = stmmac_rx_status(priv, &priv->dev->stats, - &priv->xstats, p); + status = stmmac_rx_status(priv, &priv->xstats, p); /* check if managed by the DMA otherwise go ahead */ if (unlikely(status & dma_own)) break; @@ -5104,8 +5171,7 @@ read_again: break; if (priv->extend_desc) - stmmac_rx_extended_status(priv, &priv->dev->stats, - &priv->xstats, + stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry); if (unlikely(status == discard_frame)) { xsk_buff_free(buf->xdp); @@ -5113,7 +5179,7 @@ read_again: dirty++; error = 1; if (!priv->hwts_rx_en) - priv->dev->stats.rx_errors++; + rx_errors++; } if (unlikely(error && (status & rx_not_ls))) @@ -5161,7 +5227,7 @@ read_again: break; case STMMAC_XDP_CONSUMED: xsk_buff_free(buf->xdp); - priv->dev->stats.rx_dropped++; + rx_dropped++; break; case STMMAC_XDP_TX: case STMMAC_XDP_REDIRECT: @@ -5182,8 +5248,12 @@ read_again: stmmac_finalize_xdp_rx(priv, xdp_status); - priv->xstats.rx_pkt_n += count; - priv->xstats.rxq_stats[queue].rx_pkt_n += count; + flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); + rxq_stats->rx_pkt_n += count; + u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); + + priv->xstats.rx_dropped += rx_dropped; + priv->xstats.rx_errors += rx_errors; if (xsk_uses_need_wakeup(rx_q->xsk_pool)) { if (failure || stmmac_rx_dirty(priv, queue) > 0) @@ -5207,6 +5277,8 @@ read_again: */ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) { + u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0; + struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; struct stmmac_channel *ch = &priv->channel[queue]; unsigned int count = 0, error = 0, len = 0; @@ -5216,11 +5288,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) unsigned int desc_size; struct sk_buff *skb = NULL; struct stmmac_xdp_buff ctx; + unsigned long flags; int xdp_status = 0; int buf_sz; dma_dir = page_pool_get_dma_dir(rx_q->page_pool); buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; + limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit); if (netif_msg_rx_status(priv)) { void *rx_head; @@ -5256,10 +5330,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) len = 0; } +read_again: if (count >= limit) break; -read_again: buf1_len = 0; buf2_len = 0; entry = next_entry; @@ -5271,8 +5345,7 @@ read_again: p = rx_q->dma_rx + entry; /* read the status of the incoming frame */ - status = stmmac_rx_status(priv, &priv->dev->stats, - &priv->xstats, p); + status = stmmac_rx_status(priv, &priv->xstats, p); /* check if managed by the DMA otherwise go ahead */ if (unlikely(status & dma_own)) break; @@ -5289,14 +5362,13 @@ read_again: prefetch(np); if (priv->extend_desc) - stmmac_rx_extended_status(priv, &priv->dev->stats, - &priv->xstats, rx_q->dma_erx + entry); + 
stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry); if (unlikely(status == discard_frame)) { page_pool_recycle_direct(rx_q->page_pool, buf->page); buf->page = NULL; error = 1; if (!priv->hwts_rx_en) - priv->dev->stats.rx_errors++; + rx_errors++; } if (unlikely(error && (status & rx_not_ls))) @@ -5364,7 +5436,7 @@ read_again: virt_to_head_page(ctx.xdp.data), sync_len, true); buf->page = NULL; - priv->dev->stats.rx_dropped++; + rx_dropped++; /* Clear skb as it was set as * status by XDP program. @@ -5393,7 +5465,7 @@ read_again: skb = napi_alloc_skb(&ch->rx_napi, buf1_len); if (!skb) { - priv->dev->stats.rx_dropped++; + rx_dropped++; count++; goto drain_data; } @@ -5413,7 +5485,7 @@ read_again: priv->dma_conf.dma_buf_sz); /* Data payload appended into SKB */ - page_pool_release_page(rx_q->page_pool, buf->page); + skb_mark_for_recycle(skb); buf->page = NULL; } @@ -5425,7 +5497,7 @@ read_again: priv->dma_conf.dma_buf_sz); /* Data payload appended into SKB */ - page_pool_release_page(rx_q->page_pool, buf->sec_page); + skb_mark_for_recycle(skb); buf->sec_page = NULL; } @@ -5453,8 +5525,8 @@ drain_data: napi_gro_receive(&ch->rx_napi, skb); skb = NULL; - priv->dev->stats.rx_packets++; - priv->dev->stats.rx_bytes += len; + rx_packets++; + rx_bytes += len; count++; } @@ -5469,8 +5541,14 @@ drain_data: stmmac_rx_refill(priv, queue); - priv->xstats.rx_pkt_n += count; - priv->xstats.rxq_stats[queue].rx_pkt_n += count; + flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); + rxq_stats->rx_packets += rx_packets; + rxq_stats->rx_bytes += rx_bytes; + rxq_stats->rx_pkt_n += count; + u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); + + priv->xstats.rx_dropped += rx_dropped; + priv->xstats.rx_errors += rx_errors; return count; } @@ -5480,10 +5558,15 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) struct stmmac_channel *ch = container_of(napi, struct stmmac_channel, rx_napi); struct stmmac_priv *priv = ch->priv_data; + struct stmmac_rxq_stats *rxq_stats; u32 chan = ch->index; + unsigned long flags; int work_done; - priv->xstats.napi_poll++; + rxq_stats = &priv->xstats.rxq_stats[chan]; + flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); + rxq_stats->napi_poll++; + u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); work_done = stmmac_rx(priv, budget, chan); if (work_done < budget && napi_complete_done(napi, work_done)) { @@ -5502,12 +5585,18 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) struct stmmac_channel *ch = container_of(napi, struct stmmac_channel, tx_napi); struct stmmac_priv *priv = ch->priv_data; + struct stmmac_txq_stats *txq_stats; + bool pending_packets = false; u32 chan = ch->index; + unsigned long flags; int work_done; - priv->xstats.napi_poll++; + txq_stats = &priv->xstats.txq_stats[chan]; + flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); + txq_stats->napi_poll++; + u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); - work_done = stmmac_tx_clean(priv, budget, chan); + work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets); work_done = min(work_done, budget); if (work_done < budget && napi_complete_done(napi, work_done)) { @@ -5518,6 +5607,10 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) spin_unlock_irqrestore(&ch->lock, flags); } + /* TX still has packets to handle; check if we need to arm the tx timer */ + if (pending_packets) + stmmac_tx_timer_arm(priv, chan); + return work_done; } @@ -5526,12 +5619,24 @@ static int
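On the switch from page_pool_release_page() to skb_mark_for_recycle() seen above: the page stays under page_pool ownership, and freeing the skb returns it to the pool instead of unmapping it. A condensed sketch of the recycle-aware fragment path (buf, off, frag_len and truesize are illustrative names, not the driver's exact locals):

    skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
    skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, buf->page,
                    off, frag_len, truesize);
    skb_mark_for_recycle(skb); /* page recycles via page_pool on consume */
    buf->page = NULL;          /* driver gives up its slot, not the page */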
stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget) { struct stmmac_channel *ch = container_of(napi, struct stmmac_channel, rxtx_napi); struct stmmac_priv *priv = ch->priv_data; + bool tx_pending_packets = false; int rx_done, tx_done, rxtx_done; + struct stmmac_rxq_stats *rxq_stats; + struct stmmac_txq_stats *txq_stats; u32 chan = ch->index; + unsigned long flags; + + rxq_stats = &priv->xstats.rxq_stats[chan]; + flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); + rxq_stats->napi_poll++; + u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); - priv->xstats.napi_poll++; + txq_stats = &priv->xstats.txq_stats[chan]; + flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); + txq_stats->napi_poll++; + u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); - tx_done = stmmac_tx_clean(priv, budget, chan); + tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets); tx_done = min(tx_done, budget); rx_done = stmmac_rx_zc(priv, budget, chan); @@ -5556,6 +5661,10 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget) spin_unlock_irqrestore(&ch->lock, flags); } + /* TX still has packets to handle; check if we need to arm the tx timer */ + if (tx_pending_packets) + stmmac_tx_timer_arm(priv, chan); + return min(rxtx_done, budget - 1); } @@ -5677,7 +5786,7 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev, features &= ~NETIF_F_CSUM_MASK; /* Disable tso if asked by ethtool */ - if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { + if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { if (features & NETIF_F_TSO) priv->tso = true; else @@ -5731,6 +5840,7 @@ static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status) /* If user has requested FPE enable, quickly respond */ if (*hs_enable) stmmac_fpe_send_mpacket(priv, priv->ioaddr, + fpe_cfg, MPACKET_RESPONSE); } @@ -5798,7 +5908,8 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv) } /* PCS link status */ - if (priv->hw->pcs && !priv->plat->has_integrated_pcs) { + if (priv->hw->pcs && + !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) { if (priv->xstats.pcs_link) netif_carrier_on(priv->dev); else @@ -5938,33 +6049,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) return IRQ_HANDLED; } -#ifdef CONFIG_NET_POLL_CONTROLLER -/* Polling receive - used by NETCONSOLE and other diagnostic tools - * to allow network I/O with interrupts disabled. - */ -static void stmmac_poll_controller(struct net_device *dev) -{ - struct stmmac_priv *priv = netdev_priv(dev); - int i; - - /* If adapter is down, do nothing */ - if (test_bit(STMMAC_DOWN, &priv->state)) - return; - - if (priv->plat->multi_msi_en) { - for (i = 0; i < priv->plat->rx_queues_to_use; i++) - stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]); - - for (i = 0; i < priv->plat->tx_queues_to_use; i++) - stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]); - } else { - disable_irq(dev->irq); - stmmac_interrupt(dev->irq, dev); - enable_irq(dev->irq); - } -} -#endif - /** * stmmac_ioctl - Entry point for the Ioctl * @dev: Device pointer.
@@ -6174,6 +6258,22 @@ DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status); static int stmmac_dma_cap_show(struct seq_file *seq, void *v) { + static const char * const dwxgmac_timestamp_source[] = { + "None", + "Internal", + "External", + "Both", + }; + static const char * const dwxgmac_safety_feature_desc[] = { + "No", + "All Safety Features with ECC and Parity", + "All Safety Features without ECC or Parity", + "All Safety Features with Parity Only", + "ECC Only", + "UNDEFINED", + "UNDEFINED", + "UNDEFINED", + }; struct net_device *dev = seq->private; struct stmmac_priv *priv = netdev_priv(dev); @@ -6192,10 +6292,16 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v) (priv->dma_cap.mbps_1000) ? "Y" : "N"); seq_printf(seq, "\tHalf duplex: %s\n", (priv->dma_cap.half_duplex) ? "Y" : "N"); - seq_printf(seq, "\tHash Filter: %s\n", - (priv->dma_cap.hash_filter) ? "Y" : "N"); - seq_printf(seq, "\tMultiple MAC address registers: %s\n", - (priv->dma_cap.multi_addr) ? "Y" : "N"); + if (priv->plat->has_xgmac) { + seq_printf(seq, + "\tNumber of Additional MAC address registers: %d\n", + priv->dma_cap.multi_addr); + } else { + seq_printf(seq, "\tHash Filter: %s\n", + (priv->dma_cap.hash_filter) ? "Y" : "N"); + seq_printf(seq, "\tMultiple MAC address registers: %s\n", + (priv->dma_cap.multi_addr) ? "Y" : "N"); + } seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n", (priv->dma_cap.pcs) ? "Y" : "N"); seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", @@ -6210,12 +6316,16 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v) (priv->dma_cap.time_stamp) ? "Y" : "N"); seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", (priv->dma_cap.atime_stamp) ? "Y" : "N"); + if (priv->plat->has_xgmac) + seq_printf(seq, "\tTimestamp System Time Source: %s\n", + dwxgmac_timestamp_source[priv->dma_cap.tssrc]); seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", (priv->dma_cap.eee) ? "Y" : "N"); seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); seq_printf(seq, "\tChecksum Offload in TX: %s\n", (priv->dma_cap.tx_coe) ? "Y" : "N"); - if (priv->synopsys_id >= DWMAC_CORE_4_00) { + if (priv->synopsys_id >= DWMAC_CORE_4_00 || + priv->plat->has_xgmac) { seq_printf(seq, "\tIP Checksum Offload in RX: %s\n", (priv->dma_cap.rx_coe) ? "Y" : "N"); } else { @@ -6223,9 +6333,9 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v) (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); + seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", + (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); } - seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", - (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); seq_printf(seq, "\tNumber of Additional RX channel: %d\n", priv->dma_cap.number_rx_channel); seq_printf(seq, "\tNumber of Additional TX channel: %d\n", @@ -6238,12 +6348,13 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v) (priv->dma_cap.enh_desc) ? "Y" : "N"); seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size); seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size); - seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz); + seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ? + (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0); seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? 
"Y" : "N"); seq_printf(seq, "\tNumber of PPS Outputs: %d\n", priv->dma_cap.pps_out_num); seq_printf(seq, "\tSafety Features: %s\n", - priv->dma_cap.asp ? "Y" : "N"); + dwxgmac_safety_feature_desc[priv->dma_cap.asp]); seq_printf(seq, "\tFlexible RX Parser: %s\n", priv->dma_cap.frpsel ? "Y" : "N"); seq_printf(seq, "\tEnhanced Addressing: %d\n", @@ -6268,6 +6379,53 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v) priv->dma_cap.fpesel ? "Y" : "N"); seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n", priv->dma_cap.tbssel ? "Y" : "N"); + seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n", + priv->dma_cap.tbs_ch_num); + seq_printf(seq, "\tPer-Stream Filtering: %s\n", + priv->dma_cap.sgfsel ? "Y" : "N"); + seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n", + BIT(priv->dma_cap.ttsfd) >> 1); + seq_printf(seq, "\tNumber of Traffic Classes: %d\n", + priv->dma_cap.numtc); + seq_printf(seq, "\tDCB Feature: %s\n", + priv->dma_cap.dcben ? "Y" : "N"); + seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n", + priv->dma_cap.advthword ? "Y" : "N"); + seq_printf(seq, "\tPTP Offload: %s\n", + priv->dma_cap.ptoen ? "Y" : "N"); + seq_printf(seq, "\tOne-Step Timestamping: %s\n", + priv->dma_cap.osten ? "Y" : "N"); + seq_printf(seq, "\tPriority-Based Flow Control: %s\n", + priv->dma_cap.pfcen ? "Y" : "N"); + seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n", + BIT(priv->dma_cap.frpes) << 6); + seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n", + BIT(priv->dma_cap.frpbs) << 6); + seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n", + priv->dma_cap.frppipe_num); + seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n", + priv->dma_cap.nrvf_num ? + (BIT(priv->dma_cap.nrvf_num) << 1) : 0); + seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n", + priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0); + seq_printf(seq, "\tDepth of GCL: %lu\n", + priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0); + seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n", + priv->dma_cap.cbtisel ? "Y" : "N"); + seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n", + priv->dma_cap.aux_snapshot_n); + seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n", + priv->dma_cap.pou_ost_en ? "Y" : "N"); + seq_printf(seq, "\tEnhanced DMA: %s\n", + priv->dma_cap.edma ? "Y" : "N"); + seq_printf(seq, "\tDifferent Descriptor Cache: %s\n", + priv->dma_cap.ediffc ? "Y" : "N"); + seq_printf(seq, "\tVxLAN/NVGRE: %s\n", + priv->dma_cap.vxn ? "Y" : "N"); + seq_printf(seq, "\tDebug Memory Interface: %s\n", + priv->dma_cap.dbgmem ? "Y" : "N"); + seq_printf(seq, "\tNumber of Policing Counters: %lu\n", + priv->dma_cap.pcsel ? 
BIT(priv->dma_cap.pcsel + 3) : 0); return 0; } DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); @@ -6788,6 +6946,56 @@ int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) return 0; } +static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 rx_cnt = priv->plat->rx_queues_to_use; + unsigned int start; + int q; + + for (q = 0; q < tx_cnt; q++) { + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q]; + u64 tx_packets; + u64 tx_bytes; + + do { + start = u64_stats_fetch_begin(&txq_stats->syncp); + tx_packets = txq_stats->tx_packets; + tx_bytes = txq_stats->tx_bytes; + } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); + + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; + } + + for (q = 0; q < rx_cnt; q++) { + struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q]; + u64 rx_packets; + u64 rx_bytes; + + do { + start = u64_stats_fetch_begin(&rxq_stats->syncp); + rx_packets = rxq_stats->rx_packets; + rx_bytes = rxq_stats->rx_bytes; + } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + } + + stats->rx_dropped = priv->xstats.rx_dropped; + stats->rx_errors = priv->xstats.rx_errors; + stats->tx_dropped = priv->xstats.tx_dropped; + stats->tx_errors = priv->xstats.tx_errors; + stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier; + stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision; + stats->rx_length_errors = priv->xstats.rx_length; + stats->rx_crc_errors = priv->xstats.rx_crc_errors; + stats->rx_over_errors = priv->xstats.rx_overflow_cntr; + stats->rx_missed_errors = priv->xstats.rx_missed_cntr; +} + static const struct net_device_ops stmmac_netdev_ops = { .ndo_open = stmmac_open, .ndo_start_xmit = stmmac_xmit, @@ -6798,11 +7006,9 @@ static const struct net_device_ops stmmac_netdev_ops = { .ndo_set_rx_mode = stmmac_set_rx_mode, .ndo_tx_timeout = stmmac_tx_timeout, .ndo_eth_ioctl = stmmac_ioctl, + .ndo_get_stats64 = stmmac_get_stats64, .ndo_setup_tc = stmmac_setup_tc, .ndo_select_queue = stmmac_select_queue, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = stmmac_poll_controller, -#endif .ndo_set_mac_address = stmmac_set_mac_address, .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, @@ -6855,7 +7061,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv) int ret; /* dwmac-sun8i only work in chain mode */ - if (priv->plat->has_sun8i) + if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) chain_mode = 1; priv->chain_mode = chain_mode; @@ -6876,7 +7082,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv) */ priv->plat->enh_desc = priv->dma_cap.enh_desc; priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up && - !priv->plat->use_phy_wol; + !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL); priv->hw->pmt = priv->plat->pmt; if (priv->dma_cap.hash_tb_sz) { priv->hw->multicast_filter_bins = @@ -6920,7 +7126,8 @@ static int stmmac_hw_init(struct stmmac_priv *priv) if (priv->dma_cap.tsoen) dev_info(priv->device, "TSO supported\n"); - priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en; + priv->hw->vlan_fail_q_en = + (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN); priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; /* Run HW quirks, if any */ @@ -7012,6 +7219,7 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) priv->rss.table[i] = 
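Several of the new dma_cap fields printed above are log2-encoded by the hardware, which is why the seq_printf() lines wrap them in BIT() shifts. A worked example with hypothetical register values:

    /*
     * hash_tb_sz = 2  ->  BIT(2) << 5 = 128-entry hash table
     * frpbs      = 1  ->  BIT(1) << 6 = 128 parsable bytes
     * estdep     = 3  ->  BIT(3) << 5 = 256-entry gate control list
     * estwid     = 2  ->  4 * 2 + 12  = 20-bit time-interval field
     */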
ethtool_rxfh_indir_default(i, rx_cnt); + stmmac_set_half_duplex(priv); stmmac_napi_add(dev); if (netif_running(dev)) @@ -7057,6 +7265,7 @@ static void stmmac_fpe_lp_task(struct work_struct *work) if (*lo_state == FPE_STATE_ENTERING_ON && *lp_state == FPE_STATE_ENTERING_ON) { stmmac_fpe_configure(priv, priv->ioaddr, + fpe_cfg, priv->plat->tx_queues_to_use, priv->plat->rx_queues_to_use, *enable); @@ -7075,6 +7284,7 @@ static void stmmac_fpe_lp_task(struct work_struct *work) netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT, *lo_state, *lp_state); stmmac_fpe_send_mpacket(priv, priv->ioaddr, + fpe_cfg, MPACKET_VERIFY); } /* Sleep then retry */ @@ -7089,6 +7299,7 @@ void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable) if (priv->plat->fpe_cfg->hs_enable != enable) { if (enable) { stmmac_fpe_send_mpacket(priv, priv->ioaddr, + priv->plat->fpe_cfg, MPACKET_VERIFY); } else { priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF; @@ -7160,12 +7371,18 @@ int stmmac_dvr_probe(struct device *device, priv->device = device; priv->dev = ndev; + for (i = 0; i < MTL_MAX_RX_QUEUES; i++) + u64_stats_init(&priv->xstats.rxq_stats[i].syncp); + for (i = 0; i < MTL_MAX_TX_QUEUES; i++) + u64_stats_init(&priv->xstats.txq_stats[i].syncp); + stmmac_set_ethtool_ops(ndev); priv->pause = pause; priv->plat = plat_dat; priv->ioaddr = res->addr; priv->dev->base_addr = (unsigned long)res->addr; - priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en; + priv->plat->dma_cfg->multi_msi_en = + (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN); priv->dev->irq = res->irq; priv->wol_irq = res->wol_irq; @@ -7249,7 +7466,7 @@ int stmmac_dvr_probe(struct device *device, ndev->hw_features |= NETIF_F_HW_TC; } - if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { + if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; if (priv->plat->has_gmac4) ndev->hw_features |= NETIF_F_GSO_UDP_L4; @@ -7257,7 +7474,8 @@ int stmmac_dvr_probe(struct device *device, dev_info(priv->device, "TSO feature enabled\n"); } - if (priv->dma_cap.sphen && !priv->plat->sph_disable) { + if (priv->dma_cap.sphen && + !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) { ndev->hw_features |= NETIF_F_GRO; priv->sph_cap = true; priv->sph = priv->sph_cap; @@ -7315,6 +7533,8 @@ int stmmac_dvr_probe(struct device *device, #endif priv->msg_enable = netif_msg_init(debug, default_msg_level); + priv->xstats.threshold = tc; + /* Initialize RSS */ rxq = priv->plat->rx_queues_to_use; netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); @@ -7540,6 +7760,7 @@ int stmmac_suspend(struct device *dev) if (priv->dma_cap.fpesel) { /* Disable FPE */ stmmac_fpe_configure(priv, priv->ioaddr, + priv->plat->fpe_cfg, priv->plat->tx_queues_to_use, priv->plat->rx_queues_to_use, false); @@ -7621,7 +7842,8 @@ int stmmac_resume(struct device *dev) stmmac_mdio_reset(priv->mii); } - if (!priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup) { + if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && + priv->plat->serdes_powerup) { ret = priv->plat->serdes_powerup(ndev, priv->plat->bsp_priv); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 3db1cb0fd160..0542cfd1817e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -62,11 +62,16 @@ static void stmmac_xgmac2_c45_format(struct stmmac_priv *priv, int phyaddr, static void stmmac_xgmac2_c22_format(struct 
stmmac_priv *priv, int phyaddr, int phyreg, u32 *hw_addr) { - u32 tmp; + u32 tmp = 0; + if (priv->synopsys_id < DWXGMAC_CORE_2_20) { + /* Until ver 2.20 XGMAC does not support C22 addr >= 4. Those + * bits above bit 3 of XGMAC_MDIO_C22P register are reserved. + */ + tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P); + tmp &= ~MII_XGMAC_C22P_MASK; + } /* Set port as Clause 22 */ - tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P); - tmp &= ~MII_XGMAC_C22P_MASK; tmp |= BIT(phyaddr); writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P); @@ -132,8 +137,9 @@ static int stmmac_xgmac2_mdio_read_c22(struct mii_bus *bus, int phyaddr, priv = netdev_priv(ndev); - /* HW does not support C22 addr >= 4 */ - if (phyaddr > MII_XGMAC_MAX_C22ADDR) + /* Until ver 2.20 XGMAC does not support C22 addr >= 4 */ + if (priv->synopsys_id < DWXGMAC_CORE_2_20 && + phyaddr > MII_XGMAC_MAX_C22ADDR) return -ENODEV; stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr); @@ -209,8 +215,9 @@ static int stmmac_xgmac2_mdio_write_c22(struct mii_bus *bus, int phyaddr, priv = netdev_priv(ndev); - /* HW does not support C22 addr >= 4 */ - if (phyaddr > MII_XGMAC_MAX_C22ADDR) + /* Until ver 2.20 XGMAC does not support C22 addr >= 4 */ + if (priv->synopsys_id < DWXGMAC_CORE_2_20 && + phyaddr > MII_XGMAC_MAX_C22ADDR) return -ENODEV; stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr); @@ -526,11 +533,11 @@ int stmmac_mdio_register(struct net_device *ndev) int err = 0; struct mii_bus *new_bus; struct stmmac_priv *priv = netdev_priv(ndev); - struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node); struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; struct device_node *mdio_node = priv->plat->mdio_node; struct device *dev = ndev->dev.parent; struct fwnode_handle *fixed_node; + struct fwnode_handle *fwnode; int addr, found, max_addr; if (!mdio_bus_data) @@ -551,13 +558,18 @@ int stmmac_mdio_register(struct net_device *ndev) new_bus->read_c45 = &stmmac_xgmac2_mdio_read_c45; new_bus->write_c45 = &stmmac_xgmac2_mdio_write_c45; - /* Right now only C22 phys are supported */ - max_addr = MII_XGMAC_MAX_C22ADDR + 1; + if (priv->synopsys_id < DWXGMAC_CORE_2_20) { + /* Right now only C22 phys are supported */ + max_addr = MII_XGMAC_MAX_C22ADDR + 1; - /* Check if DT specified an unsupported phy addr */ - if (priv->plat->phy_addr > MII_XGMAC_MAX_C22ADDR) - dev_err(dev, "Unsupported phy_addr (max=%d)\n", + /* Check if DT specified an unsupported phy addr */ + if (priv->plat->phy_addr > MII_XGMAC_MAX_C22ADDR) + dev_err(dev, "Unsupported phy_addr (max=%d)\n", MII_XGMAC_MAX_C22ADDR); + } else { + /* XGMAC version 2.20 onwards support 32 phy addr */ + max_addr = PHY_MAX_ADDR; + } } else { new_bus->read = &stmmac_mdio_read_c22; new_bus->write = &stmmac_mdio_write_c22; @@ -579,7 +591,11 @@ int stmmac_mdio_register(struct net_device *ndev) new_bus->parent = priv->device; err = of_mdiobus_register(new_bus, mdio_node); - if (err != 0) { + if (err == -ENODEV) { + err = 0; + dev_info(dev, "MDIO bus is disabled\n"); + goto bus_register_fail; + } else if (err) { dev_err_probe(dev, err, "Cannot register the MDIO bus\n"); goto bus_register_fail; } @@ -589,6 +605,7 @@ int stmmac_mdio_register(struct net_device *ndev) stmmac_xgmac2_mdio_read_c45(new_bus, 0, 0, 0); /* If fixed-link is set, skip PHY scanning */ + fwnode = priv->plat->port_node; if (!fwnode) fwnode = dev_fwnode(priv->device); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 644bb54f5f02..352b01678c22 100644 --- 
a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -77,7 +77,7 @@ static int snps_gmac5_default_data(struct pci_dev *pdev, plat->clk_csr = 5; plat->has_gmac4 = 1; plat->force_sf_dma_mode = 1; - plat->tso_en = 1; + plat->flags |= STMMAC_FLAG_TSO_EN; plat->pmt = 1; /* Set default value for multicast hash bins */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 231152ee5a32..1ffde555da47 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -15,7 +15,6 @@ #include <linux/io.h> #include <linux/of.h> #include <linux/of_net.h> -#include <linux/of_device.h> #include <linux/of_mdio.h> #include "stmmac.h" @@ -277,6 +276,9 @@ static int stmmac_mtl_setup(struct platform_device *pdev, plat->tx_queues_cfg[queue].use_prio = true; } + plat->tx_queues_cfg[queue].coe_unsupported = + of_property_read_bool(q_node, "snps,coe-unsupported"); + queue++; } if (queue != plat->tx_queues_to_use) { @@ -386,6 +388,22 @@ static int stmmac_of_get_mac_mode(struct device_node *np) } /** + * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt() + * @pdev: platform_device structure + * @plat: driver data platform structure + * + * Release resources claimed by stmmac_probe_config_dt(). + */ +static void stmmac_remove_config_dt(struct platform_device *pdev, + struct plat_stmmacenet_data *plat) +{ + clk_disable_unprepare(plat->stmmac_clk); + clk_disable_unprepare(plat->pclk); + of_node_put(plat->phy_node); + of_node_put(plat->mdio_node); +} + +/** * stmmac_probe_config_dt - parse device-tree driver parameters * @pdev: platform_device structure * @mac: MAC address to use @@ -393,7 +411,7 @@ static int stmmac_of_get_mac_mode(struct device_node *np) * this function is to read the driver parameters from device-tree and * set some private fields that will be used by the main at runtime. */ -struct plat_stmmacenet_data * +static struct plat_stmmacenet_data * stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) { struct device_node *np = pdev->dev.of_node; @@ -420,16 +438,15 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) return ERR_PTR(phy_mode); plat->phy_interface = phy_mode; - plat->interface = stmmac_of_get_mac_mode(np); - if (plat->interface < 0) - plat->interface = plat->phy_interface; + rc = stmmac_of_get_mac_mode(np); + plat->mac_interface = rc < 0 ? plat->phy_interface : rc; /* Some wrapper drivers still rely on phy_node. Let's save it while * they are not converted to phylink. */ plat->phy_node = of_parse_phandle(np, "phy-handle", 0); /* PHYLINK automatically parses the phy-handle property */ - plat->phylink_node = np; + plat->port_node = of_fwnode_handle(np); /* Get max speed of operation from device tree */ of_property_read_u32(np, "max-speed", &plat->max_speed); @@ -466,8 +483,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) plat->force_sf_dma_mode = of_property_read_bool(np, "snps,force_sf_dma_mode"); - plat->en_tx_lpi_clockgating = - of_property_read_bool(np, "snps,en-tx-lpi-clockgating"); + if (of_property_read_bool(np, "snps,en-tx-lpi-clockgating")) + plat->flags |= STMMAC_FLAG_EN_TX_LPI_CLOCKGATING; /* Set the maxmtu to a default of JUMBO_LEN in case the * parameter is not present in the device tree. 
@@ -525,7 +542,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) plat->has_gmac4 = 1; plat->has_gmac = 0; plat->pmt = 1; - plat->tso_en = of_property_read_bool(np, "snps,tso"); + if (of_property_read_bool(np, "snps,tso")) + plat->flags |= STMMAC_FLAG_TSO_EN; } if (of_device_is_compatible(np, "snps,dwmac-3.610") || @@ -538,7 +556,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) if (of_device_is_compatible(np, "snps,dwxgmac")) { plat->has_xgmac = 1; plat->pmt = 1; - plat->tso_en = of_property_read_bool(np, "snps,tso"); + if (of_property_read_bool(np, "snps,tso")) + plat->flags |= STMMAC_FLAG_TSO_EN; } dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), @@ -662,43 +681,14 @@ devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) return plat; } - -/** - * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt() - * @pdev: platform_device structure - * @plat: driver data platform structure - * - * Release resources claimed by stmmac_probe_config_dt(). - */ -void stmmac_remove_config_dt(struct platform_device *pdev, - struct plat_stmmacenet_data *plat) -{ - clk_disable_unprepare(plat->stmmac_clk); - clk_disable_unprepare(plat->pclk); - of_node_put(plat->phy_node); - of_node_put(plat->mdio_node); -} #else struct plat_stmmacenet_data * -stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) -{ - return ERR_PTR(-EINVAL); -} - -struct plat_stmmacenet_data * devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) { return ERR_PTR(-EINVAL); } - -void stmmac_remove_config_dt(struct platform_device *pdev, - struct plat_stmmacenet_data *plat) -{ -} #endif /* CONFIG_OF */ -EXPORT_SYMBOL_GPL(stmmac_probe_config_dt); EXPORT_SYMBOL_GPL(devm_stmmac_probe_config_dt); -EXPORT_SYMBOL_GPL(stmmac_remove_config_dt); int stmmac_get_platform_resources(struct platform_device *pdev, struct stmmac_resources *stmmac_res) @@ -807,7 +797,7 @@ static void devm_stmmac_pltfr_remove(void *data) { struct platform_device *pdev = data; - stmmac_pltfr_remove_no_dt(pdev); + stmmac_pltfr_remove(pdev); } /** @@ -834,12 +824,12 @@ int devm_stmmac_pltfr_probe(struct platform_device *pdev, EXPORT_SYMBOL_GPL(devm_stmmac_pltfr_probe); /** - * stmmac_pltfr_remove_no_dt + * stmmac_pltfr_remove * @pdev: pointer to the platform device * Description: This undoes the effects of stmmac_pltfr_probe() by removing the * driver and calling the platform's exit() callback. */ -void stmmac_pltfr_remove_no_dt(struct platform_device *pdev) +void stmmac_pltfr_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct stmmac_priv *priv = netdev_priv(ndev); @@ -848,23 +838,6 @@ void stmmac_pltfr_remove_no_dt(struct platform_device *pdev) stmmac_dvr_remove(&pdev->dev); stmmac_pltfr_exit(pdev, plat); } -EXPORT_SYMBOL_GPL(stmmac_pltfr_remove_no_dt); - -/** - * stmmac_pltfr_remove - * @pdev: platform device pointer - * Description: this function calls the main to free the net resources - * and calls the platforms hook and release the resources (e.g. mem). 
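With stmmac_probe_config_dt() now private and devres-managed, the _no_dt remove variant disappears and teardown funnels through a single helper. A sketch of the device-managed hook-up this implies, assuming the probe path registers the callback with devm_add_action_or_reset() (the registration itself is outside this excerpt):

    /* inside devm_stmmac_pltfr_probe(), after a successful probe: */
    ret = devm_add_action_or_reset(&pdev->dev,
                                   devm_stmmac_pltfr_remove, pdev);
    if (ret)
        return ret;
    /* on unbind, devres invokes devm_stmmac_pltfr_remove(pdev), which
     * calls stmmac_pltfr_remove() exactly as an explicit .remove would */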
- */ -void stmmac_pltfr_remove(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct stmmac_priv *priv = netdev_priv(ndev); - struct plat_stmmacenet_data *plat = priv->plat; - - stmmac_pltfr_remove_no_dt(pdev); - stmmac_remove_config_dt(pdev, plat); -} EXPORT_SYMBOL_GPL(stmmac_pltfr_remove); /** @@ -901,7 +874,7 @@ static int __maybe_unused stmmac_pltfr_resume(struct device *dev) struct platform_device *pdev = to_platform_device(dev); int ret; - ret = stmmac_pltfr_init(pdev, priv->plat->bsp_priv); + ret = stmmac_pltfr_init(pdev, priv->plat); if (ret) return ret; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h index c5565b2a70ac..bb6fc7e59aed 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h @@ -12,11 +12,7 @@ #include "stmmac.h" struct plat_stmmacenet_data * -stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac); -struct plat_stmmacenet_data * devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac); -void stmmac_remove_config_dt(struct platform_device *pdev, - struct plat_stmmacenet_data *plat); int stmmac_get_platform_resources(struct platform_device *pdev, struct stmmac_resources *stmmac_res); @@ -32,7 +28,6 @@ int stmmac_pltfr_probe(struct platform_device *pdev, int devm_stmmac_pltfr_probe(struct platform_device *pdev, struct plat_stmmacenet_data *plat, struct stmmac_resources *res); -void stmmac_pltfr_remove_no_dt(struct platform_device *pdev); void stmmac_pltfr_remove(struct platform_device *pdev); extern const struct dev_pm_ops stmmac_pltfr_pm_ops; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index b4388ca8d211..bffa5c017032 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -81,7 +81,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, xmac); write_unlock_irqrestore(&priv->ptp_lock, flags); - /* Caculate new basetime and re-configured EST after PTP time adjust. */ + /* Calculate new basetime and re-configured EST after PTP time adjust. 
*/ if (est_rst) { struct timespec64 current_time, time; ktime_t current_time_ns, basetime; @@ -191,23 +191,33 @@ static int stmmac_enable(struct ptp_clock_info *ptp, priv->systime_flags); write_unlock_irqrestore(&priv->ptp_lock, flags); break; - case PTP_CLK_REQ_EXTTS: - priv->plat->ext_snapshot_en = on; + case PTP_CLK_REQ_EXTTS: { + u8 channel; + mutex_lock(&priv->aux_ts_lock); acr_value = readl(ptpaddr + PTP_ACR); + channel = ilog2(FIELD_GET(PTP_ACR_MASK, acr_value)); acr_value &= ~PTP_ACR_MASK; + if (on) { + if (FIELD_GET(PTP_ACR_MASK, acr_value)) { + netdev_err(priv->dev, + "Cannot enable auxiliary snapshot %d as auxiliary snapshot %d is already enabled", + rq->extts.index, channel); + mutex_unlock(&priv->aux_ts_lock); + return -EBUSY; + } + + priv->plat->flags |= STMMAC_FLAG_EXT_SNAPSHOT_EN; + /* Enable External snapshot trigger */ - acr_value |= priv->plat->ext_snapshot_num; + acr_value |= PTP_ACR_ATSEN(rq->extts.index); acr_value |= PTP_ACR_ATSFC; - netdev_dbg(priv->dev, "Auxiliary Snapshot %d enabled.\n", - priv->plat->ext_snapshot_num >> - PTP_ACR_ATSEN_SHIFT); } else { - netdev_dbg(priv->dev, "Auxiliary Snapshot %d disabled.\n", - priv->plat->ext_snapshot_num >> - PTP_ACR_ATSEN_SHIFT); + priv->plat->flags &= ~STMMAC_FLAG_EXT_SNAPSHOT_EN; } + netdev_dbg(priv->dev, "Auxiliary Snapshot %d %s.\n", + rq->extts.index, on ? "enabled" : "disabled"); writel(acr_value, ptpaddr + PTP_ACR); mutex_unlock(&priv->aux_ts_lock); /* wait for auxts fifo clear to finish */ @@ -215,6 +225,7 @@ static int stmmac_enable(struct ptp_clock_info *ptp, !(acr_value & PTP_ACR_ATSFC), 10, 10000); break; + } default: break; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index bf619295d079..fce3fba2ffd2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -26,6 +26,12 @@ #define PTP_ACR 0x40 /* Auxiliary Control Reg */ #define PTP_ATNR 0x48 /* Auxiliary Timestamp - Nanoseconds Reg */ #define PTP_ATSR 0x4c /* Auxiliary Timestamp - Seconds Reg */ +#define PTP_TS_INGR_CORR_NS 0x58 /* Ingress timestamp correction nanoseconds */ +#define PTP_TS_EGR_CORR_NS 0x5C /* Egress timestamp correction nanoseconds*/ +#define PTP_TS_INGR_CORR_SNS 0x60 /* Ingress timestamp correction subnanoseconds */ +#define PTP_TS_EGR_CORR_SNS 0x64 /* Egress timestamp correction subnanoseconds */ +#define PTP_TS_INGR_LAT 0x68 /* MAC internal Ingress Latency */ +#define PTP_TS_EGR_LAT 0x6c /* MAC internal Egress Latency */ #define PTP_STNSUR_ADDSUB_SHIFT 31 #define PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 10e9-1 ns */ @@ -73,7 +79,7 @@ #define PTP_ACR_ATSEN1 BIT(5) /* Auxiliary Snapshot 1 Enable */ #define PTP_ACR_ATSEN2 BIT(6) /* Auxiliary Snapshot 2 Enable */ #define PTP_ACR_ATSEN3 BIT(7) /* Auxiliary Snapshot 3 Enable */ -#define PTP_ACR_ATSEN_SHIFT 5 /* Auxiliary Snapshot shift */ +#define PTP_ACR_ATSEN(index) (PTP_ACR_ATSEN0 << (index)) #define PTP_ACR_MASK GENMASK(7, 4) /* Aux Snapshot Mask */ #define PMC_ART_VALUE0 0x01 /* PMC_ART[15:0] timer value */ #define PMC_ART_VALUE1 0x02 /* PMC_ART[31:16] timer value */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c index 687f43cd466c..3ca1c2a816ff 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c @@ -802,7 +802,7 @@ static int stmmac_test_flowctrl(struct stmmac_priv *priv) stmmac_start_rx(priv, priv->ioaddr, i); 
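The PTP_ACR_ATSEN(index) rework above replaces a single stored snapshot number with direct bit math over PTP_ACR[7:4]. A worked example with hypothetical values:

    /*
     * PTP_ACR_MASK = GENMASK(7, 4), PTP_ACR_ATSEN0 = BIT(4)
     *
     * enable request for snapshot 3: PTP_ACR_ATSEN(3) = BIT(4) << 3 = BIT(7)
     *
     * with snapshot 2 already on:    acr = BIT(6)
     *   FIELD_GET(PTP_ACR_MASK, acr) = 0x4, ilog2(0x4) = 2
     *   -> the -EBUSY message names channel 2 as the one already enabled
     */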
local_bh_disable(); - napi_reschedule(&ch->rx_napi); + napi_schedule(&ch->rx_napi); local_bh_enable(); } @@ -1355,7 +1355,7 @@ static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src, goto cleanup_rss; } - dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_IPV4_ADDRS); + dissector->used_keys |= (1ULL << FLOW_DISSECTOR_KEY_IPV4_ADDRS); dissector->offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 0; cls = kzalloc(sizeof(*cls), GFP_KERNEL); @@ -1481,8 +1481,8 @@ static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src, goto cleanup_rss; } - dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_BASIC); - dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_PORTS); + dissector->used_keys |= (1ULL << FLOW_DISSECTOR_KEY_BASIC); + dissector->used_keys |= (1ULL << FLOW_DISSECTOR_KEY_PORTS); dissector->offset[FLOW_DISSECTOR_KEY_BASIC] = 0; dissector->offset[FLOW_DISSECTOR_KEY_PORTS] = offsetof(typeof(keys), key); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index ac41ef4cbd2f..6ad3e0a11936 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -1079,6 +1079,7 @@ disable: priv->plat->fpe_cfg->enable = false; stmmac_fpe_configure(priv, priv->ioaddr, + priv->plat->fpe_cfg, priv->plat->tx_queues_to_use, priv->plat->rx_queues_to_use, false); diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index 734a817d3c94..a9a6670b5ff1 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -124,7 +124,7 @@ static void vsw_set_rx_mode(struct net_device *dev) return sunvnet_set_rx_mode_common(dev, port->vp); } -int ldmvsw_open(struct net_device *dev) +static int ldmvsw_open(struct net_device *dev) { struct vnet_port *port = netdev_priv(dev); struct vio_driver_state *vio = &port->vio; @@ -136,7 +136,6 @@ int ldmvsw_open(struct net_device *dev) return 0; } -EXPORT_SYMBOL_GPL(ldmvsw_open); #ifdef CONFIG_NET_POLL_CONTROLLER static void vsw_poll_controller(struct net_device *dev) diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 7a2e76776297..21431f43e4c2 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -31,7 +31,7 @@ #include <linux/slab.h> #include <linux/io.h> -#include <linux/of_device.h> +#include <linux/of.h> #include "niu.h" @@ -10132,7 +10132,7 @@ err_out: return err; } -static int niu_of_remove(struct platform_device *op) +static void niu_of_remove(struct platform_device *op) { struct net_device *dev = platform_get_drvdata(op); @@ -10165,7 +10165,6 @@ static int niu_of_remove(struct platform_device *op) free_netdev(dev); } - return 0; } static const struct of_device_id niu_match[] = { @@ -10183,7 +10182,7 @@ static struct platform_driver niu_of_driver = { .of_match_table = niu_match, }, .probe = niu_of_probe, - .remove = niu_of_remove, + .remove_new = niu_of_remove, }; #endif /* CONFIG_SPARC64 */ diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index 34b94153bf0c..16c86b13c185 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -25,7 +25,7 @@ #include <linux/bitops.h> #include <linux/dma-mapping.h> #include <linux/of.h> -#include <linux/of_device.h> +#include <linux/platform_device.h> #include <linux/gfp.h> #include <asm/auxio.h> @@ -1234,7 +1234,7 @@ static int bigmac_sbus_probe(struct platform_device *op) return bigmac_ether_init(op, qec_op); } -static int 
bigmac_sbus_remove(struct platform_device *op) +static void bigmac_sbus_remove(struct platform_device *op) { struct bigmac *bp = platform_get_drvdata(op); struct device *parent = op->dev.parent; @@ -1255,8 +1255,6 @@ static int bigmac_sbus_remove(struct platform_device *op) bp->bblock_dvma); free_netdev(net_dev); - - return 0; } static const struct of_device_id bigmac_sbus_match[] = { @@ -1274,7 +1272,7 @@ static struct platform_driver bigmac_sbus_driver = { .of_match_table = bigmac_sbus_match, }, .probe = bigmac_sbus_probe, - .remove = bigmac_sbus_remove, + .remove_new = bigmac_sbus_remove, }; module_platform_driver(bigmac_sbus_driver); diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index 4154e68639ac..9bd1df8308d2 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -40,6 +40,7 @@ #include <linux/bitops.h> #include <linux/mm.h> #include <linux/gfp.h> +#include <linux/of.h> #include <asm/io.h> #include <asm/byteorder.h> diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index b93613cd1994..b983b9c23be6 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -32,9 +32,10 @@ #include <linux/mm.h> #include <linux/module.h> #include <linux/netdevice.h> -#include <linux/of_device.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/pci.h> +#include <linux/platform_device.h> #include <linux/random.h> #include <linux/skbuff.h> #include <linux/slab.h> diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c index 6418fcc3139f..aedd13c94225 100644 --- a/drivers/net/ethernet/sun/sunqe.c +++ b/drivers/net/ethernet/sun/sunqe.c @@ -27,8 +27,8 @@ #include <linux/bitops.h> #include <linux/dma-mapping.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/pgtable.h> +#include <linux/platform_device.h> #include <asm/io.h> #include <asm/dma.h> @@ -933,7 +933,7 @@ static int qec_sbus_probe(struct platform_device *op) return qec_ether_init(op); } -static int qec_sbus_remove(struct platform_device *op) +static void qec_sbus_remove(struct platform_device *op) { struct sunqe *qp = platform_get_drvdata(op); struct net_device *net_dev = qp->dev; @@ -948,8 +948,6 @@ static int qec_sbus_remove(struct platform_device *op) qp->buffers, qp->buffers_dvma); free_netdev(net_dev); - - return 0; } static const struct of_device_id qec_sbus_match[] = { @@ -967,7 +965,7 @@ static struct platform_driver qec_sbus_driver = { .of_match_table = qec_sbus_match, }, .probe = qec_sbus_probe, - .remove = qec_sbus_remove, + .remove_new = qec_sbus_remove, }; static int __init qec_init(void) diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c index c499a14314f1..391a1bc7f446 100644 --- a/drivers/net/ethernet/sunplus/spl2sw_driver.c +++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c @@ -511,7 +511,7 @@ out_clk_disable: return ret; } -static int spl2sw_remove(struct platform_device *pdev) +static void spl2sw_remove(struct platform_device *pdev) { struct spl2sw_common *comm; int i; @@ -538,8 +538,6 @@ static int spl2sw_remove(struct platform_device *pdev) spl2sw_mdio_remove(comm); clk_disable_unprepare(comm->clk); - - return 0; } static const struct of_device_id spl2sw_of_match[] = { @@ -551,7 +549,7 @@ MODULE_DEVICE_TABLE(of, spl2sw_of_match); static struct platform_driver spl2sw_driver = { .probe = spl2sw_probe, - .remove = spl2sw_remove, + .remove_new = spl2sw_remove, .driver = { .name = 
"sp7021_emac", .of_match_table = spl2sw_of_match, diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index fce06663e1e1..e60b557d59b9 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -6,7 +6,7 @@ config NET_VENDOR_TI bool "Texas Instruments (TI) devices" default y - depends on PCI || EISA || AR7 || ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 + depends on PCI || EISA || ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 help If you have a network (Ethernet) card belonging to this class, say Y. @@ -90,12 +90,16 @@ config TI_CPTS The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the driver offers a PTP Hardware Clock. +config TI_K3_CPPI_DESC_POOL + tristate + config TI_K3_AM65_CPSW_NUSS tristate "TI K3 AM654x/J721E CPSW Ethernet driver" depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER select NET_DEVLINK select TI_DAVINCI_MDIO select PHYLINK + select TI_K3_CPPI_DESC_POOL imply PHY_TI_GMII_SEL depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS help @@ -176,11 +180,31 @@ config TLAN Please email feedback to <torben.mathiasen@compaq.com>. -config CPMAC - tristate "TI AR7 CPMAC Ethernet support" - depends on AR7 +config TI_ICSSG_PRUETH + tristate "TI Gigabit PRU Ethernet driver" select PHYLIB + select TI_ICSS_IEP + select TI_K3_CPPI_DESC_POOL + depends on PRU_REMOTEPROC + depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER + help + Support dual Gigabit Ethernet ports over the ICSSG PRU Subsystem. + This subsystem is available starting with the AM65 platform. + + This driver requires firmware binaries which will run on the PRUs + to support the Ethernet operation. Currently, it supports Ethernet + with 1G and 100M link speed. + +config TI_ICSS_IEP + tristate "TI PRU ICSS IEP driver" + depends on PTP_1588_CLOCK_OPTIONAL + depends on TI_PRUSS + default TI_PRUSS help - TI AR7 CPMAC Ethernet support + This driver enables support for the PRU-ICSS Industrial Ethernet + Peripheral within a PRU-ICSS subsystem present on various TI SoCs. + + To compile this driver as a module, choose M here. The module + will be called icss_iep. 
endif # NET_VENDOR_TI diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile index 75f761efbea7..27de1d697134 100644 --- a/drivers/net/ethernet/ti/Makefile +++ b/drivers/net/ethernet/ti/Makefile @@ -8,7 +8,6 @@ obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o obj-$(CONFIG_TI_CPSW_SWITCHDEV) += cpsw-common.o obj-$(CONFIG_TLAN) += tlan.o -obj-$(CONFIG_CPMAC) += cpmac.o obj-$(CONFIG_TI_DAVINCI_EMAC) += ti_davinci_emac.o ti_davinci_emac-y := davinci_emac.o davinci_cpdma.o obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o @@ -24,7 +23,19 @@ keystone_netcp-y := netcp_core.o cpsw_ale.o obj-$(CONFIG_TI_KEYSTONE_NETCP_ETHSS) += keystone_netcp_ethss.o keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o cpsw_ale.o +obj-$(CONFIG_TI_K3_CPPI_DESC_POOL) += k3-cppi-desc-pool.o + obj-$(CONFIG_TI_K3_AM65_CPSW_NUSS) += ti-am65-cpsw-nuss.o -ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o k3-cppi-desc-pool.o am65-cpsw-qos.o +ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o am65-cpsw-qos.o ti-am65-cpsw-nuss-$(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV) += am65-cpsw-switchdev.o obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o + +obj-$(CONFIG_TI_ICSSG_PRUETH) += icssg-prueth.o +icssg-prueth-y := icssg/icssg_prueth.o \ + icssg/icssg_classifier.o \ + icssg/icssg_queues.o \ + icssg/icssg_config.o \ + icssg/icssg_mii_cfg.o \ + icssg/icssg_stats.o \ + icssg/icssg_ethtool.o +obj-$(CONFIG_TI_ICSS_IEP) += icssg/icss_iep.o diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index bebcfd5e6b57..ece9f8df98ae 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -19,6 +19,7 @@ #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/of_device.h> +#include <linux/of_platform.h> #include <linux/phylink.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> @@ -1587,10 +1588,10 @@ static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy /* rx_pause/tx_pause */ if (rx_pause) - mac_control |= CPSW_SL_CTL_RX_FLOW_EN; + mac_control |= CPSW_SL_CTL_TX_FLOW_EN; if (tx_pause) - mac_control |= CPSW_SL_CTL_TX_FLOW_EN; + mac_control |= CPSW_SL_CTL_RX_FLOW_EN; cpsw_sl_ctl_set(port->slave.mac_sl, mac_control); @@ -1746,9 +1747,10 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common) } tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn); - if (tx_chn->irq <= 0) { + if (tx_chn->irq < 0) { dev_err(dev, "Failed to get tx dma irq %d\n", tx_chn->irq); + ret = tx_chn->irq; goto err; } diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h index bf40c88fbd9b..f3dad2ab9828 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h @@ -192,7 +192,6 @@ struct am65_cpsw_ndev_priv { extern const struct ethtool_ops am65_cpsw_ethtool_ops_slave; -void am65_cpsw_nuss_adjust_link(struct net_device *ndev); void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common); void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common); int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx); diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c index eced87fa261c..9ac2ff05d501 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-qos.c +++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c @@ -624,9 +624,9 @@ static int am65_cpsw_qos_clsflower_add_policer(struct 
am65_cpsw_port *port, int ret; if (dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) { + ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) { NL_SET_ERR_MSG_MOD(extack, "Unsupported keys used"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c deleted file mode 100644 index 80eeeb463c4f..000000000000 --- a/drivers/net/ethernet/ti/cpmac.c +++ /dev/null @@ -1,1251 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Copyright (C) 2006, 2007 Eugene Konev - * - */ - -#include <linux/module.h> -#include <linux/interrupt.h> -#include <linux/moduleparam.h> - -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/errno.h> -#include <linux/types.h> -#include <linux/delay.h> - -#include <linux/netdevice.h> -#include <linux/if_vlan.h> -#include <linux/etherdevice.h> -#include <linux/ethtool.h> -#include <linux/skbuff.h> -#include <linux/mii.h> -#include <linux/phy.h> -#include <linux/phy_fixed.h> -#include <linux/platform_device.h> -#include <linux/dma-mapping.h> -#include <linux/clk.h> -#include <linux/gpio.h> -#include <linux/atomic.h> - -#include <asm/mach-ar7/ar7.h> - -MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>"); -MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:cpmac"); - -static int debug_level = 8; -static int dumb_switch; - -/* Next 2 are only used in cpmac_probe, so it's pointless to change them */ -module_param(debug_level, int, 0444); -module_param(dumb_switch, int, 0444); - -MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable"); -MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus"); - -#define CPMAC_VERSION "0.5.2" -/* frame size + 802.1q tag + FCS size */ -#define CPMAC_SKB_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) -#define CPMAC_QUEUES 8 - -/* Ethernet registers */ -#define CPMAC_TX_CONTROL 0x0004 -#define CPMAC_TX_TEARDOWN 0x0008 -#define CPMAC_RX_CONTROL 0x0014 -#define CPMAC_RX_TEARDOWN 0x0018 -#define CPMAC_MBP 0x0100 -#define MBP_RXPASSCRC 0x40000000 -#define MBP_RXQOS 0x20000000 -#define MBP_RXNOCHAIN 0x10000000 -#define MBP_RXCMF 0x01000000 -#define MBP_RXSHORT 0x00800000 -#define MBP_RXCEF 0x00400000 -#define MBP_RXPROMISC 0x00200000 -#define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16) -#define MBP_RXBCAST 0x00002000 -#define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8) -#define MBP_RXMCAST 0x00000020 -#define MBP_MCASTCHAN(channel) ((channel) & 0x7) -#define CPMAC_UNICAST_ENABLE 0x0104 -#define CPMAC_UNICAST_CLEAR 0x0108 -#define CPMAC_MAX_LENGTH 0x010c -#define CPMAC_BUFFER_OFFSET 0x0110 -#define CPMAC_MAC_CONTROL 0x0160 -#define MAC_TXPTYPE 0x00000200 -#define MAC_TXPACE 0x00000040 -#define MAC_MII 0x00000020 -#define MAC_TXFLOW 0x00000010 -#define MAC_RXFLOW 0x00000008 -#define MAC_MTEST 0x00000004 -#define MAC_LOOPBACK 0x00000002 -#define MAC_FDX 0x00000001 -#define CPMAC_MAC_STATUS 0x0164 -#define MAC_STATUS_QOS 0x00000004 -#define MAC_STATUS_RXFLOW 0x00000002 -#define MAC_STATUS_TXFLOW 0x00000001 -#define CPMAC_TX_INT_ENABLE 0x0178 -#define CPMAC_TX_INT_CLEAR 0x017c -#define CPMAC_MAC_INT_VECTOR 0x0180 -#define MAC_INT_STATUS 0x00080000 -#define MAC_INT_HOST 0x00040000 -#define MAC_INT_RX 0x00020000 -#define MAC_INT_TX 0x00010000 -#define CPMAC_MAC_EOI_VECTOR 0x0184 -#define CPMAC_RX_INT_ENABLE 0x0198 -#define 
CPMAC_RX_INT_CLEAR 0x019c -#define CPMAC_MAC_INT_ENABLE 0x01a8 -#define CPMAC_MAC_INT_CLEAR 0x01ac -#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4) -#define CPMAC_MAC_ADDR_MID 0x01d0 -#define CPMAC_MAC_ADDR_HI 0x01d4 -#define CPMAC_MAC_HASH_LO 0x01d8 -#define CPMAC_MAC_HASH_HI 0x01dc -#define CPMAC_TX_PTR(channel) (0x0600 + (channel) * 4) -#define CPMAC_RX_PTR(channel) (0x0620 + (channel) * 4) -#define CPMAC_TX_ACK(channel) (0x0640 + (channel) * 4) -#define CPMAC_RX_ACK(channel) (0x0660 + (channel) * 4) -#define CPMAC_REG_END 0x0680 - -/* Rx/Tx statistics - * TODO: use some of them to fill stats in cpmac_stats() - */ -#define CPMAC_STATS_RX_GOOD 0x0200 -#define CPMAC_STATS_RX_BCAST 0x0204 -#define CPMAC_STATS_RX_MCAST 0x0208 -#define CPMAC_STATS_RX_PAUSE 0x020c -#define CPMAC_STATS_RX_CRC 0x0210 -#define CPMAC_STATS_RX_ALIGN 0x0214 -#define CPMAC_STATS_RX_OVER 0x0218 -#define CPMAC_STATS_RX_JABBER 0x021c -#define CPMAC_STATS_RX_UNDER 0x0220 -#define CPMAC_STATS_RX_FRAG 0x0224 -#define CPMAC_STATS_RX_FILTER 0x0228 -#define CPMAC_STATS_RX_QOSFILTER 0x022c -#define CPMAC_STATS_RX_OCTETS 0x0230 - -#define CPMAC_STATS_TX_GOOD 0x0234 -#define CPMAC_STATS_TX_BCAST 0x0238 -#define CPMAC_STATS_TX_MCAST 0x023c -#define CPMAC_STATS_TX_PAUSE 0x0240 -#define CPMAC_STATS_TX_DEFER 0x0244 -#define CPMAC_STATS_TX_COLLISION 0x0248 -#define CPMAC_STATS_TX_SINGLECOLL 0x024c -#define CPMAC_STATS_TX_MULTICOLL 0x0250 -#define CPMAC_STATS_TX_EXCESSCOLL 0x0254 -#define CPMAC_STATS_TX_LATECOLL 0x0258 -#define CPMAC_STATS_TX_UNDERRUN 0x025c -#define CPMAC_STATS_TX_CARRIERSENSE 0x0260 -#define CPMAC_STATS_TX_OCTETS 0x0264 - -#define cpmac_read(base, reg) (readl((void __iomem *)(base) + (reg))) -#define cpmac_write(base, reg, val) (writel(val, (void __iomem *)(base) + \ - (reg))) - -/* MDIO bus */ -#define CPMAC_MDIO_VERSION 0x0000 -#define CPMAC_MDIO_CONTROL 0x0004 -#define MDIOC_IDLE 0x80000000 -#define MDIOC_ENABLE 0x40000000 -#define MDIOC_PREAMBLE 0x00100000 -#define MDIOC_FAULT 0x00080000 -#define MDIOC_FAULTDETECT 0x00040000 -#define MDIOC_INTTEST 0x00020000 -#define MDIOC_CLKDIV(div) ((div) & 0xff) -#define CPMAC_MDIO_ALIVE 0x0008 -#define CPMAC_MDIO_LINK 0x000c -#define CPMAC_MDIO_ACCESS(channel) (0x0080 + (channel) * 8) -#define MDIO_BUSY 0x80000000 -#define MDIO_WRITE 0x40000000 -#define MDIO_REG(reg) (((reg) & 0x1f) << 21) -#define MDIO_PHY(phy) (((phy) & 0x1f) << 16) -#define MDIO_DATA(data) ((data) & 0xffff) -#define CPMAC_MDIO_PHYSEL(channel) (0x0084 + (channel) * 8) -#define PHYSEL_LINKSEL 0x00000040 -#define PHYSEL_LINKINT 0x00000020 - -struct cpmac_desc { - u32 hw_next; - u32 hw_data; - u16 buflen; - u16 bufflags; - u16 datalen; - u16 dataflags; -#define CPMAC_SOP 0x8000 -#define CPMAC_EOP 0x4000 -#define CPMAC_OWN 0x2000 -#define CPMAC_EOQ 0x1000 - struct sk_buff *skb; - struct cpmac_desc *next; - struct cpmac_desc *prev; - dma_addr_t mapping; - dma_addr_t data_mapping; -}; - -struct cpmac_priv { - spinlock_t lock; - spinlock_t rx_lock; - struct cpmac_desc *rx_head; - int ring_size; - struct cpmac_desc *desc_ring; - dma_addr_t dma_ring; - void __iomem *regs; - struct mii_bus *mii_bus; - char phy_name[MII_BUS_ID_SIZE + 3]; - int oldlink, oldspeed, oldduplex; - u32 msg_enable; - struct net_device *dev; - struct work_struct reset_work; - struct platform_device *pdev; - struct napi_struct napi; - atomic_t reset_pending; -}; - -static irqreturn_t cpmac_irq(int, void *); -static void cpmac_hw_start(struct net_device *dev); -static void cpmac_hw_stop(struct net_device *dev); -static int 
cpmac_stop(struct net_device *dev); -static int cpmac_open(struct net_device *dev); - -static void cpmac_dump_regs(struct net_device *dev) -{ - int i; - struct cpmac_priv *priv = netdev_priv(dev); - - for (i = 0; i < CPMAC_REG_END; i += 4) { - if (i % 16 == 0) { - if (i) - printk("\n"); - printk("%s: reg[%p]:", dev->name, priv->regs + i); - } - printk(" %08x", cpmac_read(priv->regs, i)); - } - printk("\n"); -} - -static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc) -{ - int i; - - printk("%s: desc[%p]:", dev->name, desc); - for (i = 0; i < sizeof(*desc) / 4; i++) - printk(" %08x", ((u32 *)desc)[i]); - printk("\n"); -} - -static void cpmac_dump_all_desc(struct net_device *dev) -{ - struct cpmac_priv *priv = netdev_priv(dev); - struct cpmac_desc *dump = priv->rx_head; - - do { - cpmac_dump_desc(dev, dump); - dump = dump->next; - } while (dump != priv->rx_head); -} - -static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb) -{ - int i; - - printk("%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len); - for (i = 0; i < skb->len; i++) { - if (i % 16 == 0) { - if (i) - printk("\n"); - printk("%s: data[%p]:", dev->name, skb->data + i); - } - printk(" %02x", ((u8 *)skb->data)[i]); - } - printk("\n"); -} - -static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg) -{ - u32 val; - - while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY) - cpu_relax(); - cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) | - MDIO_PHY(phy_id)); - while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY) - cpu_relax(); - - return MDIO_DATA(val); -} - -static int cpmac_mdio_write(struct mii_bus *bus, int phy_id, - int reg, u16 val) -{ - while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY) - cpu_relax(); - cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE | - MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val)); - - return 0; -} - -static int cpmac_mdio_reset(struct mii_bus *bus) -{ - struct clk *cpmac_clk; - - cpmac_clk = clk_get(&bus->dev, "cpmac"); - if (IS_ERR(cpmac_clk)) { - pr_err("unable to get cpmac clock\n"); - return -1; - } - ar7_device_reset(AR7_RESET_BIT_MDIO); - cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE | - MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1)); - - return 0; -} - -static struct mii_bus *cpmac_mii; - -static void cpmac_set_multicast_list(struct net_device *dev) -{ - struct netdev_hw_addr *ha; - u8 tmp; - u32 mbp, bit, hash[2] = { 0, }; - struct cpmac_priv *priv = netdev_priv(dev); - - mbp = cpmac_read(priv->regs, CPMAC_MBP); - if (dev->flags & IFF_PROMISC) { - cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) | - MBP_RXPROMISC); - } else { - cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC); - if (dev->flags & IFF_ALLMULTI) { - /* enable all multicast mode */ - cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff); - cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff); - } else { - /* cpmac uses some strange mac address hashing - * (not crc32) - */ - netdev_for_each_mc_addr(ha, dev) { - bit = 0; - tmp = ha->addr[0]; - bit ^= (tmp >> 2) ^ (tmp << 4); - tmp = ha->addr[1]; - bit ^= (tmp >> 4) ^ (tmp << 2); - tmp = ha->addr[2]; - bit ^= (tmp >> 6) ^ tmp; - tmp = ha->addr[3]; - bit ^= (tmp >> 2) ^ (tmp << 4); - tmp = ha->addr[4]; - bit ^= (tmp >> 4) ^ (tmp << 2); - tmp = ha->addr[5]; - bit ^= (tmp >> 6) ^ tmp; - bit &= 0x3f; - hash[bit / 32] |= 1 << (bit % 32); - } - - cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]); - 
cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]); - } - } -} - -static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv, - struct cpmac_desc *desc) -{ - struct sk_buff *skb, *result = NULL; - - if (unlikely(netif_msg_hw(priv))) - cpmac_dump_desc(priv->dev, desc); - cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping); - if (unlikely(!desc->datalen)) { - if (netif_msg_rx_err(priv) && net_ratelimit()) - netdev_warn(priv->dev, "rx: spurious interrupt\n"); - - return NULL; - } - - skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE); - if (likely(skb)) { - skb_put(desc->skb, desc->datalen); - desc->skb->protocol = eth_type_trans(desc->skb, priv->dev); - skb_checksum_none_assert(desc->skb); - priv->dev->stats.rx_packets++; - priv->dev->stats.rx_bytes += desc->datalen; - result = desc->skb; - dma_unmap_single(&priv->dev->dev, desc->data_mapping, - CPMAC_SKB_SIZE, DMA_FROM_DEVICE); - desc->skb = skb; - desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data, - CPMAC_SKB_SIZE, - DMA_FROM_DEVICE); - desc->hw_data = (u32)desc->data_mapping; - if (unlikely(netif_msg_pktdata(priv))) { - netdev_dbg(priv->dev, "received packet:\n"); - cpmac_dump_skb(priv->dev, result); - } - } else { - if (netif_msg_rx_err(priv) && net_ratelimit()) - netdev_warn(priv->dev, - "low on skbs, dropping packet\n"); - - priv->dev->stats.rx_dropped++; - } - - desc->buflen = CPMAC_SKB_SIZE; - desc->dataflags = CPMAC_OWN; - - return result; -} - -static int cpmac_poll(struct napi_struct *napi, int budget) -{ - struct sk_buff *skb; - struct cpmac_desc *desc, *restart; - struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi); - int received = 0, processed = 0; - - spin_lock(&priv->rx_lock); - if (unlikely(!priv->rx_head)) { - if (netif_msg_rx_err(priv) && net_ratelimit()) - netdev_warn(priv->dev, "rx: polling, but no queue\n"); - - spin_unlock(&priv->rx_lock); - napi_complete(napi); - return 0; - } - - desc = priv->rx_head; - restart = NULL; - while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) { - processed++; - - if ((desc->dataflags & CPMAC_EOQ) != 0) { - /* The last update to eoq->hw_next didn't happen - * soon enough, and the receiver stopped here. - * Remember this descriptor so we can restart - * the receiver after freeing some space. - */ - if (unlikely(restart)) { - if (netif_msg_rx_err(priv)) - netdev_err(priv->dev, "poll found a" - " duplicate EOQ: %p and %p\n", - restart, desc); - goto fatal_error; - } - - restart = desc->next; - } - - skb = cpmac_rx_one(priv, desc); - if (likely(skb)) { - netif_receive_skb(skb); - received++; - } - desc = desc->next; - } - - if (desc != priv->rx_head) { - /* We freed some buffers, but not the whole ring, - * add what we did free to the rx list - */ - desc->prev->hw_next = (u32)0; - priv->rx_head->prev->hw_next = priv->rx_head->mapping; - } - - /* Optimization: If we did not actually process an EOQ (perhaps because - * of quota limits), check to see if the tail of the queue has EOQ set. - * We should immediately restart in that case so that the receiver can - * restart and run in parallel with more packet processing. - * This lets us handle slightly larger bursts before running - * out of ring space (assuming dev->weight < ring_size) - */ - - if (!restart && - (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ)) - == CPMAC_EOQ && - (priv->rx_head->dataflags & CPMAC_OWN) != 0) { - /* reset EOQ so the poll loop (above) doesn't try to - * restart this when it eventually gets to this descriptor. 
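 * (Hedged editorial gloss, not part of the original comment: EOQ marks
 * the descriptor at which the DMA engine stopped because hw_next was
 * still zero when the engine finished the previous buffer; once the
 * ring has been replenished, writing that descriptor's address to
 * CPMAC_RX_PTR(0) further down restarts reception from the stall
 * point.)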
- */ - priv->rx_head->prev->dataflags &= ~CPMAC_EOQ; - restart = priv->rx_head; - } - - if (restart) { - priv->dev->stats.rx_errors++; - priv->dev->stats.rx_fifo_errors++; - if (netif_msg_rx_err(priv) && net_ratelimit()) - netdev_warn(priv->dev, "rx dma ring overrun\n"); - - if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) { - if (netif_msg_drv(priv)) - netdev_err(priv->dev, "cpmac_poll is trying " - "to restart rx from a descriptor " - "that's not free: %p\n", restart); - goto fatal_error; - } - - cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping); - } - - priv->rx_head = desc; - spin_unlock(&priv->rx_lock); - if (unlikely(netif_msg_rx_status(priv))) - netdev_dbg(priv->dev, "poll processed %d packets\n", received); - - if (processed == 0) { - /* we ran out of packets to read, - * revert to interrupt-driven mode - */ - napi_complete(napi); - cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); - return 0; - } - - return 1; - -fatal_error: - /* Something went horribly wrong. - * Reset hardware to try to recover rather than wedging. - */ - if (netif_msg_drv(priv)) { - netdev_err(priv->dev, "cpmac_poll is confused. " - "Resetting hardware\n"); - cpmac_dump_all_desc(priv->dev); - netdev_dbg(priv->dev, "RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n", - cpmac_read(priv->regs, CPMAC_RX_PTR(0)), - cpmac_read(priv->regs, CPMAC_RX_ACK(0))); - } - - spin_unlock(&priv->rx_lock); - napi_complete(napi); - netif_tx_stop_all_queues(priv->dev); - napi_disable(&priv->napi); - - atomic_inc(&priv->reset_pending); - cpmac_hw_stop(priv->dev); - if (!schedule_work(&priv->reset_work)) - atomic_dec(&priv->reset_pending); - - return 0; - -} - -static netdev_tx_t cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - int queue; - unsigned int len; - struct cpmac_desc *desc; - struct cpmac_priv *priv = netdev_priv(dev); - - if (unlikely(atomic_read(&priv->reset_pending))) - return NETDEV_TX_BUSY; - - if (unlikely(skb_padto(skb, ETH_ZLEN))) - return NETDEV_TX_OK; - - len = max_t(unsigned int, skb->len, ETH_ZLEN); - queue = skb_get_queue_mapping(skb); - netif_stop_subqueue(dev, queue); - - desc = &priv->desc_ring[queue]; - if (unlikely(desc->dataflags & CPMAC_OWN)) { - if (netif_msg_tx_err(priv) && net_ratelimit()) - netdev_warn(dev, "tx dma ring full\n"); - - return NETDEV_TX_BUSY; - } - - spin_lock(&priv->lock); - spin_unlock(&priv->lock); - desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN; - desc->skb = skb; - desc->data_mapping = dma_map_single(&dev->dev, skb->data, len, - DMA_TO_DEVICE); - desc->hw_data = (u32)desc->data_mapping; - desc->datalen = len; - desc->buflen = len; - if (unlikely(netif_msg_tx_queued(priv))) - netdev_dbg(dev, "sending 0x%p, len=%d\n", skb, skb->len); - if (unlikely(netif_msg_hw(priv))) - cpmac_dump_desc(dev, desc); - if (unlikely(netif_msg_pktdata(priv))) - cpmac_dump_skb(dev, skb); - cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping); - - return NETDEV_TX_OK; -} - -static void cpmac_end_xmit(struct net_device *dev, int queue) -{ - struct cpmac_desc *desc; - struct cpmac_priv *priv = netdev_priv(dev); - - desc = &priv->desc_ring[queue]; - cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping); - if (likely(desc->skb)) { - spin_lock(&priv->lock); - dev->stats.tx_packets++; - dev->stats.tx_bytes += desc->skb->len; - spin_unlock(&priv->lock); - dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len, - DMA_TO_DEVICE); - - if (unlikely(netif_msg_tx_done(priv))) - netdev_dbg(dev, "sent 0x%p, len=%d\n", - desc->skb, desc->skb->len); - - 
dev_consume_skb_irq(desc->skb); - desc->skb = NULL; - if (__netif_subqueue_stopped(dev, queue)) - netif_wake_subqueue(dev, queue); - } else { - if (netif_msg_tx_err(priv) && net_ratelimit()) - netdev_warn(dev, "end_xmit: spurious interrupt\n"); - if (__netif_subqueue_stopped(dev, queue)) - netif_wake_subqueue(dev, queue); - } -} - -static void cpmac_hw_stop(struct net_device *dev) -{ - int i; - struct cpmac_priv *priv = netdev_priv(dev); - struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev); - - ar7_device_reset(pdata->reset_bit); - cpmac_write(priv->regs, CPMAC_RX_CONTROL, - cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1); - cpmac_write(priv->regs, CPMAC_TX_CONTROL, - cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1); - for (i = 0; i < 8; i++) { - cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); - cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0); - } - cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff); - cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff); - cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff); - cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); - cpmac_write(priv->regs, CPMAC_MAC_CONTROL, - cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII); -} - -static void cpmac_hw_start(struct net_device *dev) -{ - int i; - struct cpmac_priv *priv = netdev_priv(dev); - struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev); - - ar7_device_reset(pdata->reset_bit); - for (i = 0; i < 8; i++) { - cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); - cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0); - } - cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping); - - cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST | - MBP_RXMCAST); - cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0); - for (i = 0; i < 8; i++) - cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]); - cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]); - cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] | - (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) | - (dev->dev_addr[3] << 24)); - cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE); - cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff); - cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff); - cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff); - cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); - cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1); - cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); - cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff); - cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3); - - cpmac_write(priv->regs, CPMAC_RX_CONTROL, - cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1); - cpmac_write(priv->regs, CPMAC_TX_CONTROL, - cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1); - cpmac_write(priv->regs, CPMAC_MAC_CONTROL, - cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII | - MAC_FDX); -} - -static void cpmac_clear_rx(struct net_device *dev) -{ - struct cpmac_priv *priv = netdev_priv(dev); - struct cpmac_desc *desc; - int i; - - if (unlikely(!priv->rx_head)) - return; - desc = priv->rx_head; - for (i = 0; i < priv->ring_size; i++) { - if ((desc->dataflags & CPMAC_OWN) == 0) { - if (netif_msg_rx_err(priv) && net_ratelimit()) - netdev_warn(dev, "packet dropped\n"); - if (unlikely(netif_msg_hw(priv))) - cpmac_dump_desc(dev, desc); - desc->dataflags = CPMAC_OWN; - dev->stats.rx_dropped++; - } - desc->hw_next = desc->next->mapping; - desc = desc->next; - } - priv->rx_head->prev->hw_next = 0; -} - -static void cpmac_clear_tx(struct net_device *dev) -{ - struct cpmac_priv *priv = netdev_priv(dev); - int i; 
- - if (unlikely(!priv->desc_ring)) - return; - for (i = 0; i < CPMAC_QUEUES; i++) { - priv->desc_ring[i].dataflags = 0; - if (priv->desc_ring[i].skb) { - dev_kfree_skb_any(priv->desc_ring[i].skb); - priv->desc_ring[i].skb = NULL; - } - } -} - -static void cpmac_hw_error(struct work_struct *work) -{ - struct cpmac_priv *priv = - container_of(work, struct cpmac_priv, reset_work); - - spin_lock(&priv->rx_lock); - cpmac_clear_rx(priv->dev); - spin_unlock(&priv->rx_lock); - cpmac_clear_tx(priv->dev); - cpmac_hw_start(priv->dev); - barrier(); - atomic_dec(&priv->reset_pending); - - netif_tx_wake_all_queues(priv->dev); - cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3); -} - -static void cpmac_check_status(struct net_device *dev) -{ - struct cpmac_priv *priv = netdev_priv(dev); - - u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS); - int rx_channel = (macstatus >> 8) & 7; - int rx_code = (macstatus >> 12) & 15; - int tx_channel = (macstatus >> 16) & 7; - int tx_code = (macstatus >> 20) & 15; - - if (rx_code || tx_code) { - if (netif_msg_drv(priv) && net_ratelimit()) { - /* Can't find any documentation on what these - * error codes actually are. So just log them and hope.. - */ - if (rx_code) - netdev_warn(dev, "host error %d on rx " - "channel %d (macstatus %08x), resetting\n", - rx_code, rx_channel, macstatus); - if (tx_code) - netdev_warn(dev, "host error %d on tx " - "channel %d (macstatus %08x), resetting\n", - tx_code, tx_channel, macstatus); - } - - netif_tx_stop_all_queues(dev); - cpmac_hw_stop(dev); - if (schedule_work(&priv->reset_work)) - atomic_inc(&priv->reset_pending); - if (unlikely(netif_msg_hw(priv))) - cpmac_dump_regs(dev); - } - cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); -} - -static irqreturn_t cpmac_irq(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct cpmac_priv *priv; - int queue; - u32 status; - - priv = netdev_priv(dev); - - status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR); - - if (unlikely(netif_msg_intr(priv))) - netdev_dbg(dev, "interrupt status: 0x%08x\n", status); - - if (status & MAC_INT_TX) - cpmac_end_xmit(dev, (status & 7)); - - if (status & MAC_INT_RX) { - queue = (status >> 8) & 7; - if (napi_schedule_prep(&priv->napi)) { - cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue); - __napi_schedule(&priv->napi); - } - } - - cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0); - - if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) - cpmac_check_status(dev); - - return IRQ_HANDLED; -} - -static void cpmac_tx_timeout(struct net_device *dev, unsigned int txqueue) -{ - struct cpmac_priv *priv = netdev_priv(dev); - - spin_lock(&priv->lock); - dev->stats.tx_errors++; - spin_unlock(&priv->lock); - if (netif_msg_tx_err(priv) && net_ratelimit()) - netdev_warn(dev, "transmit timeout\n"); - - atomic_inc(&priv->reset_pending); - barrier(); - cpmac_clear_tx(dev); - barrier(); - atomic_dec(&priv->reset_pending); - - netif_tx_wake_all_queues(priv->dev); -} - -static void cpmac_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring, - struct kernel_ethtool_ringparam *kernel_ring, - struct netlink_ext_ack *extack) -{ - struct cpmac_priv *priv = netdev_priv(dev); - - ring->rx_max_pending = 1024; - ring->rx_mini_max_pending = 1; - ring->rx_jumbo_max_pending = 1; - ring->tx_max_pending = 1; - - ring->rx_pending = priv->ring_size; - ring->rx_mini_pending = 1; - ring->rx_jumbo_pending = 1; - ring->tx_pending = 1; -} - -static int cpmac_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring, - struct 
kernel_ethtool_ringparam *kernel_ring, - struct netlink_ext_ack *extack) -{ - struct cpmac_priv *priv = netdev_priv(dev); - - if (netif_running(dev)) - return -EBUSY; - priv->ring_size = ring->rx_pending; - - return 0; -} - -static void cpmac_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - strscpy(info->driver, "cpmac", sizeof(info->driver)); - strscpy(info->version, CPMAC_VERSION, sizeof(info->version)); - snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac"); -} - -static const struct ethtool_ops cpmac_ethtool_ops = { - .get_drvinfo = cpmac_get_drvinfo, - .get_link = ethtool_op_get_link, - .get_ringparam = cpmac_get_ringparam, - .set_ringparam = cpmac_set_ringparam, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, -}; - -static void cpmac_adjust_link(struct net_device *dev) -{ - struct cpmac_priv *priv = netdev_priv(dev); - int new_state = 0; - - spin_lock(&priv->lock); - if (dev->phydev->link) { - netif_tx_start_all_queues(dev); - if (dev->phydev->duplex != priv->oldduplex) { - new_state = 1; - priv->oldduplex = dev->phydev->duplex; - } - - if (dev->phydev->speed != priv->oldspeed) { - new_state = 1; - priv->oldspeed = dev->phydev->speed; - } - - if (!priv->oldlink) { - new_state = 1; - priv->oldlink = 1; - } - } else if (priv->oldlink) { - new_state = 1; - priv->oldlink = 0; - priv->oldspeed = 0; - priv->oldduplex = -1; - } - - if (new_state && netif_msg_link(priv) && net_ratelimit()) - phy_print_status(dev->phydev); - - spin_unlock(&priv->lock); -} - -static int cpmac_open(struct net_device *dev) -{ - int i, size, res; - struct cpmac_priv *priv = netdev_priv(dev); - struct resource *mem; - struct cpmac_desc *desc; - struct sk_buff *skb; - - mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); - if (!request_mem_region(mem->start, resource_size(mem), dev->name)) { - if (netif_msg_drv(priv)) - netdev_err(dev, "failed to request registers\n"); - - res = -ENXIO; - goto fail_reserve; - } - - priv->regs = ioremap(mem->start, resource_size(mem)); - if (!priv->regs) { - if (netif_msg_drv(priv)) - netdev_err(dev, "failed to remap registers\n"); - - res = -ENXIO; - goto fail_remap; - } - - size = priv->ring_size + CPMAC_QUEUES; - priv->desc_ring = dma_alloc_coherent(&dev->dev, - sizeof(struct cpmac_desc) * size, - &priv->dma_ring, - GFP_KERNEL); - if (!priv->desc_ring) { - res = -ENOMEM; - goto fail_alloc; - } - - for (i = 0; i < size; i++) - priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i; - - priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; - for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) { - skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE); - if (unlikely(!skb)) { - res = -ENOMEM; - goto fail_desc; - } - desc->skb = skb; - desc->data_mapping = dma_map_single(&dev->dev, skb->data, - CPMAC_SKB_SIZE, - DMA_FROM_DEVICE); - desc->hw_data = (u32)desc->data_mapping; - desc->buflen = CPMAC_SKB_SIZE; - desc->dataflags = CPMAC_OWN; - desc->next = &priv->rx_head[(i + 1) % priv->ring_size]; - desc->next->prev = desc; - desc->hw_next = (u32)desc->next->mapping; - } - - priv->rx_head->prev->hw_next = (u32)0; - - res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev); - if (res) { - if (netif_msg_drv(priv)) - netdev_err(dev, "failed to obtain irq\n"); - - goto fail_irq; - } - - atomic_set(&priv->reset_pending, 0); - INIT_WORK(&priv->reset_work, cpmac_hw_error); - cpmac_hw_start(dev); - - napi_enable(&priv->napi); - phy_start(dev->phydev); 
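/* (Hedged editorial gloss, not part of the removed driver: the open
 * path above follows the usual bring-up order -- reserve and remap the
 * register window, allocate the descriptor ring and rx skbs, request
 * the IRQ, start the hardware, then enable NAPI and the PHY -- and the
 * fail_* labels below unwind it in reverse.) */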
- - return 0; - -fail_irq: -fail_desc: - for (i = 0; i < priv->ring_size; i++) { - if (priv->rx_head[i].skb) { - dma_unmap_single(&dev->dev, - priv->rx_head[i].data_mapping, - CPMAC_SKB_SIZE, - DMA_FROM_DEVICE); - kfree_skb(priv->rx_head[i].skb); - } - } - dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * size, - priv->desc_ring, priv->dma_ring); - -fail_alloc: - iounmap(priv->regs); - -fail_remap: - release_mem_region(mem->start, resource_size(mem)); - -fail_reserve: - return res; -} - -static int cpmac_stop(struct net_device *dev) -{ - int i; - struct cpmac_priv *priv = netdev_priv(dev); - struct resource *mem; - - netif_tx_stop_all_queues(dev); - - cancel_work_sync(&priv->reset_work); - napi_disable(&priv->napi); - phy_stop(dev->phydev); - - cpmac_hw_stop(dev); - - for (i = 0; i < 8; i++) - cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); - cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0); - cpmac_write(priv->regs, CPMAC_MBP, 0); - - free_irq(dev->irq, dev); - iounmap(priv->regs); - mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); - release_mem_region(mem->start, resource_size(mem)); - priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; - for (i = 0; i < priv->ring_size; i++) { - if (priv->rx_head[i].skb) { - dma_unmap_single(&dev->dev, - priv->rx_head[i].data_mapping, - CPMAC_SKB_SIZE, - DMA_FROM_DEVICE); - kfree_skb(priv->rx_head[i].skb); - } - } - - dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * - (CPMAC_QUEUES + priv->ring_size), - priv->desc_ring, priv->dma_ring); - - return 0; -} - -static const struct net_device_ops cpmac_netdev_ops = { - .ndo_open = cpmac_open, - .ndo_stop = cpmac_stop, - .ndo_start_xmit = cpmac_start_xmit, - .ndo_tx_timeout = cpmac_tx_timeout, - .ndo_set_rx_mode = cpmac_set_multicast_list, - .ndo_eth_ioctl = phy_do_ioctl_running, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, -}; - -static int external_switch; - -static int cpmac_probe(struct platform_device *pdev) -{ - int rc, phy_id; - char mdio_bus_id[MII_BUS_ID_SIZE]; - struct resource *mem; - struct cpmac_priv *priv; - struct net_device *dev; - struct plat_cpmac_data *pdata; - struct phy_device *phydev = NULL; - - pdata = dev_get_platdata(&pdev->dev); - - if (external_switch || dumb_switch) { - strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */ - phy_id = pdev->id; - } else { - for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) { - if (!(pdata->phy_mask & (1 << phy_id))) - continue; - if (!mdiobus_get_phy(cpmac_mii, phy_id)) - continue; - strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE); - break; - } - } - - if (phy_id == PHY_MAX_ADDR) { - dev_err(&pdev->dev, "no PHY present, falling back " - "to switch on MDIO bus 0\n"); - strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */ - phy_id = pdev->id; - } - mdio_bus_id[sizeof(mdio_bus_id) - 1] = '\0'; - - dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES); - if (!dev) - return -ENOMEM; - - SET_NETDEV_DEV(dev, &pdev->dev); - platform_set_drvdata(pdev, dev); - priv = netdev_priv(dev); - - priv->pdev = pdev; - mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); - if (!mem) { - rc = -ENODEV; - goto fail; - } - - dev->irq = platform_get_irq_byname(pdev, "irq"); - - dev->netdev_ops = &cpmac_netdev_ops; - dev->ethtool_ops = &cpmac_ethtool_ops; - - netif_napi_add(dev, &priv->napi, cpmac_poll); - - spin_lock_init(&priv->lock); - spin_lock_init(&priv->rx_lock); - priv->dev = dev; - priv->ring_size = 64; - priv->msg_enable = netif_msg_init(debug_level, 
0xff); - eth_hw_addr_set(dev, pdata->dev_addr); - - snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, - mdio_bus_id, phy_id); - - phydev = phy_connect(dev, priv->phy_name, cpmac_adjust_link, - PHY_INTERFACE_MODE_MII); - - if (IS_ERR(phydev)) { - if (netif_msg_drv(priv)) - dev_err(&pdev->dev, "Could not attach to PHY\n"); - - rc = PTR_ERR(phydev); - goto fail; - } - - rc = register_netdev(dev); - if (rc) { - dev_err(&pdev->dev, "Could not register net device\n"); - goto fail; - } - - if (netif_msg_probe(priv)) { - dev_info(&pdev->dev, "regs: %p, irq: %d, phy: %s, " - "mac: %pM\n", (void *)mem->start, dev->irq, - priv->phy_name, dev->dev_addr); - } - - return 0; - -fail: - free_netdev(dev); - return rc; -} - -static int cpmac_remove(struct platform_device *pdev) -{ - struct net_device *dev = platform_get_drvdata(pdev); - - unregister_netdev(dev); - free_netdev(dev); - - return 0; -} - -static struct platform_driver cpmac_driver = { - .driver = { - .name = "cpmac", - }, - .probe = cpmac_probe, - .remove = cpmac_remove, -}; - -int __init cpmac_init(void) -{ - u32 mask; - int i, res; - - cpmac_mii = mdiobus_alloc(); - if (cpmac_mii == NULL) - return -ENOMEM; - - cpmac_mii->name = "cpmac-mii"; - cpmac_mii->read = cpmac_mdio_read; - cpmac_mii->write = cpmac_mdio_write; - cpmac_mii->reset = cpmac_mdio_reset; - - cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256); - - if (!cpmac_mii->priv) { - pr_err("Can't ioremap mdio registers\n"); - res = -ENXIO; - goto fail_alloc; - } - - /* FIXME: unhardcode gpio&reset bits */ - ar7_gpio_disable(26); - ar7_gpio_disable(27); - ar7_device_reset(AR7_RESET_BIT_CPMAC_LO); - ar7_device_reset(AR7_RESET_BIT_CPMAC_HI); - ar7_device_reset(AR7_RESET_BIT_EPHY); - - cpmac_mii->reset(cpmac_mii); - - for (i = 0; i < 300; i++) { - mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE); - if (mask) - break; - else - msleep(10); - } - - mask &= 0x7fffffff; - if (mask & (mask - 1)) { - external_switch = 1; - mask = 0; - } - - cpmac_mii->phy_mask = ~(mask | 0x80000000); - snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "cpmac-1"); - - res = mdiobus_register(cpmac_mii); - if (res) - goto fail_mii; - - res = platform_driver_register(&cpmac_driver); - if (res) - goto fail_cpmac; - - return 0; - -fail_cpmac: - mdiobus_unregister(cpmac_mii); - -fail_mii: - iounmap(cpmac_mii->priv); - -fail_alloc: - mdiobus_free(cpmac_mii); - - return res; -} - -void __exit cpmac_exit(void) -{ - platform_driver_unregister(&cpmac_driver); - mdiobus_unregister(cpmac_mii); - iounmap(cpmac_mii->priv); - mdiobus_free(cpmac_mii); -} - -module_init(cpmac_init); -module_exit(cpmac_exit); diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c index bfa81bbfce3f..26dc906eae90 100644 --- a/drivers/net/ethernet/ti/cpsw-common.c +++ b/drivers/net/ethernet/ti/cpsw-common.c @@ -3,7 +3,6 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/regmap.h> #include <linux/mfd/syscon.h> diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index 25e707d7b87c..4edb7963f856 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c @@ -12,7 +12,6 @@ #include <linux/netdevice.h> #include <linux/phy.h> #include <linux/of.h> -#include <linux/of_device.h> #include "cpsw.h" diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index f9cd566d1c9b..ca4d4548f85e 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c 
@@ -31,7 +31,7 @@ #include <linux/if_vlan.h> #include <linux/kmemleak.h> #include <linux/sys_soc.h> -#include <net/page_pool.h> +#include <net/page_pool/helpers.h> #include <linux/bpf.h> #include <linux/bpf_trace.h> diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index c61e4e44a78f..0e4f526b1753 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -30,7 +30,7 @@ #include <linux/sys_soc.h> #include <net/switchdev.h> -#include <net/page_pool.h> +#include <net/page_pool/helpers.h> #include <net/pkt_cls.h> #include <net/devlink.h> diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index e966dd47e2db..764ed298b570 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -18,7 +18,7 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/skbuff.h> -#include <net/page_pool.h> +#include <net/page_pool/helpers.h> #include <net/pkt_cls.h> #include <net/pkt_sched.h> @@ -1360,7 +1360,7 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp, * particular hardware is sharing a common queue, so the * incoming device might change per packet. */ - xdp_do_flush_map(); + xdp_do_flush(); break; default: bpf_warn_invalid_xdp_action(ndev, prog, act); @@ -1396,9 +1396,9 @@ static int cpsw_qos_clsflower_add_policer(struct cpsw_priv *priv, int ret; if (dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) { + ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) { NL_SET_ERR_MSG_MOD(extack, "Unsupported keys used"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h index 34230145ca0b..0e27c433098d 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.h +++ b/drivers/net/ethernet/ti/cpsw_priv.h @@ -6,6 +6,7 @@ #ifndef DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_ #define DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_ +#include <net/xdp.h> #include <uapi/linux/bpf.h> #include "davinci_cpdma.h" diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 2eb9d5a32588..b0950a318c42 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -38,6 +38,7 @@ #include <linux/dma-mapping.h> #include <linux/clk.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/regmap.h> #include <linux/semaphore.h> #include <linux/phy.h> @@ -47,10 +48,7 @@ #include <linux/pm_runtime.h> #include <linux/davinci_emac.h> #include <linux/of.h> -#include <linux/of_address.h> -#include <linux/of_device.h> #include <linux/of_mdio.h> -#include <linux/of_irq.h> #include <linux/of_net.h> #include <linux/mfd/syscon.h> @@ -1726,13 +1724,10 @@ static const struct net_device_ops emac_netdev_ops = { #endif }; -static const struct of_device_id davinci_emac_of_match[]; - static struct emac_platform_data * davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) { struct device_node *np; - const struct of_device_id *match; const struct emac_platform_data *auxdata; struct emac_platform_data *pdata = NULL; @@ -1779,9 +1774,8 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) pdata->interrupt_disable = auxdata->interrupt_disable; } - match = of_match_device(davinci_emac_of_match, &pdev->dev); - if (match && match->data) { - auxdata = 
match->data; + auxdata = device_get_match_data(&pdev->dev); + if (auxdata) { pdata->version = auxdata->version; pdata->hw_ram_addr = auxdata->hw_ram_addr; } @@ -1934,18 +1928,20 @@ static int davinci_emac_probe(struct platform_device *pdev) goto err_free_rxchan; ndev->irq = rc; - rc = davinci_emac_try_get_mac(pdev, res_ctrl ? 0 : 1, priv->mac_addr); - if (!rc) - eth_hw_addr_set(ndev, priv->mac_addr); - + /* If the MAC address is not present, read the registers from the SoC */ if (!is_valid_ether_addr(priv->mac_addr)) { - /* Use random MAC if still none obtained. */ - eth_hw_addr_random(ndev); - memcpy(priv->mac_addr, ndev->dev_addr, ndev->addr_len); - dev_warn(&pdev->dev, "using random MAC addr: %pM\n", - priv->mac_addr); + rc = davinci_emac_try_get_mac(pdev, res_ctrl ? 0 : 1, priv->mac_addr); + if (!rc) + eth_hw_addr_set(ndev, priv->mac_addr); + + if (!is_valid_ether_addr(priv->mac_addr)) { + /* Use random MAC if still none obtained. */ + eth_hw_addr_random(ndev); + memcpy(priv->mac_addr, ndev->dev_addr, ndev->addr_len); + dev_warn(&pdev->dev, "using random MAC addr: %pM\n", + priv->mac_addr); + } } - ndev->netdev_ops = &emac_netdev_ops; ndev->ethtool_ops = ðtool_ops; netif_napi_add(ndev, &priv->napi, emac_poll); @@ -2002,7 +1998,7 @@ err_free_netdev: * Called when removing the device driver. We disable clock usage and release * the resources taken up by the driver and unregister network device */ -static int davinci_emac_remove(struct platform_device *pdev) +static void davinci_emac_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct emac_priv *priv = netdev_priv(ndev); @@ -2022,8 +2018,6 @@ static int davinci_emac_remove(struct platform_device *pdev) if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); free_netdev(ndev); - - return 0; } static int davinci_emac_suspend(struct device *dev) @@ -2076,7 +2070,7 @@ static struct platform_driver davinci_emac_driver = { .of_match_table = davinci_emac_of_match, }, .probe = davinci_emac_probe, - .remove = davinci_emac_remove, + .remove_new = davinci_emac_remove, }; /** diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 23169e36a3d4..628c87dc1d28 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -23,7 +23,6 @@ #include <linux/pm_runtime.h> #include <linux/davinci_emac.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_mdio.h> #include <linux/pinctrl/consumer.h> #include <linux/mdio-bitbang.h> @@ -674,7 +673,7 @@ bail_out: return ret; } -static int davinci_mdio_remove(struct platform_device *pdev) +static void davinci_mdio_remove(struct platform_device *pdev) { struct davinci_mdio_data *data = platform_get_drvdata(pdev); @@ -687,8 +686,6 @@ static int davinci_mdio_remove(struct platform_device *pdev) pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_disable(&pdev->dev); - - return 0; } #ifdef CONFIG_PM @@ -767,7 +764,7 @@ static struct platform_driver davinci_mdio_driver = { .of_match_table = of_match_ptr(davinci_mdio_of_mtable), }, .probe = davinci_mdio_probe, - .remove = davinci_mdio_remove, + .remove_new = davinci_mdio_remove, }; static int __init davinci_mdio_init(void) diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c new file mode 100644 index 000000000000..3025e9c18970 --- /dev/null +++ b/drivers/net/ethernet/ti/icssg/icss_iep.c @@ -0,0 +1,965 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Texas Instruments ICSSG 
Industrial Ethernet Peripheral (IEP) Driver + * + * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com + * + */ + +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/timekeeping.h> +#include <linux/interrupt.h> +#include <linux/of_irq.h> + +#include "icss_iep.h" + +#define IEP_MAX_DEF_INC 0xf +#define IEP_MAX_COMPEN_INC 0xfff +#define IEP_MAX_COMPEN_COUNT 0xffffff + +#define IEP_GLOBAL_CFG_CNT_ENABLE BIT(0) +#define IEP_GLOBAL_CFG_DEFAULT_INC_MASK GENMASK(7, 4) +#define IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT 4 +#define IEP_GLOBAL_CFG_COMPEN_INC_MASK GENMASK(19, 8) +#define IEP_GLOBAL_CFG_COMPEN_INC_SHIFT 8 + +#define IEP_GLOBAL_STATUS_CNT_OVF BIT(0) + +#define IEP_CMP_CFG_SHADOW_EN BIT(17) +#define IEP_CMP_CFG_CMP0_RST_CNT_EN BIT(0) +#define IEP_CMP_CFG_CMP_EN(cmp) (GENMASK(16, 1) & (1 << ((cmp) + 1))) + +#define IEP_CMP_STATUS(cmp) (1 << (cmp)) + +#define IEP_SYNC_CTRL_SYNC_EN BIT(0) +#define IEP_SYNC_CTRL_SYNC_N_EN(n) (GENMASK(2, 1) & (BIT(1) << (n))) + +#define IEP_MIN_CMP 0 +#define IEP_MAX_CMP 15 + +#define ICSS_IEP_64BIT_COUNTER_SUPPORT BIT(0) +#define ICSS_IEP_SLOW_COMPEN_REG_SUPPORT BIT(1) +#define ICSS_IEP_SHADOW_MODE_SUPPORT BIT(2) + +#define LATCH_INDEX(ts_index) ((ts_index) + 6) +#define IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(n) BIT(LATCH_INDEX(n)) +#define IEP_CAP_CFG_CAP_ASYNC_EN(n) BIT(LATCH_INDEX(n) + 10) + +enum { + ICSS_IEP_GLOBAL_CFG_REG, + ICSS_IEP_GLOBAL_STATUS_REG, + ICSS_IEP_COMPEN_REG, + ICSS_IEP_SLOW_COMPEN_REG, + ICSS_IEP_COUNT_REG0, + ICSS_IEP_COUNT_REG1, + ICSS_IEP_CAPTURE_CFG_REG, + ICSS_IEP_CAPTURE_STAT_REG, + + ICSS_IEP_CAP6_RISE_REG0, + ICSS_IEP_CAP6_RISE_REG1, + + ICSS_IEP_CAP7_RISE_REG0, + ICSS_IEP_CAP7_RISE_REG1, + + ICSS_IEP_CMP_CFG_REG, + ICSS_IEP_CMP_STAT_REG, + ICSS_IEP_CMP0_REG0, + ICSS_IEP_CMP0_REG1, + ICSS_IEP_CMP1_REG0, + ICSS_IEP_CMP1_REG1, + + ICSS_IEP_CMP8_REG0, + ICSS_IEP_CMP8_REG1, + ICSS_IEP_SYNC_CTRL_REG, + ICSS_IEP_SYNC0_STAT_REG, + ICSS_IEP_SYNC1_STAT_REG, + ICSS_IEP_SYNC_PWIDTH_REG, + ICSS_IEP_SYNC0_PERIOD_REG, + ICSS_IEP_SYNC1_DELAY_REG, + ICSS_IEP_SYNC_START_REG, + ICSS_IEP_MAX_REGS, +}; + +/** + * struct icss_iep_plat_data - Plat data to handle SoC variants + * @config: Regmap configuration data + * @reg_offs: register offsets to capture offset differences across SoCs + * @flags: Flags to represent IEP properties + */ +struct icss_iep_plat_data { + struct regmap_config *config; + u32 reg_offs[ICSS_IEP_MAX_REGS]; + u32 flags; +}; + +struct icss_iep { + struct device *dev; + void __iomem *base; + const struct icss_iep_plat_data *plat_data; + struct regmap *map; + struct device_node *client_np; + unsigned long refclk_freq; + int clk_tick_time; /* one refclk tick time in ns */ + struct ptp_clock_info ptp_info; + struct ptp_clock *ptp_clock; + struct mutex ptp_clk_mutex; /* PHC access serializer */ + spinlock_t irq_lock; /* CMP IRQ vs icss_iep_ptp_enable access */ + u32 def_inc; + s16 slow_cmp_inc; + u32 slow_cmp_count; + const struct icss_iep_clockops *ops; + void *clockops_data; + u32 cycle_time_ns; + u32 perout_enabled; + bool pps_enabled; + int cap_cmp_irq; + u64 period; + u32 latch_enable; +}; + +/** + * icss_iep_get_count_hi() - Get the upper 32 bit IEP counter + * @iep: Pointer to structure representing IEP. 
+ * + * Return: upper 32 bit IEP counter + */ +int icss_iep_get_count_hi(struct icss_iep *iep) +{ + u32 val = 0; + + if (iep && (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)) + val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]); + + return val; +} +EXPORT_SYMBOL_GPL(icss_iep_get_count_hi); + +/** + * icss_iep_get_count_low() - Get the lower 32 bit IEP counter + * @iep: Pointer to structure representing IEP. + * + * Return: lower 32 bit IEP counter + */ +int icss_iep_get_count_low(struct icss_iep *iep) +{ + u32 val = 0; + + if (iep) + val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]); + + return val; +} +EXPORT_SYMBOL_GPL(icss_iep_get_count_low); + +/** + * icss_iep_get_ptp_clock_idx() - Get PTP clock index using IEP driver + * @iep: Pointer to structure representing IEP. + * + * Return: PTP clock index, -1 if not registered + */ +int icss_iep_get_ptp_clock_idx(struct icss_iep *iep) +{ + if (!iep || !iep->ptp_clock) + return -1; + return ptp_clock_index(iep->ptp_clock); +} +EXPORT_SYMBOL_GPL(icss_iep_get_ptp_clock_idx); + +static void icss_iep_set_counter(struct icss_iep *iep, u64 ns) +{ + if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) + writel(upper_32_bits(ns), iep->base + + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]); + writel(lower_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]); +} + +static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns); + +/** + * icss_iep_settime() - Set time of the PTP clock using IEP driver + * @iep: Pointer to structure representing IEP. + * @ns: Time to be set in nanoseconds + * + * This API uses writel() instead of regmap_write() for write operations as + * regmap_write() is too slow and this API is time sensitive. + */ +static void icss_iep_settime(struct icss_iep *iep, u64 ns) +{ + unsigned long flags; + + if (iep->ops && iep->ops->settime) { + iep->ops->settime(iep->clockops_data, ns); + return; + } + + spin_lock_irqsave(&iep->irq_lock, flags); + if (iep->pps_enabled || iep->perout_enabled) + writel(0, iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]); + + icss_iep_set_counter(iep, ns); + + if (iep->pps_enabled || iep->perout_enabled) { + icss_iep_update_to_next_boundary(iep, ns); + writel(IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN, + iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]); + } + spin_unlock_irqrestore(&iep->irq_lock, flags); +} + +/** + * icss_iep_gettime() - Get time of the PTP clock using IEP driver + * @iep: Pointer to structure representing IEP. + * @sts: Pointer to structure representing PTP system timestamp. + * + * This API uses readl() instead of regmap_read() for read operations as + * regmap_read() is too slow and this API is time sensitive. 
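 * (Hedged editorial gloss, not part of the original kerneldoc: the
 * readl()-over-regmap argument mirrors icss_iep_settime() above; the
 * read side additionally relies on the hardware latching the high
 * counter word when the low word is read, so no hi/lo/hi retry loop
 * is needed.)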
+ * + * Return: The current timestamp of the PTP clock using IEP driver + */ +static u64 icss_iep_gettime(struct icss_iep *iep, + struct ptp_system_timestamp *sts) +{ + u32 ts_hi = 0, ts_lo; + unsigned long flags; + + if (iep->ops && iep->ops->gettime) + return iep->ops->gettime(iep->clockops_data, sts); + + /* use local_irq_x() to make it work for both RT/non-RT */ + local_irq_save(flags); + + /* no need to play with hi-lo, hi is latched when lo is read */ + ptp_read_system_prets(sts); + ts_lo = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]); + ptp_read_system_postts(sts); + if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) + ts_hi = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]); + + local_irq_restore(flags); + + return (u64)ts_lo | (u64)ts_hi << 32; +} + +static void icss_iep_enable(struct icss_iep *iep) +{ + regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG, + IEP_GLOBAL_CFG_CNT_ENABLE, + IEP_GLOBAL_CFG_CNT_ENABLE); +} + +static void icss_iep_disable(struct icss_iep *iep) +{ + regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG, + IEP_GLOBAL_CFG_CNT_ENABLE, + 0); +} + +static void icss_iep_enable_shadow_mode(struct icss_iep *iep) +{ + u32 cycle_time; + int cmp; + + cycle_time = iep->cycle_time_ns - iep->def_inc; + + icss_iep_disable(iep); + + /* disable shadow mode */ + regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, + IEP_CMP_CFG_SHADOW_EN, 0); + + /* enable shadow mode */ + regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, + IEP_CMP_CFG_SHADOW_EN, IEP_CMP_CFG_SHADOW_EN); + + /* clear counters */ + icss_iep_set_counter(iep, 0); + + /* clear overflow status */ + regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_STATUS_REG, + IEP_GLOBAL_STATUS_CNT_OVF, + IEP_GLOBAL_STATUS_CNT_OVF); + + /* clear compare status */ + for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) { + regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG, + IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp)); + } + + /* enable reset counter on CMP0 event */ + regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, + IEP_CMP_CFG_CMP0_RST_CNT_EN, + IEP_CMP_CFG_CMP0_RST_CNT_EN); + /* enable compare */ + regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, + IEP_CMP_CFG_CMP_EN(0), + IEP_CMP_CFG_CMP_EN(0)); + + /* set CMP0 value to cycle time */ + regmap_write(iep->map, ICSS_IEP_CMP0_REG0, cycle_time); + if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) + regmap_write(iep->map, ICSS_IEP_CMP0_REG1, cycle_time); + + icss_iep_set_counter(iep, 0); + icss_iep_enable(iep); +} + +static void icss_iep_set_default_inc(struct icss_iep *iep, u8 def_inc) +{ + regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG, + IEP_GLOBAL_CFG_DEFAULT_INC_MASK, + def_inc << IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT); +} + +static void icss_iep_set_compensation_inc(struct icss_iep *iep, u16 compen_inc) +{ + struct device *dev = regmap_get_device(iep->map); + + if (compen_inc > IEP_MAX_COMPEN_INC) { + dev_err(dev, "%s: too high compensation inc %d\n", + __func__, compen_inc); + compen_inc = IEP_MAX_COMPEN_INC; + } + + regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG, + IEP_GLOBAL_CFG_COMPEN_INC_MASK, + compen_inc << IEP_GLOBAL_CFG_COMPEN_INC_SHIFT); +} + +static void icss_iep_set_compensation_count(struct icss_iep *iep, + u32 compen_count) +{ + struct device *dev = regmap_get_device(iep->map); + + if (compen_count > IEP_MAX_COMPEN_COUNT) { + dev_err(dev, "%s: too high compensation count %d\n", + __func__, compen_count); + compen_count = IEP_MAX_COMPEN_COUNT; + } + + regmap_write(iep->map, ICSS_IEP_COMPEN_REG, 
compen_count); +} + +static void icss_iep_set_slow_compensation_count(struct icss_iep *iep, + u32 compen_count) +{ + regmap_write(iep->map, ICSS_IEP_SLOW_COMPEN_REG, compen_count); +} + +/* PTP PHC operations */ +static int icss_iep_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info); + s32 ppb = scaled_ppm_to_ppb(scaled_ppm); + u32 cyc_count; + u16 cmp_inc; + + mutex_lock(&iep->ptp_clk_mutex); + + /* ppb is amount of frequency we want to adjust in 1GHz (billion) + * e.g. 100ppb means we need to speed up clock by 100Hz + * i.e. at end of 1 second (1 billion ns) clock time, we should be + * counting 100 more ns. + * We use IEP slow compensation to achieve continuous freq. adjustment. + * There are 2 parts. Cycle time and adjustment per cycle. + * Simplest case would be 1 sec cycle time. Then adjustment + * per cycle would be (def_inc + ppb) value. + * Cycle time will have to be chosen based on how bad the ppb is, + * e.g. the smaller the ppb, the larger the cycle time has to be. + * The minimum adjustment we can do is +-1ns per cycle so let's + * reduce the cycle time to get 1ns per cycle adjustment. + * 1ppb = 1sec cycle time & 1ns adjust + * 1000ppb = 1/1000 cycle time & 1ns adjust per cycle + */ + + if (iep->cycle_time_ns) + iep->slow_cmp_inc = iep->clk_tick_time; /* 4ns adj per cycle */ + else + iep->slow_cmp_inc = 1; /* 1ns adjust per cycle */ + + if (ppb < 0) { + iep->slow_cmp_inc = -iep->slow_cmp_inc; + ppb = -ppb; + } + + cyc_count = NSEC_PER_SEC; /* 1s cycle time @1GHz */ + cyc_count /= ppb; /* cycle time per ppb */ + + /* slow_cmp_count is decremented every clock cycle, e.g. @250MHz */ + if (!iep->cycle_time_ns) + cyc_count /= iep->clk_tick_time; + iep->slow_cmp_count = cyc_count; + + /* iep->clk_tick_time is def_inc */ + cmp_inc = iep->clk_tick_time + iep->slow_cmp_inc; + icss_iep_set_compensation_inc(iep, cmp_inc); + icss_iep_set_slow_compensation_count(iep, iep->slow_cmp_count); + + mutex_unlock(&iep->ptp_clk_mutex); + + return 0; +} + +static int icss_iep_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info); + s64 ns; + + mutex_lock(&iep->ptp_clk_mutex); + if (iep->ops && iep->ops->adjtime) { + iep->ops->adjtime(iep->clockops_data, delta); + } else { + ns = icss_iep_gettime(iep, NULL); + ns += delta; + icss_iep_settime(iep, ns); + } + mutex_unlock(&iep->ptp_clk_mutex); + + return 0; +} + +static int icss_iep_ptp_gettimeex(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info); + u64 ns; + + mutex_lock(&iep->ptp_clk_mutex); + ns = icss_iep_gettime(iep, sts); + *ts = ns_to_timespec64(ns); + mutex_unlock(&iep->ptp_clk_mutex); + + return 0; +} + +static int icss_iep_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info); + u64 ns; + + mutex_lock(&iep->ptp_clk_mutex); + ns = timespec64_to_ns(ts); + icss_iep_settime(iep, ns); + mutex_unlock(&iep->ptp_clk_mutex); + + return 0; +} + +static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns) +{ + u64 ns, p_ns; + u32 offset; + + ns = icss_iep_gettime(iep, NULL); + if (start_ns < ns) + start_ns = ns; + p_ns = iep->period; + /* Round up to next period boundary */ + start_ns += p_ns - 1; + offset = do_div(start_ns, p_ns); + start_ns = start_ns * p_ns; + /* If it is too close to update, 
shift to next boundary */ + if (p_ns - offset < 10) + start_ns += p_ns; + + regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(start_ns)); + if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) + regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(start_ns)); +} + +static int icss_iep_perout_enable_hw(struct icss_iep *iep, + struct ptp_perout_request *req, int on) +{ + int ret; + u64 cmp; + + if (iep->ops && iep->ops->perout_enable) { + ret = iep->ops->perout_enable(iep->clockops_data, req, on, &cmp); + if (ret) + return ret; + + if (on) { + /* Configure CMP */ + regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp)); + if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) + regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp)); + /* Configure SYNC, 1ms pulse width */ + regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, 1000000); + regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0); + regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, 0); + regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */ + /* Enable CMP 1 */ + regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, + IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1)); + } else { + /* Disable CMP 1 */ + regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, + IEP_CMP_CFG_CMP_EN(1), 0); + + /* clear regs */ + regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0); + if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) + regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0); + } + } else { + if (on) { + u64 start_ns; + + iep->period = ((u64)req->period.sec * NSEC_PER_SEC) + + req->period.nsec; + start_ns = ((u64)req->period.sec * NSEC_PER_SEC) + + req->period.nsec; + icss_iep_update_to_next_boundary(iep, start_ns); + + /* Enable Sync in single shot mode */ + regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, + IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN); + /* Enable CMP 1 */ + regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, + IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1)); + } else { + /* Disable CMP 1 */ + regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, + IEP_CMP_CFG_CMP_EN(1), 0); + + /* clear CMP regs */ + regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0); + if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) + regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0); + + /* Disable sync */ + regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); + } + } + + return 0; +} + +static int icss_iep_perout_enable(struct icss_iep *iep, + struct ptp_perout_request *req, int on) +{ + unsigned long flags; + int ret = 0; + + mutex_lock(&iep->ptp_clk_mutex); + + if (iep->pps_enabled) { + ret = -EBUSY; + goto exit; + } + + if (iep->perout_enabled == !!on) + goto exit; + + spin_lock_irqsave(&iep->irq_lock, flags); + ret = icss_iep_perout_enable_hw(iep, req, on); + if (!ret) + iep->perout_enabled = !!on; + spin_unlock_irqrestore(&iep->irq_lock, flags); + +exit: + mutex_unlock(&iep->ptp_clk_mutex); + + return ret; +} + +static int icss_iep_pps_enable(struct icss_iep *iep, int on) +{ + struct ptp_clock_request rq; + struct timespec64 ts; + unsigned long flags; + int ret = 0; + u64 ns; + + mutex_lock(&iep->ptp_clk_mutex); + + if (iep->perout_enabled) { + ret = -EBUSY; + goto exit; + } + + if (iep->pps_enabled == !!on) + goto exit; + + spin_lock_irqsave(&iep->irq_lock, flags); + + rq.perout.index = 0; + if (on) { + ns = icss_iep_gettime(iep, NULL); + ts = ns_to_timespec64(ns); + rq.perout.period.sec = 1; + rq.perout.period.nsec = 0; + rq.perout.start.sec = ts.tv_sec + 2; + rq.perout.start.nsec = 0; + ret = 
icss_iep_perout_enable_hw(iep, &rq.perout, on); + } else { + ret = icss_iep_perout_enable_hw(iep, &rq.perout, on); + } + + if (!ret) + iep->pps_enabled = !!on; + + spin_unlock_irqrestore(&iep->irq_lock, flags); + +exit: + mutex_unlock(&iep->ptp_clk_mutex); + + return ret; +} + +static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on) +{ + u32 val, cap; + int ret = 0; + + mutex_lock(&iep->ptp_clk_mutex); + + if (iep->ops && iep->ops->extts_enable) { + ret = iep->ops->extts_enable(iep->clockops_data, index, on); + goto exit; + } + + if (((iep->latch_enable & BIT(index)) >> index) == on) + goto exit; + + regmap_read(iep->map, ICSS_IEP_CAPTURE_CFG_REG, &val); + cap = IEP_CAP_CFG_CAP_ASYNC_EN(index) | IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(index); + if (on) { + val |= cap; + iep->latch_enable |= BIT(index); + } else { + val &= ~cap; + iep->latch_enable &= ~BIT(index); + } + regmap_write(iep->map, ICSS_IEP_CAPTURE_CFG_REG, val); + +exit: + mutex_unlock(&iep->ptp_clk_mutex); + + return ret; +} + +static int icss_iep_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info); + + switch (rq->type) { + case PTP_CLK_REQ_PEROUT: + return icss_iep_perout_enable(iep, &rq->perout, on); + case PTP_CLK_REQ_PPS: + return icss_iep_pps_enable(iep, on); + case PTP_CLK_REQ_EXTTS: + return icss_iep_extts_enable(iep, rq->extts.index, on); + default: + break; + } + + return -EOPNOTSUPP; +} + +static struct ptp_clock_info icss_iep_ptp_info = { + .owner = THIS_MODULE, + .name = "ICSS IEP timer", + .max_adj = 10000000, + .adjfine = icss_iep_ptp_adjfine, + .adjtime = icss_iep_ptp_adjtime, + .gettimex64 = icss_iep_ptp_gettimeex, + .settime64 = icss_iep_ptp_settime, + .enable = icss_iep_ptp_enable, +}; + +struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx) +{ + struct platform_device *pdev; + struct device_node *iep_np; + struct icss_iep *iep; + + iep_np = of_parse_phandle(np, "ti,iep", idx); + if (!iep_np || !of_device_is_available(iep_np)) + return ERR_PTR(-ENODEV); + + pdev = of_find_device_by_node(iep_np); + of_node_put(iep_np); + + if (!pdev) + /* probably IEP not yet probed */ + return ERR_PTR(-EPROBE_DEFER); + + iep = platform_get_drvdata(pdev); + if (!iep) + return ERR_PTR(-EPROBE_DEFER); + + device_lock(iep->dev); + if (iep->client_np) { + device_unlock(iep->dev); + dev_err(iep->dev, "IEP is already acquired by %s\n", + iep->client_np->name); + return ERR_PTR(-EBUSY); + } + iep->client_np = np; + device_unlock(iep->dev); + get_device(iep->dev); + + return iep; +} +EXPORT_SYMBOL_GPL(icss_iep_get_idx); + +struct icss_iep *icss_iep_get(struct device_node *np) +{ + return icss_iep_get_idx(np, 0); +} +EXPORT_SYMBOL_GPL(icss_iep_get); + +void icss_iep_put(struct icss_iep *iep) +{ + device_lock(iep->dev); + iep->client_np = NULL; + device_unlock(iep->dev); + put_device(iep->dev); +} +EXPORT_SYMBOL_GPL(icss_iep_put); + +void icss_iep_init_fw(struct icss_iep *iep) +{ + /* start IEP for FW use in raw 64bit mode, no PTP support */ + iep->clk_tick_time = iep->def_inc; + iep->cycle_time_ns = 0; + iep->ops = NULL; + iep->clockops_data = NULL; + icss_iep_set_default_inc(iep, iep->def_inc); + icss_iep_set_compensation_inc(iep, iep->def_inc); + icss_iep_set_compensation_count(iep, 0); + regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */ + regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0); + if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT) + 
icss_iep_set_slow_compensation_count(iep, 0); + + icss_iep_enable(iep); + icss_iep_settime(iep, 0); +} +EXPORT_SYMBOL_GPL(icss_iep_init_fw); + +void icss_iep_exit_fw(struct icss_iep *iep) +{ + icss_iep_disable(iep); +} +EXPORT_SYMBOL_GPL(icss_iep_exit_fw); + +int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops, + void *clockops_data, u32 cycle_time_ns) +{ + int ret = 0; + + iep->cycle_time_ns = cycle_time_ns; + iep->clk_tick_time = iep->def_inc; + iep->ops = clkops; + iep->clockops_data = clockops_data; + icss_iep_set_default_inc(iep, iep->def_inc); + icss_iep_set_compensation_inc(iep, iep->def_inc); + icss_iep_set_compensation_count(iep, 0); + regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */ + regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0); + if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT) + icss_iep_set_slow_compensation_count(iep, 0); + + if (!(iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) || + !(iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)) + goto skip_perout; + + if (iep->ops && iep->ops->perout_enable) { + iep->ptp_info.n_per_out = 1; + iep->ptp_info.pps = 1; + } + + if (iep->ops && iep->ops->extts_enable) + iep->ptp_info.n_ext_ts = 2; + +skip_perout: + if (cycle_time_ns) + icss_iep_enable_shadow_mode(iep); + else + icss_iep_enable(iep); + icss_iep_settime(iep, ktime_get_real_ns()); + + iep->ptp_clock = ptp_clock_register(&iep->ptp_info, iep->dev); + if (IS_ERR(iep->ptp_clock)) { + ret = PTR_ERR(iep->ptp_clock); + iep->ptp_clock = NULL; + dev_err(iep->dev, "Failed to register ptp clk %d\n", ret); + } + + return ret; +} +EXPORT_SYMBOL_GPL(icss_iep_init); + +int icss_iep_exit(struct icss_iep *iep) +{ + if (iep->ptp_clock) { + ptp_clock_unregister(iep->ptp_clock); + iep->ptp_clock = NULL; + } + icss_iep_disable(iep); + + return 0; +} +EXPORT_SYMBOL_GPL(icss_iep_exit); + +static int icss_iep_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct icss_iep *iep; + struct clk *iep_clk; + + iep = devm_kzalloc(dev, sizeof(*iep), GFP_KERNEL); + if (!iep) + return -ENOMEM; + + iep->dev = dev; + iep->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(iep->base)) + return -ENODEV; + + iep_clk = devm_clk_get(dev, NULL); + if (IS_ERR(iep_clk)) + return PTR_ERR(iep_clk); + + iep->refclk_freq = clk_get_rate(iep_clk); + + iep->def_inc = NSEC_PER_SEC / iep->refclk_freq; /* ns per clock tick */ + if (iep->def_inc > IEP_MAX_DEF_INC) { + dev_err(dev, "Failed to set def_inc %d. IEP_clock is too slow to be supported\n", + iep->def_inc); + return -EINVAL; + } + + iep->plat_data = device_get_match_data(dev); + if (!iep->plat_data) + return -EINVAL; + + iep->map = devm_regmap_init(dev, NULL, iep, iep->plat_data->config); + if (IS_ERR(iep->map)) { + dev_err(dev, "Failed to create regmap for IEP %ld\n", + PTR_ERR(iep->map)); + return PTR_ERR(iep->map); + } + + iep->ptp_info = icss_iep_ptp_info; + mutex_init(&iep->ptp_clk_mutex); + spin_lock_init(&iep->irq_lock); + dev_set_drvdata(dev, iep); + icss_iep_disable(iep); + + return 0; +} + +static bool am654_icss_iep_valid_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case ICSS_IEP_GLOBAL_CFG_REG ... 
ICSS_IEP_SYNC_START_REG: + return true; + default: + return false; + } + + return false; +} + +static int icss_iep_regmap_write(void *context, unsigned int reg, + unsigned int val) +{ + struct icss_iep *iep = context; + + writel(val, iep->base + iep->plat_data->reg_offs[reg]); + + return 0; +} + +static int icss_iep_regmap_read(void *context, unsigned int reg, + unsigned int *val) +{ + struct icss_iep *iep = context; + + *val = readl(iep->base + iep->plat_data->reg_offs[reg]); + + return 0; +} + +static struct regmap_config am654_icss_iep_regmap_config = { + .name = "icss iep", + .reg_stride = 1, + .reg_write = icss_iep_regmap_write, + .reg_read = icss_iep_regmap_read, + .writeable_reg = am654_icss_iep_valid_reg, + .readable_reg = am654_icss_iep_valid_reg, + .fast_io = 1, +}; + +static const struct icss_iep_plat_data am654_icss_iep_plat_data = { + .flags = ICSS_IEP_64BIT_COUNTER_SUPPORT | + ICSS_IEP_SLOW_COMPEN_REG_SUPPORT | + ICSS_IEP_SHADOW_MODE_SUPPORT, + .reg_offs = { + [ICSS_IEP_GLOBAL_CFG_REG] = 0x00, + [ICSS_IEP_COMPEN_REG] = 0x08, + [ICSS_IEP_SLOW_COMPEN_REG] = 0x0C, + [ICSS_IEP_COUNT_REG0] = 0x10, + [ICSS_IEP_COUNT_REG1] = 0x14, + [ICSS_IEP_CAPTURE_CFG_REG] = 0x18, + [ICSS_IEP_CAPTURE_STAT_REG] = 0x1c, + + [ICSS_IEP_CAP6_RISE_REG0] = 0x50, + [ICSS_IEP_CAP6_RISE_REG1] = 0x54, + + [ICSS_IEP_CAP7_RISE_REG0] = 0x60, + [ICSS_IEP_CAP7_RISE_REG1] = 0x64, + + [ICSS_IEP_CMP_CFG_REG] = 0x70, + [ICSS_IEP_CMP_STAT_REG] = 0x74, + [ICSS_IEP_CMP0_REG0] = 0x78, + [ICSS_IEP_CMP0_REG1] = 0x7c, + [ICSS_IEP_CMP1_REG0] = 0x80, + [ICSS_IEP_CMP1_REG1] = 0x84, + + [ICSS_IEP_CMP8_REG0] = 0xc0, + [ICSS_IEP_CMP8_REG1] = 0xc4, + [ICSS_IEP_SYNC_CTRL_REG] = 0x180, + [ICSS_IEP_SYNC0_STAT_REG] = 0x188, + [ICSS_IEP_SYNC1_STAT_REG] = 0x18c, + [ICSS_IEP_SYNC_PWIDTH_REG] = 0x190, + [ICSS_IEP_SYNC0_PERIOD_REG] = 0x194, + [ICSS_IEP_SYNC1_DELAY_REG] = 0x198, + [ICSS_IEP_SYNC_START_REG] = 0x19c, + }, + .config = &am654_icss_iep_regmap_config, +}; + +static const struct of_device_id icss_iep_of_match[] = { + { + .compatible = "ti,am654-icss-iep", + .data = &am654_icss_iep_plat_data, + }, + {}, +}; +MODULE_DEVICE_TABLE(of, icss_iep_of_match); + +static struct platform_driver icss_iep_driver = { + .driver = { + .name = "icss-iep", + .of_match_table = icss_iep_of_match, + }, + .probe = icss_iep_probe, +}; +module_platform_driver(icss_iep_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("TI ICSS IEP driver"); +MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>"); +MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>"); diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.h b/drivers/net/ethernet/ti/icssg/icss_iep.h new file mode 100644 index 000000000000..803a4b714893 --- /dev/null +++ b/drivers/net/ethernet/ti/icssg/icss_iep.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Texas Instruments ICSSG Industrial Ethernet Peripheral (IEP) Driver + * + * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/ + * + */ + +#ifndef __NET_TI_ICSS_IEP_H +#define __NET_TI_ICSS_IEP_H + +#include <linux/mutex.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/regmap.h> + +struct icss_iep; +extern const struct icss_iep_clockops prueth_iep_clockops; + +/* Firmware specific clock operations */ +struct icss_iep_clockops { + void (*settime)(void *clockops_data, u64 ns); + void (*adjtime)(void *clockops_data, s64 delta); + u64 (*gettime)(void *clockops_data, struct ptp_system_timestamp *sts); + int (*perout_enable)(void *clockops_data, + struct ptp_perout_request *req, int on, + u64 *cmp); + int 
(*extts_enable)(void *clockops_data, u32 index, int on); +}; + +struct icss_iep *icss_iep_get(struct device_node *np); +struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx); +void icss_iep_put(struct icss_iep *iep); +int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops, + void *clockops_data, u32 cycle_time_ns); +int icss_iep_exit(struct icss_iep *iep); +int icss_iep_get_count_low(struct icss_iep *iep); +int icss_iep_get_count_hi(struct icss_iep *iep); +int icss_iep_get_ptp_clock_idx(struct icss_iep *iep); +void icss_iep_init_fw(struct icss_iep *iep); +void icss_iep_exit_fw(struct icss_iep *iep); + +#endif /* __NET_TI_ICSS_IEP_H */ diff --git a/drivers/net/ethernet/ti/icssg/icssg_classifier.c b/drivers/net/ethernet/ti/icssg/icssg_classifier.c new file mode 100644 index 000000000000..6df53ab17fbc --- /dev/null +++ b/drivers/net/ethernet/ti/icssg/icssg_classifier.c @@ -0,0 +1,367 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Texas Instruments ICSSG Ethernet Driver + * + * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/ + * + */ + +#include <linux/etherdevice.h> +#include <linux/types.h> +#include <linux/regmap.h> + +#include "icssg_prueth.h" + +#define ICSSG_NUM_CLASSIFIERS 16 +#define ICSSG_NUM_FT1_SLOTS 8 +#define ICSSG_NUM_FT3_SLOTS 16 + +#define ICSSG_NUM_CLASSIFIERS_IN_USE 5 + +/* Filter 1 - FT1 */ +#define FT1_NUM_SLOTS 8 +#define FT1_SLOT_SIZE 0x10 /* bytes */ + +/* offsets from FT1 slot base i.e. slot 1 start */ +#define FT1_DA0 0x0 +#define FT1_DA1 0x4 +#define FT1_DA0_MASK 0x8 +#define FT1_DA1_MASK 0xc + +#define FT1_N_REG(slice, n, reg) \ + (offs[slice].ft1_slot_base + FT1_SLOT_SIZE * (n) + (reg)) + +#define FT1_LEN_MASK GENMASK(19, 16) +#define FT1_LEN_SHIFT 16 +#define FT1_LEN(len) (((len) << FT1_LEN_SHIFT) & FT1_LEN_MASK) +#define FT1_START_MASK GENMASK(14, 0) +#define FT1_START(start) ((start) & FT1_START_MASK) +#define FT1_MATCH_SLOT(n) (GENMASK(23, 16) & (BIT(n) << 16)) + +/* FT1 config type */ +enum ft1_cfg_type { + FT1_CFG_TYPE_DISABLED = 0, + FT1_CFG_TYPE_EQ, + FT1_CFG_TYPE_GT, + FT1_CFG_TYPE_LT, +}; + +#define FT1_CFG_SHIFT(n) (2 * (n)) +#define FT1_CFG_MASK(n) (0x3 << FT1_CFG_SHIFT((n))) + +/* Filter 3 - FT3 */ +#define FT3_NUM_SLOTS 16 +#define FT3_SLOT_SIZE 0x20 /* bytes */ + +/* offsets from FT3 slot n's base */ +#define FT3_START 0 +#define FT3_START_AUTO 0x4 +#define FT3_START_OFFSET 0x8 +#define FT3_JUMP_OFFSET 0xc +#define FT3_LEN 0x10 +#define FT3_CFG 0x14 +#define FT3_T 0x18 +#define FT3_T_MASK 0x1c + +#define FT3_N_REG(slice, n, reg) \ + (offs[slice].ft3_slot_base + FT3_SLOT_SIZE * (n) + (reg)) + +/* offsets from rx_class n's base */ +#define RX_CLASS_AND_EN 0 +#define RX_CLASS_OR_EN 0x4 +#define RX_CLASS_NUM_SLOTS 16 +#define RX_CLASS_EN_SIZE 0x8 /* bytes */ + +#define RX_CLASS_N_REG(slice, n, reg) \ + (offs[slice].rx_class_base + RX_CLASS_EN_SIZE * (n) + (reg)) + +/* RX Class Gates */ +#define RX_CLASS_GATES_SIZE 0x4 /* bytes */ + +#define RX_CLASS_GATES_N_REG(slice, n) \ + (offs[slice].rx_class_gates_base + RX_CLASS_GATES_SIZE * (n)) + +#define RX_CLASS_GATES_ALLOW_MASK BIT(6) +#define RX_CLASS_GATES_RAW_MASK BIT(5) +#define RX_CLASS_GATES_PHASE_MASK BIT(4) + +/* RX Class traffic data matching bits */ +#define RX_CLASS_FT_UC BIT(31) +#define RX_CLASS_FT_MC BIT(30) +#define RX_CLASS_FT_BC BIT(29) +#define RX_CLASS_FT_FW BIT(28) +#define RX_CLASS_FT_RCV BIT(27) +#define RX_CLASS_FT_VLAN BIT(26) +#define RX_CLASS_FT_DA_P BIT(25) +#define RX_CLASS_FT_DA_I BIT(24) +#define 
RX_CLASS_FT_FT1_MATCH_MASK GENMASK(23, 16) +#define RX_CLASS_FT_FT1_MATCH_SHIFT 16 +#define RX_CLASS_FT_FT3_MATCH_MASK GENMASK(15, 0) +#define RX_CLASS_FT_FT3_MATCH_SHIFT 0 + +#define RX_CLASS_FT_FT1_MATCH(slot) \ + ((BIT(slot) << RX_CLASS_FT_FT1_MATCH_SHIFT) & \ + RX_CLASS_FT_FT1_MATCH_MASK) + +/* RX class type */ +enum rx_class_sel_type { + RX_CLASS_SEL_TYPE_OR = 0, + RX_CLASS_SEL_TYPE_AND = 1, + RX_CLASS_SEL_TYPE_OR_AND_AND = 2, + RX_CLASS_SEL_TYPE_OR_OR_AND = 3, +}; + +#define RX_CLASS_SEL_SHIFT(n) (2 * (n)) +#define RX_CLASS_SEL_MASK(n) (0x3 << RX_CLASS_SEL_SHIFT((n))) + +#define ICSSG_CFG_OFFSET 0 +#define MAC_INTERFACE_0 0x18 +#define MAC_INTERFACE_1 0x1c + +#define ICSSG_CFG_RX_L2_G_EN BIT(2) + +/* These are register offsets per PRU */ +struct miig_rt_offsets { + u32 mac0; + u32 mac1; + u32 ft1_start_len; + u32 ft1_cfg; + u32 ft1_slot_base; + u32 ft3_slot_base; + u32 ft3_p_base; + u32 ft_rx_ptr; + u32 rx_class_base; + u32 rx_class_cfg1; + u32 rx_class_cfg2; + u32 rx_class_gates_base; + u32 rx_green; + u32 rx_rate_cfg_base; + u32 rx_rate_src_sel0; + u32 rx_rate_src_sel1; + u32 tx_rate_cfg_base; + u32 stat_base; + u32 tx_hsr_tag; + u32 tx_hsr_seq; + u32 tx_vlan_type; + u32 tx_vlan_ins; +}; + +/* These are the offset values for miig_rt_offsets registers */ +static const struct miig_rt_offsets offs[] = { + /* PRU0 */ + { + 0x8, + 0xc, + 0x80, + 0x84, + 0x88, + 0x108, + 0x308, + 0x408, + 0x40c, + 0x48c, + 0x490, + 0x494, + 0x4d4, + 0x4e4, + 0x504, + 0x508, + 0x50c, + 0x54c, + 0x63c, + 0x640, + 0x644, + 0x648, + }, + /* PRU1 */ + { + 0x10, + 0x14, + 0x64c, + 0x650, + 0x654, + 0x6d4, + 0x8d4, + 0x9d4, + 0x9d8, + 0xa58, + 0xa5c, + 0xa60, + 0xaa0, + 0xab0, + 0xad0, + 0xad4, + 0xad8, + 0xb18, + 0xc08, + 0xc0c, + 0xc10, + 0xc14, + }, +}; + +static void rx_class_ft1_set_start_len(struct regmap *miig_rt, int slice, + u16 start, u8 len) +{ + u32 offset, val; + + offset = offs[slice].ft1_start_len; + val = FT1_LEN(len) | FT1_START(start); + regmap_write(miig_rt, offset, val); +} + +static void rx_class_ft1_set_da(struct regmap *miig_rt, int slice, + int n, const u8 *addr) +{ + u32 offset; + + offset = FT1_N_REG(slice, n, FT1_DA0); + regmap_write(miig_rt, offset, (u32)(addr[0] | addr[1] << 8 | + addr[2] << 16 | addr[3] << 24)); + offset = FT1_N_REG(slice, n, FT1_DA1); + regmap_write(miig_rt, offset, (u32)(addr[4] | addr[5] << 8)); +} + +static void rx_class_ft1_set_da_mask(struct regmap *miig_rt, int slice, + int n, const u8 *addr) +{ + u32 offset; + + offset = FT1_N_REG(slice, n, FT1_DA0_MASK); + regmap_write(miig_rt, offset, (u32)(addr[0] | addr[1] << 8 | + addr[2] << 16 | addr[3] << 24)); + offset = FT1_N_REG(slice, n, FT1_DA1_MASK); + regmap_write(miig_rt, offset, (u32)(addr[4] | addr[5] << 8)); +} + +static void rx_class_ft1_cfg_set_type(struct regmap *miig_rt, int slice, int n, + enum ft1_cfg_type type) +{ + u32 offset; + + offset = offs[slice].ft1_cfg; + regmap_update_bits(miig_rt, offset, FT1_CFG_MASK(n), + type << FT1_CFG_SHIFT(n)); +} + +static void rx_class_sel_set_type(struct regmap *miig_rt, int slice, int n, + enum rx_class_sel_type type) +{ + u32 offset; + + offset = offs[slice].rx_class_cfg1; + regmap_update_bits(miig_rt, offset, RX_CLASS_SEL_MASK(n), + type << RX_CLASS_SEL_SHIFT(n)); +} + +static void rx_class_set_and(struct regmap *miig_rt, int slice, int n, + u32 data) +{ + u32 offset; + + offset = RX_CLASS_N_REG(slice, n, RX_CLASS_AND_EN); + regmap_write(miig_rt, offset, data); +} + 
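+/* Usage sketch (illustrative only, not part of this submission): a classifier + * n built purely from OR terms, e.g. one accepting broadcast or multicast + * frames, would combine the traffic-type match bits and select plain OR + * evaluation: + * + *	rx_class_set_and(miig_rt, slice, n, 0); + *	rx_class_set_or(miig_rt, slice, n, RX_CLASS_FT_BC | RX_CLASS_FT_MC); + *	rx_class_sel_set_type(miig_rt, slice, n, RX_CLASS_SEL_TYPE_OR); + * + * icssg_class_default() below wires up its default match in a similar way, + * using RX_CLASS_SEL_TYPE_OR_OR_AND as the select type. + */ + 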
+static void rx_class_set_or(struct regmap *miig_rt, int slice, int n, + u32 data) +{ + u32 offset; + + offset = RX_CLASS_N_REG(slice, n, RX_CLASS_OR_EN); + regmap_write(miig_rt, offset, data); +} + +void icssg_class_set_host_mac_addr(struct regmap *miig_rt, const u8 *mac) +{ + regmap_write(miig_rt, MAC_INTERFACE_0, (u32)(mac[0] | mac[1] << 8 | + mac[2] << 16 | mac[3] << 24)); + regmap_write(miig_rt, MAC_INTERFACE_1, (u32)(mac[4] | mac[5] << 8)); +} + +void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac) +{ + regmap_write(miig_rt, offs[slice].mac0, (u32)(mac[0] | mac[1] << 8 | + mac[2] << 16 | mac[3] << 24)); + regmap_write(miig_rt, offs[slice].mac1, (u32)(mac[4] | mac[5] << 8)); +} + +/* disable all RX traffic */ +void icssg_class_disable(struct regmap *miig_rt, int slice) +{ + u32 data, offset; + int n; + + /* Enable RX_L2_G */ + regmap_update_bits(miig_rt, ICSSG_CFG_OFFSET, ICSSG_CFG_RX_L2_G_EN, + ICSSG_CFG_RX_L2_G_EN); + + for (n = 0; n < ICSSG_NUM_CLASSIFIERS; n++) { + /* AND_EN = 0 */ + rx_class_set_and(miig_rt, slice, n, 0); + /* OR_EN = 0 */ + rx_class_set_or(miig_rt, slice, n, 0); + + /* set CFG1 to OR */ + rx_class_sel_set_type(miig_rt, slice, n, RX_CLASS_SEL_TYPE_OR); + + /* configure gate */ + offset = RX_CLASS_GATES_N_REG(slice, n); + regmap_read(miig_rt, offset, &data); + /* clear class_raw so we go through filters */ + data &= ~RX_CLASS_GATES_RAW_MASK; + /* set allow and phase mask */ + data |= RX_CLASS_GATES_ALLOW_MASK | RX_CLASS_GATES_PHASE_MASK; + regmap_write(miig_rt, offset, data); + } + + /* FT1 Disabled */ + for (n = 0; n < ICSSG_NUM_FT1_SLOTS; n++) { + const u8 addr[] = { 0, 0, 0, 0, 0, 0, }; + + rx_class_ft1_cfg_set_type(miig_rt, slice, n, + FT1_CFG_TYPE_DISABLED); + rx_class_ft1_set_da(miig_rt, slice, n, addr); + rx_class_ft1_set_da_mask(miig_rt, slice, n, addr); + } + + /* clear CFG2 */ + regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0); +} + +void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti) +{ + u32 data; + + /* defaults */ + icssg_class_disable(miig_rt, slice); + + /* Setup Classifier */ + /* match on Broadcast or MAC_PRU address */ + data = RX_CLASS_FT_BC | RX_CLASS_FT_DA_P; + + /* multicast */ + if (allmulti) + data |= RX_CLASS_FT_MC; + + rx_class_set_or(miig_rt, slice, 0, data); + + /* set CFG1 for OR_OR_AND for classifier */ + rx_class_sel_set_type(miig_rt, slice, 0, RX_CLASS_SEL_TYPE_OR_OR_AND); + + /* clear CFG2 */ + regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0); +} + +/* required for SAV check */ +void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr) +{ + const u8 mask_addr[] = { 0, 0, 0, 0, 0, 0, }; + + rx_class_ft1_set_start_len(miig_rt, slice, 0, 6); + rx_class_ft1_set_da(miig_rt, slice, 0, mac_addr); + rx_class_ft1_set_da_mask(miig_rt, slice, 0, mask_addr); + rx_class_ft1_cfg_set_type(miig_rt, slice, 0, FT1_CFG_TYPE_EQ); +} diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c new file mode 100644 index 000000000000..99de8a40ed60 --- /dev/null +++ b/drivers/net/ethernet/ti/icssg/icssg_config.c @@ -0,0 +1,471 @@ +// SPDX-License-Identifier: GPL-2.0 +/* ICSSG Ethernet driver + * + * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com + */ + +#include <linux/iopoll.h> +#include <linux/regmap.h> +#include <uapi/linux/if_ether.h> +#include "icssg_config.h" +#include "icssg_prueth.h" +#include "icssg_switch_map.h" +#include "icssg_mii_rt.h" + +/* TX IPG Values to be set for 100M link speed. 
These values are + * in ocp_clk cycles, so they need to change if ocp_clk is changed for a + * specific h/w design. + */ + +/* IPG is in core_clk cycles */ +#define MII_RT_TX_IPG_100M 0x17 +#define MII_RT_TX_IPG_1G 0xb + +#define ICSSG_QUEUES_MAX 64 +#define ICSSG_QUEUE_OFFSET 0xd00 +#define ICSSG_QUEUE_PEEK_OFFSET 0xe00 +#define ICSSG_QUEUE_CNT_OFFSET 0xe40 +#define ICSSG_QUEUE_RESET_OFFSET 0xf40 + +#define ICSSG_NUM_TX_QUEUES 8 + +#define RECYCLE_Q_SLICE0 16 +#define RECYCLE_Q_SLICE1 17 + +#define ICSSG_NUM_OTHER_QUEUES 5 /* port, host and special queues */ + +#define PORT_HI_Q_SLICE0 32 +#define PORT_LO_Q_SLICE0 33 +#define HOST_HI_Q_SLICE0 34 +#define HOST_LO_Q_SLICE0 35 +#define HOST_SPL_Q_SLICE0 40 /* Special Queue */ + +#define PORT_HI_Q_SLICE1 36 +#define PORT_LO_Q_SLICE1 37 +#define HOST_HI_Q_SLICE1 38 +#define HOST_LO_Q_SLICE1 39 +#define HOST_SPL_Q_SLICE1 41 /* Special Queue */ + +#define MII_RXCFG_DEFAULT (PRUSS_MII_RT_RXCFG_RX_ENABLE | \ + PRUSS_MII_RT_RXCFG_RX_DATA_RDY_MODE_DIS | \ + PRUSS_MII_RT_RXCFG_RX_L2_EN | \ + PRUSS_MII_RT_RXCFG_RX_L2_EOF_SCLR_DIS) + +#define MII_TXCFG_DEFAULT (PRUSS_MII_RT_TXCFG_TX_ENABLE | \ + PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE | \ + PRUSS_MII_RT_TXCFG_TX_32_MODE_EN | \ + PRUSS_MII_RT_TXCFG_TX_IPG_WIRE_CLK_EN) + +#define ICSSG_CFG_DEFAULT (ICSSG_CFG_TX_L1_EN | \ + ICSSG_CFG_TX_L2_EN | ICSSG_CFG_RX_L2_G_EN | \ + ICSSG_CFG_TX_PRU_EN | \ + ICSSG_CFG_SGMII_MODE) + +#define FDB_GEN_CFG1 0x60 +#define SMEM_VLAN_OFFSET 8 +#define SMEM_VLAN_OFFSET_MASK GENMASK(25, 8) + +#define FDB_GEN_CFG2 0x64 +#define FDB_VLAN_EN BIT(6) +#define FDB_HOST_EN BIT(2) +#define FDB_PRU1_EN BIT(1) +#define FDB_PRU0_EN BIT(0) +#define FDB_EN_ALL (FDB_PRU0_EN | FDB_PRU1_EN | \ + FDB_HOST_EN | FDB_VLAN_EN) + +/** + * struct map - ICSSG Queue Map + * @queue: Queue number + * @pd_addr_start: Packet descriptor queue reserved memory + * @flags: Flags + * @special: Indicates whether this queue is a special queue or not + */ +struct map { + int queue; + u32 pd_addr_start; + u32 flags; + bool special; +}; + +/* Hardware queue map for ICSSG */ +static const struct map hwq_map[2][ICSSG_NUM_OTHER_QUEUES] = { + { + { PORT_HI_Q_SLICE0, PORT_DESC0_HI, 0x200000, 0 }, + { PORT_LO_Q_SLICE0, PORT_DESC0_LO, 0, 0 }, + { HOST_HI_Q_SLICE0, HOST_DESC0_HI, 0x200000, 0 }, + { HOST_LO_Q_SLICE0, HOST_DESC0_LO, 0, 0 }, + { HOST_SPL_Q_SLICE0, HOST_SPPD0, 0x400000, 1 }, + }, + { + { PORT_HI_Q_SLICE1, PORT_DESC1_HI, 0xa00000, 0 }, + { PORT_LO_Q_SLICE1, PORT_DESC1_LO, 0x800000, 0 }, + { HOST_HI_Q_SLICE1, HOST_DESC1_HI, 0xa00000, 0 }, + { HOST_LO_Q_SLICE1, HOST_DESC1_LO, 0x800000, 0 }, + { HOST_SPL_Q_SLICE1, HOST_SPPD1, 0xc00000, 1 }, + }, +}; + +static void icssg_config_mii_init(struct prueth_emac *emac) +{ + u32 rxcfg, txcfg, rxcfg_reg, txcfg_reg, pcnt_reg; + struct prueth *prueth = emac->prueth; + int slice = prueth_emac_slice(emac); + struct regmap *mii_rt; + + mii_rt = prueth->mii_rt; + + rxcfg_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_RXCFG0 : + PRUSS_MII_RT_RXCFG1; + txcfg_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_TXCFG0 : + PRUSS_MII_RT_TXCFG1; + pcnt_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_RX_PCNT0 : + PRUSS_MII_RT_RX_PCNT1; + + rxcfg = MII_RXCFG_DEFAULT; + txcfg = MII_TXCFG_DEFAULT; + + if (slice == ICSS_MII1) + rxcfg |= PRUSS_MII_RT_RXCFG_RX_MUX_SEL; + + /* In MII mode the TX lines are swapped inside the ICSSG, so the + * TX_MUX_SEL cfg also needs to be swapped compared to RGMII mode. 
+ */ + if (emac->phy_if == PHY_INTERFACE_MODE_MII && slice == ICSS_MII0) + txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL; + else if (emac->phy_if != PHY_INTERFACE_MODE_MII && slice == ICSS_MII1) + txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL; + + regmap_write(mii_rt, rxcfg_reg, rxcfg); + regmap_write(mii_rt, txcfg_reg, txcfg); + regmap_write(mii_rt, pcnt_reg, 0x1); +} + +static void icssg_miig_queues_init(struct prueth *prueth, int slice) +{ + struct regmap *miig_rt = prueth->miig_rt; + void __iomem *smem = prueth->shram.va; + u8 pd[ICSSG_SPECIAL_PD_SIZE]; + int queue = 0, i, j; + u32 *pdword; + + /* reset hwqueues */ + if (slice) + queue = ICSSG_NUM_TX_QUEUES; + + for (i = 0; i < ICSSG_NUM_TX_QUEUES; i++) { + regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET, queue); + queue++; + } + + queue = slice ? RECYCLE_Q_SLICE1 : RECYCLE_Q_SLICE0; + regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET, queue); + + for (i = 0; i < ICSSG_NUM_OTHER_QUEUES; i++) { + regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET, + hwq_map[slice][i].queue); + } + + /* initialize packet descriptors in SMEM */ + /* push packet descriptors to hwqueues */ + + pdword = (u32 *)pd; + for (j = 0; j < ICSSG_NUM_OTHER_QUEUES; j++) { + const struct map *mp; + int pd_size, num_pds; + u32 pdaddr; + + mp = &hwq_map[slice][j]; + if (mp->special) { + pd_size = ICSSG_SPECIAL_PD_SIZE; + num_pds = ICSSG_NUM_SPECIAL_PDS; + } else { + pd_size = ICSSG_NORMAL_PD_SIZE; + num_pds = ICSSG_NUM_NORMAL_PDS; + } + + for (i = 0; i < num_pds; i++) { + memset(pd, 0, pd_size); + + pdword[0] &= ICSSG_FLAG_MASK; + pdword[0] |= mp->flags; + pdaddr = mp->pd_addr_start + i * pd_size; + + memcpy_toio(smem + pdaddr, pd, pd_size); + queue = mp->queue; + regmap_write(miig_rt, ICSSG_QUEUE_OFFSET + 4 * queue, + pdaddr); + } + } +} + +void icssg_config_ipg(struct prueth_emac *emac) +{ + struct prueth *prueth = emac->prueth; + int slice = prueth_emac_slice(emac); + + switch (emac->speed) { + case SPEED_1000: + icssg_mii_update_ipg(prueth->mii_rt, slice, MII_RT_TX_IPG_1G); + break; + case SPEED_100: + icssg_mii_update_ipg(prueth->mii_rt, slice, MII_RT_TX_IPG_100M); + break; + case SPEED_10: + /* IPG for 10M is same as 100M */ + icssg_mii_update_ipg(prueth->mii_rt, slice, MII_RT_TX_IPG_100M); + break; + default: + /* Other link speeds not supported */ + netdev_err(emac->ndev, "Unsupported link speed\n"); + return; + } +} + +static void emac_r30_cmd_init(struct prueth_emac *emac) +{ + struct icssg_r30_cmd __iomem *p; + int i; + + p = emac->dram.va + MGR_R30_CMD_OFFSET; + + for (i = 0; i < 4; i++) + writel(EMAC_NONE, &p->cmd[i]); +} + +static int emac_r30_is_done(struct prueth_emac *emac) +{ + const struct icssg_r30_cmd __iomem *p; + u32 cmd; + int i; + + p = emac->dram.va + MGR_R30_CMD_OFFSET; + + for (i = 0; i < 4; i++) { + cmd = readl(&p->cmd[i]); + if (cmd != EMAC_NONE) + return 0; + } + + return 1; +} + +static int prueth_emac_buffer_setup(struct prueth_emac *emac) +{ + struct icssg_buffer_pool_cfg __iomem *bpool_cfg; + struct icssg_rxq_ctx __iomem *rxq_ctx; + struct prueth *prueth = emac->prueth; + int slice = prueth_emac_slice(emac); + u32 addr; + int i; + + /* Layout to have 64KB aligned buffer pool + * |BPOOL0|BPOOL1|RX_CTX0|RX_CTX1| + */ + + addr = lower_32_bits(prueth->msmcram.pa); + if (slice) + addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE; + + if (addr % SZ_64K) { + dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n"); + return -EINVAL; + } + + bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET; + /* workaround for f/w bug. 
bpool 0 needs to be initialized */ + writel(addr, &bpool_cfg[0].addr); + writel(0, &bpool_cfg[0].len); + + for (i = PRUETH_EMAC_BUF_POOL_START; + i < PRUETH_EMAC_BUF_POOL_START + PRUETH_NUM_BUF_POOLS; + i++) { + writel(addr, &bpool_cfg[i].addr); + writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len); + addr += PRUETH_EMAC_BUF_POOL_SIZE; + } + + if (!slice) + addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE; + else + addr += PRUETH_EMAC_RX_CTX_BUF_SIZE * 2; + + /* Pre-emptible RX buffer queue */ + rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET; + for (i = 0; i < 3; i++) + writel(addr, &rxq_ctx->start[i]); + + addr += PRUETH_EMAC_RX_CTX_BUF_SIZE; + writel(addr, &rxq_ctx->end); + + /* Express RX buffer queue */ + rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET; + for (i = 0; i < 3; i++) + writel(addr, &rxq_ctx->start[i]); + + addr += PRUETH_EMAC_RX_CTX_BUF_SIZE; + writel(addr, &rxq_ctx->end); + + return 0; +} + +static void icssg_init_emac_mode(struct prueth *prueth) +{ + /* When the device is configured as a bridge and it is being brought + * back to the emac mode, the host mac address has to be set as 0. + */ + u8 mac[ETH_ALEN] = { 0 }; + + if (prueth->emacs_initialized) + return; + + regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, + SMEM_VLAN_OFFSET_MASK, 0); + regmap_write(prueth->miig_rt, FDB_GEN_CFG2, 0); + /* Clear host MAC address */ + icssg_class_set_host_mac_addr(prueth->miig_rt, mac); +} + +int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice) +{ + void __iomem *config = emac->dram.va + ICSSG_CONFIG_OFFSET; + struct icssg_flow_cfg __iomem *flow_cfg; + int ret; + + icssg_init_emac_mode(prueth); + + memset_io(config, 0, TAS_GATE_MASK_LIST0); + icssg_miig_queues_init(prueth, slice); + + emac->speed = SPEED_1000; + emac->duplex = DUPLEX_FULL; + if (!phy_interface_mode_is_rgmii(emac->phy_if)) { + emac->speed = SPEED_100; + emac->duplex = DUPLEX_FULL; + } + regmap_update_bits(prueth->miig_rt, ICSSG_CFG_OFFSET, + ICSSG_CFG_DEFAULT, ICSSG_CFG_DEFAULT); + icssg_miig_set_interface_mode(prueth->miig_rt, slice, emac->phy_if); + icssg_config_mii_init(emac); + icssg_config_ipg(emac); + icssg_update_rgmii_cfg(prueth->miig_rt, emac); + + /* set GPI mode */ + pruss_cfg_gpimode(prueth->pruss, prueth->pru_id[slice], + PRUSS_GPI_MODE_MII); + + /* enable XFR shift for PRU and RTU */ + pruss_cfg_xfr_enable(prueth->pruss, PRU_TYPE_PRU, true); + pruss_cfg_xfr_enable(prueth->pruss, PRU_TYPE_RTU, true); + + /* set C28 to 0x100 */ + pru_rproc_set_ctable(prueth->pru[slice], PRU_C28, 0x100 << 8); + pru_rproc_set_ctable(prueth->rtu[slice], PRU_C28, 0x100 << 8); + pru_rproc_set_ctable(prueth->txpru[slice], PRU_C28, 0x100 << 8); + + flow_cfg = config + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET; + writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow); + writew(0, &flow_cfg->mgm_base_flow); + writeb(0, config + SPL_PKT_DEFAULT_PRIORITY); + writeb(0, config + QUEUE_NUM_UNTAGGED); + + ret = prueth_emac_buffer_setup(emac); + if (ret) + return ret; + + emac_r30_cmd_init(emac); + + return 0; +} + +/* Bitmask for ICSSG r30 commands */ +static const struct icssg_r30_cmd emac_r32_bitmask[] = { + {{0xffff0004, 0xffff0100, 0xffff0004, EMAC_NONE}}, /* EMAC_PORT_DISABLE */ + {{0xfffb0040, 0xfeff0200, 0xfeff0200, EMAC_NONE}}, /* EMAC_PORT_BLOCK */ + {{0xffbb0000, 0xfcff0000, 0xdcfb0000, EMAC_NONE}}, /* EMAC_PORT_FORWARD */ + {{0xffbb0000, 0xfcff0000, 0xfcff2000, EMAC_NONE}}, /* EMAC_PORT_FORWARD_WO_LEARNING */ + {{0xffff0001, EMAC_NONE, EMAC_NONE, EMAC_NONE}}, /* ACCEPT ALL */ + 
{{0xfffe0002, EMAC_NONE, EMAC_NONE, EMAC_NONE}}, /* ACCEPT TAGGED */ + {{0xfffc0000, EMAC_NONE, EMAC_NONE, EMAC_NONE}}, /* ACCEPT UNTAGGED and PRIO */ + {{EMAC_NONE, 0xffff0020, EMAC_NONE, EMAC_NONE}}, /* TAS Trigger List change */ + {{EMAC_NONE, 0xdfff1000, EMAC_NONE, EMAC_NONE}}, /* TAS set state ENABLE*/ + {{EMAC_NONE, 0xefff2000, EMAC_NONE, EMAC_NONE}}, /* TAS set state RESET*/ + {{EMAC_NONE, 0xcfff0000, EMAC_NONE, EMAC_NONE}}, /* TAS set state DISABLE*/ + {{EMAC_NONE, EMAC_NONE, 0xffff0400, EMAC_NONE}}, /* UC flooding ENABLE*/ + {{EMAC_NONE, EMAC_NONE, 0xfbff0000, EMAC_NONE}}, /* UC flooding DISABLE*/ + {{EMAC_NONE, EMAC_NONE, 0xffff0800, EMAC_NONE}}, /* MC flooding ENABLE*/ + {{EMAC_NONE, EMAC_NONE, 0xf7ff0000, EMAC_NONE}}, /* MC flooding DISABLE*/ + {{EMAC_NONE, 0xffff4000, EMAC_NONE, EMAC_NONE}}, /* Preemption on Tx ENABLE*/ + {{EMAC_NONE, 0xbfff0000, EMAC_NONE, EMAC_NONE}}, /* Preemption on Tx DISABLE*/ + {{0xffff0010, EMAC_NONE, 0xffff0010, EMAC_NONE}}, /* VLAN AWARE*/ + {{0xffef0000, EMAC_NONE, 0xffef0000, EMAC_NONE}} /* VLAN UNAWARE*/ +}; + +int emac_set_port_state(struct prueth_emac *emac, + enum icssg_port_state_cmd cmd) +{ + struct icssg_r30_cmd __iomem *p; + int ret = -ETIMEDOUT; + int done = 0; + int i; + + p = emac->dram.va + MGR_R30_CMD_OFFSET; + + if (cmd >= ICSSG_EMAC_PORT_MAX_COMMANDS) { + netdev_err(emac->ndev, "invalid port command\n"); + return -EINVAL; + } + + /* only one command at a time allowed to firmware */ + mutex_lock(&emac->cmd_lock); + + for (i = 0; i < 4; i++) + writel(emac_r32_bitmask[cmd].cmd[i], &p->cmd[i]); + + /* wait for done */ + ret = read_poll_timeout(emac_r30_is_done, done, done == 1, + 1000, 10000, false, emac); + + if (ret == -ETIMEDOUT) + netdev_err(emac->ndev, "timeout waiting for command done\n"); + + mutex_unlock(&emac->cmd_lock); + + return ret; +} + +void icssg_config_half_duplex(struct prueth_emac *emac) +{ + u32 val; + + if (!emac->half_duplex) + return; + + val = get_random_u32(); + writel(val, emac->dram.va + HD_RAND_SEED_OFFSET); +} + +void icssg_config_set_speed(struct prueth_emac *emac) +{ + u8 fw_speed; + + switch (emac->speed) { + case SPEED_1000: + fw_speed = FW_LINK_SPEED_1G; + break; + case SPEED_100: + fw_speed = FW_LINK_SPEED_100M; + break; + case SPEED_10: + fw_speed = FW_LINK_SPEED_10M; + break; + default: + /* Other link speeds not supported */ + netdev_err(emac->ndev, "Unsupported link speed\n"); + return; + } + + if (emac->duplex == DUPLEX_HALF) + fw_speed |= FW_LINK_SPEED_HD; + + writeb(fw_speed, emac->dram.va + PORT_LINK_SPEED_OFFSET); +} diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h new file mode 100644 index 000000000000..43eb0922172a --- /dev/null +++ b/drivers/net/ethernet/ti/icssg/icssg_config.h @@ -0,0 +1,200 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Texas Instruments ICSSG Ethernet driver + * + * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com/ + * + */ + +#ifndef __NET_TI_ICSSG_CONFIG_H +#define __NET_TI_ICSSG_CONFIG_H + +struct icssg_buffer_pool_cfg { + __le32 addr; + __le32 len; +} __packed; + +struct icssg_flow_cfg { + __le16 rx_base_flow; + __le16 mgm_base_flow; +} __packed; + +#define PRUETH_PKT_TYPE_CMD 0x10 +#define PRUETH_NAV_PS_DATA_SIZE 16 /* Protocol specific data size */ +#define PRUETH_NAV_SW_DATA_SIZE 16 /* SW related data size */ +#define PRUETH_MAX_TX_DESC 512 +#define PRUETH_MAX_RX_DESC 512 +#define PRUETH_MAX_RX_FLOWS 1 /* excluding default flow */ +#define PRUETH_RX_FLOW_DATA 0 + +#define 
PRUETH_EMAC_BUF_POOL_SIZE SZ_8K +#define PRUETH_EMAC_POOLS_PER_SLICE 24 +#define PRUETH_EMAC_BUF_POOL_START 8 +#define PRUETH_NUM_BUF_POOLS 8 +#define PRUETH_EMAC_RX_CTX_BUF_SIZE SZ_16K /* per slice */ +#define MSMC_RAM_SIZE \ + (2 * (PRUETH_EMAC_BUF_POOL_SIZE * PRUETH_NUM_BUF_POOLS + \ + PRUETH_EMAC_RX_CTX_BUF_SIZE * 2)) + +struct icssg_rxq_ctx { + __le32 start[3]; + __le32 end; +} __packed; + +/* Load time Firmware Configuration */ + +#define ICSSG_FW_MGMT_CMD_HEADER 0x81 +#define ICSSG_FW_MGMT_FDB_CMD_TYPE 0x03 +#define ICSSG_FW_MGMT_CMD_TYPE 0x04 +#define ICSSG_FW_MGMT_PKT 0x80000000 + +struct icssg_r30_cmd { + u32 cmd[4]; +} __packed; + +enum icssg_port_state_cmd { + ICSSG_EMAC_PORT_DISABLE = 0, + ICSSG_EMAC_PORT_BLOCK, + ICSSG_EMAC_PORT_FORWARD, + ICSSG_EMAC_PORT_FORWARD_WO_LEARNING, + ICSSG_EMAC_PORT_ACCEPT_ALL, + ICSSG_EMAC_PORT_ACCEPT_TAGGED, + ICSSG_EMAC_PORT_ACCEPT_UNTAGGED_N_PRIO, + ICSSG_EMAC_PORT_TAS_TRIGGER, + ICSSG_EMAC_PORT_TAS_ENABLE, + ICSSG_EMAC_PORT_TAS_RESET, + ICSSG_EMAC_PORT_TAS_DISABLE, + ICSSG_EMAC_PORT_UC_FLOODING_ENABLE, + ICSSG_EMAC_PORT_UC_FLOODING_DISABLE, + ICSSG_EMAC_PORT_MC_FLOODING_ENABLE, + ICSSG_EMAC_PORT_MC_FLOODING_DISABLE, + ICSSG_EMAC_PORT_PREMPT_TX_ENABLE, + ICSSG_EMAC_PORT_PREMPT_TX_DISABLE, + ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE, + ICSSG_EMAC_PORT_VLAN_AWARE_DISABLE, + ICSSG_EMAC_PORT_MAX_COMMANDS +}; + +#define EMAC_NONE 0xffff0000 +#define EMAC_PRU0_P_DI 0xffff0004 +#define EMAC_PRU1_P_DI 0xffff0040 +#define EMAC_TX_P_DI 0xffff0100 + +#define EMAC_PRU0_P_EN 0xfffb0000 +#define EMAC_PRU1_P_EN 0xffbf0000 +#define EMAC_TX_P_EN 0xfeff0000 + +#define EMAC_P_BLOCK 0xffff0040 +#define EMAC_TX_P_BLOCK 0xffff0200 +#define EMAC_P_UNBLOCK 0xffbf0000 +#define EMAC_TX_P_UNBLOCK 0xfdff0000 +#define EMAC_LEAN_EN 0xfff70000 +#define EMAC_LEAN_DI 0xffff0008 + +#define EMAC_ACCEPT_ALL 0xffff0001 +#define EMAC_ACCEPT_TAG 0xfffe0002 +#define EMAC_ACCEPT_PRIOR 0xfffc0000 + +/* Config area lies in DRAM */ +#define ICSSG_CONFIG_OFFSET 0x0 + +/* Config area lies in shared RAM */ +#define ICSSG_CONFIG_OFFSET_SLICE0 0 +#define ICSSG_CONFIG_OFFSET_SLICE1 0x8000 + +#define ICSSG_NUM_NORMAL_PDS 64 +#define ICSSG_NUM_SPECIAL_PDS 16 + +#define ICSSG_NORMAL_PD_SIZE 8 +#define ICSSG_SPECIAL_PD_SIZE 20 + +#define ICSSG_FLAG_MASK 0xff00ffff + +struct icssg_setclock_desc { + u8 request; + u8 restore; + u8 acknowledgment; + u8 cmp_status; + u32 margin; + u32 cyclecounter0_set; + u32 cyclecounter1_set; + u32 iepcount_set; + u32 rsvd1; + u32 rsvd2; + u32 CMP0_current; + u32 iepcount_current; + u32 difference; + u32 cyclecounter0_new; + u32 cyclecounter1_new; + u32 CMP0_new; +} __packed; + +#define ICSSG_CMD_POP_SLICE0 56 +#define ICSSG_CMD_POP_SLICE1 60 + +#define ICSSG_CMD_PUSH_SLICE0 57 +#define ICSSG_CMD_PUSH_SLICE1 61 + +#define ICSSG_RSP_POP_SLICE0 58 +#define ICSSG_RSP_POP_SLICE1 62 + +#define ICSSG_RSP_PUSH_SLICE0 56 +#define ICSSG_RSP_PUSH_SLICE1 60 + +#define ICSSG_TS_POP_SLICE0 59 +#define ICSSG_TS_POP_SLICE1 63 + +#define ICSSG_TS_PUSH_SLICE0 40 +#define ICSSG_TS_PUSH_SLICE1 41 + +/* FDB FID_C2 flag definitions */ +/* Indicates host port membership.*/ +#define ICSSG_FDB_ENTRY_P0_MEMBERSHIP BIT(0) +/* Indicates that MAC ID is connected to physical port 1 */ +#define ICSSG_FDB_ENTRY_P1_MEMBERSHIP BIT(1) +/* Indicates that MAC ID is connected to physical port 2 */ +#define ICSSG_FDB_ENTRY_P2_MEMBERSHIP BIT(2) +/* Ageable bit is set for learned entries and cleared for static entries */ +#define ICSSG_FDB_ENTRY_AGEABLE BIT(3) +/* If set for DA then packet is determined to be a 
special packet */ +#define ICSSG_FDB_ENTRY_BLOCK BIT(4) +/* If set for DA then the SA from the packet is not learned */ +#define ICSSG_FDB_ENTRY_SECURE BIT(5) +/* If set, it means packet has been seen recently with source address + FID + * matching MAC address/FID of entry + */ +#define ICSSG_FDB_ENTRY_TOUCHED BIT(6) +/* Set if entry is valid */ +#define ICSSG_FDB_ENTRY_VALID BIT(7) + +/** + * struct prueth_vlan_tbl - VLAN table entries struct in ICSSG SMEM + * @fid_c1: membership and forwarding rules flag for this table. See + * the defines above for bit definitions + * @fid: FDB index for this VID (there is 1-1 mapping b/w VID and FID) + */ +struct prueth_vlan_tbl { + u8 fid_c1; + u8 fid; +} __packed; + +/** + * struct prueth_fdb_slot - Result of FDB slot lookup + * @mac: MAC address + * @fid: fid to be associated with MAC + * @fid_c2: FID_C2 entry for this MAC + */ +struct prueth_fdb_slot { + u8 mac[ETH_ALEN]; + u8 fid; + u8 fid_c2; +} __packed; + +enum icssg_ietfpe_verify_states { + ICSSG_IETFPE_STATE_UNKNOWN = 0, + ICSSG_IETFPE_STATE_INITIAL, + ICSSG_IETFPE_STATE_VERIFYING, + ICSSG_IETFPE_STATE_SUCCEEDED, + ICSSG_IETFPE_STATE_FAILED, + ICSSG_IETFPE_STATE_DISABLED +}; +#endif /* __NET_TI_ICSSG_CONFIG_H */ diff --git a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c new file mode 100644 index 000000000000..a27ec1dcc8d5 --- /dev/null +++ b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Texas Instruments ICSSG Ethernet driver + * + * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/ + * + */ + +#include "icssg_prueth.h" +#include "icssg_stats.h" + +static void emac_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct prueth *prueth = emac->prueth; + + strscpy(info->driver, dev_driver_string(prueth->dev), + sizeof(info->driver)); + strscpy(info->bus_info, dev_name(prueth->dev), sizeof(info->bus_info)); +} + +static u32 emac_get_msglevel(struct net_device *ndev) +{ + struct prueth_emac *emac = netdev_priv(ndev); + + return emac->msg_enable; +} + +static void emac_set_msglevel(struct net_device *ndev, u32 value) +{ + struct prueth_emac *emac = netdev_priv(ndev); + + emac->msg_enable = value; +} + +static int emac_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *ecmd) +{ + return phy_ethtool_get_link_ksettings(ndev, ecmd); +} + +static int emac_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *ecmd) +{ + return phy_ethtool_set_link_ksettings(ndev, ecmd); +} + +static int emac_get_eee(struct net_device *ndev, struct ethtool_eee *edata) +{ + if (!ndev->phydev) + return -EOPNOTSUPP; + + return phy_ethtool_get_eee(ndev->phydev, edata); +} + +static int emac_set_eee(struct net_device *ndev, struct ethtool_eee *edata) +{ + if (!ndev->phydev) + return -EOPNOTSUPP; + + return phy_ethtool_set_eee(ndev->phydev, edata); +} + +static int emac_nway_reset(struct net_device *ndev) +{ + return phy_ethtool_nway_reset(ndev); +} + +static int emac_get_sset_count(struct net_device *ndev, int stringset) +{ + switch (stringset) { + case ETH_SS_STATS: + return ICSSG_NUM_ETHTOOL_STATS; + default: + return -EOPNOTSUPP; + } +} + +static void emac_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++) { + if 
(!icssg_all_stats[i].standard_stats) { + memcpy(p, icssg_all_stats[i].name, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + } + break; + default: + break; + } +} + +static void emac_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct prueth_emac *emac = netdev_priv(ndev); + int i; + + emac_update_hardware_stats(emac); + + for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++) + if (!icssg_all_stats[i].standard_stats) + *(data++) = emac->stats[i]; +} + +static int emac_get_ts_info(struct net_device *ndev, + struct ethtool_ts_info *info) +{ + struct prueth_emac *emac = netdev_priv(ndev); + + info->so_timestamping = + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->phc_index = icss_iep_get_ptp_clock_idx(emac->iep); + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); + + return 0; +} + +static int emac_set_channels(struct net_device *ndev, + struct ethtool_channels *ch) +{ + struct prueth_emac *emac = netdev_priv(ndev); + + /* The number of queues can be changed only when the + * interface is down, so check that it is not running. + */ + if (netif_running(emac->ndev)) + return -EBUSY; + + emac->tx_ch_num = ch->tx_count; + + return 0; +} + +static void emac_get_channels(struct net_device *ndev, + struct ethtool_channels *ch) +{ + struct prueth_emac *emac = netdev_priv(ndev); + + ch->max_rx = 1; + ch->max_tx = PRUETH_MAX_TX_QUEUES; + ch->rx_count = 1; + ch->tx_count = emac->tx_ch_num; +} + +static const struct ethtool_rmon_hist_range emac_rmon_ranges[] = { + { 0, 64}, + { 65, 128}, + { 129, 256}, + { 257, 512}, + { 513, PRUETH_MAX_PKT_SIZE}, + {} +}; + +static void emac_get_rmon_stats(struct net_device *ndev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct prueth_emac *emac = netdev_priv(ndev); + + *ranges = emac_rmon_ranges; + + rmon_stats->undersize_pkts = emac_get_stat_by_name(emac, "rx_bucket1_frames") - + emac_get_stat_by_name(emac, "rx_64B_frames"); + + rmon_stats->hist[0] = emac_get_stat_by_name(emac, "rx_bucket1_frames"); + rmon_stats->hist[1] = emac_get_stat_by_name(emac, "rx_bucket2_frames"); + rmon_stats->hist[2] = emac_get_stat_by_name(emac, "rx_bucket3_frames"); + rmon_stats->hist[3] = emac_get_stat_by_name(emac, "rx_bucket4_frames"); + rmon_stats->hist[4] = emac_get_stat_by_name(emac, "rx_bucket5_frames"); + + rmon_stats->hist_tx[0] = emac_get_stat_by_name(emac, "tx_bucket1_frames"); + rmon_stats->hist_tx[1] = emac_get_stat_by_name(emac, "tx_bucket2_frames"); + rmon_stats->hist_tx[2] = emac_get_stat_by_name(emac, "tx_bucket3_frames"); + rmon_stats->hist_tx[3] = emac_get_stat_by_name(emac, "tx_bucket4_frames"); + rmon_stats->hist_tx[4] = emac_get_stat_by_name(emac, "tx_bucket5_frames"); +} + +const struct ethtool_ops icssg_ethtool_ops = { + .get_drvinfo = emac_get_drvinfo, + .get_msglevel = emac_get_msglevel, + .set_msglevel = emac_set_msglevel, + .get_sset_count = emac_get_sset_count, + .get_ethtool_stats = emac_get_ethtool_stats, + .get_strings = emac_get_strings, + .get_ts_info = emac_get_ts_info, + .get_channels = emac_get_channels, + .set_channels = emac_set_channels, + .get_link_ksettings = emac_get_link_ksettings, + .set_link_ksettings = emac_set_link_ksettings, + .get_link = ethtool_op_get_link, + .get_eee = emac_get_eee, + .set_eee = emac_set_eee, + .nway_reset = 
emac_nway_reset, + .get_rmon_stats = emac_get_rmon_stats, +}; diff --git a/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c b/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c new file mode 100644 index 000000000000..92718ae40d7e --- /dev/null +++ b/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Texas Instruments ICSSG Ethernet Driver + * + * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/ + * + */ + +#include <linux/etherdevice.h> +#include <linux/regmap.h> +#include <linux/types.h> + +#include "icssg_mii_rt.h" +#include "icssg_prueth.h" + +void icssg_mii_update_ipg(struct regmap *mii_rt, int mii, u32 ipg) +{ + u32 val; + + if (mii == ICSS_MII0) { + regmap_write(mii_rt, PRUSS_MII_RT_TX_IPG0, ipg); + } else { + regmap_read(mii_rt, PRUSS_MII_RT_TX_IPG0, &val); + regmap_write(mii_rt, PRUSS_MII_RT_TX_IPG1, ipg); + regmap_write(mii_rt, PRUSS_MII_RT_TX_IPG0, val); + } +} + +void icssg_mii_update_mtu(struct regmap *mii_rt, int mii, int mtu) +{ + mtu += (ETH_HLEN + ETH_FCS_LEN); + if (mii == ICSS_MII0) { + regmap_update_bits(mii_rt, + PRUSS_MII_RT_RX_FRMS0, + PRUSS_MII_RT_RX_FRMS_MAX_FRM_MASK, + (mtu - 1) << PRUSS_MII_RT_RX_FRMS_MAX_FRM_SHIFT); + } else { + regmap_update_bits(mii_rt, + PRUSS_MII_RT_RX_FRMS1, + PRUSS_MII_RT_RX_FRMS_MAX_FRM_MASK, + (mtu - 1) << PRUSS_MII_RT_RX_FRMS_MAX_FRM_SHIFT); + } +} + +void icssg_update_rgmii_cfg(struct regmap *miig_rt, struct prueth_emac *emac) +{ + u32 gig_en_mask, gig_val = 0, full_duplex_mask, full_duplex_val = 0; + int slice = prueth_emac_slice(emac); + u32 inband_en_mask, inband_val = 0; + + gig_en_mask = (slice == ICSS_MII0) ? RGMII_CFG_GIG_EN_MII0 : + RGMII_CFG_GIG_EN_MII1; + if (emac->speed == SPEED_1000) + gig_val = gig_en_mask; + regmap_update_bits(miig_rt, RGMII_CFG_OFFSET, gig_en_mask, gig_val); + + inband_en_mask = (slice == ICSS_MII0) ? RGMII_CFG_INBAND_EN_MII0 : + RGMII_CFG_INBAND_EN_MII1; + if (emac->speed == SPEED_10 && phy_interface_mode_is_rgmii(emac->phy_if)) + inband_val = inband_en_mask; + regmap_update_bits(miig_rt, RGMII_CFG_OFFSET, inband_en_mask, inband_val); + + full_duplex_mask = (slice == ICSS_MII0) ? RGMII_CFG_FULL_DUPLEX_MII0 : + RGMII_CFG_FULL_DUPLEX_MII1; + if (emac->duplex == DUPLEX_FULL) + full_duplex_val = full_duplex_mask; + regmap_update_bits(miig_rt, RGMII_CFG_OFFSET, full_duplex_mask, + full_duplex_val); +} + +void icssg_miig_set_interface_mode(struct regmap *miig_rt, int mii, phy_interface_t phy_if) +{ + u32 val, mask, shift; + + mask = mii == ICSS_MII0 ? ICSSG_CFG_MII0_MODE : ICSSG_CFG_MII1_MODE; + shift = mii == ICSS_MII0 ? 
ICSSG_CFG_MII0_MODE_SHIFT : ICSSG_CFG_MII1_MODE_SHIFT; + + val = MII_MODE_RGMII; + if (phy_if == PHY_INTERFACE_MODE_MII) + val = MII_MODE_MII; + + val <<= shift; + regmap_update_bits(miig_rt, ICSSG_CFG_OFFSET, mask, val); + regmap_read(miig_rt, ICSSG_CFG_OFFSET, &val); +} + +u32 icssg_rgmii_cfg_get_bitfield(struct regmap *miig_rt, u32 mask, u32 shift) +{ + u32 val; + + regmap_read(miig_rt, RGMII_CFG_OFFSET, &val); + val &= mask; + val >>= shift; + + return val; +} + +u32 icssg_rgmii_get_speed(struct regmap *miig_rt, int mii) +{ + u32 shift = RGMII_CFG_SPEED_MII0_SHIFT, mask = RGMII_CFG_SPEED_MII0; + + if (mii == ICSS_MII1) { + shift = RGMII_CFG_SPEED_MII1_SHIFT; + mask = RGMII_CFG_SPEED_MII1; + } + + return icssg_rgmii_cfg_get_bitfield(miig_rt, mask, shift); +} + +u32 icssg_rgmii_get_fullduplex(struct regmap *miig_rt, int mii) +{ + u32 shift = RGMII_CFG_FULLDUPLEX_MII0_SHIFT; + u32 mask = RGMII_CFG_FULLDUPLEX_MII0; + + if (mii == ICSS_MII1) { + shift = RGMII_CFG_FULLDUPLEX_MII1_SHIFT; + mask = RGMII_CFG_FULLDUPLEX_MII1; + } + + return icssg_rgmii_cfg_get_bitfield(miig_rt, mask, shift); +} diff --git a/drivers/net/ethernet/ti/icssg/icssg_mii_rt.h b/drivers/net/ethernet/ti/icssg/icssg_mii_rt.h new file mode 100644 index 000000000000..55a59bf5299c --- /dev/null +++ b/drivers/net/ethernet/ti/icssg/icssg_mii_rt.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* PRU-ICSS MII_RT register definitions + * + * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com + */ + +#ifndef __NET_PRUSS_MII_RT_H__ +#define __NET_PRUSS_MII_RT_H__ + +#include <linux/if_ether.h> +#include <linux/phy.h> + +/* PRUSS_MII_RT Registers */ +#define PRUSS_MII_RT_RXCFG0 0x0 +#define PRUSS_MII_RT_RXCFG1 0x4 +#define PRUSS_MII_RT_TXCFG0 0x10 +#define PRUSS_MII_RT_TXCFG1 0x14 +#define PRUSS_MII_RT_TX_CRC0 0x20 +#define PRUSS_MII_RT_TX_CRC1 0x24 +#define PRUSS_MII_RT_TX_IPG0 0x30 +#define PRUSS_MII_RT_TX_IPG1 0x34 +#define PRUSS_MII_RT_PRS0 0x38 +#define PRUSS_MII_RT_PRS1 0x3c +#define PRUSS_MII_RT_RX_FRMS0 0x40 +#define PRUSS_MII_RT_RX_FRMS1 0x44 +#define PRUSS_MII_RT_RX_PCNT0 0x48 +#define PRUSS_MII_RT_RX_PCNT1 0x4c +#define PRUSS_MII_RT_RX_ERR0 0x50 +#define PRUSS_MII_RT_RX_ERR1 0x54 + +/* PRUSS_MII_RT_RXCFG0/1 bits */ +#define PRUSS_MII_RT_RXCFG_RX_ENABLE BIT(0) +#define PRUSS_MII_RT_RXCFG_RX_DATA_RDY_MODE_DIS BIT(1) +#define PRUSS_MII_RT_RXCFG_RX_CUT_PREAMBLE BIT(2) +#define PRUSS_MII_RT_RXCFG_RX_MUX_SEL BIT(3) +#define PRUSS_MII_RT_RXCFG_RX_L2_EN BIT(4) +#define PRUSS_MII_RT_RXCFG_RX_BYTE_SWAP BIT(5) +#define PRUSS_MII_RT_RXCFG_RX_AUTO_FWD_PRE BIT(6) +#define PRUSS_MII_RT_RXCFG_RX_L2_EOF_SCLR_DIS BIT(9) + +/* PRUSS_MII_RT_TXCFG0/1 bits */ +#define PRUSS_MII_RT_TXCFG_TX_ENABLE BIT(0) +#define PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE BIT(1) +#define PRUSS_MII_RT_TXCFG_TX_EN_MODE BIT(2) +#define PRUSS_MII_RT_TXCFG_TX_BYTE_SWAP BIT(3) +#define PRUSS_MII_RT_TXCFG_TX_MUX_SEL BIT(8) +#define PRUSS_MII_RT_TXCFG_PRE_TX_AUTO_SEQUENCE BIT(9) +#define PRUSS_MII_RT_TXCFG_PRE_TX_AUTO_ESC_ERR BIT(10) +#define PRUSS_MII_RT_TXCFG_TX_32_MODE_EN BIT(11) +#define PRUSS_MII_RT_TXCFG_TX_IPG_WIRE_CLK_EN BIT(12) /* SR2.0 onwards */ + +#define PRUSS_MII_RT_TXCFG_TX_START_DELAY_SHIFT 16 +#define PRUSS_MII_RT_TXCFG_TX_START_DELAY_MASK GENMASK(25, 16) + +#define PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_SHIFT 28 +#define PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_MASK GENMASK(30, 28) + +/* PRUSS_MII_RT_TX_IPG0/1 bits */ +#define PRUSS_MII_RT_TX_IPG_IPG_SHIFT 0 +#define PRUSS_MII_RT_TX_IPG_IPG_MASK GENMASK(9, 0) + +/* PRUSS_MII_RT_PRS0/1 
bits */ +#define PRUSS_MII_RT_PRS_COL BIT(0) +#define PRUSS_MII_RT_PRS_CRS BIT(1) + +/* PRUSS_MII_RT_RX_FRMS0/1 bits */ +#define PRUSS_MII_RT_RX_FRMS_MIN_FRM_SHIFT 0 +#define PRUSS_MII_RT_RX_FRMS_MIN_FRM_MASK GENMASK(15, 0) + +#define PRUSS_MII_RT_RX_FRMS_MAX_FRM_SHIFT 16 +#define PRUSS_MII_RT_RX_FRMS_MAX_FRM_MASK GENMASK(31, 16) + +/* Min/Max in MII_RT_RX_FRMS */ +/* For EMAC and Switch */ +#define PRUSS_MII_RT_RX_FRMS_MAX (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) +#define PRUSS_MII_RT_RX_FRMS_MIN_FRM (64) + +/* for HSR and PRP */ +#define PRUSS_MII_RT_RX_FRMS_MAX_FRM_LRE (PRUSS_MII_RT_RX_FRMS_MAX + \ + ICSS_LRE_TAG_RCT_SIZE) +/* PRUSS_MII_RT_RX_PCNT0/1 bits */ +#define PRUSS_MII_RT_RX_PCNT_MIN_PCNT_SHIFT 0 +#define PRUSS_MII_RT_RX_PCNT_MIN_PCNT_MASK GENMASK(3, 0) + +#define PRUSS_MII_RT_RX_PCNT_MAX_PCNT_SHIFT 4 +#define PRUSS_MII_RT_RX_PCNT_MAX_PCNT_MASK GENMASK(7, 4) + +/* PRUSS_MII_RT_RX_ERR0/1 bits */ +#define PRUSS_MII_RT_RX_ERR_MIN_PCNT_ERR BIT(0) +#define PRUSS_MII_RT_RX_ERR_MAX_PCNT_ERR BIT(1) +#define PRUSS_MII_RT_RX_ERR_MIN_FRM_ERR BIT(2) +#define PRUSS_MII_RT_RX_ERR_MAX_FRM_ERR BIT(3) + +#define ICSSG_CFG_OFFSET 0 +#define RGMII_CFG_OFFSET 4 + +/* Constant to choose between MII0 and MII1 */ +#define ICSS_MII0 0 +#define ICSS_MII1 1 + +/* ICSSG_CFG Register bits */ +#define ICSSG_CFG_SGMII_MODE BIT(16) +#define ICSSG_CFG_TX_PRU_EN BIT(11) +#define ICSSG_CFG_RX_SFD_TX_SOF_EN BIT(10) +#define ICSSG_CFG_RTU_PRU_PSI_SHARE_EN BIT(9) +#define ICSSG_CFG_IEP1_TX_EN BIT(8) +#define ICSSG_CFG_MII1_MODE GENMASK(6, 5) +#define ICSSG_CFG_MII1_MODE_SHIFT 5 +#define ICSSG_CFG_MII0_MODE GENMASK(4, 3) +#define ICSSG_CFG_MII0_MODE_SHIFT 3 +#define ICSSG_CFG_RX_L2_G_EN BIT(2) +#define ICSSG_CFG_TX_L2_EN BIT(1) +#define ICSSG_CFG_TX_L1_EN BIT(0) + +enum mii_mode { + MII_MODE_MII = 0, + MII_MODE_RGMII +}; + +/* RGMII CFG Register bits */ +#define RGMII_CFG_INBAND_EN_MII0 BIT(16) +#define RGMII_CFG_GIG_EN_MII0 BIT(17) +#define RGMII_CFG_INBAND_EN_MII1 BIT(20) +#define RGMII_CFG_GIG_EN_MII1 BIT(21) +#define RGMII_CFG_FULL_DUPLEX_MII0 BIT(18) +#define RGMII_CFG_FULL_DUPLEX_MII1 BIT(22) +#define RGMII_CFG_SPEED_MII0 GENMASK(2, 1) +#define RGMII_CFG_SPEED_MII1 GENMASK(6, 5) +#define RGMII_CFG_SPEED_MII0_SHIFT 1 +#define RGMII_CFG_SPEED_MII1_SHIFT 5 +#define RGMII_CFG_FULLDUPLEX_MII0 BIT(3) +#define RGMII_CFG_FULLDUPLEX_MII1 BIT(7) +#define RGMII_CFG_FULLDUPLEX_MII0_SHIFT 3 +#define RGMII_CFG_FULLDUPLEX_MII1_SHIFT 7 +#define RGMII_CFG_SPEED_10M 0 +#define RGMII_CFG_SPEED_100M 1 +#define RGMII_CFG_SPEED_1G 2 + +struct regmap; +struct prueth_emac; + +void icssg_mii_update_ipg(struct regmap *mii_rt, int mii, u32 ipg); +void icssg_mii_update_mtu(struct regmap *mii_rt, int mii, int mtu); +void icssg_update_rgmii_cfg(struct regmap *miig_rt, struct prueth_emac *emac); +u32 icssg_rgmii_cfg_get_bitfield(struct regmap *miig_rt, u32 mask, u32 shift); +u32 icssg_rgmii_get_speed(struct regmap *miig_rt, int mii); +u32 icssg_rgmii_get_fullduplex(struct regmap *miig_rt, int mii); +void icssg_miig_set_interface_mode(struct regmap *miig_rt, int mii, phy_interface_t phy_if); + +#endif /* __NET_PRUSS_MII_RT_H__ */ diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c new file mode 100644 index 000000000000..411898a4f38c --- /dev/null +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -0,0 +1,2366 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Texas Instruments ICSSG Ethernet Driver + * + * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/ + * + */ + 
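For reference, the RGMII_CFG accessors above (icssg_rgmii_get_speed() and
icssg_rgmii_get_fullduplex()) are plain mask-and-shift reads of the fields
defined in icssg_mii_rt.h. Below is a minimal, self-contained sketch of the
same decode in ordinary userspace C; the field encodings are copied from the
header above, while the sample register value is made up and nothing here is
part of the driver itself:

	#include <stdio.h>

	#define RGMII_CFG_SPEED_MII0_SHIFT	1
	#define RGMII_CFG_SPEED_MII0_MASK	(0x3u << RGMII_CFG_SPEED_MII0_SHIFT)
	#define RGMII_CFG_FULLDUPLEX_MII0	(1u << 3)

	int main(void)
	{
		/* sample snapshot: speed code 2 (gigabit), full-duplex bit set */
		unsigned int rgmii_cfg = 0x0c;
		unsigned int speed = (rgmii_cfg & RGMII_CFG_SPEED_MII0_MASK) >>
				     RGMII_CFG_SPEED_MII0_SHIFT;

		/* 0 = 10M, 1 = 100M, 2 = 1G, per the RGMII_CFG_SPEED_* values */
		printf("speed code %u, %s duplex\n", speed,
		       (rgmii_cfg & RGMII_CFG_FULLDUPLEX_MII0) ? "full" : "half");
		return 0;
	}

icssg_update_rgmii_cfg() programs related fields of the same register in the
other direction once the PHY has negotiated a link mode.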
+#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/dma/ti-cppi5.h> +#include <linux/etherdevice.h> +#include <linux/genalloc.h> +#include <linux/if_vlan.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/platform_device.h> +#include <linux/phy.h> +#include <linux/property.h> +#include <linux/remoteproc/pruss.h> +#include <linux/regmap.h> +#include <linux/remoteproc.h> + +#include "icssg_prueth.h" +#include "icssg_mii_rt.h" +#include "../k3-cppi-desc-pool.h" + +#define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver" + +/* Netif debug messages possible */ +#define PRUETH_EMAC_DEBUG (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR | \ + NETIF_MSG_TX_QUEUED | \ + NETIF_MSG_INTR | \ + NETIF_MSG_TX_DONE | \ + NETIF_MSG_RX_STATUS | \ + NETIF_MSG_PKTDATA | \ + NETIF_MSG_HW | \ + NETIF_MSG_WOL) + +#define prueth_napi_to_emac(napi) container_of(napi, struct prueth_emac, napi_rx) + +/* CTRLMMR_ICSSG_RGMII_CTRL register bits */ +#define ICSSG_CTRL_RGMII_ID_MODE BIT(24) + +#define IEP_DEFAULT_CYCLE_TIME_NS 1000000 /* 1 ms */ + +static void prueth_cleanup_rx_chns(struct prueth_emac *emac, + struct prueth_rx_chn *rx_chn, + int max_rflows) +{ + if (rx_chn->desc_pool) + k3_cppi_desc_pool_destroy(rx_chn->desc_pool); + + if (rx_chn->rx_chn) + k3_udma_glue_release_rx_chn(rx_chn->rx_chn); +} + +static void prueth_cleanup_tx_chns(struct prueth_emac *emac) +{ + int i; + + for (i = 0; i < emac->tx_ch_num; i++) { + struct prueth_tx_chn *tx_chn = &emac->tx_chns[i]; + + if (tx_chn->desc_pool) + k3_cppi_desc_pool_destroy(tx_chn->desc_pool); + + if (tx_chn->tx_chn) + k3_udma_glue_release_tx_chn(tx_chn->tx_chn); + + /* Assume prueth_cleanup_tx_chns() is called at the + * end after all channel resources are freed + */ + memset(tx_chn, 0, sizeof(*tx_chn)); + } +} + +static void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num) +{ + int i; + + for (i = 0; i < num; i++) { + struct prueth_tx_chn *tx_chn = &emac->tx_chns[i]; + + if (tx_chn->irq) + free_irq(tx_chn->irq, tx_chn); + netif_napi_del(&tx_chn->napi_tx); + } +} + +static void prueth_xmit_free(struct prueth_tx_chn *tx_chn, + struct cppi5_host_desc_t *desc) +{ + struct cppi5_host_desc_t *first_desc, *next_desc; + dma_addr_t buf_dma, next_desc_dma; + u32 buf_dma_len; + + first_desc = desc; + next_desc = first_desc; + + cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len); + k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma); + + dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, + DMA_TO_DEVICE); + + next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc); + k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma); + while (next_desc_dma) { + next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, + next_desc_dma); + cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len); + k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma); + + dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len, + DMA_TO_DEVICE); + + next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc); + k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma); + + k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc); + } + + k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc); +} + +static int 
emac_tx_complete_packets(struct prueth_emac *emac, int chn, + int budget) +{ + struct net_device *ndev = emac->ndev; + struct cppi5_host_desc_t *desc_tx; + struct netdev_queue *netif_txq; + struct prueth_tx_chn *tx_chn; + unsigned int total_bytes = 0; + struct sk_buff *skb; + dma_addr_t desc_dma; + int res, num_tx = 0; + void **swdata; + + tx_chn = &emac->tx_chns[chn]; + + while (true) { + res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma); + if (res == -ENODATA) + break; + + /* teardown completion */ + if (cppi5_desc_is_tdcm(desc_dma)) { + if (atomic_dec_and_test(&emac->tdown_cnt)) + complete(&emac->tdown_complete); + break; + } + + desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, + desc_dma); + swdata = cppi5_hdesc_get_swdata(desc_tx); + + skb = *(swdata); + prueth_xmit_free(tx_chn, desc_tx); + + ndev = skb->dev; + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; + total_bytes += skb->len; + napi_consume_skb(skb, budget); + num_tx++; + } + + if (!num_tx) + return 0; + + netif_txq = netdev_get_tx_queue(ndev, chn); + netdev_tx_completed_queue(netif_txq, num_tx, total_bytes); + + if (netif_tx_queue_stopped(netif_txq)) { + /* If the TX queue was stopped, wake it now + * if we have enough room. + */ + __netif_tx_lock(netif_txq, smp_processor_id()); + if (netif_running(ndev) && + (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= + MAX_SKB_FRAGS)) + netif_tx_wake_queue(netif_txq); + __netif_tx_unlock(netif_txq); + } + + return num_tx; +} + +static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget) +{ + struct prueth_tx_chn *tx_chn = prueth_napi_to_tx_chn(napi_tx); + struct prueth_emac *emac = tx_chn->emac; + int num_tx_packets; + + num_tx_packets = emac_tx_complete_packets(emac, tx_chn->id, budget); + + if (num_tx_packets >= budget) + return budget; + + if (napi_complete_done(napi_tx, num_tx_packets)) + enable_irq(tx_chn->irq); + + return num_tx_packets; +} + +static irqreturn_t prueth_tx_irq(int irq, void *dev_id) +{ + struct prueth_tx_chn *tx_chn = dev_id; + + disable_irq_nosync(irq); + napi_schedule(&tx_chn->napi_tx); + + return IRQ_HANDLED; +} + +static int prueth_ndev_add_tx_napi(struct prueth_emac *emac) +{ + struct prueth *prueth = emac->prueth; + int i, ret; + + for (i = 0; i < emac->tx_ch_num; i++) { + struct prueth_tx_chn *tx_chn = &emac->tx_chns[i]; + + netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll); + ret = request_irq(tx_chn->irq, prueth_tx_irq, + IRQF_TRIGGER_HIGH, tx_chn->name, + tx_chn); + if (ret) { + netif_napi_del(&tx_chn->napi_tx); + dev_err(prueth->dev, "unable to request TX IRQ %d\n", + tx_chn->irq); + goto fail; + } + } + + return 0; +fail: + prueth_ndev_del_tx_napi(emac, i); + return ret; +} + +static int prueth_init_tx_chns(struct prueth_emac *emac) +{ + static const struct k3_ring_cfg ring_cfg = { + .elm_size = K3_RINGACC_RING_ELSIZE_8, + .mode = K3_RINGACC_RING_MODE_RING, + .flags = 0, + .size = PRUETH_MAX_TX_DESC, + }; + struct k3_udma_glue_tx_channel_cfg tx_cfg; + struct device *dev = emac->prueth->dev; + struct net_device *ndev = emac->ndev; + int ret, slice, i; + u32 hdesc_size; + + slice = prueth_emac_slice(emac); + if (slice < 0) + return slice; + + init_completion(&emac->tdown_complete); + + hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE, + PRUETH_NAV_SW_DATA_SIZE); + memset(&tx_cfg, 0, sizeof(tx_cfg)); + tx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE; + tx_cfg.tx_cfg = ring_cfg; + tx_cfg.txcq_cfg = ring_cfg; + + for (i = 0; i < emac->tx_ch_num; i++) { + struct prueth_tx_chn *tx_chn = 
&emac->tx_chns[i]; + + /* To differentiate channels for SLICE0 vs SLICE1 */ + snprintf(tx_chn->name, sizeof(tx_chn->name), + "tx%d-%d", slice, i); + + tx_chn->emac = emac; + tx_chn->id = i; + tx_chn->descs_num = PRUETH_MAX_TX_DESC; + + tx_chn->tx_chn = + k3_udma_glue_request_tx_chn(dev, tx_chn->name, + &tx_cfg); + if (IS_ERR(tx_chn->tx_chn)) { + ret = PTR_ERR(tx_chn->tx_chn); + tx_chn->tx_chn = NULL; + netdev_err(ndev, + "Failed to request tx dma ch: %d\n", ret); + goto fail; + } + + tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn); + tx_chn->desc_pool = + k3_cppi_desc_pool_create_name(tx_chn->dma_dev, + tx_chn->descs_num, + hdesc_size, + tx_chn->name); + if (IS_ERR(tx_chn->desc_pool)) { + ret = PTR_ERR(tx_chn->desc_pool); + tx_chn->desc_pool = NULL; + netdev_err(ndev, "Failed to create tx pool: %d\n", ret); + goto fail; + } + + ret = k3_udma_glue_tx_get_irq(tx_chn->tx_chn); + if (ret < 0) { + netdev_err(ndev, "failed to get tx irq\n"); + goto fail; + } + tx_chn->irq = ret; + + snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d", + dev_name(dev), tx_chn->id); + } + + return 0; + +fail: + prueth_cleanup_tx_chns(emac); + return ret; +} + +static int prueth_init_rx_chns(struct prueth_emac *emac, + struct prueth_rx_chn *rx_chn, + char *name, u32 max_rflows, + u32 max_desc_num) +{ + struct k3_udma_glue_rx_channel_cfg rx_cfg; + struct device *dev = emac->prueth->dev; + struct net_device *ndev = emac->ndev; + u32 fdqring_id, hdesc_size; + int i, ret = 0, slice; + + slice = prueth_emac_slice(emac); + if (slice < 0) + return slice; + + /* To differentiate channels for SLICE0 vs SLICE1 */ + snprintf(rx_chn->name, sizeof(rx_chn->name), "%s%d", name, slice); + + hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE, + PRUETH_NAV_SW_DATA_SIZE); + memset(&rx_cfg, 0, sizeof(rx_cfg)); + rx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE; + rx_cfg.flow_id_num = max_rflows; + rx_cfg.flow_id_base = -1; /* udmax will auto select flow id base */ + + /* init all flows */ + rx_chn->dev = dev; + rx_chn->descs_num = max_desc_num; + + rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, rx_chn->name, + &rx_cfg); + if (IS_ERR(rx_chn->rx_chn)) { + ret = PTR_ERR(rx_chn->rx_chn); + rx_chn->rx_chn = NULL; + netdev_err(ndev, "Failed to request rx dma ch: %d\n", ret); + goto fail; + } + + rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn); + rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev, + rx_chn->descs_num, + hdesc_size, + rx_chn->name); + if (IS_ERR(rx_chn->desc_pool)) { + ret = PTR_ERR(rx_chn->desc_pool); + rx_chn->desc_pool = NULL; + netdev_err(ndev, "Failed to create rx pool: %d\n", ret); + goto fail; + } + + emac->rx_flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn); + netdev_dbg(ndev, "flow id base = %d\n", emac->rx_flow_id_base); + + fdqring_id = K3_RINGACC_RING_ID_ANY; + for (i = 0; i < rx_cfg.flow_id_num; i++) { + struct k3_ring_cfg rxring_cfg = { + .elm_size = K3_RINGACC_RING_ELSIZE_8, + .mode = K3_RINGACC_RING_MODE_RING, + .flags = 0, + }; + struct k3_ring_cfg fdqring_cfg = { + .elm_size = K3_RINGACC_RING_ELSIZE_8, + .flags = K3_RINGACC_RING_SHARED, + }; + struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = { + .rx_cfg = rxring_cfg, + .rxfdq_cfg = fdqring_cfg, + .ring_rxq_id = K3_RINGACC_RING_ID_ANY, + .src_tag_lo_sel = + K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG, + }; + + rx_flow_cfg.ring_rxfdq0_id = fdqring_id; + rx_flow_cfg.rx_cfg.size = max_desc_num; + rx_flow_cfg.rxfdq_cfg.size = max_desc_num; + rx_flow_cfg.rxfdq_cfg.mode = 
emac->prueth->pdata.fdqring_mode;
+
+		ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
+						i, &rx_flow_cfg);
+		if (ret) {
+			netdev_err(ndev, "Failed to init rx flow%d %d\n",
+				   i, ret);
+			goto fail;
+		}
+		if (!i)
+			fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
+								     i);
+		rx_chn->irq[i] = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
+		if (rx_chn->irq[i] <= 0) {
+			/* map irq 0 to an error so the caller never sees a
+			 * spurious success return
+			 */
+			ret = rx_chn->irq[i] ? rx_chn->irq[i] : -ENXIO;
+			netdev_err(ndev, "Failed to get rx dma irq\n");
+			goto fail;
+		}
+	}
+
+	return 0;
+
+fail:
+	prueth_cleanup_rx_chns(emac, rx_chn, max_rflows);
+	return ret;
+}
+
+static int prueth_dma_rx_push(struct prueth_emac *emac,
+			      struct sk_buff *skb,
+			      struct prueth_rx_chn *rx_chn)
+{
+	struct net_device *ndev = emac->ndev;
+	struct cppi5_host_desc_t *desc_rx;
+	u32 pkt_len = skb_tailroom(skb);
+	dma_addr_t desc_dma;
+	dma_addr_t buf_dma;
+	void **swdata;
+
+	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
+	if (!desc_rx) {
+		netdev_err(ndev, "rx push: failed to allocate descriptor\n");
+		return -ENOMEM;
+	}
+	desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
+
+	buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
+		k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+		netdev_err(ndev, "rx push: failed to map rx pkt buffer\n");
+		return -EINVAL;
+	}
+
+	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+			 PRUETH_NAV_PS_DATA_SIZE);
+	k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
+	cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
+
+	swdata = cppi5_hdesc_get_swdata(desc_rx);
+	*swdata = skb;
+
+	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0,
+					desc_rx, desc_dma);
+}
+
+/* Firmware reports a timestamp as a 20-bit IEP nanosecond count within the
+ * current cycle (low bits of @lo), a cycle count split across the upper bits
+ * of @lo and the low bits of @hi, and a rollover count of the hi register;
+ * recombine the three parts and scale by the IEP cycle time.
+ */
+static u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
+{
+	u32 iepcount_lo, iepcount_hi, hi_rollover_count;
+	u64 ns;
+
+	iepcount_lo = lo & GENMASK(19, 0);
+	iepcount_hi = (hi & GENMASK(11, 0)) << 12 | lo >> 20;
+	hi_rollover_count = hi >> 11;
+
+	ns = ((u64)hi_rollover_count) << 23 | (iepcount_hi + hi_sw);
+	ns = ns * cycle_time_ns + iepcount_lo;
+
+	return ns;
+}
+
+static void emac_rx_timestamp(struct prueth_emac *emac,
+			      struct sk_buff *skb, u32 *psdata)
+{
+	struct skb_shared_hwtstamps *ssh;
+	u64 ns;
+
+	u32 hi_sw = readl(emac->prueth->shram.va +
+			  TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
+	ns = icssg_ts_to_ns(hi_sw, psdata[1], psdata[0],
+			    IEP_DEFAULT_CYCLE_TIME_NS);
+
+	ssh = skb_hwtstamps(skb);
+	memset(ssh, 0, sizeof(*ssh));
+	ssh->hwtstamp = ns_to_ktime(ns);
+}
+
+static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
+{
+	struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+	u32 buf_dma_len, pkt_len, port_id = 0;
+	struct net_device *ndev = emac->ndev;
+	struct cppi5_host_desc_t *desc_rx;
+	struct sk_buff *skb, *new_skb;
+	dma_addr_t desc_dma, buf_dma;
+	void **swdata;
+	u32 *psdata;
+	int ret;
+
+	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
+	if (ret) {
+		if (ret != -ENODATA)
+			netdev_err(ndev, "rx pop: failed: %d\n", ret);
+		return ret;
+	}
+
+	if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ?
*/ + return 0; + + desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma); + + swdata = cppi5_hdesc_get_swdata(desc_rx); + skb = *swdata; + + psdata = cppi5_hdesc_get_psdata(desc_rx); + /* RX HW timestamp */ + if (emac->rx_ts_enabled) + emac_rx_timestamp(emac, skb, psdata); + + cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len); + k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma); + pkt_len = cppi5_hdesc_get_pktlen(desc_rx); + /* firmware adds 4 CRC bytes, strip them */ + pkt_len -= 4; + cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL); + + dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE); + k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); + + skb->dev = ndev; + new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE); + /* if allocation fails we drop the packet but push the + * descriptor back to the ring with old skb to prevent a stall + */ + if (!new_skb) { + ndev->stats.rx_dropped++; + new_skb = skb; + } else { + /* send the filled skb up the n/w stack */ + skb_put(skb, pkt_len); + skb->protocol = eth_type_trans(skb, ndev); + napi_gro_receive(&emac->napi_rx, skb); + ndev->stats.rx_bytes += pkt_len; + ndev->stats.rx_packets++; + } + + /* queue another RX DMA */ + ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_chns); + if (WARN_ON(ret < 0)) { + dev_kfree_skb_any(new_skb); + ndev->stats.rx_errors++; + ndev->stats.rx_dropped++; + } + + return ret; +} + +static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma) +{ + struct prueth_rx_chn *rx_chn = data; + struct cppi5_host_desc_t *desc_rx; + struct sk_buff *skb; + dma_addr_t buf_dma; + u32 buf_dma_len; + void **swdata; + + desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma); + swdata = cppi5_hdesc_get_swdata(desc_rx); + skb = *swdata; + cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len); + k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma); + + dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, + DMA_FROM_DEVICE); + k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); + + dev_kfree_skb_any(skb); +} + +static int emac_get_tx_ts(struct prueth_emac *emac, + struct emac_tx_ts_response *rsp) +{ + struct prueth *prueth = emac->prueth; + int slice = prueth_emac_slice(emac); + int addr; + + addr = icssg_queue_pop(prueth, slice == 0 ? + ICSSG_TS_POP_SLICE0 : ICSSG_TS_POP_SLICE1); + if (addr < 0) + return addr; + + memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp)); + /* return buffer back for to pool */ + icssg_queue_push(prueth, slice == 0 ? + ICSSG_TS_PUSH_SLICE0 : ICSSG_TS_PUSH_SLICE1, addr); + + return 0; +} + +static void tx_ts_work(struct prueth_emac *emac) +{ + struct skb_shared_hwtstamps ssh; + struct emac_tx_ts_response tsr; + struct sk_buff *skb; + int ret = 0; + u32 hi_sw; + u64 ns; + + /* There may be more than one pending requests */ + while (1) { + ret = emac_get_tx_ts(emac, &tsr); + if (ret) /* nothing more */ + break; + + if (tsr.cookie >= PRUETH_MAX_TX_TS_REQUESTS || + !emac->tx_ts_skb[tsr.cookie]) { + netdev_err(emac->ndev, "Invalid TX TS cookie 0x%x\n", + tsr.cookie); + break; + } + + skb = emac->tx_ts_skb[tsr.cookie]; + emac->tx_ts_skb[tsr.cookie] = NULL; /* free slot */ + if (!skb) { + netdev_err(emac->ndev, "Driver Bug! 
got NULL skb\n"); + break; + } + + hi_sw = readl(emac->prueth->shram.va + + TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET); + ns = icssg_ts_to_ns(hi_sw, tsr.hi_ts, tsr.lo_ts, + IEP_DEFAULT_CYCLE_TIME_NS); + + memset(&ssh, 0, sizeof(ssh)); + ssh.hwtstamp = ns_to_ktime(ns); + + skb_tstamp_tx(skb, &ssh); + dev_consume_skb_any(skb); + + if (atomic_dec_and_test(&emac->tx_ts_pending)) /* no more? */ + break; + } +} + +static int prueth_tx_ts_cookie_get(struct prueth_emac *emac) +{ + int i; + + /* search and get the next free slot */ + for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) { + if (!emac->tx_ts_skb[i]) { + emac->tx_ts_skb[i] = ERR_PTR(-EBUSY); /* reserve slot */ + return i; + } + } + + return -EBUSY; +} + +/** + * emac_ndo_start_xmit - EMAC Transmit function + * @skb: SKB pointer + * @ndev: EMAC network adapter + * + * Called by the system to transmit a packet - we queue the packet in + * EMAC hardware transmit queue + * Doesn't wait for completion we'll check for TX completion in + * emac_tx_complete_packets(). + * + * Return: enum netdev_tx + */ +static enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc; + struct prueth_emac *emac = netdev_priv(ndev); + struct netdev_queue *netif_txq; + struct prueth_tx_chn *tx_chn; + dma_addr_t desc_dma, buf_dma; + int i, ret = 0, q_idx; + bool in_tx_ts = 0; + int tx_ts_cookie; + void **swdata; + u32 pkt_len; + u32 *epib; + + pkt_len = skb_headlen(skb); + q_idx = skb_get_queue_mapping(skb); + + tx_chn = &emac->tx_chns[q_idx]; + netif_txq = netdev_get_tx_queue(ndev, q_idx); + + /* Map the linear buffer */ + buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len, DMA_TO_DEVICE); + if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) { + netdev_err(ndev, "tx: failed to map skb buffer\n"); + ret = NETDEV_TX_OK; + goto drop_free_skb; + } + + first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool); + if (!first_desc) { + netdev_dbg(ndev, "tx: failed to allocate descriptor\n"); + dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE); + goto drop_stop_q_busy; + } + + cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT, + PRUETH_NAV_PS_DATA_SIZE); + cppi5_hdesc_set_pkttype(first_desc, 0); + epib = first_desc->epib; + epib[0] = 0; + epib[1] = 0; + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + emac->tx_ts_enabled) { + tx_ts_cookie = prueth_tx_ts_cookie_get(emac); + if (tx_ts_cookie >= 0) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + /* Request TX timestamp */ + epib[0] = (u32)tx_ts_cookie; + epib[1] = 0x80000000; /* TX TS request */ + emac->tx_ts_skb[tx_ts_cookie] = skb_get(skb); + in_tx_ts = 1; + } + } + + /* set dst tag to indicate internal qid at the firmware which is at + * bit8..bit15. bit0..bit7 indicates port num for directed + * packets in case of switch mode operation + */ + cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8))); + k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma); + cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len); + swdata = cppi5_hdesc_get_swdata(first_desc); + *swdata = skb; + + /* Handle the case where skb is fragmented in pages */ + cur_desc = first_desc; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + u32 frag_size = skb_frag_size(frag); + + next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool); + if (!next_desc) { + netdev_err(ndev, + "tx: failed to allocate frag. 
descriptor\n"); + goto free_desc_stop_q_busy_cleanup_tx_ts; + } + + buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size, + DMA_TO_DEVICE); + if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) { + netdev_err(ndev, "tx: Failed to map skb page\n"); + k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc); + ret = NETDEV_TX_OK; + goto cleanup_tx_ts; + } + + cppi5_hdesc_reset_hbdesc(next_desc); + k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma); + cppi5_hdesc_attach_buf(next_desc, + buf_dma, frag_size, buf_dma, frag_size); + + desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, + next_desc); + k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma); + cppi5_hdesc_link_hbdesc(cur_desc, desc_dma); + + pkt_len += frag_size; + cur_desc = next_desc; + } + WARN_ON_ONCE(pkt_len != skb->len); + + /* report bql before sending packet */ + netdev_tx_sent_queue(netif_txq, pkt_len); + + cppi5_hdesc_set_pktlen(first_desc, pkt_len); + desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc); + /* cppi5_desc_dump(first_desc, 64); */ + + skb_tx_timestamp(skb); /* SW timestamp if SKBTX_IN_PROGRESS not set */ + ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma); + if (ret) { + netdev_err(ndev, "tx: push failed: %d\n", ret); + goto drop_free_descs; + } + + if (in_tx_ts) + atomic_inc(&emac->tx_ts_pending); + + if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) { + netif_tx_stop_queue(netif_txq); + /* Barrier, so that stop_queue visible to other cpus */ + smp_mb__after_atomic(); + + if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= + MAX_SKB_FRAGS) + netif_tx_wake_queue(netif_txq); + } + + return NETDEV_TX_OK; + +cleanup_tx_ts: + if (in_tx_ts) { + dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]); + emac->tx_ts_skb[tx_ts_cookie] = NULL; + } + +drop_free_descs: + prueth_xmit_free(tx_chn, first_desc); + +drop_free_skb: + dev_kfree_skb_any(skb); + + /* error */ + ndev->stats.tx_dropped++; + netdev_err(ndev, "tx: error: %d\n", ret); + + return ret; + +free_desc_stop_q_busy_cleanup_tx_ts: + if (in_tx_ts) { + dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]); + emac->tx_ts_skb[tx_ts_cookie] = NULL; + } + prueth_xmit_free(tx_chn, first_desc); + +drop_stop_q_busy: + netif_tx_stop_queue(netif_txq); + return NETDEV_TX_BUSY; +} + +static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma) +{ + struct prueth_tx_chn *tx_chn = data; + struct cppi5_host_desc_t *desc_tx; + struct sk_buff *skb; + void **swdata; + + desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma); + swdata = cppi5_hdesc_get_swdata(desc_tx); + skb = *(swdata); + prueth_xmit_free(tx_chn, desc_tx); + + dev_kfree_skb_any(skb); +} + +static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id) +{ + struct prueth_emac *emac = dev_id; + + /* currently only TX timestamp is being returned */ + tx_ts_work(emac); + + return IRQ_HANDLED; +} + +static irqreturn_t prueth_rx_irq(int irq, void *dev_id) +{ + struct prueth_emac *emac = dev_id; + + disable_irq_nosync(irq); + napi_schedule(&emac->napi_rx); + + return IRQ_HANDLED; +} + +struct icssg_firmwares { + char *pru; + char *rtu; + char *txpru; +}; + +static struct icssg_firmwares icssg_emac_firmwares[] = { + { + .pru = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf", + .rtu = "ti-pruss/am65x-sr2-rtu0-prueth-fw.elf", + .txpru = "ti-pruss/am65x-sr2-txpru0-prueth-fw.elf", + }, + { + .pru = "ti-pruss/am65x-sr2-pru1-prueth-fw.elf", + .rtu = "ti-pruss/am65x-sr2-rtu1-prueth-fw.elf", + .txpru = "ti-pruss/am65x-sr2-txpru1-prueth-fw.elf", + } +}; + +static int 
prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
+{
+	struct icssg_firmwares *firmwares;
+	struct device *dev = prueth->dev;
+	int slice, ret;
+
+	firmwares = icssg_emac_firmwares;
+
+	slice = prueth_emac_slice(emac);
+	if (slice < 0) {
+		netdev_err(emac->ndev, "invalid port\n");
+		return -EINVAL;
+	}
+
+	ret = icssg_config(prueth, emac, slice);
+	if (ret)
+		return ret;
+
+	ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
+	if (ret)
+		return ret;
+	ret = rproc_boot(prueth->pru[slice]);
+	if (ret) {
+		dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
+		return -EINVAL;
+	}
+
+	ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
+	if (ret)
+		goto halt_pru;
+	ret = rproc_boot(prueth->rtu[slice]);
+	if (ret) {
+		dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
+		goto halt_pru;
+	}
+
+	ret = rproc_set_firmware(prueth->txpru[slice], firmwares[slice].txpru);
+	if (ret)
+		goto halt_rtu;
+	ret = rproc_boot(prueth->txpru[slice]);
+	if (ret) {
+		dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
+		goto halt_rtu;
+	}
+
+	emac->fw_running = 1;
+	return 0;
+
+halt_rtu:
+	rproc_shutdown(prueth->rtu[slice]);
+
+halt_pru:
+	rproc_shutdown(prueth->pru[slice]);
+
+	return ret;
+}
+
+static void prueth_emac_stop(struct prueth_emac *emac)
+{
+	struct prueth *prueth = emac->prueth;
+	int slice;
+
+	switch (emac->port_id) {
+	case PRUETH_PORT_MII0:
+		slice = ICSS_SLICE0;
+		break;
+	case PRUETH_PORT_MII1:
+		slice = ICSS_SLICE1;
+		break;
+	default:
+		netdev_err(emac->ndev, "invalid port\n");
+		return;
+	}
+
+	emac->fw_running = 0;
+	rproc_shutdown(prueth->txpru[slice]);
+	rproc_shutdown(prueth->rtu[slice]);
+	rproc_shutdown(prueth->pru[slice]);
+}
+
+static void prueth_cleanup_tx_ts(struct prueth_emac *emac)
+{
+	int i;
+
+	for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
+		if (emac->tx_ts_skb[i]) {
+			dev_kfree_skb_any(emac->tx_ts_skb[i]);
+			emac->tx_ts_skb[i] = NULL;
+		}
+	}
+}
+
+/* called back by the PHY layer if there is a change in the link state of
+ * the hardware port
+ */
+static void emac_adjust_link(struct net_device *ndev)
+{
+	struct prueth_emac *emac = netdev_priv(ndev);
+	struct phy_device *phydev = ndev->phydev;
+	struct prueth *prueth = emac->prueth;
+	bool new_state = false;
+	unsigned long flags;
+
+	if (phydev->link) {
+		/* check the mode of operation - full/half duplex */
+		if (phydev->duplex != emac->duplex) {
+			new_state = true;
+			emac->duplex = phydev->duplex;
+		}
+		if (phydev->speed != emac->speed) {
+			new_state = true;
+			emac->speed = phydev->speed;
+		}
+		if (!emac->link) {
+			new_state = true;
+			emac->link = 1;
+		}
+	} else if (emac->link) {
+		new_state = true;
+		emac->link = 0;
+
+		/* f/w should support 100 & 1000 */
+		emac->speed = SPEED_1000;
+
+		/* half duplex may not be supported by f/w */
+		emac->duplex = DUPLEX_FULL;
+	}
+
+	if (new_state) {
+		phy_print_status(phydev);
+
+		/* update RGMII and MII configuration based on PHY negotiated
+		 * values
+		 */
+		if (emac->link) {
+			if (emac->duplex == DUPLEX_HALF)
+				icssg_config_half_duplex(emac);
+			/* Set the RGMII cfg for gig en and full duplex */
+			icssg_update_rgmii_cfg(prueth->miig_rt, emac);
+
+			/* update the Tx IPG based on 100M/1G speed */
+			spin_lock_irqsave(&emac->lock, flags);
+			icssg_config_ipg(emac);
+			spin_unlock_irqrestore(&emac->lock, flags);
+			icssg_config_set_speed(emac);
+			emac_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
+
+		} else {
+			emac_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
+		}
+	}
+
+	if (emac->link) {
+		/* reactivate the transmit queue */
+		netif_tx_wake_all_queues(ndev);
+	} else {
+		netif_tx_stop_all_queues(ndev);
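+		/* The link is gone: any TX timestamp requests still in
+		 * flight can no longer be completed by the firmware, so
+		 * drop the skbs saved for them below instead of leaking
+		 * them.
+		 */
+		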
prueth_cleanup_tx_ts(emac); + } +} + +static int emac_napi_rx_poll(struct napi_struct *napi_rx, int budget) +{ + struct prueth_emac *emac = prueth_napi_to_emac(napi_rx); + int rx_flow = PRUETH_RX_FLOW_DATA; + int flow = PRUETH_MAX_RX_FLOWS; + int num_rx = 0; + int cur_budget; + int ret; + + while (flow--) { + cur_budget = budget - num_rx; + + while (cur_budget--) { + ret = emac_rx_packet(emac, flow); + if (ret) + break; + num_rx++; + } + + if (num_rx >= budget) + break; + } + + if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) + enable_irq(emac->rx_chns.irq[rx_flow]); + + return num_rx; +} + +static int prueth_prepare_rx_chan(struct prueth_emac *emac, + struct prueth_rx_chn *chn, + int buf_size) +{ + struct sk_buff *skb; + int i, ret; + + for (i = 0; i < chn->descs_num; i++) { + skb = __netdev_alloc_skb_ip_align(NULL, buf_size, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + ret = prueth_dma_rx_push(emac, skb, chn); + if (ret < 0) { + netdev_err(emac->ndev, + "cannot submit skb for rx chan %s ret %d\n", + chn->name, ret); + kfree_skb(skb); + return ret; + } + } + + return 0; +} + +static void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num, + bool free_skb) +{ + int i; + + for (i = 0; i < ch_num; i++) { + if (free_skb) + k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn, + &emac->tx_chns[i], + prueth_tx_cleanup); + k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn); + } +} + +static void prueth_reset_rx_chan(struct prueth_rx_chn *chn, + int num_flows, bool disable) +{ + int i; + + for (i = 0; i < num_flows; i++) + k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn, + prueth_rx_cleanup, !!i); + if (disable) + k3_udma_glue_disable_rx_chn(chn->rx_chn); +} + +static int emac_phy_connect(struct prueth_emac *emac) +{ + struct prueth *prueth = emac->prueth; + struct net_device *ndev = emac->ndev; + /* connect PHY */ + ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node, + &emac_adjust_link, 0, + emac->phy_if); + if (!ndev->phydev) { + dev_err(prueth->dev, "couldn't connect to phy %s\n", + emac->phy_node->full_name); + return -ENODEV; + } + + if (!emac->half_duplex) { + dev_dbg(prueth->dev, "half duplex mode is not supported\n"); + phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); + } + + /* remove unsupported modes */ + phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT); + phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT); + + if (emac->phy_if == PHY_INTERFACE_MODE_MII) + phy_set_max_speed(ndev->phydev, SPEED_100); + + return 0; +} + +static u64 prueth_iep_gettime(void *clockops_data, struct ptp_system_timestamp *sts) +{ + u32 hi_rollover_count, hi_rollover_count_r; + struct prueth_emac *emac = clockops_data; + struct prueth *prueth = emac->prueth; + void __iomem *fw_hi_r_count_addr; + void __iomem *fw_count_hi_addr; + u32 iepcount_hi, iepcount_hi_r; + unsigned long flags; + u32 iepcount_lo; + u64 ts = 0; + + fw_count_hi_addr = prueth->shram.va + TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET; + fw_hi_r_count_addr = prueth->shram.va + TIMESYNC_FW_WC_HI_ROLLOVER_COUNT_OFFSET; + + local_irq_save(flags); + do { + iepcount_hi = icss_iep_get_count_hi(emac->iep); + iepcount_hi += readl(fw_count_hi_addr); + hi_rollover_count = readl(fw_hi_r_count_addr); + ptp_read_system_prets(sts); + iepcount_lo = icss_iep_get_count_low(emac->iep); + ptp_read_system_postts(sts); + + iepcount_hi_r = 
icss_iep_get_count_hi(emac->iep); + iepcount_hi_r += readl(fw_count_hi_addr); + hi_rollover_count_r = readl(fw_hi_r_count_addr); + } while ((iepcount_hi_r != iepcount_hi) || + (hi_rollover_count != hi_rollover_count_r)); + local_irq_restore(flags); + + ts = ((u64)hi_rollover_count) << 23 | iepcount_hi; + ts = ts * (u64)IEP_DEFAULT_CYCLE_TIME_NS + iepcount_lo; + + return ts; +} + +static void prueth_iep_settime(void *clockops_data, u64 ns) +{ + struct icssg_setclock_desc __iomem *sc_descp; + struct prueth_emac *emac = clockops_data; + struct icssg_setclock_desc sc_desc; + u64 cyclecount; + u32 cycletime; + int timeout; + + if (!emac->fw_running) + return; + + sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET; + + cycletime = IEP_DEFAULT_CYCLE_TIME_NS; + cyclecount = ns / cycletime; + + memset(&sc_desc, 0, sizeof(sc_desc)); + sc_desc.margin = cycletime - 1000; + sc_desc.cyclecounter0_set = cyclecount & GENMASK(31, 0); + sc_desc.cyclecounter1_set = (cyclecount & GENMASK(63, 32)) >> 32; + sc_desc.iepcount_set = ns % cycletime; + sc_desc.CMP0_current = cycletime - 4; //Count from 0 to (cycle time)-4 + + memcpy_toio(sc_descp, &sc_desc, sizeof(sc_desc)); + + writeb(1, &sc_descp->request); + + timeout = 5; /* fw should take 2-3 ms */ + while (timeout--) { + if (readb(&sc_descp->acknowledgment)) + return; + + usleep_range(500, 1000); + } + + dev_err(emac->prueth->dev, "settime timeout\n"); +} + +static int prueth_perout_enable(void *clockops_data, + struct ptp_perout_request *req, int on, + u64 *cmp) +{ + struct prueth_emac *emac = clockops_data; + u32 reduction_factor = 0, offset = 0; + struct timespec64 ts; + u64 ns_period; + + if (!on) + return 0; + + /* Any firmware specific stuff for PPS/PEROUT handling */ + ts.tv_sec = req->period.sec; + ts.tv_nsec = req->period.nsec; + ns_period = timespec64_to_ns(&ts); + + /* f/w doesn't support period less than cycle time */ + if (ns_period < IEP_DEFAULT_CYCLE_TIME_NS) + return -ENXIO; + + reduction_factor = ns_period / IEP_DEFAULT_CYCLE_TIME_NS; + offset = ns_period % IEP_DEFAULT_CYCLE_TIME_NS; + + /* f/w requires at least 1uS within a cycle so CMP + * can trigger after SYNC is enabled + */ + if (offset < 5 * NSEC_PER_USEC) + offset = 5 * NSEC_PER_USEC; + + /* if offset is close to cycle time then we will miss + * the CMP event for last tick when IEP rolls over. + * In normal mode, IEP tick is 4ns. + * In slow compensation it could be 0ns or 8ns at + * every slow compensation cycle. + */ + if (offset > IEP_DEFAULT_CYCLE_TIME_NS - 8) + offset = IEP_DEFAULT_CYCLE_TIME_NS - 8; + + /* we're in shadow mode so need to set upper 32-bits */ + *cmp = (u64)offset << 32; + + writel(reduction_factor, emac->prueth->shram.va + + TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET); + + writel(0, emac->prueth->shram.va + + TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET); + + return 0; +} + +const struct icss_iep_clockops prueth_iep_clockops = { + .settime = prueth_iep_settime, + .gettime = prueth_iep_gettime, + .perout_enable = prueth_perout_enable, +}; + +/** + * emac_ndo_open - EMAC device open + * @ndev: network adapter device + * + * Called when system wants to start the interface. 
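Brings up the
+ * TX/RX DMA channels, IRQs, NAPI and the PRU firmware, then starts the
+ * PHY; the error labels unwind those steps in reverse order.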
+ *
+ * Return: 0 for a successful open, or appropriate error code
+ */
+static int emac_ndo_open(struct net_device *ndev)
+{
+	struct prueth_emac *emac = netdev_priv(ndev);
+	int ret, i, num_data_chn = emac->tx_ch_num;
+	struct prueth *prueth = emac->prueth;
+	int slice = prueth_emac_slice(emac);
+	struct device *dev = prueth->dev;
+	int max_rx_flows;
+	int rx_flow;
+
+	/* clear SMEM and MSMC settings for all slices */
+	if (!prueth->emacs_initialized) {
+		memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
+		memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
+	}
+
+	/* set h/w MAC as user might have re-configured */
+	ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+
+	icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
+	icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
+
+	icssg_class_default(prueth->miig_rt, slice, 0);
+
+	/* Notify the stack of the actual queue counts. */
+	ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
+	if (ret) {
+		dev_err(dev, "cannot set real number of tx queues\n");
+		return ret;
+	}
+
+	init_completion(&emac->cmd_complete);
+	ret = prueth_init_tx_chns(emac);
+	if (ret) {
+		dev_err(dev, "failed to init tx channel: %d\n", ret);
+		return ret;
+	}
+
+	max_rx_flows = PRUETH_MAX_RX_FLOWS;
+	ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
+				  max_rx_flows, PRUETH_MAX_RX_DESC);
+	if (ret) {
+		dev_err(dev, "failed to init rx channel: %d\n", ret);
+		goto cleanup_tx;
+	}
+
+	ret = prueth_ndev_add_tx_napi(emac);
+	if (ret)
+		goto cleanup_rx;
+
+	/* we use only the highest priority flow for now i.e. @irq[3] */
+	rx_flow = PRUETH_RX_FLOW_DATA;
+	ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
+			  IRQF_TRIGGER_HIGH, dev_name(dev), emac);
+	if (ret) {
+		dev_err(dev, "unable to request RX IRQ\n");
+		goto cleanup_napi;
+	}
+
+	/* reset and start PRU firmware */
+	ret = prueth_emac_start(prueth, emac);
+	if (ret)
+		goto free_rx_irq;
+
+	icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
+
+	if (!prueth->emacs_initialized) {
+		ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
+				    emac, IEP_DEFAULT_CYCLE_TIME_NS);
+		if (ret) {
+			dev_err(dev, "failed to init iep: %d\n", ret);
+			goto stop;
+		}
+	}
+
+	ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
+				   IRQF_ONESHOT, dev_name(dev), emac);
+	if (ret)
+		goto stop;
+
+	/* Prepare RX */
+	ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
+	if (ret)
+		goto free_tx_ts_irq;
+
+	ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
+	if (ret)
+		goto reset_rx_chn;
+
+	for (i = 0; i < emac->tx_ch_num; i++) {
+		ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
+		if (ret)
+			goto reset_tx_chan;
+	}
+
+	/* Enable NAPI in Tx and Rx direction */
+	for (i = 0; i < emac->tx_ch_num; i++)
+		napi_enable(&emac->tx_chns[i].napi_tx);
+	napi_enable(&emac->napi_rx);
+
+	/* start PHY */
+	phy_start(ndev->phydev);
+
+	prueth->emacs_initialized++;
+
+	queue_work(system_long_wq, &emac->stats_work.work);
+
+	return 0;
+
+reset_tx_chan:
+	/* Since the interface is not yet up, there won't be any SKB
+	 * awaiting completion, so pass false for free_skb
+	 */
+	prueth_reset_tx_chan(emac, i, false);
+reset_rx_chn:
+	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
+free_tx_ts_irq:
+	free_irq(emac->tx_ts_irq, emac);
+stop:
+	prueth_emac_stop(emac);
+free_rx_irq:
+	free_irq(emac->rx_chns.irq[rx_flow], emac);
+cleanup_napi:
+	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
+cleanup_rx:
+	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
+cleanup_tx:
+	prueth_cleanup_tx_chns(emac);
+
+	return ret;
+}
+
+/**
+ * emac_ndo_stop - EMAC device stop
+ * @ndev: network adapter device
+ *
+ * Called when system wants to stop or down the interface.
+ *
+ * Return: Always 0 (Success)
+ */
+static int emac_ndo_stop(struct net_device *ndev)
+{
+	struct prueth_emac *emac = netdev_priv(ndev);
+	struct prueth *prueth = emac->prueth;
+	int rx_flow = PRUETH_RX_FLOW_DATA;
+	int max_rx_flows;
+	int ret, i;
+
+	/* inform the upper layers. */
+	netif_tx_stop_all_queues(ndev);
+
+	/* block packets from wire */
+	if (ndev->phydev)
+		phy_stop(ndev->phydev);
+
+	icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));
+
+	atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
+	/* ensure new tdown_cnt value is visible */
+	smp_mb__after_atomic();
+	/* tear down and disable UDMA channels */
+	reinit_completion(&emac->tdown_complete);
+	for (i = 0; i < emac->tx_ch_num; i++)
+		k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
+
+	ret = wait_for_completion_timeout(&emac->tdown_complete,
+					  msecs_to_jiffies(1000));
+	if (!ret)
+		netdev_err(ndev, "tx teardown timeout\n");
+
+	prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
+	for (i = 0; i < emac->tx_ch_num; i++)
+		napi_disable(&emac->tx_chns[i].napi_tx);
+
+	max_rx_flows = PRUETH_MAX_RX_FLOWS;
+	k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
+
+	prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
+
+	napi_disable(&emac->napi_rx);
+
+	cancel_work_sync(&emac->rx_mode_work);
+
+	/* destroy the stats work queued from ndo_open() */
+	cancel_delayed_work_sync(&emac->stats_work);
+
+	if (prueth->emacs_initialized == 1)
+		icss_iep_exit(emac->iep);
+
+	/* stop PRUs */
+	prueth_emac_stop(emac);
+
+	free_irq(emac->tx_ts_irq, emac);
+
+	free_irq(emac->rx_chns.irq[rx_flow], emac);
+	prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
+
+	prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
+	prueth_cleanup_tx_chns(emac);
+
+	prueth->emacs_initialized--;
+
+	return 0;
+}
+
+static void emac_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+	ndev->stats.tx_errors++;
+}
+
+static void emac_ndo_set_rx_mode_work(struct work_struct *work)
+{
+	struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work);
+	struct net_device *ndev = emac->ndev;
+	bool promisc, allmulti;
+
+	if (!netif_running(ndev))
+		return;
+
+	promisc = ndev->flags & IFF_PROMISC;
+	allmulti = ndev->flags & IFF_ALLMULTI;
+	emac_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
+	emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);
+
+	if (promisc) {
+		emac_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
+		emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
+		return;
+	}
+
+	if (allmulti) {
+		emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
+		return;
+	}
+
+	if (!netdev_mc_empty(ndev)) {
+		emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
+		return;
+	}
+}
+
+/**
+ * emac_ndo_set_rx_mode - EMAC set receive mode function
+ * @ndev: The
EMAC network adapter + * + * Called when system wants to set the receive mode of the device. + * + */ +static void emac_ndo_set_rx_mode(struct net_device *ndev) +{ + struct prueth_emac *emac = netdev_priv(ndev); + + queue_work(emac->cmd_wq, &emac->rx_mode_work); +} + +static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct hwtstamp_config config; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + emac->tx_ts_enabled = 0; + break; + case HWTSTAMP_TX_ON: + emac->tx_ts_enabled = 1; + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + emac->rx_ts_enabled = 0; + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_NTP_ALL: + emac->rx_ts_enabled = 1; + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct hwtstamp_config config; + + config.flags = 0; + config.tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + config.rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
+ -EFAULT : 0; +} + +static int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGHWTSTAMP: + return emac_get_ts_config(ndev, ifr); + case SIOCSHWTSTAMP: + return emac_set_ts_config(ndev, ifr); + default: + break; + } + + return phy_do_ioctl(ndev, ifr, cmd); +} + +static void emac_ndo_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *stats) +{ + struct prueth_emac *emac = netdev_priv(ndev); + + emac_update_hardware_stats(emac); + + stats->rx_packets = emac_get_stat_by_name(emac, "rx_packets"); + stats->rx_bytes = emac_get_stat_by_name(emac, "rx_bytes"); + stats->tx_packets = emac_get_stat_by_name(emac, "tx_packets"); + stats->tx_bytes = emac_get_stat_by_name(emac, "tx_bytes"); + stats->rx_crc_errors = emac_get_stat_by_name(emac, "rx_crc_errors"); + stats->rx_over_errors = emac_get_stat_by_name(emac, "rx_over_errors"); + stats->multicast = emac_get_stat_by_name(emac, "rx_multicast_frames"); + + stats->rx_errors = ndev->stats.rx_errors; + stats->rx_dropped = ndev->stats.rx_dropped; + stats->tx_errors = ndev->stats.tx_errors; + stats->tx_dropped = ndev->stats.tx_dropped; +} + +static int emac_ndo_get_phys_port_name(struct net_device *ndev, char *name, + size_t len) +{ + struct prueth_emac *emac = netdev_priv(ndev); + int ret; + + ret = snprintf(name, len, "p%d", emac->port_id); + if (ret >= len) + return -EINVAL; + + return 0; +} + +static const struct net_device_ops emac_netdev_ops = { + .ndo_open = emac_ndo_open, + .ndo_stop = emac_ndo_stop, + .ndo_start_xmit = emac_ndo_start_xmit, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_tx_timeout = emac_ndo_tx_timeout, + .ndo_set_rx_mode = emac_ndo_set_rx_mode, + .ndo_eth_ioctl = emac_ndo_ioctl, + .ndo_get_stats64 = emac_ndo_get_stats64, + .ndo_get_phys_port_name = emac_ndo_get_phys_port_name, +}; + +/* get emac_port corresponding to eth_node name */ +static int prueth_node_port(struct device_node *eth_node) +{ + u32 port_id; + int ret; + + ret = of_property_read_u32(eth_node, "reg", &port_id); + if (ret) + return ret; + + if (port_id == 0) + return PRUETH_PORT_MII0; + else if (port_id == 1) + return PRUETH_PORT_MII1; + else + return PRUETH_PORT_INVALID; +} + +/* get MAC instance corresponding to eth_node name */ +static int prueth_node_mac(struct device_node *eth_node) +{ + u32 port_id; + int ret; + + ret = of_property_read_u32(eth_node, "reg", &port_id); + if (ret) + return ret; + + if (port_id == 0) + return PRUETH_MAC0; + else if (port_id == 1) + return PRUETH_MAC1; + else + return PRUETH_MAC_INVALID; +} + +static int prueth_netdev_init(struct prueth *prueth, + struct device_node *eth_node) +{ + int ret, num_tx_chn = PRUETH_MAX_TX_QUEUES; + struct prueth_emac *emac; + struct net_device *ndev; + enum prueth_port port; + const char *irq_name; + enum prueth_mac mac; + + port = prueth_node_port(eth_node); + if (port == PRUETH_PORT_INVALID) + return -EINVAL; + + mac = prueth_node_mac(eth_node); + if (mac == PRUETH_MAC_INVALID) + return -EINVAL; + + ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn); + if (!ndev) + return -ENOMEM; + + emac = netdev_priv(ndev); + emac->prueth = prueth; + emac->ndev = ndev; + emac->port_id = port; + emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq"); + if (!emac->cmd_wq) { + ret = -ENOMEM; + goto free_ndev; + } + INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work); + + INIT_DELAYED_WORK(&emac->stats_work, emac_stats_work_handler); + + ret = pruss_request_mem_region(prueth->pruss, + port == 
PRUETH_PORT_MII0 ? + PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1, + &emac->dram); + if (ret) { + dev_err(prueth->dev, "unable to get DRAM: %d\n", ret); + ret = -ENOMEM; + goto free_wq; + } + + emac->tx_ch_num = 1; + + irq_name = "tx_ts0"; + if (emac->port_id == PRUETH_PORT_MII1) + irq_name = "tx_ts1"; + emac->tx_ts_irq = platform_get_irq_byname_optional(prueth->pdev, irq_name); + if (emac->tx_ts_irq < 0) { + ret = dev_err_probe(prueth->dev, emac->tx_ts_irq, "could not get tx_ts_irq\n"); + goto free; + } + + SET_NETDEV_DEV(ndev, prueth->dev); + spin_lock_init(&emac->lock); + mutex_init(&emac->cmd_lock); + + emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0); + if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) { + dev_err(prueth->dev, "couldn't find phy-handle\n"); + ret = -ENODEV; + goto free; + } else if (of_phy_is_fixed_link(eth_node)) { + ret = of_phy_register_fixed_link(eth_node); + if (ret) { + ret = dev_err_probe(prueth->dev, ret, + "failed to register fixed-link phy\n"); + goto free; + } + + emac->phy_node = eth_node; + } + + ret = of_get_phy_mode(eth_node, &emac->phy_if); + if (ret) { + dev_err(prueth->dev, "could not get phy-mode property\n"); + goto free; + } + + if (emac->phy_if != PHY_INTERFACE_MODE_MII && + !phy_interface_mode_is_rgmii(emac->phy_if)) { + dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if)); + ret = -EINVAL; + goto free; + } + + /* AM65 SR2.0 has TX Internal delay always enabled by hardware + * and it is not possible to disable TX Internal delay. The below + * switch case block describes how we handle different phy modes + * based on hardware restriction. + */ + switch (emac->phy_if) { + case PHY_INTERFACE_MODE_RGMII_ID: + emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID; + break; + case PHY_INTERFACE_MODE_RGMII_TXID: + emac->phy_if = PHY_INTERFACE_MODE_RGMII; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_RXID: + dev_err(prueth->dev, "RGMII mode without TX delay is not supported"); + ret = -EINVAL; + goto free; + default: + break; + } + + /* get mac address from DT and set private and netdev addr */ + ret = of_get_ethdev_address(eth_node, ndev); + if (!is_valid_ether_addr(ndev->dev_addr)) { + eth_hw_addr_random(ndev); + dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n", + port, ndev->dev_addr); + } + ether_addr_copy(emac->mac_addr, ndev->dev_addr); + + ndev->min_mtu = PRUETH_MIN_PKT_SIZE; + ndev->max_mtu = PRUETH_MAX_MTU; + ndev->netdev_ops = &emac_netdev_ops; + ndev->ethtool_ops = &icssg_ethtool_ops; + ndev->hw_features = NETIF_F_SG; + ndev->features = ndev->hw_features; + + netif_napi_add(ndev, &emac->napi_rx, emac_napi_rx_poll); + prueth->emac[mac] = emac; + + return 0; + +free: + pruss_release_mem_region(prueth->pruss, &emac->dram); +free_wq: + destroy_workqueue(emac->cmd_wq); +free_ndev: + emac->ndev = NULL; + prueth->emac[mac] = NULL; + free_netdev(ndev); + + return ret; +} + +static void prueth_netdev_exit(struct prueth *prueth, + struct device_node *eth_node) +{ + struct prueth_emac *emac; + enum prueth_mac mac; + + mac = prueth_node_mac(eth_node); + if (mac == PRUETH_MAC_INVALID) + return; + + emac = prueth->emac[mac]; + if (!emac) + return; + + if (of_phy_is_fixed_link(emac->phy_node)) + of_phy_deregister_fixed_link(emac->phy_node); + + netif_napi_del(&emac->napi_rx); + + pruss_release_mem_region(prueth->pruss, &emac->dram); + destroy_workqueue(emac->cmd_wq); + free_netdev(emac->ndev); + prueth->emac[mac] = NULL; +} + +static int prueth_get_cores(struct prueth *prueth, int slice) +{ + 
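/* Each slice is driven by three PRU cores listed consecutively in the
+	 * devicetree PRU phandle list: PRU, RTU and TX_PRU. Slice 0 takes
+	 * indices 0..2 and slice 1 takes 3..5, hence the idx base chosen
+	 * below.
+	 */
+	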
struct device *dev = prueth->dev;
+	enum pruss_pru_id pruss_id;
+	struct device_node *np;
+	int idx = -1, ret;
+
+	np = dev->of_node;
+
+	switch (slice) {
+	case ICSS_SLICE0:
+		idx = 0;
+		break;
+	case ICSS_SLICE1:
+		idx = 3;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	prueth->pru[slice] = pru_rproc_get(np, idx, &pruss_id);
+	if (IS_ERR(prueth->pru[slice])) {
+		ret = PTR_ERR(prueth->pru[slice]);
+		prueth->pru[slice] = NULL;
+		return dev_err_probe(dev, ret, "unable to get PRU%d\n", slice);
+	}
+	prueth->pru_id[slice] = pruss_id;
+
+	idx++;
+	prueth->rtu[slice] = pru_rproc_get(np, idx, NULL);
+	if (IS_ERR(prueth->rtu[slice])) {
+		ret = PTR_ERR(prueth->rtu[slice]);
+		prueth->rtu[slice] = NULL;
+		return dev_err_probe(dev, ret, "unable to get RTU%d\n", slice);
+	}
+
+	idx++;
+	prueth->txpru[slice] = pru_rproc_get(np, idx, NULL);
+	if (IS_ERR(prueth->txpru[slice])) {
+		ret = PTR_ERR(prueth->txpru[slice]);
+		prueth->txpru[slice] = NULL;
+		return dev_err_probe(dev, ret, "unable to get TX_PRU%d\n", slice);
+	}
+
+	return 0;
+}
+
+static void prueth_put_cores(struct prueth *prueth, int slice)
+{
+	if (prueth->txpru[slice])
+		pru_rproc_put(prueth->txpru[slice]);
+
+	if (prueth->rtu[slice])
+		pru_rproc_put(prueth->rtu[slice]);
+
+	if (prueth->pru[slice])
+		pru_rproc_put(prueth->pru[slice]);
+}
+
+static int prueth_probe(struct platform_device *pdev)
+{
+	struct device_node *eth_node, *eth_ports_node;
+	struct device_node *eth0_node = NULL;
+	struct device_node *eth1_node = NULL;
+	struct genpool_data_align gp_data = {
+		.align = SZ_64K,
+	};
+	struct device *dev = &pdev->dev;
+	struct device_node *np;
+	struct prueth *prueth;
+	struct pruss *pruss;
+	u32 msmc_ram_size;
+	int i, ret;
+
+	np = dev->of_node;
+
+	prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
+	if (!prueth)
+		return -ENOMEM;
+
+	dev_set_drvdata(dev, prueth);
+	prueth->pdev = pdev;
+	prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev);
+
+	prueth->dev = dev;
+	eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
+	if (!eth_ports_node)
+		return -ENOENT;
+
+	for_each_child_of_node(eth_ports_node, eth_node) {
+		u32 reg;
+
+		if (strcmp(eth_node->name, "port"))
+			continue;
+		ret = of_property_read_u32(eth_node, "reg", &reg);
+		if (ret < 0) {
+			/* skip the port rather than acting on an
+			 * uninitialized reg value
+			 */
+			dev_err(dev, "%pOF error reading port_id %d\n",
+				eth_node, ret);
+			continue;
+		}
+
+		of_node_get(eth_node);
+
+		if (reg == 0) {
+			eth0_node = eth_node;
+			if (!of_device_is_available(eth0_node)) {
+				of_node_put(eth0_node);
+				eth0_node = NULL;
+			}
+		} else if (reg == 1) {
+			eth1_node = eth_node;
+			if (!of_device_is_available(eth1_node)) {
+				of_node_put(eth1_node);
+				eth1_node = NULL;
+			}
+		} else {
+			dev_err(dev, "port reg should be 0 or 1\n");
+		}
+	}
+
+	of_node_put(eth_ports_node);
+
+	/* At least one node must be present and available else we fail */
+	if (!eth0_node && !eth1_node) {
+		dev_err(dev, "neither port0 nor port1 node available\n");
+		return -ENODEV;
+	}
+
+	if (eth0_node == eth1_node) {
+		dev_err(dev, "port0 and port1 can't have same reg\n");
+		of_node_put(eth0_node);
+		return -ENODEV;
+	}
+
+	prueth->eth_node[PRUETH_MAC0] = eth0_node;
+	prueth->eth_node[PRUETH_MAC1] = eth1_node;
+
+	prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
+	if (IS_ERR(prueth->miig_rt)) {
+		dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
+		return -ENODEV;
+	}
+
+	prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
+	if (IS_ERR(prueth->mii_rt)) {
+		dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
+		return -ENODEV;
+	}
+
+	if (eth0_node) {
+ ret = prueth_get_cores(prueth, ICSS_SLICE0); + if (ret) + goto put_cores; + } + + if (eth1_node) { + ret = prueth_get_cores(prueth, ICSS_SLICE1); + if (ret) + goto put_cores; + } + + pruss = pruss_get(eth0_node ? + prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]); + if (IS_ERR(pruss)) { + ret = PTR_ERR(pruss); + dev_err(dev, "unable to get pruss handle\n"); + goto put_cores; + } + + prueth->pruss = pruss; + + ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2, + &prueth->shram); + if (ret) { + dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret); + goto put_pruss; + } + + prueth->sram_pool = of_gen_pool_get(np, "sram", 0); + if (!prueth->sram_pool) { + dev_err(dev, "unable to get SRAM pool\n"); + ret = -ENODEV; + + goto put_mem; + } + + msmc_ram_size = MSMC_RAM_SIZE; + + /* NOTE: FW bug needs buffer base to be 64KB aligned */ + prueth->msmcram.va = + (void __iomem *)gen_pool_alloc_algo(prueth->sram_pool, + msmc_ram_size, + gen_pool_first_fit_align, + &gp_data); + + if (!prueth->msmcram.va) { + ret = -ENOMEM; + dev_err(dev, "unable to allocate MSMC resource\n"); + goto put_mem; + } + prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool, + (unsigned long)prueth->msmcram.va); + prueth->msmcram.size = msmc_ram_size; + memset_io(prueth->msmcram.va, 0, msmc_ram_size); + dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa, + prueth->msmcram.va, prueth->msmcram.size); + + prueth->iep0 = icss_iep_get_idx(np, 0); + if (IS_ERR(prueth->iep0)) { + ret = dev_err_probe(dev, PTR_ERR(prueth->iep0), "iep0 get failed\n"); + prueth->iep0 = NULL; + goto free_pool; + } + + prueth->iep1 = icss_iep_get_idx(np, 1); + if (IS_ERR(prueth->iep1)) { + ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n"); + goto put_iep0; + } + + if (prueth->pdata.quirk_10m_link_issue) { + /* Enable IEP1 for FW in 64bit mode as W/A for 10M FD link detect issue under TX + * traffic. 
+ */ + icss_iep_init_fw(prueth->iep1); + } + + /* setup netdev interfaces */ + if (eth0_node) { + ret = prueth_netdev_init(prueth, eth0_node); + if (ret) { + dev_err_probe(dev, ret, "netdev init %s failed\n", + eth0_node->name); + goto exit_iep; + } + + if (of_find_property(eth0_node, "ti,half-duplex-capable", NULL)) + prueth->emac[PRUETH_MAC0]->half_duplex = 1; + + prueth->emac[PRUETH_MAC0]->iep = prueth->iep0; + } + + if (eth1_node) { + ret = prueth_netdev_init(prueth, eth1_node); + if (ret) { + dev_err_probe(dev, ret, "netdev init %s failed\n", + eth1_node->name); + goto netdev_exit; + } + + if (of_find_property(eth1_node, "ti,half-duplex-capable", NULL)) + prueth->emac[PRUETH_MAC1]->half_duplex = 1; + + prueth->emac[PRUETH_MAC1]->iep = prueth->iep0; + } + + /* register the network devices */ + if (eth0_node) { + ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev); + if (ret) { + dev_err(dev, "can't register netdev for port MII0"); + goto netdev_exit; + } + + prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev; + + emac_phy_connect(prueth->emac[PRUETH_MAC0]); + phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev); + } + + if (eth1_node) { + ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev); + if (ret) { + dev_err(dev, "can't register netdev for port MII1"); + goto netdev_unregister; + } + + prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev; + emac_phy_connect(prueth->emac[PRUETH_MAC1]); + phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev); + } + + dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n", + (!eth0_node || !eth1_node) ? "single" : "dual"); + + if (eth1_node) + of_node_put(eth1_node); + if (eth0_node) + of_node_put(eth0_node); + return 0; + +netdev_unregister: + for (i = 0; i < PRUETH_NUM_MACS; i++) { + if (!prueth->registered_netdevs[i]) + continue; + if (prueth->emac[i]->ndev->phydev) { + phy_disconnect(prueth->emac[i]->ndev->phydev); + prueth->emac[i]->ndev->phydev = NULL; + } + unregister_netdev(prueth->registered_netdevs[i]); + } + +netdev_exit: + for (i = 0; i < PRUETH_NUM_MACS; i++) { + eth_node = prueth->eth_node[i]; + if (!eth_node) + continue; + + prueth_netdev_exit(prueth, eth_node); + } + +exit_iep: + if (prueth->pdata.quirk_10m_link_issue) + icss_iep_exit_fw(prueth->iep1); + icss_iep_put(prueth->iep1); + +put_iep0: + icss_iep_put(prueth->iep0); + prueth->iep0 = NULL; + prueth->iep1 = NULL; + +free_pool: + gen_pool_free(prueth->sram_pool, + (unsigned long)prueth->msmcram.va, msmc_ram_size); + +put_mem: + pruss_release_mem_region(prueth->pruss, &prueth->shram); + +put_pruss: + pruss_put(prueth->pruss); + +put_cores: + if (eth1_node) { + prueth_put_cores(prueth, ICSS_SLICE1); + of_node_put(eth1_node); + } + + if (eth0_node) { + prueth_put_cores(prueth, ICSS_SLICE0); + of_node_put(eth0_node); + } + + return ret; +} + +static void prueth_remove(struct platform_device *pdev) +{ + struct prueth *prueth = platform_get_drvdata(pdev); + struct device_node *eth_node; + int i; + + for (i = 0; i < PRUETH_NUM_MACS; i++) { + if (!prueth->registered_netdevs[i]) + continue; + phy_stop(prueth->emac[i]->ndev->phydev); + phy_disconnect(prueth->emac[i]->ndev->phydev); + prueth->emac[i]->ndev->phydev = NULL; + unregister_netdev(prueth->registered_netdevs[i]); + } + + for (i = 0; i < PRUETH_NUM_MACS; i++) { + eth_node = prueth->eth_node[i]; + if (!eth_node) + continue; + + prueth_netdev_exit(prueth, eth_node); + } + + if (prueth->pdata.quirk_10m_link_issue) + icss_iep_exit_fw(prueth->iep1); + + 
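+	/* Editorial note (not part of the original patch): the remove path
+	 * below releases resources in the reverse order of prueth_probe(),
+	 * mirroring its error unwinding, roughly:
+	 *
+	 *   icss_iep_put(iep1); icss_iep_put(iep0);
+	 *   gen_pool_free(sram_pool, msmcram.va, MSMC_RAM_SIZE);
+	 *   pruss_release_mem_region(pruss, &shram);
+	 *   pruss_put(pruss);
+	 *   prueth_put_cores(ICSS_SLICE1); prueth_put_cores(ICSS_SLICE0);
+	 */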
icss_iep_put(prueth->iep1); + icss_iep_put(prueth->iep0); + + gen_pool_free(prueth->sram_pool, + (unsigned long)prueth->msmcram.va, + MSMC_RAM_SIZE); + + pruss_release_mem_region(prueth->pruss, &prueth->shram); + + pruss_put(prueth->pruss); + + if (prueth->eth_node[PRUETH_MAC1]) + prueth_put_cores(prueth, ICSS_SLICE1); + + if (prueth->eth_node[PRUETH_MAC0]) + prueth_put_cores(prueth, ICSS_SLICE0); +} + +#ifdef CONFIG_PM_SLEEP +static int prueth_suspend(struct device *dev) +{ + struct prueth *prueth = dev_get_drvdata(dev); + struct net_device *ndev; + int i, ret; + + for (i = 0; i < PRUETH_NUM_MACS; i++) { + ndev = prueth->registered_netdevs[i]; + + if (!ndev) + continue; + + if (netif_running(ndev)) { + netif_device_detach(ndev); + ret = emac_ndo_stop(ndev); + if (ret < 0) { + netdev_err(ndev, "failed to stop: %d", ret); + return ret; + } + } + } + + return 0; +} + +static int prueth_resume(struct device *dev) +{ + struct prueth *prueth = dev_get_drvdata(dev); + struct net_device *ndev; + int i, ret; + + for (i = 0; i < PRUETH_NUM_MACS; i++) { + ndev = prueth->registered_netdevs[i]; + + if (!ndev) + continue; + + if (netif_running(ndev)) { + ret = emac_ndo_open(ndev); + if (ret < 0) { + netdev_err(ndev, "failed to start: %d", ret); + return ret; + } + netif_device_attach(ndev); + } + } + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +static const struct dev_pm_ops prueth_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(prueth_suspend, prueth_resume) +}; + +static const struct prueth_pdata am654_icssg_pdata = { + .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, + .quirk_10m_link_issue = 1, +}; + +static const struct prueth_pdata am64x_icssg_pdata = { + .fdqring_mode = K3_RINGACC_RING_MODE_RING, +}; + +static const struct of_device_id prueth_dt_match[] = { + { .compatible = "ti,am654-icssg-prueth", .data = &am654_icssg_pdata }, + { .compatible = "ti,am642-icssg-prueth", .data = &am64x_icssg_pdata }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, prueth_dt_match); + +static struct platform_driver prueth_driver = { + .probe = prueth_probe, + .remove_new = prueth_remove, + .driver = { + .name = "icssg-prueth", + .of_match_table = prueth_dt_match, + .pm = &prueth_dev_pm_ops, + }, +}; +module_platform_driver(prueth_driver); + +MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>"); +MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>"); +MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h new file mode 100644 index 000000000000..8b6d6b497010 --- /dev/null +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h @@ -0,0 +1,288 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Texas Instruments ICSSG Ethernet driver + * + * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/ + * + */ + +#ifndef __NET_TI_ICSSG_PRUETH_H +#define __NET_TI_ICSSG_PRUETH_H + +#include <linux/etherdevice.h> +#include <linux/genalloc.h> +#include <linux/if_vlan.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/net_tstamp.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/of_platform.h> +#include <linux/phy.h> +#include <linux/remoteproc/pruss.h> +#include <linux/pruss_driver.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/remoteproc.h> + +#include <linux/dma-mapping.h> +#include <linux/dma/ti-cppi5.h> 
+#include <linux/dma/k3-udma-glue.h> + +#include <net/devlink.h> + +#include "icssg_config.h" +#include "icss_iep.h" +#include "icssg_switch_map.h" + +#define PRUETH_MAX_MTU (2000 - ETH_HLEN - ETH_FCS_LEN) +#define PRUETH_MIN_PKT_SIZE (VLAN_ETH_ZLEN) +#define PRUETH_MAX_PKT_SIZE (PRUETH_MAX_MTU + ETH_HLEN + ETH_FCS_LEN) + +#define ICSS_SLICE0 0 +#define ICSS_SLICE1 1 + +#define ICSS_FW_PRU 0 +#define ICSS_FW_RTU 1 + +#define ICSSG_MAX_RFLOWS 8 /* per slice */ + +/* Number of ICSSG related stats */ +#define ICSSG_NUM_STATS 60 +#define ICSSG_NUM_STANDARD_STATS 31 +#define ICSSG_NUM_ETHTOOL_STATS (ICSSG_NUM_STATS - ICSSG_NUM_STANDARD_STATS) + +/* Firmware status codes */ +#define ICSS_HS_FW_READY 0x55555555 +#define ICSS_HS_FW_DEAD 0xDEAD0000 /* lower 16 bits contain error code */ + +/* Firmware command codes */ +#define ICSS_HS_CMD_BUSY 0x40000000 +#define ICSS_HS_CMD_DONE 0x80000000 +#define ICSS_HS_CMD_CANCEL 0x10000000 + +/* Firmware commands */ +#define ICSS_CMD_SPAD 0x20 +#define ICSS_CMD_RXTX 0x10 +#define ICSS_CMD_ADD_FDB 0x1 +#define ICSS_CMD_DEL_FDB 0x2 +#define ICSS_CMD_SET_RUN 0x4 +#define ICSS_CMD_GET_FDB_SLOT 0x5 +#define ICSS_CMD_ENABLE_VLAN 0x5 +#define ICSS_CMD_DISABLE_VLAN 0x6 +#define ICSS_CMD_ADD_FILTER 0x7 +#define ICSS_CMD_ADD_MAC 0x8 + +/* In switch mode there are 3 real ports, i.e. 3 MAC addresses; + * however, Linux sees only the host side port. The other 2 ports + * are the switch ports. + * In emac mode there are 2 real ports, i.e. 2 MAC addresses, and + * Linux sees both ports. + */ +enum prueth_port { + PRUETH_PORT_HOST = 0, /* host side port */ + PRUETH_PORT_MII0, /* physical port RG/SG MII 0 */ + PRUETH_PORT_MII1, /* physical port RG/SG MII 1 */ + PRUETH_PORT_INVALID, /* Invalid prueth port */ +}; + +enum prueth_mac { + PRUETH_MAC0 = 0, + PRUETH_MAC1, + PRUETH_NUM_MACS, + PRUETH_MAC_INVALID, +}; + +struct prueth_tx_chn { + struct device *dma_dev; + struct napi_struct napi_tx; + struct k3_cppi_desc_pool *desc_pool; + struct k3_udma_glue_tx_channel *tx_chn; + struct prueth_emac *emac; + u32 id; + u32 descs_num; + unsigned int irq; + char name[32]; +}; + +struct prueth_rx_chn { + struct device *dev; + struct device *dma_dev; + struct k3_cppi_desc_pool *desc_pool; + struct k3_udma_glue_rx_channel *rx_chn; + u32 descs_num; + unsigned int irq[ICSSG_MAX_RFLOWS]; /* separate irq per flow */ + char name[32]; +}; + +/* There are 4 Tx DMA channels; the highest priority one is CH3 (thread 3) + * and the lower three are lower-priority channels (threads). 
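+ * (Added editorial note: the PRUETH_MAX_TX_QUEUES define that follows
+ * mirrors these four hardware threads.)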
+ */ +#define PRUETH_MAX_TX_QUEUES 4 + +#define PRUETH_MAX_TX_TS_REQUESTS 50 /* Max simultaneous TX_TS requests */ + +/* data for each emac port */ +struct prueth_emac { + bool fw_running; + struct prueth *prueth; + struct net_device *ndev; + u8 mac_addr[6]; + struct napi_struct napi_rx; + u32 msg_enable; + + int link; + int speed; + int duplex; + + const char *phy_id; + struct device_node *phy_node; + phy_interface_t phy_if; + enum prueth_port port_id; + struct icss_iep *iep; + unsigned int rx_ts_enabled : 1; + unsigned int tx_ts_enabled : 1; + unsigned int half_duplex : 1; + + /* DMA related */ + struct prueth_tx_chn tx_chns[PRUETH_MAX_TX_QUEUES]; + struct completion tdown_complete; + atomic_t tdown_cnt; + struct prueth_rx_chn rx_chns; + int rx_flow_id_base; + int tx_ch_num; + + spinlock_t lock; /* serialize access */ + + /* TX HW Timestamping */ + /* The TX TS cookie is used as an index into the tx_ts_skb array */ + struct sk_buff *tx_ts_skb[PRUETH_MAX_TX_TS_REQUESTS]; + atomic_t tx_ts_pending; + int tx_ts_irq; + + u8 cmd_seq; + /* shutdown related */ + u32 cmd_data[4]; + struct completion cmd_complete; + /* Mutex to serialize access to firmware command interface */ + struct mutex cmd_lock; + struct work_struct rx_mode_work; + struct workqueue_struct *cmd_wq; + + struct pruss_mem_region dram; + + struct delayed_work stats_work; + u64 stats[ICSSG_NUM_STATS]; +}; + +/** + * struct prueth_pdata - PRUeth platform data + * @fdqring_mode: Free desc queue mode + * @quirk_10m_link_issue: 10M link detect errata + */ +struct prueth_pdata { + enum k3_ring_mode fdqring_mode; + u32 quirk_10m_link_issue:1; +}; + +/** + * struct prueth - PRUeth structure + * @dev: device + * @pruss: pruss handle + * @pru: rproc instances of PRUs + * @rtu: rproc instances of RTUs + * @txpru: rproc instances of TX_PRUs + * @shram: PRUSS shared RAM region + * @sram_pool: MSMC RAM pool for buffers + * @msmcram: MSMC RAM region + * @eth_node: DT node for the port + * @emac: private EMAC data structure + * @registered_netdevs: list of registered netdevs + * @miig_rt: regmap to mii_g_rt block + * @mii_rt: regmap to mii_rt block + * @pru_id: ID for each of the PRUs + * @pdev: pointer to ICSSG platform device + * @pdata: pointer to platform data for ICSSG driver + * @icssg_hwcmdseq: seq counter for HWQ messages + * @emacs_initialized: num of EMACs/ext ports that are up/running + * @iep0: pointer to IEP0 device + * @iep1: pointer to IEP1 device + */ +struct prueth { + struct device *dev; + struct pruss *pruss; + struct rproc *pru[PRUSS_NUM_PRUS]; + struct rproc *rtu[PRUSS_NUM_PRUS]; + struct rproc *txpru[PRUSS_NUM_PRUS]; + struct pruss_mem_region shram; + struct gen_pool *sram_pool; + struct pruss_mem_region msmcram; + + struct device_node *eth_node[PRUETH_NUM_MACS]; + struct prueth_emac *emac[PRUETH_NUM_MACS]; + struct net_device *registered_netdevs[PRUETH_NUM_MACS]; + struct regmap *miig_rt; + struct regmap *mii_rt; + + enum pruss_pru_id pru_id[PRUSS_NUM_PRUS]; + struct platform_device *pdev; + struct prueth_pdata pdata; + u8 icssg_hwcmdseq; + int emacs_initialized; + struct icss_iep *iep0; + struct icss_iep *iep1; +}; + +struct emac_tx_ts_response { + u32 reserved[2]; + u32 cookie; + u32 lo_ts; + u32 hi_ts; +}; + +/* get PRUSS SLICE number from prueth_emac */ +static inline int prueth_emac_slice(struct prueth_emac *emac) +{ + switch (emac->port_id) { + case PRUETH_PORT_MII0: + return ICSS_SLICE0; + case PRUETH_PORT_MII1: + return ICSS_SLICE1; + default: + return -EINVAL; + } +} + +extern const struct ethtool_ops icssg_ethtool_ops; + +/* 
Classifier helpers */ +void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac); +void icssg_class_set_host_mac_addr(struct regmap *miig_rt, const u8 *mac); +void icssg_class_disable(struct regmap *miig_rt, int slice); +void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti); +void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr); + +/* config helpers */ +void icssg_config_ipg(struct prueth_emac *emac); +int icssg_config(struct prueth *prueth, struct prueth_emac *emac, + int slice); +int emac_set_port_state(struct prueth_emac *emac, + enum icssg_port_state_cmd state); +void icssg_config_set_speed(struct prueth_emac *emac); +void icssg_config_half_duplex(struct prueth_emac *emac); + +/* Buffer queue helpers */ +int icssg_queue_pop(struct prueth *prueth, u8 queue); +void icssg_queue_push(struct prueth *prueth, int queue, u16 addr); +u32 icssg_queue_level(struct prueth *prueth, int queue); + +#define prueth_napi_to_tx_chn(pnapi) \ + container_of(pnapi, struct prueth_tx_chn, napi_tx) + +void emac_stats_work_handler(struct work_struct *work); +void emac_update_hardware_stats(struct prueth_emac *emac); +int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name); +#endif /* __NET_TI_ICSSG_PRUETH_H */ diff --git a/drivers/net/ethernet/ti/icssg/icssg_queues.c b/drivers/net/ethernet/ti/icssg/icssg_queues.c new file mode 100644 index 000000000000..3c34f61ad40b --- /dev/null +++ b/drivers/net/ethernet/ti/icssg/icssg_queues.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 +/* ICSSG Buffer queue helpers + * + * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com + */ + +#include <linux/regmap.h> +#include "icssg_prueth.h" + +#define ICSSG_QUEUES_MAX 64 +#define ICSSG_QUEUE_OFFSET 0xd00 +#define ICSSG_QUEUE_PEEK_OFFSET 0xe00 +#define ICSSG_QUEUE_CNT_OFFSET 0xe40 +#define ICSSG_QUEUE_RESET_OFFSET 0xf40 + +int icssg_queue_pop(struct prueth *prueth, u8 queue) +{ + u32 val, cnt; + + if (queue >= ICSSG_QUEUES_MAX) + return -EINVAL; + + regmap_read(prueth->miig_rt, ICSSG_QUEUE_CNT_OFFSET + 4 * queue, &cnt); + if (!cnt) + return -EINVAL; + + regmap_read(prueth->miig_rt, ICSSG_QUEUE_OFFSET + 4 * queue, &val); + + return val; +} + +void icssg_queue_push(struct prueth *prueth, int queue, u16 addr) +{ + if (queue >= ICSSG_QUEUES_MAX) + return; + + regmap_write(prueth->miig_rt, ICSSG_QUEUE_OFFSET + 4 * queue, addr); +} + +u32 icssg_queue_level(struct prueth *prueth, int queue) +{ + u32 reg; + + if (queue >= ICSSG_QUEUES_MAX) + return 0; + + regmap_read(prueth->miig_rt, ICSSG_QUEUE_CNT_OFFSET + 4 * queue, &reg); + + return reg; +} diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c new file mode 100644 index 000000000000..3dbadddd7e35 --- /dev/null +++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Texas Instruments ICSSG Ethernet driver + * + * Copyright (C) 2018-2021 Texas Instruments Incorporated - https://www.ti.com/ + * + */ + +#include "icssg_prueth.h" +#include "icssg_stats.h" +#include <linux/regmap.h> + +#define ICSSG_TX_PACKET_OFFSET 0xA0 +#define ICSSG_TX_BYTE_OFFSET 0xEC + +static u32 stats_base[] = { 0x54c, /* Slice 0 stats start */ + 0xb18, /* Slice 1 stats start */ +}; + +void emac_update_hardware_stats(struct prueth_emac *emac) +{ + struct prueth *prueth = emac->prueth; + int slice = prueth_emac_slice(emac); + u32 base = stats_base[slice]; + u32 tx_pkt_cnt = 0; + u32 val; + int i; + + for (i = 0; i < 
ARRAY_SIZE(icssg_all_stats); i++) { + regmap_read(prueth->miig_rt, + base + icssg_all_stats[i].offset, + &val); + regmap_write(prueth->miig_rt, + base + icssg_all_stats[i].offset, + val); + + if (icssg_all_stats[i].offset == ICSSG_TX_PACKET_OFFSET) + tx_pkt_cnt = val; + + emac->stats[i] += val; + if (icssg_all_stats[i].offset == ICSSG_TX_BYTE_OFFSET) + emac->stats[i] -= tx_pkt_cnt * 8; + } +} + +void emac_stats_work_handler(struct work_struct *work) +{ + struct prueth_emac *emac = container_of(work, struct prueth_emac, + stats_work.work); + emac_update_hardware_stats(emac); + + queue_delayed_work(system_long_wq, &emac->stats_work, + msecs_to_jiffies((STATS_TIME_LIMIT_1G_MS * 1000) / emac->speed)); +} + +int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++) { + if (!strcmp(icssg_all_stats[i].name, stat_name)) + return emac->stats[icssg_all_stats[i].offset / sizeof(u32)]; + } + + netdev_err(emac->ndev, "Invalid stats %s\n", stat_name); + return -EINVAL; +} diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.h b/drivers/net/ethernet/ti/icssg/icssg_stats.h new file mode 100644 index 000000000000..999a4a91276c --- /dev/null +++ b/drivers/net/ethernet/ti/icssg/icssg_stats.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Texas Instruments ICSSG Ethernet driver + * + * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/ + * + */ + +#ifndef __NET_TI_ICSSG_STATS_H +#define __NET_TI_ICSSG_STATS_H + +#include "icssg_prueth.h" + +#define STATS_TIME_LIMIT_1G_MS 25000 /* 25 seconds @ 1G */ + +struct miig_stats_regs { + /* Rx */ + u32 rx_packets; + u32 rx_broadcast_frames; + u32 rx_multicast_frames; + u32 rx_crc_errors; + u32 rx_mii_error_frames; + u32 rx_odd_nibble_frames; + u32 rx_frame_max_size; + u32 rx_max_size_error_frames; + u32 rx_frame_min_size; + u32 rx_min_size_error_frames; + u32 rx_over_errors; + u32 rx_class0_hits; + u32 rx_class1_hits; + u32 rx_class2_hits; + u32 rx_class3_hits; + u32 rx_class4_hits; + u32 rx_class5_hits; + u32 rx_class6_hits; + u32 rx_class7_hits; + u32 rx_class8_hits; + u32 rx_class9_hits; + u32 rx_class10_hits; + u32 rx_class11_hits; + u32 rx_class12_hits; + u32 rx_class13_hits; + u32 rx_class14_hits; + u32 rx_class15_hits; + u32 rx_smd_frags; + u32 rx_bucket1_size; + u32 rx_bucket2_size; + u32 rx_bucket3_size; + u32 rx_bucket4_size; + u32 rx_64B_frames; + u32 rx_bucket1_frames; + u32 rx_bucket2_frames; + u32 rx_bucket3_frames; + u32 rx_bucket4_frames; + u32 rx_bucket5_frames; + u32 rx_bytes; + u32 rx_tx_total_bytes; + /* Tx */ + u32 tx_packets; + u32 tx_broadcast_frames; + u32 tx_multicast_frames; + u32 tx_odd_nibble_frames; + u32 tx_underflow_errors; + u32 tx_frame_max_size; + u32 tx_max_size_error_frames; + u32 tx_frame_min_size; + u32 tx_min_size_error_frames; + u32 tx_bucket1_size; + u32 tx_bucket2_size; + u32 tx_bucket3_size; + u32 tx_bucket4_size; + u32 tx_64B_frames; + u32 tx_bucket1_frames; + u32 tx_bucket2_frames; + u32 tx_bucket3_frames; + u32 tx_bucket4_frames; + u32 tx_bucket5_frames; + u32 tx_bytes; +}; + +#define ICSSG_STATS(field, stats_type) \ +{ \ + #field, \ + offsetof(struct miig_stats_regs, field), \ + stats_type \ +} + +struct icssg_stats { + char name[ETH_GSTRING_LEN]; + u32 offset; + bool standard_stats; +}; + +static const struct icssg_stats icssg_all_stats[] = { + /* Rx */ + ICSSG_STATS(rx_packets, true), + ICSSG_STATS(rx_broadcast_frames, false), + ICSSG_STATS(rx_multicast_frames, true), + ICSSG_STATS(rx_crc_errors, true), 
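+	/* Illustrative expansion (editorial, not in the original header): each
+	 * ICSSG_STATS() initializer stringifies the field name and records its
+	 * offset inside struct miig_stats_regs, so the first entry above is
+	 * equivalent to:
+	 *
+	 *   { .name = "rx_packets",
+	 *     .offset = offsetof(struct miig_stats_regs, rx_packets), // == 0
+	 *     .standard_stats = true },
+	 */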
+ ICSSG_STATS(rx_mii_error_frames, false), + ICSSG_STATS(rx_odd_nibble_frames, false), + ICSSG_STATS(rx_frame_max_size, true), + ICSSG_STATS(rx_max_size_error_frames, false), + ICSSG_STATS(rx_frame_min_size, true), + ICSSG_STATS(rx_min_size_error_frames, false), + ICSSG_STATS(rx_over_errors, true), + ICSSG_STATS(rx_class0_hits, false), + ICSSG_STATS(rx_class1_hits, false), + ICSSG_STATS(rx_class2_hits, false), + ICSSG_STATS(rx_class3_hits, false), + ICSSG_STATS(rx_class4_hits, false), + ICSSG_STATS(rx_class5_hits, false), + ICSSG_STATS(rx_class6_hits, false), + ICSSG_STATS(rx_class7_hits, false), + ICSSG_STATS(rx_class8_hits, false), + ICSSG_STATS(rx_class9_hits, false), + ICSSG_STATS(rx_class10_hits, false), + ICSSG_STATS(rx_class11_hits, false), + ICSSG_STATS(rx_class12_hits, false), + ICSSG_STATS(rx_class13_hits, false), + ICSSG_STATS(rx_class14_hits, false), + ICSSG_STATS(rx_class15_hits, false), + ICSSG_STATS(rx_smd_frags, false), + ICSSG_STATS(rx_bucket1_size, true), + ICSSG_STATS(rx_bucket2_size, true), + ICSSG_STATS(rx_bucket3_size, true), + ICSSG_STATS(rx_bucket4_size, true), + ICSSG_STATS(rx_64B_frames, true), + ICSSG_STATS(rx_bucket1_frames, true), + ICSSG_STATS(rx_bucket2_frames, true), + ICSSG_STATS(rx_bucket3_frames, true), + ICSSG_STATS(rx_bucket4_frames, true), + ICSSG_STATS(rx_bucket5_frames, true), + ICSSG_STATS(rx_bytes, true), + ICSSG_STATS(rx_tx_total_bytes, false), + /* Tx */ + ICSSG_STATS(tx_packets, true), + ICSSG_STATS(tx_broadcast_frames, false), + ICSSG_STATS(tx_multicast_frames, false), + ICSSG_STATS(tx_odd_nibble_frames, false), + ICSSG_STATS(tx_underflow_errors, false), + ICSSG_STATS(tx_frame_max_size, true), + ICSSG_STATS(tx_max_size_error_frames, false), + ICSSG_STATS(tx_frame_min_size, true), + ICSSG_STATS(tx_min_size_error_frames, false), + ICSSG_STATS(tx_bucket1_size, true), + ICSSG_STATS(tx_bucket2_size, true), + ICSSG_STATS(tx_bucket3_size, true), + ICSSG_STATS(tx_bucket4_size, true), + ICSSG_STATS(tx_64B_frames, true), + ICSSG_STATS(tx_bucket1_frames, true), + ICSSG_STATS(tx_bucket2_frames, true), + ICSSG_STATS(tx_bucket3_frames, true), + ICSSG_STATS(tx_bucket4_frames, true), + ICSSG_STATS(tx_bucket5_frames, true), + ICSSG_STATS(tx_bytes, true), +}; + +#endif /* __NET_TI_ICSSG_STATS_H */ diff --git a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h new file mode 100644 index 000000000000..424a7e945ea8 --- /dev/null +++ b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h @@ -0,0 +1,234 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Texas Instruments ICSSG Ethernet driver + * + * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com/ + * + */ + +#ifndef __NET_TI_ICSSG_SWITCH_MAP_H +#define __NET_TI_ICSSG_SWITCH_MAP_H + +/************************* Ethernet Switch Constants *********************/ + +/* if bucket size is changed in firmware then this too should be changed + * because it directly impacts FDB ageing calculation + */ +#define NUMBER_OF_FDB_BUCKET_ENTRIES (4) + +/* This is fixed in ICSSG */ +#define SIZE_OF_FDB (2048) + +#define FW_LINK_SPEED_1G (0x00) +#define FW_LINK_SPEED_100M (0x01) +#define FW_LINK_SPEED_10M (0x02) +#define FW_LINK_SPEED_HD (0x80) + +/* Time after which FDB entries are checked for aged out values. 
+ * Values are in nanoseconds. + */ +#define FDB_AGEING_TIMEOUT_OFFSET 0x0014 + +/* Default VLAN tag for Host Port */ +#define HOST_PORT_DF_VLAN_OFFSET 0x001C + +/* Same as HOST_PORT_DF_VLAN_OFFSET */ +#define EMAC_ICSSG_SWITCH_PORT0_DEFAULT_VLAN_OFFSET HOST_PORT_DF_VLAN_OFFSET + +/* Default VLAN tag for P1 Port */ +#define P1_PORT_DF_VLAN_OFFSET 0x0020 + +/* Same as P1_PORT_DF_VLAN_OFFSET */ +#define EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET P1_PORT_DF_VLAN_OFFSET + +/* default VLAN tag for P2 Port */ +#define P2_PORT_DF_VLAN_OFFSET 0x0024 + +/* Same as P2_PORT_DF_VLAN_OFFSET */ +#define EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET P2_PORT_DF_VLAN_OFFSET + +/* VLAN-FID Table offset. 4096 VIDs. 2B per VID = 8KB = 0x2000 */ +#define VLAN_STATIC_REG_TABLE_OFFSET 0x0100 + +/* VLAN-FID Table offset for EMAC */ +#define EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET VLAN_STATIC_REG_TABLE_OFFSET + +/* Packet descriptor Q reserved memory */ +#define PORT_DESC0_HI 0x2104 + +/* Packet descriptor Q reserved memory */ +#define PORT_DESC0_LO 0x2F6C + +/* Packet descriptor Q reserved memory */ +#define PORT_DESC1_HI 0x3DD4 + +/* Packet descriptor Q reserved memory */ +#define PORT_DESC1_LO 0x4C3C + +/* Packet descriptor Q reserved memory */ +#define HOST_DESC0_HI 0x5AA4 + +/* Packet descriptor Q reserved memory */ +#define HOST_DESC0_LO 0x5F0C + +/* Packet descriptor Q reserved memory */ +#define HOST_DESC1_HI 0x6374 + +/* Packet descriptor Q reserved memory */ +#define HOST_DESC1_LO 0x67DC + +/* Special packet descriptor Q reserved memory */ +#define HOST_SPPD0 0x7AAC + +/* Special packet descriptor Q reserved memory */ +#define HOST_SPPD1 0x7EAC + +/* IEP count cycle counter */ +#define TIMESYNC_FW_WC_CYCLECOUNT_OFFSET 0x83EC + +/* IEP count hi roll over count */ +#define TIMESYNC_FW_WC_HI_ROLLOVER_COUNT_OFFSET 0x83F4 + +/* IEP count hi sw counter */ +#define TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET 0x83F8 + +/* Set clock descriptor */ +#define TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET 0x83FC + +/* IEP count syncout reduction factor */ +#define TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET 0x843C + +/* IEP count syncout reduction counter */ +#define TIMESYNC_FW_WC_SYNCOUT_REDUCTION_COUNT_OFFSET 0x8440 + +/* IEP count syncout start time cycle counter */ +#define TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET 0x8444 + +/* Control variable to generate SYNC1 */ +#define TIMESYNC_FW_WC_ISOM_PIN_SIGNAL_EN_OFFSET 0x844C + +/* SystemTime Sync0 periodicity */ +#define TIMESYNC_FW_ST_SYNCOUT_PERIOD_OFFSET 0x8450 + +/* pktTxDelay for P1 = link speed dependent p1 mac delay + p1 phy delay */ +#define TIMESYNC_FW_WC_PKTTXDELAY_P1_OFFSET 0x8454 + +/* pktTxDelay for P2 = link speed dependent p2 mac delay + p2 phy delay */ +#define TIMESYNC_FW_WC_PKTTXDELAY_P2_OFFSET 0x8458 + +/* Set clock operation done signal for next task */ +#define TIMESYNC_FW_SIG_PNFW_OFFSET 0x845C + +/* Set clock operation done signal for next task */ +#define TIMESYNC_FW_SIG_TIMESYNCFW_OFFSET 0x8460 + +/* New list is copied at this time */ +#define TAS_CONFIG_CHANGE_TIME 0x000C + +/* config change error counter */ +#define TAS_CONFIG_CHANGE_ERROR_COUNTER 0x0014 + +/* TAS List update pending flag */ +#define TAS_CONFIG_PENDING 0x0018 + +/* TAS list update trigger flag */ +#define TAS_CONFIG_CHANGE 0x0019 + +/* List length for new TAS schedule */ +#define TAS_ADMIN_LIST_LENGTH 0x001A + +/* Currently active TAS list index */ +#define TAS_ACTIVE_LIST_INDEX 0x001B + +/* Cycle time for the new TAS schedule */ +#define TAS_ADMIN_CYCLE_TIME 0x001C + +/* 
Cycle counts remaining till the TAS list update */ +#define TAS_CONFIG_CHANGE_CYCLE_COUNT 0x0020 + +/* Base Flow ID for sending Packets to Host for Slice0 */ +#define PSI_L_REGULAR_FLOW_ID_BASE_OFFSET 0x0024 + +/* Same as PSI_L_REGULAR_FLOW_ID_BASE_OFFSET */ +#define EMAC_ICSSG_SWITCH_PSI_L_REGULAR_FLOW_ID_BASE_OFFSET PSI_L_REGULAR_FLOW_ID_BASE_OFFSET + +/* Base Flow ID for sending mgmt and Tx TS to Host for Slice0 */ +#define PSI_L_MGMT_FLOW_ID_OFFSET 0x0026 + +/* Same as PSI_L_MGMT_FLOW_ID_OFFSET */ +#define EMAC_ICSSG_SWITCH_PSI_L_MGMT_FLOW_ID_BASE_OFFSET PSI_L_MGMT_FLOW_ID_OFFSET + +/* Queue number for Special Packets written here */ +#define SPL_PKT_DEFAULT_PRIORITY 0x0028 + +/* Express Preemptible Queue Mask */ +#define EXPRESS_PRE_EMPTIVE_Q_MASK 0x0029 + +/* Port1/Port2 Default Queue number for untagged Packets, only 1B is used */ +#define QUEUE_NUM_UNTAGGED 0x002A + +/* Stores the table used for priority regeneration. 1B per PCP/Queue */ +#define PORT_Q_PRIORITY_REGEN_OFFSET 0x002C + +/* For marking a packet as priority/express (this feature is disabled) or + * cut-through/S&F. + */ +#define EXPRESS_PRE_EMPTIVE_Q_MAP 0x0034 + +/* Stores the table used for priority mapping. 1B per PCP/Queue */ +#define PORT_Q_PRIORITY_MAPPING_OFFSET 0x003C + +/* Used to notify the FW of the current link speed */ +#define PORT_LINK_SPEED_OFFSET 0x00A8 + +/* TAS gate mask for windows list0 */ +#define TAS_GATE_MASK_LIST0 0x0100 + +/* TAS gate mask for windows list1 */ +#define TAS_GATE_MASK_LIST1 0x0350 + +/* Memory to Enable/Disable Preemption on TX side */ +#define PRE_EMPTION_ENABLE_TX 0x05A0 + +/* Active State of Preemption on TX side */ +#define PRE_EMPTION_ACTIVE_TX 0x05A1 + +/* Memory to Enable/Disable Verify State Machine Preemption */ +#define PRE_EMPTION_ENABLE_VERIFY 0x05A2 + +/* Verify Status of State Machine */ +#define PRE_EMPTION_VERIFY_STATUS 0x05A3 + +/* Non Final Fragment Size supported by Link Partner */ +#define PRE_EMPTION_ADD_FRAG_SIZE_REMOTE 0x05A4 + +/* Non Final Fragment Size supported by Firmware */ +#define PRE_EMPTION_ADD_FRAG_SIZE_LOCAL 0x05A6 + +/* Time in ms the state machine waits for a response packet */ +#define PRE_EMPTION_VERIFY_TIME 0x05A8 + +/* Memory used for R30 related management commands */ +#define MGR_R30_CMD_OFFSET 0x05AC + +/* HW Buffer Pool0 base address */ +#define BUFFER_POOL_0_ADDR_OFFSET 0x05BC + +/* 16B for Host Egress MSMC Q (Pre-emptible) context */ +#define HOST_RX_Q_PRE_CONTEXT_OFFSET 0x0684 + +/* Buffer for 8 FDB entries to be added by 'Add Multiple FDB entries IOCTL' */ +#define FDB_CMD_BUFFER 0x0894 + +/* TAS queue max sdu length list */ +#define TAS_QUEUE_MAX_SDU_LIST 0x08FA + +/* Used by FW to generate random number with the SEED value */ +#define HD_RAND_SEED_OFFSET 0x0934 + +/* 16B for Host Egress MSMC Q (Express) context */ +#define HOST_RX_Q_EXP_CONTEXT_OFFSET 0x0940 + +/* Start of 32 bits PA_STAT counters */ +#define PA_STAT_32b_START_OFFSET 0x0080 + +#endif /* __NET_TI_ICSSG_SWITCH_MAP_H */ diff --git a/drivers/net/ethernet/ti/k3-cppi-desc-pool.c b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c index 38cc12f9f133..05cc7aab1ec8 100644 --- a/drivers/net/ethernet/ti/k3-cppi-desc-pool.c +++ b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c @@ -39,6 +39,7 @@ void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool) gen_pool_destroy(pool->gen_pool); /* frees pool->name */ } +EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_destroy); struct k3_cppi_desc_pool * k3_cppi_desc_pool_create_name(struct device *dev, size_t size, @@ -98,29 +99,38 @@ 
gen_pool_create_fail: devm_kfree(pool->dev, pool); return ERR_PTR(ret); } +EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_create_name); dma_addr_t k3_cppi_desc_pool_virt2dma(struct k3_cppi_desc_pool *pool, void *addr) { return addr ? pool->dma_addr + (addr - pool->cpumem) : 0; } +EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_virt2dma); void *k3_cppi_desc_pool_dma2virt(struct k3_cppi_desc_pool *pool, dma_addr_t dma) { return dma ? pool->cpumem + (dma - pool->dma_addr) : NULL; } +EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_dma2virt); void *k3_cppi_desc_pool_alloc(struct k3_cppi_desc_pool *pool) { return (void *)gen_pool_alloc(pool->gen_pool, pool->desc_size); } +EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_alloc); void k3_cppi_desc_pool_free(struct k3_cppi_desc_pool *pool, void *addr) { gen_pool_free(pool->gen_pool, (unsigned long)addr, pool->desc_size); } +EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_free); size_t k3_cppi_desc_pool_avail(struct k3_cppi_desc_pool *pool) { return gen_pool_avail(pool->gen_pool) / pool->desc_size; } +EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_avail); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("TI K3 CPPI5 descriptors pool API"); diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h index 43d5cd59b56b..7007eb8bed36 100644 --- a/drivers/net/ethernet/ti/netcp.h +++ b/drivers/net/ethernet/ti/netcp.h @@ -233,8 +233,6 @@ int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order, netcp_hook_rtn *hook_rtn, void *hook_data); int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order, netcp_hook_rtn *hook_rtn, void *hook_data); -void *netcp_device_find_module(struct netcp_device *netcp_device, - const char *name); /* SGMII functions */ int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port); diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index d829113c16ee..11b90e1da0c6 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -2228,7 +2228,7 @@ probe_quit: return ret; } -static int netcp_remove(struct platform_device *pdev) +static void netcp_remove(struct platform_device *pdev) { struct netcp_device *netcp_device = platform_get_drvdata(pdev); struct netcp_intf *netcp_intf, *netcp_tmp; @@ -2256,7 +2256,6 @@ static int netcp_remove(struct platform_device *pdev) pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); platform_set_drvdata(pdev, NULL); - return 0; } static const struct of_device_id of_match[] = { @@ -2271,7 +2270,7 @@ static struct platform_driver netcp_driver = { .of_match_table = of_match, }, .probe = netcp_probe, - .remove = netcp_remove, + .remove_new = netcp_remove, }; module_platform_driver(netcp_driver); diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 2adf82a32bf6..02cb6474f6dc 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -1735,8 +1735,8 @@ static const struct netcp_ethtool_stat xgbe10_et_stats[] = { static void keystone_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { - strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver)); - strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version)); + strscpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver)); + strscpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version)); } static u32 keystone_get_msglevel(struct net_device *ndev) diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c index dc14a66583ff..44488c153ea2 100644 
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c @@ -1217,7 +1217,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev, key_index = wl->current_key; if (!enc->length && (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)) { - /* reques to change default key index */ + /* request to change default key index */ pr_debug("%s: request to change default key to %d\n", __func__, key_index); wl->current_key = key_index; diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 50d7eacfec58..87e67121477c 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -2332,7 +2332,7 @@ spider_net_alloc_card(void) struct spider_net_card *card; netdev = alloc_etherdev(struct_size(card, darray, - tx_descriptors + rx_descriptors)); + size_add(tx_descriptors, rx_descriptors))); if (!netdev) return NULL; diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index b50be67b398b..6e3758dfbdbd 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -667,8 +667,7 @@ static int tc_mii_init(struct net_device *dev) lp->mii_bus->name = "tc35815_mii_bus"; lp->mii_bus->read = tc_mdio_read; lp->mii_bus->write = tc_mdio_write; - snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", - (lp->pci_dev->bus->number << 8) | lp->pci_dev->devfn); + snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(lp->pci_dev)); lp->mii_bus->priv = dev; lp->mii_bus->parent = &lp->pci_dev->dev; err = mdiobus_register(lp->mii_bus); @@ -1435,14 +1434,10 @@ static irqreturn_t tc35815_interrupt(int irq, void *dev_id) u32 dmactl = tc_readl(&tr->DMA_Ctl); if (!(dmactl & DMA_IntMask)) { - /* disable interrupts */ - tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl); - if (napi_schedule_prep(&lp->napi)) + if (napi_schedule_prep(&lp->napi)) { + /* disable interrupts */ + tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl); __napi_schedule(&lp->napi); - else { - printk(KERN_ERR "%s: interrupt taken in poll\n", - dev->name); - BUG(); } (void)tc_readl(&tr->Int_Src); /* flush */ return IRQ_HANDLED; diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index d09d352e1c0a..554aff7c8f3b 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -1660,7 +1660,7 @@ static void tsi108_timed_checker(struct timer_list *t) mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL); } -static int tsi108_ether_remove(struct platform_device *pdev) +static void tsi108_ether_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct tsi108_prv_data *priv = netdev_priv(dev); @@ -1670,15 +1670,13 @@ static int tsi108_ether_remove(struct platform_device *pdev) iounmap(priv->regs); iounmap(priv->phyregs); free_netdev(dev); - - return 0; } /* Structure for a device driver */ static struct platform_driver tsi_eth_driver = { .probe = tsi108_init_one, - .remove = tsi108_ether_remove, + .remove_new = tsi108_ether_remove, .driver = { .name = "tsi-ethernet", }, diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index d716e6fe26e1..e80c02948801 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -94,7 +94,7 @@ static const int multicast_filter_limit = 32; #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/pci.h> -#include 
<linux/of_device.h> +#include <linux/of.h> #include <linux/of_irq.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> @@ -2443,7 +2443,7 @@ static void rhine_remove_one_pci(struct pci_dev *pdev) pci_disable_device(pdev); } -static int rhine_remove_one_platform(struct platform_device *pdev) +static void rhine_remove_one_platform(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct rhine_private *rp = netdev_priv(dev); @@ -2453,8 +2453,6 @@ static int rhine_remove_one_platform(struct platform_device *pdev) iounmap(rp->base); free_netdev(dev); - - return 0; } static void rhine_shutdown_pci(struct pci_dev *pdev) @@ -2572,7 +2570,7 @@ static struct pci_driver rhine_driver_pci = { static struct platform_driver rhine_driver_platform = { .probe = rhine_init_one_platform, - .remove = rhine_remove_one_platform, + .remove_new = rhine_remove_one_platform, .driver = { .name = DRV_NAME, .of_match_table = rhine_of_tbl, diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 86f7843b4591..1c6b2a9bba08 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -57,8 +57,8 @@ #include <linux/if.h> #include <linux/uaccess.h> #include <linux/proc_fs.h> +#include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/inetdevice.h> #include <linux/platform_device.h> @@ -2957,11 +2957,9 @@ static int velocity_platform_probe(struct platform_device *pdev) return velocity_probe(&pdev->dev, irq, info, BUS_PLATFORM); } -static int velocity_platform_remove(struct platform_device *pdev) +static void velocity_platform_remove(struct platform_device *pdev) { velocity_remove(&pdev->dev); - - return 0; } #ifdef CONFIG_PM_SLEEP @@ -3249,7 +3247,7 @@ static struct pci_driver velocity_pci_driver = { static struct platform_driver velocity_platform_driver = { .probe = velocity_platform_probe, - .remove = velocity_platform_remove, + .remove_new = velocity_platform_remove, .driver = { .name = "via-velocity", .of_match_table = velocity_of_ids, diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig index 39596cd13539..23cd610bd376 100644 --- a/drivers/net/ethernet/wangxun/Kconfig +++ b/drivers/net/ethernet/wangxun/Kconfig @@ -41,6 +41,7 @@ config TXGBE tristate "Wangxun(R) 10GbE PCI Express adapters support" depends on PCI depends on COMMON_CLK + select MARVELL_10G_PHY select REGMAP select I2C select I2C_DESIGNWARE_PLATFORM diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index 93cb6f2294e7..ddc5f6d20b9c 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -3,9 +3,171 @@ #include <linux/pci.h> #include <linux/phy.h> +#include <linux/ethtool.h> #include "wx_type.h" #include "wx_ethtool.h" +#include "wx_hw.h" + +struct wx_stats { + char stat_string[ETH_GSTRING_LEN]; + size_t sizeof_stat; + off_t stat_offset; +}; + +#define WX_STAT(str, m) { \ + .stat_string = str, \ + .sizeof_stat = sizeof(((struct wx *)0)->m), \ + .stat_offset = offsetof(struct wx, m) } + +static const struct wx_stats wx_gstrings_stats[] = { + WX_STAT("rx_dma_pkts", stats.gprc), + WX_STAT("tx_dma_pkts", stats.gptc), + WX_STAT("rx_dma_bytes", stats.gorc), + WX_STAT("tx_dma_bytes", stats.gotc), + WX_STAT("rx_total_pkts", stats.tpr), + WX_STAT("tx_total_pkts", stats.tpt), + WX_STAT("rx_long_length_count", 
stats.roc), + WX_STAT("rx_short_length_count", stats.ruc), + WX_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + WX_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + WX_STAT("os2bmc_tx_by_host", stats.o2bspc), + WX_STAT("os2bmc_rx_by_host", stats.b2ogprc), + WX_STAT("rx_no_dma_resources", stats.rdmdrop), + WX_STAT("tx_busy", tx_busy), + WX_STAT("non_eop_descs", non_eop_descs), + WX_STAT("tx_restart_queue", restart_queue), + WX_STAT("rx_csum_offload_good_count", hw_csum_rx_good), + WX_STAT("rx_csum_offload_errors", hw_csum_rx_error), + WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), +}; + +/* The driver allocates num_tx_queues and num_rx_queues symmetrically, so + * we define WX_NUM_RX_QUEUES to evaluate to num_tx_queues. This is + * done because we do not have a good way to get the max number of + * RX queues with CONFIG_RPS disabled. + */ +#define WX_NUM_RX_QUEUES netdev->num_tx_queues +#define WX_NUM_TX_QUEUES netdev->num_tx_queues + +#define WX_QUEUE_STATS_LEN ( \ + (WX_NUM_TX_QUEUES + WX_NUM_RX_QUEUES) * \ + (sizeof(struct wx_queue_stats) / sizeof(u64))) +#define WX_GLOBAL_STATS_LEN ARRAY_SIZE(wx_gstrings_stats) +#define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN) + +int wx_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return WX_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} +EXPORT_SYMBOL(wx_get_sset_count); + +void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) + ethtool_sprintf(&p, wx_gstrings_stats[i].stat_string); + for (i = 0; i < netdev->num_tx_queues; i++) { + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + } + for (i = 0; i < WX_NUM_RX_QUEUES; i++) { + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); + } + break; + } +} +EXPORT_SYMBOL(wx_get_strings); + +void wx_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct wx *wx = netdev_priv(netdev); + struct wx_ring *ring; + unsigned int start; + int i, j; + char *p; + + wx_update_stats(wx); + + for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) { + p = (char *)wx + wx_gstrings_stats[i].stat_offset; + data[i] = (wx_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < netdev->num_tx_queues; j++) { + ring = wx->tx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + + do { + start = u64_stats_fetch_begin(&ring->syncp); + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + i += 2; + } + for (j = 0; j < WX_NUM_RX_QUEUES; j++) { + ring = wx->rx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + + do { + start = u64_stats_fetch_begin(&ring->syncp); + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + i += 2; + } +} +EXPORT_SYMBOL(wx_get_ethtool_stats); + +void wx_get_mac_stats(struct net_device *netdev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct wx *wx = netdev_priv(netdev); + struct wx_hw_stats *hwstats; + + wx_update_stats(wx); + + hwstats = &wx->stats; + mac_stats->MulticastFramesXmittedOK = hwstats->mptc; + mac_stats->BroadcastFramesXmittedOK = hwstats->bptc; + mac_stats->MulticastFramesReceivedOK = hwstats->mprc; + mac_stats->BroadcastFramesReceivedOK = hwstats->bprc; +} +EXPORT_SYMBOL(wx_get_mac_stats); + +void wx_get_pause_stats(struct net_device *netdev, + struct ethtool_pause_stats *stats) +{ + struct wx *wx = netdev_priv(netdev); + struct wx_hw_stats *hwstats; + + wx_update_stats(wx); + + hwstats = &wx->stats; + stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc; + stats->rx_pause_frames = hwstats->lxonoffrxc; +} +EXPORT_SYMBOL(wx_get_pause_stats); void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { @@ -14,5 +176,12 @@ void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) strscpy(info->driver, wx->driver_name, sizeof(info->driver)); strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version)); strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info)); + if (wx->num_tx_queues <= WX_NUM_TX_QUEUES) { + info->n_stats = WX_STATS_LEN - + (WX_NUM_TX_QUEUES - wx->num_tx_queues) * + (sizeof(struct wx_queue_stats) / sizeof(u64)) * 2; + } else { + info->n_stats = WX_STATS_LEN; + } } EXPORT_SYMBOL(wx_get_drvinfo); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h index e85538c69454..16d1a09369a6 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h @@ -4,5 +4,13 @@ #ifndef _WX_ETHTOOL_H_ #define _WX_ETHTOOL_H_ +int wx_get_sset_count(struct net_device *netdev, int sset); +void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data); +void wx_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data); +void wx_get_mac_stats(struct net_device *netdev, + struct ethtool_eth_mac_stats *mac_stats); +void wx_get_pause_stats(struct net_device *netdev, + struct ethtool_pause_stats *stats); void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info); #endif /* _WX_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index 6321178fc814..533e912af089 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -12,6 +12,98 @@ #include "wx_lib.h" #include "wx_hw.h" +static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum) +{ + struct wx *wx = bus->priv; + u32 command, val; + int ret; + + /* setup and write the address cycle command */ + 
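+	/* Editorial sketch (not part of the original patch): both MDI helpers
+	 * here follow the same handshake --
+	 *
+	 *   wr32(wx, WX_MSCA, WX_MSCA_RA(regnum) | WX_MSCA_PA(phy_addr) |
+	 *                     WX_MSCA_DA(devnum));   // address cycle
+	 *   wr32(wx, WX_MSCC, op | WX_MSCC_BUSY);    // latch command, set busy
+	 *   read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY),
+	 *                     1000, 100000, false, wx, WX_MSCC);
+	 *
+	 * where op is WX_MSCC_CMD(WX_MSCA_CMD_READ) for reads or the data
+	 * value plus WX_MSCC_CMD(WX_MSCA_CMD_WRITE) for writes.
+	 */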
command = WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(devnum); + wr32(wx, WX_MSCA, command); + + command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | WX_MSCC_BUSY; + if (wx->mac.type == wx_mac_em) + command |= WX_MDIO_CLK(6); + wr32(wx, WX_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); + if (ret) { + wx_err(wx, "Mdio read c22 command did not complete.\n"); + return ret; + } + + return (u16)rd32(wx, WX_MSCC); +} + +static int wx_phy_write_reg_mdi(struct mii_bus *bus, int phy_addr, + int devnum, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + u32 command, val; + int ret; + + /* setup and write the address cycle command */ + command = WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(devnum); + wr32(wx, WX_MSCA, command); + + command = value | WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | WX_MSCC_BUSY; + if (wx->mac.type == wx_mac_em) + command |= WX_MDIO_CLK(6); + wr32(wx, WX_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); + if (ret) + wx_err(wx, "Mdio write c22 command did not complete.\n"); + + return ret; +} + +int wx_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF); + return wx_phy_read_reg_mdi(bus, phy_addr, 0, regnum); +} +EXPORT_SYMBOL(wx_phy_read_reg_mdi_c22); + +int wx_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF); + return wx_phy_write_reg_mdi(bus, phy_addr, 0, regnum, value); +} +EXPORT_SYMBOL(wx_phy_write_reg_mdi_c22); + +int wx_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0); + return wx_phy_read_reg_mdi(bus, phy_addr, devnum, regnum); +} +EXPORT_SYMBOL(wx_phy_read_reg_mdi_c45); + +int wx_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, + int devnum, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0); + return wx_phy_write_reg_mdi(bus, phy_addr, devnum, regnum, value); +} +EXPORT_SYMBOL(wx_phy_write_reg_mdi_c45); + static void wx_intr_disable(struct wx *wx, u64 qmask) { u32 mask; @@ -432,71 +524,6 @@ out: EXPORT_SYMBOL(wx_read_ee_hostif_buffer); /** - * wx_calculate_checksum - Calculate checksum for buffer - * @buffer: pointer to EEPROM - * @length: size of EEPROM to calculate a checksum for - * Calculates the checksum for some buffer on a specified length. The - * checksum calculated is returned. - **/ -static u8 wx_calculate_checksum(u8 *buffer, u32 length) -{ - u8 sum = 0; - u32 i; - - if (!buffer) - return 0; - - for (i = 0; i < length; i++) - sum += buffer[i]; - - return (u8)(0 - sum); -} - -/** - * wx_reset_hostif - send reset cmd to fw - * @wx: pointer to hardware structure - * - * Sends reset cmd to firmware through the manageability - * block. 
- **/ -int wx_reset_hostif(struct wx *wx) -{ - struct wx_hic_reset reset_cmd; - int ret_val = 0; - int i; - - reset_cmd.hdr.cmd = FW_RESET_CMD; - reset_cmd.hdr.buf_len = FW_RESET_LEN; - reset_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; - reset_cmd.lan_id = wx->bus.func; - reset_cmd.reset_type = (u16)wx->reset_type; - reset_cmd.hdr.checksum = 0; - reset_cmd.hdr.checksum = wx_calculate_checksum((u8 *)&reset_cmd, - (FW_CEM_HDR_LEN + - reset_cmd.hdr.buf_len)); - - for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { - ret_val = wx_host_interface_command(wx, (u32 *)&reset_cmd, - sizeof(reset_cmd), - WX_HI_COMMAND_TIMEOUT, - true); - if (ret_val != 0) - continue; - - if (reset_cmd.hdr.cmd_or_resp.ret_status == - FW_CEM_RESP_STATUS_SUCCESS) - ret_val = 0; - else - ret_val = -EFAULT; - - break; - } - - return ret_val; -} -EXPORT_SYMBOL(wx_reset_hostif); - -/** * wx_init_eeprom_params - Initialize EEPROM params * @wx: pointer to hardware structure * @@ -1501,7 +1528,7 @@ static void wx_restore_vlan(struct wx *wx) * * Configure the Rx unit of the MAC after a reset. **/ -static void wx_configure_rx(struct wx *wx) +void wx_configure_rx(struct wx *wx) { u32 psrtype, i; int ret; @@ -1544,6 +1571,7 @@ static void wx_configure_rx(struct wx *wx) wx_enable_rx(wx); wx_enable_sec_rx_path(wx); } +EXPORT_SYMBOL(wx_configure_rx); static void wx_configure_isb(struct wx *wx) { @@ -1741,10 +1769,12 @@ int wx_sw_init(struct wx *wx) wx->subsystem_device_id = pdev->subsystem_device; } else { err = wx_flash_read_dword(wx, 0xfffdc, &ssid); - if (!err) - wx->subsystem_device_id = swab16((u16)ssid); + if (err < 0) { + wx_err(wx, "read of internal subsystem device id failed\n"); + return err; + } - return err; + wx->subsystem_device_id = swab16((u16)ssid); } wx->mac_table = kcalloc(wx->mac.num_rar_entries, @@ -1974,6 +2004,105 @@ int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) EXPORT_SYMBOL(wx_vlan_rx_kill_vid); /** + * wx_update_stats - Update the board statistics counters. 
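+ *
+ * (Editorial note: the MAC counters read here are clear-on-read, which is
+ * why each rd32()/rd64() result below is accumulated into struct
+ * wx_hw_stats with "+=" rather than assigned.)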
+ * @wx: board private structure + **/ +void wx_update_stats(struct wx *wx) +{ + struct wx_hw_stats *hwstats = &wx->stats; + + u64 non_eop_descs = 0, alloc_rx_buff_failed = 0; + u64 hw_csum_rx_good = 0, hw_csum_rx_error = 0; + u64 restart_queue = 0, tx_busy = 0; + u32 i; + + /* gather some stats to the wx struct that are per queue */ + for (i = 0; i < wx->num_rx_queues; i++) { + struct wx_ring *rx_ring = wx->rx_ring[i]; + + non_eop_descs += rx_ring->rx_stats.non_eop_descs; + alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; + hw_csum_rx_good += rx_ring->rx_stats.csum_good_cnt; + hw_csum_rx_error += rx_ring->rx_stats.csum_err; + } + wx->non_eop_descs = non_eop_descs; + wx->alloc_rx_buff_failed = alloc_rx_buff_failed; + wx->hw_csum_rx_error = hw_csum_rx_error; + wx->hw_csum_rx_good = hw_csum_rx_good; + + for (i = 0; i < wx->num_tx_queues; i++) { + struct wx_ring *tx_ring = wx->tx_ring[i]; + + restart_queue += tx_ring->tx_stats.restart_queue; + tx_busy += tx_ring->tx_stats.tx_busy; + } + wx->restart_queue = restart_queue; + wx->tx_busy = tx_busy; + + hwstats->gprc += rd32(wx, WX_RDM_PKT_CNT); + hwstats->gptc += rd32(wx, WX_TDM_PKT_CNT); + hwstats->gorc += rd64(wx, WX_RDM_BYTE_CNT_LSB); + hwstats->gotc += rd64(wx, WX_TDM_BYTE_CNT_LSB); + hwstats->tpr += rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L); + hwstats->tpt += rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L); + hwstats->crcerrs += rd64(wx, WX_RX_CRC_ERROR_FRAMES_L); + hwstats->rlec += rd64(wx, WX_RX_LEN_ERROR_FRAMES_L); + hwstats->bprc += rd64(wx, WX_RX_BC_FRAMES_GOOD_L); + hwstats->bptc += rd64(wx, WX_TX_BC_FRAMES_GOOD_L); + hwstats->mprc += rd64(wx, WX_RX_MC_FRAMES_GOOD_L); + hwstats->mptc += rd64(wx, WX_TX_MC_FRAMES_GOOD_L); + hwstats->roc += rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD); + hwstats->ruc += rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD); + hwstats->lxonoffrxc += rd32(wx, WX_MAC_LXONOFFRXC); + hwstats->lxontxc += rd32(wx, WX_RDB_LXONTXC); + hwstats->lxofftxc += rd32(wx, WX_RDB_LXOFFTXC); + hwstats->o2bgptc += rd32(wx, WX_TDM_OS2BMC_CNT); + hwstats->b2ospc += rd32(wx, WX_MNG_BMC2OS_CNT); + hwstats->o2bspc += rd32(wx, WX_MNG_OS2BMC_CNT); + hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT); + hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT); + + for (i = 0; i < wx->mac.max_rx_queues; i++) + hwstats->qmprc += rd32(wx, WX_PX_MPRC(i)); +} +EXPORT_SYMBOL(wx_update_stats); + +/** + * wx_clear_hw_cntrs - Generic clear hardware counters + * @wx: board private structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. 
+ **/ +void wx_clear_hw_cntrs(struct wx *wx) +{ + u16 i = 0; + + for (i = 0; i < wx->mac.max_rx_queues; i++) + wr32(wx, WX_PX_MPRC(i), 0); + + rd32(wx, WX_RDM_PKT_CNT); + rd32(wx, WX_TDM_PKT_CNT); + rd64(wx, WX_RDM_BYTE_CNT_LSB); + rd32(wx, WX_TDM_BYTE_CNT_LSB); + rd32(wx, WX_RDM_DRP_PKT); + rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD); + rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD); + rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L); + rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L); + rd64(wx, WX_RX_MC_FRAMES_GOOD_L); + rd64(wx, WX_TX_MC_FRAMES_GOOD_L); + rd64(wx, WX_RX_BC_FRAMES_GOOD_L); + rd64(wx, WX_TX_BC_FRAMES_GOOD_L); + rd64(wx, WX_RX_CRC_ERROR_FRAMES_L); + rd64(wx, WX_RX_LEN_ERROR_FRAMES_L); + rd32(wx, WX_RDB_LXONTXC); + rd32(wx, WX_RDB_LXOFFTXC); + rd32(wx, WX_MAC_LXONOFFRXC); +} +EXPORT_SYMBOL(wx_clear_hw_cntrs); + +/** * wx_start_hw - Prepare hardware for Tx/Rx * @wx: pointer to hardware structure * diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h index 1f93ca32c921..12c20a7c364d 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h @@ -4,6 +4,13 @@ #ifndef _WX_HW_H_ #define _WX_HW_H_ +#include <linux/phy.h> + +int wx_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum); +int wx_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value); +int wx_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum); +int wx_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, + int devnum, int regnum, u16 value); void wx_intr_enable(struct wx *wx, u64 qmask); void wx_irq_disable(struct wx *wx); int wx_check_flash_load(struct wx *wx, u32 check_bit); @@ -14,7 +21,6 @@ int wx_host_interface_command(struct wx *wx, u32 *buffer, int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data); int wx_read_ee_hostif_buffer(struct wx *wx, u16 offset, u16 words, u16 *data); -int wx_reset_hostif(struct wx *wx); void wx_init_eeprom_params(struct wx *wx); void wx_get_mac_addr(struct wx *wx, u8 *mac_addr); void wx_init_rx_addrs(struct wx *wx); @@ -25,6 +31,7 @@ void wx_disable_rx(struct wx *wx); void wx_set_rx_mode(struct net_device *netdev); int wx_change_mtu(struct net_device *netdev, int new_mtu); void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring); +void wx_configure_rx(struct wx *wx); void wx_configure(struct wx *wx); void wx_start_hw(struct wx *wx); int wx_disable_pcie_master(struct wx *wx); @@ -34,5 +41,7 @@ int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count); int wx_sw_init(struct wx *wx); int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid); int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); +void wx_update_stats(struct wx *wx); +void wx_clear_hw_cntrs(struct wx *wx); #endif /* _WX_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index 2c3f08be8c37..347d3cec02a3 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -3,7 +3,7 @@ #include <linux/etherdevice.h> #include <net/ip6_checksum.h> -#include <net/page_pool.h> +#include <net/page_pool/helpers.h> #include <net/inet_ecn.h> #include <linux/iopoll.h> #include <linux/sctp.h> @@ -160,60 +160,6 @@ static __le32 wx_test_staterr(union wx_rx_desc *rx_desc, return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); } -static bool wx_can_reuse_rx_page(struct wx_rx_buffer *rx_buffer, - int rx_buffer_pgcnt) -{ - unsigned int 
pagecnt_bias = rx_buffer->pagecnt_bias; - struct page *page = rx_buffer->page; - - /* avoid re-using remote and pfmemalloc pages */ - if (!dev_page_is_reusable(page)) - return false; - -#if (PAGE_SIZE < 8192) - /* if we are only owner of page we can reuse it */ - if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) - return false; -#endif - - /* If we have drained the page fragment pool we need to update - * the pagecnt_bias and page count so that we fully restock the - * number of references the driver holds. - */ - if (unlikely(pagecnt_bias == 1)) { - page_ref_add(page, USHRT_MAX - 1); - rx_buffer->pagecnt_bias = USHRT_MAX; - } - - return true; -} - -/** - * wx_reuse_rx_page - page flip buffer and store it back on the ring - * @rx_ring: rx descriptor ring to store buffers on - * @old_buff: donor buffer to have page reused - * - * Synchronizes page for reuse by the adapter - **/ -static void wx_reuse_rx_page(struct wx_ring *rx_ring, - struct wx_rx_buffer *old_buff) -{ - u16 nta = rx_ring->next_to_alloc; - struct wx_rx_buffer *new_buff; - - new_buff = &rx_ring->rx_buffer_info[nta]; - - /* update, and store next to alloc */ - nta++; - rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - - /* transfer page from old buffer to new buffer */ - new_buff->page = old_buff->page; - new_buff->page_dma = old_buff->page_dma; - new_buff->page_offset = old_buff->page_offset; - new_buff->pagecnt_bias = old_buff->pagecnt_bias; -} - static void wx_dma_sync_frag(struct wx_ring *rx_ring, struct wx_rx_buffer *rx_buffer) { @@ -270,8 +216,6 @@ static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring, size, DMA_FROM_DEVICE); skip_sync: - rx_buffer->pagecnt_bias--; - return rx_buffer; } @@ -280,19 +224,9 @@ static void wx_put_rx_buffer(struct wx_ring *rx_ring, struct sk_buff *skb, int rx_buffer_pgcnt) { - if (wx_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { - /* hand second half of page back to the ring */ - wx_reuse_rx_page(rx_ring, rx_buffer); - } else { - if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma) - /* the page has been released from the ring */ - WX_CB(skb)->page_released = true; - else - page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); - - __page_frag_cache_drain(rx_buffer->page, - rx_buffer->pagecnt_bias); - } + if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma) + /* the page has been released from the ring */ + WX_CB(skb)->page_released = true; /* clear contents of rx_buffer */ rx_buffer->page = NULL; @@ -335,11 +269,12 @@ static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring, if (size <= WX_RXBUFFER_256) { memcpy(__skb_put(skb, size), page_addr, ALIGN(size, sizeof(long))); - rx_buffer->pagecnt_bias++; - + page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, true); return skb; } + skb_mark_for_recycle(skb); + if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)) WX_CB(skb)->dma = rx_buffer->dma; @@ -382,8 +317,6 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring, bi->page_dma = dma; bi->page = page; bi->page_offset = 0; - page_ref_add(page, USHRT_MAX - 1); - bi->pagecnt_bias = USHRT_MAX; return true; } @@ -488,6 +421,7 @@ static bool wx_is_non_eop(struct wx_ring *rx_ring, return false; rx_ring->rx_buffer_info[ntc].skb = skb; + rx_ring->rx_stats.non_eop_descs++; return true; } @@ -721,7 +655,7 @@ static int wx_clean_rx_irq(struct wx_q_vector *q_vector, /* exit if we failed to retrieve a buffer */ if (!skb) { - rx_buffer->pagecnt_bias++; + rx_ring->rx_stats.alloc_rx_buff_failed++; break; } @@ -877,9 +811,11 @@ static bool 
wx_clean_tx_irq(struct wx_q_vector *q_vector, if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && - netif_running(tx_ring->netdev)) + netif_running(tx_ring->netdev)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } } return !!budget; @@ -956,6 +892,7 @@ static int wx_maybe_stop_tx(struct wx_ring *tx_ring, u16 size) /* A reprieve! - use start_queue because it doesn't call schedule */ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; return 0; } @@ -1533,8 +1470,10 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb, count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)-> frags[f])); - if (wx_maybe_stop_tx(tx_ring, count + 3)) + if (wx_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; + } /* record the location of the first descriptor for this packet */ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; @@ -1965,11 +1904,11 @@ void wx_reset_interrupt_capability(struct wx *wx) if (!pdev->msi_enabled && !pdev->msix_enabled) return; - pci_free_irq_vectors(wx->pdev); if (pdev->msix_enabled) { kfree(wx->msix_entries); wx->msix_entries = NULL; } + pci_free_irq_vectors(wx->pdev); } EXPORT_SYMBOL(wx_reset_interrupt_capability); @@ -2241,8 +2180,6 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring) /* free resources associated with mapping */ page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); - __page_frag_cache_drain(rx_buffer->page, - rx_buffer->pagecnt_bias); i++; rx_buffer++; @@ -2665,8 +2602,11 @@ void wx_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct wx *wx = netdev_priv(netdev); + struct wx_hw_stats *hwstats; int i; + wx_update_stats(wx); + rcu_read_lock(); for (i = 0; i < wx->num_rx_queues; i++) { struct wx_ring *ring = READ_ONCE(wx->rx_ring[i]); @@ -2702,6 +2642,12 @@ void wx_get_stats64(struct net_device *netdev, } rcu_read_unlock(); + + hwstats = &wx->stats; + stats->rx_errors = hwstats->crcerrs + hwstats->rlec; + stats->multicast = hwstats->qmprc; + stats->rx_length_errors = hwstats->rlec; + stats->rx_crc_errors = hwstats->crcerrs; } EXPORT_SYMBOL(wx_get_stats64); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 29dfb561887d..83f9bb7b3c22 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -59,6 +59,25 @@ #define WX_TS_ALARM_ST_DALARM BIT(1) #define WX_TS_ALARM_ST_ALARM BIT(0) +/* statistic */ +#define WX_TX_FRAME_CNT_GOOD_BAD_L 0x1181C +#define WX_TX_BC_FRAMES_GOOD_L 0x11824 +#define WX_TX_MC_FRAMES_GOOD_L 0x1182C +#define WX_RX_FRAME_CNT_GOOD_BAD_L 0x11900 +#define WX_RX_BC_FRAMES_GOOD_L 0x11918 +#define WX_RX_MC_FRAMES_GOOD_L 0x11920 +#define WX_RX_CRC_ERROR_FRAMES_L 0x11928 +#define WX_RX_LEN_ERROR_FRAMES_L 0x11978 +#define WX_RX_UNDERSIZE_FRAMES_GOOD 0x11938 +#define WX_RX_OVERSIZE_FRAMES_GOOD 0x1193C +#define WX_MAC_LXONOFFRXC 0x11E0C + +/*********************** Receive DMA registers **************************/ +#define WX_RDM_DRP_PKT 0x12500 +#define WX_RDM_PKT_CNT 0x12504 +#define WX_RDM_BYTE_CNT_LSB 0x12508 +#define WX_RDM_BMC2OS_CNT 0x12510 + /************************* Port Registers ************************************/ /* port cfg Registers */ #define WX_CFG_PORT_CTL 0x14400 @@ -94,6 +113,9 @@ #define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */ #define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4)) #define WX_TDM_RP_IDX 0x1820C 
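The wx_get_stats64() hunk above first refreshes the accumulated MAC counters and then folds a handful of them into the generic netdev statistics, while the per-queue byte and packet totals continue to come from the u64_stats_sync-protected ring counters. A condensed sketch of that mapping, using the wx_hw_stats fields introduced by this series (the helper name is invented for illustration and assumes the libwx headers):

  static void example_fill_link_stats(const struct wx_hw_stats *hw,
                                      struct rtnl_link_stats64 *stats)
  {
          stats->rx_crc_errors    = hw->crcerrs;  /* CRC error frames */
          stats->rx_length_errors = hw->rlec;     /* length error frames */
          /* both error classes count toward the aggregate total */
          stats->rx_errors        = hw->crcerrs + hw->rlec;
          stats->multicast        = hw->qmprc;    /* sum of per-ring WX_PX_MPRC */
  }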
+#define WX_TDM_PKT_CNT 0x18308 +#define WX_TDM_BYTE_CNT_LSB 0x1830C +#define WX_TDM_OS2BMC_CNT 0x18314 #define WX_TDM_RP_RATE 0x18404 /***************************** RDB registers *********************************/ @@ -106,6 +128,8 @@ /* statistic */ #define WX_RDB_PFCMACDAL 0x19210 #define WX_RDB_PFCMACDAH 0x19214 +#define WX_RDB_LXOFFTXC 0x19218 +#define WX_RDB_LXONTXC 0x1921C /* ring assignment */ #define WX_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4)) #define WX_RDB_PL_CFG_L4HDR BIT(1) @@ -160,6 +184,10 @@ #define WX_PSR_LAN_FLEX_DW_H(_i) (0x15C04 + ((_i) * 16)) #define WX_PSR_LAN_FLEX_MSK(_i) (0x15C08 + ((_i) * 16)) +#define WX_PSR_WKUP_CTL 0x15B80 +/* Wake Up Filter Control Bit */ +#define WX_PSR_WKUP_CTL_MAG BIT(1) /* Magic Packet Wakeup Enable */ + /* vlan tbl */ #define WX_PSR_VLAN_TBL(_i) (0x16000 + ((_i) * 4)) @@ -201,6 +229,8 @@ #define WX_TSC_CTL 0x1D000 #define WX_TSC_CTL_TX_DIS BIT(1) #define WX_TSC_CTL_TSEC_DIS BIT(0) +#define WX_TSC_ST 0x1D004 +#define WX_TSC_ST_SECTX_RDY BIT(0) #define WX_TSC_BUF_AE 0x1D00C #define WX_TSC_BUF_AE_THR GENMASK(9, 0) @@ -212,6 +242,8 @@ #define WX_MNG_MBOX_CTL 0x1E044 #define WX_MNG_MBOX_CTL_SWRDY BIT(0) #define WX_MNG_MBOX_CTL_FWRDY BIT(2) +#define WX_MNG_BMC2OS_CNT 0x1E090 +#define WX_MNG_OS2BMC_CNT 0x1E094 /************************************* ETH MAC *****************************/ #define WX_MAC_TX_CFG 0x11000 @@ -227,6 +259,25 @@ #define WX_MAC_WDG_TIMEOUT 0x1100C #define WX_MAC_RX_FLOW_CTRL 0x11090 #define WX_MAC_RX_FLOW_CTRL_RFE BIT(0) /* receive fc enable */ +/* MDIO Registers */ +#define WX_MSCA 0x11200 +#define WX_MSCA_RA(v) FIELD_PREP(U16_MAX, v) +#define WX_MSCA_PA(v) FIELD_PREP(GENMASK(20, 16), v) +#define WX_MSCA_DA(v) FIELD_PREP(GENMASK(25, 21), v) +#define WX_MSCC 0x11204 +#define WX_MSCC_CMD(v) FIELD_PREP(GENMASK(17, 16), v) + +enum WX_MSCA_CMD_value { + WX_MSCA_CMD_RSV = 0, + WX_MSCA_CMD_WRITE, + WX_MSCA_CMD_POST_READ, + WX_MSCA_CMD_READ, +}; + +#define WX_MSCC_SADDR BIT(18) +#define WX_MSCC_BUSY BIT(22) +#define WX_MDIO_CLK(v) FIELD_PREP(GENMASK(21, 19), v) +#define WX_MDIO_CLAUSE_SELECT 0x11220 #define WX_MMC_CONTROL 0x11800 #define WX_MMC_CONTROL_RSTONRD BIT(2) /* reset on read */ @@ -276,6 +327,7 @@ #define WX_PX_RR_WP(_i) (0x01008 + ((_i) * 0x40)) #define WX_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40)) #define WX_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40)) +#define WX_PX_MPRC(_i) (0x01020 + ((_i) * 0x40)) /* PX_RR_CFG bit definitions */ #define WX_PX_RR_CFG_VLAN BIT(31) #define WX_PX_RR_CFG_SPLIT_MODE BIT(26) @@ -576,6 +628,13 @@ enum wx_mac_type { wx_mac_em }; +enum sp_media_type { + sp_media_unknown = 0, + sp_media_fiber, + sp_media_copper, + sp_media_backplane +}; + enum em_mac_type { em_mac_type_unknown = 0, em_mac_type_mdi, @@ -728,7 +787,6 @@ struct wx_rx_buffer { dma_addr_t page_dma; struct page *page; unsigned int page_offset; - u16 pagecnt_bias; }; struct wx_queue_stats { @@ -736,9 +794,16 @@ struct wx_queue_stats { u64 bytes; }; +struct wx_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; +}; + struct wx_rx_queue_stats { + u64 non_eop_descs; u64 csum_good_cnt; u64 csum_err; + u64 alloc_rx_buff_failed; }; /* iterator for handling rings in ring container */ @@ -782,6 +847,7 @@ struct wx_ring { struct wx_queue_stats stats; struct u64_stats_sync syncp; union { + struct wx_tx_queue_stats tx_stats; struct wx_rx_queue_stats rx_stats; }; } ____cacheline_internodealigned_in_smp; @@ -813,6 +879,33 @@ enum wx_isb_idx { WX_ISB_MAX }; +/* Statistics counters collected by the MAC */ +struct wx_hw_stats { + u64 gprc; + u64 gptc; + u64 
gorc; + u64 gotc; + u64 tpr; + u64 tpt; + u64 bprc; + u64 bptc; + u64 mprc; + u64 mptc; + u64 roc; + u64 ruc; + u64 lxonoffrxc; + u64 lxontxc; + u64 lxofftxc; + u64 o2bgptc; + u64 b2ospc; + u64 o2bspc; + u64 b2ogprc; + u64 rdmdrop; + u64 crcerrs; + u64 rlec; + u64 qmprc; +}; + struct wx { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; @@ -823,6 +916,7 @@ struct wx { struct wx_bus_info bus; struct wx_mac_info mac; enum em_mac_type mac_type; + enum sp_media_type media_type; struct wx_eeprom_info eeprom; struct wx_addr_filter_info addr_ctrl; struct wx_mac_addr *mac_table; @@ -846,7 +940,7 @@ struct wx { int duplex; struct phy_device *phydev; - bool wol_enabled; + bool wol_hw_supported; bool ncsi_enabled; bool gpio_ctrl; raw_spinlock_t gpio_lock; @@ -887,6 +981,14 @@ struct wx { u32 wol; u16 bd_number; + + struct wx_hw_stats stats; + u64 tx_busy; + u64 non_eop_descs; + u64 restart_queue; + u64 hw_csum_rx_good; + u64 hw_csum_rx_error; + u64 alloc_rx_buff_failed; }; #define WX_INTR_ALL (~0ULL) @@ -920,6 +1022,17 @@ wr32m(struct wx *wx, u32 reg, u32 mask, u32 field) wr32(wx, reg, val); } +static inline u64 +rd64(struct wx *wx, u32 reg) +{ + u64 lsb, msb; + + lsb = rd32(wx, reg); + msb = rd32(wx, reg + 4); + + return (lsb | msb << 32); +} + /* On some domestic CPU platforms, sometimes IO is not synchronized with * flushing memory, here use readl() to flush PCI read and write. */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index 5b25834baf38..afbdf6919071 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -6,14 +6,54 @@ #include <linux/netdevice.h> #include "../libwx/wx_ethtool.h" +#include "../libwx/wx_type.h" #include "ngbe_ethtool.h" +static void ngbe_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct wx *wx = netdev_priv(netdev); + + if (!wx->wol_hw_supported) + return; + wol->supported = WAKE_MAGIC; + wol->wolopts = 0; + if (wx->wol & WX_PSR_WKUP_CTL_MAG) + wol->wolopts |= WAKE_MAGIC; +} + +static int ngbe_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct wx *wx = netdev_priv(netdev); + struct pci_dev *pdev = wx->pdev; + + if (!wx->wol_hw_supported) + return -EOPNOTSUPP; + + wx->wol = 0; + if (wol->wolopts & WAKE_MAGIC) + wx->wol = WX_PSR_WKUP_CTL_MAG; + netdev->wol_enabled = !!(wx->wol); + wr32(wx, WX_PSR_WKUP_CTL, wx->wol); + device_set_wakeup_enable(&pdev->dev, netdev->wol_enabled); + + return 0; +} + static const struct ethtool_ops ngbe_ethtool_ops = { .get_drvinfo = wx_get_drvinfo, .get_link = ethtool_op_get_link, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, .nway_reset = phy_ethtool_nway_reset, + .get_wol = ngbe_get_wol, + .set_wol = ngbe_set_wol, + .get_sset_count = wx_get_sset_count, + .get_strings = wx_get_strings, + .get_ethtool_stats = wx_get_ethtool_stats, + .get_eth_mac_stats = wx_get_mac_stats, + .get_pause_stats = wx_get_pause_stats, }; void ngbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c index 6562a2de9527..6459bc1d7c22 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c @@ -85,6 +85,8 @@ int ngbe_reset_hw(struct wx *wx) } ngbe_reset_misc(wx); + wx_clear_hw_cntrs(wx); + /* Store the permanent mac address */ wx_get_mac_addr(wx, wx->mac.perm_addr); diff --git 
a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index c99a5d3de72e..8db804543e66 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -62,7 +62,7 @@ static void ngbe_init_type_code(struct wx *wx) em_mac_type_rgmii : em_mac_type_mdi; - wx->wol_enabled = (wol_mask == NGBE_WOL_SUP) ? 1 : 0; + wx->wol_hw_supported = (wol_mask == NGBE_WOL_SUP) ? 1 : 0; wx->ncsi_enabled = (ncsi_mask == NGBE_NCSI_MASK || type_mask == NGBE_SUBID_OCP_CARD) ? 1 : 0; @@ -121,10 +121,8 @@ static int ngbe_sw_init(struct wx *wx) /* PCI config space info */ err = wx_sw_init(wx); - if (err < 0) { - wx_err(wx, "read of internal subsystem device id failed\n"); + if (err < 0) return err; - } /* mac type, phy type , oem type */ ngbe_init_type_code(wx); @@ -332,6 +330,8 @@ static void ngbe_disable_device(struct wx *wx) wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH); } + + wx_update_stats(wx); } static void ngbe_down(struct wx *wx) @@ -440,14 +440,26 @@ static void ngbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake) { struct wx *wx = pci_get_drvdata(pdev); struct net_device *netdev; + u32 wufc = wx->wol; netdev = wx->netdev; + rtnl_lock(); netif_device_detach(netdev); - rtnl_lock(); if (netif_running(netdev)) - ngbe_down(wx); + ngbe_close(netdev); + wx_clear_interrupt_scheme(wx); rtnl_unlock(); + + if (wufc) { + wx_set_rx_mode(netdev); + wx_configure_rx(wx); + wr32(wx, NGBE_PSR_WKUP_CTL, wufc); + } else { + wr32(wx, NGBE_PSR_WKUP_CTL, 0); + } + pci_wake_from_d3(pdev, !!wufc); + *enable_wake = !!wufc; wx_control_hw(wx, false); pci_disable_device(pdev); @@ -621,12 +633,11 @@ static int ngbe_probe(struct pci_dev *pdev, } wx->wol = 0; - if (wx->wol_enabled) + if (wx->wol_hw_supported) wx->wol = NGBE_PSR_WKUP_CTL_MAG; - wx->wol_enabled = !!(wx->wol); + netdev->wol_enabled = !!(wx->wol); wr32(wx, NGBE_PSR_WKUP_CTL, wx->wol); - device_set_wakeup_enable(&pdev->dev, wx->wol); /* Save off EEPROM version number and Option Rom version which @@ -666,11 +677,6 @@ static int ngbe_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, wx); - netif_info(wx, probe, netdev, - "PHY: %s, PBA No: Wang Xun GbE Family Controller\n", - wx->mac_type == em_mac_type_mdi ? 
"Internal" : "External"); - netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr); - return 0; err_register: @@ -712,11 +718,52 @@ static void ngbe_remove(struct pci_dev *pdev) pci_disable_device(pdev); } +static int ngbe_suspend(struct pci_dev *pdev, pm_message_t state) +{ + bool wake; + + ngbe_dev_shutdown(pdev, &wake); + device_set_wakeup_enable(&pdev->dev, wake); + + return 0; +} + +static int ngbe_resume(struct pci_dev *pdev) +{ + struct net_device *netdev; + struct wx *wx; + u32 err; + + wx = pci_get_drvdata(pdev); + netdev = wx->netdev; + + err = pci_enable_device_mem(pdev); + if (err) { + wx_err(wx, "Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + device_wakeup_disable(&pdev->dev); + + ngbe_reset_hw(wx); + rtnl_lock(); + err = wx_init_interrupt_scheme(wx); + if (!err && netif_running(netdev)) + err = ngbe_open(netdev); + if (!err) + netif_device_attach(netdev); + rtnl_unlock(); + + return 0; +} + static struct pci_driver ngbe_driver = { .name = ngbe_driver_name, .id_table = ngbe_pci_tbl, .probe = ngbe_probe, .remove = ngbe_remove, + .suspend = ngbe_suspend, + .resume = ngbe_resume, .shutdown = ngbe_shutdown, }; diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c index c9ddbbc3fa4f..6302ecca71bb 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c @@ -29,117 +29,6 @@ static int ngbe_phy_write_reg_internal(struct mii_bus *bus, int phy_addr, int re return 0; } -static int ngbe_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum) -{ - u32 command, val, device_type = 0; - struct wx *wx = bus->priv; - int ret; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF); - /* setup and write the address cycle command */ - command = NGBE_MSCA_RA(regnum) | - NGBE_MSCA_PA(phy_addr) | - NGBE_MSCA_DA(device_type); - wr32(wx, NGBE_MSCA, command); - command = NGBE_MSCC_CMD(NGBE_MSCA_CMD_READ) | - NGBE_MSCC_BUSY | - NGBE_MDIO_CLK(6); - wr32(wx, NGBE_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000, - 100000, false, wx, NGBE_MSCC); - if (ret) { - wx_err(wx, "Mdio read c22 command did not complete.\n"); - return ret; - } - - return (u16)rd32(wx, NGBE_MSCC); -} - -static int ngbe_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value) -{ - u32 command, val, device_type = 0; - struct wx *wx = bus->priv; - int ret; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF); - /* setup and write the address cycle command */ - command = NGBE_MSCA_RA(regnum) | - NGBE_MSCA_PA(phy_addr) | - NGBE_MSCA_DA(device_type); - wr32(wx, NGBE_MSCA, command); - command = value | - NGBE_MSCC_CMD(NGBE_MSCA_CMD_WRITE) | - NGBE_MSCC_BUSY | - NGBE_MDIO_CLK(6); - wr32(wx, NGBE_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000, - 100000, false, wx, NGBE_MSCC); - if (ret) - wx_err(wx, "Mdio write c22 command did not complete.\n"); - - return ret; -} - -static int ngbe_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum) -{ - struct wx *wx = bus->priv; - u32 val, command; - int ret; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0); - /* setup and write the address cycle command */ - command = NGBE_MSCA_RA(regnum) | - NGBE_MSCA_PA(phy_addr) | - NGBE_MSCA_DA(devnum); - wr32(wx, NGBE_MSCA, command); - command = NGBE_MSCC_CMD(NGBE_MSCA_CMD_READ) | - NGBE_MSCC_BUSY | - NGBE_MDIO_CLK(6); - wr32(wx, NGBE_MSCC, command); - - /* wait to 
complete */ - ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000, - 100000, false, wx, NGBE_MSCC); - if (ret) { - wx_err(wx, "Mdio read c45 command did not complete.\n"); - return ret; - } - - return (u16)rd32(wx, NGBE_MSCC); -} - -static int ngbe_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, - int devnum, int regnum, u16 value) -{ - struct wx *wx = bus->priv; - int ret, command; - u16 val; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0); - /* setup and write the address cycle command */ - command = NGBE_MSCA_RA(regnum) | - NGBE_MSCA_PA(phy_addr) | - NGBE_MSCA_DA(devnum); - wr32(wx, NGBE_MSCA, command); - command = value | - NGBE_MSCC_CMD(NGBE_MSCA_CMD_WRITE) | - NGBE_MSCC_BUSY | - NGBE_MDIO_CLK(6); - wr32(wx, NGBE_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000, - 100000, false, wx, NGBE_MSCC); - if (ret) - wx_err(wx, "Mdio write c45 command did not complete.\n"); - - return ret; -} - static int ngbe_phy_read_reg_c22(struct mii_bus *bus, int phy_addr, int regnum) { struct wx *wx = bus->priv; @@ -148,7 +37,7 @@ static int ngbe_phy_read_reg_c22(struct mii_bus *bus, int phy_addr, int regnum) if (wx->mac_type == em_mac_type_mdi) phy_data = ngbe_phy_read_reg_internal(bus, phy_addr, regnum); else - phy_data = ngbe_phy_read_reg_mdi_c22(bus, phy_addr, regnum); + phy_data = wx_phy_read_reg_mdi_c22(bus, phy_addr, regnum); return phy_data; } @@ -162,7 +51,7 @@ static int ngbe_phy_write_reg_c22(struct mii_bus *bus, int phy_addr, if (wx->mac_type == em_mac_type_mdi) ret = ngbe_phy_write_reg_internal(bus, phy_addr, regnum, value); else - ret = ngbe_phy_write_reg_mdi_c22(bus, phy_addr, regnum, value); + ret = wx_phy_write_reg_mdi_c22(bus, phy_addr, regnum, value); return ret; } @@ -236,6 +125,7 @@ static void ngbe_phy_fixup(struct wx *wx) phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + phydev->mac_managed_pm = true; if (wx->mac_type != em_mac_type_mdi) return; /* disable EEE, internal phy does not support eee */ @@ -261,12 +151,11 @@ int ngbe_mdio_init(struct wx *wx) mii_bus->priv = wx; if (wx->mac_type == em_mac_type_rgmii) { - mii_bus->read_c45 = ngbe_phy_read_reg_mdi_c45; - mii_bus->write_c45 = ngbe_phy_write_reg_mdi_c45; + mii_bus->read_c45 = wx_phy_read_reg_mdi_c45; + mii_bus->write_c45 = wx_phy_write_reg_mdi_c45; } - snprintf(mii_bus->id, MII_BUS_ID_SIZE, "ngbe-%x", - (pdev->bus->number << 8) | pdev->devfn); + snprintf(mii_bus->id, MII_BUS_ID_SIZE, "ngbe-%x", pci_dev_id(pdev)); ret = devm_mdiobus_register(&pdev->dev, mii_bus); if (ret) return ret; diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h index b70eca397b67..ff754d69bdf6 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h @@ -59,28 +59,6 @@ #define NGBE_EEPROM_VERSION_L 0x1D #define NGBE_EEPROM_VERSION_H 0x1E -/* mdio access */ -#define NGBE_MSCA 0x11200 -#define NGBE_MSCA_RA(v) FIELD_PREP(U16_MAX, v) -#define NGBE_MSCA_PA(v) FIELD_PREP(GENMASK(20, 16), v) -#define NGBE_MSCA_DA(v) FIELD_PREP(GENMASK(25, 21), v) -#define NGBE_MSCC 0x11204 -#define NGBE_MSCC_CMD(v) FIELD_PREP(GENMASK(17, 16), v) - -enum NGBE_MSCA_CMD_value { - NGBE_MSCA_CMD_RSV = 0, - NGBE_MSCA_CMD_WRITE, - NGBE_MSCA_CMD_POST_READ, - NGBE_MSCA_CMD_READ, -}; - -#define NGBE_MSCC_SADDR BIT(18) -#define NGBE_MSCC_BUSY BIT(22) -#define NGBE_MDIO_CLK(v) FIELD_PREP(GENMASK(21, 19), v) - -/* 
Media-dependent registers. */ -#define NGBE_MDIO_CLAUSE_SELECT 0x11220 - /* GPIO Registers */ #define NGBE_GPIO_DR 0x14800 #define NGBE_GPIO_DDR 0x14804 diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index 859da112586a..3f336a088e43 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -39,6 +39,11 @@ static const struct ethtool_ops txgbe_ethtool_ops = { .get_link = ethtool_op_get_link, .get_link_ksettings = txgbe_get_link_ksettings, .set_link_ksettings = txgbe_set_link_ksettings, + .get_sset_count = wx_get_sset_count, + .get_strings = wx_get_strings, + .get_ethtool_stats = wx_get_ethtool_stats, + .get_eth_mac_stats = wx_get_mac_stats, + .get_pause_stats = wx_get_pause_stats, }; void txgbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index 0772eb14eabf..d6b2b3c781b6 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -14,6 +14,34 @@ #include "txgbe_hw.h" /** + * txgbe_disable_sec_tx_path - Stops the transmit data path + * @wx: pointer to hardware structure + * + * Stops the transmit data path and waits for the HW to internally empty + * the tx security block + **/ +int txgbe_disable_sec_tx_path(struct wx *wx) +{ + int val; + + wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS, WX_TSC_CTL_TX_DIS); + return read_poll_timeout(rd32, val, val & WX_TSC_ST_SECTX_RDY, + 1000, 20000, false, wx, WX_TSC_ST); +} + +/** + * txgbe_enable_sec_tx_path - Enables the transmit data path + * @wx: pointer to hardware structure + * + * Enables the transmit data path. + **/ +void txgbe_enable_sec_tx_path(struct wx *wx) +{ + wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS, 0); + WX_WRITE_FLUSH(wx); +} + +/** * txgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds * @wx: pointer to hardware structure * @@ -43,114 +71,6 @@ static void txgbe_init_thermal_sensor_thresh(struct wx *wx) } /** - * txgbe_read_pba_string - Reads part number string from EEPROM - * @wx: pointer to hardware structure - * @pba_num: stores the part number string from the EEPROM - * @pba_num_size: part number string buffer length - * - * Reads the part number string from the EEPROM. 
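The new txgbe_disable_sec_tx_path()/txgbe_enable_sec_tx_path() pair above exists so the Tx security block can be drained before the MAC is reconfigured; the phylink mac_prepare/mac_finish hooks added later in this patch use it exactly that way. A sketch of the intended bracketing, assuming the WX_MAC_TX_CFG/WX_MAC_TX_CFG_TE transmit-enable definitions from wx_type.h (example_relink() itself is hypothetical):

  static int example_relink(struct wx *wx)
  {
          int err;

          /* stop the MAC transmitter, then wait up to 20 ms for the
           * security block to report WX_TSC_ST_SECTX_RDY
           */
          wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
          err = txgbe_disable_sec_tx_path(wx);
          if (err)
                  return err;

          /* ...reprogram the MAC/PCS for the new link mode here... */

          /* reopen the path in the reverse order */
          txgbe_enable_sec_tx_path(wx);
          wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, WX_MAC_TX_CFG_TE);
          return 0;
  }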
- **/ -int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size) -{ - u16 pba_ptr, offset, length, data; - int ret_val; - - if (!pba_num) { - wx_err(wx, "PBA string buffer was null\n"); - return -EINVAL; - } - - ret_val = wx_read_ee_hostif(wx, - wx->eeprom.sw_region_offset + TXGBE_PBANUM0_PTR, - &data); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - - ret_val = wx_read_ee_hostif(wx, - wx->eeprom.sw_region_offset + TXGBE_PBANUM1_PTR, - &pba_ptr); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - - /* if data is not ptr guard the PBA must be in legacy format which - * means pba_ptr is actually our second data word for the PBA number - * and we can decode it into an ascii string - */ - if (data != TXGBE_PBANUM_PTR_GUARD) { - wx_err(wx, "NVM PBA number is not stored as string\n"); - - /* we will need 11 characters to store the PBA */ - if (pba_num_size < 11) { - wx_err(wx, "PBA string buffer too small\n"); - return -ENOMEM; - } - - /* extract hex string from data and pba_ptr */ - pba_num[0] = (data >> 12) & 0xF; - pba_num[1] = (data >> 8) & 0xF; - pba_num[2] = (data >> 4) & 0xF; - pba_num[3] = data & 0xF; - pba_num[4] = (pba_ptr >> 12) & 0xF; - pba_num[5] = (pba_ptr >> 8) & 0xF; - pba_num[6] = '-'; - pba_num[7] = 0; - pba_num[8] = (pba_ptr >> 4) & 0xF; - pba_num[9] = pba_ptr & 0xF; - - /* put a null character on the end of our string */ - pba_num[10] = '\0'; - - /* switch all the data but the '-' to hex char */ - for (offset = 0; offset < 10; offset++) { - if (pba_num[offset] < 0xA) - pba_num[offset] += '0'; - else if (pba_num[offset] < 0x10) - pba_num[offset] += 'A' - 0xA; - } - - return 0; - } - - ret_val = wx_read_ee_hostif(wx, pba_ptr, &length); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - - if (length == 0xFFFF || length == 0) { - wx_err(wx, "NVM PBA number section invalid length\n"); - return -EINVAL; - } - - /* check if pba_num buffer is big enough */ - if (pba_num_size < (((u32)length * 2) - 1)) { - wx_err(wx, "PBA string buffer too small\n"); - return -ENOMEM; - } - - /* trim pba length from start of string */ - pba_ptr++; - length--; - - for (offset = 0; offset < length; offset++) { - ret_val = wx_read_ee_hostif(wx, pba_ptr + offset, &data); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - pba_num[offset * 2] = (u8)(data >> 8); - pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); - } - pba_num[offset * 2] = '\0'; - - return 0; -} - -/** * txgbe_calc_eeprom_checksum - Calculates and returns the checksum * @wx: pointer to hardware structure * @checksum: pointer to cheksum @@ -263,11 +183,14 @@ int txgbe_reset_hw(struct wx *wx) if (status != 0) return status; - if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || - ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) - wx_reset_hostif(wx); + if (wx->media_type != sp_media_copper) { + u32 val; - usleep_range(10, 100); + val = WX_MIS_RST_LAN_RST(wx->bus.func); + wr32(wx, WX_MIS_RST, val | rd32(wx, WX_MIS_RST)); + WX_WRITE_FLUSH(wx); + usleep_range(10, 100); + } status = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_LAN_SW_RST(wx->bus.func)); if (status != 0) @@ -275,6 +198,8 @@ int txgbe_reset_hw(struct wx *wx) txgbe_reset_misc(wx); + wx_clear_hw_cntrs(wx); + /* Store the permanent mac address */ wx_get_mac_addr(wx, wx->mac.perm_addr); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h index e82f65dff8a6..1f3ecf60e3c4 100644 --- 
a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h @@ -4,7 +4,8 @@ #ifndef _TXGBE_HW_H_ #define _TXGBE_HW_H_ -int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size); +int txgbe_disable_sec_tx_path(struct wx *wx); +void txgbe_enable_sec_tx_path(struct wx *wx); int txgbe_validate_eeprom_checksum(struct wx *wx, u16 *checksum_val); int txgbe_reset_hw(struct wx *wx); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 46eba6d6188b..526250102db2 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -286,6 +286,8 @@ static void txgbe_disable_device(struct wx *wx) /* Disable the Tx DMA engine */ wr32m(wx, WX_TDM_CTL, WX_TDM_CTL_TE, 0); + + wx_update_stats(wx); } static void txgbe_down(struct wx *wx) @@ -301,6 +303,49 @@ static void txgbe_down(struct wx *wx) } /** + * txgbe_init_type_code - Initialize the shared code + * @wx: pointer to hardware structure + **/ +static void txgbe_init_type_code(struct wx *wx) +{ + u8 device_type = wx->subsystem_device_id & 0xF0; + + switch (wx->device_id) { + case TXGBE_DEV_ID_SP1000: + case TXGBE_DEV_ID_WX1820: + wx->mac.type = wx_mac_sp; + break; + default: + wx->mac.type = wx_mac_unknown; + break; + } + + switch (device_type) { + case TXGBE_ID_SFP: + wx->media_type = sp_media_fiber; + break; + case TXGBE_ID_XAUI: + case TXGBE_ID_SGMII: + wx->media_type = sp_media_copper; + break; + case TXGBE_ID_KR_KX_KX4: + case TXGBE_ID_MAC_XAUI: + case TXGBE_ID_MAC_SGMII: + wx->media_type = sp_media_backplane; + break; + case TXGBE_ID_SFI_XAUI: + if (wx->bus.func == 0) + wx->media_type = sp_media_fiber; + else + wx->media_type = sp_media_copper; + break; + default: + wx->media_type = sp_media_unknown; + break; + } +} + +/** * txgbe_sw_init - Initialize general software structures (struct wx) * @wx: board private structure to initialize **/ @@ -319,20 +364,10 @@ static int txgbe_sw_init(struct wx *wx) /* PCI config space info */ err = wx_sw_init(wx); - if (err < 0) { - wx_err(wx, "read of internal subsystem device id failed\n"); + if (err < 0) return err; - } - switch (wx->device_id) { - case TXGBE_DEV_ID_SP1000: - case TXGBE_DEV_ID_WX1820: - wx->mac.type = wx_mac_sp; - break; - default: - wx->mac.type = wx_mac_unknown; - break; - } + txgbe_init_type_code(wx); /* Set common capability flags and settings */ wx->max_q_vectors = TXGBE_MAX_MSIX_VECTORS; @@ -503,7 +538,6 @@ static int txgbe_probe(struct pci_dev *pdev, u16 eeprom_verh = 0, eeprom_verl = 0, offset = 0; u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0; u16 build = 0, major = 0, patch = 0; - u8 part_str[TXGBE_PBANUM_LENGTH]; u32 etrack_id = 0; err = pci_enable_device_mem(pdev); @@ -663,6 +697,9 @@ static int txgbe_probe(struct pci_dev *pdev, "0x%08x", etrack_id); } + if (etrack_id < 0x20010) + dev_warn(&pdev->dev, "Please upgrade the firmware to 0x20010 or above.\n"); + txgbe = devm_kzalloc(&pdev->dev, sizeof(*txgbe), GFP_KERNEL); if (!txgbe) { err = -ENOMEM; @@ -698,13 +735,6 @@ static int txgbe_probe(struct pci_dev *pdev, else dev_warn(&pdev->dev, "Failed to enumerate PF devices.\n"); - /* First try to read PBA as a string */ - err = txgbe_read_pba_string(wx, part_str, TXGBE_PBANUM_LENGTH); - if (err) - strncpy(part_str, "Unknown", TXGBE_PBANUM_LENGTH); - - netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr); - return 0; err_remove_phy: diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c 
b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index 8779645a54be..b6c06adb8656 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -18,6 +18,7 @@ #include "../libwx/wx_hw.h" #include "txgbe_type.h" #include "txgbe_phy.h" +#include "txgbe_hw.h" static int txgbe_swnodes_register(struct txgbe *txgbe) { @@ -26,7 +27,7 @@ static int txgbe_swnodes_register(struct txgbe *txgbe) struct software_node *swnodes; u32 id; - id = (pdev->bus->number << 8) | pdev->devfn; + id = pci_dev_id(pdev); snprintf(nodes->gpio_name, sizeof(nodes->gpio_name), "txgbe_gpio-%x", id); snprintf(nodes->i2c_name, sizeof(nodes->i2c_name), "txgbe_i2c-%x", id); @@ -140,7 +141,7 @@ static int txgbe_mdio_pcs_init(struct txgbe *txgbe) mii_bus->phy_mask = ~0; mii_bus->priv = wx; snprintf(mii_bus->id, MII_BUS_ID_SIZE, "txgbe_pcs-%x", - (pdev->bus->number << 8) | pdev->devfn); + pci_dev_id(pdev)); ret = devm_mdiobus_register(&pdev->dev, mii_bus); if (ret) @@ -160,7 +161,10 @@ static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *confi { struct txgbe *txgbe = netdev_to_txgbe(to_net_dev(config->dev)); - return &txgbe->xpcs->pcs; + if (interface == PHY_INTERFACE_MODE_10GBASER) + return &txgbe->xpcs->pcs; + + return NULL; } static void txgbe_mac_config(struct phylink_config *config, unsigned int mode, @@ -210,8 +214,32 @@ static void txgbe_mac_link_up(struct phylink_config *config, wr32(wx, WX_MAC_WDG_TIMEOUT, wdg); } +static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct wx *wx = netdev_priv(to_net_dev(config->dev)); + + wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); + wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0); + + return txgbe_disable_sec_tx_path(wx); +} + +static int txgbe_mac_finish(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct wx *wx = netdev_priv(to_net_dev(config->dev)); + + txgbe_enable_sec_tx_path(wx); + wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE); + + return 0; +} + static const struct phylink_mac_ops txgbe_mac_ops = { .mac_select_pcs = txgbe_phylink_mac_select, + .mac_prepare = txgbe_mac_prepare, + .mac_finish = txgbe_mac_finish, .mac_config = txgbe_mac_config, .mac_link_down = txgbe_mac_link_down, .mac_link_up = txgbe_mac_link_up, @@ -219,8 +247,8 @@ static const struct phylink_mac_ops txgbe_mac_ops = { static int txgbe_phylink_init(struct txgbe *txgbe) { + struct fwnode_handle *fwnode = NULL; struct phylink_config *config; - struct fwnode_handle *fwnode; struct wx *wx = txgbe->wx; phy_interface_t phy_mode; struct phylink *phylink; @@ -231,14 +259,34 @@ static int txgbe_phylink_init(struct txgbe *txgbe) config->dev = &wx->netdev->dev; config->type = PHYLINK_NETDEV; - config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_SYM_PAUSE | MAC_ASYM_PAUSE; - phy_mode = PHY_INTERFACE_MODE_10GBASER; - __set_bit(PHY_INTERFACE_MODE_10GBASER, config->supported_interfaces); - fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_PHYLINK]); + config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_100FD | + MAC_SYM_PAUSE | MAC_ASYM_PAUSE; + + if (wx->media_type == sp_media_copper) { + phy_mode = PHY_INTERFACE_MODE_XAUI; + __set_bit(PHY_INTERFACE_MODE_XAUI, config->supported_interfaces); + } else { + phy_mode = PHY_INTERFACE_MODE_10GBASER; + fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_PHYLINK]); + __set_bit(PHY_INTERFACE_MODE_10GBASER, config->supported_interfaces); + 
__set_bit(PHY_INTERFACE_MODE_1000BASEX, config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_SGMII, config->supported_interfaces); + } + phylink = phylink_create(config, fwnode, phy_mode, &txgbe_mac_ops); if (IS_ERR(phylink)) return PTR_ERR(phylink); + if (wx->phydev) { + int ret; + + ret = phylink_connect_phy(phylink, wx->phydev); + if (ret) { + phylink_destroy(phylink); + return ret; + } + } + txgbe->phylink = phylink; return 0; @@ -431,7 +479,8 @@ static void txgbe_irq_handler(struct irq_desc *desc) chained_irq_exit(chip, desc); - if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN)) { + if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN | + TXGBE_PX_MISC_ETH_AN)) { u32 reg = rd32(wx, TXGBE_CFG_PORT_ST); phylink_mac_change(txgbe->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP)); @@ -459,7 +508,7 @@ static int txgbe_gpio_init(struct txgbe *txgbe) return -ENOMEM; gc->label = devm_kasprintf(dev, GFP_KERNEL, "txgbe_gpio-%x", - (wx->pdev->bus->number << 8) | wx->pdev->devfn); + pci_dev_id(wx->pdev)); if (!gc->label) return -ENOMEM; @@ -503,7 +552,7 @@ static int txgbe_clock_register(struct txgbe *txgbe) struct clk *clk; snprintf(clk_name, sizeof(clk_name), "i2c_designware.%d", - (pdev->bus->number << 8) | pdev->devfn); + pci_dev_id(pdev)); clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000); if (IS_ERR(clk)) @@ -566,7 +615,7 @@ static int txgbe_i2c_register(struct txgbe *txgbe) info.parent = &pdev->dev; info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_I2C]); info.name = "i2c_designware"; - info.id = (pdev->bus->number << 8) | pdev->devfn; + info.id = pci_dev_id(pdev); info.res = &DEFINE_RES_IRQ(pdev->irq); info.num_res = 1; @@ -588,7 +637,7 @@ static int txgbe_sfp_register(struct txgbe *txgbe) info.parent = &pdev->dev; info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_SFP]); info.name = "sfp"; - info.id = (pdev->bus->number << 8) | pdev->devfn; + info.id = pci_dev_id(pdev); sfp_dev = platform_device_register_full(&info); if (IS_ERR(sfp_dev)) return PTR_ERR(sfp_dev); @@ -598,10 +647,65 @@ static int txgbe_sfp_register(struct txgbe *txgbe) return 0; } +static int txgbe_ext_phy_init(struct txgbe *txgbe) +{ + struct phy_device *phydev; + struct mii_bus *mii_bus; + struct pci_dev *pdev; + struct wx *wx; + int ret = 0; + + wx = txgbe->wx; + pdev = wx->pdev; + + mii_bus = devm_mdiobus_alloc(&pdev->dev); + if (!mii_bus) + return -ENOMEM; + + mii_bus->name = "txgbe_mii_bus"; + mii_bus->read_c45 = &wx_phy_read_reg_mdi_c45; + mii_bus->write_c45 = &wx_phy_write_reg_mdi_c45; + mii_bus->parent = &pdev->dev; + mii_bus->phy_mask = GENMASK(31, 1); + mii_bus->priv = wx; + snprintf(mii_bus->id, MII_BUS_ID_SIZE, "txgbe-%x", + (pdev->bus->number << 8) | pdev->devfn); + + ret = devm_mdiobus_register(&pdev->dev, mii_bus); + if (ret) { + wx_err(wx, "failed to register MDIO bus: %d\n", ret); + return ret; + } + + phydev = phy_find_first(mii_bus); + if (!phydev) { + wx_err(wx, "no PHY found\n"); + return -ENODEV; + } + + phy_attached_info(phydev); + + wx->link = 0; + wx->speed = 0; + wx->duplex = 0; + wx->phydev = phydev; + + ret = txgbe_phylink_init(txgbe); + if (ret) { + wx_err(wx, "failed to init phylink: %d\n", ret); + return ret; + } + + return 0; +} + int txgbe_init_phy(struct txgbe *txgbe) { int ret; + if (txgbe->wx->media_type == sp_media_copper) + return txgbe_ext_phy_init(txgbe); + ret = txgbe_swnodes_register(txgbe); if (ret) { wx_err(txgbe->wx, "failed to register software nodes\n"); @@ -663,6 +767,12 @@ err_unregister_swnode: void 
txgbe_remove_phy(struct txgbe *txgbe) { + if (txgbe->wx->media_type == sp_media_copper) { + phylink_disconnect_phy(txgbe->phylink); + phylink_destroy(txgbe->phylink); + return; + } + platform_device_unregister(txgbe->sfp_dev); platform_device_unregister(txgbe->i2c_dev); clkdev_drop(txgbe->clock); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 51199c355f95..3ba9ce43f394 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -88,9 +88,6 @@ #define TXGBE_XPCS_IDA_ADDR 0x13000 #define TXGBE_XPCS_IDA_DATA 0x13004 -/* Part Number String Length */ -#define TXGBE_PBANUM_LENGTH 32 - /* Checksum and EEPROM pointers */ #define TXGBE_EEPROM_LAST_WORD 0x800 #define TXGBE_EEPROM_CHECKSUM 0x2F @@ -98,9 +95,6 @@ #define TXGBE_EEPROM_VERSION_L 0x1D #define TXGBE_EEPROM_VERSION_H 0x1E #define TXGBE_ISCSI_BOOT_CONFIG 0x07 -#define TXGBE_PBANUM0_PTR 0x05 -#define TXGBE_PBANUM1_PTR 0x06 -#define TXGBE_PBANUM_PTR_GUARD 0xFAFA #define TXGBE_MAX_MSIX_VECTORS 64 #define TXGBE_MAX_FDIR_INDICES 63 diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c index 7c52796273a4..990a3cce8c0f 100644 --- a/drivers/net/ethernet/wiznet/w5100-spi.c +++ b/drivers/net/ethernet/wiznet/w5100-spi.c @@ -14,8 +14,8 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/netdevice.h> +#include <linux/of.h> #include <linux/of_net.h> -#include <linux/of_device.h> #include <linux/spi/spi.h> #include "w5100.h" @@ -420,7 +420,6 @@ MODULE_DEVICE_TABLE(of, w5100_of_match); static int w5100_spi_probe(struct spi_device *spi) { - const struct of_device_id *of_id; const struct w5100_ops *ops; kernel_ulong_t driver_data; const void *mac = NULL; @@ -432,14 +431,7 @@ static int w5100_spi_probe(struct spi_device *spi) if (!ret) mac = tmpmac; - if (spi->dev.of_node) { - of_id = of_match_device(w5100_of_match, &spi->dev); - if (!of_id) - return -ENODEV; - driver_data = (kernel_ulong_t)of_id->data; - } else { - driver_data = spi_get_device_id(spi)->driver_data; - } + driver_data = (uintptr_t)spi_get_device_match_data(spi); switch (driver_data) { case W5100: diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 634946e87e5f..b26fd15c25ae 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -930,8 +930,8 @@ static irqreturn_t w5100_interrupt(int irq, void *ndev_instance) if (priv->ops->may_sleep) queue_work(priv->xfer_wq, &priv->rx_work); - else if (napi_schedule_prep(&priv->napi)) - __napi_schedule(&priv->napi); + else + napi_schedule(&priv->napi); } return IRQ_HANDLED; @@ -1062,11 +1062,9 @@ static int w5100_mmio_probe(struct platform_device *pdev) mac_addr, irq, data ? 
data->link_gpio : -EINVAL); } -static int w5100_mmio_remove(struct platform_device *pdev) +static void w5100_mmio_remove(struct platform_device *pdev) { w5100_remove(&pdev->dev); - - return 0; } void *w5100_ops_priv(const struct net_device *ndev) @@ -1273,6 +1271,6 @@ static struct platform_driver w5100_mmio_driver = { .pm = &w5100_pm_ops, }, .probe = w5100_mmio_probe, - .remove = w5100_mmio_remove, + .remove_new = w5100_mmio_remove, }; module_platform_driver(w5100_mmio_driver); diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index b0958fe8111e..3318b50a5911 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -627,7 +627,7 @@ err_register: return err; } -static int w5300_remove(struct platform_device *pdev) +static void w5300_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct w5300_priv *priv = netdev_priv(ndev); @@ -639,7 +639,6 @@ static int w5300_remove(struct platform_device *pdev) unregister_netdev(ndev); free_netdev(ndev); - return 0; } #ifdef CONFIG_PM_SLEEP @@ -683,7 +682,7 @@ static struct platform_driver w5300_driver = { .pm = &w5300_pm_ops, }, .probe = w5300_probe, - .remove = w5300_remove, + .remove_new = w5300_remove, }; module_platform_driver(w5300_driver); diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h index 6668d1b760d8..90d122d5475c 100644 --- a/drivers/net/ethernet/xilinx/ll_temac.h +++ b/drivers/net/ethernet/xilinx/ll_temac.h @@ -5,6 +5,7 @@ #include <linux/netdevice.h> #include <linux/of.h> +#include <linux/platform_device.h> #include <linux/spinlock.h> #ifdef CONFIG_PPC_DCR diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 49f303353ecb..9df39cf8b097 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -35,12 +35,10 @@ #include <linux/netdevice.h> #include <linux/if_ether.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_net.h> -#include <linux/of_platform.h> -#include <linux/of_address.h> +#include <linux/platform_device.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/tcp.h> /* needed for sizeof(tcphdr) */ @@ -1628,7 +1626,7 @@ err_sysfs_create: return rc; } -static int temac_remove(struct platform_device *pdev) +static void temac_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct temac_local *lp = netdev_priv(ndev); @@ -1638,7 +1636,6 @@ static int temac_remove(struct platform_device *pdev) if (lp->phy_node) of_node_put(lp->phy_node); temac_mdio_teardown(lp); - return 0; } static const struct of_device_id temac_of_match[] = { @@ -1652,7 +1649,7 @@ MODULE_DEVICE_TABLE(of, temac_of_match); static struct platform_driver temac_driver = { .probe = temac_probe, - .remove = temac_remove, + .remove_new = temac_remove, .driver = { .name = "xilinx_temac", .of_match_table = temac_of_match, diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c index 2371c072b53f..07a9fb49eda1 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_mdio.c +++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c @@ -10,8 +10,8 @@ #include <linux/mutex.h> #include <linux/phy.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_address.h> +#include <linux/platform_device.h> #include <linux/slab.h> #include 
<linux/of_mdio.h> #include <linux/platform_data/xilinx-ll-temac.h> diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 8e32dc50a408..bf6e33990490 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -27,11 +27,12 @@ #include <linux/etherdevice.h> #include <linux/module.h> #include <linux/netdevice.h> +#include <linux/of.h> #include <linux/of_mdio.h> #include <linux/of_net.h> -#include <linux/of_platform.h> #include <linux/of_irq.h> #include <linux/of_address.h> +#include <linux/platform_device.h> #include <linux/skbuff.h> #include <linux/math64.h> #include <linux/phy.h> @@ -821,7 +822,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (lp->features & XAE_FEATURE_FULL_TX_CSUM) { /* Tx Full Checksum Offload Enabled */ cur_p->app0 |= 2; - } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) { + } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) { csum_start_off = skb_transport_offset(skb); csum_index_off = csum_start_off + skb->csum_offset; /* Tx Partial Checksum Offload Enabled */ @@ -2182,7 +2183,7 @@ free_netdev: return ret; } -static int axienet_remove(struct platform_device *pdev) +static void axienet_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct axienet_local *lp = netdev_priv(ndev); @@ -2201,8 +2202,6 @@ static int axienet_remove(struct platform_device *pdev) clk_disable_unprepare(lp->axi_clk); free_netdev(ndev); - - return 0; } static void axienet_shutdown(struct platform_device *pdev) @@ -2255,7 +2254,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops, static struct platform_driver axienet_driver = { .probe = axienet_probe, - .remove = axienet_remove, + .remove_new = axienet_remove, .shutdown = axienet_shutdown, .driver = { .name = "xilinx_axienet", diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index ad2c30d9a482..765aa516aada 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -8,6 +8,7 @@ */ #include <linux/module.h> +#include <linux/platform_device.h> #include <linux/uaccess.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -15,9 +16,8 @@ #include <linux/ethtool.h> #include <linux/io.h> #include <linux/slab.h> +#include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_device.h> -#include <linux/of_platform.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/phy.h> @@ -1180,10 +1180,8 @@ error: * This function is called if a device is physically removed from the system or * if the driver module is being unloaded. It frees any resources allocated to * the device. - * - * Return: 0, always. 
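Among the mechanical .remove_new conversions above, the axienet hunk is a genuine bug fix: the transmit path was testing the receive-side feature bit (XAE_FEATURE_PARTIAL_RX_CSUM) before enabling partial Tx checksum offload. A condensed sketch of the corrected branch from axienet_start_xmit(), with the surrounding descriptor code as in the driver:

  if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
          /* full Tx checksum offload: hardware computes it all */
          cur_p->app0 |= 2;
  } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
          u32 csum_start_off = skb_transport_offset(skb);
          u32 csum_index_off = csum_start_off + skb->csum_offset;

          /* partial offload: tell hardware where the checksum
           * starts and where the result must be inserted
           */
          cur_p->app0 |= 1;
          cur_p->app1 = (csum_start_off << 16) | csum_index_off;
  }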
*/ -static int xemaclite_of_remove(struct platform_device *of_dev) +static void xemaclite_of_remove(struct platform_device *of_dev) { struct net_device *ndev = platform_get_drvdata(of_dev); @@ -1202,8 +1200,6 @@ static int xemaclite_of_remove(struct platform_device *of_dev) lp->phy_node = NULL; free_netdev(ndev); - - return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -1262,7 +1258,7 @@ static struct platform_driver xemaclite_of_driver = { .of_match_table = xemaclite_of_match, }, .probe = xemaclite_of_probe, - .remove = xemaclite_of_remove, + .remove_new = xemaclite_of_remove, }; module_platform_driver(xemaclite_of_driver); diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 3b0c5f177447..e0d26148dfd9 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -24,6 +24,7 @@ #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/etherdevice.h> +#include <linux/if_vlan.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/net_tstamp.h> @@ -63,7 +64,15 @@ #define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS)) #define REGS_SIZE 0x1000 -#define MAX_MRU 1536 /* 0x600 */ + +/* MRU is said to be 14320 in a code dump, the SW manual says that + * MRU/MTU is 16320 and includes VLAN and ethernet headers. + * See "IXP400 Software Programmer's Guide" section 10.3.2, page 161. + * + * FIXME: we have chosen the safe default (14320) but if you can test + * jumboframes, experiment with 16320 and see what happens! + */ +#define MAX_MRU (14320 - VLAN_ETH_HLEN) #define RX_BUFF_SIZE ALIGN((NET_IP_ALIGN) + MAX_MRU, 4) #define NAPI_WEIGHT 16 @@ -154,7 +163,6 @@ typedef void buffer_t; /* Information about built-in Ethernet MAC interfaces */ struct eth_plat_info { - u8 phy; /* MII PHY ID, 0 - 31 */ u8 rxq; /* configurable, currently 0 - 31 only */ u8 txreadyq; u8 hwaddr[ETH_ALEN]; @@ -714,9 +722,9 @@ static int eth_poll(struct napi_struct *napi, int budget) napi_complete(napi); qmgr_enable_irq(rxq); if (!qmgr_stat_below_low_watermark(rxq) && - napi_reschedule(napi)) { /* not empty again */ + napi_schedule(napi)) { /* not empty again */ #if DEBUG_RX - netdev_debug(dev, "eth_poll napi_reschedule succeeded\n"); + netdev_debug(dev, "eth_poll napi_schedule succeeded\n"); #endif qmgr_disable_irq(rxq); continue; @@ -1182,6 +1190,54 @@ static void destroy_queues(struct port *port) } } +static int ixp4xx_do_change_mtu(struct net_device *dev, int new_mtu) +{ + struct port *port = netdev_priv(dev); + struct npe *npe = port->npe; + int framesize, chunks; + struct msg msg = {}; + + /* adjust for ethernet headers */ + framesize = new_mtu + VLAN_ETH_HLEN; + /* max rx/tx 64 byte chunks */ + chunks = DIV_ROUND_UP(framesize, 64); + + msg.cmd = NPE_SETMAXFRAMELENGTHS; + msg.eth_id = port->id; + + /* Firmware wants to know buffer size in 64 byte chunks */ + msg.byte2 = chunks << 8; + msg.byte3 = chunks << 8; + + msg.byte4 = msg.byte6 = framesize >> 8; + msg.byte5 = msg.byte7 = framesize & 0xff; + + if (npe_send_recv_message(npe, &msg, "ETH_SET_MAX_FRAME_LENGTH")) + return -EIO; + netdev_dbg(dev, "set MTU on NPE %s to %d bytes\n", + npe_name(npe), new_mtu); + + return 0; +} + +static int ixp4xx_eth_change_mtu(struct net_device *dev, int new_mtu) +{ + int ret; + + /* MTU can only be changed when the interface is up. We also + * set the MTU from dev->mtu when opening the device. 
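The NPE_SETMAXFRAMELENGTHS message built by ixp4xx_do_change_mtu() above encodes the frame size twice: once rounded up to the 64-byte chunks the firmware sizes its buffers in, and once as a raw two-byte length split across the msg.byteN fields (the hunk's comment suggests one copy each for Rx and Tx). Worked numbers for a standard 1500-byte MTU, illustrative only:

  int framesize = 1500 + VLAN_ETH_HLEN;           /* 1500 + 18 = 1518 bytes */
  int chunks    = DIV_ROUND_UP(framesize, 64);    /* 24 chunks of 64 bytes */
  u8  len_hi    = framesize >> 8;                 /* 0x05 */
  u8  len_lo    = framesize & 0xff;               /* 0xee */

These are the values the helper feeds into msg.byte2/byte3 (chunk count) and msg.byte4..byte7 (frame length) before sending the message to the NPE.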
+ */ + if (dev->flags & IFF_UP) { + ret = ixp4xx_do_change_mtu(dev, new_mtu); + if (ret < 0) + return ret; + } + + dev->mtu = new_mtu; + + return 0; +} + static int eth_open(struct net_device *dev) { struct port *port = netdev_priv(dev); @@ -1232,6 +1288,8 @@ static int eth_open(struct net_device *dev) if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE")) return -EIO; + ixp4xx_do_change_mtu(dev, dev->mtu); + if ((err = request_queues(port)) != 0) return err; @@ -1374,6 +1432,7 @@ static int eth_close(struct net_device *dev) static const struct net_device_ops ixp4xx_netdev_ops = { .ndo_open = eth_open, .ndo_stop = eth_close, + .ndo_change_mtu = ixp4xx_eth_change_mtu, .ndo_start_xmit = eth_xmit, .ndo_set_rx_mode = eth_set_mcast_list, .ndo_eth_ioctl = eth_ioctl, @@ -1488,6 +1547,9 @@ static int ixp4xx_eth_probe(struct platform_device *pdev) ndev->dev.dma_mask = dev->dma_mask; ndev->dev.coherent_dma_mask = dev->coherent_dma_mask; + ndev->min_mtu = ETH_MIN_MTU; + ndev->max_mtu = MAX_MRU; + netif_napi_add_weight(ndev, &port->napi, eth_poll, NAPI_WEIGHT); if (!(port->npe = npe_request(NPE_ID(port->id)))) @@ -1520,7 +1582,7 @@ static int ixp4xx_eth_probe(struct platform_device *pdev) if ((err = register_netdev(ndev))) goto err_phy_dis; - netdev_info(ndev, "%s: MII PHY %i on %s\n", ndev->name, plat->phy, + netdev_info(ndev, "%s: MII PHY %s on %s\n", ndev->name, phydev_name(phydev), npe_name(port->npe)); return 0; @@ -1533,7 +1595,7 @@ err_free_mem: return err; } -static int ixp4xx_eth_remove(struct platform_device *pdev) +static void ixp4xx_eth_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct phy_device *phydev = ndev->phydev; @@ -1544,7 +1606,6 @@ static int ixp4xx_eth_remove(struct platform_device *pdev) ixp4xx_mdio_remove(); npe_port_tab[NPE_ID(port->id)] = NULL; npe_release(port->npe); - return 0; } static const struct of_device_id ixp4xx_eth_of_match[] = { @@ -1560,7 +1621,7 @@ static struct platform_driver ixp4xx_eth_driver = { .of_match_table = of_match_ptr(ixp4xx_eth_of_match), }, .probe = ixp4xx_eth_probe, - .remove = ixp4xx_eth_remove, + .remove_new = ixp4xx_eth_remove, }; module_platform_driver(ixp4xx_eth_driver); diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index 2513be6d4e11..cd8cf08477ec 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -1030,7 +1030,7 @@ static int fjes_poll(struct napi_struct *napi, int budget) } if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) { - napi_reschedule(napi); + napi_schedule(napi); } else { spin_lock(&hw->rx_status_lock); for (epidx = 0; epidx < hw->max_epid; epidx++) { diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 78f9d588f712..acd9c615d1f4 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -784,117 +784,21 @@ free_dst: return err; } -static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, - struct net_device *dev, - struct geneve_sock *gs4, - struct flowi4 *fl4, - const struct ip_tunnel_info *info, - __be16 dport, __be16 sport, - __u8 *full_tos) +static u8 geneve_get_dsfield(struct sk_buff *skb, struct net_device *dev, + const struct ip_tunnel_info *info, + bool *use_cache) { - bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct geneve_dev *geneve = netdev_priv(dev); - struct dst_cache *dst_cache; - struct rtable *rt = NULL; - __u8 tos; + u8 dsfield; - if (!gs4) - return ERR_PTR(-EIO); - - memset(fl4, 0, sizeof(*fl4)); - fl4->flowi4_mark = skb->mark; - fl4->flowi4_proto = 
IPPROTO_UDP; - fl4->daddr = info->key.u.ipv4.dst; - fl4->saddr = info->key.u.ipv4.src; - fl4->fl4_dport = dport; - fl4->fl4_sport = sport; - fl4->flowi4_flags = info->key.flow_flags; - - tos = info->key.tos; - if ((tos == 1) && !geneve->cfg.collect_md) { - tos = ip_tunnel_get_dsfield(ip_hdr(skb), skb); - use_cache = false; - } - fl4->flowi4_tos = RT_TOS(tos); - if (full_tos) - *full_tos = tos; - - dst_cache = (struct dst_cache *)&info->dst_cache; - if (use_cache) { - rt = dst_cache_get_ip4(dst_cache, &fl4->saddr); - if (rt) - return rt; - } - rt = ip_route_output_key(geneve->net, fl4); - if (IS_ERR(rt)) { - netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr); - return ERR_PTR(-ENETUNREACH); - } - if (rt->dst.dev == dev) { /* is this necessary? */ - netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr); - ip_rt_put(rt); - return ERR_PTR(-ELOOP); - } - if (use_cache) - dst_cache_set_ip4(dst_cache, &rt->dst, fl4->saddr); - return rt; -} - -#if IS_ENABLED(CONFIG_IPV6) -static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, - struct net_device *dev, - struct geneve_sock *gs6, - struct flowi6 *fl6, - const struct ip_tunnel_info *info, - __be16 dport, __be16 sport) -{ - bool use_cache = ip_tunnel_dst_cache_usable(skb, info); - struct geneve_dev *geneve = netdev_priv(dev); - struct dst_entry *dst = NULL; - struct dst_cache *dst_cache; - __u8 prio; - - if (!gs6) - return ERR_PTR(-EIO); - - memset(fl6, 0, sizeof(*fl6)); - fl6->flowi6_mark = skb->mark; - fl6->flowi6_proto = IPPROTO_UDP; - fl6->daddr = info->key.u.ipv6.dst; - fl6->saddr = info->key.u.ipv6.src; - fl6->fl6_dport = dport; - fl6->fl6_sport = sport; - - prio = info->key.tos; - if ((prio == 1) && !geneve->cfg.collect_md) { - prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb); - use_cache = false; - } - - fl6->flowlabel = ip6_make_flowinfo(prio, info->key.label); - dst_cache = (struct dst_cache *)&info->dst_cache; - if (use_cache) { - dst = dst_cache_get_ip6(dst_cache, &fl6->saddr); - if (dst) - return dst; - } - dst = ipv6_stub->ipv6_dst_lookup_flow(geneve->net, gs6->sock->sk, fl6, - NULL); - if (IS_ERR(dst)) { - netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr); - return ERR_PTR(-ENETUNREACH); - } - if (dst->dev == dev) { /* is this necessary? */ - netdev_dbg(dev, "circular route to %pI6\n", &fl6->daddr); - dst_release(dst); - return ERR_PTR(-ELOOP); + dsfield = info->key.tos; + if (dsfield == 1 && !geneve->cfg.collect_md) { + dsfield = ip_tunnel_get_dsfield(ip_hdr(skb), skb); + *use_cache = false; } - if (use_cache) - dst_cache_set_ip6(dst_cache, dst, &fl6->saddr); - return dst; + return dsfield; } -#endif static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct geneve_dev *geneve, @@ -904,19 +808,28 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct geneve_sock *gs4 = rcu_dereference(geneve->sock4); const struct ip_tunnel_key *key = &info->key; struct rtable *rt; - struct flowi4 fl4; - __u8 full_tos; + bool use_cache; __u8 tos, ttl; __be16 df = 0; + __be32 saddr; __be16 sport; int err; if (!pskb_inet_may_pull(skb)) return -EINVAL; + if (!gs4) + return -EIO; + + use_cache = ip_tunnel_dst_cache_usable(skb, info); + tos = geneve_get_dsfield(skb, dev, info, &use_cache); sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); - rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, - geneve->cfg.info.key.tp_dst, sport, &full_tos); + + rt = udp_tunnel_dst_lookup(skb, dev, geneve->net, 0, &saddr, + &info->key, + sport, geneve->cfg.info.key.tp_dst, tos, + use_cache ? 
+ (struct dst_cache *)&info->dst_cache : NULL); if (IS_ERR(rt)) return PTR_ERR(rt); @@ -939,8 +852,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, return -ENOMEM; } - unclone->key.u.ipv4.dst = fl4.saddr; - unclone->key.u.ipv4.src = fl4.daddr; + unclone->key.u.ipv4.dst = saddr; + unclone->key.u.ipv4.src = info->key.u.ipv4.dst; } if (!pskb_may_pull(skb, ETH_HLEN)) { @@ -954,13 +867,12 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, return -EMSGSIZE; } + tos = ip_tunnel_ecn_encap(tos, ip_hdr(skb), skb); if (geneve->cfg.collect_md) { - tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; } else { - tos = ip_tunnel_ecn_encap(full_tos, ip_hdr(skb), skb); if (geneve->cfg.ttl_inherit) ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb); else @@ -988,7 +900,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (unlikely(err)) return err; - udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr, + udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, saddr, info->key.u.ipv4.dst, tos, ttl, df, sport, geneve->cfg.info.key.tp_dst, !net_eq(geneve->net, dev_net(geneve->dev)), !(info->key.tun_flags & TUNNEL_CSUM)); @@ -1004,7 +916,8 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct geneve_sock *gs6 = rcu_dereference(geneve->sock6); const struct ip_tunnel_key *key = &info->key; struct dst_entry *dst = NULL; - struct flowi6 fl6; + struct in6_addr saddr; + bool use_cache; __u8 prio, ttl; __be16 sport; int err; @@ -1012,9 +925,18 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (!pskb_inet_may_pull(skb)) return -EINVAL; + if (!gs6) + return -EIO; + + use_cache = ip_tunnel_dst_cache_usable(skb, info); + prio = geneve_get_dsfield(skb, dev, info, &use_cache); sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); - dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info, - geneve->cfg.info.key.tp_dst, sport); + + dst = udp_tunnel6_dst_lookup(skb, dev, geneve->net, gs6->sock, 0, + &saddr, key, sport, + geneve->cfg.info.key.tp_dst, prio, + use_cache ? 
+ (struct dst_cache *)&info->dst_cache : NULL); if (IS_ERR(dst)) return PTR_ERR(dst); @@ -1036,8 +958,8 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, return -ENOMEM; } - unclone->key.u.ipv6.dst = fl6.saddr; - unclone->key.u.ipv6.src = fl6.daddr; + unclone->key.u.ipv6.dst = saddr; + unclone->key.u.ipv6.src = info->key.u.ipv6.dst; } if (!pskb_may_pull(skb, ETH_HLEN)) { @@ -1051,12 +973,10 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, return -EMSGSIZE; } + prio = ip_tunnel_ecn_encap(prio, ip_hdr(skb), skb); if (geneve->cfg.collect_md) { - prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; } else { - prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel), - ip_hdr(skb), skb); if (geneve->cfg.ttl_inherit) ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb); else @@ -1069,7 +989,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, return err; udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev, - &fl6.saddr, &fl6.daddr, prio, ttl, + &saddr, &key->u.ipv6.dst, prio, ttl, info->key.label, sport, geneve->cfg.info.key.tp_dst, !(info->key.tun_flags & TUNNEL_CSUM)); return 0; @@ -1137,35 +1057,54 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) if (ip_tunnel_info_af(info) == AF_INET) { struct rtable *rt; - struct flowi4 fl4; - struct geneve_sock *gs4 = rcu_dereference(geneve->sock4); + bool use_cache; + __be32 saddr; + u8 tos; + + if (!gs4) + return -EIO; + + use_cache = ip_tunnel_dst_cache_usable(skb, info); + tos = geneve_get_dsfield(skb, dev, info, &use_cache); sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); - rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, - geneve->cfg.info.key.tp_dst, sport, NULL); + rt = udp_tunnel_dst_lookup(skb, dev, geneve->net, 0, &saddr, + &info->key, + sport, geneve->cfg.info.key.tp_dst, + tos, + use_cache ? &info->dst_cache : NULL); if (IS_ERR(rt)) return PTR_ERR(rt); ip_rt_put(rt); - info->key.u.ipv4.src = fl4.saddr; + info->key.u.ipv4.src = saddr; #if IS_ENABLED(CONFIG_IPV6) } else if (ip_tunnel_info_af(info) == AF_INET6) { struct dst_entry *dst; - struct flowi6 fl6; - struct geneve_sock *gs6 = rcu_dereference(geneve->sock6); + struct in6_addr saddr; + bool use_cache; + u8 prio; + + if (!gs6) + return -EIO; + + use_cache = ip_tunnel_dst_cache_usable(skb, info); + prio = geneve_get_dsfield(skb, dev, info, &use_cache); sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); - dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info, - geneve->cfg.info.key.tp_dst, sport); + dst = udp_tunnel6_dst_lookup(skb, dev, geneve->net, gs6->sock, 0, + &saddr, &info->key, sport, + geneve->cfg.info.key.tp_dst, prio, + use_cache ? 
&info->dst_cache : NULL); if (IS_ERR(dst)) return PTR_ERR(dst); dst_release(dst); - info->key.u.ipv6.src = fl6.saddr; + info->key.u.ipv6.src = saddr; #endif } else { return -EINVAL; diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index acb20ad4e37e..b1919278e931 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -243,7 +243,8 @@ static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4, fl4->flowi4_oif = sk->sk_bound_dev_if; fl4->daddr = daddr; fl4->saddr = saddr; - fl4->flowi4_tos = RT_CONN_FLAGS(sk); + fl4->flowi4_tos = ip_sock_rt_tos(sk); + fl4->flowi4_scope = ip_sock_rt_scope(sk); fl4->flowi4_proto = sk->sk_protocol; return ip_route_output_key(sock_net(sk), fl4); @@ -629,7 +630,7 @@ static void __gtp_encap_destroy(struct sock *sk) gtp->sk0 = NULL; else gtp->sk1u = NULL; - udp_sk(sk)->encap_type = 0; + WRITE_ONCE(udp_sk(sk)->encap_type, 0); rcu_assign_sk_user_data(sk, NULL); release_sock(sk); sock_put(sk); @@ -681,7 +682,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb) netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk); - switch (udp_sk(sk)->encap_type) { + switch (READ_ONCE(udp_sk(sk)->encap_type)) { case UDP_ENCAP_GTP0: netdev_dbg(gtp->dev, "received GTP0 packet\n"); ret = gtp0_udp_encap_recv(gtp, skb); @@ -871,8 +872,9 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, skb_dst_update_pmtu_no_confirm(skb, mtu); - if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) && - mtu < ntohs(iph->tot_len)) { + if (iph->frag_off & htons(IP_DF) && + ((!skb_is_gso(skb) && skb->len > mtu) || + (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) { netdev_dbg(dev, "packet too big, fragmentation needed\n"); icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 9fb567524220..6ed38a3cdd73 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -427,8 +427,8 @@ out: * a block of 6pack data has been received, which can now be decapsulated * and sent on to some IP layer for further processing. */ -static void sixpack_receive_buf(struct tty_struct *tty, - const unsigned char *cp, const char *fp, int count) +static void sixpack_receive_buf(struct tty_struct *tty, const u8 *cp, + const u8 *fp, size_t count) { struct sixpack *sp; int count1; diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig index a94c7bd5db2e..25b1f929c422 100644 --- a/drivers/net/hamradio/Kconfig +++ b/drivers/net/hamradio/Kconfig @@ -94,8 +94,8 @@ config BAYCOM_SER_FDX driver, "BAYCOM ser12 half-duplex driver for AX.25" is the old driver and still provided in case this driver does not work with your serial interface chip. To configure the driver, use the sethdlc - utility available in the standard ax25 utilities package. For - information on the modems, see <http://www.baycom.de/> and + utility available in the standard ax25 utilities package. + For more information on the modems, see <file:Documentation/networking/device_drivers/hamradio/baycom.rst>. To compile this driver as a module, choose M here: the module @@ -112,8 +112,7 @@ config BAYCOM_SER_HDX still provided in case your serial interface chip does not work with the full-duplex driver. This driver is deprecated. To configure the driver, use the sethdlc utility available in the standard ax25 - utilities package. For information on the modems, see - <http://www.baycom.de/> and + utilities package. 
For more information on the modems, see <file:Documentation/networking/device_drivers/hamradio/baycom.rst>. To compile this driver as a module, choose M here: the module @@ -127,8 +126,8 @@ config BAYCOM_PAR This is a driver for Baycom style simple amateur radio modems that connect to a parallel interface. The driver supports the picpar and par96 designs. To configure the driver, use the sethdlc utility - available in the standard ax25 utilities package. For information on - the modems, see <http://www.baycom.de/> and the file + available in the standard ax25 utilities package. + For more information on the modems, see <file:Documentation/networking/device_drivers/hamradio/baycom.rst>. To compile this driver as a module, choose M here: the module @@ -142,8 +141,8 @@ config BAYCOM_EPP This is a driver for Baycom style simple amateur radio modems that connect to a parallel interface. The driver supports the EPP designs. To configure the driver, use the sethdlc utility available - in the standard ax25 utilities package. For information on the - modems, see <http://www.baycom.de/> and the file + in the standard ax25 utilities package. + For more information on the modems, see <file:Documentation/networking/device_drivers/hamradio/baycom.rst>. To compile this driver as a module, choose M here: the module diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 83ff882f5d97..ccfc83857c26 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -1074,7 +1074,7 @@ static int baycom_siocdevprivate(struct net_device *dev, struct ifreq *ifr, return 0; case HDLCDRVCTL_DRIVERNAME: - strncpy(hi.data.drivername, "baycom_epp", sizeof(hi.data.drivername)); + strscpy_pad(hi.data.drivername, "baycom_epp", sizeof(hi.data.drivername)); break; case HDLCDRVCTL_GETMODE: @@ -1091,7 +1091,7 @@ static int baycom_siocdevprivate(struct net_device *dev, struct ifreq *ifr, return baycom_setmode(bc, hi.data.modename); case HDLCDRVCTL_MODELIST: - strncpy(hi.data.modename, "intclk,extclk,intmodem,extmodem,divider=x", + strscpy_pad(hi.data.modename, "intclk,extclk,intmodem,extmodem,divider=x", sizeof(hi.data.modename)); break; diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index c251e04ae047..5f38a002bd9e 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -874,8 +874,8 @@ static int mkiss_ioctl(struct tty_struct *tty, unsigned int cmd, * a block of data has been received, which can now be decapsulated * and sent on to the AX.25 layer for further processing. */ -static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp, - const char *fp, int count) +static void mkiss_receive_buf(struct tty_struct *tty, const u8 *cp, + const u8 *fp, size_t count) { struct mkiss *ax = mkiss_get(tty); diff --git a/drivers/net/hyperv/Kconfig b/drivers/net/hyperv/Kconfig index ca7bf7f897d3..c8cbd85adcf9 100644 --- a/drivers/net/hyperv/Kconfig +++ b/drivers/net/hyperv/Kconfig @@ -3,5 +3,6 @@ config HYPERV_NET tristate "Microsoft Hyper-V virtual network driver" depends on HYPERV select UCS2_STRING + select NLS help Select this option to enable the Hyper-V virtual network driver. 
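The baycom_epp ioctl conversion above replaces strncpy() with strscpy_pad(). The distinction: strncpy() zero-pads the destination but leaves it without a NUL terminator when the source fills the whole buffer, while strscpy_pad() guarantees termination and still zeroes the tail, which matters for fixed-size fields copied out to userspace. A minimal sketch of the pattern, using a hypothetical struct in place of the hdlcdrv ioctl data:

#include <linux/string.h>

struct drv_ioctl_data {
        char drivername[16];
};

static void fill_drivername(struct drv_ioctl_data *d)
{
        /* strncpy() would leave drivername unterminated if the source
         * reached 16 bytes; strscpy_pad() always terminates and also
         * zero-fills the remaining bytes, so no kernel stack contents
         * leak when the struct is copied to userspace.
         */
        strscpy_pad(d->drivername, "baycom_epp", sizeof(d->drivername));
}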
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index c9dd69dbe1b8..810977952f95 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -16,6 +16,7 @@ #include <linux/hyperv.h> #include <linux/rndis.h> #include <linux/jhash.h> +#include <net/xdp.h> /* RSS related */ #define OID_GEN_RECEIVE_SCALE_CAPABILITIES 0x00010203 /* query only */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 82e9796c8f5e..1dafa44155d0 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -851,7 +851,7 @@ static void netvsc_send_completion(struct net_device *ndev, msglen); return; } - fallthrough; + break; case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE: if (msglen < sizeof(struct nvsp_message_header) + @@ -860,7 +860,7 @@ static void netvsc_send_completion(struct net_device *ndev, msglen); return; } - fallthrough; + break; case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE: if (msglen < sizeof(struct nvsp_message_header) + @@ -869,7 +869,7 @@ static void netvsc_send_completion(struct net_device *ndev, msglen); return; } - fallthrough; + break; case NVSP_MSG5_TYPE_SUBCHANNEL: if (msglen < sizeof(struct nvsp_message_header) + @@ -878,10 +878,6 @@ static void netvsc_send_completion(struct net_device *ndev, msglen); return; } - /* Copy the response back */ - memcpy(&net_device->channel_init_pkt, nvsp_packet, - sizeof(struct nvsp_message)); - complete(&net_device->channel_init_wait); break; case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE: @@ -904,13 +900,19 @@ static void netvsc_send_completion(struct net_device *ndev, netvsc_send_tx_complete(ndev, net_device, incoming_channel, desc, budget); - break; + return; default: netdev_err(ndev, "Unknown send completion type %d received!!\n", nvsp_packet->hdr.msg_type); + return; } + + /* Copy the response back */ + memcpy(&net_device->channel_init_pkt, nvsp_packet, + sizeof(struct nvsp_message)); + complete(&net_device->channel_init_wait); } static u32 netvsc_get_next_send_section(struct netvsc_device *net_device) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 3ba3c8fb28a5..706ea5263e87 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -2206,9 +2206,6 @@ static int netvsc_vf_join(struct net_device *vf_netdev, goto upper_link_failed; } - /* set slave flag before open to prevent IPv6 addrconf */ - vf_netdev->flags |= IFF_SLAVE; - schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); call_netdevice_notifiers(NETDEV_JOIN, vf_netdev); @@ -2315,16 +2312,18 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) } - /* Fallback path to check synthetic vf with - * help of mac addr + /* Fallback path to check synthetic vf with help of mac addr. + * Because this function can be called before vf_netdev is + * initialized (NETDEV_POST_INIT) when its perm_addr has not been copied + * from dev_addr, also try to match to its dev_addr. + * Note: On Hyper-V and Azure, it's not possible to set a MAC address + * on a VF that matches the MAC of an unrelated NETVSC device. 
*/ list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { ndev = hv_get_drvdata(ndev_ctx->device_ctx); - if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) { - netdev_notice(vf_netdev, - "falling back to mac addr based matching\n"); + if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr) || + ether_addr_equal(vf_netdev->dev_addr, ndev->perm_addr)) return ndev; - } } netdev_notice(vf_netdev, @@ -2332,6 +2331,19 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) return NULL; } +static int netvsc_prepare_bonding(struct net_device *vf_netdev) +{ + struct net_device *ndev; + + ndev = get_netvsc_byslot(vf_netdev); + if (!ndev) + return NOTIFY_DONE; + + /* set slave flag before open to prevent IPv6 addrconf */ + vf_netdev->flags |= IFF_SLAVE; + return NOTIFY_DONE; +} + static int netvsc_register_vf(struct net_device *vf_netdev) { struct net_device_context *net_device_ctx; @@ -2531,15 +2543,6 @@ static int netvsc_probe(struct hv_device *dev, goto devinfo_failed; } - nvdev = rndis_filter_device_add(dev, device_info); - if (IS_ERR(nvdev)) { - ret = PTR_ERR(nvdev); - netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); - goto rndis_failed; - } - - eth_hw_addr_set(net, device_info->mac_adr); - /* We must get rtnl lock before scheduling nvdev->subchan_work, * otherwise netvsc_subchan_work() can get rtnl lock first and wait * all subchannels to show up, but that may not happen because @@ -2547,9 +2550,23 @@ static int netvsc_probe(struct hv_device *dev, * -> ... -> device_add() -> ... -> __device_attach() can't get * the device lock, so all the subchannels can't be processed -- * finally netvsc_subchan_work() hangs forever. + * + * The rtnl lock also needs to be held before rndis_filter_device_add() + * which advertises nvsp_2_vsc_capability / sriov bit, and triggers + * VF NIC offering and registering. If VF NIC finished register_netdev() + * earlier it may cause name based config failure. 
*/ rtnl_lock(); + nvdev = rndis_filter_device_add(dev, device_info); + if (IS_ERR(nvdev)) { + ret = PTR_ERR(nvdev); + netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); + goto rndis_failed; + } + + eth_hw_addr_set(net, device_info->mac_adr); + if (nvdev->num_chn > 1) schedule_work(&nvdev->subchan_work); @@ -2586,9 +2603,9 @@ static int netvsc_probe(struct hv_device *dev, return 0; register_failed: - rtnl_unlock(); rndis_filter_device_remove(dev, nvdev); rndis_failed: + rtnl_unlock(); netvsc_devinfo_put(device_info); devinfo_failed: free_percpu(net_device_ctx->vf_stats); @@ -2753,6 +2770,8 @@ static int netvsc_netdev_event(struct notifier_block *this, return NOTIFY_DONE; switch (event) { + case NETDEV_POST_INIT: + return netvsc_prepare_bonding(event_dev); case NETDEV_REGISTER: return netvsc_register_vf(event_dev); case NETDEV_UNREGISTER: @@ -2788,12 +2807,17 @@ static int __init netvsc_drv_init(void) } netvsc_ring_bytes = ring_size * PAGE_SIZE; + register_netdevice_notifier(&netvsc_netdev_notifier); + ret = vmbus_driver_register(&netvsc_drv); if (ret) - return ret; + goto err_vmbus_reg; - register_netdevice_notifier(&netvsc_netdev_notifier); return 0; + +err_vmbus_reg: + unregister_netdevice_notifier(&netvsc_netdev_notifier); + return ret; } MODULE_LICENSE("GPL"); diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index a03490ba2e5b..cc7ddc40020f 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -1162,9 +1162,10 @@ static int adf7242_stats_show(struct seq_file *file, void *offset) static void adf7242_debugfs_init(struct adf7242_local *lp) { - char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "adf7242-"; + char debugfs_dir_name[DNAME_INLINE_LEN + 1]; - strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN); + snprintf(debugfs_dir_name, sizeof(debugfs_dir_name), + "adf7242-%s", dev_name(&lp->spi->dev)); lp->debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL); diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index f9b10e84de06..4ec0dab38872 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -57,7 +57,6 @@ #include <linux/io.h> #include <linux/kfifo.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/module.h> #include <linux/mutex.h> @@ -2741,7 +2740,6 @@ static int ca8210_register_ext_clock(struct spi_device *spi) struct device_node *np = spi->dev.of_node; struct ca8210_priv *priv = spi_get_drvdata(spi); struct ca8210_platform_data *pdata = spi->dev.platform_data; - int ret = 0; if (!np) return -EFAULT; @@ -2758,18 +2756,8 @@ static int ca8210_register_ext_clock(struct spi_device *spi) dev_crit(&spi->dev, "Failed to register external clk\n"); return PTR_ERR(priv->clk); } - ret = of_clk_add_provider(np, of_clk_src_simple_get, priv->clk); - if (ret) { - clk_unregister(priv->clk); - dev_crit( - &spi->dev, - "Failed to register external clock as clock provider\n" - ); - } else { - dev_info(&spi->dev, "External clock set as clock provider\n"); - } - return ret; + return of_clk_add_provider(np, of_clk_src_simple_get, priv->clk); } /** @@ -2781,8 +2769,8 @@ static void ca8210_unregister_ext_clock(struct spi_device *spi) { struct ca8210_priv *priv = spi_get_drvdata(spi); - if (!priv->clk) - return + if (IS_ERR_OR_NULL(priv->clk)) + return; of_clk_del_provider(spi->dev.of_node); clk_unregister(priv->clk); diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 78253ad57b2e..2c1b5def4a0b 100644 
--- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -454,5 +454,6 @@ static void __exit ifb_cleanup_module(void) module_init(ifb_init_module); module_exit(ifb_cleanup_module); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intermediate Functional Block (ifb) netdevice driver for sharing of resources and ingress packet queuing"); MODULE_AUTHOR("Jamal Hadi Salim"); MODULE_ALIAS_RTNL_LINK("ifb"); diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c index 6a2f2fc2f501..da853353a5c7 100644 --- a/drivers/net/ipa/ipa_main.c +++ b/drivers/net/ipa/ipa_main.c @@ -13,8 +13,8 @@ #include <linux/firmware.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_address.h> +#include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/firmware/qcom/qcom_scm.h> #include <linux/soc/qcom/mdt_loader.h> diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c index 921eecf3eff6..e223886123ce 100644 --- a/drivers/net/ipa/ipa_power.c +++ b/drivers/net/ipa/ipa_power.c @@ -67,7 +67,7 @@ struct ipa_power { spinlock_t spinlock; /* used with STOPPED/STARTED power flags */ DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT); u32 interconnect_count; - struct icc_bulk_data interconnect[]; + struct icc_bulk_data interconnect[] __counted_by(interconnect_count); }; /* Initialize interconnects required for IPA operation */ @@ -324,15 +324,12 @@ void ipa_power_retention(struct ipa *ipa, bool enable) { static const char fmt[] = "{ class: bcm, res: ipa_pc, val: %c }"; struct ipa_power *power = ipa->power; - char buf[36]; /* Exactly enough for fmt[]; size a multiple of 4 */ int ret; if (!power->qmp) return; /* Not needed on this platform */ - (void)snprintf(buf, sizeof(buf), fmt, enable ? '1' : '0'); - - ret = qmp_send(power->qmp, buf, sizeof(buf)); + ret = qmp_send(power->qmp, fmt, enable ? '1' : '0'); if (ret) dev_err(power->dev, "error %d sending QMP %sable request\n", ret, enable ? 
"en" : "dis"); diff --git a/drivers/net/ipa/reg/gsi_reg-v5.0.c b/drivers/net/ipa/reg/gsi_reg-v5.0.c index d7b81a36d673..145eb0bd096d 100644 --- a/drivers/net/ipa/reg/gsi_reg-v5.0.c +++ b/drivers/net/ipa/reg/gsi_reg-v5.0.c @@ -78,7 +78,7 @@ REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0, 0x0001c000 + 0x12000 * GSI_EE_AP, 0x80); static const u32 reg_ev_ch_e_cntxt_1_fmask[] = { - [R_LENGTH] = GENMASK(19, 0), + [R_LENGTH] = GENMASK(23, 0), }; REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1, diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index c0c49f181367..2d5b021b4ea6 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -411,7 +411,7 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h, return addr; } -static int ipvlan_process_v4_outbound(struct sk_buff *skb) +static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb) { const struct iphdr *ip4h = ip_hdr(skb); struct net_device *dev = skb->dev; @@ -441,25 +441,23 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb) err = ip_local_out(net, skb->sk, skb); if (unlikely(net_xmit_eval(err))) - dev->stats.tx_errors++; + DEV_STATS_INC(dev, tx_errors); else ret = NET_XMIT_SUCCESS; goto out; err: - dev->stats.tx_errors++; + DEV_STATS_INC(dev, tx_errors); kfree_skb(skb); out: return ret; } #if IS_ENABLED(CONFIG_IPV6) -static int ipvlan_process_v6_outbound(struct sk_buff *skb) + +static noinline_for_stack int +ipvlan_route_v6_outbound(struct net_device *dev, struct sk_buff *skb) { const struct ipv6hdr *ip6h = ipv6_hdr(skb); - struct net_device *dev = skb->dev; - struct net *net = dev_net(dev); - struct dst_entry *dst; - int err, ret = NET_XMIT_DROP; struct flowi6 fl6 = { .flowi6_oif = dev->ifindex, .daddr = ip6h->daddr, @@ -469,27 +467,38 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) .flowi6_mark = skb->mark, .flowi6_proto = ip6h->nexthdr, }; + struct dst_entry *dst; + int err; - dst = ip6_route_output(net, NULL, &fl6); - if (dst->error) { - ret = dst->error; + dst = ip6_route_output(dev_net(dev), NULL, &fl6); + err = dst->error; + if (err) { dst_release(dst); - goto err; + return err; } skb_dst_set(skb, dst); + return 0; +} + +static int ipvlan_process_v6_outbound(struct sk_buff *skb) +{ + struct net_device *dev = skb->dev; + int err, ret = NET_XMIT_DROP; + + err = ipvlan_route_v6_outbound(dev, skb); + if (unlikely(err)) { + DEV_STATS_INC(dev, tx_errors); + kfree_skb(skb); + return err; + } memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); - err = ip6_local_out(net, skb->sk, skb); + err = ip6_local_out(dev_net(dev), skb->sk, skb); if (unlikely(net_xmit_eval(err))) - dev->stats.tx_errors++; + DEV_STATS_INC(dev, tx_errors); else ret = NET_XMIT_SUCCESS; - goto out; -err: - dev->stats.tx_errors++; - kfree_skb(skb); -out: return ret; } #else diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 1b55928e89b8..57c79f5f2991 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -324,6 +324,7 @@ static void ipvlan_get_stats64(struct net_device *dev, s->rx_dropped = rx_errs; s->tx_dropped = tx_drps; } + s->tx_errors = DEV_STATS_READ(dev, tx_errors); } static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 144ec756c796..9663050a852d 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -518,14 +518,8 @@ static void macsec_count_tx(struct sk_buff *skb, struct 
macsec_tx_sc *tx_sc, static void count_tx(struct net_device *dev, int ret, int len) { - if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { - struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats); - - u64_stats_update_begin(&stats->syncp); - u64_stats_inc(&stats->tx_packets); - u64_stats_add(&stats->tx_bytes, len); - u64_stats_update_end(&stats->syncp); - } + if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) + dev_sw_netstats_tx_add(dev, 1, len); } static void macsec_encrypt_done(void *data, int err) @@ -827,12 +821,7 @@ static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len) static void count_rx(struct net_device *dev, int len) { - struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats); - - u64_stats_update_begin(&stats->syncp); - u64_stats_inc(&stats->rx_packets); - u64_stats_add(&stats->rx_bytes, len); - u64_stats_update_end(&stats->syncp); + dev_sw_netstats_rx_add(dev, len); } static void macsec_decrypt_done(void *data, int err) @@ -1341,8 +1330,7 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len) struct crypto_aead *tfm; int ret; - /* Pick a sync gcm(aes) cipher to ensure order is preserved. */ - tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC); + tfm = crypto_alloc_aead("gcm(aes)", 0, 0); if (IS_ERR(tfm)) return tfm; @@ -2395,6 +2383,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info) ctx.sa.assoc_num = assoc_num; ctx.sa.tx_sa = tx_sa; + ctx.sa.update_pn = !!prev_pn.full64; ctx.secy = secy; ret = macsec_offload(ops->mdo_upd_txsa, &ctx); @@ -2488,6 +2477,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info) ctx.sa.assoc_num = assoc_num; ctx.sa.rx_sa = rx_sa; + ctx.sa.update_pn = !!prev_pn.full64; ctx.secy = secy; ret = macsec_offload(ops->mdo_upd_rxsa, &ctx); @@ -3667,9 +3657,9 @@ static void macsec_get_stats64(struct net_device *dev, dev_fetch_sw_netstats(s, dev->tstats); - s->rx_dropped = atomic_long_read(&dev->stats.__rx_dropped); - s->tx_dropped = atomic_long_read(&dev->stats.__tx_dropped); - s->rx_errors = atomic_long_read(&dev->stats.__rx_errors); + s->rx_dropped = DEV_STATS_READ(dev, rx_dropped); + s->tx_dropped = DEV_STATS_READ(dev, tx_dropped); + s->rx_errors = DEV_STATS_READ(dev, rx_errors); } static int macsec_get_iflink(const struct net_device *dev) @@ -4240,6 +4230,18 @@ static struct net *macsec_get_link_net(const struct net_device *dev) return dev_net(macsec_priv(dev)->real_dev); } +struct net_device *macsec_get_real_dev(const struct net_device *dev) +{ + return macsec_priv(dev)->real_dev; +} +EXPORT_SYMBOL_GPL(macsec_get_real_dev); + +bool macsec_netdev_is_offloaded(struct net_device *dev) +{ + return macsec_is_offloaded(macsec_priv(dev)); +} +EXPORT_SYMBOL_GPL(macsec_netdev_is_offloaded); + static size_t macsec_get_size(const struct net_device *dev) { return nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */ diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index ed908165a8b4..c8da94af4161 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -780,7 +780,7 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change) if (dev->flags & IFF_UP) { if (change & IFF_ALLMULTI) dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); - if (change & IFF_PROMISC) + if (!macvlan_passthru(vlan->port) && change & IFF_PROMISC) dev_set_promiscuity(lowerdev, dev->flags & IFF_PROMISC ? 
1 : -1); @@ -868,31 +868,24 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu) return 0; } -static int macvlan_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int macvlan_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *cfg) { struct net_device *real_dev = macvlan_dev_real_dev(dev); - const struct net_device_ops *ops = real_dev->netdev_ops; - struct ifreq ifrr; - int err = -EOPNOTSUPP; - strscpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ); - ifrr.ifr_ifru = ifr->ifr_ifru; + return generic_hwtstamp_get_lower(real_dev, cfg); +} - switch (cmd) { - case SIOCSHWTSTAMP: - if (!net_eq(dev_net(dev), &init_net)) - break; - fallthrough; - case SIOCGHWTSTAMP: - if (netif_device_present(real_dev) && ops->ndo_eth_ioctl) - err = ops->ndo_eth_ioctl(real_dev, &ifrr, cmd); - break; - } +static int macvlan_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) +{ + struct net_device *real_dev = macvlan_dev_real_dev(dev); - if (!err) - ifr->ifr_ifru = ifrr.ifr_ifru; + if (!net_eq(dev_net(dev), &init_net)) + return -EOPNOTSUPP; - return err; + return generic_hwtstamp_set_lower(real_dev, cfg, extack); } /* @@ -1193,7 +1186,6 @@ static const struct net_device_ops macvlan_netdev_ops = { .ndo_stop = macvlan_stop, .ndo_start_xmit = macvlan_start_xmit, .ndo_change_mtu = macvlan_change_mtu, - .ndo_eth_ioctl = macvlan_eth_ioctl, .ndo_fix_features = macvlan_fix_features, .ndo_change_rx_flags = macvlan_change_rx_flags, .ndo_set_mac_address = macvlan_set_mac_address, @@ -1212,6 +1204,8 @@ static const struct net_device_ops macvlan_netdev_ops = { #endif .ndo_get_iflink = macvlan_dev_get_iflink, .ndo_features_check = passthru_features_check, + .ndo_hwtstamp_get = macvlan_hwtstamp_get, + .ndo_hwtstamp_set = macvlan_hwtstamp_set, }; static void macvlan_dev_free(struct net_device *dev) diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index bddcc127812e..29a5929d48e5 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -250,5 +250,6 @@ static void __exit macvtap_exit(void) module_exit(macvtap_exit); MODULE_ALIAS_RTNL_LINK("macvtap"); +MODULE_DESCRIPTION("MAC-VLAN based tap driver"); MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/mctp/Kconfig b/drivers/net/mctp/Kconfig index dc71657d9184..ce9d2d2ccf3b 100644 --- a/drivers/net/mctp/Kconfig +++ b/drivers/net/mctp/Kconfig @@ -33,6 +33,15 @@ config MCTP_TRANSPORT_I2C from DMTF specification DSP0237. A MCTP protocol network device is created for each I2C bus that has been assigned a mctp-i2c device. +config MCTP_TRANSPORT_I3C + tristate "MCTP I3C transport" + depends on I3C + help + Provides a driver to access MCTP devices over I3C transport, + from DMTF specification DSP0233. + A MCTP protocol network device is created for each I3C bus + having a "mctp-controller" devicetree property. 
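The macsec count_tx()/count_rx() changes above replace open-coded per-CPU counter updates with the dev_sw_netstats_tx_add()/dev_sw_netstats_rx_add() helpers. Judging from the removed lines, the receive-side helper performs essentially the following sequence; this is an illustrative sketch, not the helper's actual source:

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

static void count_rx_open_coded(struct net_device *dev, unsigned int len)
{
        /* Update this CPU's software netstats slot. The u64_stats
         * syncp sequence keeps the 64-bit counters consistent for
         * readers on 32-bit systems.
         */
        struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

        u64_stats_update_begin(&stats->syncp);
        u64_stats_inc(&stats->rx_packets);
        u64_stats_add(&stats->rx_bytes, len);
        u64_stats_update_end(&stats->syncp);
}

Centralising the block in one helper keeps the locking discipline in a single place instead of repeating it per driver.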
+ endmenu endif diff --git a/drivers/net/mctp/Makefile b/drivers/net/mctp/Makefile index 1ca3e6028f77..e1cb99ced54a 100644 --- a/drivers/net/mctp/Makefile +++ b/drivers/net/mctp/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_MCTP_SERIAL) += mctp-serial.o obj-$(CONFIG_MCTP_TRANSPORT_I2C) += mctp-i2c.o +obj-$(CONFIG_MCTP_TRANSPORT_I3C) += mctp-i3c.o diff --git a/drivers/net/mctp/mctp-i3c.c b/drivers/net/mctp/mctp-i3c.c new file mode 100644 index 000000000000..8e989c157caa --- /dev/null +++ b/drivers/net/mctp/mctp-i3c.c @@ -0,0 +1,755 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Implements DMTF specification + * "DSP0233 Management Component Transport Protocol (MCTP) I3C Transport + * Binding" + * https://www.dmtf.org/sites/default/files/standards/documents/DSP0233_1.0.0.pdf + * + * Copyright (c) 2023 Code Construct + */ + +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/i3c/device.h> +#include <linux/i3c/master.h> +#include <linux/if_arp.h> +#include <asm/unaligned.h> +#include <net/mctp.h> +#include <net/mctpdevice.h> + +#define MCTP_I3C_MAXBUF 65536 +/* 48 bit Provisioned Id */ +#define PID_SIZE 6 + +/* 64 byte payload, 4 byte MCTP header */ +static const int MCTP_I3C_MINMTU = 64 + 4; +/* One byte less to allow for the PEC */ +static const int MCTP_I3C_MAXMTU = MCTP_I3C_MAXBUF - 1; +/* 4 byte MCTP header, no data, 1 byte PEC */ +static const int MCTP_I3C_MINLEN = 4 + 1; + +/* Sufficient for 64kB at min mtu */ +static const int MCTP_I3C_TX_QUEUE_LEN = 1100; + +/* Somewhat arbitrary */ +static const int MCTP_I3C_IBI_SLOTS = 8; + +/* Mandatory Data Byte in an IBI, from DSP0233 */ +#define I3C_MDB_MCTP 0xAE +/* From MIPI Device Characteristics Register (DCR) Assignments */ +#define I3C_DCR_MCTP 0xCC + +static const char *MCTP_I3C_OF_PROP = "mctp-controller"; + +/* List of mctp_i3c_busdev */ +static LIST_HEAD(busdevs); +/* Protects busdevs, as well as mctp_i3c_bus.devs lists */ +static DEFINE_MUTEX(busdevs_lock); + +struct mctp_i3c_bus { + struct net_device *ndev; + + struct task_struct *tx_thread; + wait_queue_head_t tx_wq; + /* tx_lock protects tx_skb and devs */ + spinlock_t tx_lock; + /* Next skb to transmit */ + struct sk_buff *tx_skb; + /* Scratch buffer for xmit */ + u8 tx_scratch[MCTP_I3C_MAXBUF]; + + /* Element of busdevs */ + struct list_head list; + + /* Provisioned ID of our controller */ + u64 pid; + + struct i3c_bus *bus; + /* Head of mctp_i3c_device.list. Protected by busdevs_lock */ + struct list_head devs; +}; + +struct mctp_i3c_device { + struct i3c_device *i3c; + struct mctp_i3c_bus *mbus; + struct list_head list; /* Element of mctp_i3c_bus.devs */ + + /* Held while tx_thread is using this device */ + struct mutex lock; + + /* Whether BCR indicates MDB is present in IBI */ + bool have_mdb; + /* I3C dynamic address */ + u8 addr; + /* Maximum read length */ + u16 mrl; + /* Maximum write length */ + u16 mwl; + /* Provisioned ID */ + u64 pid; +}; + +/* We synthesise a mac header using the Provisioned ID. + * Used to pass dest to mctp_i3c_start_xmit. 
+ */ +struct mctp_i3c_internal_hdr { + u8 dest[PID_SIZE]; + u8 source[PID_SIZE]; +} __packed; + +static int mctp_i3c_read(struct mctp_i3c_device *mi) +{ + struct i3c_priv_xfer xfer = { .rnw = 1, .len = mi->mrl }; + struct net_device_stats *stats = &mi->mbus->ndev->stats; + struct mctp_i3c_internal_hdr *ihdr = NULL; + struct sk_buff *skb = NULL; + struct mctp_skb_cb *cb; + int net_status, rc; + u8 pec, addr; + + skb = netdev_alloc_skb(mi->mbus->ndev, + mi->mrl + sizeof(struct mctp_i3c_internal_hdr)); + if (!skb) { + stats->rx_dropped++; + rc = -ENOMEM; + goto err; + } + + skb->protocol = htons(ETH_P_MCTP); + /* Create a header for internal use */ + skb_reset_mac_header(skb); + ihdr = skb_put(skb, sizeof(struct mctp_i3c_internal_hdr)); + put_unaligned_be48(mi->pid, ihdr->source); + put_unaligned_be48(mi->mbus->pid, ihdr->dest); + skb_pull(skb, sizeof(struct mctp_i3c_internal_hdr)); + + xfer.data.in = skb_put(skb, mi->mrl); + + rc = i3c_device_do_priv_xfers(mi->i3c, &xfer, 1); + if (rc < 0) + goto err; + + if (WARN_ON_ONCE(xfer.len > mi->mrl)) { + /* Bad i3c bus driver */ + rc = -EIO; + goto err; + } + if (xfer.len < MCTP_I3C_MINLEN) { + stats->rx_length_errors++; + rc = -EIO; + goto err; + } + + /* check PEC, including address byte */ + addr = mi->addr << 1 | 1; + pec = i2c_smbus_pec(0, &addr, 1); + pec = i2c_smbus_pec(pec, xfer.data.in, xfer.len - 1); + if (pec != ((u8 *)xfer.data.in)[xfer.len - 1]) { + stats->rx_crc_errors++; + rc = -EINVAL; + goto err; + } + + /* Remove PEC */ + skb_trim(skb, xfer.len - 1); + + cb = __mctp_cb(skb); + cb->halen = PID_SIZE; + put_unaligned_be48(mi->pid, cb->haddr); + + net_status = netif_rx(skb); + + if (net_status == NET_RX_SUCCESS) { + stats->rx_packets++; + stats->rx_bytes += xfer.len - 1; + } else { + stats->rx_dropped++; + } + + return 0; +err: + kfree_skb(skb); + return rc; +} + +static void mctp_i3c_ibi_handler(struct i3c_device *i3c, + const struct i3c_ibi_payload *payload) +{ + struct mctp_i3c_device *mi = i3cdev_get_drvdata(i3c); + + if (WARN_ON_ONCE(!mi)) + return; + + if (mi->have_mdb) { + if (payload->len > 0) { + if (((u8 *)payload->data)[0] != I3C_MDB_MCTP) { + /* Not a mctp-i3c interrupt, ignore it */ + return; + } + } else { + /* The BCR advertised a Mandatory Data Byte but the + * device didn't send one. + */ + dev_warn_once(i3cdev_to_dev(i3c), "IBI with missing MDB"); + } + } + + mctp_i3c_read(mi); +} + +static int mctp_i3c_setup(struct mctp_i3c_device *mi) +{ + const struct i3c_ibi_setup ibi = { + .max_payload_len = 1, + .num_slots = MCTP_I3C_IBI_SLOTS, + .handler = mctp_i3c_ibi_handler, + }; + struct i3c_device_info info; + int rc; + + i3c_device_get_info(mi->i3c, &info); + mi->have_mdb = info.bcr & BIT(2); + mi->addr = info.dyn_addr; + mi->mwl = info.max_write_len; + mi->mrl = info.max_read_len; + mi->pid = info.pid; + + rc = i3c_device_request_ibi(mi->i3c, &ibi); + if (rc == -ENOTSUPP) { + /* This driver only supports In-Band Interrupt mode. + * Support for Polling Mode could be added if required. + * (ENOTSUPP is from the i3c layer, not EOPNOTSUPP). + */ + dev_warn(i3cdev_to_dev(mi->i3c), + "Failed, bus driver doesn't support In-Band Interrupts"); + goto err; + } else if (rc < 0) { + dev_err(i3cdev_to_dev(mi->i3c), + "Failed requesting IBI (%d)\n", rc); + goto err; + } + + rc = i3c_device_enable_ibi(mi->i3c); + if (rc < 0) { + /* Assume a driver supporting request_ibi also + * supports enable_ibi. 
+ */ + dev_err(i3cdev_to_dev(mi->i3c), "Failed enabling IBI (%d)\n", rc); + goto err_free_ibi; + } + + return 0; + +err_free_ibi: + i3c_device_free_ibi(mi->i3c); + +err: + return rc; +} + +/* Adds a new MCTP i3c_device to a bus */ +static int mctp_i3c_add_device(struct mctp_i3c_bus *mbus, + struct i3c_device *i3c) +__must_hold(&busdevs_lock) +{ + struct mctp_i3c_device *mi = NULL; + int rc; + + mi = kzalloc(sizeof(*mi), GFP_KERNEL); + if (!mi) { + rc = -ENOMEM; + goto err; + } + mi->mbus = mbus; + mi->i3c = i3c; + mutex_init(&mi->lock); + list_add(&mi->list, &mbus->devs); + + i3cdev_set_drvdata(i3c, mi); + rc = mctp_i3c_setup(mi); + if (rc < 0) + goto err_free; + + return 0; + +err_free: + list_del(&mi->list); + kfree(mi); + +err: + dev_warn(i3cdev_to_dev(i3c), "Error adding mctp-i3c device, %d\n", rc); + return rc; +} + +static int mctp_i3c_probe(struct i3c_device *i3c) +{ + struct mctp_i3c_bus *b = NULL, *mbus = NULL; + + /* Look for a known bus */ + mutex_lock(&busdevs_lock); + list_for_each_entry(b, &busdevs, list) + if (b->bus == i3c->bus) { + mbus = b; + break; + } + mutex_unlock(&busdevs_lock); + + if (!mbus) { + /* probably no "mctp-controller" property on the i3c bus */ + return -ENODEV; + } + + return mctp_i3c_add_device(mbus, i3c); +} + +static void mctp_i3c_remove_device(struct mctp_i3c_device *mi) +__must_hold(&busdevs_lock) +{ + /* Ensure the tx thread isn't using the device */ + mutex_lock(&mi->lock); + + /* Counterpart of mctp_i3c_setup */ + i3c_device_disable_ibi(mi->i3c); + i3c_device_free_ibi(mi->i3c); + + /* Counterpart of mctp_i3c_add_device */ + i3cdev_set_drvdata(mi->i3c, NULL); + list_del(&mi->list); + + /* Safe to unlock after removing from the list */ + mutex_unlock(&mi->lock); + kfree(mi); +} + +static void mctp_i3c_remove(struct i3c_device *i3c) +{ + struct mctp_i3c_device *mi = i3cdev_get_drvdata(i3c); + + /* We may have received a Bus Remove notify prior to device remove, + * so mi will already be removed. + */ + if (!mi) + return; + + mutex_lock(&busdevs_lock); + mctp_i3c_remove_device(mi); + mutex_unlock(&busdevs_lock); +} + +/* Returns the device for an address, with mi->lock held */ +static struct mctp_i3c_device * +mctp_i3c_lookup(struct mctp_i3c_bus *mbus, u64 pid) +{ + struct mctp_i3c_device *mi = NULL, *ret = NULL; + + mutex_lock(&busdevs_lock); + list_for_each_entry(mi, &mbus->devs, list) + if (mi->pid == pid) { + ret = mi; + mutex_lock(&mi->lock); + break; + } + mutex_unlock(&busdevs_lock); + return ret; +} + +static void mctp_i3c_xmit(struct mctp_i3c_bus *mbus, struct sk_buff *skb) +{ + struct net_device_stats *stats = &mbus->ndev->stats; + struct i3c_priv_xfer xfer = { .rnw = false }; + struct mctp_i3c_internal_hdr *ihdr = NULL; + struct mctp_i3c_device *mi = NULL; + unsigned int data_len; + u8 *data = NULL; + u8 addr, pec; + int rc = 0; + u64 pid; + + skb_pull(skb, sizeof(struct mctp_i3c_internal_hdr)); + data_len = skb->len; + + ihdr = (void *)skb_mac_header(skb); + + pid = get_unaligned_be48(ihdr->dest); + mi = mctp_i3c_lookup(mbus, pid); + if (!mi) { + /* I3C endpoint went away after the packet was enqueued? 
*/ + stats->tx_dropped++; + goto out; + } + + if (WARN_ON_ONCE(data_len + 1 > MCTP_I3C_MAXBUF)) + goto out; + + if (data_len + 1 > (unsigned int)mi->mwl) { + /* Route MTU was larger than supported by the endpoint */ + stats->tx_dropped++; + goto out; + } + + /* Need a linear buffer with space for the PEC */ + xfer.len = data_len + 1; + if (skb_tailroom(skb) >= 1) { + skb_put(skb, 1); + data = skb->data; + } else { + /* Otherwise need to copy the buffer */ + skb_copy_bits(skb, 0, mbus->tx_scratch, skb->len); + data = mbus->tx_scratch; + } + + /* PEC calculation */ + addr = mi->addr << 1; + pec = i2c_smbus_pec(0, &addr, 1); + pec = i2c_smbus_pec(pec, data, data_len); + data[data_len] = pec; + + xfer.data.out = data; + rc = i3c_device_do_priv_xfers(mi->i3c, &xfer, 1); + if (rc == 0) { + stats->tx_bytes += data_len; + stats->tx_packets++; + } else { + stats->tx_errors++; + } + +out: + if (mi) + mutex_unlock(&mi->lock); +} + +static int mctp_i3c_tx_thread(void *data) +{ + struct mctp_i3c_bus *mbus = data; + struct sk_buff *skb; + + for (;;) { + if (kthread_should_stop()) + break; + + spin_lock_bh(&mbus->tx_lock); + skb = mbus->tx_skb; + mbus->tx_skb = NULL; + spin_unlock_bh(&mbus->tx_lock); + + if (netif_queue_stopped(mbus->ndev)) + netif_wake_queue(mbus->ndev); + + if (skb) { + mctp_i3c_xmit(mbus, skb); + kfree_skb(skb); + } else { + wait_event_idle(mbus->tx_wq, + mbus->tx_skb || kthread_should_stop()); + } + } + + return 0; +} + +static netdev_tx_t mctp_i3c_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct mctp_i3c_bus *mbus = netdev_priv(ndev); + netdev_tx_t ret; + + spin_lock(&mbus->tx_lock); + netif_stop_queue(ndev); + if (mbus->tx_skb) { + dev_warn_ratelimited(&ndev->dev, "TX with queue stopped"); + ret = NETDEV_TX_BUSY; + } else { + mbus->tx_skb = skb; + ret = NETDEV_TX_OK; + } + spin_unlock(&mbus->tx_lock); + + if (ret == NETDEV_TX_OK) + wake_up(&mbus->tx_wq); + + return ret; +} + +static void mctp_i3c_bus_free(struct mctp_i3c_bus *mbus) +__must_hold(&busdevs_lock) +{ + struct mctp_i3c_device *mi = NULL, *tmp = NULL; + + if (mbus->tx_thread) { + kthread_stop(mbus->tx_thread); + mbus->tx_thread = NULL; + } + + /* Remove any child devices */ + list_for_each_entry_safe(mi, tmp, &mbus->devs, list) { + mctp_i3c_remove_device(mi); + } + + kfree_skb(mbus->tx_skb); + list_del(&mbus->list); +} + +static void mctp_i3c_ndo_uninit(struct net_device *ndev) +{ + struct mctp_i3c_bus *mbus = netdev_priv(ndev); + + /* Perform cleanup here to ensure there are no remaining references */ + mctp_i3c_bus_free(mbus); +} + +static int mctp_i3c_header_create(struct sk_buff *skb, struct net_device *dev, + unsigned short type, const void *daddr, + const void *saddr, unsigned int len) +{ + struct mctp_i3c_internal_hdr *ihdr; + + skb_push(skb, sizeof(struct mctp_i3c_internal_hdr)); + skb_reset_mac_header(skb); + ihdr = (void *)skb_mac_header(skb); + memcpy(ihdr->dest, daddr, PID_SIZE); + memcpy(ihdr->source, saddr, PID_SIZE); + return 0; +} + +static const struct net_device_ops mctp_i3c_ops = { + .ndo_start_xmit = mctp_i3c_start_xmit, + .ndo_uninit = mctp_i3c_ndo_uninit, +}; + +static const struct header_ops mctp_i3c_headops = { + .create = mctp_i3c_header_create, +}; + +static void mctp_i3c_net_setup(struct net_device *dev) +{ + dev->type = ARPHRD_MCTP; + + dev->mtu = MCTP_I3C_MAXMTU; + dev->min_mtu = MCTP_I3C_MINMTU; + dev->max_mtu = MCTP_I3C_MAXMTU; + dev->tx_queue_len = MCTP_I3C_TX_QUEUE_LEN; + + dev->hard_header_len = sizeof(struct mctp_i3c_internal_hdr); + dev->addr_len = PID_SIZE; + + 
dev->netdev_ops = &mctp_i3c_ops; + dev->header_ops = &mctp_i3c_headops; +} + +static bool mctp_i3c_is_mctp_controller(struct i3c_bus *bus) +{ + struct i3c_dev_desc *master = bus->cur_master; + + if (!master) + return false; + + return of_property_read_bool(master->common.master->dev.of_node, + MCTP_I3C_OF_PROP); +} + +/* Returns the Provisioned Id of a local bus master */ +static int mctp_i3c_bus_local_pid(struct i3c_bus *bus, u64 *ret_pid) +{ + struct i3c_dev_desc *master; + + master = bus->cur_master; + if (WARN_ON_ONCE(!master)) + return -ENOENT; + *ret_pid = master->info.pid; + + return 0; +} + +/* Returns an ERR_PTR on failure */ +static struct mctp_i3c_bus *mctp_i3c_bus_add(struct i3c_bus *bus) +__must_hold(&busdevs_lock) +{ + struct mctp_i3c_bus *mbus = NULL; + struct net_device *ndev = NULL; + char namebuf[IFNAMSIZ]; + u8 addr[PID_SIZE]; + int rc; + + if (!mctp_i3c_is_mctp_controller(bus)) + return ERR_PTR(-ENOENT); + + snprintf(namebuf, sizeof(namebuf), "mctpi3c%d", bus->id); + ndev = alloc_netdev(sizeof(*mbus), namebuf, NET_NAME_ENUM, + mctp_i3c_net_setup); + if (!ndev) { + rc = -ENOMEM; + goto err; + } + + mbus = netdev_priv(ndev); + mbus->ndev = ndev; + mbus->bus = bus; + INIT_LIST_HEAD(&mbus->devs); + list_add(&mbus->list, &busdevs); + + rc = mctp_i3c_bus_local_pid(bus, &mbus->pid); + if (rc < 0) { + dev_err(&ndev->dev, "No I3C PID available\n"); + goto err_free_uninit; + } + put_unaligned_be48(mbus->pid, addr); + dev_addr_set(ndev, addr); + + init_waitqueue_head(&mbus->tx_wq); + spin_lock_init(&mbus->tx_lock); + mbus->tx_thread = kthread_run(mctp_i3c_tx_thread, mbus, + "%s/tx", ndev->name); + if (IS_ERR(mbus->tx_thread)) { + dev_warn(&ndev->dev, "Error creating thread: %pe\n", + mbus->tx_thread); + rc = PTR_ERR(mbus->tx_thread); + mbus->tx_thread = NULL; + goto err_free_uninit; + } + + rc = mctp_register_netdev(ndev, NULL); + if (rc < 0) { + dev_warn(&ndev->dev, "netdev register failed: %d\n", rc); + goto err_free_netdev; + } + return mbus; + +err_free_uninit: + /* uninit will not get called if a netdev has not been registered, + * so we perform the same mbus cleanup manually. + */ + mctp_i3c_bus_free(mbus); + +err_free_netdev: + free_netdev(ndev); + +err: + return ERR_PTR(rc); +} + +static void mctp_i3c_bus_remove(struct mctp_i3c_bus *mbus) +__must_hold(&busdevs_lock) +{ + /* Unregister calls through to ndo_uninit -> mctp_i3c_bus_free() */ + mctp_unregister_netdev(mbus->ndev); + + free_netdev(mbus->ndev); + /* mbus is deallocated */ +} + +/* Removes all mctp-i3c busses */ +static void mctp_i3c_bus_remove_all(void) +{ + struct mctp_i3c_bus *mbus = NULL, *tmp = NULL; + + mutex_lock(&busdevs_lock); + list_for_each_entry_safe(mbus, tmp, &busdevs, list) { + mctp_i3c_bus_remove(mbus); + } + mutex_unlock(&busdevs_lock); +} + +/* Adds a i3c_bus if it isn't already in the busdevs list. + * Suitable as an i3c_for_each_bus_locked callback. + */ +static int mctp_i3c_bus_add_new(struct i3c_bus *bus, void *data) +{ + struct mctp_i3c_bus *mbus = NULL, *tmp = NULL; + bool exists = false; + + mutex_lock(&busdevs_lock); + list_for_each_entry_safe(mbus, tmp, &busdevs, list) + if (mbus->bus == bus) + exists = true; + + /* It is OK for a bus to already exist. 
That can occur due to + * the race in mod_init between notifier and for_each_bus + */ + if (!exists) + mctp_i3c_bus_add(bus); + mutex_unlock(&busdevs_lock); + return 0; +} + +static void mctp_i3c_notify_bus_remove(struct i3c_bus *bus) +{ + struct mctp_i3c_bus *mbus = NULL, *tmp; + + mutex_lock(&busdevs_lock); + list_for_each_entry_safe(mbus, tmp, &busdevs, list) + if (mbus->bus == bus) + mctp_i3c_bus_remove(mbus); + mutex_unlock(&busdevs_lock); +} + +static int mctp_i3c_notifier_call(struct notifier_block *nb, + unsigned long action, void *data) +{ + switch (action) { + case I3C_NOTIFY_BUS_ADD: + mctp_i3c_bus_add_new((struct i3c_bus *)data, NULL); + break; + case I3C_NOTIFY_BUS_REMOVE: + mctp_i3c_notify_bus_remove((struct i3c_bus *)data); + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block mctp_i3c_notifier = { + .notifier_call = mctp_i3c_notifier_call, +}; + +static const struct i3c_device_id mctp_i3c_ids[] = { + I3C_CLASS(I3C_DCR_MCTP, NULL), + { 0 }, +}; + +static struct i3c_driver mctp_i3c_driver = { + .driver = { + .name = "mctp-i3c", + }, + .probe = mctp_i3c_probe, + .remove = mctp_i3c_remove, + .id_table = mctp_i3c_ids, +}; + +static __init int mctp_i3c_mod_init(void) +{ + int rc; + + rc = i3c_register_notifier(&mctp_i3c_notifier); + if (rc < 0) { + i3c_driver_unregister(&mctp_i3c_driver); + return rc; + } + + i3c_for_each_bus_locked(mctp_i3c_bus_add_new, NULL); + + rc = i3c_driver_register(&mctp_i3c_driver); + if (rc < 0) + return rc; + + return 0; +} + +static __exit void mctp_i3c_mod_exit(void) +{ + int rc; + + i3c_driver_unregister(&mctp_i3c_driver); + + rc = i3c_unregister_notifier(&mctp_i3c_notifier); + if (rc < 0) + pr_warn("MCTP I3C could not unregister notifier, %d\n", rc); + + mctp_i3c_bus_remove_all(); +} + +module_init(mctp_i3c_mod_init); +module_exit(mctp_i3c_mod_exit); + +MODULE_DEVICE_TABLE(i3c, mctp_i3c_ids); +MODULE_DESCRIPTION("MCTP I3C device"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Matt Johnston <matt@codeconstruct.com.au>"); diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c index 9f9eaf896047..5bf6fdff701c 100644 --- a/drivers/net/mctp/mctp-serial.c +++ b/drivers/net/mctp/mctp-serial.c @@ -390,9 +390,8 @@ static void mctp_serial_push(struct mctp_serial *dev, unsigned char c) } } -static void mctp_serial_tty_receive_buf(struct tty_struct *tty, - const unsigned char *c, - const char *f, int len) +static void mctp_serial_tty_receive_buf(struct tty_struct *tty, const u8 *c, + const u8 *f, size_t len) { struct mctp_serial *dev = tty->disc_data; int i; diff --git a/drivers/net/mdio/acpi_mdio.c b/drivers/net/mdio/acpi_mdio.c index 4630dde01974..5d0f11f280cf 100644 --- a/drivers/net/mdio/acpi_mdio.c +++ b/drivers/net/mdio/acpi_mdio.c @@ -16,6 +16,7 @@ MODULE_AUTHOR("Calvin Johnson <calvin.johnson@oss.nxp.com>"); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ACPI MDIO bus (Ethernet PHY) accessors"); /** * __acpi_mdiobus_register - Register mii_bus and create PHYs from the ACPI ASL. 
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c index 1183ef5e203e..fd02f5cbc853 100644 --- a/drivers/net/mdio/fwnode_mdio.c +++ b/drivers/net/mdio/fwnode_mdio.c @@ -14,6 +14,7 @@ MODULE_AUTHOR("Calvin Johnson <calvin.johnson@oss.nxp.com>"); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("FWNODE MDIO bus (Ethernet PHY) accessors"); static struct pse_control * fwnode_find_pse_control(struct fwnode_handle *fwnode) diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c index c727103c8b05..c2170650415c 100644 --- a/drivers/net/mdio/mdio-aspeed.c +++ b/drivers/net/mdio/mdio-aspeed.c @@ -177,15 +177,13 @@ static int aspeed_mdio_probe(struct platform_device *pdev) return 0; } -static int aspeed_mdio_remove(struct platform_device *pdev) +static void aspeed_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = (struct mii_bus *)platform_get_drvdata(pdev); struct aspeed_mdio *ctx = bus->priv; reset_control_assert(ctx->reset); mdiobus_unregister(bus); - - return 0; } static const struct of_device_id aspeed_mdio_of_match[] = { @@ -200,10 +198,11 @@ static struct platform_driver aspeed_mdio_driver = { .of_match_table = aspeed_mdio_of_match, }, .probe = aspeed_mdio_probe, - .remove = aspeed_mdio_remove, + .remove_new = aspeed_mdio_remove, }; module_platform_driver(aspeed_mdio_driver); MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>"); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ASPEED MDIO bus controller"); diff --git a/drivers/net/mdio/mdio-bcm-iproc.c b/drivers/net/mdio/mdio-bcm-iproc.c index 77fc970cdfde..5a2d26c6afdc 100644 --- a/drivers/net/mdio/mdio-bcm-iproc.c +++ b/drivers/net/mdio/mdio-bcm-iproc.c @@ -168,14 +168,12 @@ err_iproc_mdio: return rc; } -static int iproc_mdio_remove(struct platform_device *pdev) +static void iproc_mdio_remove(struct platform_device *pdev) { struct iproc_mdio_priv *priv = platform_get_drvdata(pdev); mdiobus_unregister(priv->mii_bus); mdiobus_free(priv->mii_bus); - - return 0; } #ifdef CONFIG_PM_SLEEP @@ -210,7 +208,7 @@ static struct platform_driver iproc_mdio_driver = { #endif }, .probe = iproc_mdio_probe, - .remove = iproc_mdio_remove, + .remove_new = iproc_mdio_remove, }; module_platform_driver(iproc_mdio_driver); diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c index bfc9be23c973..e8cd8eef319b 100644 --- a/drivers/net/mdio/mdio-bcm-unimac.c +++ b/drivers/net/mdio/mdio-bcm-unimac.c @@ -296,15 +296,13 @@ out_clk_disable: return ret; } -static int unimac_mdio_remove(struct platform_device *pdev) +static void unimac_mdio_remove(struct platform_device *pdev) { struct unimac_mdio_priv *priv = platform_get_drvdata(pdev); mdiobus_unregister(priv->mii_bus); mdiobus_free(priv->mii_bus); clk_disable_unprepare(priv->clk); - - return 0; } static int __maybe_unused unimac_mdio_suspend(struct device *d) @@ -334,6 +332,8 @@ static SIMPLE_DEV_PM_OPS(unimac_mdio_pm_ops, unimac_mdio_suspend, unimac_mdio_resume); static const struct of_device_id unimac_mdio_ids[] = { + { .compatible = "brcm,asp-v2.1-mdio", }, + { .compatible = "brcm,asp-v2.0-mdio", }, { .compatible = "brcm,genet-mdio-v5", }, { .compatible = "brcm,genet-mdio-v4", }, { .compatible = "brcm,genet-mdio-v3", }, @@ -351,7 +351,7 @@ static struct platform_driver unimac_mdio_driver = { .pm = &unimac_mdio_pm_ops, }, .probe = unimac_mdio_probe, - .remove = unimac_mdio_remove, + .remove_new = unimac_mdio_remove, }; module_platform_driver(unimac_mdio_driver); diff --git a/drivers/net/mdio/mdio-bitbang.c b/drivers/net/mdio/mdio-bitbang.c 
index 81b7748c10ce..f88639297ff2 100644 --- a/drivers/net/mdio/mdio-bitbang.c +++ b/drivers/net/mdio/mdio-bitbang.c @@ -263,3 +263,4 @@ void free_mdio_bitbang(struct mii_bus *bus) EXPORT_SYMBOL(free_mdio_bitbang); MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Bitbanged MDIO buses"); diff --git a/drivers/net/mdio/mdio-gpio.c b/drivers/net/mdio/mdio-gpio.c index 0fb3c2de0845..897b88c50bbb 100644 --- a/drivers/net/mdio/mdio-gpio.c +++ b/drivers/net/mdio/mdio-gpio.c @@ -194,11 +194,9 @@ static int mdio_gpio_probe(struct platform_device *pdev) return ret; } -static int mdio_gpio_remove(struct platform_device *pdev) +static void mdio_gpio_remove(struct platform_device *pdev) { mdio_gpio_bus_destroy(&pdev->dev); - - return 0; } static const struct of_device_id mdio_gpio_of_match[] = { @@ -210,7 +208,7 @@ MODULE_DEVICE_TABLE(of, mdio_gpio_of_match); static struct platform_driver mdio_gpio_driver = { .probe = mdio_gpio_probe, - .remove = mdio_gpio_remove, + .remove_new = mdio_gpio_remove, .driver = { .name = "mdio-gpio", .of_match_table = mdio_gpio_of_match, diff --git a/drivers/net/mdio/mdio-hisi-femac.c b/drivers/net/mdio/mdio-hisi-femac.c index f231c2fbb1de..6703f626ee83 100644 --- a/drivers/net/mdio/mdio-hisi-femac.c +++ b/drivers/net/mdio/mdio-hisi-femac.c @@ -118,7 +118,7 @@ err_out_free_mdiobus: return ret; } -static int hisi_femac_mdio_remove(struct platform_device *pdev) +static void hisi_femac_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); struct hisi_femac_mdio_data *data = bus->priv; @@ -126,8 +126,6 @@ static int hisi_femac_mdio_remove(struct platform_device *pdev) mdiobus_unregister(bus); clk_disable_unprepare(data->clk); mdiobus_free(bus); - - return 0; } static const struct of_device_id hisi_femac_mdio_dt_ids[] = { @@ -138,7 +136,7 @@ MODULE_DEVICE_TABLE(of, hisi_femac_mdio_dt_ids); static struct platform_driver hisi_femac_mdio_driver = { .probe = hisi_femac_mdio_probe, - .remove = hisi_femac_mdio_remove, + .remove_new = hisi_femac_mdio_remove, .driver = { .name = "hisi-femac-mdio", .of_match_table = hisi_femac_mdio_dt_ids, diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c index 78b93de636f5..abd8b508ec16 100644 --- a/drivers/net/mdio/mdio-ipq4019.c +++ b/drivers/net/mdio/mdio-ipq4019.c @@ -278,13 +278,11 @@ static int ipq4019_mdio_probe(struct platform_device *pdev) return 0; } -static int ipq4019_mdio_remove(struct platform_device *pdev) +static void ipq4019_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); mdiobus_unregister(bus); - - return 0; } static const struct of_device_id ipq4019_mdio_dt_ids[] = { @@ -296,7 +294,7 @@ MODULE_DEVICE_TABLE(of, ipq4019_mdio_dt_ids); static struct platform_driver ipq4019_mdio_driver = { .probe = ipq4019_mdio_probe, - .remove = ipq4019_mdio_remove, + .remove_new = ipq4019_mdio_remove, .driver = { .name = "ipq4019-mdio", .of_match_table = ipq4019_mdio_dt_ids, diff --git a/drivers/net/mdio/mdio-ipq8064.c b/drivers/net/mdio/mdio-ipq8064.c index fd9716960106..f71b6e1c66e4 100644 --- a/drivers/net/mdio/mdio-ipq8064.c +++ b/drivers/net/mdio/mdio-ipq8064.c @@ -147,14 +147,11 @@ ipq8064_mdio_probe(struct platform_device *pdev) return 0; } -static int -ipq8064_mdio_remove(struct platform_device *pdev) +static void ipq8064_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); mdiobus_unregister(bus); - - return 0; } static const struct of_device_id ipq8064_mdio_dt_ids[] = { @@ -165,7 +162,7 @@ 
MODULE_DEVICE_TABLE(of, ipq8064_mdio_dt_ids); static struct platform_driver ipq8064_mdio_driver = { .probe = ipq8064_mdio_probe, - .remove = ipq8064_mdio_remove, + .remove_new = ipq8064_mdio_remove, .driver = { .name = "ipq8064-mdio", .of_match_table = ipq8064_mdio_dt_ids, diff --git a/drivers/net/mdio/mdio-moxart.c b/drivers/net/mdio/mdio-moxart.c index f0cff584e176..d35af8cd7c4d 100644 --- a/drivers/net/mdio/mdio-moxart.c +++ b/drivers/net/mdio/mdio-moxart.c @@ -155,14 +155,12 @@ err_out_free_mdiobus: return ret; } -static int moxart_mdio_remove(struct platform_device *pdev) +static void moxart_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); mdiobus_unregister(bus); mdiobus_free(bus); - - return 0; } static const struct of_device_id moxart_mdio_dt_ids[] = { @@ -173,7 +171,7 @@ MODULE_DEVICE_TABLE(of, moxart_mdio_dt_ids); static struct platform_driver moxart_mdio_driver = { .probe = moxart_mdio_probe, - .remove = moxart_mdio_remove, + .remove_new = moxart_mdio_remove, .driver = { .name = "moxart-mdio", .of_match_table = moxart_mdio_dt_ids, diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c index 1a1b95ae95fa..c29377c85307 100644 --- a/drivers/net/mdio/mdio-mscc-miim.c +++ b/drivers/net/mdio/mdio-mscc-miim.c @@ -335,15 +335,13 @@ out_disable_clk: return ret; } -static int mscc_miim_remove(struct platform_device *pdev) +static void mscc_miim_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); struct mscc_miim_dev *miim = bus->priv; clk_disable_unprepare(miim->clk); mdiobus_unregister(bus); - - return 0; } static const struct mscc_miim_info mscc_ocelot_miim_info = { @@ -371,7 +369,7 @@ MODULE_DEVICE_TABLE(of, mscc_miim_match); static struct platform_driver mscc_miim_driver = { .probe = mscc_miim_probe, - .remove = mscc_miim_remove, + .remove_new = mscc_miim_remove, .driver = { .name = "mscc-miim", .of_match_table = mscc_miim_match, diff --git a/drivers/net/mdio/mdio-mux-bcm-iproc.c b/drivers/net/mdio/mdio-mux-bcm-iproc.c index 956d54846b62..a750bd4c77a0 100644 --- a/drivers/net/mdio/mdio-mux-bcm-iproc.c +++ b/drivers/net/mdio/mdio-mux-bcm-iproc.c @@ -287,15 +287,13 @@ out_clk: return rc; } -static int mdio_mux_iproc_remove(struct platform_device *pdev) +static void mdio_mux_iproc_remove(struct platform_device *pdev) { struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev); mdio_mux_uninit(md->mux_handle); mdiobus_unregister(md->mii_bus); clk_disable_unprepare(md->core_clk); - - return 0; } #ifdef CONFIG_PM_SLEEP @@ -342,7 +340,7 @@ static struct platform_driver mdiomux_iproc_driver = { .pm = &mdio_mux_iproc_pm_ops, }, .probe = mdio_mux_iproc_probe, - .remove = mdio_mux_iproc_remove, + .remove_new = mdio_mux_iproc_remove, }; module_platform_driver(mdiomux_iproc_driver); diff --git a/drivers/net/mdio/mdio-mux-bcm6368.c b/drivers/net/mdio/mdio-mux-bcm6368.c index 8b444a8eb6b5..1b77e0e3e6e1 100644 --- a/drivers/net/mdio/mdio-mux-bcm6368.c +++ b/drivers/net/mdio/mdio-mux-bcm6368.c @@ -153,14 +153,12 @@ out_register: return rc; } -static int bcm6368_mdiomux_remove(struct platform_device *pdev) +static void bcm6368_mdiomux_remove(struct platform_device *pdev) { struct bcm6368_mdiomux_desc *md = platform_get_drvdata(pdev); mdio_mux_uninit(md->mux_handle); mdiobus_unregister(md->mii_bus); - - return 0; } static const struct of_device_id bcm6368_mdiomux_ids[] = { @@ -175,7 +173,7 @@ static struct platform_driver bcm6368_mdiomux_driver = { .of_match_table = bcm6368_mdiomux_ids, }, 
.probe = bcm6368_mdiomux_probe, - .remove = bcm6368_mdiomux_remove, + .remove_new = bcm6368_mdiomux_remove, }; module_platform_driver(bcm6368_mdiomux_driver); diff --git a/drivers/net/mdio/mdio-mux-gpio.c b/drivers/net/mdio/mdio-mux-gpio.c index 3c7f16f06b45..38fb031f8979 100644 --- a/drivers/net/mdio/mdio-mux-gpio.c +++ b/drivers/net/mdio/mdio-mux-gpio.c @@ -62,11 +62,10 @@ static int mdio_mux_gpio_probe(struct platform_device *pdev) return 0; } -static int mdio_mux_gpio_remove(struct platform_device *pdev) +static void mdio_mux_gpio_remove(struct platform_device *pdev) { struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev); mdio_mux_uninit(s->mux_handle); - return 0; } static const struct of_device_id mdio_mux_gpio_match[] = { @@ -87,7 +86,7 @@ static struct platform_driver mdio_mux_gpio_driver = { .of_match_table = mdio_mux_gpio_match, }, .probe = mdio_mux_gpio_probe, - .remove = mdio_mux_gpio_remove, + .remove_new = mdio_mux_gpio_remove, }; module_platform_driver(mdio_mux_gpio_driver); diff --git a/drivers/net/mdio/mdio-mux-meson-g12a.c b/drivers/net/mdio/mdio-mux-meson-g12a.c index 910e5cf74e89..754b0f2cf15b 100644 --- a/drivers/net/mdio/mdio-mux-meson-g12a.c +++ b/drivers/net/mdio/mdio-mux-meson-g12a.c @@ -336,7 +336,7 @@ static int g12a_mdio_mux_probe(struct platform_device *pdev) return ret; } -static int g12a_mdio_mux_remove(struct platform_device *pdev) +static void g12a_mdio_mux_remove(struct platform_device *pdev) { struct g12a_mdio_mux *priv = platform_get_drvdata(pdev); @@ -344,13 +344,11 @@ static int g12a_mdio_mux_remove(struct platform_device *pdev) if (__clk_is_enabled(priv->pll)) clk_disable_unprepare(priv->pll); - - return 0; } static struct platform_driver g12a_mdio_mux_driver = { .probe = g12a_mdio_mux_probe, - .remove = g12a_mdio_mux_remove, + .remove_new = g12a_mdio_mux_remove, .driver = { .name = "g12a-mdio_mux", .of_match_table = g12a_mdio_mux_match, diff --git a/drivers/net/mdio/mdio-mux-meson-gxl.c b/drivers/net/mdio/mdio-mux-meson-gxl.c index 76188575ca1f..89554021b5cc 100644 --- a/drivers/net/mdio/mdio-mux-meson-gxl.c +++ b/drivers/net/mdio/mdio-mux-meson-gxl.c @@ -140,18 +140,16 @@ static int gxl_mdio_mux_probe(struct platform_device *pdev) return ret; } -static int gxl_mdio_mux_remove(struct platform_device *pdev) +static void gxl_mdio_mux_remove(struct platform_device *pdev) { struct gxl_mdio_mux *priv = platform_get_drvdata(pdev); mdio_mux_uninit(priv->mux_handle); - - return 0; } static struct platform_driver gxl_mdio_mux_driver = { .probe = gxl_mdio_mux_probe, - .remove = gxl_mdio_mux_remove, + .remove_new = gxl_mdio_mux_remove, .driver = { .name = "gxl-mdio-mux", .of_match_table = gxl_mdio_mux_match, diff --git a/drivers/net/mdio/mdio-mux-mmioreg.c b/drivers/net/mdio/mdio-mux-mmioreg.c index 09af150ed774..de08419d0c98 100644 --- a/drivers/net/mdio/mdio-mux-mmioreg.c +++ b/drivers/net/mdio/mdio-mux-mmioreg.c @@ -169,13 +169,11 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev) return 0; } -static int mdio_mux_mmioreg_remove(struct platform_device *pdev) +static void mdio_mux_mmioreg_remove(struct platform_device *pdev) { struct mdio_mux_mmioreg_state *s = dev_get_platdata(&pdev->dev); mdio_mux_uninit(s->mux_handle); - - return 0; } static const struct of_device_id mdio_mux_mmioreg_match[] = { @@ -192,7 +190,7 @@ static struct platform_driver mdio_mux_mmioreg_driver = { .of_match_table = mdio_mux_mmioreg_match, }, .probe = mdio_mux_mmioreg_probe, - .remove = mdio_mux_mmioreg_remove, + .remove_new = mdio_mux_mmioreg_remove, }; 
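The bulk of the mdio changes above are one mechanical conversion: each platform driver switches from the int-returning .remove callback to the void-returning .remove_new, dropping the unconditional "return 0;". A minimal sketch of the before/after shape, using a hypothetical foo_mdio driver (names are illustrative, not from the tree):

/* Sketch of the .remove -> .remove_new conversion applied to the mdio
 * platform drivers in this series; "foo_mdio" is a made-up example and
 * only the callback signature matters.
 */
#include <linux/phy.h>
#include <linux/platform_device.h>

static void foo_mdio_remove(struct platform_device *pdev)
{
	struct mii_bus *bus = platform_get_drvdata(pdev);

	mdiobus_unregister(bus);
	mdiobus_free(bus);
	/* Nothing to return: the driver core removes the device whether or
	 * not the old int-returning callback reported an error, so the
	 * "return 0;" boilerplate carried no information.
	 */
}

static struct platform_driver foo_mdio_driver = {
	.driver		= { .name = "foo-mdio" },
	/* was: .remove = foo_mdio_remove, with int foo_mdio_remove(...) */
	.remove_new	= foo_mdio_remove,
};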
module_platform_driver(mdio_mux_mmioreg_driver); diff --git a/drivers/net/mdio/mdio-mux-multiplexer.c b/drivers/net/mdio/mdio-mux-multiplexer.c index bfa5af577b0a..569b13383191 100644 --- a/drivers/net/mdio/mdio-mux-multiplexer.c +++ b/drivers/net/mdio/mdio-mux-multiplexer.c @@ -85,7 +85,7 @@ static int mdio_mux_multiplexer_probe(struct platform_device *pdev) return ret; } -static int mdio_mux_multiplexer_remove(struct platform_device *pdev) +static void mdio_mux_multiplexer_remove(struct platform_device *pdev) { struct mdio_mux_multiplexer_state *s = platform_get_drvdata(pdev); @@ -93,8 +93,6 @@ static int mdio_mux_multiplexer_remove(struct platform_device *pdev) if (s->do_deselect) mux_control_deselect(s->muxc); - - return 0; } static const struct of_device_id mdio_mux_multiplexer_match[] = { @@ -109,7 +107,7 @@ static struct platform_driver mdio_mux_multiplexer_driver = { .of_match_table = mdio_mux_multiplexer_match, }, .probe = mdio_mux_multiplexer_probe, - .remove = mdio_mux_multiplexer_remove, + .remove_new = mdio_mux_multiplexer_remove, }; module_platform_driver(mdio_mux_multiplexer_driver); diff --git a/drivers/net/mdio/mdio-mux.c b/drivers/net/mdio/mdio-mux.c index a881e3523328..bef4cce71287 100644 --- a/drivers/net/mdio/mdio-mux.c +++ b/drivers/net/mdio/mdio-mux.c @@ -55,6 +55,27 @@ out: return r; } +static int mdio_mux_read_c45(struct mii_bus *bus, int phy_id, int dev_addr, + int regnum) +{ + struct mdio_mux_child_bus *cb = bus->priv; + struct mdio_mux_parent_bus *pb = cb->parent; + int r; + + mutex_lock_nested(&pb->mii_bus->mdio_lock, MDIO_MUTEX_MUX); + r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data); + if (r) + goto out; + + pb->current_child = cb->bus_number; + + r = pb->mii_bus->read_c45(pb->mii_bus, phy_id, dev_addr, regnum); +out: + mutex_unlock(&pb->mii_bus->mdio_lock); + + return r; +} + /* * The parent bus' lock is used to order access to the switch_fn. 
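 * Both the clause 22 path and the new clause 45 read/write paths take the
 * parent's mdio_lock (mutex_lock_nested() with MDIO_MUTEX_MUX, so lockdep
 * can tell the nested bus lock apart) before calling switch_fn() and
 * updating current_child, which keeps a child-bus transaction from ever
 * seeing the mux pointed at a different child.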
*/ @@ -80,6 +101,28 @@ out: return r; } +static int mdio_mux_write_c45(struct mii_bus *bus, int phy_id, int dev_addr, + int regnum, u16 val) +{ + struct mdio_mux_child_bus *cb = bus->priv; + struct mdio_mux_parent_bus *pb = cb->parent; + + int r; + + mutex_lock_nested(&pb->mii_bus->mdio_lock, MDIO_MUTEX_MUX); + r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data); + if (r) + goto out; + + pb->current_child = cb->bus_number; + + r = pb->mii_bus->write_c45(pb->mii_bus, phy_id, dev_addr, regnum, val); +out: + mutex_unlock(&pb->mii_bus->mdio_lock); + + return r; +} + static int parent_count; static void mdio_mux_uninit_children(struct mdio_mux_parent_bus *pb) @@ -173,6 +216,10 @@ int mdio_mux_init(struct device *dev, cb->mii_bus->parent = dev; cb->mii_bus->read = mdio_mux_read; cb->mii_bus->write = mdio_mux_write; + if (parent_bus->read_c45) + cb->mii_bus->read_c45 = mdio_mux_read_c45; + if (parent_bus->write_c45) + cb->mii_bus->write_c45 = mdio_mux_write_c45; r = of_mdiobus_register(cb->mii_bus, child_bus_node); if (r) { mdiobus_free(cb->mii_bus); diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c index 7c65c547d377..037a38cfed56 100644 --- a/drivers/net/mdio/mdio-octeon.c +++ b/drivers/net/mdio/mdio-octeon.c @@ -78,7 +78,7 @@ fail_register: return err; } -static int octeon_mdiobus_remove(struct platform_device *pdev) +static void octeon_mdiobus_remove(struct platform_device *pdev) { struct cavium_mdiobus *bus; union cvmx_smix_en smi_en; @@ -88,7 +88,6 @@ static int octeon_mdiobus_remove(struct platform_device *pdev) mdiobus_unregister(bus->mii_bus); smi_en.u64 = 0; oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN); - return 0; } static const struct of_device_id octeon_mdiobus_match[] = { @@ -105,7 +104,7 @@ static struct platform_driver octeon_mdiobus_driver = { .of_match_table = octeon_mdiobus_match, }, .probe = octeon_mdiobus_probe, - .remove = octeon_mdiobus_remove, + .remove_new = octeon_mdiobus_remove, }; module_platform_driver(octeon_mdiobus_driver); diff --git a/drivers/net/mdio/mdio-sun4i.c b/drivers/net/mdio/mdio-sun4i.c index f798de3276dc..4511bcc73b36 100644 --- a/drivers/net/mdio/mdio-sun4i.c +++ b/drivers/net/mdio/mdio-sun4i.c @@ -142,7 +142,7 @@ err_out_free_mdiobus: return ret; } -static int sun4i_mdio_remove(struct platform_device *pdev) +static void sun4i_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); struct sun4i_mdio_data *data = bus->priv; @@ -151,8 +151,6 @@ static int sun4i_mdio_remove(struct platform_device *pdev) if (data->regulator) regulator_disable(data->regulator); mdiobus_free(bus); - - return 0; } static const struct of_device_id sun4i_mdio_dt_ids[] = { @@ -166,7 +164,7 @@ MODULE_DEVICE_TABLE(of, sun4i_mdio_dt_ids); static struct platform_driver sun4i_mdio_driver = { .probe = sun4i_mdio_probe, - .remove = sun4i_mdio_remove, + .remove_new = sun4i_mdio_remove, .driver = { .name = "sun4i-mdio", .of_match_table = sun4i_mdio_dt_ids, diff --git a/drivers/net/mdio/mdio-xgene.c b/drivers/net/mdio/mdio-xgene.c index 7aafc221b5cf..2772a3098543 100644 --- a/drivers/net/mdio/mdio-xgene.c +++ b/drivers/net/mdio/mdio-xgene.c @@ -13,15 +13,15 @@ #include <linux/io.h> #include <linux/mdio/mdio-xgene.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/of_mdio.h> #include <linux/of_net.h> -#include <linux/of_platform.h> #include <linux/phy.h> +#include <linux/platform_device.h> #include <linux/prefetch.h> +#include <linux/property.h> #include <net/ip.h> -static bool 
xgene_mdio_status; - u32 xgene_mdio_rd_mac(struct xgene_mdio_pdata *pdata, u32 rd_addr) { void __iomem *addr, *rd, *cmd, *cmd_done; @@ -79,7 +79,7 @@ EXPORT_SYMBOL(xgene_mdio_wr_mac); int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg) { - struct xgene_mdio_pdata *pdata = (struct xgene_mdio_pdata *)bus->priv; + struct xgene_mdio_pdata *pdata = bus->priv; u32 data, done; u8 wait = 10; @@ -105,7 +105,7 @@ EXPORT_SYMBOL(xgene_mdio_rgmii_read); int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data) { - struct xgene_mdio_pdata *pdata = (struct xgene_mdio_pdata *)bus->priv; + struct xgene_mdio_pdata *pdata = bus->priv; u32 val, done; u8 wait = 10; @@ -328,24 +328,11 @@ static int xgene_mdio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mii_bus *mdio_bus; - const struct of_device_id *of_id; struct xgene_mdio_pdata *pdata; void __iomem *csr_base; int mdio_id = 0, ret = 0; - of_id = of_match_device(xgene_mdio_of_match, &pdev->dev); - if (of_id) { - mdio_id = (enum xgene_mdio_id)of_id->data; - } else { -#ifdef CONFIG_ACPI - const struct acpi_device_id *acpi_id; - - acpi_id = acpi_match_device(xgene_mdio_acpi_match, &pdev->dev); - if (acpi_id) - mdio_id = (enum xgene_mdio_id)acpi_id->driver_data; -#endif - } - + mdio_id = (uintptr_t)device_get_match_data(&pdev->dev); if (!mdio_id) return -ENODEV; @@ -421,7 +408,6 @@ static int xgene_mdio_probe(struct platform_device *pdev) goto out_mdiobus; pdata->mdio_bus = mdio_bus; - xgene_mdio_status = true; return 0; @@ -435,7 +421,7 @@ out_clk: return ret; } -static int xgene_mdio_remove(struct platform_device *pdev) +static void xgene_mdio_remove(struct platform_device *pdev) { struct xgene_mdio_pdata *pdata = platform_get_drvdata(pdev); struct mii_bus *mdio_bus = pdata->mdio_bus; @@ -446,18 +432,16 @@ static int xgene_mdio_remove(struct platform_device *pdev) if (dev->of_node) clk_disable_unprepare(pdata->clk); - - return 0; } static struct platform_driver xgene_mdio_driver = { .driver = { .name = "xgene-mdio", - .of_match_table = of_match_ptr(xgene_mdio_of_match), + .of_match_table = xgene_mdio_of_match, .acpi_match_table = ACPI_PTR(xgene_mdio_acpi_match), }, .probe = xgene_mdio_probe, - .remove = xgene_mdio_remove, + .remove_new = xgene_mdio_remove, }; module_platform_driver(xgene_mdio_driver); diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c index 7eb32ebb846d..64ebcb6d235c 100644 --- a/drivers/net/mdio/of_mdio.c +++ b/drivers/net/mdio/of_mdio.c @@ -25,6 +25,7 @@ MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("OpenFirmware MDIO bus (Ethernet PHY) accessors"); /* Extract the clause 22 phy ID from the compatible string of the form * ethernet-phy-idAAAA.BBBB */ diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c index 3d322ac4f6a5..ae169929a9d8 100644 --- a/drivers/net/mhi_net.c +++ b/drivers/net/mhi_net.c @@ -403,7 +403,6 @@ static struct mhi_driver mhi_net_driver = { .id_table = mhi_net_id_table, .driver = { .name = "mhi_net", - .owner = THIS_MODULE, }, }; diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 4f4f79532c6c..6e14ba5e06c8 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -36,6 +36,7 @@ #include <linux/inet.h> #include <linux/configfs.h> #include <linux/etherdevice.h> +#include <linux/utsname.h> MODULE_AUTHOR("Maintainer: Matt Mackall <mpm@selenic.com>"); MODULE_DESCRIPTION("Console driver for network interfaces"); @@ -52,6 +53,8 @@ static bool oops_only 
= false; module_param(oops_only, bool, 0600); MODULE_PARM_DESC(oops_only, "Only log oops messages"); +#define NETCONSOLE_PARAM_TARGET_PREFIX "cmdline" + #ifndef MODULE static int __init option_setup(char *opt) { @@ -84,6 +87,8 @@ static struct console netconsole_ext; * Also, other parameters of a target may be modified at * runtime only when it is disabled (enabled == 0). * @extended: Denotes whether console is extended or not. + * @release: Denotes whether kernel release version should be prepended + * to the message. Depends on extended console. * @np: The netpoll structure for this target. * Contains the other userspace visible parameters: * dev_name (read-write) @@ -101,6 +106,7 @@ struct netconsole_target { #endif bool enabled; bool extended; + bool release; struct netpoll np; }; @@ -161,21 +167,27 @@ static void netconsole_target_put(struct netconsole_target *nt) { } +static void populate_configfs_item(struct netconsole_target *nt, + int cmdline_count) +{ +} #endif /* CONFIG_NETCONSOLE_DYNAMIC */ -/* Allocate new target (from boot/module param) and setup netpoll for it */ -static struct netconsole_target *alloc_param_target(char *target_config) +/* Allocate and initialize with defaults. + * Note that these targets get their config_item fields zeroed-out. + */ +static struct netconsole_target *alloc_and_init(void) { - int err = -ENOMEM; struct netconsole_target *nt; - /* - * Allocate and initialize with defaults. - * Note that these targets get their config_item fields zeroed-out. - */ nt = kzalloc(sizeof(*nt), GFP_KERNEL); if (!nt) - goto fail; + return nt; + + if (IS_ENABLED(CONFIG_NETCONSOLE_EXTENDED_LOG)) + nt->extended = true; + if (IS_ENABLED(CONFIG_NETCONSOLE_PREPEND_RELEASE)) + nt->release = true; nt->np.name = "netconsole"; strscpy(nt->np.dev_name, "eth0", IFNAMSIZ); @@ -183,34 +195,7 @@ static struct netconsole_target *alloc_param_target(char *target_config) nt->np.remote_port = 6666; eth_broadcast_addr(nt->np.remote_mac); - if (*target_config == '+') { - nt->extended = true; - target_config++; - } - - /* Parse parameters and setup netpoll */ - err = netpoll_parse_options(&nt->np, target_config); - if (err) - goto fail; - - err = netpoll_setup(&nt->np); - if (err) - goto fail; - - nt->enabled = true; - return nt; - -fail: - kfree(nt); - return ERR_PTR(err); -} - -/* Cleanup netpoll for given target (from boot/module param) and free it */ -static void free_param_target(struct netconsole_target *nt) -{ - netpoll_cleanup(&nt->np); - kfree(nt); } #ifdef CONFIG_NETCONSOLE_DYNAMIC @@ -222,6 +207,7 @@ static void free_param_target(struct netconsole_target *nt) * | * <target>/ * | enabled + * | release * | dev_name * | local_port * | remote_port @@ -246,27 +232,32 @@ static struct netconsole_target *to_target(struct config_item *item) static ssize_t enabled_show(struct config_item *item, char *buf) { - return snprintf(buf, PAGE_SIZE, "%d\n", to_target(item)->enabled); + return sysfs_emit(buf, "%d\n", to_target(item)->enabled); } static ssize_t extended_show(struct config_item *item, char *buf) { - return snprintf(buf, PAGE_SIZE, "%d\n", to_target(item)->extended); + return sysfs_emit(buf, "%d\n", to_target(item)->extended); +} + +static ssize_t release_show(struct config_item *item, char *buf) +{ + return sysfs_emit(buf, "%d\n", to_target(item)->release); } static ssize_t dev_name_show(struct config_item *item, char *buf) { - return snprintf(buf, PAGE_SIZE, "%s\n", to_target(item)->np.dev_name); + return sysfs_emit(buf, "%s\n", to_target(item)->np.dev_name); } static ssize_t 
local_port_show(struct config_item *item, char *buf) { - return snprintf(buf, PAGE_SIZE, "%d\n", to_target(item)->np.local_port); + return sysfs_emit(buf, "%d\n", to_target(item)->np.local_port); } static ssize_t remote_port_show(struct config_item *item, char *buf) { - return snprintf(buf, PAGE_SIZE, "%d\n", to_target(item)->np.remote_port); + return sysfs_emit(buf, "%d\n", to_target(item)->np.remote_port); } static ssize_t local_ip_show(struct config_item *item, char *buf) @@ -274,9 +265,9 @@ static ssize_t local_ip_show(struct config_item *item, char *buf) struct netconsole_target *nt = to_target(item); if (nt->np.ipv6) - return snprintf(buf, PAGE_SIZE, "%pI6c\n", &nt->np.local_ip.in6); + return sysfs_emit(buf, "%pI6c\n", &nt->np.local_ip.in6); else - return snprintf(buf, PAGE_SIZE, "%pI4\n", &nt->np.local_ip); + return sysfs_emit(buf, "%pI4\n", &nt->np.local_ip); } static ssize_t remote_ip_show(struct config_item *item, char *buf) @@ -284,9 +275,9 @@ static ssize_t remote_ip_show(struct config_item *item, char *buf) struct netconsole_target *nt = to_target(item); if (nt->np.ipv6) - return snprintf(buf, PAGE_SIZE, "%pI6c\n", &nt->np.remote_ip.in6); + return sysfs_emit(buf, "%pI6c\n", &nt->np.remote_ip.in6); else - return snprintf(buf, PAGE_SIZE, "%pI4\n", &nt->np.remote_ip); + return sysfs_emit(buf, "%pI4\n", &nt->np.remote_ip); } static ssize_t local_mac_show(struct config_item *item, char *buf) @@ -294,12 +285,12 @@ static ssize_t local_mac_show(struct config_item *item, char *buf) struct net_device *dev = to_target(item)->np.dev; static const u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; - return snprintf(buf, PAGE_SIZE, "%pM\n", dev ? dev->dev_addr : bcast); + return sysfs_emit(buf, "%pM\n", dev ? dev->dev_addr : bcast); } static ssize_t remote_mac_show(struct config_item *item, char *buf) { - return snprintf(buf, PAGE_SIZE, "%pM\n", to_target(item)->np.remote_mac); + return sysfs_emit(buf, "%pM\n", to_target(item)->np.remote_mac); } /* @@ -314,17 +305,15 @@ static ssize_t enabled_store(struct config_item *item, { struct netconsole_target *nt = to_target(item); unsigned long flags; - int enabled; + bool enabled; int err; mutex_lock(&dynamic_netconsole_mutex); - err = kstrtoint(buf, 10, &enabled); - if (err < 0) + err = kstrtobool(buf, &enabled); + if (err) goto out_unlock; err = -EINVAL; - if (enabled < 0 || enabled > 1) - goto out_unlock; if ((bool)enabled == nt->enabled) { pr_info("network logging has already %s\n", nt->enabled ? "started" : "stopped"); @@ -332,6 +321,11 @@ static ssize_t enabled_store(struct config_item *item, } if (enabled) { /* true */ + if (nt->release && !nt->extended) { + pr_err("Not enabling netconsole. 
Release feature requires extended log message"); + goto out_unlock; + } + if (nt->extended && !console_is_registered(&netconsole_ext)) register_console(&netconsole_ext); @@ -366,11 +360,11 @@ out_unlock: return err; } -static ssize_t extended_store(struct config_item *item, const char *buf, - size_t count) +static ssize_t release_store(struct config_item *item, const char *buf, + size_t count) { struct netconsole_target *nt = to_target(item); - int extended; + bool release; int err; mutex_lock(&dynamic_netconsole_mutex); @@ -381,14 +375,38 @@ static ssize_t extended_store(struct config_item *item, const char *buf, goto out_unlock; } - err = kstrtoint(buf, 10, &extended); - if (err < 0) + err = kstrtobool(buf, &release); + if (err) goto out_unlock; - if (extended < 0 || extended > 1) { + + nt->release = release; + + mutex_unlock(&dynamic_netconsole_mutex); + return strnlen(buf, count); +out_unlock: + mutex_unlock(&dynamic_netconsole_mutex); + return err; +} + +static ssize_t extended_store(struct config_item *item, const char *buf, + size_t count) +{ + struct netconsole_target *nt = to_target(item); + bool extended; + int err; + + mutex_lock(&dynamic_netconsole_mutex); + if (nt->enabled) { + pr_err("target (%s) is enabled, disable to update parameters\n", + config_item_name(&nt->item)); err = -EINVAL; goto out_unlock; } + err = kstrtobool(buf, &extended); + if (err) + goto out_unlock; + nt->extended = extended; mutex_unlock(&dynamic_netconsole_mutex); @@ -576,10 +594,12 @@ CONFIGFS_ATTR(, local_ip); CONFIGFS_ATTR(, remote_ip); CONFIGFS_ATTR_RO(, local_mac); CONFIGFS_ATTR(, remote_mac); +CONFIGFS_ATTR(, release); static struct configfs_attribute *netconsole_target_attrs[] = { &attr_enabled, &attr_extended, + &attr_release, &attr_dev_name, &attr_local_port, &attr_remote_port, @@ -609,6 +629,23 @@ static const struct config_item_type netconsole_target_type = { .ct_owner = THIS_MODULE, }; +static struct netconsole_target *find_cmdline_target(const char *name) +{ + struct netconsole_target *nt, *ret = NULL; + unsigned long flags; + + spin_lock_irqsave(&target_list_lock, flags); + list_for_each_entry(nt, &target_list, list) { + if (!strcmp(nt->item.ci_name, name)) { + ret = nt; + break; + } + } + spin_unlock_irqrestore(&target_list_lock, flags); + + return ret; +} + /* * Group operations and type for netconsole_subsys. */ @@ -616,23 +653,24 @@ static const struct config_item_type netconsole_target_type = { static struct config_item *make_netconsole_target(struct config_group *group, const char *name) { - unsigned long flags; struct netconsole_target *nt; + unsigned long flags; - /* - * Allocate and initialize with defaults. - * Target is disabled at creation (!enabled). + /* Checking if a target by this name was created at boot time. If so, + * attach a configfs entry to that target. This enables dynamic + * control. 
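 * Boot/module-param targets are named by populate_configfs_item() as
 * NETCONSOLE_PARAM_TARGET_PREFIX plus their position in the parameter
 * string ("cmdline0", "cmdline1", ...), so a mkdir of "cmdline0" here
 * binds to the target created at boot (e.g. via netconsole=+r@..., where
 * '+' selects extended logs and a following 'r' prepends the kernel
 * release) rather than allocating a fresh one.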
*/ - nt = kzalloc(sizeof(*nt), GFP_KERNEL); + if (!strncmp(name, NETCONSOLE_PARAM_TARGET_PREFIX, + strlen(NETCONSOLE_PARAM_TARGET_PREFIX))) { + nt = find_cmdline_target(name); + if (nt) + return &nt->item; + } + + nt = alloc_and_init(); if (!nt) return ERR_PTR(-ENOMEM); - nt->np.name = "netconsole"; - strscpy(nt->np.dev_name, "eth0", IFNAMSIZ); - nt->np.local_port = 6665; - nt->np.remote_port = 6666; - eth_broadcast_addr(nt->np.remote_mac); - /* Initialize the config_item member */ config_item_init_type_name(&nt->item, name, &netconsole_target_type); @@ -684,6 +722,17 @@ static struct configfs_subsystem netconsole_subsys = { }, }; +static void populate_configfs_item(struct netconsole_target *nt, + int cmdline_count) +{ + char target_name[16]; + + snprintf(target_name, sizeof(target_name), "%s%d", + NETCONSOLE_PARAM_TARGET_PREFIX, cmdline_count); + config_item_init_type_name(&nt->item, target_name, + &netconsole_target_type); +} + #endif /* CONFIG_NETCONSOLE_DYNAMIC */ /* Handle network interface device notifications */ @@ -772,9 +821,23 @@ static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg, const char *header, *body; int offset = 0; int header_len, body_len; + const char *msg_ready = msg; + const char *release; + int release_len = 0; + + if (nt->release) { + release = init_utsname()->release; + release_len = strlen(release) + 1; + } - if (msg_len <= MAX_PRINT_CHUNK) { - netpoll_send_udp(&nt->np, msg, msg_len); + if (msg_len + release_len <= MAX_PRINT_CHUNK) { + /* No fragmentation needed */ + if (nt->release) { + scnprintf(buf, MAX_PRINT_CHUNK, "%s,%s", release, msg); + msg_len += release_len; + msg_ready = buf; + } + netpoll_send_udp(&nt->np, msg_ready, msg_len); return; } @@ -792,7 +855,10 @@ static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg, * Transfer multiple chunks with the following extra header. * "ncfrag=<byte-offset>/<total-bytes>" */ - memcpy(buf, header, header_len); + if (nt->release) + scnprintf(buf, MAX_PRINT_CHUNK, "%s,", release); + memcpy(buf + release_len, header, header_len); + header_len += release_len; while (offset < body_len) { int this_header = header_len; @@ -865,6 +931,60 @@ static void write_msg(struct console *con, const char *msg, unsigned int len) spin_unlock_irqrestore(&target_list_lock, flags); } +/* Allocate new target (from boot/module param) and setup netpoll for it */ +static struct netconsole_target *alloc_param_target(char *target_config, + int cmdline_count) +{ + struct netconsole_target *nt; + int err; + + nt = alloc_and_init(); + if (!nt) { + err = -ENOMEM; + goto fail; + } + + if (*target_config == '+') { + nt->extended = true; + target_config++; + } + + if (*target_config == 'r') { + if (!nt->extended) { + pr_err("Netconsole configuration error. 
Release feature requires extended log message"); + err = -EINVAL; + goto fail; + } + nt->release = true; + target_config++; + } + + /* Parse parameters and setup netpoll */ + err = netpoll_parse_options(&nt->np, target_config); + if (err) + goto fail; + + err = netpoll_setup(&nt->np); + if (err) + goto fail; + + populate_configfs_item(nt, cmdline_count); + nt->enabled = true; + + return nt; + +fail: + kfree(nt); + return ERR_PTR(err); +} + +/* Cleanup netpoll for given target (from boot/module param) and free it */ +static void free_param_target(struct netconsole_target *nt) +{ + netpoll_cleanup(&nt->np); + kfree(nt); +} + static struct console netconsole_ext = { .name = "netcon_ext", .flags = CON_ENABLED | CON_EXTENDED, @@ -881,6 +1001,7 @@ static int __init init_netconsole(void) { int err; struct netconsole_target *nt, *tmp; + unsigned int count = 0; bool extended = false; unsigned long flags; char *target_config; @@ -888,7 +1009,7 @@ static int __init init_netconsole(void) if (strnlen(input, MAX_PARAM_LENGTH)) { while ((target_config = strsep(&input, ";"))) { - nt = alloc_param_target(target_config); + nt = alloc_param_target(target_config, count); if (IS_ERR(nt)) { err = PTR_ERR(nt); goto fail; @@ -904,6 +1025,7 @@ static int __init init_netconsole(void) spin_lock_irqsave(&target_list_lock, flags); list_add(&nt->list, &target_list); spin_unlock_irqrestore(&target_list_lock, flags); + count++; } } diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile index 5735e5b1a2cb..f8de93bc5f5b 100644 --- a/drivers/net/netdevsim/Makefile +++ b/drivers/net/netdevsim/Makefile @@ -17,3 +17,7 @@ endif ifneq ($(CONFIG_PSAMPLE),) netdevsim-objs += psample.o endif + +ifneq ($(CONFIG_MACSEC),) +netdevsim-objs += macsec.o +endif diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index f60eb97e3a62..608953d4f98d 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -93,7 +93,7 @@ static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded) { struct nsim_bpf_bound_prog *state; - if (!prog || !prog->aux->offload) + if (!prog || !bpf_prog_is_offloaded(prog->aux)) return; state = prog->aux->offload->dev_priv; @@ -311,7 +311,7 @@ nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf) if (!bpf->prog) return 0; - if (!bpf->prog->aux->offload) { + if (!bpf_prog_is_offloaded(bpf->prog->aux)) { NSIM_EA(bpf->extack, "xdpoffload of non-bound program"); return -EINVAL; } diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c index 0787ad252dd9..bcbc1e19edde 100644 --- a/drivers/net/netdevsim/bus.c +++ b/drivers/net/netdevsim/bus.c @@ -3,11 +3,13 @@ * Copyright (C) 2019 Mellanox Technologies. All rights reserved */ +#include <linux/completion.h> #include <linux/device.h> #include <linux/idr.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/mutex.h> +#include <linux/refcount.h> #include <linux/slab.h> #include <linux/sysfs.h> @@ -17,6 +19,8 @@ static DEFINE_IDA(nsim_bus_dev_ids); static LIST_HEAD(nsim_bus_dev_list); static DEFINE_MUTEX(nsim_bus_dev_list_lock); static bool nsim_bus_enable; +static refcount_t nsim_bus_devs; /* Including the bus itself. 
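 * The count starts at 1 in nsim_bus_init() for the bus itself; each device
 * drops it from nsim_bus_dev_release(), and nsim_bus_exit() drops its own
 * reference, unregisters the devices, and then waits on
 * nsim_bus_devs_released, so the driver cannot be unregistered while a
 * device release is still in flight.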
*/ +static DECLARE_COMPLETION(nsim_bus_devs_released); static struct nsim_bus_dev *to_nsim_bus_dev(struct device *dev) { @@ -121,6 +125,8 @@ static void nsim_bus_dev_release(struct device *dev) nsim_bus_dev = container_of(dev, struct nsim_bus_dev, dev); kfree(nsim_bus_dev); + if (refcount_dec_and_test(&nsim_bus_devs)) + complete(&nsim_bus_devs_released); } static struct device_type nsim_bus_dev_type = { @@ -170,6 +176,7 @@ new_device_store(const struct bus_type *bus, const char *buf, size_t count) goto err; } + refcount_inc(&nsim_bus_devs); /* Allow using nsim_bus_dev */ smp_store_release(&nsim_bus_dev->init, true); @@ -326,6 +333,7 @@ int nsim_bus_init(void) err = driver_register(&nsim_driver); if (err) goto err_bus_unregister; + refcount_set(&nsim_bus_devs, 1); /* Allow using resources */ smp_store_release(&nsim_bus_enable, true); return 0; @@ -341,6 +349,8 @@ void nsim_bus_exit(void) /* Disallow using resources */ smp_store_release(&nsim_bus_enable, false); + if (refcount_dec_and_test(&nsim_bus_devs)) + complete(&nsim_bus_devs_released); mutex_lock(&nsim_bus_dev_list_lock); list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) { @@ -349,6 +359,8 @@ void nsim_bus_exit(void) } mutex_unlock(&nsim_bus_dev_list_lock); + wait_for_completion(&nsim_bus_devs_released); + driver_unregister(&nsim_driver); bus_unregister(&nsim_bus); } diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c index ffd9f84b6644..bd546d4d26c6 100644 --- a/drivers/net/netdevsim/ethtool.c +++ b/drivers/net/netdevsim/ethtool.c @@ -140,6 +140,16 @@ nsim_set_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam) return 0; } +static int nsim_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct netdevsim *ns = netdev_priv(dev); + + info->phc_index = mock_phc_index(ns->phc); + + return 0; +} + static const struct ethtool_ops nsim_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_ALL_PARAMS, .get_pause_stats = nsim_get_pause_stats, @@ -153,6 +163,7 @@ static const struct ethtool_ops nsim_ethtool_ops = { .set_channels = nsim_set_channels, .get_fecparam = nsim_get_fecparam, .set_fecparam = nsim_set_fecparam, + .get_ts_info = nsim_get_ts_info, }; static void nsim_ethtool_ring_init(struct netdevsim *ns) diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c index eb04ed715d2d..70e8bdf34be9 100644 --- a/drivers/net/netdevsim/health.c +++ b/drivers/net/netdevsim/health.c @@ -63,91 +63,45 @@ nsim_dev_dummy_reporter_recover(struct devlink_health_reporter *reporter, static int nsim_dev_dummy_fmsg_put(struct devlink_fmsg *fmsg, u32 binary_len) { char *binary; - int err; int i; - err = devlink_fmsg_bool_pair_put(fmsg, "test_bool", true); - if (err) - return err; - err = devlink_fmsg_u8_pair_put(fmsg, "test_u8", 1); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "test_u32", 3); - if (err) - return err; - err = devlink_fmsg_u64_pair_put(fmsg, "test_u64", 4); - if (err) - return err; - err = devlink_fmsg_string_pair_put(fmsg, "test_string", "somestring"); - if (err) - return err; + devlink_fmsg_bool_pair_put(fmsg, "test_bool", true); + devlink_fmsg_u8_pair_put(fmsg, "test_u8", 1); + devlink_fmsg_u32_pair_put(fmsg, "test_u32", 3); + devlink_fmsg_u64_pair_put(fmsg, "test_u64", 4); + devlink_fmsg_string_pair_put(fmsg, "test_string", "somestring"); binary = kmalloc(binary_len, GFP_KERNEL | __GFP_NOWARN); if (!binary) return -ENOMEM; get_random_bytes(binary, binary_len); - err = devlink_fmsg_binary_pair_put(fmsg, 
"test_binary", binary, binary_len); + devlink_fmsg_binary_pair_put(fmsg, "test_binary", binary, binary_len); kfree(binary); - if (err) - return err; - err = devlink_fmsg_pair_nest_start(fmsg, "test_nest"); - if (err) - return err; - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; - err = devlink_fmsg_bool_pair_put(fmsg, "nested_test_bool", false); - if (err) - return err; - err = devlink_fmsg_u8_pair_put(fmsg, "nested_test_u8", false); - if (err) - return err; - err = devlink_fmsg_obj_nest_end(fmsg); - if (err) - return err; - err = devlink_fmsg_pair_nest_end(fmsg); - if (err) - return err; + devlink_fmsg_pair_nest_start(fmsg, "test_nest"); + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_bool_pair_put(fmsg, "nested_test_bool", false); + devlink_fmsg_u8_pair_put(fmsg, "nested_test_u8", false); + devlink_fmsg_obj_nest_end(fmsg); + devlink_fmsg_pair_nest_end(fmsg); + devlink_fmsg_arr_pair_nest_end(fmsg); + devlink_fmsg_arr_pair_nest_start(fmsg, "test_u32_array"); - err = devlink_fmsg_arr_pair_nest_end(fmsg); - if (err) - return err; + for (i = 0; i < 10; i++) + devlink_fmsg_u32_put(fmsg, i); + devlink_fmsg_arr_pair_nest_end(fmsg); + devlink_fmsg_arr_pair_nest_start(fmsg, "test_array_of_objects"); - err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_u32_array"); - if (err) - return err; for (i = 0; i < 10; i++) { - err = devlink_fmsg_u32_put(fmsg, i); - if (err) - return err; + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_bool_pair_put(fmsg, "in_array_nested_test_bool", + false); + devlink_fmsg_u8_pair_put(fmsg, "in_array_nested_test_u8", i); + devlink_fmsg_obj_nest_end(fmsg); } - err = devlink_fmsg_arr_pair_nest_end(fmsg); - if (err) - return err; + devlink_fmsg_arr_pair_nest_end(fmsg); - err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_array_of_objects"); - if (err) - return err; - for (i = 0; i < 10; i++) { - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; - err = devlink_fmsg_bool_pair_put(fmsg, - "in_array_nested_test_bool", - false); - if (err) - return err; - err = devlink_fmsg_u8_pair_put(fmsg, - "in_array_nested_test_u8", - i); - if (err) - return err; - err = devlink_fmsg_obj_nest_end(fmsg); - if (err) - return err; - } - return devlink_fmsg_arr_pair_nest_end(fmsg); + return 0; } static int @@ -157,14 +111,10 @@ nsim_dev_dummy_reporter_dump(struct devlink_health_reporter *reporter, { struct nsim_dev_health *health = devlink_health_reporter_priv(reporter); struct nsim_dev_dummy_reporter_ctx *ctx = priv_ctx; - int err; - if (ctx) { - err = devlink_fmsg_string_pair_put(fmsg, "break_message", - ctx->break_msg); - if (err) - return err; - } + if (ctx) + devlink_fmsg_string_pair_put(fmsg, "break_message", ctx->break_msg); + return nsim_dev_dummy_fmsg_put(fmsg, health->binary_len); } @@ -174,15 +124,11 @@ nsim_dev_dummy_reporter_diagnose(struct devlink_health_reporter *reporter, struct netlink_ext_ack *extack) { struct nsim_dev_health *health = devlink_health_reporter_priv(reporter); - int err; - if (health->recovered_break_msg) { - err = devlink_fmsg_string_pair_put(fmsg, - "recovered_break_message", - health->recovered_break_msg); - if (err) - return err; - } + if (health->recovered_break_msg) + devlink_fmsg_string_pair_put(fmsg, "recovered_break_message", + health->recovered_break_msg); + return nsim_dev_dummy_fmsg_put(fmsg, health->binary_len); } diff --git a/drivers/net/netdevsim/macsec.c b/drivers/net/netdevsim/macsec.c new file mode 100644 index 000000000000..0d5f50430dd3 --- /dev/null +++ b/drivers/net/netdevsim/macsec.c @@ -0,0 +1,356 
@@ +// SPDX-License-Identifier: GPL-2.0 + +#include <net/macsec.h> +#include "netdevsim.h" + +static inline u64 sci_to_cpu(sci_t sci) +{ + return be64_to_cpu((__force __be64)sci); +} + +static int nsim_macsec_find_secy(struct netdevsim *ns, sci_t sci) +{ + int i; + + for (i = 0; i < NSIM_MACSEC_MAX_SECY_COUNT; i++) { + if (ns->macsec.nsim_secy[i].sci == sci) + return i; + } + + return -1; +} + +static int nsim_macsec_find_rxsc(struct nsim_secy *ns_secy, sci_t sci) +{ + int i; + + for (i = 0; i < NSIM_MACSEC_MAX_RXSC_COUNT; i++) { + if (ns_secy->nsim_rxsc[i].sci == sci) + return i; + } + + return -1; +} + +static int nsim_macsec_add_secy(struct macsec_context *ctx) +{ + struct netdevsim *ns = netdev_priv(ctx->netdev); + int idx; + + if (ns->macsec.nsim_secy_count == NSIM_MACSEC_MAX_SECY_COUNT) + return -ENOSPC; + + for (idx = 0; idx < NSIM_MACSEC_MAX_SECY_COUNT; idx++) { + if (!ns->macsec.nsim_secy[idx].used) + break; + } + + if (idx == NSIM_MACSEC_MAX_SECY_COUNT) { + netdev_err(ctx->netdev, "%s: nsim_secy_count not full but all SecYs used\n", + __func__); + return -ENOSPC; + } + + netdev_dbg(ctx->netdev, "%s: adding new secy with sci %08llx at index %d\n", + __func__, sci_to_cpu(ctx->secy->sci), idx); + ns->macsec.nsim_secy[idx].used = true; + ns->macsec.nsim_secy[idx].nsim_rxsc_count = 0; + ns->macsec.nsim_secy[idx].sci = ctx->secy->sci; + ns->macsec.nsim_secy_count++; + + return 0; +} + +static int nsim_macsec_upd_secy(struct macsec_context *ctx) +{ + struct netdevsim *ns = netdev_priv(ctx->netdev); + int idx; + + idx = nsim_macsec_find_secy(ns, ctx->secy->sci); + if (idx < 0) { + netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n", + __func__, sci_to_cpu(ctx->secy->sci)); + return -ENOENT; + } + + netdev_dbg(ctx->netdev, "%s: updating secy with sci %08llx at index %d\n", + __func__, sci_to_cpu(ctx->secy->sci), idx); + + return 0; +} + +static int nsim_macsec_del_secy(struct macsec_context *ctx) +{ + struct netdevsim *ns = netdev_priv(ctx->netdev); + int idx; + + idx = nsim_macsec_find_secy(ns, ctx->secy->sci); + if (idx < 0) { + netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n", + __func__, sci_to_cpu(ctx->secy->sci)); + return -ENOENT; + } + + netdev_dbg(ctx->netdev, "%s: removing SecY with SCI %08llx at index %d\n", + __func__, sci_to_cpu(ctx->secy->sci), idx); + + ns->macsec.nsim_secy[idx].used = false; + memset(&ns->macsec.nsim_secy[idx], 0, sizeof(ns->macsec.nsim_secy[idx])); + ns->macsec.nsim_secy_count--; + + return 0; +} + +static int nsim_macsec_add_rxsc(struct macsec_context *ctx) +{ + struct netdevsim *ns = netdev_priv(ctx->netdev); + struct nsim_secy *secy; + int idx; + + idx = nsim_macsec_find_secy(ns, ctx->secy->sci); + if (idx < 0) { + netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n", + __func__, sci_to_cpu(ctx->secy->sci)); + return -ENOENT; + } + secy = &ns->macsec.nsim_secy[idx]; + + if (secy->nsim_rxsc_count == NSIM_MACSEC_MAX_RXSC_COUNT) + return -ENOSPC; + + for (idx = 0; idx < NSIM_MACSEC_MAX_RXSC_COUNT; idx++) { + if (!secy->nsim_rxsc[idx].used) + break; + } + + if (idx == NSIM_MACSEC_MAX_RXSC_COUNT) + netdev_err(ctx->netdev, "%s: nsim_rxsc_count not full but all RXSCs used\n", + __func__); + + netdev_dbg(ctx->netdev, "%s: adding new rxsc with sci %08llx at index %d\n", + __func__, sci_to_cpu(ctx->rx_sc->sci), idx); + secy->nsim_rxsc[idx].used = true; + secy->nsim_rxsc[idx].sci = ctx->rx_sc->sci; + secy->nsim_rxsc_count++; + + return 0; +} + +static int nsim_macsec_upd_rxsc(struct macsec_context *ctx) +{ + struct 
netdevsim *ns = netdev_priv(ctx->netdev); + struct nsim_secy *secy; + int idx; + + idx = nsim_macsec_find_secy(ns, ctx->secy->sci); + if (idx < 0) { + netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n", + __func__, sci_to_cpu(ctx->secy->sci)); + return -ENOENT; + } + secy = &ns->macsec.nsim_secy[idx]; + + idx = nsim_macsec_find_rxsc(secy, ctx->rx_sc->sci); + if (idx < 0) { + netdev_err(ctx->netdev, "%s: sci %08llx not found in RXSC table\n", + __func__, sci_to_cpu(ctx->rx_sc->sci)); + return -ENOENT; + } + + netdev_dbg(ctx->netdev, "%s: updating RXSC with sci %08llx at index %d\n", + __func__, sci_to_cpu(ctx->rx_sc->sci), idx); + + return 0; +} + +static int nsim_macsec_del_rxsc(struct macsec_context *ctx) +{ + struct netdevsim *ns = netdev_priv(ctx->netdev); + struct nsim_secy *secy; + int idx; + + idx = nsim_macsec_find_secy(ns, ctx->secy->sci); + if (idx < 0) { + netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n", + __func__, sci_to_cpu(ctx->secy->sci)); + return -ENOENT; + } + secy = &ns->macsec.nsim_secy[idx]; + + idx = nsim_macsec_find_rxsc(secy, ctx->rx_sc->sci); + if (idx < 0) { + netdev_err(ctx->netdev, "%s: sci %08llx not found in RXSC table\n", + __func__, sci_to_cpu(ctx->rx_sc->sci)); + return -ENOENT; + } + + netdev_dbg(ctx->netdev, "%s: removing RXSC with sci %08llx at index %d\n", + __func__, sci_to_cpu(ctx->rx_sc->sci), idx); + + secy->nsim_rxsc[idx].used = false; + memset(&secy->nsim_rxsc[idx], 0, sizeof(secy->nsim_rxsc[idx])); + secy->nsim_rxsc_count--; + + return 0; +} + +static int nsim_macsec_add_rxsa(struct macsec_context *ctx) +{ + struct netdevsim *ns = netdev_priv(ctx->netdev); + struct nsim_secy *secy; + int idx; + + idx = nsim_macsec_find_secy(ns, ctx->secy->sci); + if (idx < 0) { + netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n", + __func__, sci_to_cpu(ctx->secy->sci)); + return -ENOENT; + } + secy = &ns->macsec.nsim_secy[idx]; + + idx = nsim_macsec_find_rxsc(secy, ctx->sa.rx_sa->sc->sci); + if (idx < 0) { + netdev_err(ctx->netdev, "%s: sci %08llx not found in RXSC table\n", + __func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci)); + return -ENOENT; + } + + netdev_dbg(ctx->netdev, "%s: RXSC with sci %08llx, AN %u\n", + __func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci), ctx->sa.assoc_num); + + return 0; +} + +static int nsim_macsec_upd_rxsa(struct macsec_context *ctx) +{ + struct netdevsim *ns = netdev_priv(ctx->netdev); + struct nsim_secy *secy; + int idx; + + idx = nsim_macsec_find_secy(ns, ctx->secy->sci); + if (idx < 0) { + netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n", + __func__, sci_to_cpu(ctx->secy->sci)); + return -ENOENT; + } + secy = &ns->macsec.nsim_secy[idx]; + + idx = nsim_macsec_find_rxsc(secy, ctx->sa.rx_sa->sc->sci); + if (idx < 0) { + netdev_err(ctx->netdev, "%s: sci %08llx not found in RXSC table\n", + __func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci)); + return -ENOENT; + } + + netdev_dbg(ctx->netdev, "%s: RXSC with sci %08llx, AN %u\n", + __func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci), ctx->sa.assoc_num); + + return 0; +} + +static int nsim_macsec_del_rxsa(struct macsec_context *ctx) +{ + struct netdevsim *ns = netdev_priv(ctx->netdev); + struct nsim_secy *secy; + int idx; + + idx = nsim_macsec_find_secy(ns, ctx->secy->sci); + if (idx < 0) { + netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n", + __func__, sci_to_cpu(ctx->secy->sci)); + return -ENOENT; + } + secy = &ns->macsec.nsim_secy[idx]; + + idx = nsim_macsec_find_rxsc(secy, ctx->sa.rx_sa->sc->sci); + if (idx < 0) { + 
netdev_err(ctx->netdev, "%s: sci %08llx not found in RXSC table\n", + __func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci)); + return -ENOENT; + } + + netdev_dbg(ctx->netdev, "%s: RXSC with sci %08llx, AN %u\n", + __func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci), ctx->sa.assoc_num); + + return 0; +} + +static int nsim_macsec_add_txsa(struct macsec_context *ctx) +{ + struct netdevsim *ns = netdev_priv(ctx->netdev); + int idx; + + idx = nsim_macsec_find_secy(ns, ctx->secy->sci); + if (idx < 0) { + netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n", + __func__, sci_to_cpu(ctx->secy->sci)); + return -ENOENT; + } + + netdev_dbg(ctx->netdev, "%s: SECY with sci %08llx, AN %u\n", + __func__, sci_to_cpu(ctx->secy->sci), ctx->sa.assoc_num); + + return 0; +} + +static int nsim_macsec_upd_txsa(struct macsec_context *ctx) +{ + struct netdevsim *ns = netdev_priv(ctx->netdev); + int idx; + + idx = nsim_macsec_find_secy(ns, ctx->secy->sci); + if (idx < 0) { + netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n", + __func__, sci_to_cpu(ctx->secy->sci)); + return -ENOENT; + } + + netdev_dbg(ctx->netdev, "%s: SECY with sci %08llx, AN %u\n", + __func__, sci_to_cpu(ctx->secy->sci), ctx->sa.assoc_num); + + return 0; +} + +static int nsim_macsec_del_txsa(struct macsec_context *ctx) +{ + struct netdevsim *ns = netdev_priv(ctx->netdev); + int idx; + + idx = nsim_macsec_find_secy(ns, ctx->secy->sci); + if (idx < 0) { + netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n", + __func__, sci_to_cpu(ctx->secy->sci)); + return -ENOENT; + } + + netdev_dbg(ctx->netdev, "%s: SECY with sci %08llx, AN %u\n", + __func__, sci_to_cpu(ctx->secy->sci), ctx->sa.assoc_num); + + return 0; +} + +static const struct macsec_ops nsim_macsec_ops = { + .mdo_add_secy = nsim_macsec_add_secy, + .mdo_upd_secy = nsim_macsec_upd_secy, + .mdo_del_secy = nsim_macsec_del_secy, + .mdo_add_rxsc = nsim_macsec_add_rxsc, + .mdo_upd_rxsc = nsim_macsec_upd_rxsc, + .mdo_del_rxsc = nsim_macsec_del_rxsc, + .mdo_add_rxsa = nsim_macsec_add_rxsa, + .mdo_upd_rxsa = nsim_macsec_upd_rxsa, + .mdo_del_rxsa = nsim_macsec_del_rxsa, + .mdo_add_txsa = nsim_macsec_add_txsa, + .mdo_upd_txsa = nsim_macsec_upd_txsa, + .mdo_del_txsa = nsim_macsec_del_txsa, +}; + +void nsim_macsec_init(struct netdevsim *ns) +{ + ns->netdev->macsec_ops = &nsim_macsec_ops; + ns->netdev->features |= NETIF_F_HW_MACSEC; + memset(&ns->macsec, 0, sizeof(ns->macsec)); +} + +void nsim_macsec_teardown(struct netdevsim *ns) +{ +} diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 35fa1ca98671..aecaf5f44374 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -209,6 +209,31 @@ static int nsim_set_vf_link_state(struct net_device *dev, int vf, int state) return 0; } +static void nsim_taprio_stats(struct tc_taprio_qopt_stats *stats) +{ + stats->window_drops = 0; + stats->tx_overruns = 0; +} + +static int nsim_setup_tc_taprio(struct net_device *dev, + struct tc_taprio_qopt_offload *offload) +{ + int err = 0; + + switch (offload->cmd) { + case TAPRIO_CMD_REPLACE: + case TAPRIO_CMD_DESTROY: + break; + case TAPRIO_CMD_STATS: + nsim_taprio_stats(&offload->stats); + break; + default: + err = -EOPNOTSUPP; + } + + return err; +} + static LIST_HEAD(nsim_block_cb_list); static int @@ -217,6 +242,8 @@ nsim_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) struct netdevsim *ns = netdev_priv(dev); switch (type) { + case TC_SETUP_QDISC_TAPRIO: + return nsim_setup_tc_taprio(dev, type_data); case 
TC_SETUP_BLOCK: return flow_block_cb_setup_simple(type_data, &nsim_block_cb_list, @@ -291,19 +318,26 @@ static void nsim_setup(struct net_device *dev) static int nsim_init_netdevsim(struct netdevsim *ns) { + struct mock_phc *phc; int err; + phc = mock_phc_create(&ns->nsim_bus_dev->dev); + if (IS_ERR(phc)) + return PTR_ERR(phc); + + ns->phc = phc; ns->netdev->netdev_ops = &nsim_netdev_ops; err = nsim_udp_tunnels_info_create(ns->nsim_dev, ns->netdev); if (err) - return err; + goto err_phc_destroy; rtnl_lock(); err = nsim_bpf_init(ns); if (err) goto err_utn_destroy; + nsim_macsec_init(ns); nsim_ipsec_init(ns); err = register_netdevice(ns->netdev); @@ -314,10 +348,13 @@ static int nsim_init_netdevsim(struct netdevsim *ns) err_ipsec_teardown: nsim_ipsec_teardown(ns); + nsim_macsec_teardown(ns); nsim_bpf_uninit(ns); err_utn_destroy: rtnl_unlock(); nsim_udp_tunnels_info_destroy(ns->netdev); +err_phc_destroy: + mock_phc_destroy(ns->phc); return err; } @@ -374,12 +411,14 @@ void nsim_destroy(struct netdevsim *ns) rtnl_lock(); unregister_netdevice(dev); if (nsim_dev_port_is_pf(ns->nsim_dev_port)) { + nsim_macsec_teardown(ns); nsim_ipsec_teardown(ns); nsim_bpf_uninit(ns); } rtnl_unlock(); if (nsim_dev_port_is_pf(ns->nsim_dev_port)) nsim_udp_tunnels_info_destroy(dev); + mock_phc_destroy(ns->phc); free_netdev(dev); } @@ -431,4 +470,5 @@ static void __exit nsim_module_exit(void) module_init(nsim_module_init); module_exit(nsim_module_exit); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Simulated networking device for testing"); MODULE_ALIAS_RTNL_LINK(DRV_NAME); diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 7d8ed8d8df5c..028c825b86db 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -19,10 +19,12 @@ #include <linux/kernel.h> #include <linux/list.h> #include <linux/netdevice.h> +#include <linux/ptp_mock.h> #include <linux/u64_stats_sync.h> #include <net/devlink.h> #include <net/udp_tunnel.h> #include <net/xdp.h> +#include <net/macsec.h> #define DRV_NAME "netdevsim" @@ -52,6 +54,25 @@ struct nsim_ipsec { u32 ok; }; +#define NSIM_MACSEC_MAX_SECY_COUNT 3 +#define NSIM_MACSEC_MAX_RXSC_COUNT 1 +struct nsim_rxsc { + sci_t sci; + bool used; +}; + +struct nsim_secy { + sci_t sci; + struct nsim_rxsc nsim_rxsc[NSIM_MACSEC_MAX_RXSC_COUNT]; + u8 nsim_rxsc_count; + bool used; +}; + +struct nsim_macsec { + struct nsim_secy nsim_secy[NSIM_MACSEC_MAX_SECY_COUNT]; + u8 nsim_secy_count; +}; + struct nsim_ethtool_pauseparam { bool rx; bool tx; @@ -73,6 +94,7 @@ struct netdevsim { struct net_device *netdev; struct nsim_dev *nsim_dev; struct nsim_dev_port *nsim_dev_port; + struct mock_phc *phc; u64 tx_packets; u64 tx_bytes; @@ -93,6 +115,7 @@ struct netdevsim { bool bpf_map_accept; struct nsim_ipsec ipsec; + struct nsim_macsec macsec; struct { u32 inject_error; u32 sleep; @@ -366,6 +389,19 @@ static inline bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb) } #endif +#if IS_ENABLED(CONFIG_MACSEC) +void nsim_macsec_init(struct netdevsim *ns); +void nsim_macsec_teardown(struct netdevsim *ns); +#else +static inline void nsim_macsec_init(struct netdevsim *ns) +{ +} + +static inline void nsim_macsec_teardown(struct netdevsim *ns) +{ +} +#endif + struct nsim_bus_dev { struct device dev; struct list_head list; diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c new file mode 100644 index 000000000000..39171380ccf2 --- /dev/null +++ b/drivers/net/netkit.c @@ -0,0 +1,960 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2023 
Isovalent */ + +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <linux/etherdevice.h> +#include <linux/filter.h> +#include <linux/netfilter_netdev.h> +#include <linux/bpf_mprog.h> +#include <linux/indirect_call_wrapper.h> + +#include <net/netkit.h> +#include <net/dst.h> +#include <net/tcx.h> + +#define DRV_NAME "netkit" + +struct netkit { + /* Needed in fast-path */ + struct net_device __rcu *peer; + struct bpf_mprog_entry __rcu *active; + enum netkit_action policy; + struct bpf_mprog_bundle bundle; + + /* Needed in slow-path */ + enum netkit_mode mode; + bool primary; + u32 headroom; +}; + +struct netkit_link { + struct bpf_link link; + struct net_device *dev; + u32 location; +}; + +static __always_inline int +netkit_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb, + enum netkit_action ret) +{ + const struct bpf_mprog_fp *fp; + const struct bpf_prog *prog; + + bpf_mprog_foreach_prog(entry, fp, prog) { + bpf_compute_data_pointers(skb); + ret = bpf_prog_run(prog, skb); + if (ret != NETKIT_NEXT) + break; + } + return ret; +} + +static void netkit_prep_forward(struct sk_buff *skb, bool xnet) +{ + skb_scrub_packet(skb, xnet); + skb->priority = 0; + nf_skip_egress(skb, true); +} + +static struct netkit *netkit_priv(const struct net_device *dev) +{ + return netdev_priv(dev); +} + +static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct netkit *nk = netkit_priv(dev); + enum netkit_action ret = READ_ONCE(nk->policy); + netdev_tx_t ret_dev = NET_XMIT_SUCCESS; + const struct bpf_mprog_entry *entry; + struct net_device *peer; + int len = skb->len; + + rcu_read_lock(); + peer = rcu_dereference(nk->peer); + if (unlikely(!peer || !(peer->flags & IFF_UP) || + !pskb_may_pull(skb, ETH_HLEN) || + skb_orphan_frags(skb, GFP_ATOMIC))) + goto drop; + netkit_prep_forward(skb, !net_eq(dev_net(dev), dev_net(peer))); + skb->dev = peer; + entry = rcu_dereference(nk->active); + if (entry) + ret = netkit_run(entry, skb, ret); + switch (ret) { + case NETKIT_NEXT: + case NETKIT_PASS: + skb->protocol = eth_type_trans(skb, skb->dev); + skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); + if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) { + dev_sw_netstats_tx_add(dev, 1, len); + dev_sw_netstats_rx_add(peer, len); + } else { + goto drop_stats; + } + break; + case NETKIT_REDIRECT: + dev_sw_netstats_tx_add(dev, 1, len); + skb_do_redirect(skb); + break; + case NETKIT_DROP: + default: +drop: + kfree_skb(skb); +drop_stats: + dev_core_stats_tx_dropped_inc(dev); + ret_dev = NET_XMIT_DROP; + break; + } + rcu_read_unlock(); + return ret_dev; +} + +static int netkit_open(struct net_device *dev) +{ + struct netkit *nk = netkit_priv(dev); + struct net_device *peer = rtnl_dereference(nk->peer); + + if (!peer) + return -ENOTCONN; + if (peer->flags & IFF_UP) { + netif_carrier_on(dev); + netif_carrier_on(peer); + } + return 0; +} + +static int netkit_close(struct net_device *dev) +{ + struct netkit *nk = netkit_priv(dev); + struct net_device *peer = rtnl_dereference(nk->peer); + + netif_carrier_off(dev); + if (peer) + netif_carrier_off(peer); + return 0; +} + +static int netkit_get_iflink(const struct net_device *dev) +{ + struct netkit *nk = netkit_priv(dev); + struct net_device *peer; + int iflink = 0; + + rcu_read_lock(); + peer = rcu_dereference(nk->peer); + if (peer) + iflink = peer->ifindex; + rcu_read_unlock(); + return iflink; +} + +static void netkit_set_multicast(struct net_device *dev) +{ + /* Nothing to do, we receive whatever gets pushed to us! 
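 * There is nothing to program: netkit_xmit() on the peer hands frames
 * straight to this device with __netif_rx(), so the usual unicast and
 * multicast filter lists never influence what gets delivered.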
*/ +} + +static void netkit_set_headroom(struct net_device *dev, int headroom) +{ + struct netkit *nk = netkit_priv(dev), *nk2; + struct net_device *peer; + + if (headroom < 0) + headroom = NET_SKB_PAD; + + rcu_read_lock(); + peer = rcu_dereference(nk->peer); + if (unlikely(!peer)) + goto out; + + nk2 = netkit_priv(peer); + nk->headroom = headroom; + headroom = max(nk->headroom, nk2->headroom); + + peer->needed_headroom = headroom; + dev->needed_headroom = headroom; +out: + rcu_read_unlock(); +} + +INDIRECT_CALLABLE_SCOPE struct net_device *netkit_peer_dev(struct net_device *dev) +{ + return rcu_dereference(netkit_priv(dev)->peer); +} + +static void netkit_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + dev_fetch_sw_netstats(stats, dev->tstats); + stats->tx_dropped = DEV_STATS_READ(dev, tx_dropped); +} + +static void netkit_uninit(struct net_device *dev); + +static const struct net_device_ops netkit_netdev_ops = { + .ndo_open = netkit_open, + .ndo_stop = netkit_close, + .ndo_start_xmit = netkit_xmit, + .ndo_set_rx_mode = netkit_set_multicast, + .ndo_set_rx_headroom = netkit_set_headroom, + .ndo_get_iflink = netkit_get_iflink, + .ndo_get_peer_dev = netkit_peer_dev, + .ndo_get_stats64 = netkit_get_stats, + .ndo_uninit = netkit_uninit, + .ndo_features_check = passthru_features_check, +}; + +static void netkit_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strscpy(info->driver, DRV_NAME, sizeof(info->driver)); +} + +static const struct ethtool_ops netkit_ethtool_ops = { + .get_drvinfo = netkit_get_drvinfo, +}; + +static void netkit_setup(struct net_device *dev) +{ + static const netdev_features_t netkit_features_hw_vlan = + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_HW_VLAN_STAG_RX; + static const netdev_features_t netkit_features = + netkit_features_hw_vlan | + NETIF_F_SG | + NETIF_F_FRAGLIST | + NETIF_F_HW_CSUM | + NETIF_F_RXCSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HIGHDMA | + NETIF_F_GSO_SOFTWARE | + NETIF_F_GSO_ENCAP_ALL; + + ether_setup(dev); + dev->max_mtu = ETH_MAX_MTU; + dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; + + dev->flags |= IFF_NOARP; + dev->priv_flags &= ~IFF_TX_SKB_SHARING; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + dev->priv_flags |= IFF_PHONY_HEADROOM; + dev->priv_flags |= IFF_NO_QUEUE; + + dev->ethtool_ops = &netkit_ethtool_ops; + dev->netdev_ops = &netkit_netdev_ops; + + dev->features |= netkit_features | NETIF_F_LLTX; + dev->hw_features = netkit_features; + dev->hw_enc_features = netkit_features; + dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE; + dev->vlan_features = dev->features & ~netkit_features_hw_vlan; + + dev->needs_free_netdev = true; + + netif_set_tso_max_size(dev, GSO_MAX_SIZE); +} + +static struct net *netkit_get_link_net(const struct net_device *dev) +{ + struct netkit *nk = netkit_priv(dev); + struct net_device *peer = rtnl_dereference(nk->peer); + + return peer ? 
dev_net(peer) : dev_net(dev); +} + +static int netkit_check_policy(int policy, struct nlattr *tb, + struct netlink_ext_ack *extack) +{ + switch (policy) { + case NETKIT_PASS: + case NETKIT_DROP: + return 0; + default: + NL_SET_ERR_MSG_ATTR(extack, tb, + "Provided default xmit policy not supported"); + return -EINVAL; + } +} + +static int netkit_check_mode(int mode, struct nlattr *tb, + struct netlink_ext_ack *extack) +{ + switch (mode) { + case NETKIT_L2: + case NETKIT_L3: + return 0; + default: + NL_SET_ERR_MSG_ATTR(extack, tb, + "Provided device mode can only be L2 or L3"); + return -EINVAL; + } +} + +static int netkit_validate(struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct nlattr *attr = tb[IFLA_ADDRESS]; + + if (!attr) + return 0; + NL_SET_ERR_MSG_ATTR(extack, attr, + "Setting Ethernet address is not supported"); + return -EOPNOTSUPP; +} + +static struct rtnl_link_ops netkit_link_ops; + +static int netkit_new_link(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb, *attr; + enum netkit_action default_prim = NETKIT_PASS; + enum netkit_action default_peer = NETKIT_PASS; + enum netkit_mode mode = NETKIT_L3; + unsigned char ifname_assign_type; + struct ifinfomsg *ifmp = NULL; + struct net_device *peer; + char ifname[IFNAMSIZ]; + struct netkit *nk; + struct net *net; + int err; + + if (data) { + if (data[IFLA_NETKIT_MODE]) { + attr = data[IFLA_NETKIT_MODE]; + mode = nla_get_u32(attr); + err = netkit_check_mode(mode, attr, extack); + if (err < 0) + return err; + } + if (data[IFLA_NETKIT_PEER_INFO]) { + attr = data[IFLA_NETKIT_PEER_INFO]; + ifmp = nla_data(attr); + err = rtnl_nla_parse_ifinfomsg(peer_tb, attr, extack); + if (err < 0) + return err; + err = netkit_validate(peer_tb, NULL, extack); + if (err < 0) + return err; + tbp = peer_tb; + } + if (data[IFLA_NETKIT_POLICY]) { + attr = data[IFLA_NETKIT_POLICY]; + default_prim = nla_get_u32(attr); + err = netkit_check_policy(default_prim, attr, extack); + if (err < 0) + return err; + } + if (data[IFLA_NETKIT_PEER_POLICY]) { + attr = data[IFLA_NETKIT_PEER_POLICY]; + default_peer = nla_get_u32(attr); + err = netkit_check_policy(default_peer, attr, extack); + if (err < 0) + return err; + } + } + + if (ifmp && tbp[IFLA_IFNAME]) { + nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); + ifname_assign_type = NET_NAME_USER; + } else { + strscpy(ifname, "nk%d", IFNAMSIZ); + ifname_assign_type = NET_NAME_ENUM; + } + + net = rtnl_link_get_net(src_net, tbp); + if (IS_ERR(net)) + return PTR_ERR(net); + + peer = rtnl_create_link(net, ifname, ifname_assign_type, + &netkit_link_ops, tbp, extack); + if (IS_ERR(peer)) { + put_net(net); + return PTR_ERR(peer); + } + + netif_inherit_tso_max(peer, dev); + + if (mode == NETKIT_L2) + eth_hw_addr_random(peer); + if (ifmp && dev->ifindex) + peer->ifindex = ifmp->ifi_index; + + nk = netkit_priv(peer); + nk->primary = false; + nk->policy = default_peer; + nk->mode = mode; + bpf_mprog_bundle_init(&nk->bundle); + + err = register_netdevice(peer); + put_net(net); + if (err < 0) + goto err_register_peer; + netif_carrier_off(peer); + if (mode == NETKIT_L2) + dev_change_flags(peer, peer->flags & ~IFF_NOARP, NULL); + + err = rtnl_configure_link(peer, NULL, 0, NULL); + if (err < 0) + goto err_configure_peer; + + if (mode == NETKIT_L2) + eth_hw_addr_random(dev); + if (tb[IFLA_IFNAME]) + nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); + else + strscpy(dev->name, 
"nk%d", IFNAMSIZ); + + nk = netkit_priv(dev); + nk->primary = true; + nk->policy = default_prim; + nk->mode = mode; + bpf_mprog_bundle_init(&nk->bundle); + + err = register_netdevice(dev); + if (err < 0) + goto err_configure_peer; + netif_carrier_off(dev); + if (mode == NETKIT_L2) + dev_change_flags(dev, dev->flags & ~IFF_NOARP, NULL); + + rcu_assign_pointer(netkit_priv(dev)->peer, peer); + rcu_assign_pointer(netkit_priv(peer)->peer, dev); + return 0; +err_configure_peer: + unregister_netdevice(peer); + return err; +err_register_peer: + free_netdev(peer); + return err; +} + +static struct bpf_mprog_entry *netkit_entry_fetch(struct net_device *dev, + bool bundle_fallback) +{ + struct netkit *nk = netkit_priv(dev); + struct bpf_mprog_entry *entry; + + ASSERT_RTNL(); + entry = rcu_dereference_rtnl(nk->active); + if (entry) + return entry; + if (bundle_fallback) + return &nk->bundle.a; + return NULL; +} + +static void netkit_entry_update(struct net_device *dev, + struct bpf_mprog_entry *entry) +{ + struct netkit *nk = netkit_priv(dev); + + ASSERT_RTNL(); + rcu_assign_pointer(nk->active, entry); +} + +static void netkit_entry_sync(void) +{ + synchronize_rcu(); +} + +static struct net_device *netkit_dev_fetch(struct net *net, u32 ifindex, u32 which) +{ + struct net_device *dev; + struct netkit *nk; + + ASSERT_RTNL(); + + switch (which) { + case BPF_NETKIT_PRIMARY: + case BPF_NETKIT_PEER: + break; + default: + return ERR_PTR(-EINVAL); + } + + dev = __dev_get_by_index(net, ifindex); + if (!dev) + return ERR_PTR(-ENODEV); + if (dev->netdev_ops != &netkit_netdev_ops) + return ERR_PTR(-ENXIO); + + nk = netkit_priv(dev); + if (!nk->primary) + return ERR_PTR(-EACCES); + if (which == BPF_NETKIT_PEER) { + dev = rcu_dereference_rtnl(nk->peer); + if (!dev) + return ERR_PTR(-ENODEV); + } + return dev; +} + +int netkit_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + struct bpf_mprog_entry *entry, *entry_new; + struct bpf_prog *replace_prog = NULL; + struct net_device *dev; + int ret; + + rtnl_lock(); + dev = netkit_dev_fetch(current->nsproxy->net_ns, attr->target_ifindex, + attr->attach_type); + if (IS_ERR(dev)) { + ret = PTR_ERR(dev); + goto out; + } + entry = netkit_entry_fetch(dev, true); + if (attr->attach_flags & BPF_F_REPLACE) { + replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, + prog->type); + if (IS_ERR(replace_prog)) { + ret = PTR_ERR(replace_prog); + replace_prog = NULL; + goto out; + } + } + ret = bpf_mprog_attach(entry, &entry_new, prog, NULL, replace_prog, + attr->attach_flags, attr->relative_fd, + attr->expected_revision); + if (!ret) { + if (entry != entry_new) { + netkit_entry_update(dev, entry_new); + netkit_entry_sync(); + } + bpf_mprog_commit(entry); + } +out: + if (replace_prog) + bpf_prog_put(replace_prog); + rtnl_unlock(); + return ret; +} + +int netkit_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + struct bpf_mprog_entry *entry, *entry_new; + struct net_device *dev; + int ret; + + rtnl_lock(); + dev = netkit_dev_fetch(current->nsproxy->net_ns, attr->target_ifindex, + attr->attach_type); + if (IS_ERR(dev)) { + ret = PTR_ERR(dev); + goto out; + } + entry = netkit_entry_fetch(dev, false); + if (!entry) { + ret = -ENOENT; + goto out; + } + ret = bpf_mprog_detach(entry, &entry_new, prog, NULL, attr->attach_flags, + attr->relative_fd, attr->expected_revision); + if (!ret) { + if (!bpf_mprog_total(entry_new)) + entry_new = NULL; + netkit_entry_update(dev, entry_new); + netkit_entry_sync(); + bpf_mprog_commit(entry); + } +out: + rtnl_unlock(); + 
return ret; +} + +int netkit_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) +{ + struct net_device *dev; + int ret; + + rtnl_lock(); + dev = netkit_dev_fetch(current->nsproxy->net_ns, + attr->query.target_ifindex, + attr->query.attach_type); + if (IS_ERR(dev)) { + ret = PTR_ERR(dev); + goto out; + } + ret = bpf_mprog_query(attr, uattr, netkit_entry_fetch(dev, false)); +out: + rtnl_unlock(); + return ret; +} + +static struct netkit_link *netkit_link(const struct bpf_link *link) +{ + return container_of(link, struct netkit_link, link); +} + +static int netkit_link_prog_attach(struct bpf_link *link, u32 flags, + u32 id_or_fd, u64 revision) +{ + struct netkit_link *nkl = netkit_link(link); + struct bpf_mprog_entry *entry, *entry_new; + struct net_device *dev = nkl->dev; + int ret; + + ASSERT_RTNL(); + entry = netkit_entry_fetch(dev, true); + ret = bpf_mprog_attach(entry, &entry_new, link->prog, link, NULL, flags, + id_or_fd, revision); + if (!ret) { + if (entry != entry_new) { + netkit_entry_update(dev, entry_new); + netkit_entry_sync(); + } + bpf_mprog_commit(entry); + } + return ret; +} + +static void netkit_link_release(struct bpf_link *link) +{ + struct netkit_link *nkl = netkit_link(link); + struct bpf_mprog_entry *entry, *entry_new; + struct net_device *dev; + int ret = 0; + + rtnl_lock(); + dev = nkl->dev; + if (!dev) + goto out; + entry = netkit_entry_fetch(dev, false); + if (!entry) { + ret = -ENOENT; + goto out; + } + ret = bpf_mprog_detach(entry, &entry_new, link->prog, link, 0, 0, 0); + if (!ret) { + if (!bpf_mprog_total(entry_new)) + entry_new = NULL; + netkit_entry_update(dev, entry_new); + netkit_entry_sync(); + bpf_mprog_commit(entry); + nkl->dev = NULL; + } +out: + WARN_ON_ONCE(ret); + rtnl_unlock(); +} + +static int netkit_link_update(struct bpf_link *link, struct bpf_prog *nprog, + struct bpf_prog *oprog) +{ + struct netkit_link *nkl = netkit_link(link); + struct bpf_mprog_entry *entry, *entry_new; + struct net_device *dev; + int ret = 0; + + rtnl_lock(); + dev = nkl->dev; + if (!dev) { + ret = -ENOLINK; + goto out; + } + if (oprog && link->prog != oprog) { + ret = -EPERM; + goto out; + } + oprog = link->prog; + if (oprog == nprog) { + bpf_prog_put(nprog); + goto out; + } + entry = netkit_entry_fetch(dev, false); + if (!entry) { + ret = -ENOENT; + goto out; + } + ret = bpf_mprog_attach(entry, &entry_new, nprog, link, oprog, + BPF_F_REPLACE | BPF_F_ID, + link->prog->aux->id, 0); + if (!ret) { + WARN_ON_ONCE(entry != entry_new); + oprog = xchg(&link->prog, nprog); + bpf_prog_put(oprog); + bpf_mprog_commit(entry); + } +out: + rtnl_unlock(); + return ret; +} + +static void netkit_link_dealloc(struct bpf_link *link) +{ + kfree(netkit_link(link)); +} + +static void netkit_link_fdinfo(const struct bpf_link *link, struct seq_file *seq) +{ + const struct netkit_link *nkl = netkit_link(link); + u32 ifindex = 0; + + rtnl_lock(); + if (nkl->dev) + ifindex = nkl->dev->ifindex; + rtnl_unlock(); + + seq_printf(seq, "ifindex:\t%u\n", ifindex); + seq_printf(seq, "attach_type:\t%u (%s)\n", + nkl->location, + nkl->location == BPF_NETKIT_PRIMARY ? 
"primary" : "peer"); +} + +static int netkit_link_fill_info(const struct bpf_link *link, + struct bpf_link_info *info) +{ + const struct netkit_link *nkl = netkit_link(link); + u32 ifindex = 0; + + rtnl_lock(); + if (nkl->dev) + ifindex = nkl->dev->ifindex; + rtnl_unlock(); + + info->netkit.ifindex = ifindex; + info->netkit.attach_type = nkl->location; + return 0; +} + +static int netkit_link_detach(struct bpf_link *link) +{ + netkit_link_release(link); + return 0; +} + +static const struct bpf_link_ops netkit_link_lops = { + .release = netkit_link_release, + .detach = netkit_link_detach, + .dealloc = netkit_link_dealloc, + .update_prog = netkit_link_update, + .show_fdinfo = netkit_link_fdinfo, + .fill_link_info = netkit_link_fill_info, +}; + +static int netkit_link_init(struct netkit_link *nkl, + struct bpf_link_primer *link_primer, + const union bpf_attr *attr, + struct net_device *dev, + struct bpf_prog *prog) +{ + bpf_link_init(&nkl->link, BPF_LINK_TYPE_NETKIT, + &netkit_link_lops, prog); + nkl->location = attr->link_create.attach_type; + nkl->dev = dev; + return bpf_link_prime(&nkl->link, link_primer); +} + +int netkit_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + struct bpf_link_primer link_primer; + struct netkit_link *nkl; + struct net_device *dev; + int ret; + + rtnl_lock(); + dev = netkit_dev_fetch(current->nsproxy->net_ns, + attr->link_create.target_ifindex, + attr->link_create.attach_type); + if (IS_ERR(dev)) { + ret = PTR_ERR(dev); + goto out; + } + nkl = kzalloc(sizeof(*nkl), GFP_KERNEL_ACCOUNT); + if (!nkl) { + ret = -ENOMEM; + goto out; + } + ret = netkit_link_init(nkl, &link_primer, attr, dev, prog); + if (ret) { + kfree(nkl); + goto out; + } + ret = netkit_link_prog_attach(&nkl->link, + attr->link_create.flags, + attr->link_create.netkit.relative_fd, + attr->link_create.netkit.expected_revision); + if (ret) { + nkl->dev = NULL; + bpf_link_cleanup(&link_primer); + goto out; + } + ret = bpf_link_settle(&link_primer); +out: + rtnl_unlock(); + return ret; +} + +static void netkit_release_all(struct net_device *dev) +{ + struct bpf_mprog_entry *entry; + struct bpf_tuple tuple = {}; + struct bpf_mprog_fp *fp; + struct bpf_mprog_cp *cp; + + entry = netkit_entry_fetch(dev, false); + if (!entry) + return; + netkit_entry_update(dev, NULL); + netkit_entry_sync(); + bpf_mprog_foreach_tuple(entry, fp, cp, tuple) { + if (tuple.link) + netkit_link(tuple.link)->dev = NULL; + else + bpf_prog_put(tuple.prog); + } +} + +static void netkit_uninit(struct net_device *dev) +{ + netkit_release_all(dev); +} + +static void netkit_del_link(struct net_device *dev, struct list_head *head) +{ + struct netkit *nk = netkit_priv(dev); + struct net_device *peer = rtnl_dereference(nk->peer); + + RCU_INIT_POINTER(nk->peer, NULL); + unregister_netdevice_queue(dev, head); + if (peer) { + nk = netkit_priv(peer); + RCU_INIT_POINTER(nk->peer, NULL); + unregister_netdevice_queue(peer, head); + } +} + +static int netkit_change_link(struct net_device *dev, struct nlattr *tb[], + struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct netkit *nk = netkit_priv(dev); + struct net_device *peer = rtnl_dereference(nk->peer); + enum netkit_action policy; + struct nlattr *attr; + int err; + + if (!nk->primary) { + NL_SET_ERR_MSG(extack, + "netkit link settings can be changed only through the primary device"); + return -EACCES; + } + + if (data[IFLA_NETKIT_MODE]) { + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_MODE], + "netkit link operating mode cannot be changed after device creation"); 
+ return -EACCES; + } + + if (data[IFLA_NETKIT_PEER_INFO]) { + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_PEER_INFO], + "netkit peer info cannot be changed after device creation"); + return -EINVAL; + } + + if (data[IFLA_NETKIT_POLICY]) { + attr = data[IFLA_NETKIT_POLICY]; + policy = nla_get_u32(attr); + err = netkit_check_policy(policy, attr, extack); + if (err) + return err; + WRITE_ONCE(nk->policy, policy); + } + + if (data[IFLA_NETKIT_PEER_POLICY]) { + err = -EOPNOTSUPP; + attr = data[IFLA_NETKIT_PEER_POLICY]; + policy = nla_get_u32(attr); + if (peer) + err = netkit_check_policy(policy, attr, extack); + if (err) + return err; + nk = netkit_priv(peer); + WRITE_ONCE(nk->policy, policy); + } + + return 0; +} + +static size_t netkit_get_size(const struct net_device *dev) +{ + return nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_POLICY */ + nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_PEER_POLICY */ + nla_total_size(sizeof(u8)) + /* IFLA_NETKIT_PRIMARY */ + nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_MODE */ + 0; +} + +static int netkit_fill_info(struct sk_buff *skb, const struct net_device *dev) +{ + struct netkit *nk = netkit_priv(dev); + struct net_device *peer = rtnl_dereference(nk->peer); + + if (nla_put_u8(skb, IFLA_NETKIT_PRIMARY, nk->primary)) + return -EMSGSIZE; + if (nla_put_u32(skb, IFLA_NETKIT_POLICY, nk->policy)) + return -EMSGSIZE; + if (nla_put_u32(skb, IFLA_NETKIT_MODE, nk->mode)) + return -EMSGSIZE; + + if (peer) { + nk = netkit_priv(peer); + if (nla_put_u32(skb, IFLA_NETKIT_PEER_POLICY, nk->policy)) + return -EMSGSIZE; + } + + return 0; +} + +static const struct nla_policy netkit_policy[IFLA_NETKIT_MAX + 1] = { + [IFLA_NETKIT_PEER_INFO] = { .len = sizeof(struct ifinfomsg) }, + [IFLA_NETKIT_POLICY] = { .type = NLA_U32 }, + [IFLA_NETKIT_MODE] = { .type = NLA_U32 }, + [IFLA_NETKIT_PEER_POLICY] = { .type = NLA_U32 }, + [IFLA_NETKIT_PRIMARY] = { .type = NLA_REJECT, + .reject_message = "Primary attribute is read-only" }, +}; + +static struct rtnl_link_ops netkit_link_ops = { + .kind = DRV_NAME, + .priv_size = sizeof(struct netkit), + .setup = netkit_setup, + .newlink = netkit_new_link, + .dellink = netkit_del_link, + .changelink = netkit_change_link, + .get_link_net = netkit_get_link_net, + .get_size = netkit_get_size, + .fill_info = netkit_fill_info, + .policy = netkit_policy, + .validate = netkit_validate, + .maxtype = IFLA_NETKIT_MAX, +}; + +static __init int netkit_init(void) +{ + BUILD_BUG_ON((int)NETKIT_NEXT != (int)TCX_NEXT || + (int)NETKIT_PASS != (int)TCX_PASS || + (int)NETKIT_DROP != (int)TCX_DROP || + (int)NETKIT_REDIRECT != (int)TCX_REDIRECT); + + return rtnl_link_register(&netkit_link_ops); +} + +static __exit void netkit_exit(void) +{ + rtnl_link_unregister(&netkit_link_ops); +} + +module_init(netkit_init); +module_exit(netkit_exit); + +MODULE_DESCRIPTION("BPF-programmable network device"); +MODULE_AUTHOR("Daniel Borkmann <daniel@iogearbox.net>"); +MODULE_AUTHOR("Nikolay Aleksandrov <razor@blackwall.org>"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_RTNL_LINK(DRV_NAME); diff --git a/drivers/net/pcs/Makefile b/drivers/net/pcs/Makefile index ea662a7989b2..fb1694192ae6 100644 --- a/drivers/net/pcs/Makefile +++ b/drivers/net/pcs/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for Linux PCS drivers -pcs_xpcs-$(CONFIG_PCS_XPCS) := pcs-xpcs.o pcs-xpcs-nxp.o +pcs_xpcs-$(CONFIG_PCS_XPCS) := pcs-xpcs.o pcs-xpcs-nxp.o pcs-xpcs-wx.o obj-$(CONFIG_PCS_XPCS) += pcs_xpcs.o obj-$(CONFIG_PCS_LYNX) += pcs-lynx.o diff --git a/drivers/net/pcs/pcs-lynx.c 
b/drivers/net/pcs/pcs-lynx.c index 9021b96d4f9d..dc3962b2aa6b 100644 --- a/drivers/net/pcs/pcs-lynx.c +++ b/drivers/net/pcs/pcs-lynx.c @@ -216,7 +216,7 @@ static void lynx_pcs_link_up_sgmii(struct mdio_device *pcs, /* The PCS needs to be configured manually only * when not operating on in-band mode */ - if (neg_mode != PHYLINK_PCS_NEG_INBAND_ENABLED) + if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) return; if (duplex == DUPLEX_HALF) diff --git a/drivers/net/pcs/pcs-mtk-lynxi.c b/drivers/net/pcs/pcs-mtk-lynxi.c index b0f3ede945d9..8501dd365279 100644 --- a/drivers/net/pcs/pcs-mtk-lynxi.c +++ b/drivers/net/pcs/pcs-mtk-lynxi.c @@ -233,11 +233,19 @@ static void mtk_pcs_lynxi_link_up(struct phylink_pcs *pcs, } } +static void mtk_pcs_lynxi_disable(struct phylink_pcs *pcs) +{ + struct mtk_pcs_lynxi *mpcs = pcs_to_mtk_pcs_lynxi(pcs); + + mpcs->interface = PHY_INTERFACE_MODE_NA; +} + static const struct phylink_pcs_ops mtk_pcs_lynxi_ops = { .pcs_get_state = mtk_pcs_lynxi_get_state, .pcs_config = mtk_pcs_lynxi_config, .pcs_an_restart = mtk_pcs_lynxi_restart_an, .pcs_link_up = mtk_pcs_lynxi_link_up, + .pcs_disable = mtk_pcs_lynxi_disable, }; struct phylink_pcs *mtk_pcs_lynxi_create(struct device *dev, diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c index 356099169003..97139c07130f 100644 --- a/drivers/net/pcs/pcs-rzn1-miic.c +++ b/drivers/net/pcs/pcs-rzn1-miic.c @@ -12,6 +12,7 @@ #include <linux/of_platform.h> #include <linux/pcs-rzn1-miic.h> #include <linux/phylink.h> +#include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <dt-bindings/net/pcs-rzn1-miic.h> diff --git a/drivers/net/pcs/pcs-xpcs-wx.c b/drivers/net/pcs/pcs-xpcs-wx.c new file mode 100644 index 000000000000..19c75886f070 --- /dev/null +++ b/drivers/net/pcs/pcs-xpcs-wx.c @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. 
*/ + +#include <linux/pcs/pcs-xpcs.h> +#include <linux/mdio.h> +#include "pcs-xpcs.h" + +/* VR_XS_PMA_MMD */ +#define TXGBE_PMA_MMD 0x8020 +#define TXGBE_TX_GENCTL1 0x11 +#define TXGBE_TX_GENCTL1_VBOOST_LVL GENMASK(10, 8) +#define TXGBE_TX_GENCTL1_VBOOST_EN0 BIT(4) +#define TXGBE_TX_GEN_CTL2 0x12 +#define TXGBE_TX_GEN_CTL2_TX0_WIDTH(v) FIELD_PREP(GENMASK(9, 8), v) +#define TXGBE_TX_RATE_CTL 0x14 +#define TXGBE_TX_RATE_CTL_TX0_RATE(v) FIELD_PREP(GENMASK(2, 0), v) +#define TXGBE_RX_GEN_CTL2 0x32 +#define TXGBE_RX_GEN_CTL2_RX0_WIDTH(v) FIELD_PREP(GENMASK(9, 8), v) +#define TXGBE_RX_GEN_CTL3 0x33 +#define TXGBE_RX_GEN_CTL3_LOS_TRSHLD0 GENMASK(2, 0) +#define TXGBE_RX_RATE_CTL 0x34 +#define TXGBE_RX_RATE_CTL_RX0_RATE(v) FIELD_PREP(GENMASK(1, 0), v) +#define TXGBE_RX_EQ_ATTN_CTL 0x37 +#define TXGBE_RX_EQ_ATTN_LVL0 GENMASK(2, 0) +#define TXGBE_RX_EQ_CTL0 0x38 +#define TXGBE_RX_EQ_CTL0_VGA1_GAIN(v) FIELD_PREP(GENMASK(15, 12), v) +#define TXGBE_RX_EQ_CTL0_VGA2_GAIN(v) FIELD_PREP(GENMASK(11, 8), v) +#define TXGBE_RX_EQ_CTL0_CTLE_POLE(v) FIELD_PREP(GENMASK(7, 5), v) +#define TXGBE_RX_EQ_CTL0_CTLE_BOOST(v) FIELD_PREP(GENMASK(4, 0), v) +#define TXGBE_RX_EQ_CTL4 0x3C +#define TXGBE_RX_EQ_CTL4_CONT_OFF_CAN0 BIT(4) +#define TXGBE_RX_EQ_CTL4_CONT_ADAPT0 BIT(0) +#define TXGBE_AFE_DFE_ENABLE 0x3D +#define TXGBE_DFE_EN_0 BIT(4) +#define TXGBE_AFE_EN_0 BIT(0) +#define TXGBE_DFE_TAP_CTL0 0x3E +#define TXGBE_MPLLA_CTL0 0x51 +#define TXGBE_MPLLA_CTL2 0x53 +#define TXGBE_MPLLA_CTL2_DIV16P5_CLK_EN BIT(10) +#define TXGBE_MPLLA_CTL2_DIV10_CLK_EN BIT(9) +#define TXGBE_MPLLA_CTL3 0x57 +#define TXGBE_MISC_CTL0 0x70 +#define TXGBE_MISC_CTL0_PLL BIT(15) +#define TXGBE_MISC_CTL0_CR_PARA_SEL BIT(14) +#define TXGBE_MISC_CTL0_RX_VREF(v) FIELD_PREP(GENMASK(12, 8), v) +#define TXGBE_VCO_CAL_LD0 0x72 +#define TXGBE_VCO_CAL_REF0 0x76 + +static int txgbe_read_pma(struct dw_xpcs *xpcs, int reg) +{ + return xpcs_read(xpcs, MDIO_MMD_PMAPMD, TXGBE_PMA_MMD + reg); +} + +static int txgbe_write_pma(struct dw_xpcs *xpcs, int reg, u16 val) +{ + return xpcs_write(xpcs, MDIO_MMD_PMAPMD, TXGBE_PMA_MMD + reg, val); +} + +static void txgbe_pma_config_10gbaser(struct dw_xpcs *xpcs) +{ + int val; + + txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL0, 0x21); + txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL3, 0); + val = txgbe_read_pma(xpcs, TXGBE_TX_GENCTL1); + val = u16_replace_bits(val, 0x5, TXGBE_TX_GENCTL1_VBOOST_LVL); + txgbe_write_pma(xpcs, TXGBE_TX_GENCTL1, val); + txgbe_write_pma(xpcs, TXGBE_MISC_CTL0, TXGBE_MISC_CTL0_PLL | + TXGBE_MISC_CTL0_CR_PARA_SEL | TXGBE_MISC_CTL0_RX_VREF(0xF)); + txgbe_write_pma(xpcs, TXGBE_VCO_CAL_LD0, 0x549); + txgbe_write_pma(xpcs, TXGBE_VCO_CAL_REF0, 0x29); + txgbe_write_pma(xpcs, TXGBE_TX_RATE_CTL, 0); + txgbe_write_pma(xpcs, TXGBE_RX_RATE_CTL, 0); + txgbe_write_pma(xpcs, TXGBE_TX_GEN_CTL2, TXGBE_TX_GEN_CTL2_TX0_WIDTH(3)); + txgbe_write_pma(xpcs, TXGBE_RX_GEN_CTL2, TXGBE_RX_GEN_CTL2_RX0_WIDTH(3)); + txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL2, TXGBE_MPLLA_CTL2_DIV16P5_CLK_EN | + TXGBE_MPLLA_CTL2_DIV10_CLK_EN); + + txgbe_write_pma(xpcs, TXGBE_RX_EQ_CTL0, TXGBE_RX_EQ_CTL0_CTLE_POLE(2) | + TXGBE_RX_EQ_CTL0_CTLE_BOOST(5)); + val = txgbe_read_pma(xpcs, TXGBE_RX_EQ_ATTN_CTL); + val &= ~TXGBE_RX_EQ_ATTN_LVL0; + txgbe_write_pma(xpcs, TXGBE_RX_EQ_ATTN_CTL, val); + txgbe_write_pma(xpcs, TXGBE_DFE_TAP_CTL0, 0xBE); + val = txgbe_read_pma(xpcs, TXGBE_AFE_DFE_ENABLE); + val &= ~(TXGBE_DFE_EN_0 | TXGBE_AFE_EN_0); + txgbe_write_pma(xpcs, TXGBE_AFE_DFE_ENABLE, val); + val = txgbe_read_pma(xpcs, TXGBE_RX_EQ_CTL4); + val &= ~TXGBE_RX_EQ_CTL4_CONT_ADAPT0; 
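+	/* write it back with lane 0 continuous adaptation disabled */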
+	txgbe_write_pma(xpcs, TXGBE_RX_EQ_CTL4, val); +} + +static void txgbe_pma_config_1g(struct dw_xpcs *xpcs) +{ + int val; + + val = txgbe_read_pma(xpcs, TXGBE_TX_GENCTL1); + val = u16_replace_bits(val, 0x5, TXGBE_TX_GENCTL1_VBOOST_LVL); + val &= ~TXGBE_TX_GENCTL1_VBOOST_EN0; + txgbe_write_pma(xpcs, TXGBE_TX_GENCTL1, val); + txgbe_write_pma(xpcs, TXGBE_MISC_CTL0, TXGBE_MISC_CTL0_PLL | + TXGBE_MISC_CTL0_CR_PARA_SEL | TXGBE_MISC_CTL0_RX_VREF(0xF)); + + txgbe_write_pma(xpcs, TXGBE_RX_EQ_CTL0, TXGBE_RX_EQ_CTL0_VGA1_GAIN(7) | + TXGBE_RX_EQ_CTL0_VGA2_GAIN(7) | TXGBE_RX_EQ_CTL0_CTLE_BOOST(6)); + val = txgbe_read_pma(xpcs, TXGBE_RX_EQ_ATTN_CTL); + val &= ~TXGBE_RX_EQ_ATTN_LVL0; + txgbe_write_pma(xpcs, TXGBE_RX_EQ_ATTN_CTL, val); + txgbe_write_pma(xpcs, TXGBE_DFE_TAP_CTL0, 0); + val = txgbe_read_pma(xpcs, TXGBE_RX_GEN_CTL3); + val = u16_replace_bits(val, 0x4, TXGBE_RX_GEN_CTL3_LOS_TRSHLD0); + txgbe_write_pma(xpcs, TXGBE_RX_GEN_CTL3, val); + + txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL0, 0x20); + txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL3, 0x46); + txgbe_write_pma(xpcs, TXGBE_VCO_CAL_LD0, 0x540); + txgbe_write_pma(xpcs, TXGBE_VCO_CAL_REF0, 0x2A); + txgbe_write_pma(xpcs, TXGBE_AFE_DFE_ENABLE, 0); + txgbe_write_pma(xpcs, TXGBE_RX_EQ_CTL4, TXGBE_RX_EQ_CTL4_CONT_OFF_CAN0); + txgbe_write_pma(xpcs, TXGBE_TX_RATE_CTL, TXGBE_TX_RATE_CTL_TX0_RATE(3)); + txgbe_write_pma(xpcs, TXGBE_RX_RATE_CTL, TXGBE_RX_RATE_CTL_RX0_RATE(3)); + txgbe_write_pma(xpcs, TXGBE_TX_GEN_CTL2, TXGBE_TX_GEN_CTL2_TX0_WIDTH(1)); + txgbe_write_pma(xpcs, TXGBE_RX_GEN_CTL2, TXGBE_RX_GEN_CTL2_RX0_WIDTH(1)); + txgbe_write_pma(xpcs, TXGBE_MPLLA_CTL2, TXGBE_MPLLA_CTL2_DIV10_CLK_EN); +} + +static int txgbe_pcs_poll_power_up(struct dw_xpcs *xpcs) +{ + int val, ret; + + /* Wait for the xpcs power-up status to be good */ + ret = read_poll_timeout(xpcs_read_vpcs, val, + (val & DW_PSEQ_ST) == DW_PSEQ_ST_GOOD, + 10000, 1000000, false, + xpcs, DW_VR_XS_PCS_DIG_STS); + if (ret < 0) + dev_err(&xpcs->mdiodev->dev, "xpcs power-up timeout\n"); + + return ret; +} + +static int txgbe_pma_init_done(struct dw_xpcs *xpcs) +{ + int val, ret; + + xpcs_write_vpcs(xpcs, DW_VR_XS_PCS_DIG_CTRL1, DW_VR_RST | DW_EN_VSMMD1); + + /* wait for the pma initialization to complete */ + ret = read_poll_timeout(xpcs_read_vpcs, val, !(val & DW_VR_RST), + 100000, 10000000, false, + xpcs, DW_VR_XS_PCS_DIG_CTRL1); + if (ret < 0) + dev_err(&xpcs->mdiodev->dev, "xpcs pma initialization timeout\n"); + + return ret; +} + +static bool txgbe_xpcs_mode_quirk(struct dw_xpcs *xpcs) +{ + int ret; + + /* When txgbe does a LAN reset, the PCS falls back to the default 10GBASE-R mode */ + ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_CTRL2); + ret &= MDIO_PCS_CTRL2_TYPE; + if ((ret == MDIO_PCS_CTRL2_10GBR && + xpcs->interface != PHY_INTERFACE_MODE_10GBASER) || + xpcs->interface == PHY_INTERFACE_MODE_SGMII) + return true; + + return false; +} + +int txgbe_xpcs_switch_mode(struct dw_xpcs *xpcs, phy_interface_t interface) +{ + int val, ret; + + switch (interface) { + case PHY_INTERFACE_MODE_10GBASER: + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + break; + default: + return 0; + } + + if (xpcs->interface == interface && !txgbe_xpcs_mode_quirk(xpcs)) + return 0; + + xpcs->interface = interface; + + ret = txgbe_pcs_poll_power_up(xpcs); + if (ret < 0) + return ret; + + if (interface == PHY_INTERFACE_MODE_10GBASER) { + xpcs_write(xpcs, MDIO_MMD_PCS, MDIO_CTRL2, MDIO_PCS_CTRL2_10GBR); + val = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_CTRL1); + val |= MDIO_CTRL1_SPEED10G; + xpcs_write(xpcs, MDIO_MMD_PMAPMD, MDIO_CTRL1, val); + 
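/* apply the PMA SerDes settings for 10GBASE-R */ +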
txgbe_pma_config_10gbaser(xpcs); + } else { + xpcs_write(xpcs, MDIO_MMD_PCS, MDIO_CTRL2, MDIO_PCS_CTRL2_10GBX); + xpcs_write(xpcs, MDIO_MMD_PMAPMD, MDIO_CTRL1, 0); + xpcs_write(xpcs, MDIO_MMD_PCS, MDIO_CTRL1, 0); + txgbe_pma_config_1g(xpcs); + } + + return txgbe_pma_init_done(xpcs); +} diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c index 44b037646865..31f0beba638a 100644 --- a/drivers/net/pcs/pcs-xpcs.c +++ b/drivers/net/pcs/pcs-xpcs.c @@ -228,16 +228,39 @@ static int xpcs_write_vendor(struct dw_xpcs *xpcs, int dev, int reg, return xpcs_write(xpcs, dev, DW_VENDOR | reg, val); } -static int xpcs_read_vpcs(struct dw_xpcs *xpcs, int reg) +int xpcs_read_vpcs(struct dw_xpcs *xpcs, int reg) { return xpcs_read_vendor(xpcs, MDIO_MMD_PCS, reg); } -static int xpcs_write_vpcs(struct dw_xpcs *xpcs, int reg, u16 val) +int xpcs_write_vpcs(struct dw_xpcs *xpcs, int reg, u16 val) { return xpcs_write_vendor(xpcs, MDIO_MMD_PCS, reg, val); } +static int xpcs_dev_flag(struct dw_xpcs *xpcs) +{ + int ret, oui; + + ret = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_DEVID1); + if (ret < 0) + return ret; + + oui = ret; + + ret = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_DEVID2); + if (ret < 0) + return ret; + + ret = (ret >> 10) & 0x3F; + oui |= ret << 16; + + if (oui == DW_OUI_WX) + xpcs->dev_flag = DW_DEV_TXGBE; + + return 0; +} + static int xpcs_poll_reset(struct dw_xpcs *xpcs, int dev) { /* Poll until the reset bit clears (50ms per retry == 0.6 sec) */ @@ -660,7 +683,10 @@ EXPORT_SYMBOL_GPL(xpcs_config_eee); static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, unsigned int neg_mode) { - int ret, mdio_ctrl; + int ret, mdio_ctrl, tx_conf; + + if (xpcs->dev_flag == DW_DEV_TXGBE) + xpcs_write_vpcs(xpcs, DW_VR_XS_PCS_DIG_CTRL1, DW_CL37_BP | DW_EN_VSMMD1); /* For AN for C37 SGMII mode, the settings are :- * 1) VR_MII_MMD_CTRL Bit(12) [AN_ENABLE] = 0b (Disable SGMII AN in case @@ -697,9 +723,15 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, ret |= (DW_VR_MII_PCS_MODE_C37_SGMII << DW_VR_MII_AN_CTRL_PCS_MODE_SHIFT & DW_VR_MII_PCS_MODE_MASK); - ret |= (DW_VR_MII_TX_CONFIG_MAC_SIDE_SGMII << - DW_VR_MII_AN_CTRL_TX_CONFIG_SHIFT & - DW_VR_MII_TX_CONFIG_MASK); + if (xpcs->dev_flag == DW_DEV_TXGBE) { + ret |= DW_VR_MII_AN_CTRL_8BIT; + /* Hardware requires it to be PHY side SGMII */ + tx_conf = DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII; + } else { + tx_conf = DW_VR_MII_TX_CONFIG_MAC_SIDE_SGMII; + } + ret |= tx_conf << DW_VR_MII_AN_CTRL_TX_CONFIG_SHIFT & + DW_VR_MII_TX_CONFIG_MASK; ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL, ret); if (ret < 0) return ret; @@ -713,6 +745,9 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, else ret &= ~DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW; + if (xpcs->dev_flag == DW_DEV_TXGBE) + ret |= DW_VR_MII_DIG_CTRL1_PHY_MODE_CTRL; + ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret); if (ret < 0) return ret; @@ -732,6 +767,9 @@ static int xpcs_config_aneg_c37_1000basex(struct dw_xpcs *xpcs, int ret, mdio_ctrl, adv; bool changed = 0; + if (xpcs->dev_flag == DW_DEV_TXGBE) + xpcs_write_vpcs(xpcs, DW_VR_XS_PCS_DIG_CTRL1, DW_CL37_BP | DW_EN_VSMMD1); + /* According to Chap 7.12, to set 1000BASE-X C37 AN, AN must * be disabled first:- * 1) VR_MII_MMD_CTRL Bit(12)[AN_ENABLE] = 0b @@ -753,6 +791,8 @@ static int xpcs_config_aneg_c37_1000basex(struct dw_xpcs *xpcs, return ret; ret &= ~DW_VR_MII_PCS_MODE_MASK; + if (!xpcs->pcs.poll) + ret |= DW_VR_MII_AN_INTR_EN; ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL, ret); if (ret < 0) return ret; @@ 
-818,6 +858,12 @@ int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface, if (!compat) return -ENODEV; + if (xpcs->dev_flag == DW_DEV_TXGBE) { + ret = txgbe_xpcs_switch_mode(xpcs, interface); + if (ret) + return ret; + } + switch (compat->an_mode) { case DW_10GBASER: break; @@ -977,6 +1023,33 @@ static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs, state->duplex = DUPLEX_FULL; else state->duplex = DUPLEX_HALF; + } else if (ret == DW_VR_MII_AN_STS_C37_ANCMPLT_INTR) { + int speed, duplex; + + state->link = true; + + speed = xpcs_read(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1); + if (speed < 0) + return speed; + + speed &= SGMII_SPEED_SS13 | SGMII_SPEED_SS6; + if (speed == SGMII_SPEED_SS6) + state->speed = SPEED_1000; + else if (speed == SGMII_SPEED_SS13) + state->speed = SPEED_100; + else if (speed == 0) + state->speed = SPEED_10; + + duplex = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_ADVERTISE); + if (duplex < 0) + return duplex; + + if (duplex & DW_FULL_DUPLEX) + state->duplex = DUPLEX_FULL; + else if (duplex & DW_HALF_DUPLEX) + state->duplex = DUPLEX_HALF; + + xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS, 0); } return 0; @@ -1000,12 +1073,45 @@ static int xpcs_get_state_c37_1000basex(struct dw_xpcs *xpcs, if (bmsr < 0) return bmsr; + /* Clear AN complete interrupt */ + if (!xpcs->pcs.poll) { + int an_intr; + + an_intr = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS); + if (an_intr & DW_VR_MII_AN_STS_C37_ANCMPLT_INTR) { + an_intr &= ~DW_VR_MII_AN_STS_C37_ANCMPLT_INTR; + xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS, an_intr); + } + } + phylink_mii_c22_pcs_decode_state(state, bmsr, lpa); } return 0; } +static int xpcs_get_state_2500basex(struct dw_xpcs *xpcs, + struct phylink_link_state *state) +{ + int ret; + + ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_STS); + if (ret < 0) { + state->link = 0; + return ret; + } + + state->link = !!(ret & DW_VR_MII_MMD_STS_LINK_STS); + if (!state->link) + return 0; + + state->speed = SPEED_2500; + state->pause |= MLO_PAUSE_TX | MLO_PAUSE_RX; + state->duplex = DUPLEX_FULL; + + return 0; +} + static void xpcs_get_state(struct phylink_pcs *pcs, struct phylink_link_state *state) { @@ -1043,6 +1149,13 @@ static void xpcs_get_state(struct phylink_pcs *pcs, ERR_PTR(ret)); } break; + case DW_2500BASEX: + ret = xpcs_get_state_2500basex(xpcs, state); + if (ret) { + pr_err("xpcs_get_state_2500basex returned %pe\n", + ERR_PTR(ret)); + } + break; default: return; } @@ -1284,16 +1397,20 @@ static struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev, goto out; } + ret = xpcs_dev_flag(xpcs); + if (ret) + goto out; + xpcs->pcs.ops = &xpcs_phylink_ops; xpcs->pcs.neg_mode = true; - if (compat->an_mode == DW_10GBASER) - return xpcs; - xpcs->pcs.poll = true; + if (xpcs->dev_flag != DW_DEV_TXGBE) { + xpcs->pcs.poll = true; - ret = xpcs_soft_reset(xpcs, compat); - if (ret) - goto out; + ret = xpcs_soft_reset(xpcs, compat); + if (ret) + goto out; + } return xpcs; } diff --git a/drivers/net/pcs/pcs-xpcs.h b/drivers/net/pcs/pcs-xpcs.h index 68c6b5a62088..96c36b32ca99 100644 --- a/drivers/net/pcs/pcs-xpcs.h +++ b/drivers/net/pcs/pcs-xpcs.h @@ -15,8 +15,14 @@ /* VR_XS_PCS */ #define DW_USXGMII_RST BIT(10) #define DW_USXGMII_EN BIT(9) +#define DW_VR_XS_PCS_DIG_CTRL1 0x0000 +#define DW_VR_RST BIT(15) +#define DW_EN_VSMMD1 BIT(13) +#define DW_CL37_BP BIT(12) #define DW_VR_XS_PCS_DIG_STS 0x0010 #define DW_RXFIFO_ERR GENMASK(6, 5) +#define DW_PSEQ_ST GENMASK(4, 2) +#define DW_PSEQ_ST_GOOD FIELD_PREP(GENMASK(4, 2), 0x4) /* SR_MII */ #define 
DW_USXGMII_FULL BIT(8) @@ -49,6 +55,8 @@ /* Clause 37 Defines */ /* VR MII MMD registers offsets */ #define DW_VR_MII_MMD_CTRL 0x0000 +#define DW_VR_MII_MMD_STS 0x0001 +#define DW_VR_MII_MMD_STS_LINK_STS BIT(2) #define DW_VR_MII_DIG_CTRL1 0x8000 #define DW_VR_MII_AN_CTRL 0x8001 #define DW_VR_MII_AN_INTR_STS 0x8002 @@ -61,12 +69,14 @@ /* VR_MII_DIG_CTRL1 */ #define DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW BIT(9) +#define DW_VR_MII_DIG_CTRL1_PHY_MODE_CTRL BIT(0) /* VR_MII_DIG_CTRL2 */ #define DW_VR_MII_DIG_CTRL2_TX_POL_INV BIT(4) #define DW_VR_MII_DIG_CTRL2_RX_POL_INV BIT(0) /* VR_MII_AN_CTRL */ +#define DW_VR_MII_AN_CTRL_8BIT BIT(8) #define DW_VR_MII_AN_CTRL_TX_CONFIG_SHIFT 3 #define DW_VR_MII_TX_CONFIG_MASK BIT(3) #define DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII 0x1 @@ -75,8 +85,10 @@ #define DW_VR_MII_PCS_MODE_MASK GENMASK(2, 1) #define DW_VR_MII_PCS_MODE_C37_1000BASEX 0x0 #define DW_VR_MII_PCS_MODE_C37_SGMII 0x2 +#define DW_VR_MII_AN_INTR_EN BIT(0) /* VR_MII_AN_INTR_STS */ +#define DW_VR_MII_AN_STS_C37_ANCMPLT_INTR BIT(0) #define DW_VR_MII_AN_STS_C37_ANSGM_FD BIT(1) #define DW_VR_MII_AN_STS_C37_ANSGM_SP_SHIFT 2 #define DW_VR_MII_AN_STS_C37_ANSGM_SP GENMASK(3, 2) @@ -90,6 +102,10 @@ #define SGMII_SPEED_SS13 BIT(13) /* SGMII speed along with SS6 */ #define SGMII_SPEED_SS6 BIT(6) /* SGMII speed along with SS13 */ +/* SR MII MMD AN Advertisement defines */ +#define DW_HALF_DUPLEX BIT(6) +#define DW_FULL_DUPLEX BIT(5) + /* VR MII EEE Control 0 defines */ #define DW_VR_MII_EEE_LTX_EN BIT(0) /* LPI Tx Enable */ #define DW_VR_MII_EEE_LRX_EN BIT(1) /* LPI Rx Enable */ @@ -106,6 +122,9 @@ int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg); int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val); +int xpcs_read_vpcs(struct dw_xpcs *xpcs, int reg); +int xpcs_write_vpcs(struct dw_xpcs *xpcs, int reg, u16 val); int nxp_sja1105_sgmii_pma_config(struct dw_xpcs *xpcs); int nxp_sja1110_sgmii_pma_config(struct dw_xpcs *xpcs); int nxp_sja1110_2500basex_pma_config(struct dw_xpcs *xpcs); +int txgbe_xpcs_switch_mode(struct dw_xpcs *xpcs, phy_interface_t interface); diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 78e6981650d9..421d2b62918f 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -69,9 +69,9 @@ config SFP comment "MII PHY device drivers" config AMD_PHY - tristate "AMD PHYs" + tristate "AMD and Altima PHYs" help - Currently supports the am79c874 + Currently supports the AMD am79c874 and Altima AC101L. config MESON_GXL_PHY tristate "Amlogic Meson GXL Internal PHY" @@ -217,6 +217,12 @@ config MARVELL_10G_PHY help Support for the Marvell Alaska MV88X3310 and compatible PHYs. +config MARVELL_88Q2XXX_PHY + tristate "Marvell 88Q2XXX PHY" + help + Support for the Marvell 88Q2XXX 100/1000BASE-T1 Automotive Ethernet + PHYs. + config MARVELL_88X2222_PHY tristate "Marvell 88X2222 PHY" help @@ -300,7 +306,7 @@ config NXP_C45_TJA11XX_PHY depends on PTP_1588_CLOCK_OPTIONAL help Enable support for NXP C45 TJA11XX PHYs. - Currently supports only the TJA1103 PHY. + Currently supports the TJA1103 and TJA1120 PHYs. 
config NXP_TJA11XX_PHY tristate "NXP TJA11xx PHYs support" @@ -344,6 +350,7 @@ config ROCKCHIP_PHY config SMSC_PHY tristate "SMSC PHYs" + select CRC16 help Currently supports the LAN83C185, LAN8187 and LAN8700 PHYs diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 2fe51ea83bab..c945ed9bd14b 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -14,6 +14,8 @@ endif # dedicated loadable module, so we bundle them all together into libphy.ko ifdef CONFIG_PHYLIB libphy-y += $(mdio-bus-y) +# the stubs are built-in whenever PHYLIB is built-in or module +obj-y += stubs.o else obj-$(CONFIG_MDIO_DEVICE) += mdio-bus.o endif @@ -66,6 +68,7 @@ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o obj-$(CONFIG_LXT_PHY) += lxt.o obj-$(CONFIG_MARVELL_10G_PHY) += marvell10g.o obj-$(CONFIG_MARVELL_PHY) += marvell.o +obj-$(CONFIG_MARVELL_88Q2XXX_PHY) += marvell-88q2xxx.o obj-$(CONFIG_MARVELL_88X2222_PHY) += marvell-88x2222.o obj-$(CONFIG_MAXLINEAR_GPHY) += mxl-gpy.o obj-$(CONFIG_MEDIATEK_GE_PHY) += mediatek-ge.o diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c index 001bb6d8bfce..930b15fa6ce9 100644 --- a/drivers/net/phy/amd.c +++ b/drivers/net/phy/amd.c @@ -13,6 +13,7 @@ #include <linux/mii.h> #include <linux/phy.h> +#define PHY_ID_AC101L 0x00225520 #define PHY_ID_AM79C874 0x0022561b #define MII_AM79C_IR 17 /* Interrupt Status/Control Register */ @@ -87,19 +88,31 @@ static irqreturn_t am79c_handle_interrupt(struct phy_device *phydev) return IRQ_HANDLED; } -static struct phy_driver am79c_driver[] = { { - .phy_id = PHY_ID_AM79C874, - .name = "AM79C874", - .phy_id_mask = 0xfffffff0, - /* PHY_BASIC_FEATURES */ - .config_init = am79c_config_init, - .config_intr = am79c_config_intr, - .handle_interrupt = am79c_handle_interrupt, -} }; +static struct phy_driver am79c_drivers[] = { + { + .phy_id = PHY_ID_AM79C874, + .name = "AM79C874", + .phy_id_mask = 0xfffffff0, + /* PHY_BASIC_FEATURES */ + .config_init = am79c_config_init, + .config_intr = am79c_config_intr, + .handle_interrupt = am79c_handle_interrupt, + }, + { + .phy_id = PHY_ID_AC101L, + .name = "AC101L", + .phy_id_mask = 0xfffffff0, + /* PHY_BASIC_FEATURES */ + .config_init = am79c_config_init, + .config_intr = am79c_config_intr, + .handle_interrupt = am79c_handle_interrupt, + }, +}; -module_phy_driver(am79c_driver); +module_phy_driver(am79c_drivers); static struct mdio_device_id __maybe_unused amd_tbl[] = { + { PHY_ID_AC101L, 0xfffffff0 }, { PHY_ID_AM79C874, 0xfffffff0 }, { } }; diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 8a77ec33b417..37fb033e1c29 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -272,6 +272,13 @@ #define QCA808X_CDT_STATUS_STAT_OPEN 2 #define QCA808X_CDT_STATUS_STAT_SHORT 3 +/* QCA808X 1G chip type */ +#define QCA808X_PHY_MMD7_CHIP_TYPE 0x901d +#define QCA808X_PHY_CHIP_TYPE_1G BIT(0) + +#define QCA8081_PHY_SERDES_MMD1_FIFO_CTRL 0x9072 +#define QCA8081_PHY_FIFO_RSTN BIT(11) + MODULE_DESCRIPTION("Qualcomm Atheros AR803x and QCA808X PHY driver"); MODULE_AUTHOR("Matus Ujhelyi"); MODULE_LICENSE("GPL"); @@ -902,15 +909,6 @@ static int at803x_get_features(struct phy_device *phydev) if (err) return err; - if (phydev->drv->phy_id == QCA8081_PHY_ID) { - err = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_NG_EXTABLE); - if (err < 0) - return err; - - linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported, - err & MDIO_PMA_NG_EXTABLE_2_5GBT); - } - if (phydev->drv->phy_id != ATH8031_PHY_ID) return 0; @@ -1739,24 +1737,30 @@ static int 
qca808x_phy_fast_retrain_config(struct phy_device *phydev) return 0; } -static int qca808x_phy_ms_random_seed_set(struct phy_device *phydev) +static int qca808x_phy_ms_seed_enable(struct phy_device *phydev, bool enable) { - u16 seed_value = get_random_u32_below(QCA808X_MASTER_SLAVE_SEED_RANGE); + u16 seed_value; + + if (!enable) + return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED, + QCA808X_MASTER_SLAVE_SEED_ENABLE, 0); + seed_value = get_random_u32_below(QCA808X_MASTER_SLAVE_SEED_RANGE); return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED, - QCA808X_MASTER_SLAVE_SEED_CFG, - FIELD_PREP(QCA808X_MASTER_SLAVE_SEED_CFG, seed_value)); + QCA808X_MASTER_SLAVE_SEED_CFG | QCA808X_MASTER_SLAVE_SEED_ENABLE, + FIELD_PREP(QCA808X_MASTER_SLAVE_SEED_CFG, seed_value) | + QCA808X_MASTER_SLAVE_SEED_ENABLE); } -static int qca808x_phy_ms_seed_enable(struct phy_device *phydev, bool enable) +static bool qca808x_is_prefer_master(struct phy_device *phydev) { - u16 seed_enable = 0; - - if (enable) - seed_enable = QCA808X_MASTER_SLAVE_SEED_ENABLE; + return (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_FORCE) || + (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_PREFERRED); +} - return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED, - QCA808X_MASTER_SLAVE_SEED_ENABLE, seed_enable); +static bool qca808x_has_fast_retrain_or_slave_seed(struct phy_device *phydev) +{ + return linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported); } static int qca808x_config_init(struct phy_device *phydev) @@ -1775,20 +1779,25 @@ static int qca808x_config_init(struct phy_device *phydev) if (ret) return ret; - /* Config the fast retrain for the link 2500M */ - ret = qca808x_phy_fast_retrain_config(phydev); - if (ret) - return ret; + if (qca808x_has_fast_retrain_or_slave_seed(phydev)) { + /* Config the fast retrain for the link 2500M */ + ret = qca808x_phy_fast_retrain_config(phydev); + if (ret) + return ret; - /* Configure lower ramdom seed to make phy linked as slave mode */ - ret = qca808x_phy_ms_random_seed_set(phydev); - if (ret) - return ret; + ret = genphy_read_master_slave(phydev); + if (ret < 0) + return ret; - /* Enable seed */ - ret = qca808x_phy_ms_seed_enable(phydev, true); - if (ret) - return ret; + if (!qca808x_is_prefer_master(phydev)) { + /* Enable seed and configure lower random seed to make phy + * linked as slave mode. + */ + ret = qca808x_phy_ms_seed_enable(phydev, true); + if (ret) + return ret; + } + } /* Configure adc threshold as 100mv for the link 10M */ return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_ADC_THRESHOLD, @@ -1821,17 +1830,21 @@ static int qca808x_read_status(struct phy_device *phydev) phydev->interface = PHY_INTERFACE_MODE_SGMII; } else { /* generate seed as a lower random value to make PHY linked as SLAVE easily, - * except for master/slave configuration fault detected. + * except when a master/slave configuration fault is detected or the master + * mode is preferred. + * * the reason for not putting this code into the function link_change_notify is * the corner case where the link partner is also the qca8081 PHY and the seed * value is configured as the same value, the link can't be up and no link change * occurs. 
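* Re-randomizing the seed on every status read while the link is down
	 * eventually breaks that tie.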
*/ - if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR) { - qca808x_phy_ms_seed_enable(phydev, false); - } else { - qca808x_phy_ms_random_seed_set(phydev); - qca808x_phy_ms_seed_enable(phydev, true); + if (qca808x_has_fast_retrain_or_slave_seed(phydev)) { + if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR || + qca808x_is_prefer_master(phydev)) { + qca808x_phy_ms_seed_enable(phydev, false); + } else { + qca808x_phy_ms_seed_enable(phydev, true); + } } } @@ -1846,7 +1859,10 @@ static int qca808x_soft_reset(struct phy_device *phydev) if (ret < 0) return ret; - return qca808x_phy_ms_seed_enable(phydev, true); + if (qca808x_has_fast_retrain_or_slave_seed(phydev)) + ret = qca808x_phy_ms_seed_enable(phydev, true); + + return ret; } static bool qca808x_cdt_fault_length_valid(int cdt_code) @@ -1996,6 +2012,44 @@ static int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finish return 0; } +static int qca808x_get_features(struct phy_device *phydev) +{ + int ret; + + ret = genphy_c45_pma_read_abilities(phydev); + if (ret) + return ret; + + /* The autoneg ability is not reported in bit 3 of MMD7.1, + * but it is supported by the qca808x PHY, so we add it here + * manually. + */ + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported); + + /* On the qca8081 1G version chip, the 2500baseT ability is still + * set in bit 0 of MMD1.21, so we need to remove it manually if this + * is the qca8081 1G chip, as indicated by bit 0 of MMD7.0x901d. + */ + ret = phy_read_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_CHIP_TYPE); + if (ret < 0) + return ret; + + if (QCA808X_PHY_CHIP_TYPE_1G & ret) + linkmode_clear_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported); + + return 0; +} + +static void qca808x_link_change_notify(struct phy_device *phydev) +{ + /* Assert the interface SGMII FIFO on link down and deassert it on + * link up; the interface device address is always the PHY address + * plus 1. + */ + mdiobus_c45_modify_changed(phydev->mdio.bus, phydev->mdio.addr + 1, + MDIO_MMD_PMAPMD, QCA8081_PHY_SERDES_MMD1_FIFO_CTRL, + QCA8081_PHY_FIFO_RSTN, phydev->link ? 
QCA8081_PHY_FIFO_RSTN : 0); +} + static struct phy_driver at803x_driver[] = { { /* Qualcomm Atheros AR8035 */ @@ -2163,7 +2217,7 @@ static struct phy_driver at803x_driver[] = { .set_tunable = at803x_set_tunable, .set_wol = at803x_set_wol, .get_wol = at803x_get_wol, - .get_features = at803x_get_features, + .get_features = qca808x_get_features, .config_aneg = at803x_config_aneg, .suspend = genphy_suspend, .resume = genphy_resume, @@ -2172,6 +2226,7 @@ static struct phy_driver at803x_driver[] = { .soft_reset = qca808x_soft_reset, .cable_test_start = qca808x_cable_test_start, .cable_test_get_status = qca808x_cable_test_get_status, + .link_change_notify = qca808x_link_change_notify, }, }; module_phy_driver(at803x_driver); diff --git a/drivers/net/phy/ax88796b.c b/drivers/net/phy/ax88796b.c index 0f1e617a26c9..eb74a8cf8df1 100644 --- a/drivers/net/phy/ax88796b.c +++ b/drivers/net/phy/ax88796b.c @@ -90,7 +90,7 @@ static void asix_ax88772a_link_change_notify(struct phy_device *phydev) */ if (phydev->state == PHY_NOLINK) { phy_init_hw(phydev); - phy_start_aneg(phydev); + _phy_start_aneg(phydev); } } diff --git a/drivers/net/phy/bcm-phy-ptp.c b/drivers/net/phy/bcm-phy-ptp.c index ef00d6163061..cb4b91af5e17 100644 --- a/drivers/net/phy/bcm-phy-ptp.c +++ b/drivers/net/phy/bcm-phy-ptp.c @@ -942,3 +942,4 @@ struct bcm_ptp_private *bcm_ptp_probe(struct phy_device *phydev) EXPORT_SYMBOL_GPL(bcm_ptp_probe); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Broadcom PHY PTP driver"); diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index f8c17a253f8b..97638ba7ae85 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -894,6 +894,9 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev) .name = _name, \ /* PHY_BASIC_FEATURES */ \ .flags = PHY_IS_INTERNAL, \ + .get_sset_count = bcm_phy_get_sset_count, \ + .get_strings = bcm_phy_get_strings, \ + .get_stats = bcm7xxx_28nm_get_phy_stats, \ .probe = bcm7xxx_28nm_probe, \ .config_init = bcm7xxx_16nm_ephy_config_init, \ .config_aneg = genphy_config_aneg, \ @@ -913,6 +916,7 @@ static struct phy_driver bcm7xxx_driver[] = { BCM7XXX_28NM_GPHY(PHY_ID_BCM7278, "Broadcom BCM7278"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7364, "Broadcom BCM7364"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7366, "Broadcom BCM7366"), + BCM7XXX_16NM_EPHY(PHY_ID_BCM74165, "Broadcom BCM74165"), BCM7XXX_28NM_GPHY(PHY_ID_BCM74371, "Broadcom BCM74371"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"), diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c index cc2858107668..e81404bf8994 100644 --- a/drivers/net/phy/bcm87xx.c +++ b/drivers/net/phy/bcm87xx.c @@ -223,3 +223,4 @@ static struct phy_driver bcm87xx_driver[] = { module_phy_driver(bcm87xx_driver); MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Broadcom BCM87xx PHY driver"); diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 04b2e6eeb195..3a627105675a 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -704,16 +704,21 @@ static int brcm_fet_config_init(struct phy_device *phydev) if (err < 0 && err != -EIO) return err; + /* Read to clear status bits */ reg = phy_read(phydev, MII_BRCM_FET_INTREG); if (reg < 0) return reg; /* Unmask events we are interested in and mask interrupts globally. 
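On the BCM5221 only the global enable and mask bits are used.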
*/ - reg = MII_BRCM_FET_IR_DUPLEX_EN | - MII_BRCM_FET_IR_SPEED_EN | - MII_BRCM_FET_IR_LINK_EN | - MII_BRCM_FET_IR_ENABLE | - MII_BRCM_FET_IR_MASK; + if (phydev->phy_id == PHY_ID_BCM5221) + reg = MII_BRCM_FET_IR_ENABLE | + MII_BRCM_FET_IR_MASK; + else + reg = MII_BRCM_FET_IR_DUPLEX_EN | + MII_BRCM_FET_IR_SPEED_EN | + MII_BRCM_FET_IR_LINK_EN | + MII_BRCM_FET_IR_ENABLE | + MII_BRCM_FET_IR_MASK; err = phy_write(phydev, MII_BRCM_FET_INTREG, reg); if (err < 0) @@ -726,42 +731,49 @@ static int brcm_fet_config_init(struct phy_device *phydev) reg = brcmtest | MII_BRCM_FET_BT_SRE; - err = phy_write(phydev, MII_BRCM_FET_BRCMTEST, reg); - if (err < 0) - return err; + phy_lock_mdio_bus(phydev); - /* Set the LED mode */ - reg = phy_read(phydev, MII_BRCM_FET_SHDW_AUXMODE4); - if (reg < 0) { - err = reg; - goto done; + err = __phy_write(phydev, MII_BRCM_FET_BRCMTEST, reg); + if (err < 0) { + phy_unlock_mdio_bus(phydev); + return err; } - reg &= ~MII_BRCM_FET_SHDW_AM4_LED_MASK; - reg |= MII_BRCM_FET_SHDW_AM4_LED_MODE1; + if (phydev->phy_id != PHY_ID_BCM5221) { + /* Set the LED mode */ + reg = __phy_read(phydev, MII_BRCM_FET_SHDW_AUXMODE4); + if (reg < 0) { + err = reg; + goto done; + } - err = phy_write(phydev, MII_BRCM_FET_SHDW_AUXMODE4, reg); - if (err < 0) - goto done; + err = __phy_modify(phydev, MII_BRCM_FET_SHDW_AUXMODE4, + MII_BRCM_FET_SHDW_AM4_LED_MASK, + MII_BRCM_FET_SHDW_AM4_LED_MODE1); + if (err < 0) + goto done; - /* Enable auto MDIX */ - err = phy_set_bits(phydev, MII_BRCM_FET_SHDW_MISCCTRL, - MII_BRCM_FET_SHDW_MC_FAME); - if (err < 0) - goto done; + /* Enable auto MDIX */ + err = __phy_set_bits(phydev, MII_BRCM_FET_SHDW_MISCCTRL, + MII_BRCM_FET_SHDW_MC_FAME); + if (err < 0) + goto done; + } if (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE) { /* Enable auto power down */ - err = phy_set_bits(phydev, MII_BRCM_FET_SHDW_AUXSTAT2, - MII_BRCM_FET_SHDW_AS2_APDE); + err = __phy_set_bits(phydev, MII_BRCM_FET_SHDW_AUXSTAT2, + MII_BRCM_FET_SHDW_AS2_APDE); } done: /* Disable shadow register access */ - err2 = phy_write(phydev, MII_BRCM_FET_BRCMTEST, brcmtest); + err2 = __phy_write(phydev, MII_BRCM_FET_BRCMTEST, brcmtest); if (!err) err = err2; + phy_unlock_mdio_bus(phydev); + return err; } @@ -840,23 +852,86 @@ static int brcm_fet_suspend(struct phy_device *phydev) reg = brcmtest | MII_BRCM_FET_BT_SRE; - err = phy_write(phydev, MII_BRCM_FET_BRCMTEST, reg); - if (err < 0) + phy_lock_mdio_bus(phydev); + + err = __phy_write(phydev, MII_BRCM_FET_BRCMTEST, reg); + if (err < 0) { + phy_unlock_mdio_bus(phydev); return err; + } + + if (phydev->phy_id == PHY_ID_BCM5221) + /* Force Low Power Mode with clock enabled */ + reg = BCM5221_SHDW_AM4_EN_CLK_LPM | BCM5221_SHDW_AM4_FORCE_LPM; + else + /* Set standby mode */ + reg = MII_BRCM_FET_SHDW_AM4_STANDBY; - /* Set standby mode */ - err = phy_modify(phydev, MII_BRCM_FET_SHDW_AUXMODE4, - MII_BRCM_FET_SHDW_AM4_STANDBY, - MII_BRCM_FET_SHDW_AM4_STANDBY); + err = __phy_set_bits(phydev, MII_BRCM_FET_SHDW_AUXMODE4, reg); /* Disable shadow register access */ - err2 = phy_write(phydev, MII_BRCM_FET_BRCMTEST, brcmtest); + err2 = __phy_write(phydev, MII_BRCM_FET_BRCMTEST, brcmtest); if (!err) err = err2; + phy_unlock_mdio_bus(phydev); + return err; } +static int bcm5221_config_aneg(struct phy_device *phydev) +{ + int ret, val; + + ret = genphy_config_aneg(phydev); + if (ret) + return ret; + + switch (phydev->mdix_ctrl) { + case ETH_TP_MDI: + val = BCM5221_AEGSR_MDIX_DIS; + break; + case ETH_TP_MDI_X: + val = BCM5221_AEGSR_MDIX_DIS | BCM5221_AEGSR_MDIX_MAN_SWAP; + break; + 
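/* val = 0 clears both bits, re-enabling automatic MDI/MDI-X selection */ +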
case ETH_TP_MDI_AUTO: + val = 0; + break; + default: + return 0; + } + + return phy_modify(phydev, BCM5221_AEGSR, BCM5221_AEGSR_MDIX_MAN_SWAP | + BCM5221_AEGSR_MDIX_DIS, + val); +} + +static int bcm5221_read_status(struct phy_device *phydev) +{ + int ret; + + /* Read MDIX status */ + ret = phy_read(phydev, BCM5221_AEGSR); + if (ret < 0) + return ret; + + if (ret & BCM5221_AEGSR_MDIX_DIS) { + if (ret & BCM5221_AEGSR_MDIX_MAN_SWAP) + phydev->mdix_ctrl = ETH_TP_MDI_X; + else + phydev->mdix_ctrl = ETH_TP_MDI; + } else { + phydev->mdix_ctrl = ETH_TP_MDI_AUTO; + } + + if (ret & BCM5221_AEGSR_MDIX_STATUS) + phydev->mdix = ETH_TP_MDI_X; + else + phydev->mdix = ETH_TP_MDI; + + return genphy_read_status(phydev); +} + static void bcm54xx_phy_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) { @@ -1222,6 +1297,18 @@ static struct phy_driver broadcom_drivers[] = { .suspend = brcm_fet_suspend, .resume = brcm_fet_config_init, }, { + .phy_id = PHY_ID_BCM5221, + .phy_id_mask = 0xfffffff0, + .name = "Broadcom BCM5221", + /* PHY_BASIC_FEATURES */ + .config_init = brcm_fet_config_init, + .config_intr = brcm_fet_config_intr, + .handle_interrupt = brcm_fet_handle_interrupt, + .suspend = brcm_fet_suspend, + .resume = brcm_fet_config_init, + .config_aneg = bcm5221_config_aneg, + .read_status = bcm5221_read_status, +}, { .phy_id = PHY_ID_BCM5395, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5395", @@ -1296,6 +1383,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = { { PHY_ID_BCM50610M, 0xfffffff0 }, { PHY_ID_BCM57780, 0xfffffff0 }, { PHY_ID_BCMAC131, 0xfffffff0 }, + { PHY_ID_BCM5221, 0xfffffff0 }, { PHY_ID_BCM5241, 0xfffffff0 }, { PHY_ID_BCM5395, 0xfffffff0 }, { PHY_ID_BCM53125, 0xfffffff0 }, diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index ef8b14135133..2657be7cc049 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -631,7 +631,6 @@ static void recalibrate(struct dp83640_clock *clock) s64 now, diff; struct phy_txts event_ts; struct timespec64 ts; - struct list_head *this; struct dp83640_private *tmp; struct phy_device *master = clock->chosen->phydev; u16 cal_gpio, cfg0, evnt, ptp_trig, trigger, val; @@ -648,8 +647,7 @@ static void recalibrate(struct dp83640_clock *clock) /* * enable broadcast, disable status frames, enable ptp clock */ - list_for_each(this, &clock->phylist) { - tmp = list_entry(this, struct dp83640_private, list); + list_for_each_entry(tmp, &clock->phylist, list) { enable_broadcast(tmp->phydev, clock->page, 1); tmp->cfg0 = ext_read(tmp->phydev, PAGE5, PSF_CFG0); ext_write(0, tmp->phydev, PAGE5, PSF_CFG0, 0); @@ -667,10 +665,8 @@ static void recalibrate(struct dp83640_clock *clock) evnt |= (CAL_EVENT & EVNT_SEL_MASK) << EVNT_SEL_SHIFT; evnt |= (cal_gpio & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT; - list_for_each(this, &clock->phylist) { - tmp = list_entry(this, struct dp83640_private, list); + list_for_each_entry(tmp, &clock->phylist, list) ext_write(0, tmp->phydev, PAGE5, PTP_EVNT, evnt); - } ext_write(0, master, PAGE5, PTP_EVNT, evnt); /* @@ -709,8 +705,7 @@ static void recalibrate(struct dp83640_clock *clock) event_ts.sec_hi = ext_read(master, PAGE4, PTP_EDATA); now = phy2txts(&event_ts); - list_for_each(this, &clock->phylist) { - tmp = list_entry(this, struct dp83640_private, list); + list_for_each_entry(tmp, &clock->phylist, list) { val = ext_read(tmp->phydev, PAGE4, PTP_STS); phydev_info(tmp->phydev, "slave PTP_STS 0x%04hx\n", val); val = ext_read(tmp->phydev, PAGE4, PTP_ESTS); @@ -730,10 +725,8 @@ static void 
recalibrate(struct dp83640_clock *clock) /* * restore status frames */ - list_for_each(this, &clock->phylist) { - tmp = list_entry(this, struct dp83640_private, list); + list_for_each_entry(tmp, &clock->phylist, list) ext_write(0, tmp->phydev, PAGE5, PSF_CFG0, tmp->cfg0); - } ext_write(0, master, PAGE5, PSF_CFG0, cfg0); mutex_unlock(&clock->extreg_lock); diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index e397e7d642d9..5f08f9d38bd7 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -159,6 +159,23 @@ #define DP83867_LED_DRV_EN(x) BIT((x) * 4) #define DP83867_LED_DRV_VAL(x) BIT((x) * 4 + 1) +#define DP83867_LED_FN(idx, val) (((val) & 0xf) << ((idx) * 4)) +#define DP83867_LED_FN_MASK(idx) (0xf << ((idx) * 4)) +#define DP83867_LED_FN_RX_ERR 0xe /* Receive Error */ +#define DP83867_LED_FN_RX_TX_ERR 0xd /* Receive Error or Transmit Error */ +#define DP83867_LED_FN_LINK_RX_TX 0xb /* Link established, blink for rx or tx activity */ +#define DP83867_LED_FN_FULL_DUPLEX 0xa /* Full duplex */ +#define DP83867_LED_FN_LINK_100_1000_BT 0x9 /* 100/1000BT link established */ +#define DP83867_LED_FN_LINK_10_100_BT 0x8 /* 10/100BT link established */ +#define DP83867_LED_FN_LINK_10_BT 0x7 /* 10BT link established */ +#define DP83867_LED_FN_LINK_100_BTX 0x6 /* 100 BTX link established */ +#define DP83867_LED_FN_LINK_1000_BT 0x5 /* 1000 BT link established */ +#define DP83867_LED_FN_COLLISION 0x4 /* Collision detected */ +#define DP83867_LED_FN_RX 0x3 /* Receive activity */ +#define DP83867_LED_FN_TX 0x2 /* Transmit activity */ +#define DP83867_LED_FN_RX_TX 0x1 /* Receive or Transmit activity */ +#define DP83867_LED_FN_LINK 0x0 /* Link established */ + enum { DP83867_PORT_MIRROING_KEEP, DP83867_PORT_MIRROING_EN, @@ -1018,6 +1035,123 @@ dp83867_led_brightness_set(struct phy_device *phydev, val); } +static int dp83867_led_mode(u8 index, unsigned long rules) +{ + if (index >= DP83867_LED_COUNT) + return -EINVAL; + + switch (rules) { + case BIT(TRIGGER_NETDEV_LINK): + return DP83867_LED_FN_LINK; + case BIT(TRIGGER_NETDEV_LINK_10): + return DP83867_LED_FN_LINK_10_BT; + case BIT(TRIGGER_NETDEV_LINK_100): + return DP83867_LED_FN_LINK_100_BTX; + case BIT(TRIGGER_NETDEV_FULL_DUPLEX): + return DP83867_LED_FN_FULL_DUPLEX; + case BIT(TRIGGER_NETDEV_TX): + return DP83867_LED_FN_TX; + case BIT(TRIGGER_NETDEV_RX): + return DP83867_LED_FN_RX; + case BIT(TRIGGER_NETDEV_LINK_1000): + return DP83867_LED_FN_LINK_1000_BT; + case BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX): + return DP83867_LED_FN_RX_TX; + case BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK_1000): + return DP83867_LED_FN_LINK_100_1000_BT; + case BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK_100): + return DP83867_LED_FN_LINK_10_100_BT; + case BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX): + return DP83867_LED_FN_LINK_RX_TX; + default: + return -EOPNOTSUPP; + } +} + +static int dp83867_led_hw_is_supported(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + int ret; + + ret = dp83867_led_mode(index, rules); + if (ret < 0) + return ret; + + return 0; +} + +static int dp83867_led_hw_control_set(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + int mode, ret; + + mode = dp83867_led_mode(index, rules); + if (mode < 0) + return mode; + + ret = phy_modify(phydev, DP83867_LEDCR1, DP83867_LED_FN_MASK(index), + DP83867_LED_FN(index, mode)); + if (ret) + return ret; + + return phy_modify(phydev, DP83867_LEDCR2, DP83867_LED_DRV_EN(index), 0); +} + +static int 
dp83867_led_hw_control_get(struct phy_device *phydev, u8 index, + unsigned long *rules) +{ + int val; + + val = phy_read(phydev, DP83867_LEDCR1); + if (val < 0) + return val; + + val &= DP83867_LED_FN_MASK(index); + val >>= index * 4; + + switch (val) { + case DP83867_LED_FN_LINK: + *rules = BIT(TRIGGER_NETDEV_LINK); + break; + case DP83867_LED_FN_LINK_10_BT: + *rules = BIT(TRIGGER_NETDEV_LINK_10); + break; + case DP83867_LED_FN_LINK_100_BTX: + *rules = BIT(TRIGGER_NETDEV_LINK_100); + break; + case DP83867_LED_FN_FULL_DUPLEX: + *rules = BIT(TRIGGER_NETDEV_FULL_DUPLEX); + break; + case DP83867_LED_FN_TX: + *rules = BIT(TRIGGER_NETDEV_TX); + break; + case DP83867_LED_FN_RX: + *rules = BIT(TRIGGER_NETDEV_RX); + break; + case DP83867_LED_FN_LINK_1000_BT: + *rules = BIT(TRIGGER_NETDEV_LINK_1000); + break; + case DP83867_LED_FN_RX_TX: + *rules = BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX); + break; + case DP83867_LED_FN_LINK_100_1000_BT: + *rules = BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK_1000); + break; + case DP83867_LED_FN_LINK_10_100_BT: + *rules = BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK_100); + break; + case DP83867_LED_FN_LINK_RX_TX: + *rules = BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) | + BIT(TRIGGER_NETDEV_RX); + break; + default: + *rules = 0; + break; + } + + return 0; +} + static struct phy_driver dp83867_driver[] = { { .phy_id = DP83867_PHY_ID, @@ -1047,6 +1181,9 @@ static struct phy_driver dp83867_driver[] = { .set_loopback = dp83867_loopback, .led_brightness_set = dp83867_led_brightness_set, + .led_hw_is_supported = dp83867_led_hw_is_supported, + .led_hw_control_set = dp83867_led_hw_control_set, + .led_hw_control_get = dp83867_led_hw_control_get, }, }; module_phy_driver(dp83867_driver); diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c new file mode 100644 index 000000000000..1c3ff77de56b --- /dev/null +++ b/drivers/net/phy/marvell-88q2xxx.c @@ -0,0 +1,263 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Marvell 88Q2XXX automotive 100BASE-T1/1000BASE-T1 PHY driver + */ +#include <linux/ethtool_netlink.h> +#include <linux/marvell_phy.h> +#include <linux/phy.h> + +#define MDIO_MMD_AN_MV_STAT 32769 +#define MDIO_MMD_AN_MV_STAT_ANEG 0x0100 +#define MDIO_MMD_AN_MV_STAT_LOCAL_RX 0x1000 +#define MDIO_MMD_AN_MV_STAT_REMOTE_RX 0x2000 +#define MDIO_MMD_AN_MV_STAT_LOCAL_MASTER 0x4000 +#define MDIO_MMD_AN_MV_STAT_MS_CONF_FAULT 0x8000 + +#define MDIO_MMD_PCS_MV_100BT1_STAT1 33032 +#define MDIO_MMD_PCS_MV_100BT1_STAT1_IDLE_ERROR 0x00FF +#define MDIO_MMD_PCS_MV_100BT1_STAT1_JABBER 0x0100 +#define MDIO_MMD_PCS_MV_100BT1_STAT1_LINK 0x0200 +#define MDIO_MMD_PCS_MV_100BT1_STAT1_LOCAL_RX 0x1000 +#define MDIO_MMD_PCS_MV_100BT1_STAT1_REMOTE_RX 0x2000 +#define MDIO_MMD_PCS_MV_100BT1_STAT1_LOCAL_MASTER 0x4000 + +#define MDIO_MMD_PCS_MV_100BT1_STAT2 33033 +#define MDIO_MMD_PCS_MV_100BT1_STAT2_JABBER 0x0001 +#define MDIO_MMD_PCS_MV_100BT1_STAT2_POL 0x0002 +#define MDIO_MMD_PCS_MV_100BT1_STAT2_LINK 0x0004 +#define MDIO_MMD_PCS_MV_100BT1_STAT2_ANGE 0x0008 + +static int mv88q2xxx_soft_reset(struct phy_device *phydev) +{ + int ret; + int val; + + ret = phy_write_mmd(phydev, MDIO_MMD_PCS, + MDIO_PCS_1000BT1_CTRL, MDIO_PCS_1000BT1_CTRL_RESET); + if (ret < 0) + return ret; + + return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_PCS, + MDIO_PCS_1000BT1_CTRL, val, + !(val & MDIO_PCS_1000BT1_CTRL_RESET), + 50000, 600000, true); +} + +static int mv88q2xxx_read_link_gbit(struct phy_device *phydev) +{ + int ret; + bool link = false; + + /* Read vendor 
specific Auto-Negotiation status register to get local + * and remote receiver status according to software initialization + * guide. + */ + ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_MMD_AN_MV_STAT); + if (ret < 0) { + return ret; + } else if ((ret & MDIO_MMD_AN_MV_STAT_LOCAL_RX) && + (ret & MDIO_MMD_AN_MV_STAT_REMOTE_RX)) { + /* The link state is latched low so that momentary link + * drops can be detected. Do not double-read the status + * in polling mode to detect such short link drops except + * the link was already down. + */ + if (!phy_polling_mode(phydev) || !phydev->link) { + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_1000BT1_STAT); + if (ret < 0) + return ret; + else if (ret & MDIO_PCS_1000BT1_STAT_LINK) + link = true; + } + + if (!link) { + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_1000BT1_STAT); + if (ret < 0) + return ret; + else if (ret & MDIO_PCS_1000BT1_STAT_LINK) + link = true; + } + } + + phydev->link = link; + + return 0; +} + +static int mv88q2xxx_read_link_100m(struct phy_device *phydev) +{ + int ret; + + /* The link state is latched low so that momentary link + * drops can be detected. Do not double-read the status + * in polling mode to detect such short link drops except + * the link was already down. In case we are not polling, + * we always read the realtime status. + */ + if (!phy_polling_mode(phydev) || !phydev->link) { + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_100BT1_STAT1); + if (ret < 0) + return ret; + else if (ret & MDIO_MMD_PCS_MV_100BT1_STAT1_LINK) + goto out; + } + + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_100BT1_STAT1); + if (ret < 0) + return ret; + +out: + /* Check if we have link and if the remote and local receiver are ok */ + if ((ret & MDIO_MMD_PCS_MV_100BT1_STAT1_LINK) && + (ret & MDIO_MMD_PCS_MV_100BT1_STAT1_LOCAL_RX) && + (ret & MDIO_MMD_PCS_MV_100BT1_STAT1_REMOTE_RX)) + phydev->link = true; + else + phydev->link = false; + + return 0; +} + +static int mv88q2xxx_read_link(struct phy_device *phydev) +{ + int ret; + + /* The 88Q2XXX PHYs do not have the PMA/PMD status register available, + * therefore we need to read the link status from the vendor specific + * registers depending on the speed. + */ + if (phydev->speed == SPEED_1000) + ret = mv88q2xxx_read_link_gbit(phydev); + else + ret = mv88q2xxx_read_link_100m(phydev); + + return ret; +} + +static int mv88q2xxx_read_status(struct phy_device *phydev) +{ + int ret; + + ret = mv88q2xxx_read_link(phydev); + if (ret < 0) + return ret; + + return genphy_c45_read_pma(phydev); +} + +static int mv88q2xxx_get_features(struct phy_device *phydev) +{ + int ret; + + ret = genphy_c45_pma_read_abilities(phydev); + if (ret) + return ret; + + /* We need to read the baset1 extended abilities manually because the + * PHY does not signalize it has the extended abilities register + * available. + */ + ret = genphy_c45_pma_baset1_read_abilities(phydev); + if (ret) + return ret; + + /* The PHY signalizes it supports autonegotiation. Unfortunately, so + * far it was not possible to get a link even when following the init + * sequence provided by Marvell. Disable it for now until a proper + * workaround is found or a new PHY revision is released. 
+ */ + linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported); + + return 0; +} + +static int mv88q2xxx_config_aneg(struct phy_device *phydev) +{ + int ret; + + ret = genphy_c45_config_aneg(phydev); + if (ret) + return ret; + + return mv88q2xxx_soft_reset(phydev); +} + +static int mv88q2xxx_config_init(struct phy_device *phydev) +{ + int ret; + + /* The 88Q2XXX PHYs do have the extended ability register available, but + * register MDIO_PMA_EXTABLE where they should signalize it does not + * work according to specification. Therefore, we force it here. + */ + phydev->pma_extable = MDIO_PMA_EXTABLE_BT1; + + /* Read the current PHY configuration */ + ret = genphy_c45_read_pma(phydev); + if (ret) + return ret; + + return mv88q2xxx_config_aneg(phydev); +} + +static int mv88q2xxxx_get_sqi(struct phy_device *phydev) +{ + int ret; + + if (phydev->speed == SPEED_100) { + /* Read the SQI from the vendor specific receiver status + * register + */ + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8230); + if (ret < 0) + return ret; + + ret = ret >> 12; + } else { + /* Read from vendor specific registers, they are not documented + * but can be found in the Software Initialization Guide. Only + * revisions >= A0 are supported. + */ + ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, 0xFC5D, 0x00FF, 0x00AC); + if (ret < 0) + return ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, 0xfc88); + if (ret < 0) + return ret; + } + + return ret & 0x0F; +} + +static int mv88q2xxxx_get_sqi_max(struct phy_device *phydev) +{ + return 15; +} + +static struct phy_driver mv88q2xxx_driver[] = { + { + .phy_id = MARVELL_PHY_ID_88Q2110, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "mv88q2110", + .get_features = mv88q2xxx_get_features, + .config_aneg = mv88q2xxx_config_aneg, + .config_init = mv88q2xxx_config_init, + .read_status = mv88q2xxx_read_status, + .soft_reset = mv88q2xxx_soft_reset, + .set_loopback = genphy_c45_loopback, + .get_sqi = mv88q2xxxx_get_sqi, + .get_sqi_max = mv88q2xxxx_get_sqi_max, + }, +}; + +module_phy_driver(mv88q2xxx_driver); + +static struct mdio_device_id __maybe_unused mv88q2xxx_tbl[] = { + { MARVELL_PHY_ID_88Q2110, MARVELL_PHY_ID_MASK }, + { /*sentinel*/ } +}; +MODULE_DEVICE_TABLE(mdio, mv88q2xxx_tbl); + +MODULE_DESCRIPTION("Marvell 88Q2XXX 100/1000BASE-T1 Automotive Ethernet PHY driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c index f83cae64585d..e3aa30dad2e6 100644 --- a/drivers/net/phy/marvell-88x2222.c +++ b/drivers/net/phy/marvell-88x2222.c @@ -14,7 +14,6 @@ #include <linux/mdio.h> #include <linux/marvell_phy.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/sfp.h> #include <linux/netdevice.h> diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 43b6cb725551..eba652a4c1d8 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -2893,6 +2893,272 @@ static int m88e1318_led_blink_set(struct phy_device *phydev, u8 index, MII_88E1318S_PHY_LED_FUNC, reg); } +struct marvell_led_rules { + int mode; + unsigned long rules; +}; + +static const struct marvell_led_rules marvell_led0[] = { + { + .mode = 0, + .rules = BIT(TRIGGER_NETDEV_LINK), + }, + { + .mode = 1, + .rules = (BIT(TRIGGER_NETDEV_LINK) | + BIT(TRIGGER_NETDEV_RX) | + BIT(TRIGGER_NETDEV_TX)), + }, + { + .mode = 3, + .rules = (BIT(TRIGGER_NETDEV_RX) | + BIT(TRIGGER_NETDEV_TX)), + }, + { + .mode = 4, + .rules = (BIT(TRIGGER_NETDEV_RX) | + BIT(TRIGGER_NETDEV_TX)), + }, + { + .mode = 5, + 
.rules = BIT(TRIGGER_NETDEV_TX), + }, + { + .mode = 6, + .rules = BIT(TRIGGER_NETDEV_LINK), + }, + { + .mode = 7, + .rules = BIT(TRIGGER_NETDEV_LINK_1000), + }, + { + .mode = 8, + .rules = 0, + }, +}; + +static const struct marvell_led_rules marvell_led1[] = { + { + .mode = 1, + .rules = (BIT(TRIGGER_NETDEV_LINK) | + BIT(TRIGGER_NETDEV_RX) | + BIT(TRIGGER_NETDEV_TX)), + }, + { + .mode = 2, + .rules = (BIT(TRIGGER_NETDEV_LINK) | + BIT(TRIGGER_NETDEV_RX)), + }, + { + .mode = 3, + .rules = (BIT(TRIGGER_NETDEV_RX) | + BIT(TRIGGER_NETDEV_TX)), + }, + { + .mode = 4, + .rules = (BIT(TRIGGER_NETDEV_RX) | + BIT(TRIGGER_NETDEV_TX)), + }, + { + .mode = 6, + .rules = (BIT(TRIGGER_NETDEV_LINK_100) | + BIT(TRIGGER_NETDEV_LINK_1000)), + }, + { + .mode = 7, + .rules = BIT(TRIGGER_NETDEV_LINK_100), + }, + { + .mode = 8, + .rules = 0, + }, +}; + +static const struct marvell_led_rules marvell_led2[] = { + { + .mode = 0, + .rules = BIT(TRIGGER_NETDEV_LINK), + }, + { + .mode = 1, + .rules = (BIT(TRIGGER_NETDEV_LINK) | + BIT(TRIGGER_NETDEV_RX) | + BIT(TRIGGER_NETDEV_TX)), + }, + { + .mode = 3, + .rules = (BIT(TRIGGER_NETDEV_RX) | + BIT(TRIGGER_NETDEV_TX)), + }, + { + .mode = 4, + .rules = (BIT(TRIGGER_NETDEV_RX) | + BIT(TRIGGER_NETDEV_TX)), + }, + { + .mode = 5, + .rules = BIT(TRIGGER_NETDEV_TX), + }, + { + .mode = 6, + .rules = (BIT(TRIGGER_NETDEV_LINK_10) | + BIT(TRIGGER_NETDEV_LINK_1000)), + }, + { + .mode = 7, + .rules = BIT(TRIGGER_NETDEV_LINK_10), + }, + { + .mode = 8, + .rules = 0, + }, +}; + +static int marvell_find_led_mode(unsigned long rules, + const struct marvell_led_rules *marvell_rules, + int count, + int *mode) +{ + int i; + + for (i = 0; i < count; i++) { + if (marvell_rules[i].rules == rules) { + *mode = marvell_rules[i].mode; + return 0; + } + } + return -EOPNOTSUPP; +} + +static int marvell_get_led_mode(u8 index, unsigned long rules, int *mode) +{ + int ret; + + switch (index) { + case 0: + ret = marvell_find_led_mode(rules, marvell_led0, + ARRAY_SIZE(marvell_led0), mode); + break; + case 1: + ret = marvell_find_led_mode(rules, marvell_led1, + ARRAY_SIZE(marvell_led1), mode); + break; + case 2: + ret = marvell_find_led_mode(rules, marvell_led2, + ARRAY_SIZE(marvell_led2), mode); + break; + default: + ret = -EINVAL; + } + + return ret; +} + +static int marvell_find_led_rules(unsigned long *rules, + const struct marvell_led_rules *marvell_rules, + int count, + int mode) +{ + int i; + + for (i = 0; i < count; i++) { + if (marvell_rules[i].mode == mode) { + *rules = marvell_rules[i].rules; + return 0; + } + } + return -EOPNOTSUPP; +} + +static int marvell_get_led_rules(u8 index, unsigned long *rules, int mode) +{ + int ret; + + switch (index) { + case 0: + ret = marvell_find_led_rules(rules, marvell_led0, + ARRAY_SIZE(marvell_led0), mode); + break; + case 1: + ret = marvell_find_led_rules(rules, marvell_led1, + ARRAY_SIZE(marvell_led1), mode); + break; + case 2: + ret = marvell_find_led_rules(rules, marvell_led2, + ARRAY_SIZE(marvell_led2), mode); + break; + default: + ret = -EOPNOTSUPP; + } + + return ret; +} + +static int m88e1318_led_hw_is_supported(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + int mode, ret; + + switch (index) { + case 0: + case 1: + case 2: + ret = marvell_get_led_mode(index, rules, &mode); + break; + default: + ret = -EINVAL; + } + + return ret; +} + +static int m88e1318_led_hw_control_set(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + int mode, ret, reg; + + switch (index) { + case 0: + case 1: + case 2: + ret = 
marvell_get_led_mode(index, rules, &mode); + break; + default: + ret = -EINVAL; + } + + if (ret < 0) + return ret; + + reg = phy_read_paged(phydev, MII_MARVELL_LED_PAGE, + MII_88E1318S_PHY_LED_FUNC); + if (reg < 0) + return reg; + + reg &= ~(0xf << (4 * index)); + reg |= mode << (4 * index); + return phy_write_paged(phydev, MII_MARVELL_LED_PAGE, + MII_88E1318S_PHY_LED_FUNC, reg); +} + +static int m88e1318_led_hw_control_get(struct phy_device *phydev, u8 index, + unsigned long *rules) +{ + int mode, reg; + + if (index > 2) + return -EINVAL; + + reg = phy_read_paged(phydev, MII_MARVELL_LED_PAGE, + MII_88E1318S_PHY_LED_FUNC); + if (reg < 0) + return reg; + + mode = (reg >> (4 * index)) & 0xf; + + return marvell_get_led_rules(index, rules, mode); +} + static int marvell_probe(struct phy_device *phydev) { struct marvell_priv *priv; @@ -3144,6 +3410,9 @@ static struct phy_driver marvell_drivers[] = { .get_stats = marvell_get_stats, .led_brightness_set = m88e1318_led_brightness_set, .led_blink_set = m88e1318_led_blink_set, + .led_hw_is_supported = m88e1318_led_hw_is_supported, + .led_hw_control_set = m88e1318_led_hw_control_set, + .led_hw_control_get = m88e1318_led_hw_control_get, }, { .phy_id = MARVELL_PHY_ID_88E1145, @@ -3252,6 +3521,9 @@ static struct phy_driver marvell_drivers[] = { .cable_test_get_status = marvell_vct7_cable_test_get_status, .led_brightness_set = m88e1318_led_brightness_set, .led_blink_set = m88e1318_led_blink_set, + .led_hw_is_supported = m88e1318_led_hw_is_supported, + .led_hw_control_set = m88e1318_led_hw_control_set, + .led_hw_control_get = m88e1318_led_hw_control_get, }, { .phy_id = MARVELL_PHY_ID_88E1540, @@ -3280,6 +3552,9 @@ static struct phy_driver marvell_drivers[] = { .cable_test_get_status = marvell_vct7_cable_test_get_status, .led_brightness_set = m88e1318_led_brightness_set, .led_blink_set = m88e1318_led_blink_set, + .led_hw_is_supported = m88e1318_led_hw_is_supported, + .led_hw_control_set = m88e1318_led_hw_control_set, + .led_hw_control_get = m88e1318_led_hw_control_get, }, { .phy_id = MARVELL_PHY_ID_88E1545, @@ -3308,6 +3583,9 @@ static struct phy_driver marvell_drivers[] = { .cable_test_get_status = marvell_vct7_cable_test_get_status, .led_brightness_set = m88e1318_led_brightness_set, .led_blink_set = m88e1318_led_blink_set, + .led_hw_is_supported = m88e1318_led_hw_is_supported, + .led_hw_control_set = m88e1318_led_hw_control_set, + .led_hw_control_get = m88e1318_led_hw_control_get, }, { .phy_id = MARVELL_PHY_ID_88E3016, @@ -3451,6 +3729,9 @@ static struct phy_driver marvell_drivers[] = { .set_tunable = m88e1540_set_tunable, .led_brightness_set = m88e1318_led_brightness_set, .led_blink_set = m88e1318_led_blink_set, + .led_hw_is_supported = m88e1318_led_hw_is_supported, + .led_hw_control_set = m88e1318_led_hw_control_set, + .led_hw_control_get = m88e1318_led_hw_control_get, }, }; diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 8b3618d3da4a..25dcaa49ab8b 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -107,16 +107,21 @@ int mdiobus_unregister_device(struct mdio_device *mdiodev) } EXPORT_SYMBOL(mdiobus_unregister_device); -struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr) +static struct mdio_device *mdiobus_find_device(struct mii_bus *bus, int addr) { bool addr_valid = addr >= 0 && addr < ARRAY_SIZE(bus->mdio_map); - struct mdio_device *mdiodev; if (WARN_ONCE(!addr_valid, "addr %d out of range\n", addr)) return NULL; - mdiodev = bus->mdio_map[addr]; + return bus->mdio_map[addr]; +} + 
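/* Sketch, not part of the patch: both lookups now funnel through the
 * bounds-checked helper, so an out-of-range address (addr >=
 * ARRAY_SIZE(bus->mdio_map)) trips the WARN_ONCE() instead of indexing
 * past the array:
 *
 *	mdiobus_get_phy(bus, 42);		// WARN_ONCE, returns NULL
 *	mdiobus_is_registered_device(bus, 42);	// WARN_ONCE, returns false
 *
 * Before this change, mdiobus_is_registered_device() indexed
 * bus->mdio_map[] unconditionally; only mdiobus_get_phy() validated addr.
 */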
+struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr) +{ + struct mdio_device *mdiodev; + mdiodev = mdiobus_find_device(bus, addr); if (!mdiodev) return NULL; @@ -129,7 +134,7 @@ EXPORT_SYMBOL(mdiobus_get_phy); bool mdiobus_is_registered_device(struct mii_bus *bus, int addr) { - return bus->mdio_map[addr]; + return mdiobus_find_device(bus, addr) != NULL; } EXPORT_SYMBOL(mdiobus_is_registered_device); @@ -1210,6 +1215,26 @@ int mdiobus_c45_write_nested(struct mii_bus *bus, int addr, int devad, } EXPORT_SYMBOL(mdiobus_c45_write_nested); +/* + * __mdiobus_modify - Convenience function for modifying a given mdio device + * register + * @bus: the mii_bus struct + * @addr: the phy address + * @regnum: register number to write + * @mask: bit mask of bits to clear + * @set: bit mask of bits to set + */ +int __mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask, + u16 set) +{ + int err; + + err = __mdiobus_modify_changed(bus, addr, regnum, mask, set); + + return err < 0 ? err : 0; +} +EXPORT_SYMBOL_GPL(__mdiobus_modify); + /** * mdiobus_modify - Convenience function for modifying a given mdio device * register @@ -1224,10 +1249,10 @@ int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask, u16 set) int err; mutex_lock(&bus->mdio_lock); - err = __mdiobus_modify_changed(bus, addr, regnum, mask, set); + err = __mdiobus_modify(bus, addr, regnum, mask, set); mutex_unlock(&bus->mdio_lock); - return err < 0 ? err : 0; + return err; } EXPORT_SYMBOL_GPL(mdiobus_modify); diff --git a/drivers/net/phy/mediatek-ge-soc.c b/drivers/net/phy/mediatek-ge-soc.c index 95369171a7ba..8a20d9889f10 100644 --- a/drivers/net/phy/mediatek-ge-soc.c +++ b/drivers/net/phy/mediatek-ge-soc.c @@ -1,11 +1,12 @@ // SPDX-License-Identifier: GPL-2.0+ #include <linux/bitfield.h> +#include <linux/bitmap.h> +#include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/nvmem-consumer.h> -#include <linux/of_address.h> -#include <linux/of_platform.h> #include <linux/pinctrl/consumer.h> #include <linux/phy.h> +#include <linux/regmap.h> #define MTK_GPHY_ID_MT7981 0x03a29461 #define MTK_GPHY_ID_MT7988 0x03a29481 @@ -208,9 +209,42 @@ #define MTK_PHY_DA_TX_R50_PAIR_C 0x53f #define MTK_PHY_DA_TX_R50_PAIR_D 0x540 +/* Registers on MDIO_MMD_VEND2 */ +#define MTK_PHY_LED0_ON_CTRL 0x24 +#define MTK_PHY_LED1_ON_CTRL 0x26 +#define MTK_PHY_LED_ON_MASK GENMASK(6, 0) +#define MTK_PHY_LED_ON_LINK1000 BIT(0) +#define MTK_PHY_LED_ON_LINK100 BIT(1) +#define MTK_PHY_LED_ON_LINK10 BIT(2) +#define MTK_PHY_LED_ON_LINKDOWN BIT(3) +#define MTK_PHY_LED_ON_FDX BIT(4) /* Full duplex */ +#define MTK_PHY_LED_ON_HDX BIT(5) /* Half duplex */ +#define MTK_PHY_LED_ON_FORCE_ON BIT(6) +#define MTK_PHY_LED_ON_POLARITY BIT(14) +#define MTK_PHY_LED_ON_ENABLE BIT(15) + +#define MTK_PHY_LED0_BLINK_CTRL 0x25 +#define MTK_PHY_LED1_BLINK_CTRL 0x27 +#define MTK_PHY_LED_BLINK_1000TX BIT(0) +#define MTK_PHY_LED_BLINK_1000RX BIT(1) +#define MTK_PHY_LED_BLINK_100TX BIT(2) +#define MTK_PHY_LED_BLINK_100RX BIT(3) +#define MTK_PHY_LED_BLINK_10TX BIT(4) +#define MTK_PHY_LED_BLINK_10RX BIT(5) +#define MTK_PHY_LED_BLINK_COLLISION BIT(6) +#define MTK_PHY_LED_BLINK_RX_CRC_ERR BIT(7) +#define MTK_PHY_LED_BLINK_RX_IDLE_ERR BIT(8) +#define MTK_PHY_LED_BLINK_FORCE_BLINK BIT(9) + +#define MTK_PHY_LED1_DEFAULT_POLARITIES BIT(1) + #define MTK_PHY_RG_BG_RASEL 0x115 #define MTK_PHY_RG_BG_RASEL_MASK GENMASK(2, 0) +/* 'boottrap' register reflecting the configuration of the 4 PHY LEDs */ +#define RG_GPIO_MISC_TPBANK0 0x6f0 +#define 
RG_GPIO_MISC_TPBANK0_BOOTMODE GENMASK(11, 8) + /* These macros provide efuse parsing for the internal PHY. */ #define EFS_DA_TX_I2MPB_A(x) (((x) >> 0) & GENMASK(5, 0)) #define EFS_DA_TX_I2MPB_B(x) (((x) >> 6) & GENMASK(5, 0)) @@ -238,13 +272,6 @@ enum { PAIR_D, }; -enum { - GPHY_PORT0, - GPHY_PORT1, - GPHY_PORT2, - GPHY_PORT3, -}; - enum calibration_mode { EFUSE_K, SW_K @@ -263,6 +290,19 @@ enum CAL_MODE { SW_M }; +#define MTK_PHY_LED_STATE_FORCE_ON 0 +#define MTK_PHY_LED_STATE_FORCE_BLINK 1 +#define MTK_PHY_LED_STATE_NETDEV 2 + +struct mtk_socphy_priv { + unsigned long led_state; +}; + +struct mtk_socphy_shared { + u32 boottrap; + struct mtk_socphy_priv priv[4]; +}; + static int mtk_socphy_read_page(struct phy_device *phydev) { return __phy_read(phydev, MTK_EXT_PAGE_ACCESS); @@ -1073,6 +1113,371 @@ static int mt798x_phy_config_init(struct phy_device *phydev) return mt798x_phy_calibration(phydev); } +static int mt798x_phy_hw_led_on_set(struct phy_device *phydev, u8 index, + bool on) +{ + unsigned int bit_on = MTK_PHY_LED_STATE_FORCE_ON + (index ? 16 : 0); + struct mtk_socphy_priv *priv = phydev->priv; + bool changed; + + if (on) + changed = !test_and_set_bit(bit_on, &priv->led_state); + else + changed = !!test_and_clear_bit(bit_on, &priv->led_state); + + changed |= !!test_and_clear_bit(MTK_PHY_LED_STATE_NETDEV + + (index ? 16 : 0), &priv->led_state); + if (changed) + return phy_modify_mmd(phydev, MDIO_MMD_VEND2, index ? + MTK_PHY_LED1_ON_CTRL : MTK_PHY_LED0_ON_CTRL, + MTK_PHY_LED_ON_MASK, + on ? MTK_PHY_LED_ON_FORCE_ON : 0); + else + return 0; +} + +static int mt798x_phy_hw_led_blink_set(struct phy_device *phydev, u8 index, + bool blinking) +{ + unsigned int bit_blink = MTK_PHY_LED_STATE_FORCE_BLINK + (index ? 16 : 0); + struct mtk_socphy_priv *priv = phydev->priv; + bool changed; + + if (blinking) + changed = !test_and_set_bit(bit_blink, &priv->led_state); + else + changed = !!test_and_clear_bit(bit_blink, &priv->led_state); + + changed |= !!test_bit(MTK_PHY_LED_STATE_NETDEV + + (index ? 16 : 0), &priv->led_state); + if (changed) + return phy_write_mmd(phydev, MDIO_MMD_VEND2, index ? + MTK_PHY_LED1_BLINK_CTRL : MTK_PHY_LED0_BLINK_CTRL, + blinking ?
MTK_PHY_LED_BLINK_FORCE_BLINK : 0); + else + return 0; +} + +static int mt798x_phy_led_blink_set(struct phy_device *phydev, u8 index, + unsigned long *delay_on, + unsigned long *delay_off) +{ + bool blinking = false; + int err = 0; + + if (index > 1) + return -EINVAL; + + if (delay_on && delay_off && (*delay_on > 0) && (*delay_off > 0)) { + blinking = true; + *delay_on = 50; + *delay_off = 50; + } + + err = mt798x_phy_hw_led_blink_set(phydev, index, blinking); + if (err) + return err; + + return mt798x_phy_hw_led_on_set(phydev, index, false); +} + +static int mt798x_phy_led_brightness_set(struct phy_device *phydev, + u8 index, enum led_brightness value) +{ + int err; + + err = mt798x_phy_hw_led_blink_set(phydev, index, false); + if (err) + return err; + + return mt798x_phy_hw_led_on_set(phydev, index, (value != LED_OFF)); +} + +static const unsigned long supported_triggers = (BIT(TRIGGER_NETDEV_FULL_DUPLEX) | + BIT(TRIGGER_NETDEV_HALF_DUPLEX) | + BIT(TRIGGER_NETDEV_LINK) | + BIT(TRIGGER_NETDEV_LINK_10) | + BIT(TRIGGER_NETDEV_LINK_100) | + BIT(TRIGGER_NETDEV_LINK_1000) | + BIT(TRIGGER_NETDEV_RX) | + BIT(TRIGGER_NETDEV_TX)); + +static int mt798x_phy_led_hw_is_supported(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + if (index > 1) + return -EINVAL; + + /* All combinations of the supported triggers are allowed */ + if (rules & ~supported_triggers) + return -EOPNOTSUPP; + + return 0; +}; + +static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index, + unsigned long *rules) +{ + unsigned int bit_blink = MTK_PHY_LED_STATE_FORCE_BLINK + (index ? 16 : 0); + unsigned int bit_netdev = MTK_PHY_LED_STATE_NETDEV + (index ? 16 : 0); + unsigned int bit_on = MTK_PHY_LED_STATE_FORCE_ON + (index ? 16 : 0); + struct mtk_socphy_priv *priv = phydev->priv; + int on, blink; + + if (index > 1) + return -EINVAL; + + on = phy_read_mmd(phydev, MDIO_MMD_VEND2, + index ? MTK_PHY_LED1_ON_CTRL : MTK_PHY_LED0_ON_CTRL); + + if (on < 0) + return -EIO; + + blink = phy_read_mmd(phydev, MDIO_MMD_VEND2, + index ? 
MTK_PHY_LED1_BLINK_CTRL : + MTK_PHY_LED0_BLINK_CTRL); + if (blink < 0) + return -EIO; + + if ((on & (MTK_PHY_LED_ON_LINK1000 | MTK_PHY_LED_ON_LINK100 | + MTK_PHY_LED_ON_LINK10)) || + (blink & (MTK_PHY_LED_BLINK_1000RX | MTK_PHY_LED_BLINK_100RX | + MTK_PHY_LED_BLINK_10RX | MTK_PHY_LED_BLINK_1000TX | + MTK_PHY_LED_BLINK_100TX | MTK_PHY_LED_BLINK_10TX))) + set_bit(bit_netdev, &priv->led_state); + else + clear_bit(bit_netdev, &priv->led_state); + + if (on & MTK_PHY_LED_ON_FORCE_ON) + set_bit(bit_on, &priv->led_state); + else + clear_bit(bit_on, &priv->led_state); + + if (blink & MTK_PHY_LED_BLINK_FORCE_BLINK) + set_bit(bit_blink, &priv->led_state); + else + clear_bit(bit_blink, &priv->led_state); + + if (!rules) + return 0; + + if (on & (MTK_PHY_LED_ON_LINK1000 | MTK_PHY_LED_ON_LINK100 | MTK_PHY_LED_ON_LINK10)) + *rules |= BIT(TRIGGER_NETDEV_LINK); + + if (on & MTK_PHY_LED_ON_LINK10) + *rules |= BIT(TRIGGER_NETDEV_LINK_10); + + if (on & MTK_PHY_LED_ON_LINK100) + *rules |= BIT(TRIGGER_NETDEV_LINK_100); + + if (on & MTK_PHY_LED_ON_LINK1000) + *rules |= BIT(TRIGGER_NETDEV_LINK_1000); + + if (on & MTK_PHY_LED_ON_FDX) + *rules |= BIT(TRIGGER_NETDEV_FULL_DUPLEX); + + if (on & MTK_PHY_LED_ON_HDX) + *rules |= BIT(TRIGGER_NETDEV_HALF_DUPLEX); + + if (blink & (MTK_PHY_LED_BLINK_1000RX | MTK_PHY_LED_BLINK_100RX | MTK_PHY_LED_BLINK_10RX)) + *rules |= BIT(TRIGGER_NETDEV_RX); + + if (blink & (MTK_PHY_LED_BLINK_1000TX | MTK_PHY_LED_BLINK_100TX | MTK_PHY_LED_BLINK_10TX)) + *rules |= BIT(TRIGGER_NETDEV_TX); + + return 0; +}; + +static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + unsigned int bit_netdev = MTK_PHY_LED_STATE_NETDEV + (index ? 16 : 0); + struct mtk_socphy_priv *priv = phydev->priv; + u16 on = 0, blink = 0; + int ret; + + if (index > 1) + return -EINVAL; + + if (rules & BIT(TRIGGER_NETDEV_FULL_DUPLEX)) + on |= MTK_PHY_LED_ON_FDX; + + if (rules & BIT(TRIGGER_NETDEV_HALF_DUPLEX)) + on |= MTK_PHY_LED_ON_HDX; + + if (rules & (BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK))) + on |= MTK_PHY_LED_ON_LINK10; + + if (rules & (BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK))) + on |= MTK_PHY_LED_ON_LINK100; + + if (rules & (BIT(TRIGGER_NETDEV_LINK_1000) | BIT(TRIGGER_NETDEV_LINK))) + on |= MTK_PHY_LED_ON_LINK1000; + + if (rules & BIT(TRIGGER_NETDEV_RX)) { + blink |= MTK_PHY_LED_BLINK_10RX | + MTK_PHY_LED_BLINK_100RX | + MTK_PHY_LED_BLINK_1000RX; + } + + if (rules & BIT(TRIGGER_NETDEV_TX)) { + blink |= MTK_PHY_LED_BLINK_10TX | + MTK_PHY_LED_BLINK_100TX | + MTK_PHY_LED_BLINK_1000TX; + } + + if (blink || on) + set_bit(bit_netdev, &priv->led_state); + else + clear_bit(bit_netdev, &priv->led_state); + + ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, index ? + MTK_PHY_LED1_ON_CTRL : + MTK_PHY_LED0_ON_CTRL, + MTK_PHY_LED_ON_FDX | + MTK_PHY_LED_ON_HDX | + MTK_PHY_LED_ON_LINK10 | + MTK_PHY_LED_ON_LINK100 | + MTK_PHY_LED_ON_LINK1000, + on); + + if (ret) + return ret; + + return phy_write_mmd(phydev, MDIO_MMD_VEND2, index ? 
+ MTK_PHY_LED1_BLINK_CTRL : + MTK_PHY_LED0_BLINK_CTRL, blink); +}; + +static bool mt7988_phy_led_get_polarity(struct phy_device *phydev, int led_num) +{ + struct mtk_socphy_shared *priv = phydev->shared->priv; + u32 polarities; + + if (led_num == 0) + polarities = ~(priv->boottrap); + else + polarities = MTK_PHY_LED1_DEFAULT_POLARITIES; + + if (polarities & BIT(phydev->mdio.addr)) + return true; + + return false; +} + +static int mt7988_phy_fix_leds_polarities(struct phy_device *phydev) +{ + struct pinctrl *pinctrl; + int index; + + /* Setup LED polarity according to bootstrap use of LED pins */ + for (index = 0; index < 2; ++index) + phy_modify_mmd(phydev, MDIO_MMD_VEND2, index ? + MTK_PHY_LED1_ON_CTRL : MTK_PHY_LED0_ON_CTRL, + MTK_PHY_LED_ON_POLARITY, + mt7988_phy_led_get_polarity(phydev, index) ? + MTK_PHY_LED_ON_POLARITY : 0); + + /* Only now setup pinctrl to avoid bogus blinking */ + pinctrl = devm_pinctrl_get_select(&phydev->mdio.dev, "gbe-led"); + if (IS_ERR(pinctrl)) + dev_err(&phydev->mdio.bus->dev, "Failed to setup PHY LED pinctrl\n"); + + return 0; +} + +static int mt7988_phy_probe_shared(struct phy_device *phydev) +{ + struct device_node *np = dev_of_node(&phydev->mdio.bus->dev); + struct mtk_socphy_shared *shared = phydev->shared->priv; + struct regmap *regmap; + u32 reg; + int ret; + + /* The LED0s of the 4 PHYs in MT7988 are wired to SoC pins LED_A, LED_B, + * LED_C and LED_D respectively. At the same time those pins are used to + * bootstrap configuration of the reference clock source (LED_A), + * DRAM DDRx16b x2/x1 (LED_B) and boot device (LED_C, LED_D). + * In practice this is done using an LED and a resistor pulling the pin + * either to GND or to VIO. + * The detected value at boot time is accessible at run-time using the + * TPBANK0 register located in the gpio base of the pinctrl; in order + * to read it here, it needs to be referenced by a phandle called + * 'mediatek,pio' in the MDIO bus hosting the PHY. + * The 4 bits in TPBANK0 are kept as package shared data and are used to + * set LED polarity for each of the LED0s.
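+ * For example, a bootmode field of 0b0101 sets MTK_PHY_LED_ON_POLARITY on
+ * LED0 of the PHYs at MDIO addresses 1 and 3 (polarities = ~boottrap picks
+ * the complementary bits), while LED1 polarity always comes from the fixed
+ * MTK_PHY_LED1_DEFAULT_POLARITIES mask.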
+ */ + regmap = syscon_regmap_lookup_by_phandle(np, "mediatek,pio"); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + ret = regmap_read(regmap, RG_GPIO_MISC_TPBANK0, &reg); + if (ret) + return ret; + + shared->boottrap = FIELD_GET(RG_GPIO_MISC_TPBANK0_BOOTMODE, reg); + + return 0; +} + +static void mt798x_phy_leds_state_init(struct phy_device *phydev) +{ + int i; + + for (i = 0; i < 2; ++i) + mt798x_phy_led_hw_control_get(phydev, i, NULL); +} + +static int mt7988_phy_probe(struct phy_device *phydev) +{ + struct mtk_socphy_shared *shared; + struct mtk_socphy_priv *priv; + int err; + + if (phydev->mdio.addr > 3) + return -EINVAL; + + err = devm_phy_package_join(&phydev->mdio.dev, phydev, 0, + sizeof(struct mtk_socphy_shared)); + if (err) + return err; + + if (phy_package_probe_once(phydev)) { + err = mt7988_phy_probe_shared(phydev); + if (err) + return err; + } + + shared = phydev->shared->priv; + priv = &shared->priv[phydev->mdio.addr]; + + phydev->priv = priv; + + mt798x_phy_leds_state_init(phydev); + + err = mt7988_phy_fix_leds_polarities(phydev); + if (err) + return err; + + return mt798x_phy_calibration(phydev); +} + +static int mt7981_phy_probe(struct phy_device *phydev) +{ + struct mtk_socphy_priv *priv; + + priv = devm_kzalloc(&phydev->mdio.dev, sizeof(struct mtk_socphy_priv), + GFP_KERNEL); + if (!priv) + return -ENOMEM; + + phydev->priv = priv; + + mt798x_phy_leds_state_init(phydev); + + return mt798x_phy_calibration(phydev); +} + static struct phy_driver mtk_socphy_driver[] = { { PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7981), @@ -1080,11 +1485,16 @@ static struct phy_driver mtk_socphy_driver[] = { .config_init = mt798x_phy_config_init, .config_intr = genphy_no_config_intr, .handle_interrupt = genphy_handle_interrupt_no_ack, - .probe = mt798x_phy_calibration, + .probe = mt7981_phy_probe, .suspend = genphy_suspend, .resume = genphy_resume, .read_page = mtk_socphy_read_page, .write_page = mtk_socphy_write_page, + .led_blink_set = mt798x_phy_led_blink_set, + .led_brightness_set = mt798x_phy_led_brightness_set, + .led_hw_is_supported = mt798x_phy_led_hw_is_supported, + .led_hw_control_set = mt798x_phy_led_hw_control_set, + .led_hw_control_get = mt798x_phy_led_hw_control_get, }, { PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7988), @@ -1092,11 +1502,16 @@ static struct phy_driver mtk_socphy_driver[] = { .config_init = mt798x_phy_config_init, .config_intr = genphy_no_config_intr, .handle_interrupt = genphy_handle_interrupt_no_ack, - .probe = mt798x_phy_calibration, + .probe = mt7988_phy_probe, .suspend = genphy_suspend, .resume = genphy_resume, .read_page = mtk_socphy_read_page, .write_page = mtk_socphy_write_page, + .led_blink_set = mt798x_phy_led_blink_set, + .led_brightness_set = mt798x_phy_led_brightness_set, + .led_hw_is_supported = mt798x_phy_led_hw_is_supported, + .led_hw_control_set = mt798x_phy_led_hw_control_set, + .led_hw_control_get = mt798x_phy_led_hw_control_get, }, }; diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index b6d7981b2d1e..08e3915001c3 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -1733,6 +1733,28 @@ static int ksz886x_config_aneg(struct phy_device *phydev) if (ret) return ret; + if (phydev->autoneg != AUTONEG_ENABLE) { + /* When autonegotiation is disabled, we need to manually force + * the link state. If we don't do this, the PHY will keep + * sending Fast Link Pulses (FLPs), which are part of the + * autonegotiation process. This is not desired when + * autonegotiation is off.
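+ * (Fast Link Pulses are the link negotiation pulse bursts defined by
+ * IEEE 802.3 Clause 28; forcing the link state simply keeps the PHY
+ * from transmitting them while autoneg is off.)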
+ */ + ret = phy_set_bits(phydev, MII_KSZPHY_CTRL, + KSZ886X_CTRL_FORCE_LINK); + if (ret) + return ret; + } else { + /* If we had previously forced the link state, we need to + * clear KSZ886X_CTRL_FORCE_LINK bit now. Otherwise, the PHY + * will not perform autonegotiation. + */ + ret = phy_clear_bits(phydev, MII_KSZPHY_CTRL, + KSZ886X_CTRL_FORCE_LINK); + if (ret) + return ret; + } + /* The MDI-X configuration is automatically changed by the PHY after * switching from autoneg off to on. So, take MDI-X configuration under * own control and set it after autoneg configuration was done. @@ -1800,9 +1822,6 @@ static const struct ksz9477_errata_write ksz9477_errata_writes[] = { /* Transmit waveform amplitude can be improved (1000BASE-T, 100BASE-TX, 10BASE-Te) */ {0x1c, 0x04, 0x00d0}, - /* Energy Efficient Ethernet (EEE) feature select must be manually disabled */ - {0x07, 0x3c, 0x0000}, - /* Register settings are required to meet data sheet supply current specifications */ {0x1c, 0x13, 0x6eff}, {0x1c, 0x14, 0xe6ff}, @@ -1847,6 +1866,12 @@ static int ksz9477_config_init(struct phy_device *phydev) return err; } + /* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes + * in this switch shall be regarded as broken. + */ + if (phydev->dev_flags & MICREL_NO_EEE) + phydev->eee_broken_modes = -1; + err = genphy_restart_aneg(phydev); if (err) return err; diff --git a/drivers/net/phy/motorcomm.c b/drivers/net/phy/motorcomm.c index 2fa5a90e073b..7a11fdb687cc 100644 --- a/drivers/net/phy/motorcomm.c +++ b/drivers/net/phy/motorcomm.c @@ -163,6 +163,10 @@ #define YT8521_CHIP_CONFIG_REG 0xA001 #define YT8521_CCR_SW_RST BIT(15) +#define YT8531_RGMII_LDO_VOL_MASK GENMASK(5, 4) +#define YT8531_LDO_VOL_3V3 0x0 +#define YT8531_LDO_VOL_1V8 0x2 + /* 1b0 disable 1.9ns rxc clock delay *default* * 1b1 enable 1.9ns rxc clock delay */ @@ -236,6 +240,12 @@ */ #define YTPHY_WCR_TYPE_PULSE BIT(0) +#define YTPHY_PAD_DRIVE_STRENGTH_REG 0xA010 +#define YT8531_RGMII_RXC_DS_MASK GENMASK(15, 13) +#define YT8531_RGMII_RXD_DS_HI_MASK BIT(12) /* Bit 2 of rxd_ds */ +#define YT8531_RGMII_RXD_DS_LOW_MASK GENMASK(5, 4) /* Bit 1/0 of rxd_ds */ +#define YT8531_RGMII_RX_DS_DEFAULT 0x3 + #define YTPHY_SYNCE_CFG_REG 0xA012 #define YT8521_SCR_SYNCE_ENABLE BIT(5) /* 1b0 output 25m clock @@ -835,6 +845,110 @@ static int ytphy_rgmii_clk_delay_config_with_lock(struct phy_device *phydev) } /** + * struct ytphy_ldo_vol_map - map a current value to a register value + * @vol: ldo voltage + * @ds: value in the register + * @cur: value in device configuration + */ +struct ytphy_ldo_vol_map { + u32 vol; + u32 ds; + u32 cur; +}; + +static const struct ytphy_ldo_vol_map yt8531_ldo_vol[] = { + {.vol = YT8531_LDO_VOL_1V8, .ds = 0, .cur = 1200}, + {.vol = YT8531_LDO_VOL_1V8, .ds = 1, .cur = 2100}, + {.vol = YT8531_LDO_VOL_1V8, .ds = 2, .cur = 2700}, + {.vol = YT8531_LDO_VOL_1V8, .ds = 3, .cur = 2910}, + {.vol = YT8531_LDO_VOL_1V8, .ds = 4, .cur = 3110}, + {.vol = YT8531_LDO_VOL_1V8, .ds = 5, .cur = 3600}, + {.vol = YT8531_LDO_VOL_1V8, .ds = 6, .cur = 3970}, + {.vol = YT8531_LDO_VOL_1V8, .ds = 7, .cur = 4350}, + {.vol = YT8531_LDO_VOL_3V3, .ds = 0, .cur = 3070}, + {.vol = YT8531_LDO_VOL_3V3, .ds = 1, .cur = 4080}, + {.vol = YT8531_LDO_VOL_3V3, .ds = 2, .cur = 4370}, + {.vol = YT8531_LDO_VOL_3V3, .ds = 3, .cur = 4680}, + {.vol = YT8531_LDO_VOL_3V3, .ds = 4, .cur = 5020}, + {.vol = YT8531_LDO_VOL_3V3, .ds = 5, .cur = 5450}, + {.vol = YT8531_LDO_VOL_3V3, .ds = 6, .cur = 5740}, + {.vol = YT8531_LDO_VOL_3V3, .ds = 7, .cur = 6140}, +}; + +static u32 
yt8531_get_ldo_vol(struct phy_device *phydev) +{ + u32 val; + + val = ytphy_read_ext_with_lock(phydev, YT8521_CHIP_CONFIG_REG); + val = FIELD_GET(YT8531_RGMII_LDO_VOL_MASK, val); + + return val <= YT8531_LDO_VOL_1V8 ? val : YT8531_LDO_VOL_1V8; +} + +static int yt8531_get_ds_map(struct phy_device *phydev, u32 cur) +{ + u32 vol; + int i; + + vol = yt8531_get_ldo_vol(phydev); + for (i = 0; i < ARRAY_SIZE(yt8531_ldo_vol); i++) { + if (yt8531_ldo_vol[i].vol == vol && yt8531_ldo_vol[i].cur == cur) + return yt8531_ldo_vol[i].ds; + } + + return -EINVAL; +} + +static int yt8531_set_ds(struct phy_device *phydev) +{ + struct device_node *node = phydev->mdio.dev.of_node; + u32 ds_field_low, ds_field_hi, val; + int ret, ds; + + /* set rgmii rx clk driver strength */ + if (!of_property_read_u32(node, "motorcomm,rx-clk-drv-microamp", &val)) { + ds = yt8531_get_ds_map(phydev, val); + if (ds < 0) + return dev_err_probe(&phydev->mdio.dev, ds, + "No matching current value was found.\n"); + } else { + ds = YT8531_RGMII_RX_DS_DEFAULT; + } + + ret = ytphy_modify_ext_with_lock(phydev, + YTPHY_PAD_DRIVE_STRENGTH_REG, + YT8531_RGMII_RXC_DS_MASK, + FIELD_PREP(YT8531_RGMII_RXC_DS_MASK, ds)); + if (ret < 0) + return ret; + + /* set rgmii rx data driver strength */ + if (!of_property_read_u32(node, "motorcomm,rx-data-drv-microamp", &val)) { + ds = yt8531_get_ds_map(phydev, val); + if (ds < 0) + return dev_err_probe(&phydev->mdio.dev, ds, + "No matching current value was found.\n"); + } else { + ds = YT8531_RGMII_RX_DS_DEFAULT; + } + + ds_field_hi = FIELD_GET(BIT(2), ds); + ds_field_hi = FIELD_PREP(YT8531_RGMII_RXD_DS_HI_MASK, ds_field_hi); + + ds_field_low = FIELD_GET(GENMASK(1, 0), ds); + ds_field_low = FIELD_PREP(YT8531_RGMII_RXD_DS_LOW_MASK, ds_field_low); + + ret = ytphy_modify_ext_with_lock(phydev, + YTPHY_PAD_DRIVE_STRENGTH_REG, + YT8531_RGMII_RXD_DS_LOW_MASK | YT8531_RGMII_RXD_DS_HI_MASK, + ds_field_low | ds_field_hi); + if (ret < 0) + return ret; + + return 0; +} + +/** * yt8521_probe() - read chip config then set suitable polling_mode * @phydev: a pointer to a &struct phy_device * @@ -1518,6 +1632,10 @@ static int yt8531_config_init(struct phy_device *phydev) return ret; } + ret = yt8531_set_ds(phydev); + if (ret < 0) + return ret; + return 0; } diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c index 018253a573b8..4f39ba63a9a9 100644 --- a/drivers/net/phy/mscc/mscc_macsec.c +++ b/drivers/net/phy/mscc/mscc_macsec.c @@ -849,6 +849,9 @@ static int vsc8584_macsec_upd_rxsa(struct macsec_context *ctx) struct macsec_flow *flow; int ret; + if (ctx->sa.update_pn) + return -EINVAL; + flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR); if (IS_ERR(flow)) return PTR_ERR(flow); @@ -900,6 +903,9 @@ static int vsc8584_macsec_upd_txsa(struct macsec_context *ctx) struct macsec_flow *flow; int ret; + if (ctx->sa.update_pn) + return -EINVAL; + flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR); if (IS_ERR(flow)) return PTR_ERR(flow); diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c index 029875a59ff8..7ab080ff02df 100644 --- a/drivers/net/phy/nxp-c45-tja11xx.c +++ b/drivers/net/phy/nxp-c45-tja11xx.c @@ -18,24 +18,37 @@ #include <linux/net_tstamp.h> #define PHY_ID_TJA_1103 0x001BB010 - -#define PMAPMD_B100T1_PMAPMD_CTL 0x0834 -#define B100T1_PMAPMD_CONFIG_EN BIT(15) -#define B100T1_PMAPMD_MASTER BIT(14) -#define MASTER_MODE (B100T1_PMAPMD_CONFIG_EN | \ - B100T1_PMAPMD_MASTER) -#define SLAVE_MODE (B100T1_PMAPMD_CONFIG_EN) +#define PHY_ID_TJA_1120 0x001BB031 #define 
VEND1_DEVICE_CONTROL 0x0040 #define DEVICE_CONTROL_RESET BIT(15) #define DEVICE_CONTROL_CONFIG_GLOBAL_EN BIT(14) #define DEVICE_CONTROL_CONFIG_ALL_EN BIT(13) +#define VEND1_DEVICE_CONFIG 0x0048 + +#define TJA1120_VEND1_EXT_TS_MODE 0x1012 + +#define TJA1120_GLOBAL_INFRA_IRQ_ACK 0x2C08 +#define TJA1120_GLOBAL_INFRA_IRQ_EN 0x2C0A +#define TJA1120_GLOBAL_INFRA_IRQ_STATUS 0x2C0C +#define TJA1120_DEV_BOOT_DONE BIT(1) + +#define TJA1120_VEND1_PTP_TRIG_DATA_S 0x1070 + +#define TJA1120_EGRESS_TS_DATA_S 0x9060 +#define TJA1120_EGRESS_TS_END 0x9067 +#define TJA1120_TS_VALID BIT(0) +#define TJA1120_MORE_TS BIT(15) + #define VEND1_PHY_IRQ_ACK 0x80A0 #define VEND1_PHY_IRQ_EN 0x80A1 #define VEND1_PHY_IRQ_STATUS 0x80A2 #define PHY_IRQ_LINK_EVENT BIT(1) +#define VEND1_ALWAYS_ACCESSIBLE 0x801F +#define FUSA_PASS BIT(4) + #define VEND1_PHY_CONTROL 0x8100 #define PHY_CONFIG_EN BIT(14) #define PHY_START_OP BIT(0) @@ -43,15 +56,16 @@ #define VEND1_PHY_CONFIG 0x8108 #define PHY_CONFIG_AUTO BIT(0) +#define TJA1120_EPHY_RESETS 0x810A +#define EPHY_PCS_RESET BIT(3) + #define VEND1_SIGNAL_QUALITY 0x8320 #define SQI_VALID BIT(14) #define SQI_MASK GENMASK(2, 0) #define MAX_SQI SQI_MASK -#define VEND1_CABLE_TEST 0x8330 #define CABLE_TEST_ENABLE BIT(15) #define CABLE_TEST_START BIT(14) -#define CABLE_TEST_VALID BIT(13) #define CABLE_TEST_OK 0x00 #define CABLE_TEST_SHORTED 0x01 #define CABLE_TEST_OPEN 0x02 @@ -63,6 +77,12 @@ #define VEND1_PORT_ABILITIES 0x8046 #define PTP_ABILITY BIT(3) +#define VEND1_PORT_FUNC_IRQ_EN 0x807A +#define PTP_IRQS BIT(3) + +#define VEND1_PTP_IRQ_ACK 0x9008 +#define EGR_TS_IRQ BIT(1) + #define VEND1_PORT_INFRA_CONTROL 0xAC00 #define PORT_INFRA_CONTROL_EN BIT(14) @@ -85,12 +105,17 @@ #define MII_BASIC_CONFIG_RMII 0x5 #define MII_BASIC_CONFIG_MII 0x4 +#define VEND1_SYMBOL_ERROR_CNT_XTD 0x8351 +#define EXTENDED_CNT_EN BIT(15) +#define VEND1_MONITOR_STATUS 0xAC80 +#define MONITOR_RESET BIT(15) +#define VEND1_MONITOR_CONFIG 0xAC86 +#define LOST_FRAMES_CNT_EN BIT(9) +#define ALL_FRAMES_CNT_EN BIT(8) + #define VEND1_SYMBOL_ERROR_COUNTER 0x8350 #define VEND1_LINK_DROP_COUNTER 0x8352 #define VEND1_LINK_LOSSES_AND_FAILURES 0x8353 -#define VEND1_R_GOOD_FRAME_CNT 0xA950 -#define VEND1_R_BAD_FRAME_CNT 0xA952 -#define VEND1_R_RXER_FRAME_CNT 0xA954 #define VEND1_RX_PREAMBLE_COUNT 0xAFCE #define VEND1_TX_PREAMBLE_COUNT 0xAFCF #define VEND1_RX_IPG_LENGTH 0xAFD0 @@ -99,81 +124,43 @@ #define VEND1_PTP_CONFIG 0x1102 #define EXT_TRG_EDGE BIT(1) -#define PPS_OUT_POL BIT(2) -#define PPS_OUT_EN BIT(3) -#define VEND1_LTC_LOAD_CTRL 0x1105 -#define READ_LTC BIT(2) -#define LOAD_LTC BIT(0) +#define TJA1120_SYNC_TRIG_FILTER 0x1010 +#define PTP_TRIG_RISE_TS BIT(3) +#define PTP_TRIG_FALLING_TS BIT(2) -#define VEND1_LTC_WR_NSEC_0 0x1106 -#define VEND1_LTC_WR_NSEC_1 0x1107 -#define VEND1_LTC_WR_SEC_0 0x1108 -#define VEND1_LTC_WR_SEC_1 0x1109 - -#define VEND1_LTC_RD_NSEC_0 0x110A -#define VEND1_LTC_RD_NSEC_1 0x110B -#define VEND1_LTC_RD_SEC_0 0x110C -#define VEND1_LTC_RD_SEC_1 0x110D - -#define VEND1_RATE_ADJ_SUBNS_0 0x110F -#define VEND1_RATE_ADJ_SUBNS_1 0x1110 #define CLK_RATE_ADJ_LD BIT(15) #define CLK_RATE_ADJ_DIR BIT(14) -#define VEND1_HW_LTC_LOCK_CTRL 0x1115 -#define HW_LTC_LOCK_EN BIT(0) - -#define VEND1_PTP_IRQ_EN 0x1131 -#define VEND1_PTP_IRQ_STATUS 0x1132 -#define PTP_IRQ_EGR_TS BIT(0) - #define VEND1_RX_TS_INSRT_CTRL 0x114D -#define RX_TS_INSRT_MODE2 0x02 +#define TJA1103_RX_TS_INSRT_MODE2 0x02 + +#define TJA1120_RX_TS_INSRT_CTRL 0x9012 +#define TJA1120_RX_TS_INSRT_EN BIT(15) +#define TJA1120_TS_INSRT_MODE BIT(4) 
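/* Hypothetical helper, not part of the patch, illustrating how the two RX
 * timestamp-insertion schemes defined above differ: the TJA1103 writes a
 * mode value into VEND1_RX_TS_INSRT_CTRL, while the TJA1120 has dedicated
 * enable/mode bits in its own register.
 */
static int nxp_c45_rx_ts_insrt_sketch(struct phy_device *phydev,
				      bool is_tja1120)
{
	if (is_tja1120)
		/* Enable insertion and select the mode via discrete bits. */
		return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
					TJA1120_RX_TS_INSRT_CTRL,
					TJA1120_RX_TS_INSRT_EN |
					TJA1120_TS_INSRT_MODE);

	/* TJA1103: the register as a whole acts as a mode selector. */
	return phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
			     TJA1103_RX_TS_INSRT_MODE2);
}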
#define VEND1_EGR_RING_DATA_0 0x114E -#define VEND1_EGR_RING_DATA_1_SEQ_ID 0x114F -#define VEND1_EGR_RING_DATA_2_NSEC_15_0 0x1150 -#define VEND1_EGR_RING_DATA_3 0x1151 #define VEND1_EGR_RING_CTRL 0x1154 -#define VEND1_EXT_TRG_TS_DATA_0 0x1121 -#define VEND1_EXT_TRG_TS_DATA_1 0x1122 -#define VEND1_EXT_TRG_TS_DATA_2 0x1123 -#define VEND1_EXT_TRG_TS_DATA_3 0x1124 -#define VEND1_EXT_TRG_TS_DATA_4 0x1125 -#define VEND1_EXT_TRG_TS_CTRL 0x1126 - -#define RING_DATA_0_DOMAIN_NUMBER GENMASK(7, 0) -#define RING_DATA_0_MSG_TYPE GENMASK(11, 8) -#define RING_DATA_0_SEC_4_2 GENMASK(14, 2) #define RING_DATA_0_TS_VALID BIT(15) -#define RING_DATA_3_NSEC_29_16 GENMASK(13, 0) -#define RING_DATA_3_SEC_1_0 GENMASK(15, 14) -#define RING_DATA_5_SEC_16_5 GENMASK(15, 4) #define RING_DONE BIT(0) #define TS_SEC_MASK GENMASK(1, 0) #define VEND1_PORT_FUNC_ENABLES 0x8048 #define PTP_ENABLE BIT(3) +#define PHY_TEST_ENABLE BIT(0) #define VEND1_PORT_PTP_CONTROL 0x9000 #define PORT_PTP_CONTROL_BYPASS BIT(11) -#define VEND1_PTP_CLK_PERIOD 0x1104 #define PTP_CLK_PERIOD_100BT1 15ULL +#define PTP_CLK_PERIOD_1000BT1 8ULL -#define VEND1_EVENT_MSG_FILT 0x1148 #define EVENT_MSG_FILT_ALL 0x0F #define EVENT_MSG_FILT_NONE 0x00 -#define VEND1_TX_PIPE_DLY_NS 0x1149 -#define VEND1_TX_PIPEDLY_SUBNS 0x114A -#define VEND1_RX_PIPE_DLY_NS 0x114B -#define VEND1_RX_PIPEDLY_SUBNS 0x114C - #define VEND1_GPIO_FUNC_CONFIG_BASE 0x2C40 #define GPIO_FUNC_EN BIT(15) #define GPIO_FUNC_PTP BIT(6) @@ -191,16 +178,33 @@ #define MAX_ID_PS 2260U #define DEFAULT_ID_PS 2000U -#define PPM_TO_SUBNS_INC(ppb) div_u64(GENMASK_ULL(31, 0) * (ppb) * \ - PTP_CLK_PERIOD_100BT1, NSEC_PER_SEC) +#define PPM_TO_SUBNS_INC(ppb, ptp_clk_period) div_u64(GENMASK_ULL(31, 0) * \ + (ppb) * (ptp_clk_period), NSEC_PER_SEC) #define NXP_C45_SKB_CB(skb) ((struct nxp_c45_skb_cb *)(skb)->cb) +struct nxp_c45_phy; + struct nxp_c45_skb_cb { struct ptp_header *header; unsigned int type; }; +#define NXP_C45_REG_FIELD(_reg, _devad, _offset, _size) \ + ((struct nxp_c45_reg_field) { \ + .reg = _reg, \ + .devad = _devad, \ + .offset = _offset, \ + .size = _size, \ + }) + +struct nxp_c45_reg_field { + u16 reg; + u8 devad; + u8 offset; + u8 size; +}; + struct nxp_c45_hwts { u32 nsec; u32 sec; @@ -209,7 +213,76 @@ struct nxp_c45_hwts { u8 msg_type; }; +struct nxp_c45_regmap { + /* PTP config regs. */ + u16 vend1_ptp_clk_period; + u16 vend1_event_msg_filt; + + /* LTC bits and regs. */ + struct nxp_c45_reg_field ltc_read; + struct nxp_c45_reg_field ltc_write; + struct nxp_c45_reg_field ltc_lock_ctrl; + u16 vend1_ltc_wr_nsec_0; + u16 vend1_ltc_wr_nsec_1; + u16 vend1_ltc_wr_sec_0; + u16 vend1_ltc_wr_sec_1; + u16 vend1_ltc_rd_nsec_0; + u16 vend1_ltc_rd_nsec_1; + u16 vend1_ltc_rd_sec_0; + u16 vend1_ltc_rd_sec_1; + u16 vend1_rate_adj_subns_0; + u16 vend1_rate_adj_subns_1; + + /* External trigger reg fields. */ + struct nxp_c45_reg_field irq_egr_ts_en; + struct nxp_c45_reg_field irq_egr_ts_status; + struct nxp_c45_reg_field domain_number; + struct nxp_c45_reg_field msg_type; + struct nxp_c45_reg_field sequence_id; + struct nxp_c45_reg_field sec_1_0; + struct nxp_c45_reg_field sec_4_2; + struct nxp_c45_reg_field nsec_15_0; + struct nxp_c45_reg_field nsec_29_16; + + /* PPS and EXT Trigger bits and regs. */ + struct nxp_c45_reg_field pps_enable; + struct nxp_c45_reg_field pps_polarity; + u16 vend1_ext_trg_data_0; + u16 vend1_ext_trg_data_1; + u16 vend1_ext_trg_data_2; + u16 vend1_ext_trg_data_3; + u16 vend1_ext_trg_ctrl; + + /* Cable test reg fields. 
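+	 * These supersede the fixed VEND1_CABLE_TEST bit layout removed above,
+	 * letting each chip place the valid flag and result code differently.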
*/ + u16 cable_test; + struct nxp_c45_reg_field cable_test_valid; + struct nxp_c45_reg_field cable_test_result; +}; + +struct nxp_c45_phy_stats { + const char *name; + const struct nxp_c45_reg_field counter; +}; + +struct nxp_c45_phy_data { + const struct nxp_c45_regmap *regmap; + const struct nxp_c45_phy_stats *stats; + int n_stats; + u8 ptp_clk_period; + bool ext_ts_both_edges; + bool ack_ptp_irq; + void (*counters_enable)(struct phy_device *phydev); + bool (*get_egressts)(struct nxp_c45_phy *priv, + struct nxp_c45_hwts *hwts); + bool (*get_extts)(struct nxp_c45_phy *priv, struct timespec64 *extts); + void (*ptp_init)(struct phy_device *phydev); + void (*ptp_enable)(struct phy_device *phydev, bool enable); + void (*nmi_handler)(struct phy_device *phydev, + irqreturn_t *irq_status); +}; + struct nxp_c45_phy { + const struct nxp_c45_phy_data *phy_data; struct phy_device *phydev; struct mii_timestamper mii_ts; struct ptp_clock *ptp_clock; @@ -227,13 +300,86 @@ struct nxp_c45_phy { bool extts; }; -struct nxp_c45_phy_stats { - const char *name; - u8 mmd; - u16 reg; - u8 off; - u16 mask; -}; +static const +struct nxp_c45_phy_data *nxp_c45_get_data(struct phy_device *phydev) +{ + return phydev->drv->driver_data; +} + +static const +struct nxp_c45_regmap *nxp_c45_get_regmap(struct phy_device *phydev) +{ + const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev); + + return phy_data->regmap; +} + +static int nxp_c45_read_reg_field(struct phy_device *phydev, + const struct nxp_c45_reg_field *reg_field) +{ + u16 mask; + int ret; + + if (reg_field->size == 0) { + phydev_err(phydev, "Trying to read a reg field of size 0.\n"); + return -EINVAL; + } + + ret = phy_read_mmd(phydev, reg_field->devad, reg_field->reg); + if (ret < 0) + return ret; + + mask = reg_field->size == 1 ? BIT(reg_field->offset) : + GENMASK(reg_field->offset + reg_field->size - 1, + reg_field->offset); + ret &= mask; + ret >>= reg_field->offset; + + return ret; +} + +static int nxp_c45_write_reg_field(struct phy_device *phydev, + const struct nxp_c45_reg_field *reg_field, + u16 val) +{ + u16 mask; + u16 set; + + if (reg_field->size == 0) { + phydev_err(phydev, "Trying to write a reg field of size 0.\n"); + return -EINVAL; + } + + mask = reg_field->size == 1 ? 
BIT(reg_field->offset) : + GENMASK(reg_field->offset + reg_field->size - 1, + reg_field->offset); + set = val << reg_field->offset; + + return phy_modify_mmd_changed(phydev, reg_field->devad, + reg_field->reg, mask, set); +} + +static int nxp_c45_set_reg_field(struct phy_device *phydev, + const struct nxp_c45_reg_field *reg_field) +{ + if (reg_field->size != 1) { + phydev_err(phydev, "Trying to set a reg field of size different than 1.\n"); + return -EINVAL; + } + + return nxp_c45_write_reg_field(phydev, reg_field, 1); +} + +static int nxp_c45_clear_reg_field(struct phy_device *phydev, + const struct nxp_c45_reg_field *reg_field) +{ + if (reg_field->size != 1) { + phydev_err(phydev, "Trying to set a reg field of size different than 1.\n"); + return -EINVAL; + } + + return nxp_c45_write_reg_field(phydev, reg_field, 0); +} static bool nxp_c45_poll_txts(struct phy_device *phydev) { @@ -245,17 +391,17 @@ static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp, struct ptp_system_timestamp *sts) { struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps); + const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev); - phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_LOAD_CTRL, - READ_LTC); + nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_read); ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, - VEND1_LTC_RD_NSEC_0); + regmap->vend1_ltc_rd_nsec_0); ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, - VEND1_LTC_RD_NSEC_1) << 16; + regmap->vend1_ltc_rd_nsec_1) << 16; ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, - VEND1_LTC_RD_SEC_0); + regmap->vend1_ltc_rd_sec_0); ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, - VEND1_LTC_RD_SEC_1) << 16; + regmap->vend1_ltc_rd_sec_1) << 16; return 0; } @@ -277,17 +423,17 @@ static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp, const struct timespec64 *ts) { struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps); + const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev); - phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_NSEC_0, + phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_0, ts->tv_nsec); - phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_NSEC_1, + phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_1, ts->tv_nsec >> 16); - phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_SEC_0, + phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_0, ts->tv_sec); - phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_SEC_1, + phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_1, ts->tv_sec >> 16); - phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_LOAD_CTRL, - LOAD_LTC); + nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_write); return 0; } @@ -307,6 +453,8 @@ static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp, static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps); + const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev); + const struct nxp_c45_regmap *regmap = data->regmap; s32 ppb = scaled_ppm_to_ppb(scaled_ppm); u64 subns_inc_val; bool inc; @@ -315,16 +463,18 @@ static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) inc = ppb >= 0; ppb = abs(ppb); - subns_inc_val = PPM_TO_SUBNS_INC(ppb); + subns_inc_val = PPM_TO_SUBNS_INC(ppb, data->ptp_clk_period); - phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_RATE_ADJ_SUBNS_0, +
phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, + regmap->vend1_rate_adj_subns_0, subns_inc_val); subns_inc_val >>= 16; subns_inc_val |= CLK_RATE_ADJ_LD; if (inc) subns_inc_val |= CLK_RATE_ADJ_DIR; - phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_RATE_ADJ_SUBNS_1, + phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, + regmap->vend1_rate_adj_subns_1, subns_inc_val); mutex_unlock(&priv->ptp_lock); @@ -365,19 +515,88 @@ static bool nxp_c45_match_ts(struct ptp_header *header, header->domain_number == hwts->domain_number; } -static void nxp_c45_get_extts(struct nxp_c45_phy *priv, +static bool nxp_c45_get_extts(struct nxp_c45_phy *priv, struct timespec64 *extts) { + const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev); + extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, - VEND1_EXT_TRG_TS_DATA_0); + regmap->vend1_ext_trg_data_0); extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, - VEND1_EXT_TRG_TS_DATA_1) << 16; + regmap->vend1_ext_trg_data_1) << 16; extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, - VEND1_EXT_TRG_TS_DATA_2); + regmap->vend1_ext_trg_data_2); extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, - VEND1_EXT_TRG_TS_DATA_3) << 16; - phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EXT_TRG_TS_CTRL, - RING_DONE); + regmap->vend1_ext_trg_data_3) << 16; + phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, + regmap->vend1_ext_trg_ctrl, RING_DONE); + + return true; +} + +static bool tja1120_extts_is_valid(struct phy_device *phydev) +{ + bool valid; + int reg; + + reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, + TJA1120_VEND1_PTP_TRIG_DATA_S); + valid = !!(reg & TJA1120_TS_VALID); + + return valid; +} + +static bool tja1120_get_extts(struct nxp_c45_phy *priv, + struct timespec64 *extts) +{ + const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev); + struct phy_device *phydev = priv->phydev; + bool more_ts; + bool valid; + u16 reg; + + reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, + regmap->vend1_ext_trg_ctrl); + more_ts = !!(reg & TJA1120_MORE_TS); + + valid = tja1120_extts_is_valid(phydev); + if (!valid) { + if (!more_ts) + goto tja1120_get_extts_out; + + /* Bug workaround for TJA1120 engineering samples: move the new + * timestamp from the FIFO to the buffer. 
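+	 * (The RING_DONE write below pops the next entry out of the FIFO into
+	 * the trigger data registers, after which the valid flag is sampled
+	 * again.)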
+ */ + phy_write_mmd(phydev, MDIO_MMD_VEND1, + regmap->vend1_ext_trg_ctrl, RING_DONE); + valid = tja1120_extts_is_valid(phydev); + if (!valid) + goto tja1120_get_extts_out; + } + + nxp_c45_get_extts(priv, extts); +tja1120_get_extts_out: + return valid; +} + +static void nxp_c45_read_egress_ts(struct nxp_c45_phy *priv, + struct nxp_c45_hwts *hwts) +{ + const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev); + struct phy_device *phydev = priv->phydev; + + hwts->domain_number = + nxp_c45_read_reg_field(phydev, &regmap->domain_number); + hwts->msg_type = + nxp_c45_read_reg_field(phydev, &regmap->msg_type); + hwts->sequence_id = + nxp_c45_read_reg_field(phydev, &regmap->sequence_id); + hwts->nsec = + nxp_c45_read_reg_field(phydev, &regmap->nsec_15_0); + hwts->nsec |= + nxp_c45_read_reg_field(phydev, &regmap->nsec_29_16) << 16; + hwts->sec = nxp_c45_read_reg_field(phydev, &regmap->sec_1_0); + hwts->sec |= nxp_c45_read_reg_field(phydev, &regmap->sec_4_2) << 2; } static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv, @@ -394,22 +613,56 @@ if (!valid) goto nxp_c45_get_hwtxts_out; - hwts->domain_number = reg; - hwts->msg_type = (reg & RING_DATA_0_MSG_TYPE) >> 8; - hwts->sec = (reg & RING_DATA_0_SEC_4_2) >> 10; - hwts->sequence_id = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, - VEND1_EGR_RING_DATA_1_SEQ_ID); - hwts->nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, - VEND1_EGR_RING_DATA_2_NSEC_15_0); - reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_3); - hwts->nsec |= (reg & RING_DATA_3_NSEC_29_16) << 16; - hwts->sec |= (reg & RING_DATA_3_SEC_1_0) >> 14; - + nxp_c45_read_egress_ts(priv, hwts); nxp_c45_get_hwtxts_out: mutex_unlock(&priv->ptp_lock); return valid; } +static bool tja1120_egress_ts_is_valid(struct phy_device *phydev) +{ + bool valid; + u16 reg; + + reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S); + valid = !!(reg & TJA1120_TS_VALID); + + return valid; +} + +static bool tja1120_get_hwtxts(struct nxp_c45_phy *priv, + struct nxp_c45_hwts *hwts) +{ + struct phy_device *phydev = priv->phydev; + bool more_ts; + bool valid; + u16 reg; + + mutex_lock(&priv->ptp_lock); + reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_END); + more_ts = !!(reg & TJA1120_MORE_TS); + valid = tja1120_egress_ts_is_valid(phydev); + if (!valid) { + if (!more_ts) + goto tja1120_get_hwtxts_out; + + /* Bug workaround for TJA1120 engineering samples: move the + * new timestamp from the FIFO to the buffer.
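+	 * (Here the pop is a TJA1120_TS_VALID write to TJA1120_EGRESS_TS_END,
+	 * after which the valid bit is re-read from TJA1120_EGRESS_TS_DATA_S.)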
+ */ + phy_write_mmd(phydev, MDIO_MMD_VEND1, + TJA1120_EGRESS_TS_END, TJA1120_TS_VALID); + valid = tja1120_egress_ts_is_valid(phydev); + if (!valid) + goto tja1120_get_hwtxts_out; + } + nxp_c45_read_egress_ts(priv, hwts); + phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S, + TJA1120_TS_VALID); +tja1120_get_hwtxts_out: + mutex_unlock(&priv->ptp_lock); + return valid; +} + static void nxp_c45_process_txts(struct nxp_c45_phy *priv, struct nxp_c45_hwts *txts) { @@ -448,6 +701,7 @@ static void nxp_c45_process_txts(struct nxp_c45_phy *priv, static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp) { struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps); + const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev); bool poll_txts = nxp_c45_poll_txts(priv->phydev); struct skb_shared_hwtstamps *shhwtstamps_rx; struct ptp_clock_event event; @@ -455,12 +709,12 @@ static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp) bool reschedule = false; struct timespec64 ts; struct sk_buff *skb; - bool txts_valid; + bool ts_valid; u32 ts_raw; while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) { - txts_valid = nxp_c45_get_hwtxts(priv, &hwts); - if (unlikely(!txts_valid)) { + ts_valid = data->get_egressts(priv, &hwts); + if (unlikely(!ts_valid)) { /* Still more skbs in the queue */ reschedule = true; break; } @@ -482,8 +736,8 @@ static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp) } if (priv->extts) { - nxp_c45_get_extts(priv, &ts); - if (timespec64_compare(&ts, &priv->extts_ts) != 0) { + ts_valid = data->get_extts(priv, &ts); + if (ts_valid && timespec64_compare(&ts, &priv->extts_ts) != 0) { priv->extts_ts = ts; event.index = priv->extts_index; event.type = PTP_CLOCK_EXTTS; @@ -508,6 +762,7 @@ static void nxp_c45_gpio_config(struct nxp_c45_phy *priv, static int nxp_c45_perout_enable(struct nxp_c45_phy *priv, struct ptp_perout_request *perout, int on) { + const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev); struct phy_device *phydev = priv->phydev; int pin; @@ -519,10 +774,10 @@ static int nxp_c45_perout_enable(struct nxp_c45_phy *priv, return pin; if (!on) { - phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG, - PPS_OUT_EN); - phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG, - PPS_OUT_POL); + nxp_c45_clear_reg_field(priv->phydev, + &regmap->pps_enable); + nxp_c45_clear_reg_field(priv->phydev, + &regmap->pps_polarity); nxp_c45_gpio_config(priv, pin, GPIO_DISABLE); @@ -551,23 +806,62 @@ static int nxp_c45_perout_enable(struct nxp_c45_phy *priv, } if (perout->phase.nsec == 0) - phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, - VEND1_PTP_CONFIG, PPS_OUT_POL); + nxp_c45_clear_reg_field(priv->phydev, + &regmap->pps_polarity); else - phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, - VEND1_PTP_CONFIG, PPS_OUT_POL); + nxp_c45_set_reg_field(priv->phydev, + &regmap->pps_polarity); } nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG); - phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG, PPS_OUT_EN); + nxp_c45_set_reg_field(priv->phydev, &regmap->pps_enable); return 0; } +static void nxp_c45_set_rising_or_falling(struct phy_device *phydev, + struct ptp_extts_request *extts) +{ + if (extts->flags & PTP_RISING_EDGE) + phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, + VEND1_PTP_CONFIG, EXT_TRG_EDGE); + + if (extts->flags & PTP_FALLING_EDGE) + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, + VEND1_PTP_CONFIG, EXT_TRG_EDGE); +} + +static void nxp_c45_set_rising_and_falling(struct phy_device *phydev, + struct ptp_extts_request *extts) +{ + /*
PTP_EXTTS_REQUEST may have only the PTP_ENABLE_FEATURE flag set. In + * this case external ts will be enabled on rising edge. + */ + if (extts->flags & PTP_RISING_EDGE || + extts->flags == PTP_ENABLE_FEATURE) + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, + TJA1120_SYNC_TRIG_FILTER, + PTP_TRIG_RISE_TS); + else + phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, + TJA1120_SYNC_TRIG_FILTER, + PTP_TRIG_RISE_TS); + + if (extts->flags & PTP_FALLING_EDGE) + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, + TJA1120_SYNC_TRIG_FILTER, + PTP_TRIG_FALLING_TS); + else + phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, + TJA1120_SYNC_TRIG_FILTER, + PTP_TRIG_FALLING_TS); +} + static int nxp_c45_extts_enable(struct nxp_c45_phy *priv, struct ptp_extts_request *extts, int on) { + const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev); int pin; if (extts->flags & ~(PTP_ENABLE_FEATURE | @@ -578,7 +872,8 @@ static int nxp_c45_extts_enable(struct nxp_c45_phy *priv, /* Sampling on both edges is not supported */ if ((extts->flags & PTP_RISING_EDGE) && - (extts->flags & PTP_FALLING_EDGE)) + (extts->flags & PTP_FALLING_EDGE) && + !data->ext_ts_both_edges) return -EOPNOTSUPP; pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index); @@ -592,13 +887,10 @@ static int nxp_c45_extts_enable(struct nxp_c45_phy *priv, return 0; } - if (extts->flags & PTP_RISING_EDGE) - phy_clear_bits_mmd(priv->phydev, MDIO_MMD_VEND1, - VEND1_PTP_CONFIG, EXT_TRG_EDGE); - - if (extts->flags & PTP_FALLING_EDGE) - phy_set_bits_mmd(priv->phydev, MDIO_MMD_VEND1, - VEND1_PTP_CONFIG, EXT_TRG_EDGE); + if (data->ext_ts_both_edges) + nxp_c45_set_rising_and_falling(priv->phydev, extts); + else + nxp_c45_set_rising_or_falling(priv->phydev, extts); nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG); priv->extts = true; @@ -735,6 +1027,7 @@ static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts, struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy, mii_ts); struct phy_device *phydev = priv->phydev; + const struct nxp_c45_phy_data *data; struct hwtstamp_config cfg; if (copy_from_user(&cfg, ifreq->ifr_data, sizeof(cfg))) @@ -743,6 +1036,7 @@ static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts, if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ON) return -ERANGE; + data = nxp_c45_get_data(phydev); priv->hwts_tx = cfg.tx_type; switch (cfg.rx_filter) { @@ -760,27 +1054,24 @@ static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts, } if (priv->hwts_rx || priv->hwts_tx) { - phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_EVENT_MSG_FILT, + phy_write_mmd(phydev, MDIO_MMD_VEND1, + data->regmap->vend1_event_msg_filt, EVENT_MSG_FILT_ALL); - phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, - VEND1_PORT_PTP_CONTROL, - PORT_PTP_CONTROL_BYPASS); + data->ptp_enable(phydev, true); } else { - phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_EVENT_MSG_FILT, + phy_write_mmd(phydev, MDIO_MMD_VEND1, + data->regmap->vend1_event_msg_filt, EVENT_MSG_FILT_NONE); - phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_PTP_CONTROL, - PORT_PTP_CONTROL_BYPASS); + data->ptp_enable(phydev, false); } if (nxp_c45_poll_txts(priv->phydev)) goto nxp_c45_no_ptp_irq; if (priv->hwts_tx) - phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, - VEND1_PTP_IRQ_EN, PTP_IRQ_EGR_TS); + nxp_c45_set_reg_field(phydev, &data->regmap->irq_egr_ts_en); else - phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, - VEND1_PTP_IRQ_EN, PTP_IRQ_EGR_TS); + nxp_c45_clear_reg_field(phydev, &data->regmap->irq_egr_ts_en); nxp_c45_no_ptp_irq: return copy_to_user(ifreq->ifr_data, &cfg, sizeof(cfg)) ? 
-EFAULT : 0; @@ -805,63 +1096,100 @@ static int nxp_c45_ts_info(struct mii_timestamper *mii_ts, return 0; } -static const struct nxp_c45_phy_stats nxp_c45_hw_stats[] = { - { "phy_symbol_error_cnt", MDIO_MMD_VEND1, - VEND1_SYMBOL_ERROR_COUNTER, 0, GENMASK(15, 0) }, - { "phy_link_status_drop_cnt", MDIO_MMD_VEND1, - VEND1_LINK_DROP_COUNTER, 8, GENMASK(13, 8) }, - { "phy_link_availability_drop_cnt", MDIO_MMD_VEND1, - VEND1_LINK_DROP_COUNTER, 0, GENMASK(5, 0) }, - { "phy_link_loss_cnt", MDIO_MMD_VEND1, - VEND1_LINK_LOSSES_AND_FAILURES, 10, GENMASK(15, 10) }, - { "phy_link_failure_cnt", MDIO_MMD_VEND1, - VEND1_LINK_LOSSES_AND_FAILURES, 0, GENMASK(9, 0) }, - { "r_good_frame_cnt", MDIO_MMD_VEND1, - VEND1_R_GOOD_FRAME_CNT, 0, GENMASK(15, 0) }, - { "r_bad_frame_cnt", MDIO_MMD_VEND1, - VEND1_R_BAD_FRAME_CNT, 0, GENMASK(15, 0) }, - { "r_rxer_frame_cnt", MDIO_MMD_VEND1, - VEND1_R_RXER_FRAME_CNT, 0, GENMASK(15, 0) }, - { "rx_preamble_count", MDIO_MMD_VEND1, - VEND1_RX_PREAMBLE_COUNT, 0, GENMASK(5, 0) }, - { "tx_preamble_count", MDIO_MMD_VEND1, - VEND1_TX_PREAMBLE_COUNT, 0, GENMASK(5, 0) }, - { "rx_ipg_length", MDIO_MMD_VEND1, - VEND1_RX_IPG_LENGTH, 0, GENMASK(8, 0) }, - { "tx_ipg_length", MDIO_MMD_VEND1, - VEND1_TX_IPG_LENGTH, 0, GENMASK(8, 0) }, +static const struct nxp_c45_phy_stats common_hw_stats[] = { + { "phy_link_status_drop_cnt", + NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 8, 6), }, + { "phy_link_availability_drop_cnt", + NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 0, 6), }, + { "phy_link_loss_cnt", + NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 10, 6), }, + { "phy_link_failure_cnt", + NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 0, 10), }, + { "phy_symbol_error_cnt", + NXP_C45_REG_FIELD(0x8350, MDIO_MMD_VEND1, 0, 16) }, +}; + +static const struct nxp_c45_phy_stats tja1103_hw_stats[] = { + { "rx_preamble_count", + NXP_C45_REG_FIELD(0xAFCE, MDIO_MMD_VEND1, 0, 6), }, + { "tx_preamble_count", + NXP_C45_REG_FIELD(0xAFCF, MDIO_MMD_VEND1, 0, 6), }, + { "rx_ipg_length", + NXP_C45_REG_FIELD(0xAFD0, MDIO_MMD_VEND1, 0, 9), }, + { "tx_ipg_length", + NXP_C45_REG_FIELD(0xAFD1, MDIO_MMD_VEND1, 0, 9), }, +}; + +static const struct nxp_c45_phy_stats tja1120_hw_stats[] = { + { "phy_symbol_error_cnt_ext", + NXP_C45_REG_FIELD(0x8351, MDIO_MMD_VEND1, 0, 14) }, + { "tx_frames_xtd", + NXP_C45_REG_FIELD(0xACA1, MDIO_MMD_VEND1, 0, 8), }, + { "tx_frames", + NXP_C45_REG_FIELD(0xACA0, MDIO_MMD_VEND1, 0, 16), }, + { "rx_frames_xtd", + NXP_C45_REG_FIELD(0xACA3, MDIO_MMD_VEND1, 0, 8), }, + { "rx_frames", + NXP_C45_REG_FIELD(0xACA2, MDIO_MMD_VEND1, 0, 16), }, + { "tx_lost_frames_xtd", + NXP_C45_REG_FIELD(0xACA5, MDIO_MMD_VEND1, 0, 8), }, + { "tx_lost_frames", + NXP_C45_REG_FIELD(0xACA4, MDIO_MMD_VEND1, 0, 16), }, + { "rx_lost_frames_xtd", + NXP_C45_REG_FIELD(0xACA7, MDIO_MMD_VEND1, 0, 8), }, + { "rx_lost_frames", + NXP_C45_REG_FIELD(0xACA6, MDIO_MMD_VEND1, 0, 16), }, }; static int nxp_c45_get_sset_count(struct phy_device *phydev) { - return ARRAY_SIZE(nxp_c45_hw_stats); + const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev); + + return ARRAY_SIZE(common_hw_stats) + (phy_data ? 
phy_data->n_stats : 0); } static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data) { + const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev); + size_t count = nxp_c45_get_sset_count(phydev); + size_t idx; size_t i; - for (i = 0; i < ARRAY_SIZE(nxp_c45_hw_stats); i++) { - strncpy(data + i * ETH_GSTRING_LEN, - nxp_c45_hw_stats[i].name, ETH_GSTRING_LEN); + for (i = 0; i < count; i++) { + if (i < ARRAY_SIZE(common_hw_stats)) { + strscpy(data + i * ETH_GSTRING_LEN, + common_hw_stats[i].name, ETH_GSTRING_LEN); + continue; + } + idx = i - ARRAY_SIZE(common_hw_stats); + strscpy(data + i * ETH_GSTRING_LEN, + phy_data->stats[idx].name, ETH_GSTRING_LEN); } } static void nxp_c45_get_stats(struct phy_device *phydev, struct ethtool_stats *stats, u64 *data) { + const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev); + size_t count = nxp_c45_get_sset_count(phydev); + const struct nxp_c45_reg_field *reg_field; + size_t idx; size_t i; int ret; - for (i = 0; i < ARRAY_SIZE(nxp_c45_hw_stats); i++) { - ret = phy_read_mmd(phydev, nxp_c45_hw_stats[i].mmd, - nxp_c45_hw_stats[i].reg); - if (ret < 0) { - data[i] = U64_MAX; + for (i = 0; i < count; i++) { + if (i < ARRAY_SIZE(common_hw_stats)) { + reg_field = &common_hw_stats[i].counter; } else { - data[i] = ret & nxp_c45_hw_stats[i].mask; - data[i] >>= nxp_c45_hw_stats[i].off; + idx = i - ARRAY_SIZE(common_hw_stats); + reg_field = &phy_data->stats[idx].counter; } + + ret = nxp_c45_read_reg_field(phydev, reg_field); + if (ret < 0) + data[i] = U64_MAX; + else + data[i] = ret; } } @@ -898,8 +1226,40 @@ static int nxp_c45_config_intr(struct phy_device *phydev) VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT); } +static int tja1103_config_intr(struct phy_device *phydev) +{ + int ret; + + /* We can't disable the FUSA IRQ for TJA1103, but we can clean it up. */ + ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_ALWAYS_ACCESSIBLE, + FUSA_PASS); + if (ret) + return ret; + + return nxp_c45_config_intr(phydev); +} + +static int tja1120_config_intr(struct phy_device *phydev) +{ + int ret; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, + TJA1120_GLOBAL_INFRA_IRQ_EN, + TJA1120_DEV_BOOT_DONE); + else + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, + TJA1120_GLOBAL_INFRA_IRQ_EN, + TJA1120_DEV_BOOT_DONE); + if (ret) + return ret; + + return nxp_c45_config_intr(phydev); +} + static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev) { + const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev); struct nxp_c45_phy *priv = phydev->priv; irqreturn_t ret = IRQ_NONE; struct nxp_c45_hwts hwts; @@ -913,18 +1273,23 @@ static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev) ret = IRQ_HANDLED; } - /* There is no need for ACK. - * The irq signal will be asserted until the EGR TS FIFO will be - * emptied. - */ - irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_IRQ_STATUS); - if (irq & PTP_IRQ_EGR_TS) { - while (nxp_c45_get_hwtxts(priv, &hwts)) + irq = nxp_c45_read_reg_field(phydev, &data->regmap->irq_egr_ts_status); + if (irq) { + /* If ack_ptp_irq is false, the IRQ bit is self-clear and will + * be cleared when the EGR TS FIFO is empty. 
Otherwise, the + * IRQ bit should be cleared before reading the timestamp. + */ + if (data->ack_ptp_irq) + phy_write_mmd(phydev, MDIO_MMD_VEND1, + VEND1_PTP_IRQ_ACK, EGR_TS_IRQ); + while (data->get_egressts(priv, &hwts)) nxp_c45_process_txts(priv, &hwts); ret = IRQ_HANDLED; } + data->nmi_handler(phydev, &ret); + return ret; } @@ -945,24 +1310,30 @@ static int nxp_c45_soft_reset(struct phy_device *phydev) static int nxp_c45_cable_test_start(struct phy_device *phydev) { - return phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST, - CABLE_TEST_ENABLE | CABLE_TEST_START); + const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev); + + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, + VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE); + return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test, + CABLE_TEST_ENABLE | CABLE_TEST_START); } static int nxp_c45_cable_test_get_status(struct phy_device *phydev, bool *finished) { + const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev); int ret; u8 cable_test_result; - ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST); - if (!(ret & CABLE_TEST_VALID)) { + ret = nxp_c45_read_reg_field(phydev, &regmap->cable_test_valid); + if (!ret) { *finished = false; return 0; } *finished = true; - cable_test_result = ret & GENMASK(2, 0); + cable_test_result = nxp_c45_read_reg_field(phydev, + &regmap->cable_test_result); switch (cable_test_result) { case CABLE_TEST_OK: @@ -982,78 +1353,14 @@ static int nxp_c45_cable_test_get_status(struct phy_device *phydev, ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC); } - phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST, + phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test, CABLE_TEST_ENABLE); + phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, + VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE); return nxp_c45_start_op(phydev); } -static int nxp_c45_setup_master_slave(struct phy_device *phydev) -{ - switch (phydev->master_slave_set) { - case MASTER_SLAVE_CFG_MASTER_FORCE: - case MASTER_SLAVE_CFG_MASTER_PREFERRED: - phy_write_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL, - MASTER_MODE); - break; - case MASTER_SLAVE_CFG_SLAVE_PREFERRED: - case MASTER_SLAVE_CFG_SLAVE_FORCE: - phy_write_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL, - SLAVE_MODE); - break; - case MASTER_SLAVE_CFG_UNKNOWN: - case MASTER_SLAVE_CFG_UNSUPPORTED: - return 0; - default: - phydev_warn(phydev, "Unsupported Master/Slave mode\n"); - return -EOPNOTSUPP; - } - - return 0; -} - -static int nxp_c45_read_master_slave(struct phy_device *phydev) -{ - int reg; - - phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN; - phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN; - - reg = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL); - if (reg < 0) - return reg; - - if (reg & B100T1_PMAPMD_MASTER) { - phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_FORCE; - phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER; - } else { - phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_FORCE; - phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE; - } - - return 0; -} - -static int nxp_c45_config_aneg(struct phy_device *phydev) -{ - return nxp_c45_setup_master_slave(phydev); -} - -static int nxp_c45_read_status(struct phy_device *phydev) -{ - int ret; - - ret = genphy_c45_read_status(phydev); - if (ret) - return ret; - - ret = nxp_c45_read_master_slave(phydev); - if (ret) - return ret; - - return 0; -} - static int nxp_c45_get_sqi(struct phy_device *phydev) { int reg; @@ -1067,6 +1374,19 @@ static int nxp_c45_get_sqi(struct
phy_device *phydev) return reg; } +static void tja1120_link_change_notify(struct phy_device *phydev) +{ + /* Bug workaround for TJA1120 engineering samples: fix egress + * timestamps lost after link recovery. + */ + if (phydev->state == PHY_NOLINK) { + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, + TJA1120_EPHY_RESETS, EPHY_PCS_RESET); + phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, + TJA1120_EPHY_RESETS, EPHY_PCS_RESET); + } +} + static int nxp_c45_get_sqi_max(struct phy_device *phydev) { return MAX_SQI; @@ -1087,6 +1407,28 @@ static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay) return 0; } +static void nxp_c45_counters_enable(struct phy_device *phydev) +{ + const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev); + + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER, + COUNTER_EN); + + data->counters_enable(phydev); +} + +static void nxp_c45_ptp_init(struct phy_device *phydev) +{ + const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev); + + phy_write_mmd(phydev, MDIO_MMD_VEND1, + data->regmap->vend1_ptp_clk_period, + data->ptp_clk_period); + nxp_c45_clear_reg_field(phydev, &data->regmap->ltc_lock_ctrl); + + data->ptp_init(phydev); +} + static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw) { /* The delay in degree phase is 73.8 + phase_offset_raw * 0.9. @@ -1264,35 +1606,26 @@ static int nxp_c45_config_init(struct phy_device *phydev) phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG, PHY_CONFIG_AUTO); - phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER, - COUNTER_EN); - phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT, - COUNTER_EN); - phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT, - COUNTER_EN); - phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH, - COUNTER_EN); - phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH, - COUNTER_EN); - ret = nxp_c45_set_phy_mode(phydev); if (ret) return ret; phydev->autoneg = AUTONEG_DISABLE; - phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CLK_PERIOD, - PTP_CLK_PERIOD_100BT1); - phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_HW_LTC_LOCK_CTRL, - HW_LTC_LOCK_EN); - phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL, - RX_TS_INSRT_MODE2); - phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES, - PTP_ENABLE); + nxp_c45_counters_enable(phydev); + nxp_c45_ptp_init(phydev); return nxp_c45_start_op(phydev); } +static int nxp_c45_get_features(struct phy_device *phydev) +{ + linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, phydev->supported); + + return genphy_c45_pma_read_abilities(phydev); +} + static int nxp_c45_probe(struct phy_device *phydev) { struct nxp_c45_phy *priv; @@ -1348,18 +1681,274 @@ static void nxp_c45_remove(struct phy_device *phydev) skb_queue_purge(&priv->rx_queue); } +static void tja1103_counters_enable(struct phy_device *phydev) +{ + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT, + COUNTER_EN); + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT, + COUNTER_EN); + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH, + COUNTER_EN); + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH, + COUNTER_EN); +} + +static void tja1103_ptp_init(struct phy_device *phydev) +{ + phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL, + TJA1103_RX_TS_INSRT_MODE2); + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES, + PTP_ENABLE); +} + +static void tja1103_ptp_enable(struct phy_device
*phydev, bool enable) +{ + if (enable) + phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, + VEND1_PORT_PTP_CONTROL, + PORT_PTP_CONTROL_BYPASS); + else + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, + VEND1_PORT_PTP_CONTROL, + PORT_PTP_CONTROL_BYPASS); +} + +static void tja1103_nmi_handler(struct phy_device *phydev, + irqreturn_t *irq_status) +{ + int ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, + VEND1_ALWAYS_ACCESSIBLE); + if (ret & FUSA_PASS) { + phy_write_mmd(phydev, MDIO_MMD_VEND1, + VEND1_ALWAYS_ACCESSIBLE, + FUSA_PASS); + *irq_status = IRQ_HANDLED; + } +} + +static const struct nxp_c45_regmap tja1103_regmap = { + .vend1_ptp_clk_period = 0x1104, + .vend1_event_msg_filt = 0x1148, + .pps_enable = + NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 3, 1), + .pps_polarity = + NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 2, 1), + .ltc_lock_ctrl = + NXP_C45_REG_FIELD(0x1115, MDIO_MMD_VEND1, 0, 1), + .ltc_read = + NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 2, 1), + .ltc_write = + NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 0, 1), + .vend1_ltc_wr_nsec_0 = 0x1106, + .vend1_ltc_wr_nsec_1 = 0x1107, + .vend1_ltc_wr_sec_0 = 0x1108, + .vend1_ltc_wr_sec_1 = 0x1109, + .vend1_ltc_rd_nsec_0 = 0x110A, + .vend1_ltc_rd_nsec_1 = 0x110B, + .vend1_ltc_rd_sec_0 = 0x110C, + .vend1_ltc_rd_sec_1 = 0x110D, + .vend1_rate_adj_subns_0 = 0x110F, + .vend1_rate_adj_subns_1 = 0x1110, + .irq_egr_ts_en = + NXP_C45_REG_FIELD(0x1131, MDIO_MMD_VEND1, 0, 1), + .irq_egr_ts_status = + NXP_C45_REG_FIELD(0x1132, MDIO_MMD_VEND1, 0, 1), + .domain_number = + NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 0, 8), + .msg_type = + NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 8, 4), + .sequence_id = + NXP_C45_REG_FIELD(0x114F, MDIO_MMD_VEND1, 0, 16), + .sec_1_0 = + NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 14, 2), + .sec_4_2 = + NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 12, 3), + .nsec_15_0 = + NXP_C45_REG_FIELD(0x1150, MDIO_MMD_VEND1, 0, 16), + .nsec_29_16 = + NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 0, 14), + .vend1_ext_trg_data_0 = 0x1121, + .vend1_ext_trg_data_1 = 0x1122, + .vend1_ext_trg_data_2 = 0x1123, + .vend1_ext_trg_data_3 = 0x1124, + .vend1_ext_trg_ctrl = 0x1126, + .cable_test = 0x8330, + .cable_test_valid = + NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 13, 1), + .cable_test_result = + NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 0, 3), +}; + +static const struct nxp_c45_phy_data tja1103_phy_data = { + .regmap = &tja1103_regmap, + .stats = tja1103_hw_stats, + .n_stats = ARRAY_SIZE(tja1103_hw_stats), + .ptp_clk_period = PTP_CLK_PERIOD_100BT1, + .ext_ts_both_edges = false, + .ack_ptp_irq = false, + .counters_enable = tja1103_counters_enable, + .get_egressts = nxp_c45_get_hwtxts, + .get_extts = nxp_c45_get_extts, + .ptp_init = tja1103_ptp_init, + .ptp_enable = tja1103_ptp_enable, + .nmi_handler = tja1103_nmi_handler, +}; + +static void tja1120_counters_enable(struct phy_device *phydev) +{ + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_SYMBOL_ERROR_CNT_XTD, + EXTENDED_CNT_EN); + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_STATUS, + MONITOR_RESET); + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_CONFIG, + ALL_FRAMES_CNT_EN | LOST_FRAMES_CNT_EN); +} + +static void tja1120_ptp_init(struct phy_device *phydev) +{ + phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_RX_TS_INSRT_CTRL, + TJA1120_RX_TS_INSRT_EN | TJA1120_TS_INSRT_MODE); + phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_VEND1_EXT_TS_MODE, + TJA1120_TS_INSRT_MODE); + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONFIG, + PTP_ENABLE); +} + +static void tja1120_ptp_enable(struct 
phy_device *phydev, bool enable) +{ + if (enable) + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, + VEND1_PORT_FUNC_ENABLES, + PTP_ENABLE); + else + phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, + VEND1_PORT_FUNC_ENABLES, + PTP_ENABLE); +} + +static void tja1120_nmi_handler(struct phy_device *phydev, + irqreturn_t *irq_status) +{ + int ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, + TJA1120_GLOBAL_INFRA_IRQ_STATUS); + if (ret & TJA1120_DEV_BOOT_DONE) { + phy_write_mmd(phydev, MDIO_MMD_VEND1, + TJA1120_GLOBAL_INFRA_IRQ_ACK, + TJA1120_DEV_BOOT_DONE); + *irq_status = IRQ_HANDLED; + } +} + +static const struct nxp_c45_regmap tja1120_regmap = { + .vend1_ptp_clk_period = 0x1020, + .vend1_event_msg_filt = 0x9010, + .pps_enable = + NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 4, 1), + .pps_polarity = + NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 5, 1), + .ltc_lock_ctrl = + NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 2, 1), + .ltc_read = + NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 1, 1), + .ltc_write = + NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 2, 1), + .vend1_ltc_wr_nsec_0 = 0x1040, + .vend1_ltc_wr_nsec_1 = 0x1041, + .vend1_ltc_wr_sec_0 = 0x1042, + .vend1_ltc_wr_sec_1 = 0x1043, + .vend1_ltc_rd_nsec_0 = 0x1048, + .vend1_ltc_rd_nsec_1 = 0x1049, + .vend1_ltc_rd_sec_0 = 0x104A, + .vend1_ltc_rd_sec_1 = 0x104B, + .vend1_rate_adj_subns_0 = 0x1030, + .vend1_rate_adj_subns_1 = 0x1031, + .irq_egr_ts_en = + NXP_C45_REG_FIELD(0x900A, MDIO_MMD_VEND1, 1, 1), + .irq_egr_ts_status = + NXP_C45_REG_FIELD(0x900C, MDIO_MMD_VEND1, 1, 1), + .domain_number = + NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 8, 8), + .msg_type = + NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 4, 4), + .sequence_id = + NXP_C45_REG_FIELD(0x9062, MDIO_MMD_VEND1, 0, 16), + .sec_1_0 = + NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 0, 2), + .sec_4_2 = + NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 2, 3), + .nsec_15_0 = + NXP_C45_REG_FIELD(0x9063, MDIO_MMD_VEND1, 0, 16), + .nsec_29_16 = + NXP_C45_REG_FIELD(0x9064, MDIO_MMD_VEND1, 0, 14), + .vend1_ext_trg_data_0 = 0x1071, + .vend1_ext_trg_data_1 = 0x1072, + .vend1_ext_trg_data_2 = 0x1073, + .vend1_ext_trg_data_3 = 0x1074, + .vend1_ext_trg_ctrl = 0x1075, + .cable_test = 0x8360, + .cable_test_valid = + NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 15, 1), + .cable_test_result = + NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 0, 3), +}; + +static const struct nxp_c45_phy_data tja1120_phy_data = { + .regmap = &tja1120_regmap, + .stats = tja1120_hw_stats, + .n_stats = ARRAY_SIZE(tja1120_hw_stats), + .ptp_clk_period = PTP_CLK_PERIOD_1000BT1, + .ext_ts_both_edges = true, + .ack_ptp_irq = true, + .counters_enable = tja1120_counters_enable, + .get_egressts = tja1120_get_hwtxts, + .get_extts = tja1120_get_extts, + .ptp_init = tja1120_ptp_init, + .ptp_enable = tja1120_ptp_enable, + .nmi_handler = tja1120_nmi_handler, +}; + static struct phy_driver nxp_c45_driver[] = { { PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103), .name = "NXP C45 TJA1103", - .features = PHY_BASIC_T1_FEATURES, + .get_features = nxp_c45_get_features, + .driver_data = &tja1103_phy_data, + .probe = nxp_c45_probe, + .soft_reset = nxp_c45_soft_reset, + .config_aneg = genphy_c45_config_aneg, + .config_init = nxp_c45_config_init, + .config_intr = tja1103_config_intr, + .handle_interrupt = nxp_c45_handle_interrupt, + .read_status = genphy_c45_read_status, + .suspend = genphy_c45_pma_suspend, + .resume = genphy_c45_pma_resume, + .get_sset_count = nxp_c45_get_sset_count, + .get_strings = nxp_c45_get_strings, + .get_stats = nxp_c45_get_stats, + .cable_test_start = nxp_c45_cable_test_start, 
+ .cable_test_get_status = nxp_c45_cable_test_get_status, + .set_loopback = genphy_c45_loopback, + .get_sqi = nxp_c45_get_sqi, + .get_sqi_max = nxp_c45_get_sqi_max, + .remove = nxp_c45_remove, + }, + { + PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120), + .name = "NXP C45 TJA1120", + .get_features = nxp_c45_get_features, + .driver_data = &tja1120_phy_data, .probe = nxp_c45_probe, .soft_reset = nxp_c45_soft_reset, - .config_aneg = nxp_c45_config_aneg, + .config_aneg = genphy_c45_config_aneg, .config_init = nxp_c45_config_init, - .config_intr = nxp_c45_config_intr, + .config_intr = tja1120_config_intr, .handle_interrupt = nxp_c45_handle_interrupt, - .read_status = nxp_c45_read_status, + .read_status = genphy_c45_read_status, + .link_change_notify = tja1120_link_change_notify, .suspend = genphy_c45_pma_suspend, .resume = genphy_c45_pma_resume, .get_sset_count = nxp_c45_get_sset_count, @@ -1378,6 +1967,7 @@ module_phy_driver(nxp_c45_driver); static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = { { PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) }, + { PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120) }, { /*sentinel*/ }, }; diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c index b13e15310feb..a71399965142 100644 --- a/drivers/net/phy/nxp-tja11xx.c +++ b/drivers/net/phy/nxp-tja11xx.c @@ -414,10 +414,8 @@ static void tja11xx_get_strings(struct phy_device *phydev, u8 *data) { int i; - for (i = 0; i < ARRAY_SIZE(tja11xx_hw_stats); i++) { - strncpy(data + i * ETH_GSTRING_LEN, - tja11xx_hw_stats[i].string, ETH_GSTRING_LEN); - } + for (i = 0; i < ARRAY_SIZE(tja11xx_hw_stats); i++) + ethtool_sprintf(&data, "%s", tja11xx_hw_stats[i].string); } static void tja11xx_get_stats(struct phy_device *phydev, diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index 93ed07223377..8e6fd4962c48 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -108,7 +108,7 @@ EXPORT_SYMBOL_GPL(genphy_c45_pma_baset1_setup_master_slave); */ int genphy_c45_pma_setup_forced(struct phy_device *phydev) { - int ctrl1, ctrl2, ret; + int bt1_ctrl, ctrl1, ctrl2, ret; /* Half duplex is not supported */ if (phydev->duplex != DUPLEX_FULL) @@ -176,6 +176,15 @@ int genphy_c45_pma_setup_forced(struct phy_device *phydev) ret = genphy_c45_pma_baset1_setup_master_slave(phydev); if (ret < 0) return ret; + + bt1_ctrl = 0; + if (phydev->speed == SPEED_1000) + bt1_ctrl = MDIO_PMA_PMD_BT1_CTRL_STRAP_B1000; + + ret = phy_modify_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL, + MDIO_PMA_PMD_BT1_CTRL_STRAP, bt1_ctrl); + if (ret < 0) + return ret; } return genphy_c45_an_disable_aneg(phydev); @@ -873,6 +882,44 @@ int genphy_c45_an_config_eee_aneg(struct phy_device *phydev) } /** + * genphy_c45_pma_baset1_read_abilities - read supported baset1 link modes from PMA + * @phydev: target phy_device struct + * + * Read the supported link modes from the extended BASE-T1 ability register + */ +int genphy_c45_pma_baset1_read_abilities(struct phy_device *phydev) +{ + int val; + + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1); + if (val < 0) + return val; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, + phydev->supported, + val & MDIO_PMA_PMD_BT1_B10L_ABLE); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT, + phydev->supported, + val & MDIO_PMA_PMD_BT1_B100_ABLE); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT1_Full_BIT, + phydev->supported, + val & MDIO_PMA_PMD_BT1_B1000_ABLE); + + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_STAT); + if (val < 0) + return val; + + 
linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + phydev->supported, + val & MDIO_AN_STAT1_ABLE); + + return 0; +} +EXPORT_SYMBOL_GPL(genphy_c45_pma_baset1_read_abilities); + +/** * genphy_c45_pma_read_abilities - read supported link modes from PMA * @phydev: target phy_device struct * @@ -968,21 +1015,9 @@ int genphy_c45_pma_read_abilities(struct phy_device *phydev) } if (val & MDIO_PMA_EXTABLE_BT1) { - val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1); + val = genphy_c45_pma_baset1_read_abilities(phydev); if (val < 0) return val; - - linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, - phydev->supported, - val & MDIO_PMA_PMD_BT1_B10L_ABLE); - - val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_STAT); - if (val < 0) - return val; - - linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, - phydev->supported, - val & MDIO_AN_STAT1_ABLE); } } diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index a64186dc53f8..966c93cbe616 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -142,6 +142,8 @@ int phy_interface_num_ports(phy_interface_t interface) case PHY_INTERFACE_MODE_QSGMII: case PHY_INTERFACE_MODE_QUSGMII: return 4; + case PHY_INTERFACE_MODE_PSGMII: + return 5; case PHY_INTERFACE_MODE_MAX: WARN_ONCE(1, "PHY_INTERFACE_MODE_MAX isn't a valid interface mode"); return 0; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index a9ecfdd19624..a5fa077650e8 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -456,6 +456,40 @@ int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd) EXPORT_SYMBOL(phy_do_ioctl_running); /** + * __phy_hwtstamp_get - Get hardware timestamping configuration from PHY + * + * @phydev: the PHY device structure + * @config: structure holding the timestamping configuration + * + * Query the PHY device for its current hardware timestamping configuration. + */ +int __phy_hwtstamp_get(struct phy_device *phydev, + struct kernel_hwtstamp_config *config) +{ + if (!phydev) + return -ENODEV; + + return phy_mii_ioctl(phydev, config->ifr, SIOCGHWTSTAMP); +} + +/** + * __phy_hwtstamp_set - Modify PHY hardware timestamping configuration + * + * @phydev: the PHY device structure + * @config: structure holding the timestamping configuration + * @extack: netlink extended ack structure, for error reporting + */ +int __phy_hwtstamp_set(struct phy_device *phydev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + if (!phydev) + return -ENODEV; + + return phy_mii_ioctl(phydev, config->ifr, SIOCSHWTSTAMP); +} + +/** * phy_queue_state_machine - Trigger the state machine to run soon * * @phydev: the phy_device struct @@ -947,7 +981,7 @@ static int phy_check_link_status(struct phy_device *phydev) * If the PHYCONTROL Layer is operating, we change the state to * reflect the beginning of Auto-negotiation or forcing. 
*/ -static int _phy_start_aneg(struct phy_device *phydev) +int _phy_start_aneg(struct phy_device *phydev) { int err; @@ -968,6 +1002,7 @@ static int _phy_start_aneg(struct phy_device *phydev) return err; } +EXPORT_SYMBOL(_phy_start_aneg); /** * phy_start_aneg - start auto-negotiation for this PHY device @@ -1197,9 +1232,7 @@ static void phy_error_precise(struct phy_device *phydev, const void *func, int err) { WARN(1, "%pS: returned: %d\n", func, err); - mutex_lock(&phydev->lock); phy_process_error(phydev); - mutex_unlock(&phydev->lock); } /** @@ -1321,6 +1354,113 @@ void phy_free_interrupt(struct phy_device *phydev) } EXPORT_SYMBOL(phy_free_interrupt); +enum phy_state_work { + PHY_STATE_WORK_NONE, + PHY_STATE_WORK_ANEG, + PHY_STATE_WORK_SUSPEND, +}; + +static enum phy_state_work _phy_state_machine(struct phy_device *phydev) +{ + enum phy_state_work state_work = PHY_STATE_WORK_NONE; + struct net_device *dev = phydev->attached_dev; + enum phy_state old_state = phydev->state; + const void *func = NULL; + bool finished = false; + int err = 0; + + switch (phydev->state) { + case PHY_DOWN: + case PHY_READY: + break; + case PHY_UP: + state_work = PHY_STATE_WORK_ANEG; + break; + case PHY_NOLINK: + case PHY_RUNNING: + err = phy_check_link_status(phydev); + func = &phy_check_link_status; + break; + case PHY_CABLETEST: + err = phydev->drv->cable_test_get_status(phydev, &finished); + if (err) { + phy_abort_cable_test(phydev); + netif_testing_off(dev); + state_work = PHY_STATE_WORK_ANEG; + phydev->state = PHY_UP; + break; + } + + if (finished) { + ethnl_cable_test_finished(phydev); + netif_testing_off(dev); + state_work = PHY_STATE_WORK_ANEG; + phydev->state = PHY_UP; + } + break; + case PHY_HALTED: + case PHY_ERROR: + if (phydev->link) { + phydev->link = 0; + phy_link_down(phydev); + } + state_work = PHY_STATE_WORK_SUSPEND; + break; + } + + if (state_work == PHY_STATE_WORK_ANEG) { + err = _phy_start_aneg(phydev); + func = &_phy_start_aneg; + } + + if (err == -ENODEV) + return state_work; + + if (err < 0) + phy_error_precise(phydev, func, err); + + phy_process_state_change(phydev, old_state); + + /* Only re-schedule a PHY state machine change if we are polling the + * PHY, if PHY_MAC_INTERRUPT is set, then we will be moving + * between states from phy_mac_interrupt(). + * + * In state PHY_HALTED the PHY gets suspended, so rescheduling the + * state machine would be pointless and possibly error prone when + * called from phy_disconnect() synchronously. 
+ */ + if (phy_polling_mode(phydev) && phy_is_started(phydev)) + phy_queue_state_machine(phydev, PHY_STATE_TIME); + + return state_work; +} + +/* unlocked part of the PHY state machine */ +static void _phy_state_machine_post_work(struct phy_device *phydev, + enum phy_state_work state_work) +{ + if (state_work == PHY_STATE_WORK_SUSPEND) + phy_suspend(phydev); +} + +/** + * phy_state_machine - Handle the state machine + * @work: work_struct that describes the work to be done + */ +void phy_state_machine(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct phy_device *phydev = + container_of(dwork, struct phy_device, state_queue); + enum phy_state_work state_work; + + mutex_lock(&phydev->lock); + state_work = _phy_state_machine(phydev); + mutex_unlock(&phydev->lock); + + _phy_state_machine_post_work(phydev, state_work); +} + /** * phy_stop - Bring down the PHY link, and stop checking the status * @phydev: target phy_device struct @@ -1328,6 +1468,7 @@ EXPORT_SYMBOL(phy_free_interrupt); void phy_stop(struct phy_device *phydev) { struct net_device *dev = phydev->attached_dev; + enum phy_state_work state_work; enum phy_state old_state; if (!phy_is_started(phydev) && phydev->state != PHY_DOWN && @@ -1351,9 +1492,10 @@ void phy_stop(struct phy_device *phydev) phydev->state = PHY_HALTED; phy_process_state_change(phydev, old_state); + state_work = _phy_state_machine(phydev); mutex_unlock(&phydev->lock); - phy_state_machine(&phydev->state_queue.work); + _phy_state_machine_post_work(phydev, state_work); phy_stop_machine(phydev); /* Cannot call flush_scheduled_work() here as desired because @@ -1398,97 +1540,6 @@ out: EXPORT_SYMBOL(phy_start); /** - * phy_state_machine - Handle the state machine - * @work: work_struct that describes the work to be done - */ -void phy_state_machine(struct work_struct *work) -{ - struct delayed_work *dwork = to_delayed_work(work); - struct phy_device *phydev = - container_of(dwork, struct phy_device, state_queue); - struct net_device *dev = phydev->attached_dev; - bool needs_aneg = false, do_suspend = false; - enum phy_state old_state; - const void *func = NULL; - bool finished = false; - int err = 0; - - mutex_lock(&phydev->lock); - - old_state = phydev->state; - - switch (phydev->state) { - case PHY_DOWN: - case PHY_READY: - break; - case PHY_UP: - needs_aneg = true; - - break; - case PHY_NOLINK: - case PHY_RUNNING: - err = phy_check_link_status(phydev); - func = &phy_check_link_status; - break; - case PHY_CABLETEST: - err = phydev->drv->cable_test_get_status(phydev, &finished); - if (err) { - phy_abort_cable_test(phydev); - netif_testing_off(dev); - needs_aneg = true; - phydev->state = PHY_UP; - break; - } - - if (finished) { - ethnl_cable_test_finished(phydev); - netif_testing_off(dev); - needs_aneg = true; - phydev->state = PHY_UP; - } - break; - case PHY_HALTED: - case PHY_ERROR: - if (phydev->link) { - phydev->link = 0; - phy_link_down(phydev); - } - do_suspend = true; - break; - } - - mutex_unlock(&phydev->lock); - - if (needs_aneg) { - err = phy_start_aneg(phydev); - func = &phy_start_aneg; - } else if (do_suspend) { - phy_suspend(phydev); - } - - if (err == -ENODEV) - return; - - if (err < 0) - phy_error_precise(phydev, func, err); - - phy_process_state_change(phydev, old_state); - - /* Only re-schedule a PHY state machine change if we are polling the - * PHY, if PHY_MAC_INTERRUPT is set, then we will be moving - * between states from phy_mac_interrupt(). 
- * - * In state PHY_HALTED the PHY gets suspended, so rescheduling the - * state machine would be pointless and possibly error prone when - * called from phy_disconnect() synchronously. - */ - mutex_lock(&phydev->lock); - if (phy_polling_mode(phydev) && phy_is_started(phydev)) - phy_queue_state_machine(phydev, PHY_STATE_TIME); - mutex_unlock(&phydev->lock); -} - -/** * phy_mac_interrupt - MAC says the link has changed * @phydev: phy_device struct with changed link * diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index c7cf61fe41cf..a42df2c1bd04 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -27,9 +27,11 @@ #include <linux/of.h> #include <linux/netdevice.h> #include <linux/phy.h> +#include <linux/phylib_stubs.h> #include <linux/phy_led_triggers.h> #include <linux/pse-pd/pse.h> #include <linux/property.h> +#include <linux/rtnetlink.h> #include <linux/sfp.h> #include <linux/skbuff.h> #include <linux/slab.h> @@ -1487,8 +1489,6 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, if (phydev->sfp_bus_attached) dev->sfp_bus = phydev->sfp_bus; - else if (dev->sfp_bus) - phydev->is_on_sfp_module = true; } /* Some Ethernet drivers try to connect to a PHY device before @@ -1548,7 +1548,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, goto error; phy_resume(phydev); - phy_led_triggers_register(phydev); + if (!phydev->is_on_sfp_module) + phy_led_triggers_register(phydev); /** * If the external phy used by current mac interface is managed by @@ -1817,7 +1818,8 @@ void phy_detach(struct phy_device *phydev) } phydev->phylink = NULL; - phy_led_triggers_unregister(phydev); + if (!phydev->is_on_sfp_module) + phy_led_triggers_unregister(phydev); if (phydev->mdio.dev.driver) module_put(phydev->mdio.dev.driver->owner); @@ -3020,6 +3022,61 @@ static int phy_led_blink_set(struct led_classdev *led_cdev, return err; } +static __maybe_unused struct device * +phy_led_hw_control_get_device(struct led_classdev *led_cdev) +{ + struct phy_led *phyled = to_phy_led(led_cdev); + struct phy_device *phydev = phyled->phydev; + + if (phydev->attached_dev) + return &phydev->attached_dev->dev; + return NULL; +} + +static int __maybe_unused +phy_led_hw_control_get(struct led_classdev *led_cdev, + unsigned long *rules) +{ + struct phy_led *phyled = to_phy_led(led_cdev); + struct phy_device *phydev = phyled->phydev; + int err; + + mutex_lock(&phydev->lock); + err = phydev->drv->led_hw_control_get(phydev, phyled->index, rules); + mutex_unlock(&phydev->lock); + + return err; +} + +static int __maybe_unused +phy_led_hw_control_set(struct led_classdev *led_cdev, + unsigned long rules) +{ + struct phy_led *phyled = to_phy_led(led_cdev); + struct phy_device *phydev = phyled->phydev; + int err; + + mutex_lock(&phydev->lock); + err = phydev->drv->led_hw_control_set(phydev, phyled->index, rules); + mutex_unlock(&phydev->lock); + + return err; +} + +static __maybe_unused int phy_led_hw_is_supported(struct led_classdev *led_cdev, + unsigned long rules) +{ + struct phy_led *phyled = to_phy_led(led_cdev); + struct phy_device *phydev = phyled->phydev; + int err; + + mutex_lock(&phydev->lock); + err = phydev->drv->led_hw_is_supported(phydev, phyled->index, rules); + mutex_unlock(&phydev->lock); + + return err; +} + static void phy_leds_unregister(struct phy_device *phydev) { struct phy_led *phyled; @@ -3057,6 +3114,19 @@ static int of_phy_led(struct phy_device *phydev, cdev->brightness_set_blocking = phy_led_set_brightness; if 
(phydev->drv->led_blink_set) cdev->blink_set = phy_led_blink_set; + +#ifdef CONFIG_LEDS_TRIGGERS + if (phydev->drv->led_hw_is_supported && + phydev->drv->led_hw_control_set && + phydev->drv->led_hw_control_get) { + cdev->hw_control_is_supported = phy_led_hw_is_supported; + cdev->hw_control_set = phy_led_hw_control_set; + cdev->hw_control_get = phy_led_hw_control_get; + cdev->hw_control_trigger = "netdev"; + } + + cdev->hw_control_get_device = phy_led_hw_control_get_device; +#endif cdev->max_brightness = 1; init_data.devicename = dev_name(&phydev->mdio.dev); init_data.fwnode = of_fwnode_handle(led); @@ -3438,11 +3508,29 @@ static const struct ethtool_phy_ops phy_ethtool_phy_ops = { .start_cable_test_tdr = phy_start_cable_test_tdr, }; +static const struct phylib_stubs __phylib_stubs = { + .hwtstamp_get = __phy_hwtstamp_get, + .hwtstamp_set = __phy_hwtstamp_set, +}; + +static void phylib_register_stubs(void) +{ + phylib_stubs = &__phylib_stubs; +} + +static void phylib_unregister_stubs(void) +{ + phylib_stubs = NULL; +} + static int __init phy_init(void) { int rc; + rtnl_lock(); ethtool_set_ethtool_phy_ops(&phy_ethtool_phy_ops); + phylib_register_stubs(); + rtnl_unlock(); rc = mdio_bus_init(); if (rc) @@ -3465,7 +3553,10 @@ err_c45: err_mdio_bus: mdio_bus_exit(); err_ethtool_phy_ops: + rtnl_lock(); + phylib_unregister_stubs(); ethtool_set_ethtool_phy_ops(NULL); + rtnl_unlock(); return rc; } @@ -3475,7 +3566,10 @@ static void __exit phy_exit(void) phy_driver_unregister(&genphy_c45_driver); phy_driver_unregister(&genphy_driver); mdio_bus_exit(); + rtnl_lock(); + phylib_unregister_stubs(); ethtool_set_ethtool_phy_ops(NULL); + rtnl_unlock(); } subsys_initcall(phy_init); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index d0aaa5cad853..25c19496a336 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -34,6 +34,10 @@ enum { PHYLINK_DISABLE_STOPPED, PHYLINK_DISABLE_LINK, PHYLINK_DISABLE_MAC_WOL, + + PCS_STATE_DOWN = 0, + PCS_STATE_STARTING, + PCS_STATE_STARTED, }; /** @@ -72,6 +76,7 @@ struct phylink { struct phylink_link_state phy_state; struct work_struct resolve; unsigned int pcs_neg_mode; + unsigned int pcs_state; bool mac_link_dropped; bool using_mac_select_pcs; @@ -205,6 +210,7 @@ static int phylink_interface_max_speed(phy_interface_t interface) case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_PSGMII: case PHY_INTERFACE_MODE_QSGMII: case PHY_INTERFACE_MODE_QUSGMII: case PHY_INTERFACE_MODE_SGMII: @@ -251,7 +257,8 @@ static int phylink_interface_max_speed(phy_interface_t interface) * Set all possible pause, speed and duplex linkmodes in @linkmodes that are * supported by the @caps. @linkmodes must have been initialised previously. 
*/ -void phylink_caps_to_linkmodes(unsigned long *linkmodes, unsigned long caps) +static void phylink_caps_to_linkmodes(unsigned long *linkmodes, + unsigned long caps) { if (caps & MAC_SYM_PAUSE) __set_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes); @@ -394,7 +401,6 @@ void phylink_caps_to_linkmodes(unsigned long *linkmodes, unsigned long caps) __set_bit(ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT, linkmodes); } } -EXPORT_SYMBOL_GPL(phylink_caps_to_linkmodes); static struct { unsigned long mask; @@ -421,6 +427,24 @@ }; /** + * phylink_limit_mac_speed - limit the phylink_config to a maximum speed + * @config: pointer to a &struct phylink_config + * @max_speed: maximum speed + * + * Mask off MAC capabilities for speeds higher than the @max_speed parameter. + * Any further modifications of config.mac_capabilities will override this. + */ +void phylink_limit_mac_speed(struct phylink_config *config, u32 max_speed) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(phylink_caps_params) && + phylink_caps_params[i].speed > max_speed; i++) + config->mac_capabilities &= ~phylink_caps_params[i].mask; +} +EXPORT_SYMBOL_GPL(phylink_limit_mac_speed); + +/** * phylink_cap_from_speed_duplex - Get mac capability from speed/duplex * @speed: the speed to search for * @duplex: the duplex to search for @@ -453,9 +477,9 @@ static unsigned long phylink_cap_from_speed_duplex(int speed, * Get the MAC capabilities that are supported by the @interface mode and * @mac_capabilities. */ -unsigned long phylink_get_capabilities(phy_interface_t interface, - unsigned long mac_capabilities, - int rate_matching) +static unsigned long phylink_get_capabilities(phy_interface_t interface, + unsigned long mac_capabilities, + int rate_matching) { int max_speed = phylink_interface_max_speed(interface); unsigned long caps = MAC_SYM_PAUSE | MAC_ASYM_PAUSE; @@ -470,6 +494,7 @@ unsigned long phylink_get_capabilities(phy_interface_t interface, case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_PSGMII: case PHY_INTERFACE_MODE_QSGMII: case PHY_INTERFACE_MODE_QUSGMII: case PHY_INTERFACE_MODE_SGMII: @@ -581,7 +606,6 @@ unsigned long phylink_get_capabilities(phy_interface_t interface, return (caps & mac_capabilities) | matched_caps; } -EXPORT_SYMBOL_GPL(phylink_get_capabilities); /** * phylink_validate_mask_caps() - Restrict link modes based on caps @@ -593,9 +617,9 @@ EXPORT_SYMBOL_GPL(phylink_get_capabilities); * @supported and @state based on that. Use this function if your capabilities * aren't constant, such as if they vary depending on the interface. */ -void phylink_validate_mask_caps(unsigned long *supported, - struct phylink_link_state *state, - unsigned long mac_capabilities) +static void phylink_validate_mask_caps(unsigned long *supported, + struct phylink_link_state *state, + unsigned long mac_capabilities) { __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; unsigned long caps; @@ -609,29 +633,12 @@ void phylink_validate_mask_caps(unsigned long *supported, linkmode_and(supported, supported, mask); linkmode_and(state->advertising, state->advertising, mask); } -EXPORT_SYMBOL_GPL(phylink_validate_mask_caps); - -/** - * phylink_generic_validate() - generic validate() callback implementation - * @config: a pointer to a &struct phylink_config. - * @supported: ethtool bitmask for supported link modes. - * @state: a pointer to a &struct phylink_link_state.
- * - * Generic implementation of the validate() callback that MAC drivers can - * use when they pass the range of supported interfaces and MAC capabilities. - */ -void phylink_generic_validate(struct phylink_config *config, - unsigned long *supported, - struct phylink_link_state *state) -{ - phylink_validate_mask_caps(supported, state, config->mac_capabilities); -} -EXPORT_SYMBOL_GPL(phylink_generic_validate); static int phylink_validate_mac_and_pcs(struct phylink *pl, unsigned long *supported, struct phylink_link_state *state) { + unsigned long capabilities; struct phylink_pcs *pcs; int ret; @@ -671,10 +678,13 @@ static int phylink_validate_mac_and_pcs(struct phylink *pl, } /* Then validate the link parameters with the MAC */ - if (pl->mac_ops->validate) - pl->mac_ops->validate(pl->config, supported, state); + if (pl->mac_ops->mac_get_caps) + capabilities = pl->mac_ops->mac_get_caps(pl->config, + state->interface); else - phylink_generic_validate(pl->config, supported, state); + capabilities = pl->config->mac_capabilities; + + phylink_validate_mask_caps(supported, state, capabilities); return phylink_is_empty_linkmode(supported) ? -EINVAL : 0; } @@ -863,6 +873,7 @@ static int phylink_parse_mode(struct phylink *pl, switch (pl->link_config.interface) { case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_PSGMII: case PHY_INTERFACE_MODE_QSGMII: case PHY_INTERFACE_MODE_QUSGMII: case PHY_INTERFACE_MODE_RGMII: @@ -993,6 +1004,40 @@ static void phylink_resolve_an_pause(struct phylink_link_state *state) } } +static void phylink_pcs_pre_config(struct phylink_pcs *pcs, + phy_interface_t interface) +{ + if (pcs && pcs->ops->pcs_pre_config) + pcs->ops->pcs_pre_config(pcs, interface); +} + +static int phylink_pcs_post_config(struct phylink_pcs *pcs, + phy_interface_t interface) +{ + int err = 0; + + if (pcs && pcs->ops->pcs_post_config) + err = pcs->ops->pcs_post_config(pcs, interface); + + return err; +} + +static void phylink_pcs_disable(struct phylink_pcs *pcs) +{ + if (pcs && pcs->ops->pcs_disable) + pcs->ops->pcs_disable(pcs); +} + +static int phylink_pcs_enable(struct phylink_pcs *pcs) +{ + int err = 0; + + if (pcs && pcs->ops->pcs_enable) + err = pcs->ops->pcs_enable(pcs); + + return err; +} + static int phylink_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, const struct phylink_link_state *state, bool permit_pause_to_mac) @@ -1027,30 +1072,33 @@ static void phylink_pcs_poll_start(struct phylink *pl) static void phylink_mac_config(struct phylink *pl, const struct phylink_link_state *state) { + struct phylink_link_state st = *state; + + /* Stop drivers incorrectly using these */ + linkmode_zero(st.lp_advertising); + st.speed = SPEED_UNKNOWN; + st.duplex = DUPLEX_UNKNOWN; + st.an_complete = false; + st.link = false; + phylink_dbg(pl, - "%s: mode=%s/%s/%s/%s/%s adv=%*pb pause=%02x link=%u\n", + "%s: mode=%s/%s/%s adv=%*pb pause=%02x\n", __func__, phylink_an_mode_str(pl->cur_link_an_mode), - phy_modes(state->interface), - phy_speed_to_str(state->speed), - phy_duplex_to_str(state->duplex), - phy_rate_matching_to_str(state->rate_matching), - __ETHTOOL_LINK_MODE_MASK_NBITS, state->advertising, - state->pause, state->link); + phy_modes(st.interface), + phy_rate_matching_to_str(st.rate_matching), + __ETHTOOL_LINK_MODE_MASK_NBITS, st.advertising, + st.pause); - pl->mac_ops->mac_config(pl->config, pl->cur_link_an_mode, state); + pl->mac_ops->mac_config(pl->config, pl->cur_link_an_mode, &st); } -static void phylink_mac_pcs_an_restart(struct phylink *pl) +static void 
phylink_pcs_an_restart(struct phylink *pl) { - if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, - pl->link_config.advertising) && + if (pl->pcs && linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + pl->link_config.advertising) && phy_interface_mode_is_8023z(pl->link_config.interface) && - phylink_autoneg_inband(pl->cur_link_an_mode)) { - if (pl->pcs) - pl->pcs->ops->pcs_an_restart(pl->pcs); - else if (pl->config->legacy_pre_march2020) - pl->mac_ops->mac_an_restart(pl->config); - } + phylink_autoneg_inband(pl->cur_link_an_mode)) + pl->pcs->ops->pcs_an_restart(pl->pcs); } static void phylink_major_config(struct phylink *pl, bool restart, @@ -1095,11 +1143,28 @@ static void phylink_major_config(struct phylink *pl, bool restart, /* If we have a new PCS, switch to the new PCS after preparing the MAC * for the change. */ - if (pcs_changed) + if (pcs_changed) { + phylink_pcs_disable(pl->pcs); + + if (pl->pcs) + pl->pcs->phylink = NULL; + + pcs->phylink = pl; + pl->pcs = pcs; + } + + if (pl->pcs) + phylink_pcs_pre_config(pl->pcs, state->interface); phylink_mac_config(pl, state); + if (pl->pcs) + phylink_pcs_post_config(pl->pcs, state->interface); + + if (pl->pcs_state == PCS_STATE_STARTING || pcs_changed) + phylink_pcs_enable(pl->pcs); + neg_mode = pl->cur_link_an_mode; if (pl->pcs && pl->pcs->neg_mode) neg_mode = pl->pcs_neg_mode; @@ -1113,7 +1178,7 @@ static void phylink_major_config(struct phylink *pl, bool restart, restart = true; if (restart) - phylink_mac_pcs_an_restart(pl); + phylink_pcs_an_restart(pl); if (pl->mac_ops->mac_finish) { err = pl->mac_ops->mac_finish(pl->config, pl->cur_link_an_mode, @@ -1146,13 +1211,6 @@ static int phylink_change_inband_advert(struct phylink *pl) if (test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) return 0; - if (!pl->pcs && pl->config->legacy_pre_march2020) { - /* Legacy method */ - phylink_mac_config(pl, &pl->link_config); - phylink_mac_pcs_an_restart(pl); - return 0; - } - phylink_dbg(pl, "%s: mode=%s/%s adv=%*pb pause=%02x\n", __func__, phylink_an_mode_str(pl->cur_link_an_mode), phy_modes(pl->link_config.interface), @@ -1178,7 +1236,7 @@ static int phylink_change_inband_advert(struct phylink *pl) return ret; if (ret > 0) - phylink_mac_pcs_an_restart(pl); + phylink_pcs_an_restart(pl); return 0; } @@ -1205,9 +1263,6 @@ static void phylink_mac_pcs_get_state(struct phylink *pl, if (pl->pcs) pl->pcs->ops->pcs_get_state(pl->pcs, state); - else if (pl->mac_ops->mac_pcs_get_state && - pl->config->legacy_pre_march2020) - pl->mac_ops->mac_pcs_get_state(pl->config, state); else state->link = 0; } @@ -1440,13 +1495,6 @@ static void phylink_resolve(struct work_struct *w) } phylink_major_config(pl, false, &link_state); pl->link_config.interface = link_state.interface; - } else if (!pl->pcs && pl->config->legacy_pre_march2020) { - /* The interface remains unchanged, only the speed, - * duplex or pause settings have changed. Call the - * old mac_config() method to configure the MAC/PCS - * only if we do not have a legacy MAC driver. 
- */ - phylink_mac_config(pl, &link_state); } } @@ -1568,6 +1616,7 @@ struct phylink *phylink_create(struct phylink_config *config, pl->config = config; if (config->type == PHYLINK_NETDEV) { pl->netdev = to_net_dev(config->dev); + netif_carrier_off(pl->netdev); } else if (config->type == PHYLINK_DEV) { pl->dev = config->dev; } else { @@ -1586,6 +1635,7 @@ struct phylink *phylink_create(struct phylink_config *config, pl->link_config.pause = MLO_PAUSE_AN; pl->link_config.speed = SPEED_UNKNOWN; pl->link_config.duplex = DUPLEX_UNKNOWN; + pl->pcs_state = PCS_STATE_DOWN; pl->mac_ops = mac_ops; __set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); timer_setup(&pl->link_poll, phylink_fixed_poll, 0); @@ -1939,6 +1989,14 @@ void phylink_disconnect_phy(struct phylink *pl) } EXPORT_SYMBOL_GPL(phylink_disconnect_phy); +static void phylink_link_changed(struct phylink *pl, bool up, const char *what) +{ + if (!up) + pl->mac_link_dropped = true; + phylink_run_resolve(pl); + phylink_dbg(pl, "%s link %s\n", what, up ? "up" : "down"); +} + /** * phylink_mac_change() - notify phylink of a change in MAC state * @pl: a pointer to a &struct phylink returned from phylink_create() @@ -1949,13 +2007,30 @@ EXPORT_SYMBOL_GPL(phylink_disconnect_phy); */ void phylink_mac_change(struct phylink *pl, bool up) { - if (!up) - pl->mac_link_dropped = true; - phylink_run_resolve(pl); - phylink_dbg(pl, "mac link %s\n", up ? "up" : "down"); + phylink_link_changed(pl, up, "mac"); } EXPORT_SYMBOL_GPL(phylink_mac_change); +/** + * phylink_pcs_change() - notify phylink of a change to PCS link state + * @pcs: pointer to &struct phylink_pcs + * @up: indicates whether the link is currently up. + * + * The PCS driver should call this when the state of its link changes + * (e.g. link failure, new negotiation results, etc.) Note: it should + * not determine "up" by reading the BMSR. If in doubt about the link + * state at interrupt time, then pass true if pcs_get_state() returns + * the latched link-down state, otherwise pass false. + */ +void phylink_pcs_change(struct phylink_pcs *pcs, bool up) +{ + struct phylink *pl = pcs->phylink; + + if (pl) + phylink_link_changed(pl, up, "pcs"); +} +EXPORT_SYMBOL_GPL(phylink_pcs_change); + static irqreturn_t phylink_link_handler(int irq, void *data) { struct phylink *pl = data; @@ -1987,6 +2062,8 @@ void phylink_start(struct phylink *pl) if (pl->netdev) netif_carrier_off(pl->netdev); + pl->pcs_state = PCS_STATE_STARTING; + /* Apply the link configuration to the MAC when starting. This allows * a fixed-link to start with the correct parameters, and also * ensures that we set the appropriate advertisement for Serdes links. 
@@ -1997,6 +2074,8 @@ void phylink_start(struct phylink *pl) */ phylink_mac_initial_config(pl, true); + pl->pcs_state = PCS_STATE_STARTED; + phylink_enable_and_run_resolve(pl, PHYLINK_DISABLE_STOPPED); if (pl->cfg_link_an_mode == MLO_AN_FIXED && pl->link_gpio) { @@ -2015,15 +2094,9 @@ void phylink_start(struct phylink *pl) poll = true; } - switch (pl->cfg_link_an_mode) { - case MLO_AN_FIXED: + if (pl->cfg_link_an_mode == MLO_AN_FIXED) poll |= pl->config->poll_fixed_state; - break; - case MLO_AN_INBAND: - if (pl->pcs) - poll |= pl->pcs->poll; - break; - } + if (poll) mod_timer(&pl->link_poll, jiffies + HZ); if (pl->phydev) @@ -2060,6 +2133,10 @@ void phylink_stop(struct phylink *pl) } phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED); + + pl->pcs_state = PCS_STATE_DOWN; + + phylink_pcs_disable(pl->pcs); } EXPORT_SYMBOL_GPL(phylink_stop); @@ -2449,7 +2526,7 @@ int phylink_ethtool_nway_reset(struct phylink *pl) if (pl->phydev) ret = phy_restart_aneg(pl->phydev); - phylink_mac_pcs_an_restart(pl); + phylink_pcs_an_restart(pl); return ret; } @@ -3433,7 +3510,7 @@ static void phylink_decode_usgmii_word(struct phylink_link_state *state, * * Parse the Clause 37 or Cisco SGMII link partner negotiation word into * the phylink @state structure. This is suitable to be used for implementing - * the mac_pcs_get_state() member of the struct phylink_mac_ops structure if + * the pcs_get_state() member of the struct phylink_pcs_ops structure if * accessing @bmsr and @lpa cannot be done with MDIO directly. */ void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state, @@ -3483,7 +3560,7 @@ EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_decode_state); * Read the MAC PCS state from the MII device configured in @config and * parse the Clause 37 or Cisco SGMII link partner negotiation word into * the phylink @state structure. This is suitable to be directly plugged - * into the mac_pcs_get_state() member of the struct phylink_mac_ops + * into the pcs_get_state() member of the struct phylink_pcs_ops * structure. */ void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs, @@ -3594,8 +3671,8 @@ EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_config); * clause 37 negotiation. * * Restart the clause 37 negotiation with the link partner. This is - * suitable to be directly plugged into the mac_pcs_get_state() member - * of the struct phylink_mac_ops structure. + * suitable to be directly plugged into the pcs_get_state() member + * of the struct phylink_pcs_ops structure. 
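To make the corrected kernel-doc concrete: a clause 22 PCS can back its pcs_get_state() method entirely with the helper, as in the sketch below. The bar_pcs wrapper and its MDIO plumbing are hypothetical; phylink_mii_c22_pcs_get_state() and the phylink_pcs_ops member names are taken from the hunks above.

/* Minimal sketch of plugging the c22 helper into struct phylink_pcs_ops. */
#include <linux/mdio.h>
#include <linux/phylink.h>

struct bar_pcs {
	struct phylink_pcs pcs;
	struct mdio_device *mdiodev;	/* hypothetical: MDIO device of the PCS */
};

static void bar_pcs_get_state(struct phylink_pcs *pcs,
			      struct phylink_link_state *state)
{
	struct bar_pcs *bp = container_of(pcs, struct bar_pcs, pcs);

	/* Reads BMSR/LPA over MDIO and decodes the clause 37 or Cisco
	 * SGMII link partner word into @state.
	 */
	phylink_mii_c22_pcs_get_state(bp->mdiodev, state);
}

static const struct phylink_pcs_ops bar_pcs_ops = {
	.pcs_get_state	= bar_pcs_get_state,
	/* .pcs_config and .pcs_an_restart elided from this sketch */
};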
*/ void phylink_mii_c22_pcs_an_restart(struct mdio_device *pcs) { @@ -3650,3 +3727,4 @@ static int __init phylink_init(void) module_init(phylink_init); MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("phylink models the MAC to optional PHY connection"); diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index d855a18308d7..5468bd209fab 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -257,6 +257,7 @@ struct sfp { unsigned int state_hw_drive; unsigned int state_hw_mask; unsigned int state_soft_mask; + unsigned int state_ignore_mask; unsigned int state; struct delayed_work poll; @@ -280,7 +281,6 @@ struct sfp { unsigned int rs_state_mask; bool have_a2; - bool tx_fault_ignore; const struct sfp_quirk *quirk; @@ -345,9 +345,24 @@ static void sfp_fixup_long_startup(struct sfp *sfp) sfp->module_t_start_up = T_START_UP_BAD_GPON; } +static void sfp_fixup_ignore_los(struct sfp *sfp) +{ + /* This forces LOS to zero, so we ignore transitions */ + sfp->state_ignore_mask |= SFP_F_LOS; + /* Make sure that LOS options are clear */ + sfp->id.ext.options &= ~cpu_to_be16(SFP_OPTIONS_LOS_INVERTED | + SFP_OPTIONS_LOS_NORMAL); +} + static void sfp_fixup_ignore_tx_fault(struct sfp *sfp) { - sfp->tx_fault_ignore = true; + sfp->state_ignore_mask |= SFP_F_TX_FAULT; +} + +static void sfp_fixup_nokia(struct sfp *sfp) +{ + sfp_fixup_long_startup(sfp); + sfp_fixup_ignore_los(sfp); } // For 10GBASE-T short-reach modules @@ -446,12 +461,17 @@ static const struct sfp_quirk sfp_quirks[] = { // Alcatel Lucent G-010S-A can operate at 2500base-X, but report 3.2GBd // NRZ in their EEPROM SFP_QUIRK("ALCATELLUCENT", "3FE46541AA", sfp_quirk_2500basex, - sfp_fixup_long_startup), + sfp_fixup_nokia), // Fiberstore SFP-10G-T doesn't identify as copper, and uses the // Rollball protocol to talk to the PHY. 
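The new state_ignore_mask above generalizes the old tx_fault_ignore flag: a quirk fixup marks signal bits as ignored, sfp_sm_mod_probe() masks them out of state_hw_mask, and sfp_soft_start_poll() (both later in this diff) excludes them from soft polling, so the state machine never sees those signals from either source. A tiny standalone model of that mask arithmetic; the SFP_F_* values mirror the kernel's bit layout, but the program is purely illustrative.

/* Userspace model of the ignore-mask plumbing; not kernel code. */
#include <stdio.h>

#define SFP_F_PRESENT	(1 << 0)
#define SFP_F_LOS	(1 << 1)
#define SFP_F_TX_FAULT	(1 << 2)

int main(void)
{
	unsigned int hw_mask = SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT;
	unsigned int ignore_mask = SFP_F_LOS;	/* e.g. sfp_fixup_ignore_los() */
	unsigned int soft_supported = SFP_F_LOS | SFP_F_TX_FAULT;

	/* sfp_sm_mod_probe(): ignored bits no longer come from GPIOs/IRQs */
	hw_mask &= ~ignore_mask;

	/* sfp_soft_start_poll(): ...and are not soft-polled from A2h either */
	unsigned int soft_mask = ~hw_mask & ~ignore_mask & soft_supported;

	printf("hw=%#x soft=%#x\n", hw_mask, soft_mask);	/* hw=0x5 soft=0 */
	return 0;
}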
SFP_QUIRK_F("FS", "SFP-10G-T", sfp_fixup_fs_10gt), + // Fiberstore GPON-ONU-34-20BI can operate at 2500base-X, but report 1.2GBd + // NRZ in their EEPROM + SFP_QUIRK("FS", "GPON-ONU-34-20BI", sfp_quirk_2500basex, + sfp_fixup_ignore_tx_fault), + SFP_QUIRK_F("HALNy", "HL-GSFP", sfp_fixup_halny_gsfp), // HG MXPD-483II-F 2.5G supports 2500Base-X, but incorrectly reports @@ -463,6 +483,9 @@ static const struct sfp_quirk sfp_quirks[] = { SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex, sfp_fixup_ignore_tx_fault), + // FS 2.5G Base-T + SFP_QUIRK_M("FS", "SFP-2.5G-T", sfp_quirk_oem_2_5g), + // Lantech 8330-262D-E can operate at 2500base-X, but incorrectly report // 2500MBd NRZ in their EEPROM SFP_QUIRK_M("Lantech", "8330-262D-E", sfp_quirk_2500basex), @@ -784,7 +807,8 @@ static void sfp_soft_start_poll(struct sfp *sfp) mutex_lock(&sfp->st_mutex); // Poll the soft state for hardware pins we want to ignore - sfp->state_soft_mask = ~sfp->state_hw_mask & mask; + sfp->state_soft_mask = ~sfp->state_hw_mask & ~sfp->state_ignore_mask & + mask; if (sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT) && !sfp->need_poll) @@ -1763,6 +1787,9 @@ static int sfp_sm_probe_phy(struct sfp *sfp, int addr, bool is_c45) return PTR_ERR(phy); } + /* Mark this PHY as being on a SFP module */ + phy->is_on_sfp_module = true; + err = phy_device_register(phy); if (err) { phy_device_free(phy); @@ -2306,7 +2333,7 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report) sfp->module_t_start_up = T_START_UP; sfp->module_t_wait = T_WAIT; - sfp->tx_fault_ignore = false; + sfp->state_ignore_mask = 0; if (sfp->id.base.extended_cc == SFF8024_ECC_10GBASE_T_SFI || sfp->id.base.extended_cc == SFF8024_ECC_10GBASE_T_SR || @@ -2329,6 +2356,8 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report) if (sfp->quirk && sfp->quirk->fixup) sfp->quirk->fixup(sfp); + + sfp->state_hw_mask &= ~sfp->state_ignore_mask; mutex_unlock(&sfp->st_mutex); return 0; @@ -2830,10 +2859,7 @@ static void sfp_check_state(struct sfp *sfp) mutex_lock(&sfp->st_mutex); state = sfp_get_state(sfp); changed = state ^ sfp->state; - if (sfp->tx_fault_ignore) - changed &= SFP_F_PRESENT | SFP_F_LOS; - else - changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT; + changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT; for (i = 0; i < GPIO_MAX; i++) if (changed & BIT(i)) @@ -3127,3 +3153,4 @@ module_exit(sfp_exit); MODULE_ALIAS("platform:sfp"); MODULE_AUTHOR("Russell King"); MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("SFP cage support"); diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h index c7cb50d10099..1fd097dccb9f 100644 --- a/drivers/net/phy/sfp.h +++ b/drivers/net/phy/sfp.h @@ -37,7 +37,6 @@ int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id, void sfp_module_remove(struct sfp_bus *bus); int sfp_module_start(struct sfp_bus *bus); void sfp_module_stop(struct sfp_bus *bus); -int sfp_link_configure(struct sfp_bus *bus, const struct sfp_eeprom_id *id); struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp, const struct sfp_socket_ops *ops); void sfp_unregister_socket(struct sfp_bus *bus); diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index 692930750215..1c7306a1af13 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -20,6 +20,8 @@ #include <linux/of.h> #include <linux/phy.h> #include <linux/netdevice.h> +#include <linux/crc16.h> +#include <linux/etherdevice.h> #include <linux/smscphy.h> /* Vendor-specific PHY Definitions */ @@ -51,6 +53,7 @@ struct smsc_phy_priv { unsigned int 
edpd_enable:1; unsigned int edpd_mode_set_by_user:1; unsigned int edpd_max_wait_ms; + bool wol_arp; }; static int smsc_phy_ack_interrupt(struct phy_device *phydev) @@ -258,6 +261,243 @@ int lan87xx_read_status(struct phy_device *phydev) } EXPORT_SYMBOL_GPL(lan87xx_read_status); +static int lan874x_phy_config_init(struct phy_device *phydev) +{ + u16 val; + int rc; + + /* Setup LED2/nINT/nPME pin to function as nPME. May need user option + * to use LED1/nINT/nPME. + */ + val = MII_LAN874X_PHY_PME2_SET; + + /* The bits MII_LAN874X_PHY_WOL_PFDA_FR, MII_LAN874X_PHY_WOL_WUFR, + * MII_LAN874X_PHY_WOL_MPR, and MII_LAN874X_PHY_WOL_BCAST_FR need to + * be cleared to de-assert PME signal after a WoL event happens, but + * using PME auto clear gets around that. + */ + val |= MII_LAN874X_PHY_PME_SELF_CLEAR; + rc = phy_write_mmd(phydev, MDIO_MMD_PCS, MII_LAN874X_PHY_MMD_WOL_WUCSR, + val); + if (rc < 0) + return rc; + + /* set nPME self clear delay time */ + rc = phy_write_mmd(phydev, MDIO_MMD_PCS, MII_LAN874X_PHY_MMD_MCFGR, + MII_LAN874X_PHY_PME_SELF_CLEAR_DELAY); + if (rc < 0) + return rc; + + return smsc_phy_config_init(phydev); +} + +static void lan874x_get_wol(struct phy_device *phydev, + struct ethtool_wolinfo *wol) +{ + struct smsc_phy_priv *priv = phydev->priv; + int rc; + + wol->supported = (WAKE_UCAST | WAKE_BCAST | WAKE_MAGIC | + WAKE_ARP | WAKE_MCAST); + wol->wolopts = 0; + + rc = phy_read_mmd(phydev, MDIO_MMD_PCS, MII_LAN874X_PHY_MMD_WOL_WUCSR); + if (rc < 0) + return; + + if (rc & MII_LAN874X_PHY_WOL_PFDAEN) + wol->wolopts |= WAKE_UCAST; + + if (rc & MII_LAN874X_PHY_WOL_BCSTEN) + wol->wolopts |= WAKE_BCAST; + + if (rc & MII_LAN874X_PHY_WOL_MPEN) + wol->wolopts |= WAKE_MAGIC; + + if (rc & MII_LAN874X_PHY_WOL_WUEN) { + if (priv->wol_arp) + wol->wolopts |= WAKE_ARP; + else + wol->wolopts |= WAKE_MCAST; + } +} + +static u16 smsc_crc16(const u8 *buffer, size_t len) +{ + return bitrev16(crc16(0xFFFF, buffer, len)); +} + +static int lan874x_chk_wol_pattern(const u8 pattern[], const u16 *mask, + u8 len, u8 *data, u8 *datalen) +{ + size_t i, j, k; + int ret = 0; + u16 bits; + + /* Pattern filtering can match up to 128 bytes of frame data. There + * are 8 registers to program the 16-bit masks, where each bit means + * the byte will be compared. The frame data will then go through a + * CRC16 calculation for hardware comparison. This helper function + * makes sure only relevant frame data are included in this + * calculation. It provides a warning when the masks and expected + * data size do not match. + */ + i = 0; + k = 0; + while (len > 0) { + bits = *mask; + for (j = 0; j < 16; j++, i++, len--) { + /* No more pattern. */ + if (!len) { + /* The rest of bitmap is not empty. */ + if (bits) + ret = i + 1; + break; + } + if (bits & 1) + data[k++] = pattern[i]; + bits >>= 1; + } + mask++; + } + *datalen = k; + return ret; +} + +static int lan874x_set_wol_pattern(struct phy_device *phydev, u16 val, + const u8 data[], u8 datalen, + const u16 *mask, u8 masklen) +{ + u16 crc, reg; + int rc; + + /* Starting pattern offset is set before calling this function. 
*/ + val |= MII_LAN874X_PHY_WOL_FILTER_EN; + rc = phy_write_mmd(phydev, MDIO_MMD_PCS, + MII_LAN874X_PHY_MMD_WOL_WUF_CFGA, val); + if (rc < 0) + return rc; + + crc = smsc_crc16(data, datalen); + rc = phy_write_mmd(phydev, MDIO_MMD_PCS, + MII_LAN874X_PHY_MMD_WOL_WUF_CFGB, crc); + if (rc < 0) + return rc; + + masklen = (masklen + 15) & ~0xf; + reg = MII_LAN874X_PHY_MMD_WOL_WUF_MASK7; + while (masklen >= 16) { + rc = phy_write_mmd(phydev, MDIO_MMD_PCS, reg, *mask); + if (rc < 0) + return rc; + reg--; + mask++; + masklen -= 16; + } + + /* Clear out the rest of mask registers. */ + while (reg != MII_LAN874X_PHY_MMD_WOL_WUF_MASK0) { + phy_write_mmd(phydev, MDIO_MMD_PCS, reg, 0); + reg--; + } + return rc; +} + +static int lan874x_set_wol(struct phy_device *phydev, + struct ethtool_wolinfo *wol) +{ + struct net_device *ndev = phydev->attached_dev; + struct smsc_phy_priv *priv = phydev->priv; + u16 val, val_wucsr; + u8 data[128]; + u8 datalen; + int rc; + + /* lan874x has only one WoL filter pattern */ + if ((wol->wolopts & (WAKE_ARP | WAKE_MCAST)) == + (WAKE_ARP | WAKE_MCAST)) { + phydev_info(phydev, + "lan874x WoL supports one of ARP|MCAST at a time\n"); + return -EOPNOTSUPP; + } + + rc = phy_read_mmd(phydev, MDIO_MMD_PCS, MII_LAN874X_PHY_MMD_WOL_WUCSR); + if (rc < 0) + return rc; + + val_wucsr = rc; + + if (wol->wolopts & WAKE_UCAST) + val_wucsr |= MII_LAN874X_PHY_WOL_PFDAEN; + else + val_wucsr &= ~MII_LAN874X_PHY_WOL_PFDAEN; + + if (wol->wolopts & WAKE_BCAST) + val_wucsr |= MII_LAN874X_PHY_WOL_BCSTEN; + else + val_wucsr &= ~MII_LAN874X_PHY_WOL_BCSTEN; + + if (wol->wolopts & WAKE_MAGIC) + val_wucsr |= MII_LAN874X_PHY_WOL_MPEN; + else + val_wucsr &= ~MII_LAN874X_PHY_WOL_MPEN; + + /* Need to use pattern matching */ + if (wol->wolopts & (WAKE_ARP | WAKE_MCAST)) + val_wucsr |= MII_LAN874X_PHY_WOL_WUEN; + else + val_wucsr &= ~MII_LAN874X_PHY_WOL_WUEN; + + if (wol->wolopts & WAKE_ARP) { + const u8 pattern[2] = { 0x08, 0x06 }; + const u16 mask[1] = { 0x0003 }; + + rc = lan874x_chk_wol_pattern(pattern, mask, 2, data, + &datalen); + if (rc) + phydev_dbg(phydev, "pattern not valid at %d\n", rc); + + /* Need to match broadcast destination address and provided + * data pattern at offset 12. + */ + val = 12 | MII_LAN874X_PHY_WOL_FILTER_BCSTEN; + rc = lan874x_set_wol_pattern(phydev, val, data, datalen, mask, + 2); + if (rc < 0) + return rc; + priv->wol_arp = true; + } + + if (wol->wolopts & WAKE_MCAST) { + /* Need to match multicast destination address. 
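The pattern-match path above hinges on smsc_crc16(): the PHY compares a bit-reversed CRC-16 computed over only the mask-selected frame bytes, and lan874x_set_wol_pattern() writes that value to the WUF_CFGB register. Below is a standalone model of the calculation, assuming the semantics of the kernel's crc16() (reflected polynomial, i.e. 0xA001 in shift-right form, with a caller-supplied seed); the WAKE_ARP example reproduces the two pattern bytes and the 0x0003 mask used by lan874x_set_wol() above.

/* Userspace model of smsc_crc16() = bitrev16(crc16(0xFFFF, buf, len)). */
#include <stdint.h>
#include <stdio.h>

static uint16_t crc16_byte(uint16_t crc, uint8_t b)
{
	crc ^= b;
	for (int i = 0; i < 8; i++)
		crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
	return crc;
}

static uint16_t bitrev16(uint16_t x)
{
	uint16_t r = 0;

	for (int i = 0; i < 16; i++)
		r = (r << 1) | ((x >> i) & 1);
	return r;
}

int main(void)
{
	/* WAKE_ARP case: EtherType 0x08 0x06 matched at offset 12; the
	 * mask word 0x0003 selects both bytes, so the CRC covers exactly
	 * these two bytes of the frame.
	 */
	const uint8_t data[] = { 0x08, 0x06 };
	uint16_t crc = 0xFFFF;

	for (size_t i = 0; i < sizeof(data); i++)
		crc = crc16_byte(crc, data[i]);

	printf("WUF_CFGB value = %#06x\n", bitrev16(crc));
	return 0;
}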
*/ + val = MII_LAN874X_PHY_WOL_FILTER_MCASTTEN; + rc = lan874x_set_wol_pattern(phydev, val, data, 0, NULL, 0); + if (rc < 0) + return rc; + priv->wol_arp = false; + } + + if (wol->wolopts & (WAKE_MAGIC | WAKE_UCAST)) { + const u8 *mac = (const u8 *)ndev->dev_addr; + int i, reg; + + reg = MII_LAN874X_PHY_MMD_WOL_RX_ADDRC; + for (i = 0; i < 6; i += 2, reg--) { + rc = phy_write_mmd(phydev, MDIO_MMD_PCS, reg, + ((mac[i + 1] << 8) | mac[i])); + if (rc < 0) + return rc; + } + } + + rc = phy_write_mmd(phydev, MDIO_MMD_PCS, MII_LAN874X_PHY_MMD_WOL_WUCSR, + val_wucsr); + if (rc < 0) + return rc; + + return 0; +} + static int smsc_get_sset_count(struct phy_device *phydev) { return ARRAY_SIZE(smsc_hw_stats); @@ -267,10 +507,8 @@ static void smsc_get_strings(struct phy_device *phydev, u8 *data) { int i; - for (i = 0; i < ARRAY_SIZE(smsc_hw_stats); i++) { - strncpy(data + i * ETH_GSTRING_LEN, - smsc_hw_stats[i].string, ETH_GSTRING_LEN); - } + for (i = 0; i < ARRAY_SIZE(smsc_hw_stats); i++) + ethtool_sprintf(&data, "%s", smsc_hw_stats[i].string); } static u64 smsc_get_stat(struct phy_device *phydev, int i) @@ -533,7 +771,7 @@ static struct phy_driver smsc_phy_driver[] = { /* basic functions */ .read_status = lan87xx_read_status, - .config_init = smsc_phy_config_init, + .config_init = lan874x_phy_config_init, .soft_reset = smsc_phy_reset, /* IRQ related */ @@ -548,6 +786,10 @@ static struct phy_driver smsc_phy_driver[] = { .get_tunable = smsc_phy_get_tunable, .set_tunable = smsc_phy_set_tunable, + /* WoL */ + .set_wol = lan874x_set_wol, + .get_wol = lan874x_get_wol, + .suspend = genphy_suspend, .resume = genphy_resume, }, { @@ -566,7 +808,7 @@ static struct phy_driver smsc_phy_driver[] = { /* basic functions */ .read_status = lan87xx_read_status, - .config_init = smsc_phy_config_init, + .config_init = lan874x_phy_config_init, .soft_reset = smsc_phy_reset, /* IRQ related */ @@ -581,6 +823,10 @@ static struct phy_driver smsc_phy_driver[] = { .get_tunable = smsc_phy_get_tunable, .set_tunable = smsc_phy_set_tunable, + /* WoL */ + .set_wol = lan874x_set_wol, + .get_wol = lan874x_get_wol, + .suspend = genphy_suspend, .resume = genphy_resume, } }; diff --git a/drivers/net/phy/stubs.c b/drivers/net/phy/stubs.c new file mode 100644 index 000000000000..cfb9f275eb18 --- /dev/null +++ b/drivers/net/phy/stubs.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Stubs for PHY library functionality called by the core network stack. + * These are necessary because CONFIG_PHYLIB can be a module, and built-in + * code cannot directly call symbols exported by modules. + */ +#include <linux/phylib_stubs.h> + +const struct phylib_stubs *phylib_stubs; +EXPORT_SYMBOL_GPL(phylib_stubs); diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c index 15a179631903..fbaaa8c102a1 100644 --- a/drivers/net/ppp/ppp_async.c +++ b/drivers/net/ppp/ppp_async.c @@ -98,7 +98,7 @@ static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb); static int ppp_async_push(struct asyncppp *ap); static void ppp_async_flush_output(struct asyncppp *ap); static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf, - const char *flags, int count); + const u8 *flags, int count); static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg); static void ppp_async_process(struct tasklet_struct *t); @@ -257,9 +257,8 @@ static void ppp_asynctty_hangup(struct tty_struct *tty) * Pppd reads and writes packets via /dev/ppp instead. 
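The new stubs.c above introduces a small indirection pattern: built-in code cannot link against symbols exported by a module, so it calls through a global ops pointer that the module populates at load time. A minimal sketch of the registration side; the helper names and the RTNL locking shown are assumptions for illustration, not the actual phylib wiring.

/* Illustrative only; the real hook-up lives elsewhere in phylib. */
#include <linux/phylib_stubs.h>
#include <linux/rtnetlink.h>

static const struct phylib_stubs phylib_stubs_impl = {
	/* function pointers filled in with the module's implementations */
};

static void phylib_register_stubs(void)		/* hypothetical helper */
{
	rtnl_lock();		/* assumption: callers read the pointer under RTNL */
	phylib_stubs = &phylib_stubs_impl;
	rtnl_unlock();
}

static void phylib_unregister_stubs(void)	/* hypothetical helper */
{
	rtnl_lock();
	phylib_stubs = NULL;	/* built-in callers must tolerate NULL */
	rtnl_unlock();
}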
*/ static ssize_t -ppp_asynctty_read(struct tty_struct *tty, struct file *file, - unsigned char *buf, size_t count, - void **cookie, unsigned long offset) +ppp_asynctty_read(struct tty_struct *tty, struct file *file, u8 *buf, + size_t count, void **cookie, unsigned long offset) { return -EAGAIN; } @@ -269,8 +268,8 @@ ppp_asynctty_read(struct tty_struct *tty, struct file *file, * from the ppp generic stuff. */ static ssize_t -ppp_asynctty_write(struct tty_struct *tty, struct file *file, - const unsigned char *buf, size_t count) +ppp_asynctty_write(struct tty_struct *tty, struct file *file, const u8 *buf, + size_t count) { return -EAGAIN; } @@ -328,17 +327,10 @@ ppp_asynctty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) return err; } -/* No kernel lock - fine */ -static __poll_t -ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait) -{ - return 0; -} - /* May sleep, don't call from interrupt level or with interrupts disabled */ static void -ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf, - const char *cflags, int count) +ppp_asynctty_receive(struct tty_struct *tty, const u8 *buf, const u8 *cflags, + size_t count) { struct asyncppp *ap = ap_get(tty); unsigned long flags; @@ -378,7 +370,6 @@ static struct tty_ldisc_ops ppp_ldisc = { .read = ppp_asynctty_read, .write = ppp_asynctty_write, .ioctl = ppp_asynctty_ioctl, - .poll = ppp_asynctty_poll, .receive_buf = ppp_asynctty_receive, .write_wakeup = ppp_asynctty_wakeup, }; @@ -827,8 +818,7 @@ process_input_packet(struct asyncppp *ap) other ldisc functions but will not be re-entered */ static void -ppp_async_input(struct asyncppp *ap, const unsigned char *buf, - const char *flags, int count) +ppp_async_input(struct asyncppp *ap, const u8 *buf, const u8 *flags, int count) { struct sk_buff *skb; int c, i, j, n, s, f; diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index a9beacd552cf..0193af2d31c9 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -570,8 +570,8 @@ static struct bpf_prog *get_filter(struct sock_fprog *uprog) /* uprog->len is unsigned short, so no overflow here */ fprog.len = uprog->len; - fprog.filter = memdup_user(uprog->filter, - uprog->len * sizeof(struct sock_filter)); + fprog.filter = memdup_array_user(uprog->filter, + uprog->len, sizeof(struct sock_filter)); if (IS_ERR(fprog.filter)) return ERR_CAST(fprog.filter); diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c index 18283b7b94bc..52d05ce4a281 100644 --- a/drivers/net/ppp/ppp_synctty.c +++ b/drivers/net/ppp/ppp_synctty.c @@ -93,8 +93,8 @@ static int ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, static void ppp_sync_process(struct tasklet_struct *t); static int ppp_sync_push(struct syncppp *ap); static void ppp_sync_flush_output(struct syncppp *ap); -static void ppp_sync_input(struct syncppp *ap, const unsigned char *buf, - const char *flags, int count); +static void ppp_sync_input(struct syncppp *ap, const u8 *buf, const u8 *flags, + int count); static const struct ppp_channel_ops sync_ops = { .start_xmit = ppp_sync_send, @@ -255,8 +255,7 @@ static void ppp_sync_hangup(struct tty_struct *tty) * Pppd reads and writes packets via /dev/ppp instead. 
*/ static ssize_t -ppp_sync_read(struct tty_struct *tty, struct file *file, - unsigned char *buf, size_t count, +ppp_sync_read(struct tty_struct *tty, struct file *file, u8 *buf, size_t count, void **cookie, unsigned long offset) { return -EAGAIN; @@ -267,8 +266,8 @@ ppp_sync_read(struct tty_struct *tty, struct file *file, * from the ppp generic stuff. */ static ssize_t -ppp_sync_write(struct tty_struct *tty, struct file *file, - const unsigned char *buf, size_t count) +ppp_sync_write(struct tty_struct *tty, struct file *file, const u8 *buf, + size_t count) { return -EAGAIN; } @@ -321,17 +320,10 @@ ppp_synctty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) return err; } -/* No kernel lock - fine */ -static __poll_t -ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait) -{ - return 0; -} - /* May sleep, don't call from interrupt level or with interrupts disabled */ static void -ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf, - const char *cflags, int count) +ppp_sync_receive(struct tty_struct *tty, const u8 *buf, const u8 *cflags, + size_t count) { struct syncppp *ap = sp_get(tty); unsigned long flags; @@ -371,7 +363,6 @@ static struct tty_ldisc_ops ppp_sync_ldisc = { .read = ppp_sync_read, .write = ppp_sync_write, .ioctl = ppp_synctty_ioctl, - .poll = ppp_sync_poll, .receive_buf = ppp_sync_receive, .write_wakeup = ppp_sync_wakeup, }; @@ -462,6 +453,10 @@ ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg) case PPPIOCSMRU: if (get_user(val, (int __user *) argp)) break; + if (val > U16_MAX) { + err = -EINVAL; + break; + } if (val < PPP_MRU) val = PPP_MRU; ap->mru = val; @@ -663,8 +658,7 @@ ppp_sync_flush_output(struct syncppp *ap) * frame is considered to be in error and is tossed. 
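A note on the get_filter() change in ppp_generic.c above: memdup_array_user() exists so the multiplication in memdup_user(ptr, n * size) cannot silently wrap on 32-bit kernels. Here uprog->len is an unsigned short, so the old code was safe, but the helper turns that "no overflow here" comment into a checked property. A model of what the helper does (the helper itself is real; this reimplementation is only for illustration):

/* Model of memdup_array_user(); check_mul_overflow() rejects n*size wrap. */
#include <linux/err.h>
#include <linux/overflow.h>
#include <linux/string.h>

static inline void *memdup_array_user_model(const void __user *src,
					    size_t n, size_t size)
{
	size_t nbytes;

	if (check_mul_overflow(n, size, &nbytes))
		return ERR_PTR(-EOVERFLOW);

	return memdup_user(src, nbytes);
}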
*/ static void -ppp_sync_input(struct syncppp *ap, const unsigned char *buf, - const char *flags, int count) +ppp_sync_input(struct syncppp *ap, const u8 *buf, const u8 *flags, int count) { struct sk_buff *skb; unsigned char *p; @@ -697,7 +691,7 @@ ppp_sync_input(struct syncppp *ap, const unsigned char *buf, /* strip address/control field if present */ p = skb->data; - if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) { + if (skb->len >= 2 && p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) { /* chop off address/control */ if (skb->len < 3) goto err; diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 3b79c603b936..8e7238e97d0a 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -877,7 +877,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m, skb->dev = dev; - skb->priority = sk->sk_priority; + skb->priority = READ_ONCE(sk->sk_priority); skb->protocol = cpu_to_be16(ETH_P_PPP_SES); ph = skb_put(skb, total_len + sizeof(struct pppoe_hdr)); @@ -968,7 +968,7 @@ abort: ***********************************************************************/ static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb) { - struct sock *sk = (struct sock *)chan->private; + struct sock *sk = chan->private; return __pppoe_xmit(sk, skb); } @@ -976,7 +976,7 @@ static int pppoe_fill_forward_path(struct net_device_path_ctx *ctx, struct net_device_path *path, const struct ppp_channel *chan) { - struct sock *sk = (struct sock *)chan->private; + struct sock *sk = chan->private; struct pppox_sock *po = pppox_sk(sk); struct net_device *dev = po->pppoe_dev; diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 32183f24e63f..6833ef0c7930 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -129,10 +129,10 @@ static void del_chan(struct pppox_sock *sock) spin_unlock(&chan_lock); } -static struct rtable *pptp_route_output(struct pppox_sock *po, +static struct rtable *pptp_route_output(const struct pppox_sock *po, struct flowi4 *fl4) { - struct sock *sk = &po->sk; + const struct sock *sk = &po->sk; struct net *net; net = sock_net(sk); @@ -148,7 +148,7 @@ static struct rtable *pptp_route_output(struct pppox_sock *po, static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) { - struct sock *sk = (struct sock *) chan->private; + struct sock *sk = chan->private; struct pppox_sock *po = pppox_sk(sk); struct net *net = sock_net(sk); struct pptp_opt *opt = &po->proto.pptp; @@ -575,7 +575,7 @@ out: static int pptp_ppp_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg) { - struct sock *sk = (struct sock *) chan->private; + struct sock *sk = chan->private; struct pppox_sock *po = pppox_sk(sk); struct pptp_opt *opt = &po->proto.pptp; void __user *argp = (void __user *)arg; diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index 6865d32270e5..e4280e37fec9 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -685,8 +685,8 @@ static void sl_setup(struct net_device *dev) * in parallel */ -static void slip_receive_buf(struct tty_struct *tty, const unsigned char *cp, - const char *fp, int count) +static void slip_receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp, + size_t count) { struct slip *sl = tty->disc_data; diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c index 36803d932dff..d591e33268e5 100644 --- a/drivers/net/sungem_phy.c +++ b/drivers/net/sungem_phy.c @@ -1194,4 +1194,5 @@ fail: } EXPORT_SYMBOL(sungem_phy_probe); +MODULE_DESCRIPTION("PHY drivers for the sungem Ethernet MAC 
driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 49d1d6acf95e..9f0495e8df4d 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -22,6 +22,7 @@ #include <net/net_namespace.h> #include <net/rtnetlink.h> #include <net/sock.h> +#include <net/xdp.h> #include <linux/virtio_net.h> #include <linux/skb_array.h> @@ -614,8 +615,10 @@ static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad, if (prepad + len < PAGE_SIZE || !linear) linear = len; + if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) + linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER); skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, - err, 0); + err, PAGE_ALLOC_COSTLY_ORDER); if (!skb) return NULL; @@ -1396,6 +1399,7 @@ void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev) } EXPORT_SYMBOL_GPL(tap_destroy_cdev); +MODULE_DESCRIPTION("Common library for drivers implementing the TAP interface"); MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>"); MODULE_AUTHOR("Sainath Grandhi <sainath.grandhi@intel.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 382756c3fb83..f575f225d417 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -149,7 +149,6 @@ static int __team_option_inst_add(struct team *team, struct team_option *option, struct team_option_inst *opt_inst; unsigned int array_size; unsigned int i; - int err; array_size = option->array_size; if (!array_size) @@ -165,11 +164,8 @@ static int __team_option_inst_add(struct team *team, struct team_option *option, opt_inst->changed = true; opt_inst->removed = false; list_add_tail(&opt_inst->list, &team->option_inst_list); - if (option->init) { - err = option->init(team, &opt_inst->info); - if (err) - return err; - } + if (option->init) + option->init(team, &opt_inst->info); } return 0; @@ -285,8 +281,10 @@ static int __team_options_register(struct team *team, return 0; inst_rollback: - for (i--; i >= 0; i--) + for (i--; i >= 0; i--) { __team_option_inst_del_option(team, dst_opts[i]); + list_del(&dst_opts[i]->list); + } i = option_count; alloc_rollback: @@ -362,7 +360,9 @@ static int team_option_get(struct team *team, { if (!opt_inst->option->getter) return -EOPNOTSUPP; - return opt_inst->option->getter(team, ctx); + + opt_inst->option->getter(team, ctx); + return 0; } static int team_option_set(struct team *team, @@ -1377,10 +1377,9 @@ static int team_port_del(struct team *team, struct net_device *port_dev) * Net device ops *****************/ -static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx) +static void team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx) { ctx->data.str_val = team->mode->kind; - return 0; } static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx) @@ -1388,11 +1387,10 @@ static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx) return team_change_mode(team, ctx->data.str_val); } -static int team_notify_peers_count_get(struct team *team, - struct team_gsetter_ctx *ctx) +static void team_notify_peers_count_get(struct team *team, + struct team_gsetter_ctx *ctx) { ctx->data.u32_val = team->notify_peers.count; - return 0; } static int team_notify_peers_count_set(struct team *team, @@ -1402,11 +1400,10 @@ static int team_notify_peers_count_set(struct team *team, return 0; } -static int team_notify_peers_interval_get(struct team *team, - struct team_gsetter_ctx *ctx) +static void 
team_notify_peers_interval_get(struct team *team, + struct team_gsetter_ctx *ctx) { ctx->data.u32_val = team->notify_peers.interval; - return 0; } static int team_notify_peers_interval_set(struct team *team, @@ -1416,11 +1413,10 @@ static int team_notify_peers_interval_set(struct team *team, return 0; } -static int team_mcast_rejoin_count_get(struct team *team, - struct team_gsetter_ctx *ctx) +static void team_mcast_rejoin_count_get(struct team *team, + struct team_gsetter_ctx *ctx) { ctx->data.u32_val = team->mcast_rejoin.count; - return 0; } static int team_mcast_rejoin_count_set(struct team *team, @@ -1430,11 +1426,10 @@ static int team_mcast_rejoin_count_set(struct team *team, return 0; } -static int team_mcast_rejoin_interval_get(struct team *team, - struct team_gsetter_ctx *ctx) +static void team_mcast_rejoin_interval_get(struct team *team, + struct team_gsetter_ctx *ctx) { ctx->data.u32_val = team->mcast_rejoin.interval; - return 0; } static int team_mcast_rejoin_interval_set(struct team *team, @@ -1444,13 +1439,12 @@ static int team_mcast_rejoin_interval_set(struct team *team, return 0; } -static int team_port_en_option_get(struct team *team, - struct team_gsetter_ctx *ctx) +static void team_port_en_option_get(struct team *team, + struct team_gsetter_ctx *ctx) { struct team_port *port = ctx->info->port; ctx->data.bool_val = team_port_enabled(port); - return 0; } static int team_port_en_option_set(struct team *team, @@ -1465,13 +1459,12 @@ static int team_port_en_option_set(struct team *team, return 0; } -static int team_user_linkup_option_get(struct team *team, - struct team_gsetter_ctx *ctx) +static void team_user_linkup_option_get(struct team *team, + struct team_gsetter_ctx *ctx) { struct team_port *port = ctx->info->port; ctx->data.bool_val = port->user.linkup; - return 0; } static void __team_carrier_check(struct team *team); @@ -1487,13 +1480,12 @@ static int team_user_linkup_option_set(struct team *team, return 0; } -static int team_user_linkup_en_option_get(struct team *team, - struct team_gsetter_ctx *ctx) +static void team_user_linkup_en_option_get(struct team *team, + struct team_gsetter_ctx *ctx) { struct team_port *port = ctx->info->port; ctx->data.bool_val = port->user.linkup_enabled; - return 0; } static int team_user_linkup_en_option_set(struct team *team, @@ -1507,13 +1499,12 @@ static int team_user_linkup_en_option_set(struct team *team, return 0; } -static int team_priority_option_get(struct team *team, - struct team_gsetter_ctx *ctx) +static void team_priority_option_get(struct team *team, + struct team_gsetter_ctx *ctx) { struct team_port *port = ctx->info->port; ctx->data.s32_val = port->priority; - return 0; } static int team_priority_option_set(struct team *team, @@ -1529,13 +1520,12 @@ static int team_priority_option_set(struct team *team, return 0; } -static int team_queue_id_option_get(struct team *team, - struct team_gsetter_ctx *ctx) +static void team_queue_id_option_get(struct team *team, + struct team_gsetter_ctx *ctx) { struct team_port *port = ctx->info->port; ctx->data.u32_val = port->queue_id; - return 0; } static int team_queue_id_option_set(struct team *team, @@ -2127,7 +2117,12 @@ static const struct ethtool_ops team_ethtool_ops = { static void team_setup_by_port(struct net_device *dev, struct net_device *port_dev) { - dev->header_ops = port_dev->header_ops; + struct team *team = netdev_priv(dev); + + if (port_dev->type == ARPHRD_ETHER) + dev->header_ops = team->header_ops_cache; + else + dev->header_ops = port_dev->header_ops; dev->type = 
port_dev->type; dev->hard_header_len = port_dev->hard_header_len; dev->needed_headroom = port_dev->needed_headroom; @@ -2174,8 +2169,11 @@ static int team_dev_type_check_change(struct net_device *dev, static void team_setup(struct net_device *dev) { + struct team *team = netdev_priv(dev); + ether_setup(dev); dev->max_mtu = ETH_MAX_MTU; + team->header_ops_cache = dev->header_ops; dev->netdev_ops = &team_netdev_ops; dev->ethtool_ops = &team_ethtool_ops; @@ -2323,8 +2321,7 @@ static struct team *team_nl_team_get(struct genl_info *info) ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]); dev = dev_get_by_index(net, ifindex); if (!dev || dev->netdev_ops != &team_netdev_ops) { - if (dev) - dev_put(dev); + dev_put(dev); return NULL; } @@ -2895,7 +2892,7 @@ static int __init team_nl_init(void) return genl_register_family(&team_nl_family); } -static void team_nl_fini(void) +static void __exit team_nl_fini(void) { genl_unregister_family(&team_nl_family); } diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c index 3147a4fdf8d9..e0f599e2a51d 100644 --- a/drivers/net/team/team_mode_activebackup.c +++ b/drivers/net/team/team_mode_activebackup.c @@ -57,14 +57,13 @@ static void ab_port_leave(struct team *team, struct team_port *port) } } -static int ab_active_port_init(struct team *team, - struct team_option_inst_info *info) +static void ab_active_port_init(struct team *team, + struct team_option_inst_info *info) { ab_priv(team)->ap_opt_inst_info = info; - return 0; } -static int ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx) +static void ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx) { struct team_port *active_port; @@ -74,7 +73,6 @@ static int ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx) ctx->data.u32_val = active_port->dev->ifindex; else ctx->data.u32_val = 0; - return 0; } static int ab_active_port_set(struct team *team, struct team_gsetter_ctx *ctx) diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c index 313a3e2d68bf..61d7d79f0c36 100644 --- a/drivers/net/team/team_mode_broadcast.c +++ b/drivers/net/team/team_mode_broadcast.c @@ -8,7 +8,6 @@ #include <linux/types.h> #include <linux/module.h> #include <linux/init.h> -#include <linux/errno.h> #include <linux/netdevice.h> #include <linux/if_team.h> diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c index 18d99fda997c..00f8989c29c0 100644 --- a/drivers/net/team/team_mode_loadbalance.c +++ b/drivers/net/team/team_mode_loadbalance.c @@ -30,8 +30,6 @@ static rx_handler_result_t lb_receive(struct team *team, struct team_port *port, struct lb_priv; typedef struct team_port *lb_select_tx_port_func_t(struct team *, - struct lb_priv *, - struct sk_buff *, unsigned char); #define LB_TX_HASHTABLE_SIZE 256 /* hash is a char */ @@ -118,8 +116,6 @@ static void lb_tx_hash_to_port_mapping_null_port(struct team *team, /* Basic tx selection based solely by hash */ static struct team_port *lb_hash_select_tx_port(struct team *team, - struct lb_priv *lb_priv, - struct sk_buff *skb, unsigned char hash) { int port_index = team_num_to_port_index(team, hash); @@ -129,17 +125,16 @@ static struct team_port *lb_hash_select_tx_port(struct team *team, /* Hash to port mapping select tx port */ static struct team_port *lb_htpm_select_tx_port(struct team *team, - struct lb_priv *lb_priv, - struct sk_buff *skb, unsigned char hash) { + struct lb_priv *lb_priv = get_lb_priv(team); 
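The signature change above means a team loadbalance tx-port selector now receives only the team and the hash: lb_priv and the skb were dead parameters for the plain hash selector, and derivable via get_lb_priv() for the hash-to-port-mapping one. A compressed userspace model of the resulting selection flow, with the RCU dereference and the real struct layout elided:

/* Userspace model of hash -> port selection with htpm fallback. */
#include <stdio.h>

#define PORT_COUNT 4

static int htpm_table[256];	/* 0 = no explicit mapping for this hash */

static int hash_select(unsigned char hash)
{
	return hash % PORT_COUNT;	/* team_num_to_port_index() analogue */
}

static int htpm_select(unsigned char hash)
{
	int port = htpm_table[hash];

	if (port)			/* explicit user mapping wins */
		return port;
	return hash_select(hash);	/* otherwise fall back to plain hash */
}

int main(void)
{
	htpm_table[7] = 3;
	printf("hash 7 -> port %d, hash 8 -> port %d\n",
	       htpm_select(7), htpm_select(8));	/* prints 3 and 0 */
	return 0;
}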
struct team_port *port; port = rcu_dereference_bh(LB_HTPM_PORT_BY_HASH(lb_priv, hash)); if (likely(port)) return port; /* If no valid port in the table, fall back to simple hash */ - return lb_hash_select_tx_port(team, lb_priv, skb, hash); + return lb_hash_select_tx_port(team, hash); } struct lb_select_tx_port { @@ -229,7 +224,7 @@ static bool lb_transmit(struct team *team, struct sk_buff *skb) hash = lb_get_skb_hash(lb_priv, skb); select_tx_port_func = rcu_dereference_bh(lb_priv->select_tx_port_func); - port = select_tx_port_func(team, lb_priv, skb, hash); + port = select_tx_port_func(team, hash); if (unlikely(!port)) goto drop; if (team_dev_queue_xmit(team, port, skb)) @@ -242,19 +237,18 @@ drop: return false; } -static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx) +static void lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx) { struct lb_priv *lb_priv = get_lb_priv(team); if (!lb_priv->ex->orig_fprog) { ctx->data.bin_val.len = 0; ctx->data.bin_val.ptr = NULL; - return 0; + return; } ctx->data.bin_val.len = lb_priv->ex->orig_fprog->len * sizeof(struct sock_filter); ctx->data.bin_val.ptr = lb_priv->ex->orig_fprog->filter; - return 0; } static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len, @@ -335,7 +329,7 @@ static void lb_bpf_func_free(struct team *team) bpf_prog_destroy(fp); } -static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx) +static void lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx) { struct lb_priv *lb_priv = get_lb_priv(team); lb_select_tx_port_func_t *func; @@ -346,7 +340,6 @@ static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx) name = lb_select_tx_port_get_name(func); BUG_ON(!name); ctx->data.str_val = name; - return 0; } static int lb_tx_method_set(struct team *team, struct team_gsetter_ctx *ctx) @@ -361,18 +354,17 @@ static int lb_tx_method_set(struct team *team, struct team_gsetter_ctx *ctx) return 0; } -static int lb_tx_hash_to_port_mapping_init(struct team *team, - struct team_option_inst_info *info) +static void lb_tx_hash_to_port_mapping_init(struct team *team, + struct team_option_inst_info *info) { struct lb_priv *lb_priv = get_lb_priv(team); unsigned char hash = info->array_index; LB_HTPM_OPT_INST_INFO_BY_HASH(lb_priv, hash) = info; - return 0; } -static int lb_tx_hash_to_port_mapping_get(struct team *team, - struct team_gsetter_ctx *ctx) +static void lb_tx_hash_to_port_mapping_get(struct team *team, + struct team_gsetter_ctx *ctx) { struct lb_priv *lb_priv = get_lb_priv(team); struct team_port *port; @@ -380,7 +372,6 @@ static int lb_tx_hash_to_port_mapping_get(struct team *team, port = LB_HTPM_PORT_BY_HASH(lb_priv, hash); ctx->data.u32_val = port ? 
port->dev->ifindex : 0; - return 0; } static int lb_tx_hash_to_port_mapping_set(struct team *team, @@ -401,44 +392,40 @@ static int lb_tx_hash_to_port_mapping_set(struct team *team, return -ENODEV; } -static int lb_hash_stats_init(struct team *team, - struct team_option_inst_info *info) +static void lb_hash_stats_init(struct team *team, + struct team_option_inst_info *info) { struct lb_priv *lb_priv = get_lb_priv(team); unsigned char hash = info->array_index; lb_priv->ex->stats.info[hash].opt_inst_info = info; - return 0; } -static int lb_hash_stats_get(struct team *team, struct team_gsetter_ctx *ctx) +static void lb_hash_stats_get(struct team *team, struct team_gsetter_ctx *ctx) { struct lb_priv *lb_priv = get_lb_priv(team); unsigned char hash = ctx->info->array_index; ctx->data.bin_val.ptr = &lb_priv->ex->stats.info[hash].stats; ctx->data.bin_val.len = sizeof(struct lb_stats); - return 0; } -static int lb_port_stats_init(struct team *team, - struct team_option_inst_info *info) +static void lb_port_stats_init(struct team *team, + struct team_option_inst_info *info) { struct team_port *port = info->port; struct lb_port_priv *lb_port_priv = get_lb_port_priv(port); lb_port_priv->stats_info.opt_inst_info = info; - return 0; } -static int lb_port_stats_get(struct team *team, struct team_gsetter_ctx *ctx) +static void lb_port_stats_get(struct team *team, struct team_gsetter_ctx *ctx) { struct team_port *port = ctx->info->port; struct lb_port_priv *lb_port_priv = get_lb_port_priv(port); ctx->data.bin_val.ptr = &lb_port_priv->stats_info.stats; ctx->data.bin_val.len = sizeof(struct lb_stats); - return 0; } static void __lb_stats_info_refresh_prepare(struct lb_stats_info *s_info) @@ -531,13 +518,12 @@ static void lb_stats_refresh(struct work_struct *work) mutex_unlock(&team->lock); } -static int lb_stats_refresh_interval_get(struct team *team, - struct team_gsetter_ctx *ctx) +static void lb_stats_refresh_interval_get(struct team *team, + struct team_gsetter_ctx *ctx) { struct lb_priv *lb_priv = get_lb_priv(team); ctx->data.u32_val = lb_priv->ex->stats.refresh_interval; - return 0; } static int lb_stats_refresh_interval_set(struct team *team, diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c index 3ec63de97ae3..dd405d82c6ac 100644 --- a/drivers/net/team/team_mode_roundrobin.c +++ b/drivers/net/team/team_mode_roundrobin.c @@ -8,7 +8,6 @@ #include <linux/types.h> #include <linux/module.h> #include <linux/init.h> -#include <linux/errno.h> #include <linux/netdevice.h> #include <linux/if_team.h> diff --git a/drivers/net/thunderbolt/main.c b/drivers/net/thunderbolt/main.c index 0c1e8970ee58..0a53ec293d04 100644 --- a/drivers/net/thunderbolt/main.c +++ b/drivers/net/thunderbolt/main.c @@ -1049,12 +1049,11 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb, *tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 0, ip_hdr(skb)->protocol, 0); - } else if (skb_is_gso_v6(skb)) { + } else if (skb_is_gso(skb) && skb_is_gso_v6(skb)) { tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data); *tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); - return false; } else if (protocol == htons(ETH_P_IPV6)) { tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset; *tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 100339bc8b04..afa5497f7c35 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1523,11 +1523,13 @@ static struct 
sk_buff *tun_alloc_skb(struct tun_file *tfile, int err; /* Under a page? Don't bother with paged skb. */ - if (prepad + len < PAGE_SIZE || !linear) + if (prepad + len < PAGE_SIZE) linear = len; + if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) + linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER); skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, - &err, 0); + &err, PAGE_ALLOC_COSTLY_ORDER); if (!skb) return ERR_PTR(err); @@ -1838,6 +1840,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, */ zerocopy = false; } else { + if (!linear) + linear = min_t(size_t, good_linear, copylen); + skb = tun_alloc_skb(tfile, align, copylen, linear, noblock); } @@ -3068,10 +3073,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, struct net *net = sock_net(&tfile->sk); struct tun_struct *tun; void __user* argp = (void __user*)arg; - unsigned int ifindex, carrier; + unsigned int carrier; struct ifreq ifr; kuid_t owner; kgid_t group; + int ifindex; int sndbuf; int vnet_hdr_sz; int le; @@ -3127,7 +3133,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, ret = -EFAULT; if (copy_from_user(&ifindex, argp, sizeof(ifindex))) goto unlock; - + ret = -EINVAL; + if (ifindex < 0) + goto unlock; ret = 0; tfile->ifindex = ifindex; goto unlock; @@ -3738,7 +3746,7 @@ err_linkops: return ret; } -static void tun_cleanup(void) +static void __exit tun_cleanup(void) { misc_deregister(&tun_miscdev); rtnl_link_unregister(&tun_link_ops); diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c index a017e9de2119..7b8afa589a53 100644 --- a/drivers/net/usb/aqc111.c +++ b/drivers/net/usb/aqc111.c @@ -1079,17 +1079,17 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb) u16 pkt_count = 0; u64 desc_hdr = 0; u16 vlan_tag = 0; - u32 skb_len = 0; + u32 skb_len; if (!skb) goto err; - if (skb->len == 0) + skb_len = skb->len; + if (skb_len < sizeof(desc_hdr)) goto err; - skb_len = skb->len; /* RX Descriptor Header */ - skb_trim(skb, skb->len - sizeof(desc_hdr)); + skb_trim(skb, skb_len - sizeof(desc_hdr)); desc_hdr = le64_to_cpup((u64 *)skb_tail_pointer(skb)); /* Check these packets */ diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index aff39bf3161d..4ea0e155bb0d 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1583,11 +1583,11 @@ static int ax88179_reset(struct usbnet *dev) *tmp16 = AX_PHYPWR_RSTCTL_IPRL; ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16); - msleep(200); + msleep(500); *tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS; ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp); - msleep(100); + msleep(200); /* Ethernet PHY Auto Detach*/ ax88179_auto_detach(dev); diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 48d7d278631e..99ec1d4a972d 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -222,13 +222,18 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc) struct usbnet *dev = netdev_priv(netdev); __le16 res; + int err; if (phy_id) { netdev_dbg(dev->net, "Only internal phy supported\n"); return 0; } - dm_read_shared_word(dev, 1, loc, &res); + err = dm_read_shared_word(dev, 1, loc, &res); + if (err < 0) { + netdev_err(dev->net, "MDIO read error: %d\n", err); + return err; + } netdev_dbg(dev->net, "dm9601_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n", diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c 
index ce1f6081d582..83b8452220ec 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -1322,11 +1322,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp) } /* close the requested serial port */ -static int hso_serial_write(struct tty_struct *tty, const unsigned char *buf, - int count) +static ssize_t hso_serial_write(struct tty_struct *tty, const u8 *buf, + size_t count) { struct hso_serial *serial = tty->driver_data; - int space, tx_bytes; unsigned long flags; /* sanity check */ @@ -1337,21 +1336,16 @@ static int hso_serial_write(struct tty_struct *tty, const unsigned char *buf, spin_lock_irqsave(&serial->serial_lock, flags); - space = serial->tx_data_length - serial->tx_buffer_count; - tx_bytes = (count < space) ? count : space; + count = min_t(size_t, serial->tx_data_length - serial->tx_buffer_count, + count); + memcpy(serial->tx_buffer + serial->tx_buffer_count, buf, count); + serial->tx_buffer_count += count; - if (!tx_bytes) - goto out; - - memcpy(serial->tx_buffer + serial->tx_buffer_count, buf, tx_bytes); - serial->tx_buffer_count += tx_bytes; - -out: spin_unlock_irqrestore(&serial->serial_lock, flags); hso_kick_transmit(serial); /* done */ - return tx_bytes; + return count; } /* how much room is there for writing */ diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 59cde06aa7f6..5add4145d9fc 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1758,7 +1758,7 @@ static void lan78xx_get_drvinfo(struct net_device *net, { struct lan78xx_net *dev = netdev_priv(net); - strncpy(info->driver, DRIVER_NAME, sizeof(info->driver)); + strscpy(info->driver, DRIVER_NAME, sizeof(info->driver)); usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info)); } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 344af3c5c836..e2e181378f41 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1289,6 +1289,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x0168, 4)}, {QMI_FIXED_INTF(0x19d2, 0x0176, 3)}, {QMI_FIXED_INTF(0x19d2, 0x0178, 3)}, + {QMI_FIXED_INTF(0x19d2, 0x0189, 4)}, /* ZTE MF290 */ {QMI_FIXED_INTF(0x19d2, 0x0191, 4)}, /* ZTE EuFi890 */ {QMI_FIXED_INTF(0x19d2, 0x0199, 1)}, /* ZTE MF820S */ {QMI_FIXED_INTF(0x19d2, 0x0200, 1)}, diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 0738baa5b82e..9bf2140fd0a1 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -764,7 +764,7 @@ enum rtl_register_content { /* rtl8152 flags */ enum rtl8152_flags { - RTL8152_UNPLUG = 0, + RTL8152_INACCESSIBLE = 0, RTL8152_SET_RX_MODE, WORK_ENABLE, RTL8152_LINK_CHG, @@ -773,6 +773,9 @@ enum rtl8152_flags { SCHEDULE_TASKLET, GREEN_ETHERNET, RX_EPROTO, + IN_PRE_RESET, + PROBED_WITH_NO_ERRORS, + PROBE_SHOULD_RETRY, }; #define DEVICE_ID_LENOVO_USB_C_TRAVEL_HUB 0x721e @@ -953,6 +956,8 @@ struct r8152 { u8 version; u8 duplex; u8 autoneg; + + unsigned int reg_access_reset_count; }; /** @@ -1200,6 +1205,96 @@ static unsigned int agg_buf_sz = 16384; #define RTL_LIMITED_TSO_SIZE (size_to_mtu(agg_buf_sz) - sizeof(struct tx_desc)) +/* If register access fails then we block access and issue a reset. If this + * happens too many times in a row without a successful access then we stop + * trying to reset and just leave access blocked. 
+ */ +#define REGISTER_ACCESS_MAX_RESETS 3 + +static void rtl_set_inaccessible(struct r8152 *tp) +{ + set_bit(RTL8152_INACCESSIBLE, &tp->flags); + smp_mb__after_atomic(); +} + +static void rtl_set_accessible(struct r8152 *tp) +{ + clear_bit(RTL8152_INACCESSIBLE, &tp->flags); + smp_mb__after_atomic(); +} + +static +int r8152_control_msg(struct r8152 *tp, unsigned int pipe, __u8 request, + __u8 requesttype, __u16 value, __u16 index, void *data, + __u16 size, const char *msg_tag) +{ + struct usb_device *udev = tp->udev; + int ret; + + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) + return -ENODEV; + + ret = usb_control_msg(udev, pipe, request, requesttype, + value, index, data, size, + USB_CTRL_GET_TIMEOUT); + + /* No need to issue a reset to report an error if the USB device got + * unplugged; just return immediately. + */ + if (ret == -ENODEV) + return ret; + + /* If the write was successful then we're done */ + if (ret >= 0) { + tp->reg_access_reset_count = 0; + return ret; + } + + dev_err(&udev->dev, + "Failed to %s %d bytes at %#06x/%#06x (%d)\n", + msg_tag, size, value, index, ret); + + /* Block all future register access until we reset. Much of the code + * in the driver doesn't check for errors. Notably, many parts of the + * driver do a read/modify/write of a register value without + * confirming that the read succeeded. Writing back modified garbage + * like this can fully wedge the adapter, requiring a power cycle. + */ + rtl_set_inaccessible(tp); + + /* If probe hasn't yet finished, then we'll request a retry of the + * whole probe routine if we get any control transfer errors. We + * never have to clear this bit since we free/reallocate the whole "tp" + * structure if we retry probe. + */ + if (!test_bit(PROBED_WITH_NO_ERRORS, &tp->flags)) { + set_bit(PROBE_SHOULD_RETRY, &tp->flags); + return ret; + } + + /* Failing to access registers in pre-reset is not surprising since we + * wouldn't be resetting if things were behaving normally. The register + * access we do in pre-reset isn't truly mandatory--we're just reusing + * the disable() function and trying to be nice by powering the + * adapter down before resetting it. Thus, if we're in pre-reset, + * we'll return right away and not try to queue up yet another reset. + * We know the post-reset is already coming. 
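The retry policy the comments above describe (block register access on the first failure, queue at most REGISTER_ACCESS_MAX_RESETS device resets, then give up) reduces to a small state machine. A userspace model assuming only what those comments state; the names and the print statements are illustrative stand-ins for usb_queue_reset_device() and dev_err():

/* Userspace model of the bounded reset-retry policy in r8152_control_msg(). */
#include <stdbool.h>
#include <stdio.h>

#define MAX_RESETS 3

static bool inaccessible;
static unsigned int reset_count;

static int access_register(bool hw_ok)
{
	if (inaccessible)
		return -1;		/* models the early -ENODEV return */

	if (hw_ok) {
		reset_count = 0;	/* any success re-arms the budget */
		return 0;
	}

	inaccessible = true;		/* block access until the reset runs */

	if (reset_count < MAX_RESETS) {
		reset_count++;
		printf("queue device reset #%u\n", reset_count);
	} else {
		printf("tried %d resets; giving up\n", MAX_RESETS);
	}
	return -1;
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		access_register(false);
		inaccessible = false;	/* post-reset handler re-enables access */
	}
	return 0;			/* resets 1..3, then the give-up path */
}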
+	 */
+	if (test_bit(IN_PRE_RESET, &tp->flags))
+		return ret;
+
+	if (tp->reg_access_reset_count < REGISTER_ACCESS_MAX_RESETS) {
+		usb_queue_reset_device(tp->intf);
+		tp->reg_access_reset_count++;
+	} else if (tp->reg_access_reset_count == REGISTER_ACCESS_MAX_RESETS) {
+		dev_err(&udev->dev,
+			"Tried to reset %d times; giving up.\n",
+			REGISTER_ACCESS_MAX_RESETS);
+	}
+
+	return ret;
+}
+
 static
 int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
 {
@@ -1210,9 +1305,10 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
 	if (!tmp)
 		return -ENOMEM;
 
-	ret = usb_control_msg(tp->udev, tp->pipe_ctrl_in,
-			      RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
-			      value, index, tmp, size, 500);
+	ret = r8152_control_msg(tp, tp->pipe_ctrl_in,
+				RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
+				value, index, tmp, size, "read");
+
 	if (ret < 0)
 		memset(data, 0xff, size);
 	else
@@ -1233,9 +1329,9 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
 	if (!tmp)
 		return -ENOMEM;
 
-	ret = usb_control_msg(tp->udev, tp->pipe_ctrl_out,
-			      RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
-			      value, index, tmp, size, 500);
+	ret = r8152_control_msg(tp, tp->pipe_ctrl_out,
+				RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
+				value, index, tmp, size, "write");
 
 	kfree(tmp);
 
@@ -1244,10 +1340,8 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
 
 static void rtl_set_unplug(struct r8152 *tp)
 {
-	if (tp->udev->state == USB_STATE_NOTATTACHED) {
-		set_bit(RTL8152_UNPLUG, &tp->flags);
-		smp_mb__after_atomic();
-	}
+	if (tp->udev->state == USB_STATE_NOTATTACHED)
+		rtl_set_inaccessible(tp);
 }
 
 static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
@@ -1256,7 +1350,7 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
 	u16 limit = 64;
 	int ret = 0;
 
-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
+	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
 		return -ENODEV;
 
 	/* both size and index must be 4-byte aligned */
@@ -1300,7 +1394,7 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
 	u16 byteen_start, byteen_end, byen;
 	u16 limit = 512;
 
-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
+	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
 		return -ENODEV;
 
 	/* both size and index must be 4-byte aligned */
@@ -1314,16 +1408,24 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
 	byteen_end = byteen & BYTE_EN_END_MASK;
 
 	byen = byteen_start | (byteen_start << 4);
-	ret = set_registers(tp, index, type | byen, 4, data);
-	if (ret < 0)
-		goto error1;
-	index += 4;
-	data += 4;
-	size -= 4;
+	/* Split the first DWORD if the byte_en is not 0xff */
+	if (byen != BYTE_EN_DWORD) {
+		ret = set_registers(tp, index, type | byen, 4, data);
+		if (ret < 0)
+			goto error1;
-	if (size) {
+		index += 4;
+		data += 4;
 		size -= 4;
+	}
+
+	if (size) {
+		byen = byteen_end | (byteen_end >> 4);
+
+		/* Split the last DWORD if the byte_en is not 0xff */
+		if (byen != BYTE_EN_DWORD)
+			size -= 4;
 
 		while (size) {
 			if (size > limit) {
@@ -1350,10 +1452,9 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
 			}
 		}
 
-		byen = byteen_end | (byteen_end >> 4);
-		ret = set_registers(tp, index, type | byen, 4, data);
-		if (ret < 0)
-			goto error1;
+		/* Set the last DWORD */
+		if (byen != BYTE_EN_DWORD)
+			ret = set_registers(tp, index, type | byen, 4, data);
 	}
 
 error1:
@@ -1530,7 +1631,7 @@ static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
 	struct r8152 *tp = netdev_priv(netdev);
 	int ret;
 
-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
+	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
 		return -ENODEV;
 
 	if (phy_id != R8152_PHY_ID)
@@ -1546,7 +1647,7 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
 {
 	struct r8152 *tp = netdev_priv(netdev);
 
-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
+	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
 		return;
 
 	if (phy_id != R8152_PHY_ID)
@@ -1751,7 +1852,7 @@ static void read_bulk_callback(struct urb *urb)
 	if (!tp)
 		return;
 
-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
+	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
 		return;
 
 	if (!test_bit(WORK_ENABLE, &tp->flags))
@@ -1843,7 +1944,7 @@ static void write_bulk_callback(struct urb *urb)
 	if (!test_bit(WORK_ENABLE, &tp->flags))
 		return;
 
-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
+	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
 		return;
 
 	if (!skb_queue_empty(&tp->tx_queue))
@@ -1864,7 +1965,7 @@ static void intr_callback(struct urb *urb)
 	if (!test_bit(WORK_ENABLE, &tp->flags))
 		return;
 
-	if (test_bit(RTL8152_UNPLUG, &tp->flags))
+	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
 		return;
 
 	switch (status) {
@@ -2442,7 +2543,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
 		}
 	}
 
-	if (list_empty(&tp->rx_done))
+	if (list_empty(&tp->rx_done) || work_done >= budget)
 		goto out1;
 
 	clear_bit(RX_EPROTO, &tp->flags);
@@ -2458,6 +2559,15 @@ static int rx_bottom(struct r8152 *tp, int budget)
 		struct urb *urb;
 		u8 *rx_data;
 
+		/* A bulk USB transfer may contain many packets, so the
+		 * total packet count can exceed the budget. Deal with all
+		 * packets in the current bulk transfer, and defer handling
+		 * of the next bulk transfer to the next schedule if the
+		 * budget is exhausted.
+		 */
+		if (work_done >= budget)
+			break;
+
 		list_del_init(cursor);
 
 		agg = list_entry(cursor, struct rx_agg, list);
@@ -2474,12 +2584,11 @@ static int rx_bottom(struct r8152 *tp, int budget)
 		while (urb->actual_length > len_used) {
 			struct net_device *netdev = tp->netdev;
 			struct net_device_stats *stats = &netdev->stats;
-			unsigned int pkt_len, rx_frag_head_sz;
+			unsigned int pkt_len, rx_frag_head_sz, len;
 			struct sk_buff *skb;
+			bool use_frags;
 
-			/* limit the skb numbers for rx_queue */
-			if (unlikely(skb_queue_len(&tp->rx_queue) >= 1000))
-				break;
+			WARN_ON_ONCE(skb_queue_len(&tp->rx_queue) >= 1000);
 
 			pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
 			if (pkt_len < ETH_ZLEN)
@@ -2490,45 +2599,77 @@ static int rx_bottom(struct r8152 *tp, int budget)
 				break;
 
 			pkt_len -= ETH_FCS_LEN;
+			len = pkt_len;
 			rx_data += sizeof(struct rx_desc);
 
-			if (!agg_free || tp->rx_copybreak > pkt_len)
-				rx_frag_head_sz = pkt_len;
+			if (!agg_free || tp->rx_copybreak > len)
+				use_frags = false;
 			else
-				rx_frag_head_sz = tp->rx_copybreak;
+				use_frags = true;
+
+			if (use_frags) {
+				/* If the budget is exhausted, the packet
+				 * would be queued in the driver. That is,
+				 * napi_gro_frags() wouldn't be called, so
+				 * we couldn't use napi_get_frags().
+				 */
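The budget handling being added in this hunk follows the general NAPI contract: never complete more than @budget packets per poll, return 0 immediately for a zero budget (r8152_poll gains exactly that check below, since netpoll may call with budget 0), and only re-arm interrupts via napi_complete_done() when the budget was not exhausted. A skeletal poll function showing that shape; everything named foo_* is hypothetical, only the napi_* calls are real API.

/* Skeleton of a budget-respecting NAPI poll (sketch, not driver code). */
#include <linux/netdevice.h>

struct foo_priv {
	struct napi_struct napi;
	/* device state elided */
};

static bool foo_rx_one(struct foo_priv *fp);		/* hypothetical: consume one packet */
static void foo_irq_enable(struct foo_priv *fp);	/* hypothetical */

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
	int work_done = 0;

	/* netpoll can invoke ->poll() with a zero budget; complete nothing */
	if (!budget)
		return 0;

	/* Never complete more than @budget packets; leftovers stay queued
	 * for the next poll, much as rx_bottom() queues them on rx_queue.
	 */
	while (work_done < budget && foo_rx_one(fp))
		work_done++;

	/* Only re-arm interrupts when the budget was not exhausted */
	if (work_done < budget && napi_complete_done(napi, work_done))
		foo_irq_enable(fp);

	return work_done;
}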
+ */ + if (work_done >= budget) { + rx_frag_head_sz = tp->rx_copybreak; + skb = napi_alloc_skb(napi, + rx_frag_head_sz); + } else { + rx_frag_head_sz = 0; + skb = napi_get_frags(napi); + } + } else { + rx_frag_head_sz = 0; + skb = napi_alloc_skb(napi, len); + } - skb = napi_alloc_skb(napi, rx_frag_head_sz); if (!skb) { stats->rx_dropped++; goto find_next_rx; } skb->ip_summed = r8152_rx_csum(tp, rx_desc); - memcpy(skb->data, rx_data, rx_frag_head_sz); - skb_put(skb, rx_frag_head_sz); - pkt_len -= rx_frag_head_sz; - rx_data += rx_frag_head_sz; - if (pkt_len) { + rtl_rx_vlan_tag(rx_desc, skb); + + if (use_frags) { + if (rx_frag_head_sz) { + memcpy(skb->data, rx_data, + rx_frag_head_sz); + skb_put(skb, rx_frag_head_sz); + len -= rx_frag_head_sz; + rx_data += rx_frag_head_sz; + skb->protocol = eth_type_trans(skb, + netdev); + } + skb_add_rx_frag(skb, 0, agg->page, agg_offset(agg, rx_data), - pkt_len, - SKB_DATA_ALIGN(pkt_len)); + len, SKB_DATA_ALIGN(len)); get_page(agg->page); + } else { + memcpy(skb->data, rx_data, len); + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, netdev); } - skb->protocol = eth_type_trans(skb, netdev); - rtl_rx_vlan_tag(rx_desc, skb); if (work_done < budget) { + if (use_frags) + napi_gro_frags(napi); + else + napi_gro_receive(napi, skb); + work_done++; stats->rx_packets++; - stats->rx_bytes += skb->len; - napi_gro_receive(napi, skb); + stats->rx_bytes += pkt_len; } else { __skb_queue_tail(&tp->rx_queue, skb); } find_next_rx: - rx_data = rx_agg_align(rx_data + pkt_len + ETH_FCS_LEN); + rx_data = rx_agg_align(rx_data + len + ETH_FCS_LEN); rx_desc = (struct rx_desc *)rx_data; len_used = agg_offset(agg, rx_data); len_used += sizeof(struct rx_desc); @@ -2557,9 +2698,10 @@ submit: } } + /* Splice the remaining list back to rx_done for the next schedule */ if (!list_empty(&rx_queue)) { spin_lock_irqsave(&tp->rx_lock, flags); - list_splice_tail(&rx_queue, &tp->rx_done); + list_splice(&rx_queue, &tp->rx_done); spin_unlock_irqrestore(&tp->rx_lock, flags); } @@ -2608,7 +2750,7 @@ static void bottom_half(struct tasklet_struct *t) { struct r8152 *tp = from_tasklet(tp, t, tx_tl); - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (!test_bit(WORK_ENABLE, &tp->flags)) @@ -2629,6 +2771,9 @@ static int r8152_poll(struct napi_struct *napi, int budget) struct r8152 *tp = container_of(napi, struct r8152, napi); int work_done; + if (!budget) + return 0; + work_done = rx_bottom(tp, budget); if (work_done < budget) { @@ -2648,7 +2793,7 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags) int ret; /* The rx would be stopped, so skip submitting */ - if (test_bit(RTL8152_UNPLUG, &tp->flags) || + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) || !test_bit(WORK_ENABLE, &tp->flags) || !netif_carrier_ok(tp->netdev)) return 0; @@ -2855,6 +3000,8 @@ static void rtl8152_nic_reset(struct r8152 *tp) ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, CR_RST); for (i = 0; i < 1000; i++) { + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) + break; if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & CR_RST)) break; usleep_range(100, 400); @@ -3048,7 +3195,7 @@ static int rtl_enable(struct r8152 *tp) static int rtl8152_enable(struct r8152 *tp) { - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; set_tx_qlen(tp); @@ -3135,7 +3282,7 @@ static int rtl8153_enable(struct r8152 *tp) { u32 ocp_data; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE,
&tp->flags)) return -ENODEV; set_tx_qlen(tp); @@ -3167,7 +3314,7 @@ static void rtl_disable(struct r8152 *tp) u32 ocp_data; int i; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) { + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); return; } @@ -3184,6 +3331,8 @@ static void rtl_disable(struct r8152 *tp) rxdy_gated_en(tp, true); for (i = 0; i < 1000; i++) { + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) + break; ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if ((ocp_data & FIFO_EMPTY) == FIFO_EMPTY) break; @@ -3191,6 +3340,8 @@ static void rtl_disable(struct r8152 *tp) } for (i = 0; i < 1000; i++) { + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) + break; if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0) & TCR0_TX_EMPTY) break; usleep_range(1000, 2000); @@ -3621,7 +3772,7 @@ static u16 r8153_phy_status(struct r8152 *tp, u16 desired) } msleep(20); - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; } @@ -3653,6 +3804,8 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable) int i; for (i = 0; i < 500; i++) { + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) + return; if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; @@ -3693,6 +3846,8 @@ static void r8153c_ups_en(struct r8152 *tp, bool enable) int i; for (i = 0; i < 500; i++) { + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) + return; if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; @@ -3971,29 +4126,10 @@ static void rtl_reset_bmu(struct r8152 *tp) /* Clear the bp to stop the firmware before loading a new one */ static void rtl_clear_bp(struct r8152 *tp, u16 type) { - switch (tp->version) { - case RTL_VER_01: - case RTL_VER_02: - case RTL_VER_07: - break; - case RTL_VER_03: - case RTL_VER_04: - case RTL_VER_05: - case RTL_VER_06: - ocp_write_byte(tp, type, PLA_BP_EN, 0); - break; - case RTL_VER_14: - ocp_write_word(tp, type, USB_BP2_EN, 0); + u16 bp[16] = {0}; + u16 bp_num; - ocp_write_word(tp, type, USB_BP_8, 0); - ocp_write_word(tp, type, USB_BP_9, 0); - ocp_write_word(tp, type, USB_BP_10, 0); - ocp_write_word(tp, type, USB_BP_11, 0); - ocp_write_word(tp, type, USB_BP_12, 0); - ocp_write_word(tp, type, USB_BP_13, 0); - ocp_write_word(tp, type, USB_BP_14, 0); - ocp_write_word(tp, type, USB_BP_15, 0); - break; + switch (tp->version) { case RTL_VER_08: case RTL_VER_09: case RTL_VER_10: @@ -4001,32 +4137,31 @@ static void rtl_clear_bp(struct r8152 *tp, u16 type) case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: - default: if (type == MCU_TYPE_USB) { ocp_write_word(tp, MCU_TYPE_USB, USB_BP2_EN, 0); - - ocp_write_word(tp, MCU_TYPE_USB, USB_BP_8, 0); - ocp_write_word(tp, MCU_TYPE_USB, USB_BP_9, 0); - ocp_write_word(tp, MCU_TYPE_USB, USB_BP_10, 0); - ocp_write_word(tp, MCU_TYPE_USB, USB_BP_11, 0); - ocp_write_word(tp, MCU_TYPE_USB, USB_BP_12, 0); - ocp_write_word(tp, MCU_TYPE_USB, USB_BP_13, 0); - ocp_write_word(tp, MCU_TYPE_USB, USB_BP_14, 0); - ocp_write_word(tp, MCU_TYPE_USB, USB_BP_15, 0); - } else { - ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0); + bp_num = 16; + break; } + fallthrough; + case RTL_VER_03: + case RTL_VER_04: + case RTL_VER_05: + case RTL_VER_06: + ocp_write_byte(tp, type, PLA_BP_EN, 0); + fallthrough; + case RTL_VER_01: + case RTL_VER_02: + case RTL_VER_07: + bp_num = 8; + break; + case RTL_VER_14: + default: + ocp_write_word(tp, type, USB_BP2_EN, 0); + bp_num = 16; break; } - ocp_write_word(tp, type, PLA_BP_0, 0); - ocp_write_word(tp, type, PLA_BP_1, 0); - ocp_write_word(tp, type, 
PLA_BP_2, 0); - ocp_write_word(tp, type, PLA_BP_3, 0); - ocp_write_word(tp, type, PLA_BP_4, 0); - ocp_write_word(tp, type, PLA_BP_5, 0); - ocp_write_word(tp, type, PLA_BP_6, 0); - ocp_write_word(tp, type, PLA_BP_7, 0); + generic_ocp_write(tp, PLA_BP_0, BYTE_EN_DWORD, bp_num << 1, bp, type); /* wait 3 ms to make sure the firmware is stopped */ usleep_range(3000, 6000); @@ -4056,6 +4191,9 @@ static int rtl_phy_patch_request(struct r8152 *tp, bool request, bool wait) for (i = 0; wait && i < 5000; i++) { u32 ocp_data; + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) + return -ENODEV; + usleep_range(1000, 2000); ocp_data = ocp_reg_read(tp, OCP_PHY_PATCH_STAT); if ((ocp_data & PATCH_READY) ^ check) @@ -5000,10 +5138,9 @@ static void rtl8152_fw_phy_nc_apply(struct r8152 *tp, struct fw_phy_nc *phy) static void rtl8152_fw_mac_apply(struct r8152 *tp, struct fw_mac *mac) { - u16 bp_en_addr, bp_index, type, bp_num, fw_ver_reg; + u16 bp_en_addr, type, fw_ver_reg; u32 length; u8 *data; - int i; switch (__le32_to_cpu(mac->blk_hdr.type)) { case RTL_FW_PLA: @@ -5045,12 +5182,8 @@ static void rtl8152_fw_mac_apply(struct r8152 *tp, struct fw_mac *mac) ocp_write_word(tp, type, __le16_to_cpu(mac->bp_ba_addr), __le16_to_cpu(mac->bp_ba_value)); - bp_index = __le16_to_cpu(mac->bp_start); - bp_num = __le16_to_cpu(mac->bp_num); - for (i = 0; i < bp_num; i++) { - ocp_write_word(tp, type, bp_index, __le16_to_cpu(mac->bp[i])); - bp_index += 2; - } + generic_ocp_write(tp, __le16_to_cpu(mac->bp_start), BYTE_EN_DWORD, + __le16_to_cpu(mac->bp_num) << 1, mac->bp, type); bp_en_addr = __le16_to_cpu(mac->bp_en_addr); if (bp_en_addr) @@ -5372,6 +5505,8 @@ static void wait_oob_link_list_ready(struct r8152 *tp) int i; for (i = 0; i < 1000; i++) { + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) + break; ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if (ocp_data & LINK_LIST_READY) break; @@ -5386,6 +5521,8 @@ static void r8156b_wait_loading_flash(struct r8152 *tp) int i; for (i = 0; i < 100; i++) { + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) + break; if (ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL) & GPHY_PATCH_DONE) break; usleep_range(1000, 2000); @@ -5508,6 +5645,8 @@ static int r8153_pre_firmware_1(struct r8152 *tp) for (i = 0; i < 104; i++) { u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_WDT1_CTRL); + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) + return -ENODEV; if (!(ocp_data & WTD1_EN)) break; usleep_range(1000, 2000); @@ -5664,6 +5803,8 @@ static void r8153_aldps_en(struct r8152 *tp, bool enable) data &= ~EN_ALDPS; ocp_reg_write(tp, OCP_POWER_CFG, data); for (i = 0; i < 20; i++) { + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) + return; usleep_range(1000, 2000); if (ocp_read_word(tp, MCU_TYPE_PLA, 0xe000) & 0x0100) break; @@ -6017,7 +6158,7 @@ static int rtl8156_enable(struct r8152 *tp) u32 ocp_data; u16 speed; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; r8156_fc_parameter(tp); @@ -6075,7 +6216,7 @@ static int rtl8156b_enable(struct r8152 *tp) u32 ocp_data; u16 speed; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; set_tx_qlen(tp); @@ -6261,7 +6402,7 @@ out: static void rtl8152_up(struct r8152 *tp) { - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8152_aldps_en(tp, false); @@ -6271,7 +6412,7 @@ static void rtl8152_up(struct r8152 *tp) static void rtl8152_down(struct r8152 *tp) { - if (test_bit(RTL8152_UNPLUG, 
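The rtl_clear_bp() rework above collapses what used to be eight or sixteen individual ocp_write_word() calls into a single burst transfer: a zeroed u16 array goes out through generic_ocp_write() with full-DWORD byte enables, and bp_num << 1 turns the register count into a byte length. Schematically (a sketch of the idea; the loop form assumes the BP registers are consecutive, which the burst write relies on anyway):

/* Before: one control transfer per breakpoint register. */
for (i = 0; i < bp_num; i++)
	ocp_write_word(tp, type, PLA_BP_0 + i * 2, 0);

/* After: one burst write of bp_num zeroed 16-bit registers. */
u16 bp[16] = {0};

generic_ocp_write(tp, PLA_BP_0, BYTE_EN_DWORD, bp_num << 1, bp, type);

Fewer control transfers also means fewer chances to trip the register-access failure handling introduced earlier in this patch.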
&tp->flags)) { + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); return; } @@ -6286,7 +6427,7 @@ static void rtl8153_up(struct r8152 *tp) { u32 ocp_data; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153_u1u2en(tp, false); @@ -6326,7 +6467,7 @@ static void rtl8153_down(struct r8152 *tp) { u32 ocp_data; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) { + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); return; } @@ -6347,7 +6488,7 @@ static void rtl8153b_up(struct r8152 *tp) { u32 ocp_data; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_u1u2en(tp, false); @@ -6371,7 +6512,7 @@ static void rtl8153b_down(struct r8152 *tp) { u32 ocp_data; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) { + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); return; } @@ -6408,7 +6549,7 @@ static void rtl8153c_up(struct r8152 *tp) { u32 ocp_data; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_u1u2en(tp, false); @@ -6489,7 +6630,7 @@ static void rtl8156_up(struct r8152 *tp) { u32 ocp_data; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_u1u2en(tp, false); @@ -6562,7 +6703,7 @@ static void rtl8156_down(struct r8152 *tp) { u32 ocp_data; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) { + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); return; } @@ -6700,7 +6841,7 @@ static void rtl_work_func_t(struct work_struct *work) /* If the device is unplugged or !netif_running(), the workqueue * doesn't need to wake the device, and could return directly. */ - if (test_bit(RTL8152_UNPLUG, &tp->flags) || !netif_running(tp->netdev)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) || !netif_running(tp->netdev)) return; if (usb_autopm_get_interface(tp->intf) < 0) @@ -6739,7 +6880,7 @@ static void rtl_hw_phy_work_func_t(struct work_struct *work) { struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work); - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (usb_autopm_get_interface(tp->intf) < 0) @@ -6866,7 +7007,7 @@ static int rtl8152_close(struct net_device *netdev) netif_stop_queue(netdev); res = usb_autopm_get_interface(tp->intf); - if (res < 0 || test_bit(RTL8152_UNPLUG, &tp->flags)) { + if (res < 0 || test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); rtl_stop_rx(tp); } else { @@ -6899,7 +7040,7 @@ static void r8152b_init(struct r8152 *tp) u32 ocp_data; u16 data; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; data = r8152_mdio_read(tp, MII_BMCR); @@ -6943,7 +7084,7 @@ static void r8153_init(struct r8152 *tp) u16 data; int i; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153_u1u2en(tp, false); @@ -6954,7 +7095,7 @@ static void r8153_init(struct r8152 *tp) break; msleep(20); - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; } @@ -7083,7 +7224,7 @@ static void r8153b_init(struct r8152 *tp) u16 data; int i; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_u1u2en(tp, false); @@ -7094,7 +7235,7 @@ static void r8153b_init(struct r8152 *tp) break; msleep(20); - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + 
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; } @@ -7165,7 +7306,7 @@ static void r8153c_init(struct r8152 *tp) u16 data; int i; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_u1u2en(tp, false); @@ -7185,7 +7326,7 @@ static void r8153c_init(struct r8152 *tp) break; msleep(20); - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; } @@ -8014,7 +8155,7 @@ static void r8156_init(struct r8152 *tp) u16 data; int i; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP); @@ -8035,7 +8176,7 @@ static void r8156_init(struct r8152 *tp) break; msleep(20); - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; } @@ -8110,7 +8251,7 @@ static void r8156b_init(struct r8152 *tp) u16 data; int i; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP); @@ -8144,7 +8285,7 @@ static void r8156b_init(struct r8152 *tp) break; msleep(20); - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; } @@ -8270,7 +8411,9 @@ static int rtl8152_pre_reset(struct usb_interface *intf) struct r8152 *tp = usb_get_intfdata(intf); struct net_device *netdev; - if (!tp) + rtnl_lock(); + + if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags)) return 0; netdev = tp->netdev; @@ -8285,7 +8428,9 @@ static int rtl8152_pre_reset(struct usb_interface *intf) napi_disable(&tp->napi); if (netif_carrier_ok(netdev)) { mutex_lock(&tp->control); + set_bit(IN_PRE_RESET, &tp->flags); tp->rtl_ops.disable(tp); + clear_bit(IN_PRE_RESET, &tp->flags); mutex_unlock(&tp->control); } @@ -8298,19 +8443,18 @@ static int rtl8152_post_reset(struct usb_interface *intf) struct net_device *netdev; struct sockaddr sa; - if (!tp) - return 0; + if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags)) + goto exit; + + rtl_set_accessible(tp); /* reset the MAC address in case of policy change */ - if (determine_ethernet_addr(tp, &sa) >= 0) { - rtnl_lock(); + if (determine_ethernet_addr(tp, &sa) >= 0) dev_set_mac_address (tp->netdev, &sa, NULL); - rtnl_unlock(); - } netdev = tp->netdev; if (!netif_running(netdev)) - return 0; + goto exit; set_bit(WORK_ENABLE, &tp->flags); if (netif_carrier_ok(netdev)) { @@ -8329,6 +8473,8 @@ static int rtl8152_post_reset(struct usb_interface *intf) if (!list_empty(&tp->rx_done)) napi_schedule(&tp->napi); +exit: + rtnl_unlock(); return 0; } @@ -9173,7 +9319,7 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) struct mii_ioctl_data *data = if_mii(rq); int res; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; res = usb_autopm_get_interface(tp->intf); @@ -9275,7 +9421,7 @@ static const struct net_device_ops rtl8152_netdev_ops = { static void rtl8152_unload(struct r8152 *tp) { - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (tp->version != RTL_VER_01) @@ -9284,7 +9430,7 @@ static void rtl8152_unload(struct r8152 *tp) static void rtl8153_unload(struct r8152 *tp) { - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153_power_cut_en(tp, false); @@ -9292,7 +9438,7 @@ static void rtl8153_unload(struct r8152 *tp) static void 
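The pre/post-reset rework visible above turns the two callbacks into a bracket: rtl8152_pre_reset() now takes the RTNL lock and deliberately leaves it held, and rtl8152_post_reset() releases it, so netdev state cannot change midway through a reset. Both sides also bail out early unless probe finished cleanly. In outline (a control-flow sketch only, using the flag and helper names from the hunks):

static int pre_reset_sketch(struct usb_interface *intf)
{
	struct r8152 *tp = usb_get_intfdata(intf);

	rtnl_lock();				/* released in post_reset() */
	if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
		return 0;

	set_bit(IN_PRE_RESET, &tp->flags);	/* suppress reset-on-error */
	/* ... quiesce tx/rx and disable the NIC ... */
	clear_bit(IN_PRE_RESET, &tp->flags);
	return 0;
}

static int post_reset_sketch(struct usb_interface *intf)
{
	struct r8152 *tp = usb_get_intfdata(intf);

	if (tp && test_bit(PROBED_WITH_NO_ERRORS, &tp->flags)) {
		rtl_set_accessible(tp);		/* registers reachable again */
		/* ... restore MAC address and restart tx/rx ... */
	}
	rtnl_unlock();
	return 0;
}

Holding RTNL across the pair is also what lets post_reset() call dev_set_mac_address() directly instead of wrapping it in its own lock/unlock, as the hunk further down shows.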
rtl8153b_unload(struct r8152 *tp) { - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_power_cut_en(tp, false); @@ -9502,16 +9648,29 @@ static u8 __rtl_get_hw_ver(struct usb_device *udev) __le32 *tmp; u8 version; int ret; + int i; tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) return 0; - ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), - RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, - PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500); - if (ret > 0) - ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK; + /* Retry up to 3 times in case there is a transitory error. We do this + * since retrying a read of the version is always safe and this + * function doesn't take advantage of r8152_control_msg(). + */ + for (i = 0; i < 3; i++) { + ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), + RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, + PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp), + USB_CTRL_GET_TIMEOUT); + if (ret > 0) { + ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK; + break; + } + } + + if (i != 0 && ret > 0) + dev_warn(&udev->dev, "Needed %d retries to read version\n", i); kfree(tmp); @@ -9610,25 +9769,14 @@ static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev) return 0; } -static int rtl8152_probe(struct usb_interface *intf, - const struct usb_device_id *id) +static int rtl8152_probe_once(struct usb_interface *intf, + const struct usb_device_id *id, u8 version) { struct usb_device *udev = interface_to_usbdev(intf); struct r8152 *tp; struct net_device *netdev; - u8 version; int ret; - if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC) - return -ENODEV; - - if (!rtl_check_vendor_ok(intf)) - return -ENODEV; - - version = rtl8152_get_version(intf); - if (version == RTL_VER_UNKNOWN) - return -ENODEV; - usb_reset_device(udev); netdev = alloc_etherdev(sizeof(struct r8152)); if (!netdev) { @@ -9778,8 +9926,7 @@ static int rtl8152_probe(struct usb_interface *intf, usb_set_intfdata(intf, tp); - netif_napi_add_weight(netdev, &tp->napi, r8152_poll, - tp->support_2500full ? 256 : 64); + netif_napi_add(netdev, &tp->napi, r8152_poll); ret = register_netdev(netdev); if (ret != 0) { @@ -9792,18 +9939,68 @@ static int rtl8152_probe(struct usb_interface *intf, else device_set_wakeup_enable(&udev->dev, false); + /* If we saw a control transfer error while probing then we may + * want to try probe() again. Consider this an error. 
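Two recovery paths added in this area share the same bounded-retry idiom: retry a small fixed number of times, warn when retries were needed, and give up loudly instead of looping forever. The version read above retries the raw usb_control_msg() directly; the probe path below maps a sentinel -EAGAIN from rtl8152_probe_once() onto at most RTL8152_PROBE_TRIES attempts. The pattern in isolation (a generic sketch, not driver code):

/* Sketch: bounded retry around a "try once" operation. */
#define MAX_TRIES	3

static int do_with_retries(struct device *dev, int (*once)(struct device *))
{
	int ret, i;

	for (i = 0; i < MAX_TRIES; i++) {
		ret = once(dev);
		if (ret != -EAGAIN)		/* success or a hard error */
			break;
	}
	if (ret == -EAGAIN) {
		dev_err(dev, "failed after %d tries; giving up\n", i);
		return -ENODEV;			/* don't leak the sentinel */
	}
	if (i)
		dev_warn(dev, "needed %d retries\n", i);
	return ret;
}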
+ */ + if (test_bit(PROBE_SHOULD_RETRY, &tp->flags)) + goto out2; + + set_bit(PROBED_WITH_NO_ERRORS, &tp->flags); netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION); return 0; +out2: + unregister_netdev(netdev); + out1: tasklet_kill(&tp->tx_tl); + cancel_delayed_work_sync(&tp->hw_phy_work); + if (tp->rtl_ops.unload) + tp->rtl_ops.unload(tp); + rtl8152_release_firmware(tp); usb_set_intfdata(intf, NULL); out: + if (test_bit(PROBE_SHOULD_RETRY, &tp->flags)) + ret = -EAGAIN; + free_netdev(netdev); return ret; } +#define RTL8152_PROBE_TRIES 3 + +static int rtl8152_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + u8 version; + int ret; + int i; + + if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC) + return -ENODEV; + + if (!rtl_check_vendor_ok(intf)) + return -ENODEV; + + version = rtl8152_get_version(intf); + if (version == RTL_VER_UNKNOWN) + return -ENODEV; + + for (i = 0; i < RTL8152_PROBE_TRIES; i++) { + ret = rtl8152_probe_once(intf, id, version); + if (ret != -EAGAIN) + break; + } + if (ret == -EAGAIN) { + dev_err(&intf->dev, + "r8152 failed probe after %d tries; giving up\n", i); + return -ENODEV; + } + + return ret; +} + static void rtl8152_disconnect(struct usb_interface *intf) { struct r8152 *tp = usb_get_intfdata(intf); @@ -9851,6 +10048,8 @@ static const struct usb_device_id rtl8152_table[] = { { USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041) }, { USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) }, { USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) }, + { USB_DEVICE(VENDOR_ID_DLINK, 0xb301) }, + { USB_DEVICE(VENDOR_ID_ASUS, 0x1976) }, {} }; diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 5d6454fedb3f..78ad2da3ee29 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -90,7 +90,9 @@ static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index, ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); - if (unlikely(ret < 0)) { + if (unlikely(ret < 4)) { + ret = ret < 0 ? ret : -ENODATA; + netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n", index, ret); return ret; diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 563ecd27b93e..a530f20ee257 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -95,7 +95,9 @@ static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index, ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); - if (ret < 0) { + if (ret < 4) { + ret = ret < 0 ? 
ret : -ENODATA; + if (ret != -ENODEV) netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n", index, ret); @@ -897,7 +899,7 @@ static int smsc95xx_reset(struct usbnet *dev) if (timeout >= 100) { netdev_warn(dev->net, "timeout waiting for completion of Lite Reset\n"); - return ret; + return -ETIMEDOUT; } ret = smsc95xx_set_mac_address(dev); diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index f5e19f3ef6cd..143bd4ab160d 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c @@ -474,8 +474,8 @@ static void sr_get_drvinfo(struct net_device *net, { /* Inherit standard device info */ usbnet_get_drvinfo(net, info); - strncpy(info->driver, DRIVER_NAME, sizeof(info->driver)); - strncpy(info->version, DRIVER_VERSION, sizeof(info->version)); + strscpy(info->driver, DRIVER_NAME, sizeof(info->driver)); + strscpy(info->version, DRIVER_VERSION, sizeof(info->version)); } static u32 sr_get_link(struct net_device *net) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index ef8eacb596f7..977861c46b1f 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -26,7 +26,7 @@ #include <linux/ptr_ring.h> #include <linux/bpf_trace.h> #include <linux/net_tstamp.h> -#include <net/page_pool.h> +#include <net/page_pool/helpers.h> #define DRV_NAME "veth" #define DRV_VERSION "1.0" @@ -236,8 +236,8 @@ static void veth_get_ethtool_stats(struct net_device *dev, data[tx_idx + j] += *(u64 *)(base + offset); } } while (u64_stats_fetch_retry(&rq_stats->syncp, start)); - pp_idx = tx_idx + VETH_TQ_STATS_LEN; } + pp_idx = idx + dev->real_num_tx_queues * VETH_TQ_STATS_LEN; page_pool_stats: veth_get_page_pool_stats(dev, &data[pp_idx]); @@ -344,6 +344,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) { struct veth_priv *rcv_priv, *priv = netdev_priv(dev); struct veth_rq *rq = NULL; + int ret = NETDEV_TX_OK; struct net_device *rcv; int length = skb->len; bool use_napi = false; @@ -372,26 +373,18 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) skb_tx_timestamp(skb); if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) { if (!use_napi) - dev_lstats_add(dev, length); + dev_sw_netstats_tx_add(dev, 1, length); + else + __veth_xdp_flush(rq); } else { drop: atomic64_inc(&priv->dropped); + ret = NET_XMIT_DROP; } - if (use_napi) - __veth_xdp_flush(rq); - rcu_read_unlock(); - return NETDEV_TX_OK; -} - -static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes) -{ - struct veth_priv *priv = netdev_priv(dev); - - dev_lstats_read(dev, packets, bytes); - return atomic64_read(&priv->dropped); + return ret; } static void veth_stats_rx(struct veth_stats *result, struct net_device *dev) @@ -431,24 +424,24 @@ static void veth_get_stats64(struct net_device *dev, struct veth_priv *priv = netdev_priv(dev); struct net_device *peer; struct veth_stats rx; - u64 packets, bytes; - tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes); - tot->tx_bytes = bytes; - tot->tx_packets = packets; + tot->tx_dropped = atomic64_read(&priv->dropped); + dev_fetch_sw_netstats(tot, dev->tstats); veth_stats_rx(&rx, dev); tot->tx_dropped += rx.xdp_tx_err; tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err; - tot->rx_bytes = rx.xdp_bytes; - tot->rx_packets = rx.xdp_packets; + tot->rx_bytes += rx.xdp_bytes; + tot->rx_packets += rx.xdp_packets; rcu_read_lock(); peer = rcu_dereference(priv->peer); if (peer) { - veth_stats_tx(peer, &packets, &bytes); - tot->rx_bytes += bytes; - tot->rx_packets += packets; + struct rtnl_link_stats64 tot_peer 
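The veth statistics conversion running through these hunks retires the driver-private pcpu_lstats in favour of the core-managed per-CPU tstats: dev_sw_netstats_tx_add() bumps the counters on transmit, dev_fetch_sw_netstats() folds them for ndo_get_stats64(), and setting dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS (in veth_setup(), further down) makes the core allocate and free the per-CPU area, which is why veth_dev_init() shrinks to a bare veth_alloc_queues(). Reduced to its three touchpoints (a sketch under those assumptions):

/* Sketch: moving a virtual driver to core-managed tstats. */
static void setup_sketch(struct net_device *dev)
{
	/* the core now allocates/frees dev->tstats with the netdev */
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}

static netdev_tx_t xmit_sketch(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;

	/* ... hand the skb to the peer device ... */
	dev_sw_netstats_tx_add(dev, 1, len);	/* 1 packet, len bytes */
	return NETDEV_TX_OK;
}

static void get_stats64_sketch(struct net_device *dev,
			       struct rtnl_link_stats64 *tot)
{
	dev_fetch_sw_netstats(tot, dev->tstats);	/* u64_stats-safe fold */
}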
= {}; + + dev_fetch_sw_netstats(&tot_peer, peer->tstats); + tot->rx_bytes += tot_peer.tx_bytes; + tot->rx_packets += tot_peer.tx_packets; veth_stats_rx(&rx, peer); tot->tx_dropped += rx.peer_tq_xdp_xmit_err; @@ -736,10 +729,11 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq, if (skb_shared(skb) || skb_head_is_locked(skb) || skb_shinfo(skb)->nr_frags || skb_headroom(skb) < XDP_PACKET_HEADROOM) { - u32 size, len, max_head_size, off; + u32 size, len, max_head_size, off, truesize, page_offset; struct sk_buff *nskb; struct page *page; int i, head_off; + void *va; /* We need a private copy of the skb and data buffers since * the ebpf program can modify it. We segment the original skb @@ -752,14 +746,17 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq, if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size) goto drop; + size = min_t(u32, skb->len, max_head_size); + truesize = SKB_HEAD_ALIGN(size) + VETH_XDP_HEADROOM; + /* Allocate skb head */ - page = page_pool_dev_alloc_pages(rq->page_pool); - if (!page) + va = page_pool_dev_alloc_va(rq->page_pool, &truesize); + if (!va) goto drop; - nskb = napi_build_skb(page_address(page), PAGE_SIZE); + nskb = napi_build_skb(va, truesize); if (!nskb) { - page_pool_put_full_page(rq->page_pool, page, true); + page_pool_free_va(rq->page_pool, va, true); goto drop; } @@ -767,7 +764,6 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq, skb_copy_header(nskb, skb); skb_mark_for_recycle(nskb); - size = min_t(u32, skb->len, max_head_size); if (skb_copy_bits(skb, 0, nskb->data, size)) { consume_skb(nskb); goto drop; @@ -782,15 +778,20 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq, len = skb->len - off; for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) { - page = page_pool_dev_alloc_pages(rq->page_pool); + size = min_t(u32, len, PAGE_SIZE); + truesize = size; + + page = page_pool_dev_alloc(rq->page_pool, &page_offset, + &truesize); if (!page) { consume_skb(nskb); goto drop; } - size = min_t(u32, len, PAGE_SIZE); - skb_add_rx_frag(nskb, i, page, 0, size, PAGE_SIZE); - if (skb_copy_bits(skb, off, page_address(page), + skb_add_rx_frag(nskb, i, page, page_offset, size, + truesize); + if (skb_copy_bits(skb, off, + page_address(page) + page_offset, size)) { consume_skb(nskb); goto drop; @@ -1445,6 +1446,8 @@ static int veth_open(struct net_device *dev) netif_carrier_on(peer); } + veth_set_xdp_features(dev); + return 0; } @@ -1496,25 +1499,12 @@ static void veth_free_queues(struct net_device *dev) static int veth_dev_init(struct net_device *dev) { - int err; - - dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats); - if (!dev->lstats) - return -ENOMEM; - - err = veth_alloc_queues(dev); - if (err) { - free_percpu(dev->lstats); - return err; - } - - return 0; + return veth_alloc_queues(dev); } static void veth_dev_free(struct net_device *dev) { veth_free_queues(dev); - free_percpu(dev->lstats); } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -1786,6 +1776,7 @@ static void veth_setup(struct net_device *dev) NETIF_F_HW_VLAN_STAG_RX); dev->needs_free_netdev = true; dev->priv_destructor = veth_dev_free; + dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; dev->max_mtu = ETH_MAX_MTU; dev->hw_features = VETH_FEATURES; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 8e9f4cfe941f..d16f592c2061 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -22,6 +22,7 @@ #include <net/route.h> #include <net/xdp.h> #include <net/net_failover.h> +#include <net/netdev_rx_queue.h> static int napi_weight = 
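The veth XDP-copy path above also moves from raw page_pool pages to the VA-based helpers from net/page_pool/helpers.h: page_pool_dev_alloc_va() returns a kernel virtual address and may round the requested truesize up, so its result feeds napi_build_skb() directly, while page_pool_dev_alloc() hands back a page plus offset for the sub-page frags. The head-allocation pattern on its own (a minimal sketch assuming an already-configured pool; size and headroom are caller-chosen):

/* Sketch: building an skb head straight from a page_pool VA. */
static struct sk_buff *build_head_sketch(struct page_pool *pool,
					 unsigned int size,
					 unsigned int headroom)
{
	unsigned int truesize = SKB_HEAD_ALIGN(size) + headroom;
	struct sk_buff *skb;
	void *va;

	va = page_pool_dev_alloc_va(pool, &truesize);	/* may round truesize up */
	if (!va)
		return NULL;

	skb = napi_build_skb(va, truesize);
	if (!skb) {
		page_pool_free_va(pool, va, true);	/* allow direct recycle */
		return NULL;
	}

	skb_mark_for_recycle(skb);	/* recycle the memory back to the pool */
	return skb;
}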
NAPI_POLL_WEIGHT; module_param(napi_weight, int, 0444); @@ -80,24 +81,24 @@ struct virtnet_stat_desc { struct virtnet_sq_stats { struct u64_stats_sync syncp; - u64 packets; - u64 bytes; - u64 xdp_tx; - u64 xdp_tx_drops; - u64 kicks; - u64 tx_timeouts; + u64_stats_t packets; + u64_stats_t bytes; + u64_stats_t xdp_tx; + u64_stats_t xdp_tx_drops; + u64_stats_t kicks; + u64_stats_t tx_timeouts; }; struct virtnet_rq_stats { struct u64_stats_sync syncp; - u64 packets; - u64 bytes; - u64 drops; - u64 xdp_packets; - u64 xdp_tx; - u64 xdp_redirects; - u64 xdp_drops; - u64 kicks; + u64_stats_t packets; + u64_stats_t bytes; + u64_stats_t drops; + u64_stats_t xdp_packets; + u64_stats_t xdp_tx; + u64_stats_t xdp_redirects; + u64_stats_t xdp_drops; + u64_stats_t kicks; }; #define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m) @@ -126,6 +127,19 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = { #define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc) #define VIRTNET_RQ_STATS_LEN ARRAY_SIZE(virtnet_rq_stats_desc) +struct virtnet_interrupt_coalesce { + u32 max_packets; + u32 max_usecs; +}; + +/* The dma information of pages allocated at a time. */ +struct virtnet_rq_dma { + dma_addr_t addr; + u32 ref; + u16 len; + u16 need_sync; +}; + /* Internal representation of a send virtqueue */ struct send_queue { /* Virtqueue associated with this send _queue */ struct virtqueue *vq; @@ -139,6 +153,8 @@ struct send_queue { struct virtnet_sq_stats stats; + struct virtnet_interrupt_coalesce intr_coal; + struct napi_struct napi; /* Record whether sq is in reset state. */ @@ -156,6 +172,8 @@ struct receive_queue { struct virtnet_rq_stats stats; + struct virtnet_interrupt_coalesce intr_coal; + /* Chain pages by the private ptr. */ struct page *pages; @@ -175,6 +193,12 @@ struct receive_queue { char name[16]; struct xdp_rxq_info xdp_rxq; + + /* Record the last dma info to free after a new page is allocated.
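The u64 -> u64_stats_t switch above is about torn reads on 32-bit SMP: writers bump the counters between u64_stats_update_begin()/end() on the queue's syncp, and readers spin on u64_stats_fetch_begin()/u64_stats_fetch_retry() until they observe a consistent snapshot (on 64-bit all of this compiles down to plain loads and stores). The canonical pairing with a single counter (a minimal sketch):

struct rx_stats_sketch {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
};

/* Writer: NAPI context, one writer per queue, so no writer/writer races. */
static void stats_inc_sketch(struct rx_stats_sketch *s)
{
	u64_stats_update_begin(&s->syncp);
	u64_stats_inc(&s->packets);
	u64_stats_update_end(&s->syncp);
}

/* Reader: may run concurrently with the writer on another CPU. */
static u64 stats_read_sketch(struct rx_stats_sketch *s)
{
	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		packets = u64_stats_read(&s->packets);
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return packets;
}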
*/ + struct virtnet_rq_dma *last_dma; + + /* Do dma by self */ + bool do_dma; }; /* This structure can contain rss message with maximum settings for indirection table and keysize @@ -207,6 +231,7 @@ struct control_buf { struct virtio_net_ctrl_rss rss; struct virtio_net_ctrl_coal_tx coal_tx; struct virtio_net_ctrl_coal_rx coal_rx; + struct virtio_net_ctrl_coal_vq coal_vq; }; struct virtnet_info { @@ -281,10 +306,8 @@ struct virtnet_info { u32 speed; /* Interrupt coalescing settings */ - u32 tx_usecs; - u32 rx_usecs; - u32 tx_max_packets; - u32 rx_max_packets; + struct virtnet_interrupt_coalesce intr_coal_tx; + struct virtnet_interrupt_coalesce intr_coal_rx; unsigned long guest_offloads; unsigned long guest_offloads_capable; @@ -303,6 +326,14 @@ struct padded_vnet_hdr { char padding[12]; }; +struct virtio_net_common_hdr { + union { + struct virtio_net_hdr hdr; + struct virtio_net_hdr_mrg_rxbuf mrg_hdr; + struct virtio_net_hdr_v1_hash hash_v1_hdr; + }; +}; + static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf); static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf); @@ -344,9 +375,10 @@ static int rxq2vq(int rxq) return rxq * 2; } -static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb) +static inline struct virtio_net_common_hdr * +skb_vnet_common_hdr(struct sk_buff *skb) { - return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb; + return (struct virtio_net_common_hdr *)skb->cb; } /* @@ -469,7 +501,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, unsigned int headroom) { struct sk_buff *skb; - struct virtio_net_hdr_mrg_rxbuf *hdr; + struct virtio_net_common_hdr *hdr; unsigned int copy, hdr_len, hdr_padded_len; struct page *page_to_free = NULL; int tailroom, shinfo_size; @@ -554,7 +586,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, give_pages(rq, page); ok: - hdr = skb_vnet_hdr(skb); + hdr = skb_vnet_common_hdr(skb); memcpy(hdr, hdr_p, hdr_len); if (page_to_free) put_page(page_to_free); @@ -562,6 +594,156 @@ ok: return skb; } +static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len) +{ + struct page *page = virt_to_head_page(buf); + struct virtnet_rq_dma *dma; + void *head; + int offset; + + head = page_address(page); + + dma = head; + + --dma->ref; + + if (dma->need_sync && len) { + offset = buf - (head + sizeof(*dma)); + + virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr, + offset, len, + DMA_FROM_DEVICE); + } + + if (dma->ref) + return; + + virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len, + DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + put_page(page); +} + +static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx) +{ + void *buf; + + buf = virtqueue_get_buf_ctx(rq->vq, len, ctx); + if (buf && rq->do_dma) + virtnet_rq_unmap(rq, buf, *len); + + return buf; +} + +static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq) +{ + void *buf; + + buf = virtqueue_detach_unused_buf(rq->vq); + if (buf && rq->do_dma) + virtnet_rq_unmap(rq, buf, 0); + + return buf; +} + +static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len) +{ + struct virtnet_rq_dma *dma; + dma_addr_t addr; + u32 offset; + void *head; + + if (!rq->do_dma) { + sg_init_one(rq->sg, buf, len); + return; + } + + head = page_address(rq->alloc_frag.page); + + offset = buf - head; + + dma = head; + + addr = dma->addr - sizeof(*dma) + offset; + + sg_init_table(rq->sg, 1); + rq->sg[0].dma_address = addr; + rq->sg[0].length = len; +} + +static void 
*virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp) +{ + struct page_frag *alloc_frag = &rq->alloc_frag; + struct virtnet_rq_dma *dma; + void *buf, *head; + dma_addr_t addr; + + if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp))) + return NULL; + + head = page_address(alloc_frag->page); + + if (rq->do_dma) { + dma = head; + + /* new pages */ + if (!alloc_frag->offset) { + if (rq->last_dma) { + /* Now, the new page is allocated, the last dma + * will not be used. So the dma can be unmapped + * if the ref is 0. + */ + virtnet_rq_unmap(rq, rq->last_dma, 0); + rq->last_dma = NULL; + } + + dma->len = alloc_frag->size - sizeof(*dma); + + addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1, + dma->len, DMA_FROM_DEVICE, 0); + if (virtqueue_dma_mapping_error(rq->vq, addr)) + return NULL; + + dma->addr = addr; + dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr); + + /* Add a reference to dma to prevent the entire dma from + * being released during error handling. This reference + * will be freed after the pages are no longer used. + */ + get_page(alloc_frag->page); + dma->ref = 1; + alloc_frag->offset = sizeof(*dma); + + rq->last_dma = dma; + } + + ++dma->ref; + } + + buf = head + alloc_frag->offset; + + get_page(alloc_frag->page); + alloc_frag->offset += size; + + return buf; +} + +static void virtnet_rq_set_premapped(struct virtnet_info *vi) +{ + int i; + + /* disable for big mode */ + if (!vi->mergeable_rx_bufs && vi->big_packets) + return; + + for (i = 0; i < vi->max_queue_pairs; i++) { + if (virtqueue_set_dma_premapped(vi->rq[i].vq)) + continue; + + vi->rq[i].do_dma = true; + } +} + static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) { unsigned int len; @@ -593,8 +775,8 @@ static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) return; u64_stats_update_begin(&sq->stats.syncp); - sq->stats.bytes += bytes; - sq->stats.packets += packets; + u64_stats_add(&sq->stats.bytes, bytes); + u64_stats_add(&sq->stats.packets, packets); u64_stats_update_end(&sq->stats.syncp); } @@ -793,11 +975,11 @@ static int virtnet_xdp_xmit(struct net_device *dev, } out: u64_stats_update_begin(&sq->stats.syncp); - sq->stats.bytes += bytes; - sq->stats.packets += packets; - sq->stats.xdp_tx += n; - sq->stats.xdp_tx_drops += n - nxmit; - sq->stats.kicks += kicks; + u64_stats_add(&sq->stats.bytes, bytes); + u64_stats_add(&sq->stats.packets, packets); + u64_stats_add(&sq->stats.xdp_tx, n); + u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit); + u64_stats_add(&sq->stats.kicks, kicks); u64_stats_update_end(&sq->stats.syncp); virtnet_xdp_put_sq(vi, sq); @@ -829,14 +1011,14 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp, u32 act; act = bpf_prog_run_xdp(xdp_prog, xdp); - stats->xdp_packets++; + u64_stats_inc(&stats->xdp_packets); switch (act) { case XDP_PASS: return act; case XDP_TX: - stats->xdp_tx++; + u64_stats_inc(&stats->xdp_tx); xdpf = xdp_convert_buff_to_frame(xdp); if (unlikely(!xdpf)) { netdev_dbg(dev, "convert buff to frame failed for xdp\n"); @@ -854,7 +1036,7 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp, return act; case XDP_REDIRECT: - stats->xdp_redirects++; + u64_stats_inc(&stats->xdp_redirects); err = xdp_do_redirect(dev, xdp, xdp_prog); if (err) return XDP_DROP; @@ -917,7 +1099,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, void *buf; int off; - buf = virtqueue_get_buf(rq->vq, &buflen); + buf = virtnet_rq_get_buf(rq, &buflen, NULL); if (unlikely(!buf)) goto err_buf; 
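virtnet_rq_alloc() above is the heart of the new premapped receive path: each page backing the rq's page_frag starts with a struct virtnet_rq_dma header, the rest of the page is DMA-mapped once with virtqueue_dma_map_single_attrs(), and every buffer carved out of the page takes a reference on that header. virtnet_rq_unmap() then drops references and only unmaps when the last buffer from the page is gone; virtqueue_set_dma_premapped() (called from virtnet_rq_set_premapped() above) is what tells the virtqueue that the driver now owns the mapping. The release side condensed (a sketch of the refcounting only, without the sync handling of the real function):

/* Sketch: per-page DMA header with buffer refcounting. */
static void rq_buf_put_sketch(struct receive_queue *rq, void *buf)
{
	struct page *page = virt_to_head_page(buf);
	struct virtnet_rq_dma *dma = page_address(page);	/* header */

	if (--dma->ref)
		return;		/* other buffers still use this mapping */

	virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
					 DMA_FROM_DEVICE,
					 DMA_ATTR_SKIP_CPU_SYNC);
	put_page(page);
}

One mapping per page instead of one per buffer is the whole win: with mergeable buffers several receive buffers usually share a page, so the dma_map/unmap rate drops accordingly.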
@@ -966,7 +1148,7 @@ static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi, return NULL; buf += header_offset; - memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len); + memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len); return skb; } @@ -1050,9 +1232,9 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev, return skb; err_xdp: - stats->xdp_drops++; + u64_stats_inc(&stats->xdp_drops); err: - stats->drops++; + u64_stats_inc(&stats->drops); put_page(page); xdp_xmit: return NULL; @@ -1071,12 +1253,12 @@ static struct sk_buff *receive_small(struct net_device *dev, struct sk_buff *skb; len -= vi->hdr_len; - stats->bytes += len; + u64_stats_add(&stats->bytes, len); if (unlikely(len > GOOD_PACKET_LEN)) { pr_debug("%s: rx error: len %u exceeds max size %d\n", dev->name, len, GOOD_PACKET_LEN); - dev->stats.rx_length_errors++; + DEV_STATS_INC(dev, rx_length_errors); goto err; } @@ -1100,7 +1282,7 @@ static struct sk_buff *receive_small(struct net_device *dev, return skb; err: - stats->drops++; + u64_stats_inc(&stats->drops); put_page(page); return NULL; } @@ -1116,14 +1298,14 @@ static struct sk_buff *receive_big(struct net_device *dev, struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0); - stats->bytes += len - vi->hdr_len; + u64_stats_add(&stats->bytes, len - vi->hdr_len); if (unlikely(!skb)) goto err; return skb; err: - stats->drops++; + u64_stats_inc(&stats->drops); give_pages(rq, page); return NULL; } @@ -1137,14 +1319,14 @@ static void mergeable_buf_free(struct receive_queue *rq, int num_buf, int len; while (num_buf-- > 1) { - buf = virtqueue_get_buf(rq->vq, &len); + buf = virtnet_rq_get_buf(rq, &len, NULL); if (unlikely(!buf)) { pr_debug("%s: rx error: %d buffers missing\n", dev->name, num_buf); - dev->stats.rx_length_errors++; + DEV_STATS_INC(dev, rx_length_errors); break; } - stats->bytes += len; + u64_stats_add(&stats->bytes, len); page = virt_to_head_page(buf); put_page(page); } @@ -1245,16 +1427,16 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev, return -EINVAL; while (--*num_buf > 0) { - buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx); + buf = virtnet_rq_get_buf(rq, &len, &ctx); if (unlikely(!buf)) { pr_debug("%s: rx error: %d buffers out of %d missing\n", dev->name, *num_buf, virtio16_to_cpu(vi->vdev, hdr->num_buffers)); - dev->stats.rx_length_errors++; + DEV_STATS_INC(dev, rx_length_errors); goto err; } - stats->bytes += len; + u64_stats_add(&stats->bytes, len); page = virt_to_head_page(buf); offset = buf - page_address(page); @@ -1269,7 +1451,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev, put_page(page); pr_debug("%s: rx error: len %u exceeds truesize %lu\n", dev->name, len, (unsigned long)(truesize - room)); - dev->stats.rx_length_errors++; + DEV_STATS_INC(dev, rx_length_errors); goto err; } @@ -1418,8 +1600,8 @@ err_xdp: put_page(page); mergeable_buf_free(rq, num_buf, dev, stats); - stats->xdp_drops++; - stats->drops++; + u64_stats_inc(&stats->xdp_drops); + u64_stats_inc(&stats->drops); return NULL; } @@ -1443,12 +1625,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, unsigned int room = SKB_DATA_ALIGN(headroom + tailroom); head_skb = NULL; - stats->bytes += len - vi->hdr_len; + u64_stats_add(&stats->bytes, len - vi->hdr_len); if (unlikely(len > truesize - room)) { pr_debug("%s: rx error: len %u exceeds truesize %lu\n", dev->name, len, (unsigned long)(truesize - room)); - dev->stats.rx_length_errors++; + DEV_STATS_INC(dev, rx_length_errors); goto err_skb; } @@ -1474,17 +1656,17 @@ static 
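The dev->stats conversions in these receive hunks (dev->stats.rx_length_errors++ becoming DEV_STATS_INC(dev, rx_length_errors), and so on) move the shared struct net_device error counters to atomic helpers, since several receive queues can hit the same counter concurrently. The matching reader, DEV_STATS_READ(), shows up later in virtnet_stats(). Usage is mechanical:

/* Before: plain increment; concurrent queues can lose updates. */
dev->stats.rx_length_errors++;

/* After: atomic per-field helpers from linux/netdevice.h. */
DEV_STATS_INC(dev, rx_length_errors);
tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors);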
struct sk_buff *receive_mergeable(struct net_device *dev, while (--num_buf) { int num_skb_frags; - buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx); + buf = virtnet_rq_get_buf(rq, &len, &ctx); if (unlikely(!buf)) { pr_debug("%s: rx error: %d buffers out of %d missing\n", dev->name, num_buf, virtio16_to_cpu(vi->vdev, hdr->num_buffers)); - dev->stats.rx_length_errors++; + DEV_STATS_INC(dev, rx_length_errors); goto err_buf; } - stats->bytes += len; + u64_stats_add(&stats->bytes, len); page = virt_to_head_page(buf); truesize = mergeable_ctx_to_truesize(ctx); @@ -1494,7 +1676,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, if (unlikely(len > truesize - room)) { pr_debug("%s: rx error: len %u exceeds truesize %lu\n", dev->name, len, (unsigned long)(truesize - room)); - dev->stats.rx_length_errors++; + DEV_STATS_INC(dev, rx_length_errors); goto err_skb; } @@ -1536,7 +1718,7 @@ err_skb: mergeable_buf_free(rq, num_buf, dev, stats); err_buf: - stats->drops++; + u64_stats_inc(&stats->drops); dev_kfree_skb(head_skb); return NULL; } @@ -1577,11 +1759,11 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, { struct net_device *dev = vi->dev; struct sk_buff *skb; - struct virtio_net_hdr_mrg_rxbuf *hdr; + struct virtio_net_common_hdr *hdr; if (unlikely(len < vi->hdr_len + ETH_HLEN)) { pr_debug("%s: short packet %i\n", dev->name, len); - dev->stats.rx_length_errors++; + DEV_STATS_INC(dev, rx_length_errors); virtnet_rq_free_unused_buf(rq->vq, buf); return; } @@ -1597,9 +1779,9 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, if (unlikely(!skb)) return; - hdr = skb_vnet_hdr(skb); + hdr = skb_vnet_common_hdr(skb); if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report) - virtio_skb_set_hash((const struct virtio_net_hdr_v1_hash *)hdr, skb); + virtio_skb_set_hash(&hdr->hash_v1_hdr, skb); if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -1621,7 +1803,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, return; frame_err: - dev->stats.rx_frame_errors++; + DEV_STATS_INC(dev, rx_frame_errors); dev_kfree_skb(skb); } @@ -1633,7 +1815,6 @@ frame_err: static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, gfp_t gfp) { - struct page_frag *alloc_frag = &rq->alloc_frag; char *buf; unsigned int xdp_headroom = virtnet_get_headroom(vi); void *ctx = (void *)(unsigned long)xdp_headroom; @@ -1642,17 +1823,21 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, len = SKB_DATA_ALIGN(len) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp))) + + buf = virtnet_rq_alloc(rq, len, gfp); + if (unlikely(!buf)) return -ENOMEM; - buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; - get_page(alloc_frag->page); - alloc_frag->offset += len; - sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom, - vi->hdr_len + GOOD_PACKET_LEN); + virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom, + vi->hdr_len + GOOD_PACKET_LEN); + err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); - if (err < 0) + if (err < 0) { + if (rq->do_dma) + virtnet_rq_unmap(rq, buf, 0); put_page(virt_to_head_page(buf)); + } + return err; } @@ -1729,23 +1914,22 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, unsigned int headroom = virtnet_get_headroom(vi); unsigned int tailroom = headroom ? 
sizeof(struct skb_shared_info) : 0; unsigned int room = SKB_DATA_ALIGN(headroom + tailroom); - char *buf; + unsigned int len, hole; void *ctx; + char *buf; int err; - unsigned int len, hole; /* Extra tailroom is needed to satisfy XDP's assumption. This * means rx frags coalescing won't work, but consider we've * disabled GSO for XDP, it won't be a big issue. */ len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room); - if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp))) + + buf = virtnet_rq_alloc(rq, len + room, gfp); + if (unlikely(!buf)) return -ENOMEM; - buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; buf += headroom; /* advance address leaving hole at front of pkt */ - get_page(alloc_frag->page); - alloc_frag->offset += len + room; hole = alloc_frag->size - alloc_frag->offset; if (hole < len + room) { /* To avoid internal fragmentation, if there is very likely not @@ -1759,11 +1943,15 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, alloc_frag->offset += hole; } - sg_init_one(rq->sg, buf, len); + virtnet_rq_init_one_sg(rq, buf, len); + ctx = mergeable_len_to_ctx(len + room, headroom); err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); - if (err < 0) + if (err < 0) { + if (rq->do_dma) + virtnet_rq_unmap(rq, buf, 0); put_page(virt_to_head_page(buf)); + } return err; } @@ -1797,7 +1985,7 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, unsigned long flags; flags = u64_stats_update_begin_irqsave(&rq->stats.syncp); - rq->stats.kicks++; + u64_stats_inc(&rq->stats.kicks); u64_stats_update_end_irqrestore(&rq->stats.syncp, flags); } @@ -1877,22 +2065,23 @@ static int virtnet_receive(struct receive_queue *rq, int budget, struct virtnet_info *vi = rq->vq->vdev->priv; struct virtnet_rq_stats stats = {}; unsigned int len; + int packets = 0; void *buf; int i; if (!vi->big_packets || vi->mergeable_rx_bufs) { void *ctx; - while (stats.packets < budget && - (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) { + while (packets < budget && + (buf = virtnet_rq_get_buf(rq, &len, &ctx))) { receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats); - stats.packets++; + packets++; } } else { - while (stats.packets < budget && - (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { + while (packets < budget && + (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) { receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats); - stats.packets++; + packets++; } } @@ -1905,17 +2094,19 @@ static int virtnet_receive(struct receive_queue *rq, int budget, } } + u64_stats_set(&stats.packets, packets); u64_stats_update_begin(&rq->stats.syncp); for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) { size_t offset = virtnet_rq_stats_desc[i].offset; - u64 *item; + u64_stats_t *item, *src; - item = (u64 *)((u8 *)&rq->stats + offset); - *item += *(u64 *)((u8 *)&stats + offset); + item = (u64_stats_t *)((u8 *)&rq->stats + offset); + src = (u64_stats_t *)((u8 *)&stats + offset); + u64_stats_add(item, u64_stats_read(src)); } u64_stats_update_end(&rq->stats.syncp); - return stats.packets; + return packets; } static void virtnet_poll_cleantx(struct receive_queue *rq) @@ -1970,7 +2161,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget) sq = virtnet_xdp_get_sq(vi); if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { u64_stats_update_begin(&sq->stats.syncp); - sq->stats.kicks++; + u64_stats_inc(&sq->stats.kicks); u64_stats_update_end(&sq->stats.syncp); } virtnet_xdp_put_sq(vi, sq); @@ -2105,7 +2296,7 @@ static int xmit_skb(struct 
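A quieter consequence of the u64_stats_t conversion shows in virtnet_receive() above: the budget check can no longer compare stats.packets directly (u64_stats_t values must go through u64_stats_read()), so the loop counts in a plain int and publishes the total once at the end with u64_stats_set(). The shape of that idiom (get_next_buf() and process_buf() are placeholders for the receive work):

int packets = 0;

while (packets < budget && (buf = get_next_buf(rq)) != NULL) {
	process_buf(buf);
	packets++;
}

u64_stats_set(&stats.packets, packets);	/* publish once, outside the loop */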
send_queue *sq, struct sk_buff *skb) if (can_push) hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len); else - hdr = skb_vnet_hdr(skb); + hdr = &skb_vnet_common_hdr(skb)->mrg_hdr; if (virtio_net_hdr_from_skb(skb, &hdr->hdr, virtio_is_little_endian(vi->vdev), false, @@ -2161,12 +2352,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) /* This should not happen! */ if (unlikely(err)) { - dev->stats.tx_fifo_errors++; + DEV_STATS_INC(dev, tx_fifo_errors); if (net_ratelimit()) dev_warn(&dev->dev, "Unexpected TXQ (%d) queue failure: %d\n", qnum, err); - dev->stats.tx_dropped++; + DEV_STATS_INC(dev, tx_dropped); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -2182,7 +2373,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) if (kick || netif_xmit_stopped(txq)) { if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { u64_stats_update_begin(&sq->stats.syncp); - sq->stats.kicks++; + u64_stats_inc(&sq->stats.kicks); u64_stats_update_end(&sq->stats.syncp); } } @@ -2365,16 +2556,16 @@ static void virtnet_stats(struct net_device *dev, do { start = u64_stats_fetch_begin(&sq->stats.syncp); - tpackets = sq->stats.packets; - tbytes = sq->stats.bytes; - terrors = sq->stats.tx_timeouts; + tpackets = u64_stats_read(&sq->stats.packets); + tbytes = u64_stats_read(&sq->stats.bytes); + terrors = u64_stats_read(&sq->stats.tx_timeouts); } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); do { start = u64_stats_fetch_begin(&rq->stats.syncp); - rpackets = rq->stats.packets; - rbytes = rq->stats.bytes; - rdrops = rq->stats.drops; + rpackets = u64_stats_read(&rq->stats.packets); + rbytes = u64_stats_read(&rq->stats.bytes); + rdrops = u64_stats_read(&rq->stats.drops); } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); tot->rx_packets += rpackets; @@ -2385,10 +2576,10 @@ static void virtnet_stats(struct net_device *dev, tot->tx_errors += terrors; } - tot->tx_dropped = dev->stats.tx_dropped; - tot->tx_fifo_errors = dev->stats.tx_fifo_errors; - tot->rx_length_errors = dev->stats.rx_length_errors; - tot->rx_frame_errors = dev->stats.rx_frame_errors; + tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped); + tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors); + tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors); + tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors); } static void virtnet_ack_link_announce(struct virtnet_info *vi) @@ -2667,6 +2858,9 @@ static void virtnet_get_ringparam(struct net_device *dev, ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); } +static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi, + u16 vqn, u32 max_usecs, u32 max_packets); + static int virtnet_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, @@ -2702,12 +2896,36 @@ static int virtnet_set_ringparam(struct net_device *dev, err = virtnet_tx_resize(vi, sq, ring->tx_pending); if (err) return err; + + /* Upon disabling and re-enabling a transmit virtqueue, the device must + * set the coalescing parameters of the virtqueue to those configured + * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver + * did not set any TX coalescing parameters, to 0. 
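The rule quoted in the comment above comes from the virtio spec: resetting a virtqueue (which is how virtnet_tx_resize()/virtnet_rx_resize() work) reverts its coalescing parameters, so the driver must replay the device-wide values into the freshly reset queue and mirror them into the per-queue intr_coal cache that ethtool reports from. The replay in shape (TX side shown; the RX branch in the next hunk is symmetric, and the struct assignment condenses the two field writes of the hunk):

/* Sketch: replay cached coalescing into a freshly reset TX queue. */
err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(i),
				    vi->intr_coal_tx.max_usecs,
				    vi->intr_coal_tx.max_packets);
if (!err)
	vi->sq[i].intr_coal = vi->intr_coal_tx;	/* keep the ethtool view honest */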
+ */ + err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(i), + vi->intr_coal_tx.max_usecs, + vi->intr_coal_tx.max_packets); + if (err) + return err; + + vi->sq[i].intr_coal.max_usecs = vi->intr_coal_tx.max_usecs; + vi->sq[i].intr_coal.max_packets = vi->intr_coal_tx.max_packets; } if (ring->rx_pending != rx_pending) { err = virtnet_rx_resize(vi, rq, ring->rx_pending); if (err) return err; + + /* The reason is same as the transmit virtqueue reset */ + err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(i), + vi->intr_coal_rx.max_usecs, + vi->intr_coal_rx.max_packets); + if (err) + return err; + + vi->rq[i].intr_coal.max_usecs = vi->intr_coal_rx.max_usecs; + vi->rq[i].intr_coal.max_packets = vi->intr_coal_rx.max_packets; } } @@ -2976,17 +3194,19 @@ static void virtnet_get_ethtool_stats(struct net_device *dev, struct virtnet_info *vi = netdev_priv(dev); unsigned int idx = 0, start, i, j; const u8 *stats_base; + const u64_stats_t *p; size_t offset; for (i = 0; i < vi->curr_queue_pairs; i++) { struct receive_queue *rq = &vi->rq[i]; - stats_base = (u8 *)&rq->stats; + stats_base = (const u8 *)&rq->stats; do { start = u64_stats_fetch_begin(&rq->stats.syncp); for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { offset = virtnet_rq_stats_desc[j].offset; - data[idx + j] = *(u64 *)(stats_base + offset); + p = (const u64_stats_t *)(stats_base + offset); + data[idx + j] = u64_stats_read(p); } } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); idx += VIRTNET_RQ_STATS_LEN; @@ -2995,12 +3215,13 @@ static void virtnet_get_ethtool_stats(struct net_device *dev, for (i = 0; i < vi->curr_queue_pairs; i++) { struct send_queue *sq = &vi->sq[i]; - stats_base = (u8 *)&sq->stats; + stats_base = (const u8 *)&sq->stats; do { start = u64_stats_fetch_begin(&sq->stats.syncp); for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { offset = virtnet_sq_stats_desc[j].offset; - data[idx + j] = *(u64 *)(stats_base + offset); + p = (const u64_stats_t *)(stats_base + offset); + data[idx + j] = u64_stats_read(p); } } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); idx += VIRTNET_SQ_STATS_LEN; @@ -3045,6 +3266,7 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, struct ethtool_coalesce *ec) { struct scatterlist sgs_tx, sgs_rx; + int i; vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); @@ -3056,8 +3278,12 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, return -EINVAL; /* Save parameters */ - vi->tx_usecs = ec->tx_coalesce_usecs; - vi->tx_max_packets = ec->tx_max_coalesced_frames; + vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs; + vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames; + for (i = 0; i < vi->max_queue_pairs; i++) { + vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs; + vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames; + } vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); @@ -3069,8 +3295,57 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, return -EINVAL; /* Save parameters */ - vi->rx_usecs = ec->rx_coalesce_usecs; - vi->rx_max_packets = ec->rx_max_coalesced_frames; + vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs; + vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames; + for (i = 0; i < vi->max_queue_pairs; i++) { + vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs; + vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames; + } + + return 0; +} + 
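virtnet_send_ctrl_coal_vq_cmd(), defined next, wraps exactly one control-virtqueue command, VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET. Its payload is the little-endian uapi structure negotiated under VIRTIO_NET_F_VQ_NOTF_COAL; the field names are confirmed by the hunk below, while the layout sketched here follows include/uapi/linux/virtio_net.h (consult the header for the authoritative definition):

/* Per-virtqueue coalescing payload (VIRTIO_NET_F_VQ_NOTF_COAL). */
struct virtio_net_ctrl_coal {
	__le32 max_packets;	/* notify after this many packets ... */
	__le32 max_usecs;	/* ... or after this many microseconds */
};

struct virtio_net_ctrl_coal_vq {
	__le16 vqn;		/* target virtqueue index */
	__le16 reserved;
	struct virtio_net_ctrl_coal coal;
};

With the feature negotiated, this surfaces through ethtool's per-queue interface, e.g. (eth0 being a placeholder): ethtool --per-queue eth0 queue_mask 0x1 --coalesce rx-usecs 10.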
+static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi, + u16 vqn, u32 max_usecs, u32 max_packets) +{ + struct scatterlist sgs; + + vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn); + vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs); + vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets); + sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq)); + + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, + VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET, + &sgs)) + return -EINVAL; + + return 0; +} + +static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi, + struct ethtool_coalesce *ec, + u16 queue) +{ + int err; + + err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue), + ec->rx_coalesce_usecs, + ec->rx_max_coalesced_frames); + if (err) + return err; + + vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs; + vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames; + + err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue), + ec->tx_coalesce_usecs, + ec->tx_max_coalesced_frames); + if (err) + return err; + + vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs; + vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames; return 0; } @@ -3078,7 +3353,7 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, static int virtnet_coal_params_supported(struct ethtool_coalesce *ec) { /* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL - * feature is negotiated. + * or VIRTIO_NET_F_VQ_NOTF_COAL feature is negotiated. */ if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs) return -EOPNOTSUPP; @@ -3090,22 +3365,42 @@ static int virtnet_coal_params_supported(struct ethtool_coalesce *ec) return 0; } +static int virtnet_should_update_vq_weight(int dev_flags, int weight, + int vq_weight, bool *should_update) +{ + if (weight ^ vq_weight) { + if (dev_flags & IFF_UP) + return -EBUSY; + *should_update = true; + } + + return 0; +} + static int virtnet_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct virtnet_info *vi = netdev_priv(dev); - int ret, i, napi_weight; + int ret, queue_number, napi_weight; bool update_napi = false; /* Can't change NAPI weight if the link is up */ napi_weight = ec->tx_max_coalesced_frames ? 
NAPI_POLL_WEIGHT : 0; - if (napi_weight ^ vi->sq[0].napi.weight) { - if (dev->flags & IFF_UP) - return -EBUSY; - else - update_napi = true; + for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) { + ret = virtnet_should_update_vq_weight(dev->flags, napi_weight, + vi->sq[queue_number].napi.weight, + &update_napi); + if (ret) + return ret; + + if (update_napi) { + /* All queues that belong to [queue_number, vi->max_queue_pairs) will be + * updated for the sake of simplicity, which might not be necessary + */ + break; + } } if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) @@ -3117,8 +3412,8 @@ static int virtnet_set_coalesce(struct net_device *dev, return ret; if (update_napi) { - for (i = 0; i < vi->max_queue_pairs; i++) - vi->sq[i].napi.weight = napi_weight; + for (; queue_number < vi->max_queue_pairs; queue_number++) + vi->sq[queue_number].napi.weight = napi_weight; } return ret; @@ -3132,10 +3427,10 @@ static int virtnet_get_coalesce(struct net_device *dev, struct virtnet_info *vi = netdev_priv(dev); if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { - ec->rx_coalesce_usecs = vi->rx_usecs; - ec->tx_coalesce_usecs = vi->tx_usecs; - ec->tx_max_coalesced_frames = vi->tx_max_packets; - ec->rx_max_coalesced_frames = vi->rx_max_packets; + ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs; + ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs; + ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets; + ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets; } else { ec->rx_max_coalesced_frames = 1; @@ -3146,6 +3441,63 @@ static int virtnet_get_coalesce(struct net_device *dev, return 0; } +static int virtnet_set_per_queue_coalesce(struct net_device *dev, + u32 queue, + struct ethtool_coalesce *ec) +{ + struct virtnet_info *vi = netdev_priv(dev); + int ret, napi_weight; + bool update_napi = false; + + if (queue >= vi->max_queue_pairs) + return -EINVAL; + + /* Can't change NAPI weight if the link is up */ + napi_weight = ec->tx_max_coalesced_frames ? 
NAPI_POLL_WEIGHT : 0; + ret = virtnet_should_update_vq_weight(dev->flags, napi_weight, + vi->sq[queue].napi.weight, + &update_napi); + if (ret) + return ret; + + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) + ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue); + else + ret = virtnet_coal_params_supported(ec); + + if (ret) + return ret; + + if (update_napi) + vi->sq[queue].napi.weight = napi_weight; + + return 0; +} + +static int virtnet_get_per_queue_coalesce(struct net_device *dev, + u32 queue, + struct ethtool_coalesce *ec) +{ + struct virtnet_info *vi = netdev_priv(dev); + + if (queue >= vi->max_queue_pairs) + return -EINVAL; + + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { + ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs; + ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs; + ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets; + ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets; + } else { + ec->rx_max_coalesced_frames = 1; + + if (vi->sq[queue].napi.weight) + ec->tx_max_coalesced_frames = 1; + } + + return 0; +} + static void virtnet_init_settings(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); @@ -3276,6 +3628,8 @@ static const struct ethtool_ops virtnet_ethtool_ops = { .set_link_ksettings = virtnet_set_link_ksettings, .set_coalesce = virtnet_set_coalesce, .get_coalesce = virtnet_get_coalesce, + .set_per_queue_coalesce = virtnet_set_per_queue_coalesce, + .get_per_queue_coalesce = virtnet_get_per_queue_coalesce, .get_rxfh_key_size = virtnet_get_rxfh_key_size, .get_rxfh_indir_size = virtnet_get_rxfh_indir_size, .get_rxfh = virtnet_get_rxfh, @@ -3550,7 +3904,7 @@ static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue) struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue); u64_stats_update_begin(&sq->stats.syncp); - sq->stats.tx_timeouts++; + u64_stats_inc(&sq->stats.tx_timeouts); u64_stats_update_end(&sq->stats.syncp); netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n", @@ -3662,8 +4016,11 @@ static void free_receive_page_frags(struct virtnet_info *vi) { int i; for (i = 0; i < vi->max_queue_pairs; i++) - if (vi->rq[i].alloc_frag.page) + if (vi->rq[i].alloc_frag.page) { + if (vi->rq[i].do_dma && vi->rq[i].last_dma) + virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0); put_page(vi->rq[i].alloc_frag.page); + } } static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf) @@ -3700,9 +4057,10 @@ static void free_unused_bufs(struct virtnet_info *vi) } for (i = 0; i < vi->max_queue_pairs; i++) { - struct virtqueue *vq = vi->rq[i].vq; - while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) - virtnet_rq_free_unused_buf(vq, buf); + struct receive_queue *rq = &vi->rq[i]; + + while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL) + virtnet_rq_free_unused_buf(rq->vq, buf); cond_resched(); } } @@ -3876,6 +4234,8 @@ static int init_vqs(struct virtnet_info *vi) if (ret) goto err_free; + virtnet_rq_set_premapped(vi); + cpus_read_lock(); virtnet_set_affinity(vi); cpus_read_unlock(); @@ -3952,6 +4312,8 @@ static bool virtnet_validate_features(struct virtio_device *vdev) VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT, "VIRTIO_NET_F_CTRL_VQ") || VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL, + "VIRTIO_NET_F_CTRL_VQ") || + VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_VQ_NOTF_COAL, "VIRTIO_NET_F_CTRL_VQ"))) { return false; } @@ -4118,13 +4480,6 @@ static int virtnet_probe(struct virtio_device *vdev) dev->xdp_features |= 
NETDEV_XDP_ACT_RX_SG; } - if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { - vi->rx_usecs = 0; - vi->tx_usecs = 0; - vi->tx_max_packets = 0; - vi->rx_max_packets = 0; - } - if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT)) vi->has_rss_hash_report = true; @@ -4199,6 +4554,27 @@ static int virtnet_probe(struct virtio_device *vdev) if (err) goto free; + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) { + vi->intr_coal_rx.max_usecs = 0; + vi->intr_coal_tx.max_usecs = 0; + vi->intr_coal_rx.max_packets = 0; + + /* Keep the default values of the coalescing parameters + * aligned with the default napi_tx state. + */ + if (vi->sq[0].napi.weight) + vi->intr_coal_tx.max_packets = 1; + else + vi->intr_coal_tx.max_packets = 0; + } + + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { + /* The reason is the same as VIRTIO_NET_F_NOTF_COAL. */ + for (i = 0; i < vi->max_queue_pairs; i++) + if (vi->sq[i].napi.weight) + vi->sq[i].intr_coal.max_packets = 1; + } + #ifdef CONFIG_SYSFS if (vi->mergeable_rx_bufs) dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group; @@ -4376,6 +4752,7 @@ static struct virtio_device_id id_table[] = { VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \ VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \ VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \ + VIRTIO_NET_F_VQ_NOTF_COAL, \ VIRTIO_NET_F_GUEST_HDRLEN static unsigned int features[] = { diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile index a666a88ac1ff..f82870c10205 100644 --- a/drivers/net/vmxnet3/Makefile +++ b/drivers/net/vmxnet3/Makefile @@ -32,4 +32,4 @@ obj-$(CONFIG_VMXNET3) += vmxnet3.o -vmxnet3-objs := vmxnet3_drv.o vmxnet3_ethtool.o +vmxnet3-objs := vmxnet3_drv.o vmxnet3_ethtool.o vmxnet3_xdp.o diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 7fa74b8b2100..0578864792b6 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -28,6 +28,7 @@ #include <net/ip6_checksum.h> #include "vmxnet3_int.h" +#include "vmxnet3_xdp.h" char vmxnet3_driver_name[] = "vmxnet3"; #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver" @@ -338,14 +339,16 @@ static void vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi, struct pci_dev *pdev) { - if (tbi->map_type == VMXNET3_MAP_SINGLE) + u32 map_type = tbi->map_type; + + if (map_type & VMXNET3_MAP_SINGLE) dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len, DMA_TO_DEVICE); - else if (tbi->map_type == VMXNET3_MAP_PAGE) + else if (map_type & VMXNET3_MAP_PAGE) dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len, DMA_TO_DEVICE); else - BUG_ON(tbi->map_type != VMXNET3_MAP_NONE); + BUG_ON(map_type & ~VMXNET3_MAP_XDP); tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */ } @@ -353,19 +356,20 @@ vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi, static int vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq, - struct pci_dev *pdev, struct vmxnet3_adapter *adapter) + struct pci_dev *pdev, struct vmxnet3_adapter *adapter, + struct xdp_frame_bulk *bq) { - struct sk_buff *skb; + struct vmxnet3_tx_buf_info *tbi; int entries = 0; + u32 map_type; /* no out of order completion */ BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp); BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1); - skb = tq->buf_info[eop_idx].skb; - BUG_ON(skb == NULL); - tq->buf_info[eop_idx].skb = NULL; - + tbi = &tq->buf_info[eop_idx]; + BUG_ON(!tbi->skb); + map_type = tbi->map_type; VMXNET3_INC_RING_IDX_ONLY(eop_idx, 
tq->tx_ring.size); while (tq->tx_ring.next2comp != eop_idx) { @@ -381,7 +385,14 @@ vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq, entries++; } - dev_kfree_skb_any(skb); + if (map_type & VMXNET3_MAP_XDP) + xdp_return_frame_bulk(tbi->xdpf, bq); + else + dev_kfree_skb_any(tbi->skb); + + /* xdpf and skb are in an anonymous union. */ + tbi->skb = NULL; + return entries; } @@ -390,8 +401,12 @@ static int vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) { - int completed = 0; union Vmxnet3_GenericDesc *gdesc; + struct xdp_frame_bulk bq; + int completed = 0; + + xdp_frame_bulk_init(&bq); + rcu_read_lock(); gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) { @@ -402,11 +417,13 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq, completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX( &gdesc->tcd), tq, adapter->pdev, - adapter); + adapter, &bq); vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring); gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; } + xdp_flush_frame_bulk(&bq); + rcu_read_unlock(); if (completed) { spin_lock(&tq->tx_lock); @@ -426,26 +443,36 @@ static void vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) { + struct xdp_frame_bulk bq; + u32 map_type; int i; + xdp_frame_bulk_init(&bq); + rcu_read_lock(); + while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) { struct vmxnet3_tx_buf_info *tbi; tbi = tq->buf_info + tq->tx_ring.next2comp; + map_type = tbi->map_type; vmxnet3_unmap_tx_buf(tbi, adapter->pdev); if (tbi->skb) { - dev_kfree_skb_any(tbi->skb); + if (map_type & VMXNET3_MAP_XDP) + xdp_return_frame_bulk(tbi->xdpf, &bq); + else + dev_kfree_skb_any(tbi->skb); tbi->skb = NULL; } vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); } - /* sanity check, verify all buffers are indeed unmapped and freed */ - for (i = 0; i < tq->tx_ring.size; i++) { - BUG_ON(tq->buf_info[i].skb != NULL || - tq->buf_info[i].map_type != VMXNET3_MAP_NONE); - } + xdp_flush_frame_bulk(&bq); + rcu_read_unlock(); + + /* sanity check, verify all buffers are indeed unmapped */ + for (i = 0; i < tq->tx_ring.size; i++) + BUG_ON(tq->buf_info[i].map_type != VMXNET3_MAP_NONE); tq->tx_ring.gen = VMXNET3_INIT_GEN; tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; @@ -599,7 +626,17 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, gd = ring->base + ring->next2fill; rbi->comp_state = VMXNET3_RXD_COMP_PENDING; - if (rbi->buf_type == VMXNET3_RX_BUF_SKB) { + if (rbi->buf_type == VMXNET3_RX_BUF_XDP) { + void *data = vmxnet3_pp_get_buff(rq->page_pool, + &rbi->dma_addr, + GFP_KERNEL); + if (!data) { + rq->stats.rx_buf_alloc_failure++; + break; + } + rbi->page = virt_to_page(data); + val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT; + } else if (rbi->buf_type == VMXNET3_RX_BUF_SKB) { if (rbi->skb == NULL) { rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev, rbi->len, @@ -1263,6 +1300,63 @@ drop_pkt: return NETDEV_TX_OK; } +static int +vmxnet3_create_pp(struct vmxnet3_adapter *adapter, + struct vmxnet3_rx_queue *rq, int size) +{ + bool xdp_prog = vmxnet3_xdp_enabled(adapter); + const struct page_pool_params pp_params = { + .order = 0, + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .pool_size = size, + .nid = NUMA_NO_NODE, + .dev = &adapter->pdev->dev, + .offset = VMXNET3_XDP_RX_OFFSET, + .max_len = VMXNET3_XDP_MAX_FRSIZE, + .dma_dir = xdp_prog ? 
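+				/* XDP_TX can transmit straight from an RX page, so map it both ways */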
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, + }; + struct page_pool *pp; + int err; + + pp = page_pool_create(&pp_params); + if (IS_ERR(pp)) + return PTR_ERR(pp); + + err = xdp_rxq_info_reg(&rq->xdp_rxq, adapter->netdev, rq->qid, + rq->napi.napi_id); + if (err < 0) + goto err_free_pp; + + err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL, pp); + if (err) + goto err_unregister_rxq; + + rq->page_pool = pp; + + return 0; + +err_unregister_rxq: + xdp_rxq_info_unreg(&rq->xdp_rxq); +err_free_pp: + page_pool_destroy(pp); + + return err; +} + +void * +vmxnet3_pp_get_buff(struct page_pool *pp, dma_addr_t *dma_addr, + gfp_t gfp_mask) +{ + struct page *page; + + page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN); + if (unlikely(!page)) + return NULL; + + *dma_addr = page_pool_get_dma_addr(page) + pp->p.offset; + + return page_address(page); +} static netdev_tx_t vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev) @@ -1423,6 +1517,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxDesc rxCmdDesc; struct Vmxnet3_RxCompDesc rxComp; #endif + bool need_flush = false; + vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp); while (rcd->gen == rq->comp_ring.gen) { @@ -1463,6 +1559,31 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, goto rcd_done; } + if (rcd->sop && rcd->eop && vmxnet3_xdp_enabled(adapter)) { + struct sk_buff *skb_xdp_pass; + int act; + + if (VMXNET3_RX_DATA_RING(adapter, rcd->rqID)) { + ctx->skb = NULL; + goto skip_xdp; /* Handle it later. */ + } + + if (rbi->buf_type != VMXNET3_RX_BUF_XDP) + goto rcd_done; + + act = vmxnet3_process_xdp(adapter, rq, rcd, rbi, rxd, + &skb_xdp_pass); + if (act == XDP_PASS) { + ctx->skb = skb_xdp_pass; + goto sop_done; + } + ctx->skb = NULL; + need_flush |= act == XDP_REDIRECT; + + goto rcd_done; + } +skip_xdp: + if (rcd->sop) { /* first buf of the pkt */ bool rxDataRingUsed; u16 len; @@ -1471,7 +1592,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, (rcd->rqID != rq->qid && rcd->rqID != rq->dataRingQid)); - BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB); + BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB && + rbi->buf_type != VMXNET3_RX_BUF_XDP); BUG_ON(ctx->skb != NULL || rbi->skb == NULL); if (unlikely(rcd->len == 0)) { @@ -1489,6 +1611,25 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, rxDataRingUsed = VMXNET3_RX_DATA_RING(adapter, rcd->rqID); len = rxDataRingUsed ? 
rcd->len : rbi->len; + + if (rxDataRingUsed && vmxnet3_xdp_enabled(adapter)) { + struct sk_buff *skb_xdp_pass; + size_t sz; + int act; + + sz = rcd->rxdIdx * rq->data_ring.desc_size; + act = vmxnet3_process_xdp_small(adapter, rq, + &rq->data_ring.base[sz], + rcd->len, + &skb_xdp_pass); + if (act == XDP_PASS) { + ctx->skb = skb_xdp_pass; + goto sop_done; + } + need_flush |= act == XDP_REDIRECT; + + goto rcd_done; + } new_skb = netdev_alloc_skb_ip_align(adapter->netdev, len); if (new_skb == NULL) { @@ -1621,6 +1762,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, } +sop_done: skb = ctx->skb; if (rcd->eop) { u32 mtu = adapter->netdev->mtu; @@ -1757,6 +1899,8 @@ refill_buf: vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp); } + if (need_flush) + xdp_do_flush(); return num_pkts; } @@ -1775,24 +1919,32 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, for (ring_idx = 0; ring_idx < 2; ring_idx++) { for (i = 0; i < rq->rx_ring[ring_idx].size; i++) { + struct vmxnet3_rx_buf_info *rbi; #ifdef __BIG_ENDIAN_BITFIELD struct Vmxnet3_RxDesc rxDesc; #endif + + rbi = &rq->buf_info[ring_idx][i]; vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc); if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD && - rq->buf_info[ring_idx][i].skb) { + rbi->page && rbi->buf_type == VMXNET3_RX_BUF_XDP) { + page_pool_recycle_direct(rq->page_pool, + rbi->page); + rbi->page = NULL; + } else if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD && + rbi->skb) { dma_unmap_single(&adapter->pdev->dev, rxd->addr, rxd->len, DMA_FROM_DEVICE); - dev_kfree_skb(rq->buf_info[ring_idx][i].skb); - rq->buf_info[ring_idx][i].skb = NULL; + dev_kfree_skb(rbi->skb); + rbi->skb = NULL; } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY && - rq->buf_info[ring_idx][i].page) { + rbi->page) { dma_unmap_page(&adapter->pdev->dev, rxd->addr, rxd->len, DMA_FROM_DEVICE); - put_page(rq->buf_info[ring_idx][i].page); - rq->buf_info[ring_idx][i].page = NULL; + put_page(rbi->page); + rbi->page = NULL; } } @@ -1813,6 +1965,7 @@ vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter); + rcu_assign_pointer(adapter->xdp_bpf_prog, NULL); } @@ -1842,6 +1995,11 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, } } + if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) + xdp_rxq_info_unreg(&rq->xdp_rxq); + page_pool_destroy(rq->page_pool); + rq->page_pool = NULL; + if (rq->data_ring.base) { dma_free_coherent(&adapter->pdev->dev, rq->rx_ring[0].size * rq->data_ring.desc_size, @@ -1885,14 +2043,16 @@ static int vmxnet3_rq_init(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) { - int i; + int i, err; /* initialize buf_info */ for (i = 0; i < rq->rx_ring[0].size; i++) { - /* 1st buf for a pkt is skbuff */ + /* 1st buf for a pkt is skbuff or xdp page */ if (i % adapter->rx_buf_per_pkt == 0) { - rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB; + rq->buf_info[0][i].buf_type = vmxnet3_xdp_enabled(adapter) ? 
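+			/* with an XDP prog attached, head buffers come from the page pool */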
+ VMXNET3_RX_BUF_XDP : + VMXNET3_RX_BUF_SKB; rq->buf_info[0][i].len = adapter->skb_buf_size; } else { /* subsequent bufs for a pkt is frag */ rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE; @@ -1913,8 +2073,18 @@ vmxnet3_rq_init(struct vmxnet3_rx_queue *rq, rq->rx_ring[i].gen = VMXNET3_INIT_GEN; rq->rx_ring[i].isOutOfOrder = 0; } + + err = vmxnet3_create_pp(adapter, rq, + rq->rx_ring[0].size + rq->rx_ring[1].size); + if (err) + return err; + if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1, adapter) == 0) { + xdp_rxq_info_unreg(&rq->xdp_rxq); + page_pool_destroy(rq->page_pool); + rq->page_pool = NULL; + /* at least has 1 rx buffer for the 1st ring */ return -ENOMEM; } @@ -2016,7 +2186,7 @@ err: } -static int +int vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter) { int i, err = 0; @@ -3053,7 +3223,7 @@ vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter) } -static void +void vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter) { size_t sz, i, ring0_size, ring1_size, comp_size; @@ -3612,6 +3782,8 @@ vmxnet3_probe_device(struct pci_dev *pdev, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = vmxnet3_netpoll, #endif + .ndo_bpf = vmxnet3_xdp, + .ndo_xdp_xmit = vmxnet3_xdp_xmit, }; int err; u32 ver; @@ -3864,6 +4036,8 @@ vmxnet3_probe_device(struct pci_dev *pdev, SET_NETDEV_DEV(netdev, &pdev->dev); vmxnet3_declare_features(adapter); + netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_NDO_XMIT; adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ? VMXNET3_DEF_RXDATA_DESC_SIZE : 0; diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 18cf7c723201..98c22d7d87a2 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -28,6 +28,7 @@ #include "vmxnet3_int.h" #include <net/vxlan.h> #include <net/geneve.h> +#include "vmxnet3_xdp.h" #define VXLAN_UDP_PORT 8472 @@ -76,6 +77,10 @@ vmxnet3_tq_driver_stats[] = { copy_skb_header) }, { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, oversized_hdr) }, + { " xdp xmit", offsetof(struct vmxnet3_tq_driver_stats, + xdp_xmit) }, + { " xdp xmit err", offsetof(struct vmxnet3_tq_driver_stats, + xdp_xmit_err) }, }; /* per rq stats maintained by the device */ @@ -106,6 +111,16 @@ vmxnet3_rq_driver_stats[] = { drop_fcs) }, { " rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats, rx_buf_alloc_failure) }, + { " xdp packets", offsetof(struct vmxnet3_rq_driver_stats, + xdp_packets) }, + { " xdp tx", offsetof(struct vmxnet3_rq_driver_stats, + xdp_tx) }, + { " xdp redirects", offsetof(struct vmxnet3_rq_driver_stats, + xdp_redirects) }, + { " xdp drops", offsetof(struct vmxnet3_rq_driver_stats, + xdp_drops) }, + { " xdp aborted", offsetof(struct vmxnet3_rq_driver_stats, + xdp_aborted) }, }; /* global stats maintained by the driver */ @@ -249,10 +264,18 @@ vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) netdev_features_t vmxnet3_fix_features(struct net_device *netdev, netdev_features_t features) { + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + /* If Rx checksum is disabled, then LRO should also be disabled */ if (!(features & NETIF_F_RXCSUM)) features &= ~NETIF_F_LRO; + /* If XDP is enabled, then LRO should not be enabled */ + if (vmxnet3_xdp_enabled(adapter) && (features & NETIF_F_LRO)) { + netdev_err(netdev, "LRO is not supported with XDP"); + features &= ~NETIF_F_LRO; + } + return features; } diff --git a/drivers/net/vmxnet3/vmxnet3_int.h 
b/drivers/net/vmxnet3/vmxnet3_int.h index 3367db23aa13..915aaf18c409 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -56,6 +56,9 @@ #include <linux/if_arp.h> #include <linux/inetdevice.h> #include <linux/log2.h> +#include <linux/bpf.h> +#include <net/page_pool/helpers.h> +#include <net/xdp.h> #include "vmxnet3_defs.h" @@ -188,19 +191,20 @@ struct vmxnet3_tx_data_ring { dma_addr_t basePA; }; -enum vmxnet3_buf_map_type { - VMXNET3_MAP_INVALID = 0, - VMXNET3_MAP_NONE, - VMXNET3_MAP_SINGLE, - VMXNET3_MAP_PAGE, -}; +#define VMXNET3_MAP_NONE 0 +#define VMXNET3_MAP_SINGLE BIT(0) +#define VMXNET3_MAP_PAGE BIT(1) +#define VMXNET3_MAP_XDP BIT(2) struct vmxnet3_tx_buf_info { u32 map_type; u16 len; u16 sop_idx; dma_addr_t dma_addr; - struct sk_buff *skb; + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; }; struct vmxnet3_tq_driver_stats { @@ -217,6 +221,9 @@ struct vmxnet3_tq_driver_stats { u64 linearized; /* # of pkts linearized */ u64 copy_skb_header; /* # of times we have to copy skb header */ u64 oversized_hdr; + + u64 xdp_xmit; + u64 xdp_xmit_err; }; struct vmxnet3_tx_ctx { @@ -253,12 +260,13 @@ struct vmxnet3_tx_queue { * stopped */ int qid; u16 txdata_desc_size; -} __attribute__((__aligned__(SMP_CACHE_BYTES))); +} ____cacheline_aligned; enum vmxnet3_rx_buf_type { VMXNET3_RX_BUF_NONE = 0, VMXNET3_RX_BUF_SKB = 1, - VMXNET3_RX_BUF_PAGE = 2 + VMXNET3_RX_BUF_PAGE = 2, + VMXNET3_RX_BUF_XDP = 3, }; #define VMXNET3_RXD_COMP_PENDING 0 @@ -285,6 +293,12 @@ struct vmxnet3_rq_driver_stats { u64 drop_err; u64 drop_fcs; u64 rx_buf_alloc_failure; + + u64 xdp_packets; /* Total packets processed by XDP. */ + u64 xdp_tx; + u64 xdp_redirects; + u64 xdp_drops; + u64 xdp_aborted; }; struct vmxnet3_rx_data_ring { @@ -307,7 +321,9 @@ struct vmxnet3_rx_queue { struct vmxnet3_rx_buf_info *buf_info[2]; struct Vmxnet3_RxQueueCtrl *shared; struct vmxnet3_rq_driver_stats stats; -} __attribute__((__aligned__(SMP_CACHE_BYTES))); + struct page_pool *page_pool; + struct xdp_rxq_info xdp_rxq; +} ____cacheline_aligned; #define VMXNET3_DEVICE_MAX_TX_QUEUES 32 #define VMXNET3_DEVICE_MAX_RX_QUEUES 32 /* Keep this value as a power of 2 */ @@ -415,6 +431,7 @@ struct vmxnet3_adapter { u16 tx_prod_offset; u16 rx_prod_offset; u16 rx_prod2_offset; + struct bpf_prog __rcu *xdp_bpf_prog; }; #define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \ @@ -490,6 +507,12 @@ vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter); void vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter); +int +vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter); + +void +vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter); + netdev_features_t vmxnet3_fix_features(struct net_device *netdev, netdev_features_t features); diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c new file mode 100644 index 000000000000..80ddaff759d4 --- /dev/null +++ b/drivers/net/vmxnet3/vmxnet3_xdp.c @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Linux driver for VMware's vmxnet3 ethernet NIC. + * Copyright (C) 2008-2023, VMware, Inc. All Rights Reserved. 
+ * Maintained by: pv-drivers@vmware.com + * + */ + +#include "vmxnet3_int.h" +#include "vmxnet3_xdp.h" + +static void +vmxnet3_xdp_exchange_program(struct vmxnet3_adapter *adapter, + struct bpf_prog *prog) +{ + rcu_assign_pointer(adapter->xdp_bpf_prog, prog); +} + +static inline struct vmxnet3_tx_queue * +vmxnet3_xdp_get_tq(struct vmxnet3_adapter *adapter) +{ + struct vmxnet3_tx_queue *tq; + int tq_number; + int cpu; + + tq_number = adapter->num_tx_queues; + cpu = smp_processor_id(); + if (likely(cpu < tq_number)) + tq = &adapter->tx_queue[cpu]; + else + tq = &adapter->tx_queue[reciprocal_scale(cpu, tq_number)]; + + return tq; +} + +static int +vmxnet3_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf, + struct netlink_ext_ack *extack) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + struct bpf_prog *new_bpf_prog = bpf->prog; + struct bpf_prog *old_bpf_prog; + bool need_update; + bool running; + int err; + + if (new_bpf_prog && netdev->mtu > VMXNET3_XDP_MAX_MTU) { + NL_SET_ERR_MSG_FMT_MOD(extack, "MTU %u too large for XDP", + netdev->mtu); + return -EOPNOTSUPP; + } + + if (adapter->netdev->features & NETIF_F_LRO) { + NL_SET_ERR_MSG_MOD(extack, "LRO is not supported with XDP"); + adapter->netdev->features &= ~NETIF_F_LRO; + } + + old_bpf_prog = rcu_dereference(adapter->xdp_bpf_prog); + if (!new_bpf_prog && !old_bpf_prog) + return 0; + + running = netif_running(netdev); + need_update = !!old_bpf_prog != !!new_bpf_prog; + + if (running && need_update) + vmxnet3_quiesce_dev(adapter); + + vmxnet3_xdp_exchange_program(adapter, new_bpf_prog); + if (old_bpf_prog) + bpf_prog_put(old_bpf_prog); + + if (!running || !need_update) + return 0; + + if (new_bpf_prog) + xdp_features_set_redirect_target(netdev, false); + else + xdp_features_clear_redirect_target(netdev); + + vmxnet3_reset_dev(adapter); + vmxnet3_rq_destroy_all(adapter); + vmxnet3_adjust_rx_ring_size(adapter); + err = vmxnet3_rq_create_all(adapter); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "failed to re-create rx queues for XDP."); + return -EOPNOTSUPP; + } + err = vmxnet3_activate_dev(adapter); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "failed to activate device for XDP."); + return -EOPNOTSUPP; + } + clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); + + return 0; +} + +/* This is the main xdp call used by the kernel to set or unset an eBPF program. 
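+ * It is reached through the driver's ndo_bpf hook (wired up in
+ * vmxnet3_probe_device()); only the XDP_SETUP_PROG command is handled.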
*/ +int +vmxnet3_xdp(struct net_device *netdev, struct netdev_bpf *bpf) +{ + switch (bpf->command) { + case XDP_SETUP_PROG: + return vmxnet3_xdp_set(netdev, bpf, bpf->extack); + default: + return -EINVAL; + } + + return 0; +} + +static int +vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter, + struct xdp_frame *xdpf, + struct vmxnet3_tx_queue *tq, bool dma_map) +{ + struct vmxnet3_tx_buf_info *tbi = NULL; + union Vmxnet3_GenericDesc *gdesc; + struct vmxnet3_tx_ctx ctx; + int tx_num_deferred; + struct page *page; + u32 buf_size; + u32 dw2; + + dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT; + dw2 |= xdpf->len; + ctx.sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill; + gdesc = ctx.sop_txd; + + buf_size = xdpf->len; + tbi = tq->buf_info + tq->tx_ring.next2fill; + + if (vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) == 0) { + tq->stats.tx_ring_full++; + return -ENOSPC; + } + + tbi->map_type = VMXNET3_MAP_XDP; + if (dma_map) { /* ndo_xdp_xmit */ + tbi->dma_addr = dma_map_single(&adapter->pdev->dev, + xdpf->data, buf_size, + DMA_TO_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) + return -EFAULT; + tbi->map_type |= VMXNET3_MAP_SINGLE; + } else { /* XDP buffer from page pool */ + page = virt_to_page(xdpf->data); + tbi->dma_addr = page_pool_get_dma_addr(page) + + VMXNET3_XDP_HEADROOM; + dma_sync_single_for_device(&adapter->pdev->dev, + tbi->dma_addr, buf_size, + DMA_TO_DEVICE); + } + tbi->xdpf = xdpf; + tbi->len = buf_size; + + gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; + WARN_ON_ONCE(gdesc->txd.gen == tq->tx_ring.gen); + + gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); + gdesc->dword[2] = cpu_to_le32(dw2); + + /* Setup the EOP desc */ + gdesc->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP); + + gdesc->txd.om = 0; + gdesc->txd.msscof = 0; + gdesc->txd.hlen = 0; + gdesc->txd.ti = 0; + + tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred); + le32_add_cpu(&tq->shared->txNumDeferred, 1); + tx_num_deferred++; + + vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); + + /* set the last buf_info for the pkt */ + tbi->sop_idx = ctx.sop_txd - tq->tx_ring.base; + + dma_wmb(); + gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^ + VMXNET3_TXD_GEN); + + /* No need to handle the case when tx_num_deferred doesn't reach + * threshold. Backend driver at hypervisor side will poll and reset + * tq->shared->txNumDeferred to 0. 
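+ * Ringing the VMXNET3_REG_TXPROD doorbell only after txThreshold
+ * descriptors have accumulated batches the MMIO writes.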
+ */ + if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) { + tq->shared->txNumDeferred = 0; + VMXNET3_WRITE_BAR0_REG(adapter, + VMXNET3_REG_TXPROD + tq->qid * 8, + tq->tx_ring.next2fill); + } + + return 0; +} + +static int +vmxnet3_xdp_xmit_back(struct vmxnet3_adapter *adapter, + struct xdp_frame *xdpf) +{ + struct vmxnet3_tx_queue *tq; + struct netdev_queue *nq; + int err; + + tq = vmxnet3_xdp_get_tq(adapter); + if (tq->stopped) + return -ENETDOWN; + + nq = netdev_get_tx_queue(adapter->netdev, tq->qid); + + __netif_tx_lock(nq, smp_processor_id()); + err = vmxnet3_xdp_xmit_frame(adapter, xdpf, tq, false); + __netif_tx_unlock(nq); + + return err; +} + +/* ndo_xdp_xmit */ +int +vmxnet3_xdp_xmit(struct net_device *dev, + int n, struct xdp_frame **frames, u32 flags) +{ + struct vmxnet3_adapter *adapter = netdev_priv(dev); + struct vmxnet3_tx_queue *tq; + int i; + + if (unlikely(test_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))) + return -ENETDOWN; + if (unlikely(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))) + return -EINVAL; + + tq = vmxnet3_xdp_get_tq(adapter); + if (tq->stopped) + return -ENETDOWN; + + for (i = 0; i < n; i++) { + if (vmxnet3_xdp_xmit_frame(adapter, frames[i], tq, true)) { + tq->stats.xdp_xmit_err++; + break; + } + } + tq->stats.xdp_xmit += i; + + return i; +} + +static int +vmxnet3_run_xdp(struct vmxnet3_rx_queue *rq, struct xdp_buff *xdp, + struct bpf_prog *prog) +{ + struct xdp_frame *xdpf; + struct page *page; + int err; + u32 act; + + rq->stats.xdp_packets++; + act = bpf_prog_run_xdp(prog, xdp); + page = virt_to_page(xdp->data_hard_start); + + switch (act) { + case XDP_PASS: + return act; + case XDP_REDIRECT: + err = xdp_do_redirect(rq->adapter->netdev, xdp, prog); + if (!err) { + rq->stats.xdp_redirects++; + } else { + rq->stats.xdp_drops++; + page_pool_recycle_direct(rq->page_pool, page); + } + return act; + case XDP_TX: + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf || + vmxnet3_xdp_xmit_back(rq->adapter, xdpf))) { + rq->stats.xdp_drops++; + page_pool_recycle_direct(rq->page_pool, page); + } else { + rq->stats.xdp_tx++; + } + return act; + default: + bpf_warn_invalid_xdp_action(rq->adapter->netdev, prog, act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(rq->adapter->netdev, prog, act); + rq->stats.xdp_aborted++; + break; + case XDP_DROP: + rq->stats.xdp_drops++; + break; + } + + page_pool_recycle_direct(rq->page_pool, page); + + return act; +} + +static struct sk_buff * +vmxnet3_build_skb(struct vmxnet3_rx_queue *rq, struct page *page, + const struct xdp_buff *xdp) +{ + struct sk_buff *skb; + + skb = build_skb(page_address(page), PAGE_SIZE); + if (unlikely(!skb)) { + page_pool_recycle_direct(rq->page_pool, page); + rq->stats.rx_buf_alloc_failure++; + return NULL; + } + + /* bpf prog might change len and data position. */ + skb_reserve(skb, xdp->data - xdp->data_hard_start); + skb_put(skb, xdp->data_end - xdp->data); + skb_mark_for_recycle(skb); + + return skb; +} + +/* Handle packets from DataRing. 
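+ * The payload sits in the rx data ring rather than in a page-pool page,
+ * so it is copied into a freshly allocated page before the XDP program
+ * runs (see the memcpy() below).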
*/ +int +vmxnet3_process_xdp_small(struct vmxnet3_adapter *adapter, + struct vmxnet3_rx_queue *rq, + void *data, int len, + struct sk_buff **skb_xdp_pass) +{ + struct bpf_prog *xdp_prog; + struct xdp_buff xdp; + struct page *page; + int act; + + page = page_pool_alloc_pages(rq->page_pool, GFP_ATOMIC); + if (unlikely(!page)) { + rq->stats.rx_buf_alloc_failure++; + return XDP_DROP; + } + + xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq); + xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset, + len, false); + xdp_buff_clear_frags_flag(&xdp); + + /* Must copy the data because it's at dataring. */ + memcpy(xdp.data, data, len); + + xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog); + if (!xdp_prog) { + act = XDP_PASS; + goto out_skb; + } + act = vmxnet3_run_xdp(rq, &xdp, xdp_prog); + if (act != XDP_PASS) + return act; + +out_skb: + *skb_xdp_pass = vmxnet3_build_skb(rq, page, &xdp); + if (!*skb_xdp_pass) + return XDP_DROP; + + /* No need to refill. */ + return likely(*skb_xdp_pass) ? act : XDP_DROP; +} + +int +vmxnet3_process_xdp(struct vmxnet3_adapter *adapter, + struct vmxnet3_rx_queue *rq, + struct Vmxnet3_RxCompDesc *rcd, + struct vmxnet3_rx_buf_info *rbi, + struct Vmxnet3_RxDesc *rxd, + struct sk_buff **skb_xdp_pass) +{ + struct bpf_prog *xdp_prog; + dma_addr_t new_dma_addr; + struct xdp_buff xdp; + struct page *page; + void *new_data; + int act; + + page = rbi->page; + dma_sync_single_for_cpu(&adapter->pdev->dev, + page_pool_get_dma_addr(page) + + rq->page_pool->p.offset, rcd->len, + page_pool_get_dma_dir(rq->page_pool)); + + xdp_init_buff(&xdp, rbi->len, &rq->xdp_rxq); + xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset, + rcd->len, false); + xdp_buff_clear_frags_flag(&xdp); + + xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog); + if (!xdp_prog) { + act = XDP_PASS; + goto out_skb; + } + act = vmxnet3_run_xdp(rq, &xdp, xdp_prog); + + if (act == XDP_PASS) { +out_skb: + *skb_xdp_pass = vmxnet3_build_skb(rq, page, &xdp); + if (!*skb_xdp_pass) + act = XDP_DROP; + } + + new_data = vmxnet3_pp_get_buff(rq->page_pool, &new_dma_addr, + GFP_ATOMIC); + if (!new_data) { + rq->stats.rx_buf_alloc_failure++; + return XDP_DROP; + } + rbi->page = virt_to_page(new_data); + rbi->dma_addr = new_dma_addr; + rxd->addr = cpu_to_le64(rbi->dma_addr); + rxd->len = rbi->len; + + return act; +} diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.h b/drivers/net/vmxnet3/vmxnet3_xdp.h new file mode 100644 index 000000000000..f9d843e060a3 --- /dev/null +++ b/drivers/net/vmxnet3/vmxnet3_xdp.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later + * + * Linux driver for VMware's vmxnet3 ethernet NIC. + * Copyright (C) 2008-2023, VMware, Inc. All Rights Reserved. 
+ * Maintained by: pv-drivers@vmware.com + * + */ + +#ifndef _VMXNET3_XDP_H +#define _VMXNET3_XDP_H + +#include <linux/filter.h> +#include <linux/bpf_trace.h> +#include <linux/netlink.h> + +#include "vmxnet3_int.h" + +#define VMXNET3_XDP_HEADROOM (XDP_PACKET_HEADROOM + NET_IP_ALIGN) +#define VMXNET3_XDP_RX_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +#define VMXNET3_XDP_RX_OFFSET VMXNET3_XDP_HEADROOM +#define VMXNET3_XDP_MAX_FRSIZE (PAGE_SIZE - VMXNET3_XDP_HEADROOM - \ + VMXNET3_XDP_RX_TAILROOM) +#define VMXNET3_XDP_MAX_MTU (VMXNET3_XDP_MAX_FRSIZE - ETH_HLEN - \ + 2 * VLAN_HLEN - ETH_FCS_LEN) + +int vmxnet3_xdp(struct net_device *netdev, struct netdev_bpf *bpf); +int vmxnet3_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, + u32 flags); +int vmxnet3_process_xdp(struct vmxnet3_adapter *adapter, + struct vmxnet3_rx_queue *rq, + struct Vmxnet3_RxCompDesc *rcd, + struct vmxnet3_rx_buf_info *rbi, + struct Vmxnet3_RxDesc *rxd, + struct sk_buff **skb_xdp_pass); +int vmxnet3_process_xdp_small(struct vmxnet3_adapter *adapter, + struct vmxnet3_rx_queue *rq, + void *data, int len, + struct sk_buff **skb_xdp_pass); +void *vmxnet3_pp_get_buff(struct page_pool *pp, dma_addr_t *dma_addr, + gfp_t gfp_mask); + +static inline bool vmxnet3_xdp_enabled(struct vmxnet3_adapter *adapter) +{ + return !!rcu_access_pointer(adapter->xdp_bpf_prog); +} + +#endif diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 6043e63b42f9..bb95ce43cd97 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -121,22 +121,12 @@ struct net_vrf { int ifindex; }; -struct pcpu_dstats { - u64 tx_pkts; - u64 tx_bytes; - u64 tx_drps; - u64 rx_pkts; - u64 rx_bytes; - u64 rx_drps; - struct u64_stats_sync syncp; -}; - static void vrf_rx_stats(struct net_device *dev, int len) { struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); u64_stats_update_begin(&dstats->syncp); - dstats->rx_pkts++; + dstats->rx_packets++; dstats->rx_bytes += len; u64_stats_update_end(&dstats->syncp); } @@ -161,10 +151,10 @@ static void vrf_get_stats64(struct net_device *dev, do { start = u64_stats_fetch_begin(&dstats->syncp); tbytes = dstats->tx_bytes; - tpkts = dstats->tx_pkts; - tdrops = dstats->tx_drps; + tpkts = dstats->tx_packets; + tdrops = dstats->tx_drops; rbytes = dstats->rx_bytes; - rpkts = dstats->rx_pkts; + rpkts = dstats->rx_packets; } while (u64_stats_fetch_retry(&dstats->syncp, start)); stats->tx_bytes += tbytes; stats->tx_packets += tpkts; @@ -421,7 +411,7 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev, if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) vrf_rx_stats(dev, len); else - this_cpu_inc(dev->dstats->rx_drps); + this_cpu_inc(dev->dstats->rx_drops); return NETDEV_TX_OK; } @@ -616,11 +606,11 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev) struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); u64_stats_update_begin(&dstats->syncp); - dstats->tx_pkts++; + dstats->tx_packets++; dstats->tx_bytes += len; u64_stats_update_end(&dstats->syncp); } else { - this_cpu_inc(dev->dstats->tx_drps); + this_cpu_inc(dev->dstats->tx_drops); } return ret; @@ -638,9 +628,7 @@ static void vrf_finish_direct(struct sk_buff *skb) eth_zero_addr(eth->h_dest); eth->h_proto = skb->protocol; - rcu_read_lock_bh(); dev_queue_xmit_nit(skb, vrf_dev); - rcu_read_unlock_bh(); skb_pull(skb, ETH_HLEN); } @@ -1176,22 +1164,15 @@ static void vrf_dev_uninit(struct net_device *dev) vrf_rtable_release(dev, vrf); vrf_rt6_release(dev, vrf); - - free_percpu(dev->dstats); - dev->dstats = NULL; } static 
int vrf_dev_init(struct net_device *dev) { struct net_vrf *vrf = netdev_priv(dev); - dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats); - if (!dev->dstats) - goto out_nomem; - /* create the default dst which points back to us */ if (vrf_rtable_create(dev) != 0) - goto out_stats; + goto out_nomem; if (vrf_rt6_create(dev) != 0) goto out_rth; @@ -1205,9 +1186,6 @@ static int vrf_dev_init(struct net_device *dev) out_rth: vrf_rtable_release(dev, vrf); -out_stats: - free_percpu(dev->dstats); - dev->dstats = NULL; out_nomem: return -ENOMEM; } @@ -1706,6 +1684,8 @@ static void vrf_setup(struct net_device *dev) dev->min_mtu = IPV6_MIN_MTU; dev->max_mtu = IP6_MAX_MTU; dev->mtu = dev->max_mtu; + + dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS; } static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], @@ -1965,7 +1945,6 @@ static const struct ctl_table vrf_table[] = { /* set by the vrf_netns_init */ .extra1 = NULL, }, - { }, }; static int vrf_netns_init_sysctl(struct net *net, struct netns_vrf *nn_vrf) @@ -1979,7 +1958,8 @@ static int vrf_netns_init_sysctl(struct net *net, struct netns_vrf *nn_vrf) /* init the extra1 parameter with the reference to current netns */ table[0].extra1 = net; - nn_vrf->ctl_hdr = register_net_sysctl(net, "net/vrf", table); + nn_vrf->ctl_hdr = register_net_sysctl_sz(net, "net/vrf", table, + ARRAY_SIZE(vrf_table)); if (!nn_vrf->ctl_hdr) { kfree(table); return -ENOMEM; diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index c9a9373733c0..412c3c0b6990 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -2215,127 +2215,16 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, return 0; } -static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev, - struct vxlan_sock *sock4, - struct sk_buff *skb, int oif, u8 tos, - __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport, - __u8 flow_flags, struct dst_cache *dst_cache, - const struct ip_tunnel_info *info) -{ - bool use_cache = ip_tunnel_dst_cache_usable(skb, info); - struct rtable *rt = NULL; - struct flowi4 fl4; - - if (!sock4) - return ERR_PTR(-EIO); - - if (tos && !info) - use_cache = false; - if (use_cache) { - rt = dst_cache_get_ip4(dst_cache, saddr); - if (rt) - return rt; - } - - memset(&fl4, 0, sizeof(fl4)); - fl4.flowi4_oif = oif; - fl4.flowi4_tos = RT_TOS(tos); - fl4.flowi4_mark = skb->mark; - fl4.flowi4_proto = IPPROTO_UDP; - fl4.daddr = daddr; - fl4.saddr = *saddr; - fl4.fl4_dport = dport; - fl4.fl4_sport = sport; - fl4.flowi4_flags = flow_flags; - - rt = ip_route_output_key(vxlan->net, &fl4); - if (!IS_ERR(rt)) { - if (rt->dst.dev == dev) { - netdev_dbg(dev, "circular route to %pI4\n", &daddr); - ip_rt_put(rt); - return ERR_PTR(-ELOOP); - } - - *saddr = fl4.saddr; - if (use_cache) - dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr); - } else { - netdev_dbg(dev, "no route to %pI4\n", &daddr); - return ERR_PTR(-ENETUNREACH); - } - return rt; -} - -#if IS_ENABLED(CONFIG_IPV6) -static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, - struct net_device *dev, - struct vxlan_sock *sock6, - struct sk_buff *skb, int oif, u8 tos, - __be32 label, - const struct in6_addr *daddr, - struct in6_addr *saddr, - __be16 dport, __be16 sport, - struct dst_cache *dst_cache, - const struct ip_tunnel_info *info) -{ - bool use_cache = ip_tunnel_dst_cache_usable(skb, info); - struct dst_entry *ndst; - struct flowi6 fl6; - - if (!sock6) - return ERR_PTR(-EIO); - - if (tos && !info) - use_cache = false; - if 
(use_cache) { - ndst = dst_cache_get_ip6(dst_cache, saddr); - if (ndst) - return ndst; - } - - memset(&fl6, 0, sizeof(fl6)); - fl6.flowi6_oif = oif; - fl6.daddr = *daddr; - fl6.saddr = *saddr; - fl6.flowlabel = ip6_make_flowinfo(tos, label); - fl6.flowi6_mark = skb->mark; - fl6.flowi6_proto = IPPROTO_UDP; - fl6.fl6_dport = dport; - fl6.fl6_sport = sport; - - ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk, - &fl6, NULL); - if (IS_ERR(ndst)) { - netdev_dbg(dev, "no route to %pI6\n", daddr); - return ERR_PTR(-ENETUNREACH); - } - - if (unlikely(ndst->dev == dev)) { - netdev_dbg(dev, "circular route to %pI6\n", daddr); - dst_release(ndst); - return ERR_PTR(-ELOOP); - } - - *saddr = fl6.saddr; - if (use_cache) - dst_cache_set_ip6(dst_cache, ndst, saddr); - return ndst; -} -#endif - /* Bypass encapsulation if the destination is local */ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, struct vxlan_dev *dst_vxlan, __be32 vni, bool snoop) { - struct pcpu_sw_netstats *tx_stats, *rx_stats; union vxlan_addr loopback; union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; struct net_device *dev; int len = skb->len; - tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); - rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats); skb->pkt_type = PACKET_HOST; skb->encapsulation = 0; skb->dev = dst_vxlan->dev; @@ -2361,17 +2250,11 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, if ((dst_vxlan->cfg.flags & VXLAN_F_LEARN) && snoop) vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni); - u64_stats_update_begin(&tx_stats->syncp); - u64_stats_inc(&tx_stats->tx_packets); - u64_stats_add(&tx_stats->tx_bytes, len); - u64_stats_update_end(&tx_stats->syncp); + dev_sw_netstats_tx_add(src_vxlan->dev, 1, len); vxlan_vnifilter_count(src_vxlan, vni, NULL, VXLAN_VNI_STATS_TX, len); if (__netif_rx(skb) == NET_RX_SUCCESS) { - u64_stats_update_begin(&rx_stats->syncp); - u64_stats_inc(&rx_stats->rx_packets); - u64_stats_add(&rx_stats->rx_bytes, len); - u64_stats_update_end(&rx_stats->syncp); + dev_sw_netstats_rx_add(dst_vxlan->dev, len); vxlan_vnifilter_count(dst_vxlan, vni, NULL, VXLAN_VNI_STATS_RX, len); } else { @@ -2385,7 +2268,7 @@ drop: static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, struct vxlan_dev *vxlan, - union vxlan_addr *daddr, + int addr_family, __be16 dst_port, int dst_ifindex, __be32 vni, struct dst_entry *dst, u32 rt_flags) @@ -2405,7 +2288,7 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, dst_release(dst); dst_vxlan = vxlan_find_vni(vxlan->net, dst_ifindex, vni, - daddr->sa.sa_family, dst_port, + addr_family, dst_port, vxlan->cfg.flags); if (!dst_vxlan) { dev->stats.tx_errors++; @@ -2427,31 +2310,33 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, { struct dst_cache *dst_cache; struct ip_tunnel_info *info; + struct ip_tunnel_key *pkey; + struct ip_tunnel_key key; struct vxlan_dev *vxlan = netdev_priv(dev); const struct iphdr *old_iph = ip_hdr(skb); - union vxlan_addr *dst; - union vxlan_addr remote_ip, local_ip; struct vxlan_metadata _md; struct vxlan_metadata *md = &_md; unsigned int pkt_len = skb->len; __be16 src_port = 0, dst_port; struct dst_entry *ndst = NULL; - __u8 tos, ttl, flow_flags = 0; + int addr_family; + __u8 tos, ttl; int ifindex; int err; u32 flags = vxlan->cfg.flags; + bool use_cache; bool udp_sum = false; bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev)); __be32 vni = 0; -#if IS_ENABLED(CONFIG_IPV6) - __be32 label; 
-#endif info = skb_tunnel_info(skb); + use_cache = ip_tunnel_dst_cache_usable(skb, info); if (rdst) { - dst = &rdst->remote_ip; - if (vxlan_addr_any(dst)) { + memset(&key, 0, sizeof(key)); + pkey = &key; + + if (vxlan_addr_any(&rdst->remote_ip)) { if (did_rsc) { /* short-circuited back to local bridge */ vxlan_encap_bypass(skb, vxlan, vxlan, @@ -2461,30 +2346,40 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto drop; } + addr_family = vxlan->cfg.saddr.sa.sa_family; dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; vni = (rdst->remote_vni) ? : default_vni; ifindex = rdst->remote_ifindex; - local_ip = vxlan->cfg.saddr; + + if (addr_family == AF_INET) { + key.u.ipv4.src = vxlan->cfg.saddr.sin.sin_addr.s_addr; + key.u.ipv4.dst = rdst->remote_ip.sin.sin_addr.s_addr; + } else { + key.u.ipv6.src = vxlan->cfg.saddr.sin6.sin6_addr; + key.u.ipv6.dst = rdst->remote_ip.sin6.sin6_addr; + } + dst_cache = &rdst->dst_cache; md->gbp = skb->mark; if (flags & VXLAN_F_TTL_INHERIT) { ttl = ip_tunnel_get_ttl(old_iph, skb); } else { ttl = vxlan->cfg.ttl; - if (!ttl && vxlan_addr_multicast(dst)) + if (!ttl && vxlan_addr_multicast(&rdst->remote_ip)) ttl = 1; } - tos = vxlan->cfg.tos; if (tos == 1) tos = ip_tunnel_get_dsfield(old_iph, skb); + if (tos && !info) + use_cache = false; - if (dst->sa.sa_family == AF_INET) + if (addr_family == AF_INET) udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX); else udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX); #if IS_ENABLED(CONFIG_IPV6) - label = vxlan->cfg.label; + key.label = vxlan->cfg.label; #endif } else { if (!info) { @@ -2492,17 +2387,9 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, dev->name); goto drop; } - remote_ip.sa.sa_family = ip_tunnel_info_af(info); - if (remote_ip.sa.sa_family == AF_INET) { - remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst; - local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src; - } else { - remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst; - local_ip.sin6.sin6_addr = info->key.u.ipv6.src; - } - dst = &remote_ip; + pkey = &info->key; + addr_family = ip_tunnel_info_af(info); dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port; - flow_flags = info->key.flow_flags; vni = tunnel_id_to_key32(info->key.tun_id); ifindex = 0; dst_cache = &info->dst_cache; @@ -2513,28 +2400,24 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } ttl = info->key.ttl; tos = info->key.tos; -#if IS_ENABLED(CONFIG_IPV6) - label = info->key.label; -#endif udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); } src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, vxlan->cfg.port_max, true); rcu_read_lock(); - if (dst->sa.sa_family == AF_INET) { + if (addr_family == AF_INET) { struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); struct rtable *rt; __be16 df = 0; + __be32 saddr; if (!ifindex) ifindex = sock4->sock->sk->sk_bound_dev_if; - rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos, - dst->sin.sin_addr.s_addr, - &local_ip.sin.sin_addr.s_addr, - dst_port, src_port, flow_flags, - dst_cache, info); + rt = udp_tunnel_dst_lookup(skb, dev, vxlan->net, ifindex, + &saddr, pkey, src_port, dst_port, + tos, use_cache ? 
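+					   /* NULL disables caching, e.g. when tos is inherited per packet */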
dst_cache : NULL); if (IS_ERR(rt)) { err = PTR_ERR(rt); goto tx_error; @@ -2542,7 +2425,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, if (!info) { /* Bypass encapsulation if the destination is local */ - err = encap_bypass_if_local(skb, dev, vxlan, dst, + err = encap_bypass_if_local(skb, dev, vxlan, AF_INET, dst_port, ifindex, vni, &rt->dst, rt->rt_flags); if (err) @@ -2570,16 +2453,13 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } else if (err) { if (info) { struct ip_tunnel_info *unclone; - struct in_addr src, dst; unclone = skb_tunnel_info_unclone(skb); if (unlikely(!unclone)) goto tx_error; - src = remote_ip.sin.sin_addr; - dst = local_ip.sin.sin_addr; - unclone->key.u.ipv4.src = src.s_addr; - unclone->key.u.ipv4.dst = dst.s_addr; + unclone->key.u.ipv4.src = pkey->u.ipv4.dst; + unclone->key.u.ipv4.dst = saddr; } vxlan_encap_bypass(skb, vxlan, vxlan, vni, false); dst_release(ndst); @@ -2593,21 +2473,21 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, if (err < 0) goto tx_error; - udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, local_ip.sin.sin_addr.s_addr, - dst->sin.sin_addr.s_addr, tos, ttl, df, + udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, saddr, + pkey->u.ipv4.dst, tos, ttl, df, src_port, dst_port, xnet, !udp_sum); #if IS_ENABLED(CONFIG_IPV6) } else { struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); + struct in6_addr saddr; if (!ifindex) ifindex = sock6->sock->sk->sk_bound_dev_if; - ndst = vxlan6_get_route(vxlan, dev, sock6, skb, ifindex, tos, - label, &dst->sin6.sin6_addr, - &local_ip.sin6.sin6_addr, - dst_port, src_port, - dst_cache, info); + ndst = udp_tunnel6_dst_lookup(skb, dev, vxlan->net, sock6->sock, + ifindex, &saddr, pkey, + src_port, dst_port, tos, + use_cache ? 
dst_cache : NULL); if (IS_ERR(ndst)) { err = PTR_ERR(ndst); ndst = NULL; @@ -2617,7 +2497,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, if (!info) { u32 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags; - err = encap_bypass_if_local(skb, dev, vxlan, dst, + err = encap_bypass_if_local(skb, dev, vxlan, AF_INET6, dst_port, ifindex, vni, ndst, rt6i_flags); if (err) @@ -2632,16 +2512,13 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } else if (err) { if (info) { struct ip_tunnel_info *unclone; - struct in6_addr src, dst; unclone = skb_tunnel_info_unclone(skb); if (unlikely(!unclone)) goto tx_error; - src = remote_ip.sin6.sin6_addr; - dst = local_ip.sin6.sin6_addr; - unclone->key.u.ipv6.src = src; - unclone->key.u.ipv6.dst = dst; + unclone->key.u.ipv6.src = pkey->u.ipv6.dst; + unclone->key.u.ipv6.dst = saddr; } vxlan_encap_bypass(skb, vxlan, vxlan, vni, false); @@ -2658,9 +2535,8 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto tx_error; udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev, - &local_ip.sin6.sin6_addr, - &dst->sin6.sin6_addr, tos, ttl, - label, src_port, dst_port, !udp_sum); + &saddr, &pkey->u.ipv6.dst, tos, ttl, + pkey->label, src_port, dst_port, !udp_sum); #endif } vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX, pkt_len); @@ -2720,6 +2596,45 @@ drop: dev_kfree_skb(skb); } +static netdev_tx_t vxlan_xmit_nhid(struct sk_buff *skb, struct net_device *dev, + u32 nhid, __be32 vni) +{ + struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_rdst nh_rdst; + struct nexthop *nh; + bool do_xmit; + u32 hash; + + memset(&nh_rdst, 0, sizeof(struct vxlan_rdst)); + hash = skb_get_hash(skb); + + rcu_read_lock(); + nh = nexthop_find_by_id(dev_net(dev), nhid); + if (unlikely(!nh || !nexthop_is_fdb(nh) || !nexthop_is_multipath(nh))) { + rcu_read_unlock(); + goto drop; + } + do_xmit = vxlan_fdb_nh_path_select(nh, hash, &nh_rdst); + rcu_read_unlock(); + + if (vxlan->cfg.saddr.sa.sa_family != nh_rdst.remote_ip.sa.sa_family) + goto drop; + + if (likely(do_xmit)) + vxlan_xmit_one(skb, dev, vni, &nh_rdst, false); + else + goto drop; + + return NETDEV_TX_OK; + +drop: + dev->stats.tx_dropped++; + vxlan_vnifilter_count(netdev_priv(dev), vni, NULL, + VXLAN_VNI_STATS_TX_DROPS, 0); + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + /* Transmit local packets over Vxlan * * Outer IP header inherits ECN and DF from inner header. 
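vxlan_xmit_nhid() gives FDB nexthop groups per-flow ECMP semantics: the member path is chosen by the skb flow hash rather than round-robin, so every packet of a flow keeps using the same remote VTEP and is never reordered against its peers. Distilled to that contract, with the drop accounting trimmed, the selection amounts to the following sketch (demo_select_fdb_path() is hypothetical; the helpers are the ones used above):

        /* Pick one remote for this skb from an FDB nexthop group. */
        static bool demo_select_fdb_path(struct net *net, struct sk_buff *skb,
                                         u32 nhid, struct vxlan_rdst *rdst)
        {
                struct nexthop *nh;
                bool ok = false;

                rcu_read_lock();
                nh = nexthop_find_by_id(net, nhid);
                /* Only multipath FDB nexthop groups are valid vxlan targets. */
                if (nh && nexthop_is_fdb(nh) && nexthop_is_multipath(nh))
                        /* Same flow hash => same group member, so no reordering. */
                        ok = vxlan_fdb_nh_path_select(nh, skb_get_hash(skb), rdst);
                rcu_read_unlock();

                return ok;
        }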
@@ -2735,6 +2650,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) struct vxlan_fdb *f; struct ethhdr *eth; __be32 vni = 0; + u32 nhid = 0; info = skb_tunnel_info(skb); @@ -2744,6 +2660,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) if (info && info->mode & IP_TUNNEL_INFO_BRIDGE && info->mode & IP_TUNNEL_INFO_TX) { vni = tunnel_id_to_key32(info->key.tun_id); + nhid = info->key.nhid; } else { if (info && info->mode & IP_TUNNEL_INFO_TX) vxlan_xmit_one(skb, dev, vni, NULL, false); @@ -2771,6 +2688,9 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) #endif } + if (nhid) + return vxlan_xmit_nhid(skb, dev, nhid, vni); + if (vxlan->cfg.flags & VXLAN_F_MDB) { struct vxlan_mdb_entry *mdb_entry; @@ -2987,9 +2907,101 @@ static int vxlan_open(struct net_device *dev) return ret; } +struct vxlan_fdb_flush_desc { + bool ignore_default_entry; + unsigned long state; + unsigned long state_mask; + unsigned long flags; + unsigned long flags_mask; + __be32 src_vni; + u32 nhid; + __be32 vni; + __be16 port; + union vxlan_addr dst_ip; +}; + +static bool vxlan_fdb_is_default_entry(const struct vxlan_fdb *f, + const struct vxlan_dev *vxlan) +{ + return is_zero_ether_addr(f->eth_addr) && f->vni == vxlan->cfg.vni; +} + +static bool vxlan_fdb_nhid_matches(const struct vxlan_fdb *f, u32 nhid) +{ + struct nexthop *nh = rtnl_dereference(f->nh); + + return nh && nh->id == nhid; +} + +static bool vxlan_fdb_flush_matches(const struct vxlan_fdb *f, + const struct vxlan_dev *vxlan, + const struct vxlan_fdb_flush_desc *desc) +{ + if (desc->state_mask && (f->state & desc->state_mask) != desc->state) + return false; + + if (desc->flags_mask && (f->flags & desc->flags_mask) != desc->flags) + return false; + + if (desc->ignore_default_entry && vxlan_fdb_is_default_entry(f, vxlan)) + return false; + + if (desc->src_vni && f->vni != desc->src_vni) + return false; + + if (desc->nhid && !vxlan_fdb_nhid_matches(f, desc->nhid)) + return false; + + return true; +} + +static bool +vxlan_fdb_flush_should_match_remotes(const struct vxlan_fdb_flush_desc *desc) +{ + return desc->vni || desc->port || desc->dst_ip.sa.sa_family; +} + +static bool +vxlan_fdb_flush_remote_matches(const struct vxlan_fdb_flush_desc *desc, + const struct vxlan_rdst *rd) +{ + if (desc->vni && rd->remote_vni != desc->vni) + return false; + + if (desc->port && rd->remote_port != desc->port) + return false; + + if (desc->dst_ip.sa.sa_family && + !vxlan_addr_equal(&rd->remote_ip, &desc->dst_ip)) + return false; + + return true; +} + +static void +vxlan_fdb_flush_match_remotes(struct vxlan_fdb *f, struct vxlan_dev *vxlan, + const struct vxlan_fdb_flush_desc *desc, + bool *p_destroy_fdb) +{ + bool remotes_flushed = false; + struct vxlan_rdst *rd, *tmp; + + list_for_each_entry_safe(rd, tmp, &f->remotes, list) { + if (!vxlan_fdb_flush_remote_matches(desc, rd)) + continue; + + vxlan_fdb_dst_destroy(vxlan, f, rd, true); + remotes_flushed = true; + } + + *p_destroy_fdb = remotes_flushed && list_empty(&f->remotes); +} + /* Purge the forwarding table */ -static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all) +static void vxlan_flush(struct vxlan_dev *vxlan, + const struct vxlan_fdb_flush_desc *desc) { + bool match_remotes = vxlan_fdb_flush_should_match_remotes(desc); unsigned int h; for (h = 0; h < FDB_HASH_SIZE; ++h) { @@ -2999,28 +3011,122 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all) hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { struct vxlan_fdb *f = 
container_of(p, struct vxlan_fdb, hlist); - if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP))) - continue; - /* the all_zeros_mac entry is deleted at vxlan_uninit */ - if (is_zero_ether_addr(f->eth_addr) && - f->vni == vxlan->cfg.vni) + + if (!vxlan_fdb_flush_matches(f, vxlan, desc)) continue; + + if (match_remotes) { + bool destroy_fdb = false; + + vxlan_fdb_flush_match_remotes(f, vxlan, desc, + &destroy_fdb); + + if (!destroy_fdb) + continue; + } + vxlan_fdb_destroy(vxlan, f, true, true); } spin_unlock_bh(&vxlan->hash_lock[h]); } } +static const struct nla_policy vxlan_del_bulk_policy[NDA_MAX + 1] = { + [NDA_SRC_VNI] = { .type = NLA_U32 }, + [NDA_NH_ID] = { .type = NLA_U32 }, + [NDA_VNI] = { .type = NLA_U32 }, + [NDA_PORT] = { .type = NLA_U16 }, + [NDA_DST] = NLA_POLICY_RANGE(NLA_BINARY, sizeof(struct in_addr), + sizeof(struct in6_addr)), + [NDA_NDM_STATE_MASK] = { .type = NLA_U16 }, + [NDA_NDM_FLAGS_MASK] = { .type = NLA_U8 }, +}; + +#define VXLAN_FDB_FLUSH_IGNORED_NDM_FLAGS (NTF_MASTER | NTF_SELF) +#define VXLAN_FDB_FLUSH_ALLOWED_NDM_STATES (NUD_PERMANENT | NUD_NOARP) +#define VXLAN_FDB_FLUSH_ALLOWED_NDM_FLAGS (NTF_EXT_LEARNED | NTF_OFFLOADED | \ + NTF_ROUTER) + +static int vxlan_fdb_delete_bulk(struct nlmsghdr *nlh, struct net_device *dev, + struct netlink_ext_ack *extack) +{ + struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_fdb_flush_desc desc = {}; + struct ndmsg *ndm = nlmsg_data(nlh); + struct nlattr *tb[NDA_MAX + 1]; + u8 ndm_flags; + int err; + + ndm_flags = ndm->ndm_flags & ~VXLAN_FDB_FLUSH_IGNORED_NDM_FLAGS; + + err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, vxlan_del_bulk_policy, + extack); + if (err) + return err; + + if (ndm_flags & ~VXLAN_FDB_FLUSH_ALLOWED_NDM_FLAGS) { + NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm flag bits set"); + return -EINVAL; + } + if (ndm->ndm_state & ~VXLAN_FDB_FLUSH_ALLOWED_NDM_STATES) { + NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm state bits set"); + return -EINVAL; + } + + desc.state = ndm->ndm_state; + desc.flags = ndm_flags; + + if (tb[NDA_NDM_STATE_MASK]) + desc.state_mask = nla_get_u16(tb[NDA_NDM_STATE_MASK]); + + if (tb[NDA_NDM_FLAGS_MASK]) + desc.flags_mask = nla_get_u8(tb[NDA_NDM_FLAGS_MASK]); + + if (tb[NDA_SRC_VNI]) + desc.src_vni = cpu_to_be32(nla_get_u32(tb[NDA_SRC_VNI])); + + if (tb[NDA_NH_ID]) + desc.nhid = nla_get_u32(tb[NDA_NH_ID]); + + if (tb[NDA_VNI]) + desc.vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI])); + + if (tb[NDA_PORT]) + desc.port = nla_get_be16(tb[NDA_PORT]); + + if (tb[NDA_DST]) { + union vxlan_addr ip; + + err = vxlan_nla_get_addr(&ip, tb[NDA_DST]); + if (err) { + NL_SET_ERR_MSG_ATTR(extack, tb[NDA_DST], + "Unsupported address family"); + return err; + } + desc.dst_ip = ip; + } + + vxlan_flush(vxlan, &desc); + + return 0; +} + /* Cleanup timer and forwarding table on shutdown */ static int vxlan_stop(struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_fdb_flush_desc desc = { + /* Default entry is deleted at vxlan_uninit. 
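+		 * A zero .state combined with this mask flushes only entries that
+		 * are neither NUD_PERMANENT nor NUD_NOARP, matching the old
+		 * do_all=false behaviour.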
*/ + .ignore_default_entry = true, + .state = 0, + .state_mask = NUD_PERMANENT | NUD_NOARP, + }; vxlan_multicast_leave(vxlan); del_timer_sync(&vxlan->age_timer); - vxlan_flush(vxlan, false); + vxlan_flush(vxlan, &desc); vxlan_sock_release(vxlan); return 0; @@ -3065,11 +3171,14 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); struct rtable *rt; - rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos, - info->key.u.ipv4.dst, - &info->key.u.ipv4.src, dport, sport, - info->key.flow_flags, &info->dst_cache, - info); + if (!sock4) + return -EIO; + + rt = udp_tunnel_dst_lookup(skb, dev, vxlan->net, 0, + &info->key.u.ipv4.src, + &info->key, + sport, dport, info->key.tos, + &info->dst_cache); if (IS_ERR(rt)) return PTR_ERR(rt); ip_rt_put(rt); @@ -3078,10 +3187,14 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); struct dst_entry *ndst; - ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos, - info->key.label, &info->key.u.ipv6.dst, - &info->key.u.ipv6.src, dport, sport, - &info->dst_cache, info); + if (!sock6) + return -EIO; + + ndst = udp_tunnel6_dst_lookup(skb, dev, vxlan->net, sock6->sock, + 0, &info->key.u.ipv6.src, + &info->key, + sport, dport, info->key.tos, + &info->dst_cache); if (IS_ERR(ndst)) return PTR_ERR(ndst); dst_release(ndst); @@ -3107,11 +3220,13 @@ static const struct net_device_ops vxlan_netdev_ether_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_fdb_add = vxlan_fdb_add, .ndo_fdb_del = vxlan_fdb_delete, + .ndo_fdb_del_bulk = vxlan_fdb_delete_bulk, .ndo_fdb_dump = vxlan_fdb_dump, .ndo_fdb_get = vxlan_fdb_get, .ndo_mdb_add = vxlan_mdb_add, .ndo_mdb_del = vxlan_mdb_del, .ndo_mdb_dump = vxlan_mdb_dump, + .ndo_mdb_get = vxlan_mdb_get, .ndo_fill_metadata_dst = vxlan_fill_metadata_dst, }; @@ -4259,8 +4374,12 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], static void vxlan_dellink(struct net_device *dev, struct list_head *head) { struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_fdb_flush_desc desc = { + /* Default entry is deleted at vxlan_uninit. 
*/ + .ignore_default_entry = true, + }; - vxlan_flush(vxlan, true); + vxlan_flush(vxlan, &desc); list_del(&vxlan->next); unregister_netdevice_queue(dev, head); @@ -4270,7 +4389,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head) static size_t vxlan_get_size(const struct net_device *dev) { - return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */ nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */ @@ -4288,7 +4406,6 @@ static size_t vxlan_get_size(const struct net_device *dev) nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_COLLECT_METADATA */ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */ - nla_total_size(sizeof(struct ifla_vxlan_port_range)) + nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */ @@ -4296,6 +4413,12 @@ static size_t vxlan_get_size(const struct net_device *dev) nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LOCALBYPASS */ + /* IFLA_VXLAN_PORT_RANGE */ + nla_total_size(sizeof(struct ifla_vxlan_port_range)) + + nla_total_size(0) + /* IFLA_VXLAN_GBP */ + nla_total_size(0) + /* IFLA_VXLAN_GPE */ + nla_total_size(0) + /* IFLA_VXLAN_REMCSUM_NOPARTIAL */ + nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_VNIFILTER */ 0; } diff --git a/drivers/net/vxlan/vxlan_mdb.c b/drivers/net/vxlan/vxlan_mdb.c index 5e041622261a..eb4c580b5cee 100644 --- a/drivers/net/vxlan/vxlan_mdb.c +++ b/drivers/net/vxlan/vxlan_mdb.c @@ -311,7 +311,7 @@ vxlan_mdbe_src_list_pol[MDBE_SRC_LIST_MAX + 1] = { [MDBE_SRC_LIST_ENTRY] = NLA_POLICY_NESTED(vxlan_mdbe_src_list_entry_pol), }; -static struct netlink_range_validation vni_range = { +static const struct netlink_range_validation vni_range = { .max = VXLAN_N_VID - 1, }; @@ -370,12 +370,10 @@ static bool vxlan_mdb_is_valid_source(const struct nlattr *attr, __be16 proto, return true; } -static void vxlan_mdb_config_group_set(struct vxlan_mdb_config *cfg, - const struct br_mdb_entry *entry, - const struct nlattr *source_attr) +static void vxlan_mdb_group_set(struct vxlan_mdb_entry_key *group, + const struct br_mdb_entry *entry, + const struct nlattr *source_attr) { - struct vxlan_mdb_entry_key *group = &cfg->group; - switch (entry->addr.proto) { case htons(ETH_P_IP): group->dst.sa.sa_family = AF_INET; @@ -503,7 +501,7 @@ static int vxlan_mdb_config_attrs_init(struct vxlan_mdb_config *cfg, entry->addr.proto, extack)) return -EINVAL; - vxlan_mdb_config_group_set(cfg, entry, mdbe_attrs[MDBE_ATTR_SOURCE]); + vxlan_mdb_group_set(&cfg->group, entry, mdbe_attrs[MDBE_ATTR_SOURCE]); /* rtnetlink code only validates that IPv4 group address is * multicast. 
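The vxlan_fdb_delete_bulk() handler added earlier in this patch is the vxlan backend for bulk neighbour deletion: rtnetlink steers an RTM_DELNEIGH request that carries NLM_F_BULK to ->ndo_fdb_del_bulk, the parsed attributes fill a vxlan_fdb_flush_desc, and vxlan_flush() matches every FDB entry (and, when VNI/port/destination-IP selectors are present, every remote) against that descriptor. As an untested illustration only, a minimal userspace request could look like the sketch below; the helper name, the fixed-size attribute buffer, and the omission of error handling are invented for the example and are not part of the patch:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/neighbour.h>

static int vxlan_fdb_bulk_flush(int ifindex, __u32 src_vni)
{
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		struct ndmsg ndm;
		char buf[32];	/* room for one small attribute */
	} req = { 0 };
	struct rtattr *rta;
	int fd, ret;

	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg));
	req.nlh.nlmsg_type = RTM_DELNEIGH;
	/* NLM_F_BULK turns a single-entry delete into a filtered flush */
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_BULK;
	req.ndm.ndm_family = AF_BRIDGE;
	req.ndm.ndm_ifindex = ifindex;

	/* NDA_SRC_VNI narrows the flush to one source VNI; it is sent in
	 * host order, vxlan_fdb_delete_bulk() does the cpu_to_be32(). */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	rta->rta_type = NDA_SRC_VNI;
	rta->rta_len = RTA_LENGTH(sizeof(src_vni));
	memcpy(RTA_DATA(rta), &src_vni, sizeof(src_vni));
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + rta->rta_len;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return -1;
	ret = sendto(fd, &req, req.nlh.nlmsg_len, 0,
		     (struct sockaddr *)&sa, sizeof(sa));
	close(fd);
	return ret < 0 ? -1 : 0;
}

Recent iproute2 exposes the same operation as "bridge fdb flush dev <dev> ..."; the selectable attributes mirror vxlan_del_bulk_policy above, with NTF_SELF/NTF_MASTER ignored and unsupported state/flag bits rejected with an extack message.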
@@ -927,23 +925,20 @@ vxlan_mdb_nlmsg_src_list_size(const struct vxlan_mdb_entry_key *group, return nlmsg_size; } -static size_t vxlan_mdb_nlmsg_size(const struct vxlan_dev *vxlan, - const struct vxlan_mdb_entry *mdb_entry, - const struct vxlan_mdb_remote *remote) +static size_t +vxlan_mdb_nlmsg_remote_size(const struct vxlan_dev *vxlan, + const struct vxlan_mdb_entry *mdb_entry, + const struct vxlan_mdb_remote *remote) { const struct vxlan_mdb_entry_key *group = &mdb_entry->key; struct vxlan_rdst *rd = rtnl_dereference(remote->rd); size_t nlmsg_size; - nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) + - /* MDBA_MDB */ - nla_total_size(0) + - /* MDBA_MDB_ENTRY */ - nla_total_size(0) + /* MDBA_MDB_ENTRY_INFO */ - nla_total_size(sizeof(struct br_mdb_entry)) + + nlmsg_size = nla_total_size(sizeof(struct br_mdb_entry)) + /* MDBA_MDB_EATTR_TIMER */ nla_total_size(sizeof(u32)); + /* MDBA_MDB_EATTR_SOURCE */ if (vxlan_mdb_is_sg(group)) nlmsg_size += nla_total_size(vxlan_addr_size(&group->dst)); @@ -971,6 +966,19 @@ static size_t vxlan_mdb_nlmsg_size(const struct vxlan_dev *vxlan, return nlmsg_size; } +static size_t vxlan_mdb_nlmsg_size(const struct vxlan_dev *vxlan, + const struct vxlan_mdb_entry *mdb_entry, + const struct vxlan_mdb_remote *remote) +{ + return NLMSG_ALIGN(sizeof(struct br_port_msg)) + + /* MDBA_MDB */ + nla_total_size(0) + + /* MDBA_MDB_ENTRY */ + nla_total_size(0) + + /* Remote entry */ + vxlan_mdb_nlmsg_remote_size(vxlan, mdb_entry, remote); +} + static int vxlan_mdb_nlmsg_fill(const struct vxlan_dev *vxlan, struct sk_buff *skb, const struct vxlan_mdb_entry *mdb_entry, @@ -1298,6 +1306,156 @@ int vxlan_mdb_del(struct net_device *dev, struct nlattr *tb[], return err; } +static const struct nla_policy vxlan_mdbe_attrs_get_pol[MDBE_ATTR_MAX + 1] = { + [MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY, + sizeof(struct in_addr), + sizeof(struct in6_addr)), + [MDBE_ATTR_SRC_VNI] = NLA_POLICY_FULL_RANGE(NLA_U32, &vni_range), +}; + +static int vxlan_mdb_get_parse(struct net_device *dev, struct nlattr *tb[], + struct vxlan_mdb_entry_key *group, + struct netlink_ext_ack *extack) +{ + struct br_mdb_entry *entry = nla_data(tb[MDBA_GET_ENTRY]); + struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1]; + struct vxlan_dev *vxlan = netdev_priv(dev); + int err; + + memset(group, 0, sizeof(*group)); + group->vni = vxlan->default_dst.remote_vni; + + if (!tb[MDBA_GET_ENTRY_ATTRS]) { + vxlan_mdb_group_set(group, entry, NULL); + return 0; + } + + err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX, + tb[MDBA_GET_ENTRY_ATTRS], + vxlan_mdbe_attrs_get_pol, extack); + if (err) + return err; + + if (mdbe_attrs[MDBE_ATTR_SOURCE] && + !vxlan_mdb_is_valid_source(mdbe_attrs[MDBE_ATTR_SOURCE], + entry->addr.proto, extack)) + return -EINVAL; + + vxlan_mdb_group_set(group, entry, mdbe_attrs[MDBE_ATTR_SOURCE]); + + if (mdbe_attrs[MDBE_ATTR_SRC_VNI]) + group->vni = + cpu_to_be32(nla_get_u32(mdbe_attrs[MDBE_ATTR_SRC_VNI])); + + return 0; +} + +static struct sk_buff * +vxlan_mdb_get_reply_alloc(const struct vxlan_dev *vxlan, + const struct vxlan_mdb_entry *mdb_entry) +{ + struct vxlan_mdb_remote *remote; + size_t nlmsg_size; + + nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) + + /* MDBA_MDB */ + nla_total_size(0) + + /* MDBA_MDB_ENTRY */ + nla_total_size(0); + + list_for_each_entry(remote, &mdb_entry->remotes, list) + nlmsg_size += vxlan_mdb_nlmsg_remote_size(vxlan, mdb_entry, + remote); + + return nlmsg_new(nlmsg_size, GFP_KERNEL); +} + +static int +vxlan_mdb_get_reply_fill(const struct vxlan_dev *vxlan, + struct 
sk_buff *skb, + const struct vxlan_mdb_entry *mdb_entry, + u32 portid, u32 seq) +{ + struct nlattr *mdb_nest, *mdb_entry_nest; + struct vxlan_mdb_remote *remote; + struct br_port_msg *bpm; + struct nlmsghdr *nlh; + int err; + + nlh = nlmsg_put(skb, portid, seq, RTM_NEWMDB, sizeof(*bpm), 0); + if (!nlh) + return -EMSGSIZE; + + bpm = nlmsg_data(nlh); + memset(bpm, 0, sizeof(*bpm)); + bpm->family = AF_BRIDGE; + bpm->ifindex = vxlan->dev->ifindex; + mdb_nest = nla_nest_start_noflag(skb, MDBA_MDB); + if (!mdb_nest) { + err = -EMSGSIZE; + goto cancel; + } + mdb_entry_nest = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY); + if (!mdb_entry_nest) { + err = -EMSGSIZE; + goto cancel; + } + + list_for_each_entry(remote, &mdb_entry->remotes, list) { + err = vxlan_mdb_entry_info_fill(vxlan, skb, mdb_entry, remote); + if (err) + goto cancel; + } + + nla_nest_end(skb, mdb_entry_nest); + nla_nest_end(skb, mdb_nest); + nlmsg_end(skb, nlh); + + return 0; + +cancel: + nlmsg_cancel(skb, nlh); + return err; +} + +int vxlan_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid, + u32 seq, struct netlink_ext_ack *extack) +{ + struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_mdb_entry *mdb_entry; + struct vxlan_mdb_entry_key group; + struct sk_buff *skb; + int err; + + ASSERT_RTNL(); + + err = vxlan_mdb_get_parse(dev, tb, &group, extack); + if (err) + return err; + + mdb_entry = vxlan_mdb_entry_lookup(vxlan, &group); + if (!mdb_entry) { + NL_SET_ERR_MSG_MOD(extack, "MDB entry not found"); + return -ENOENT; + } + + skb = vxlan_mdb_get_reply_alloc(vxlan, mdb_entry); + if (!skb) + return -ENOMEM; + + err = vxlan_mdb_get_reply_fill(vxlan, skb, mdb_entry, portid, seq); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to fill MDB get reply"); + goto free; + } + + return rtnl_unicast(skb, dev_net(dev), portid); + +free: + kfree_skb(skb); + return err; +} + struct vxlan_mdb_entry *vxlan_mdb_entry_skb_get(struct vxlan_dev *vxlan, struct sk_buff *skb, __be32 src_vni) diff --git a/drivers/net/vxlan/vxlan_private.h b/drivers/net/vxlan/vxlan_private.h index 817fa3075842..db679c380955 100644 --- a/drivers/net/vxlan/vxlan_private.h +++ b/drivers/net/vxlan/vxlan_private.h @@ -235,6 +235,8 @@ int vxlan_mdb_add(struct net_device *dev, struct nlattr *tb[], u16 nlmsg_flags, struct netlink_ext_ack *extack); int vxlan_mdb_del(struct net_device *dev, struct nlattr *tb[], struct netlink_ext_ack *extack); +int vxlan_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid, + u32 seq, struct netlink_ext_ack *extack); struct vxlan_mdb_entry *vxlan_mdb_entry_skb_get(struct vxlan_dev *vxlan, struct sk_buff *skb, __be32 src_vni); diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c index c3ff30ab782e..9c59d0bf8c3d 100644 --- a/drivers/net/vxlan/vxlan_vnifilter.c +++ b/drivers/net/vxlan/vxlan_vnifilter.c @@ -696,7 +696,7 @@ static struct vxlan_vni_node *vxlan_vni_alloc(struct vxlan_dev *vxlan, { struct vxlan_vni_node *vninode; - vninode = kzalloc(sizeof(*vninode), GFP_ATOMIC); + vninode = kzalloc(sizeof(*vninode), GFP_KERNEL); if (!vninode) return NULL; vninode->stats = netdev_alloc_pcpu_stats(struct vxlan_vni_stats_pcpu); diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 47c2ad7a3e42..fd50bb313b92 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -34,6 +34,8 @@ #define TDM_PPPOHT_SLIC_MAXIN #define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S) +static int uhdlc_close(struct net_device *dev); + static 
struct ucc_tdm_info utdm_primary_info = { .uf_info = { .tsa = 0, @@ -708,6 +710,7 @@ static int uhdlc_open(struct net_device *dev) hdlc_device *hdlc = dev_to_hdlc(dev); struct ucc_hdlc_private *priv = hdlc->priv; struct ucc_tdm *utdm = priv->utdm; + int rc = 0; if (priv->hdlc_busy != 1) { if (request_irq(priv->ut_info->uf_info.irq, @@ -731,10 +734,13 @@ static int uhdlc_open(struct net_device *dev) napi_enable(&priv->napi); netdev_reset_queue(dev); netif_start_queue(dev); - hdlc_open(dev); + + rc = hdlc_open(dev); + if (rc) + uhdlc_close(dev); } - return 0; + return rc; } static void uhdlc_memclean(struct ucc_hdlc_private *priv) @@ -824,6 +830,8 @@ static int uhdlc_close(struct net_device *dev) netdev_reset_queue(dev); priv->hdlc_busy = 0; + hdlc_close(dev); + return 0; } diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index e46b7f5ee49e..b09f4c235142 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c @@ -687,10 +687,10 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget) napi_complete(napi); qmgr_enable_irq(rxq); if (!qmgr_stat_empty(rxq) && - napi_reschedule(napi)) { + napi_schedule(napi)) { #if DEBUG_RX printk(KERN_DEBUG "%s: hss_hdlc_poll" - " napi_reschedule succeeded\n", + " napi_schedule succeeded\n", dev->name); #endif qmgr_disable_irq(rxq); diff --git a/drivers/net/wireguard/cookie.c b/drivers/net/wireguard/cookie.c index 4956f0499c19..f89581b5e8cb 100644 --- a/drivers/net/wireguard/cookie.c +++ b/drivers/net/wireguard/cookie.c @@ -12,9 +12,9 @@ #include <crypto/blake2s.h> #include <crypto/chacha20poly1305.h> +#include <crypto/utils.h> #include <net/ipv6.h> -#include <crypto/algapi.h> void wg_cookie_checker_init(struct cookie_checker *checker, struct wg_device *wg) diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c index 258dcc103921..deb9636b0ecf 100644 --- a/drivers/net/wireguard/device.c +++ b/drivers/net/wireguard/device.c @@ -210,7 +210,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev) */ while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) { dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue)); - ++dev->stats.tx_dropped; + DEV_STATS_INC(dev, tx_dropped); } skb_queue_splice_tail(&packets, &peer->staged_packet_queue); spin_unlock_bh(&peer->staged_packet_queue.lock); @@ -228,7 +228,7 @@ err_icmp: else if (skb->protocol == htons(ETH_P_IPV6)) icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); err: - ++dev->stats.tx_errors; + DEV_STATS_INC(dev, tx_errors); kfree_skb(skb); return ret; } diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c index 6d1bd9f52d02..e220d761b1f2 100644 --- a/drivers/net/wireguard/netlink.c +++ b/drivers/net/wireguard/netlink.c @@ -15,7 +15,7 @@ #include <linux/if.h> #include <net/genetlink.h> #include <net/sock.h> -#include <crypto/algapi.h> +#include <crypto/utils.h> static struct genl_family genl_family; @@ -200,7 +200,7 @@ static int wg_get_device_start(struct netlink_callback *cb) { struct wg_device *wg; - wg = lookup_interface(genl_dumpit_info(cb)->attrs, cb->skb); + wg = lookup_interface(genl_info_dump(cb)->attrs, cb->skb); if (IS_ERR(wg)) return PTR_ERR(wg); DUMP_CTX(cb)->wg = wg; diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c index 720952b92e78..202a33af5a72 100644 --- a/drivers/net/wireguard/noise.c +++ b/drivers/net/wireguard/noise.c @@ -15,7 +15,7 @@ #include <linux/bitmap.h> #include <linux/scatterlist.h> #include <linux/highmem.h> 
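/*
 * Aside, not part of the patch: <crypto/algapi.h> is the crypto
 * subsystem's internal algorithm-API header. The constant-time
 * helpers wireguard actually uses (crypto_memneq() in particular)
 * now live in the much lighter <crypto/utils.h>, so the include
 * swap here in noise.c, like the ones in cookie.c and netlink.c
 * above, chases the relocated declaration with no change in
 * behavior.
 */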
-#include <crypto/algapi.h> +#include <crypto/utils.h> /* This implements Noise_IKpsk2: * diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c index 0b3f0c843550..a176653c8861 100644 --- a/drivers/net/wireguard/receive.c +++ b/drivers/net/wireguard/receive.c @@ -416,20 +416,20 @@ dishonest_packet_peer: net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n", dev->name, skb, peer->internal_id, &peer->endpoint.addr); - ++dev->stats.rx_errors; - ++dev->stats.rx_frame_errors; + DEV_STATS_INC(dev, rx_errors); + DEV_STATS_INC(dev, rx_frame_errors); goto packet_processed; dishonest_packet_type: net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n", dev->name, peer->internal_id, &peer->endpoint.addr); - ++dev->stats.rx_errors; - ++dev->stats.rx_frame_errors; + DEV_STATS_INC(dev, rx_errors); + DEV_STATS_INC(dev, rx_frame_errors); goto packet_processed; dishonest_packet_size: net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n", dev->name, peer->internal_id, &peer->endpoint.addr); - ++dev->stats.rx_errors; - ++dev->stats.rx_length_errors; + DEV_STATS_INC(dev, rx_errors); + DEV_STATS_INC(dev, rx_length_errors); goto packet_processed; packet_processed: dev_kfree_skb(skb); diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c index 95c853b59e1d..0d48e0f4a1ba 100644 --- a/drivers/net/wireguard/send.c +++ b/drivers/net/wireguard/send.c @@ -333,7 +333,8 @@ err: void wg_packet_purge_staged_packets(struct wg_peer *peer) { spin_lock_bh(&peer->staged_packet_queue.lock); - peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen; + DEV_STATS_ADD(peer->device->dev, tx_dropped, + peer->staged_packet_queue.qlen); __skb_queue_purge(&peer->staged_packet_queue); spin_unlock_bh(&peer->staged_packet_queue.lock); } diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 19f61225a708..43e0db78d42b 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -256,7 +256,7 @@ static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata, /* always bulk-out a multiple of 4 bytes */ xferlen = (sizeof(struct ar5523_cmd_hdr) + ilen + 3) & ~3; - hdr = (struct ar5523_cmd_hdr *)cmd->buf_tx; + hdr = cmd->buf_tx; memset(hdr, 0, sizeof(struct ar5523_cmd_hdr)); hdr->len = cpu_to_be32(xferlen); hdr->code = cpu_to_be32(code); diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index 4a006fb4d424..a378bc48b1d2 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -5,7 +5,7 @@ */ #include <linux/module.h> #include <linux/of.h> -#include <linux/of_device.h> +#include <linux/platform_device.h> #include <linux/clk.h> #include <linux/reset.h> #include "core.h" @@ -733,7 +733,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev) int ret; struct ath10k_bus_params bus_params = {}; - hw_rev = (enum ath10k_hw_rev)of_device_get_match_data(&pdev->dev); + hw_rev = (uintptr_t)of_device_get_match_data(&pdev->dev); if (!hw_rev) { dev_err(&pdev->dev, "OF data missing\n"); return -EINVAL; diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index 666ce384a1d8..27367bd64e95 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -110,7 +110,7 @@ struct ath10k_ce_ring { struct ce_desc_64 *shadow_base; /* keep last */ - void 
*per_transfer_context[]; + void *per_transfer_context[] __counted_by(nentries); }; struct ath10k_ce_pipe { diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index f9518e1c9903..ad9cf953a2fc 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -1140,7 +1140,7 @@ void ath10k_debug_get_et_strings(struct ieee80211_hw *hw, u32 sset, u8 *data) { if (sset == ETH_SS_STATS) - memcpy(data, *ath10k_gstrings_stats, + memcpy(data, ath10k_gstrings_stats, sizeof(ath10k_gstrings_stats)); } @@ -1964,20 +1964,13 @@ static ssize_t ath10k_write_btcoex(struct file *file, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; - char buf[32]; - size_t buf_size; - int ret; + ssize_t ret; bool val; u32 pdev_param; - buf_size = min(count, (sizeof(buf) - 1)); - if (copy_from_user(buf, ubuf, buf_size)) - return -EFAULT; - - buf[buf_size] = '\0'; - - if (kstrtobool(buf, &val) != 0) - return -EINVAL; + ret = kstrtobool_from_user(ubuf, count, &val); + if (ret) + return ret; if (!ar->coex_support) return -EOPNOTSUPP; @@ -2000,7 +1993,7 @@ static ssize_t ath10k_write_btcoex(struct file *file, ar->running_fw->fw_file.fw_features)) { ret = ath10k_wmi_pdev_set_param(ar, pdev_param, val); if (ret) { - ath10k_warn(ar, "failed to enable btcoex: %d\n", ret); + ath10k_warn(ar, "failed to enable btcoex: %zd\n", ret); ret = count; goto exit; } @@ -2103,19 +2096,12 @@ static ssize_t ath10k_write_peer_stats(struct file *file, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; - char buf[32]; - size_t buf_size; - int ret; + ssize_t ret; bool val; - buf_size = min(count, (sizeof(buf) - 1)); - if (copy_from_user(buf, ubuf, buf_size)) - return -EFAULT; - - buf[buf_size] = '\0'; - - if (kstrtobool(buf, &val) != 0) - return -EINVAL; + ret = kstrtobool_from_user(ubuf, count, &val); + if (ret) + return ret; mutex_lock(&ar->conf_mutex); @@ -2239,21 +2225,16 @@ static ssize_t ath10k_sta_tid_stats_mask_write(struct file *file, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; - char buf[32]; - ssize_t len; + ssize_t ret; u32 mask; - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; - - buf[len] = '\0'; - if (kstrtoint(buf, 0, &mask)) - return -EINVAL; + ret = kstrtoint_from_user(user_buf, count, 0, &mask); + if (ret) + return ret; ar->sta_tid_stats_mask = mask; - return len; + return count; } static const struct file_operations fops_sta_tid_stats_mask = { diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index e0c9f45e7476..c80470e8886a 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -69,7 +69,7 @@ struct htt_ver_req { * The HTT tx descriptor is defined in two manners: by a struct with * bitfields, and by a series of [dword offset, bit mask, bit shift] * definitions. - * The target should use the struct def, for simplicitly and clarity, + * The target should use the struct def, for simplicity and clarity, * but the host shall use the bit-mast + bit-shift defs, to be endian- * neutral. Specifically, the host shall use the get/set macros built * around the mask + shift defs. 
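The hunk above closes with the long-standing comment on the two HTT tx descriptor definitions, and the idiom it mandates is compact enough to show concretely. In the sketch below the HTT_EX_* names are invented for illustration; ath10k derives its real accessors from per-field MASK/LSB defines plus get/set macros:

#include <stdint.h>

/* Invented example field: bits 23:16 of some descriptor dword. */
#define HTT_EX_FIELD_MASK 0x00ff0000u
#define HTT_EX_FIELD_LSB  16

/* dword is a descriptor word already converted to CPU order. */
static inline uint32_t htt_ex_field_get(uint32_t dword)
{
	return (dword & HTT_EX_FIELD_MASK) >> HTT_EX_FIELD_LSB;
}

static inline uint32_t htt_ex_field_set(uint32_t dword, uint32_t val)
{
	dword &= ~HTT_EX_FIELD_MASK;
	dword |= (val << HTT_EX_FIELD_LSB) & HTT_EX_FIELD_MASK;
	return dword;
}

The point of the pattern: the host converts each descriptor dword from its DMA little-endian layout (le32_to_cpu in kernel code) and then applies mask+shift, which behaves identically on little- and big-endian hosts, whereas C bitfield layout is implementation-defined and cannot be trusted across compilers or endiannesses.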
@@ -880,8 +880,7 @@ enum htt_data_tx_status { HTT_DATA_TX_STATUS_OK = 0, HTT_DATA_TX_STATUS_DISCARD = 1, HTT_DATA_TX_STATUS_NO_ACK = 2, - HTT_DATA_TX_STATUS_POSTPONE = 3, /* HL only */ - HTT_DATA_TX_STATUS_DOWNLOAD_FAIL = 128 + HTT_DATA_TX_STATUS_POSTPONE = 3 /* HL only */ }; enum htt_data_tx_flags { @@ -2086,7 +2085,7 @@ static inline bool ath10k_htt_rx_proc_rx_frag_ind(struct ath10k_htt *htt, * for correctly accessing rx descriptor data. */ -/* base struct used for abstracting the rx descritor representation */ +/* base struct used for abstracting the rx descriptor representation */ struct htt_rx_desc { union { /* This field is filled on the host using the msdu buffer diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 438b0caaceb7..b261d6371c0f 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2964,7 +2964,6 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, break; case HTT_DATA_TX_STATUS_DISCARD: case HTT_DATA_TX_STATUS_POSTPONE: - case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL: tx_done.status = HTT_TX_COMPL_STATE_DISCARD; break; default: diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index bd603feb7953..be4d4536aaa8 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -796,20 +796,16 @@ static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt) return 0; } -static void ath10k_htt_fill_rx_desc_offset_32(struct ath10k_hw_params *hw, void *rx_ring) +static void ath10k_htt_fill_rx_desc_offset_32(struct ath10k_hw_params *hw, + struct htt_rx_ring_setup_ring32 *rx_ring) { - struct htt_rx_ring_setup_ring32 *ring = - (struct htt_rx_ring_setup_ring32 *)rx_ring; - - ath10k_htt_rx_desc_get_offsets(hw, &ring->offsets); + ath10k_htt_rx_desc_get_offsets(hw, &rx_ring->offsets); } -static void ath10k_htt_fill_rx_desc_offset_64(struct ath10k_hw_params *hw, void *rx_ring) +static void ath10k_htt_fill_rx_desc_offset_64(struct ath10k_hw_params *hw, + struct htt_rx_ring_setup_ring64 *rx_ring) { - struct htt_rx_ring_setup_ring64 *ring = - (struct htt_rx_ring_setup_ring64 *)rx_ring; - - ath10k_htt_rx_desc_get_offsets(hw, &ring->offsets); + ath10k_htt_rx_desc_get_offsets(hw, &rx_ring->offsets); } static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 03e7bc5b6c0b..2cf693f3fea9 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -728,20 +728,13 @@ static int ath10k_peer_create(struct ath10k *ar, const u8 *addr, enum wmi_peer_type peer_type) { - struct ath10k_vif *arvif; struct ath10k_peer *peer; - int num_peers = 0; int ret; lockdep_assert_held(&ar->conf_mutex); - num_peers = ar->num_peers; - - /* Each vdev consumes a peer entry as well */ - list_for_each_entry(arvif, &ar->arvifs, list) - num_peers++; - - if (num_peers >= ar->max_num_peers) + /* Each vdev consumes a peer entry as well. 
*/ + if (ar->num_peers + list_count_nodes(&ar->arvifs) >= ar->max_num_peers) return -ENOBUFS; ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type); @@ -4503,18 +4496,21 @@ void __ath10k_scan_finish(struct ath10k *ar) break; case ATH10K_SCAN_RUNNING: case ATH10K_SCAN_ABORTING: + if (ar->scan.is_roc && ar->scan.roc_notify) + ieee80211_remain_on_channel_expired(ar->hw); + fallthrough; + case ATH10K_SCAN_STARTING: if (!ar->scan.is_roc) { struct cfg80211_scan_info info = { - .aborted = (ar->scan.state == - ATH10K_SCAN_ABORTING), + .aborted = ((ar->scan.state == + ATH10K_SCAN_ABORTING) || + (ar->scan.state == + ATH10K_SCAN_STARTING)), }; ieee80211_scan_completed(ar->hw, &info); - } else if (ar->scan.roc_notify) { - ieee80211_remain_on_channel_expired(ar->hw); } - fallthrough; - case ATH10K_SCAN_STARTING: + ar->scan.state = ATH10K_SCAN_IDLE; ar->scan_channel = NULL; ar->scan.roc_freq = 0; diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index a7f44f6335fb..2f8c785277af 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1636,7 +1636,7 @@ static int ath10k_pci_dump_memory_generic(struct ath10k *ar, buf, current_region->len); - /* No individiual memory sections defined so we can + /* No individual memory sections defined so we can * copy the entire memory region. */ ret = ath10k_pci_diag_read_mem(ar, @@ -1963,8 +1963,9 @@ static int ath10k_pci_hif_start(struct ath10k *ar) ath10k_pci_irq_enable(ar); ath10k_pci_rx_post(ar); - pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL, - ar_pci->link_ctl); + pcie_capability_clear_and_set_word(ar_pci->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, + ar_pci->link_ctl & PCI_EXP_LNKCTL_ASPMC); return 0; } @@ -2821,8 +2822,8 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar, pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL, &ar_pci->link_ctl); - pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL, - ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC); + pcie_capability_clear_word(ar_pci->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC); /* * Bring the target up cleanly. @@ -3147,7 +3148,7 @@ static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget) * immediate servicing. */ if (ath10k_ce_interrupt_summary(ar)) { - napi_reschedule(ctx); + napi_schedule(ctx); goto out; } ath10k_pci_enable_legacy_irq(ar); @@ -3816,7 +3817,7 @@ static void __exit ath10k_pci_exit(void) module_exit(ath10k_pci_exit); MODULE_AUTHOR("Qualcomm Atheros"); -MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices"); +MODULE_DESCRIPTION("Driver support for Qualcomm Atheros PCIe/AHB 802.11ac WLAN devices"); MODULE_LICENSE("Dual BSD/GPL"); /* QCA988x 2.0 firmware files */ diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 79e09c7a82b3..56fbcfb80bf8 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -2389,7 +2389,7 @@ static int ath10k_sdio_dump_memory_generic(struct ath10k *ar, buf, current_region->len); - /* No individiual memory sections defined so we can + /* No individual memory sections defined so we can * copy the entire memory region. 
*/ if (fast_dump) diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 26214c00cd0d..2c39bad7ebfb 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -828,12 +828,20 @@ static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar, static inline void ath10k_snoc_irq_disable(struct ath10k *ar) { - ath10k_ce_disable_interrupts(ar); + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int id; + + for (id = 0; id < CE_COUNT_MAX; id++) + disable_irq(ar_snoc->ce_irqs[id].irq_line); } static inline void ath10k_snoc_irq_enable(struct ath10k *ar) { - ath10k_ce_enable_interrupts(ar); + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int id; + + for (id = 0; id < CE_COUNT_MAX; id++) + enable_irq(ar_snoc->ce_irqs[id].irq_line); } static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe) @@ -1090,6 +1098,8 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar, goto err_free_rri; } + ath10k_ce_enable_interrupts(ar); + return 0; err_free_rri: @@ -1253,8 +1263,8 @@ static int ath10k_snoc_request_irq(struct ath10k *ar) for (id = 0; id < CE_COUNT_MAX; id++) { ret = request_irq(ar_snoc->ce_irqs[id].irq_line, - ath10k_snoc_per_engine_handler, 0, - ce_name[id], ar); + ath10k_snoc_per_engine_handler, + IRQF_NO_AUTOEN, ce_name[id], ar); if (ret) { ath10k_err(ar, "failed to register IRQ handler for CE %d: %d\n", diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c index 68254a967ccb..2240994390ed 100644 --- a/drivers/net/wireless/ath/ath10k/spectral.c +++ b/drivers/net/wireless/ath/ath10k/spectral.c @@ -384,16 +384,11 @@ static ssize_t write_file_spectral_count(struct file *file, { struct ath10k *ar = file->private_data; unsigned long val; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ssize_t ret; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val > 255) return -EINVAL; @@ -440,16 +435,11 @@ static ssize_t write_file_spectral_bins(struct file *file, { struct ath10k *ar = file->private_data; unsigned long val; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ssize_t ret; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val < 64 || val > SPECTRAL_ATH10K_MAX_NUM_BINS) return -EINVAL; diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c index b0067af685b1..3c482baacec1 100644 --- a/drivers/net/wireless/ath/ath10k/usb.c +++ b/drivers/net/wireless/ath/ath10k/usb.c @@ -1126,5 +1126,5 @@ static struct usb_driver ath10k_usb_driver = { module_usb_driver(ath10k_usb_driver); MODULE_AUTHOR("Atheros Communications, Inc."); -MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN USB devices"); +MODULE_DESCRIPTION("Driver support for Qualcomm Atheros USB 802.11ac WLAN devices"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 6d04a66fe5e0..b112e8826093 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -3854,9 +3854,9 @@ enum wmi_pdev_param { * retransmitting frames. 
*/ WMI_PDEV_PARAM_DYNAMIC_BW, - /* Non aggregrate/ 11g sw retry threshold.0-disable */ + /* Non aggregate/ 11g sw retry threshold.0-disable */ WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH, - /* aggregrate sw retry threshold. 0-disable*/ + /* aggregate sw retry threshold. 0-disable*/ WMI_PDEV_PARAM_AGG_SW_RETRY_TH, /* Station kickout threshold (non of consecutive failures).0-disable */ WMI_PDEV_PARAM_STA_KICKOUT_TH, @@ -3953,9 +3953,9 @@ enum wmi_10x_pdev_param { WMI_10X_PDEV_PARAM_PROTECTION_MODE, /* Dynamic bandwidth 0: disable 1: enable */ WMI_10X_PDEV_PARAM_DYNAMIC_BW, - /* Non aggregrate/ 11g sw retry threshold.0-disable */ + /* Non aggregate/ 11g sw retry threshold.0-disable */ WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH, - /* aggregrate sw retry threshold. 0-disable*/ + /* aggregate sw retry threshold. 0-disable*/ WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH, /* Station kickout threshold (non of consecutive failures).0-disable */ WMI_10X_PDEV_PARAM_STA_KICKOUT_TH, diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile index cc47e0114595..2c94d50ae36f 100644 --- a/drivers/net/wireless/ath/ath11k/Makefile +++ b/drivers/net/wireless/ath/ath11k/Makefile @@ -17,7 +17,8 @@ ath11k-y += core.o \ peer.o \ dbring.o \ hw.o \ - pcic.o + pcic.o \ + fw.o ath11k-$(CONFIG_ATH11K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index 139da578831a..235336ef2a7a 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -6,6 +6,7 @@ #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/of_device.h> #include <linux/of.h> #include <linux/dma-mapping.h> @@ -14,6 +15,7 @@ #include "ahb.h" #include "debug.h" #include "hif.h" +#include "qmi.h" #include <linux/remoteproc.h> #include "pcic.h" #include <linux/soc/qcom/smem.h> @@ -418,32 +420,6 @@ static void ath11k_ahb_power_down(struct ath11k_base *ab) rproc_shutdown(ab_ahb->tgt_rproc); } -static int ath11k_ahb_fwreset_from_cold_boot(struct ath11k_base *ab) -{ - int timeout; - - if (ath11k_cold_boot_cal == 0 || ab->qmi.cal_done || - ab->hw_params.cold_boot_calib == 0 || - ab->hw_params.cbcal_restart_fw == 0) - return 0; - - ath11k_dbg(ab, ATH11K_DBG_AHB, "wait for cold boot done\n"); - timeout = wait_event_timeout(ab->qmi.cold_boot_waitq, - (ab->qmi.cal_done == 1), - ATH11K_COLD_BOOT_FW_RESET_DELAY); - if (timeout <= 0) { - ath11k_cold_boot_cal = 0; - ath11k_warn(ab, "Coldboot Calibration failed timed out\n"); - } - - /* reset the firmware */ - ath11k_ahb_power_down(ab); - ath11k_ahb_power_up(ab); - - ath11k_dbg(ab, ATH11K_DBG_AHB, "exited from cold boot mode\n"); - return 0; -} - static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab) { struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg; @@ -1109,19 +1085,12 @@ static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab) static int ath11k_ahb_probe(struct platform_device *pdev) { struct ath11k_base *ab; - const struct of_device_id *of_id; const struct ath11k_hif_ops *hif_ops; const struct ath11k_pci_ops *pci_ops; enum ath11k_hw_rev hw_rev; int ret; - of_id = of_match_device(ath11k_ahb_of_match, &pdev->dev); - if (!of_id) { - dev_err(&pdev->dev, "failed to find matching device tree id\n"); - return -EINVAL; - } - - hw_rev = (enum ath11k_hw_rev)of_id->data; + hw_rev = (uintptr_t)device_get_match_data(&pdev->dev); switch (hw_rev) { case 
ATH11K_HW_IPQ8074: @@ -1226,7 +1195,7 @@ static int ath11k_ahb_probe(struct platform_device *pdev) goto err_ce_free; } - ath11k_ahb_fwreset_from_cold_boot(ab); + ath11k_qmi_fwreset_from_cold_boot(ab); return 0; @@ -1331,17 +1300,7 @@ static struct platform_driver ath11k_ahb_driver = { .shutdown = ath11k_ahb_shutdown, }; -static int ath11k_ahb_init(void) -{ - return platform_driver_register(&ath11k_ahb_driver); -} -module_init(ath11k_ahb_init); - -static void ath11k_ahb_exit(void) -{ - platform_driver_unregister(&ath11k_ahb_driver); -} -module_exit(ath11k_ahb_exit); +module_platform_driver(ath11k_ahb_driver); MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN AHB devices"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h index 1fc6360e7f01..c0f6a0ba86df 100644 --- a/drivers/net/wireless/ath/ath11k/ce.h +++ b/drivers/net/wireless/ath/ath11k/ce.h @@ -203,9 +203,6 @@ int ath11k_ce_alloc_pipes(struct ath11k_base *ab); void ath11k_ce_free_pipes(struct ath11k_base *ab); int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id); void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id); -int ath11k_ce_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, - u8 *ul_pipe, u8 *dl_pipe); -int ath11k_ce_attr_attach(struct ath11k_base *ab); void ath11k_ce_get_shadow_config(struct ath11k_base *ab, u32 **shadow_cfg, u32 *shadow_cfg_len); void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab); diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index bebfd342e28b..0c6ecbb9a066 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -16,6 +16,7 @@ #include "debug.h" #include "hif.h" #include "wow.h" +#include "fw.h" unsigned int ath11k_debug_mask; EXPORT_SYMBOL(ath11k_debug_mask); @@ -86,7 +87,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .supports_shadow_regs = false, .idle_ps = false, .supports_sta_ps = false, - .cold_boot_calib = true, + .coldboot_cal_mm = true, + .coldboot_cal_ftm = true, .cbcal_restart_fw = true, .fw_mem_mode = 0, .num_vdevs = 16 + 1, @@ -167,7 +169,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .supports_shadow_regs = false, .idle_ps = false, .supports_sta_ps = false, - .cold_boot_calib = true, + .coldboot_cal_mm = true, + .coldboot_cal_ftm = true, .cbcal_restart_fw = true, .fw_mem_mode = 0, .num_vdevs = 16 + 1, @@ -248,7 +251,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .supports_shadow_regs = true, .idle_ps = true, .supports_sta_ps = true, - .cold_boot_calib = false, + .coldboot_cal_mm = false, + .coldboot_cal_ftm = false, .cbcal_restart_fw = false, .fw_mem_mode = 0, .num_vdevs = 16 + 1, @@ -332,8 +336,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .supports_shadow_regs = false, .idle_ps = false, .supports_sta_ps = false, - .cold_boot_calib = false, - .cbcal_restart_fw = false, + .coldboot_cal_mm = false, + .coldboot_cal_ftm = true, + .cbcal_restart_fw = true, .fw_mem_mode = 2, .num_vdevs = 8, .num_peers = 128, @@ -413,7 +418,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .supports_shadow_regs = true, .idle_ps = true, .supports_sta_ps = true, - .cold_boot_calib = false, + .coldboot_cal_mm = false, + .coldboot_cal_ftm = false, .cbcal_restart_fw = false, .fw_mem_mode = 0, .num_vdevs = 16 + 1, @@ -495,7 +501,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { 
.supports_shadow_regs = true, .idle_ps = true, .supports_sta_ps = true, - .cold_boot_calib = false, + .coldboot_cal_mm = false, + .coldboot_cal_ftm = false, .cbcal_restart_fw = false, .fw_mem_mode = 0, .num_vdevs = 16 + 1, @@ -578,7 +585,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .supports_shadow_regs = true, .idle_ps = true, .supports_sta_ps = true, - .cold_boot_calib = true, + .coldboot_cal_mm = true, + .coldboot_cal_ftm = true, .cbcal_restart_fw = false, .fw_mem_mode = 0, .num_vdevs = 16 + 1, @@ -667,7 +675,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .supports_suspend = false, .hal_params = &ath11k_hw_hal_params_ipq8074, .single_pdev_only = false, - .cold_boot_calib = true, + .coldboot_cal_mm = true, + .coldboot_cal_ftm = true, .cbcal_restart_fw = true, .fix_l1ss = true, .supports_dynamic_smps_6ghz = false, @@ -749,6 +758,18 @@ void ath11k_fw_stats_free(struct ath11k_fw_stats *stats) ath11k_fw_stats_bcn_free(&stats->bcn); } +bool ath11k_core_coldboot_cal_support(struct ath11k_base *ab) +{ + if (!ath11k_cold_boot_cal) + return false; + + if (ath11k_ftm_mode) + return ab->hw_params.coldboot_cal_ftm; + + else + return ab->hw_params.coldboot_cal_mm; +} + int ath11k_core_suspend(struct ath11k_base *ab) { int ret; @@ -965,9 +986,15 @@ int ath11k_core_check_dt(struct ath11k_base *ab) return 0; } +enum ath11k_bdf_name_type { + ATH11K_BDF_NAME_FULL, + ATH11K_BDF_NAME_BUS_NAME, + ATH11K_BDF_NAME_CHIP_ID, +}; + static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name, size_t name_len, bool with_variant, - bool bus_type_mode) + enum ath11k_bdf_name_type name_type) { /* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */ char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = { 0 }; @@ -978,11 +1005,8 @@ static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name, switch (ab->id.bdf_search) { case ATH11K_BDF_SEARCH_BUS_AND_BOARD: - if (bus_type_mode) - scnprintf(name, name_len, - "bus=%s", - ath11k_bus_str(ab->hif.bus)); - else + switch (name_type) { + case ATH11K_BDF_NAME_FULL: scnprintf(name, name_len, "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s", ath11k_bus_str(ab->hif.bus), @@ -992,6 +1016,19 @@ static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name, ab->qmi.target.chip_id, ab->qmi.target.board_id, variant); + break; + case ATH11K_BDF_NAME_BUS_NAME: + scnprintf(name, name_len, + "bus=%s", + ath11k_bus_str(ab->hif.bus)); + break; + case ATH11K_BDF_NAME_CHIP_ID: + scnprintf(name, name_len, + "bus=%s,qmi-chip-id=%d", + ath11k_bus_str(ab->hif.bus), + ab->qmi.target.chip_id); + break; + } break; default: scnprintf(name, name_len, @@ -1010,19 +1047,29 @@ static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name, static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name, size_t name_len) { - return __ath11k_core_create_board_name(ab, name, name_len, true, false); + return __ath11k_core_create_board_name(ab, name, name_len, true, + ATH11K_BDF_NAME_FULL); } static int ath11k_core_create_fallback_board_name(struct ath11k_base *ab, char *name, size_t name_len) { - return __ath11k_core_create_board_name(ab, name, name_len, false, false); + return __ath11k_core_create_board_name(ab, name, name_len, false, + ATH11K_BDF_NAME_FULL); } static int ath11k_core_create_bus_type_board_name(struct ath11k_base *ab, char *name, size_t name_len) { - return __ath11k_core_create_board_name(ab, name, name_len, false, 
true); + return __ath11k_core_create_board_name(ab, name, name_len, false, + ATH11K_BDF_NAME_BUS_NAME); +} + +static int ath11k_core_create_chip_id_board_name(struct ath11k_base *ab, char *name, + size_t name_len) +{ + return __ath11k_core_create_board_name(ab, name, name_len, false, + ATH11K_BDF_NAME_CHIP_ID); } const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab, @@ -1269,31 +1316,43 @@ int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab, #define BOARD_NAME_SIZE 200 int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd) { - char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE]; + char *boardname = NULL, *fallback_boardname = NULL, *chip_id_boardname = NULL; char *filename, filepath[100]; - int ret; + int bd_api; + int ret = 0; filename = ATH11K_BOARD_API2_FILE; + boardname = kzalloc(BOARD_NAME_SIZE, GFP_KERNEL); + if (!boardname) { + ret = -ENOMEM; + goto exit; + } - ret = ath11k_core_create_board_name(ab, boardname, sizeof(boardname)); + ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE); if (ret) { ath11k_err(ab, "failed to create board name: %d", ret); - return ret; + goto exit; } - ab->bd_api = 2; + bd_api = 2; ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname, ATH11K_BD_IE_BOARD, ATH11K_BD_IE_BOARD_NAME, ATH11K_BD_IE_BOARD_DATA); if (!ret) - goto success; + goto exit; + + fallback_boardname = kzalloc(BOARD_NAME_SIZE, GFP_KERNEL); + if (!fallback_boardname) { + ret = -ENOMEM; + goto exit; + } ret = ath11k_core_create_fallback_board_name(ab, fallback_boardname, - sizeof(fallback_boardname)); + BOARD_NAME_SIZE); if (ret) { ath11k_err(ab, "failed to create fallback board name: %d", ret); - return ret; + goto exit; } ret = ath11k_core_fetch_board_data_api_n(ab, bd, fallback_boardname, @@ -1301,9 +1360,30 @@ int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd) ATH11K_BD_IE_BOARD_NAME, ATH11K_BD_IE_BOARD_DATA); if (!ret) - goto success; + goto exit; + + chip_id_boardname = kzalloc(BOARD_NAME_SIZE, GFP_KERNEL); + if (!chip_id_boardname) { + ret = -ENOMEM; + goto exit; + } + + ret = ath11k_core_create_chip_id_board_name(ab, chip_id_boardname, + BOARD_NAME_SIZE); + if (ret) { + ath11k_err(ab, "failed to create chip id board name: %d", ret); + goto exit; + } + + ret = ath11k_core_fetch_board_data_api_n(ab, bd, chip_id_boardname, + ATH11K_BD_IE_BOARD, + ATH11K_BD_IE_BOARD_NAME, + ATH11K_BD_IE_BOARD_DATA); + + if (!ret) + goto exit; - ab->bd_api = 1; + bd_api = 1; ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_DEFAULT_BOARD_FILE); if (ret) { ath11k_core_create_firmware_path(ab, filename, @@ -1314,14 +1394,22 @@ int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd) ath11k_err(ab, "failed to fetch board data for %s from %s\n", fallback_boardname, filepath); + ath11k_err(ab, "failed to fetch board data for %s from %s\n", + chip_id_boardname, filepath); + ath11k_err(ab, "failed to fetch board.bin from %s\n", ab->hw_params.fw.dir); - return ret; } -success: - ath11k_dbg(ab, ATH11K_DBG_BOOT, "using board api %d\n", ab->bd_api); - return 0; +exit: + kfree(boardname); + kfree(fallback_boardname); + kfree(chip_id_boardname); + + if (!ret) + ath11k_dbg(ab, ATH11K_DBG_BOOT, "using board api %d\n", bd_api); + + return ret; } int ath11k_core_fetch_regdb(struct ath11k_base *ab, struct ath11k_board_data *bd) @@ -1985,6 +2073,12 @@ int ath11k_core_pre_init(struct ath11k_base *ab) return ret; } + ret = ath11k_fw_pre_init(ab); + if (ret) { + 
ath11k_err(ab, "failed to pre init firmware: %d", ret); + return ret; + } + return 0; } EXPORT_SYMBOL(ath11k_core_pre_init); @@ -2015,6 +2109,7 @@ void ath11k_core_deinit(struct ath11k_base *ab) ath11k_hif_power_down(ab); ath11k_mac_destroy(ab); ath11k_core_soc_destroy(ab); + ath11k_fw_destroy(ab); } EXPORT_SYMBOL(ath11k_core_deinit); diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 9d15b4390b9c..f12b606e2d2e 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -15,6 +15,8 @@ #include <linux/ctype.h> #include <linux/rhashtable.h> #include <linux/average.h> +#include <linux/firmware.h> + #include "qmi.h" #include "htc.h" #include "wmi.h" @@ -29,6 +31,7 @@ #include "dbring.h" #include "spectral.h" #include "wow.h" +#include "fw.h" #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) @@ -901,14 +904,11 @@ struct ath11k_base { struct list_head peers; wait_queue_head_t peer_mapping_wq; u8 mac_addr[ETH_ALEN]; - bool wmi_ready; - u32 wlan_init_status; int irq_num[ATH11K_IRQ_NUM_MAX]; struct ath11k_ext_irq_grp ext_irq_grp[ATH11K_EXT_IRQ_GRP_NUM_MAX]; struct ath11k_targ_cap target_caps; u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE]; bool pdevs_macaddr_valid; - int bd_api; struct ath11k_hw_params hw_params; @@ -984,6 +984,18 @@ struct ath11k_base { const struct ath11k_pci_ops *ops; } pci; + struct { + u32 api_version; + + const struct firmware *fw; + const u8 *amss_data; + size_t amss_len; + const u8 *m3_data; + size_t m3_len; + + DECLARE_BITMAP(fw_features, ATH11K_FW_FEATURE_COUNT); + } fw; + #ifdef CONFIG_NL80211_TESTMODE struct { u32 data_pos; @@ -1186,6 +1198,7 @@ void ath11k_core_halt(struct ath11k *ar); int ath11k_core_resume(struct ath11k_base *ab); int ath11k_core_suspend(struct ath11k_base *ab); void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab); +bool ath11k_core_coldboot_cal_support(struct ath11k_base *ab); const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab, const char *filename); @@ -1224,6 +1237,11 @@ static inline struct ath11k_vif *ath11k_vif_to_arvif(struct ieee80211_vif *vif) return (struct ath11k_vif *)vif->drv_priv; } +static inline struct ath11k_sta *ath11k_sta_to_arsta(struct ieee80211_sta *sta) +{ + return (struct ath11k_sta *)sta->drv_priv; +} + static inline struct ath11k *ath11k_ab_to_ar(struct ath11k_base *ab, int mac_id) { diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c index 5bb6fd17fdf6..be76e7d1c436 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs.c +++ b/drivers/net/wireless/ath/ath11k/debugfs.c @@ -1459,7 +1459,7 @@ static void ath11k_reset_peer_ps_duration(void *data, struct ieee80211_sta *sta) { struct ath11k *ar = data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); spin_lock_bh(&ar->data_lock); arsta->ps_total_duration = 0; @@ -1510,7 +1510,7 @@ static void ath11k_peer_ps_state_disable(void *data, struct ieee80211_sta *sta) { struct ath11k *ar = data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); spin_lock_bh(&ar->data_lock); arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED; @@ -1591,10 +1591,10 @@ static const struct file_operations fops_ps_state_enable = { int ath11k_debugfs_register(struct ath11k *ar) { struct ath11k_base *ab = ar->ab; - char pdev_name[5]; + char pdev_name[10]; char buf[100] = {0}; - snprintf(pdev_name, 
sizeof(pdev_name), "%s%d", "mac", ar->pdev_idx); + snprintf(pdev_name, sizeof(pdev_name), "%s%u", "mac", ar->pdev_idx); ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc); if (IS_ERR(ar->debug.debugfs_pdev)) diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c index 9cc4ef28e751..8c177fba6f14 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs_sta.c +++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c @@ -136,7 +136,7 @@ static ssize_t ath11k_dbg_sta_dump_tx_stats(struct file *file, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; struct ath11k_htt_data_stats *stats; static const char *str_name[ATH11K_STATS_TYPE_MAX] = {"succ", "fail", @@ -243,7 +243,7 @@ static ssize_t ath11k_dbg_sta_dump_rx_stats(struct file *file, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats; int len = 0, i, retval = 0; @@ -340,7 +340,7 @@ static int ath11k_dbg_sta_open_htt_peer_stats(struct inode *inode, struct file *file) { struct ieee80211_sta *sta = inode->i_private; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; struct debug_htt_stats_req *stats_req; int type = ar->debug.htt_stats.type; @@ -376,7 +376,7 @@ static int ath11k_dbg_sta_release_htt_peer_stats(struct inode *inode, struct file *file) { struct ieee80211_sta *sta = inode->i_private; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; mutex_lock(&ar->conf_mutex); @@ -413,7 +413,7 @@ static ssize_t ath11k_dbg_sta_write_peer_pktlog(struct file *file, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; int ret, enable; @@ -453,7 +453,7 @@ static ssize_t ath11k_dbg_sta_read_peer_pktlog(struct file *file, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; char buf[32] = {0}; int len; @@ -480,7 +480,7 @@ static ssize_t ath11k_dbg_sta_write_delba(struct file *file, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; u32 tid, initiator, reason; int ret; @@ -531,7 +531,7 @@ static ssize_t ath11k_dbg_sta_write_addba_resp(struct file *file, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; u32 tid, status; int ret; @@ -581,7 +581,7 @@ static ssize_t ath11k_dbg_sta_write_addba(struct file *file, size_t count, loff_t *ppos) { struct 
ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; u32 tid, buf_size; int ret; @@ -632,7 +632,7 @@ static ssize_t ath11k_dbg_sta_read_aggr_mode(struct file *file, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; char buf[64]; int len = 0; @@ -652,7 +652,7 @@ static ssize_t ath11k_dbg_sta_write_aggr_mode(struct file *file, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; u32 aggr_mode; int ret; @@ -697,7 +697,7 @@ ath11k_write_htt_peer_stats_reset(struct file *file, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; struct htt_ext_stats_cfg_params cfg_params = { 0 }; int ret; @@ -756,7 +756,7 @@ static ssize_t ath11k_dbg_sta_read_peer_ps_state(struct file *file, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; char buf[20]; int len; @@ -783,7 +783,7 @@ static ssize_t ath11k_dbg_sta_read_current_ps_duration(struct file *file, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; u64 time_since_station_in_power_save; char buf[20]; @@ -817,7 +817,7 @@ static ssize_t ath11k_dbg_sta_read_total_ps_duration(struct file *file, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; char buf[20]; u64 power_save_duration; diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c index d070bcb3fe24..a7252b52555c 100644 --- a/drivers/net/wireless/ath/ath11k/dp.c +++ b/drivers/net/wireless/ath/ath11k/dp.c @@ -1009,7 +1009,7 @@ void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif) static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx) { - struct ath11k_base *ab = (struct ath11k_base *)ctx; + struct ath11k_base *ab = ctx; struct sk_buff *msdu = skb; dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len, diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h index d04f78ab6b37..15815af453b2 100644 --- a/drivers/net/wireless/ath/ath11k/dp.h +++ b/drivers/net/wireless/ath/ath11k/dp.h @@ -635,7 +635,7 @@ enum htt_ppdu_stats_tag_type { * b'24 - status_swap: 1 is to swap status TLV * b'25 - pkt_swap: 1 is to swap packet TLV * b'26:31 - rsvd1: reserved for future use - * dword1 - b'0:16 - ring_buffer_size: size of bufferes referenced by rx ring, + * dword1 - b'0:16 - ring_buffer_size: size of buffers referenced by rx ring, * in byte units. 
* Valid only for HW_TO_SW_RING and SW_TO_HW_RING * - b'16:31 - rsvd2: Reserved for future use diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index 5c76664ba0dd..7eac93ce7a1d 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -1099,7 +1099,7 @@ int ath11k_dp_rx_ampdu_start(struct ath11k *ar, struct ieee80211_ampdu_params *params) { struct ath11k_base *ab = ar->ab; - struct ath11k_sta *arsta = (void *)params->sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta); int vdev_id = arsta->arvif->vdev_id; int ret; @@ -1117,7 +1117,7 @@ int ath11k_dp_rx_ampdu_stop(struct ath11k *ar, { struct ath11k_base *ab = ar->ab; struct ath11k_peer *peer; - struct ath11k_sta *arsta = (void *)params->sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta); int vdev_id = arsta->arvif->vdev_id; dma_addr_t paddr; bool active; @@ -1256,7 +1256,7 @@ static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab, int cur_user; u16 peer_id; - ppdu_info = (struct htt_ppdu_stats_info *)data; + ppdu_info = data; switch (tag) { case HTT_PPDU_STATS_TAG_COMMON: @@ -1388,9 +1388,6 @@ ath11k_update_per_peer_tx_stats(struct ath11k *ar, u8 tid = HTT_PPDU_STATS_NON_QOS_TID; bool is_ampdu = false; - if (!usr_stats) - return; - if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE))) return; @@ -1459,7 +1456,7 @@ ath11k_update_per_peer_tx_stats(struct ath11k *ar, } sta = peer->sta; - arsta = (struct ath11k_sta *)sta->drv_priv; + arsta = ath11k_sta_to_arsta(sta); memset(&arsta->txrate, 0, sizeof(arsta->txrate)); @@ -1621,14 +1618,20 @@ static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb) u8 pdev_id; pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr); + + rcu_read_lock(); + ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); if (!ar) { ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id); - return; + goto out; } trace_ath11k_htt_pktlog(ar, data->payload, hdr->size, ar->ab->pktlog_defs_checksum); + +out: + rcu_read_unlock(); } static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab, @@ -2408,7 +2411,7 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, rx_status->freq = center_freq; } else if (channel_num >= 1 && channel_num <= 14) { rx_status->band = NL80211_BAND_2GHZ; - } else if (channel_num >= 36 && channel_num <= 173) { + } else if (channel_num >= 36 && channel_num <= 177) { rx_status->band = NL80211_BAND_5GHZ; } else { spin_lock_bh(&ar->data_lock); @@ -3423,7 +3426,7 @@ static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_ti ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, ab->hw_params.hal_params->rx_buf_rbm); - /* Fill mpdu details into reo entrace ring */ + /* Fill mpdu details into reo entrance ring */ srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id]; spin_lock_bh(&srng->lock); @@ -4489,8 +4492,7 @@ int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar, src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng); if (src_srng_desc) { - struct ath11k_buffer_addr *src_desc = - (struct ath11k_buffer_addr *)src_srng_desc; + struct ath11k_buffer_addr *src_desc = src_srng_desc; *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info); } else { @@ -4509,8 +4511,7 @@ void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc, u8 *rbm, void **pp_buf_addr_info) { - struct hal_rx_msdu_link *msdu_link = - (struct hal_rx_msdu_link 
*)rx_msdu_link_desc; + struct hal_rx_msdu_link *msdu_link = rx_msdu_link_desc; struct ath11k_buffer_addr *buf_addr_info; buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info; @@ -4551,7 +4552,7 @@ static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar, u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1); u8 tmp = 0; - msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc; + msdu_link = msdu_link_desc; msdu_details = &msdu_link->msdu_link[0]; for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) { @@ -4648,8 +4649,7 @@ ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id, bool is_frag, is_first_msdu; bool drop_mpdu = false; struct ath11k_skb_rxcb *rxcb; - struct hal_reo_entrance_ring *ent_desc = - (struct hal_reo_entrance_ring *)ring_entry; + struct hal_reo_entrance_ring *ent_desc = ring_entry; int buf_id; u32 rx_link_buf_info[2]; u8 rbm; @@ -5097,13 +5097,6 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id, mon_dst_srng = &ar->ab->hal.srng_list[ring_id]; - if (!mon_dst_srng) { - ath11k_warn(ar->ab, - "HAL Monitor Destination Ring Init Failed -- %p", - mon_dst_srng); - return; - } - spin_lock_bh(&pmon->mon_lock); ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng); @@ -5255,7 +5248,7 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, goto next_skb; } - arsta = (struct ath11k_sta *)peer->sta->drv_priv; + arsta = ath11k_sta_to_arsta(peer->sta); ath11k_dp_rx_update_peer_stats(arsta, ppdu_info); if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr)) diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c index a34833de7c67..a5fa08bc623b 100644 --- a/drivers/net/wireless/ath/ath11k/dp_tx.c +++ b/drivers/net/wireless/ath/ath11k/dp_tx.c @@ -238,7 +238,7 @@ tcl_ring_sel: spin_unlock_bh(&tcl_ring->lock); ret = -ENOMEM; - /* Checking for available tcl descritors in another ring in + /* Checking for available tcl descriptors in another ring in * case of failure due to full tcl ring now, is better than * checking this ring earlier for each pkt tx. * Restart ring selection if some rings are not checked yet. 
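The run of identical one-line conversions above (debugfs_sta.c, dp.c, dp_rx.c) swaps the open-coded (struct ath11k_sta *)sta->drv_priv cast for a named accessor. The helper itself lives in core.h and is not part of the hunks shown here; a minimal sketch of its likely shape:

/* sta->drv_priv is a driver-owned scratch area that mac80211 reserves
 * per station (sized through hw->sta_data_size), so the per-station
 * ath11k state lives there.  One inline accessor replaces dozens of
 * ad-hoc casts at the call sites.
 */
static inline struct ath11k_sta *ath11k_sta_to_arsta(struct ieee80211_sta *sta)
{
        return (struct ath11k_sta *)sta->drv_priv;
}

The related removals of casts such as (struct ath11k_base *)ctx need no replacement at all: C converts a void pointer to any object pointer implicitly, so those casts were pure noise.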
@@ -344,7 +344,7 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab, dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); if (!skb_cb->vif) { - dev_kfree_skb_any(msdu); + ieee80211_free_txskb(ar->hw, msdu); return; } @@ -369,7 +369,7 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab, "dp_tx: failed to find the peer with peer_id %d\n", ts->peer_id); spin_unlock_bh(&ab->base_lock); - dev_kfree_skb_any(msdu); + ieee80211_free_txskb(ar->hw, msdu); return; } spin_unlock_bh(&ab->base_lock); @@ -467,7 +467,7 @@ void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts) } sta = peer->sta; - arsta = (struct ath11k_sta *)sta->drv_priv; + arsta = ath11k_sta_to_arsta(sta); memset(&arsta->txrate, 0, sizeof(arsta->txrate)); pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE, @@ -566,12 +566,12 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar, dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); if (unlikely(!rcu_access_pointer(ab->pdevs_active[ar->pdev_idx]))) { - dev_kfree_skb_any(msdu); + ieee80211_free_txskb(ar->hw, msdu); return; } if (unlikely(!skb_cb->vif)) { - dev_kfree_skb_any(msdu); + ieee80211_free_txskb(ar->hw, msdu); return; } @@ -624,10 +624,10 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar, "dp_tx: failed to find the peer with peer_id %d\n", ts->peer_id); spin_unlock_bh(&ab->base_lock); - dev_kfree_skb_any(msdu); + ieee80211_free_txskb(ar->hw, msdu); return; } - arsta = (struct ath11k_sta *)peer->sta->drv_priv; + arsta = ath11k_sta_to_arsta(peer->sta); status.sta = peer->sta; status.skb = msdu; status.info = info; diff --git a/drivers/net/wireless/ath/ath11k/fw.c b/drivers/net/wireless/ath/ath11k/fw.c new file mode 100644 index 000000000000..8f84fba29886 --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/fw.c @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "core.h" + +#include "debug.h" + +static int ath11k_fw_request_firmware_api_n(struct ath11k_base *ab, + const char *name) +{ + size_t magic_len, len, ie_len; + int ie_id, i, index, bit, ret; + struct ath11k_fw_ie *hdr; + const u8 *data; + __le32 *timestamp; + + ab->fw.fw = ath11k_core_firmware_request(ab, name); + if (IS_ERR(ab->fw.fw)) { + ret = PTR_ERR(ab->fw.fw); + ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to load %s: %d\n", name, ret); + ab->fw.fw = NULL; + return ret; + } + + data = ab->fw.fw->data; + len = ab->fw.fw->size; + + /* magic also includes the null byte, check that as well */ + magic_len = strlen(ATH11K_FIRMWARE_MAGIC) + 1; + + if (len < magic_len) { + ath11k_err(ab, "firmware image too small to contain magic: %zu\n", + len); + ret = -EINVAL; + goto err; + } + + if (memcmp(data, ATH11K_FIRMWARE_MAGIC, magic_len) != 0) { + ath11k_err(ab, "Invalid firmware magic\n"); + ret = -EINVAL; + goto err; + } + + /* jump over the padding */ + magic_len = ALIGN(magic_len, 4); + + /* make sure there's space for padding */ + if (magic_len > len) { + ath11k_err(ab, "No space for padding after magic\n"); + ret = -EINVAL; + goto err; + } + + len -= magic_len; + data += magic_len; + + /* loop elements */ + while (len > sizeof(struct ath11k_fw_ie)) { + hdr = (struct ath11k_fw_ie *)data; + + ie_id = le32_to_cpu(hdr->id); + ie_len = le32_to_cpu(hdr->len); + + len -= sizeof(*hdr); + data += sizeof(*hdr); + + if (len < ie_len) { + ath11k_err(ab, "Invalid length for FW IE %d (%zu < %zu)\n", + ie_id, len, ie_len); + ret = -EINVAL; + goto err; + } + + switch (ie_id) { + case ATH11K_FW_IE_TIMESTAMP: + if (ie_len != sizeof(u32)) + break; + + timestamp = (__le32 *)data; + + ath11k_dbg(ab, ATH11K_DBG_BOOT, "found fw timestamp %d\n", + le32_to_cpup(timestamp)); + break; + case ATH11K_FW_IE_FEATURES: + ath11k_dbg(ab, ATH11K_DBG_BOOT, + "found firmware features ie (%zd B)\n", + ie_len); + + for (i = 0; i < ATH11K_FW_FEATURE_COUNT; i++) { + index = i / 8; + bit = i % 8; + + if (index == ie_len) + break; + + if (data[index] & (1 << bit)) + __set_bit(i, ab->fw.fw_features); + } + + ath11k_dbg_dump(ab, ATH11K_DBG_BOOT, "features", "", + ab->fw.fw_features, + sizeof(ab->fw.fw_features)); + break; + case ATH11K_FW_IE_AMSS_IMAGE: + ath11k_dbg(ab, ATH11K_DBG_BOOT, + "found fw image ie (%zd B)\n", + ie_len); + + ab->fw.amss_data = data; + ab->fw.amss_len = ie_len; + break; + case ATH11K_FW_IE_M3_IMAGE: + ath11k_dbg(ab, ATH11K_DBG_BOOT, + "found m3 image ie (%zd B)\n", + ie_len); + + ab->fw.m3_data = data; + ab->fw.m3_len = ie_len; + break; + default: + ath11k_warn(ab, "Unknown FW IE: %u\n", ie_id); + break; + } + + /* jump over the padding */ + ie_len = ALIGN(ie_len, 4); + + /* make sure there's space for padding */ + if (ie_len > len) + break; + + len -= ie_len; + data += ie_len; + }; + + return 0; + +err: + release_firmware(ab->fw.fw); + ab->fw.fw = NULL; + return ret; +} + +int ath11k_fw_pre_init(struct ath11k_base *ab) +{ + int ret; + + ret = ath11k_fw_request_firmware_api_n(ab, ATH11K_FW_API2_FILE); + if (ret == 0) { + ab->fw.api_version = 2; + goto out; + } + + ab->fw.api_version = 1; + +out: + ath11k_dbg(ab, ATH11K_DBG_BOOT, "using fw api %d\n", + ab->fw.api_version); + + return 0; +} + +void ath11k_fw_destroy(struct ath11k_base *ab) +{ + release_firmware(ab->fw.fw); +} diff --git a/drivers/net/wireless/ath/ath11k/fw.h b/drivers/net/wireless/ath/ath11k/fw.h new file mode 100644 index 000000000000..d9893ceb2c3d --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/fw.h @@ -0,0 +1,27 @@ +/* 
SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef ATH11K_FW_H +#define ATH11K_FW_H + +#define ATH11K_FW_API2_FILE "firmware-2.bin" +#define ATH11K_FIRMWARE_MAGIC "QCOM-ATH11K-FW" + +enum ath11k_fw_ie_type { + ATH11K_FW_IE_TIMESTAMP = 0, + ATH11K_FW_IE_FEATURES = 1, + ATH11K_FW_IE_AMSS_IMAGE = 2, + ATH11K_FW_IE_M3_IMAGE = 3, +}; + +enum ath11k_fw_features { + /* keep last */ + ATH11K_FW_FEATURE_COUNT, +}; + +int ath11k_fw_pre_init(struct ath11k_base *ab); +void ath11k_fw_destroy(struct ath11k_base *ab); + +#endif /* ATH11K_FW_H */ diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c index 0a99aa7ddbf4..23f3af8e372d 100644 --- a/drivers/net/wireless/ath/ath11k/hal.c +++ b/drivers/net/wireless/ath/ath11k/hal.c @@ -571,7 +571,7 @@ u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type) void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id, u8 byte_swap_data) { - struct hal_ce_srng_src_desc *desc = (struct hal_ce_srng_src_desc *)buf; + struct hal_ce_srng_src_desc *desc = buf; desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK; desc->buffer_addr_info = @@ -586,8 +586,7 @@ void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id, void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr) { - struct hal_ce_srng_dest_desc *desc = - (struct hal_ce_srng_dest_desc *)buf; + struct hal_ce_srng_dest_desc *desc = buf; desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK; desc->buffer_addr_info = @@ -597,8 +596,7 @@ void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr) u32 ath11k_hal_ce_dst_status_get_length(void *buf) { - struct hal_ce_srng_dst_status_desc *desc = - (struct hal_ce_srng_dst_status_desc *)buf; + struct hal_ce_srng_dst_status_desc *desc = buf; u32 len; len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags); diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c index e5ed5efb139e..41946795d620 100644 --- a/drivers/net/wireless/ath/ath11k/hal_rx.c +++ b/drivers/net/wireless/ath/ath11k/hal_rx.c @@ -265,7 +265,7 @@ out: void ath11k_hal_rx_buf_addr_info_set(void *desc, dma_addr_t paddr, u32 cookie, u8 manager) { - struct ath11k_buffer_addr *binfo = (struct ath11k_buffer_addr *)desc; + struct ath11k_buffer_addr *binfo = desc; u32 paddr_lo, paddr_hi; paddr_lo = lower_32_bits(paddr); @@ -279,7 +279,7 @@ void ath11k_hal_rx_buf_addr_info_set(void *desc, dma_addr_t paddr, void ath11k_hal_rx_buf_addr_info_get(void *desc, dma_addr_t *paddr, u32 *cookie, u8 *rbm) { - struct ath11k_buffer_addr *binfo = (struct ath11k_buffer_addr *)desc; + struct ath11k_buffer_addr *binfo = desc; *paddr = (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR, binfo->info1)) << 32) | @@ -292,7 +292,7 @@ void ath11k_hal_rx_msdu_link_info_get(void *link_desc, u32 *num_msdus, u32 *msdu_cookies, enum hal_rx_buf_return_buf_manager *rbm) { - struct hal_rx_msdu_link *link = (struct hal_rx_msdu_link *)link_desc; + struct hal_rx_msdu_link *link = link_desc; struct hal_rx_msdu_details *msdu; int i; @@ -699,7 +699,7 @@ u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid) void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size, u32 start_seq, enum hal_pn_type type) { - struct hal_rx_reo_queue *qdesc = (struct hal_rx_reo_queue *)vaddr; + struct hal_rx_reo_queue *qdesc = vaddr; struct hal_rx_reo_queue_ext *ext_desc; memset(qdesc, 0, sizeof(*qdesc)); @@ -809,27 +809,25 @@ static inline void 
ath11k_hal_rx_handle_ofdma_info(void *rx_tlv, struct hal_rx_user_status *rx_user_status) { - struct hal_rx_ppdu_end_user_stats *ppdu_end_user = - (struct hal_rx_ppdu_end_user_stats *)rx_tlv; + struct hal_rx_ppdu_end_user_stats *ppdu_end_user = rx_tlv; rx_user_status->ul_ofdma_user_v0_word0 = __le32_to_cpu(ppdu_end_user->info6); - rx_user_status->ul_ofdma_user_v0_word1 = __le32_to_cpu(ppdu_end_user->rsvd2[10]); + rx_user_status->ul_ofdma_user_v0_word1 = __le32_to_cpu(ppdu_end_user->info10); } static inline void ath11k_hal_rx_populate_byte_count(void *rx_tlv, void *ppduinfo, struct hal_rx_user_status *rx_user_status) { - struct hal_rx_ppdu_end_user_stats *ppdu_end_user = - (struct hal_rx_ppdu_end_user_stats *)rx_tlv; + struct hal_rx_ppdu_end_user_stats *ppdu_end_user = rx_tlv; rx_user_status->mpdu_ok_byte_count = - FIELD_GET(HAL_RX_PPDU_END_USER_STATS_RSVD2_6_MPDU_OK_BYTE_COUNT, - __le32_to_cpu(ppdu_end_user->rsvd2[6])); + FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO8_MPDU_OK_BYTE_COUNT, + __le32_to_cpu(ppdu_end_user->info8)); rx_user_status->mpdu_err_byte_count = - FIELD_GET(HAL_RX_PPDU_END_USER_STATS_RSVD2_8_MPDU_ERR_BYTE_COUNT, - __le32_to_cpu(ppdu_end_user->rsvd2[8])); + FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO9_MPDU_ERR_BYTE_COUNT, + __le32_to_cpu(ppdu_end_user->info9)); } static inline void @@ -903,8 +901,8 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab, FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX, __le32_to_cpu(eu_stats->info2)); ppdu_info->tid = - ffs(FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP, - __le32_to_cpu(eu_stats->info6))) - 1; + ffs(FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO7_TID_BITMAP, + __le32_to_cpu(eu_stats->info7))) - 1; ppdu_info->tcp_msdu_count = FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT, __le32_to_cpu(eu_stats->info4)); @@ -1540,8 +1538,7 @@ void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr, u32 *sw_cookie, void **pp_buf_addr, u8 *rbm, u32 *msdu_cnt) { - struct hal_reo_entrance_ring *reo_ent_ring = - (struct hal_reo_entrance_ring *)rx_desc; + struct hal_reo_entrance_ring *reo_ent_ring = rx_desc; struct ath11k_buffer_addr *buf_addr_info; struct rx_mpdu_desc *rx_mpdu_desc_info_details; diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h index 61bd8416c4fd..472a52cf5889 100644 --- a/drivers/net/wireless/ath/ath11k/hal_rx.h +++ b/drivers/net/wireless/ath/ath11k/hal_rx.h @@ -149,7 +149,7 @@ struct hal_rx_mon_ppdu_info { u8 beamformed; u8 rssi_comb; u8 rssi_chain_pri20[HAL_RX_MAX_NSS]; - u8 tid; + u16 tid; u16 ht_flags; u16 vht_flags; u16 he_flags; @@ -219,11 +219,11 @@ struct hal_rx_ppdu_start { #define HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT GENMASK(15, 0) #define HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT GENMASK(31, 16) -#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP GENMASK(15, 0) -#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_EOSP_BITMAP GENMASK(31, 16) +#define HAL_RX_PPDU_END_USER_STATS_INFO7_TID_BITMAP GENMASK(15, 0) +#define HAL_RX_PPDU_END_USER_STATS_INFO7_TID_EOSP_BITMAP GENMASK(31, 16) -#define HAL_RX_PPDU_END_USER_STATS_RSVD2_6_MPDU_OK_BYTE_COUNT GENMASK(24, 0) -#define HAL_RX_PPDU_END_USER_STATS_RSVD2_8_MPDU_ERR_BYTE_COUNT GENMASK(24, 0) +#define HAL_RX_PPDU_END_USER_STATS_INFO8_MPDU_OK_BYTE_COUNT GENMASK(24, 0) +#define HAL_RX_PPDU_END_USER_STATS_INFO9_MPDU_ERR_BYTE_COUNT GENMASK(24, 0) struct hal_rx_ppdu_end_user_stats { __le32 rsvd0[2]; @@ -236,7 +236,13 @@ struct hal_rx_ppdu_end_user_stats { __le32 info4; __le32 info5; 
__le32 info6; - __le32 rsvd2[11]; + __le32 info7; + __le32 rsvd2[4]; + __le32 info8; + __le32 rsvd3; + __le32 info9; + __le32 rsvd4[2]; + __le32 info10; } __packed; struct hal_rx_ppdu_end_user_stats_ext { diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.c b/drivers/net/wireless/ath/ath11k/hal_tx.c index d1b0e36e04a9..b919df6ce743 100644 --- a/drivers/net/wireless/ath/ath11k/hal_tx.c +++ b/drivers/net/wireless/ath/ath11k/hal_tx.c @@ -37,7 +37,7 @@ static const u8 dscp_tid_map[DSCP_TID_MAP_TBL_ENTRY_SIZE] = { void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd, struct hal_tx_info *ti) { - struct hal_tcl_data_cmd *tcl_cmd = (struct hal_tcl_data_cmd *)cmd; + struct hal_tcl_data_cmd *tcl_cmd = cmd; tcl_cmd->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, ti->paddr); diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h index 659b80d2abd4..d68ed4214dec 100644 --- a/drivers/net/wireless/ath/ath11k/hif.h +++ b/drivers/net/wireless/ath/ath11k/hif.h @@ -9,18 +9,18 @@ #include "core.h" struct ath11k_hif_ops { - u32 (*read32)(struct ath11k_base *sc, u32 address); - void (*write32)(struct ath11k_base *sc, u32 address, u32 data); + u32 (*read32)(struct ath11k_base *ab, u32 address); + void (*write32)(struct ath11k_base *ab, u32 address, u32 data); int (*read)(struct ath11k_base *ab, void *buf, u32 start, u32 end); - void (*irq_enable)(struct ath11k_base *sc); - void (*irq_disable)(struct ath11k_base *sc); - int (*start)(struct ath11k_base *sc); - void (*stop)(struct ath11k_base *sc); - int (*power_up)(struct ath11k_base *sc); - void (*power_down)(struct ath11k_base *sc); + void (*irq_enable)(struct ath11k_base *ab); + void (*irq_disable)(struct ath11k_base *ab); + int (*start)(struct ath11k_base *ab); + void (*stop)(struct ath11k_base *ab); + int (*power_up)(struct ath11k_base *ab); + void (*power_down)(struct ath11k_base *ab); int (*suspend)(struct ath11k_base *ab); int (*resume)(struct ath11k_base *ab); - int (*map_service_to_pipe)(struct ath11k_base *sc, u16 service_id, + int (*map_service_to_pipe)(struct ath11k_base *ab, u16 service_id, u8 *ul_pipe, u8 *dl_pipe); int (*get_user_msi_vector)(struct ath11k_base *ab, char *user_name, int *num_vectors, u32 *user_base_data, @@ -44,34 +44,34 @@ static inline void ath11k_hif_ce_irq_disable(struct ath11k_base *ab) ab->hif.ops->ce_irq_disable(ab); } -static inline int ath11k_hif_start(struct ath11k_base *sc) +static inline int ath11k_hif_start(struct ath11k_base *ab) { - return sc->hif.ops->start(sc); + return ab->hif.ops->start(ab); } -static inline void ath11k_hif_stop(struct ath11k_base *sc) +static inline void ath11k_hif_stop(struct ath11k_base *ab) { - sc->hif.ops->stop(sc); + ab->hif.ops->stop(ab); } -static inline void ath11k_hif_irq_enable(struct ath11k_base *sc) +static inline void ath11k_hif_irq_enable(struct ath11k_base *ab) { - sc->hif.ops->irq_enable(sc); + ab->hif.ops->irq_enable(ab); } -static inline void ath11k_hif_irq_disable(struct ath11k_base *sc) +static inline void ath11k_hif_irq_disable(struct ath11k_base *ab) { - sc->hif.ops->irq_disable(sc); + ab->hif.ops->irq_disable(ab); } -static inline int ath11k_hif_power_up(struct ath11k_base *sc) +static inline int ath11k_hif_power_up(struct ath11k_base *ab) { - return sc->hif.ops->power_up(sc); + return ab->hif.ops->power_up(ab); } -static inline void ath11k_hif_power_down(struct ath11k_base *sc) +static inline void ath11k_hif_power_down(struct ath11k_base *ab) { - sc->hif.ops->power_down(sc); + ab->hif.ops->power_down(ab); } 
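The hif.h changes running through these wrappers are a pure parameter rename: the opaque sc name (apparently a BSD-style "softc" abbreviation inherited from older Atheros drivers) becomes ab, matching the struct ath11k_base naming used in the rest of the driver. The file implements the driver's bus-abstraction vtable, where each bus backend (PCI, AHB) fills in an ops table and thin static-inline wrappers forward to it. A self-contained miniature of that pattern, with invented names rather than the driver's own:

/* Miniature of the hif vtable idiom: core code never calls a bus
 * backend directly, only through the ops table behind these wrappers.
 * All names here are illustrative, not taken from ath11k.
 */
struct bus;

struct bus_ops {
        int  (*start)(struct bus *b);
        void (*stop)(struct bus *b);
};

struct bus {
        const struct bus_ops *ops;
};

static inline int bus_start(struct bus *b)
{
        return b->ops->start(b);
}

static inline void bus_stop(struct bus *b)
{
        b->ops->stop(b);
}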
static inline int ath11k_hif_suspend(struct ath11k_base *ab) @@ -90,14 +90,14 @@ static inline int ath11k_hif_resume(struct ath11k_base *ab) return 0; } -static inline u32 ath11k_hif_read32(struct ath11k_base *sc, u32 address) +static inline u32 ath11k_hif_read32(struct ath11k_base *ab, u32 address) { - return sc->hif.ops->read32(sc, address); + return ab->hif.ops->read32(ab, address); } -static inline void ath11k_hif_write32(struct ath11k_base *sc, u32 address, u32 data) +static inline void ath11k_hif_write32(struct ath11k_base *ab, u32 address, u32 data) { - sc->hif.ops->write32(sc, address, data); + ab->hif.ops->write32(ab, address, data); } static inline int ath11k_hif_read(struct ath11k_base *ab, void *buf, @@ -109,10 +109,10 @@ static inline int ath11k_hif_read(struct ath11k_base *ab, void *buf, return ab->hif.ops->read(ab, buf, start, end); } -static inline int ath11k_hif_map_service_to_pipe(struct ath11k_base *sc, u16 service_id, +static inline int ath11k_hif_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, u8 *ul_pipe, u8 *dl_pipe) { - return sc->hif.ops->map_service_to_pipe(sc, service_id, ul_pipe, dl_pipe); + return ab->hif.ops->map_service_to_pipe(ab, service_id, ul_pipe, dl_pipe); } static inline int ath11k_get_user_msi_vector(struct ath11k_base *ab, char *user_name, diff --git a/drivers/net/wireless/ath/ath11k/htc.h b/drivers/net/wireless/ath/ath11k/htc.h index f429b37cfdf7..d31e501c807c 100644 --- a/drivers/net/wireless/ath/ath11k/htc.h +++ b/drivers/net/wireless/ath/ath11k/htc.h @@ -156,18 +156,6 @@ struct ath11k_htc_record { }; } __packed __aligned(4); -/* note: the trailer offset is dynamic depending - * on payload length. this is only a struct layout draft - */ -struct ath11k_htc_frame { - struct ath11k_htc_hdr hdr; - union { - struct ath11k_htc_msg msg; - u8 payload[0]; - }; - struct ath11k_htc_record trailer[0]; -} __packed __aligned(4); - enum ath11k_htc_svc_gid { ATH11K_HTC_SVC_GRP_RSVD = 0, ATH11K_HTC_SVC_GRP_WMI = 1, diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h index f5533630a7f9..d51a99669dd6 100644 --- a/drivers/net/wireless/ath/ath11k/hw.h +++ b/drivers/net/wireless/ath/ath11k/hw.h @@ -187,7 +187,8 @@ struct ath11k_hw_params { bool supports_shadow_regs; bool idle_ps; bool supports_sta_ps; - bool cold_boot_calib; + bool coldboot_cal_mm; + bool coldboot_cal_ftm; bool cbcal_restart_fw; int fw_mem_mode; u32 num_vdevs; diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 8c77ade49437..7f7b39817773 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -5,6 +5,7 @@ */ #include <net/mac80211.h> +#include <net/cfg80211.h> #include <linux/etherdevice.h> #include <linux/bitfield.h> #include <linux/inetdevice.h> @@ -566,7 +567,7 @@ static void ath11k_get_arvif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct ath11k_vif_iter *arvif_iter = data; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); if (arvif->vdev_id == arvif_iter->vdev_id) arvif_iter->arvif = arvif; @@ -1464,7 +1465,7 @@ static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif) u32 params = 0; u8 i = 0; - tx_arvif = (void *)arvif->vif->mbssid_tx_vif->drv_priv; + tx_arvif = ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif); beacons = ieee80211_beacon_get_template_ema_list(tx_arvif->ar->hw, tx_arvif->vif, 0); @@ -1520,8 +1521,8 @@ static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif) 
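The mac.c hunks around this point apply the same accessor treatment to interfaces: every (void *)vif->drv_priv becomes ath11k_vif_to_arvif(vif), including the mbssid_tx_vif lookups used for multi-BSSID beaconing. The helper mirrors the station one; again a sketch, since the real definition sits in core.h outside this diff:

/* vif->drv_priv is the per-interface driver area mac80211 reserves
 * (sized through hw->vif_data_size); the accessor gives the cast a
 * single, greppable home.
 */
static inline struct ath11k_vif *ath11k_vif_to_arvif(struct ieee80211_vif *vif)
{
        return (struct ath11k_vif *)vif->drv_priv;
}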
struct sk_buff *bcn; int ret; - if (arvif->vif->mbssid_tx_vif) { - tx_arvif = (void *)arvif->vif->mbssid_tx_vif->drv_priv; + if (vif->mbssid_tx_vif) { + tx_arvif = ath11k_vif_to_arvif(vif->mbssid_tx_vif); if (tx_arvif != arvif) { ar = tx_arvif->ar; ab = ar->ab; @@ -1562,7 +1563,7 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif) * non-transmitting interfaces, and results in a crash if sent. */ if (vif->mbssid_tx_vif && - arvif != (void *)vif->mbssid_tx_vif->drv_priv && arvif->is_up) + arvif != ath11k_vif_to_arvif(vif->mbssid_tx_vif) && arvif->is_up) return 0; if (vif->bss_conf.ema_ap && vif->mbssid_tx_vif) @@ -1626,7 +1627,7 @@ static void ath11k_control_beaconing(struct ath11k_vif *arvif, ether_addr_copy(arvif->bssid, info->bssid); if (arvif->vif->mbssid_tx_vif) - tx_arvif = (struct ath11k_vif *)arvif->vif->mbssid_tx_vif->drv_priv; + tx_arvif = ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif); ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, arvif->bssid, @@ -1649,7 +1650,7 @@ static void ath11k_mac_handle_beacon_iter(void *data, u8 *mac, { struct sk_buff *skb = data; struct ieee80211_mgmt *mgmt = (void *)skb->data; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); if (vif->type != NL80211_IFTYPE_STATION) return; @@ -1672,7 +1673,7 @@ static void ath11k_mac_handle_beacon_miss_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { u32 *vdev_id = data; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k *ar = arvif->ar; struct ieee80211_hw *hw = ar->hw; @@ -1718,7 +1719,7 @@ static void ath11k_peer_assoc_h_basic(struct ath11k *ar, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); u32 aid; lockdep_assert_held(&ar->conf_mutex); @@ -1746,7 +1747,7 @@ static void ath11k_peer_assoc_h_crypto(struct ath11k *ar, struct ieee80211_bss_conf *info = &vif->bss_conf; struct cfg80211_chan_def def; struct cfg80211_bss *bss; - struct ath11k_vif *arvif = (struct ath11k_vif *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); const u8 *rsnie = NULL; const u8 *wpaie = NULL; @@ -1804,7 +1805,7 @@ static void ath11k_peer_assoc_h_rates(struct ath11k *ar, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; struct cfg80211_chan_def def; const struct ieee80211_supported_band *sband; @@ -1867,7 +1868,7 @@ static void ath11k_peer_assoc_h_ht(struct ath11k *ar, struct peer_assoc_params *arg) { const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_chan_def def; enum nl80211_band band; const u8 *ht_mcs_mask; @@ -2064,7 +2065,7 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar, struct peer_assoc_params *arg) { const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_chan_def def; enum nl80211_band band; u16 *vht_mcs_mask; @@ -2261,7 +2262,7 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { - struct ath11k_vif *arvif = (void 
*)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_chan_def def; const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; enum nl80211_band band; @@ -2584,7 +2585,7 @@ static void ath11k_peer_assoc_h_qos(struct ath11k *ar, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); switch (arvif->vdev_type) { case WMI_VDEV_TYPE_AP: @@ -2747,7 +2748,7 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_chan_def def; enum nl80211_band band; const u8 *ht_mcs_mask; @@ -2831,7 +2832,7 @@ static void ath11k_peer_assoc_prepare(struct ath11k *ar, lockdep_assert_held(&ar->conf_mutex); - arsta = (struct ath11k_sta *)sta->drv_priv; + arsta = ath11k_sta_to_arsta(sta); memset(arg, 0, sizeof(*arg)); @@ -2933,7 +2934,7 @@ static bool ath11k_mac_vif_recalc_sta_he_txbf(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta_he_cap *he_cap) { - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ieee80211_he_cap_elem he_cap_elem = {0}; struct ieee80211_sta_he_cap *cap_band = NULL; struct cfg80211_chan_def def; @@ -2995,7 +2996,7 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw, struct ieee80211_bss_conf *bss_conf) { struct ath11k *ar = hw->priv; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct peer_assoc_params peer_arg; struct ieee80211_sta *ap_sta; struct ath11k_peer *peer; @@ -3111,7 +3112,7 @@ static void ath11k_bss_disassoc(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath11k *ar = hw->priv; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret; lockdep_assert_held(&ar->conf_mutex); @@ -3160,7 +3161,7 @@ static void ath11k_recalculate_mgmt_rate(struct ath11k *ar, struct ieee80211_vif *vif, struct cfg80211_chan_def *def) { - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); const struct ieee80211_supported_band *sband; u8 basic_rate_idx; int hw_rate_code; @@ -4314,7 +4315,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, ath11k_warn(ab, "peer %pM disappeared!\n", peer_addr); if (sta) { - arsta = (struct ath11k_sta *)sta->drv_priv; + arsta = ath11k_sta_to_arsta(sta); switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: @@ -4632,7 +4633,7 @@ static int ath11k_station_disassoc(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret = 0; lockdep_assert_held(&ar->conf_mutex); @@ -4905,7 +4906,7 @@ static int ath11k_mac_station_add(struct ath11k *ar, { struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct peer_create_params peer_param; int ret; @@ -5029,7 +5030,7 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, { struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct 
ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k_peer *peer; int ret = 0; @@ -5160,7 +5161,7 @@ static int ath11k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw, struct ieee80211_sta *sta) { struct ath11k *ar = hw->priv; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret = 0; s16 txpwr; @@ -5195,7 +5196,7 @@ static void ath11k_mac_op_sta_set_4addr(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool enabled) { struct ath11k *ar = hw->priv; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); if (enabled && !arsta->use_4addr_set) { ieee80211_queue_work(ar->hw, &arsta->set_4addr_wk); @@ -5209,8 +5210,8 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw, u32 changed) { struct ath11k *ar = hw->priv; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_peer *peer; u32 bw, smps; @@ -5337,7 +5338,7 @@ static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw, const struct ieee80211_tx_queue_params *params) { struct ath11k *ar = hw->priv; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct wmi_wmm_params_arg *p = NULL; int ret; @@ -5893,8 +5894,9 @@ static void ath11k_mac_setup_he_cap(struct ath11k *ar, ar->mac.iftype[NL80211_BAND_2GHZ], NL80211_BAND_2GHZ); band = &ar->mac.sbands[NL80211_BAND_2GHZ]; - band->iftype_data = ar->mac.iftype[NL80211_BAND_2GHZ]; - band->n_iftype_data = count; + _ieee80211_set_sband_iftype_data(band, + ar->mac.iftype[NL80211_BAND_2GHZ], + count); } if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) { @@ -5902,8 +5904,9 @@ static void ath11k_mac_setup_he_cap(struct ath11k *ar, ar->mac.iftype[NL80211_BAND_5GHZ], NL80211_BAND_5GHZ); band = &ar->mac.sbands[NL80211_BAND_5GHZ]; - band->iftype_data = ar->mac.iftype[NL80211_BAND_5GHZ]; - band->n_iftype_data = count; + _ieee80211_set_sband_iftype_data(band, + ar->mac.iftype[NL80211_BAND_5GHZ], + count); } if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP && @@ -5912,8 +5915,9 @@ static void ath11k_mac_setup_he_cap(struct ath11k *ar, ar->mac.iftype[NL80211_BAND_6GHZ], NL80211_BAND_6GHZ); band = &ar->mac.sbands[NL80211_BAND_6GHZ]; - band->iftype_data = ar->mac.iftype[NL80211_BAND_6GHZ]; - band->n_iftype_data = count; + _ieee80211_set_sband_iftype_data(band, + ar->mac.iftype[NL80211_BAND_6GHZ], + count); } } @@ -6199,7 +6203,7 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw, } if (control->sta) - arsta = (struct ath11k_sta *)control->sta->drv_priv; + arsta = ath11k_sta_to_arsta(control->sta); ret = ath11k_dp_tx(ar, arvif, arsta, skb); if (unlikely(ret)) { @@ -6455,7 +6459,7 @@ static int ath11k_mac_setup_vdev_params_mbssid(struct ath11k_vif *arvif, return 0; } - tx_arvif = (void *)tx_vif->drv_priv; + tx_arvif = ath11k_vif_to_arvif(tx_vif); if (arvif->vif->bss_conf.nontransmitted) { if (ar->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy) @@ -6967,8 +6971,8 @@ err: static int ath11k_mac_vif_unref(int buf_id, void *skb, void *ctx) { - struct ieee80211_vif *vif = (struct ieee80211_vif *)ctx; - struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB((struct sk_buff *)skb); + struct ieee80211_vif *vif = ctx; + struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb); if (skb_cb->vif == vif) skb_cb->vif = NULL; @@ -7193,6 +7197,7 @@ 
ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif, struct wmi_vdev_start_req_arg arg = {}; const struct cfg80211_chan_def *chandef = &ctx->def; int ret = 0; + unsigned int dfs_cac_time; lockdep_assert_held(&ar->conf_mutex); @@ -7272,20 +7277,21 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif, ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM started, vdev_id %d\n", arvif->vif->addr, arvif->vdev_id); - /* Enable CAC Flag in the driver by checking the channel DFS cac time, - * i.e dfs_cac_ms value which will be valid only for radar channels - * and state as NL80211_DFS_USABLE which indicates CAC needs to be + /* Enable CAC Flag in the driver by checking the all sub-channel's DFS + * state as NL80211_DFS_USABLE which indicates CAC needs to be * done before channel usage. This flags is used to drop rx packets. * during CAC. */ /* TODO Set the flag for other interface types as required */ - if (arvif->vdev_type == WMI_VDEV_TYPE_AP && - chandef->chan->dfs_cac_ms && - chandef->chan->dfs_state == NL80211_DFS_USABLE) { + if (arvif->vdev_type == WMI_VDEV_TYPE_AP && ctx->radar_enabled && + cfg80211_chandef_dfs_usable(ar->hw->wiphy, chandef)) { set_bit(ATH11K_CAC_RUNNING, &ar->dev_flags); + dfs_cac_time = cfg80211_chandef_dfs_cac_time(ar->hw->wiphy, + chandef); ath11k_dbg(ab, ATH11K_DBG_MAC, - "CAC Started in chan_freq %d for vdev %d\n", - arg.channel.freq, arg.vdev_id); + "cac started dfs_cac_time %u center_freq %d center_freq1 %d for vdev %d\n", + dfs_cac_time, arg.channel.freq, chandef->center_freq1, + arg.vdev_id); } ret = ath11k_mac_set_txbf_conf(arvif); @@ -7408,7 +7414,7 @@ ath11k_mac_update_vif_chan(struct ath11k *ar, /* TODO: Update ar->rx_channel */ for (i = 0; i < n_vifs; i++) { - arvif = (void *)vifs[i].vif->drv_priv; + arvif = ath11k_vif_to_arvif(vifs[i].vif); if (WARN_ON(!arvif->is_started)) continue; @@ -7450,7 +7456,7 @@ ath11k_mac_update_vif_chan(struct ath11k *ar, mbssid_tx_vif = arvif->vif->mbssid_tx_vif; if (mbssid_tx_vif) - tx_arvif = (struct ath11k_vif *)mbssid_tx_vif->drv_priv; + tx_arvif = ath11k_vif_to_arvif(mbssid_tx_vif); ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, arvif->bssid, @@ -7546,7 +7552,7 @@ static int ath11k_start_vdev_delay(struct ieee80211_hw *hw, { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret; if (WARN_ON(arvif->is_started)) @@ -7596,7 +7602,7 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret; struct peer_create_params param; @@ -7686,7 +7692,7 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_peer *peer; int ret; @@ -7910,12 +7916,14 @@ ath11k_mac_get_tx_mcs_map(const struct ieee80211_sta_he_cap *he_cap) static bool ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar, + struct ath11k_vif *arvif, enum nl80211_band band, const struct cfg80211_bitrate_mask *mask, int *nss) { struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); + const struct ieee80211_sta_he_cap *he_cap; u16 he_mcs_map = 0; u8 ht_nss_mask = 0; u8 vht_nss_mask = 0; @@ -7946,7 
+7954,11 @@ ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar, return false; } - he_mcs_map = le16_to_cpu(ath11k_mac_get_tx_mcs_map(&sband->iftype_data->he_cap)); + he_cap = ieee80211_get_he_iftype_cap_vif(sband, arvif->vif); + if (!he_cap) + return false; + + he_mcs_map = le16_to_cpu(ath11k_mac_get_tx_mcs_map(he_cap)); for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) { if (mask->control[band].he_mcs[i] == 0) @@ -8223,7 +8235,7 @@ static void ath11k_mac_set_bitrate_mask_iter(void *data, struct ieee80211_sta *sta) { struct ath11k_vif *arvif = data; - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arvif->ar; spin_lock_bh(&ar->data_lock); @@ -8255,7 +8267,7 @@ ath11k_mac_validate_vht_he_fixed_rate_settings(struct ath11k *ar, enum nl80211_b const struct cfg80211_bitrate_mask *mask) { bool he_fixed_rate = false, vht_fixed_rate = false; - struct ath11k_peer *peer, *tmp; + struct ath11k_peer *peer; const u16 *vht_mcs_mask, *he_mcs_mask; struct ieee80211_link_sta *deflink; u8 vht_nss, he_nss; @@ -8278,7 +8290,7 @@ ath11k_mac_validate_vht_he_fixed_rate_settings(struct ath11k *ar, enum nl80211_b rcu_read_lock(); spin_lock_bh(&ar->ab->base_lock); - list_for_each_entry_safe(peer, tmp, &ar->ab->peers, list) { + list_for_each_entry(peer, &ar->ab->peers, list) { if (peer->sta) { deflink = &peer->sta->deflink; @@ -8307,7 +8319,7 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct cfg80211_bitrate_mask *mask) { - struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_chan_def def; struct ath11k_pdev_cap *cap; struct ath11k *ar = arvif->ar; @@ -8362,7 +8374,7 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, ieee80211_iterate_stations_atomic(ar->hw, ath11k_mac_disable_peer_fixed_rate, arvif); - } else if (ath11k_mac_bitrate_mask_get_single_nss(ar, band, mask, + } else if (ath11k_mac_bitrate_mask_get_single_nss(ar, arvif, band, mask, &single_nss)) { rate = WMI_FIXED_RATE_NONE; nss = single_nss; @@ -8627,7 +8639,7 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct station_info *sinfo) { - struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; s8 signal; bool db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT, @@ -8904,8 +8916,8 @@ static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw, enum ieee80211_roc_type type) { struct ath11k *ar = hw->priv; - struct ath11k_vif *arvif = (void *)vif->drv_priv; - struct scan_req_params arg; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); + struct scan_req_params *arg; int ret; u32 scan_time_msec; @@ -8937,27 +8949,31 @@ static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw, scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2; - memset(&arg, 0, sizeof(arg)); - ath11k_wmi_start_scan_init(ar, &arg); - arg.num_chan = 1; - arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list), - GFP_KERNEL); - if (!arg.chan_list) { + arg = kzalloc(sizeof(*arg), GFP_KERNEL); + if (!arg) { ret = -ENOMEM; goto exit; } + ath11k_wmi_start_scan_init(ar, arg); + arg->num_chan = 1; + arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list), + GFP_KERNEL); + if (!arg->chan_list) { + ret = -ENOMEM; + goto free_arg; + } - arg.vdev_id = arvif->vdev_id; - 
arg.scan_id = ATH11K_SCAN_ID; - arg.chan_list[0] = chan->center_freq; - arg.dwell_time_active = scan_time_msec; - arg.dwell_time_passive = scan_time_msec; - arg.max_scan_time = scan_time_msec; - arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE; - arg.scan_flags |= WMI_SCAN_FILTER_PROBE_REQ; - arg.burst_duration = duration; - - ret = ath11k_start_scan(ar, &arg); + arg->vdev_id = arvif->vdev_id; + arg->scan_id = ATH11K_SCAN_ID; + arg->chan_list[0] = chan->center_freq; + arg->dwell_time_active = scan_time_msec; + arg->dwell_time_passive = scan_time_msec; + arg->max_scan_time = scan_time_msec; + arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE; + arg->scan_flags |= WMI_SCAN_FILTER_PROBE_REQ; + arg->burst_duration = duration; + + ret = ath11k_start_scan(ar, arg); if (ret) { ath11k_warn(ar->ab, "failed to start roc scan: %d\n", ret); @@ -8983,7 +8999,9 @@ static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw, ret = 0; free_chan_list: - kfree(arg.chan_list); + kfree(arg->chan_list); +free_arg: + kfree(arg); exit: mutex_unlock(&ar->conf_mutex); return ret; @@ -9042,6 +9060,14 @@ static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw, if (ar->state != ATH11K_STATE_ON) goto err_fallback; + /* Firmware doesn't provide Tx power during CAC hence no need to fetch + * the stats. + */ + if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { + mutex_unlock(&ar->conf_mutex); + return -EAGAIN; + } + req_param.pdev_id = ar->pdev->pdev_id; req_param.stats_id = WMI_REQUEST_PDEV_STAT; diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c index 3ac689f1def4..afeabd6ecc67 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.c +++ b/drivers/net/wireless/ath/ath11k/mhi.c @@ -6,6 +6,7 @@ #include <linux/msi.h> #include <linux/pci.h> +#include <linux/firmware.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/ioport.h> @@ -333,6 +334,7 @@ static void ath11k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl, ath11k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n"); break; case MHI_CB_EE_RDDM: + ath11k_warn(ab, "firmware crashed: MHI_CB_EE_RDDM\n"); if (!(test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags))) queue_work(ab->workqueue_aux, &ab->reset_work); break; @@ -389,16 +391,23 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci) if (!mhi_ctrl) return -ENOMEM; - ath11k_core_create_firmware_path(ab, ATH11K_AMSS_FILE, - ab_pci->amss_path, - sizeof(ab_pci->amss_path)); - ab_pci->mhi_ctrl = mhi_ctrl; mhi_ctrl->cntrl_dev = ab->dev; - mhi_ctrl->fw_image = ab_pci->amss_path; mhi_ctrl->regs = ab->mem; mhi_ctrl->reg_len = ab->mem_len; + if (ab->fw.amss_data && ab->fw.amss_len > 0) { + /* use MHI firmware file from firmware-N.bin */ + mhi_ctrl->fw_data = ab->fw.amss_data; + mhi_ctrl->fw_sz = ab->fw.amss_len; + } else { + /* use the old separate mhi.bin MHI firmware file */ + ath11k_core_create_firmware_path(ab, ATH11K_AMSS_FILE, + ab_pci->amss_path, + sizeof(ab_pci->amss_path)); + mhi_ctrl->fw_image = ab_pci->amss_path; + } + ret = ath11k_mhi_get_msi(ab_pci); if (ret) { ath11k_err(ab, "failed to get msi for mhi\n"); diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index 79e2cbe82638..09e65c5e55c4 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -15,6 +15,7 @@ #include "mhi.h" #include "debug.h" #include "pcic.h" +#include "qmi.h" #define ATH11K_PCI_BAR_NUM 0 #define ATH11K_PCI_DMA_MASK 32 @@ -581,8 +582,8 @@ static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci) 
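The remain_on_channel rework above takes struct scan_req_params (a large structure) off the kernel stack and heap-allocates it, which adds one more unwind label to the error path. A condensed sketch of the resulting allocate/unwind ladder as it appears in the hunk, with most of the request setup elided:

        struct scan_req_params *arg;
        int ret;

        arg = kzalloc(sizeof(*arg), GFP_KERNEL);
        if (!arg) {
                ret = -ENOMEM;
                goto exit;
        }

        ath11k_wmi_start_scan_init(ar, arg);
        arg->num_chan = 1;
        arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list),
                                 GFP_KERNEL);
        if (!arg->chan_list) {
                ret = -ENOMEM;
                goto free_arg;
        }

        /* ... fill in vdev_id, dwell times, flags ... */

        ret = ath11k_start_scan(ar, arg);
        if (ret) {
                ath11k_warn(ar->ab, "failed to start roc scan: %d\n", ret);
                goto free_chan_list;
        }

        ret = 0;

free_chan_list:
        kfree(arg->chan_list);
free_arg:
        kfree(arg);
exit:
        mutex_unlock(&ar->conf_mutex);
        return ret;

The labels run in reverse allocation order, so each failure point jumps past only the cleanups it actually needs; the success path falls through the same ladder because the request is no longer needed once the scan has been handed to firmware.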
u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1)); /* disable L0s and L1 */ - pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL, - ab_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC); + pcie_capability_clear_word(ab_pci->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC); set_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags); } @@ -590,8 +591,10 @@ static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci) static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci) { if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags)) - pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL, - ab_pci->link_ctl); + pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, + ab_pci->link_ctl & + PCI_EXP_LNKCTL_ASPMC); } static int ath11k_pci_power_up(struct ath11k_base *ab) @@ -851,10 +854,16 @@ unsupported_wcn6855_soc: if (ret) goto err_pci_disable_msi; + ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0)); + if (ret) { + ath11k_err(ab, "failed to set irq affinity %d\n", ret); + goto err_pci_disable_msi; + } + ret = ath11k_mhi_register(ab_pci); if (ret) { ath11k_err(ab, "failed to register mhi: %d\n", ret); - goto err_pci_disable_msi; + goto err_irq_affinity_cleanup; } ret = ath11k_hal_srng_init(ab); @@ -875,12 +884,6 @@ unsupported_wcn6855_soc: goto err_ce_free; } - ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0)); - if (ret) { - ath11k_err(ab, "failed to set irq affinity %d\n", ret); - goto err_free_irq; - } - /* kernel may allocate a dummy vector before request_irq and * then allocate a real vector when request_irq is called. * So get msi_data here again to avoid spurious interrupt @@ -889,19 +892,17 @@ unsupported_wcn6855_soc: ret = ath11k_pci_config_msi_data(ab_pci); if (ret) { ath11k_err(ab, "failed to config msi_data: %d\n", ret); - goto err_irq_affinity_cleanup; + goto err_free_irq; } ret = ath11k_core_init(ab); if (ret) { ath11k_err(ab, "failed to init core: %d\n", ret); - goto err_irq_affinity_cleanup; + goto err_free_irq; } + ath11k_qmi_fwreset_from_cold_boot(ab); return 0; -err_irq_affinity_cleanup: - ath11k_pci_set_irq_affinity_hint(ab_pci, NULL); - err_free_irq: ath11k_pcic_free_irq(ab); @@ -914,6 +915,9 @@ err_hal_srng_deinit: err_mhi_unregister: ath11k_mhi_unregister(ab_pci); +err_irq_affinity_cleanup: + ath11k_pci_set_irq_affinity_hint(ab_pci, NULL); + err_pci_disable_msi: ath11k_pci_free_msi(ab_pci); @@ -1034,7 +1038,7 @@ static void ath11k_pci_exit(void) module_exit(ath11k_pci_exit); -MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN PCIe devices"); +MODULE_DESCRIPTION("Driver support for Qualcomm Technologies PCIe 802.11ax WLAN devices"); MODULE_LICENSE("Dual BSD/GPL"); /* firmware files */ diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c index c63083633b37..16d1e332193f 100644 --- a/drivers/net/wireless/ath/ath11k/pcic.c +++ b/drivers/net/wireless/ath/ath11k/pcic.c @@ -422,14 +422,14 @@ static void ath11k_pcic_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp) disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); } -static void __ath11k_pcic_ext_irq_disable(struct ath11k_base *sc) +static void __ath11k_pcic_ext_irq_disable(struct ath11k_base *ab) { int i; - clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &sc->dev_flags); + clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags); for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { - struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i]; + struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 
ath11k_pcic_ext_grp_disable(irq_grp); diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c index 114aa3a9a339..1c79a932d17f 100644 --- a/drivers/net/wireless/ath/ath11k/peer.c +++ b/drivers/net/wireless/ath/ath11k/peer.c @@ -446,7 +446,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif, peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN; if (sta) { - arsta = (struct ath11k_sta *)sta->drv_priv; + arsta = ath11k_sta_to_arsta(sta); arsta->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 0) | FIELD_PREP(HTT_TCL_META_DATA_PEER_ID, peer->peer_id); diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index d4eaf7d2ba84..c270dc46d506 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -9,11 +9,11 @@ #include "qmi.h" #include "core.h" #include "debug.h" +#include "hif.h" #include <linux/of.h> #include <linux/of_address.h> #include <linux/ioport.h> #include <linux/firmware.h> -#include <linux/of_device.h> #include <linux/of_irq.h> #define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02 @@ -2079,7 +2079,7 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab) return -EINVAL; } - if (ath11k_cold_boot_cal && ab->hw_params.cold_boot_calib) { + if (ath11k_core_coldboot_cal_support(ab)) { if (hremote_node) { ab->qmi.target_mem[idx].paddr = res.start + host_ddr_sz; @@ -2502,38 +2502,56 @@ out: static int ath11k_qmi_m3_load(struct ath11k_base *ab) { struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; - const struct firmware *fw; + const struct firmware *fw = NULL; + const void *m3_data; char path[100]; + size_t m3_len; int ret; - fw = ath11k_core_firmware_request(ab, ATH11K_M3_FILE); - if (IS_ERR(fw)) { - ret = PTR_ERR(fw); - ath11k_core_create_firmware_path(ab, ATH11K_M3_FILE, - path, sizeof(path)); - ath11k_err(ab, "failed to load %s: %d\n", path, ret); - return ret; - } + if (m3_mem->vaddr) + /* m3 firmware buffer is already available in the DMA buffer */ + return 0; + + if (ab->fw.m3_data && ab->fw.m3_len > 0) { + /* firmware-N.bin had a m3 firmware file so use that */ + m3_data = ab->fw.m3_data; + m3_len = ab->fw.m3_len; + } else { + /* No m3 file in firmware-N.bin so try to request old + * separate m3.bin. 
+ */ + fw = ath11k_core_firmware_request(ab, ATH11K_M3_FILE); + if (IS_ERR(fw)) { + ret = PTR_ERR(fw); + ath11k_core_create_firmware_path(ab, ATH11K_M3_FILE, + path, sizeof(path)); + ath11k_err(ab, "failed to load %s: %d\n", path, ret); + return ret; + } - if (m3_mem->vaddr || m3_mem->size) - goto skip_m3_alloc; + m3_data = fw->data; + m3_len = fw->size; + } m3_mem->vaddr = dma_alloc_coherent(ab->dev, - fw->size, &m3_mem->paddr, + m3_len, &m3_mem->paddr, GFP_KERNEL); if (!m3_mem->vaddr) { ath11k_err(ab, "failed to allocate memory for M3 with size %zu\n", fw->size); - release_firmware(fw); - return -ENOMEM; + ret = -ENOMEM; + goto out; } -skip_m3_alloc: - memcpy(m3_mem->vaddr, fw->data, fw->size); - m3_mem->size = fw->size; + memcpy(m3_mem->vaddr, m3_data, m3_len); + m3_mem->size = m3_len; + + ret = 0; + +out: release_firmware(fw); - return 0; + return ret; } static void ath11k_qmi_m3_free(struct ath11k_base *ab) @@ -2839,6 +2857,33 @@ int ath11k_qmi_firmware_start(struct ath11k_base *ab, return 0; } +int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab) +{ + int timeout; + + if (!ath11k_core_coldboot_cal_support(ab) || + ab->hw_params.cbcal_restart_fw == 0) + return 0; + + ath11k_dbg(ab, ATH11K_DBG_QMI, "wait for cold boot done\n"); + + timeout = wait_event_timeout(ab->qmi.cold_boot_waitq, + (ab->qmi.cal_done == 1), + ATH11K_COLD_BOOT_FW_RESET_DELAY); + + if (timeout <= 0) { + ath11k_warn(ab, "Coldboot Calibration timed out\n"); + return -ETIMEDOUT; + } + + /* reset the firmware */ + ath11k_hif_power_down(ab); + ath11k_hif_power_up(ab); + ath11k_dbg(ab, ATH11K_DBG_QMI, "exit wait for cold boot done\n"); + return 0; +} +EXPORT_SYMBOL(ath11k_qmi_fwreset_from_cold_boot); + static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab) { int timeout; @@ -3209,8 +3254,8 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work) break; } - if (ath11k_cold_boot_cal && ab->qmi.cal_done == 0 && - ab->hw_params.cold_boot_calib) { + if (ab->qmi.cal_done == 0 && + ath11k_core_coldboot_cal_support(ab)) { ath11k_qmi_process_coldboot_calibration(ab); } else { clear_bit(ATH11K_FLAG_CRASH_FLUSH, diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h index 0909d53cefeb..d477e2be814b 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.h +++ b/drivers/net/wireless/ath/ath11k/qmi.h @@ -37,7 +37,7 @@ #define QMI_WLANFW_MAX_DATA_SIZE_V01 6144 #define ATH11K_FIRMWARE_MODE_OFF 4 -#define ATH11K_COLD_BOOT_FW_RESET_DELAY (40 * HZ) +#define ATH11K_COLD_BOOT_FW_RESET_DELAY (60 * HZ) #define ATH11K_QMI_DEVICE_BAR_SIZE 0x200000 @@ -514,10 +514,9 @@ struct qmi_wlanfw_wlan_ini_resp_msg_v01 { int ath11k_qmi_firmware_start(struct ath11k_base *ab, u32 mode); void ath11k_qmi_firmware_stop(struct ath11k_base *ab); -void ath11k_qmi_event_work(struct work_struct *work); -void ath11k_qmi_msg_recv_work(struct work_struct *work); void ath11k_qmi_deinit_service(struct ath11k_base *ab); int ath11k_qmi_init_service(struct ath11k_base *ab); void ath11k_qmi_free_resource(struct ath11k_base *ab); +int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab); #endif diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c index 7f9fb968dac6..3c7debae800a 100644 --- a/drivers/net/wireless/ath/ath11k/reg.c +++ b/drivers/net/wireless/ath/ath11k/reg.c @@ -352,6 +352,16 @@ static u32 ath11k_map_fw_reg_flags(u16 reg_flags) return flags; } +static u32 ath11k_map_fw_phy_flags(u32 phy_flags) +{ + u32 flags = 0; + + if (phy_flags & 
ATH11K_REG_PHY_BITMAP_NO11AX) + flags |= NL80211_RRF_NO_HE; + + return flags; +} + static bool ath11k_reg_can_intersect(struct ieee80211_reg_rule *rule1, struct ieee80211_reg_rule *rule2) @@ -685,6 +695,7 @@ ath11k_reg_build_regd(struct ath11k_base *ab, } flags |= ath11k_map_fw_reg_flags(reg_rule->flags); + flags |= ath11k_map_fw_phy_flags(reg_info->phybitmap); ath11k_reg_update_rule(tmp_regd->reg_rules + i, reg_rule->start_freq, diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h index 2f284f26378d..84daa6543b6a 100644 --- a/drivers/net/wireless/ath/ath11k/reg.h +++ b/drivers/net/wireless/ath/ath11k/reg.h @@ -24,6 +24,9 @@ enum ath11k_dfs_region { ATH11K_DFS_REG_UNDEF, }; +/* Phy bitmaps */ +#define ATH11K_REG_PHY_BITMAP_NO11AX BIT(5) + /* ATH11K Regulatory API's */ void ath11k_reg_init(struct ath11k *ar); void ath11k_reg_free(struct ath11k_base *ab); diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c index 705868198df4..0b7b7122cc05 100644 --- a/drivers/net/wireless/ath/ath11k/spectral.c +++ b/drivers/net/wireless/ath/ath11k/spectral.c @@ -382,16 +382,11 @@ static ssize_t ath11k_write_file_spectral_count(struct file *file, { struct ath11k *ar = file->private_data; unsigned long val; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ssize_t ret; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val > ATH11K_SPECTRAL_SCAN_COUNT_MAX) return -EINVAL; @@ -437,16 +432,11 @@ static ssize_t ath11k_write_file_spectral_bins(struct file *file, { struct ath11k *ar = file->private_data; unsigned long val; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ssize_t ret; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val < ATH11K_SPECTRAL_MIN_BINS || val > ar->ab->hw_params.spectral.max_fft_bins) @@ -598,7 +588,7 @@ int ath11k_spectral_process_fft(struct ath11k *ar, return -EINVAL; } - tlv = (struct spectral_tlv *)data; + tlv = data; tlv_len = FIELD_GET(SPECTRAL_TLV_HDR_LEN, __le32_to_cpu(tlv->header)); /* convert Dword into bytes */ tlv_len *= ATH11K_SPECTRAL_DWORD_SIZE; diff --git a/drivers/net/wireless/ath/ath11k/testmode.c b/drivers/net/wireless/ath/ath11k/testmode.c index 8fc5cddb28bd..43bb23265d34 100644 --- a/drivers/net/wireless/ath/ath11k/testmode.c +++ b/drivers/net/wireless/ath/ath11k/testmode.c @@ -350,7 +350,7 @@ static int ath11k_tm_cmd_wmi(struct ath11k *ar, struct nlattr *tb[], if (ar->ab->fw_mode != ATH11K_FIRMWARE_MODE_FTM && (tag == WMI_TAG_VDEV_SET_PARAM_CMD || tag == WMI_TAG_UNIT_TEST_CMD)) { if (vif) { - arvif = (struct ath11k_vif *)vif->drv_priv; + arvif = ath11k_vif_to_arvif(vif); *ptr = arvif->vdev_id; } else { ret = -EINVAL; diff --git a/drivers/net/wireless/ath/ath11k/thermal.c b/drivers/net/wireless/ath/ath11k/thermal.c index 23ed01bd44f9..c9b012f97ba5 100644 --- a/drivers/net/wireless/ath/ath11k/thermal.c +++ b/drivers/net/wireless/ath/ath11k/thermal.c @@ -125,7 +125,7 @@ ATTRIBUTE_GROUPS(ath11k_hwmon); int ath11k_thermal_set_throttling(struct ath11k *ar, u32 throttle_state) { - struct ath11k_base *sc = ar->ab; + struct ath11k_base *ab = ar->ab; struct thermal_mitigation_params param; int ret = 0; @@ -147,14 +147,14 @@ int 
ath11k_thermal_set_throttling(struct ath11k *ar, u32 throttle_state) ret = ath11k_wmi_send_thermal_mitigation_param_cmd(ar, &param); if (ret) { - ath11k_warn(sc, "failed to send thermal mitigation duty cycle %u ret %d\n", + ath11k_warn(ab, "failed to send thermal mitigation duty cycle %u ret %d\n", throttle_state, ret); } return ret; } -int ath11k_thermal_register(struct ath11k_base *sc) +int ath11k_thermal_register(struct ath11k_base *ab) { struct thermal_cooling_device *cdev; struct device *hwmon_dev; @@ -162,8 +162,8 @@ int ath11k_thermal_register(struct ath11k_base *sc) struct ath11k_pdev *pdev; int i, ret; - for (i = 0; i < sc->num_radios; i++) { - pdev = &sc->pdevs[i]; + for (i = 0; i < ab->num_radios; i++) { + pdev = &ab->pdevs[i]; ar = pdev->ar; if (!ar) continue; @@ -172,7 +172,7 @@ int ath11k_thermal_register(struct ath11k_base *sc) &ath11k_thermal_ops); if (IS_ERR(cdev)) { - ath11k_err(sc, "failed to setup thermal device result: %ld\n", + ath11k_err(ab, "failed to setup thermal device result: %ld\n", PTR_ERR(cdev)); ret = -EINVAL; goto err_thermal_destroy; @@ -183,7 +183,7 @@ int ath11k_thermal_register(struct ath11k_base *sc) ret = sysfs_create_link(&ar->hw->wiphy->dev.kobj, &cdev->device.kobj, "cooling_device"); if (ret) { - ath11k_err(sc, "failed to create cooling device symlink\n"); + ath11k_err(ab, "failed to create cooling device symlink\n"); goto err_thermal_destroy; } @@ -204,18 +204,18 @@ int ath11k_thermal_register(struct ath11k_base *sc) return 0; err_thermal_destroy: - ath11k_thermal_unregister(sc); + ath11k_thermal_unregister(ab); return ret; } -void ath11k_thermal_unregister(struct ath11k_base *sc) +void ath11k_thermal_unregister(struct ath11k_base *ab) { struct ath11k *ar; struct ath11k_pdev *pdev; int i; - for (i = 0; i < sc->num_radios; i++) { - pdev = &sc->pdevs[i]; + for (i = 0; i < ab->num_radios; i++) { + pdev = &ab->pdevs[i]; ar = pdev->ar; if (!ar) continue; diff --git a/drivers/net/wireless/ath/ath11k/thermal.h b/drivers/net/wireless/ath/ath11k/thermal.h index 3e39675ef7f5..83cb67686733 100644 --- a/drivers/net/wireless/ath/ath11k/thermal.h +++ b/drivers/net/wireless/ath/ath11k/thermal.h @@ -26,17 +26,17 @@ struct ath11k_thermal { }; #if IS_REACHABLE(CONFIG_THERMAL) -int ath11k_thermal_register(struct ath11k_base *sc); -void ath11k_thermal_unregister(struct ath11k_base *sc); +int ath11k_thermal_register(struct ath11k_base *ab); +void ath11k_thermal_unregister(struct ath11k_base *ab); int ath11k_thermal_set_throttling(struct ath11k *ar, u32 throttle_state); void ath11k_thermal_event_temperature(struct ath11k *ar, int temperature); #else -static inline int ath11k_thermal_register(struct ath11k_base *sc) +static inline int ath11k_thermal_register(struct ath11k_base *ab) { return 0; } -static inline void ath11k_thermal_unregister(struct ath11k_base *sc) +static inline void ath11k_thermal_unregister(struct ath11k_base *ab) { } diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 23ad6825e5be..2845b4313d3a 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -292,18 +292,18 @@ err_pull: int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, u32 cmd_id) { - struct ath11k_wmi_base *wmi_sc = wmi->wmi_ab; + struct ath11k_wmi_base *wmi_ab = wmi->wmi_ab; int ret = -EOPNOTSUPP; - struct ath11k_base *ab = wmi_sc->ab; + struct ath11k_base *ab = wmi_ab->ab; might_sleep(); if (ab->hw_params.credit_flow) { - wait_event_timeout(wmi_sc->tx_credits_wq, ({ + 
wait_event_timeout(wmi_ab->tx_credits_wq, ({ ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id); if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, - &wmi_sc->ab->dev_flags)) + &wmi_ab->ab->dev_flags)) ret = -ESHUTDOWN; (ret != -EAGAIN); @@ -313,7 +313,7 @@ int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id); if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, - &wmi_sc->ab->dev_flags)) + &wmi_ab->ab->dev_flags)) ret = -ESHUTDOWN; (ret != -ENOBUFS); @@ -321,10 +321,10 @@ int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, } if (ret == -EAGAIN) - ath11k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id); + ath11k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id); if (ret == -ENOBUFS) - ath11k_warn(wmi_sc->ab, "ce desc not available for wmi command %d\n", + ath11k_warn(wmi_ab->ab, "ce desc not available for wmi command %d\n", cmd_id); return ret; @@ -611,10 +611,10 @@ static int ath11k_service_ready_event(struct ath11k_base *ab, struct sk_buff *sk return 0; } -struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len) +struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_ab, u32 len) { struct sk_buff *skb; - struct ath11k_base *ab = wmi_sc->ab; + struct ath11k_base *ab = wmi_ab->ab; u32 round_len = roundup(len, 4); skb = ath11k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len); @@ -2281,7 +2281,7 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) | FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE; - tmp_ptr = (u32 *)ptr; + tmp_ptr = ptr; for (i = 0; i < params->num_chan; ++i) tmp_ptr[i] = params->chan_list[i]; @@ -4148,7 +4148,7 @@ static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi, ptr += TLV_HDR_SIZE + len; if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) { - hw_mode = (struct wmi_pdev_set_hw_mode_cmd_param *)ptr; + hw_mode = ptr; hw_mode->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) | FIELD_PREP(WMI_TLV_LEN, @@ -4168,7 +4168,7 @@ static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi, len = sizeof(*band_to_mac); for (idx = 0; idx < param->num_band_to_mac; idx++) { - band_to_mac = (void *)ptr; + band_to_mac = ptr; band_to_mac->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_BAND_TO_MAC) | @@ -4291,7 +4291,7 @@ int ath11k_wmi_set_hw_mode(struct ath11k_base *ab, int ath11k_wmi_cmd_init(struct ath11k_base *ab) { - struct ath11k_wmi_base *wmi_sc = &ab->wmi_ab; + struct ath11k_wmi_base *wmi_ab = &ab->wmi_ab; struct wmi_init_cmd_param init_param; struct target_resource_config config; @@ -4304,12 +4304,12 @@ int ath11k_wmi_cmd_init(struct ath11k_base *ab) ab->wmi_ab.svc_map)) config.is_reg_cc_ext_event_supported = 1; - memcpy(&wmi_sc->wlan_resource_config, &config, sizeof(config)); + memcpy(&wmi_ab->wlan_resource_config, &config, sizeof(config)); - init_param.res_cfg = &wmi_sc->wlan_resource_config; - init_param.num_mem_chunks = wmi_sc->num_mem_chunks; - init_param.hw_mode_id = wmi_sc->preferred_hw_mode; - init_param.mem_chunks = wmi_sc->mem_chunks; + init_param.res_cfg = &wmi_ab->wlan_resource_config; + init_param.num_mem_chunks = wmi_ab->num_mem_chunks; + init_param.hw_mode_id = wmi_ab->preferred_hw_mode; + init_param.mem_chunks = wmi_ab->mem_chunks; if (ab->hw_params.single_pdev_only) init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX; @@ -4317,7 +4317,7 @@ int ath11k_wmi_cmd_init(struct ath11k_base *ab) init_param.num_band_to_mac = ab->num_radios; ath11k_fill_band_to_mac_param(ab, 
init_param.band_to_mac); - return ath11k_init_cmd_send(&wmi_sc->wmi[0], &init_param); + return ath11k_init_cmd_send(&wmi_ab->wmi[0], &init_param); } int ath11k_wmi_vdev_spectral_conf(struct ath11k *ar, @@ -5440,10 +5440,11 @@ static int ath11k_pull_reg_chan_list_ext_update_ev(struct ath11k_base *ab, } ath11k_dbg(ab, ATH11K_DBG_WMI, - "cc_ext %s dsf %d BW: min_2ghz %d max_2ghz %d min_5ghz %d max_5ghz %d", + "cc_ext %s dfs %d BW: min_2ghz %d max_2ghz %d min_5ghz %d max_5ghz %d phy_bitmap 0x%x", reg_info->alpha2, reg_info->dfs_region, reg_info->min_bw_2ghz, reg_info->max_bw_2ghz, - reg_info->min_bw_5ghz, reg_info->max_bw_5ghz); + reg_info->min_bw_5ghz, reg_info->max_bw_5ghz, + reg_info->phybitmap); ath11k_dbg(ab, ATH11K_DBG_WMI, "num_2ghz_reg_rules %d num_5ghz_reg_rules %d", @@ -6452,7 +6453,7 @@ static int ath11k_wmi_tlv_rssi_chain_parse(struct ath11k_base *ab, goto exit; } - arsta = (struct ath11k_sta *)sta->drv_priv; + arsta = ath11k_sta_to_arsta(sta); BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) > ARRAY_SIZE(stats_rssi->rssi_avg_beacon)); @@ -6540,7 +6541,7 @@ static int ath11k_wmi_tlv_fw_stats_data_parse(struct ath11k_base *ab, arvif->bssid, NULL); if (sta) { - arsta = (struct ath11k_sta *)sta->drv_priv; + arsta = ath11k_sta_to_arsta(sta); arsta->rssi_beacon = src->beacon_snr; ath11k_dbg(ab, ATH11K_DBG_WMI, "stats vdev id %d snr %d\n", @@ -7222,14 +7223,12 @@ static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len, memset(&fixed_param, 0, sizeof(fixed_param)); memcpy(&fixed_param, (struct wmi_ready_event *)ptr, min_t(u16, sizeof(fixed_param), len)); - ab->wlan_init_status = fixed_param.ready_event_min.status; rdy_parse->num_extra_mac_addr = fixed_param.ready_event_min.num_extra_mac_addr; ether_addr_copy(ab->mac_addr, fixed_param.ready_event_min.mac_addr.addr); ab->pktlog_defs_checksum = fixed_param.pktlog_defs_checksum; - ab->wmi_ready = true; break; case WMI_TAG_ARRAY_FIXED_STRUCT: addr_list = (struct wmi_mac_addr *)ptr; @@ -7469,7 +7468,7 @@ static void ath11k_wmi_event_peer_sta_ps_state_chg(struct ath11k_base *ab, goto exit; } - arsta = (struct ath11k_sta *)sta->drv_priv; + arsta = ath11k_sta_to_arsta(sta); spin_lock_bh(&ar->data_lock); @@ -8337,6 +8336,8 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp, ev->freq_offset, ev->sidx); + rcu_read_lock(); + ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id); if (!ar) { @@ -8354,6 +8355,8 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff ieee80211_radar_detected(ar->hw); exit: + rcu_read_unlock(); + kfree(tb); } @@ -8383,15 +8386,19 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab, ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev temperature ev temp %d pdev_id %d\n", ev->temp, ev->pdev_id); + rcu_read_lock(); + ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id); if (!ar) { ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id); - kfree(tb); - return; + goto exit; } ath11k_thermal_event_temperature(ar, ev->temp); +exit: + rcu_read_unlock(); + kfree(tb); } @@ -8611,12 +8618,13 @@ static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab, return; } + rcu_read_lock(); + arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id); if (!arvif) { ath11k_warn(ab, "failed to get arvif for vdev_id:%d\n", ev->vdev_id); - kfree(tb); - return; + goto exit; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event gtk offload refresh_cnt %d\n", @@ -8633,6 +8641,8 @@ static void 
ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab, ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid, (void *)&replay_ctr_be, GFP_ATOMIC); +exit: + rcu_read_unlock(); kfree(tb); } diff --git a/drivers/net/wireless/ath/ath12k/ce.h b/drivers/net/wireless/ath/ath12k/ce.h index 17cf16235e0b..79af3b6159f1 100644 --- a/drivers/net/wireless/ath/ath12k/ce.h +++ b/drivers/net/wireless/ath/ath12k/ce.h @@ -176,9 +176,6 @@ int ath12k_ce_alloc_pipes(struct ath12k_base *ab); void ath12k_ce_free_pipes(struct ath12k_base *ab); int ath12k_ce_get_attr_flags(struct ath12k_base *ab, int ce_id); void ath12k_ce_poll_send_completed(struct ath12k_base *ab, u8 pipe_id); -int ath12k_ce_map_service_to_pipe(struct ath12k_base *ab, u16 service_id, - u8 *ul_pipe, u8 *dl_pipe); -int ath12k_ce_attr_attach(struct ath12k_base *ab); void ath12k_ce_get_shadow_config(struct ath12k_base *ab, u32 **shadow_cfg, u32 *shadow_cfg_len); #endif diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c index 3df8059d5512..b936760b5140 100644 --- a/drivers/net/wireless/ath/ath12k/core.c +++ b/drivers/net/wireless/ath/ath12k/core.c @@ -19,6 +19,27 @@ unsigned int ath12k_debug_mask; module_param_named(debug_mask, ath12k_debug_mask, uint, 0644); MODULE_PARM_DESC(debug_mask, "Debugging mask"); +static int ath12k_core_rfkill_config(struct ath12k_base *ab) +{ + struct ath12k *ar; + int ret = 0, i; + + if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL)) + return 0; + + for (i = 0; i < ab->num_radios; i++) { + ar = ab->pdevs[i].ar; + + ret = ath12k_mac_rfkill_config(ar); + if (ret && ret != -EOPNOTSUPP) { + ath12k_warn(ab, "failed to configure rfkill: %d", ret); + return ret; + } + } + + return ret; +} + int ath12k_core_suspend(struct ath12k_base *ab) { int ret; @@ -339,6 +360,7 @@ int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab, int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd) { char boardname[BOARD_NAME_SIZE]; + int bd_api; int ret; ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE); @@ -347,12 +369,12 @@ int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd) return ret; } - ab->bd_api = 2; + bd_api = 2; ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname); if (!ret) goto success; - ab->bd_api = 1; + bd_api = 1; ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE); if (ret) { ath12k_err(ab, "failed to fetch board-2.bin or board.bin from %s\n", @@ -361,7 +383,7 @@ int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd) } success: - ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", ab->bd_api); + ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", bd_api); return 0; } @@ -377,6 +399,75 @@ static void ath12k_core_stop(struct ath12k_base *ab) /* De-Init of components as needed */ } +static void ath12k_core_check_bdfext(const struct dmi_header *hdr, void *data) +{ + struct ath12k_base *ab = data; + const char *magic = ATH12K_SMBIOS_BDF_EXT_MAGIC; + struct ath12k_smbios_bdf *smbios = (struct ath12k_smbios_bdf *)hdr; + ssize_t copied; + size_t len; + int i; + + if (ab->qmi.target.bdf_ext[0] != '\0') + return; + + if (hdr->type != ATH12K_SMBIOS_BDF_EXT_TYPE) + return; + + if (hdr->length != ATH12K_SMBIOS_BDF_EXT_LENGTH) { + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "wrong smbios bdf ext type length (%d).\n", + hdr->length); + return; + } + + if (!smbios->bdf_enabled) { + ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n"); + return; + } + + 
/* Only one string exists (per spec) */ + if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) { + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "bdf variant magic does not match.\n"); + return; + } + + len = min_t(size_t, + strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext)); + for (i = 0; i < len; i++) { + if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) { + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "bdf variant name contains non ascii chars.\n"); + return; + } + } + + /* Copy extension name without magic prefix */ + copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic), + sizeof(ab->qmi.target.bdf_ext)); + if (copied < 0) { + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "bdf variant string is longer than the buffer can accommodate\n"); + return; + } + + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "found and validated bdf variant smbios_type 0x%x bdf %s\n", + ATH12K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext); +} + +int ath12k_core_check_smbios(struct ath12k_base *ab) +{ + ab->qmi.target.bdf_ext[0] = '\0'; + dmi_walk(ath12k_core_check_bdfext, ab); + + if (ab->qmi.target.bdf_ext[0] == '\0') + return -ENODATA; + + return 0; +} + static int ath12k_core_soc_create(struct ath12k_base *ab) { int ret; @@ -603,6 +694,13 @@ int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab) goto err_core_stop; } ath12k_hif_irq_enable(ab); + + ret = ath12k_core_rfkill_config(ab); + if (ret && ret != -EOPNOTSUPP) { + ath12k_err(ab, "failed to config rfkill: %d\n", ret); + goto err_core_stop; + } + mutex_unlock(&ab->core_lock); return 0; @@ -655,6 +753,27 @@ err_hal_srng_deinit: return ret; } +static void ath12k_rfkill_work(struct work_struct *work) +{ + struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work); + struct ath12k *ar; + bool rfkill_radio_on; + int i; + + spin_lock_bh(&ab->base_lock); + rfkill_radio_on = ab->rfkill_radio_on; + spin_unlock_bh(&ab->base_lock); + + for (i = 0; i < ab->num_radios; i++) { + ar = ab->pdevs[i].ar; + if (!ar) + continue; + + ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on); + wiphy_rfkill_set_hw_state(ar->hw->wiphy, !rfkill_radio_on); + } +} + void ath12k_core_halt(struct ath12k *ar) { struct ath12k_base *ab = ar->ab; @@ -668,6 +787,7 @@ void ath12k_core_halt(struct ath12k *ar) ath12k_mac_peer_cleanup_all(ar); cancel_delayed_work_sync(&ar->scan.timeout); cancel_work_sync(&ar->regd_update_work); + cancel_work_sync(&ab->rfkill_work); rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL); synchronize_rcu(); @@ -685,6 +805,9 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab) ab->stats.fw_crash_counter++; spin_unlock_bh(&ab->base_lock); + if (ab->is_reset) + set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags); + for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; @@ -823,6 +946,8 @@ static void ath12k_core_reset(struct work_struct *work) ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n"); ab->is_reset = true; + atomic_set(&ab->recovery_start_count, 0); + reinit_completion(&ab->recovery_start); atomic_set(&ab->recovery_count, 0); ath12k_core_pre_reconfigure_recovery(ab); @@ -830,15 +955,13 @@ static void ath12k_core_reset(struct work_struct *work) reinit_completion(&ab->reconfigure_complete); ath12k_core_post_reconfigure_recovery(ab); - reinit_completion(&ab->recovery_start); - atomic_set(&ab->recovery_start_count, 0); - ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n"); time_left = wait_for_completion_timeout(&ab->recovery_start, ATH12K_RECOVER_START_TIMEOUT_HZ); 
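
At this point in the reordered reset path, two details are worth spelling out (both inferred from the surrounding code rather than stated by the patch): the completion is now re-armed before the pre/post-reconfigure steps that can signal it, and the timeout above is deliberately non-fatal, since the power cycle below proceeds either way, with the added ath12k_qmi_free_resource() dropping stale QMI state so that power-up renegotiates it from scratch. The general shape of the pattern, with illustrative names:

	reinit_completion(&done);	/* re-arm before any waker can run */
	start_recovery_work();		/* hypothetical; may complete(&done) */
	if (!wait_for_completion_timeout(&done, TIMEOUT_HZ))
		;			/* timed out: force the reset anyway */
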
ath12k_hif_power_down(ab); + ath12k_qmi_free_resource(ab); ath12k_hif_power_up(ab); ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n"); @@ -922,6 +1045,8 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size, init_waitqueue_head(&ab->wmi_ab.tx_credits_wq); INIT_WORK(&ab->restart_work, ath12k_core_restart); INIT_WORK(&ab->reset_work, ath12k_core_reset); + INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work); + timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0); init_completion(&ab->htc_suspend); diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h index 2f93296db792..68c42ca44fcb 100644 --- a/drivers/net/wireless/ath/ath12k/core.h +++ b/drivers/net/wireless/ath/ath12k/core.h @@ -11,6 +11,8 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/bitfield.h> +#include <linux/dmi.h> +#include <linux/ctype.h> #include "qmi.h" #include "htc.h" #include "wmi.h" @@ -32,6 +34,15 @@ /* Pending management packets threshold for dropping probe responses */ #define ATH12K_PRB_RSP_DROP_THRESHOLD ((ATH12K_TX_MGMT_TARGET_MAX_SUPPORT_WMI * 3) / 4) +/* SMBIOS type containing Board Data File Name Extension */ +#define ATH12K_SMBIOS_BDF_EXT_TYPE 0xF8 + +/* SMBIOS type structure length (excluding strings-set) */ +#define ATH12K_SMBIOS_BDF_EXT_LENGTH 0x9 + +/* The magic used by QCA spec */ +#define ATH12K_SMBIOS_BDF_EXT_MAGIC "BDF_" + #define ATH12K_INVALID_HW_MAC_ID 0xFF #define ATH12K_RX_RATE_TABLE_NUM 320 #define ATH12K_RX_RATE_TABLE_11AX_NUM 576 @@ -129,6 +140,13 @@ struct ath12k_ext_irq_grp { struct net_device napi_ndev; }; +struct ath12k_smbios_bdf { + struct dmi_header hdr; + u32 padding; + u8 bdf_enabled; + u8 bdf_ext[]; +} __packed; + #define HEHANDLE_CAP_PHYINFO_SIZE 3 #define HECAP_PHYINFO_SIZE 9 #define HECAP_MACINFO_SIZE 5 @@ -238,6 +256,7 @@ struct ath12k_vif { u32 key_cipher; u8 tx_encap_type; u8 vdev_stats_id; + u32 punct_bitmap; }; struct ath12k_vif_iter { @@ -580,6 +599,14 @@ struct ath12k_band_cap { u32 he_cap_phy_info[PSOC_HOST_MAX_PHY_SIZE]; struct ath12k_wmi_ppe_threshold_arg he_ppet; u16 he_6ghz_capa; + u32 eht_cap_mac_info[WMI_MAX_EHTCAP_MAC_SIZE]; + u32 eht_cap_phy_info[WMI_MAX_EHTCAP_PHY_SIZE]; + u32 eht_mcs_20_only; + u32 eht_mcs_80; + u32 eht_mcs_160; + u32 eht_mcs_320; + struct ath12k_wmi_ppe_threshold_arg eht_ppet; + u32 eht_cap_info_internal; }; struct ath12k_pdev_cap { @@ -614,6 +641,12 @@ struct ath12k_pdev { struct mlo_timestamp timestamp; }; +struct ath12k_fw_pdev { + u32 pdev_id; + u32 phy_id; + u32 supported_bands; +}; + struct ath12k_board_data { const struct firmware *fw; const void *data; @@ -669,7 +702,26 @@ struct ath12k_base { struct mutex core_lock; /* Protects data like peers */ spinlock_t base_lock; + + /* Single pdev device (struct ath12k_hw_params::single_pdev_only): + * + * Firmware maintains data for all bands but advertises a single + * phy to the host which is stored as a single element in this + * array. + * + * Other devices: + * + * This array will contain as many elements as the number of + * radios. 
+ */ struct ath12k_pdev pdevs[MAX_RADIOS]; + + /* struct ath12k_hw_params::single_pdev_only devices use this to + * store phy specific data + */ + struct ath12k_fw_pdev fw_pdev[MAX_RADIOS]; + u8 fw_pdev_count; + struct ath12k_pdev __rcu *pdevs_active[MAX_RADIOS]; struct ath12k_wmi_hal_reg_capabilities_ext_arg hal_reg_cap[MAX_RADIOS]; unsigned long long free_vdev_map; @@ -685,7 +737,6 @@ struct ath12k_base { struct ath12k_wmi_target_cap_arg target_caps; u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE]; bool pdevs_macaddr_valid; - int bd_api; const struct ath12k_hw_params *hw_params; @@ -737,6 +788,10 @@ struct ath12k_base { u64 fw_soc_drop_count; bool static_window_map; + struct work_struct rfkill_work; + /* true means radio is on */ + bool rfkill_radio_on; + /* must be last */ u8 drv_priv[] __aligned(sizeof(void *)); }; @@ -755,7 +810,7 @@ int ath12k_core_fetch_bdf(struct ath12k_base *ath12k, struct ath12k_board_data *bd); void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd); int ath12k_core_check_dt(struct ath12k_base *ath12k); - +int ath12k_core_check_smbios(struct ath12k_base *ab); void ath12k_core_halt(struct ath12k *ar); int ath12k_core_resume(struct ath12k_base *ab); int ath12k_core_suspend(struct ath12k_base *ab); @@ -797,6 +852,11 @@ static inline struct ath12k_vif *ath12k_vif_to_arvif(struct ieee80211_vif *vif) return (struct ath12k_vif *)vif->drv_priv; } +static inline struct ath12k_sta *ath12k_sta_to_arsta(struct ieee80211_sta *sta) +{ + return (struct ath12k_sta *)sta->drv_priv; +} + static inline struct ath12k *ath12k_ab_to_ar(struct ath12k_base *ab, int mac_id) { diff --git a/drivers/net/wireless/ath/ath12k/debug.c b/drivers/net/wireless/ath/ath12k/debug.c index 67893923e010..45d33279e665 100644 --- a/drivers/net/wireless/ath/ath12k/debug.c +++ b/drivers/net/wireless/ath/ath12k/debug.c @@ -64,7 +64,7 @@ void __ath12k_dbg(struct ath12k_base *ab, enum ath12k_debug_mask mask, vaf.va = &args; if (ath12k_debug_mask & mask) - dev_dbg(ab->dev, "%pV", &vaf); + dev_printk(KERN_DEBUG, ab->dev, "%pV", &vaf); /* TODO: trace log */ diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c index ae1645d0f42a..6893466f61f0 100644 --- a/drivers/net/wireless/ath/ath12k/dp.c +++ b/drivers/net/wireless/ath/ath12k/dp.c @@ -38,6 +38,7 @@ void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr) ath12k_dp_rx_peer_tid_cleanup(ar, peer); crypto_free_shash(peer->tfm_mmic); + peer->dp_setup_done = false; spin_unlock_bh(&ab->base_lock); } @@ -1129,6 +1130,7 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab) struct ath12k_dp *dp = &ab->dp; struct sk_buff *skb; int i; + u32 pool_id, tx_spt_page; if (!dp->spt_info) return; @@ -1148,6 +1150,14 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab) dev_kfree_skb_any(skb); } + for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) { + if (!dp->spt_info->rxbaddr[i]) + continue; + + kfree(dp->spt_info->rxbaddr[i]); + dp->spt_info->rxbaddr[i] = NULL; + } + spin_unlock_bh(&dp->rx_desc_lock); /* TX Descriptor cleanup */ @@ -1170,6 +1180,21 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab) spin_unlock_bh(&dp->tx_desc_lock[i]); } + for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) { + spin_lock_bh(&dp->tx_desc_lock[pool_id]); + + for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) { + tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL; + if (!dp->spt_info->txbaddr[tx_spt_page]) + continue; + + kfree(dp->spt_info->txbaddr[tx_spt_page]); + 
dp->spt_info->txbaddr[tx_spt_page] = NULL; + } + + spin_unlock_bh(&dp->tx_desc_lock[pool_id]); + } + /* unmap SPT pages */ for (i = 0; i < dp->num_spt_pages; i++) { if (!dp->spt_info[i].vaddr) @@ -1343,6 +1368,8 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab) return -ENOMEM; } + dp->spt_info->rxbaddr[i] = &rx_descs[0]; + for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) { rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(i, j); rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC; @@ -1368,8 +1395,10 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab) return -ENOMEM; } + tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL; + dp->spt_info->txbaddr[tx_spt_page] = &tx_descs[0]; + for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) { - tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL; ppt_idx = ATH12K_NUM_RX_SPT_PAGES + tx_spt_page; tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j); tx_descs[j].pool_id = pool_id; diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h index 7c5dafce5a68..61f765432516 100644 --- a/drivers/net/wireless/ath/ath12k/dp.h +++ b/drivers/net/wireless/ath/ath12k/dp.h @@ -289,6 +289,8 @@ struct ath12k_tx_desc_info { struct ath12k_spt_info { dma_addr_t paddr; u64 *vaddr; + struct ath12k_rx_desc_info *rxbaddr[ATH12K_NUM_RX_SPT_PAGES]; + struct ath12k_tx_desc_info *txbaddr[ATH12K_NUM_TX_SPT_PAGES]; }; struct ath12k_reo_queue_ref { @@ -712,7 +714,7 @@ enum htt_stats_internal_ppdu_frametype { * b'24 - status_swap: 1 is to swap status TLV * b'25 - pkt_swap: 1 is to swap packet TLV * b'26:31 - rsvd1: reserved for future use - * dword1 - b'0:16 - ring_buffer_size: size of bufferes referenced by rx ring, + * dword1 - b'0:16 - ring_buffer_size: size of buffers referenced by rx ring, * in byte units. 
* Valid only for HW_TO_SW_RING and SW_TO_HW_RING * - b'16:31 - rsvd2: Reserved for future use diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c index f1e57e98bdc6..f44bc5494ce7 100644 --- a/drivers/net/wireless/ath/ath12k/dp_mon.c +++ b/drivers/net/wireless/ath/ath12k/dp_mon.c @@ -13,8 +13,7 @@ static void ath12k_dp_mon_rx_handle_ofdma_info(void *rx_tlv, struct hal_rx_user_status *rx_user_status) { - struct hal_rx_ppdu_end_user_stats *ppdu_end_user = - (struct hal_rx_ppdu_end_user_stats *)rx_tlv; + struct hal_rx_ppdu_end_user_stats *ppdu_end_user = rx_tlv; rx_user_status->ul_ofdma_user_v0_word0 = __le32_to_cpu(ppdu_end_user->usr_resp_ref); @@ -23,13 +22,12 @@ static void ath12k_dp_mon_rx_handle_ofdma_info(void *rx_tlv, } static void -ath12k_dp_mon_rx_populate_byte_count(void *rx_tlv, void *ppduinfo, +ath12k_dp_mon_rx_populate_byte_count(const struct hal_rx_ppdu_end_user_stats *stats, + void *ppduinfo, struct hal_rx_user_status *rx_user_status) { - struct hal_rx_ppdu_end_user_stats *ppdu_end_user = - (struct hal_rx_ppdu_end_user_stats *)rx_tlv; - u32 mpdu_ok_byte_count = __le32_to_cpu(ppdu_end_user->mpdu_ok_cnt); - u32 mpdu_err_byte_count = __le32_to_cpu(ppdu_end_user->mpdu_err_cnt); + u32 mpdu_ok_byte_count = __le32_to_cpu(stats->mpdu_ok_cnt); + u32 mpdu_err_byte_count = __le32_to_cpu(stats->mpdu_err_cnt); rx_user_status->mpdu_ok_byte_count = u32_get_bits(mpdu_ok_byte_count, @@ -2376,7 +2374,7 @@ ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar, return; } - arsta = (struct ath12k_sta *)peer->sta->drv_priv; + arsta = ath12k_sta_to_arsta(peer->sta); rx_stats = arsta->rx_stats; if (!rx_stats) @@ -2552,7 +2550,7 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id, } if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) { - arsta = (struct ath12k_sta *)peer->sta->drv_priv; + arsta = ath12k_sta_to_arsta(peer->sta); ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta, ppdu_info); } else if ((ppdu_info->fc_valid) && diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c index ffd9a2018610..3543fadac4a5 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.c +++ b/drivers/net/wireless/ath/ath12k/dp_rx.c @@ -1054,7 +1054,7 @@ int ath12k_dp_rx_ampdu_start(struct ath12k *ar, struct ieee80211_ampdu_params *params) { struct ath12k_base *ab = ar->ab; - struct ath12k_sta *arsta = (void *)params->sta->drv_priv; + struct ath12k_sta *arsta = ath12k_sta_to_arsta(params->sta); int vdev_id = arsta->arvif->vdev_id; int ret; @@ -1072,7 +1072,7 @@ int ath12k_dp_rx_ampdu_stop(struct ath12k *ar, { struct ath12k_base *ab = ar->ab; struct ath12k_peer *peer; - struct ath12k_sta *arsta = (void *)params->sta->drv_priv; + struct ath12k_sta *arsta = ath12k_sta_to_arsta(params->sta); int vdev_id = arsta->arvif->vdev_id; bool active; int ret; @@ -1410,7 +1410,7 @@ ath12k_update_per_peer_tx_stats(struct ath12k *ar, } sta = peer->sta; - arsta = (struct ath12k_sta *)sta->drv_priv; + arsta = ath12k_sta_to_arsta(sta); memset(&arsta->txrate, 0, sizeof(arsta->txrate)); @@ -1555,6 +1555,13 @@ static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab, msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data; len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE); + if (len > (skb->len - struct_size(msg, data, 0))) { + ath12k_warn(ab, + "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n", + len, skb->len); + return -EINVAL; + } + pdev_id = le32_get_bits(msg->info, 
HTT_T2H_PPDU_STATS_INFO_PDEV_ID); ppdu_id = le32_to_cpu(msg->ppdu_id); @@ -1583,6 +1590,16 @@ static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab, goto exit; } + if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) { + spin_unlock_bh(&ar->data_lock); + ath12k_warn(ab, + "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n", + ppdu_info->ppdu_stats.common.num_users, + HTT_PPDU_STATS_MAX_USERS); + ret = -EINVAL; + goto exit; + } + /* back up data rate tlv for all peers */ if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA && (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) && @@ -1641,11 +1658,12 @@ static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab, msg = (struct ath12k_htt_mlo_offset_msg *)skb->data; pdev_id = u32_get_bits(__le32_to_cpu(msg->info), HTT_T2H_MLO_OFFSET_INFO_PDEV_ID); - ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id); + rcu_read_lock(); + ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id); if (!ar) { ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id); - return; + goto exit; } spin_lock_bh(&ar->data_lock); @@ -1661,6 +1679,8 @@ static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab, pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer); spin_unlock_bh(&ar->data_lock); +exit: + rcu_read_unlock(); } void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab, @@ -2539,7 +2559,7 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab, struct ath12k_skb_rxcb *rxcb; struct sk_buff *msdu; struct ath12k *ar; - u8 mac_id; + u8 mac_id, pdev_id; int ret; if (skb_queue_empty(msdu_list)) @@ -2550,8 +2570,9 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab, while ((msdu = __skb_dequeue(msdu_list))) { rxcb = ATH12K_SKB_RXCB(msdu); mac_id = rxcb->mac_id; - ar = ab->pdevs[mac_id].ar; - if (!rcu_dereference(ab->pdevs_active[mac_id])) { + pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id); + ar = ab->pdevs[pdev_id].ar; + if (!rcu_dereference(ab->pdevs_active[pdev_id])) { dev_kfree_skb_any(msdu); continue; } @@ -2747,6 +2768,7 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev } peer->tfm_mmic = tfm; + peer->dp_setup_done = true; spin_unlock_bh(&ab->base_lock); return 0; @@ -3026,7 +3048,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, desc_info->cookie, HAL_RX_BUF_RBM_SW3_BM); - /* Fill mpdu details into reo entrace ring */ + /* Fill mpdu details into reo entrance ring */ srng = &ab->hal.srng_list[dp->reo_reinject_ring.ring_id]; spin_lock_bh(&srng->lock); @@ -3213,6 +3235,14 @@ static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar, ret = -ENOENT; goto out_unlock; } + + if (!peer->dp_setup_done) { + ath12k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n", + peer->addr, peer_id); + ret = -ENOENT; + goto out_unlock; + } + rx_tid = &peer->rx_tid[tid]; if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) || @@ -3228,7 +3258,7 @@ static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar, goto out_unlock; } - if (frag_no > __fls(rx_tid->rx_frag_bitmap)) + if ((!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap))) __skb_queue_tail(&rx_tid->rx_frags, msdu); else ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu); @@ -3385,6 +3415,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, dma_addr_t paddr; bool is_frag; bool drop = false; + int pdev_id; tot_n_bufs_reaped = 0; quota = budget; @@ 
-3440,7 +3471,8 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, mac_id = le32_get_bits(reo_desc->info0, HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); - ar = ab->pdevs[mac_id].ar; + pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id); + ar = ab->pdevs[pdev_id].ar; if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, drop, msdu_cookies[i])) @@ -3497,23 +3529,13 @@ static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu, struct sk_buff_head *msdu_list) { struct ath12k_base *ab = ar->ab; - u16 msdu_len, peer_id; + u16 msdu_len; struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; u8 l3pad_bytes; struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz; msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc); - peer_id = ath12k_dp_rx_h_peer_id(ab, desc); - - spin_lock(&ab->base_lock); - if (!ath12k_peer_find_by_id(ab, peer_id)) { - spin_unlock(&ab->base_lock); - ath12k_dbg(ab, ATH12K_DBG_DATA, "invalid peer id received in wbm err pkt%d\n", - peer_id); - return -EINVAL; - } - spin_unlock(&ab->base_lock); if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) { /* First buffer will be freed by the caller, so deduct it's length */ @@ -3727,7 +3749,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab, continue; } - desc_info = (struct ath12k_rx_desc_info *)err_info.rx_desc; + desc_info = err_info.rx_desc; /* retry manual desc retrieval if hw cc is not done */ if (!desc_info) { diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c index d3c7c76d6b75..492ca6ce6714 100644 --- a/drivers/net/wireless/ath/ath12k/dp_tx.c +++ b/drivers/net/wireless/ath/ath12k/dp_tx.c @@ -106,11 +106,10 @@ static struct ath12k_tx_desc_info *ath12k_dp_tx_assign_buffer(struct ath12k_dp * return desc; } -static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab, void *cmd, +static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab, + struct hal_tx_msdu_ext_desc *tcl_ext_cmd, struct hal_tx_info *ti) { - struct hal_tx_msdu_ext_desc *tcl_ext_cmd = (struct hal_tx_msdu_ext_desc *)cmd; - tcl_ext_cmd->info0 = le32_encode_bits(ti->paddr, HAL_TX_MSDU_EXT_INFO0_BUF_PTR_LO); tcl_ext_cmd->info1 = le32_encode_bits(0x0, @@ -301,7 +300,7 @@ tcl_ring_sel: spin_unlock_bh(&tcl_ring->lock); ret = -ENOMEM; - /* Checking for available tcl descritors in another ring in + /* Checking for available tcl descriptors in another ring in * case of failure due to full tcl ring now, is better than * checking this ring earlier for each pkt tx. * Restart ring selection if some rings are not checked yet. 
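
The hunk that follows hardens this transmit error path: not every frame carries an MSDU extension descriptor, so skb_cb->paddr_ext_desc may legitimately be zero, and unmapping it unconditionally would hand a bogus address to dma_unmap_single(). A minimal sketch of the map/unmap pairing being enforced (names illustrative):

	/* map the extension descriptor only when one is actually used */
	skb_cb->paddr_ext_desc = 0;
	if (use_ext_desc) {
		skb_cb->paddr_ext_desc = dma_map_single(ab->dev, ext_desc,
							sizeof(*ext_desc),
							DMA_TO_DEVICE);
		if (dma_mapping_error(ab->dev, skb_cb->paddr_ext_desc))
			goto fail;
	}

fail:
	/* unmap only what was mapped, mirroring the guarded hunk below */
	if (skb_cb->paddr_ext_desc)
		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
				 sizeof(*ext_desc), DMA_TO_DEVICE);
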
@@ -330,8 +329,11 @@ tcl_ring_sel: fail_unmap_dma: dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE); - dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc, - sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE); + + if (skb_cb->paddr_ext_desc) + dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc, + sizeof(struct hal_tx_msdu_ext_desc), + DMA_TO_DEVICE); fail_remove_tx_buf: ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id); @@ -347,6 +349,7 @@ static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab, { struct ath12k *ar; struct ath12k_skb_cb *skb_cb; + u8 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id); skb_cb = ATH12K_SKB_CB(msdu); @@ -357,7 +360,7 @@ static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab, dev_kfree_skb_any(msdu); - ar = ab->pdevs[mac_id].ar; + ar = ab->pdevs[pdev_id].ar; if (atomic_dec_and_test(&ar->dp.num_tx_pending)) wake_up(&ar->dp.tx_empty_waitq); } @@ -398,7 +401,7 @@ ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab, } } - ieee80211_tx_status(ar->hw, msdu); + ieee80211_tx_status_skb(ar->hw, msdu); } static void @@ -495,7 +498,7 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar, * Might end up reporting it out-of-band from HTT stats. */ - ieee80211_tx_status(ar->hw, msdu); + ieee80211_tx_status_skb(ar->hw, msdu); exit: rcu_read_unlock(); @@ -536,7 +539,7 @@ void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id) struct hal_tx_status ts = { 0 }; struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id]; struct hal_wbm_release_ring *desc; - u8 mac_id; + u8 mac_id, pdev_id; u64 desc_va; spin_lock_bh(&status_ring->lock); @@ -605,7 +608,8 @@ void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id) continue; } - ar = ab->pdevs[mac_id].ar; + pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id); + ar = ab->pdevs[pdev_id].ar; if (atomic_dec_and_test(&ar->dp.num_tx_pending)) wake_up(&ar->dp.tx_empty_waitq); diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c index e7a150e7158e..eca86fc25a60 100644 --- a/drivers/net/wireless/ath/ath12k/hal.c +++ b/drivers/net/wireless/ath/ath12k/hal.c @@ -385,13 +385,13 @@ static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc) static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_nss(struct hal_rx_desc *desc) { return le32_get_bits(desc->u.qcn9274.msdu_end.info12, - RX_MSDU_END_INFO12_MIMO_SS_BITMAP); + RX_MSDU_END_QCN9274_INFO12_MIMO_SS_BITMAP); } static u8 ath12k_hw_qcn9274_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc) { return le16_get_bits(desc->u.qcn9274.msdu_end.info5, - RX_MSDU_END_INFO5_TID); + RX_MSDU_END_QCN9274_INFO5_TID); } static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc) @@ -819,13 +819,13 @@ static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc) static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_nss(struct hal_rx_desc *desc) { return le32_get_bits(desc->u.wcn7850.msdu_end.info12, - RX_MSDU_END_INFO12_MIMO_SS_BITMAP); + RX_MSDU_END_WCN7850_INFO12_MIMO_SS_BITMAP); } static u8 ath12k_hw_wcn7850_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc) { - return le16_get_bits(desc->u.wcn7850.msdu_end.info5, - RX_MSDU_END_INFO5_TID); + return le32_get_bits(desc->u.wcn7850.mpdu_start.info2, + RX_MPDU_START_INFO2_TID); } static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc) @@ -837,7 +837,7 @@ static void ath12k_hw_wcn7850_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc, struct hal_rx_desc *ldesc) { memcpy(&fdesc->u.wcn7850.msdu_end, 
&ldesc->u.wcn7850.msdu_end, - sizeof(struct rx_msdu_end_qcn9274)); + sizeof(struct rx_msdu_end_wcn7850)); } static u32 ath12k_hw_wcn7850_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc) diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.c b/drivers/net/wireless/ath/ath12k/hal_rx.c index ee61a6462fdc..f6afbd8196bf 100644 --- a/drivers/net/wireless/ath/ath12k/hal_rx.c +++ b/drivers/net/wireless/ath/ath12k/hal_rx.c @@ -713,8 +713,6 @@ void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc, { struct hal_rx_reo_queue_ext *ext_desc; - memset(qdesc, 0, sizeof(*qdesc)); - ath12k_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED, HAL_DESC_REO_QUEUE_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0); diff --git a/drivers/net/wireless/ath/ath12k/hif.h b/drivers/net/wireless/ath/ath12k/hif.h index 54490cdb63a1..4095fd82b1b3 100644 --- a/drivers/net/wireless/ath/ath12k/hif.h +++ b/drivers/net/wireless/ath/ath12k/hif.h @@ -10,17 +10,17 @@ #include "core.h" struct ath12k_hif_ops { - u32 (*read32)(struct ath12k_base *sc, u32 address); - void (*write32)(struct ath12k_base *sc, u32 address, u32 data); - void (*irq_enable)(struct ath12k_base *sc); - void (*irq_disable)(struct ath12k_base *sc); - int (*start)(struct ath12k_base *sc); - void (*stop)(struct ath12k_base *sc); - int (*power_up)(struct ath12k_base *sc); - void (*power_down)(struct ath12k_base *sc); + u32 (*read32)(struct ath12k_base *ab, u32 address); + void (*write32)(struct ath12k_base *ab, u32 address, u32 data); + void (*irq_enable)(struct ath12k_base *ab); + void (*irq_disable)(struct ath12k_base *ab); + int (*start)(struct ath12k_base *ab); + void (*stop)(struct ath12k_base *ab); + int (*power_up)(struct ath12k_base *ab); + void (*power_down)(struct ath12k_base *ab); int (*suspend)(struct ath12k_base *ab); int (*resume)(struct ath12k_base *ab); - int (*map_service_to_pipe)(struct ath12k_base *sc, u16 service_id, + int (*map_service_to_pipe)(struct ath12k_base *ab, u16 service_id, u8 *ul_pipe, u8 *dl_pipe); int (*get_user_msi_vector)(struct ath12k_base *ab, char *user_name, int *num_vectors, u32 *user_base_data, diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c index 5991cc91cd00..2245fb510ba2 100644 --- a/drivers/net/wireless/ath/ath12k/hw.c +++ b/drivers/net/wireless/ath/ath12k/hw.c @@ -886,7 +886,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .vdev_start_delay = false, .interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP), + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT), .supports_monitor = false, .idle_ps = false, @@ -907,6 +908,12 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .hal_ops = &hal_qcn9274_ops, .qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01), + + .rfkill_pin = 0, + .rfkill_cfg = 0, + .rfkill_on_level = 0, + + .rddm_size = 0, }, { .name = "wcn7850 hw2.0", @@ -964,6 +971,12 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01) | BIT(CNSS_PCIE_PERST_NO_PULL_V01), + + .rfkill_pin = 48, + .rfkill_cfg = 0, + .rfkill_on_level = 1, + + .rddm_size = 0x780000, }, { .name = "qcn9274 hw2.0", @@ -998,7 +1011,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .vdev_start_delay = false, .interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP), + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT), .supports_monitor = false, .idle_ps = false, @@ -1019,6 +1033,12 @@ static const struct ath12k_hw_params ath12k_hw_params[] 
= { .hal_ops = &hal_qcn9274_ops, .qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01), + + .rfkill_pin = 0, + .rfkill_cfg = 0, + .rfkill_on_level = 0, + + .rddm_size = 0, }, }; diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h index e6c4223c283c..2d6427cf41a4 100644 --- a/drivers/net/wireless/ath/ath12k/hw.h +++ b/drivers/net/wireless/ath/ath12k/hw.h @@ -186,6 +186,12 @@ struct ath12k_hw_params { const struct hal_ops *hal_ops; u64 qmi_cnss_feature_bitmap; + + u32 rfkill_pin; + u32 rfkill_cfg; + u32 rfkill_on_level; + + u32 rddm_size; }; struct ath12k_hw_ops { diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index 1bb9802ef569..fc0d14ea328e 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -182,32 +182,35 @@ ath12k_phymodes[NUM_NL80211_BANDS][ATH12K_CHAN_WIDTH_NUM] = { [NL80211_BAND_2GHZ] = { [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN, [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN, - [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20_2G, - [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20_2G, - [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40_2G, - [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80_2G, + [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11BE_EHT20_2G, + [NL80211_CHAN_WIDTH_20] = MODE_11BE_EHT20_2G, + [NL80211_CHAN_WIDTH_40] = MODE_11BE_EHT40_2G, + [NL80211_CHAN_WIDTH_80] = MODE_UNKNOWN, [NL80211_CHAN_WIDTH_80P80] = MODE_UNKNOWN, [NL80211_CHAN_WIDTH_160] = MODE_UNKNOWN, + [NL80211_CHAN_WIDTH_320] = MODE_UNKNOWN, }, [NL80211_BAND_5GHZ] = { [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN, [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN, - [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20, - [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20, - [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40, - [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80, - [NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160, - [NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80, + [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11BE_EHT20, + [NL80211_CHAN_WIDTH_20] = MODE_11BE_EHT20, + [NL80211_CHAN_WIDTH_40] = MODE_11BE_EHT40, + [NL80211_CHAN_WIDTH_80] = MODE_11BE_EHT80, + [NL80211_CHAN_WIDTH_160] = MODE_11BE_EHT160, + [NL80211_CHAN_WIDTH_80P80] = MODE_11BE_EHT80_80, + [NL80211_CHAN_WIDTH_320] = MODE_11BE_EHT320, }, [NL80211_BAND_6GHZ] = { [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN, [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN, - [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20, - [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20, - [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40, - [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80, - [NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160, - [NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80, + [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11BE_EHT20, + [NL80211_CHAN_WIDTH_20] = MODE_11BE_EHT20, + [NL80211_CHAN_WIDTH_40] = MODE_11BE_EHT40, + [NL80211_CHAN_WIDTH_80] = MODE_11BE_EHT80, + [NL80211_CHAN_WIDTH_160] = MODE_11BE_EHT160, + [NL80211_CHAN_WIDTH_80P80] = MODE_11BE_EHT80_80, + [NL80211_CHAN_WIDTH_320] = MODE_11BE_EHT320, }, }; @@ -292,6 +295,24 @@ static const char *ath12k_mac_phymode_str(enum wmi_phy_mode mode) return "11ax-he40-2g"; case MODE_11AX_HE80_2G: return "11ax-he80-2g"; + case MODE_11BE_EHT20: + return "11be-eht20"; + case MODE_11BE_EHT40: + return "11be-eht40"; + case MODE_11BE_EHT80: + return "11be-eht80"; + case MODE_11BE_EHT80_80: + return "11be-eht80+80"; + case MODE_11BE_EHT160: + return "11be-eht160"; + case MODE_11BE_EHT160_160: + return "11be-eht160+160"; + case MODE_11BE_EHT320: + return "11be-eht320"; + case MODE_11BE_EHT20_2G: + return "11be-eht20-2g"; + case MODE_11BE_EHT40_2G: + return 
"11be-eht40-2g"; case MODE_UNKNOWN: /* skip */ break; @@ -502,7 +523,7 @@ static void ath12k_get_arvif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct ath12k_vif_iter *arvif_iter = data; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); if (arvif->vdev_id == arvif_iter->vdev_id) arvif_iter->arvif = arvif; @@ -821,6 +842,7 @@ static int ath12k_mac_monitor_vdev_start(struct ath12k *ar, int vdev_id, arg.pref_tx_streams = ar->num_tx_chains; arg.pref_rx_streams = ar->num_rx_chains; + arg.punct_bitmap = 0xFFFFFFFF; arg.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR); @@ -1186,7 +1208,7 @@ static void ath12k_peer_assoc_h_basic(struct ath12k *ar, struct ieee80211_sta *sta, struct ath12k_wmi_peer_assoc_arg *arg) { - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); u32 aid; lockdep_assert_held(&ar->conf_mutex); @@ -1214,7 +1236,7 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar, struct ieee80211_bss_conf *info = &vif->bss_conf; struct cfg80211_chan_def def; struct cfg80211_bss *bss; - struct ath12k_vif *arvif = (struct ath12k_vif *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); const u8 *rsnie = NULL; const u8 *wpaie = NULL; @@ -1272,7 +1294,7 @@ static void ath12k_peer_assoc_h_rates(struct ath12k *ar, struct ieee80211_sta *sta, struct ath12k_wmi_peer_assoc_arg *arg) { - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; struct cfg80211_chan_def def; const struct ieee80211_supported_band *sband; @@ -1335,7 +1357,7 @@ static void ath12k_peer_assoc_h_ht(struct ath12k *ar, struct ath12k_wmi_peer_assoc_arg *arg) { const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct cfg80211_chan_def def; enum nl80211_band band; const u8 *ht_mcs_mask; @@ -1496,7 +1518,7 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar, struct ath12k_wmi_peer_assoc_arg *arg) { const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct cfg80211_chan_def def; enum nl80211_band band; const u16 *vht_mcs_mask; @@ -1637,9 +1659,9 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar, arg->peer_nss = min(sta->deflink.rx_nss, max_nss); memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info, - sizeof(arg->peer_he_cap_macinfo)); + sizeof(he_cap->he_cap_elem.mac_cap_info)); memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info, - sizeof(arg->peer_he_cap_phyinfo)); + sizeof(he_cap->he_cap_elem.phy_cap_info)); arg->peer_he_ops = vif->bss_conf.he_oper.params; /* the top most byte is used to indicate BSS color info */ @@ -1771,7 +1793,7 @@ static void ath12k_peer_assoc_h_qos(struct ath12k *ar, struct ieee80211_sta *sta, struct ath12k_wmi_peer_assoc_arg *arg) { - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); switch (arvif->vdev_type) { case WMI_VDEV_TYPE_AP: @@ -1929,12 +1951,47 @@ static enum wmi_phy_mode ath12k_mac_get_phymode_he(struct ath12k *ar, return MODE_UNKNOWN; } +static enum wmi_phy_mode ath12k_mac_get_phymode_eht(struct ath12k *ar, + struct ieee80211_sta *sta) +{ + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320) + 
if (sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[0] & + IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ) + return MODE_11BE_EHT320; + + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) { + if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) + return MODE_11BE_EHT160; + + if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) + return MODE_11BE_EHT80_80; + + ath12k_warn(ar->ab, "invalid EHT PHY capability info for 160 Mhz: %d\n", + sta->deflink.he_cap.he_cap_elem.phy_cap_info[0]); + + return MODE_11BE_EHT160; + } + + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) + return MODE_11BE_EHT80; + + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) + return MODE_11BE_EHT40; + + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) + return MODE_11BE_EHT20; + + return MODE_UNKNOWN; +} + static void ath12k_peer_assoc_h_phymode(struct ath12k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ath12k_wmi_peer_assoc_arg *arg) { - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct cfg80211_chan_def def; enum nl80211_band band; const u8 *ht_mcs_mask; @@ -1950,7 +2007,12 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar, switch (band) { case NL80211_BAND_2GHZ: - if (sta->deflink.he_cap.has_he) { + if (sta->deflink.eht_cap.has_eht) { + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) + phymode = MODE_11BE_EHT40_2G; + else + phymode = MODE_11BE_EHT20_2G; + } else if (sta->deflink.he_cap.has_he) { if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) phymode = MODE_11AX_HE80_2G; else if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) @@ -1977,8 +2039,10 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar, break; case NL80211_BAND_5GHZ: case NL80211_BAND_6GHZ: - /* Check HE first */ - if (sta->deflink.he_cap.has_he) { + /* Check EHT first */ + if (sta->deflink.eht_cap.has_eht) { + phymode = ath12k_mac_get_phymode_eht(ar, sta); + } else if (sta->deflink.he_cap.has_he) { phymode = ath12k_mac_get_phymode_he(ar, sta); } else if (sta->deflink.vht_cap.vht_supported && !ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) { @@ -2004,6 +2068,152 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar, WARN_ON(phymode == MODE_UNKNOWN); } +static void ath12k_mac_set_eht_mcs(u8 rx_tx_mcs7, u8 rx_tx_mcs9, + u8 rx_tx_mcs11, u8 rx_tx_mcs13, + u32 *rx_mcs, u32 *tx_mcs) +{ + *rx_mcs = 0; + u32p_replace_bits(rx_mcs, + u8_get_bits(rx_tx_mcs7, IEEE80211_EHT_MCS_NSS_RX), + WMI_EHT_MCS_NSS_0_7); + u32p_replace_bits(rx_mcs, + u8_get_bits(rx_tx_mcs9, IEEE80211_EHT_MCS_NSS_RX), + WMI_EHT_MCS_NSS_8_9); + u32p_replace_bits(rx_mcs, + u8_get_bits(rx_tx_mcs11, IEEE80211_EHT_MCS_NSS_RX), + WMI_EHT_MCS_NSS_10_11); + u32p_replace_bits(rx_mcs, + u8_get_bits(rx_tx_mcs13, IEEE80211_EHT_MCS_NSS_RX), + WMI_EHT_MCS_NSS_12_13); + + *tx_mcs = 0; + u32p_replace_bits(tx_mcs, + u8_get_bits(rx_tx_mcs7, IEEE80211_EHT_MCS_NSS_TX), + WMI_EHT_MCS_NSS_0_7); + u32p_replace_bits(tx_mcs, + u8_get_bits(rx_tx_mcs9, IEEE80211_EHT_MCS_NSS_TX), + WMI_EHT_MCS_NSS_8_9); + u32p_replace_bits(tx_mcs, + u8_get_bits(rx_tx_mcs11, IEEE80211_EHT_MCS_NSS_TX), + WMI_EHT_MCS_NSS_10_11); + u32p_replace_bits(tx_mcs, + u8_get_bits(rx_tx_mcs13, IEEE80211_EHT_MCS_NSS_TX), + WMI_EHT_MCS_NSS_12_13); +} + +static void ath12k_mac_set_eht_ppe_threshold(const u8 *ppe_thres, + struct ath12k_wmi_ppe_threshold_arg *ppet) +{ + u32 bit_pos = 
IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE, val; + u8 nss, ru, i; + u8 ppet_bit_len_per_ru = IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2; + + ppet->numss_m1 = u8_get_bits(ppe_thres[0], IEEE80211_EHT_PPE_THRES_NSS_MASK); + ppet->ru_bit_mask = u16_get_bits(get_unaligned_le16(ppe_thres), + IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); + + for (nss = 0; nss <= ppet->numss_m1; nss++) { + for (ru = 0; + ru < hweight16(IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); + ru++) { + if ((ppet->ru_bit_mask & BIT(ru)) == 0) + continue; + + val = 0; + for (i = 0; i < ppet_bit_len_per_ru; i++) { + val |= (((ppe_thres[bit_pos / 8] >> + (bit_pos % 8)) & 0x1) << i); + bit_pos++; + } + ppet->ppet16_ppet8_ru3_ru0[nss] |= + (val << (ru * ppet_bit_len_per_ru)); + } + } +} + +static void ath12k_peer_assoc_h_eht(struct ath12k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ath12k_wmi_peer_assoc_arg *arg) +{ + const struct ieee80211_sta_eht_cap *eht_cap = &sta->deflink.eht_cap; + const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; + const struct ieee80211_eht_mcs_nss_supp_20mhz_only *bw_20; + const struct ieee80211_eht_mcs_nss_supp_bw *bw; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + u32 *rx_mcs, *tx_mcs; + + if (!sta->deflink.he_cap.has_he || !eht_cap->has_eht) + return; + + arg->eht_flag = true; + + if ((eht_cap->eht_cap_elem.phy_cap_info[5] & + IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) && + eht_cap->eht_ppe_thres[0] != 0) + ath12k_mac_set_eht_ppe_threshold(eht_cap->eht_ppe_thres, + &arg->peer_eht_ppet); + + memcpy(arg->peer_eht_cap_mac, eht_cap->eht_cap_elem.mac_cap_info, + sizeof(eht_cap->eht_cap_elem.mac_cap_info)); + memcpy(arg->peer_eht_cap_phy, eht_cap->eht_cap_elem.phy_cap_info, + sizeof(eht_cap->eht_cap_elem.phy_cap_info)); + + rx_mcs = arg->peer_eht_rx_mcs_set; + tx_mcs = arg->peer_eht_tx_mcs_set; + + switch (sta->deflink.bandwidth) { + case IEEE80211_STA_RX_BW_320: + bw = &eht_cap->eht_mcs_nss_supp.bw._320; + ath12k_mac_set_eht_mcs(bw->rx_tx_mcs9_max_nss, + bw->rx_tx_mcs9_max_nss, + bw->rx_tx_mcs11_max_nss, + bw->rx_tx_mcs13_max_nss, + &rx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_320], + &tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_320]); + arg->peer_eht_mcs_count++; + fallthrough; + case IEEE80211_STA_RX_BW_160: + bw = &eht_cap->eht_mcs_nss_supp.bw._160; + ath12k_mac_set_eht_mcs(bw->rx_tx_mcs9_max_nss, + bw->rx_tx_mcs9_max_nss, + bw->rx_tx_mcs11_max_nss, + bw->rx_tx_mcs13_max_nss, + &rx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_160], + &tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_160]); + arg->peer_eht_mcs_count++; + fallthrough; + default: + if ((he_cap->he_cap_elem.phy_cap_info[0] & + (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)) == 0) { + bw_20 = &eht_cap->eht_mcs_nss_supp.only_20mhz; + + ath12k_mac_set_eht_mcs(bw_20->rx_tx_mcs7_max_nss, + bw_20->rx_tx_mcs9_max_nss, + bw_20->rx_tx_mcs11_max_nss, + bw_20->rx_tx_mcs13_max_nss, + &rx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_80], + &tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_80]); + } else { + bw = &eht_cap->eht_mcs_nss_supp.bw._80; + ath12k_mac_set_eht_mcs(bw->rx_tx_mcs9_max_nss, + bw->rx_tx_mcs9_max_nss, + bw->rx_tx_mcs11_max_nss, + bw->rx_tx_mcs13_max_nss, + &rx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_80], + &tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_80]); + } + + arg->peer_eht_mcs_count++; + break; + } + + arg->punct_bitmap = ~arvif->punct_bitmap; +} + static void 
ath12k_peer_assoc_prepare(struct ath12k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -2023,6 +2233,7 @@ static void ath12k_peer_assoc_prepare(struct ath12k *ar, ath12k_peer_assoc_h_ht(ar, vif, sta, arg); ath12k_peer_assoc_h_vht(ar, vif, sta, arg); ath12k_peer_assoc_h_he(ar, vif, sta, arg); + ath12k_peer_assoc_h_eht(ar, vif, sta, arg); ath12k_peer_assoc_h_qos(ar, vif, sta, arg); ath12k_peer_assoc_h_phymode(ar, vif, sta, arg); ath12k_peer_assoc_h_smps(sta, arg); @@ -2055,7 +2266,7 @@ static void ath12k_bss_assoc(struct ieee80211_hw *hw, struct ieee80211_bss_conf *bss_conf) { struct ath12k *ar = hw->priv; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct ath12k_wmi_peer_assoc_arg peer_arg; struct ieee80211_sta *ap_sta; struct ath12k_peer *peer; @@ -2149,7 +2360,7 @@ static void ath12k_bss_disassoc(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath12k *ar = hw->priv; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); int ret; lockdep_assert_held(&ar->conf_mutex); @@ -2196,7 +2407,7 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar, struct ieee80211_vif *vif, struct cfg80211_chan_def *def) { - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); const struct ieee80211_supported_band *sband; u8 basic_rate_idx; int hw_rate_code; @@ -2314,7 +2525,7 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_BEACON) { param_id = WMI_PDEV_PARAM_BEACON_TX_MODE; - param_value = WMI_BEACON_STAGGERED_MODE; + param_value = WMI_BEACON_BURST_MODE; ret = ath12k_wmi_pdev_set_param(ar, param_id, param_value, ar->pdev->pdev_id); if (ret) @@ -2322,7 +2533,7 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, arvif->vdev_id); else ath12k_dbg(ar->ab, ATH12K_DBG_MAC, - "Set staggered beacon mode for VDEV: %d\n", + "Set burst beacon mode for VDEV: %d\n", arvif->vdev_id); ret = ath12k_mac_setup_bcn_tmpl(arvif); @@ -2550,9 +2761,10 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, } } - if (changed & BSS_CHANGED_FILS_DISCOVERY || - changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP) - ath12k_mac_fils_discovery(arvif, info); + ath12k_mac_fils_discovery(arvif, info); + + if (changed & BSS_CHANGED_EHT_PUNCTURING) + arvif->punct_bitmap = info->eht_puncturing; mutex_unlock(&ar->conf_mutex); } @@ -2566,18 +2778,21 @@ void __ath12k_mac_scan_finish(struct ath12k *ar) break; case ATH12K_SCAN_RUNNING: case ATH12K_SCAN_ABORTING: + if (ar->scan.is_roc && ar->scan.roc_notify) + ieee80211_remain_on_channel_expired(ar->hw); + fallthrough; + case ATH12K_SCAN_STARTING: if (!ar->scan.is_roc) { struct cfg80211_scan_info info = { - .aborted = (ar->scan.state == - ATH12K_SCAN_ABORTING), + .aborted = ((ar->scan.state == + ATH12K_SCAN_ABORTING) || + (ar->scan.state == + ATH12K_SCAN_STARTING)), }; ieee80211_scan_completed(ar->hw, &info); - } else if (ar->scan.roc_notify) { - ieee80211_remain_on_channel_expired(ar->hw); } - fallthrough; - case ATH12K_SCAN_STARTING: + ar->scan.state = ATH12K_SCAN_IDLE; ar->scan_channel = NULL; ar->scan.roc_freq = 0; @@ -2755,9 +2970,12 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw, arg.scan_id = ATH12K_SCAN_ID; if (req->ie_len) { + arg.extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL); + if (!arg.extraie.ptr) { + ret = -ENOMEM; + goto exit; + } arg.extraie.len = req->ie_len; - arg.extraie.ptr = 
kzalloc(req->ie_len, GFP_KERNEL); - memcpy(arg.extraie.ptr, req->ie, req->ie_len); } if (req->n_ssids) { @@ -2770,6 +2988,14 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw, if (req->n_channels) { arg.num_chan = req->n_channels; + arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list), + GFP_KERNEL); + + if (!arg.chan_list) { + ret = -ENOMEM; + goto exit; + } + for (i = 0; i < arg.num_chan; i++) arg.chan_list[i] = req->channels[i]->center_freq; } @@ -2788,6 +3014,8 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw, ATH12K_MAC_SCAN_TIMEOUT_MSECS)); exit: + kfree(arg.chan_list); + if (req->ie_len) kfree(arg.extraie.ptr); @@ -3019,7 +3247,7 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, ath12k_warn(ab, "peer %pM disappeared!\n", peer_addr); if (sta) { - arsta = (struct ath12k_sta *)sta->drv_priv; + arsta = ath12k_sta_to_arsta(sta); switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: @@ -3192,7 +3420,7 @@ static int ath12k_station_disassoc(struct ath12k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); int ret; lockdep_assert_held(&ar->conf_mutex); @@ -3409,7 +3637,7 @@ static int ath12k_mac_station_add(struct ath12k *ar, { struct ath12k_base *ab = ar->ab; struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); - struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv; + struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); struct ath12k_wmi_peer_create_arg peer_param; int ret; @@ -3516,7 +3744,7 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw, { struct ath12k *ar = hw->priv; struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); - struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv; + struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); struct ath12k_peer *peer; int ret = 0; @@ -3628,7 +3856,7 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw, struct ieee80211_sta *sta) { struct ath12k *ar = hw->priv; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); int ret; s16 txpwr; @@ -3664,8 +3892,8 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw, u32 changed) { struct ath12k *ar = hw->priv; - struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct ath12k_peer *peer; u32 bw, smps; @@ -3791,7 +4019,7 @@ static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw, const struct ieee80211_tx_queue_params *params) { struct ath12k *ar = hw->priv; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct wmi_wmm_params_arg *p = NULL; int ret; @@ -4209,18 +4437,228 @@ static __le16 ath12k_mac_setup_he_6ghz_cap(struct ath12k_pdev_cap *pcap, return cpu_to_le16(bcap->he_6ghz_capa); } -static int ath12k_mac_copy_he_cap(struct ath12k *ar, - struct ath12k_pdev_cap *cap, - struct ieee80211_sband_iftype_data *data, - int band) +static void ath12k_mac_copy_he_cap(struct ath12k_band_cap *band_cap, + int iftype, u8 num_tx_chains, + struct ieee80211_sta_he_cap *he_cap) +{ + struct ieee80211_he_cap_elem *he_cap_elem = &he_cap->he_cap_elem; + struct ieee80211_he_mcs_nss_supp *mcs_nss = &he_cap->he_mcs_nss_supp; + + he_cap->has_he = true; + memcpy(he_cap_elem->mac_cap_info, band_cap->he_cap_info, + 
sizeof(he_cap_elem->mac_cap_info)); + memcpy(he_cap_elem->phy_cap_info, band_cap->he_cap_phy_info, + sizeof(he_cap_elem->phy_cap_info)); + + he_cap_elem->mac_cap_info[1] &= + IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK; + + he_cap_elem->phy_cap_info[5] &= + ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK; + he_cap_elem->phy_cap_info[5] &= + ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK; + he_cap_elem->phy_cap_info[5] |= num_tx_chains - 1; + + switch (iftype) { + case NL80211_IFTYPE_AP: + he_cap_elem->phy_cap_info[3] &= + ~IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK; + he_cap_elem->phy_cap_info[9] |= + IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU; + break; + case NL80211_IFTYPE_STATION: + he_cap_elem->mac_cap_info[0] &= ~IEEE80211_HE_MAC_CAP0_TWT_RES; + he_cap_elem->mac_cap_info[0] |= IEEE80211_HE_MAC_CAP0_TWT_REQ; + he_cap_elem->phy_cap_info[9] |= + IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU; + break; + case NL80211_IFTYPE_MESH_POINT: + ath12k_mac_filter_he_cap_mesh(he_cap_elem); + break; + } + + mcs_nss->rx_mcs_80 = cpu_to_le16(band_cap->he_mcs & 0xffff); + mcs_nss->tx_mcs_80 = cpu_to_le16(band_cap->he_mcs & 0xffff); + mcs_nss->rx_mcs_160 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); + mcs_nss->tx_mcs_160 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); + mcs_nss->rx_mcs_80p80 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); + mcs_nss->tx_mcs_80p80 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); + + memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres)); + if (he_cap_elem->phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) + ath12k_gen_ppe_thresh(&band_cap->he_ppet, he_cap->ppe_thres); +} + +static void +ath12k_mac_copy_eht_mcs_nss(struct ath12k_band_cap *band_cap, + struct ieee80211_eht_mcs_nss_supp *mcs_nss, + const struct ieee80211_he_cap_elem *he_cap, + const struct ieee80211_eht_cap_elem_fixed *eht_cap) +{ + if ((he_cap->phy_cap_info[0] & + (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)) == 0) + memcpy(&mcs_nss->only_20mhz, &band_cap->eht_mcs_20_only, + sizeof(struct ieee80211_eht_mcs_nss_supp_20mhz_only)); + + if (he_cap->phy_cap_info[0] & + (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G)) + memcpy(&mcs_nss->bw._80, &band_cap->eht_mcs_80, + sizeof(struct ieee80211_eht_mcs_nss_supp_bw)); + + if (he_cap->phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) + memcpy(&mcs_nss->bw._160, &band_cap->eht_mcs_160, + sizeof(struct ieee80211_eht_mcs_nss_supp_bw)); + + if (eht_cap->phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ) + memcpy(&mcs_nss->bw._320, &band_cap->eht_mcs_320, + sizeof(struct ieee80211_eht_mcs_nss_supp_bw)); +} + +static void ath12k_mac_copy_eht_ppe_thresh(struct ath12k_wmi_ppe_threshold_arg *fw_ppet, + struct ieee80211_sta_eht_cap *cap) +{ + u16 bit = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE; + u8 i, nss, ru, ppet_bit_len_per_ru = IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2; + + u8p_replace_bits(&cap->eht_ppe_thres[0], fw_ppet->numss_m1, + IEEE80211_EHT_PPE_THRES_NSS_MASK); + + u16p_replace_bits((u16 *)&cap->eht_ppe_thres[0], fw_ppet->ru_bit_mask, + IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); + + for (nss = 0; nss <= fw_ppet->numss_m1; nss++) { + for (ru = 0; + ru < 
hweight16(IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); + ru++) { + u32 val = 0; + + if ((fw_ppet->ru_bit_mask & BIT(ru)) == 0) + continue; + + u32p_replace_bits(&val, fw_ppet->ppet16_ppet8_ru3_ru0[nss] >> + (ru * ppet_bit_len_per_ru), + GENMASK(ppet_bit_len_per_ru - 1, 0)); + + for (i = 0; i < ppet_bit_len_per_ru; i++) { + cap->eht_ppe_thres[bit / 8] |= + (((val >> i) & 0x1) << ((bit % 8))); + bit++; + } + } + } +} + +static void +ath12k_mac_filter_eht_cap_mesh(struct ieee80211_eht_cap_elem_fixed + *eht_cap_elem) { + u8 m; + + m = IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS; + eht_cap_elem->mac_cap_info[0] &= ~m; + + m = IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO; + eht_cap_elem->phy_cap_info[0] &= ~m; + + m = IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK | + IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK; + eht_cap_elem->phy_cap_info[3] &= ~m; + + m = IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO | + IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP | + IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP | + IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI; + eht_cap_elem->phy_cap_info[4] &= ~m; + + m = IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK | + IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP | + IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP | + IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK; + eht_cap_elem->phy_cap_info[5] &= ~m; + + m = IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK; + eht_cap_elem->phy_cap_info[6] &= ~m; + + m = IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ; + eht_cap_elem->phy_cap_info[7] &= ~m; +} + +static void ath12k_mac_copy_eht_cap(struct ath12k *ar, + struct ath12k_band_cap *band_cap, + struct ieee80211_he_cap_elem *he_cap_elem, + int iftype, + struct ieee80211_sta_eht_cap *eht_cap) +{ + struct ieee80211_eht_cap_elem_fixed *eht_cap_elem = &eht_cap->eht_cap_elem; + + memset(eht_cap, 0, sizeof(struct ieee80211_sta_eht_cap)); + + if (!(test_bit(WMI_TLV_SERVICE_11BE, ar->ab->wmi_ab.svc_map))) + return; + + eht_cap->has_eht = true; + memcpy(eht_cap_elem->mac_cap_info, band_cap->eht_cap_mac_info, + sizeof(eht_cap_elem->mac_cap_info)); + memcpy(eht_cap_elem->phy_cap_info, band_cap->eht_cap_phy_info, + sizeof(eht_cap_elem->phy_cap_info)); + + switch (iftype) { + case NL80211_IFTYPE_AP: + eht_cap_elem->phy_cap_info[0] &= + ~IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ; + eht_cap_elem->phy_cap_info[4] &= + ~IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO; + eht_cap_elem->phy_cap_info[5] &= + ~IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP; + break; + case NL80211_IFTYPE_STATION: + eht_cap_elem->phy_cap_info[7] &= + ~(IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ); + eht_cap_elem->phy_cap_info[7] &= + ~(IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ); + break; + case NL80211_IFTYPE_MESH_POINT: + ath12k_mac_filter_eht_cap_mesh(eht_cap_elem); + break; + default: + break; + } + + ath12k_mac_copy_eht_mcs_nss(band_cap, &eht_cap->eht_mcs_nss_supp, + he_cap_elem, eht_cap_elem); + + if (eht_cap_elem->phy_cap_info[5] & + 
IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) + ath12k_mac_copy_eht_ppe_thresh(&band_cap->eht_ppet, eht_cap); +} + +static int ath12k_mac_copy_sband_iftype_data(struct ath12k *ar, + struct ath12k_pdev_cap *cap, + struct ieee80211_sband_iftype_data *data, + int band) +{ + struct ath12k_band_cap *band_cap = &cap->band[band]; int i, idx = 0; for (i = 0; i < NUM_NL80211_IFTYPES; i++) { struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap; - struct ath12k_band_cap *band_cap = &cap->band[band]; - struct ieee80211_he_cap_elem *he_cap_elem = - &he_cap->he_cap_elem; switch (i) { case NL80211_IFTYPE_STATION: @@ -4233,102 +4671,56 @@ static int ath12k_mac_copy_he_cap(struct ath12k *ar, } data[idx].types_mask = BIT(i); - he_cap->has_he = true; - memcpy(he_cap_elem->mac_cap_info, band_cap->he_cap_info, - sizeof(he_cap_elem->mac_cap_info)); - memcpy(he_cap_elem->phy_cap_info, band_cap->he_cap_phy_info, - sizeof(he_cap_elem->phy_cap_info)); - - he_cap_elem->mac_cap_info[1] &= - IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK; - - he_cap_elem->phy_cap_info[5] &= - ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK; - he_cap_elem->phy_cap_info[5] &= - ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK; - he_cap_elem->phy_cap_info[5] |= ar->num_tx_chains - 1; - - switch (i) { - case NL80211_IFTYPE_AP: - he_cap_elem->phy_cap_info[3] &= - ~IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK; - he_cap_elem->phy_cap_info[9] |= - IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU; - break; - case NL80211_IFTYPE_STATION: - he_cap_elem->mac_cap_info[0] &= - ~IEEE80211_HE_MAC_CAP0_TWT_RES; - he_cap_elem->mac_cap_info[0] |= - IEEE80211_HE_MAC_CAP0_TWT_REQ; - he_cap_elem->phy_cap_info[9] |= - IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU; - break; - case NL80211_IFTYPE_MESH_POINT: - ath12k_mac_filter_he_cap_mesh(he_cap_elem); - break; - } - - he_cap->he_mcs_nss_supp.rx_mcs_80 = - cpu_to_le16(band_cap->he_mcs & 0xffff); - he_cap->he_mcs_nss_supp.tx_mcs_80 = - cpu_to_le16(band_cap->he_mcs & 0xffff); - he_cap->he_mcs_nss_supp.rx_mcs_160 = - cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); - he_cap->he_mcs_nss_supp.tx_mcs_160 = - cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); - he_cap->he_mcs_nss_supp.rx_mcs_80p80 = - cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); - he_cap->he_mcs_nss_supp.tx_mcs_80p80 = - cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); - - memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres)); - if (he_cap_elem->phy_cap_info[6] & - IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) - ath12k_gen_ppe_thresh(&band_cap->he_ppet, - he_cap->ppe_thres); + ath12k_mac_copy_he_cap(band_cap, i, ar->num_tx_chains, he_cap); if (band == NL80211_BAND_6GHZ) { data[idx].he_6ghz_capa.capa = ath12k_mac_setup_he_6ghz_cap(cap, band_cap); } + ath12k_mac_copy_eht_cap(ar, band_cap, &he_cap->he_cap_elem, i, + &data[idx].eht_cap); idx++; } return idx; } -static void ath12k_mac_setup_he_cap(struct ath12k *ar, - struct ath12k_pdev_cap *cap) +static void ath12k_mac_setup_sband_iftype_data(struct ath12k *ar, + struct ath12k_pdev_cap *cap) { - struct ieee80211_supported_band *band; + struct ieee80211_supported_band *sband; + enum nl80211_band band; int count; if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) { - count = ath12k_mac_copy_he_cap(ar, cap, - ar->mac.iftype[NL80211_BAND_2GHZ], - NL80211_BAND_2GHZ); - band = &ar->mac.sbands[NL80211_BAND_2GHZ]; - band->iftype_data = ar->mac.iftype[NL80211_BAND_2GHZ]; - band->n_iftype_data = count; + band = NL80211_BAND_2GHZ; + count = 
ath12k_mac_copy_sband_iftype_data(ar, cap, + ar->mac.iftype[band], + band); + sband = &ar->mac.sbands[band]; + _ieee80211_set_sband_iftype_data(sband, ar->mac.iftype[band], + count); } if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) { - count = ath12k_mac_copy_he_cap(ar, cap, - ar->mac.iftype[NL80211_BAND_5GHZ], - NL80211_BAND_5GHZ); - band = &ar->mac.sbands[NL80211_BAND_5GHZ]; - band->iftype_data = ar->mac.iftype[NL80211_BAND_5GHZ]; - band->n_iftype_data = count; + band = NL80211_BAND_5GHZ; + count = ath12k_mac_copy_sband_iftype_data(ar, cap, + ar->mac.iftype[band], + band); + sband = &ar->mac.sbands[band]; + _ieee80211_set_sband_iftype_data(sband, ar->mac.iftype[band], + count); } if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP && ar->supports_6ghz) { - count = ath12k_mac_copy_he_cap(ar, cap, - ar->mac.iftype[NL80211_BAND_6GHZ], - NL80211_BAND_6GHZ); - band = &ar->mac.sbands[NL80211_BAND_6GHZ]; - band->iftype_data = ar->mac.iftype[NL80211_BAND_6GHZ]; - band->n_iftype_data = count; + band = NL80211_BAND_6GHZ; + count = ath12k_mac_copy_sband_iftype_data(ar, cap, + ar->mac.iftype[band], + band); + sband = &ar->mac.sbands[band]; + _ieee80211_set_sband_iftype_data(sband, ar->mac.iftype[band], + count); } } @@ -4373,7 +4765,7 @@ static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant) /* Reload HT/VHT/HE capability */ ath12k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, NULL); - ath12k_mac_setup_he_cap(ar, &ar->pdev->cap); + ath12k_mac_setup_sband_iftype_data(ar, &ar->pdev->cap); return 0; } @@ -4767,6 +5159,63 @@ err: return ret; } +int ath12k_mac_rfkill_config(struct ath12k *ar) +{ + struct ath12k_base *ab = ar->ab; + u32 param; + int ret; + + if (ab->hw_params->rfkill_pin == 0) + return -EOPNOTSUPP; + + ath12k_dbg(ab, ATH12K_DBG_MAC, + "mac rfkill_pin %d rfkill_cfg %d rfkill_on_level %d", + ab->hw_params->rfkill_pin, ab->hw_params->rfkill_cfg, + ab->hw_params->rfkill_on_level); + + param = u32_encode_bits(ab->hw_params->rfkill_on_level, + WMI_RFKILL_CFG_RADIO_LEVEL) | + u32_encode_bits(ab->hw_params->rfkill_pin, + WMI_RFKILL_CFG_GPIO_PIN_NUM) | + u32_encode_bits(ab->hw_params->rfkill_cfg, + WMI_RFKILL_CFG_PIN_AS_GPIO); + + ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_HW_RFKILL_CONFIG, + param, ar->pdev->pdev_id); + if (ret) { + ath12k_warn(ab, + "failed to set rfkill config 0x%x: %d\n", + param, ret); + return ret; + } + + return 0; +} + +int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable) +{ + enum wmi_rfkill_enable_radio param; + int ret; + + if (enable) + param = WMI_RFKILL_ENABLE_RADIO_ON; + else + param = WMI_RFKILL_ENABLE_RADIO_OFF; + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac %d rfkill enable %d", + ar->pdev_idx, param); + + ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RFKILL_ENABLE, + param, ar->pdev->pdev_id); + if (ret) { + ath12k_warn(ar->ab, "failed to set rfkill enable param %d: %d\n", + param, ret); + return ret; + } + + return 0; +} + static void ath12k_mac_op_stop(struct ieee80211_hw *hw) { struct ath12k *ar = hw->priv; @@ -4787,6 +5236,7 @@ static void ath12k_mac_op_stop(struct ieee80211_hw *hw) cancel_delayed_work_sync(&ar->scan.timeout); cancel_work_sync(&ar->regd_update_work); + cancel_work_sync(&ar->ab->rfkill_work); spin_lock_bh(&ar->data_lock); list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) { @@ -5201,7 +5651,7 @@ err: static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif) { - struct ath12k_tx_desc_info *tx_desc_info, *tmp1; + struct ath12k_tx_desc_info *tx_desc_info; struct 
ath12k_skb_cb *skb_cb; struct sk_buff *skb; int i; @@ -5209,8 +5659,8 @@ static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) { spin_lock_bh(&dp->tx_desc_lock[i]); - list_for_each_entry_safe(tx_desc_info, tmp1, &dp->tx_desc_used_list[i], - list) { + list_for_each_entry(tx_desc_info, &dp->tx_desc_used_list[i], + list) { skb = tx_desc_info->skb; if (!skb) continue; @@ -5319,7 +5769,6 @@ static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw, mutex_lock(&ar->conf_mutex); - changed_flags &= SUPPORTED_FILTERS; *total_flags &= SUPPORTED_FILTERS; ar->filter_flags = *total_flags; @@ -5337,8 +5786,8 @@ static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw, "fail to set monitor filter: %d\n", ret); } ath12k_dbg(ar->ab, ATH12K_DBG_MAC, - "changed_flags:0x%x, total_flags:0x%x, reset_flag:%d\n", - changed_flags, *total_flags, reset_flag); + "total_flags:0x%x, reset_flag:%d\n", + *total_flags, reset_flag); mutex_unlock(&ar->conf_mutex); } @@ -5448,14 +5897,68 @@ static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw, mutex_unlock(&ar->conf_mutex); } +static enum wmi_phy_mode +ath12k_mac_check_down_grade_phy_mode(struct ath12k *ar, + enum wmi_phy_mode mode, + enum nl80211_band band, + enum nl80211_iftype type) +{ + struct ieee80211_sta_eht_cap *eht_cap; + enum wmi_phy_mode down_mode; + + if (mode < MODE_11BE_EHT20) + return mode; + + eht_cap = &ar->mac.iftype[band][type].eht_cap; + if (eht_cap->has_eht) + return mode; + + switch (mode) { + case MODE_11BE_EHT20: + down_mode = MODE_11AX_HE20; + break; + case MODE_11BE_EHT40: + down_mode = MODE_11AX_HE40; + break; + case MODE_11BE_EHT80: + down_mode = MODE_11AX_HE80; + break; + case MODE_11BE_EHT80_80: + down_mode = MODE_11AX_HE80_80; + break; + case MODE_11BE_EHT160: + case MODE_11BE_EHT160_160: + case MODE_11BE_EHT320: + down_mode = MODE_11AX_HE160; + break; + case MODE_11BE_EHT20_2G: + down_mode = MODE_11AX_HE20_2G; + break; + case MODE_11BE_EHT40_2G: + down_mode = MODE_11AX_HE40_2G; + break; + default: + down_mode = mode; + break; + } + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + "mac vdev start phymode %s downgrade to %s\n", + ath12k_mac_phymode_str(mode), + ath12k_mac_phymode_str(down_mode)); + + return down_mode; +} + static int ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif, - const struct cfg80211_chan_def *chandef, + struct ieee80211_chanctx_conf *ctx, bool restart) { struct ath12k *ar = arvif->ar; struct ath12k_base *ab = ar->ab; struct wmi_vdev_start_req_arg arg = {}; + const struct cfg80211_chan_def *chandef = &ctx->def; int he_support = arvif->vif->bss_conf.he_support; int ret; @@ -5466,12 +5969,16 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif, arg.vdev_id = arvif->vdev_id; arg.dtim_period = arvif->dtim_period; arg.bcn_intval = arvif->beacon_interval; + arg.punct_bitmap = ~arvif->punct_bitmap; arg.freq = chandef->chan->center_freq; arg.band_center_freq1 = chandef->center_freq1; arg.band_center_freq2 = chandef->center_freq2; arg.mode = ath12k_phymodes[chandef->chan->band][chandef->width]; + arg.mode = ath12k_mac_check_down_grade_phy_mode(ar, arg.mode, + chandef->chan->band, + arvif->vif->type); arg.min_power = 0; arg.max_power = chandef->chan->max_power * 2; arg.max_reg_power = chandef->chan->max_reg_power * 2; @@ -5488,6 +5995,8 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif, /* For now allow DFS for AP mode */ arg.chan_radar = !!(chandef->chan->flags & IEEE80211_CHAN_RADAR); + arg.freq2_radar = 
ctx->radar_enabled;
+
 	arg.passive = arg.chan_radar;
 
 	spin_lock_bh(&ab->base_lock);
@@ -5508,9 +6017,9 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
 	arg.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
 
 	ath12k_dbg(ab, ATH12K_DBG_MAC,
-		   "mac vdev %d start center_freq %d phymode %s\n",
+		   "mac vdev %d start center_freq %d phymode %s punct_bitmap 0x%x\n",
 		   arg.vdev_id, arg.freq,
-		   ath12k_mac_phymode_str(arg.mode));
+		   ath12k_mac_phymode_str(arg.mode), arg.punct_bitmap);
 
 	ret = ath12k_wmi_vdev_start(ar, &arg, restart);
 	if (ret) {
@@ -5595,15 +6104,15 @@ err:
 }
 
 static int ath12k_mac_vdev_start(struct ath12k_vif *arvif,
-				 const struct cfg80211_chan_def *chandef)
+				 struct ieee80211_chanctx_conf *ctx)
 {
-	return ath12k_mac_vdev_start_restart(arvif, chandef, false);
+	return ath12k_mac_vdev_start_restart(arvif, ctx, false);
 }
 
 static int ath12k_mac_vdev_restart(struct ath12k_vif *arvif,
-				   const struct cfg80211_chan_def *chandef)
+				   struct ieee80211_chanctx_conf *ctx)
 {
-	return ath12k_mac_vdev_start_restart(arvif, chandef, true);
+	return ath12k_mac_vdev_start_restart(arvif, ctx, true);
 }
 
 struct ath12k_mac_change_chanctx_arg {
@@ -5659,7 +6168,7 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
 	lockdep_assert_held(&ar->conf_mutex);
 
 	for (i = 0; i < n_vifs; i++) {
-		arvif = (void *)vifs[i].vif->drv_priv;
+		arvif = ath12k_vif_to_arvif(vifs[i].vif);
 
 		if (vifs[i].vif->type == NL80211_IFTYPE_MONITOR)
 			monitor_vif = true;
@@ -5693,18 +6202,33 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
 	/* TODO: Update ar->rx_channel */
 
 	for (i = 0; i < n_vifs; i++) {
-		arvif = (void *)vifs[i].vif->drv_priv;
+		arvif = ath12k_vif_to_arvif(vifs[i].vif);
 
 		if (WARN_ON(!arvif->is_started))
 			continue;
 
-		if (WARN_ON(!arvif->is_up))
-			continue;
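Editor's note: the branch below replaces the old WARN_ON(!arvif->is_up) bail-out so a channel switch now handles both vdev states. A standalone sketch of the rule being encoded (illustrative only):

#include <stdbool.h>

enum vdev_chan_switch_action {
	VDEV_RESTART,		/* firmware accepts restart for an up vdev */
	VDEV_STOP_THEN_START,	/* a down vdev must be stopped and started */
};

static enum vdev_chan_switch_action
pick_chan_switch_action(bool vdev_is_up)
{
	return vdev_is_up ? VDEV_RESTART : VDEV_STOP_THEN_START;
}

+		/* Firmware expects vdev_restart only if vdev is up.
+		 * If vdev is down then it expects vdev_stop->vdev_start.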
+ */ + if (arvif->is_up) { + ret = ath12k_mac_vdev_restart(arvif, vifs[i].new_ctx); + if (ret) { + ath12k_warn(ab, "failed to restart vdev %d: %d\n", + arvif->vdev_id, ret); + continue; + } + } else { + ret = ath12k_mac_vdev_stop(arvif); + if (ret) { + ath12k_warn(ab, "failed to stop vdev %d: %d\n", + arvif->vdev_id, ret); + continue; + } - ret = ath12k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def); - if (ret) { - ath12k_warn(ab, "failed to restart vdev %d: %d\n", - arvif->vdev_id, ret); + ret = ath12k_mac_vdev_start(arvif, vifs[i].new_ctx); + if (ret) + ath12k_warn(ab, "failed to start vdev %d: %d\n", + arvif->vdev_id, ret); continue; } @@ -5777,7 +6301,8 @@ static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw, if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL)) goto unlock; - if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) + if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH || + changed & IEEE80211_CHANCTX_CHANGE_RADAR) ath12k_mac_update_active_vif_chan(ar, ctx); /* TODO: Recalc radar detection */ @@ -5791,13 +6316,13 @@ static int ath12k_start_vdev_delay(struct ieee80211_hw *hw, { struct ath12k *ar = hw->priv; struct ath12k_base *ab = ar->ab; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); int ret; if (WARN_ON(arvif->is_started)) return -EBUSY; - ret = ath12k_mac_vdev_start(arvif, &arvif->chanctx.def); + ret = ath12k_mac_vdev_start(arvif, &arvif->chanctx); if (ret) { ath12k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n", arvif->vdev_id, vif->addr, @@ -5827,7 +6352,7 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, { struct ath12k *ar = hw->priv; struct ath12k_base *ab = ar->ab; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); int ret; struct ath12k_wmi_peer_create_arg param; @@ -5837,6 +6362,8 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, "mac chanctx assign ptr %pK vdev_id %i\n", ctx, arvif->vdev_id); + arvif->punct_bitmap = link_conf->eht_puncturing; + /* for some targets bss peer must be created before vdev_start */ if (ab->hw_params->vdev_start_delay && arvif->vdev_type != WMI_VDEV_TYPE_AP && @@ -5875,7 +6402,7 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, goto out; } - ret = ath12k_mac_vdev_start(arvif, &ctx->def); + ret = ath12k_mac_vdev_start(arvif, ctx); if (ret) { ath12k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n", arvif->vdev_id, vif->addr, @@ -5904,7 +6431,7 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, { struct ath12k *ar = hw->priv; struct ath12k_base *ab = ar->ab; - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); int ret; mutex_lock(&ar->conf_mutex); @@ -6235,7 +6762,7 @@ static void ath12k_mac_set_bitrate_mask_iter(void *data, struct ieee80211_sta *sta) { struct ath12k_vif *arvif = data; - struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv; + struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); struct ath12k *ar = arvif->ar; spin_lock_bh(&ar->data_lock); @@ -6267,7 +6794,7 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct cfg80211_bitrate_mask *mask) { - struct ath12k_vif *arvif = (void *)vif->drv_priv; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct cfg80211_chan_def def; struct ath12k *ar = arvif->ar; enum nl80211_band band; @@ -6388,6 +6915,7 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw, { struct ath12k *ar = 
hw->priv; struct ath12k_base *ab = ar->ab; + struct ath12k_vif *arvif; int recovery_count; if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) @@ -6416,6 +6944,26 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw, ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n"); } } + + list_for_each_entry(arvif, &ar->arvifs, list) { + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "reconfig cipher %d up %d vdev type %d\n", + arvif->key_cipher, + arvif->is_up, + arvif->vdev_type); + /* After trigger disconnect, then upper layer will + * trigger connect again, then the PN number of + * upper layer will be reset to keep up with AP + * side, hence PN number mismatch will not happen. + */ + if (arvif->is_up && + arvif->vdev_type == WMI_VDEV_TYPE_STA && + arvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE) { + ieee80211_hw_restart_disconnect(arvif->vif); + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "restart disconnect\n"); + } + } } mutex_unlock(&ar->conf_mutex); @@ -6503,7 +7051,7 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct station_info *sinfo) { - struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv; + struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); sinfo->rx_duration = arsta->rx_duration; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); @@ -6854,7 +7402,7 @@ static int __ath12k_mac_register(struct ath12k *ar) goto err; ath12k_mac_setup_ht_vht_cap(ar, cap, &ht_cap); - ath12k_mac_setup_he_cap(ar, cap); + ath12k_mac_setup_sband_iftype_data(ar, cap); ret = ath12k_mac_setup_iface_combinations(ar); if (ret) { @@ -6867,6 +7415,11 @@ static int __ath12k_mac_register(struct ath12k *ar) ar->hw->wiphy->interface_modes = ab->hw_params->interface_modes; + if (ar->hw->wiphy->bands[NL80211_BAND_2GHZ] && + ar->hw->wiphy->bands[NL80211_BAND_5GHZ] && + ar->hw->wiphy->bands[NL80211_BAND_6GHZ]) + ieee80211_hw_set(ar->hw, SINGLE_SCAN_ON_ALL_BANDS); + ieee80211_hw_set(ar->hw, SIGNAL_DBM); ieee80211_hw_set(ar->hw, SUPPORTS_PS); ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS); @@ -6943,6 +7496,8 @@ static int __ath12k_mac_register(struct ath12k *ar) NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP); } + wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_PUNCT); + ath12k_reg_init(ar); if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) { diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h index 57f4295420bb..59b4e8f5eee0 100644 --- a/drivers/net/wireless/ath/ath12k/mac.h +++ b/drivers/net/wireless/ath/ath12k/mac.h @@ -33,7 +33,7 @@ struct ath12k_generic_iter { #define IEEE80211_VHT_MCS_SUPPORT_0_11_MASK GENMASK(23, 16) #define IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11 BIT(24) -#define ATH12K_CHAN_WIDTH_NUM 8 +#define ATH12K_CHAN_WIDTH_NUM 14 #define ATH12K_TX_POWER_MAX_VAL 70 #define ATH12K_TX_POWER_MIN_VAL 0 @@ -73,4 +73,6 @@ int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx); enum rate_info_bw ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw); enum ath12k_supported_bw ath12k_mac_mac80211_bw_to_ath12k_bw(enum rate_info_bw bw); enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher); +int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable); +int ath12k_mac_rfkill_config(struct ath12k *ar); #endif diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c index 42f1140baa4f..39e640293cdc 100644 --- a/drivers/net/wireless/ath/ath12k/mhi.c +++ b/drivers/net/wireless/ath/ath12k/mhi.c @@ -366,12 +366,12 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci) 
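Editor's note: the hunk below reworks ath12k_mhi_register()'s error handling. The duplicated mhi_free_controller() calls collapse into a single free_controller label, and ab_pci->mhi_ctrl is cleared so that no later teardown path can free or dereference a stale controller handle. A standalone sketch of the pattern (illustrative names, not the driver's API):

#include <stdlib.h>

struct ctrl { int dummy; };

/* One unwind label frees the object and clears the cached pointer so
 * a subsequent unregister path cannot double-free a stale handle. */
static int register_ctrl(struct ctrl **cached, int setup_err)
{
	struct ctrl *c = malloc(sizeof(*c));
	int ret = 0;

	if (!c)
		return -1;
	*cached = c;

	if (setup_err) {	/* e.g. MSI lookup or bus registration failed */
		ret = setup_err;
		goto free_ctrl;
	}

	return 0;

free_ctrl:
	free(c);
	*cached = NULL;
	return ret;
}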
mhi_ctrl->fw_image = ab_pci->amss_path; mhi_ctrl->regs = ab->mem; mhi_ctrl->reg_len = ab->mem_len; + mhi_ctrl->rddm_size = ab->hw_params->rddm_size; ret = ath12k_mhi_get_msi(ab_pci); if (ret) { ath12k_err(ab, "failed to get msi for mhi\n"); - mhi_free_controller(mhi_ctrl); - return ret; + goto free_controller; } mhi_ctrl->iova_start = 0; @@ -388,11 +388,15 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci) ret = mhi_register_controller(mhi_ctrl, ab->hw_params->mhi_config); if (ret) { ath12k_err(ab, "failed to register to mhi bus, err = %d\n", ret); - mhi_free_controller(mhi_ctrl); - return ret; + goto free_controller; } return 0; + +free_controller: + mhi_free_controller(mhi_ctrl); + ab_pci->mhi_ctrl = NULL; + return ret; } void ath12k_mhi_unregister(struct ath12k_pci *ab_pci) diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c index 5990a55801f0..3006cd3fbe11 100644 --- a/drivers/net/wireless/ath/ath12k/pci.c +++ b/drivers/net/wireless/ath/ath12k/pci.c @@ -424,12 +424,12 @@ static void ath12k_pci_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp) disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); } -static void __ath12k_pci_ext_irq_disable(struct ath12k_base *sc) +static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab) { int i; for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { - struct ath12k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i]; + struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; ath12k_pci_ext_grp_disable(irq_grp); @@ -794,8 +794,8 @@ static void ath12k_pci_aspm_disable(struct ath12k_pci *ab_pci) u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1)); /* disable L0s and L1 */ - pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL, - ab_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC); + pcie_capability_clear_word(ab_pci->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC); set_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags); } @@ -803,8 +803,10 @@ static void ath12k_pci_aspm_disable(struct ath12k_pci *ab_pci) static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci) { if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags)) - pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL, - ab_pci->link_ctl); + pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, + ab_pci->link_ctl & + PCI_EXP_LNKCTL_ASPMC); } static void ath12k_pci_kill_tasklets(struct ath12k_base *ab) @@ -1409,5 +1411,5 @@ static void ath12k_pci_exit(void) module_exit(ath12k_pci_exit); -MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11be WLAN PCIe devices"); +MODULE_DESCRIPTION("Driver support for Qualcomm Technologies PCIe 802.11be WLAN devices"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/ath/ath12k/peer.h b/drivers/net/wireless/ath/ath12k/peer.h index b296dc0e2f67..c6edb24cbedd 100644 --- a/drivers/net/wireless/ath/ath12k/peer.h +++ b/drivers/net/wireless/ath/ath12k/peer.h @@ -44,6 +44,9 @@ struct ath12k_peer { struct ppdu_user_delayba ppdu_stats_delayba; bool delayba_flag; bool is_authorized; + + /* protected by ab->data_lock */ + bool dp_setup_done; }; void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id); diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c index b510c2de1bd4..f6e949c618d0 100644 --- a/drivers/net/wireless/ath/ath12k/qmi.c +++ b/drivers/net/wireless/ath/ath12k/qmi.c @@ -387,7 +387,7 @@ static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = { mlo_capable_valid), }, { - .data_type = 
QMI_OPT_FLAG, + .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, @@ -2213,6 +2213,7 @@ static int ath12k_qmi_request_target_cap(struct ath12k_base *ab) struct qmi_txn txn = {}; unsigned int board_id = ATH12K_BOARD_ID_DEFAULT; int ret = 0; + int r; int i; memset(&req, 0, sizeof(req)); @@ -2297,6 +2298,10 @@ static int ath12k_qmi_request_target_cap(struct ath12k_base *ab) ab->qmi.target.fw_build_timestamp, ab->qmi.target.fw_build_id); + r = ath12k_core_check_smbios(ab); + if (r) + ath12k_dbg(ab, ATH12K_DBG_QMI, "SMBIOS bdf variant name not set.\n"); + out: return ret; } @@ -2535,6 +2540,7 @@ static void ath12k_qmi_m3_free(struct ath12k_base *ab) dma_free_coherent(ab->dev, m3_mem->size, m3_mem->vaddr, m3_mem->paddr); m3_mem->vaddr = NULL; + m3_mem->size = 0; } static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab) @@ -3088,3 +3094,9 @@ void ath12k_qmi_deinit_service(struct ath12k_base *ab) ath12k_qmi_m3_free(ab); ath12k_qmi_free_target_mem_chunk(ab); } + +void ath12k_qmi_free_resource(struct ath12k_base *ab) +{ + ath12k_qmi_free_target_mem_chunk(ab); + ath12k_qmi_m3_free(ab); +} diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h index df76149c49f5..e20d6511d1ca 100644 --- a/drivers/net/wireless/ath/ath12k/qmi.h +++ b/drivers/net/wireless/ath/ath12k/qmi.h @@ -562,9 +562,8 @@ struct qmi_wlanfw_wlan_cfg_resp_msg_v01 { int ath12k_qmi_firmware_start(struct ath12k_base *ab, u32 mode); void ath12k_qmi_firmware_stop(struct ath12k_base *ab); -void ath12k_qmi_event_work(struct work_struct *work); -void ath12k_qmi_msg_recv_work(struct work_struct *work); void ath12k_qmi_deinit_service(struct ath12k_base *ab); int ath12k_qmi_init_service(struct ath12k_base *ab); +void ath12k_qmi_free_resource(struct ath12k_base *ab); #endif diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c index 6ede91ebc8e1..5c006256c82a 100644 --- a/drivers/net/wireless/ath/ath12k/reg.c +++ b/drivers/net/wireless/ath/ath12k/reg.c @@ -314,6 +314,19 @@ static u32 ath12k_map_fw_reg_flags(u16 reg_flags) return flags; } +static u32 ath12k_map_fw_phy_flags(u32 phy_flags) +{ + u32 flags = 0; + + if (phy_flags & ATH12K_REG_PHY_BITMAP_NO11AX) + flags |= NL80211_RRF_NO_HE; + + if (phy_flags & ATH12K_REG_PHY_BITMAP_NO11BE) + flags |= NL80211_RRF_NO_EHT; + + return flags; +} + static bool ath12k_reg_can_intersect(struct ieee80211_reg_rule *rule1, struct ieee80211_reg_rule *rule2) @@ -638,6 +651,7 @@ ath12k_reg_build_regd(struct ath12k_base *ab, } flags |= ath12k_map_fw_reg_flags(reg_rule->flags); + flags |= ath12k_map_fw_phy_flags(reg_info->phybitmap); ath12k_reg_update_rule(tmp_regd->reg_rules + i, reg_rule->start_freq, diff --git a/drivers/net/wireless/ath/ath12k/reg.h b/drivers/net/wireless/ath/ath12k/reg.h index 56d009a47234..35569f03042d 100644 --- a/drivers/net/wireless/ath/ath12k/reg.h +++ b/drivers/net/wireless/ath/ath12k/reg.h @@ -83,6 +83,12 @@ struct ath12k_reg_info { [WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE]; }; +/* Phy bitmaps */ +enum ath12k_reg_phy_bitmap { + ATH12K_REG_PHY_BITMAP_NO11AX = BIT(5), + ATH12K_REG_PHY_BITMAP_NO11BE = BIT(6), +}; + void ath12k_reg_init(struct ath12k *ar); void ath12k_reg_free(struct ath12k_base *ab); void ath12k_regd_update_work(struct work_struct *work); diff --git a/drivers/net/wireless/ath/ath12k/rx_desc.h b/drivers/net/wireless/ath/ath12k/rx_desc.h index f99556a253e5..c4058abc516e 100644 --- a/drivers/net/wireless/ath/ath12k/rx_desc.h +++ 
b/drivers/net/wireless/ath/ath12k/rx_desc.h @@ -221,7 +221,7 @@ struct rx_mpdu_start_qcn9274 { * PPE routing even if RXOLE CCE or flow search indicate 'Use_PPE' * This is set by SW for peers which are being handled by a * host SW/accelerator subsystem that also handles packet - * uffer management for WiFi-to-PPE routing. + * buffer management for WiFi-to-PPE routing. * * This is cleared by SW for peers which are being handled * by a different subsystem, completely disabling WiFi-to-PPE @@ -627,17 +627,18 @@ enum rx_msdu_start_reception_type { #define RX_MSDU_END_INFO5_SA_IDX_TIMEOUT BIT(0) #define RX_MSDU_END_INFO5_DA_IDX_TIMEOUT BIT(1) -#define RX_MSDU_END_INFO5_TO_DS BIT(2) -#define RX_MSDU_END_INFO5_TID GENMASK(6, 3) #define RX_MSDU_END_INFO5_SA_IS_VALID BIT(7) #define RX_MSDU_END_INFO5_DA_IS_VALID BIT(8) #define RX_MSDU_END_INFO5_DA_IS_MCBC BIT(9) #define RX_MSDU_END_INFO5_L3_HDR_PADDING GENMASK(11, 10) #define RX_MSDU_END_INFO5_FIRST_MSDU BIT(12) #define RX_MSDU_END_INFO5_LAST_MSDU BIT(13) -#define RX_MSDU_END_INFO5_FROM_DS BIT(14) #define RX_MSDU_END_INFO5_IP_CHKSUM_FAIL_COPY BIT(15) +#define RX_MSDU_END_QCN9274_INFO5_TO_DS BIT(2) +#define RX_MSDU_END_QCN9274_INFO5_TID GENMASK(6, 3) +#define RX_MSDU_END_QCN9274_INFO5_FROM_DS BIT(14) + #define RX_MSDU_END_INFO6_MSDU_DROP BIT(0) #define RX_MSDU_END_INFO6_REO_DEST_IND GENMASK(5, 1) #define RX_MSDU_END_INFO6_FLOW_IDX GENMASK(25, 6) @@ -650,14 +651,15 @@ enum rx_msdu_start_reception_type { #define RX_MSDU_END_INFO7_AGGR_COUNT GENMASK(7, 0) #define RX_MSDU_END_INFO7_FLOW_AGGR_CONTN BIT(8) #define RX_MSDU_END_INFO7_FISA_TIMEOUT BIT(9) -#define RX_MSDU_END_INFO7_TCPUDP_CSUM_FAIL_CPY BIT(10) -#define RX_MSDU_END_INFO7_MSDU_LIMIT_ERROR BIT(11) -#define RX_MSDU_END_INFO7_FLOW_IDX_TIMEOUT BIT(12) -#define RX_MSDU_END_INFO7_FLOW_IDX_INVALID BIT(13) -#define RX_MSDU_END_INFO7_CCE_MATCH BIT(14) -#define RX_MSDU_END_INFO7_AMSDU_PARSER_ERR BIT(15) -#define RX_MSDU_END_INFO8_KEY_ID GENMASK(7, 0) +#define RX_MSDU_END_QCN9274_INFO7_TCPUDP_CSUM_FAIL_CPY BIT(10) +#define RX_MSDU_END_QCN9274_INFO7_MSDU_LIMIT_ERROR BIT(11) +#define RX_MSDU_END_QCN9274_INFO7_FLOW_IDX_TIMEOUT BIT(12) +#define RX_MSDU_END_QCN9274_INFO7_FLOW_IDX_INVALID BIT(13) +#define RX_MSDU_END_QCN9274_INFO7_CCE_MATCH BIT(14) +#define RX_MSDU_END_QCN9274_INFO7_AMSDU_PARSER_ERR BIT(15) + +#define RX_MSDU_END_QCN9274_INFO8_KEY_ID GENMASK(7, 0) #define RX_MSDU_END_INFO9_SERVICE_CODE GENMASK(14, 6) #define RX_MSDU_END_INFO9_PRIORITY_VALID BIT(15) @@ -698,8 +700,9 @@ enum rx_msdu_start_reception_type { #define RX_MSDU_END_INFO12_RATE_MCS GENMASK(17, 14) #define RX_MSDU_END_INFO12_RECV_BW GENMASK(20, 18) #define RX_MSDU_END_INFO12_RECEPTION_TYPE GENMASK(23, 21) -#define RX_MSDU_END_INFO12_MIMO_SS_BITMAP GENMASK(30, 24) -#define RX_MSDU_END_INFO12_MIMO_DONE_COPY BIT(31) + +#define RX_MSDU_END_QCN9274_INFO12_MIMO_SS_BITMAP GENMASK(30, 24) +#define RX_MSDU_END_QCN9274_INFO12_MIMO_DONE_COPY BIT(31) #define RX_MSDU_END_INFO13_FIRST_MPDU BIT(0) #define RX_MSDU_END_INFO13_MCAST_BCAST BIT(2) @@ -714,7 +717,6 @@ enum rx_msdu_start_reception_type { #define RX_MSDU_END_INFO13_EOSP BIT(11) #define RX_MSDU_END_INFO13_A_MSDU_ERROR BIT(12) #define RX_MSDU_END_INFO13_ORDER BIT(14) -#define RX_MSDU_END_INFO13_WIFI_PARSER_ERR BIT(15) #define RX_MSDU_END_INFO13_OVERFLOW_ERR BIT(16) #define RX_MSDU_END_INFO13_MSDU_LEN_ERR BIT(17) #define RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL BIT(18) @@ -732,6 +734,8 @@ enum rx_msdu_start_reception_type { #define RX_MSDU_END_INFO13_UNDECRYPT_FRAME_ERR BIT(30) #define 
RX_MSDU_END_INFO13_FCS_ERR		BIT(31)
 
+#define RX_MSDU_END_QCN9274_INFO13_WIFI_PARSER_ERR	BIT(15)
+
 #define RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE	GENMASK(12, 10)
 #define RX_MSDU_END_INFO14_RX_BITMAP_NOT_UPDED	BIT(13)
 #define RX_MSDU_END_INFO14_MSDU_DONE		BIT(31)
@@ -782,6 +786,65 @@ struct rx_msdu_end_qcn9274 {
 	__le32 info14;
 } __packed;
 
+/* These macro definitions are only used for WCN7850 */
+#define RX_MSDU_END_WCN7850_INFO2_KEY_ID		GENMASK(7, 0)
+
+#define RX_MSDU_END_WCN7850_INFO5_MSDU_LIMIT_ERR	BIT(2)
+#define RX_MSDU_END_WCN7850_INFO5_IDX_TIMEOUT		BIT(3)
+#define RX_MSDU_END_WCN7850_INFO5_IDX_INVALID		BIT(4)
+#define RX_MSDU_END_WCN7850_INFO5_WIFI_PARSE_ERR	BIT(5)
+#define RX_MSDU_END_WCN7850_INFO5_AMSDU_PARSER_ERR	BIT(6)
+#define RX_MSDU_END_WCN7850_INFO5_TCPUDP_CSUM_FAIL_CPY	BIT(14)
+
+#define RX_MSDU_END_WCN7850_INFO12_MIMO_SS_BITMAP	GENMASK(31, 24)
+
+#define RX_MSDU_END_WCN7850_INFO13_FRAGMENT_FLAG	BIT(13)
+#define RX_MSDU_END_WCN7850_INFO13_CCE_MATCH		BIT(15)
+
+struct rx_msdu_end_wcn7850 {
+	__le16 info0;
+	__le16 phy_ppdu_id;
+	__le16 ip_hdr_cksum;
+	__le16 info1;
+	__le16 info2;
+	__le16 cumulative_l3_checksum;
+	__le32 rule_indication0;
+	__le32 rule_indication1;
+	__le16 info3;
+	__le16 l3_type;
+	__le32 ipv6_options_crc;
+	__le32 tcp_seq_num;
+	__le32 tcp_ack_num;
+	__le16 info4;
+	__le16 window_size;
+	__le16 tcp_udp_chksum;
+	__le16 info5;
+	__le16 sa_idx;
+	__le16 da_idx_or_sw_peer_id;
+	__le32 info6;
+	__le32 fse_metadata;
+	__le16 cce_metadata;
+	__le16 sa_sw_peer_id;
+	__le16 info7;
+	__le16 rsvd0;
+	__le16 cumulative_l4_checksum;
+	__le16 cumulative_ip_length;
+	__le32 info9;
+	__le32 info10;
+	__le32 info11;
+	__le32 toeplitz_hash_2_or_4;
+	__le32 flow_id_toeplitz;
+	__le32 info12;
+	__le32 ppdu_start_timestamp_31_0;
+	__le32 ppdu_start_timestamp_63_32;
+	__le32 phy_meta_data;
+	__le16 vlan_ctag_ci;
+	__le16 vlan_stag_ci;
+	__le32 rsvd[3];
+	__le32 info13;
+	__le32 info14;
+} __packed;
+
 /* rx_msdu_end
  *
  * rxpcu_mpdu_filter_in_category
@@ -1410,7 +1473,7 @@ struct rx_pkt_hdr_tlv {
 
 struct hal_rx_desc_wcn7850 {
 	__le64 msdu_end_tag;
-	struct rx_msdu_end_qcn9274 msdu_end;
+	struct rx_msdu_end_wcn7850 msdu_end;
 	u8 rx_padding0[RX_BE_PADDING0_BYTES];
 	__le64 mpdu_start_tag;
 	struct rx_mpdu_start_qcn9274 mpdu_start;
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 4928e4e91660..0e5bf5ce8d4c 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -62,9 +62,27 @@ struct ath12k_wmi_svc_rdy_ext_parse {
 	bool dma_ring_cap_done;
 };
 
+struct ath12k_wmi_svc_rdy_ext2_arg {
+	u32 reg_db_version;
+	u32 hw_min_max_tx_power_2ghz;
+	u32 hw_min_max_tx_power_5ghz;
+	u32 chwidth_num_peer_caps;
+	u32 preamble_puncture_bw;
+	u32 max_user_per_ppdu_ofdma;
+	u32 max_user_per_ppdu_mumimo;
+	u32 target_cap_flags;
+	u32 eht_cap_mac_info[WMI_MAX_EHTCAP_MAC_SIZE];
+	u32 max_num_linkview_peers;
+	u32 max_num_msduq_supported_per_tid;
+	u32 default_num_msduq_supported_per_tid;
+};
+
 struct ath12k_wmi_svc_rdy_ext2_parse {
+	struct ath12k_wmi_svc_rdy_ext2_arg arg;
 	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
 	bool dma_ring_cap_done;
+	bool spectral_bin_scaling_done;
+	bool mac_phy_caps_ext_done;
 };
 
 struct ath12k_wmi_rdy_parse {
@@ -134,6 +152,8 @@ static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
 		.min_len = sizeof(struct wmi_service_available_event) },
 	[WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
 		.min_len = sizeof(struct wmi_peer_assoc_conf_event) },
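Editor's note: each event tag handled by the driver gets an entry in this table so the TLV parser can reject an event whose payload is shorter than the structure its handler will cast it to; the new rfkill event is registered the same way just below. A minimal standalone sketch of such a check (an assumed simplification, not the driver's actual parser):

#include <stddef.h>
#include <stdint.h>

struct tlv_policy {
	size_t min_len;
};

/* Accept a TLV only if its payload can hold the expected struct. */
static int tlv_payload_ok(const struct tlv_policy *tbl, size_t n_tags,
			  uint32_t tag, size_t payload_len)
{
	if (tag >= n_tags || tbl[tag].min_len == 0)
		return 1;	/* no policy registered for this tag */

	return payload_len >= tbl[tag].min_len;
}

+	[WMI_TAG_RFKILL_EVENT] = {
+		.min_len = sizeof(struct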
wmi_rfkill_state_change_event) }, [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) }, [WMI_TAG_HOST_SWFDA_EVENT] = { @@ -388,22 +408,22 @@ err_pull: int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb, u32 cmd_id) { - struct ath12k_wmi_base *wmi_sc = wmi->wmi_ab; + struct ath12k_wmi_base *wmi_ab = wmi->wmi_ab; int ret = -EOPNOTSUPP; might_sleep(); - wait_event_timeout(wmi_sc->tx_credits_wq, ({ + wait_event_timeout(wmi_ab->tx_credits_wq, ({ ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id); - if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_sc->ab->dev_flags)) + if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags)) ret = -ESHUTDOWN; (ret != -EAGAIN); }), WMI_SEND_TIMEOUT_HZ); if (ret == -EAGAIN) - ath12k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id); + ath12k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id); return ret; } @@ -445,8 +465,10 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle, const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps; const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps; const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps; + struct ath12k_base *ab = wmi_handle->wmi_ab->ab; struct ath12k_band_cap *cap_band; struct ath12k_pdev_cap *pdev_cap = &pdev->cap; + struct ath12k_fw_pdev *fw_pdev; u32 phy_map; u32 hw_idx, phy_idx = 0; int i; @@ -475,6 +497,12 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle, pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands); pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density); + fw_pdev = &ab->fw_pdev[ab->fw_pdev_count]; + fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands); + fw_pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id); + fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id); + ab->fw_pdev_count++; + /* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from * band to band for a single radio, need to see how this should be * handled. 
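Editor's note: the chunk above caches each firmware pdev's pdev_id, phy_id and supported_bands in ab->fw_pdev[] while the service-ready capabilities are pulled. On single-pdev targets this table is what lets the later WMI_TAG_MAC_PHY_CAPABILITIES_EXT parser (further down in this file) map a capability TLV back to the bands it applies to. A standalone sketch of that lookup (illustrative, mirroring the loop used later):

#include <stdint.h>

struct fw_pdev_entry {
	uint32_t pdev_id;
	uint32_t phy_id;
	uint32_t supported_bands;
};

/* Return the bands recorded for a (pdev_id, phy_id) pair, or 0 when
 * the capability TLV matches no entry cached at service-ready time. */
static uint32_t fw_pdev_lookup_bands(const struct fw_pdev_entry *tbl,
				     int count, uint32_t pdev_id,
				     uint32_t phy_id)
{
	int i;

	for (i = 0; i < count; i++)
		if (tbl[i].pdev_id == pdev_id && tbl[i].phy_id == phy_id)
			return tbl[i].supported_bands;

	return 0;
}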
@@ -699,10 +727,10 @@ static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *sk return 0; } -struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_sc, u32 len) +struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len) { struct sk_buff *skb; - struct ath12k_base *ab = wmi_sc->ab; + struct ath12k_base *ab = wmi_ab->ab; u32 round_len = roundup(len, 4); skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len); @@ -995,6 +1023,7 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg, cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms); cmd->regdomain = cpu_to_le32(arg->regdomain); cmd->he_ops = cpu_to_le32(arg->he_ops); + cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap); if (!restart) { if (arg->ssid) { @@ -1791,6 +1820,7 @@ static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd, bool hw_crypto_disabled) { cmd->peer_flags = 0; + cmd->peer_flags_ext = 0; if (arg->is_wme_set) { if (arg->qos_flag) @@ -1805,6 +1835,8 @@ static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd, cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ); if (arg->bw_160) cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ); + if (arg->bw_320) + cmd->peer_flags |= cpu_to_le32(WMI_PEER_EXT_320MHZ); /* Typically if STBC is enabled for VHT it should be enabled * for HT as well @@ -1832,6 +1864,8 @@ static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd, cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ); if (arg->twt_responder) cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP); + if (arg->eht_flag) + cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_EHT); } /* Suppress authorization for all AUTH modes that need 4-way handshake @@ -1876,6 +1910,7 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar, struct wmi_peer_assoc_complete_cmd *cmd; struct ath12k_wmi_vht_rate_set_params *mcs; struct ath12k_wmi_he_rate_set_params *he_mcs; + struct ath12k_wmi_eht_rate_set_params *eht_mcs; struct sk_buff *skb; struct wmi_tlv *tlv; void *ptr; @@ -1892,7 +1927,9 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar, TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) + TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) + sizeof(*mcs) + TLV_HDR_SIZE + - (sizeof(*he_mcs) * arg->peer_he_mcs_count); + (sizeof(*he_mcs) * arg->peer_he_mcs_count) + + TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count) + + TLV_HDR_SIZE + TLV_HDR_SIZE; skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) @@ -1908,6 +1945,7 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar, cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc); cmd->peer_associd = cpu_to_le32(arg->peer_associd); + cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap); ath12k_wmi_copy_peer_flags(cmd, arg, test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, @@ -1939,6 +1977,16 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar, cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] = cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]); + /* Update 11be capabilities */ + memcpy_and_pad(cmd->peer_eht_cap_mac, sizeof(cmd->peer_eht_cap_mac), + arg->peer_eht_cap_mac, sizeof(arg->peer_eht_cap_mac), + 0); + memcpy_and_pad(cmd->peer_eht_cap_phy, sizeof(cmd->peer_eht_cap_phy), + arg->peer_eht_cap_phy, sizeof(arg->peer_eht_cap_phy), + 0); + memcpy_and_pad(&cmd->peer_eht_ppet, sizeof(cmd->peer_eht_ppet), + &arg->peer_eht_ppet, sizeof(arg->peer_eht_ppet), 0); + /* Update peer legacy rate information */ ptr += sizeof(*cmd); @@ -2005,8 +2053,36 @@ int 
ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar, ptr += sizeof(*he_mcs); } + /* MLO header tag with 0 length */ + len = 0; + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len); + ptr += TLV_HDR_SIZE; + + /* Loop through the EHT rate set */ + len = arg->peer_eht_mcs_count * sizeof(*eht_mcs); + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len); + ptr += TLV_HDR_SIZE; + + for (i = 0; i < arg->peer_eht_mcs_count; i++) { + eht_mcs = ptr; + eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET, + sizeof(*eht_mcs)); + + eht_mcs->rx_mcs_set = cpu_to_le32(arg->peer_eht_rx_mcs_set[i]); + eht_mcs->tx_mcs_set = cpu_to_le32(arg->peer_eht_tx_mcs_set[i]); + ptr += sizeof(*eht_mcs); + } + + /* ML partner links tag with 0 length */ + len = 0; + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len); + ptr += TLV_HDR_SIZE; + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, - "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n", + "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x\n", cmd->vdev_id, cmd->peer_associd, arg->peer_mac, cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps, cmd->peer_listen_intval, cmd->peer_ht_caps, @@ -2016,7 +2092,10 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar, cmd->peer_he_ops, cmd->peer_he_cap_info_ext, cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1], cmd->peer_he_cap_phy[2], - cmd->peer_bw_rxnss_override); + cmd->peer_bw_rxnss_override, cmd->peer_flags_ext, + cmd->peer_eht_cap_mac[0], cmd->peer_eht_cap_mac[1], + cmd->peer_eht_cap_phy[0], cmd->peer_eht_cap_phy[1], + cmd->peer_eht_cap_phy[2]); ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID); if (ret) { @@ -2162,12 +2241,6 @@ int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar, if (arg->num_bssid) len += sizeof(*bssid) * arg->num_bssid; - len += TLV_HDR_SIZE; - if (arg->extraie.len) - extraie_len_with_pad = - roundup(arg->extraie.len, sizeof(u32)); - len += extraie_len_with_pad; - if (arg->num_hint_bssid) len += TLV_HDR_SIZE + arg->num_hint_bssid * sizeof(*hint_bssid); @@ -2176,6 +2249,18 @@ int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar, len += TLV_HDR_SIZE + arg->num_hint_s_ssid * sizeof(*s_ssid); + len += TLV_HDR_SIZE; + if (arg->extraie.len) + extraie_len_with_pad = + roundup(arg->extraie.len, sizeof(u32)); + if (extraie_len_with_pad <= (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len)) { + len += extraie_len_with_pad; + } else { + ath12k_warn(ar->ab, "discard large size %d bytes extraie for scan start\n", + arg->extraie.len); + extraie_len_with_pad = 0; + } + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; @@ -2265,7 +2350,7 @@ int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar, tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len); ptr += TLV_HDR_SIZE; - if (arg->extraie.len) + if (extraie_len_with_pad) memcpy(ptr, arg->extraie.ptr, arg->extraie.len); @@ -3386,7 +3471,7 @@ int ath12k_wmi_set_hw_mode(struct ath12k_base *ab, int ath12k_wmi_cmd_init(struct ath12k_base *ab) { - struct ath12k_wmi_base *wmi_sc = &ab->wmi_ab; + 
struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab; struct ath12k_wmi_init_cmd_arg arg = {}; if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT, @@ -3395,9 +3480,9 @@ int ath12k_wmi_cmd_init(struct ath12k_base *ab) ab->hw_params->wmi_init(ab, &arg.res_cfg); - arg.num_mem_chunks = wmi_sc->num_mem_chunks; - arg.hw_mode_id = wmi_sc->preferred_hw_mode; - arg.mem_chunks = wmi_sc->mem_chunks; + arg.num_mem_chunks = wmi_ab->num_mem_chunks; + arg.hw_mode_id = wmi_ab->preferred_hw_mode; + arg.mem_chunks = wmi_ab->mem_chunks; if (ab->hw_params->single_pdev_only) arg.hw_mode_id = WMI_HOST_HW_MODE_MAX; @@ -3405,7 +3490,7 @@ int ath12k_wmi_cmd_init(struct ath12k_base *ab) arg.num_band_to_mac = ab->num_radios; ath12k_fill_band_to_mac_param(ab, arg.band_to_mac); - return ath12k_init_cmd_send(&wmi_sc->wmi[0], &arg); + return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg); } int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar, @@ -3704,6 +3789,10 @@ static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc, for (i = 0 ; i < svc_rdy_ext->n_hw_mode_caps; i++) { hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i]; mode = le32_to_cpu(hw_mode_caps->hw_mode_id); + + if (mode >= WMI_HOST_HW_MODE_MAX) + continue; + pref = soc->wmi_ab.preferred_hw_mode; if (ath12k_hw_mode_pri_map[mode] < ath12k_hw_mode_pri_map[pref]) { @@ -3789,6 +3878,12 @@ static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc, ath12k_warn(soc, "failed to extract reg cap %d\n", i); return ret; } + + if (reg_cap.phy_id >= MAX_RADIOS) { + ath12k_warn(soc, "unexpected phy id %u\n", reg_cap.phy_id); + return -EINVAL; + } + soc->hal_reg_cap[reg_cap.phy_id] = reg_cap; } return 0; @@ -3810,6 +3905,7 @@ static int ath12k_wmi_ext_soc_hal_reg_caps_parse(struct ath12k_base *soc, soc->num_radios = 0; phy_id_map = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.phy_id_map); + soc->fw_pdev_count = 0; while (phy_id_map && soc->num_radios < MAX_RADIOS) { ret = ath12k_pull_mac_phy_cap_svc_ready_ext(wmi_handle, @@ -4037,14 +4133,183 @@ err: return ret; } +static int ath12k_pull_svc_ready_ext2(struct ath12k_wmi_pdev *wmi_handle, + const void *ptr, + struct ath12k_wmi_svc_rdy_ext2_arg *arg) +{ + const struct wmi_service_ready_ext2_event *ev = ptr; + + if (!ev) + return -EINVAL; + + arg->reg_db_version = le32_to_cpu(ev->reg_db_version); + arg->hw_min_max_tx_power_2ghz = le32_to_cpu(ev->hw_min_max_tx_power_2ghz); + arg->hw_min_max_tx_power_5ghz = le32_to_cpu(ev->hw_min_max_tx_power_5ghz); + arg->chwidth_num_peer_caps = le32_to_cpu(ev->chwidth_num_peer_caps); + arg->preamble_puncture_bw = le32_to_cpu(ev->preamble_puncture_bw); + arg->max_user_per_ppdu_ofdma = le32_to_cpu(ev->max_user_per_ppdu_ofdma); + arg->max_user_per_ppdu_mumimo = le32_to_cpu(ev->max_user_per_ppdu_mumimo); + arg->target_cap_flags = le32_to_cpu(ev->target_cap_flags); + return 0; +} + +static void ath12k_wmi_eht_caps_parse(struct ath12k_pdev *pdev, u32 band, + const __le32 cap_mac_info[], + const __le32 cap_phy_info[], + const __le32 supp_mcs[], + const struct ath12k_wmi_ppe_threshold_params *ppet, + __le32 cap_info_internal) +{ + struct ath12k_band_cap *cap_band = &pdev->cap.band[band]; + u32 support_320mhz; + u8 i; + + if (band == NL80211_BAND_6GHZ) + support_320mhz = cap_band->eht_cap_phy_info[0] & + IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ; + + for (i = 0; i < WMI_MAX_EHTCAP_MAC_SIZE; i++) + cap_band->eht_cap_mac_info[i] = le32_to_cpu(cap_mac_info[i]); + + for (i = 0; i < WMI_MAX_EHTCAP_PHY_SIZE; i++) + cap_band->eht_cap_phy_info[i] = le32_to_cpu(cap_phy_info[i]); + + if (band == NL80211_BAND_6GHZ) + 
cap_band->eht_cap_phy_info[0] |= support_320mhz; + + cap_band->eht_mcs_20_only = le32_to_cpu(supp_mcs[0]); + cap_band->eht_mcs_80 = le32_to_cpu(supp_mcs[1]); + if (band != NL80211_BAND_2GHZ) { + cap_band->eht_mcs_160 = le32_to_cpu(supp_mcs[2]); + cap_band->eht_mcs_320 = le32_to_cpu(supp_mcs[3]); + } + + cap_band->eht_ppet.numss_m1 = le32_to_cpu(ppet->numss_m1); + cap_band->eht_ppet.ru_bit_mask = le32_to_cpu(ppet->ru_info); + for (i = 0; i < WMI_MAX_NUM_SS; i++) + cap_band->eht_ppet.ppet16_ppet8_ru3_ru0[i] = + le32_to_cpu(ppet->ppet16_ppet8_ru3_ru0[i]); + + cap_band->eht_cap_info_internal = le32_to_cpu(cap_info_internal); +} + +static int +ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab, + const struct ath12k_wmi_caps_ext_params *caps, + struct ath12k_pdev *pdev) +{ + struct ath12k_band_cap *cap_band; + u32 bands, support_320mhz; + int i; + + if (ab->hw_params->single_pdev_only) { + if (caps->hw_mode_id == WMI_HOST_HW_MODE_SINGLE) { + support_320mhz = le32_to_cpu(caps->eht_cap_phy_info_5ghz[0]) & + IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ; + cap_band = &pdev->cap.band[NL80211_BAND_6GHZ]; + cap_band->eht_cap_phy_info[0] |= support_320mhz; + return 0; + } + + for (i = 0; i < ab->fw_pdev_count; i++) { + struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i]; + + if (fw_pdev->pdev_id == le32_to_cpu(caps->pdev_id) && + fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) { + bands = fw_pdev->supported_bands; + break; + } + } + + if (i == ab->fw_pdev_count) + return -EINVAL; + } else { + bands = pdev->cap.supported_bands; + } + + if (bands & WMI_HOST_WLAN_2G_CAP) { + ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_2GHZ, + caps->eht_cap_mac_info_2ghz, + caps->eht_cap_phy_info_2ghz, + caps->eht_supp_mcs_ext_2ghz, + &caps->eht_ppet_2ghz, + caps->eht_cap_info_internal); + } + + if (bands & WMI_HOST_WLAN_5G_CAP) { + ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_5GHZ, + caps->eht_cap_mac_info_5ghz, + caps->eht_cap_phy_info_5ghz, + caps->eht_supp_mcs_ext_5ghz, + &caps->eht_ppet_5ghz, + caps->eht_cap_info_internal); + + ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_6GHZ, + caps->eht_cap_mac_info_5ghz, + caps->eht_cap_phy_info_5ghz, + caps->eht_supp_mcs_ext_5ghz, + &caps->eht_ppet_5ghz, + caps->eht_cap_info_internal); + } + + return 0; +} + +static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag, + u16 len, const void *ptr, + void *data) +{ + const struct ath12k_wmi_caps_ext_params *caps = ptr; + int i = 0, ret; + + if (tag != WMI_TAG_MAC_PHY_CAPABILITIES_EXT) + return -EPROTO; + + if (ab->hw_params->single_pdev_only) { + if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id) && + caps->hw_mode_id != WMI_HOST_HW_MODE_SINGLE) + return 0; + } else { + for (i = 0; i < ab->num_radios; i++) { + if (ab->pdevs[i].pdev_id == le32_to_cpu(caps->pdev_id)) + break; + } + + if (i == ab->num_radios) + return -EINVAL; + } + + ret = ath12k_wmi_tlv_mac_phy_caps_ext_parse(ab, caps, &ab->pdevs[i]); + if (ret) { + ath12k_warn(ab, + "failed to parse extended MAC PHY capabilities for pdev %d: %d\n", + ab->pdevs[i].pdev_id, ret); + return ret; + } + + return 0; +} + static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { + struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0]; struct ath12k_wmi_svc_rdy_ext2_parse *parse = data; int ret; switch (tag) { + case WMI_TAG_SERVICE_READY_EXT2_EVENT: + ret = ath12k_pull_svc_ready_ext2(wmi_handle, ptr, + &parse->arg); + if (ret) { + ath12k_warn(ab, + "failed to extract wmi service ready ext2 parameters: 
%d\n", + ret); + return ret; + } + break; + case WMI_TAG_ARRAY_STRUCT: if (!parse->dma_ring_cap_done) { ret = ath12k_wmi_dma_ring_caps(ab, len, ptr, @@ -4053,6 +4318,23 @@ static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab, return ret; parse->dma_ring_cap_done = true; + } else if (!parse->spectral_bin_scaling_done) { + /* TODO: This is a place-holder as WMI tag for + * spectral scaling is before + * WMI_TAG_MAC_PHY_CAPABILITIES_EXT + */ + parse->spectral_bin_scaling_done = true; + } else if (!parse->mac_phy_caps_ext_done) { + ret = ath12k_wmi_tlv_iter(ab, ptr, len, + ath12k_wmi_tlv_mac_phy_caps_ext, + parse); + if (ret) { + ath12k_warn(ab, "failed to parse extended MAC PHY capabilities WMI TLV: %d\n", + ret); + return ret; + } + + parse->mac_phy_caps_ext_done = true; } break; default: @@ -4329,10 +4611,11 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab, } ath12k_dbg(ab, ATH12K_DBG_WMI, - "%s:cc_ext %s dsf %d BW: min_2g %d max_2g %d min_5g %d max_5g %d", + "%s:cc_ext %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d phy_bitmap 0x%x", __func__, reg_info->alpha2, reg_info->dfs_region, reg_info->min_bw_2g, reg_info->max_bw_2g, - reg_info->min_bw_5g, reg_info->max_bw_5g); + reg_info->min_bw_5g, reg_info->max_bw_5g, + reg_info->phybitmap); ath12k_dbg(ab, ATH12K_DBG_WMI, "num_2g_reg_rules %d num_5g_reg_rules %d", @@ -5139,7 +5422,13 @@ static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab, static bool ath12k_reg_is_world_alpha(char *alpha) { - return alpha[0] == '0' && alpha[1] == '0'; + if (alpha[0] == '0' && alpha[1] == '0') + return true; + + if (alpha[0] == 'n' && alpha[1] == 'a') + return true; + + return false; } static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb) @@ -5222,7 +5511,7 @@ static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *sk ar = ab->pdevs[pdev_idx].ar; kfree(ab->new_regd[pdev_idx]); ab->new_regd[pdev_idx] = regd; - ieee80211_queue_work(ar->hw, &ar->regd_update_work); + queue_work(ab->workqueue, &ar->regd_update_work); } else { /* Multiple events for the same *ar is not expected. But we * can still clear any previously stored default_regd if we @@ -5611,8 +5900,9 @@ exit: rcu_read_unlock(); } -static struct ath12k *ath12k_get_ar_on_scan_abort(struct ath12k_base *ab, - u32 vdev_id) +static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab, + u32 vdev_id, + enum ath12k_scan_state state) { int i; struct ath12k_pdev *pdev; @@ -5624,7 +5914,7 @@ static struct ath12k *ath12k_get_ar_on_scan_abort(struct ath12k_base *ab, ar = pdev->ar; spin_lock_bh(&ar->data_lock); - if (ar->scan.state == ATH12K_SCAN_ABORTING && + if (ar->scan.state == state && ar->scan.vdev_id == vdev_id) { spin_unlock_bh(&ar->data_lock); return ar; @@ -5654,10 +5944,15 @@ static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb) * aborting scan's vdev id matches this event info. 
*/ if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED && - le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) - ar = ath12k_get_ar_on_scan_abort(ab, le32_to_cpu(scan_ev.vdev_id)); - else + le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) { + ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id), + ATH12K_SCAN_ABORTING); + if (!ar) + ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id), + ATH12K_SCAN_RUNNING); + } else { ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id)); + } if (!ar) { ath12k_warn(ab, "Received scan event for unknown vdev"); @@ -5697,6 +5992,8 @@ static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb) ath12k_wmi_event_scan_start_failed(ar); break; case WMI_SCAN_EVENT_DEQUEUED: + __ath12k_mac_scan_finish(ar); + break; case WMI_SCAN_EVENT_PREEMPTED: case WMI_SCAN_EVENT_RESTARTED: case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT: @@ -6218,6 +6515,8 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp, ev->freq_offset, ev->sidx); + rcu_read_lock(); + ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id)); if (!ar) { @@ -6235,6 +6534,8 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff ieee80211_radar_detected(ar->hw); exit: + rcu_read_unlock(); + kfree(tb); } @@ -6253,11 +6554,16 @@ ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab, ath12k_dbg(ab, ATH12K_DBG_WMI, "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id); + rcu_read_lock(); + ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id)); if (!ar) { ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id); - return; + goto exit; } + +exit: + rcu_read_unlock(); } static void ath12k_fils_discovery_event(struct ath12k_base *ab, @@ -6322,6 +6628,40 @@ static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab, kfree(tb); } +static void ath12k_rfkill_state_change_event(struct ath12k_base *ab, + struct sk_buff *skb) +{ + const struct wmi_rfkill_state_change_event *ev; + const void **tb; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return; + } + + ev = tb[WMI_TAG_RFKILL_EVENT]; + if (!ev) { + kfree(tb); + return; + } + + ath12k_dbg(ab, ATH12K_DBG_MAC, + "wmi tlv rfkill state change gpio %d type %d radio_state %d\n", + le32_to_cpu(ev->gpio_pin_num), + le32_to_cpu(ev->int_type), + le32_to_cpu(ev->radio_state)); + + spin_lock_bh(&ab->base_lock); + ab->rfkill_radio_on = (ev->radio_state == cpu_to_le32(WMI_RFKILL_RADIO_STATE_ON)); + spin_unlock_bh(&ab->base_lock); + + queue_work(ab->workqueue, &ab->rfkill_work); + kfree(tb); +} + static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb) { struct wmi_cmd_hdr *cmd_hdr; @@ -6414,6 +6754,9 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb) case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID: ath12k_probe_resp_tx_status_event(ab, skb); break; + case WMI_RFKILL_STATE_CHANGE_EVENTID: + ath12k_rfkill_state_change_event(ab, skb); + break; /* add Unsupported events here */ case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID: case WMI_PEER_OPER_MODE_CHANGE_EVENTID: diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h index d89c12bfb009..629373d67421 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.h +++ b/drivers/net/wireless/ath/ath12k/wmi.h @@ 
-1167,6 +1167,11 @@ enum wmi_tlv_peer_flags { }; +enum wmi_tlv_peer_flags_ext { + WMI_PEER_EXT_EHT = BIT(0), + WMI_PEER_EXT_320MHZ = BIT(1), +}; + /** Enum list of TLV Tags for each parameter structure type. */ enum wmi_tlv_tag { WMI_TAG_LAST_RESERVED = 15, @@ -1920,10 +1925,12 @@ enum wmi_tlv_tag { /* TODO add all the missing cmds */ WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD = 0x301, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO, + WMI_TAG_SERVICE_READY_EXT2_EVENT = 0x334, WMI_TAG_FILS_DISCOVERY_TMPL_CMD = 0x344, WMI_TAG_MAC_PHY_CAPABILITIES_EXT = 0x36F, WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9, WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT, + WMI_TAG_EHT_RATE_SET = 0x3C4, WMI_TAG_MAX }; @@ -2151,6 +2158,9 @@ enum wmi_tlv_service { WMI_MAX_EXT_SERVICE = 256, WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT = 281, + + WMI_TLV_SERVICE_11BE = 289, + WMI_MAX_EXT2_SERVICE, }; @@ -2581,6 +2591,69 @@ struct ath12k_wmi_soc_hal_reg_caps_params { __le32 num_phy; } __packed; +#define WMI_MAX_EHTCAP_MAC_SIZE 2 +#define WMI_MAX_EHTCAP_PHY_SIZE 3 +#define WMI_MAX_EHTCAP_RATE_SET 3 + +/* Used for EHT MCS-NSS array. Data at each array index follows the format given + * in IEEE P802.11be/D2.0, May 2022, 9.4.2.313.4. + * + * Index interpretation: + * 0 - 20 MHz only sta, all 4 bytes valid + * 1 - index for bandwidths <= 80 MHz except 20 MHz-only, first 3 bytes valid + * 2 - index for 160 MHz, first 3 bytes valid + * 3 - index for 320 MHz, first 3 bytes valid + */ +#define WMI_MAX_EHT_SUPP_MCS_2G_SIZE 2 +#define WMI_MAX_EHT_SUPP_MCS_5G_SIZE 4 + +#define WMI_EHTCAP_TXRX_MCS_NSS_IDX_80 0 +#define WMI_EHTCAP_TXRX_MCS_NSS_IDX_160 1 +#define WMI_EHTCAP_TXRX_MCS_NSS_IDX_320 2 + +#define WMI_EHT_MCS_NSS_0_7 GENMASK(3, 0) +#define WMI_EHT_MCS_NSS_8_9 GENMASK(7, 4) +#define WMI_EHT_MCS_NSS_10_11 GENMASK(11, 8) +#define WMI_EHT_MCS_NSS_12_13 GENMASK(15, 12) + +struct wmi_service_ready_ext2_event { + __le32 reg_db_version; + __le32 hw_min_max_tx_power_2ghz; + __le32 hw_min_max_tx_power_5ghz; + __le32 chwidth_num_peer_caps; + __le32 preamble_puncture_bw; + __le32 max_user_per_ppdu_ofdma; + __le32 max_user_per_ppdu_mumimo; + __le32 target_cap_flags; + __le32 eht_cap_mac_info[WMI_MAX_EHTCAP_MAC_SIZE]; + __le32 max_num_linkview_peers; + __le32 max_num_msduq_supported_per_tid; + __le32 default_num_msduq_supported_per_tid; +} __packed; + +struct ath12k_wmi_caps_ext_params { + __le32 hw_mode_id; + union { + struct { + __le16 pdev_id; + __le16 hw_link_id; + } __packed ath12k_wmi_pdev_to_link_map; + __le32 pdev_id; + }; + __le32 phy_id; + __le32 wireless_modes_ext; + __le32 eht_cap_mac_info_2ghz[WMI_MAX_EHTCAP_MAC_SIZE]; + __le32 eht_cap_mac_info_5ghz[WMI_MAX_EHTCAP_MAC_SIZE]; + __le32 rsvd0[2]; + __le32 eht_cap_phy_info_2ghz[WMI_MAX_EHTCAP_PHY_SIZE]; + __le32 eht_cap_phy_info_5ghz[WMI_MAX_EHTCAP_PHY_SIZE]; + struct ath12k_wmi_ppe_threshold_params eht_ppet_2ghz; + struct ath12k_wmi_ppe_threshold_params eht_ppet_5ghz; + __le32 eht_cap_info_internal; + __le32 eht_supp_mcs_ext_2ghz[WMI_MAX_EHT_SUPP_MCS_2G_SIZE]; + __le32 eht_supp_mcs_ext_5ghz[WMI_MAX_EHT_SUPP_MCS_5G_SIZE]; +} __packed; + /* 2 word representation of MAC addr */ struct ath12k_wmi_mac_addr_params { u8 addr[ETH_ALEN]; @@ -2705,6 +2778,11 @@ struct wmi_vdev_start_request_cmd { __le32 he_ops; __le32 cac_duration_ms; __le32 regdomain; + __le32 min_data_rate; + __le32 mbssid_flags; + __le32 mbssid_tx_vdev_id; + __le32 eht_ops; + __le32 punct_bitmap; } __packed; #define MGMT_TX_DL_FRM_LEN 64 @@ -2758,8 +2836,17 @@ enum wmi_phy_mode { MODE_11AX_HE20_2G = 21, MODE_11AX_HE40_2G = 22, MODE_11AX_HE80_2G = 23, 
- MODE_UNKNOWN = 24, - MODE_MAX = 24 + MODE_11BE_EHT20 = 24, + MODE_11BE_EHT40 = 25, + MODE_11BE_EHT80 = 26, + MODE_11BE_EHT80_80 = 27, + MODE_11BE_EHT160 = 28, + MODE_11BE_EHT160_160 = 29, + MODE_11BE_EHT320 = 30, + MODE_11BE_EHT20_2G = 31, + MODE_11BE_EHT40_2G = 32, + MODE_UNKNOWN = 33, + MODE_MAX = 33, }; struct wmi_vdev_start_req_arg { @@ -2795,6 +2882,10 @@ struct wmi_vdev_start_req_arg { u32 pref_rx_streams; u32 pref_tx_streams; u32 num_noa_descriptors; + u32 min_data_rate; + u32 mbssid_flags; + u32 mbssid_tx_vdev_id; + u32 punct_bitmap; }; struct ath12k_wmi_peer_create_arg { @@ -3034,7 +3125,6 @@ enum scan_dwelltime_adaptive_mode { #define WLAN_SCAN_MAX_NUM_SSID 10 #define WLAN_SCAN_MAX_NUM_BSSID 10 -#define WLAN_SCAN_MAX_NUM_CHANNELS 40 struct ath12k_wmi_element_info_arg { u32 len; @@ -3243,7 +3333,7 @@ struct ath12k_wmi_scan_req_arg { u32 num_bssid; u32 num_ssids; u32 n_probes; - u32 chan_list[WLAN_SCAN_MAX_NUM_CHANNELS]; + u32 *chan_list; u32 notify_scan_events; struct cfg80211_ssid ssid[WLAN_SCAN_MAX_NUM_SSID]; struct ath12k_wmi_mac_addr_params bssid_list[WLAN_SCAN_MAX_NUM_BSSID]; @@ -3491,6 +3581,7 @@ struct ath12k_wmi_peer_assoc_arg { bool bw_40; bool bw_80; bool bw_160; + bool bw_320; bool stbc_flag; bool ldpc_flag; bool static_mimops_flag; @@ -3518,6 +3609,14 @@ struct ath12k_wmi_peer_assoc_arg { bool twt_responder; bool twt_requester; struct ath12k_wmi_ppe_threshold_arg peer_ppet; + bool eht_flag; + u32 peer_eht_cap_mac[WMI_MAX_EHTCAP_MAC_SIZE]; + u32 peer_eht_cap_phy[WMI_MAX_EHTCAP_PHY_SIZE]; + u32 peer_eht_mcs_count; + u32 peer_eht_rx_mcs_set[WMI_MAX_EHTCAP_RATE_SET]; + u32 peer_eht_tx_mcs_set[WMI_MAX_EHTCAP_RATE_SET]; + struct ath12k_wmi_ppe_threshold_arg peer_eht_ppet; + u32 punct_bitmap; }; struct wmi_peer_assoc_complete_cmd { @@ -3549,6 +3648,15 @@ struct wmi_peer_assoc_complete_cmd { __le32 peer_he_cap_info_internal; __le32 min_data_rate; __le32 peer_he_caps_6ghz; + __le32 sta_type; + __le32 bss_max_idle_option; + __le32 auth_mode; + __le32 peer_flags_ext; + __le32 punct_bitmap; + __le32 peer_eht_cap_mac[WMI_MAX_EHTCAP_MAC_SIZE]; + __le32 peer_eht_cap_phy[WMI_MAX_EHTCAP_PHY_SIZE]; + __le32 peer_eht_ops; + struct ath12k_wmi_ppe_threshold_params peer_eht_ppet; } __packed; struct wmi_stop_scan_cmd { @@ -3776,6 +3884,12 @@ struct ath12k_wmi_he_rate_set_params { __le32 tx_mcs_set; } __packed; +struct ath12k_wmi_eht_rate_set_params { + __le32 tlv_header; + __le32 rx_mcs_set; + __le32 tx_mcs_set; +} __packed; + #define MAX_REG_RULES 10 #define REG_ALPHA2_LEN 2 #define MAX_6G_REG_RULES 5 @@ -4682,6 +4796,31 @@ struct ath12k_wmi_base { #define ATH12K_FW_STATS_BUF_SIZE (1024 * 1024) +enum wmi_sys_cap_info_flags { + WMI_SYS_CAP_INFO_RXTX_LED = BIT(0), + WMI_SYS_CAP_INFO_RFKILL = BIT(1), +}; + +#define WMI_RFKILL_CFG_GPIO_PIN_NUM GENMASK(5, 0) +#define WMI_RFKILL_CFG_RADIO_LEVEL BIT(6) +#define WMI_RFKILL_CFG_PIN_AS_GPIO GENMASK(10, 7) + +enum wmi_rfkill_enable_radio { + WMI_RFKILL_ENABLE_RADIO_ON = 0, + WMI_RFKILL_ENABLE_RADIO_OFF = 1, +}; + +enum wmi_rfkill_radio_state { + WMI_RFKILL_RADIO_STATE_OFF = 1, + WMI_RFKILL_RADIO_STATE_ON = 2, +}; + +struct wmi_rfkill_state_change_event { + __le32 gpio_pin_num; + __le32 int_type; + __le32 radio_state; +} __packed; + void ath12k_wmi_init_qcn9274(struct ath12k_base *ab, struct ath12k_wmi_resource_config_arg *config); void ath12k_wmi_init_wcn7850(struct ath12k_base *ab, @@ -4744,8 +4883,6 @@ int ath12k_wmi_vdev_install_key(struct ath12k *ar, struct wmi_vdev_install_key_arg *arg); int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k 
*ar, enum wmi_bss_chan_info_req_type type); -int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id, - u32 vdev_id, u32 pdev_id); int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar); int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar, u8 peer_addr[ETH_ALEN], diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c index 28a1e5eff204..08bd5d3b00f1 100644 --- a/drivers/net/wireless/ath/ath5k/ahb.c +++ b/drivers/net/wireless/ath/ath5k/ahb.c @@ -115,7 +115,6 @@ static int ath_ahb_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(&pdev->dev, "no IRQ resource found: %d\n", irq); ret = irq; goto err_iounmap; } diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index c59c14483177..9f534ed2fbb3 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -230,13 +230,13 @@ ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val) } static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset) { - struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv; + struct ath5k_hw *ah = hw_priv; return ath5k_hw_reg_read(ah, reg_offset); } static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset) { - struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv; + struct ath5k_hw *ah = hw_priv; ath5k_hw_reg_write(ah, val, reg_offset); } @@ -1770,7 +1770,7 @@ ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb, ah->stats.antenna_tx[0]++; /* invalid */ trace_ath5k_tx_complete(ah, skb, txq, ts); - ieee80211_tx_status(ah->hw, skb); + ieee80211_tx_status_skb(ah->hw, skb); } static void diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c index 4b41160e5d38..ec130510aeb2 100644 --- a/drivers/net/wireless/ath/ath5k/debug.c +++ b/drivers/net/wireless/ath/ath5k/debug.c @@ -982,8 +982,6 @@ ath5k_debug_init_device(struct ath5k_hw *ah) ah->debug.level = ath5k_debug; phydir = debugfs_create_dir("ath5k", ah->hw->wiphy->debugfsdir); - if (!phydir) - return; debugfs_create_file("debug", 0600, phydir, ah, &fops_debug); debugfs_create_file("registers", 0400, phydir, ah, &registers_fops); diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c index 33e9928af363..439052984796 100644 --- a/drivers/net/wireless/ath/ath5k/led.c +++ b/drivers/net/wireless/ath/ath5k/led.c @@ -131,8 +131,7 @@ ath5k_register_led(struct ath5k_hw *ah, struct ath5k_led *led, int err; led->ah = ah; - strncpy(led->name, name, sizeof(led->name)); - led->name[sizeof(led->name)-1] = 0; + strscpy(led->name, name, sizeof(led->name)); led->led_dev.name = led->name; led->led_dev.default_trigger = trigger; led->led_dev.brightness_set = ath5k_led_brightness_set; diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c index 11ed30d6b595..c630343ca4f9 100644 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c @@ -382,7 +382,6 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, mfilt[1] = multicast >> 32; /* Only deal with supported flags */ - changed_flags &= SUPPORTED_FIF_FLAGS; *new_flags &= SUPPORTED_FIF_FLAGS; /* If HW detects any phy or radar errors, leave those filters on. 
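The ath5k/led.c hunk above collapses a strncpy() plus manual NUL termination into a single strscpy() call. A minimal sketch of the idiom follows, using a hypothetical demo_led struct and helper (not the driver's real types):

	/* Hypothetical example of the strscpy() conversion pattern above. */
	#include <linux/string.h>
	#include <linux/printk.h>

	struct demo_led {
		char name[16];		/* fixed-size destination buffer */
	};

	static void demo_led_set_name(struct demo_led *led, const char *name)
	{
		/* strscpy() bounds the copy to the destination size and always
		 * NUL-terminates, so the old strncpy() + name[size - 1] = 0
		 * pair becomes one call. It returns the copied length, or
		 * -E2BIG when the source string had to be truncated.
		 */
		if (strscpy(led->name, name, sizeof(led->name)) < 0)
			pr_debug("led name '%s' truncated\n", name);
	}

Unlike strncpy(), strscpy() never leaves the destination unterminated and never zero-pads the remainder, which is why it is the preferred replacement in kernel code.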
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c index 86b8cb975b1a..b51fce5ae260 100644 --- a/drivers/net/wireless/ath/ath5k/pci.c +++ b/drivers/net/wireless/ath/ath5k/pci.c @@ -54,7 +54,7 @@ MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table); /* return bus cachesize in 4B word units */ static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz) { - struct ath5k_hw *ah = (struct ath5k_hw *) common->priv; + struct ath5k_hw *ah = common->priv; u8 u8tmp; pci_read_config_byte(ah->pdev, PCI_CACHE_LINE_SIZE, &u8tmp); @@ -76,7 +76,7 @@ static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz) static bool ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data) { - struct ath5k_hw *ah = (struct ath5k_hw *) common->ah; + struct ath5k_hw *ah = common->ah; u32 status, timeout; /* diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c index 5797ef9c73d7..7ee4e1616f45 100644 --- a/drivers/net/wireless/ath/ath5k/phy.c +++ b/drivers/net/wireless/ath/ath5k/phy.c @@ -26,6 +26,7 @@ #include <linux/delay.h> #include <linux/slab.h> +#include <linux/sort.h> #include <asm/unaligned.h> #include "ath5k.h" @@ -1554,6 +1555,11 @@ static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor) hist->nfval[hist->index] = noise_floor; } +static int cmps16(const void *a, const void *b) +{ + return *(s16 *)a - *(s16 *)b; +} + /** * ath5k_hw_get_median_noise_floor() - Get median NF from history buffer * @ah: The &struct ath5k_hw @@ -1561,25 +1567,16 @@ static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor) static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah) { - s16 sort[ATH5K_NF_CAL_HIST_MAX]; - s16 tmp; - int i, j; - - memcpy(sort, ah->ah_nfcal_hist.nfval, sizeof(sort)); - for (i = 0; i < ATH5K_NF_CAL_HIST_MAX - 1; i++) { - for (j = 1; j < ATH5K_NF_CAL_HIST_MAX - i; j++) { - if (sort[j] > sort[j - 1]) { - tmp = sort[j]; - sort[j] = sort[j - 1]; - sort[j - 1] = tmp; - } - } - } + s16 sorted_nfval[ATH5K_NF_CAL_HIST_MAX]; + int i; + + memcpy(sorted_nfval, ah->ah_nfcal_hist.nfval, sizeof(sorted_nfval)); + sort(sorted_nfval, ATH5K_NF_CAL_HIST_MAX, sizeof(s16), cmps16, NULL); for (i = 0; i < ATH5K_NF_CAL_HIST_MAX; i++) { ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, - "cal %d:%d\n", i, sort[i]); + "cal %d:%d\n", i, sorted_nfval[i]); } - return sort[(ATH5K_NF_CAL_HIST_MAX - 1) / 2]; + return sorted_nfval[(ATH5K_NF_CAL_HIST_MAX - 1) / 2]; } /** diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index 0c2b8b1a10d5..e37db4af33de 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -1118,9 +1118,9 @@ void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq, ath6kl_band_2ghz.ht_cap.ht_supported) ? 
NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT); - mutex_lock(&vif->wdev.mtx); + wiphy_lock(vif->ar->wiphy); cfg80211_ch_switch_notify(vif->ndev, &chandef, 0, 0); - mutex_unlock(&vif->wdev.mtx); + wiphy_unlock(vif->ar->wiphy); } static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, @@ -2954,7 +2954,7 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev, } static int ath6kl_change_beacon(struct wiphy *wiphy, struct net_device *dev, - struct cfg80211_beacon_data *beacon) + struct cfg80211_ap_update *params) { struct ath6kl_vif *vif = netdev_priv(dev); @@ -2964,7 +2964,7 @@ static int ath6kl_change_beacon(struct wiphy *wiphy, struct net_device *dev, if (vif->next_mode != AP_NETWORK) return -EOPNOTSUPP; - return ath6kl_set_ies(vif, beacon); + return ath6kl_set_ies(vif, &params->beacon); } static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev, diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c index 433a047f3747..b837d31416df 100644 --- a/drivers/net/wireless/ath/ath6kl/debug.c +++ b/drivers/net/wireless/ath/ath6kl/debug.c @@ -1793,8 +1793,6 @@ int ath6kl_debug_init_fs(struct ath6kl *ar) { ar->debugfs_phy = debugfs_create_dir("ath6kl", ar->wiphy->debugfsdir); - if (!ar->debugfs_phy) - return -ENOMEM; debugfs_create_file("tgt_stats", 0400, ar->debugfs_phy, ar, &fops_tgt_stats); diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c index 201e45554070..15f455adb860 100644 --- a/drivers/net/wireless/ath/ath6kl/init.c +++ b/drivers/net/wireless/ath/ath6kl/init.c @@ -1677,7 +1677,7 @@ static void ath6kl_init_get_fwcaps(struct ath6kl *ar, char *buf, size_t buf_len) /* add "..." to the end of string */ trunc_len = strlen(trunc) + 1; - strncpy(buf + buf_len - trunc_len, trunc, trunc_len); + memcpy(buf + buf_len - trunc_len, trunc, trunc_len); return; } diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c index d3aa9e7a37c2..8f9fe23e9755 100644 --- a/drivers/net/wireless/ath/ath6kl/main.c +++ b/drivers/net/wireless/ath/ath6kl/main.c @@ -852,14 +852,14 @@ void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len) void ath6kl_wakeup_event(void *dev) { - struct ath6kl *ar = (struct ath6kl *) dev; + struct ath6kl *ar = dev; wake_up(&ar->event_wq); } void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr) { - struct ath6kl *ar = (struct ath6kl *) devt; + struct ath6kl *ar = devt; ar->tx_pwr = tx_pwr; wake_up(&ar->event_wq); diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c index a56fab6232a9..80e66acc5cf6 100644 --- a/drivers/net/wireless/ath/ath6kl/txrx.c +++ b/drivers/net/wireless/ath/ath6kl/txrx.c @@ -708,7 +708,7 @@ void ath6kl_tx_complete(struct htc_target *target, packet->endpoint >= ENDPOINT_MAX)) continue; - ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt; + ath6kl_cookie = packet->pkt_cntxt; if (WARN_ON_ONCE(!ath6kl_cookie)) continue; diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index e150d82eddb6..0c47be06c153 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -57,8 +57,7 @@ config ATH9K_AHB config ATH9K_DEBUGFS bool "Atheros ath9k debugging" - depends on ATH9K && DEBUG_FS - select MAC80211_DEBUGFS + depends on ATH9K && DEBUG_FS && MAC80211_DEBUGFS select ATH9K_COMMON_DEBUG help Say Y, if you need access to ath9k's statistics for @@ -70,7 +69,6 @@ config 
ATH9K_STATION_STATISTICS bool "Detailed station statistics" depends on ATH9K && ATH9K_DEBUGFS && DEBUG_FS - select MAC80211_DEBUGFS default n help This option enables detailed statistics for association stations. diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c index 9cd12b20b18d..9bfaadfa6c00 100644 --- a/drivers/net/wireless/ath/ath9k/ahb.c +++ b/drivers/net/wireless/ath/ath9k/ahb.c @@ -132,8 +132,8 @@ static int ath_ahb_probe(struct platform_device *pdev) ah = sc->sc_ah; ath9k_hw_name(ah, hw_name, sizeof(hw_name)); - wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n", - hw_name, (unsigned long)mem, irq); + wiphy_info(hw->wiphy, "%s mem=0x%p, irq=%d\n", + hw_name, mem, irq); return 0; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index a29c11f944a5..6274d1624261 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -766,10 +766,10 @@ static void ar9003_hw_prog_ini(struct ath_hw *ah, } } -static int ar9550_hw_get_modes_txgain_index(struct ath_hw *ah, +static u32 ar9550_hw_get_modes_txgain_index(struct ath_hw *ah, struct ath9k_channel *chan) { - int ret; + u32 ret; if (IS_CHAN_2GHZ(chan)) { if (IS_CHAN_HT40(chan)) @@ -791,7 +791,7 @@ static int ar9550_hw_get_modes_txgain_index(struct ath_hw *ah, return ret; } -static int ar9561_hw_get_modes_txgain_index(struct ath_hw *ah, +static u32 ar9561_hw_get_modes_txgain_index(struct ath_hw *ah, struct ath9k_channel *chan) { if (IS_CHAN_2GHZ(chan)) { @@ -916,7 +916,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah, * TXGAIN initvals. */ if (AR_SREV_9550(ah) || AR_SREV_9531(ah) || AR_SREV_9561(ah)) { - int modes_txgain_index = 1; + u32 modes_txgain_index = 1; if (AR_SREV_9550(ah)) modes_txgain_index = ar9550_hw_get_modes_txgain_index(ah, chan); @@ -925,9 +925,6 @@ static int ar9003_hw_process_ini(struct ath_hw *ah, modes_txgain_index = ar9561_hw_get_modes_txgain_index(ah, chan); - if (modes_txgain_index < 0) - return -EINVAL; - REG_WRITE_ARRAY(&ah->iniModesTxGain, modes_txgain_index, regWrites); } else { diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 2cc23605c9fc..668fc07b3073 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -1129,7 +1129,6 @@ void ath_restart_work(struct ath_softc *sc); int ath9k_init_device(u16 devid, struct ath_softc *sc, const struct ath_bus_ops *bus_ops); void ath9k_deinit_device(struct ath_softc *sc); -void ath9k_reload_chainmask_settings(struct ath_softc *sc); u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate); void ath_start_rfkill_poll(struct ath_softc *sc); void ath9k_rfkill_poll_state(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c index e055adfb5361..a5349c72c332 100644 --- a/drivers/net/wireless/ath/ath9k/common-spectral.c +++ b/drivers/net/wireless/ath/ath9k/common-spectral.c @@ -855,16 +855,11 @@ static ssize_t write_file_spectral_short_repeat(struct file *file, { struct ath_spec_scan_priv *spec_priv = file->private_data; unsigned long val; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ssize_t ret; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val > 1) return -EINVAL; 
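The debugfs write handlers converted in the surrounding hunks all follow the same shape: kstrtoul_from_user() copies, NUL-terminates and parses the user buffer in one call, so the local buf[32] staging array and its manual termination can be dropped. A minimal sketch of the resulting handler, with a hypothetical name and range limit standing in for the per-file specifics:

	/* Hypothetical debugfs write handler showing the kstrtoul_from_user()
	 * pattern used throughout these hunks.
	 */
	#include <linux/fs.h>
	#include <linux/kstrtox.h>

	static ssize_t demo_write_knob(struct file *file,
				       const char __user *user_buf,
				       size_t count, loff_t *ppos)
	{
		unsigned long val;
		ssize_t ret;

		/* Copies at most count bytes from user space, terminates and
		 * parses them; returns 0 on success, -EFAULT or -EINVAL on
		 * failure, which is propagated directly to the caller.
		 */
		ret = kstrtoul_from_user(user_buf, count, 0, &val);
		if (ret)
			return ret;

		if (val > 255)		/* per-knob range check stays local */
			return -EINVAL;

		/* ... apply val to the driver state ... */
		return count;
	}

The helper also removes a subtle bug class: the old open-coded version silently truncated writes longer than 31 bytes instead of rejecting them.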
@@ -903,17 +898,11 @@ static ssize_t write_file_spectral_count(struct file *file, { struct ath_spec_scan_priv *spec_priv = file->private_data; unsigned long val; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; - - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ssize_t ret; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val > 255) return -EINVAL; @@ -951,16 +940,11 @@ static ssize_t write_file_spectral_period(struct file *file, { struct ath_spec_scan_priv *spec_priv = file->private_data; unsigned long val; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ssize_t ret; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val > 255) return -EINVAL; @@ -999,16 +983,11 @@ static ssize_t write_file_spectral_fft_period(struct file *file, { struct ath_spec_scan_priv *spec_priv = file->private_data; unsigned long val; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ssize_t ret; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val > 15) return -EINVAL; diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h index f0ab6f9955e4..12204cf86fcf 100644 --- a/drivers/net/wireless/ath/ath9k/common.h +++ b/drivers/net/wireless/ath/ath9k/common.h @@ -85,8 +85,6 @@ struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw, struct ath_hw *ah, struct cfg80211_chan_def *chandef); int ath9k_cmn_count_streams(unsigned int chainmask, int max); -void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common, - enum ath_stomp_type stomp_type); void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow, u16 new_txpow, u16 *txpower); void ath9k_cmn_init_crypto(struct ath_hw *ah); diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index fb7a2952d0ce..a0376a6787b8 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -96,21 +96,16 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf, } static ssize_t write_file_debug(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) + size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; struct ath_common *common = ath9k_hw_common(sc->sc_ah); unsigned long mask; - char buf[32]; - ssize_t len; + ssize_t ret; - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; - - buf[len] = '\0'; - if (kstrtoul(buf, 0, &mask)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &mask); + if (ret) + return ret; common->debug_mask = mask; return count; @@ -191,16 +186,11 @@ static ssize_t write_file_ani(struct file *file, struct ath_softc *sc = file->private_data; struct ath_common *common = ath9k_hw_common(sc->sc_ah); unsigned long ani; - char buf[32]; - ssize_t len; + ssize_t ret; - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; - - buf[len] = '\0'; - if (kstrtoul(buf, 0, &ani)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &ani); + if (ret) + return ret; if (ani > 1) return -EINVAL; @@ 
-248,20 +238,15 @@ static ssize_t write_file_bt_ant_diversity(struct file *file, struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath9k_hw_capabilities *pCap = &sc->sc_ah->caps; unsigned long bt_ant_diversity; - char buf[32]; - ssize_t len; + ssize_t ret; - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ret = kstrtoul_from_user(user_buf, count, 0, &bt_ant_diversity); + if (ret) + return ret; if (!(pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV)) goto exit; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &bt_ant_diversity)) - return -EINVAL; - common->bt_ant_diversity = !!bt_ant_diversity; ath9k_ps_wakeup(sc); ath9k_hw_set_bt_ant_diversity(sc->sc_ah, common->bt_ant_diversity); @@ -792,16 +777,11 @@ static ssize_t write_file_reset(struct file *file, struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); unsigned long val; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ssize_t ret; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val != 1) return -EINVAL; @@ -886,16 +866,11 @@ static ssize_t write_file_regidx(struct file *file, const char __user *user_buf, { struct ath_softc *sc = file->private_data; unsigned long regidx; - char buf[32]; + ssize_t ret; - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; - - buf[len] = '\0'; - if (kstrtoul(buf, 0, &regidx)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &regidx); + if (ret) + return ret; sc->debug.regidx = regidx; return count; @@ -931,16 +906,11 @@ static ssize_t write_file_regval(struct file *file, const char __user *user_buf, struct ath_softc *sc = file->private_data; struct ath_hw *ah = sc->sc_ah; unsigned long regval; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ssize_t ret; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &regval)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &regval); + if (ret) + return ret; ath9k_ps_wakeup(sc); REG_WRITE_D(ah, sc->debug.regidx, regval); @@ -1128,16 +1098,11 @@ static ssize_t write_file_wow(struct file *file, const char __user *user_buf, { struct ath_softc *sc = file->private_data; unsigned long val; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ssize_t ret; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val != 1) return -EINVAL; @@ -1191,17 +1156,12 @@ static ssize_t write_file_tpc(struct file *file, const char __user *user_buf, struct ath_softc *sc = file->private_data; struct ath_hw *ah = sc->sc_ah; unsigned long val; - char buf[32]; - ssize_t len; + ssize_t ret; bool tpc_enabled; - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; - - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val > 1) return -EINVAL; @@ -1333,7 +1293,7 @@ void ath9k_get_et_strings(struct ieee80211_hw *hw, u32 sset, u8 *data) { if (sset == ETH_SS_STATS) - memcpy(data, *ath9k_gstrings_stats, + memcpy(data, ath9k_gstrings_stats, sizeof(ath9k_gstrings_stats)); } @@ -1420,7 +1380,7 
@@ int ath9k_init_debug(struct ath_hw *ah) sc->debug.debugfs_phy = debugfs_create_dir("ath9k", sc->hw->wiphy->debugfsdir); - if (!sc->debug.debugfs_phy) + if (IS_ERR(sc->debug.debugfs_phy)) return -ENOMEM; #ifdef CONFIG_ATH_DEBUG diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c index 2a79c2fa8415..8e18e9b4ef48 100644 --- a/drivers/net/wireless/ath/ath9k/dfs_debug.c +++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c @@ -99,17 +99,11 @@ static ssize_t write_file_dfs(struct file *file, const char __user *user_buf, { struct ath_softc *sc = file->private_data; unsigned long val; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; - - buf[len] = '\0'; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ssize_t ret; + ret = kstrtoul_from_user(user_buf, count, 0, &val); + if (ret) + return ret; if (val == DFS_STATS_RESET_MAGIC) memset(&sc->debug.stats.dfs_stats, 0, sizeof(sc->debug.stats.dfs_stats)); diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c index 3caa149b1013..fd5312c2a7e3 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -572,8 +572,7 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah, } for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) { - bool isHt40CtlMode = - (pCtlMode[ctlMode] == CTL_2GHT40) ? true : false; + bool isHt40CtlMode = pCtlMode[ctlMode] == CTL_2GHT40; if (isHt40CtlMode) freq = centers.synth_center; diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 27ff1ca2631f..90cfe39aa433 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -1432,7 +1432,7 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface) { struct usb_device *udev = interface_to_usbdev(interface); struct hif_device_usb *hif_dev = usb_get_intfdata(interface); - bool unplugged = (udev->state == USB_STATE_NOTATTACHED) ? 
true : false; + bool unplugged = udev->state == USB_STATE_NOTATTACHED; if (!hif_dev) return; @@ -1481,31 +1481,31 @@ static int ath9k_hif_usb_resume(struct usb_interface *interface) { struct hif_device_usb *hif_dev = usb_get_intfdata(interface); struct htc_target *htc_handle = hif_dev->htc_handle; - int ret; const struct firmware *fw; + int ret; ret = ath9k_hif_usb_alloc_urbs(hif_dev); if (ret) return ret; - if (hif_dev->flags & HIF_USB_READY) { - /* request cached firmware during suspend/resume cycle */ - ret = request_firmware(&fw, hif_dev->fw_name, - &hif_dev->udev->dev); - if (ret) - goto fail_resume; - - hif_dev->fw_data = fw->data; - hif_dev->fw_size = fw->size; - ret = ath9k_hif_usb_download_fw(hif_dev); - release_firmware(fw); - if (ret) - goto fail_resume; - } else { - ath9k_hif_usb_dealloc_urbs(hif_dev); - return -EIO; + if (!(hif_dev->flags & HIF_USB_READY)) { + ret = -EIO; + goto fail_resume; } + /* request cached firmware during suspend/resume cycle */ + ret = request_firmware(&fw, hif_dev->fw_name, + &hif_dev->udev->dev); + if (ret) + goto fail_resume; + + hif_dev->fw_data = fw->data; + hif_dev->fw_size = fw->size; + ret = ath9k_hif_usb_download_fw(hif_dev); + release_firmware(fw); + if (ret) + goto fail_resume; + mdelay(100); ret = ath9k_htc_resume(htc_handle); diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h index 5985aa15ca93..b3e66b0485a5 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.h +++ b/drivers/net/wireless/ath/ath9k/hif_usb.h @@ -126,7 +126,7 @@ struct hif_device_usb { struct usb_anchor reg_in_submitted; struct usb_anchor mgmt_submitted; struct sk_buff *remain_skb; - char fw_name[32]; + char fw_name[64]; int fw_minor_index; int rx_remain_len; int rx_pkt_len; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c index b3ed65e5c4da..278ddc713fdc 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c @@ -375,16 +375,11 @@ static ssize_t write_file_debug(struct file *file, const char __user *user_buf, struct ath9k_htc_priv *priv = file->private_data; struct ath_common *common = ath9k_hw_common(priv->ah); unsigned long mask; - char buf[32]; - ssize_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; + ssize_t ret; - buf[len] = '\0'; - if (kstrtoul(buf, 0, &mask)) - return -EINVAL; + ret = kstrtoul_from_user(user_buf, count, 0, &mask); + if (ret) + return ret; common->debug_mask = mask; return count; @@ -428,7 +423,7 @@ void ath9k_htc_get_et_strings(struct ieee80211_hw *hw, u32 sset, u8 *data) { if (sset == ETH_SS_STATS) - memcpy(data, *ath9k_htc_gstrings_stats, + memcpy(data, ath9k_htc_gstrings_stats, sizeof(ath9k_htc_gstrings_stats)); } @@ -491,7 +486,7 @@ int ath9k_htc_init_debug(struct ath_hw *ah) priv->debug.debugfs_phy = debugfs_create_dir(KBUILD_MODNAME, priv->hw->wiphy->debugfsdir); - if (!priv->debug.debugfs_phy) + if (IS_ERR(priv->debug.debugfs_phy)) return -ENOMEM; ath9k_cmn_spectral_init_debug(&priv->spec_priv, priv->debug.debugfs_phy); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 51766de5ec3b..9a9b5212051a 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -719,7 +719,7 @@ static int ath9k_htc_tx_aggr_oper(struct ath9k_htc_priv *priv, aggr.sta_index = ista->index; aggr.tidno = tid & 0xf; - aggr.aggr_enable = 
(action == IEEE80211_AMPDU_TX_START) ? true : false; + aggr.aggr_enable = action == IEEE80211_AMPDU_TX_START; WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr); if (ret) @@ -1264,7 +1264,6 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw, u32 rfilt; mutex_lock(&priv->mutex); - changed_flags &= SUPPORTED_FILTERS; *total_flags &= SUPPORTED_FILTERS; if (test_bit(ATH_OP_INVALID, &common->op_flags)) { diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 672789e3c55d..800177021baf 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -523,7 +523,7 @@ send_mac80211: } /* Send status to mac80211 */ - ieee80211_tx_status(priv->hw, skb); + ieee80211_tx_status_skb(priv->hw, skb); } static inline void ath9k_htc_tx_drainq(struct ath9k_htc_priv *priv, diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h index af44b33814dd..f03d792732da 100644 --- a/drivers/net/wireless/ath/ath9k/mac.h +++ b/drivers/net/wireless/ath/ath9k/mac.h @@ -115,8 +115,10 @@ struct ath_tx_status { u8 qid; u16 desc_id; u8 tid; - u32 ba_low; - u32 ba_high; + struct_group(ba, + u32 ba_low; + u32 ba_high; + ); u32 evm0; u32 evm1; u32 evm2; diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 6360d3356e25..1494feedb27d 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1571,7 +1571,6 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw, struct ath_chanctx *ctx; u32 rfilt; - changed_flags &= SUPPORTED_FILTERS; *total_flags &= SUPPORTED_FILTERS; spin_lock_bh(&sc->chan_lock); diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index a09f9d223f3d..0633589b85c2 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -988,8 +988,8 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) sc->sc_ah->msi_reg = 0; ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name)); - wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n", - hw_name, (unsigned long)sc->mem, pdev->irq); + wiphy_info(hw->wiphy, "%s mem=0x%p, irq=%d\n", + hw_name, sc->mem, pdev->irq); return 0; diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c index 8a996ed9a3be..f2144fd39093 100644 --- a/drivers/net/wireless/ath/ath9k/tx99.c +++ b/drivers/net/wireless/ath/ath9k/tx99.c @@ -172,9 +172,8 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf, { struct ath_softc *sc = file->private_data; struct ath_common *common = ath9k_hw_common(sc->sc_ah); - char buf[32]; bool start; - ssize_t len; + ssize_t ret; int r; if (count < 1) @@ -183,14 +182,9 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf, if (sc->cur_chan->nvifs > 1) return -EOPNOTSUPP; - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; - - buf[len] = '\0'; - - if (kstrtobool(buf, &start)) - return -EINVAL; + ret = kstrtobool_from_user(user_buf, count, &start); + if (ret) + return ret; mutex_lock(&sc->mutex); diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c index d652c647d56b..1476b42b52a9 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.c +++ b/drivers/net/wireless/ath/ath9k/wmi.c @@ -242,10 +242,10 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb, 
spin_unlock_irqrestore(&wmi->wmi_lock, flags); goto free_skb; } - spin_unlock_irqrestore(&wmi->wmi_lock, flags); /* WMI command response */ ath9k_wmi_rsp_callback(wmi, skb); + spin_unlock_irqrestore(&wmi->wmi_lock, flags); free_skb: kfree_skb(skb); @@ -283,7 +283,8 @@ int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi, static int ath9k_wmi_cmd_issue(struct wmi *wmi, struct sk_buff *skb, - enum wmi_cmd_id cmd, u16 len) + enum wmi_cmd_id cmd, u16 len, + u8 *rsp_buf, u32 rsp_len) { struct wmi_cmd_hdr *hdr; unsigned long flags; @@ -293,6 +294,11 @@ static int ath9k_wmi_cmd_issue(struct wmi *wmi, hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id); spin_lock_irqsave(&wmi->wmi_lock, flags); + + /* record the rsp buffer and length */ + wmi->cmd_rsp_buf = rsp_buf; + wmi->cmd_rsp_len = rsp_len; + wmi->last_seq_id = wmi->tx_seq_id; spin_unlock_irqrestore(&wmi->wmi_lock, flags); @@ -308,8 +314,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, struct ath_common *common = ath9k_hw_common(ah); u16 headroom = sizeof(struct htc_frame_hdr) + sizeof(struct wmi_cmd_hdr); + unsigned long time_left, flags; struct sk_buff *skb; - unsigned long time_left; int ret = 0; if (ah->ah_flags & AH_UNPLUGGED) @@ -333,11 +339,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, goto out; } - /* record the rsp buffer and length */ - wmi->cmd_rsp_buf = rsp_buf; - wmi->cmd_rsp_len = rsp_len; - - ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len); + ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len, rsp_buf, rsp_len); if (ret) goto out; @@ -345,7 +347,9 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, if (!time_left) { ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n", wmi_cmd_to_name(cmd_id)); + spin_lock_irqsave(&wmi->wmi_lock, flags); wmi->last_seq_id = 0; + spin_unlock_irqrestore(&wmi->wmi_lock, flags); mutex_unlock(&wmi->op_mutex); return -ETIMEDOUT; } diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index f6f2ab7a63ff..f15684379b03 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -94,7 +94,7 @@ static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) if (info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS | IEEE80211_TX_STATUS_EOSP)) { - ieee80211_tx_status(hw, skb); + ieee80211_tx_status_skb(hw, skb); return; } @@ -466,9 +466,11 @@ static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf, *nframes = 0; isaggr = bf_isaggr(bf); + memset(ba, 0, WME_BA_BMP_SIZE >> 3); + if (isaggr) { seq_st = ts->ts_seqnum; - memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3); + memcpy(ba, &ts->ba, WME_BA_BMP_SIZE >> 3); } while (bf) { @@ -551,7 +553,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, if (isaggr && txok) { if (ts->ts_flags & ATH9K_TX_BA) { seq_st = ts->ts_seqnum; - memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3); + memcpy(ba, &ts->ba, WME_BA_BMP_SIZE >> 3); } else { /* * AR5416 can become deaf/mute when BA diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index e4eb666c6eea..c4edf8355941 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ b/drivers/net/wireless/ath/carl9170/usb.c @@ -178,7 +178,7 @@ static void carl9170_usb_tx_data_complete(struct urb *urb) switch (urb->status) { /* everything is fine */ case 0: - carl9170_tx_callback(ar, (void *)urb->context); + carl9170_tx_callback(ar, urb->context); break; /* disconnect */ @@ -369,7 +369,7 @@ void carl9170_usb_handle_tx_err(struct 
ar9170 *ar) struct urb *urb; while ((urb = usb_get_from_anchor(&ar->tx_err))) { - struct sk_buff *skb = (void *)urb->context; + struct sk_buff *skb = urb->context; carl9170_tx_drop(ar, skb); carl9170_tx_callback(ar, skb); @@ -397,7 +397,7 @@ static void carl9170_usb_tasklet(struct tasklet_struct *t) static void carl9170_usb_rx_complete(struct urb *urb) { - struct ar9170 *ar = (struct ar9170 *)urb->context; + struct ar9170 *ar = urb->context; int err; if (WARN_ON_ONCE(!ar)) @@ -559,7 +559,7 @@ static int carl9170_usb_flush(struct ar9170 *ar) int ret, err = 0; while ((urb = usb_get_from_anchor(&ar->tx_wait))) { - struct sk_buff *skb = (void *)urb->context; + struct sk_buff *skb = urb->context; carl9170_tx_drop(ar, skb); carl9170_tx_callback(ar, skb); usb_free_urb(urb); @@ -668,7 +668,7 @@ int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd, memcpy(ar->cmd.data, payload, plen); spin_lock_bh(&ar->cmd_lock); - ar->readbuf = (u8 *)out; + ar->readbuf = out; ar->readlen = outlen; spin_unlock_bh(&ar->cmd_lock); diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c index 27f4d74a41c8..700da9f4531e 100644 --- a/drivers/net/wireless/ath/dfs_pattern_detector.c +++ b/drivers/net/wireless/ath/dfs_pattern_detector.c @@ -161,7 +161,7 @@ get_dfs_domain_radar_types(enum nl80211_dfs_regions region) struct channel_detector { struct list_head head; u16 freq; - struct pri_detector **detectors; + struct pri_detector *detectors[]; }; /* channel_detector_reset() - reset detector lines for a given channel */ @@ -183,14 +183,13 @@ static void channel_detector_exit(struct dfs_pattern_detector *dpd, if (cd == NULL) return; list_del(&cd->head); - if (cd->detectors) { - for (i = 0; i < dpd->num_radar_types; i++) { - struct pri_detector *de = cd->detectors[i]; - if (de != NULL) - de->exit(de); - } + + for (i = 0; i < dpd->num_radar_types; i++) { + struct pri_detector *de = cd->detectors[i]; + if (de != NULL) + de->exit(de); } - kfree(cd->detectors); + kfree(cd); } @@ -200,16 +199,12 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq) u32 i; struct channel_detector *cd; - cd = kmalloc(sizeof(*cd), GFP_ATOMIC); + cd = kzalloc(struct_size(cd, detectors, dpd->num_radar_types), GFP_ATOMIC); if (cd == NULL) goto fail; INIT_LIST_HEAD(&cd->head); cd->freq = freq; - cd->detectors = kmalloc_array(dpd->num_radar_types, - sizeof(*cd->detectors), GFP_ATOMIC); - if (cd->detectors == NULL) - goto fail; for (i = 0; i < dpd->num_radar_types; i++) { const struct radar_detector_specs *rs = &dpd->radar_spec[i]; diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c index b7b61d4f02ba..21a93fec284d 100644 --- a/drivers/net/wireless/ath/key.c +++ b/drivers/net/wireless/ath/key.c @@ -104,7 +104,7 @@ bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac) * Not setting this bit allows the hardware to use the key * for multicast frame decryption. 
*/ - if (mac[0] & 0x01) + if (is_multicast_ether_addr(mac)) unicast_flag = 0; macLo = get_unaligned_le32(mac); diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index 9013f056eecb..d405a4c34059 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -180,7 +180,7 @@ static int wcn36xx_dxe_init_descs(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *wc if (!wcn_ch->cpu_addr) return -ENOMEM; - cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr; + cur_dxe = wcn_ch->cpu_addr; cur_ctl = wcn_ch->head_blk_ctl; for (i = 0; i < wcn_ch->desc_num; i++) { @@ -453,7 +453,7 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch) static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev) { - struct wcn36xx *wcn = (struct wcn36xx *)dev; + struct wcn36xx *wcn = dev; int int_src, int_reason; wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src); @@ -541,7 +541,7 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev) static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev) { - struct wcn36xx *wcn = (struct wcn36xx *)dev; + struct wcn36xx *wcn = dev; wcn36xx_dxe_rx_frame(wcn); diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 8dbd115a393c..2bd1163177f0 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -19,9 +19,8 @@ #include <linux/module.h> #include <linux/firmware.h> #include <linux/platform_device.h> +#include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_device.h> -#include <linux/of_irq.h> #include <linux/rpmsg.h> #include <linux/soc/qcom/smem_state.h> #include <linux/soc/qcom/wcnss_ctrl.h> diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 17e1919d1cd8..2cf86fc3f8fe 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -576,7 +576,7 @@ static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len) if (len < sizeof(*rsp)) return -EIO; - rsp = (struct wcn36xx_hal_mac_start_rsp_msg *)buf; + rsp = buf; if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->start_rsp_params.status) return -EIO; @@ -1025,7 +1025,7 @@ static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len) ret = wcn36xx_smd_rsp_status_check(buf, len); if (ret) return ret; - rsp = (struct wcn36xx_hal_switch_channel_rsp_msg *)buf; + rsp = buf; wcn36xx_dbg(WCN36XX_DBG_HAL, "channel switched to: %d, status: %d\n", rsp->channel_number, rsp->status); return ret; @@ -1072,7 +1072,7 @@ static int wcn36xx_smd_process_ptt_msg_rsp(void *buf, size_t len, if (ret) return ret; - rsp = (struct wcn36xx_hal_process_ptt_msg_rsp_msg *)buf; + rsp = buf; wcn36xx_dbg(WCN36XX_DBG_HAL, "process ptt msg responded with length %d\n", rsp->header.len); @@ -1131,7 +1131,7 @@ static int wcn36xx_smd_update_scan_params_rsp(void *buf, size_t len) { struct wcn36xx_hal_update_scan_params_resp *rsp; - rsp = (struct wcn36xx_hal_update_scan_params_resp *)buf; + rsp = buf; /* Remove the PNO version bit */ rsp->status &= (~(WCN36XX_FW_MSG_PNO_VERSION_MASK)); @@ -1198,7 +1198,7 @@ static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn, if (len < sizeof(*rsp)) return -EINVAL; - rsp = (struct wcn36xx_hal_add_sta_self_rsp_msg *)buf; + rsp = buf; if (rsp->status != WCN36XX_FW_MSG_RESULT_SUCCESS) { wcn36xx_warn("hal add sta self failure: %d\n", @@ -1316,7 +1316,7 @@ static int wcn36xx_smd_join_rsp(void *buf, size_t len) if 
(wcn36xx_smd_rsp_status_check(buf, len)) return -EIO; - rsp = (struct wcn36xx_hal_join_rsp_msg *)buf; + rsp = buf; wcn36xx_dbg(WCN36XX_DBG_HAL, "hal rsp join status %d tx_mgmt_power %d\n", @@ -1481,7 +1481,7 @@ static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn, if (len < sizeof(*rsp)) return -EINVAL; - rsp = (struct wcn36xx_hal_config_sta_rsp_msg *)buf; + rsp = buf; params = &rsp->params; if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) { @@ -1849,7 +1849,7 @@ static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn, if (len < sizeof(*rsp)) return -EINVAL; - rsp = (struct wcn36xx_hal_config_bss_rsp_msg *)buf; + rsp = buf; params = &rsp->bss_rsp_params; if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) { @@ -2476,7 +2476,7 @@ static int wcn36xx_smd_add_ba_session_rsp(void *buf, int len, u8 *session) if (len < sizeof(*rsp)) return -EINVAL; - rsp = (struct wcn36xx_hal_add_ba_session_rsp_msg *)buf; + rsp = buf; if (rsp->status != WCN36XX_FW_MSG_RESULT_SUCCESS) return rsp->status; @@ -2654,7 +2654,7 @@ static int wcn36xx_smd_trigger_ba_rsp(void *buf, int len, struct add_ba_info *ba if (len < sizeof(*rsp)) return -EINVAL; - rsp = (struct wcn36xx_hal_trigger_ba_rsp_msg *) buf; + rsp = buf; if (rsp->candidate_cnt < 1) return rsp->status ? rsp->status : -EINVAL; diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h index cf15cde2a364..2c1ed9e570bf 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.h +++ b/drivers/net/wireless/ath/wcn36xx/smd.h @@ -47,7 +47,7 @@ struct wcn36xx_fw_msg_status_rsp { struct wcn36xx_hal_ind_msg { struct list_head list; size_t msg_len; - u8 msg[]; + u8 msg[] __counted_by(msg_len); }; struct wcn36xx; diff --git a/drivers/net/wireless/ath/wcn36xx/testmode.c b/drivers/net/wireless/ath/wcn36xx/testmode.c index 7ae14b4d2d0e..e5142c052985 100644 --- a/drivers/net/wireless/ath/wcn36xx/testmode.c +++ b/drivers/net/wireless/ath/wcn36xx/testmode.c @@ -53,7 +53,7 @@ static int wcn36xx_tm_cmd_ptt(struct wcn36xx *wcn, struct ieee80211_vif *vif, buf = nla_data(tb[WCN36XX_TM_ATTR_DATA]); buf_len = nla_len(tb[WCN36XX_TM_ATTR_DATA]); - msg = (struct ftm_rsp_msg *)buf; + msg = buf; wcn36xx_dbg(WCN36XX_DBG_TESTMODE, "testmode cmd wmi msg_id 0x%04X msg_len %d buf %pK buf_len %d\n", diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 40f9a7ef8980..dbe4b3478f03 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -2082,11 +2082,12 @@ void wil_cfg80211_ap_recovery(struct wil6210_priv *wil) static int wil_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev, - struct cfg80211_beacon_data *bcon) + struct cfg80211_ap_update *params) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); struct wireless_dev *wdev = ndev->ieee80211_ptr; struct wil6210_vif *vif = ndev_to_vif(ndev); + struct cfg80211_beacon_data *bcon = ¶ms->beacon; int rc; u32 privacy = 0; diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 237cbd5c5060..f29ac6de7139 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -666,7 +666,7 @@ static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb) struct wil_tid_crypto_rx *c = mc ? 
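/*
 * The wcn36xx smd.h hunk above annotates the flexible array with
 * __counted_by(msg_len). On newer compilers this tells the optimizer
 * and FORTIFY_SOURCE which field bounds the array, so out-of-bounds
 * indexing and copies can be caught at run time; on older toolchains
 * the macro expands to nothing. Sketch of the contract:
 */
struct ind_msg {
	size_t msg_len;
	u8 msg[] __counted_by(msg_len);	/* bounded by ->msg_len */
};

/* msg_len must hold the true element count *before* msg[] is written
 * or indexed, otherwise the instrumentation (correctly) traps. */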
&s->group_crypto_rx : &s->tid_crypto_rx[tid]; struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id]; - const u8 *pn = (u8 *)&d->mac.pn_15_0; + const u8 *pn = (u8 *)&d->mac.pn; if (!cc->key_set) { wil_err_ratelimited(wil, diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index 1ae1bec1b97f..689f68d89a44 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -343,8 +343,10 @@ struct vring_rx_mac { u32 d0; u32 d1; u16 w4; - u16 pn_15_0; - u32 pn_47_16; + struct_group_attr(pn, __packed, + u16 pn_15_0; + u32 pn_47_16; + ); } __packed; /* Rx descriptor - DMA part diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c index 201c8c35e0c9..1ba1f21ebea2 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c @@ -548,7 +548,7 @@ static int wil_rx_crypto_check_edma(struct wil6210_priv *wil, s = &wil->sta[cid]; c = mc ? &s->group_crypto_rx : &s->tid_crypto_rx[tid]; cc = &c->key_id[key_id]; - pn = (u8 *)&st->ext.pn_15_0; + pn = (u8 *)&st->ext.pn; if (!cc->key_set) { wil_err_ratelimited(wil, diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h index c736f7413a35..ee90e225bb05 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.h +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h @@ -330,8 +330,10 @@ struct wil_rx_status_extension { u32 d0; u32 d1; __le16 seq_num; /* only lower 12 bits */ - u16 pn_15_0; - u32 pn_47_16; + struct_group_attr(pn, __packed, + u16 pn_15_0; + u32 pn_47_16; + ); } __packed; struct wil_rx_status_extended { diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 6a5976a2944c..6fdb77d4c59e 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -870,7 +870,6 @@ static void wmi_evt_rx_mgmt(struct wil6210_vif *vif, int id, void *d, int len) struct cfg80211_bss *bss; struct cfg80211_inform_bss bss_data = { .chan = channel, - .scan_width = NL80211_BSS_CHAN_WIDTH_20, .signal = signal, .boottime_ns = ktime_to_ns(ktime_get_boottime()), }; @@ -1389,7 +1388,6 @@ wmi_evt_sched_scan_result(struct wil6210_vif *vif, int id, void *d, int len) u32 d_len; struct cfg80211_bss *bss; struct cfg80211_inform_bss bss_data = { - .scan_width = NL80211_BSS_CHAN_WIDTH_20, .boottime_ns = ktime_to_ns(ktime_get_boottime()), }; diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c index 009bca34ece3..447b51cff8f9 100644 --- a/drivers/net/wireless/atmel/at76c50x-usb.c +++ b/drivers/net/wireless/atmel/at76c50x-usb.c @@ -10,7 +10,7 @@ * Copyright (c) 2007 Kalle Valo <kalle.valo@iki.fi> * Copyright (c) 2010 Sebastian Smolorz <sesmo@gmx.net> * - * This file is part of the Berlios driver for WLAN USB devices based on the + * This file is part of the Berlios driver for USB WLAN devices based on the * Atmel AT76C503A/505/505A. 
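/*
 * The wil6210 txrx.h/txrx_edma.h hunks above wrap the split 48-bit
 * packet number in struct_group_attr(), so `&mac->pn` names all six
 * bytes as one object instead of taking the address of the low half
 * and reading past it. struct_group() builds an anonymous union of
 * the members and a tagged mirror of them; rough expansion, for
 * illustration only:
 */
struct rx_mac_pn {
	union {
		struct {
			u16 pn_15_0;
			u32 pn_47_16;
		} __packed;		/* members stay addressable */
		struct {
			u16 pn_15_0;
			u32 pn_47_16;
		} __packed pn;		/* ...and the whole span has a name */
	};
} __packed;

/* memcpy(dst, &mac->pn, sizeof(mac->pn)) now copies exactly six bytes
 * without tripping FORTIFY's field-spanning-write checks. */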
* * Some iw_handler code was taken from airo.c, (C) 1999 Benjamin Reed @@ -143,7 +143,7 @@ static const struct usb_device_id dev_table[] = { { USB_DEVICE(0x0cde, 0x0001), USB_DEVICE_DATA(BOARD_503_ISL3861) }, /* Dynalink/Askey WLL013 (intersil) */ { USB_DEVICE(0x069a, 0x0320), USB_DEVICE_DATA(BOARD_503_ISL3861) }, - /* EZ connect 11Mpbs Wireless USB Adapter SMC2662W v1 */ + /* EZ connect 11Mpbs USB Wireless Adapter SMC2662W v1 */ { USB_DEVICE(0x0d5c, 0xa001), USB_DEVICE_DATA(BOARD_503_ISL3861) }, /* BenQ AWL300 */ { USB_DEVICE(0x04a5, 0x9000), USB_DEVICE_DATA(BOARD_503_ISL3861) }, @@ -195,7 +195,7 @@ static const struct usb_device_id dev_table[] = { { USB_DEVICE(0x04a5, 0x9001), USB_DEVICE_DATA(BOARD_503) }, /* 3Com 3CRSHEW696 */ { USB_DEVICE(0x0506, 0x0a01), USB_DEVICE_DATA(BOARD_503) }, - /* Siemens Santis ADSL WLAN USB adapter WLL 013 */ + /* Siemens Santis ADSL USB WLAN adapter WLL 013 */ { USB_DEVICE(0x0681, 0x001b), USB_DEVICE_DATA(BOARD_503) }, /* Belkin F5D6050, version 2 */ { USB_DEVICE(0x050d, 0x0050), USB_DEVICE_DATA(BOARD_503) }, @@ -238,7 +238,7 @@ static const struct usb_device_id dev_table[] = { { USB_DEVICE(0x1915, 0x2233), USB_DEVICE_DATA(BOARD_505_2958) }, /* Xterasys XN-2122B, IBlitzz BWU613B/BWU613SB */ { USB_DEVICE(0x12fd, 0x1001), USB_DEVICE_DATA(BOARD_505_2958) }, - /* Corega WLAN USB Stick 11 */ + /* Corega USB WLAN Stick 11 */ { USB_DEVICE(0x07aa, 0x7613), USB_DEVICE_DATA(BOARD_505_2958) }, /* Microstar MSI Box MS6978 */ { USB_DEVICE(0x0db0, 0x1020), USB_DEVICE_DATA(BOARD_505_2958) }, diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c index 7c2d1c588156..461dce21de2b 100644 --- a/drivers/net/wireless/atmel/atmel.c +++ b/drivers/net/wireless/atmel/atmel.c @@ -571,7 +571,6 @@ static const struct { { REG_DOMAIN_ISRAEL, 3, 9, "Israel"} }; static void build_wpa_mib(struct atmel_private *priv); -static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static void atmel_copy_to_card(struct net_device *dev, u16 dest, const unsigned char *src, u16 len); static void atmel_copy_to_host(struct net_device *dev, unsigned char *dest, @@ -1487,7 +1486,6 @@ static const struct net_device_ops atmel_netdev_ops = { .ndo_stop = atmel_close, .ndo_set_mac_address = atmel_set_mac_address, .ndo_start_xmit = start_tx, - .ndo_do_ioctl = atmel_ioctl, .ndo_validate_addr = eth_validate_addr, }; @@ -2616,76 +2614,6 @@ static const struct iw_handler_def atmel_handler_def = { .get_wireless_stats = atmel_get_wireless_stats }; -static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - int i, rc = 0; - struct atmel_private *priv = netdev_priv(dev); - struct atmel_priv_ioctl com; - struct iwreq *wrq = (struct iwreq *) rq; - unsigned char *new_firmware; - char domain[REGDOMAINSZ + 1]; - - switch (cmd) { - case ATMELIDIFC: - wrq->u.param.value = ATMELMAGIC; - break; - - case ATMELFWL: - if (copy_from_user(&com, rq->ifr_data, sizeof(com))) { - rc = -EFAULT; - break; - } - - if (!capable(CAP_NET_ADMIN)) { - rc = -EPERM; - break; - } - - new_firmware = memdup_user(com.data, com.len); - if (IS_ERR(new_firmware)) { - rc = PTR_ERR(new_firmware); - break; - } - - kfree(priv->firmware); - - priv->firmware = new_firmware; - priv->firmware_length = com.len; - strncpy(priv->firmware_id, com.id, 31); - priv->firmware_id[31] = '\0'; - break; - - case ATMELRD: - if (copy_from_user(domain, rq->ifr_data, REGDOMAINSZ)) { - rc = -EFAULT; - break; - } - - if (!capable(CAP_NET_ADMIN)) { - rc = -EPERM; - break; - } - - domain[REGDOMAINSZ] = 0; - rc = 
-EINVAL; - for (i = 0; i < ARRAY_SIZE(channel_table); i++) { - if (!strcasecmp(channel_table[i].name, domain)) { - priv->config_reg_domain = channel_table[i].reg_domain; - rc = 0; - } - } - - if (rc == 0 && priv->station_state != STATION_STATE_DOWN) - rc = atmel_open(dev); - break; - - default: - rc = -EOPNOTSUPP; - } - - return rc; -} - struct auth_body { __le16 alg; __le16 trans_seq; diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c index 9a7c62bd5e43..760d1a28edc6 100644 --- a/drivers/net/wireless/broadcom/b43/dma.c +++ b/drivers/net/wireless/broadcom/b43/dma.c @@ -1531,9 +1531,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, ring->nr_failed_tx_packets++; ring->nr_total_packet_tries += status->frame_count; #endif /* DEBUG */ - ieee80211_tx_status(dev->wl->hw, meta->skb); + ieee80211_tx_status_skb(dev->wl->hw, meta->skb); - /* skb will be freed by ieee80211_tx_status(). + /* skb will be freed by ieee80211_tx_status_skb(). * Poison our pointer. */ meta->skb = B43_DMA_PTR_POISON; } else { diff --git a/drivers/net/wireless/broadcom/b43/pio.c b/drivers/net/wireless/broadcom/b43/pio.c index 8c28a9250cd1..0cf70fdb60a6 100644 --- a/drivers/net/wireless/broadcom/b43/pio.c +++ b/drivers/net/wireless/broadcom/b43/pio.c @@ -582,7 +582,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev, q->buffer_used -= total_len; q->free_packet_slots += 1; - ieee80211_tx_status(dev->wl->hw, pack->skb); + ieee80211_tx_status_skb(dev->wl->hw, pack->skb); pack->skb = NULL; list_add(&pack->list, &q->packets_list); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 2a90bb24ba77..667462369a32 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -3367,7 +3367,6 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, freq = ieee80211_channel_to_frequency(channel, band); bss_data.chan = ieee80211_get_channel(wiphy, freq); - bss_data.scan_width = NL80211_BSS_CHAN_WIDTH_20; bss_data.boottime_ns = ktime_to_ns(ktime_get_boottime()); notify_capability = le16_to_cpu(bi->capability); @@ -5416,13 +5415,13 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev, static s32 brcmf_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev, - struct cfg80211_beacon_data *info) + struct cfg80211_ap_update *info) { struct brcmf_if *ifp = netdev_priv(ndev); brcmf_dbg(TRACE, "Enter\n"); - return brcmf_config_ap_mgmt_ie(ifp->vif, info); + return brcmf_config_ap_mgmt_ie(ifp->vif, &info->beacon); } static int diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index 09d2f2dc2b46..83f8ed7d00f9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -19,7 +19,7 @@ #define BRCMF_FW_MAX_NVRAM_SIZE 64000 #define BRCMF_FW_NVRAM_DEVPATH_LEN 19 /* devpath0=pcie/1/4/ */ -#define BRCMF_FW_NVRAM_PCIEDEV_LEN 10 /* pcie/1/4/ + \0 */ +#define BRCMF_FW_NVRAM_PCIEDEV_LEN 20 /* pcie/1/4/ + \0 */ #define BRCMF_FW_DEFAULT_BOARDREV "boardrev=0xff" #define BRCMF_FW_MACADDR_FMT "macaddr=%pM" #define BRCMF_FW_MACADDR_LEN (7 + ETH_ALEN * 3) @@ -238,9 +238,9 @@ static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr, u16 bus_nr) { /* Device path with a leading '=' key-value separator */ - char 
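/*
 * The two change_beacon hunks (wil6210 earlier, brcmfmac above) track
 * a cfg80211 interface change: the callback now receives a wrapper
 * struct cfg80211_ap_update, and the old beacon payload moves to its
 * ->beacon member. The adaptation is mechanical; a sketch, with
 * drv_apply_beacon() standing in for each driver's existing consumer:
 */
static int drv_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
			     struct cfg80211_ap_update *params)
{
	struct cfg80211_beacon_data *bcon = &params->beacon;

	return drv_apply_beacon(ndev, bcon);	/* unchanged downstream */
}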
pci_path[] = "=pci/?/?"; + char pci_path[20]; size_t pci_len; - char pcie_path[] = "=pcie/?/?"; + char pcie_path[20]; size_t pcie_len; u32 i, j; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h index 1266cbaee072..4002d326fd21 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h @@ -69,7 +69,7 @@ struct brcmf_fw_request { u16 bus_nr; u32 n_items; const char *board_types[BRCMF_FW_MAX_BOARD_TYPES]; - struct brcmf_fw_item items[]; + struct brcmf_fw_item items[] __counted_by(n_items); }; struct brcmf_fw_name { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c index dac7eb77799b..68960ae98987 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c @@ -33,7 +33,7 @@ struct brcmf_fweh_queue_item { u8 ifaddr[ETH_ALEN]; struct brcmf_event_msg_be emsg; u32 datalen; - u8 data[]; + u8 data[] __counted_by(datalen); }; /* @@ -418,17 +418,17 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr, datalen + sizeof(*event_packet) > packet_len) return; - event = kzalloc(sizeof(*event) + datalen, gfp); + event = kzalloc(struct_size(event, data, datalen), gfp); if (!event) return; + event->datalen = datalen; event->code = code; event->ifidx = event_packet->msg.ifidx; /* use memcpy to get aligned event message */ memcpy(&event->emsg, &event_packet->msg, sizeof(event->emsg)); memcpy(event->data, data, datalen); - event->datalen = datalen; memcpy(event->ifaddr, event_packet->eth.h_dest, ETH_ALEN); brcmf_fweh_queue_event(fweh, event); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index bece26741d3a..9d248ba1c0b2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h @@ -442,7 +442,12 @@ struct brcmf_scan_params_v2_le { * fixed parameter portion is assumed, otherwise * ssid in the fixed portion is ignored */ - __le16 channel_list[1]; /* list of chanspecs */ + union { + __le16 padding; /* Reserve space for at least 1 entry for abort + * which uses an on stack brcmf_scan_params_v2_le + */ + DECLARE_FLEX_ARRAY(__le16, channel_list); /* chanspecs */ + }; }; struct brcmf_scan_results { @@ -702,7 +707,7 @@ struct brcmf_sta_info_le { struct brcmf_chanspec_list { __le32 count; /* # of entries */ - __le32 element[1]; /* variable length uint32 list */ + __le32 element[]; /* variable length uint32 list */ }; /* @@ -1209,7 +1214,7 @@ struct brcmf_gscan_config { u8 count_of_channel_buckets; u8 retry_threshold; __le16 lost_ap_window; - struct brcmf_gscan_bucket_config bucket[]; + struct brcmf_gscan_bucket_config bucket[] __counted_by(count_of_channel_buckets); }; /** diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.h index 2d08c155c23b..90b6e3982d2c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.h @@ -145,14 +145,6 @@ struct si_pub { struct pci_dev; -struct gpioh_item { - void *arg; - bool level; - void (*handler) (u32 stat, void *arg); - u32 event; - struct gpioh_item *next; -}; - /* misc si info needed by some of the routines */ struct si_info { struct si_pub 
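/*
 * The fweh.c hunk above makes two coupled changes: the allocation is
 * sized with struct_size(), and the event->datalen assignment moves
 * *before* the memcpy() into event->data. With data[] declared
 * __counted_by(datalen), the bound must already hold when the array
 * is written, so the ordering is load-bearing. Condensed from the
 * hunk above:
 */
event = kzalloc(struct_size(event, data, datalen), gfp);
if (!event)
	return;
event->datalen = datalen;		/* establish the bound first */
memcpy(event->data, data, datalen);	/* then fill the bounded array */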
pub; /* back plane public state (must be first) */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c index e24228e60027..e859075db716 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c @@ -476,11 +476,9 @@ static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid) void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid, - u8 ba_wsize, /* negotiated ba window size (in pdu) */ uint max_rx_ampdu_bytes) /* from ht_cap in beacon */ { struct scb_ampdu *scb_ampdu; - struct scb_ampdu_tid_ini *ini; struct ampdu_info *ampdu = wlc->ampdu; struct scb *scb = &wlc->pri_scb; scb_ampdu = &scb->scb_ampdu; @@ -491,10 +489,6 @@ brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid, return; } - ini = &scb_ampdu->ini[tid]; - ini->tid = tid; - ini->scb = scb_ampdu->scb; - ini->ba_wsize = ba_wsize; scb_ampdu->max_rx_ampdu_bytes = max_rx_ampdu_bytes; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c index 0bd4e679a359..543e93ec49d2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c @@ -810,7 +810,6 @@ brcms_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, brcms_c_init_scb(scb); wl->pub->global_ampdu = &(scb->scb_ampdu); - wl->pub->global_ampdu->scb = scb; wl->pub->global_ampdu->max_pdu = 16; /* @@ -831,7 +830,6 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_sta *sta = params->sta; enum ieee80211_ampdu_mlme_action action = params->action; u16 tid = params->tid; - u8 buf_size = params->buf_size; if (WARN_ON(scb->magic != SCB_MAGIC)) return -EIDRM; @@ -863,11 +861,11 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw, /* * BA window size from ADDBA response ('buf_size') defines how * many outstanding MPDUs are allowed for the BA stream by - * recipient and traffic class. 'ampdu_factor' gives maximum - * AMPDU size. + * recipient and traffic class (this is actually unused by the + * rest of the driver). 'ampdu_factor' gives maximum AMPDU size. 
*/ spin_lock_bh(&wl->lock); - brcms_c_ampdu_tx_operational(wl->wlc, tid, buf_size, + brcms_c_ampdu_tx_operational(wl->wlc, tid, (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + sta->deflink.ht_cap.ampdu_factor)) - 1); spin_unlock_bh(&wl->lock); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c index 11b33e78127c..b3663c5ef382 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c @@ -3147,10 +3147,8 @@ void brcms_c_init_scb(struct scb *scb) scb->flags = SCB_WMECAP | SCB_HTCAP; for (i = 0; i < NUMPRIO; i++) { scb->seqnum[i] = 0; - scb->seqctl[i] = 0xFFFF; } - scb->seqctl_nonqos = 0xFFFF; scb->magic = SCB_MAGIC; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_hal.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_hal.h index 2e6a3d454ee8..1efc92fd1671 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_hal.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_hal.h @@ -141,11 +141,6 @@ struct tx_power { u8 target[WL_TX_POWER_RATES]; }; -struct tx_inst_power { - u8 txpwr_est_Pout[2]; /* Latest estimate for 2.4 and 5 Ghz */ - u8 txpwr_est_Pout_gofdm; /* Pwr estimate for 2.4 OFDM */ -}; - struct brcms_chanvec { u8 vec[MAXCHANNEL / NBBY]; }; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/pub.h index 4da38cb4f318..bfc63b2f0537 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/pub.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/pub.h @@ -297,7 +297,7 @@ struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc); void brcms_c_ampdu_flush(struct brcms_c_info *wlc, struct ieee80211_sta *sta, u16 tid); void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid, - u8 ba_wsize, uint max_rx_ampdu_bytes); + uint max_rx_ampdu_bytes); int brcms_c_module_register(struct brcms_pub *pub, const char *name, struct brcms_info *hdl, int (*down_fn)(void *handle)); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/scb.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/scb.h index 3a3d73699f83..d65561227da0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/scb.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/scb.h @@ -36,19 +36,13 @@ /* structure to store per-tid state for the ampdu initiator */ struct scb_ampdu_tid_ini { - u8 tid; /* initiator tid for easy lookup */ /* tx retry count; indexed by seq modulo */ u8 txretry[AMPDU_TX_BA_MAX_WSIZE]; - struct scb *scb; /* backptr for easy lookup */ - u8 ba_wsize; /* negotiated ba window size (in pdu) */ }; struct scb_ampdu { - struct scb *scb; /* back pointer for easy reference */ - u8 mpdu_density; /* mpdu density */ u8 max_pdu; /* max pdus allowed in ampdu */ u8 release; /* # of mpdus released at a time */ - u16 min_len; /* min mpdu len to support the density */ u32 max_rx_ampdu_bytes; /* max ampdu rcv length; 8k, 16k, 32k, 64k */ /* @@ -64,15 +58,7 @@ struct scb_ampdu { struct scb { u32 magic; u32 flags; /* various bit flags as defined below */ - u32 flags2; /* various bit flags2 as defined below */ - u8 state; /* current state bitfield of auth/assoc process */ - u8 ea[ETH_ALEN]; /* station address */ - uint fragresid[NUMPRIO];/* #bytes unused in frag buffer per prio */ - u16 seqctl[NUMPRIO]; /* seqctl of last received frame (for dups) */ - /* seqctl of last received frame (for dups) for non-QoS data and - * management */ - u16 
seqctl_nonqos; u16 seqnum[NUMPRIO];/* WME: driver maintained sw seqnum per priority */ struct scb_ampdu scb_ampdu; /* AMPDU state including per tid info */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h index 2b0df07ced74..12a0df5b4e98 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h @@ -288,15 +288,6 @@ struct tx_status; struct d11rxhdr; struct txpwr_limits; -/* iovar structure */ -struct brcmu_iovar { - const char *name; /* name for lookup and display */ - u16 varid; /* id for switch */ - u16 flags; /* driver-specific flag bits */ - u16 type; /* base type of argument */ - u16 minlen; /* min length for buffer vars */ -}; - /* brcm_msg_level is a bit vector with defs in defs.h */ extern u32 brcm_msg_level; diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index 0812db8936f1..b6636002c7d2 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -317,8 +317,6 @@ static int ipw2100_get_firmware(struct ipw2100_priv *priv, struct ipw2100_fw *fw); static int ipw2100_get_fwversion(struct ipw2100_priv *priv, char *buf, size_t max); -static int ipw2100_get_ucodeversion(struct ipw2100_priv *priv, char *buf, - size_t max); static void ipw2100_release_firmware(struct ipw2100_priv *priv, struct ipw2100_fw *fw); static int ipw2100_ucode_download(struct ipw2100_priv *priv, @@ -5894,17 +5892,14 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct ipw2100_priv *priv = libipw_priv(dev); - char fw_ver[64], ucode_ver[64]; + char fw_ver[64]; strscpy(info->driver, DRV_NAME, sizeof(info->driver)); strscpy(info->version, DRV_VERSION, sizeof(info->version)); ipw2100_get_fwversion(priv, fw_ver, sizeof(fw_ver)); - ipw2100_get_ucodeversion(priv, ucode_ver, sizeof(ucode_ver)); - - snprintf(info->fw_version, sizeof(info->fw_version), "%s:%d:%s", - fw_ver, priv->eeprom_version, ucode_ver); + strscpy(info->fw_version, fw_ver, sizeof(info->fw_version)); strscpy(info->bus_info, pci_name(priv->pci_dev), sizeof(info->bus_info)); } @@ -8406,17 +8401,6 @@ static int ipw2100_get_fwversion(struct ipw2100_priv *priv, char *buf, return tmp; } -static int ipw2100_get_ucodeversion(struct ipw2100_priv *priv, char *buf, - size_t max) -{ - u32 ver; - u32 len = sizeof(ver); - /* microcode version is a 32 bit integer */ - if (ipw2100_get_ordinal(priv, IPW_ORD_UCODE_VERSION, &ver, &len)) - return -EIO; - return snprintf(buf, max, "%08X", ver); -} - /* * On exit, the firmware will have been freed from the fw list */ diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index dfe0f74369e6..eed9ef17bc29 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -1176,23 +1176,20 @@ static ssize_t debug_level_show(struct device_driver *d, char *buf) static ssize_t debug_level_store(struct device_driver *d, const char *buf, size_t count) { - char *p = (char *)buf; - u32 val; + unsigned long val; - if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { - p++; - if (p[0] == 'x' || p[0] == 'X') - p++; - val = simple_strtoul(p, &p, 16); - } else - val = simple_strtoul(p, &p, 10); - if (p == buf) + int result = kstrtoul(buf, 0, &val); + + if (result == -EINVAL) printk(KERN_INFO DRV_NAME ": %s is not in hex or decimal 
form.\n", buf); + else if (result == -ERANGE) + printk(KERN_INFO DRV_NAME + ": %s has overflowed.\n", buf); else ipw_debug_level = val; - return strnlen(buf, count); + return count; } static DRIVER_ATTR_RW(debug_level); @@ -1461,25 +1458,13 @@ static ssize_t scan_age_store(struct device *d, struct device_attribute *attr, { struct ipw_priv *priv = dev_get_drvdata(d); struct net_device *dev = priv->net_dev; - char buffer[] = "00000000"; - unsigned long len = - (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1; - unsigned long val; - char *p = buffer; IPW_DEBUG_INFO("enter\n"); - strncpy(buffer, buf, len); - buffer[len] = 0; + unsigned long val; + int result = kstrtoul(buf, 0, &val); - if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { - p++; - if (p[0] == 'x' || p[0] == 'X') - p++; - val = simple_strtoul(p, &p, 16); - } else - val = simple_strtoul(p, &p, 10); - if (p == buffer) { + if (result == -EINVAL || result == -ERANGE) { IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name); } else { priv->ieee->scan_age = val; @@ -1487,7 +1472,7 @@ static ssize_t scan_age_store(struct device *d, struct device_attribute *attr, } IPW_DEBUG_INFO("exit\n"); - return len; + return count; } static DEVICE_ATTR_RW(scan_age); @@ -9671,31 +9656,30 @@ static int ipw_wx_get_wireless_mode(struct net_device *dev, mutex_lock(&priv->mutex); switch (priv->ieee->mode) { case IEEE_A: - strncpy(extra, "802.11a (1)", MAX_WX_STRING); + strscpy_pad(extra, "802.11a (1)", MAX_WX_STRING); break; case IEEE_B: - strncpy(extra, "802.11b (2)", MAX_WX_STRING); + strscpy_pad(extra, "802.11b (2)", MAX_WX_STRING); break; case IEEE_A | IEEE_B: - strncpy(extra, "802.11ab (3)", MAX_WX_STRING); + strscpy_pad(extra, "802.11ab (3)", MAX_WX_STRING); break; case IEEE_G: - strncpy(extra, "802.11g (4)", MAX_WX_STRING); + strscpy_pad(extra, "802.11g (4)", MAX_WX_STRING); break; case IEEE_A | IEEE_G: - strncpy(extra, "802.11ag (5)", MAX_WX_STRING); + strscpy_pad(extra, "802.11ag (5)", MAX_WX_STRING); break; case IEEE_B | IEEE_G: - strncpy(extra, "802.11bg (6)", MAX_WX_STRING); + strscpy_pad(extra, "802.11bg (6)", MAX_WX_STRING); break; case IEEE_A | IEEE_B | IEEE_G: - strncpy(extra, "802.11abg (7)", MAX_WX_STRING); + strscpy_pad(extra, "802.11abg (7)", MAX_WX_STRING); break; default: - strncpy(extra, "unknown", MAX_WX_STRING); + strscpy_pad(extra, "unknown", MAX_WX_STRING); break; } - extra[MAX_WX_STRING - 1] = '\0'; IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra); @@ -10393,7 +10377,6 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev, { struct ipw_priv *p = libipw_priv(dev); char vers[64]; - char date[32]; u32 len; strscpy(info->driver, DRV_NAME, sizeof(info->driver)); @@ -10401,11 +10384,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev, len = sizeof(vers); ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len); - len = sizeof(date); - ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len); - snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)", - vers, date); + strscpy(info->fw_version, vers, sizeof(info->fw_version)); strscpy(info->bus_info, pci_name(p->pci_dev), sizeof(info->bus_info)); } diff --git a/drivers/net/wireless/intel/ipw2x00/libipw.h b/drivers/net/wireless/intel/ipw2x00/libipw.h index bec7bc273748..9065ca5b0208 100644 --- a/drivers/net/wireless/intel/ipw2x00/libipw.h +++ b/drivers/net/wireless/intel/ipw2x00/libipw.h @@ -488,7 +488,7 @@ struct libipw_txb { u8 reserved; u16 frag_size; u16 payload_size; - struct sk_buff *fragments[]; + struct sk_buff *fragments[] 
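/*
 * The two ipw2200 attribute-store hunks above replace open-coded
 * simple_strtoul() parsing (with hand-rolled "0x" handling) by
 * kstrtoul(buf, 0, &val): base 0 auto-detects hex, octal and decimal,
 * and the return value distinguishes bad digits (-EINVAL) from
 * overflow (-ERANGE). The driver keeps its logging behaviour; a
 * stricter store would simply propagate the error. Sketch, with
 * `priv` standing in for the driver state:
 */
static ssize_t scan_age_store(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	unsigned long val;
	int err = kstrtoul(buf, 0, &val);

	if (err)
		return err;		/* -EINVAL or -ERANGE */
	priv->ieee->scan_age = val;
	return count;
}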
__counted_by(nr_frags); }; /* SWEEP TABLE ENTRIES NUMBER */ diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 0a4aa3c678c1..69276266ce6f 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -6122,7 +6122,7 @@ il4965_mac_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, if (il->ops->set_channel_switch(il, ch_switch)) { clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status); il->switch_channel = 0; - ieee80211_chswitch_done(il->vif, false); + ieee80211_chswitch_done(il->vif, false, 0); } out: diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c index 96002121bb8b..054fef680aba 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.c +++ b/drivers/net/wireless/intel/iwlegacy/common.c @@ -4090,7 +4090,7 @@ il_chswitch_done(struct il_priv *il, bool is_success) return; if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status)) - ieee80211_chswitch_done(il->vif, is_success); + ieee80211_chswitch_done(il->vif, is_success, 0); } EXPORT_SYMBOL(il_chswitch_done); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c index 8d5f9dce71d5..134635c70ce8 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c @@ -10,7 +10,7 @@ #include "fw/api/txq.h" /* Highest firmware API version supported */ -#define IWL_AX210_UCODE_API_MAX 83 +#define IWL_AX210_UCODE_API_MAX 86 /* Lowest firmware API version supported */ #define IWL_AX210_UCODE_API_MIN 59 diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c index b9893b22e41d..82da957adcf6 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c @@ -10,7 +10,7 @@ #include "fw/api/txq.h" /* Highest firmware API version supported */ -#define IWL_BZ_UCODE_API_MAX 83 +#define IWL_BZ_UCODE_API_MAX 86 /* Lowest firmware API version supported */ #define IWL_BZ_UCODE_API_MIN 80 @@ -134,12 +134,10 @@ static const struct iwl_base_params iwl_bz_base_params = { .ht_params = &iwl_gl_a_ht_params /* - * If the device doesn't support HE, no need to have that many buffers. - * These sizes were picked according to 8 MSDUs inside 256 A-MSDUs in an + * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an * A-MPDU, with additional overhead to account for processing time. 
*/ -#define IWL_NUM_RBDS_NON_HE 512 -#define IWL_NUM_RBDS_BZ_HE 4096 +#define IWL_NUM_RBDS_BZ_EHT (512 * 16) const struct iwl_cfg_trans_params iwl_bz_trans_cfg = { .device_family = IWL_DEVICE_FAMILY_BZ, @@ -160,16 +158,16 @@ const struct iwl_cfg iwl_cfg_bz = { .fw_name_mac = "bz", .uhb_supported = true, IWL_DEVICE_BZ, - .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM, - .num_rbds = IWL_NUM_RBDS_BZ_HE, + .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, + .num_rbds = IWL_NUM_RBDS_BZ_EHT, }; const struct iwl_cfg iwl_cfg_gl = { .fw_name_mac = "gl", .uhb_supported = true, IWL_DEVICE_BZ, - .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM, - .num_rbds = IWL_NUM_RBDS_BZ_HE, + .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, + .num_rbds = IWL_NUM_RBDS_BZ_EHT, }; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c index ad283fd22e2a..80eb9b499538 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c @@ -10,7 +10,7 @@ #include "fw/api/txq.h" /* Highest firmware API version supported */ -#define IWL_SC_UCODE_API_MAX 83 +#define IWL_SC_UCODE_API_MAX 86 /* Lowest firmware API version supported */ #define IWL_SC_UCODE_API_MIN 82 @@ -127,12 +127,10 @@ static const struct iwl_base_params iwl_sc_base_params = { .ht_params = &iwl_22000_ht_params /* - * If the device doesn't support HE, no need to have that many buffers. - * These sizes were picked according to 8 MSDUs inside 256 A-MSDUs in an + * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an * A-MPDU, with additional overhead to account for processing time. */ -#define IWL_NUM_RBDS_NON_HE 512 -#define IWL_NUM_RBDS_SC_HE 4096 +#define IWL_NUM_RBDS_SC_EHT (512 * 16) const struct iwl_cfg_trans_params iwl_sc_trans_cfg = { .device_family = IWL_DEVICE_FAMILY_SC, @@ -153,8 +151,8 @@ const struct iwl_cfg iwl_cfg_sc = { .fw_name_mac = "sc", .uhb_supported = true, IWL_DEVICE_SC, - .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM, - .num_rbds = IWL_NUM_RBDS_SC_HE, + .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, + .num_rbds = IWL_NUM_RBDS_SC_EHT, }; MODULE_FIRMWARE(IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h index 75a4b8e26232..04864d3fda63 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014 Intel Corporation + * Copyright (C) 2005-2014, 2023 Intel Corporation */ /* * Please use this file (commands.h) only for uCode API definitions. @@ -270,7 +270,7 @@ enum { #define IWL_PWR_NUM_HT_OFDM_ENTRIES 24 #define IWL_PWR_CCK_ENTRIES 2 -/** +/* * struct tx_power_dual_stream * * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH @@ -281,7 +281,7 @@ struct tx_power_dual_stream { __le32 dw; } __packed; -/** +/* * Command REPLY_TX_POWER_DBM_CMD = 0x98 * struct iwlagn_tx_power_dbm_cmd */ @@ -295,7 +295,7 @@ struct iwlagn_tx_power_dbm_cmd { u8 reserved; } __packed; -/** +/* * Command TX_ANT_CONFIGURATION_CMD = 0x98 * This command is used to configure valid Tx antenna. * By default uCode concludes the valid antenna according to the radio flavor. 
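/*
 * The dvm/commands.h hunks above demote several comment openers from
 * "/**" to "/*". The double-star opener marks kernel-doc, which
 * scripts/kernel-doc parses and warns about whenever the body fails
 * to document every member; comments not written in kernel-doc form
 * should use the plain opener. The two shapes side by side:
 */

/**
 * struct foo - kernel-doc: one-line summary, then every field
 * @id: opaque identifier
 */
struct foo {
	int id;
};

/* free-form note: not kernel-doc, single-star opener, no @field lines */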
@@ -313,7 +313,7 @@ struct iwl_tx_ant_config_cmd { #define UCODE_VALID_OK cpu_to_le32(0x1) -/** +/* * REPLY_ALIVE = 0x1 (response only, not a command) * * uCode issues this "alive" notification once the runtime image is ready @@ -534,7 +534,7 @@ enum { /* transfer to host non bssid beacons in associated state */ #define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6) -/** +/* * REPLY_RXON = 0x10 (command, has simple generic response) * * RXON tunes the radio tuner to a service channel, and sets up a number @@ -681,6 +681,7 @@ struct iwl_csa_notification { * @aifsn: Number of slots in Arbitration Interframe Space (before * performing random backoff timing prior to Tx). Device default 1. * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0. + * @reserved1: reserved for alignment * * Device will automatically increase contention window by (2*CW) + 1 for each * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW @@ -791,9 +792,11 @@ struct iwl_keyinfo { /** * struct sta_id_modify - * @addr[ETH_ALEN]: station's MAC address + * @addr: station's MAC address + * @reserved1: reserved for alignment * @sta_id: index of station in uCode's station table * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change + * @reserved2: reserved for alignment * * Driver selects unused table index when adding new station, * or the index to a pre-existing station entry when modifying that station. @@ -1464,7 +1467,7 @@ struct iwl_compressed_ba_resp { #define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK) -/** +/* * struct iwl_link_qual_general_params * * Used in REPLY_TX_LINK_QUALITY_CMD @@ -1507,7 +1510,7 @@ struct iwl_link_qual_general_params { #define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63) #define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) -/** +/* * struct iwl_link_qual_agg_params * * Used in REPLY_TX_LINK_QUALITY_CMD @@ -2040,7 +2043,7 @@ struct iwl_spectrum_notification { * *****************************************************************************/ -/** +/* * struct iwl_powertable_cmd - Power Table Command * @flags: See below: * @@ -2171,7 +2174,7 @@ struct iwl_ct_kill_throttling_config { #define SCAN_CHANNEL_TYPE_PASSIVE cpu_to_le32(0) #define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1) -/** +/* * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table * * One for each channel in the scan list. @@ -2210,7 +2213,7 @@ struct iwl_scan_channel { /* set number of direct probes __le32 type */ #define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1)))) -/** +/* * struct iwl_ssid_ie - directed scan network information element * * Up to 20 of these may appear in REPLY_SCAN_CMD, @@ -2560,6 +2563,7 @@ struct statistics_rx_bt { * @ant_a: current tx power on chain a in 1/2 dB step * @ant_b: current tx power on chain b in 1/2 dB step * @ant_c: current tx power on chain c in 1/2 dB step + * @reserved: reserved for alignment */ struct statistics_tx_power { u8 ant_a; @@ -3006,7 +3010,7 @@ struct iwl_enhance_sensitivity_cmd { } __packed; -/** +/* * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response) * * This command sets the relative gains of agn device's 3 radio receiver chains. 
@@ -3847,6 +3851,7 @@ struct iwlagn_wowlan_status { * @type: * 0 - BSS * 1 - PAN + * @reserved: reserved for alignment */ struct iwl_wipan_slot { __le16 width; @@ -3874,6 +3879,8 @@ struct iwl_wipan_slot { * uCode will perform leaving channel methods in context switch * also when working in same channel mode * @num_slots: 1 - 10 + * @slots: per-slot data + * @reserved: reserved for alignment */ struct iwl_wipan_params_cmd { __le16 flags; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h index 1a9eadace188..25283e4b849f 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** * - * Copyright(c) 2003 - 2014, 2020 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2014, 2020, 2023 Intel Corporation. All rights reserved. *****************************************************************************/ /* * Please use this file (dev.h) for driver implementation definitions. @@ -126,11 +126,11 @@ enum iwl_agg_state { /** * struct iwl_ht_agg - aggregation state machine - + * * This structs holds the states for the BA agreement establishment and tear * down. It also holds the state during the BA session itself. This struct is * duplicated for each RA / TID. - + * * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the * Tx response (REPLY_TX), and the block ack notification * (REPLY_COMPRESSED_BA). @@ -152,9 +152,9 @@ struct iwl_ht_agg { /** * struct iwl_tid_data - one for each RA / TID - + * * This structs holds the states for each RA / TID. - + * * @seq_number: the next WiFi sequence number to use * @next_reclaimed: the WiFi sequence number of the next packet to be acked. * This is basically (last acked packet++). @@ -195,7 +195,7 @@ struct iwl_station_priv { u8 sta_id; }; -/** +/* * struct iwl_vif_priv - driver's private per-interface information * * When mac80211 allocates a virtual interface, it can allocate @@ -529,6 +529,7 @@ enum iwl_scan_type { * relevant for 1000, 6000 and up * @struct iwl_sensitivity_ranges: range of sensitivity values * @use_rts_for_aggregation: use rts/cts protection for HT traffic + * @sens: sensitivity ranges pointer */ struct iwl_hw_params { u8 tx_chains_num; @@ -547,6 +548,7 @@ struct iwl_hw_params { * @bt_prio_boost: default bt priority boost value * @agg_time_limit: maximum number of uSec in aggregation * @bt_sco_disable: uCode should not response to BT in SCO/ESCO mode + * @bt_session_2: indicates version 2 of the BT command is used */ struct iwl_dvm_bt_params { bool advanced_bt_coexist; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index b1939ff275b5..5f3d5b15f727 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -2,7 +2,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright (C) 2018 - 2019, 2022 Intel Corporation + * Copyright(C) 2018 - 2019, 2022 - 2023 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. 
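/*
 * The iwlegacy hunks earlier, and the dvm hunks just below, track a
 * mac80211 interface change: ieee80211_chswitch_done() gained a
 * link_id parameter for multi-link (MLO) operation. Drivers without
 * MLO support report the completed switch on link 0:
 */
ieee80211_chswitch_done(vif, success, 0);	/* third argument: link_id */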
@@ -1001,7 +1001,7 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, if (priv->lib->set_channel_switch(priv, ch_switch)) { clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); priv->switch_channel = 0; - ieee80211_chswitch_done(ctx->vif, false); + ieee80211_chswitch_done(ctx->vif, false, 0); } out: @@ -1024,7 +1024,7 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success) return; if (ctx->vif) - ieee80211_chswitch_done(ctx->vif, is_success); + ieee80211_chswitch_done(ctx->vif, is_success, 0); } static void iwlagn_configure_filter(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index a873be109f43..8774dd7b921e 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -1464,7 +1464,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, snprintf(priv->hw->wiphy->fw_version, sizeof(priv->hw->wiphy->fw_version), - "%s", fw->fw_version); + "%.31s", fw->fw_version); priv->new_scan_threshold_behaviour = !!(ucode_flags & IWL_UCODE_TLV_FLAGS_NEWSCAN); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h index 0b47f1993c5d..100cb932c6b8 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2014, 2023 Intel Corporation. All rights reserved. *****************************************************************************/ #ifndef __iwl_agn_rs_h__ @@ -269,7 +269,7 @@ struct iwl_rate_mcs_info { char mcs[IWL_MAX_MCS_DISPLAY_SIZE]; }; -/** +/* * struct iwl_rate_scale_data -- tx success history for one rate */ struct iwl_rate_scale_data { @@ -281,7 +281,7 @@ struct iwl_rate_scale_data { unsigned long stamp; }; -/** +/* * struct iwl_scale_tbl_info -- tx params and success history for all rates * * There are two of these in struct iwl_lq_sta, @@ -311,7 +311,7 @@ struct iwl_traffic_load { u8 head; /* start of the circular buffer */ }; -/** +/* * struct iwl_lq_sta -- driver's rate scaling private structure * * Pointer to this gets passed back and forth between driver and mac80211. @@ -379,7 +379,7 @@ static inline u8 first_antenna(u8 mask) void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id); -/** +/* * iwl_rate_control_register - Register the rate control algorithm callbacks * * Since the rate control algorithm is hardware specific, there is no need @@ -391,7 +391,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, */ int iwlagn_rate_control_register(void); -/** +/* * iwl_rate_control_unregister - Unregister the rate control callbacks * * This should be called after calling ieee80211_unregister_hw, but before diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h index 7ace052fc78a..23dfcda0dd86 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2014, 2023 Intel Corporation. All rights reserved. 
* * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -72,14 +72,15 @@ struct iwl_tt_trans { * when thermal throttling state != IWL_TI_0 * the tt_power_mode should set to different * power mode based on the current tt state - * @tt_previous_temperature: last measured temperature - * @iwl_tt_restriction: ptr to restriction tbl, used by advance + * @tt_previous_temp: last measured temperature + * @restriction: ptr to restriction tbl, used by advance * thermal throttling to determine how many tx/rx streams * should be used in tt state; and can HT be enabled or not - * @iwl_tt_trans: ptr to adv trans table, used by advance thermal throttling + * @transaction: ptr to adv trans table, used by advance thermal throttling * state transaction * @ct_kill_toggle: used to toggle the CSR bit when checking uCode temperature * @ct_kill_exit_tm: timer to exit thermal kill + * @ct_kill_waiting_tm: timer to enter thermal kill */ struct iwl_tt_mgmt { enum iwl_tt_state state; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c index 60a7b61d59aa..111ed1873006 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c @@ -3,6 +3,7 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright (C) 2019 Intel Corporation + * Copyright (C) 2023 Intel Corporation *****************************************************************************/ #include <linux/kernel.h> @@ -1169,7 +1170,7 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb) iwlagn_check_ratid_empty(priv, sta_id, tid); } - iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs); + iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs, false); freed = 0; @@ -1247,7 +1248,7 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb) while (!skb_queue_empty(&skbs)) { skb = __skb_dequeue(&skbs); - ieee80211_tx_status(priv->hw, skb); + ieee80211_tx_status_skb(priv->hw, skb); } } @@ -1315,7 +1316,7 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, * block-ack window (we assume that they've been successfully * transmitted ... if not, it's too late anyway). */ iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn, - &reclaimed_skbs); + &reclaimed_skbs, false); IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n", @@ -1384,6 +1385,6 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, while (!skb_queue_empty(&reclaimed_skbs)) { skb = __skb_dequeue(&reclaimed_skbs); - ieee80211_tx_status(priv->hw, skb); + ieee80211_tx_status_skb(priv->hw, skb); } } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index dfe8357036eb..b96f30d11644 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -69,6 +69,11 @@ static const struct dmi_system_id dmi_ppag_approved_list[] = { DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), }, }, + { .ident = "RAZER", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Razer"), + }, + }, {} }; @@ -1006,18 +1011,29 @@ __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt) { int ret; u8 value; + u32 val; __le32 config_bitmap = 0; /* - ** Evaluate func 'DSM_FUNC_ENABLE_INDONESIA_5G2' + * Evaluate func 'DSM_FUNC_ENABLE_INDONESIA_5G2'. + * Setting config_bitmap Indonesia bit is valid only for HR/JF. 
*/ - ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0, - DSM_FUNC_ENABLE_INDONESIA_5G2, - &iwl_guid, &value); - - if (!ret && value == DSM_VALUE_INDONESIA_ENABLE) - config_bitmap |= - cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK); + switch (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id)) { + case IWL_CFG_RF_TYPE_HR1: + case IWL_CFG_RF_TYPE_HR2: + case IWL_CFG_RF_TYPE_JF1: + case IWL_CFG_RF_TYPE_JF2: + ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0, + DSM_FUNC_ENABLE_INDONESIA_5G2, + &iwl_guid, &value); + + if (!ret && value == DSM_VALUE_INDONESIA_ENABLE) + config_bitmap |= + cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK); + break; + default: + break; + } /* ** Evaluate func 'DSM_FUNC_DISABLE_SRD' @@ -1034,6 +1050,23 @@ __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt) cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK); } + if (fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT)) { + /* + ** Evaluate func 'DSM_FUNC_REGULATORY_CONFIG' + */ + ret = iwl_acpi_get_dsm_u32(fwrt->dev, 0, + DSM_FUNC_REGULATORY_CONFIG, + &iwl_guid, &val); + /* + * China 2022 enable if the BIOS object does not exist or + * if it is enabled in BIOS. + */ + if (ret < 0 || val & DSM_MASK_CHINA_22_REG) + config_bitmap |= + cpu_to_le32(LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK); + } + return config_bitmap; } IWL_EXPORT_SYMBOL(iwl_acpi_get_lari_config_bitmap); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index c36c62d6414d..e9277f6f3582 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -134,10 +134,12 @@ enum iwl_dsm_funcs_rev_0 { DSM_FUNC_DISABLE_SRD = 1, DSM_FUNC_ENABLE_INDONESIA_5G2 = 2, DSM_FUNC_ENABLE_6E = 3, + DSM_FUNC_REGULATORY_CONFIG = 4, DSM_FUNC_11AX_ENABLEMENT = 6, DSM_FUNC_ENABLE_UNII4_CHAN = 7, DSM_FUNC_ACTIVATE_CHANNEL = 8, - DSM_FUNC_FORCE_DISABLE_CHANNELS = 9 + DSM_FUNC_FORCE_DISABLE_CHANNELS = 9, + DSM_FUNC_ENERGY_DETECTION_THRESHOLD = 10, }; enum iwl_dsm_values_srd { @@ -164,6 +166,10 @@ enum iwl_dsm_values_rfi { DSM_VALUE_RFI_MAX }; +enum iwl_dsm_masks_reg { + DSM_MASK_CHINA_22_REG = BIT(2) +}; + #ifdef CONFIG_ACPI struct iwl_fw_runtime; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index 13cb0d53a1a3..7544c4cb1a30 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -30,6 +30,8 @@ * @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from * &enum iwl_regulatory_and_nvm_subcmd_ids * @DEBUG_GROUP: Debug group, uses command IDs from &enum iwl_debug_cmds + * @STATISTICS_GROUP: Statistics group, uses command IDs from + * &enum iwl_statistics_subcmd_ids */ enum iwl_mvm_command_groups { LEGACY_GROUP = 0x0, @@ -44,6 +46,7 @@ enum iwl_mvm_command_groups { PROT_OFFLOAD_GROUP = 0xb, REGULATORY_AND_NVM_GROUP = 0xc, DEBUG_GROUP = 0xf, + STATISTICS_GROUP = 0x10, }; /** @@ -617,9 +620,36 @@ enum iwl_system_subcmd_ids { SYSTEM_FEATURES_CONTROL_CMD = 0xd, /** + * @SYSTEM_STATISTICS_CMD: &struct iwl_system_statistics_cmd + */ + SYSTEM_STATISTICS_CMD = 0xf, + + /** + * @SYSTEM_STATISTICS_END_NOTIF: &struct iwl_system_statistics_end_notif + */ + SYSTEM_STATISTICS_END_NOTIF = 0xfd, + + /** * @RFI_DEACTIVATE_NOTIF: &struct iwl_rfi_deactivate_notif */ RFI_DEACTIVATE_NOTIF = 0xff, }; +/** + * enum iwl_statistics_subcmd_ids - Statistics group command IDs + */ +enum iwl_statistics_subcmd_ids { + /** + * 
@STATISTICS_OPER_NOTIF: Notification about operational + * statistics &struct iwl_system_statistics_notif_oper + */ + STATISTICS_OPER_NOTIF = 0x0, + + /** + * @STATISTICS_OPER_PART1_NOTIF: Notification about operational part1 + * statistics &struct iwl_system_statistics_part1_notif_oper + */ + STATISTICS_OPER_PART1_NOTIF = 0x1, +}; + #endif /* __iwl_fw_api_commands_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h index 72d461c47323..ea99d41040d2 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h @@ -397,6 +397,8 @@ struct iwl_wowlan_config_cmd { #define WOWLAN_GTK_KEYS_NUM 2 #define WOWLAN_IGTK_KEYS_NUM 2 #define WOWLAN_IGTK_MIN_INDEX 4 +#define WOWLAN_BIGTK_KEYS_NUM 2 +#define WOWLAN_BIGTK_MIN_INDEX 6 /* * WOWLAN_TSC_RSC_PARAMS @@ -621,9 +623,10 @@ struct iwl_wowlan_gtk_status_v3 { * @ipn: the IGTK packet number (replay counter) * @key_len: IGTK length, if set to 0, the key is not available * @key_flags: information about the key: - * bits[0]: key index assigned by the AP (0: index 4, 1: index 5) - * bits[1:5]: IGTK index of the key in the internal DB - * bit[6]: Set iff this is the currently used IGTK + * bits[0]: key index assigned by the AP (0: index 4, 1: index 5) + * (0: index 6, 1: index 7 with bigtk) + * bits[1:5]: IGTK index of the key in the internal DB + * bit[6]: Set iff this is the currently used IGTK */ struct iwl_wowlan_igtk_status { u8 key[WOWLAN_KEY_MAX_SIZE]; @@ -808,9 +811,43 @@ struct iwl_wowlan_info_notif_v1 { } __packed; /* WOWLAN_INFO_NTFY_API_S_VER_1 */ /** + * struct iwl_wowlan_info_notif_v2 - WoWLAN information notification + * @gtk: GTK data + * @igtk: IGTK data + * @replay_ctr: GTK rekey replay counter + * @pattern_number: number of the matched patterns + * @reserved1: reserved + * @qos_seq_ctr: QoS sequence counters to use next + * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason + * @num_of_gtk_rekeys: number of GTK rekeys + * @transmitted_ndps: number of transmitted neighbor discovery packets + * @received_beacons: number of received beacons + * @tid_tear_down: bit mask of tids whose BA sessions were closed + * in suspend state + * @station_id: station id + * @reserved2: reserved + */ +struct iwl_wowlan_info_notif_v2 { + struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM]; + struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM]; + __le64 replay_ctr; + __le16 pattern_number; + __le16 reserved1; + __le16 qos_seq_ctr[8]; + __le32 wakeup_reasons; + __le32 num_of_gtk_rekeys; + __le32 transmitted_ndps; + __le32 received_beacons; + u8 tid_tear_down; + u8 station_id; + u8 reserved2[2]; +} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_2 */ + +/** * struct iwl_wowlan_info_notif - WoWLAN information notification * @gtk: GTK data * @igtk: IGTK data + * @bigtk: BIGTK data * @replay_ctr: GTK rekey replay counter * @pattern_number: number of the matched patterns * @reserved1: reserved @@ -827,6 +864,7 @@ struct iwl_wowlan_info_notif_v1 { struct iwl_wowlan_info_notif { struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM]; struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM]; + struct iwl_wowlan_igtk_status bigtk[WOWLAN_BIGTK_KEYS_NUM]; __le64 replay_ctr; __le16 pattern_number; __le16 reserved1; @@ -838,7 +876,7 @@ struct iwl_wowlan_info_notif { u8 tid_tear_down; u8 station_id; u8 reserved2[2]; -} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_2 */ +} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_3 */ /** * struct 
iwl_wowlan_wake_pkt_notif - WoWLAN wake packet notification diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h index ba538d70985f..394747deb269 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #ifndef __iwl_fw_dbg_tlv_h__ #define __iwl_fw_dbg_tlv_h__ @@ -13,6 +13,7 @@ #define IWL_FW_INI_DOMAIN_ALWAYS_ON 0 #define IWL_FW_INI_REGION_ID_MASK GENMASK(15, 0) #define IWL_FW_INI_REGION_DUMP_POLICY_MASK GENMASK(31, 16) +#define IWL_FW_INI_PRESET_DISABLE 0xff /** * struct iwl_fw_ini_hcmd @@ -42,6 +43,30 @@ struct iwl_fw_ini_header { } __packed; /* FW_TLV_DEBUG_HEADER_S_VER_1 */ /** + * struct iwl_fw_ini_addr_size - Base address and size that defines + * a chunk of memory + * + * @addr: the base address (fixed size - 4 bytes) + * @size: the size to read + */ +struct iwl_fw_ini_addr_size { + __le32 addr; + __le32 size; +} __packed; /* FW_TLV_DEBUG_ADDR_SIZE_VER_1 */ + +/** + * struct iwl_fw_ini_region_dev_addr_range - Configuration to read + * device address range + * + * @offset: offset to add to the base address of each chunk + * The addrs[] array will be treated as an array of &iwl_fw_ini_addr_size - + * an array of (addr, size) pairs. + */ +struct iwl_fw_ini_region_dev_addr_range { + __le32 offset; +} __packed; /* FW_TLV_DEBUG_DEVICE_ADDR_RANGE_API_S_VER_1 */ + +/** * struct iwl_fw_ini_region_dev_addr - Configuration to read device addresses * * @size: size of each memory chunk @@ -134,6 +159,10 @@ struct iwl_fw_ini_region_internal_buffer { * &IWL_FW_INI_REGION_PAGING, &IWL_FW_INI_REGION_CSR, * &IWL_FW_INI_REGION_DRAM_IMR and &IWL_FW_INI_REGION_PCI_IOSF_CONFIG * &IWL_FW_INI_REGION_DBGI_SRAM, &FW_TLV_DEBUG_REGION_TYPE_DBGI_SRAM, + * &IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP, + * @dev_addr_range: device address range configuration. Used by + * &IWL_FW_INI_REGION_PERIPHERY_MAC_RANGE and + * &IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE * @fifos: fifos configuration. Used by &IWL_FW_INI_REGION_TXF and * &IWL_FW_INI_REGION_RXF * @err_table: error table configuration. 
Used by @@ -156,6 +185,7 @@ struct iwl_fw_ini_region_tlv { u8 name[IWL_FW_INI_MAX_NAME]; union { struct iwl_fw_ini_region_dev_addr dev_addr; + struct iwl_fw_ini_region_dev_addr_range dev_addr_range; struct iwl_fw_ini_region_fifos fifos; struct iwl_fw_ini_region_err_table err_table; struct iwl_fw_ini_region_internal_buffer internal_buffer; @@ -361,6 +391,9 @@ enum iwl_fw_ini_buffer_location { * @IWL_FW_INI_REGION_PCI_IOSF_CONFIG: PCI/IOSF config * @IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY: special device memory * @IWL_FW_INI_REGION_DBGI_SRAM: periphery registers of DBGI SRAM + * @IWL_FW_INI_REGION_PERIPHERY_MAC_RANGE: a range of periphery registers of MAC + * @IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE: a range of periphery registers of PHY + * @IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP: periphery registers of SNPS DPHYIP * @IWL_FW_INI_REGION_NUM: number of region types */ enum iwl_fw_ini_region_type { @@ -383,6 +416,9 @@ enum iwl_fw_ini_region_type { IWL_FW_INI_REGION_PCI_IOSF_CONFIG, IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY, IWL_FW_INI_REGION_DBGI_SRAM, + IWL_FW_INI_REGION_PERIPHERY_MAC_RANGE, + IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE, + IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP, IWL_FW_INI_REGION_NUM }; /* FW_TLV_DEBUG_REGION_TYPE_API_E */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h index 8fef38139bf6..7b18e098b125 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2022 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -30,6 +30,11 @@ enum iwl_debug_cmds { */ HOST_EVENT_CFG = 0x3, /** + * @INVALID_WR_PTR_CMD: invalid write pointer, set in the TFD + * when it's not in use + */ + INVALID_WR_PTR_CMD = 0x6, + /** * @DBGC_SUSPEND_RESUME: * DBGC suspend/resume commad. 
Uses a single dword as data: * 0 - resume DBGC recording @@ -377,7 +382,7 @@ struct iwl_buf_alloc_cmd { #define DRAM_INFO_SECOND_MAGIC_WORD 0x89ABCDEF /** - * struct iwL_dram_info - DRAM fragments allocation struct + * struct iwl_dram_info - DRAM fragments allocation struct * * Driver will fill in the first 1K(+) of the pointed DRAM fragment * @@ -517,4 +522,26 @@ enum iwl_mvm_tas_statically_disabled_reason { TAS_DISABLED_REASON_MAX, }; /*_TAS_STATICALLY_DISABLED_REASON_E*/ +/** + * enum iwl_fw_dbg_config_cmd_type - types of FW debug config command + * @DEBUG_TOKEN_CONFIG_TYPE: token config type + */ +enum iwl_fw_dbg_config_cmd_type { + DEBUG_TOKEN_CONFIG_TYPE = 0x2B, +}; /* LDBG_CFG_CMD_TYPE_API_E_VER_1 */ + +/* this token disables debug asserts in the firmware */ +#define IWL_FW_DBG_CONFIG_TOKEN 0x00011301 + +/** + * struct iwl_fw_dbg_config_cmd - configure FW debug + * + * @type: according to &enum iwl_fw_dbg_config_cmd_type + * @conf: FW configuration + */ +struct iwl_fw_dbg_config_cmd { + __le32 type; + __le32 conf; +} __packed; /* LDBG_CFG_CMD_API_S_VER_7 */ + #endif /* __iwl_fw_api_debug_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h index 184db5a6f06f..f15e6d64c298 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2019, 2021-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2021-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -58,6 +58,14 @@ enum iwl_mac_conf_subcmd_ids { */ STA_DISABLE_TX_CMD = 0xD, /** + * @ROC_CMD: &struct iwl_roc_req + */ + ROC_CMD = 0xE, + /** + * @ROC_NOTIF: &struct iwl_roc_notif + */ + ROC_NOTIF = 0xF8, + /** * @SESSION_PROTECTION_NOTIF: &struct iwl_mvm_session_prot_notif */ SESSION_PROTECTION_NOTIF = 0xFB, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index 28bfabb399b2..dfe0bebabc81 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -21,8 +21,9 @@ enum iwl_regulatory_and_nvm_subcmd_ids { * &struct iwl_lari_config_change_cmd_v2, * &struct iwl_lari_config_change_cmd_v3, * &struct iwl_lari_config_change_cmd_v4, - * &struct iwl_lari_config_change_cmd_v5 or - * &struct iwl_lari_config_change_cmd_v6 + * &struct iwl_lari_config_change_cmd_v5, + * &struct iwl_lari_config_change_cmd_v6 or + * &struct iwl_lari_config_change_cmd_v7 */ LARI_CONFIG_CHANGE = 0x1, @@ -44,6 +45,11 @@ enum iwl_regulatory_and_nvm_subcmd_ids { SAR_OFFSET_MAPPING_TABLE_CMD = 0x4, /** + * @UATS_TABLE_CMD: &struct iwl_uats_table_cmd + */ + UATS_TABLE_CMD = 0x5, + + /** * @PNVM_INIT_COMPLETE_NTFY: &struct iwl_pnvm_init_complete_ntfy */ PNVM_INIT_COMPLETE_NTFY = 0xFE, @@ -480,18 +486,20 @@ union iwl_tas_config_cmd { struct iwl_tas_config_cmd_v4 v4; }; /** - * enum iwl_lari_configs - bit masks for the various LARI config operations + * enum iwl_lari_config_masks - bit masks for the various LARI config 
operations * @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine * @LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK: ETSI 5.8GHz SRD passive scan * @LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK: ETSI 5.8GHz SRD disabled * @LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK: enable 5.15/5.35GHz bands in * Indonesia + * @LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK: enable 2022 China regulatory */ enum iwl_lari_config_masks { LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK = BIT(0), LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK = BIT(1), LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK = BIT(2), LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK = BIT(3), + LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK = BIT(7), }; #define IWL_11AX_UKRAINE_MASK 3 @@ -601,6 +609,45 @@ struct iwl_lari_config_change_cmd_v6 { } __packed; /* LARI_CHANGE_CONF_CMD_S_VER_6 */ /** + * struct iwl_lari_config_change_cmd_v7 - change LARI configuration + * This structure is also used for LARI cmd version 8. + * @config_bitmap: Bitmap of the config commands. Each bit will trigger a + * different predefined FW config operation. + * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets. + * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits + * per country, one to indicate whether to override and the other to + * indicate the value to use. + * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs. There are two bits + * per country, one to indicate whether to override and the other to + * indicate allow/disallow unii4 channels. + * @chan_state_active_bitmap: Bitmap to enable different bands per country + * or region. + * Each bit represents a country or region, and a band to activate + * according to the BIOS definitions. + * For LARI cmd version 7 - bits 0:3 are supported. + * For LARI cmd version 8 - bits 0:4 are supported. + * @force_disable_channels_bitmap: Bitmap of disabled bands/channels. + * Each bit represents a set of channels in a specific band that should be + * disabled. + * @edt_bitmap: Bitmap of energy detection threshold table. + * Disable/enable the EDT optimization method for different bands. 
+ */ +struct iwl_lari_config_change_cmd_v7 { + __le32 config_bitmap; + __le32 oem_uhb_allow_bitmap; + __le32 oem_11ax_allow_bitmap; + __le32 oem_unii4_allow_bitmap; + __le32 chan_state_active_bitmap; + __le32 force_disable_channels_bitmap; + __le32 edt_bitmap; +} __packed; +/* LARI_CHANGE_CONF_CMD_S_VER_7 */ +/* LARI_CHANGE_CONF_CMD_S_VER_8 */ + +/* Activate UNII-1 (5.2GHz) for World Wide */ +#define ACTIVATE_5G2_IN_WW_MASK BIT(4) + +/** * struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete * @status: PNVM image loading status */ @@ -608,4 +655,17 @@ struct iwl_pnvm_init_complete_ntfy { __le32 status; } __packed; /* PNVM_INIT_COMPLETE_NTFY_S_VER_1 */ +#define UATS_TABLE_ROW_SIZE 26 +#define UATS_TABLE_COL_SIZE 13 + +/** + * struct iwl_uats_table_cmd - struct for UATS_TABLE_CMD + * @offset_map: mapping an MCC to whether UHB AP type support (UATS) is allowed + * @reserved: reserved + */ +struct iwl_uats_table_cmd { + u8 offset_map[UATS_TABLE_ROW_SIZE][UATS_TABLE_COL_SIZE]; + __le16 reserved; +} __packed; /* UATS_TABLE_CMD_S_VER_1 */ + #endif /* __iwl_fw_api_nvm_reg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h index 898bf351f6e4..2d2b9c8c36ea 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h @@ -3,7 +3,7 @@ * Copyright (C) 2012-2014 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation */ #ifndef __iwl_fw_api_offload_h__ #define __iwl_fw_api_offload_h__ @@ -18,7 +18,9 @@ enum iwl_prot_offload_subcmd_ids { WOWLAN_WAKE_PKT_NOTIFICATION = 0xFC, /** - * @WOWLAN_INFO_NOTIFICATION: Notification in &struct iwl_wowlan_info_notif + * @WOWLAN_INFO_NOTIFICATION: Notification in + * &struct iwl_wowlan_info_notif_v1, &struct iwl_wowlan_info_notif_v2, + * or &struct iwl_wowlan_info_notif */ WOWLAN_INFO_NOTIFICATION = 0xFD, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h index 8fe42cff1102..306ed88de463 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018, 2020-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2020-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -25,8 +25,8 @@ * For legacy set bit means upper channel, otherwise lower. * For VHT - bit-2 marks if the control is lower/upper relative to center-freq * bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0. 
- * center_freq * For EHT - bit-3 is used for extended distance + * center_freq * | * 40Mhz |____|____| * 80Mhz |____|____|____|____| diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h index 85d89f559f6c..040d83fa5424 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -144,6 +144,8 @@ struct iwl_powertable_cmd { * receiver and transmitter. '0' - does not allow. * @DEVICE_POWER_FLAGS_ALLOW_MEM_RETENTION_MSK: * Device Retention indication, '1' indicate retention is enabled. + * @DEVICE_POWER_FLAGS_NO_SLEEP_TILL_D3_MSK: + * Prevent power save until entering d3 is completed. * @DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK: * 32Khz external slow clock valid indication, '1' indicate cloack is * valid. @@ -151,6 +153,7 @@ struct iwl_powertable_cmd { enum iwl_device_power_flags { DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0), DEVICE_POWER_FLAGS_ALLOW_MEM_RETENTION_MSK = BIT(1), + DEVICE_POWER_FLAGS_NO_SLEEP_TILL_D3_MSK = BIT(7), DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK = BIT(12), }; @@ -162,7 +165,7 @@ enum iwl_device_power_flags { * @reserved: reserved (padding) */ struct iwl_device_power_cmd { - /* PM_POWER_TABLE_CMD_API_S_VER_6 */ + /* PM_POWER_TABLE_CMD_API_S_VER_7 */ __le16 flags; __le16 reserved; } __packed; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h index 1a84a4081e7c..34d664023473 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2020-2021 Intel Corporation + * Copyright (C) 2020-2021, 2023 Intel Corporation */ #ifndef __iwl_fw_api_rfi_h__ #define __iwl_fw_api_rfi_h__ @@ -25,8 +25,9 @@ struct iwl_rfi_lut_entry { /** * struct iwl_rfi_config_cmd - RFI configuration table * - * @entry: a table can have 24 frequency/channel mappings + * @table: a table can have 24 frequency/channel mappings * @oem: specifies if this is the default table or set by OEM + * @reserved: (reserved/padding) */ struct iwl_rfi_config_cmd { struct iwl_rfi_lut_entry table[IWL_RFI_LUT_SIZE]; @@ -35,7 +36,7 @@ struct iwl_rfi_config_cmd { } __packed; /* RFI_CONFIG_CMD_API_S_VER_1 */ /** - * iwl_rfi_freq_table_status - status of the frequency table query + * enum iwl_rfi_freq_table_status - status of the frequency table query * @RFI_FREQ_TABLE_OK: can be used * @RFI_FREQ_TABLE_DVFS_NOT_READY: DVFS is not ready yet, should try later * @RFI_FREQ_TABLE_DISABLED: the feature is disabled in FW diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index 25e2e23dce3d..e71f29d0c694 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -371,7 +371,7 @@ enum iwl_rx_phy_eht_data1 { 
IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B1_B7 = 0x0000fe00, }; -/* goes into Metadata DW 7 */ +/* goes into Metadata DW 7 (Qu) or 8 (So or higher) */ enum iwl_rx_phy_he_data2 { /* info type: HE MU-EXT */ /* the a1/a2/... is what the PHY/firmware calls the values */ @@ -387,7 +387,7 @@ enum iwl_rx_phy_he_data2 { IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4 = 0x0000f000, }; -/* goes into Metadata DW 8 */ +/* goes into Metadata DW 8 (Qu) or 7 (So or higher) */ enum iwl_rx_phy_he_data3 { /* info type: HE MU-EXT */ IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1 = 0x000000ff, /* c1 */ @@ -408,10 +408,9 @@ enum iwl_rx_phy_he_he_data4 { IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK = 0x0600, }; -/* goes into Metadata DW 7 */ +/* goes into Metadata DW 8 (Qu has no EHT) */ enum iwl_rx_phy_eht_data2 { /* info type: EHT-MU-EXT */ - /* OFDM_RX_VECTOR_COMMON_RU_ALLOC_0_OUT */ IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_A1 = 0x000001ff, IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_A2 = 0x0003fe00, IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_B1 = 0x07fc0000, @@ -420,11 +419,10 @@ enum iwl_rx_phy_eht_data2 { IWL_RX_PHY_DATA2_EHT_TB_EXT_TRIG_SIGA1 = 0xffffffff, }; -/* goes into Metadata DW 8 */ +/* goes into Metadata DW 7 (Qu has no EHT) */ enum iwl_rx_phy_eht_data3 { + /* note: low 8 bits cannot be used */ /* info type: EHT-MU-EXT */ - /* OFDM_RX_VECTOR_COMMON_RU_ALLOC_1_OUT */ - IWL_RX_PHY_DATA3_EHT_MU_EXT_RU_ALLOC_B2 = 0x000001ff, IWL_RX_PHY_DATA3_EHT_MU_EXT_RU_ALLOC_C1 = 0x0003fe00, IWL_RX_PHY_DATA3_EHT_MU_EXT_RU_ALLOC_C2 = 0x07fc0000, }; @@ -432,10 +430,10 @@ enum iwl_rx_phy_eht_data3 { /* goes into Metadata DW 4 */ enum iwl_rx_phy_eht_data4 { /* info type: EHT-MU-EXT */ - /* OFDM_RX_VECTOR_COMMON_RU_ALLOC_2_OUT */ IWL_RX_PHY_DATA4_EHT_MU_EXT_RU_ALLOC_D1 = 0x000001ff, IWL_RX_PHY_DATA4_EHT_MU_EXT_RU_ALLOC_D2 = 0x0003fe00, IWL_RX_PHY_DATA4_EHT_MU_EXT_SIGB_MCS = 0x000c0000, + IWL_RX_PHY_DATA4_EHT_MU_EXT_RU_ALLOC_B2 = 0x1ff00000, }; /* goes into Metadata DW 16 */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h index 898e62326e6c..2271b19213fa 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h @@ -1,12 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018, 2020 - 2021 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2020 - 2021, 2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_stats_h__ #define __iwl_fw_api_stats_h__ #include "mac.h" +#include "mac-cfg.h" struct mvm_statistics_dbg { __le32 burst_check; @@ -412,6 +413,49 @@ struct iwl_statistics_cmd { #define MAX_BCAST_FILTER_NUM 8 /** + * enum iwl_statistics_notify_type_id - type_id used in system statistics + * command + * @IWL_STATS_NTFY_TYPE_ID_OPER: request legacy statistics + * @IWL_STATS_NTFY_TYPE_ID_OPER_PART1: request operational part1 statistics + * @IWL_STATS_NTFY_TYPE_ID_OPER_PART2: request operational part2 statistics + * @IWL_STATS_NTFY_TYPE_ID_OPER_PART3: request operational part3 statistics + * @IWL_STATS_NTFY_TYPE_ID_OPER_PART4: request operational part4 statistics + */ +enum iwl_statistics_notify_type_id { + IWL_STATS_NTFY_TYPE_ID_OPER = BIT(0), + IWL_STATS_NTFY_TYPE_ID_OPER_PART1 = BIT(1), + IWL_STATS_NTFY_TYPE_ID_OPER_PART2 = BIT(2), + IWL_STATS_NTFY_TYPE_ID_OPER_PART3 = BIT(3), + IWL_STATS_NTFY_TYPE_ID_OPER_PART4 = BIT(4), +}; + +/** + * enum iwl_statistics_cfg_flags - cfg_mask used in system 
statistics command + * @IWL_STATS_CFG_FLG_DISABLE_NTFY_MSK: 0 for enable, 1 for disable + * @IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK: 0 for periodic, 1 for on-demand + * @IWL_STATS_CFG_FLG_RESET_MSK: 0 to reset statistics after + * sending the notification, 1 to not reset statistics after sending + * the notification + */ +enum iwl_statistics_cfg_flags { + IWL_STATS_CFG_FLG_DISABLE_NTFY_MSK = BIT(0), + IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK = BIT(1), + IWL_STATS_CFG_FLG_RESET_MSK = BIT(2), +}; + +/** + * struct iwl_system_statistics_cmd - system statistics command + * @cfg_mask: configuration mask, &enum iwl_statistics_cfg_flags + * @config_time_sec: time in sec for periodic notification + * @type_id_mask: type_id masks, &enum iwl_statistics_notify_type_id + */ +struct iwl_system_statistics_cmd { + __le32 cfg_mask; + __le32 config_time_sec; + __le32 type_id_mask; +} __packed; /* STATISTICS_FW_CMD_API_S_VER_1 */ + +/** * enum iwl_fw_statistics_type * * @FW_STATISTICS_OPERATIONAL: operational statistics @@ -447,7 +491,49 @@ struct iwl_statistics_ntfy_hdr { }; /* STATISTICS_NTFY_HDR_API_S_VER_1 */ /** - * struct iwl_statistics_ntfy_per_mac + * struct iwl_stats_ntfy_per_link + * + * @beacon_filter_average_energy: Average energy [-dBm] of the 2 + * antennas. + * @air_time: air time + * @beacon_counter: all beacons (both filtered and not filtered) + * @beacon_average_energy: Average energy [-dBm] of all beacons + * (both filtered and not filtered) + * @beacon_rssi_a: beacon RSSI on antenna A + * @beacon_rssi_b: beacon RSSI on antenna B + * @rx_bytes: RX byte count + */ +struct iwl_stats_ntfy_per_link { + __le32 beacon_filter_average_energy; + __le32 air_time; + __le32 beacon_counter; + __le32 beacon_average_energy; + __le32 beacon_rssi_a; + __le32 beacon_rssi_b; + __le32 rx_bytes; +} __packed; /* STATISTICS_NTFY_PER_LINK_API_S_VER_1 */ + +/** + * struct iwl_stats_ntfy_part1_per_link + * + * @rx_time: rx time + * @tx_time: tx time + * @rx_action: action frames handled by FW + * @tx_action: action frames generated and transmitted by FW + * @cca_defers: cca defer count + * @beacon_filtered: filtered out beacons + */ +struct iwl_stats_ntfy_part1_per_link { + __le64 rx_time; + __le64 tx_time; + __le32 rx_action; + __le32 tx_action; + __le32 cca_defers; + __le32 beacon_filtered; +} __packed; /* STATISTICS_FW_NTFY_OPERATIONAL_PART1_PER_LINK_API_S_VER_1 */ + +/** + * struct iwl_stats_ntfy_per_mac * * @beacon_filter_average_energy: Average energy [-dBm] of the 2 * antennas. @@ -459,7 +545,7 @@ struct iwl_statistics_ntfy_hdr { * @beacon_rssi_b: beacon RSSI on antenna B * @rx_bytes: RX byte count */ -struct iwl_statistics_ntfy_per_mac { +struct iwl_stats_ntfy_per_mac { __le32 beacon_filter_average_energy; __le32 air_time; __le32 beacon_counter; @@ -470,7 +556,7 @@ struct iwl_statistics_ntfy_per_mac { } __packed; /* STATISTICS_NTFY_PER_MAC_API_S_VER_1 */ #define IWL_STATS_MAX_BW_INDEX 5 -/** struct iwl_statistics_ntfy_per_phy +/** struct iwl_stats_ntfy_per_phy * @channel_load: channel load * @channel_load_by_us: device contribution to MCLM * @channel_load_not_by_us: other devices' contribution to MCLM @@ -485,7 +571,7 @@ struct iwl_statistics_ntfy_per_mac { * per channel BW. 
note BACK counted as 1 * @last_tx_ch_width_indx: last txed frame channel width index */ -struct iwl_statistics_ntfy_per_phy { +struct iwl_stats_ntfy_per_phy { __le32 channel_load; __le32 channel_load_by_us; __le32 channel_load_not_by_us; @@ -499,23 +585,62 @@ struct iwl_statistics_ntfy_per_phy { } __packed; /* STATISTICS_NTFY_PER_PHY_API_S_VER_1 */ /** - * struct iwl_statistics_ntfy_per_sta + * struct iwl_stats_ntfy_per_sta * * @average_energy: in fact it is minus the energy.. */ -struct iwl_statistics_ntfy_per_sta { +struct iwl_stats_ntfy_per_sta { __le32 average_energy; } __packed; /* STATISTICS_NTFY_PER_STA_API_S_VER_1 */ -#define IWL_STATS_MAX_PHY_OPERTINAL 3 +#define IWL_STATS_MAX_PHY_OPERATIONAL 3 +#define IWL_STATS_MAX_FW_LINKS (IWL_MVM_FW_MAX_LINK_ID + 1) + +/** + * struct iwl_system_statistics_notif_oper + * + * @time_stamp: time when the notification is sent from firmware + * @per_link: per link statistics, &struct iwl_stats_ntfy_per_link + * @per_phy: per phy statistics, &struct iwl_stats_ntfy_per_phy + * @per_sta: per sta statistics, &struct iwl_stats_ntfy_per_sta + */ +struct iwl_system_statistics_notif_oper { + __le32 time_stamp; + struct iwl_stats_ntfy_per_link per_link[IWL_STATS_MAX_FW_LINKS]; + struct iwl_stats_ntfy_per_phy per_phy[IWL_STATS_MAX_PHY_OPERATIONAL]; + struct iwl_stats_ntfy_per_sta per_sta[IWL_MVM_STATION_COUNT_MAX]; +} __packed; /* STATISTICS_FW_NTFY_OPERATIONAL_API_S_VER_3 */ + +/** + * struct iwl_system_statistics_part1_notif_oper + * + * @time_stamp: time when the notification is sent from firmware + * @per_link: per link statistics &struct iwl_stats_ntfy_part1_per_link + * @per_phy_crc_error_stats: per phy crc error statistics + */ +struct iwl_system_statistics_part1_notif_oper { + __le32 time_stamp; + struct iwl_stats_ntfy_part1_per_link per_link[IWL_STATS_MAX_FW_LINKS]; + __le32 per_phy_crc_error_stats[IWL_STATS_MAX_PHY_OPERATIONAL]; +} __packed; /* STATISTICS_FW_NTFY_OPERATIONAL_PART1_API_S_VER_4 */ + +/** + * struct iwl_system_statistics_end_notif + * + * @time_stamp: time when the notification is sent from firmware + */ +struct iwl_system_statistics_end_notif { + __le32 time_stamp; +} __packed; /* STATISTICS_FW_NTFY_END_API_S_VER_1 */ + /** * struct iwl_statistics_operational_ntfy * * @hdr: general statistics header * @flags: bitmap of possible notification structures - * @per_mac_stats: per mac statistics, &struct iwl_statistics_ntfy_per_mac - * @per_phy_stats: per phy statistics, &struct iwl_statistics_ntfy_per_phy - * @per_sta_stats: per sta statistics, &struct iwl_statistics_ntfy_per_sta + * @per_mac: per mac statistics, &struct iwl_stats_ntfy_per_mac + * @per_phy: per phy statistics, &struct iwl_stats_ntfy_per_phy + * @per_sta: per sta statistics, &struct iwl_stats_ntfy_per_sta * @rx_time: rx time * @tx_time: usec the radio is transmitting. * @on_time_rf: The total time in usec the RF is awake. 
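To make the new statistics layout concrete: struct iwl_system_statistics_notif_oper above is a timestamp followed by fixed-size per-link, per-phy and per-sta arrays of little-endian words. Below is a minimal standalone sketch of decoding one per-link record; the struct mirror and the sample buffer are assumptions made for illustration only (the driver itself reads these fields out of the received packet with le32_to_cpu()):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* simplified user-space mirror of iwl_stats_ntfy_per_link (7 x __le32) */
struct per_link_mirror {
	uint32_t beacon_filter_average_energy;
	uint32_t air_time;
	uint32_t beacon_counter;
	uint32_t beacon_average_energy;
	uint32_t beacon_rssi_a;
	uint32_t beacon_rssi_b;
	uint32_t rx_bytes;
} __attribute__((packed));

int main(void)
{
	uint8_t buf[sizeof(struct per_link_mirror)] = { 0 };
	struct per_link_mirror rec;

	buf[8] = 42;	/* fabricated payload: beacon_counter = 42, LE */
	memcpy(&rec, buf, sizeof(rec));

	/* every field arrives little-endian; convert before use */
	printf("beacons %u, rx_bytes %u\n",
	       (unsigned int)le32toh(rec.beacon_counter),
	       (unsigned int)le32toh(rec.rx_bytes));
	return 0;
}

The same walk works for the per-phy and per-sta arrays, since all three are fixed-size and packed back to back after the timestamp.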
@@ -524,9 +649,9 @@ struct iwl_statistics_ntfy_per_sta { struct iwl_statistics_operational_ntfy { struct iwl_statistics_ntfy_hdr hdr; __le32 flags; - struct iwl_statistics_ntfy_per_mac per_mac_stats[MAC_INDEX_AUX]; - struct iwl_statistics_ntfy_per_phy per_phy_stats[IWL_STATS_MAX_PHY_OPERTINAL]; - struct iwl_statistics_ntfy_per_sta per_sta_stats[IWL_MVM_STATION_COUNT_MAX]; + struct iwl_stats_ntfy_per_mac per_mac[MAC_INDEX_AUX]; + struct iwl_stats_ntfy_per_phy per_phy[IWL_STATS_MAX_PHY_OPERATIONAL]; + struct iwl_stats_ntfy_per_sta per_sta[IWL_MVM_STATION_COUNT_MAX]; __le64 rx_time; __le64 tx_time; __le64 on_time_rf; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h index 7cc706731d70..2e15be71c957 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020, 2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2020, 2022-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -335,6 +335,63 @@ struct iwl_hs20_roc_res { __le32 status; } __packed; /* HOT_SPOT_RSP_API_S_VER_1 */ +/* + * Activity types for the ROC command + * @ROC_ACTIVITY_HOTSPOT: ROC for hs20 activity + * @ROC_ACTIVITY_P2P_DISC: ROC for p2p discoverability activity + * @ROC_ACTIVITY_P2P_TXRX: ROC for p2p action frames activity + */ +enum iwl_roc_activity { + ROC_ACTIVITY_HOTSPOT, + ROC_ACTIVITY_P2P_DISC, + ROC_ACTIVITY_P2P_TXRX, + ROC_NUM_ACTIVITIES +}; /* ROC_ACTIVITY_API_E_VER_1 */ + +/* + * ROC command + * + * Command requests the firmware to remain on a channel for a certain duration. + * + * ( MAC_CONF_GROUP 0x3, ROC_CMD 0xE ) + * + * @action: action to perform, see &enum iwl_ctxt_action + * @activity: type of activity, see &enum iwl_roc_activity + * @sta_id: station id, resumed during "Remain On Channel" activity. + * @channel_info: &struct iwl_fw_channel_info + * @node_addr: node MAC address for Rx filtering + * @reserved: align to a dword + * @max_delay: max delay before the ROC starts, in TU + * @duration: remain on channel duration in TU + */ +struct iwl_roc_req { + __le32 action; + __le32 activity; + __le32 sta_id; + struct iwl_fw_channel_info channel_info; + u8 node_addr[ETH_ALEN]; + __le16 reserved; + __le32 max_delay; + __le32 duration; +} __packed; /* ROC_CMD_API_S_VER_3 */ + +/* + * ROC notification + * + * Notification for when the ROC starts and when it ends. + * + * ( MAC_CONF_GROUP 0x3, ROC_NOTIF 0xf8 ) + * + * @success: true if the ROC succeeded to start + * @started: true if the ROC started, false if it ended + * @activity: notification to which activity - &enum iwl_roc_activity + */ +struct iwl_roc_notif { + __le32 success; + __le32 started; + __le32 activity; +} __packed; /* ROC_NOTIF_API_S_VER_1 */ + /** * enum iwl_mvm_session_prot_conf_id - session protection's configurations * @SESSION_PROTECT_CONF_ASSOC: Start a session protection for association. 
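The ROC additions above form a request/notification pair: the driver asks the firmware to stay on a channel with ROC_CMD and is told by ROC_NOTIF whether the on-channel period actually started. A standalone sketch with simplified mirrors of the two structs follows; the field names track the patch, while FW_CTXT_ACTION_ADD's numeric value and the size of iwl_fw_channel_info are assumptions made so the snippet compiles on its own:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
#define FW_CTXT_ACTION_ADD 1		/* assumed value, illustration only */

struct roc_req_mirror {			/* mirrors iwl_roc_req */
	uint32_t action;
	uint32_t activity;		/* one of the ROC_ACTIVITY_* values */
	uint32_t sta_id;
	uint8_t channel_info[8];	/* stand-in for iwl_fw_channel_info */
	uint8_t node_addr[ETH_ALEN];
	uint16_t reserved;
	uint32_t max_delay;		/* TU the start may be delayed by */
	uint32_t duration;		/* TU to remain on channel */
} __attribute__((packed));

struct roc_notif_mirror {		/* mirrors iwl_roc_notif */
	uint32_t success;
	uint32_t started;
	uint32_t activity;
};

int main(void)
{
	struct roc_req_mirror req = {
		.action = htole32(FW_CTXT_ACTION_ADD),
		.activity = htole32(1),	/* ROC_ACTIVITY_P2P_DISC */
		.max_delay = htole32(500),
		.duration = htole32(200),
	};
	/* pretend the firmware answered that the ROC started */
	struct roc_notif_mirror notif = {
		.success = htole32(1),
		.started = htole32(1),
		.activity = htole32(1),
	};

	memset(req.node_addr, 0xff, ETH_ALEN);	/* fabricated Rx filter */

	if (le32toh(notif.success) && le32toh(notif.started))
		printf("on channel for %u TU\n",
		       (unsigned int)le32toh(req.duration));
	else
		printf("ROC did not start or already ended\n");
	return 0;
}

Note how the notification carries the activity back, which lets the driver match it to the request it issued.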
@@ -375,8 +432,8 @@ enum iwl_mvm_session_prot_conf_id { /** * struct iwl_mvm_session_prot_cmd - configure a session protection - * @id_and_color: the id and color of the mac for which this session protection - * is sent + * @id_and_color: the id and color of the link (or mac, for command version 1) + * for which this session protection is sent * @action: can be either FW_CTXT_ACTION_ADD or FW_CTXT_ACTION_REMOVE, * see &enum iwl_ctxt_action * @conf_id: see &enum iwl_mvm_session_prot_conf_id @@ -397,11 +454,15 @@ struct iwl_mvm_session_prot_cmd { __le32 duration_tu; __le32 repetition_count; __le32 interval; -} __packed; /* SESSION_PROTECTION_CMD_API_S_VER_1 */ +} __packed; +/* SESSION_PROTECTION_CMD_API_S_VER_1 and + * SESSION_PROTECTION_CMD_API_S_VER_2 + */ /** * struct iwl_mvm_session_prot_notif - session protection started / ended - * @mac_id: the mac id for which the session protection started / ended + * @mac_link_id: the mac id (or link id, for notif ver > 2) for which the + * session protection started / ended * @status: 1 means success, 0 means failure * @start: 1 means the session protection started, 0 means it ended * @conf_id: see &enum iwl_mvm_session_prot_conf_id @@ -410,10 +471,13 @@ struct iwl_mvm_session_prot_cmd { * and end even the firmware could not schedule it. */ struct iwl_mvm_session_prot_notif { - __le32 mac_id; + __le32 mac_link_id; __le32 status; __le32 start; __le32 conf_id; -} __packed; /* SESSION_PROTECTION_NOTIFICATION_API_S_VER_2 */ +} __packed; +/* SESSION_PROTECTION_NOTIFICATION_API_S_VER_2 and + * SESSION_PROTECTION_NOTIFICATION_API_S_VER_3 + */ #endif /* __iwl_fw_api_time_event_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h index e018946310d1..9c69d3674384 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2019-2021 Intel Corporation + * Copyright (C) 2005-2014, 2019-2021, 2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -76,7 +76,7 @@ enum iwl_tx_queue_cfg_actions { TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1), }; -#define IWL_DEFAULT_QUEUE_SIZE_EHT (1024 * 4) +#define IWL_DEFAULT_QUEUE_SIZE_EHT (512 * 4) #define IWL_DEFAULT_QUEUE_SIZE_HE 1024 #define IWL_DEFAULT_QUEUE_SIZE 256 #define IWL_MGMT_QUEUE_SIZE 16 diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 3ab6a68f1e9f..3975a53a9f20 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -1021,22 +1021,18 @@ struct iwl_dump_ini_region_data { struct iwl_fwrt_dump_data *dump_data; }; -static int -iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt, - struct iwl_dump_ini_region_data *reg_data, - void *range_ptr, u32 range_len, int idx) +static int iwl_dump_ini_prph_mac_iter_common(struct iwl_fw_runtime *fwrt, + void *range_ptr, u32 addr, + __le32 size) { - struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; u32 prph_val; - u32 addr = le32_to_cpu(reg->addrs[idx]) + - le32_to_cpu(reg->dev_addr.offset); int i; range->internal_base_addr = cpu_to_le32(addr); - range->range_data_size = reg->dev_addr.size; - for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) { + range->range_data_size 
= size; + for (i = 0; i < le32_to_cpu(size); i += 4) { prph_val = iwl_read_prph(fwrt->trans, addr + i); if (iwl_trans_is_hw_error_value(prph_val)) return -EBUSY; @@ -1047,38 +1043,61 @@ iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt, } static int -iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt, +iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + u32 addr = le32_to_cpu(reg->addrs[idx]) + + le32_to_cpu(reg->dev_addr.offset); + + return iwl_dump_ini_prph_mac_iter_common(fwrt, range_ptr, addr, + reg->dev_addr.size); +} + +static int +iwl_dump_ini_prph_mac_block_iter(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data, + void *range_ptr, u32 range_len, int idx) +{ + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + struct iwl_fw_ini_addr_size *pairs = (void *)reg->addrs; + u32 addr = le32_to_cpu(reg->dev_addr_range.offset) + + le32_to_cpu(pairs[idx].addr); + + return iwl_dump_ini_prph_mac_iter_common(fwrt, range_ptr, addr, + pairs[idx].size); +} + +static int iwl_dump_ini_prph_phy_iter_common(struct iwl_fw_runtime *fwrt, + void *range_ptr, u32 addr, + __le32 size, __le32 offset) +{ struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; u32 indirect_wr_addr = WMAL_INDRCT_RD_CMD1; u32 indirect_rd_addr = WMAL_MRSPF_1; u32 prph_val; - u32 addr = le32_to_cpu(reg->addrs[idx]); u32 dphy_state; u32 dphy_addr; int i; range->internal_base_addr = cpu_to_le32(addr); - range->range_data_size = reg->dev_addr.size; + range->range_data_size = size; if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) indirect_wr_addr = WMAL_INDRCT_CMD1; - indirect_wr_addr += le32_to_cpu(reg->dev_addr.offset); - indirect_rd_addr += le32_to_cpu(reg->dev_addr.offset); + indirect_wr_addr += le32_to_cpu(offset); + indirect_rd_addr += le32_to_cpu(offset); if (!iwl_trans_grab_nic_access(fwrt->trans)) return -EBUSY; - dphy_addr = (reg->dev_addr.offset) ? WFPM_LMAC2_PS_CTL_RW : - WFPM_LMAC1_PS_CTL_RW; + dphy_addr = (offset) ? 
WFPM_LMAC2_PS_CTL_RW : WFPM_LMAC1_PS_CTL_RW; dphy_state = iwl_read_umac_prph_no_grab(fwrt->trans, dphy_addr); - for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) { + for (i = 0; i < le32_to_cpu(size); i += 4) { if (dphy_state == HBUS_TIMEOUT || (dphy_state & WFPM_PS_CTL_RW_PHYRF_PD_FSM_CURSTATE_MSK) != WFPM_PHYRF_STATE_ON) { @@ -1097,6 +1116,33 @@ iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt, return sizeof(*range) + le32_to_cpu(range->range_data_size); } +static int +iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data, + void *range_ptr, u32 range_len, int idx) +{ + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + u32 addr = le32_to_cpu(reg->addrs[idx]); + + return iwl_dump_ini_prph_phy_iter_common(fwrt, range_ptr, addr, + reg->dev_addr.size, + reg->dev_addr.offset); +} + +static int +iwl_dump_ini_prph_phy_block_iter(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data, + void *range_ptr, u32 range_len, int idx) +{ + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + struct iwl_fw_ini_addr_size *pairs = (void *)reg->addrs; + u32 addr = le32_to_cpu(pairs[idx].addr); + + return iwl_dump_ini_prph_phy_iter_common(fwrt, range_ptr, addr, + pairs[idx].size, + reg->dev_addr_range.offset); +} + static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) @@ -1370,6 +1416,53 @@ out: return sizeof(*range) + le32_to_cpu(range->range_data_size); } +static int +iwl_dump_ini_prph_snps_dphyip_iter(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data, + void *range_ptr, u32 range_len, int idx) +{ + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + struct iwl_fw_ini_error_dump_range *range = range_ptr; + __le32 *val = range->data; + __le32 offset = reg->dev_addr.offset; + u32 indirect_rd_wr_addr = DPHYIP_INDIRECT; + u32 addr = le32_to_cpu(reg->addrs[idx]); + u32 dphy_state, dphy_addr, prph_val; + int i; + + range->internal_base_addr = cpu_to_le32(addr); + range->range_data_size = reg->dev_addr.size; + + if (!iwl_trans_grab_nic_access(fwrt->trans)) + return -EBUSY; + + indirect_rd_wr_addr += le32_to_cpu(offset); + + dphy_addr = offset ? 
WFPM_LMAC2_PS_CTL_RW : WFPM_LMAC1_PS_CTL_RW; + dphy_state = iwl_read_umac_prph_no_grab(fwrt->trans, dphy_addr); + + for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) { + if (dphy_state == HBUS_TIMEOUT || + (dphy_state & WFPM_PS_CTL_RW_PHYRF_PD_FSM_CURSTATE_MSK) != + WFPM_PHYRF_STATE_ON) { + *val++ = cpu_to_le32(WFPM_DPHY_OFF); + continue; + } + + iwl_write_prph_no_grab(fwrt->trans, indirect_rd_wr_addr, + addr + i); + /* wait a bit for value to be ready in register */ + udelay(1); + prph_val = iwl_read_prph_no_grab(fwrt->trans, + indirect_rd_wr_addr); + *val++ = cpu_to_le32((prph_val & DPHYIP_INDIRECT_RD_MSK) >> + DPHYIP_INDIRECT_RD_SHIFT); + } + + iwl_trans_release_nic_access(fwrt->trans); + return sizeof(*range) + le32_to_cpu(range->range_data_size); +} + struct iwl_ini_rxf_data { u32 fifo_num; u32 size; @@ -1781,6 +1874,16 @@ static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt, return iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs); } +static u32 +iwl_dump_ini_mem_block_ranges(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data) +{ + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + size_t size = sizeof(struct iwl_fw_ini_addr_size); + + return iwl_tlv_array_len_with_size(reg_data->reg_tlv, reg, size); +} + static u32 iwl_dump_ini_paging_ranges(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { @@ -1867,6 +1970,25 @@ static u32 iwl_dump_ini_mem_get_size(struct iwl_fw_runtime *fwrt, } static u32 +iwl_dump_ini_mem_block_get_size(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data) +{ + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + struct iwl_fw_ini_addr_size *pairs = (void *)reg->addrs; + u32 ranges = iwl_dump_ini_mem_block_ranges(fwrt, reg_data); + u32 size = sizeof(struct iwl_fw_ini_error_dump); + int range; + + if (!ranges) + return 0; + + for (range = 0; range < ranges; range++) + size += le32_to_cpu(pairs[range].size); + + return size + ranges * sizeof(struct iwl_fw_ini_error_dump_range); +} + +static u32 iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { @@ -2413,6 +2535,18 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = { .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_prph_phy_iter, }, + [IWL_FW_INI_REGION_PERIPHERY_MAC_RANGE] = { + .get_num_of_ranges = iwl_dump_ini_mem_block_ranges, + .get_size = iwl_dump_ini_mem_block_get_size, + .fill_mem_hdr = iwl_dump_ini_mem_fill_header, + .fill_range = iwl_dump_ini_prph_mac_block_iter, + }, + [IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE] = { + .get_num_of_ranges = iwl_dump_ini_mem_block_ranges, + .get_size = iwl_dump_ini_mem_block_get_size, + .fill_mem_hdr = iwl_dump_ini_mem_fill_header, + .fill_range = iwl_dump_ini_prph_phy_block_iter, + }, [IWL_FW_INI_REGION_PERIPHERY_AUX] = {}, [IWL_FW_INI_REGION_PAGING] = { .fill_mem_hdr = iwl_dump_ini_mem_fill_header, @@ -2450,6 +2584,12 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = { .fill_mem_hdr = iwl_dump_ini_mon_dbgi_fill_header, .fill_range = iwl_dump_ini_dbgi_sram_iter, }, + [IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP] = { + .get_num_of_ranges = iwl_dump_ini_mem_ranges, + .get_size = iwl_dump_ini_mem_get_size, + .fill_mem_hdr = iwl_dump_ini_mem_fill_header, + .fill_range = iwl_dump_ini_prph_snps_dphyip_iter, + }, }; static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, @@ -2492,7 +2632,9 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime 
*fwrt, if (reg_type >= ARRAY_SIZE(iwl_dump_ini_region_ops)) continue; - if (reg_type == IWL_FW_INI_REGION_PERIPHERY_PHY && + if ((reg_type == IWL_FW_INI_REGION_PERIPHERY_PHY || + reg_type == IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE || + reg_type == IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP) && tp_id != IWL_FW_INI_TIME_POINT_FW_ASSERT) { IWL_WARN(fwrt, "WRT: trying to collect phy prph at time point: %d, skipping\n", @@ -3228,3 +3370,28 @@ void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, #endif } IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_restart_recording); + +void iwl_fw_disable_dbg_asserts(struct iwl_fw_runtime *fwrt) +{ + struct iwl_fw_dbg_config_cmd cmd = { + .type = cpu_to_le32(DEBUG_TOKEN_CONFIG_TYPE), + .conf = cpu_to_le32(IWL_FW_DBG_CONFIG_TOKEN), + }; + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(LONG_GROUP, LDBG_CONFIG_CMD), + .data[0] = &cmd, + .len[0] = sizeof(cmd), + }; + u32 preset = u32_get_bits(fwrt->trans->dbg.domains_bitmap, + GENMASK(31, IWL_FW_DBG_DOMAIN_POS + 1)); + + /* supported starting from 9000 devices */ + if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000) + return; + + if (fwrt->trans->dbg.yoyo_bin_loaded || (preset && preset != 1)) + return; + + iwl_trans_send_cmd(fwrt->trans, &hcmd); +} +IWL_EXPORT_SYMBOL(iwl_fw_disable_dbg_asserts); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 4227fbd2b977..66b233250c7c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -329,6 +329,7 @@ void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt); void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt, u32 timepoint, u32 timepoint_data); +void iwl_fw_disable_dbg_asserts(struct iwl_fw_runtime *fwrt); #define IWL_FW_CHECK_FAILED(_obj, _fmt, ...) \ IWL_ERR_LIMIT(_obj, _fmt, __VA_ARGS__) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c index 3cdbc6ac7ae5..751a125a1566 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -141,7 +141,11 @@ static int iwl_dbgfs_enabled_severities_write(struct iwl_fw_runtime *fwrt, event_cfg.enabled_severities = cpu_to_le32(enabled_severities); - ret = iwl_trans_send_cmd(fwrt->trans, &hcmd); + if (fwrt->ops && fwrt->ops->send_hcmd) + ret = fwrt->ops->send_hcmd(fwrt->ops_ctx, &hcmd); + else + ret = -EPERM; + IWL_INFO(fwrt, "sent host event cfg with enabled_severities: %u, ret: %d\n", enabled_severities, ret); @@ -342,6 +346,12 @@ static int iwl_dbgfs_fw_info_seq_show(struct seq_file *seq, void *v) " %d: %d\n", IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT, has_capa); + has_capa = fw_has_capa(&fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT) ? 
1 : 0; + seq_printf(seq, + " %d: %d\n", + IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT, + has_capa); seq_puts(seq, "fw_api_ver:\n"); } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dump.c b/drivers/net/wireless/intel/iwlwifi/fw/dump.c index 5876f917e536..8f107ceec407 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dump.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dump.c @@ -182,8 +182,7 @@ static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_nu base = fwrt->fw->inst_errlog_ptr; } - if ((fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ && !base) || - (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ && base < 0x400000)) { + if (!base) { IWL_ERR(fwrt, "Not valid error log pointer 0x%08X for %s uCode\n", base, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index f5e08988dc7b..06d6f7f66430 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -310,9 +310,9 @@ struct iwl_fw_ini_fifo_hdr { struct iwl_fw_ini_error_dump_range { __le32 range_data_size; union { - __le32 internal_base_addr; - __le64 dram_base_addr; - __le32 page_num; + __le32 internal_base_addr __packed; + __le64 dram_base_addr __packed; + __le32 page_num __packed; struct iwl_fw_ini_fifo_hdr fifo_hdr; struct iwl_cmd_header fw_pkt_hdr; }; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index b36e9613a52c..03f6e520145f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2008-2014, 2018-2021 Intel Corporation + * Copyright (C) 2008-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -281,12 +281,16 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER = (__force iwl_ucode_tlv_api_t)58, IWL_UCODE_TLV_API_BAND_IN_RX_DATA = (__force iwl_ucode_tlv_api_t)59, - -#ifdef __CHECKER__ - /* sparse says it cannot increment the previous enum member */ -#define NUM_IWL_UCODE_TLV_API 128 -#else NUM_IWL_UCODE_TLV_API +/* + * This construction makes both sparse (which cannot increment the previous + * member due to its bitwise type) and kernel-doc (which doesn't understand + * the ifdef/else properly) work. + */ +#ifdef __CHECKER__ +#define __CHECKER_NUM_IWL_UCODE_TLV_API 128 + = (__force iwl_ucode_tlv_api_t)__CHECKER_NUM_IWL_UCODE_TLV_API, +#define NUM_IWL_UCODE_TLV_API __CHECKER_NUM_IWL_UCODE_TLV_API #endif }; @@ -468,12 +472,18 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_OFFLOAD_BTM_SUPPORT = (__force iwl_ucode_tlv_capa_t)113, IWL_UCODE_TLV_CAPA_STA_EXP_MFP_SUPPORT = (__force iwl_ucode_tlv_capa_t)114, IWL_UCODE_TLV_CAPA_SNIFF_VALIDATE_SUPPORT = (__force iwl_ucode_tlv_capa_t)116, + IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT = (__force iwl_ucode_tlv_capa_t)117, -#ifdef __CHECKER__ - /* sparse says it cannot increment the previous enum member */ -#define NUM_IWL_UCODE_TLV_CAPA 128 -#else NUM_IWL_UCODE_TLV_CAPA +/* + * This construction makes both sparse (which cannot increment the previous + * member due to its bitwise type) and kernel-doc (which doesn't understand + * the ifdef/else properly) work. 
+ */ +#ifdef __CHECKER__ +#define __CHECKER_NUM_IWL_UCODE_TLV_CAPA 128 + = (__force iwl_ucode_tlv_capa_t)__CHECKER_NUM_IWL_UCODE_TLV_CAPA, +#define NUM_IWL_UCODE_TLV_CAPA __CHECKER_NUM_IWL_UCODE_TLV_CAPA #endif }; @@ -965,4 +975,6 @@ static inline size_t _iwl_tlv_array_len(const struct iwl_ucode_tlv *tlv, _iwl_tlv_array_len((_tlv_ptr), sizeof(*(_struct_ptr)), \ sizeof(_struct_ptr->_memb[0])) +#define iwl_tlv_array_len_with_size(_tlv_ptr, _struct_ptr, _size) \ + _iwl_tlv_array_len((_tlv_ptr), sizeof(*(_struct_ptr)), _size) #endif /* __iwl_fw_file_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h index 8d0d58d61892..96bda80632f3 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2021 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ @@ -198,7 +198,7 @@ struct iwl_dump_exclude { struct iwl_fw { u32 ucode_ver; - char fw_version[64]; + char fw_version[128]; /* ucode images */ struct fw_img img[IWL_UCODE_TYPE_MAX]; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h index 49e8ba11b6a8..0e49794911c1 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014 Intel Corporation + * Copyright (C) 2005-2014, 2023 Intel Corporation * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #ifndef __iwl_notif_wait_h__ @@ -25,6 +25,7 @@ struct iwl_notif_wait_data { * returns true, the wait is over, if it returns false then * the waiter stays blocked. If no function is given, any * of the listed commands will unblock the waiter. + * @fn_data: pointer to pass to the @fn's data argument * @cmds: command IDs * @n_cmds: number of command IDs * @triggered: waiter should be woken up diff --git a/drivers/net/wireless/intel/iwlwifi/fw/rs.c b/drivers/net/wireless/intel/iwlwifi/fw/rs.c index b09e68dbf5a9..8f99e501629e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/rs.c @@ -208,7 +208,6 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate) return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps", iwl_rs_pretty_ant(ant), - index == IWL_RATE_INVALID ? 
"BAD" : iwl_rate_mcs(index)->mbps); } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 702586945533..357727774db9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -98,6 +98,8 @@ struct iwl_txf_iter_data { * @cur_fw_img: current firmware image, must be maintained by * the driver by calling &iwl_fw_set_current_image() * @dump: debug dump data + * @uats_enabled: VLP or AFC AP is enabled + * @uats_table: AP type table */ struct iwl_fw_runtime { struct iwl_trans *trans; @@ -171,6 +173,8 @@ struct iwl_fw_runtime { struct iwl_sar_offset_mapping_cmd sgom_table; bool sgom_enabled; u8 reduced_power_flags; + bool uats_enabled; + struct iwl_uats_table_cmd uats_table; #endif }; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c index 9877988db0d2..2964c5fb11e9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c @@ -388,4 +388,54 @@ void iwl_uefi_get_sgom_table(struct iwl_trans *trans, kfree(data); } IWL_EXPORT_SYMBOL(iwl_uefi_get_sgom_table); + +static int iwl_uefi_uats_parse(struct uefi_cnv_wlan_uats_data *uats_data, + struct iwl_fw_runtime *fwrt) +{ + if (uats_data->revision != 1) + return -EINVAL; + + memcpy(fwrt->uats_table.offset_map, uats_data->offset_map, + sizeof(fwrt->uats_table.offset_map)); + return 0; +} + +int iwl_uefi_get_uats_table(struct iwl_trans *trans, + struct iwl_fw_runtime *fwrt) +{ + struct uefi_cnv_wlan_uats_data *data; + unsigned long package_size; + int ret; + + data = iwl_uefi_get_variable(IWL_UEFI_UATS_NAME, &IWL_EFI_VAR_GUID, + &package_size); + if (IS_ERR(data)) { + IWL_DEBUG_FW(trans, + "UATS UEFI variable not found 0x%lx\n", + PTR_ERR(data)); + return -EINVAL; + } + + if (package_size < sizeof(*data)) { + IWL_DEBUG_FW(trans, + "Invalid UATS table UEFI variable len (%lu)\n", + package_size); + kfree(data); + return -EINVAL; + } + + IWL_DEBUG_FW(trans, "Read UATS from UEFI with size %lu\n", + package_size); + + ret = iwl_uefi_uats_parse(data, fwrt); + if (ret < 0) { + IWL_DEBUG_FW(trans, "Cannot read UATS table. 
rev is invalid\n"); + kfree(data); + return ret; + } + + kfree(data); + return 0; +} +IWL_EXPORT_SYMBOL(iwl_uefi_get_uats_table); #endif /* CONFIG_ACPI */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h index 1369cc4855c3..bf61a8df1225 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h @@ -9,8 +9,10 @@ #define IWL_UEFI_REDUCED_POWER_NAME L"UefiCnvWlanReducedPower" #define IWL_UEFI_SGOM_NAME L"UefiCnvWlanSarGeoOffsetMapping" #define IWL_UEFI_STEP_NAME L"UefiCnvCommonSTEP" +#define IWL_UEFI_UATS_NAME L"CnvUefiWlanUATS" #define IWL_SGOM_MAP_SIZE 339 +#define IWL_UATS_MAP_SIZE 339 struct pnvm_sku_package { u8 rev; @@ -25,6 +27,11 @@ struct uefi_cnv_wlan_sgom_data { u8 offset_map[IWL_SGOM_MAP_SIZE - 1]; } __packed; +struct uefi_cnv_wlan_uats_data { + u8 revision; + u8 offset_map[IWL_UATS_MAP_SIZE - 1]; +} __packed; + struct uefi_cnv_common_step_data { u8 revision; u8 step_mode; @@ -82,10 +89,20 @@ iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data, #if defined(CONFIG_EFI) && defined(CONFIG_ACPI) void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt); +int iwl_uefi_get_uats_table(struct iwl_trans *trans, + struct iwl_fw_runtime *fwrt); #else static inline void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt) { } + +static inline +int iwl_uefi_get_uats_table(struct iwl_trans *trans, + struct iwl_fw_runtime *fwrt) +{ + return 0; +} + #endif #endif /* __iwl_fw_uefi__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 241a9e3f2a1a..02ded22295c1 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -86,10 +86,7 @@ enum iwl_nvm_type { #define IWL_DEFAULT_MAX_TX_POWER 22 #define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\ NETIF_F_TSO | NETIF_F_TSO6) -#define IWL_TX_CSUM_NETIF_FLAGS_BZ (NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6) -#define IWL_CSUM_NETIF_FLAGS_MASK (IWL_TX_CSUM_NETIF_FLAGS | \ - IWL_TX_CSUM_NETIF_FLAGS_BZ | \ - NETIF_F_RXCSUM) +#define IWL_CSUM_NETIF_FLAGS_MASK (IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM) /* Antenna presence definitions */ #define ANT_NONE 0x0 @@ -250,7 +247,6 @@ enum iwl_cfg_trans_ltr_delay { * RFID can be read before deciding the remaining parameters to use. 
* * @base_params: pointer to basic parameters - * @csr: csr flags and addresses that are different across devices * @device_family: the device family * @umac_prph_offset: offset to add to UMAC periphery address * @xtal_latency: power up latency to get the xtal stabilized @@ -319,7 +315,6 @@ struct iwl_fw_mon_regs { * @non_shared_ant: the antenna that is for WiFi only * @nvm_ver: NVM version * @nvm_calib_ver: NVM calibration version - * @lib: pointer to the lib ops * @ht_params: point to ht parameters * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) * @rx_with_siso_diversity: 1x1 device with rx antenna diversity @@ -344,15 +339,12 @@ struct iwl_fw_mon_regs { * @nvm_type: see &enum iwl_nvm_type * @d3_debug_data_base_addr: base address where D3 debug data is stored * @d3_debug_data_length: length of the D3 debug data - * @bisr_workaround: BISR hardware workaround (for 22260 series devices) * @min_txq_size: minimum number of slots required in a TX queue * @uhb_supported: ultra high band channels supported * @min_ba_txq_size: minimum number of slots required in a TX queue which * based on hardware support (HE - 256, EHT - 1K). * @num_rbds: number of receive buffer descriptors to use * (only used for multi-queue capable devices) - * @mac_addr_csr_base: CSR base register for MAC address access, if not set - * assume 0x380 * * We enable the driver to be backward compatible wrt. hardware features. * API differences in uCode shouldn't be handled here but through TLVs diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h index 96bf353469b8..1379dc2d231b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2018, 2020-2022 Intel Corporation + * Copyright (C) 2018, 2020-2023 Intel Corporation */ #ifndef __iwl_context_info_file_gen3_h__ #define __iwl_context_info_file_gen3_h__ @@ -44,7 +44,7 @@ enum iwl_prph_scratch_mtr_format { * @IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER: use st arbiter, mainly for * multicomm. * @IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF: route debug data to SoC HW - * @IWL_PRPH_SCTATCH_RB_SIZE_4K: Use 4K RB size (the default is 2K) + * @IWL_PRPH_SCRATCH_RB_SIZE_4K: Use 4K RB size (the default is 2K) * @IWL_PRPH_SCRATCH_MTR_MODE: format used for completion - 0: for * completion descriptor, 1 for responses (legacy) * @IWL_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd. 
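The next hunk carves two named fields, fseq_override and step_analog_params, out of the reserved area of struct iwl_prph_scratch, shrinking reserved[10] to reserved[8] so the layout the device sees stays the same. A common guard for this kind of change is a compile-time size check; below is a sketch with stand-in types that models only the affected middle of the struct (the surrounding ctrl_cfg and dram members are omitted):

#include <stdint.h>

struct scratch_mid_old {	/* reserved area before the change */
	uint32_t reserved[10];
};

struct scratch_mid_new {	/* two dwords now carry named values */
	uint32_t fseq_override;
	uint32_t step_analog_params;
	uint32_t reserved[8];
};

/* carving fields out of reserved space must never change the size */
_Static_assert(sizeof(struct scratch_mid_new) ==
	       sizeof(struct scratch_mid_old),
	       "scratch layout must stay 40 bytes in this region");

int main(void) { return 0; }

In kernel code the same invariant is usually expressed with BUILD_BUG_ON() or the kernel's static_assert() wrapper rather than a standalone program.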
@@ -187,11 +187,15 @@ struct iwl_prph_scratch_ctrl_cfg { * struct iwl_prph_scratch - peripheral scratch mapping * @ctrl_cfg: control and configuration of prph scratch * @dram: firmware images addresses in DRAM + * @fseq_override: FSEQ override parameters + * @step_analog_params: STEP analog calibration values * @reserved: reserved */ struct iwl_prph_scratch { struct iwl_prph_scratch_ctrl_cfg ctrl_cfg; - __le32 reserved[10]; + __le32 fseq_override; + __le32 step_analog_params; + __le32 reserved[8]; struct iwl_context_info_dram dram; } __packed; /* PERIPH_SCRATCH_S */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 587368a0ad4a..a4df67ff21ba 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2022 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ @@ -313,6 +313,7 @@ enum { SILICON_C_STEP, SILICON_D_STEP, SILICON_E_STEP, + SILICON_TC_STEP = 0xe, SILICON_Z_STEP = 0xf, }; @@ -618,6 +619,7 @@ enum msix_hw_int_causes { MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1), MSIX_HW_INT_CAUSES_REG_IML = BIT(1), MSIX_HW_INT_CAUSES_REG_RESET_DONE = BIT(2), + MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR = BIT(3), MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ = BIT(5), MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6), MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7), diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index ef5baee6c9c5..b658cf228fbe 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -509,6 +509,8 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans) if (res) return; + trans->dbg.yoyo_bin_loaded = true; + iwl_dbg_tlv_parse_bin(trans, fw->data, fw->size); release_firmware(fw); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h index 128059ca77e6..06fb7d665390 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #ifndef __iwl_dbg_tlv_h__ #define __iwl_dbg_tlv_h__ @@ -10,7 +10,8 @@ #include <fw/file.h> #include <fw/api/dbg-tlv.h> -#define IWL_DBG_TLV_MAX_PRESET 15 +#define IWL_DBG_TLV_MAX_PRESET 15 +#define ENABLE_INI (IWL_DBG_TLV_MAX_PRESET + 1) /** * struct iwl_dbg_tlv_node - debug TLV node diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h index 1455b578358b..01fb7b900a6d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h @@ -3,17 +3,19 @@ * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. 
* Copyright(C) 2016 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018, 2023 Intel Corporation *****************************************************************************/ #ifndef __IWLWIFI_DEVICE_TRACE #include <linux/skbuff.h> #include <linux/ieee80211.h> #include <net/cfg80211.h> +#include <net/mac80211.h> #include "iwl-trans.h" #if !defined(__IWLWIFI_DEVICE_TRACE) static inline bool iwl_trace_data(struct sk_buff *skb) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; __le16 fc = hdr->frame_control; int offs = 24; /* start with normal header length */ @@ -21,6 +23,10 @@ static inline bool iwl_trace_data(struct sk_buff *skb) if (!ieee80211_is_data(fc)) return false; + /* If upper layers wanted TX status it's an important frame */ + if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) + return false; + /* Try to determine if the frame is EAPOL. This might have false * positives (if there's no RFC 1042 header and we compare to some * payload instead) but since we're only doing tracing that's not diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 3d87d26845e7..ffe2670720c9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2021 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -162,6 +162,8 @@ static inline char iwl_drv_get_step(int step) { if (step == SILICON_Z_STEP) return 'z'; + if (step == SILICON_TC_STEP) + return 'a'; return 'a' + step; } @@ -178,6 +180,8 @@ const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf) mac_step = iwl_drv_get_step(trans->hw_rev_step); + rf_step = iwl_drv_get_step(CSR_HW_RFID_STEP(trans->hw_rf_id)); + switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { case IWL_CFG_RF_TYPE_HR1: case IWL_CFG_RF_TYPE_HR2: @@ -196,7 +200,13 @@ const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf) rf = "fm"; break; case IWL_CFG_RF_TYPE_WH: - rf = "wh"; + if (SILICON_Z_STEP == + CSR_HW_RFID_STEP(trans->hw_rf_id)) { + rf = "whtc"; + rf_step = 'a'; + } else { + rf = "wh"; + } break; default: return "unknown-rf"; @@ -204,8 +214,6 @@ const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf) cdb = CSR_HW_RFID_IS_CDB(trans->hw_rf_id) ? 
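	/*
	 * Worked example of the name built below (values assumed for
	 * illustration only): fw_name_mac "ty", mac_step 'a', rf "gf" and
	 * rf_step 'a' with no CDB yield "iwlwifi-ty-a0-gf-a0"; a CDB RF
	 * appends the "4", giving "iwlwifi-ty-a0-gf4-a0". The firmware
	 * loader then appends the API version and ".ucode".
	 */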
"4" : ""; - rf_step = iwl_drv_get_step(CSR_HW_RFID_STEP(trans->hw_rf_id)); - scnprintf(buf, FW_NAME_PRE_BUFSIZE, "iwlwifi-%s-%c0-%s%s-%c0", trans->cfg->fw_name_mac, mac_step, @@ -1303,10 +1311,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, case IWL_UCODE_TLV_CURRENT_PC: if (tlv_len < sizeof(struct iwl_pc_data)) goto invalid_tlv_len; - drv->trans->dbg.num_pc = - tlv_len / sizeof(struct iwl_pc_data); drv->trans->dbg.pc_data = kmemdup(tlv_data, tlv_len, GFP_KERNEL); + if (!drv->trans->dbg.pc_data) + return -ENOMEM; + drv->trans->dbg.num_pc = + tlv_len / sizeof(struct iwl_pc_data); break; default: IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type); @@ -1415,6 +1425,9 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op) struct iwl_op_mode *op_mode = NULL; int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY; + /* also protects start/stop from racing against each other */ + lockdep_assert_held(&iwlwifi_opmode_table_mtx); + for (retry = 0; retry <= max_retry; retry++) { #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -1429,6 +1442,9 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op) if (op_mode) return op_mode; + if (test_bit(STATUS_TRANS_DEAD, &drv->trans->status)) + break; + IWL_ERR(drv, "retry init count %d\n", retry); #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -1442,6 +1458,9 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op) static void _iwl_op_mode_stop(struct iwl_drv *drv) { + /* also protects start/stop from racing against each other */ + lockdep_assert_held(&iwlwifi_opmode_table_mtx); + /* op_mode can be NULL if its start failed */ if (drv->op_mode) { iwl_op_mode_stop(drv->op_mode); @@ -1725,11 +1744,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) } mutex_unlock(&iwlwifi_opmode_table_mtx); - /* - * Complete the firmware request last so that - * a driver unbind (stop) doesn't run while we - * are doing the start() above. 
- */ complete(&drv->request_firmware_complete); /* @@ -1795,6 +1809,22 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans) #endif drv->trans->dbg.domains_bitmap = IWL_TRANS_FW_DBG_DOMAIN(drv->trans); + if (iwlwifi_mod_params.enable_ini != ENABLE_INI) { + /* We have a non-default value in the module parameter, + * take its value + */ + drv->trans->dbg.domains_bitmap &= 0xffff; + if (iwlwifi_mod_params.enable_ini != IWL_FW_INI_PRESET_DISABLE) { + if (iwlwifi_mod_params.enable_ini > ENABLE_INI) { + IWL_ERR(trans, + "invalid enable_ini module parameter value: max = %d, using 0 instead\n", + ENABLE_INI); + iwlwifi_mod_params.enable_ini = 0; + } + drv->trans->dbg.domains_bitmap = + BIT(IWL_FW_DBG_DOMAIN_POS + iwlwifi_mod_params.enable_ini); + } + } ret = iwl_request_firmware(drv, true); if (ret) { @@ -1818,11 +1848,12 @@ void iwl_drv_stop(struct iwl_drv *drv) { wait_for_completion(&drv->request_firmware_complete); + mutex_lock(&iwlwifi_opmode_table_mtx); + _iwl_op_mode_stop(drv); iwl_dealloc_ucode(drv); - mutex_lock(&iwlwifi_opmode_table_mtx); /* * List is empty (this item wasn't added) * when firmware loading failed -- in that @@ -1843,8 +1874,6 @@ void iwl_drv_stop(struct iwl_drv *drv) kfree(drv); } -#define ENABLE_INI (IWL_DBG_TLV_MAX_PRESET + 1) - /* shared module parameters */ struct iwl_mod_params iwlwifi_mod_params = { .fw_restart = true, @@ -1964,38 +1993,7 @@ module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644); MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)"); -static int enable_ini_set(const char *arg, const struct kernel_param *kp) -{ - int ret = 0; - bool res; - __u32 new_enable_ini; - - /* in case the argument type is a number */ - ret = kstrtou32(arg, 0, &new_enable_ini); - if (!ret) { - if (new_enable_ini > ENABLE_INI) { - pr_err("enable_ini cannot be %d, in range 0-16\n", new_enable_ini); - return -EINVAL; - } - goto out; - } - - /* in case the argument type is boolean */ - ret = kstrtobool(arg, &res); - if (ret) - return ret; - new_enable_ini = (res ? ENABLE_INI : 0); - -out: - iwlwifi_mod_params.enable_ini = new_enable_ini; - return 0; -} - -static const struct kernel_param_ops enable_ini_ops = { - .set = enable_ini_set -}; - -module_param_cb(enable_ini, &enable_ini_ops, &iwlwifi_mod_params.enable_ini, 0644); +module_param_named(enable_ini, iwlwifi_mod_params.enable_ini, uint, 0444); MODULE_PARM_DESC(enable_ini, "0:disable, 1-15:FW_DBG_PRESET Values, 16:enabled without preset value defined," "Debug INI TLV FW debug infrastructure (default: 16)"); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h index 6c19989e4ab7..3d1a27ba35c6 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h @@ -56,7 +56,7 @@ struct iwl_cfg; /** * iwl_drv_start - start the drv * - * @trans_ops: the ops of the transport + * @trans: the transport * * starts the driver: fetches the firmware. This should be called by bus * specific system flows implementations. 
For example, the bus specific probe diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c index d7a7835b935c..5aab64c63a13 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2005-2014, 2018-2021, 2023 Intel Corporation * Copyright (C) 2015 Intel Mobile Communications GmbH */ #include <linux/types.h> @@ -721,6 +721,9 @@ void iwl_init_ht_hw_capab(struct iwl_trans *trans, ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4; ht_info->mcs.rx_mask[0] = 0xFF; + ht_info->mcs.rx_mask[1] = 0x00; + ht_info->mcs.rx_mask[2] = 0x00; + if (rx_chains >= 2) ht_info->mcs.rx_mask[1] = 0xFF; if (rx_chains >= 3) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h index 0e8ca761d24b..34a178a2eb5d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018, 2020-2022 Intel Corporation + * Copyright (C) 2005-2014, 2018, 2020-2023 Intel Corporation * Copyright (C) 2015 Intel Mobile Communications GmbH */ #ifndef __iwl_eeprom_parse_h__ @@ -61,7 +61,7 @@ struct iwl_nvm_data { /** * iwl_parse_eeprom_data - parse EEPROM data and return values * - * @dev: device pointer we're parsing for, for debug only + * @trans: transport we're parsing for, for debug only * @cfg: device configuration for parsing and overrides * @eeprom: the EEPROM data * @eeprom_size: length of the EEPROM data diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h index 4e4a60ddf9b2..e0400ba2ab74 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -565,6 +565,8 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, #define RX_QUEUE_MASK 255 #define RX_QUEUE_SIZE_LOG 8 +#define IWL_DEFAULT_RX_QUEUE 0 + /** * struct iwl_rb_status - reserve buffer status * host memory mapped FH registers @@ -679,12 +681,13 @@ struct iwl_tfh_tb { /** * struct iwl_tfd - Transmit Frame Descriptor (TFD) - * @ __reserved1[3] reserved - * @ num_tbs 0-4 number of active tbs - * 5 reserved - * 6-7 padding (not used) - * @ tbs[20] transmit frame buffer descriptors - * @ __pad padding + * @__reserved1: reserved + * @num_tbs: + * 0-4 number of active tbs + * 5 reserved + * 6-7 padding (not used) + * @tbs: transmit frame buffer descriptors + * @__pad: padding */ struct iwl_tfd { u8 __reserved1[3]; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 8c23f57f5c89..6015e1255d2a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -671,7 +671,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = { IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | IEEE80211_EHT_MAC_CAP0_OM_CONTROL | IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 | - IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2, + IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2 | + IEEE80211_EHT_MAC_CAP0_SCS_TRAFFIC_DESC, .phy_cap_info[0] = IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ | IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI | @@
-962,6 +963,9 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, } } } else { + struct ieee80211_he_mcs_nss_supp *he_mcs_nss_supp = + &iftype_data->he_cap.he_mcs_nss_supp; + if (iftype_data->eht_cap.has_eht) { struct ieee80211_eht_mcs_nss_supp *mcs_nss = &iftype_data->eht_cap.eht_mcs_nss_supp; @@ -980,6 +984,19 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |= IEEE80211_HE_PHY_CAP7_MAX_NC_1; } + + he_mcs_nss_supp->rx_mcs_80 |= + cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2); + he_mcs_nss_supp->tx_mcs_80 |= + cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2); + he_mcs_nss_supp->rx_mcs_160 |= + cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2); + he_mcs_nss_supp->tx_mcs_160 |= + cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2); + he_mcs_nss_supp->rx_mcs_80p80 |= + cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2); + he_mcs_nss_supp->tx_mcs_80p80 |= + cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2); } if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210 && !is_ap) @@ -990,6 +1007,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, case IWL_CFG_RF_TYPE_GF: case IWL_CFG_RF_TYPE_MR: case IWL_CFG_RF_TYPE_MS: + case IWL_CFG_RF_TYPE_FM: + case IWL_CFG_RF_TYPE_WH: iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU; if (!is_ap) @@ -1050,10 +1069,6 @@ static void iwl_init_he_hw_capab(struct iwl_trans *trans, struct ieee80211_sband_iftype_data *iftype_data; int i; - /* should only initialize once */ - if (WARN_ON(sband->iftype_data)) - return; - BUILD_BUG_ON(sizeof(data->iftd.low) != sizeof(iwl_he_eht_capa)); BUILD_BUG_ON(sizeof(data->iftd.high) != sizeof(iwl_he_eht_capa)); BUILD_BUG_ON(sizeof(data->iftd.uhb) != sizeof(iwl_he_eht_capa)); @@ -1075,8 +1090,8 @@ static void iwl_init_he_hw_capab(struct iwl_trans *trans, memcpy(iftype_data, iwl_he_eht_capa, sizeof(iwl_he_eht_capa)); - sband->iftype_data = iftype_data; - sband->n_iftype_data = ARRAY_SIZE(iwl_he_eht_capa); + _ieee80211_set_sband_iftype_data(sband, iftype_data, + ARRAY_SIZE(iwl_he_eht_capa)); for (i = 0; i < sband->n_iftype_data; i++) iwl_nvm_fixup_sband_iftd(trans, data, sband, &iftype_data[i], @@ -1085,6 +1100,37 @@ static void iwl_init_he_hw_capab(struct iwl_trans *trans, iwl_init_he_6ghz_capa(trans, data, sband, tx_chains, rx_chains); } +void iwl_reinit_cab(struct iwl_trans *trans, struct iwl_nvm_data *data, + u8 tx_chains, u8 rx_chains, const struct iwl_fw *fw) +{ + struct ieee80211_supported_band *sband; + + sband = &data->bands[NL80211_BAND_2GHZ]; + iwl_init_ht_hw_capab(trans, data, &sband->ht_cap, NL80211_BAND_2GHZ, + tx_chains, rx_chains); + + if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) + iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains, + fw); + + sband = &data->bands[NL80211_BAND_5GHZ]; + iwl_init_ht_hw_capab(trans, data, &sband->ht_cap, NL80211_BAND_5GHZ, + tx_chains, rx_chains); + if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac) + iwl_init_vht_hw_capab(trans, data, &sband->vht_cap, + tx_chains, rx_chains); + + if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) + iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains, + fw); + + sband = &data->bands[NL80211_BAND_6GHZ]; + if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) + iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains, + fw); +} +IWL_EXPORT_SYMBOL(iwl_reinit_cab); + static void iwl_init_sbands(struct iwl_trans *trans, struct iwl_nvm_data *data, const 
void *nvm_ch_flags, u8 tx_chains, @@ -1363,7 +1409,7 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct iwl_nvm_data * iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_mei_nvm *mei_nvm, - const struct iwl_fw *fw) + const struct iwl_fw *fw, u8 tx_ant, u8 rx_ant) { struct iwl_nvm_data *data; u32 sbands_flags = 0; @@ -1390,6 +1436,10 @@ iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, tx_chains &= data->valid_tx_ant; if (data->valid_rx_ant) rx_chains &= data->valid_rx_ant; + if (tx_ant) + tx_chains &= tx_ant; + if (rx_ant) + rx_chains &= rx_ant; data->sku_cap_mimo_disabled = false; data->sku_cap_band_24ghz_enable = true; @@ -1955,7 +2005,8 @@ out: IWL_EXPORT_SYMBOL(iwl_read_external_nvm); struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, - const struct iwl_fw *fw) + const struct iwl_fw *fw, + u8 set_tx_ant, u8 set_rx_ant) { struct iwl_nvm_get_info cmd = {}; struct iwl_nvm_data *nvm; @@ -1969,6 +2020,9 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, bool empty_otp; u32 mac_flags; u32 sbands_flags = 0; + u8 tx_ant; + u8 rx_ant; + /* * All the values in iwl_nvm_get_info_rsp v4 are the same as * in v3, except for the channel profile part of the @@ -2056,10 +2110,15 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, channel_profile = v4 ? (void *)rsp->regulatory.channel_profile : (void *)rsp_v3->regulatory.channel_profile; - iwl_init_sbands(trans, nvm, - channel_profile, - nvm->valid_tx_ant & fw->valid_tx_ant, - nvm->valid_rx_ant & fw->valid_rx_ant, + tx_ant = nvm->valid_tx_ant & fw->valid_tx_ant; + rx_ant = nvm->valid_rx_ant & fw->valid_rx_ant; + + if (set_tx_ant) + tx_ant &= set_tx_ant; + if (set_rx_ant) + rx_ant &= set_rx_ant; + + iwl_init_sbands(trans, nvm, channel_profile, tx_ant, rx_ant, sbands_flags, v4, fw); iwl_free_resp(&hcmd); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index c79f72d54482..651ed25b683b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2015, 2018-2022 Intel Corporation + * Copyright (C) 2005-2015, 2018-2023 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_nvm_parse_h__ @@ -21,7 +21,7 @@ enum iwl_nvm_sbands_flags { IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ = BIT(1), }; -/** +/* * iwl_parse_nvm_data - parse NVM data and return values * * This function parses all NVM values we need and then @@ -73,21 +73,28 @@ int iwl_read_external_nvm(struct iwl_trans *trans, void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data, unsigned int len); -/** +/* * iwl_get_nvm - retrieve NVM data from firmware * * Allocates a new iwl_nvm_data structure, fills it with * NVM data, and returns it to caller. 
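 *
 * When set_tx_ant/set_rx_ant are non-zero they further restrict the
 * antenna masks taken from NVM and firmware before the bands are
 * initialized; a sketch of the masking done inside:
 *
 *	tx_ant = nvm->valid_tx_ant & fw->valid_tx_ant;
 *	if (set_tx_ant)
 *		tx_ant &= set_tx_ant;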
*/ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, - const struct iwl_fw *fw); + const struct iwl_fw *fw, + u8 set_tx_ant, u8 set_rx_ant); -/** +/* * iwl_parse_mei_nvm_data - parse the mei_nvm_data and get an iwl_nvm_data */ struct iwl_nvm_data * iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_mei_nvm *mei_nvm, - const struct iwl_fw *fw); + const struct iwl_fw *fw, u8 set_tx_ant, u8 set_rx_ant); + +/* + * iwl_reinit_cab - to be called when the tx_chains or rx_chains are modified + */ +void iwl_reinit_cab(struct iwl_trans *trans, struct iwl_nvm_data *data, + u8 tx_chains, u8 rx_chains, const struct iwl_fw *fw); #endif /* __iwl_nvm_parse_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 6dd381ff0f9e..dd32c287b983 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -348,8 +348,8 @@ #define RFIC_REG_RD 0xAD0470 #define WFPM_CTRL_REG 0xA03030 #define WFPM_OTP_CFG1_ADDR 0x00a03098 -#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(4) -#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(5) +#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(5) +#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(4) #define WFPM_OTP_BZ_BNJ_JACKET_BIT 5 #define WFPM_OTP_BZ_BNJ_CDB_BIT 4 #define WFPM_OTP_CFG1_IS_JACKET(_val) (((_val) & 0x00000020) >> WFPM_OTP_BZ_BNJ_JACKET_BIT) @@ -365,7 +365,6 @@ #define DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK 0x00000FFF enum { - ENABLE_WFPM = BIT(31), WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000, }; @@ -383,7 +382,7 @@ enum { #define PREG_PRPH_WPROT_22000 0xA04D00 #define SB_MODIFY_CFG_FLAG 0xA03088 -#define SB_CFG_RESIDES_IN_OTP_MASK 0x10 +#define SB_CFG_RESIDES_IN_ROM 0x80 #define SB_CPU_1_STATUS 0xA01E30 #define SB_CPU_2_STATUS 0xA01E34 #define UMAG_SB_CPU_1_STATUS 0xA038C0 @@ -424,14 +423,14 @@ enum { * reserved: bits 12-18 * slave_exist: bit 19 * dash: bits 20-23 - * step: bits 24-26 - * flavor: bits 27-31 + * step: bits 24-27 + * flavor: bits 28-31 */ #define REG_CRF_ID_TYPE(val) (((val) & 0x00000FFF) >> 0) #define REG_CRF_ID_SLAVE(val) (((val) & 0x00080000) >> 19) #define REG_CRF_ID_DASH(val) (((val) & 0x00F00000) >> 20) -#define REG_CRF_ID_STEP(val) (((val) & 0x07000000) >> 24) -#define REG_CRF_ID_FLAVOR(val) (((val) & 0xF8000000) >> 27) +#define REG_CRF_ID_STEP(val) (((val) & 0x0F000000) >> 24) +#define REG_CRF_ID_FLAVOR(val) (((val) & 0xF0000000) >> 28) #define UREG_CHICK (0xA05C00) #define UREG_CHICK_MSI_ENABLE BIT(24) @@ -452,6 +451,7 @@ enum { #define REG_CRF_ID_TYPE_FM 0x910 #define REG_CRF_ID_TYPE_FMI 0x930 #define REG_CRF_ID_TYPE_FMR 0x900 +#define REG_CRF_ID_TYPE_WHP 0xA10 #define HPM_DEBUG 0xA03440 #define PERSISTENCE_BIT BIT(12) @@ -516,4 +516,8 @@ enum { #define WFPM_LMAC2_PD_NOTIFICATION 0xA033CC #define WFPM_LMAC2_PD_RE_READ BIT(31) +#define DPHYIP_INDIRECT 0xA2D800 +#define DPHYIP_INDIRECT_RD_MSK 0xFF000000 +#define DPHYIP_INDIRECT_RD_SHIFT 24 + #endif /* __iwl_prph_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index d02943d0ea62..05e72a2125b3 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2022 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -56,6 
+56,10 @@ * 6) Eventually, the free function will be called. */ +/* default preset 0 (start from bit 16)*/ +#define IWL_FW_DBG_DOMAIN_POS 16 +#define IWL_FW_DBG_DOMAIN BIT(IWL_FW_DBG_DOMAIN_POS) + #define IWL_TRANS_FW_DBG_DOMAIN(trans) IWL_FW_INI_DOMAIN_ALWAYS_ON #define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */ @@ -105,6 +109,7 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt) * @CMD_ASYNC: Return right away and don't wait for the response * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of * the response. The caller needs to call iwl_free_resp when done. + * @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill. * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be * called after this command completes. Valid only with CMD_ASYNC. * @CMD_SEND_IN_D3: Allow the command to be sent in D3 mode, relevant to @@ -274,7 +279,7 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r) #define IWL_MGMT_TID 15 #define IWL_FRAME_LIMIT 64 #define IWL_MAX_RX_HW_QUEUES 16 -#define IWL_9000_MAX_RX_HW_QUEUES 6 +#define IWL_9000_MAX_RX_HW_QUEUES 1 /** * enum iwl_wowlan_status - WoWLAN image/device status @@ -584,7 +589,7 @@ struct iwl_trans_ops { int (*tx)(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_tx_cmd *dev_cmd, int queue); void (*reclaim)(struct iwl_trans *trans, int queue, int ssn, - struct sk_buff_head *skbs); + struct sk_buff_head *skbs, bool is_flush); void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr); @@ -734,6 +739,7 @@ struct iwl_dram_data { }; /** + * struct iwl_dram_regions - DRAM regions container structure * @drams: array of several DRAM areas that contains the pnvm and power * reduction table payloads. * @n_regions: number of DRAM regions that were allocated @@ -833,6 +839,7 @@ struct iwl_pc_data { * @dump_file_name_ext_valid: dump file name extension if valid or not * @num_pc: number of program counter for cpu * @pc_data: details of the program counter + * @yoyo_bin_loaded: tells if a yoyo debug file has been loaded */ struct iwl_trans_debug { u8 n_dest_reg; @@ -862,8 +869,7 @@ struct iwl_trans_debug { u64 unsupported_region_msk; struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID]; struct list_head debug_info_tlv_list; - struct iwl_dbg_tlv_time_point_data - time_point[IWL_FW_INI_TIME_POINT_NUM]; + struct iwl_dbg_tlv_time_point_data time_point[IWL_FW_INI_TIME_POINT_NUM]; struct list_head periodic_trig_list; u32 domains_bitmap; @@ -875,6 +881,7 @@ struct iwl_trans_debug { bool dump_file_name_ext_valid; u32 num_pc; struct iwl_pc_data *pc_data; + bool yoyo_bin_loaded; }; struct iwl_dma_ptr { @@ -916,7 +923,6 @@ struct iwl_pcie_first_tb_buf { /** * struct iwl_txq - Tx Queue for DMA - * @q: generic Rx/Tx queue descriptor * @tfds: transmit frame descriptors (DMA memory) * @first_tb_bufs: start of command headers, including scratch buffers, for * the writeback -- this is DMA memory and an array holding one buffer @@ -1060,15 +1066,15 @@ struct iwl_trans_txqs { * starting the firmware, used for tracing * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the * start of the 802.11 header in the @rx_mpdu_cmd - * @dflt_pwr_limit: default power limit fetched from the platform (ACPI) * @system_pm_mode: the system-wide power management mode in use. * This mode is set dynamically, depending on the WoWLAN values * configured from the userspace at runtime. - * @iwl_trans_txqs: transport tx queues data. + * @txqs: transport tx queues data. 
* @mbx_addr_0_step: step address data 0 * @mbx_addr_1_step: step address data 1 * @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*), * only valid for discrete (not integrated) NICs + * @invalid_tx_cmd: invalid TX command buffer */ struct iwl_trans { bool csme_own; @@ -1133,6 +1139,8 @@ struct iwl_trans { u8 pcie_link_speed; + struct iwl_dma_ptr invalid_tx_cmd; + /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ char trans_specific[] __aligned(sizeof(void *)); @@ -1266,14 +1274,15 @@ static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, } static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue, - int ssn, struct sk_buff_head *skbs) + int ssn, struct sk_buff_head *skbs, + bool is_flush) { if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return; } - trans->ops->reclaim(trans, queue, ssn, skbs); + trans->ops->reclaim(trans, queue, ssn, skbs, is_flush); } static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, @@ -1490,7 +1499,7 @@ static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr) { u32 value; - if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1))) + if (iwl_trans_read_mem(trans, addr, &value, 1)) return 0xa5a5a5a5; return value; diff --git a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h index 655d95d3a068..1f3c885aeb65 100644 --- a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h +++ b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2021 - 2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation */ #ifndef __iwl_mei_h__ @@ -493,7 +493,7 @@ static inline void iwl_mei_set_power_limit(__le16 *power_limit) static inline int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops) -{ return 0; } +{ return -EOPNOTSUPP; } static inline void iwl_mei_start_unregister(void) {} diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c index 54445f39fd55..1dd9106c6513 100644 --- a/drivers/net/wireless/intel/iwlwifi/mei/main.c +++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2023 Intel Corporation */ #include <linux/etherdevice.h> @@ -774,9 +774,13 @@ static void iwl_mei_set_init_conf(struct iwl_mei *mei) iwl_mei_send_sap_msg_payload(mei->cldev, &sar_msg.hdr); } - ether_addr_copy(nic_info_msg.mac_address, iwl_mei_cache.mac_address); - ether_addr_copy(nic_info_msg.nvm_address, iwl_mei_cache.nvm_address); - iwl_mei_send_sap_msg_payload(mei->cldev, &nic_info_msg.hdr); + if (is_valid_ether_addr(iwl_mei_cache.mac_address)) { + ether_addr_copy(nic_info_msg.mac_address, + iwl_mei_cache.mac_address); + ether_addr_copy(nic_info_msg.nvm_address, + iwl_mei_cache.nvm_address); + iwl_mei_send_sap_msg_payload(mei->cldev, &nic_info_msg.hdr); + } iwl_mei_send_sap_msg_payload(mei->cldev, &rfkill_msg.hdr); } @@ -1532,7 +1536,7 @@ void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info, mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); - if (!mei && !mei->amt_enabled) + if (!mei || !mei->amt_enabled) goto out; iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); @@ -1561,7 +1565,7 @@ void iwl_mei_host_disassociated(void) mei = 
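	/*
	 * The guards below use "||" rather than "&&": with "&&", a NULL
	 * mei is dereferenced by the second operand, so each helper has
	 * to bail out when either the context is missing or AMT is
	 * disabled.
	 */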
mei_cldev_get_drvdata(iwl_mei_global_cldev); - if (!mei && !mei->amt_enabled) + if (!mei || !mei->amt_enabled) goto out; iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); @@ -1597,7 +1601,7 @@ void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill) mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); - if (!mei && !mei->amt_enabled) + if (!mei || !mei->amt_enabled) goto out; iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); @@ -1626,7 +1630,7 @@ void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address) mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); - if (!mei && !mei->amt_enabled) + if (!mei || !mei->amt_enabled) goto out; iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); @@ -1654,7 +1658,7 @@ void iwl_mei_set_country_code(u16 mcc) mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); - if (!mei && !mei->amt_enabled) + if (!mei || !mei->amt_enabled) goto out; iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); @@ -1680,7 +1684,7 @@ void iwl_mei_set_power_limit(const __le16 *power_limit) mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); - if (!mei && !mei->amt_enabled) + if (!mei || !mei->amt_enabled) goto out; memcpy(msg.sar_chain_info_table, power_limit, sizeof(msg.sar_chain_info_table)); @@ -1832,7 +1836,9 @@ void iwl_mei_unregister_complete(void) struct iwl_mei *mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); - iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WIFIDR_DOWN); + if (mei->amt_enabled) + iwl_mei_send_sap_msg(mei->cldev, + SAP_MSG_NOTIF_WIFIDR_DOWN); mei->got_ownership = false; } @@ -2070,33 +2076,29 @@ static void iwl_mei_remove(struct mei_cl_device *cldev) mutex_lock(&iwl_mei_mutex); - if (mei->amt_enabled) { - /* - * Tell CSME that we are going down so that it won't access the - * memory anymore, make sure this message goes through immediately. - */ - mei->csa_throttled = false; - iwl_mei_send_sap_msg(mei->cldev, - SAP_MSG_NOTIF_HOST_GOES_DOWN); - - for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) { - if (!iwl_mei_host_to_me_data_pending(mei)) - break; + /* Tell CSME that we are going down so that it won't access the + * memory anymore, make sure this message goes through immediately. + */ + mei->csa_throttled = false; + iwl_mei_send_sap_msg(mei->cldev, + SAP_MSG_NOTIF_HOST_GOES_DOWN); - msleep(20); - } + for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) { + if (!iwl_mei_host_to_me_data_pending(mei)) + break; - /* - * If we couldn't make sure that CSME saw the HOST_GOES_DOWN - * message, it means that it will probably keep reading memory - * that we are going to unmap and free, expect IOMMU error - * messages. - */ - if (i == SEND_SAP_MAX_WAIT_ITERATION) - dev_err(&mei->cldev->dev, - "Couldn't get ACK from CSME on HOST_GOES_DOWN message\n"); + msleep(20); } + /* If we couldn't make sure that CSME saw the HOST_GOES_DOWN + * message, it means that it will probably keep reading memory + * that we are going to unmap and free, expect IOMMU error + * messages. 
+ */ + if (i == SEND_SAP_MAX_WAIT_ITERATION) + dev_err(&mei->cldev->dev, + "Couldn't get ACK from CSME on HOST_GOES_DOWN message\n"); + mutex_unlock(&iwl_mei_mutex); /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 243eccc68cb0..c832068b5718 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -60,6 +60,7 @@ #define IWL_MVM_UAPSD_NONAGG_PERIOD 5000 /* msecs */ #define IWL_MVM_UAPSD_NOAGG_LIST_LEN IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM #define IWL_MVM_NON_TRANSMITTING_AP 0 +#define IWL_MVM_CONN_LISTEN_INTERVAL 10 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1 @@ -118,5 +119,6 @@ #define IWL_MVM_DISABLE_AP_FILS false #define IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT 3000 /* in seconds */ #define IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT 60 /* in seconds */ +#define IWL_MVM_AUTO_EML_ENABLE true #endif /* __MVM_CONSTANTS_H */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index f6488b4bbe68..92c45571bd69 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -818,7 +818,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (ret) IWL_ERR(mvm, "Failed to send quota: %d\n", ret); - if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm)) + if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm, false)) IWL_ERR(mvm, "Failed to initialize D3 LAR information\n"); return 0; @@ -1438,6 +1438,7 @@ struct iwl_wowlan_status_data { } ptk; struct iwl_multicast_key_data igtk; + struct iwl_multicast_key_data bigtk[WOWLAN_BIGTK_KEYS_NUM]; u8 *wake_packet; }; @@ -1781,8 +1782,8 @@ static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key, struct iwl_mvm_d3_gtk_iter_data { struct iwl_mvm *mvm; struct iwl_wowlan_status_data *status; - u32 gtk_cipher, igtk_cipher; - bool unhandled_cipher, igtk_support; + u32 gtk_cipher, igtk_cipher, bigtk_cipher; + bool unhandled_cipher, igtk_support, bigtk_support; int num_keys; }; @@ -1817,6 +1818,9 @@ static void iwl_mvm_d3_find_last_keys(struct ieee80211_hw *hw, if (data->igtk_support && (key->keyidx == 4 || key->keyidx == 5)) { data->igtk_cipher = key->cipher; + } else if (data->bigtk_support && + (key->keyidx == 6 || key->keyidx == 7)) { + data->bigtk_cipher = key->cipher; } else { data->unhandled_cipher = true; return; @@ -1848,6 +1852,24 @@ iwl_mvm_d3_set_igtk_bigtk_ipn(const struct iwl_multicast_key_data *key, } } +static void +iwl_mvm_d3_update_igtk_bigtk(struct iwl_wowlan_status_data *status, + struct ieee80211_key_conf *key, + struct iwl_multicast_key_data *key_data) +{ + if (status->num_of_gtk_rekeys && key_data->len) { + /* remove rekeyed key */ + ieee80211_remove_key(key); + } else { + struct ieee80211_key_seq seq; + + iwl_mvm_d3_set_igtk_bigtk_ipn(key_data, + &seq, + key->cipher); + ieee80211_set_key_rx_seq(key, 0, &seq); + } +} + static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -1900,17 +1922,14 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_AES_CMAC: if (key->keyidx == 4 || key->keyidx == 5) { - /* remove rekeyed key */ - if (status->num_of_gtk_rekeys) { - ieee80211_remove_key(key); - } else { - struct ieee80211_key_seq seq; + 
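			/*
			 * Key index convention (IEEE 802.11): indices 4/5
			 * carry the IGTK and 6/7 the BIGTK, so both cases
			 * share the update helper and differ only in the
			 * backing status entry.
			 */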
iwl_mvm_d3_update_igtk_bigtk(status, key, + &status->igtk); + } + if (key->keyidx == 6 || key->keyidx == 7) { + u8 idx = key->keyidx == status->bigtk[1].id; - iwl_mvm_d3_set_igtk_bigtk_ipn(&status->igtk, - &seq, - key->cipher); - ieee80211_set_key_rx_seq(key, 0, &seq); - } + iwl_mvm_d3_update_igtk_bigtk(status, key, + &status->bigtk[idx]); } } } @@ -2012,6 +2031,16 @@ iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status, if (IS_ERR(key_config)) return false; ieee80211_set_key_rx_seq(key_config, 0, &seq); + + if (key_config->keyidx == 4 || key_config->keyidx == 5) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int link_id = vif->active_links ? __ffs(vif->active_links) : 0; + struct iwl_mvm_vif_link_info *mvm_link = + mvmvif->link[link_id]; + + mvm_link->igtk = key_config; + } + return true; } @@ -2042,6 +2071,8 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, .mvm = mvm, .status = status, }; + int i; + u32 disconnection_reasons = IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH; @@ -2058,6 +2089,11 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, 0)) gtkdata.igtk_support = true; + if (iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP, + WOWLAN_INFO_NOTIFICATION, + 0) >= 3) + gtkdata.bigtk_support = true; + /* find last GTK that we used initially, if any */ ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_d3_find_last_keys, >kdata); @@ -2088,6 +2124,13 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, &status->igtk)) return false; + for (i = 0; i < ARRAY_SIZE(status->bigtk); i++) { + if (!iwl_mvm_d3_igtk_bigtk_rekey_add(status, vif, + gtkdata.bigtk_cipher, + &status->bigtk[i])) + return false; + } + ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid, (void *)&replay_ctr, GFP_KERNEL); } @@ -2172,6 +2215,37 @@ static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status, memcpy(status->igtk.ipn, data->ipn, sizeof(data->ipn)); } +static void iwl_mvm_convert_bigtk(struct iwl_wowlan_status_data *status, + const struct iwl_wowlan_igtk_status *data) +{ + int data_idx, status_idx = 0; + + BUILD_BUG_ON(ARRAY_SIZE(status->bigtk) < WOWLAN_BIGTK_KEYS_NUM); + + for (data_idx = 0; data_idx < WOWLAN_BIGTK_KEYS_NUM; data_idx++) { + if (!data[data_idx].key_len) + continue; + + status->bigtk[status_idx].len = data[data_idx].key_len; + status->bigtk[status_idx].flags = data[data_idx].key_flags; + status->bigtk[status_idx].id = + u32_get_bits(data[data_idx].key_flags, + IWL_WOWLAN_IGTK_BIGTK_IDX_MASK) + + WOWLAN_BIGTK_MIN_INDEX; + + BUILD_BUG_ON(sizeof(status->bigtk[status_idx].key) < + sizeof(data[data_idx].key)); + BUILD_BUG_ON(sizeof(status->bigtk[status_idx].ipn) < + sizeof(data[data_idx].ipn)); + + memcpy(status->bigtk[status_idx].key, data[data_idx].key, + sizeof(data[data_idx].key)); + memcpy(status->bigtk[status_idx].ipn, data[data_idx].ipn, + sizeof(data[data_idx].ipn)); + status_idx++; + } +} + static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm, struct iwl_wowlan_info_notif *data, struct iwl_wowlan_status_data *status, @@ -2194,7 +2268,42 @@ static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm, iwl_mvm_convert_key_counters_v5(status, &data->gtk[0].sc); iwl_mvm_convert_gtk_v3(status, data->gtk); iwl_mvm_convert_igtk(status, &data->igtk[0]); + iwl_mvm_convert_bigtk(status, data->bigtk); + status->replay_ctr = le64_to_cpu(data->replay_ctr); + status->pattern_number = le16_to_cpu(data->pattern_number); + for (i = 0; i < IWL_MAX_TID_COUNT; 
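	     /* one saved QoS sequence counter per TID */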
i++) + status->qos_seq_ctr[i] = + le16_to_cpu(data->qos_seq_ctr[i]); + status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons); + status->num_of_gtk_rekeys = + le32_to_cpu(data->num_of_gtk_rekeys); + status->received_beacons = le32_to_cpu(data->received_beacons); + status->tid_tear_down = data->tid_tear_down; +} +static void +iwl_mvm_parse_wowlan_info_notif_v2(struct iwl_mvm *mvm, + struct iwl_wowlan_info_notif_v2 *data, + struct iwl_wowlan_status_data *status, + u32 len) +{ + u32 i; + + if (!data) { + IWL_ERR(mvm, "iwl_wowlan_info_notif data is NULL\n"); + status = NULL; + return; + } + + if (len < sizeof(*data)) { + IWL_ERR(mvm, "Invalid WoWLAN info notification!\n"); + status = NULL; + return; + } + + iwl_mvm_convert_key_counters_v5(status, &data->gtk[0].sc); + iwl_mvm_convert_gtk_v3(status, data->gtk); + iwl_mvm_convert_igtk(status, &data->igtk[0]); status->replay_ctr = le64_to_cpu(data->replay_ctr); status->pattern_number = le16_to_cpu(data->pattern_number); for (i = 0; i < IWL_MAX_TID_COUNT; i++) @@ -2866,7 +2975,7 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait, struct iwl_mvm *mvm = container_of(notif_wait, struct iwl_mvm, notif_wait); struct iwl_d3_data *d3_data = data; - u32 len; + u32 len = iwl_rx_packet_payload_len(pkt); int ret; int wowlan_info_ver = iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP, @@ -2876,7 +2985,6 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait, switch (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) { case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION): { - struct iwl_wowlan_info_notif *notif; if (d3_data->notif_received & IWL_D3_NOTIF_WOWLAN_INFO) { /* We might get two notifications due to dual bss */ @@ -2886,26 +2994,39 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait, } if (wowlan_info_ver < 2) { - struct iwl_wowlan_info_notif_v1 *notif_v1 = (void *)pkt->data; + struct iwl_wowlan_info_notif_v1 *notif_v1 = + (void *)pkt->data; + struct iwl_wowlan_info_notif_v2 *notif_v2; - notif = kmemdup(notif_v1, sizeof(*notif), GFP_ATOMIC); - if (!notif) + notif_v2 = kmemdup(notif_v1, sizeof(*notif_v2), GFP_ATOMIC); + + if (!notif_v2) return false; - notif->tid_tear_down = notif_v1->tid_tear_down; - notif->station_id = notif_v1->station_id; - memset_after(notif, 0, station_id); + notif_v2->tid_tear_down = notif_v1->tid_tear_down; + notif_v2->station_id = notif_v1->station_id; + memset_after(notif_v2, 0, station_id); + iwl_mvm_parse_wowlan_info_notif_v2(mvm, notif_v2, + d3_data->status, + len); + kfree(notif_v2); + + } else if (wowlan_info_ver == 2) { + struct iwl_wowlan_info_notif_v2 *notif_v2 = + (void *)pkt->data; + + iwl_mvm_parse_wowlan_info_notif_v2(mvm, notif_v2, + d3_data->status, + len); } else { - notif = (void *)pkt->data; + struct iwl_wowlan_info_notif *notif = + (void *)pkt->data; + + iwl_mvm_parse_wowlan_info_notif(mvm, notif, + d3_data->status, len); } d3_data->notif_received |= IWL_D3_NOTIF_WOWLAN_INFO; - len = iwl_rx_packet_payload_len(pkt); - iwl_mvm_parse_wowlan_info_notif(mvm, notif, d3_data->status, - len); - - if (wowlan_info_ver < 2) - kfree(notif); if (d3_data->status && d3_data->status->wakeup_reasons & IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index cb4ecad6103f..e8b881596baf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -699,19 +699,11 @@ 
MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32); MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff); - -void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct dentry *dbgfs_dir = vif->debugfs_dir; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - char buf[100]; - - /* - * Check if debugfs directory already exist before creating it. - * This may happen when, for example, resetting hw or suspend-resume - */ - if (!dbgfs_dir || mvmvif->dbgfs_dir) - return; mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir); if (IS_ERR_OR_NULL(mvmvif->dbgfs_dir)) { @@ -737,6 +729,17 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && mvmvif == mvm->bf_allowed_vif) MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, 0600); +} + +void iwl_mvm_vif_dbgfs_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct dentry *dbgfs_dir = vif->debugfs_dir; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + char buf[100]; + + /* this will happen in monitor mode */ + if (!dbgfs_dir) + return; /* * Create symlink for convenience pointing to interface specific @@ -745,21 +748,62 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * find * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/ */ - snprintf(buf, 100, "../../../%pd3/%pd", - dbgfs_dir, - mvmvif->dbgfs_dir); + snprintf(buf, 100, "../../../%pd3/iwlmvm", dbgfs_dir); mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name, mvm->debugfs_dir, buf); } -void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +void iwl_mvm_vif_dbgfs_rm_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); debugfs_remove(mvmvif->dbgfs_slink); mvmvif->dbgfs_slink = NULL; +} + +#define MVM_DEBUGFS_WRITE_LINK_FILE_OPS(name, bufsz) \ + _MVM_DEBUGFS_WRITE_FILE_OPS(link_##name, bufsz, \ + struct ieee80211_bss_conf) +#define MVM_DEBUGFS_READ_WRITE_LINK_FILE_OPS(name, bufsz) \ + _MVM_DEBUGFS_READ_WRITE_FILE_OPS(link_##name, bufsz, \ + struct ieee80211_bss_conf) +#define MVM_DEBUGFS_ADD_LINK_FILE(name, parent, mode) \ + debugfs_create_file(#name, mode, parent, link_conf, \ + &iwl_dbgfs_link_##name##_ops) + +static void iwl_mvm_debugfs_add_link_files(struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct dentry *mvm_dir) +{ + /* Add per-link files here*/ +} + +void iwl_mvm_link_add_debugfs(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct dentry *dir) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + unsigned int link_id = link_conf->link_id; + struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id]; + struct dentry *mvm_dir; + + if (WARN_ON(!link_info) || !dir) + return; + + if (dir == vif->debugfs_dir) { + WARN_ON(!mvmvif->dbgfs_dir); + mvm_dir = mvmvif->dbgfs_dir; + } else { + mvm_dir = debugfs_create_dir("iwlmvm", dir); + if (IS_ERR_OR_NULL(mvm_dir)) { + IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n", + dir); + return; + } + } - debugfs_remove_recursive(mvmvif->dbgfs_dir); - mvmvif->dbgfs_dir = NULL; + iwl_mvm_debugfs_add_link_files(vif, link_conf, mvm_dir); } diff --git 
a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index cf27f106d4d5..329c545f65fd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -50,8 +50,18 @@ static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret; + bool force = false; - if (!iwl_mvm_is_ctdp_supported(mvm)) + if (!kstrtobool(buf, &force)) + IWL_DEBUG_INFO(mvm, + "force stop is %d [0=disabled, 1=enabled]\n", + force); + + /* we allow skipping the cap support check and forcing a stop of + * CTDP statistics collection, with the guarantee that it is + * safe to use. + */ + if (!force && !iwl_mvm_is_ctdp_supported(mvm)) return -EOPNOTSUPP; if (!iwl_mvm_firmware_running(mvm) || @@ -65,6 +75,36 @@ static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf, return ret ?: count; } +static ssize_t iwl_dbgfs_start_ctdp_write(struct iwl_mvm *mvm, + char *buf, size_t count, + loff_t *ppos) +{ + int ret; + bool force = false; + + if (!kstrtobool(buf, &force)) + IWL_DEBUG_INFO(mvm, + "force start is %d [0=disabled, 1=enabled]\n", + force); + + /* we allow skipping the cap support check and forcing CTDP enablement + * for statistics collection, with the guarantee that it is + * safe to use. + */ + if (!force && !iwl_mvm_is_ctdp_supported(mvm)) + return -EOPNOTSUPP; + + if (!iwl_mvm_firmware_running(mvm) || + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) + return -EIO; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, 0); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + static ssize_t iwl_dbgfs_force_ctkill_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { @@ -965,6 +1005,13 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file, char *buf; int ret; size_t bufsz; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(SYSTEM_GROUP, + SYSTEM_STATISTICS_CMD), + IWL_FW_CMD_VER_UNKNOWN); + + if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN) + return -EOPNOTSUPP; if (iwl_mvm_has_new_rx_stats_api(mvm)) bufsz = ((sizeof(struct mvm_statistics_rx) / @@ -1144,6 +1191,101 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file, } #undef PRINT_STAT_LE32 +static ssize_t iwl_dbgfs_fw_system_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + char *buff, *pos, *endpos; + int ret; + size_t bufsz; + int i; + struct iwl_mvm_vif *mvmvif; + struct ieee80211_vif *vif; + struct iwl_mvm *mvm = file->private_data; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(SYSTEM_GROUP, + SYSTEM_STATISTICS_CMD), + IWL_FW_CMD_VER_UNKNOWN); + + /* in case of a wrong cmd version, allocate buffer only for error msg */ + bufsz = (cmd_ver == 1) ?
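		/*
		 * 4 KiB holds the full per-link and radio statistics
		 * printed below; 64 bytes is enough for the "not
		 * supported" message.
		 */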
4096 : 64; + + buff = kzalloc(bufsz, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + pos = buff; + endpos = pos + bufsz; + + if (cmd_ver != 1) { + pos += scnprintf(pos, endpos - pos, + "System stats not supported:%d\n", cmd_ver); + goto send_out; + } + + mutex_lock(&mvm->mutex); + if (iwl_mvm_firmware_running(mvm)) + iwl_mvm_request_statistics(mvm, false); + + for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) { + vif = iwl_mvm_rcu_dereference_vif_id(mvm, i, false); + if (!vif) + continue; + + if (vif->type == NL80211_IFTYPE_STATION) + break; + } + + if (i == NUM_MAC_INDEX_DRIVER || !vif) { + pos += scnprintf(pos, endpos - pos, "vif is NULL\n"); + goto release_send_out; + } + + mvmvif = iwl_mvm_vif_from_mac80211(vif); + if (!mvmvif) { + pos += scnprintf(pos, endpos - pos, "mvmvif is NULL\n"); + goto release_send_out; + } + + for_each_mvm_vif_valid_link(mvmvif, i) { + struct iwl_mvm_vif_link_info *link_info = mvmvif->link[i]; + + pos += scnprintf(pos, endpos - pos, + "link_id %d", i); + pos += scnprintf(pos, endpos - pos, + " num_beacons %d", + link_info->beacon_stats.num_beacons); + pos += scnprintf(pos, endpos - pos, + " accu_num_beacons %d", + link_info->beacon_stats.accu_num_beacons); + pos += scnprintf(pos, endpos - pos, + " avg_signal %d\n", + link_info->beacon_stats.avg_signal); + } + + pos += scnprintf(pos, endpos - pos, + "radio_stats.rx_time %lld\n", + mvm->radio_stats.rx_time); + pos += scnprintf(pos, endpos - pos, + "radio_stats.tx_time %lld\n", + mvm->radio_stats.tx_time); + pos += scnprintf(pos, endpos - pos, + "accu_radio_stats.rx_time %lld\n", + mvm->accu_radio_stats.rx_time); + pos += scnprintf(pos, endpos - pos, + "accu_radio_stats.tx_time %lld\n", + mvm->accu_radio_stats.tx_time); + +release_send_out: + mutex_unlock(&mvm->mutex); + +send_out: + ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff); + kfree(buff); + + return ret; +} + static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm, char __user *user_buf, size_t count, loff_t *ppos, @@ -1998,6 +2140,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64); /* Device wide debugfs entries */ MVM_DEBUGFS_READ_FILE_OPS(ctdp_budget); MVM_DEBUGFS_WRITE_FILE_OPS(stop_ctdp, 8); +MVM_DEBUGFS_WRITE_FILE_OPS(start_ctdp, 8); MVM_DEBUGFS_WRITE_FILE_OPS(force_ctkill, 8); MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16); MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8); @@ -2012,6 +2155,7 @@ MVM_DEBUGFS_READ_FILE_OPS(bt_cmd); MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64); MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats); MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats); +MVM_DEBUGFS_READ_FILE_OPS(fw_system_stats); MVM_DEBUGFS_READ_FILE_OPS(fw_ver); MVM_DEBUGFS_READ_FILE_OPS(phy_integration_ver); MVM_DEBUGFS_READ_FILE_OPS(tas_get_status); @@ -2210,6 +2354,7 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) MVM_DEBUGFS_ADD_FILE(nic_temp, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(ctdp_budget, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(stop_ctdp, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(start_ctdp, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(force_ctkill, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(stations, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(bt_notif, mvm->debugfs_dir, 0400); @@ -2218,6 +2363,7 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) MVM_DEBUGFS_ADD_FILE(fw_ver, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE(fw_system_stats, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(fw_restart, 
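			    /* modes: 0400 = read-only, 0200 = write-only debugfs entries */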
mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, 0200); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h index 0711ab689c48..cc2c45b45ddc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* + * Copyright (C) 2023 Intel Corporation * Copyright (C) 2012-2014 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c index b49781d1a07a..10b9219b3bfd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #include <net/cfg80211.h> #include <linux/etherdevice.h> @@ -302,7 +302,12 @@ static void iwl_mvm_resp_del_pasn_sta(struct iwl_mvm *mvm, struct iwl_mvm_pasn_sta *sta) { list_del(&sta->list); - iwl_mvm_rm_sta_id(mvm, vif, sta->int_sta.sta_id); + + if (iwl_mvm_has_mld_api(mvm->fw)) + iwl_mvm_mld_rm_sta_id(mvm, sta->int_sta.sta_id); + else + iwl_mvm_rm_sta_id(mvm, vif, sta->int_sta.sta_id); + iwl_mvm_dealloc_int_sta(mvm, &sta->int_sta); kfree(sta); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 1f5db65a088d..403bd17b8b7a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -15,6 +15,7 @@ #include "iwl-prph.h" #include "fw/acpi.h" #include "fw/pnvm.h" +#include "fw/uefi.h" #include "mvm.h" #include "fw/dbg.h" @@ -23,12 +24,15 @@ #include "iwl-nvm-parse.h" #include "time-sync.h" -#define MVM_UCODE_ALIVE_TIMEOUT (HZ) +#define MVM_UCODE_ALIVE_TIMEOUT (2 * HZ) #define MVM_UCODE_CALIB_TIMEOUT (2 * HZ) #define IWL_TAS_US_MCC 0x5553 #define IWL_TAS_CANADA_MCC 0x4341 +#define IWL_UATS_VLP_AP_SUPPORTED BIT(29) +#define IWL_UATS_AFC_AP_SUPPORTED BIT(30) + struct iwl_mvm_alive_data { bool valid; u32 scd_base_addr; @@ -487,6 +491,52 @@ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm, } #if defined(CONFIG_ACPI) && defined(CONFIG_EFI) +static void iwl_mvm_uats_init(struct iwl_mvm *mvm) +{ + u8 cmd_ver; + int ret; + struct iwl_host_cmd cmd = { + .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, + UATS_TABLE_CMD), + .flags = 0, + .data[0] = &mvm->fwrt.uats_table, + .len[0] = sizeof(mvm->fwrt.uats_table), + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + }; + + if (!(mvm->trans->trans_cfg->device_family >= + IWL_DEVICE_FAMILY_AX210)) { + IWL_DEBUG_RADIO(mvm, "UATS feature is not supported\n"); + return; + } + + if (!mvm->fwrt.uats_enabled) { + IWL_DEBUG_RADIO(mvm, "UATS feature is disabled\n"); + return; + } + + cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id, + IWL_FW_CMD_VER_UNKNOWN); + if (cmd_ver != 1) { + IWL_DEBUG_RADIO(mvm, + "UATS_TABLE_CMD ver %d not supported\n", + cmd_ver); + return; + } + + ret = iwl_uefi_get_uats_table(mvm->trans, &mvm->fwrt); + if (ret < 0) { + IWL_ERR(mvm, "failed to read UATS table (%d)\n", ret); + return; + } + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret < 0) + IWL_ERR(mvm, "failed to send UATS_TABLE_CMD (%d)\n", ret); + else + IWL_DEBUG_RADIO(mvm, "UATS_TABLE_CMD 
sent to FW\n"); +} + static int iwl_mvm_sgom_init(struct iwl_mvm *mvm) { u8 cmd_ver; @@ -526,6 +576,10 @@ static int iwl_mvm_sgom_init(struct iwl_mvm *mvm) { return 0; } + +static void iwl_mvm_uats_init(struct iwl_mvm *mvm) +{ +} #endif static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) @@ -583,6 +637,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) static const u16 init_complete[] = { INIT_COMPLETE_NOTIF, }; + u32 sb_cfg; int ret; if (mvm->trans->cfg->tx_with_siso_diversity) @@ -592,6 +647,14 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) mvm->rfkill_safe_init_done = false; + if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) { + sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG); + /* if needed, we'll reset this on our way out later */ + mvm->pldr_sync = sb_cfg == SB_CFG_RESIDES_IN_ROM; + if (mvm->pldr_sync && iwl_mei_pldr_req()) + return -EBUSY; + } + iwl_init_notification_wait(&mvm->notif_wait, &init_wait, init_complete, @@ -605,6 +668,13 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); if (ret) { IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); + + /* if we needed reset then fail here, but notify and remove */ + if (mvm->pldr_sync) { + iwl_mei_alive_notif(false); + iwl_trans_pcie_remove(mvm->trans, true); + } + goto error; } iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE, @@ -667,7 +737,8 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) /* Read the NVM only at driver load time, no need to do this twice */ if (!IWL_MVM_PARSE_NVM && !mvm->nvm_data) { - mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw); + mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw, + mvm->set_tx_ant, mvm->set_rx_ant); if (IS_ERR(mvm->nvm_data)) { ret = PTR_ERR(mvm->nvm_data); mvm->nvm_data = NULL; @@ -802,7 +873,7 @@ out: mvm->nvm_data->bands[0].n_channels = 1; mvm->nvm_data->bands[0].n_bitrates = 1; mvm->nvm_data->bands[0].bitrates = - (void *)((u8 *)mvm->nvm_data->channels + 1); + (void *)(mvm->nvm_data->channels + 1); mvm->nvm_data->bands[0].bitrates->hw_value = 10; } @@ -1084,6 +1155,12 @@ static const struct dmi_system_id dmi_tas_approved_list[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), }, }, + { .ident = "GOOGLE-HP", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Google"), + DMI_MATCH(DMI_BOARD_VENDOR, "HP"), + }, + }, { .ident = "MSI", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."), @@ -1209,7 +1286,10 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) { int ret; u32 value; - struct iwl_lari_config_change_cmd_v6 cmd = {}; + struct iwl_lari_config_change_cmd_v7 cmd = {}; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(REGULATORY_AND_NVM_GROUP, + LARI_CONFIG_CHANGE), 1); cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt); @@ -1227,8 +1307,11 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_ACTIVATE_CHANNEL, &iwl_guid, &value); - if (!ret) + if (!ret) { + if (cmd_ver < 8) + value &= ~ACTIVATE_5G2_IN_WW_MASK; cmd.chan_state_active_bitmap = cpu_to_le32(value); + } ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_ENABLE_6E, @@ -1242,18 +1325,26 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) if (!ret) cmd.force_disable_channels_bitmap = cpu_to_le32(value); + ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, + DSM_FUNC_ENERGY_DETECTION_THRESHOLD, + &iwl_guid, &value); + if (!ret) + cmd.edt_bitmap = cpu_to_le32(value); + if 
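	/* send the command only if at least one bitmap above was populated */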
(cmd.config_bitmap || cmd.oem_uhb_allow_bitmap || cmd.oem_11ax_allow_bitmap || cmd.oem_unii4_allow_bitmap || cmd.chan_state_active_bitmap || - cmd.force_disable_channels_bitmap) { + cmd.force_disable_channels_bitmap || + cmd.edt_bitmap) { size_t cmd_size; - u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, - WIDE_ID(REGULATORY_AND_NVM_GROUP, - LARI_CONFIG_CHANGE), - 1); + switch (cmd_ver) { + case 8: + case 7: + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v7); + break; case 6: cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6); break; @@ -1287,6 +1378,9 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n", le32_to_cpu(cmd.oem_uhb_allow_bitmap), le32_to_cpu(cmd.force_disable_channels_bitmap)); + IWL_DEBUG_RADIO(mvm, + "sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x\n", + le32_to_cpu(cmd.edt_bitmap)); ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, LARI_CONFIG_CHANGE), @@ -1296,6 +1390,10 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) "Failed to send LARI_CONFIG_CHANGE (%d)\n", ret); } + + if (le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_VLP_AP_SUPPORTED || + le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_AFC_AP_SUPPORTED) + mvm->fwrt.uats_enabled = TRUE; } void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm) @@ -1499,10 +1597,7 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) int iwl_mvm_up(struct iwl_mvm *mvm) { int ret, i; - struct ieee80211_channel *chan; - struct cfg80211_chan_def chandef; struct ieee80211_supported_band *sband = NULL; - u32 sb_cfg; lockdep_assert_held(&mvm->mutex); @@ -1510,11 +1605,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (ret) return ret; - sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG); - mvm->pldr_sync = !(sb_cfg & SB_CFG_RESIDES_IN_OTP_MASK); - if (mvm->pldr_sync && iwl_mei_pldr_req()) - return -EBUSY; - ret = iwl_mvm_load_rt_fw(mvm); if (ret) { IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); @@ -1527,6 +1617,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) /* FW loaded successfully */ mvm->pldr_sync = false; + iwl_fw_disable_dbg_asserts(&mvm->fwrt); iwl_get_shared_mem_conf(&mvm->fwrt); ret = iwl_mvm_sf_update(mvm, NULL, false); @@ -1630,21 +1721,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; } - chan = &sband->channels[0]; - - cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); - for (i = 0; i < NUM_PHY_CTX; i++) { - /* - * The channel used here isn't relevant as it's - * going to be overwritten in the other flows. - * For now use the first channel we have. 
- */ - ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i], - &chandef, 1, 1); - if (ret) - goto error; - } - if (iwl_mvm_is_tt_in_fw(mvm)) { /* in order to give the responsibility of ct-kill and * TX backoff to FW we need to send empty temperature reporting @@ -1727,6 +1803,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) iwl_mvm_tas_init(mvm); iwl_mvm_leds_sync(mvm); + iwl_mvm_uats_init(mvm); if (iwl_rfi_supported(mvm)) { if (iwl_mvm_eval_dsm_rfi(mvm) == DSM_VALUE_RFI_ENABLE) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c index ace82e2c5bd9..be48b0fc9cb6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c @@ -53,7 +53,6 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, unsigned int link_id = link_conf->link_id; struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id]; struct iwl_link_config_cmd cmd = {}; - struct iwl_mvm_phy_ctxt *phyctxt; if (WARN_ON_ONCE(!link_info)) return -EINVAL; @@ -61,7 +60,7 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) { link_info->fw_link_id = iwl_mvm_get_free_fw_link_id(mvm, mvmvif); - if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) + if (link_info->fw_link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf)) return -EINVAL; rcu_assign_pointer(mvm->link_id_to_link_conf[link_info->fw_link_id], @@ -77,12 +76,8 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cmd.link_id = cpu_to_le32(link_info->fw_link_id); cmd.mac_id = cpu_to_le32(mvmvif->id); cmd.spec_link_id = link_conf->link_id; - /* P2P-Device already has a valid PHY context during add */ - phyctxt = link_info->phy_ctxt; - if (phyctxt) - cmd.phy_id = cpu_to_le32(phyctxt->id); - else - cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID); + WARN_ON_ONCE(link_info->phy_ctxt); + cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID); memcpy(cmd.local_link_addr, link_conf->addr, ETH_ALEN); @@ -194,11 +189,14 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, flags_mask |= LINK_FLG_MU_EDCA_CW; } - if (link_conf->eht_puncturing && !iwlwifi_mod_params.disable_11be) - cmd.puncture_mask = cpu_to_le16(link_conf->eht_puncturing); - else - /* This flag can be set only if the MAC has eht support */ - changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS; + if (changes & LINK_CONTEXT_MODIFY_EHT_PARAMS) { + if (iwlwifi_mod_params.disable_11be || + !link_conf->eht_support) + changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS; + else + cmd.puncture_mask = + cpu_to_le16(link_conf->eht_puncturing); + } cmd.bss_color = link_conf->he_bss_color.color; @@ -244,9 +242,10 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_link_config_cmd cmd = {}; int ret; + /* mac80211 thought we have the link, but it was never configured */ if (WARN_ON(!link_info || - link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID)) - return -EINVAL; + link_info->fw_link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf))) + return 0; RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id], NULL); @@ -254,6 +253,7 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_release_fw_link_id(mvm, link_info->fw_link_id); link_info->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID; cmd.spec_link_id = link_conf->link_id; + cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID); ret = iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_REMOVE); diff --git 
a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 7369a45f7f2b..c4f96125cf33 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -286,6 +286,10 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) INIT_LIST_HEAD(&mvmvif->time_event_data.list); mvmvif->time_event_data.id = TE_MAX; + mvmvif->deflink.bcast_sta.sta_id = IWL_MVM_INVALID_STA; + mvmvif->deflink.mcast_sta.sta_id = IWL_MVM_INVALID_STA; + mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA; + /* No need to allocate data queues to P2P Device MAC and NAN.*/ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) return 0; @@ -300,10 +304,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE; } - mvmvif->deflink.bcast_sta.sta_id = IWL_MVM_INVALID_STA; - mvmvif->deflink.mcast_sta.sta_id = IWL_MVM_INVALID_STA; - mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA; - for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) mvmvif->deflink.smps_requests[i] = IEEE80211_SMPS_AUTOMATIC; @@ -1083,6 +1083,19 @@ static int iwl_mvm_mac_ctxt_send_beacon_v7(struct iwl_mvm *mvm, sizeof(beacon_cmd)); } +bool iwl_mvm_enable_fils(struct iwl_mvm *mvm, + struct ieee80211_chanctx_conf *ctx) +{ + if (IWL_MVM_DISABLE_AP_FILS) + return false; + + if (cfg80211_channel_is_psc(ctx->def.chan)) + return true; + + return (ctx->def.chan->band == NL80211_BAND_6GHZ && + ctx->def.width >= NL80211_CHAN_WIDTH_80); +} + static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon, @@ -1102,8 +1115,7 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm, ctx = rcu_dereference(link_conf->chanctx_conf); channel = ieee80211_frequency_to_channel(ctx->def.chan->center_freq); WARN_ON(channel == 0); - if (cfg80211_channel_is_psc(ctx->def.chan) && - !IWL_MVM_DISABLE_AP_FILS) { + if (iwl_mvm_enable_fils(mvm, ctx)) { flags |= iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD, 0) > 10 ? 
IWL_MAC_BEACON_FILS : @@ -1761,6 +1773,7 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm, u32 id_n_color, csa_id; /* save mac_id or link_id to use later to cancel csa if needed */ u32 id; + u32 mac_link_id = 0; u8 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF, 0); bool csa_active; @@ -1790,6 +1803,7 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm, goto out_unlock; id = link_id; + mac_link_id = bss_conf->link_id; vif = bss_conf->vif; csa_active = bss_conf->csa_active; } @@ -1839,7 +1853,7 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm, iwl_mvm_csa_client_absent(mvm, vif); cancel_delayed_work(&mvmvif->csa_work); - ieee80211_chswitch_done(vif, true); + ieee80211_chswitch_done(vif, true, mac_link_id); break; default: /* should never happen */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index ce7905faa08f..a64600f0ed9f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -186,7 +186,7 @@ struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, MCC_SOURCE_OLD_FW, changed); } -int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm) +int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm, bool force_regd_sync) { enum iwl_mcc_source used_src; struct ieee80211_regdomain *regd; @@ -213,8 +213,10 @@ int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm) if (IS_ERR_OR_NULL(regd)) return -EIO; - /* update cfg80211 if the regdomain was changed */ - if (changed) + /* update cfg80211 if the regdomain was changed or the caller explicitly + * asked to update regdomain + */ + if (changed || force_regd_sync) ret = regulatory_set_wiphy_regd_sync(mvm->hw->wiphy, regd); else ret = 0; @@ -279,6 +281,30 @@ int iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) return 0; } +int iwl_mvm_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + /* This has been tested on those devices only */ + if (mvm->trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_9000 && + mvm->trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_22000) + return -ENOTSUPP; + + if (!mvm->nvm_data) + return -EBUSY; + + /* mac80211 ensures the device is not started, + * so the firmware cannot be running + */ + + mvm->set_tx_ant = tx_ant; + mvm->set_rx_ant = rx_ant; + + iwl_reinit_cab(mvm->trans, mvm->nvm_data, tx_ant, rx_ant, mvm->fw); + + return 0; +} + int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) { struct ieee80211_hw *hw = mvm->hw; @@ -315,8 +341,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ieee80211_hw_set(hw, STA_MMPDU_TXQ); /* Set this early since we need to have it for the check below */ - if (mvm->mld_api_is_used && - mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + if (mvm->mld_api_is_used && mvm->nvm_data->sku_cap_11be_enable && + !iwlwifi_mod_params.disable_11ax && + !iwlwifi_mod_params.disable_11be) hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO; /* With MLD FW API, it tracks timing by itself, @@ -351,7 +378,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ieee80211_hw_set(hw, HAS_RATE_CONTROL); } - if (iwl_mvm_has_new_rx_api(mvm)) + /* We want to use the mac80211's reorder buffer for 9000 */ + if (iwl_mvm_has_new_rx_api(mvm) && + mvm->trans->trans_cfg->device_family > IWL_DEVICE_FAMILY_9000) ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); if (fw_has_capa(&mvm->fw->ucode_capa, @@ -497,7 
+526,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ARRAY_SIZE(iwl_mvm_iface_combinations); hw->wiphy->max_remain_on_channel_duration = 10000; - hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; + hw->max_listen_interval = IWL_MVM_CONN_LISTEN_INTERVAL; /* Extract MAC address */ memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN); @@ -1032,6 +1061,7 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, spin_unlock_bh(&mvm->time_event_lock); memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data)); + mvmvif->ap_sta = NULL; for_each_mvm_vif_valid_link(mvmvif, link_id) { mvmvif->link[link_id]->ap_sta_id = IWL_MVM_INVALID_STA; @@ -1168,19 +1198,9 @@ int iwl_mvm_mac_start(struct ieee80211_hw *hw) for (retry = 0; retry <= max_retry; retry++) { ret = __iwl_mvm_mac_start(mvm); - if (!ret) + if (!ret || mvm->pldr_sync) break; - /* - * In PLDR sync PCI re-enumeration is needed. no point to retry - * mac start before that. - */ - if (mvm->pldr_sync) { - iwl_mei_alive_notif(false); - iwl_trans_pcie_remove(mvm->trans, true); - break; - } - IWL_ERR(mvm, "mac start retry %d\n", retry); } clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status); @@ -1369,7 +1389,8 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, } int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); @@ -1379,10 +1400,11 @@ int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, if (vif->type == NL80211_IFTYPE_STATION) { struct iwl_mvm_sta *mvmsta; + unsigned int link_id = link_conf->link_id; + u8 ap_sta_id = mvmvif->link[link_id]->ap_sta_id; mvmvif->csa_bcn_pending = false; - mvmsta = iwl_mvm_sta_from_staid_protected(mvm, - mvmvif->deflink.ap_sta_id); + mvmsta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id); if (WARN_ON(!mvmsta)) { ret = -EIO; @@ -1451,7 +1473,8 @@ void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw, mvmvif->csa_failed = true; mutex_unlock(&mvm->mutex); - iwl_mvm_post_channel_switch(hw, vif); + /* If we're here, we can't support MLD */ + iwl_mvm_post_channel_switch(hw, vif, &vif->bss_conf); } void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk) @@ -1463,7 +1486,7 @@ void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk) vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); /* Trigger disconnect (should clear the CSA state) */ - ieee80211_chswitch_done(vif, false); + ieee80211_chswitch_done(vif, false, 0); } static u8 @@ -1516,6 +1539,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; + int i; mutex_lock(&mvm->mutex); @@ -1532,8 +1556,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, /* make sure that beacon statistics don't go backwards with FW reset */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - mvmvif->deflink.beacon_stats.accu_num_beacons += - mvmvif->deflink.beacon_stats.num_beacons; + for_each_mvm_vif_valid_link(mvmvif, i) + mvmvif->link[i]->beacon_stats.accu_num_beacons += + mvmvif->link[i]->beacon_stats.num_beacons; /* Allocate resources for the MAC context, and add it to the fw */ ret = iwl_mvm_mac_ctxt_init(mvm, vif); @@ -1561,7 +1586,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, */ if (vif->type == NL80211_IFTYPE_AP || 
vif->type == NL80211_IFTYPE_ADHOC) { - iwl_mvm_vif_dbgfs_register(mvm, vif); + iwl_mvm_vif_dbgfs_add_link(mvm, vif); ret = 0; goto out; } @@ -1588,32 +1613,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, IEEE80211_VIF_SUPPORTS_CQM_RSSI; } - /* - * P2P_DEVICE interface does not have a channel context assigned to it, - * so a dedicated PHY context is allocated to it and the corresponding - * MAC context is bound to it at this stage. - */ - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { - - mvmvif->deflink.phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); - if (!mvmvif->deflink.phy_ctxt) { - ret = -ENOSPC; - goto out_free_bf; - } - - iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt); - ret = iwl_mvm_binding_add_vif(mvm, vif); - if (ret) - goto out_unref_phy; - - ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif); - if (ret) - goto out_unbind; - - /* Save a pointer to p2p device vif, so it can later be used to - * update the p2p device MAC when a GO is started/stopped */ + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) mvm->p2p_device_vif = vif; - } iwl_mvm_tcm_add_vif(mvm, vif); INIT_DELAYED_WORK(&mvmvif->csa_work, @@ -1625,7 +1626,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, iwl_mvm_chandef_get_primary_80(&vif->bss_conf.chandef); } - iwl_mvm_vif_dbgfs_register(mvm, vif); + iwl_mvm_vif_dbgfs_add_link(mvm, vif); if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && vif->type == NL80211_IFTYPE_STATION && !vif->p2p && @@ -1642,16 +1643,6 @@ out: goto out_unlock; - out_unbind: - iwl_mvm_binding_remove_vif(mvm, vif); - out_unref_phy: - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); - out_free_bf: - if (mvm->bf_allowed_vif == mvmvif) { - mvm->bf_allowed_vif = NULL; - vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | - IEEE80211_VIF_SUPPORTS_CQM_RSSI); - } out_remove_mac: mvmvif->deflink.phy_ctxt = NULL; iwl_mvm_mac_ctxt_remove(mvm, vif); @@ -1713,7 +1704,7 @@ static bool iwl_mvm_mac_remove_interface_common(struct ieee80211_hw *hw, if (vif->bss_conf.ftm_responder) memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats)); - iwl_mvm_vif_dbgfs_clean(mvm, vif); + iwl_mvm_vif_dbgfs_rm_link(mvm, vif); /* * For AP/GO interface, the tear down of the resources allocated to the @@ -1743,12 +1734,17 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, if (iwl_mvm_mac_remove_interface_common(hw, vif)) goto out; + /* Before the interface removal, mac80211 would cancel the ROC, and the + * ROC worker would be scheduled if needed. The worker would be flushed + * in iwl_mvm_prepare_mac_removal() and thus at this point there is no + * binding etc. so nothing needs to be done here. 
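+ *
+ * For illustration, the teardown ordering this relies on (a minimal
+ * sketch, not the literal call chain):
+ *
+ *   mac80211: cancels the ROC for the interface being removed
+ *   driver:   iwl_mvm_prepare_mac_removal() flushes the ROC worker,
+ *             which tears down the time event, binding and broadcast
+ *             station
+ *   driver:   here we only drop the PHY context reference, below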
+ */ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + if (mvmvif->deflink.phy_ctxt) { + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); + mvmvif->deflink.phy_ctxt = NULL; + } mvm->p2p_device_vif = NULL; - iwl_mvm_rm_p2p_bcast_sta(mvm, vif); - iwl_mvm_binding_remove_vif(mvm, vif); - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); - mvmvif->deflink.phy_ctxt = NULL; } iwl_mvm_mac_ctxt_remove(mvm, vif); @@ -2472,7 +2468,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, } void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - u32 duration_override) + u32 duration_override, unsigned int link_id) { u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS; @@ -2492,7 +2488,8 @@ void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) iwl_mvm_schedule_session_protection(mvm, vif, 900, - min_duration, false); + min_duration, false, + link_id); else iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false); @@ -2586,6 +2583,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; + int i; /* * Re-calculate the tsf id, as the leader-follower relations depend @@ -2632,8 +2630,9 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (vif->cfg.assoc) { /* clear statistics to get clean beacon counter */ iwl_mvm_request_statistics(mvm, true); - memset(&mvmvif->deflink.beacon_stats, 0, - sizeof(mvmvif->deflink.beacon_stats)); + for_each_mvm_vif_valid_link(mvmvif, i) + memset(&mvmvif->link[i]->beacon_stats, 0, + sizeof(mvmvif->link[i]->beacon_stats)); /* add quota for this interface */ ret = iwl_mvm_update_quotas(mvm, true, NULL); @@ -2680,7 +2679,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, * time could be small without us having heard * a beacon yet. 
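 *
 * (Session protection is scheduled per link now; callers pass an
 * explicit link_id along with the duration override, hence the extra
 * 0 - the default link - in the call below on this non-MLD path.)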
*/ - iwl_mvm_protect_assoc(mvm, vif, 0); + iwl_mvm_protect_assoc(mvm, vif, 0, 0); } iwl_mvm_sf_update(mvm, vif, false); @@ -3047,22 +3046,6 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_bss_conf *bss_conf, u64 changes) { - static const struct iwl_mvm_bss_info_changed_ops callbacks = { - .bss_info_changed_sta = iwl_mvm_bss_info_changed_station, - .bss_info_changed_ap_ibss = iwl_mvm_bss_info_changed_ap_ibss, - }; - - iwl_mvm_bss_info_changed_common(hw, vif, bss_conf, &callbacks, - changes); -} - -void -iwl_mvm_bss_info_changed_common(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *bss_conf, - const struct iwl_mvm_bss_info_changed_ops *callbacks, - u64 changes) -{ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); @@ -3072,12 +3055,11 @@ iwl_mvm_bss_info_changed_common(struct ieee80211_hw *hw, switch (vif->type) { case NL80211_IFTYPE_STATION: - callbacks->bss_info_changed_sta(mvm, vif, bss_conf, changes); + iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes); break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: - callbacks->bss_info_changed_ap_ibss(mvm, vif, bss_conf, - changes); + iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes); break; case NL80211_IFTYPE_MONITOR: if (changes & BSS_CHANGED_MU_GROUPS) @@ -3784,12 +3766,25 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm, callbacks->mac_ctxt_changed(mvm, vif, false); iwl_mvm_mei_host_associated(mvm, vif, mvm_sta); + + /* when client is authorized (AP station marked as such), + * try to enable more links + */ + if (vif->type == NL80211_IFTYPE_STATION && + !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + iwl_mvm_mld_select_links(mvm, vif, false); } mvm_sta->authorized = true; iwl_mvm_rs_rate_init_all_links(mvm, vif, sta); + /* MFP is set by default before the station is authorized. + * Clear it here in case it's not used. + */ + if (!sta->mfp) + return callbacks->update_sta(mvm, vif, sta); + return 0; } @@ -3877,9 +3872,14 @@ int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); - /* this would be a mac80211 bug ... but don't crash */ + /* this would be a mac80211 bug ... but don't crash, unless we had a + * firmware crash while we were activating a link, in which case it is + * legit to have phy_ctxt = NULL. Don't bother not to WARN if we are in + * recovery flow since we spit tons of error messages anyway. + */ for_each_sta_active_link(vif, sta, link_sta, link_id) { - if (WARN_ON_ONCE(!mvmvif->link[link_id]->phy_ctxt)) { + if (WARN_ON_ONCE(!mvmvif->link[link_id] || + !mvmvif->link[link_id]->phy_ctxt)) { mutex_unlock(&mvm->mutex); return test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) ? 0 : -EINVAL; @@ -4018,7 +4018,7 @@ void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); - iwl_mvm_protect_assoc(mvm, vif, info->duration); + iwl_mvm_protect_assoc(mvm, vif, info->duration, info->link_id); mutex_unlock(&mvm->mutex); } @@ -4155,12 +4155,21 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, * GTK on AP interface is a TX-only key, return 0; * on IBSS they're per-station and because we're lazy * we don't support them for RX, so do the same. - * CMAC/GMAC in AP/IBSS modes must be done in software. + * CMAC/GMAC in AP/IBSS modes must be done in software + * on older NICs. * * Except, of course, beacon protection - it must be - * offloaded since we just set a beacon template. 
+ * offloaded since we just set a beacon template, and + * then we must also offload the IGTK (not just BIGTK) + * for firmware reasons. + * + * So just check for beacon protection - if we don't + * have it we cannot get here with keyidx >= 6, and + * if we do have it we need to send the key to FW in + * all cases (CMAC/GMAC). */ - if (keyidx < 6 && + if (!wiphy_ext_feature_isset(hw->wiphy, + NL80211_EXT_FEATURE_BEACON_PROTECTION) && (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)) { @@ -4389,6 +4398,39 @@ static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait, #define AUX_ROC_MAX_DELAY MSEC_TO_TU(600) #define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20) #define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10) + +static void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif, + u32 duration_ms, + u32 *duration_tu, + u32 *delay) +{ + u32 dtim_interval = vif->bss_conf.dtim_period * + vif->bss_conf.beacon_int; + + *delay = AUX_ROC_MIN_DELAY; + *duration_tu = MSEC_TO_TU(duration_ms); + + /* + * If we are associated we want the delay time to be at least one + * dtim interval so that the FW can wait until after the DTIM and + * then start the time event, this will potentially allow us to + * remain off-channel for the max duration. + * Since we want to use almost a whole dtim interval we would also + * like the delay to be for 2-3 dtim intervals, in case there are + * other time events with higher priority. + */ + if (vif->cfg.assoc) { + *delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY); + /* We cannot remain off-channel longer than the DTIM interval */ + if (dtim_interval <= *duration_tu) { + *duration_tu = dtim_interval - AUX_ROC_SAFETY_BUFFER; + if (*duration_tu <= AUX_ROC_MIN_DURATION) + *duration_tu = dtim_interval - + AUX_ROC_MIN_SAFETY_BUFFER; + } + } +} + static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, struct ieee80211_channel *channel, struct ieee80211_vif *vif, @@ -4399,8 +4441,6 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data; static const u16 time_event_response[] = { HOT_SPOT_CMD }; struct iwl_notification_wait wait_time_event; - u32 dtim_interval = vif->bss_conf.dtim_period * - vif->bss_conf.beacon_int; u32 req_dur, delay; struct iwl_hs20_roc_req aux_roc_req = { .action = cpu_to_le32(FW_CTXT_ACTION_ADD), @@ -4421,29 +4461,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, /* Set the time and duration */ tail->apply_time = cpu_to_le32(iwl_mvm_get_systime(mvm)); - delay = AUX_ROC_MIN_DELAY; - req_dur = MSEC_TO_TU(duration); - - /* - * If we are associated we want the delay time to be at least one - * dtim interval so that the FW can wait until after the DTIM and - * then start the time event, this will potentially allow us to - * remain off-channel for the max duration. - * Since we want to use almost a whole dtim interval we would also - * like the delay to be for 2-3 dtim intervals, in case there are - * other time events with higher priority. 
- */ - if (vif->cfg.assoc) { - delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY); - /* We cannot remain off-channel longer than the DTIM interval */ - if (dtim_interval <= req_dur) { - req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER; - if (req_dur <= AUX_ROC_MIN_DURATION) - req_dur = dtim_interval - - AUX_ROC_MIN_SAFETY_BUFFER; - } - } - + iwl_mvm_roc_duration_and_delay(vif, duration, &req_dur, &delay); tail->duration = cpu_to_le32(req_dur); tail->apply_time_max_delay = cpu_to_le32(delay); @@ -4451,8 +4469,8 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, "ROC: Requesting to remain on channel %u for %ums\n", channel->hw_value, req_dur); IWL_DEBUG_TE(mvm, - "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", - duration, delay, dtim_interval); + "\t(requested = %ums, max_delay = %ums)\n", + duration, delay); /* Set the node address */ memcpy(tail->node_addr, vif->addr, ETH_ALEN); @@ -4510,6 +4528,48 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, return res; } +static int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm, + struct ieee80211_channel *channel, + struct ieee80211_vif *vif, + int duration, u32 activity) +{ + int res; + u32 duration_tu, delay; + struct iwl_roc_req roc_req = { + .action = cpu_to_le32(FW_CTXT_ACTION_ADD), + .activity = cpu_to_le32(activity), + .sta_id = cpu_to_le32(mvm->aux_sta.sta_id), + }; + + lockdep_assert_held(&mvm->mutex); + + /* Set the channel info data */ + iwl_mvm_set_chan_info(mvm, &roc_req.channel_info, + channel->hw_value, + iwl_mvm_phy_band_from_nl80211(channel->band), + IWL_PHY_CHANNEL_MODE20, 0); + + iwl_mvm_roc_duration_and_delay(vif, duration, &duration_tu, + &delay); + roc_req.duration = cpu_to_le32(duration_tu); + roc_req.max_delay = cpu_to_le32(delay); + + IWL_DEBUG_TE(mvm, + "\t(requested = %ums, max_delay = %ums)\n", + duration, delay); + IWL_DEBUG_TE(mvm, + "Requesting to remain on channel %u for %utu\n", + channel->hw_value, duration_tu); + + /* Set the node address */ + memcpy(roc_req.node_addr, vif->addr, ETH_ALEN); + + res = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), + 0, sizeof(roc_req), &roc_req); + + return res; +} + static int iwl_mvm_add_aux_sta_for_hs20(struct iwl_mvm *mvm, u32 lmac_id) { int ret = 0; @@ -4530,30 +4590,20 @@ static int iwl_mvm_add_aux_sta_for_hs20(struct iwl_mvm *mvm, u32 lmac_id) return ret; } -static int iwl_mvm_roc_switch_binding(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_mvm_phy_ctxt *new_phy_ctxt) +static int iwl_mvm_roc_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int ret = 0; + int ret; lockdep_assert_held(&mvm->mutex); - /* Unbind the P2P_DEVICE from the current PHY context, - * and if the PHY context is not used remove it. - */ - ret = iwl_mvm_binding_remove_vif(mvm, vif); - if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) + ret = iwl_mvm_binding_add_vif(mvm, vif); + if (WARN(ret, "Failed binding P2P_DEVICE\n")) return ret; - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); - - /* Bind the P2P_DEVICE to the current PHY Context */ - mvmvif->deflink.phy_ctxt = new_phy_ctxt; - - ret = iwl_mvm_binding_add_vif(mvm, vif); - WARN(ret, "Failed binding P2P_DEVICE\n"); - return ret; + /* The station and queue allocation must be done only after the binding + * is done, as otherwise the FW might incorrectly configure its state. 
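+ *
+ * I.e. the required bring-up order for the non-MLD ROC path is
+ * (sketch, using the helpers introduced in this change):
+ *
+ *   iwl_mvm_p2p_find_phy_ctxt() - PHY context on the ROC channel
+ *   iwl_mvm_binding_add_vif()   - bind the P2P_DEVICE MAC to it
+ *   iwl_mvm_add_p2p_bcast_sta() - allocate the STA and queue last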
+ */ + return iwl_mvm_add_p2p_bcast_sta(mvm, vif); } static int iwl_mvm_roc(struct ieee80211_hw *hw, @@ -4564,12 +4614,81 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw, { static const struct iwl_mvm_roc_ops ops = { .add_aux_sta_for_hs20 = iwl_mvm_add_aux_sta_for_hs20, - .switch_phy_ctxt = iwl_mvm_roc_switch_binding, + .link = iwl_mvm_roc_link, }; return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops); } +static int iwl_mvm_roc_station(struct iwl_mvm *mvm, + struct ieee80211_channel *channel, + struct ieee80211_vif *vif, + int duration) +{ + int ret; + u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, ROC_CMD); + u8 fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, + IWL_FW_CMD_VER_UNKNOWN); + + if (fw_ver == IWL_FW_CMD_VER_UNKNOWN) { + ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, vif, duration); + } else if (fw_ver == 3) { + ret = iwl_mvm_roc_add_cmd(mvm, channel, vif, duration, + ROC_ACTIVITY_HOTSPOT); + } else { + ret = -EOPNOTSUPP; + IWL_ERR(mvm, "ROC command version %d mismatch!\n", fw_ver); + } + + return ret; +} + +static int iwl_mvm_p2p_find_phy_ctxt(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_channel *channel) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct cfg80211_chan_def chandef; + int i; + + lockdep_assert_held(&mvm->mutex); + + if (mvmvif->deflink.phy_ctxt && + channel == mvmvif->deflink.phy_ctxt->channel) + return 0; + + /* Try using a PHY context that is already in use */ + for (i = 0; i < NUM_PHY_CTX; i++) { + struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[i]; + + if (!phy_ctxt->ref || mvmvif->deflink.phy_ctxt == phy_ctxt) + continue; + + if (channel == phy_ctxt->channel) { + if (mvmvif->deflink.phy_ctxt) + iwl_mvm_phy_ctxt_unref(mvm, + mvmvif->deflink.phy_ctxt); + + mvmvif->deflink.phy_ctxt = phy_ctxt; + iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt); + return 0; + } + } + + /* We already have a phy_ctxt, but it's not on the right channel */ + if (mvmvif->deflink.phy_ctxt) + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); + + mvmvif->deflink.phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); + if (!mvmvif->deflink.phy_ctxt) + return -ENOSPC; + + cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); + + return iwl_mvm_phy_ctxt_add(mvm, mvmvif->deflink.phy_ctxt, + &chandef, 1, 1); +} + /* Execute the common part for MLD and non-MLD modes */ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel *channel, int duration, @@ -4577,12 +4696,8 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct iwl_mvm_roc_ops *ops) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct cfg80211_chan_def chandef; - struct iwl_mvm_phy_ctxt *phy_ctxt; - bool band_change_removal; - int ret, i; u32 lmac_id; + int ret; IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, duration, type); @@ -4602,89 +4717,27 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, /* Use aux roc framework (HS20) */ ret = ops->add_aux_sta_for_hs20(mvm, lmac_id); if (!ret) - ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, - vif, duration); + ret = iwl_mvm_roc_station(mvm, channel, vif, duration); goto out_unlock; case NL80211_IFTYPE_P2P_DEVICE: /* handle below */ break; default: - IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type); + IWL_ERR(mvm, "ROC: Invalid vif type=%u\n", vif->type); ret = -EINVAL; goto out_unlock; } - for (i = 0; i < NUM_PHY_CTX; i++) { - 
phy_ctxt = &mvm->phy_ctxts[i]; - if (phy_ctxt->ref == 0 || mvmvif->deflink.phy_ctxt == phy_ctxt) - continue; - if (phy_ctxt->ref && channel == phy_ctxt->channel) { - ret = ops->switch_phy_ctxt(mvm, vif, phy_ctxt); - if (ret) - goto out_unlock; - - iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt); - goto schedule_time_event; - } - } - - /* Need to update the PHY context only if the ROC channel changed */ - if (channel == mvmvif->deflink.phy_ctxt->channel) - goto schedule_time_event; - - cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); - - /* - * Check if the remain-on-channel is on a different band and that - * requires context removal, see iwl_mvm_phy_ctxt_changed(). If - * so, we'll need to release and then re-configure here, since we - * must not remove a PHY context that's part of a binding. - */ - band_change_removal = - fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) && - mvmvif->deflink.phy_ctxt->channel->band != chandef.chan->band; - - if (mvmvif->deflink.phy_ctxt->ref == 1 && !band_change_removal) { - /* - * Change the PHY context configuration as it is currently - * referenced only by the P2P Device MAC (and we can modify it) - */ - ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->deflink.phy_ctxt, - &chandef, 1, 1); - if (ret) - goto out_unlock; - } else { - /* - * The PHY context is shared with other MACs (or we're trying to - * switch bands), so remove the P2P Device from the binding, - * allocate an new PHY context and create a new binding. - */ - phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); - if (!phy_ctxt) { - ret = -ENOSPC; - goto out_unlock; - } - - ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef, - 1, 1); - if (ret) { - IWL_ERR(mvm, "Failed to change PHY context\n"); - goto out_unlock; - } - - ret = ops->switch_phy_ctxt(mvm, vif, phy_ctxt); - if (ret) - goto out_unlock; + ret = iwl_mvm_p2p_find_phy_ctxt(mvm, vif, channel); + if (ret) + goto out_unlock; - iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt); - } + ret = ops->link(mvm, vif); + if (ret) + goto out_unlock; -schedule_time_event: - /* Schedule the time events */ ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type); - out_unlock: mutex_unlock(&mvm->mutex); IWL_DEBUG_MAC80211(mvm, "leave\n"); @@ -4741,8 +4794,9 @@ static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm, { u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt; - bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx); - struct cfg80211_chan_def *def = responder ? &ctx->def : &ctx->min_def; + bool use_def = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx) || + iwl_mvm_enable_fils(mvm, ctx); + struct cfg80211_chan_def *def = use_def ? &ctx->def : &ctx->min_def; int ret; lockdep_assert_held(&mvm->mutex); @@ -4755,15 +4809,14 @@ static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm, goto out; } - ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, - ctx->rx_chains_static, - ctx->rx_chains_dynamic); + ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, def, + ctx->rx_chains_static, + ctx->rx_chains_dynamic); if (ret) { IWL_ERR(mvm, "Failed to add PHY context\n"); goto out; } - iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt); *phy_ctxt_id = phy_ctxt->id; out: return ret; @@ -4809,8 +4862,9 @@ void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; - bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx); - struct cfg80211_chan_def *def = responder ? 
&ctx->def : &ctx->min_def; + bool use_def = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx) || + iwl_mvm_enable_fils(mvm, ctx); + struct cfg80211_chan_def *def = use_def ? &ctx->def : &ctx->min_def; if (WARN_ONCE((phy_ctxt->ref > 1) && (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH | @@ -4949,7 +5003,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { - u32 duration = 3 * vif->bss_conf.beacon_int; + u32 duration = 5 * vif->bss_conf.beacon_int; /* Protect the session to make sure we hear the first * beacon on the new channel. @@ -5227,8 +5281,8 @@ int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw) return mvm->ibss_manager; } -int iwl_mvm_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, - bool set) +static int iwl_mvm_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, + bool set) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); @@ -5455,7 +5509,8 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, goto out_unlock; } - if (chsw->delay > IWL_MAX_CSA_BLOCK_TX) + if (chsw->delay > IWL_MAX_CSA_BLOCK_TX && + hweight16(vif->valid_links) <= 1) schedule_delayed_work(&mvmvif->csa_work, 0); if (chsw->block_tx) { @@ -5534,7 +5589,7 @@ void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, if (mvmvif->csa_misbehave) { /* Second time, give up on this AP*/ iwl_mvm_abort_channel_switch(hw, vif); - ieee80211_chswitch_done(vif, false); + ieee80211_chswitch_done(vif, false, 0); mvmvif->csa_misbehave = false; return; } @@ -5604,9 +5659,6 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, return; } - if (vif->type != NL80211_IFTYPE_STATION) - return; - /* Make sure we're done with the deferred traffic before flushing */ flush_work(&mvm->add_stream_wk); @@ -5630,11 +5682,9 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, ap_sta_done = true; } - /* make sure only TDLS peers or the AP are flushed */ - WARN_ON_ONCE(sta != mvmvif->ap_sta && !sta->tdls); - if (drop) { - if (iwl_mvm_flush_sta(mvm, mvmsta, false)) + if (iwl_mvm_flush_sta(mvm, mvmsta->deflink.sta_id, + mvmsta->tfd_queue_msk)) IWL_ERR(mvm, "flush request fail\n"); } else { if (iwl_mvm_has_new_tx_api(mvm)) @@ -5656,22 +5706,21 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void iwl_mvm_mac_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int i; + struct iwl_mvm_link_sta *mvm_link_sta; + struct ieee80211_link_sta *link_sta; + int link_id; mutex_lock(&mvm->mutex); - for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { - struct iwl_mvm_sta *mvmsta; - struct ieee80211_sta *tmp; - - tmp = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], - lockdep_is_held(&mvm->mutex)); - if (tmp != sta) + for_each_sta_active_link(vif, sta, link_sta, link_id) { + mvm_link_sta = rcu_dereference_protected(mvmsta->link[link_id], + lockdep_is_held(&mvm->mutex)); + if (!mvm_link_sta) continue; - mvmsta = iwl_mvm_sta_from_mac80211(sta); - - if (iwl_mvm_flush_sta(mvm, mvmsta, false)) + if (iwl_mvm_flush_sta(mvm, mvm_link_sta->sta_id, + mvmsta->tfd_queue_msk)) IWL_ERR(mvm, "flush request fail\n"); } mutex_unlock(&mvm->mutex); @@ -5681,7 +5730,11 @@ int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey) { struct iwl_mvm *mvm = 
IWL_MAC80211_GET_MVM(hw); - int ret; + int ret = 0; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(SYSTEM_GROUP, + SYSTEM_STATISTICS_CMD), + IWL_FW_CMD_VER_UNKNOWN); memset(survey, 0, sizeof(*survey)); @@ -5701,13 +5754,8 @@ int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, goto out; } - survey->filled = SURVEY_INFO_TIME | - SURVEY_INFO_TIME_RX | - SURVEY_INFO_TIME_TX | - SURVEY_INFO_TIME_SCAN; - survey->time = mvm->accu_radio_stats.on_time_rf + - mvm->radio_stats.on_time_rf; - do_div(survey->time, USEC_PER_MSEC); + survey->filled = SURVEY_INFO_TIME_RX | + SURVEY_INFO_TIME_TX; survey->time_rx = mvm->accu_radio_stats.rx_time + mvm->radio_stats.rx_time; @@ -5717,11 +5765,20 @@ int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, mvm->radio_stats.tx_time; do_div(survey->time_tx, USEC_PER_MSEC); + /* the new fw api doesn't support the following fields */ + if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN) + goto out; + + survey->filled |= SURVEY_INFO_TIME | + SURVEY_INFO_TIME_SCAN; + survey->time = mvm->accu_radio_stats.on_time_rf + + mvm->radio_stats.on_time_rf; + do_div(survey->time, USEC_PER_MSEC); + survey->time_scan = mvm->accu_radio_stats.on_time_scan + mvm->radio_stats.on_time_scan; do_div(survey->time_scan, USEC_PER_MSEC); - ret = 0; out: mutex_unlock(&mvm->mutex); return ret; @@ -5870,6 +5927,7 @@ void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + int i; if (mvmsta->deflink.avg_energy) { sinfo->signal_avg = -(s8)mvmsta->deflink.avg_energy; @@ -5898,8 +5956,11 @@ void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, if (iwl_mvm_request_statistics(mvm, false)) goto unlock; - sinfo->rx_beacon = mvmvif->deflink.beacon_stats.num_beacons + - mvmvif->deflink.beacon_stats.accu_num_beacons; + sinfo->rx_beacon = 0; + for_each_mvm_vif_valid_link(mvmvif, i) + sinfo->rx_beacon += mvmvif->link[i]->beacon_stats.num_beacons + + mvmvif->link[i]->beacon_stats.accu_num_beacons; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX); if (mvmvif->deflink.beacon_stats.avg_signal) { /* firmware only reports a value after RXing a few beacons */ @@ -6205,6 +6266,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = { .wake_tx_queue = iwl_mvm_mac_wake_tx_queue, .ampdu_action = iwl_mvm_mac_ampdu_action, .get_antenna = iwl_mvm_op_get_antenna, + .set_antenna = iwl_mvm_op_set_antenna, .start = iwl_mvm_mac_start, .reconfig_complete = iwl_mvm_mac_reconfig_complete, .stop = iwl_mvm_mac_stop, @@ -6287,6 +6349,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = { .can_aggregate_in_amsdu = iwl_mvm_mac_can_aggregate, #ifdef CONFIG_IWLWIFI_DEBUGFS + .vif_add_debugfs = iwl_mvm_vif_add_debugfs, .link_sta_add_debugfs = iwl_mvm_link_sta_add_debugfs, #endif .set_hw_timestamp = iwl_mvm_set_hw_timestamp, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c index 2c9f2f71b083..ea3e9e9c6e26 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c @@ -24,10 +24,15 @@ static u32 iwl_mvm_get_sec_sta_mask(struct iwl_mvm *mvm, return 0; } - /* AP group keys are per link and should be on the mcast STA */ + /* AP group keys are per link and should be on the mcast/bcast STA */ if (vif->type == NL80211_IFTYPE_AP && - !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { + /* IGTK/BIGTK to 
bcast STA */ + if (keyconf->keyidx >= 4) + return BIT(link_info->bcast_sta.sta_id); + /* GTK for data to mcast STA */ return BIT(link_info->mcast_sta.sta_id); + } /* for client mode use the AP STA also for group keys */ if (!sta && vif->type == NL80211_IFTYPE_STATION) @@ -91,7 +96,12 @@ u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm, if (!sta && vif->type == NL80211_IFTYPE_STATION) sta = mvmvif->ap_sta; - if (!IS_ERR_OR_NULL(sta) && sta->mfp) + /* Set the MFP flag also for an AP interface where the key is an IGTK + * key as in such a case the station would always be NULL + */ + if ((!IS_ERR_OR_NULL(sta) && sta->mfp) || + (vif->type == NL80211_IFTYPE_AP && + (keyconf->keyidx == 4 || keyconf->keyidx == 5))) flags |= IWL_SEC_KEY_FLAG_MFP; return flags; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c index 8b6c641772ee..ff6cb064051b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c @@ -10,6 +10,7 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; + int i; mutex_lock(&mvm->mutex); @@ -22,8 +23,9 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, /* make sure that beacon statistics don't go backwards with FW reset */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - mvmvif->deflink.beacon_stats.accu_num_beacons += - mvmvif->deflink.beacon_stats.num_beacons; + for_each_mvm_vif_valid_link(mvmvif, i) + mvmvif->link[i]->beacon_stats.accu_num_beacons += + mvmvif->link[i]->beacon_stats.num_beacons; /* Allocate resources for the MAC context, and add it to the fw */ ret = iwl_mvm_mac_ctxt_init(mvm, vif); @@ -56,43 +58,15 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, IEEE80211_VIF_SUPPORTS_CQM_RSSI; } - /* - * P2P_DEVICE interface does not have a channel context assigned to it, - * so a dedicated PHY context is allocated to it and the corresponding - * MAC context is bound to it at this stage. 
- */ - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { - mvmvif->deflink.phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); - if (!mvmvif->deflink.phy_ctxt) { - ret = -ENOSPC; - goto out_free_bf; - } - - iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt); - ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf); - if (ret) - goto out_unref_phy; - - ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, - LINK_CONTEXT_MODIFY_ACTIVE | - LINK_CONTEXT_MODIFY_RATES_INFO, - true); - if (ret) - goto out_remove_link; - - ret = iwl_mvm_mld_add_bcast_sta(mvm, vif, &vif->bss_conf); - if (ret) - goto out_remove_link; + ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf); + if (ret) + goto out_free_bf; - /* Save a pointer to p2p device vif, so it can later be used to - * update the p2p device MAC when a GO is started/stopped - */ + /* Save a pointer to p2p device vif, so it can later be used to + * update the p2p device MAC when a GO is started/stopped + */ + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) mvm->p2p_device_vif = vif; - } else { - ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf); - if (ret) - goto out_free_bf; - } ret = iwl_mvm_power_update_mac(mvm); if (ret) @@ -107,7 +81,7 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS); } - iwl_mvm_vif_dbgfs_register(mvm, vif); + iwl_mvm_vif_dbgfs_add_link(mvm, vif); if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && vif->type == NL80211_IFTYPE_STATION && !vif->p2p && @@ -119,10 +93,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, goto out_unlock; - out_remove_link: - iwl_mvm_disable_link(mvm, vif, &vif->bss_conf); - out_unref_phy: - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); out_free_bf: if (mvm->bf_allowed_vif == mvmvif) { mvm->bf_allowed_vif = NULL; @@ -130,7 +100,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, IEEE80211_VIF_SUPPORTS_CQM_RSSI); } out_remove_mac: - mvmvif->deflink.phy_ctxt = NULL; mvmvif->link[0] = NULL; iwl_mvm_mld_mac_ctxt_remove(mvm, vif); out_unlock: @@ -168,7 +137,7 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw, if (vif->bss_conf.ftm_responder) memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats)); - iwl_mvm_vif_dbgfs_clean(mvm, vif); + iwl_mvm_vif_dbgfs_rm_link(mvm, vif); /* For AP/GO interface, the tear down of the resources allocated to the * interface is be handled as part of the stop_ap flow. @@ -185,14 +154,18 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw, iwl_mvm_power_update_mac(mvm); + /* Before the interface removal, mac80211 would cancel the ROC, and the + * ROC worker would be scheduled if needed. The worker would be flushed + * in iwl_mvm_prepare_mac_removal() and thus at this point the link is + * not active. So need only to remove the link. 
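+ *
+ * (Compare with the non-MLD removal path: there the broadcast station
+ * and binding had to be torn down explicitly, while with the MLD API
+ * dropping the PHY context reference and calling iwl_mvm_remove_link()
+ * below is all that remains.)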
+ */ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + if (mvmvif->deflink.phy_ctxt) { + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); + mvmvif->deflink.phy_ctxt = NULL; + } mvm->p2p_device_vif = NULL; - - /* P2P device uses only one link */ - iwl_mvm_mld_rm_bcast_sta(mvm, vif, &vif->bss_conf); - iwl_mvm_disable_link(mvm, vif, &vif->bss_conf); - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); - mvmvif->deflink.phy_ctxt = NULL; + iwl_mvm_remove_link(mvm, vif, &vif->bss_conf); } else { iwl_mvm_disable_link(mvm, vif, &vif->bss_conf); } @@ -240,8 +213,8 @@ static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm, mvmvif->esr_active = true; - /* Disable SMPS overrideing by user */ - vif->driver_flags |= IEEE80211_VIF_DISABLE_SMPS_OVERRIDE; + /* Indicate to mac80211 that EML is enabled */ + vif->driver_flags |= IEEE80211_VIF_EML_ACTIVE; iwl_mvm_update_smps_on_active_links(mvm, vif, IWL_MVM_SMPS_REQ_FW, IEEE80211_SMPS_OFF); @@ -399,7 +372,7 @@ static int iwl_mvm_esr_mode_inactive(struct iwl_mvm *mvm, mvmvif->esr_active = false; - vif->driver_flags &= ~IEEE80211_VIF_DISABLE_SMPS_OVERRIDE; + vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE; iwl_mvm_update_smps_on_active_links(mvm, vif, IWL_MVM_SMPS_REQ_FW, IEEE80211_SMPS_AUTOMATIC); @@ -489,10 +462,17 @@ static void iwl_mvm_mld_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); __iwl_mvm_mld_unassign_vif_chanctx(mvm, vif, link_conf, ctx, false); + /* in the non-MLD case, remove/re-add the link to clean up FW state */ + if (!ieee80211_vif_is_mld(vif) && !mvmvif->ap_sta && + !WARN_ON_ONCE(vif->cfg.assoc)) { + iwl_mvm_remove_link(mvm, vif, link_conf); + iwl_mvm_add_link(mvm, vif, link_conf); + } mutex_unlock(&mvm->mutex); } @@ -623,6 +603,126 @@ static int iwl_mvm_mld_mac_sta_state(struct ieee80211_hw *hw, &callbacks); } +struct iwl_mvm_link_sel_data { + u8 link_id; + enum nl80211_band band; + bool active; +}; + +static bool iwl_mvm_mld_valid_link_pair(struct iwl_mvm_link_sel_data *a, + struct iwl_mvm_link_sel_data *b) +{ + return a->band != b->band; +} + +void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + bool valid_links_changed) +{ + struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS]; + unsigned long usable_links = ieee80211_vif_usable_links(vif); + u32 max_active_links = iwl_mvm_max_active_links(mvm, vif); + u16 new_active_links; + u8 link_id, n_data = 0, i, j; + + if (!IWL_MVM_AUTO_EML_ENABLE) + return; + + if (!ieee80211_vif_is_mld(vif) || usable_links == 1) + return; + + /* The logic below is a simple version that doesn't suit more than 2 + * links + */ + WARN_ON_ONCE(max_active_links > 2); + + /* if only a single active link is supported, assume that the one + * selected by higher layer for connection establishment is the best. + */ + if (max_active_links == 1 && !valid_links_changed) + return; + + /* If we are already using the maximal number of active links, don't do + * any change. This can later be optimized to pick a 'better' link pair. 
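+ *
+ * The pairing rule applied below is deliberately simple: two links
+ * form a valid EMLSR pair iff they sit on different bands (see
+ * iwl_mvm_mld_valid_link_pair()). For example:
+ *
+ *   (2.4 GHz, 5 GHz) - valid pair
+ *   (5 GHz, 6 GHz)   - valid pair
+ *   (5 GHz, 5 GHz)   - rejected, keep the current active link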
+ */ + if (hweight16(vif->active_links) == max_active_links) + return; + + rcu_read_lock(); + + for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_bss_conf *link_conf = + rcu_dereference(vif->link_conf[link_id]); + + if (WARN_ON_ONCE(!link_conf)) + continue; + + data[n_data].link_id = link_id; + data[n_data].band = link_conf->chandef.chan->band; + data[n_data].active = vif->active_links & BIT(link_id); + n_data++; + } + + rcu_read_unlock(); + + /* this is expected to be the current active link */ + if (n_data == 1) + return; + + new_active_links = 0; + + /* Assume that after association only a single link is active, thus, + * select only the 2nd link + */ + if (!valid_links_changed) { + for (i = 0; i < n_data; i++) { + if (data[i].active) + break; + } + + if (WARN_ON_ONCE(i == n_data)) + return; + + for (j = 0; j < n_data; j++) { + if (i == j) + continue; + + if (iwl_mvm_mld_valid_link_pair(&data[i], &data[j])) + break; + } + + if (j != n_data) + new_active_links = BIT(data[i].link_id) | + BIT(data[j].link_id); + } else { + /* Try to find a valid link pair for EMLSR operation. If a pair + * is not found continue using the current active link. + */ + for (i = 0; i < n_data; i++) { + for (j = 0; j < n_data; j++) { + if (i == j) + continue; + + if (iwl_mvm_mld_valid_link_pair(&data[i], + &data[j])) + break; + } + + /* found a valid pair for EMLSR, use it */ + if (j != n_data) { + new_active_links = BIT(data[i].link_id) | + BIT(data[j].link_id); + break; + } + } + } + + if (WARN_ON(!new_active_links)) + return; + + if (vif->active_links != new_active_links) + ieee80211_set_active_links_async(vif, new_active_links); +} + static void iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -653,7 +753,7 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm, } /* Update EHT Puncturing info */ - if (changes & BSS_CHANGED_EHT_PUNCTURING && vif->cfg.assoc && has_eht) + if (changes & BSS_CHANGED_EHT_PUNCTURING && vif->cfg.assoc) link_changes |= LINK_CONTEXT_MODIFY_EHT_PARAMS; if (link_changes) { @@ -667,6 +767,9 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm, if (ret) IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); + if (changes & BSS_CHANGED_MLD_VALID_LINKS) + iwl_mvm_mld_select_links(mvm, vif, true); + memcpy(mvmvif->link[link_conf->link_id]->bssid, link_conf->bssid, ETH_ALEN); @@ -731,73 +834,84 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm, mvmvif->associated = vif->cfg.assoc; - if (!(changes & BSS_CHANGED_ASSOC)) - return; - - if (vif->cfg.assoc) { - /* clear statistics to get clean beacon counter */ - iwl_mvm_request_statistics(mvm, true); - iwl_mvm_sf_update(mvm, vif, false); - iwl_mvm_power_vif_assoc(mvm, vif); - - for_each_mvm_vif_valid_link(mvmvif, i) { - memset(&mvmvif->link[i]->beacon_stats, 0, - sizeof(mvmvif->link[i]->beacon_stats)); + if (changes & BSS_CHANGED_ASSOC) { + if (vif->cfg.assoc) { + /* clear statistics to get clean beacon counter */ + iwl_mvm_request_statistics(mvm, true); + iwl_mvm_sf_update(mvm, vif, false); + iwl_mvm_power_vif_assoc(mvm, vif); + + for_each_mvm_vif_valid_link(mvmvif, i) { + memset(&mvmvif->link[i]->beacon_stats, 0, + sizeof(mvmvif->link[i]->beacon_stats)); + + if (vif->p2p) { + iwl_mvm_update_smps(mvm, vif, + IWL_MVM_SMPS_REQ_PROT, + IEEE80211_SMPS_DYNAMIC, i); + } + + rcu_read_lock(); + link_conf = rcu_dereference(vif->link_conf[i]); + if (link_conf && !link_conf->dtim_period) + protect = true; + rcu_read_unlock(); + } - if 
(vif->p2p) { - iwl_mvm_update_smps(mvm, vif, - IWL_MVM_SMPS_REQ_PROT, - IEEE80211_SMPS_DYNAMIC, i); + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && + protect) { + /* We are in assoc so only one link is active- + * The association link + */ + unsigned int link_id = + ffs(vif->active_links) - 1; + + /* If we're not restarting and still haven't + * heard a beacon (dtim period unknown) then + * make sure we still have enough minimum time + * remaining in the time event, since the auth + * might actually have taken quite a while + * (especially for SAE) and so the remaining + * time could be small without us having heard + * a beacon yet. + */ + iwl_mvm_protect_assoc(mvm, vif, 0, link_id); } - rcu_read_lock(); - link_conf = rcu_dereference(vif->link_conf[i]); - if (link_conf && !link_conf->dtim_period) - protect = true; - rcu_read_unlock(); - } + iwl_mvm_sf_update(mvm, vif, false); - if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && - protect) { - /* If we're not restarting and still haven't - * heard a beacon (dtim period unknown) then - * make sure we still have enough minimum time - * remaining in the time event, since the auth - * might actually have taken quite a while - * (especially for SAE) and so the remaining - * time could be small without us having heard - * a beacon yet. + /* FIXME: need to decide about misbehaving AP handling */ + iwl_mvm_power_vif_assoc(mvm, vif); + } else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) { + iwl_mvm_mei_host_disassociated(mvm); + + /* If update fails - SF might be running in associated + * mode while disassociated - which is forbidden. + */ + ret = iwl_mvm_sf_update(mvm, vif, false); + WARN_ONCE(ret && + !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, + &mvm->status), + "Failed to update SF upon disassociation\n"); + + /* If we get an assert during the connection (after the + * station has been added, but before the vif is set + * to associated), mac80211 will re-add the station and + * then configure the vif. Since the vif is not + * associated, we would remove the station here and + * this would fail the recovery. */ - iwl_mvm_protect_assoc(mvm, vif, 0); + iwl_mvm_mld_vif_delete_all_stas(mvm, vif); } - iwl_mvm_sf_update(mvm, vif, false); - - /* FIXME: need to decide about misbehaving AP handling */ - iwl_mvm_power_vif_assoc(mvm, vif); - } else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) { - iwl_mvm_mei_host_disassociated(mvm); - - /* If update fails - SF might be running in associated - * mode while disassociated - which is forbidden. - */ - ret = iwl_mvm_sf_update(mvm, vif, false); - WARN_ONCE(ret && - !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, - &mvm->status), - "Failed to update SF upon disassociation\n"); - - /* If we get an assert during the connection (after the - * station has been added, but before the vif is set - * to associated), mac80211 will re-add the station and - * then configure the vif. Since the vif is not - * associated, we would remove the station here and - * this would fail the recovery. 
- */ - iwl_mvm_mld_vif_delete_all_stas(mvm, vif); + iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes); } - iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes); + if (changes & BSS_CHANGED_PS) { + ret = iwl_mvm_power_update_mac(mvm); + if (ret) + IWL_ERR(mvm, "failed to update power mode\n"); + } } static void @@ -963,36 +1077,29 @@ iwl_mvm_mld_mac_conf_tx(struct ieee80211_hw *hw, return 0; } -static int iwl_mvm_link_switch_phy_ctx(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_mvm_phy_ctxt *new_phy_ctxt) +static int iwl_mvm_mld_roc_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int ret = 0; + int ret; lockdep_assert_held(&mvm->mutex); - /* Inorder to change the phy_ctx of a link, the link needs to be - * inactive. Therefore, first deactivate the link, then change its - * phy_ctx, and then activate it again. - */ - ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, - LINK_CONTEXT_MODIFY_ACTIVE, false); - if (WARN(ret, "Failed to deactivate link\n")) + /* The PHY context ID might have changed so need to set it */ + ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, 0, false); + if (WARN(ret, "Failed to set PHY context ID\n")) return ret; - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); - - mvmvif->deflink.phy_ctxt = new_phy_ctxt; + ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, + LINK_CONTEXT_MODIFY_ACTIVE | + LINK_CONTEXT_MODIFY_RATES_INFO, + true); - ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, 0, false); - if (WARN(ret, "Failed to deactivate link\n")) + if (WARN(ret, "Failed linking P2P_DEVICE\n")) return ret; - ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, - LINK_CONTEXT_MODIFY_ACTIVE, true); - WARN(ret, "Failed binding P2P_DEVICE\n"); - return ret; + /* The station and queue allocation must be done only after the linking + * is done, as otherwise the FW might incorrectly configure its state. 
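The remain-on-channel rework above replaces the phy-context switch callback with a single .link hook, so iwl_mvm_roc_common() stays shared between the MLD and non-MLD paths (the iwl_mvm_roc_ops table is shown a little further down). A toy sketch of this ops-table pattern, with invented names and types:

```c
#include <stdio.h>

/* The common ROC flow lives in one function and the per-mode
 * difference is injected as a callback. Everything here is
 * illustrative, not the driver's real API.
 */
struct roc_ops {
        int (*link)(void *vif);        /* bind MAC/link to the PHY context */
};

static int mld_roc_link(void *vif)
{
        (void)vif;
        puts("MLD: set PHY ctx id, activate link, add bcast station");
        return 0;
}

static int roc_common(void *vif, const struct roc_ops *ops)
{
        int ret = ops->link(vif);        /* per-mode hook runs first */

        if (ret)
                return ret;
        puts("common: schedule the remain-on-channel time event");
        return 0;
}

int main(void)
{
        static const struct roc_ops ops = { .link = mld_roc_link };

        return roc_common(NULL, &ops);
}
```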
+ */ + return iwl_mvm_mld_add_bcast_sta(mvm, vif, &vif->bss_conf); } static int iwl_mvm_mld_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -1001,7 +1108,7 @@ static int iwl_mvm_mld_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, { static const struct iwl_mvm_roc_ops ops = { .add_aux_sta_for_hs20 = iwl_mvm_mld_add_aux_sta, - .switch_phy_ctxt = iwl_mvm_link_switch_phy_ctx, + .link = iwl_mvm_mld_roc_link, }; return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops); @@ -1084,9 +1191,6 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw, } } - if (err) - goto out_err; - err = 0; if (new_links == 0) { mvmvif->link[0] = &mvmvif->deflink; @@ -1124,6 +1228,7 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = { .wake_tx_queue = iwl_mvm_mac_wake_tx_queue, .ampdu_action = iwl_mvm_mac_ampdu_action, .get_antenna = iwl_mvm_op_get_antenna, + .set_antenna = iwl_mvm_op_set_antenna, .start = iwl_mvm_mac_start, .reconfig_complete = iwl_mvm_mac_reconfig_complete, .stop = iwl_mvm_mac_stop, @@ -1170,8 +1275,6 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = { .tx_last_beacon = iwl_mvm_tx_last_beacon, - .set_tim = iwl_mvm_set_tim, - .channel_switch = iwl_mvm_channel_switch, .pre_channel_switch = iwl_mvm_pre_channel_switch, .post_channel_switch = iwl_mvm_post_channel_switch, @@ -1206,6 +1309,8 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = { .abort_pmsr = iwl_mvm_abort_pmsr, #ifdef CONFIG_IWLWIFI_DEBUGFS + .vif_add_debugfs = iwl_mvm_vif_add_debugfs, + .link_add_debugfs = iwl_mvm_link_add_debugfs, .link_sta_add_debugfs = iwl_mvm_link_sta_add_debugfs, #endif .set_hw_timestamp = iwl_mvm_set_hw_timestamp, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c index 524852cf5cd2..6af606e5da65 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c @@ -347,7 +347,7 @@ static int iwl_mvm_mld_rm_int_sta(struct iwl_mvm *mvm, return -EINVAL; if (flush) - iwl_mvm_flush_sta(mvm, int_sta, true); + iwl_mvm_flush_sta(mvm, int_sta->sta_id, int_sta->tfd_queue_msk); iwl_mvm_mld_disable_txq(mvm, BIT(int_sta->sta_id), queuptr, tid); @@ -697,6 +697,8 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, /* at this stage sta link pointers are already allocated */ ret = iwl_mvm_mld_update_sta(mvm, vif, sta); + if (ret) + goto err; for_each_sta_active_link(vif, sta, link_sta, link_id) { struct ieee80211_bss_conf *link_conf = @@ -705,8 +707,10 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, rcu_dereference_protected(mvm_sta->link[link_id], lockdep_is_held(&mvm->mutex)); - if (WARN_ON(!link_conf || !mvm_link_sta)) + if (WARN_ON(!link_conf || !mvm_link_sta)) { + ret = -EINVAL; goto err; + } ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, link_conf, mvm_link_sta); @@ -1104,15 +1108,26 @@ int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm, link_sta_dereference_protected(sta, link_id); mvm_vif_link = mvm_vif->link[link_id]; - if (WARN_ON(!mvm_vif_link || !link_conf || !link_sta || - mvm_sta->link[link_id])) { + if (WARN_ON(!mvm_vif_link || !link_conf || !link_sta)) { ret = -EINVAL; goto err; } - ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta, link_id); - if (WARN_ON(ret)) - goto err; + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + if (WARN_ON(!mvm_sta->link[link_id])) { + ret = -EINVAL; + goto err; + } + } else { + if (WARN_ON(mvm_sta->link[link_id])) { + ret = -EINVAL; + goto err; + } + ret = 
iwl_mvm_mld_alloc_sta_link(mvm, vif, sta, + link_id); + if (WARN_ON(ret)) + goto err; + } link_sta->agg.max_rc_amsdu_len = 1; ieee80211_sta_recalc_aggregates(sta); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index b18c91c5dd5d..f2af3e571409 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -121,15 +121,16 @@ struct iwl_mvm_time_event_data { * if the te is in the time event list or not (when id == TE_MAX) */ u32 id; + u8 link_id; }; /* Power management */ /** * enum iwl_power_scheme - * @IWL_POWER_LEVEL_CAM - Continuously Active Mode - * @IWL_POWER_LEVEL_BPS - Balanced Power Save (default) - * @IWL_POWER_LEVEL_LP - Low Power + * @IWL_POWER_SCHEME_CAM: Continuously Active Mode + * @IWL_POWER_SCHEME_BPS: Balanced Power Save (default) + * @IWL_POWER_SCHEME_LP: Low Power */ enum iwl_power_scheme { IWL_POWER_SCHEME_CAM = 1, @@ -137,7 +138,6 @@ enum iwl_power_scheme { IWL_POWER_SCHEME_LP }; -#define IWL_CONN_MAX_LISTEN_INTERVAL 10 #define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -218,7 +218,7 @@ enum iwl_bt_force_ant_mode { }; /** - * struct iwl_mvm_low_latency_force - low latency force mode set by debugfs + * enum iwl_mvm_low_latency_force - low latency force mode set by debugfs * @LOW_LATENCY_FORCE_UNSET: unset force mode * @LOW_LATENCY_FORCE_ON: for low latency on * @LOW_LATENCY_FORCE_OFF: for low latency off @@ -232,7 +232,7 @@ enum iwl_mvm_low_latency_force { }; /** -* struct iwl_mvm_low_latency_cause - low latency set causes +* enum iwl_mvm_low_latency_cause - low latency set causes * @LOW_LATENCY_TRAFFIC: indicates low latency traffic was detected * @LOW_LATENCY_DEBUGFS: low latency mode set from debugfs * @LOW_LATENCY_VCMD: low latency mode set from vendor command @@ -302,7 +302,11 @@ struct iwl_probe_resp_data { * @queue_params: QoS params for this MAC * @mgmt_queue: queue number for unbufferable management frames * @igtk: the current IGTK programmed into the firmware + * @active: indicates the link is active in FW (for sanity checking) + * @cab_queue: content-after-beacon (multicast) queue * @listen_lmac: indicates this link is allocated to the listen LMAC + * @mcast_sta: multicast station + * @phy_ctxt: phy context allocated to this link, if any */ struct iwl_mvm_vif_link_info { u8 bssid[ETH_ALEN]; @@ -342,6 +346,7 @@ struct iwl_mvm_vif_link_info { /** * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context + * @mvm: pointer back to the mvm struct * @id: between 0 and 3 * @color: to solve races upon MAC addition and removal * @associated: indicates that we're currently associated, used only for @@ -364,6 +369,13 @@ struct iwl_mvm_vif_link_info { * @csa_failed: CSA failed to schedule time event, report an error later * @csa_bcn_pending: indicates that we are waiting for a beacon on a new channel * @features: hw features active for this vif + * @ap_beacon_time: AP beacon time for synchronisation (on older FW) + * @bcn_prot: beacon protection data (keys; FIXME: needs to be per link) + * @bf_data: beacon filtering data + * @deflink: default link data for use in non-MLO + * @link: link data for each link in MLO + * @esr_active: indicates eSR mode is active + * @pm_enabled: indicates powersave is enabled */ struct iwl_mvm_vif { struct iwl_mvm *mvm; @@ -635,18 +647,9 @@ struct iwl_mvm_tcm { * @queue: queue of this reorder buffer * @last_amsdu: track last ASMDU SN for duplication detection * @last_sub_index: track 
ASMDU sub frame index for duplication detection - * @reorder_timer: timer for frames are in the reorder buffer. For AMSDU - * it is the time of last received sub-frame - * @removed: prevent timer re-arming * @valid: reordering is valid for this queue * @lock: protect reorder buffer internal state * @mvm: mvm pointer, needed for frame timer context - * @consec_oldsn_drops: consecutive drops due to old SN - * @consec_oldsn_ampdu_gp2: A-MPDU GP2 timestamp to track - * when to apply old SN consecutive drop workaround - * @consec_oldsn_prev_drop: track whether or not an MPDU - * that was single/part of the previous A-MPDU was - * dropped due to old SN */ struct iwl_mvm_reorder_buffer { u16 head_sn; @@ -655,33 +658,21 @@ struct iwl_mvm_reorder_buffer { int queue; u16 last_amsdu; u8 last_sub_index; - struct timer_list reorder_timer; - bool removed; bool valid; spinlock_t lock; struct iwl_mvm *mvm; - unsigned int consec_oldsn_drops; - u32 consec_oldsn_ampdu_gp2; - unsigned int consec_oldsn_prev_drop:1; } ____cacheline_aligned_in_smp; /** - * struct _iwl_mvm_reorder_buf_entry - reorder buffer entry per-queue/per-seqno + * struct iwl_mvm_reorder_buf_entry - reorder buffer entry per-queue/per-seqno * @frames: list of skbs stored - * @reorder_time: time the packet was stored in the reorder buffer */ -struct _iwl_mvm_reorder_buf_entry { - struct sk_buff_head frames; - unsigned long reorder_time; -}; - -/* make this indirection to get the aligned thing */ struct iwl_mvm_reorder_buf_entry { - struct _iwl_mvm_reorder_buf_entry e; + struct sk_buff_head frames; } #ifndef __CHECKER__ /* sparse doesn't like this construct: "bad integer constant expression" */ -__aligned(roundup_pow_of_two(sizeof(struct _iwl_mvm_reorder_buf_entry))) +__aligned(roundup_pow_of_two(sizeof(struct sk_buff_head))) #endif ; @@ -689,15 +680,17 @@ __aligned(roundup_pow_of_two(sizeof(struct _iwl_mvm_reorder_buf_entry))) * struct iwl_mvm_baid_data - BA session data * @sta_mask: current station mask for the BAID * @tid: tid of the session - * @baid baid of the session + * @baid: baid of the session * @timeout: the timeout set in the addba request * @entries_per_queue: # of buffers per queue, this actually gets * aligned up to avoid cache line sharing between queues * @last_rx: last rx jiffies, updated only if timeout passed from last update * @session_timer: timer to check if BA session expired, runs at 2 * timeout + * @rcu_ptr: BA data RCU protected access + * @rcu_head: RCU head for freeing this data * @mvm: mvm pointer, needed for timer context * @reorder_buf: reorder buffer, allocated per queue - * @reorder_buf_data: data + * @entries: data */ struct iwl_mvm_baid_data { struct rcu_head rcu_head; @@ -967,6 +960,9 @@ struct iwl_mvm { u8 scan_last_antenna_idx; /* to toggle TX between antennas */ u8 mgmt_last_antenna_idx; + u8 set_tx_ant; + u8 set_rx_ant; + /* last smart fifo state that was successfully sent to firmware */ enum iwl_sf_state sf_state; @@ -1198,6 +1194,8 @@ struct iwl_mvm { struct iwl_time_sync_data time_sync; struct iwl_mei_scan_filter mei_scan_filter; + + bool statistics_clear; }; /* Extract MVM priv from op_mode and _hw */ @@ -1658,7 +1656,7 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status); static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; } #endif int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk); -int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal); +int iwl_mvm_flush_sta(struct iwl_mvm *mvm, u32 sta_id, u32 tfd_queue_mask); int iwl_mvm_flush_sta_tids(struct 
iwl_mvm *mvm, u32 sta_id, u16 tids); /* Utils to extract sta related data */ @@ -1689,6 +1687,16 @@ static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm) } /* Statistics */ +void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_handle_rx_system_oper_part1_stats(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +static inline void +iwl_mvm_handle_rx_system_end_stats_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ +} + void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt); void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, @@ -1702,16 +1710,29 @@ int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm); static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm) { - return mvm->nvm_data && mvm->nvm_data->valid_tx_ant ? - mvm->fw->valid_tx_ant & mvm->nvm_data->valid_tx_ant : - mvm->fw->valid_tx_ant; + u8 tx_ant = mvm->fw->valid_tx_ant; + + if (mvm->nvm_data && mvm->nvm_data->valid_tx_ant) + tx_ant &= mvm->nvm_data->valid_tx_ant; + + if (mvm->set_tx_ant) + tx_ant &= mvm->set_tx_ant; + + return tx_ant; } static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm) { - return mvm->nvm_data && mvm->nvm_data->valid_rx_ant ? - mvm->fw->valid_rx_ant & mvm->nvm_data->valid_rx_ant : - mvm->fw->valid_rx_ant; + u8 rx_ant = mvm->fw->valid_rx_ant; + + if (mvm->nvm_data && mvm->nvm_data->valid_rx_ant) + rx_ant &= mvm->nvm_data->valid_rx_ant; + + if (mvm->set_rx_ant) + rx_ant &= mvm->set_rx_ant; + + return rx_ant; + } static inline void iwl_mvm_toggle_tx_ant(struct iwl_mvm *mvm, u8 *ant) @@ -1892,41 +1913,10 @@ void iwl_mvm_stop_ap_ibss_common(struct iwl_mvm *mvm, struct ieee80211_vif *vif); /* BSS Info */ -/** - * struct iwl_mvm_bss_info_changed_ops - callbacks for the bss_info_changed() - * - * Since the only difference between both MLD and - * non-MLD versions of bss_info_changed() is these function calls, - * each version will send its specific function calls to - * %iwl_mvm_bss_info_changed_common(). 
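The two antenna helpers above now build the effective mask by successive intersection: the firmware capability, then the NVM restriction, then the new set_tx_ant/set_rx_ant debugfs override, each applied only when non-zero. A standalone sketch of the same derivation, with made-up mask values:

```c
#include <stdio.h>

/* Toy model of the antenna-mask derivation: AND in each optional
 * restriction only if it is set. Values below are invented.
 */
static unsigned char valid_ant(unsigned char fw_ant,
                               unsigned char nvm_ant,
                               unsigned char set_ant)
{
        unsigned char ant = fw_ant;

        if (nvm_ant)
                ant &= nvm_ant;
        if (set_ant)
                ant &= set_ant;
        return ant;
}

int main(void)
{
        /* fw: ANT_A|ANT_B (0x3), NVM: 0x3, user forces ANT_B (0x2) */
        printf("0x%x\n", valid_ant(0x3, 0x3, 0x2));        /* 0x2 */
        return 0;
}
```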
- * - * @bss_info_changed_sta: pointer to the function that handles changes - * in bss_info in sta mode - * @bss_info_changed_ap_ibss: pointer to the function that handles changes - * in bss_info in ap and ibss modes - */ -struct iwl_mvm_bss_info_changed_ops { - void (*bss_info_changed_sta)(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *bss_conf, - u64 changes); - void (*bss_info_changed_ap_ibss)(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *bss_conf, - u64 changes); -}; - -void -iwl_mvm_bss_info_changed_common(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *bss_conf, - const struct iwl_mvm_bss_info_changed_ops *callbacks, - u64 changes); -void -iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *link_conf, - u64 changes); +void iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + u64 changes); void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u64 changes); @@ -1942,13 +1932,12 @@ void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm, * * @add_aux_sta_for_hs20: pointer to the function that adds an aux sta * for Hot Spot 2.0 - * @switch_phy_ctxt: pointer to the function that switches a vif from one - * phy_ctx to another + * @link: For a P2P Device interface, pointer to a function that links the + * MAC/Link to the PHY context */ struct iwl_mvm_roc_ops { int (*add_aux_sta_for_hs20)(struct iwl_mvm *mvm, u32 lmac_id); - int (*switch_phy_ctxt)(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mvm_phy_ctxt *new_phy_ctxt); + int (*link)(struct iwl_mvm *mvm, struct ieee80211_vif *vif); }; int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -1959,7 +1948,7 @@ int iwl_mvm_cancel_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif); /*Session Protection */ void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - u32 duration_override); + u32 duration_override, unsigned int link_id); /* Quota management */ static inline size_t iwl_mvm_quota_cmd_size(struct iwl_mvm *mvm) @@ -2019,18 +2008,19 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, /* MVM debugfs */ #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm); -void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +void iwl_mvm_vif_dbgfs_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_vif_dbgfs_rm_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif); #else static inline void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) { } static inline void -iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +iwl_mvm_vif_dbgfs_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { } static inline void -iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +iwl_mvm_vif_dbgfs_rm_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { } #endif /* CONFIG_IWLWIFI_DEBUGFS */ @@ -2263,7 +2253,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, bool *changed); struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, bool *changed); -int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm); 
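The debugfs declarations above follow the usual compiled-out stub idiom: under CONFIG_IWLWIFI_DEBUGFS the real hooks are declared, otherwise empty static inlines take their place so call sites need no #ifdef. A self-contained sketch of the idiom, using a hypothetical feature switch:

```c
#include <stdio.h>

#define FEATURE_DEBUGFS 1        /* flip to 0 to compile the stub */

#if FEATURE_DEBUGFS
static void dbgfs_add_link(const char *name)
{
        printf("debugfs: adding %s\n", name);
}
#else
/* compiled-out variant: an empty inline the optimizer removes */
static inline void dbgfs_add_link(const char *name) { }
#endif

int main(void)
{
        dbgfs_add_link("link0");        /* no #ifdef at the call site */
        return 0;
}
```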
+int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm, bool force_regd_sync); void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm); /* smart fifo */ @@ -2316,7 +2306,8 @@ void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm); void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool sta_added); void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); + struct ieee80211_vif *vif, + unsigned int link_id); int iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u8 oper_class, @@ -2335,7 +2326,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, enum iwl_mvm_rxq_notif_type type, bool sync, const void *data, u32 size); -void iwl_mvm_reorder_timer_expired(struct timer_list *t); struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid); bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm); @@ -2375,6 +2365,10 @@ void iwl_mvm_link_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_link_sta *link_sta, struct dentry *dir); +void iwl_mvm_link_add_debugfs(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct dentry *dir); #endif /* new MLD related APIs */ @@ -2427,7 +2421,8 @@ static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band) /* Channel Switch */ void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk); int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link); /* Channel Context */ /** @@ -2611,6 +2606,7 @@ int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params); int iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant); +int iwl_mvm_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant); int iwl_mvm_mac_start(struct ieee80211_hw *hw); void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, enum ieee80211_reconfig_type reconfig_type); @@ -2682,8 +2678,6 @@ void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx, u32 changed); int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw); -int iwl_mvm_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, - bool set); void iwl_mvm_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw); int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, @@ -2725,4 +2719,8 @@ int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_set_hw_timestamp *hwts); int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +bool iwl_mvm_enable_fils(struct iwl_mvm *mvm, + struct ieee80211_chanctx_conf *ctx); +void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + bool valid_links_changed); #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index f67ab8ee18c2..c0dd441e800e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2021-2023 Intel Corporation * 
Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -220,6 +220,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) struct iwl_nvm_section *sections = mvm->nvm_sections; const __be16 *hw; const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku; + u8 tx_ant = mvm->fw->valid_tx_ant; + u8 rx_ant = mvm->fw->valid_rx_ant; int regulatory_type; /* Checking for required sections */ @@ -270,9 +272,15 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data : (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data; + if (mvm->set_tx_ant) + tx_ant &= mvm->set_tx_ant; + + if (mvm->set_rx_ant) + rx_ant &= mvm->set_rx_ant; + return iwl_parse_nvm_data(mvm->trans, mvm->cfg, mvm->fw, hw, sw, calib, regulatory, mac_override, phy_sku, - mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant); + tx_ant, rx_ant); } /* Loads the NVM data stored in mvm->nvm_sections into the NIC */ @@ -565,7 +573,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm) * try to replay the last set MCC to FW. If it doesn't exist, * queue an update to cfg80211 to retrieve the default alpha2 from FW. */ - retval = iwl_mvm_init_fw_regd(mvm); + retval = iwl_mvm_init_fw_regd(mvm, true); if (retval != -ENOENT) return retval; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 5336a4afde4d..1627b2f819db 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -322,6 +322,19 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, RX_HANDLER_ASYNC_LOCKED), + RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_NOTIF, + iwl_mvm_handle_rx_system_oper_stats, + RX_HANDLER_ASYNC_LOCKED, + struct iwl_system_statistics_notif_oper), + RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF, + iwl_mvm_handle_rx_system_oper_part1_stats, + RX_HANDLER_ASYNC_LOCKED, + struct iwl_system_statistics_part1_notif_oper), + RX_HANDLER_GRP(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF, + iwl_mvm_handle_rx_system_end_stats_notif, + RX_HANDLER_ASYNC_LOCKED, + struct iwl_system_statistics_end_notif), + RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID, iwl_mvm_window_status_notif, RX_HANDLER_SYNC, struct iwl_ba_window_status_notif), @@ -426,6 +439,9 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION, iwl_mvm_time_sync_msmt_confirm_event, RX_HANDLER_SYNC, struct iwl_time_msmt_cfm_notify), + RX_HANDLER_GRP(MAC_CONF_GROUP, ROC_NOTIF, + iwl_mvm_rx_roc_notif, RX_HANDLER_SYNC, + struct iwl_roc_notif), }; #undef RX_HANDLER #undef RX_HANDLER_GRP @@ -534,6 +550,8 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = { HCMD_NAME(RFI_CONFIG_CMD), HCMD_NAME(RFI_GET_FREQ_TABLE_CMD), HCMD_NAME(SYSTEM_FEATURES_CONTROL_CMD), + HCMD_NAME(SYSTEM_STATISTICS_CMD), + HCMD_NAME(SYSTEM_STATISTICS_END_NOTIF), HCMD_NAME(RFI_DEACTIVATE_NOTIF), }; @@ -549,6 +567,8 @@ static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = { HCMD_NAME(AUX_STA_CMD), HCMD_NAME(STA_REMOVE_CMD), HCMD_NAME(STA_DISABLE_TX_CMD), + HCMD_NAME(ROC_CMD), + HCMD_NAME(ROC_NOTIF), HCMD_NAME(SESSION_PROTECTION_NOTIF), HCMD_NAME(CHANNEL_SWITCH_START_NOTIF), }; @@ -589,6 +609,14 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { /* Please keep this array *SORTED* by hex value. 
* Access is done through binary search */ +static const struct iwl_hcmd_names iwl_mvm_statistics_names[] = { + HCMD_NAME(STATISTICS_OPER_NOTIF), + HCMD_NAME(STATISTICS_OPER_PART1_NOTIF), +}; + +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ static const struct iwl_hcmd_names iwl_mvm_scan_names[] = { HCMD_NAME(OFFLOAD_MATCH_INFO_NOTIF), }; @@ -640,6 +668,7 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), [REGULATORY_AND_NVM_GROUP] = HCMD_ARR(iwl_mvm_regulatory_and_nvm_names), + [STATISTICS_GROUP] = HCMD_ARR(iwl_mvm_statistics_names), }; /* this forward declaration can avoid to export the function */ @@ -751,7 +780,10 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm) */ mvm->nvm_data = iwl_parse_mei_nvm_data(trans, trans->cfg, - mvm->mei_nvm_data, mvm->fw); + mvm->mei_nvm_data, + mvm->fw, + mvm->set_tx_ant, + mvm->set_rx_ant); return 0; } @@ -790,6 +822,9 @@ get_nvm_from_fw: if (ret) IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); + /* no longer need this regardless of failure or not */ + mvm->pldr_sync = false; + return ret; } @@ -1136,7 +1171,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, return NULL; if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) - max_agg = IEEE80211_MAX_AMPDU_BUF_EHT; + max_agg = 512; else max_agg = IEEE80211_MAX_AMPDU_BUF_HE; @@ -1298,7 +1333,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, snprintf(mvm->hw->wiphy->fw_version, sizeof(mvm->hw->wiphy->fw_version), - "%s", fw->fw_version); + "%.31s", fw->fw_version); trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE); @@ -1944,9 +1979,6 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - if (mvm->pldr_sync) - return; - if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) && !test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status)) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c index a5b432bc9e2f..4e1fccff3987 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c @@ -192,6 +192,9 @@ int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd.rlc.rx_chain_info, chains_static, chains_dynamic); + IWL_DEBUG_FW(mvm, "Send RLC command: phy=%d, rx_chain_info=0x%x\n", + ctxt->id, cmd.rlc.rx_chain_info); + return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(RLC_CONFIG_CMD, DATA_PATH_GROUP, 2), 0, sizeof(cmd), &cmd); @@ -265,6 +268,8 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic) { + int ret; + WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && ctxt->ref); lockdep_assert_held(&mvm->mutex); @@ -273,9 +278,16 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, ctxt->width = chandef->width; ctxt->center_freq1 = chandef->center_freq1; - return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, - chains_static, chains_dynamic, - FW_CTXT_ACTION_ADD); + ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, + chains_static, chains_dynamic, + FW_CTXT_ACTION_ADD); + + if (ret) + return ret; + + ctxt->ref++; + + return 0; } /* @@ -285,6 +297,11 @@ int iwl_mvm_phy_ctxt_add(struct 
iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) { lockdep_assert_held(&mvm->mutex); + + /* If we were taking the first ref, we should have + * called iwl_mvm_phy_ctxt_add. + */ + WARN_ON(!ctxt->ref); ctxt->ref++; } @@ -301,7 +318,11 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, lockdep_assert_held(&mvm->mutex); - if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) >= 2 && + if (WARN_ON_ONCE(!ctxt->ref)) + return -EINVAL; + + if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, + RLC_CONFIG_CMD), 0) >= 2 && ctxt->channel == chandef->chan && ctxt->width == chandef->width && ctxt->center_freq1 == chandef->center_freq1) @@ -335,6 +356,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) { + struct cfg80211_chan_def chandef; lockdep_assert_held(&mvm->mutex); if (WARN_ON_ONCE(!ctxt)) @@ -342,41 +364,13 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) ctxt->ref--; - /* - * Move unused phy's to a default channel. When the phy is moved the, - * fw will cleanup immediate quiet bit if it was previously set, - * otherwise we might not be able to reuse this phy. - */ - if (ctxt->ref == 0) { - struct ieee80211_channel *chan = NULL; - struct cfg80211_chan_def chandef; - struct ieee80211_supported_band *sband; - enum nl80211_band band; - int channel; - - for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { - sband = mvm->hw->wiphy->bands[band]; - - if (!sband) - continue; - - for (channel = 0; channel < sband->n_channels; channel++) - if (!(sband->channels[channel].flags & - IEEE80211_CHAN_DISABLED)) { - chan = &sband->channels[channel]; - break; - } - - if (chan) - break; - } - - if (WARN_ON(!chan)) - return; - - cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); - iwl_mvm_phy_ctxt_changed(mvm, ctxt, &chandef, 1, 1); - } + if (ctxt->ref) + return; + + cfg80211_chandef_create(&chandef, ctxt->channel, NL80211_CHAN_NO_HT); + + iwl_mvm_phy_ctxt_apply(mvm, ctxt, &chandef, 1, 1, + FW_CTXT_ACTION_REMOVE); } static void iwl_mvm_binding_iterator(void *_data, u8 *mac, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index 9131b5f1bc76..1b9b06e0443f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -489,6 +489,11 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm) if (mvm->ext_clock_valid) cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK); + if (iwl_fw_lookup_cmd_ver(mvm->fw, POWER_TABLE_CMD, 0) >= 7 && + test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) + cmd.flags |= + cpu_to_le16(DEVICE_POWER_FLAGS_NO_SLEEP_TILL_D3_MSK); + IWL_DEBUG_POWER(mvm, "Sending device power command with flags = 0x%X\n", cmd.flags); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index 1ca375a5cf6b..376b23b409dc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -3,7 +3,7 @@ * * Copyright(c) 2015 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright (C) 2003 - 2014, 2018 - 2022 Intel Corporation + * Copyright (C) 2003 - 2014, 2018 - 2023 Intel Corporation *****************************************************************************/ #ifndef 
__rs_h__ @@ -203,18 +203,12 @@ struct rs_rate { /** * struct iwl_lq_sta_rs_fw - rate and related statistics for RS in FW * @last_rate_n_flags: last rate reported by FW - * @max_agg_bufsize: the maximal size of the AGG buffer for this station - * @sta_id: the id of the station -#ifdef CONFIG_MAC80211_DEBUGFS - * @dbg_fixed_rate: for debug, use fixed rate if not 0 - * @dbg_agg_frame_count_lim: for debug, max number of frames in A-MPDU -#endif + * @pers.sta_id: the id of the station * @chains: bitmask of chains reported in %chain_signal * @chain_signal: per chain signal strength * @last_rssi: last rssi reported * @drv: pointer back to the driver data */ - struct iwl_lq_sta_rs_fw { /* last tx rate_n_flags */ u32 last_rate_n_flags; @@ -223,7 +217,14 @@ struct iwl_lq_sta_rs_fw { struct lq_sta_pers_rs_fw { u32 sta_id; #ifdef CONFIG_MAC80211_DEBUGFS + /** + * @dbg_fixed_rate: for debug, use fixed rate if not 0 + */ u32 dbg_fixed_rate; + /** + * @dbg_agg_frame_count_lim: for debug, max number of + * frames in A-MPDU + */ u16 dbg_agg_frame_count_lim; #endif u8 chains; @@ -233,7 +234,7 @@ struct iwl_lq_sta_rs_fw { } pers; }; -/** +/* * struct iwl_rate_scale_data -- tx success history for one rate */ struct iwl_rate_scale_data { @@ -275,7 +276,7 @@ struct rs_rate_stats { u64 total; }; -/** +/* * struct iwl_scale_tbl_info -- tx params and success history for all rates * * There are two of these in struct iwl_lq_sta, @@ -296,7 +297,7 @@ enum { RS_STATE_STAY_IN_COLUMN, }; -/** +/* * struct iwl_lq_sta -- driver's rate scaling private structure * * Pointer to this gets passed back and forth between driver and mac80211. diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 542c192698a4..8caa971770c6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -553,7 +553,7 @@ struct iwl_mvm_stat_data { struct iwl_mvm_stat_data_all_macs { struct iwl_mvm *mvm; __le32 flags; - struct iwl_statistics_ntfy_per_mac *per_mac_stats; + struct iwl_stats_ntfy_per_mac *per_mac; }; static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig) @@ -658,7 +658,7 @@ static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_stat_data_all_macs *data = _data; - struct iwl_statistics_ntfy_per_mac *mac_stats; + struct iwl_stats_ntfy_per_mac *mac_stats; int sig; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u16 vif_id = mvmvif->id; @@ -669,7 +669,7 @@ static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac, if (vif->type != NL80211_IFTYPE_STATION) return; - mac_stats = &data->per_mac_stats[vif_id]; + mac_stats = &data->per_mac[vif_id]; mvmvif->deflink.beacon_stats.num_beacons = le32_to_cpu(mac_stats->beacon_counter); @@ -759,7 +759,7 @@ iwl_mvm_stats_ver_15(struct iwl_mvm *mvm, struct iwl_mvm_stat_data_all_macs data = { .mvm = mvm, .flags = stats->flags, - .per_mac_stats = stats->per_mac_stats, + .per_mac = stats->per_mac, }; ieee80211_iterate_active_interfaces(mvm->hw, @@ -829,6 +829,142 @@ static bool iwl_mvm_verify_stats_len(struct iwl_mvm *mvm, } static void +iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm, + struct iwl_stats_ntfy_per_link *per_link) +{ + u32 air_time[MAC_INDEX_AUX] = {}; + u32 rx_bytes[MAC_INDEX_AUX] = {}; + int fw_link_id; + + for (fw_link_id = 0; fw_link_id < ARRAY_SIZE(mvm->link_id_to_link_conf); + fw_link_id++) { + struct iwl_stats_ntfy_per_link *link_stats; + struct ieee80211_bss_conf *bss_conf; + struct iwl_mvm_vif 
*mvmvif; + int link_id; + int sig; + + bss_conf = iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, fw_link_id, + false); + if (!bss_conf) + continue; + + if (bss_conf->vif->type != NL80211_IFTYPE_STATION) + continue; + + link_id = bss_conf->link_id; + if (link_id >= ARRAY_SIZE(mvmvif->link)) + continue; + + mvmvif = iwl_mvm_vif_from_mac80211(bss_conf->vif); + if (!mvmvif || !mvmvif->link[link_id]) + continue; + + link_stats = &per_link[fw_link_id]; + + mvmvif->link[link_id]->beacon_stats.num_beacons = + le32_to_cpu(link_stats->beacon_counter); + + /* we basically just use the u8 to store 8 bits and then treat + * it as a s8 whenever we take it out to a different type. + */ + mvmvif->link[link_id]->beacon_stats.avg_signal = + -le32_to_cpu(link_stats->beacon_average_energy); + + /* make sure that beacon statistics don't go backwards with TCM + * request to clear statistics + */ + if (mvm->statistics_clear) + mvmvif->link[link_id]->beacon_stats.accu_num_beacons += + mvmvif->link[link_id]->beacon_stats.num_beacons; + + sig = -le32_to_cpu(link_stats->beacon_filter_average_energy); + iwl_mvm_update_vif_sig(bss_conf->vif, sig); + + if (WARN_ONCE(mvmvif->id >= MAC_INDEX_AUX, + "invalid mvmvif id: %d", mvmvif->id)) + continue; + + air_time[mvmvif->id] += + le32_to_cpu(per_link[fw_link_id].air_time); + rx_bytes[mvmvif->id] += + le32_to_cpu(per_link[fw_link_id].rx_bytes); + } + + /* Don't update in case the statistics are not cleared, since + * we will end up counting twice the same airtime, once in TCM + * request and once in statistics notification. + */ + if (mvm->statistics_clear) { + __le32 air_time_le[MAC_INDEX_AUX]; + __le32 rx_bytes_le[MAC_INDEX_AUX]; + int vif_id; + + for (vif_id = 0; vif_id < ARRAY_SIZE(air_time_le); vif_id++) { + air_time_le[vif_id] = cpu_to_le32(air_time[vif_id]); + rx_bytes_le[vif_id] = cpu_to_le32(rx_bytes[vif_id]); + } + + iwl_mvm_update_tcm_from_stats(mvm, air_time_le, rx_bytes_le); + } +} + +void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + u8 average_energy[IWL_MVM_STATION_COUNT_MAX]; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_system_statistics_notif_oper *stats; + int i; + u32 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, STATISTICS_GROUP, + STATISTICS_OPER_NOTIF, 0); + + if (notif_ver != 3) { + IWL_FW_CHECK_FAILED(mvm, + "Oper stats notif ver %d is not supported\n", + notif_ver); + return; + } + + stats = (void *)&pkt->data; + iwl_mvm_stat_iterator_all_links(mvm, stats->per_link); + + for (i = 0; i < ARRAY_SIZE(average_energy); i++) + average_energy[i] = + le32_to_cpu(stats->per_sta[i].average_energy); + + ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter, + average_energy); +} + +void iwl_mvm_handle_rx_system_oper_part1_stats(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_system_statistics_part1_notif_oper *part1_stats; + int i; + u32 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, STATISTICS_GROUP, + STATISTICS_OPER_PART1_NOTIF, 0); + + if (notif_ver != 4) { + IWL_FW_CHECK_FAILED(mvm, + "Part1 stats notif ver %d is not supported\n", + notif_ver); + return; + } + + part1_stats = (void *)&pkt->data; + mvm->radio_stats.rx_time = 0; + mvm->radio_stats.tx_time = 0; + for (i = 0; i < ARRAY_SIZE(part1_stats->per_link); i++) { + mvm->radio_stats.rx_time += + le64_to_cpu(part1_stats->per_link[i].rx_time); + mvm->radio_stats.tx_time += + le64_to_cpu(part1_stats->per_link[i].tx_time); + } +} + +static void 
iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { @@ -887,11 +1023,11 @@ iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm, for (i = 0; i < ARRAY_SIZE(average_energy); i++) average_energy[i] = - le32_to_cpu(stats->per_sta_stats[i].average_energy); + le32_to_cpu(stats->per_sta[i].average_energy); for (i = 0; i < ARRAY_SIZE(air_time); i++) { - air_time[i] = stats->per_mac_stats[i].air_time; - rx_bytes[i] = stats->per_mac_stats[i].rx_bytes; + air_time[i] = stats->per_mac[i].air_time; + rx_bytes[i] = stats->per_mac[i].rx_bytes; } } @@ -917,6 +1053,13 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, __le32 *bytes, *air_time, flags; int expected_size; u8 *energy; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(SYSTEM_GROUP, + SYSTEM_STATISTICS_CMD), + IWL_FW_CMD_VER_UNKNOWN); + + if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN) + return; /* From ver 14 and up we use TLV statistics format */ if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 8d1e44fd9de7..886d00098528 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -376,8 +376,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta, */ if (phy_info & IWL_RX_MPDU_PHY_AMPDU && (status & IWL_RX_MPDU_STATUS_SEC_MASK) == - IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on) + IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on) { + IWL_DEBUG_DROP(mvm, "Dropping packets, bad enc status\n"); return -1; + } if (unlikely(ieee80211_is_mgmt(hdr->frame_control) && !ieee80211_has_protected(hdr->frame_control))) @@ -548,44 +550,12 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, return false; } -/* - * Returns true if sn2 - buffer_size < sn1 < sn2. - * To be used only in order to compare reorder buffer head with NSSN. - * We fully trust NSSN unless it is behind us due to reorder timeout. - * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN. - */ -static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size) -{ - return ieee80211_sn_less(sn1, sn2) && - !ieee80211_sn_less(sn1, sn2 - buffer_size); -} - -static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn) -{ - if (IWL_MVM_USE_NSSN_SYNC) { - struct iwl_mvm_nssn_sync_data notif = { - .baid = baid, - .nssn = nssn, - }; - - iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NSSN_SYNC, false, - ¬if, sizeof(notif)); - } -} - -#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10) - -enum iwl_mvm_release_flags { - IWL_MVM_RELEASE_SEND_RSS_SYNC = BIT(0), - IWL_MVM_RELEASE_FROM_RSS_SYNC = BIT(1), -}; - static void iwl_mvm_release_frames(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct napi_struct *napi, struct iwl_mvm_baid_data *baid_data, struct iwl_mvm_reorder_buffer *reorder_buf, - u16 nssn, u32 flags) + u16 nssn) { struct iwl_mvm_reorder_buf_entry *entries = &baid_data->entries[reorder_buf->queue * @@ -594,31 +564,12 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm, lockdep_assert_held(&reorder_buf->lock); - /* - * We keep the NSSN not too far behind, if we are sync'ing it and it - * is more than 2048 ahead of us, it must be behind us. Discard it. - * This can happen if the queue that hit the 0 / 2048 seqno was lagging - * behind and this queue already processed packets. 
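The deleted comment above turns on how far an NSSN may validly sit from the buffer head in the 4096-wide sequence space; with the cross-queue sync gone, the simplified release loop relies only on plain modular comparison. A toy model of that arithmetic, with sn_less() re-implemented here for illustration in the shape of mac80211's ieee80211_sn_less():

```c
#include <stdio.h>

/* 802.11 SNs live in [0, 4095], so "less than" is defined over a
 * half-window of 2048 and the release loop walks forward modulo 4096.
 */
#define SN_MODULO 0x1000
#define SN_MASK   (SN_MODULO - 1)

static int sn_less(unsigned int sn1, unsigned int sn2)
{
        return ((sn1 - sn2) & SN_MASK) > (SN_MODULO >> 1);
}

int main(void)
{
        unsigned int ssn = 4094, nssn = 2;

        /* releases 4094, 4095, 0, 1; the wrap at 4096 is harmless */
        while (sn_less(ssn, nssn)) {
                printf("release %u\n", ssn);
                ssn = (ssn + 1) & SN_MASK;
        }
        return 0;
}
```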
The next if - * would have caught cases where this queue would have processed less - * than 64 packets, but it may have processed more than 64 packets. - */ - if ((flags & IWL_MVM_RELEASE_FROM_RSS_SYNC) && - ieee80211_sn_less(nssn, ssn)) - goto set_timer; - - /* ignore nssn smaller than head sn - this can happen due to timeout */ - if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size)) - goto set_timer; - - while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) { + while (ieee80211_sn_less(ssn, nssn)) { int index = ssn % reorder_buf->buf_size; - struct sk_buff_head *skb_list = &entries[index].e.frames; + struct sk_buff_head *skb_list = &entries[index].frames; struct sk_buff *skb; ssn = ieee80211_sn_inc(ssn); - if ((flags & IWL_MVM_RELEASE_SEND_RSS_SYNC) && - (ssn == 2048 || ssn == 0)) - iwl_mvm_sync_nssn(mvm, baid_data->baid, ssn); /* * Empty the list. Will have more than one frame for A-MSDU. @@ -633,99 +584,6 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm, } } reorder_buf->head_sn = nssn; - -set_timer: - if (reorder_buf->num_stored && !reorder_buf->removed) { - u16 index = reorder_buf->head_sn % reorder_buf->buf_size; - - while (skb_queue_empty(&entries[index].e.frames)) - index = (index + 1) % reorder_buf->buf_size; - /* modify timer to match next frame's expiration time */ - mod_timer(&reorder_buf->reorder_timer, - entries[index].e.reorder_time + 1 + - RX_REORDER_BUF_TIMEOUT_MQ); - } else { - del_timer(&reorder_buf->reorder_timer); - } -} - -void iwl_mvm_reorder_timer_expired(struct timer_list *t) -{ - struct iwl_mvm_reorder_buffer *buf = from_timer(buf, t, reorder_timer); - struct iwl_mvm_baid_data *baid_data = - iwl_mvm_baid_data_from_reorder_buf(buf); - struct iwl_mvm_reorder_buf_entry *entries = - &baid_data->entries[buf->queue * baid_data->entries_per_queue]; - int i; - u16 sn = 0, index = 0; - bool expired = false; - bool cont = false; - - spin_lock(&buf->lock); - - if (!buf->num_stored || buf->removed) { - spin_unlock(&buf->lock); - return; - } - - for (i = 0; i < buf->buf_size ; i++) { - index = (buf->head_sn + i) % buf->buf_size; - - if (skb_queue_empty(&entries[index].e.frames)) { - /* - * If there is a hole and the next frame didn't expire - * we want to break and not advance SN - */ - cont = false; - continue; - } - if (!cont && - !time_after(jiffies, entries[index].e.reorder_time + - RX_REORDER_BUF_TIMEOUT_MQ)) - break; - - expired = true; - /* continue until next hole after this expired frames */ - cont = true; - sn = ieee80211_sn_add(buf->head_sn, i + 1); - } - - if (expired) { - struct ieee80211_sta *sta; - struct iwl_mvm_sta *mvmsta; - u8 sta_id = ffs(baid_data->sta_mask) - 1; - - rcu_read_lock(); - sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]); - if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { - rcu_read_unlock(); - goto out; - } - - mvmsta = iwl_mvm_sta_from_mac80211(sta); - - /* SN is set to the last expired frame + 1 */ - IWL_DEBUG_HT(buf->mvm, - "Releasing expired frames for sta %u, sn %d\n", - sta_id, sn); - iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif, - sta, baid_data->tid); - iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data, - buf, sn, IWL_MVM_RELEASE_SEND_RSS_SYNC); - rcu_read_unlock(); - } else { - /* - * If no frame expired and there are stored frames, index is now - * pointing to the first unexpired frame - modify timer - * accordingly to this frame. 
- */ - mod_timer(&buf->reorder_timer, - entries[index].e.reorder_time + - 1 + RX_REORDER_BUF_TIMEOUT_MQ); - } - -out: - spin_unlock(&buf->lock); } static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue, @@ -758,10 +616,8 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue, spin_lock_bh(&reorder_buf->lock); iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf, ieee80211_sn_add(reorder_buf->head_sn, - reorder_buf->buf_size), - 0); + reorder_buf->buf_size)); spin_unlock_bh(&reorder_buf->lock); - del_timer_sync(&reorder_buf->reorder_timer); out: rcu_read_unlock(); @@ -769,8 +625,7 @@ out: static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm, struct napi_struct *napi, - u8 baid, u16 nssn, int queue, - u32 flags) + u8 baid, u16 nssn, int queue) { struct ieee80211_sta *sta; struct iwl_mvm_reorder_buffer *reorder_buf; @@ -788,8 +643,7 @@ static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm, ba_data = rcu_dereference(mvm->baid_map[baid]); if (!ba_data) { - WARN(!(flags & IWL_MVM_RELEASE_FROM_RSS_SYNC), - "BAID %d not found in map\n", baid); + WARN(true, "BAID %d not found in map\n", baid); goto out; } @@ -803,22 +657,13 @@ static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm, spin_lock_bh(&reorder_buf->lock); iwl_mvm_release_frames(mvm, sta, napi, ba_data, - reorder_buf, nssn, flags); + reorder_buf, nssn); spin_unlock_bh(&reorder_buf->lock); out: rcu_read_unlock(); } -static void iwl_mvm_nssn_sync(struct iwl_mvm *mvm, - struct napi_struct *napi, int queue, - const struct iwl_mvm_nssn_sync_data *data) -{ - iwl_mvm_release_frames_from_notif(mvm, napi, data->baid, - data->nssn, queue, - IWL_MVM_RELEASE_FROM_RSS_SYNC); -} - void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { @@ -853,14 +698,6 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, break; iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data); break; - case IWL_MVM_RXQ_NSSN_SYNC: - if (WARN_ONCE(len != sizeof(struct iwl_mvm_nssn_sync_data), - "invalid nssn sync notification size %d (%d)", - len, (int)sizeof(struct iwl_mvm_nssn_sync_data))) - break; - iwl_mvm_nssn_sync(mvm, napi, queue, - (void *)internal_notif->data); - break; default: WARN_ONCE(1, "Invalid identifier %d", internal_notif->type); } @@ -874,55 +711,6 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, } } -static void iwl_mvm_oldsn_workaround(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, int tid, - struct iwl_mvm_reorder_buffer *buffer, - u32 reorder, u32 gp2, int queue) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - - if (gp2 != buffer->consec_oldsn_ampdu_gp2) { - /* we have a new (A-)MPDU ... 
*/ - - /* - * reset counter to 0 if we didn't have any oldsn in - * the last A-MPDU (as detected by GP2 being identical) - */ - if (!buffer->consec_oldsn_prev_drop) - buffer->consec_oldsn_drops = 0; - - /* either way, update our tracking state */ - buffer->consec_oldsn_ampdu_gp2 = gp2; - } else if (buffer->consec_oldsn_prev_drop) { - /* - * tracking state didn't change, and we had an old SN - * indication before - do nothing in this case, we - * already noted this one down and are waiting for the - * next A-MPDU (by GP2) - */ - return; - } - - /* return unless this MPDU has old SN */ - if (!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN)) - return; - - /* update state */ - buffer->consec_oldsn_prev_drop = 1; - buffer->consec_oldsn_drops++; - - /* if limit is reached, send del BA and reset state */ - if (buffer->consec_oldsn_drops == IWL_MVM_AMPDU_CONSEC_DROPS_DELBA) { - IWL_WARN(mvm, - "reached %d old SN frames from %pM on queue %d, stopping BA session on TID %d\n", - IWL_MVM_AMPDU_CONSEC_DROPS_DELBA, - sta->addr, queue, tid); - ieee80211_stop_rx_ba_session(mvmsta->vif, BIT(tid), sta->addr); - buffer->consec_oldsn_prev_drop = 0; - buffer->consec_oldsn_drops = 0; - } -} - /* * Returns true if the MPDU was buffered\dropped, false if it should be passed * to upper layer. @@ -934,11 +722,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc) { - struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb); struct iwl_mvm_baid_data *baid_data; struct iwl_mvm_reorder_buffer *buffer; - struct sk_buff *tail; u32 reorder = le32_to_cpu(desc->reorder_data); bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU; bool last_subframe = @@ -955,6 +741,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >> IWL_RX_MPDU_REORDER_BAID_SHIFT; + if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000) + return false; + /* * This also covers the case of receiving a Block Ack Request * outside a BA session; we'll pass it to mac80211 and that @@ -1016,59 +805,18 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, buffer->valid = true; } - if (ieee80211_is_back_req(hdr->frame_control)) { - iwl_mvm_release_frames(mvm, sta, napi, baid_data, - buffer, nssn, 0); + /* drop any duplicated packets */ + if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_DUPLICATE)) goto drop; - } - - /* - * If there was a significant jump in the nssn - adjust. - * If the SN is smaller than the NSSN it might need to first go into - * the reorder buffer, in which case we just release up to it and the - * rest of the function will take care of storing it and releasing up to - * the nssn. - * This should not happen. This queue has been lagging and it should - * have been updated by a IWL_MVM_RXQ_NSSN_SYNC notification. Be nice - * and update the other queues. - */ - if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size, - buffer->buf_size) || - !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) { - u16 min_sn = ieee80211_sn_less(sn, nssn) ? 
sn : nssn; - - iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, - min_sn, IWL_MVM_RELEASE_SEND_RSS_SYNC); - } - - iwl_mvm_oldsn_workaround(mvm, sta, tid, buffer, reorder, - rx_status->device_timestamp, queue); /* drop any oudated packets */ - if (ieee80211_sn_less(sn, buffer->head_sn)) + if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) goto drop; /* release immediately if allowed by nssn and no stored frames */ if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) { - if (iwl_mvm_is_sn_less(buffer->head_sn, nssn, - buffer->buf_size) && - (!amsdu || last_subframe)) { - /* - * If we crossed the 2048 or 0 SN, notify all the - * queues. This is done in order to avoid having a - * head_sn that lags behind for too long. When that - * happens, we can get to a situation where the head_sn - * is within the interval [nssn - buf_size : nssn] - * which will make us think that the nssn is a packet - * that we already freed because of the reordering - * buffer and we will ignore it. So maintain the - * head_sn somewhat updated across all the queues: - * when it crosses 0 and 2048. - */ - if (sn == 2048 || sn == 0) - iwl_mvm_sync_nssn(mvm, baid, sn); + if (!amsdu || last_subframe) buffer->head_sn = nssn; - } /* No need to update AMSDU last SN - we are moving the head */ spin_unlock_bh(&buffer->lock); return false; @@ -1083,37 +831,18 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, * while technically there is no hole and we can move forward. */ if (!buffer->num_stored && sn == buffer->head_sn) { - if (!amsdu || last_subframe) { - if (sn == 2048 || sn == 0) - iwl_mvm_sync_nssn(mvm, baid, sn); + if (!amsdu || last_subframe) buffer->head_sn = ieee80211_sn_inc(buffer->head_sn); - } + /* No need to update AMSDU last SN - we are moving the head */ spin_unlock_bh(&buffer->lock); return false; } - index = sn % buffer->buf_size; - - /* - * Check if we already stored this frame - * As AMSDU is either received or not as whole, logic is simple: - * If we have frames in that position in the buffer and the last frame - * originated from AMSDU had a different SN then it is a retransmission. - * If it is the same SN then if the subframe index is incrementing it - * is the same AMSDU - otherwise it is a retransmission. 
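The retransmission check being deleted here worked as the comment describes: a frame already stored in the slot means a retransmission, except for a continuing A-MSDU, identified by an unchanged SN with a growing subframe index (the firmware's duplicate status now covers this case). A sketch of the removed heuristic, with invented names:

```c
#include <stdbool.h>
#include <stdio.h>

/* With a frame already in the reorder slot, a non-A-MSDU is a
 * retransmission; an A-MSDU is one too unless it keeps the same SN
 * and its subframe index keeps growing.
 */
static bool is_retx(bool slot_occupied, bool amsdu, unsigned int sn,
                    unsigned int last_amsdu_sn, unsigned int sub_idx,
                    unsigned int last_sub_idx)
{
        if (!slot_occupied)
                return false;
        if (!amsdu)
                return true;
        return sn != last_amsdu_sn || last_sub_idx >= sub_idx;
}

int main(void)
{
        /* second subframe of the same A-MSDU: not a retransmission */
        printf("%d\n", is_retx(true, true, 100, 100, 1, 0));        /* 0 */
        /* same subframe seen again: retransmission */
        printf("%d\n", is_retx(true, true, 100, 100, 0, 0));        /* 1 */
        return 0;
}
```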
- */ - tail = skb_peek_tail(&entries[index].e.frames); - if (tail && !amsdu) - goto drop; - else if (tail && (sn != buffer->last_amsdu || - buffer->last_sub_index >= sub_frame_idx)) - goto drop; - /* put in reorder buffer */ - __skb_queue_tail(&entries[index].e.frames, skb); + index = sn % buffer->buf_size; + __skb_queue_tail(&entries[index].frames, skb); buffer->num_stored++; - entries[index].e.reorder_time = jiffies; if (amsdu) { buffer->last_amsdu = sn; @@ -1133,8 +862,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, */ if (!amsdu || last_subframe) iwl_mvm_release_frames(mvm, sta, napi, baid_data, - buffer, nssn, - IWL_MVM_RELEASE_SEND_RSS_SYNC); + buffer, nssn); spin_unlock_bh(&buffer->lock); return true; @@ -1507,7 +1235,7 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm, #define IWL_RX_RU_DATA_A1 2 #define IWL_RX_RU_DATA_A2 2 #define IWL_RX_RU_DATA_B1 2 -#define IWL_RX_RU_DATA_B2 3 +#define IWL_RX_RU_DATA_B2 4 #define IWL_RX_RU_DATA_C1 3 #define IWL_RX_RU_DATA_C2 3 #define IWL_RX_RU_DATA_D1 4 @@ -2562,6 +2290,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, iwl_mvm_rx_csum(mvm, sta, skb, pkt); if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) { + IWL_DEBUG_DROP(mvm, "Dropping duplicate packet 0x%x\n", + le16_to_cpu(hdr->seq_ctrl)); kfree_skb(skb); goto out; } @@ -2613,9 +2343,15 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc) && likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr2)) && - likely(!iwl_mvm_mei_filter_scan(mvm, skb))) + likely(!iwl_mvm_mei_filter_scan(mvm, skb))) { + if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 && + (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) && + !(desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME)) + rx_status->flag |= RX_FLAG_AMSDU_MORE; + iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta, link_sta); + } out: rcu_read_unlock(); } @@ -2758,7 +2494,7 @@ void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, iwl_mvm_release_frames_from_notif(mvm, napi, release->baid, le16_to_cpu(release->nssn), - queue, 0); + queue); } void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, @@ -2799,7 +2535,10 @@ void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, tid)) goto out; - iwl_mvm_release_frames_from_notif(mvm, napi, baid, nssn, queue, 0); + IWL_DEBUG_DROP(mvm, "Received a BAR, expect packet loss: nssn %d\n", + nssn); + + iwl_mvm_release_frames_from_notif(mvm, napi, baid, nssn, queue); out: rcu_read_unlock(); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index c1d9ce753468..75c5c58e14a5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -2342,7 +2342,7 @@ iwl_mvm_scan_umac_fill_general_p_v12(struct iwl_mvm *mvm, if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2) gp->num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS; - if (version < 12) { + if (version < 16) { gp->scan_start_mac_or_link_id = scan_vif->id; } else { struct iwl_mvm_vif_link_info *link_info; @@ -3408,7 +3408,7 @@ int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify) if (!(mvm->scan_status & type)) return 0; - if (iwl_mvm_is_radio_killed(mvm)) { + if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) { ret = 0; goto out; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c 
b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 3b9a343d4f67..bba96a968890 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -827,7 +827,7 @@ static int iwl_mvm_get_queue_size(struct ieee80211_sta *sta) if (!link) continue; - /* support for 1k ba size */ + /* support for 512 ba size */ if (link->eht_cap.has_eht && max_size < IWL_DEFAULT_QUEUE_SIZE_EHT) max_size = IWL_DEFAULT_QUEUE_SIZE_EHT; @@ -865,11 +865,11 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, if (sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct ieee80211_link_sta *link_sta; unsigned int link_id; - for (link_id = 0; - link_id < ARRAY_SIZE(mvmsta->link); - link_id++) { + rcu_read_lock(); + for_each_sta_active_link(mvmsta->vif, sta, link_sta, link_id) { struct iwl_mvm_link_sta *link = rcu_dereference_protected(mvmsta->link[link_id], lockdep_is_held(&mvm->mutex)); @@ -879,6 +879,7 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, sta_mask |= BIT(link->sta_id); } + rcu_read_unlock(); } else { sta_mask |= BIT(sta_id); } @@ -2059,7 +2060,8 @@ bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif, *status = IWL_MVM_QUEUE_FREE; } - if (vif->type == NL80211_IFTYPE_STATION) { + if (vif->type == NL80211_IFTYPE_STATION && + mvm_link->ap_sta_id == sta_id) { /* if associated - we can't remove the AP STA now */ if (vif->cfg.assoc) return true; @@ -2097,7 +2099,8 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, return ret; /* flush its queues here since we are freeing mvm_sta */ - ret = iwl_mvm_flush_sta(mvm, mvm_sta, false); + ret = iwl_mvm_flush_sta(mvm, mvm_sta->deflink.sta_id, + mvm_sta->tfd_queue_msk); if (ret) return ret; if (iwl_mvm_has_new_tx_api(mvm)) { @@ -2408,7 +2411,8 @@ void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); - iwl_mvm_flush_sta(mvm, &mvmvif->deflink.bcast_sta, true); + iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id, + mvmvif->deflink.bcast_sta.tfd_queue_msk); switch (vif->type) { case NL80211_IFTYPE_AP: @@ -2664,7 +2668,8 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - iwl_mvm_flush_sta(mvm, &mvmvif->deflink.mcast_sta, true); + iwl_mvm_flush_sta(mvm, mvmvif->deflink.mcast_sta.sta_id, + mvmvif->deflink.mcast_sta.tfd_queue_msk); iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.mcast_sta.sta_id, &mvmvif->deflink.cab_queue, 0); @@ -2714,18 +2719,9 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm, WARN_ON(1); for (j = 0; j < reorder_buf->buf_size; j++) - __skb_queue_purge(&entries[j].e.frames); - /* - * Prevent timer re-arm. This prevents a very far fetched case - * where we timed out on the notification. There may be prior - * RX frames pending in the RX queue before the notification - * that might get processed between now and the actual deletion - * and we would re-arm the timer although we are deleting the - * reorder buffer. 
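For context, the guard this now-removed comment describes looked roughly like the sketch below (a schematic model of the deleted pattern; the real code set the flag under the buffer spinlock and then called del_timer_sync() so an already-running timer could finish):

#include <stdbool.h>

struct reorder_buf_model {
	bool removed;	/* set during teardown, checked by late paths */
};

/* teardown: flag the buffer first, then synchronously kill the timer */
static void reorder_buf_teardown(struct reorder_buf_model *buf)
{
	buf->removed = true;
}

/* timer/RX path: refuse to re-arm once teardown has started */
static bool may_rearm_timer(const struct reorder_buf_model *buf)
{
	return !buf->removed;
}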
- */ - reorder_buf->removed = true; + __skb_queue_purge(&entries[j].frames); + spin_unlock_bh(&reorder_buf->lock); - del_timer_sync(&reorder_buf->reorder_timer); } } @@ -2745,15 +2741,12 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, reorder_buf->num_stored = 0; reorder_buf->head_sn = ssn; reorder_buf->buf_size = buf_size; - /* rx reorder timer */ - timer_setup(&reorder_buf->reorder_timer, - iwl_mvm_reorder_timer_expired, 0); spin_lock_init(&reorder_buf->lock); reorder_buf->mvm = mvm; reorder_buf->queue = i; reorder_buf->valid = false; for (j = 0; j < reorder_buf->buf_size; j++) - __skb_queue_head_init(&entries[j].e.frames); + __skb_queue_head_init(&entries[j].frames); } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 7364346a1209..b33a0ce096d4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2016 Intel Deutschland GmbH */ @@ -286,12 +286,10 @@ struct iwl_mvm_key_pn { * * @IWL_MVM_RXQ_EMPTY: empty sync notification * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA - * @IWL_MVM_RXQ_NSSN_SYNC: notify all the RSS queues with the new NSSN */ enum iwl_mvm_rxq_notif_type { IWL_MVM_RXQ_EMPTY, IWL_MVM_RXQ_NOTIF_DEL_BA, - IWL_MVM_RXQ_NSSN_SYNC, }; /** @@ -315,11 +313,6 @@ struct iwl_mvm_delba_data { u32 baid; } __packed; -struct iwl_mvm_nssn_sync_data { - u32 baid; - u32 nssn; -} __packed; - /** * struct iwl_mvm_rxq_dup_data - per station per rx queue data * @last_seq: last sequence per tid for duplicate packet detection @@ -356,6 +349,7 @@ struct iwl_mvm_link_sta { /** * struct iwl_mvm_sta - representation of a station in the driver + * @vif: the interface the station belongs to * @tfd_queue_msk: the tfd queues used by the station * @mac_id_n_color: the MAC context this station is linked to * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for @@ -380,6 +374,7 @@ struct iwl_mvm_link_sta { * @amsdu_enabled: bitmap of TX AMSDU allowed TIDs. * In case TLC offload is not active it is either 0xFFFF or 0. * @max_amsdu_len: max AMSDU length + * @sleeping: indicates the station is sleeping (when not offloaded to FW) * @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON) * @sleeping: sta sleep transitions in power management * @sleep_tx_count: the number of frames that we told the firmware to let out @@ -389,7 +384,6 @@ struct iwl_mvm_link_sta { * the BA window. To be used for UAPSD only. * @ptk_pn: per-queue PTK PN data structures * @dup_data: per queue duplicate packet detection data - * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID * @tx_ant: the index of the antenna to use for data tx to this station. Only * used during connection establishment (e.g. for the 4 way handshake * exchange). 
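The @dup_data field documented above backs the duplicate drop added earlier in rxmq.c (iwl_mvm_is_dup with its new IWL_DEBUG_DROP message). A minimal model of per-queue, per-TID duplicate detection, assuming the check keys on the retry bit plus the last seen sequence value; the real check is more involved (A-MSDU subframes, frame-type handling):

#include <stdbool.h>
#include <stdint.h>

#define MODEL_NUM_TIDS 16

struct dup_data_model {
	uint16_t last_seq[MODEL_NUM_TIDS];	/* last seq_ctrl per TID */
};

static bool is_dup_frame(struct dup_data_model *dd, uint8_t tid,
			 uint16_t seq_ctrl, bool retry_bit_set)
{
	if (retry_bit_set && dd->last_seq[tid] == seq_ctrl)
		return true;	/* retry of a frame we already delivered */
	dd->last_seq[tid] = seq_ctrl;
	return false;
}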
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c index dae6f2a1aad9..e7d5f4ebeb25 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c @@ -2,7 +2,7 @@ /* * Copyright (C) 2014 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020, 2022 Intel Corporation + * Copyright (C) 2018-2020, 2022-2023 Intel Corporation */ #include <linux/etherdevice.h> #include "mvm.h" @@ -144,7 +144,8 @@ void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif, } void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + unsigned int link_id) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int; @@ -154,7 +155,7 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) iwl_mvm_schedule_session_protection(mvm, vif, duration, - duration, true); + duration, true, link_id); else iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 5f0e7144a951..218fdf1ed530 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -42,6 +42,7 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, te_data->uid = 0; te_data->id = TE_MAX; te_data->vif = NULL; + te_data->link_id = -1; } void iwl_mvm_roc_done_wk(struct work_struct *wk) @@ -78,9 +79,29 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) */ if (!WARN_ON(!mvm->p2p_device_vif)) { - mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif); - iwl_mvm_flush_sta(mvm, &mvmvif->deflink.bcast_sta, - true); + struct ieee80211_vif *vif = mvm->p2p_device_vif; + + mvmvif = iwl_mvm_vif_from_mac80211(vif); + iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id, + mvmvif->deflink.bcast_sta.tfd_queue_msk); + + if (mvm->mld_api_is_used) { + iwl_mvm_mld_rm_bcast_sta(mvm, vif, + &vif->bss_conf); + + iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, + LINK_CONTEXT_MODIFY_ACTIVE, + false); + } else { + iwl_mvm_rm_p2p_bcast_sta(mvm, vif); + iwl_mvm_binding_remove_vif(mvm, vif); + } + + /* Do not remove the PHY context as removing and adding + * a PHY context has timing overheads. Leaving it + * configured in FW would be useful in case the next ROC + * is with the same channel. 
+ */ } } @@ -93,7 +114,8 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) */ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) { /* do the same in case of hot spot 2.0 */ - iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true); + iwl_mvm_flush_sta(mvm, mvm->aux_sta.sta_id, + mvm->aux_sta.tfd_queue_msk); if (mvm->mld_api_is_used) { iwl_mvm_mld_rm_aux_sta(mvm); @@ -223,7 +245,7 @@ iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm, } iwl_mvm_csa_client_absent(mvm, te_data->vif); cancel_delayed_work(&mvmvif->csa_work); - ieee80211_chswitch_done(te_data->vif, true); + ieee80211_chswitch_done(te_data->vif, true, 0); break; default: /* should never happen */ @@ -378,6 +400,22 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, } } +void iwl_mvm_rx_roc_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_roc_notif *notif = (void *)pkt->data; + + if (le32_to_cpu(notif->success) && le32_to_cpu(notif->started) && + le32_to_cpu(notif->activity) == ROC_ACTIVITY_HOTSPOT) { + set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); + ieee80211_ready_on_channel(mvm->hw); + } else { + iwl_mvm_roc_finished(mvm); + ieee80211_remain_on_channel_expired(mvm->hw); + } +} + /* * Handle A Aux ROC time event */ @@ -651,19 +689,46 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm, } } +/* Determine whether mac or link id should be used, and validate the link id */ +static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 link_id) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(MAC_CONF_GROUP, + SESSION_PROTECTION_CMD), 1); + + if (ver < 2) + return mvmvif->id; + + if (WARN(link_id < 0 || !mvmvif->link[link_id], + "Invalid link ID for session protection: %u\n", link_id)) + return -EINVAL; + + if (WARN(ieee80211_vif_is_mld(vif) && + !(vif->active_links & BIT(link_id)), + "Session Protection on an inactive link: %u\n", link_id)) + return -EINVAL; + + return mvmvif->link[link_id]->fw_link_id; +} + static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm, - struct iwl_mvm_vif *mvmvif, - u32 id) + struct ieee80211_vif *vif, + u32 id, u32 link_id) { + int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id); struct iwl_mvm_session_prot_cmd cmd = { - .id_and_color = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, - mvmvif->color)), + .id_and_color = cpu_to_le32(mac_link_id), .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), .conf_id = cpu_to_le32(id), }; int ret; + if (mac_link_id < 0) + return; + ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD), 0, sizeof(cmd), &cmd); @@ -677,10 +742,12 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm, u32 *uid) { u32 id; + struct ieee80211_vif *vif = te_data->vif; struct iwl_mvm_vif *mvmvif; enum nl80211_iftype iftype; + unsigned int link_id; - if (!te_data->vif) + if (!vif) return false; mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); @@ -695,6 +762,7 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm, /* Save time event uid before clearing its data */ *uid = te_data->uid; id = te_data->id; + link_id = te_data->link_id; /* * The clear_data function handles time events that were already removed @@ -712,7 +780,8 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm, id != HOT_SPOT_CMD) { if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) { /* Session protection is still ongoing. 
Cancel it */ - iwl_mvm_cancel_session_protection(mvm, mvmvif, id); + iwl_mvm_cancel_session_protection(mvm, vif, id, + link_id); if (iftype == NL80211_IFTYPE_P2P_DEVICE) { iwl_mvm_p2p_roc_finished(mvm); } @@ -829,18 +898,41 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data; + unsigned int ver = + iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(MAC_CONF_GROUP, + SESSION_PROTECTION_CMD), 2); + int id = le32_to_cpu(notif->mac_link_id); struct ieee80211_vif *vif; struct iwl_mvm_vif *mvmvif; + unsigned int notif_link_id; rcu_read_lock(); - vif = iwl_mvm_rcu_dereference_vif_id(mvm, le32_to_cpu(notif->mac_id), - true); + + if (ver <= 2) { + vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true); + } else { + struct ieee80211_bss_conf *link_conf = + iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, id, true); + + if (!link_conf) + goto out_unlock; + + notif_link_id = link_conf->link_id; + vif = link_conf->vif; + } if (!vif) goto out_unlock; mvmvif = iwl_mvm_vif_from_mac80211(vif); + if (WARN(ver > 2 && mvmvif->time_event_data.link_id >= 0 && + mvmvif->time_event_data.link_id != notif_link_id, + "SESION_PROTECTION_NOTIF was received for link %u, while the current time event is on link %u\n", + notif_link_id, mvmvif->time_event_data.link_id)) + goto out_unlock; + /* The vif is not a P2P_DEVICE, maintain its time_event_data */ if (vif->type != NL80211_IFTYPE_P2P_DEVICE) { struct iwl_mvm_time_event_data *te_data = @@ -880,8 +972,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) { /* End TE, notify mac80211 */ mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID; - ieee80211_remain_on_channel_expired(mvm->hw); iwl_mvm_p2p_roc_finished(mvm); + ieee80211_remain_on_channel_expired(mvm->hw); } else if (le32_to_cpu(notif->start)) { if (WARN_ON(mvmvif->time_event_data.id != le32_to_cpu(notif->conf_id))) @@ -903,8 +995,7 @@ iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_session_prot_cmd cmd = { .id_and_color = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, - mvmvif->color)), + cpu_to_le32(iwl_mvm_get_session_prot_id(mvm, vif, 0)), .action = cpu_to_le32(FW_CTXT_ACTION_ADD), .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)), }; @@ -914,6 +1005,9 @@ iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm, /* The time_event_data.id field is reused to save session * protection's configuration. 
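Because the notification's id field changed meaning across versions (a MAC id before v3, a FW link id from v3 on), the handler above forks on the looked-up version. Schematically, with stubbed lookups standing in for the driver's RCU dereference helpers:

#include <stdint.h>
#include <stddef.h>

struct vif_model  { int placeholder; };
struct link_model { unsigned int link_id; struct vif_model *vif; };

/* stand-ins for the driver's RCU id-to-object lookups */
static struct vif_model *mac_id_to_vif(uint32_t id)
{
	(void)id;
	return NULL;
}

static struct link_model *fw_link_id_to_link(uint32_t id)
{
	(void)id;
	return NULL;
}

static struct vif_model *notif_owner(unsigned int notif_ver, uint32_t id,
				     unsigned int *link_id_out)
{
	struct link_model *link;

	if (notif_ver <= 2)
		return mac_id_to_vif(id);	/* id is a MAC id */

	link = fw_link_id_to_link(id);		/* id is a FW link id */
	if (!link)
		return NULL;
	*link_id_out = link->link_id;
	return link->vif;
}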
*/ + + mvmvif->time_event_data.link_id = 0; + switch (type) { case IEEE80211_ROC_TYPE_NORMAL: mvmvif->time_event_data.id = @@ -1030,6 +1124,37 @@ void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm) __iwl_mvm_remove_time_event(mvm, te_data, &uid); } +static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity) +{ + int ret; + struct iwl_roc_req roc_cmd = { + .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), + .activity = cpu_to_le32(activity), + }; + + lockdep_assert_held(&mvm->mutex); + ret = iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(MAC_CONF_GROUP, ROC_CMD), + 0, sizeof(roc_cmd), &roc_cmd); + WARN_ON(ret); +} + +static void iwl_mvm_roc_station_remove(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvmvif) +{ + u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, ROC_CMD); + u8 fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, + IWL_FW_CMD_VER_UNKNOWN); + + if (fw_ver == IWL_FW_CMD_VER_UNKNOWN) + iwl_mvm_remove_aux_roc_te(mvm, mvmvif, + &mvmvif->hs_time_event_data); + else if (fw_ver == 3) + iwl_mvm_roc_rm_cmd(mvm, ROC_ACTIVITY_HOTSPOT); + else + IWL_ERR(mvm, "ROC command version %d mismatch!\n", fw_ver); +} + void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif; @@ -1040,12 +1165,12 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) mvmvif = iwl_mvm_vif_from_mac80211(vif); if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { - iwl_mvm_cancel_session_protection(mvm, mvmvif, - mvmvif->time_event_data.id); + iwl_mvm_cancel_session_protection(mvm, vif, + mvmvif->time_event_data.id, + mvmvif->time_event_data.link_id); iwl_mvm_p2p_roc_finished(mvm); } else { - iwl_mvm_remove_aux_roc_te(mvm, mvmvif, - &mvmvif->hs_time_event_data); + iwl_mvm_roc_station_remove(mvm, mvmvif); iwl_mvm_roc_finished(mvm); } @@ -1164,25 +1289,28 @@ static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait, void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 duration, u32 min_duration, - bool wait_for_notif) + bool wait_for_notif, + unsigned int link_id) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) }; struct iwl_notification_wait wait_notif; + int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id); struct iwl_mvm_session_prot_cmd cmd = { - .id_and_color = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, - mvmvif->color)), + .id_and_color = cpu_to_le32(mac_link_id), .action = cpu_to_le32(FW_CTXT_ACTION_ADD), .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC), .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)), }; + if (mac_link_id < 0) + return; + lockdep_assert_held(&mvm->mutex); spin_lock_bh(&mvm->time_event_lock); - if (te_data->running && + if (te_data->running && te_data->link_id == link_id && time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) { IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n", jiffies_to_msecs(te_data->end_jiffies - jiffies)); @@ -1199,6 +1327,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, te_data->id = le32_to_cpu(cmd.conf_id); te_data->duration = le32_to_cpu(cmd.duration_tu); te_data->vif = vif; + te_data->link_id = link_id; spin_unlock_bh(&mvm->time_event_lock); IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n", @@ -1208,11 +1337,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD), 0, 
sizeof(cmd), &cmd)) { - IWL_ERR(mvm, - "Couldn't send the SESSION_PROTECTION_CMD\n"); - spin_lock_bh(&mvm->time_event_lock); - iwl_mvm_te_clear_data(mvm, te_data); - spin_unlock_bh(&mvm->time_event_lock); + goto send_cmd_err; } return; @@ -1225,12 +1350,19 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD), 0, sizeof(cmd), &cmd)) { - IWL_ERR(mvm, - "Couldn't send the SESSION_PROTECTION_CMD\n"); iwl_remove_notification(&mvm->notif_wait, &wait_notif); + goto send_cmd_err; } else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif, TU_TO_JIFFIES(100))) { IWL_ERR(mvm, "Failed to protect session until session protection\n"); } + return; + +send_cmd_err: + IWL_ERR(mvm, + "Couldn't send the SESSION_PROTECTION_CMD\n"); + spin_lock_bh(&mvm->time_event_lock); + iwl_mvm_te_clear_data(mvm, te_data); + spin_unlock_bh(&mvm->time_event_lock); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h index 989a5319fb21..49256ba4cf58 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2019-2020 Intel Corporation + * Copyright (C) 2012-2014, 2019-2020, 2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH */ #ifndef __time_event_h__ @@ -101,6 +101,14 @@ void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /** + * iwl_mvm_rx_roc_notif - handles %DISCOVERY_ROC_NTF. + * @mvm: the mvm component + * @rxb: RX buffer + */ +void iwl_mvm_rx_roc_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); + +/** * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality * @mvm: the mvm component * @vif: the virtual interface for which the roc is requested. It is assumed @@ -134,7 +142,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif); /** * iwl_mvm_remove_time_event - general function to clean up of time event * @mvm: the mvm component - * @vif: the vif to which the time event belongs + * @mvmvif: the vif to which the time event belongs * @te_data: the time event data that corresponds to that time event * * This function can be used to cancel a time event regardless its type. 
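The send_cmd_err change in time-event.c above is the usual single-exit error idiom: both send sites now funnel into one label instead of duplicating the error print and the locked te_data reset. The shape of the refactor, with stubbed helpers rather than the driver API:

#include <stdio.h>

static int send_cmd(void)
{
	return -1;	/* stub: pretend the send fails */
}

static void clear_te_data(void)
{
	/* stub for the cleanup done under time_event_lock */
}

static void schedule_protection(int wait_for_notif)
{
	if (!wait_for_notif) {
		if (send_cmd())
			goto send_cmd_err;
		return;
	}

	if (send_cmd()) {
		/* the driver also removes its notification waiter here */
		goto send_cmd_err;
	}
	return;

send_cmd_err:
	fprintf(stderr, "Couldn't send the SESSION_PROTECTION_CMD\n");
	clear_te_data();
}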
@@ -195,16 +203,21 @@ iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data) * iwl_mvm_schedule_session_protection - schedule a session protection * @mvm: the mvm component * @vif: the virtual interface for which the protection issued - * @duration: the duration of the protection + * @duration: the requested duration of the protection + * @min_duration: the minimum duration of the protection * @wait_for_notif: if true, will block until the start of the protection + * @link_id: The link to schedule a session protection for */ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 duration, u32 min_duration, - bool wait_for_notif); + bool wait_for_notif, + unsigned int link_id); /** * iwl_mvm_rx_session_protect_notif - handles %SESSION_PROTECTION_NOTIF + * @mvm: the mvm component + * @rxb: the RX buffer containing the notification */ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 157e96fa23c1..dee9c367dcd3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -642,7 +642,6 @@ static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device, int trip, int temp) { struct iwl_mvm *mvm = thermal_zone_device_priv(device); - struct iwl_mvm_thermal_device *tzone; int ret; mutex_lock(&mvm->mutex); @@ -658,12 +657,6 @@ static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device, goto out; } - tzone = &mvm->tz_device; - if (!tzone) { - ret = -EIO; - goto out; - } - ret = iwl_mvm_send_temp_report_ths_cmd(mvm); out: mutex_unlock(&mvm->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 36d70d589aed..ae5cd13cd6dd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -262,8 +262,42 @@ static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm, return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; } +static u32 iwl_mvm_convert_rate_idx(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info, + int rate_idx) +{ + u32 rate_flags = 0; + u8 rate_plcp; + bool is_cck; + + /* if the rate isn't a well known legacy rate, take the lowest one */ + if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY) + rate_idx = iwl_mvm_mac_ctxt_get_lowest_rate(mvm, + info, + info->control.vif); + + /* Get PLCP rate for tx_cmd->rate_n_flags */ + rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate_idx); + is_cck = (rate_idx >= IWL_FIRST_CCK_RATE) && + (rate_idx <= IWL_LAST_CCK_RATE); + + /* Set CCK or OFDM flag */ + if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) { + if (!is_cck) + rate_flags |= RATE_MCS_LEGACY_OFDM_MSK; + else + rate_flags |= RATE_MCS_CCK_MSK; + } else if (is_cck) { + rate_flags |= RATE_MCS_CCK_MSK_V1; + } + + return (u32)rate_plcp | rate_flags; +} + static u32 iwl_mvm_get_inject_tx_rate(struct iwl_mvm *mvm, - struct ieee80211_tx_info *info) + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + __le16 fc) { struct ieee80211_tx_rate *rate = &info->control.rates[0]; u32 result; @@ -288,6 +322,9 @@ static u32 iwl_mvm_get_inject_tx_rate(struct iwl_mvm *mvm, result |= u32_encode_bits(2, RATE_MCS_CHAN_WIDTH_MSK_V1); else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH) result |= u32_encode_bits(3, RATE_MCS_CHAN_WIDTH_MSK_V1); + + if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6) + result = 
iwl_new_rate_from_v1(result); } else if (rate->flags & IEEE80211_TX_RC_MCS) { result = RATE_MCS_HT_MSK_V1; result |= u32_encode_bits(rate->idx, @@ -301,12 +338,21 @@ static u32 iwl_mvm_get_inject_tx_rate(struct iwl_mvm *mvm, result |= RATE_MCS_LDPC_MSK_V1; if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC)) result |= RATE_MCS_STBC_MSK; + + if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6) + result = iwl_new_rate_from_v1(result); } else { - return 0; + int rate_idx = info->control.rates[0].idx; + + result = iwl_mvm_convert_rate_idx(mvm, info, rate_idx); } - if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6) - return iwl_new_rate_from_v1(result); + if (info->control.antennas) + result |= u32_encode_bits(info->control.antennas, + RATE_MCS_ANT_AB_MSK); + else + result |= iwl_mvm_get_tx_ant(mvm, info, sta, fc); + return result; } @@ -315,17 +361,8 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, struct ieee80211_sta *sta, __le16 fc) { int rate_idx = -1; - u8 rate_plcp; - u32 rate_flags = 0; - bool is_cck; - if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) { - u32 result = iwl_mvm_get_inject_tx_rate(mvm, info); - - if (result) - return result; - rate_idx = info->control.rates[0].idx; - } else if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) { + if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) { /* info->control is only relevant for non HW rate control */ /* HT rate doesn't make sense for a non data frame */ @@ -350,33 +387,16 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); } - /* if the rate isn't a well known legacy rate, take the lowest one */ - if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY) - rate_idx = iwl_mvm_mac_ctxt_get_lowest_rate(mvm, - info, - info->control.vif); - - /* Get PLCP rate for tx_cmd->rate_n_flags */ - rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate_idx); - is_cck = (rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE); - - /* Set CCK or OFDM flag */ - if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) { - if (!is_cck) - rate_flags |= RATE_MCS_LEGACY_OFDM_MSK; - else - rate_flags |= RATE_MCS_CCK_MSK; - } else if (is_cck) { - rate_flags |= RATE_MCS_CCK_MSK_V1; - } - - return (u32)rate_plcp | rate_flags; + return iwl_mvm_convert_rate_idx(mvm, info, rate_idx); } static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc) { + if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) + return iwl_mvm_get_inject_tx_rate(mvm, info, sta, fc); + return iwl_mvm_get_tx_rate(mvm, info, sta, fc) | iwl_mvm_get_tx_ant(mvm, info, sta, fc); } @@ -536,16 +556,20 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, flags |= IWL_TX_FLAGS_ENCRYPT_DIS; /* - * For data packets rate info comes from the fw. Only - * set rate/antenna during connection establishment or in case - * no station is given. + * For data and mgmt packets rate info comes from the fw. Only + * set rate/antenna for injected frames with fixed rate, or + * when no sta is given. 
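The factored-out iwl_mvm_convert_rate_idx above reduces to one decision, CCK versus OFDM flagging, gated on the TX_CMD version. The decision table, modeled with placeholder bit values (the real masks live in the rate_n_flags definitions):

#include <stdbool.h>
#include <stdint.h>

/* placeholder bit values, not the real rate_n_flags encoding */
#define MODEL_RATE_OFDM	  (1u << 0)
#define MODEL_RATE_CCK	  (1u << 1)
#define MODEL_RATE_CCK_V1 (1u << 2)

#define MODEL_FIRST_CCK_RATE 0
#define MODEL_LAST_CCK_RATE  3	/* 1, 2, 5.5, 11 Mbps */

static uint32_t legacy_rate_flags(int rate_idx, int tx_cmd_ver)
{
	bool is_cck = rate_idx >= MODEL_FIRST_CCK_RATE &&
		      rate_idx <= MODEL_LAST_CCK_RATE;

	/* newer TX_CMD versions mark OFDM explicitly; older ones only
	 * carry a v1 CCK bit and leave OFDM implicit */
	if (tx_cmd_ver > 8)
		return is_cck ? MODEL_RATE_CCK : MODEL_RATE_OFDM;
	return is_cck ? MODEL_RATE_CCK_V1 : 0;
}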
*/ - if (!sta || !ieee80211_is_data(hdr->frame_control) || - mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) { + if (unlikely(!sta || + info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) { flags |= IWL_TX_FLAGS_CMD_RATE; rate_n_flags = iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, hdr->frame_control); + } else if (!ieee80211_is_data(hdr->frame_control) || + mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) { + /* These are important frames */ + flags |= IWL_TX_FLAGS_HIGH_PRI; } if (mvm->trans->trans_cfg->device_family >= @@ -1599,7 +1623,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, seq_ctl = le16_to_cpu(tx_resp->seq_ctl); /* we can free until ssn % q.n_bd not inclusive */ - iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs); + iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs, false); while (!skb_queue_empty(&skbs)) { struct sk_buff *skb = __skb_dequeue(&skbs); @@ -1612,6 +1636,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); memset(&info->status, 0, sizeof(info->status)); + info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED); /* inform mac80211 about what happened with the frame */ switch (status & TX_STATUS_MSK) { @@ -1699,7 +1724,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc); if (likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr1))) - ieee80211_tx_status(mvm->hw, skb); + ieee80211_tx_status_skb(mvm->hw, skb); } /* This is an aggregation queue or might become one, so we use @@ -1950,7 +1975,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, * block-ack window (we assume that they've been successfully * transmitted ... if not, it's too late anyway). */ - iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs); + iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs, is_flush); skb_queue_walk(&reclaimed_skbs, skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -1964,6 +1989,8 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, */ if (!is_flush) info->flags |= IEEE80211_TX_STAT_ACK; + else + info->flags &= ~IEEE80211_TX_STAT_ACK; } /* @@ -2053,7 +2080,7 @@ out: while (!skb_queue_empty(&reclaimed_skbs)) { skb = __skb_dequeue(&reclaimed_skbs); - ieee80211_tx_status(mvm->hw, skb); + ieee80211_tx_status_skb(mvm->hw, skb); } } @@ -2290,24 +2317,10 @@ free_rsp: return ret; } -int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal) +int iwl_mvm_flush_sta(struct iwl_mvm *mvm, u32 sta_id, u32 tfd_queue_mask) { - u32 sta_id, tfd_queue_msk; - - if (internal) { - struct iwl_mvm_int_sta *int_sta = sta; - - sta_id = int_sta->sta_id; - tfd_queue_msk = int_sta->tfd_queue_msk; - } else { - struct iwl_mvm_sta *mvm_sta = sta; - - sta_id = mvm_sta->deflink.sta_id; - tfd_queue_msk = mvm_sta->tfd_queue_msk; - } - if (iwl_mvm_has_new_tx_api(mvm)) return iwl_mvm_flush_sta_tids(mvm, sta_id, 0xffff); - return iwl_mvm_flush_tx_path(mvm, tfd_queue_msk); + return iwl_mvm_flush_tx_path(mvm, tfd_queue_mask); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 48016b4343d2..91286018a69d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -342,6 +342,60 @@ static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait, return true; } +static int iwl_mvm_request_system_statistics(struct iwl_mvm *mvm, bool clear, + u8 cmd_ver) +{ + 
struct iwl_system_statistics_cmd system_cmd = { + .cfg_mask = clear ? + cpu_to_le32(IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK) : + cpu_to_le32(IWL_STATS_CFG_FLG_RESET_MSK | + IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK), + .type_id_mask = cpu_to_le32(IWL_STATS_NTFY_TYPE_ID_OPER | + IWL_STATS_NTFY_TYPE_ID_OPER_PART1), + }; + struct iwl_host_cmd cmd = { + .id = WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_CMD), + .len[0] = sizeof(system_cmd), + .data[0] = &system_cmd, + }; + struct iwl_notification_wait stats_wait; + static const u16 stats_complete[] = { + WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF), + }; + int ret; + + if (cmd_ver != 1) { + IWL_FW_CHECK_FAILED(mvm, + "Invalid system statistics command version:%d\n", + cmd_ver); + return -EOPNOTSUPP; + } + + iwl_init_notification_wait(&mvm->notif_wait, &stats_wait, + stats_complete, ARRAY_SIZE(stats_complete), + NULL, NULL); + + mvm->statistics_clear = clear; + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) { + iwl_remove_notification(&mvm->notif_wait, &stats_wait); + return ret; + } + + /* 500ms for OPERATIONAL, PART1 and END notification should be enough + * for FW to collect data from all LMACs and send + * STATISTICS_NOTIFICATION to host + */ + ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 2); + if (ret) + return ret; + + if (clear) + iwl_mvm_accu_radio_stats(mvm); + + return ret; +} + int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear) { struct iwl_statistics_cmd scmd = { @@ -353,8 +407,15 @@ int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear) .len[0] = sizeof(scmd), .data[0] = &scmd, }; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(SYSTEM_GROUP, + SYSTEM_STATISTICS_CMD), + IWL_FW_CMD_VER_UNKNOWN); int ret; + if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN) + return iwl_mvm_request_system_statistics(mvm, clear, cmd_ver); + /* From version 15 - STATISTICS_NOTIFICATION, the reply for * STATISTICS_CMD is empty, and the response is with * STATISTICS_NOTIFICATION notification diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 73c1fb3c0c5e..26a0953603ab 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2021 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -1132,15 +1132,9 @@ static int get_crf_id(struct iwl_trans *iwl_trans) else sd_reg_ver_addr = SD_REG_VER; - if (!iwl_trans_grab_nic_access(iwl_trans)) { - IWL_ERR(iwl_trans, "Failed to grab nic access before reading crf id\n"); - ret = -EIO; - goto out; - } - /* Enable access to peripheral registers */ val = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG); - val |= ENABLE_WFPM; + val |= WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK; iwl_write_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG, val); /* Read crf info */ @@ -1157,9 +1151,6 @@ static int get_crf_id(struct iwl_trans *iwl_trans) iwl_trans->hw_crf_id, iwl_trans->hw_cnv_id, iwl_trans->hw_wfpm_id); - iwl_trans_release_nic_access(iwl_trans); - -out: return ret; } @@ -1205,6 +1196,9 @@ static int map_crf_id(struct iwl_trans *iwl_trans) case REG_CRF_ID_TYPE_FMR: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12); break; + case REG_CRF_ID_TYPE_WHP: + iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_WH << 12); + break; default: ret = -EIO; IWL_ERR(iwl_trans, @@ -1351,6 
+1345,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_free_trans; if (iwl_trans_grab_nic_access(iwl_trans)) { + get_crf_id(iwl_trans); /* all good */ iwl_trans_release_nic_access(iwl_trans); } else { @@ -1360,7 +1355,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID); - get_crf_id(iwl_trans); /* * The RF_ID is set to zero in blank OTP so read version to diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 0adcf0e13e85..56def20374f3 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2003-2015, 2018-2022 Intel Corporation + * Copyright (C) 2003-2015, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -58,10 +58,7 @@ struct iwl_rx_mem_buffer { bool invalid; }; -/** - * struct isr_statistics - interrupt statistics - * - */ +/* interrupt statistics */ struct isr_statistics { u32 hw; u32 sw; @@ -127,6 +124,8 @@ struct iwl_rx_completion_desc_bz { * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd) * @read: Shared index to newest available Rx buffer * @write: Shared index to oldest written Rx packet + * @write_actual: actual write pointer written to device, since we update in + * blocks of 8 only * @free_count: Number of pre-allocated buffers in rx_free * @used_count: Number of RBDs handled to allocator to use for allocation * @write_actual: @@ -135,10 +134,12 @@ struct iwl_rx_completion_desc_bz { * @need_update: flag to indicate we need to update read/write index * @rb_stts: driver's pointer to receive buffer status * @rb_stts_dma: bus address of receive buffer status - * @lock: + * @lock: per-queue lock * @queue: actual rx queue. Not used for multi-rx queue. 
* @next_rb_is_fragment: indicates that the previous RB that we handled set * the fragmented flag, so the next one is still another fragment + * @napi: NAPI struct for this queue + * @queue_size: size of this queue * * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers */ @@ -188,19 +189,20 @@ struct iwl_rb_allocator { /** * iwl_get_closed_rb_stts - get closed rb stts from different structs - * @rxq - the rxq to get the rb stts from + * @trans: transport pointer (for configuration) + * @rxq: the rxq to get the rb stts from */ -static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans, - struct iwl_rxq *rxq) +static inline u16 iwl_get_closed_rb_stts(struct iwl_trans *trans, + struct iwl_rxq *rxq) { if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { __le16 *rb_stts = rxq->rb_stts; - return READ_ONCE(*rb_stts); + return le16_to_cpu(READ_ONCE(*rb_stts)); } else { struct iwl_rb_status *rb_stts = rxq->rb_stts; - return READ_ONCE(rb_stts->closed_rb_num); + return le16_to_cpu(READ_ONCE(rb_stts->closed_rb_num)) & 0xFFF; } } @@ -243,6 +245,7 @@ enum iwl_image_response_code { IWL_IMAGE_RESP_FAIL = 2, }; +#ifdef CONFIG_IWLWIFI_DEBUGFS /** * struct cont_rec: continuous recording data structure * @prev_wr_ptr: the last address that was read in monitor_data @@ -253,7 +256,6 @@ enum iwl_image_response_code { * in &iwl_fw_mon_dbgfs_state enum * @mutex: locked while reading from monitor_data debugfs file */ -#ifdef CONFIG_IWLWIFI_DEBUGFS struct cont_rec { u32 prev_wr_ptr; u32 prev_wrap_cnt; @@ -298,10 +300,6 @@ enum iwl_pcie_imr_status { * @prph_info_dma_addr: dma addr of prph info * @prph_scratch_dma_addr: dma addr of prph scratch * @ctxt_info_dma_addr: dma addr of context information - * @init_dram: DRAM data of firmware image (including paging). - * Context information addresses will be taken from here. - * This is driver's local copy for keeping track of size and - * count for allocating and freeing the memory. * @iml: image loader image virtual address * @iml_dma_addr: image loader image DMA address * @trans: pointer to the generic transport area @@ -315,17 +313,15 @@ enum iwl_pcie_imr_status { * @ucode_write_complete: indicates that the ucode has been copied. 
* @ucode_write_waitq: wait queue for uCode load * @cmd_queue - command queue number - * @def_rx_queue - default rx queue number * @rx_buf_size: Rx buffer size * @scd_set_active: should the transport configure the SCD for HCMD queue * @rx_page_order: page order for receive buffer size * @rx_buf_bytes: RX buffer (RB) size in bytes * @reg_lock: protect hw register access * @mutex: to protect stop_device / start_fw / start_hw - * @cmd_in_flight: true when we have a host command in flight -#ifdef CONFIG_IWLWIFI_DEBUGFS * @fw_mon_data: fw continuous recording data -#endif + * @cmd_hold_nic_awake: indicates NIC is held awake for APMG workaround + * during commands in flight * @msix_entries: array of MSI-X entries * @msix_enabled: true if managed to enable MSI-X * @shared_vec_mask: the type of causes the shared vector handles @@ -345,8 +341,32 @@ enum iwl_pcie_imr_status { * @alloc_page: allocated page to still use parts of * @alloc_page_used: how much of the allocated page was already used (bytes) * @imr_status: imr dma state machine - * @wait_queue_head_t: imr wait queue for dma completion + * @imr_waitq: imr wait queue for dma completion * @rf_name: name/version of the CRF, if any + * @use_ict: whether or not ICT (interrupt table) is used + * @ict_index: current ICT read index + * @ict_tbl: ICT table pointer + * @ict_tbl_dma: ICT table DMA address + * @inta_mask: interrupt (INT-A) mask + * @irq_lock: lock to synchronize IRQ handling + * @txq_memory: TXQ allocation array + * @sx_waitq: waitqueue for Sx transitions + * @sx_complete: completion for Sx transitions + * @pcie_dbg_dumped_once: indicates PCIe regs were dumped already + * @opmode_down: indicates opmode went away + * @num_rx_bufs: number of RX buffers to allocate/use + * @no_reclaim_cmds: special commands not using reclaim flow + * (firmware workaround) + * @n_no_reclaim_cmds: number of special commands not using reclaim flow + * @affinity_mask: IRQ affinity mask for each RX queue + * @debug_rfkill: RF-kill debugging state, -1 for unset, 0/1 for radio + * enable/disable + * @fw_reset_handshake: indicates FW reset handshake is needed + * @fw_reset_state: state of FW reset handshake + * @fw_reset_waitq: waitqueue for FW reset handshake + * @is_down: indicates the NIC is down + * @isr_stats: interrupt statistics + * @napi_dev: (fake) netdev for NAPI registration */ struct iwl_trans_pcie { struct iwl_rxq *rxq; @@ -398,7 +418,6 @@ struct iwl_trans_pcie { wait_queue_head_t ucode_write_waitq; wait_queue_head_t sx_waitq; - u8 def_rx_queue; u8 n_no_reclaim_cmds; u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS]; u16 num_rx_bufs; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index f87b28edc267..bc6a9f861711 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1373,7 +1373,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, } } - if (rxq->id == trans_pcie->def_rx_queue) + if (rxq->id == IWL_DEFAULT_RX_QUEUE) iwl_op_mode_rx(trans->op_mode, &rxq->napi, &rxcb); else @@ -1385,7 +1385,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, * if it is true then one of the handlers took the page. 
*/ - if (reclaim) { + if (reclaim && txq) { u16 sequence = le16_to_cpu(pkt->hdr.sequence); int index = SEQ_TO_INDEX(sequence); int cmd_index = iwl_txq_get_cmd_index(txq, index); @@ -1510,7 +1510,7 @@ restart: spin_lock(&rxq->lock); /* uCode's read index (stored in shared DRAM) indicates the last Rx * buffer that the driver may process (last buffer filled by ucode). */ - r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; + r = iwl_get_closed_rb_stts(trans, rxq); i = rxq->read; /* W/A 9000 device step A0 wrap-around bug */ @@ -1660,9 +1660,7 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry); local_bh_disable(); - if (napi_schedule_prep(&rxq->napi)) - __napi_schedule(&rxq->napi); - else + if (!napi_schedule(&rxq->napi)) iwl_pcie_clear_irq(trans, entry->entry); local_bh_enable(); @@ -2291,6 +2289,12 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) else sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR; + if (inta_hw & MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR) { + IWL_ERR(trans, "TOP Fatal error detected, inta_hw=0x%x.\n", + inta_hw); + /* TODO: PLDR flow required here for >= Bz */ + } + /* Error detected by uCode */ if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || sw_err) { IWL_ERR(trans, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index fa46dad5fd68..c9e5bda8f0b7 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -161,6 +161,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); + iwl_pcie_synchronize_irqs(trans); iwl_pcie_rx_napi_sync(trans); iwl_txq_gen2_tx_free(trans); iwl_pcie_rx_stop(trans); @@ -230,11 +231,14 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int queue_size = max_t(u32, IWL_CMD_QUEUE_SIZE, trans->cfg->min_txq_size); + int ret; /* TODO: most of the logic can be removed in A0 - but not in Z0 */ spin_lock_bh(&trans_pcie->irq_lock); - iwl_pcie_gen2_apm_init(trans); + ret = iwl_pcie_gen2_apm_init(trans); spin_unlock_bh(&trans_pcie->irq_lock); + if (ret) + return ret; iwl_op_mode_nic_config(trans->op_mode); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 3e988da44973..92253260f568 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1111,6 +1111,7 @@ static const struct iwl_causes_list causes_list_common[] = { IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE), IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP), IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR), IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL), IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL), IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC), @@ -1263,6 +1264,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans) if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); + iwl_pcie_synchronize_irqs(trans); iwl_pcie_rx_napi_sync(trans); 
iwl_pcie_tx_stop(trans); iwl_pcie_rx_stop(trans); @@ -2018,6 +2020,30 @@ void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions memset(desc_dram, 0, sizeof(*desc_dram)); } +static void iwl_pcie_free_invalid_tx_cmd(struct iwl_trans *trans) +{ + iwl_pcie_free_dma_ptr(trans, &trans->invalid_tx_cmd); +} + +static int iwl_pcie_alloc_invalid_tx_cmd(struct iwl_trans *trans) +{ + struct iwl_cmd_header_wide bad_cmd = { + .cmd = INVALID_WR_PTR_CMD, + .group_id = DEBUG_GROUP, + .sequence = cpu_to_le16(0xffff), + .length = cpu_to_le16(0), + .version = 0, + }; + int ret; + + ret = iwl_pcie_alloc_dma_ptr(trans, &trans->invalid_tx_cmd, + sizeof(bad_cmd)); + if (ret) + return ret; + memcpy(trans->invalid_tx_cmd.addr, &bad_cmd, sizeof(bad_cmd)); + return 0; +} + void iwl_trans_pcie_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -2048,6 +2074,8 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) iwl_pcie_free_ict(trans); } + iwl_pcie_free_invalid_tx_cmd(trans); + iwl_pcie_free_fw_monitor(trans); iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->pnvm_data, @@ -2086,8 +2114,11 @@ static void iwl_trans_pcie_removal_wk(struct work_struct *wk) pci_lock_rescan_remove(); pci_dev_put(pdev); pci_stop_and_remove_bus_device(pdev); - if (removal->rescan) - pci_rescan_bus(bus->parent); + if (removal->rescan && bus) { + if (bus->parent) + bus = bus->parent; + pci_rescan_bus(bus); + } pci_unlock_rescan_remove(); kfree(removal); @@ -2147,6 +2178,9 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP; u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN; + if (test_bit(STATUS_TRANS_DEAD, &trans->status)) + return false; + spin_lock(&trans_pcie->reg_lock); if (trans_pcie->cmd_hold_nic_awake) @@ -2259,6 +2293,8 @@ out: static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, void *buf, int dwords) { +#define IWL_MAX_HW_ERRS 5 + unsigned int num_consec_hw_errors = 0; int offs = 0; u32 *vals = buf; @@ -2274,6 +2310,17 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, while (offs < dwords) { vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT); + + if (iwl_trans_is_hw_error_value(vals[offs])) + num_consec_hw_errors++; + else + num_consec_hw_errors = 0; + + if (num_consec_hw_errors >= IWL_MAX_HW_ERRS) { + iwl_trans_release_nic_access(trans); + return -EIO; + } + offs++; if (time_after(jiffies, end)) { @@ -2686,11 +2733,9 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n", rxq->free_count); if (rxq->rb_stts) { - u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans, - rxq)); + u32 r = iwl_get_closed_rb_stts(trans, rxq); pos += scnprintf(buf + pos, bufsz - pos, - "\tclosed_rb_num: %u\n", - r & 0x0FFF); + "\tclosed_rb_num: %u\n", r); } else { pos += scnprintf(buf + pos, bufsz - pos, "\tclosed_rb_num: Not Allocated\n"); @@ -3061,9 +3106,9 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, struct iwl_rxq *rxq = &trans_pcie->rxq[0]; u32 i, r, j, rb_len = 0; - spin_lock(&rxq->lock); + spin_lock_bh(&rxq->lock); - r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; + r = iwl_get_closed_rb_stts(trans, rxq); for (i = rxq->read, j = 0; i != r && j < allocated_rb_nums; @@ -3085,7 +3130,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, *data = iwl_fw_error_next_data(*data); } - spin_unlock(&rxq->lock); + spin_unlock_bh(&rxq->lock); return rb_len; } @@ -3359,9 +3404,7 @@ 
iwl_trans_pcie_dump_data(struct iwl_trans *trans, /* Dump RBs is supported only for pre-9000 devices (1 queue) */ struct iwl_rxq *rxq = &trans_pcie->rxq[0]; /* RBs */ - num_rbs = - le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) - & 0x0FFF; + num_rbs = iwl_get_closed_rb_stts(trans, rxq); num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; len += num_rbs * (sizeof(*data) + sizeof(struct iwl_fw_error_dump_rb) + @@ -3571,10 +3614,19 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, int ret, addr_size; const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2; void __iomem * const *table; + u32 bar0; if (!cfg_trans->gen2) ops = &trans_ops_pcie; + /* reassign our BAR 0 if invalid due to possible runtime PM races */ + pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &bar0); + if (bar0 == PCI_BASE_ADDRESS_MEM_TYPE_64) { + ret = pci_assign_resource(pdev, 0); + if (ret) + return ERR_PTR(ret); + } + ret = pcim_enable_device(pdev); if (ret) return ERR_PTR(ret); @@ -3617,8 +3669,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, PCIE_LINK_STATE_CLKPM); } - trans_pcie->def_rx_queue = 0; - pci_set_master(pdev); addr_size = trans->txqs.tfd.addr_size; @@ -3686,6 +3736,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, init_waitqueue_head(&trans_pcie->sx_waitq); + ret = iwl_pcie_alloc_invalid_tx_cmd(trans); + if (ret) + goto out_no_pci; if (trans_pcie->msix_enabled) { ret = iwl_pcie_init_msix_handler(pdev, trans_pcie); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 790e5b124740..2f39b639c43f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -132,22 +132,6 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) } } -static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd, - u8 idx, dma_addr_t addr, u16 len) -{ - struct iwl_tfd *tfd_fh = (void *)tfd; - struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx]; - - u16 hi_n_len = len << 4; - - put_unaligned_le32(addr, &tb->lo); - hi_n_len |= iwl_get_dma_hi_addr(addr); - - tb->hi_n_len = cpu_to_le16(hi_n_len); - - tfd_fh->num_tbs = idx + 1; -} - static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, dma_addr_t addr, u16 len, bool reset) { @@ -172,7 +156,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, "Unaligned address = %llx\n", (unsigned long long)addr)) return -EINVAL; - iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len); + iwl_pcie_gen1_tfd_set_tb(trans, tfd, num_tbs, addr, len); return num_tbs; } @@ -1203,7 +1187,11 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, group_id = cmd->hdr.group_id; cmd_id = WIDE_ID(group_id, cmd->hdr.cmd); - iwl_txq_gen1_tfd_unmap(trans, meta, txq, index); + if (trans->trans_cfg->gen2) + iwl_txq_gen2_tfd_unmap(trans, meta, + iwl_txq_get_tfd(trans, txq, index)); + else + iwl_txq_gen1_tfd_unmap(trans, meta, txq, index); /* Input error checking is done when commands are added to queue. 
*/ if (meta->flags & CMD_WANT_SKB) { diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c index 5bb3cc3367c9..ca74b1b63cac 100644 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c @@ -10,6 +10,7 @@ #include "fw/api/commands.h" #include "fw/api/tx.h" #include "fw/api/datapath.h" +#include "fw/api/debug.h" #include "queue/tx.h" #include "iwl-fh.h" #include "iwl-scd.h" @@ -84,6 +85,50 @@ static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans, return le16_to_cpu(tfd->num_tbs) & 0x1f; } +int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd, + dma_addr_t addr, u16 len) +{ + int idx = iwl_txq_gen2_get_num_tbs(trans, tfd); + struct iwl_tfh_tb *tb; + + /* Only WARN here so we know about the issue, but we mess up our + * unmap path because not every place currently checks for errors + * returned from this function - it can only return an error if + * there's no more space, and so when we know there is enough we + * don't always check ... + */ + WARN(iwl_txq_crosses_4g_boundary(addr, len), + "possible DMA problem with iova:0x%llx, len:%d\n", + (unsigned long long)addr, len); + + if (WARN_ON(idx >= IWL_TFH_NUM_TBS)) + return -EINVAL; + tb = &tfd->tbs[idx]; + + /* Each TFD can point to a maximum max_tbs Tx buffers */ + if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) { + IWL_ERR(trans, "Error can not send more than %d chunks\n", + trans->txqs.tfd.max_tbs); + return -EINVAL; + } + + put_unaligned_le64(addr, &tb->addr); + tb->tb_len = cpu_to_le16(len); + + tfd->num_tbs = cpu_to_le16(idx + 1); + + return idx; +} + +static void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans, + struct iwl_tfh_tfd *tfd) +{ + tfd->num_tbs = 0; + + iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma, + trans->invalid_tx_cmd.size); +} + void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta, struct iwl_tfh_tfd *tfd) { @@ -111,7 +156,7 @@ void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta, DMA_TO_DEVICE); } - tfd->num_tbs = 0; + iwl_txq_set_tfd_invalid_gen2(trans, tfd); } void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) @@ -142,42 +187,6 @@ void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) } } -int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd, - dma_addr_t addr, u16 len) -{ - int idx = iwl_txq_gen2_get_num_tbs(trans, tfd); - struct iwl_tfh_tb *tb; - - /* - * Only WARN here so we know about the issue, but we mess up our - * unmap path because not every place currently checks for errors - * returned from this function - it can only return an error if - * there's no more space, and so when we know there is enough we - * don't always check ... 
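The WARN kept in the relocated iwl_txq_gen2_set_tb fires when a mapping straddles a 4 GiB DMA boundary. Assuming iwl_txq_crosses_4g_boundary() simply compares the upper 32 address bits of the first and last byte of the buffer, the predicate amounts to:

#include <stdbool.h>
#include <stdint.h>

static bool crosses_4g_boundary(uint64_t addr, uint16_t len)
{
	if (!len)
		return false;	/* an empty buffer cannot cross anything */
	/* first and last byte must share the same upper 32 address bits */
	return (addr >> 32) != ((addr + len - 1) >> 32);
}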
- */ - WARN(iwl_txq_crosses_4g_boundary(addr, len), - "possible DMA problem with iova:0x%llx, len:%d\n", - (unsigned long long)addr, len); - - if (WARN_ON(idx >= IWL_TFH_NUM_TBS)) - return -EINVAL; - tb = &tfd->tbs[idx]; - - /* Each TFD can point to a maximum max_tbs Tx buffers */ - if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) { - IWL_ERR(trans, "Error can not send more than %d chunks\n", - trans->txqs.tfd.max_tbs); - return -EINVAL; - } - - put_unaligned_le64(addr, &tb->addr); - tb->tb_len = cpu_to_le16(len); - - tfd->num_tbs = cpu_to_le16(idx + 1); - - return idx; -} - static struct page *get_workaround_page(struct iwl_trans *trans, struct sk_buff *skb) { @@ -1026,11 +1035,21 @@ static void iwl_txq_stuck_timer(struct timer_list *t) iwl_force_nmi(trans); } +static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans, + struct iwl_tfd *tfd) +{ + tfd->num_tbs = 0; + + iwl_pcie_gen1_tfd_set_tb(trans, tfd, 0, trans->invalid_tx_cmd.dma, + trans->invalid_tx_cmd.size); +} + int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, bool cmd_queue) { - size_t tfd_sz = trans->txqs.tfd.size * - trans->trans_cfg->base_params->max_tfd_queue_size; + size_t num_entries = trans->trans_cfg->gen2 ? + slots_num : trans->trans_cfg->base_params->max_tfd_queue_size; + size_t tfd_sz; size_t tb0_buf_sz; int i; @@ -1040,8 +1059,7 @@ int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, if (WARN_ON(txq->entries || txq->tfds)) return -EINVAL; - if (trans->trans_cfg->gen2) - tfd_sz = trans->txqs.tfd.size * slots_num; + tfd_sz = trans->txqs.tfd.size * num_entries; timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0); txq->trans = trans; @@ -1081,6 +1099,15 @@ int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, if (!txq->first_tb_bufs) goto err_free_tfds; + for (i = 0; i < num_entries; i++) { + void *tfd = iwl_txq_get_tfd(trans, txq, i); + + if (trans->trans_cfg->gen2) + iwl_txq_set_tfd_invalid_gen2(trans, tfd); + else + iwl_txq_set_tfd_invalid_gen1(trans, tfd); + } + return 0; err_free_tfds: dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); @@ -1340,22 +1367,12 @@ error: } static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans, - void *_tfd, u8 idx) + struct iwl_tfd *tfd, u8 idx) { - struct iwl_tfd *tfd; - struct iwl_tfd_tb *tb; + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; dma_addr_t addr; dma_addr_t hi_len; - if (trans->trans_cfg->gen2) { - struct iwl_tfh_tfd *tfh_tfd = _tfd; - struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx]; - - return (dma_addr_t)(le64_to_cpu(tfh_tb->addr)); - } - - tfd = _tfd; - tb = &tfd->tbs[idx]; addr = get_unaligned_le32(&tb->lo); if (sizeof(dma_addr_t) <= sizeof(u32)) @@ -1376,7 +1393,7 @@ void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, struct iwl_txq *txq, int index) { int i, num_tbs; - void *tfd = iwl_txq_get_tfd(trans, txq, index); + struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index); /* Sanity check on number of chunks */ num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd); @@ -1408,15 +1425,7 @@ void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, meta->tbs = 0; - if (trans->trans_cfg->gen2) { - struct iwl_tfh_tfd *tfd_fh = (void *)tfd; - - tfd_fh->num_tbs = 0; - } else { - struct iwl_tfd *tfd_fh = (void *)tfd; - - tfd_fh->num_tbs = 0; - } + iwl_txq_set_tfd_invalid_gen1(trans, tfd); } #define IWL_TX_CRC_SIZE 4 @@ -1520,7 +1529,12 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) /* We have only q->n_window txq->entries, but we use 
* TFD_QUEUE_SIZE_MAX tfds */ - iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr); + if (trans->trans_cfg->gen2) + iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, + iwl_txq_get_tfd(trans, txq, rd_ptr)); + else + iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, + txq, rd_ptr); /* free SKB */ skb = txq->entries[idx].skb; @@ -1561,7 +1575,7 @@ void iwl_txq_progress(struct iwl_txq *txq) /* Frees buffers until index _not_ inclusive */ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn, - struct sk_buff_head *skbs) + struct sk_buff_head *skbs, bool is_flush) { struct iwl_txq *txq = trans->txqs.txq[txq_id]; int tfd_num, read_ptr, last_to_free; @@ -1636,9 +1650,11 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn, if (iwl_txq_space(trans, txq) > txq->low_mark && test_bit(txq_id, trans->txqs.queue_stopped)) { struct sk_buff_head overflow_skbs; + struct sk_buff *skb; __skb_queue_head_init(&overflow_skbs); - skb_queue_splice_init(&txq->overflow_q, &overflow_skbs); + skb_queue_splice_init(&txq->overflow_q, + is_flush ? skbs : &overflow_skbs); /* * We are going to transmit from the overflow queue. @@ -1658,8 +1674,7 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn, */ spin_unlock_bh(&txq->lock); - while (!skb_queue_empty(&overflow_skbs)) { - struct sk_buff *skb = __skb_dequeue(&overflow_skbs); + while ((skb = __skb_dequeue(&overflow_skbs))) { struct iwl_device_tx_cmd *dev_cmd_ptr; dev_cmd_ptr = *(void **)((u8 *)skb->cb + diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h index 1e4a24ab9bab..124b29aac4a1 100644 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h @@ -71,7 +71,8 @@ static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq) /** * iwl_txq_inc_wrap - increment queue index, wrap back to beginning - * @index -- current index + * @trans: the transport (for configuration data) + * @index: current index */ static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index) { @@ -81,7 +82,8 @@ static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index) /** * iwl_txq_dec_wrap - decrement queue index, wrap back to end - * @index -- current index + * @trans: the transport (for configuration data) + * @index: current index */ static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index) { @@ -131,17 +133,8 @@ struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, struct sk_buff *skb); #endif static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans, - void *_tfd) + struct iwl_tfd *tfd) { - struct iwl_tfd *tfd; - - if (trans->trans_cfg->gen2) { - struct iwl_tfh_tfd *tfh_tfd = _tfd; - - return le16_to_cpu(tfh_tfd->num_tbs) & 0x1f; - } - - tfd = (struct iwl_tfd *)_tfd; return tfd->num_tbs & 0x1f; } @@ -164,6 +157,21 @@ static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans, return le16_to_cpu(tb->hi_n_len) >> 4; } +static inline void iwl_pcie_gen1_tfd_set_tb(struct iwl_trans *trans, + struct iwl_tfd *tfd, + u8 idx, dma_addr_t addr, u16 len) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + u16 hi_n_len = len << 4; + + put_unaligned_le32(addr, &tb->lo); + hi_n_len |= iwl_get_dma_hi_addr(addr); + + tb->hi_n_len = cpu_to_le16(hi_n_len); + + tfd->num_tbs = idx + 1; +} + void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta, struct iwl_txq *txq, int index); @@ -173,7 +181,7 @@ void 
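iwl_pcie_gen1_tfd_set_tb() above packs a 36-bit DMA address and a 12-bit length into the gen1 TB layout: the low 32 address bits go into 'lo', while 'hi_n_len' carries the length in its top 12 bits and address bits 32..35 in its low nibble. A host-endian sketch of that packing (the driver additionally stores both fields little-endian via put_unaligned_le32()/cpu_to_le16(), which is omitted here):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Gen1 TFD tx-buffer entry: 36-bit DMA address + 12-bit length. */
struct tb {
        uint32_t lo;        /* address bits 0..31 */
        uint16_t hi_n_len;  /* bits 0..3: address bits 32..35, bits 4..15: length */
};

static void tb_set(struct tb *tb, uint64_t addr, uint16_t len)
{
        assert(addr < (1ull << 36) && len < (1u << 12));
        tb->lo = (uint32_t)addr;
        tb->hi_n_len = (uint16_t)((len << 4) | (addr >> 32));
}

static uint64_t tb_addr(const struct tb *tb)
{
        return tb->lo | ((uint64_t)(tb->hi_n_len & 0xF) << 32);
}

static uint16_t tb_len(const struct tb *tb)
{
        return tb->hi_n_len >> 4;
}

int main(void)
{
        struct tb tb;

        tb_set(&tb, 0xABCDE6789ull, 1500);
        printf("addr=%#llx len=%u\n",
               (unsigned long long)tb_addr(&tb), tb_len(&tb));
        return 0;
}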
iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, struct iwl_txq *txq, u16 byte_cnt, int num_tbs); void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn, - struct sk_buff_head *skbs); + struct sk_buff_head *skbs, bool is_flush); void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr); void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs, bool freeze); diff --git a/drivers/net/wireless/intersil/hostap/hostap.h b/drivers/net/wireless/intersil/hostap/hostap.h index c17ab6dbbb53..552ae33d7875 100644 --- a/drivers/net/wireless/intersil/hostap/hostap.h +++ b/drivers/net/wireless/intersil/hostap/hostap.h @@ -92,7 +92,6 @@ void hostap_info_process(local_info_t *local, struct sk_buff *skb); extern const struct iw_handler_def hostap_iw_handler_def; extern const struct ethtool_ops prism2_ethtool_ops; -int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); int hostap_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd); diff --git a/drivers/net/wireless/intersil/hostap/hostap_download.c b/drivers/net/wireless/intersil/hostap/hostap_download.c index 3672291ced5c..5e5bada28b5b 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_download.c +++ b/drivers/net/wireless/intersil/hostap/hostap_download.c @@ -732,8 +732,7 @@ static int prism2_download(local_info_t *local, goto out; } - dl = kzalloc(sizeof(*dl) + param->num_areas * - sizeof(struct prism2_download_data_area), GFP_KERNEL); + dl = kzalloc(struct_size(dl, data, param->num_areas), GFP_KERNEL); if (dl == NULL) { ret = -ENOMEM; goto out; diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c index b4adfc190ae8..26162f92e3c3 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c +++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c @@ -2316,21 +2316,6 @@ static const struct iw_priv_args prism2_priv[] = { }; -static int prism2_ioctl_priv_inquire(struct net_device *dev, int *i) -{ - struct hostap_interface *iface; - local_info_t *local; - - iface = netdev_priv(dev); - local = iface->local; - - if (local->func->cmd(dev, HFA384X_CMDCODE_INQUIRE, *i, NULL, NULL)) - return -EOPNOTSUPP; - - return 0; -} - - static int prism2_ioctl_priv_prism2_param(struct net_device *dev, struct iw_request_info *info, union iwreq_data *uwrq, char *extra) @@ -2910,146 +2895,6 @@ static int prism2_ioctl_priv_writemif(struct net_device *dev, } -static int prism2_ioctl_priv_monitor(struct net_device *dev, int *i) -{ - struct hostap_interface *iface; - local_info_t *local; - int ret = 0; - union iwreq_data wrqu; - - iface = netdev_priv(dev); - local = iface->local; - - printk(KERN_DEBUG "%s: process %d (%s) used deprecated iwpriv monitor " - "- update software to use iwconfig mode monitor\n", - dev->name, task_pid_nr(current), current->comm); - - /* Backward compatibility code - this can be removed at some point */ - - if (*i == 0) { - /* Disable monitor mode - old mode was not saved, so go to - * Master mode */ - wrqu.mode = IW_MODE_MASTER; - ret = prism2_ioctl_siwmode(dev, NULL, &wrqu, NULL); - } else if (*i == 1) { - /* netlink socket mode is not supported anymore since it did - * not separate different devices from each other and was not - * best method for delivering large amount of packets to - * user space */ - ret = -EOPNOTSUPP; - } else if (*i == 2 || *i == 3) { - switch (*i) { - case 2: - local->monitor_type = PRISM2_MONITOR_80211; - break; - case 3: - local->monitor_type = 
PRISM2_MONITOR_PRISM; - break; - } - wrqu.mode = IW_MODE_MONITOR; - ret = prism2_ioctl_siwmode(dev, NULL, &wrqu, NULL); - hostap_monitor_mode_enable(local); - } else - ret = -EINVAL; - - return ret; -} - - -static int prism2_ioctl_priv_reset(struct net_device *dev, int *i) -{ - struct hostap_interface *iface; - local_info_t *local; - - iface = netdev_priv(dev); - local = iface->local; - - printk(KERN_DEBUG "%s: manual reset request(%d)\n", dev->name, *i); - switch (*i) { - case 0: - /* Disable and enable card */ - local->func->hw_shutdown(dev, 1); - local->func->hw_config(dev, 0); - break; - - case 1: - /* COR sreset */ - local->func->hw_reset(dev); - break; - - case 2: - /* Disable and enable port 0 */ - local->func->reset_port(dev); - break; - - case 3: - prism2_sta_deauth(local, WLAN_REASON_DEAUTH_LEAVING); - if (local->func->cmd(dev, HFA384X_CMDCODE_DISABLE, 0, NULL, - NULL)) - return -EINVAL; - break; - - case 4: - if (local->func->cmd(dev, HFA384X_CMDCODE_ENABLE, 0, NULL, - NULL)) - return -EINVAL; - break; - - default: - printk(KERN_DEBUG "Unknown reset request %d\n", *i); - return -EOPNOTSUPP; - } - - return 0; -} - - -static int prism2_ioctl_priv_set_rid_word(struct net_device *dev, int *i) -{ - int rid = *i; - int value = *(i + 1); - - printk(KERN_DEBUG "%s: Set RID[0x%X] = %d\n", dev->name, rid, value); - - if (hostap_set_word(dev, rid, value)) - return -EINVAL; - - return 0; -} - - -#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT -static int ap_mac_cmd_ioctl(local_info_t *local, int *cmd) -{ - int ret = 0; - - switch (*cmd) { - case AP_MAC_CMD_POLICY_OPEN: - local->ap->mac_restrictions.policy = MAC_POLICY_OPEN; - break; - case AP_MAC_CMD_POLICY_ALLOW: - local->ap->mac_restrictions.policy = MAC_POLICY_ALLOW; - break; - case AP_MAC_CMD_POLICY_DENY: - local->ap->mac_restrictions.policy = MAC_POLICY_DENY; - break; - case AP_MAC_CMD_FLUSH: - ap_control_flush_macs(&local->ap->mac_restrictions); - break; - case AP_MAC_CMD_KICKALL: - ap_control_kickall(local->ap); - hostap_deauth_all_stas(local->dev, local->ap, 0); - break; - default: - ret = -EOPNOTSUPP; - break; - } - - return ret; -} -#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ - - #ifdef PRISM2_DOWNLOAD_SUPPORT static int prism2_ioctl_priv_download(local_info_t *local, struct iw_point *p) { @@ -3963,79 +3808,6 @@ const struct iw_handler_def hostap_iw_handler_def = .get_wireless_stats = hostap_get_wireless_stats, }; -/* Private ioctls (iwpriv) that have not yet been converted - * into new wireless extensions API */ -int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct iwreq *wrq = (struct iwreq *) ifr; - struct hostap_interface *iface; - local_info_t *local; - int ret = 0; - - iface = netdev_priv(dev); - local = iface->local; - - switch (cmd) { - case PRISM2_IOCTL_INQUIRE: - if (!capable(CAP_NET_ADMIN)) ret = -EPERM; - else ret = prism2_ioctl_priv_inquire(dev, (int *) wrq->u.name); - break; - - case PRISM2_IOCTL_MONITOR: - if (!capable(CAP_NET_ADMIN)) ret = -EPERM; - else ret = prism2_ioctl_priv_monitor(dev, (int *) wrq->u.name); - break; - - case PRISM2_IOCTL_RESET: - if (!capable(CAP_NET_ADMIN)) ret = -EPERM; - else ret = prism2_ioctl_priv_reset(dev, (int *) wrq->u.name); - break; - - case PRISM2_IOCTL_WDS_ADD: - if (!capable(CAP_NET_ADMIN)) ret = -EPERM; - else ret = prism2_wds_add(local, wrq->u.ap_addr.sa_data, 1); - break; - - case PRISM2_IOCTL_WDS_DEL: - if (!capable(CAP_NET_ADMIN)) ret = -EPERM; - else ret = prism2_wds_del(local, wrq->u.ap_addr.sa_data, 1, 0); - break; - - case PRISM2_IOCTL_SET_RID_WORD: 
- if (!capable(CAP_NET_ADMIN)) ret = -EPERM; - else ret = prism2_ioctl_priv_set_rid_word(dev, - (int *) wrq->u.name); - break; - -#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT - case PRISM2_IOCTL_MACCMD: - if (!capable(CAP_NET_ADMIN)) ret = -EPERM; - else ret = ap_mac_cmd_ioctl(local, (int *) wrq->u.name); - break; - - case PRISM2_IOCTL_ADDMAC: - if (!capable(CAP_NET_ADMIN)) ret = -EPERM; - else ret = ap_control_add_mac(&local->ap->mac_restrictions, - wrq->u.ap_addr.sa_data); - break; - case PRISM2_IOCTL_DELMAC: - if (!capable(CAP_NET_ADMIN)) ret = -EPERM; - else ret = ap_control_del_mac(&local->ap->mac_restrictions, - wrq->u.ap_addr.sa_data); - break; - case PRISM2_IOCTL_KICKMAC: - if (!capable(CAP_NET_ADMIN)) ret = -EPERM; - else ret = ap_control_kick_mac(local->ap, local->dev, - wrq->u.ap_addr.sa_data); - break; -#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ - default: - ret = -EOPNOTSUPP; - break; - } - - return ret; -} /* Private ioctls that are not used with iwpriv; * in SIOCDEVPRIVATE range */ diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c index 787f685e70b4..bf86ac26c2ac 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_main.c +++ b/drivers/net/wireless/intersil/hostap/hostap_main.c @@ -796,7 +796,6 @@ static const struct net_device_ops hostap_netdev_ops = { .ndo_open = prism2_open, .ndo_stop = prism2_close, - .ndo_do_ioctl = hostap_ioctl, .ndo_siocdevprivate = hostap_siocdevprivate, .ndo_set_mac_address = prism2_set_mac_address, .ndo_set_rx_mode = hostap_set_multicast_list, @@ -809,7 +808,6 @@ static const struct net_device_ops hostap_mgmt_netdev_ops = { .ndo_open = prism2_open, .ndo_stop = prism2_close, - .ndo_do_ioctl = hostap_ioctl, .ndo_siocdevprivate = hostap_siocdevprivate, .ndo_set_mac_address = prism2_set_mac_address, .ndo_set_rx_mode = hostap_set_multicast_list, @@ -822,7 +820,6 @@ static const struct net_device_ops hostap_master_ops = { .ndo_open = prism2_open, .ndo_stop = prism2_close, - .ndo_do_ioctl = hostap_ioctl, .ndo_siocdevprivate = hostap_siocdevprivate, .ndo_set_mac_address = prism2_set_mac_address, .ndo_set_rx_mode = hostap_set_multicast_list, diff --git a/drivers/net/wireless/intersil/hostap/hostap_wlan.h b/drivers/net/wireless/intersil/hostap/hostap_wlan.h index c25cd21d18bd..f71c0545c0be 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_wlan.h +++ b/drivers/net/wireless/intersil/hostap/hostap_wlan.h @@ -617,7 +617,7 @@ struct prism2_download_data { u32 addr; /* wlan card address */ u32 len; u8 *data; /* allocated data */ - } data[]; + } data[] __counted_by(num_areas); }; diff --git a/drivers/net/wireless/intersil/orinoco/airport.c b/drivers/net/wireless/intersil/orinoco/airport.c index a890bfa0d5cc..45ac00fdafa5 100644 --- a/drivers/net/wireless/intersil/orinoco/airport.c +++ b/drivers/net/wireless/intersil/orinoco/airport.c @@ -18,7 +18,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> -#include <linux/of_device.h> +#include <linux/mod_devicetable.h> #include <asm/pmac_feature.h> #include "orinoco.h" diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c index dd31929261ab..866e0230df25 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c @@ -129,18 +129,18 @@ MODULE_FIRMWARE("orinoco_ezusb_fw"); #define USB_AVAYA8_VENDOR_ID 0x0D98 #define USB_AVAYAE_VENDOR_ID 0x0D9E -#define USB_AVAYA_WIRELESS_ID 0x0300 /* Avaya 
Wireless USB Card */ +#define USB_AVAYA_WIRELESS_ID 0x0300 /* Avaya USB Wireless Card */ #define USB_AGERE_VENDOR_ID 0x0D4E /* Agere Systems */ -#define USB_AGERE_MODEL0801_ID 0x1000 /* Wireless USB Card Model 0801 */ -#define USB_AGERE_MODEL0802_ID 0x1001 /* Wireless USB Card Model 0802 */ -#define USB_AGERE_REBRANDED_ID 0x047A /* WLAN USB Card */ +#define USB_AGERE_MODEL0801_ID 0x1000 /* USB Wireless Card Model 0801 */ +#define USB_AGERE_MODEL0802_ID 0x1001 /* USB Wireless Card Model 0802 */ +#define USB_AGERE_REBRANDED_ID 0x047A /* USB WLAN Card */ #define USB_ELSA_VENDOR_ID 0x05CC #define USB_ELSA_AIRLANCER_ID 0x3100 /* ELSA AirLancer USB-11 */ #define USB_LEGEND_VENDOR_ID 0x0E7C -#define USB_LEGEND_JOYNET_ID 0x0300 /* Joynet WLAN USB Card */ +#define USB_LEGEND_JOYNET_ID 0x0300 /* Joynet USB WLAN Card */ #define USB_SAMSUNG_VENDOR_ID 0x04E8 #define USB_SAMSUNG_SEW2001U1_ID 0x5002 /* Samsung SEW-2001u Card */ @@ -154,7 +154,7 @@ MODULE_FIRMWARE("orinoco_ezusb_fw"); #define USB_FUJITSU_E1100_ID 0x1002 /* connect2AIR WLAN E-1100 USB */ #define USB_2WIRE_VENDOR_ID 0x1630 -#define USB_2WIRE_WIRELESS_ID 0xff81 /* 2Wire Wireless USB adapter */ +#define USB_2WIRE_WIRELESS_ID 0xff81 /* 2Wire USB Wireless adapter */ #define EZUSB_REQUEST_FW_TRANS 0xA0 diff --git a/drivers/net/wireless/intersil/p54/p54.h b/drivers/net/wireless/intersil/p54/p54.h index 3356ea708d81..522656de4159 100644 --- a/drivers/net/wireless/intersil/p54/p54.h +++ b/drivers/net/wireless/intersil/p54/p54.h @@ -126,7 +126,7 @@ struct p54_cal_database { size_t entry_size; size_t offset; size_t len; - u8 data[]; + u8 data[] __counted_by(len); }; #define EEPROM_READBACK_LEN 0x3fc diff --git a/drivers/net/wireless/legacy/ray_cs.c b/drivers/net/wireless/legacy/ray_cs.c index 8ace797ce951..c95a79e01cd0 100644 --- a/drivers/net/wireless/legacy/ray_cs.c +++ b/drivers/net/wireless/legacy/ray_cs.c @@ -348,7 +348,7 @@ static int ray_config(struct pcmcia_device *link) { int ret = 0; int i; - struct net_device *dev = (struct net_device *)link->priv; + struct net_device *dev = link->priv; ray_dev_t *local = netdev_priv(dev); dev_dbg(&link->dev, "ray_config\n"); @@ -1830,7 +1830,7 @@ static void set_multicast_list(struct net_device *dev) =============================================================================*/ static irqreturn_t ray_interrupt(int irq, void *dev_id) { - struct net_device *dev = (struct net_device *)dev_id; + struct net_device *dev = dev_id; struct pcmcia_device *link; ray_dev_t *local; struct ccs __iomem *pccs; @@ -2567,7 +2567,7 @@ static int ray_cs_proc_show(struct seq_file *m, void *v) link = this_device; if (!link) return 0; - dev = (struct net_device *)link->priv; + dev = link->priv; if (!dev) return 0; local = netdev_priv(dev); diff --git a/drivers/net/wireless/legacy/rndis_wlan.c b/drivers/net/wireless/legacy/rndis_wlan.c index 712038d46bdb..e7fea7ded6d5 100644 --- a/drivers/net/wireless/legacy/rndis_wlan.c +++ b/drivers/net/wireless/legacy/rndis_wlan.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Driver for RNDIS based wireless USB devices. + * Driver for RNDIS based USB wireless devices. 
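Two related hardening idioms appear in the hostap and p54 hunks: kzalloc(struct_size(dl, data, n), ...) replaces the open-coded 'sizeof(*dl) + n * sizeof(elem)' so the size computation saturates instead of silently overflowing, and the flexible arrays gain __counted_by() annotations so fortified accessors can bounds-check against the runtime element count. A userspace analogue using the GCC/Clang overflow builtins (struct and field names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct area {
        uint32_t addr;
        uint32_t len;
};

struct download {
        uint32_t dl_cmd;
        uint32_t num_areas;
        struct area data[];   /* flexible array, num_areas entries */
};

/* Overflow-checked analogue of the kernel's struct_size(). */
static size_t download_size(size_t n)
{
        size_t bytes;

        if (__builtin_mul_overflow(n, sizeof(struct area), &bytes) ||
            __builtin_add_overflow(bytes, sizeof(struct download), &bytes))
                return SIZE_MAX;   /* an allocation of SIZE_MAX always fails */
        return bytes;
}

int main(void)
{
        size_t n = 4;
        struct download *dl = calloc(1, download_size(n));

        if (!dl)
                return 1;
        dl->num_areas = (uint32_t)n;
        printf("%zu bytes for %zu areas\n", download_size(n), n);
        printf("overflowing count saturates: %d\n",
               download_size(SIZE_MAX) == (size_t)SIZE_MAX);
        free(dl);
        return 0;
}

__counted_by(num_areas) itself has no userspace equivalent here; in the kernel, under CONFIG_UBSAN_BOUNDS or FORTIFY_SOURCE, it turns out-of-range data[] accesses into runtime traps.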
* * Copyright (C) 2007 by Bjorge Dijkstra <bjd@jooz.net> * Copyright (C) 2008-2009 by Jussi Kivilinna <jussi.kivilinna@iki.fi> diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c index a63c5e622ee3..524034699972 100644 --- a/drivers/net/wireless/marvell/libertas/if_sdio.c +++ b/drivers/net/wireless/marvell/libertas/if_sdio.c @@ -101,7 +101,7 @@ MODULE_FIRMWARE("sd8688_helper.bin"); MODULE_FIRMWARE("sd8688.bin"); struct if_sdio_packet { - struct if_sdio_packet *next; + struct list_head list; u16 nb; u8 buffer[] __aligned(4); }; @@ -119,10 +119,11 @@ struct if_sdio_card { u8 buffer[65536] __attribute__((aligned(4))); spinlock_t lock; - struct if_sdio_packet *packets; + struct list_head packets; struct workqueue_struct *workqueue; struct work_struct packet_worker; + struct work_struct reset_worker; u8 rx_unit; }; @@ -404,9 +405,10 @@ static void if_sdio_host_to_card_worker(struct work_struct *work) while (1) { spin_lock_irqsave(&card->lock, flags); - packet = card->packets; + packet = list_first_entry_or_null(&card->packets, + struct if_sdio_packet, list); if (packet) - card->packets = packet->next; + list_del(&packet->list); spin_unlock_irqrestore(&card->lock, flags); if (!packet) @@ -909,7 +911,7 @@ static int if_sdio_host_to_card(struct lbs_private *priv, { int ret; struct if_sdio_card *card; - struct if_sdio_packet *packet, *cur; + struct if_sdio_packet *packet; u16 size; unsigned long flags; @@ -934,7 +936,6 @@ static int if_sdio_host_to_card(struct lbs_private *priv, goto out; } - packet->next = NULL; packet->nb = size; /* @@ -949,14 +950,7 @@ static int if_sdio_host_to_card(struct lbs_private *priv, spin_lock_irqsave(&card->lock, flags); - if (!card->packets) - card->packets = packet; - else { - cur = card->packets; - while (cur->next) - cur = cur->next; - cur->next = packet; - } + list_add_tail(&packet->list, &card->packets); switch (type) { case MVMS_CMD: @@ -1029,10 +1023,19 @@ static int if_sdio_reset_deep_sleep_wakeup(struct lbs_private *priv) } -static struct mmc_host *reset_host; - static void if_sdio_reset_card_worker(struct work_struct *work) { + int ret; + const char *name; + struct device *dev; + struct if_sdio_card *card; + struct mmc_host *reset_host; + + card = container_of(work, struct if_sdio_card, reset_worker); + reset_host = card->func->card->host; + name = card->priv->dev->name; + dev = &card->func->dev; + /* * The actual reset operation must be run outside of lbs_thread. This * is because mmc_remove_host() will cause the device to be instantly @@ -1043,21 +1046,19 @@ static void if_sdio_reset_card_worker(struct work_struct *work) * instance for that reason. 
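The if_sdio conversion above swaps a hand-rolled singly linked 'packets' chain, which needed an O(n) tail walk on every enqueue, for an intrusive struct list_head, giving O(1) list_add_tail() plus the standard list_first_entry_or_null()/list_for_each_entry_safe() helpers. A minimal standalone model of the intrusive-list pattern (just enough of the kernel API to compile in userspace):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

static void list_del(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
}

static int list_empty(const struct list_head *head)
{
        return head->next == head;
}

struct packet {
        struct list_head list;
        unsigned short nb;
};

int main(void)
{
        struct list_head queue = LIST_HEAD_INIT(queue);

        for (int i = 0; i < 3; i++) {
                struct packet *p = calloc(1, sizeof(*p));

                if (!p)
                        return 1;
                p->nb = (unsigned short)(100 + i);
                list_add_tail(&p->list, &queue);   /* O(1), no tail walk */
        }

        /* Drain in FIFO order, as if_sdio_host_to_card_worker() does. */
        while (!list_empty(&queue)) {
                struct packet *p = container_of(queue.next, struct packet, list);

                list_del(&p->list);
                printf("tx packet nb=%u\n", p->nb);
                free(p);
        }
        return 0;
}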
*/ - pr_info("Resetting card..."); + dev_info(dev, "resetting card %s...", name); mmc_remove_host(reset_host); - mmc_add_host(reset_host); + ret = mmc_add_host(reset_host); + if (ret) + dev_err(dev, "%s: can't add mmc host, error %d\n", name, ret); } -static DECLARE_WORK(card_reset_work, if_sdio_reset_card_worker); static void if_sdio_reset_card(struct lbs_private *priv) { struct if_sdio_card *card = priv->card; - if (work_pending(&card_reset_work)) - return; - - reset_host = card->func->card->host; - schedule_work(&card_reset_work); + if (!work_pending(&card->reset_worker)) + schedule_work(&card->reset_worker); } static int if_sdio_power_save(struct lbs_private *priv) @@ -1137,7 +1138,7 @@ static int if_sdio_probe(struct sdio_func *func, struct lbs_private *priv; int ret, i; unsigned int model; - struct if_sdio_packet *packet; + struct if_sdio_packet *packet, *tmp; for (i = 0;i < func->card->num_info;i++) { if (sscanf(func->card->info[i], @@ -1178,11 +1179,15 @@ static int if_sdio_probe(struct sdio_func *func, } spin_lock_init(&card->lock); + INIT_LIST_HEAD(&card->packets); + card->workqueue = alloc_workqueue("libertas_sdio", WQ_MEM_RECLAIM, 0); if (unlikely(!card->workqueue)) { ret = -ENOMEM; goto err_queue; } + + INIT_WORK(&card->reset_worker, if_sdio_reset_card_worker); INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker); init_waitqueue_head(&card->pwron_waitq); @@ -1233,13 +1238,12 @@ err_activate_card: flush_workqueue(card->workqueue); lbs_remove_card(priv); free: + cancel_work_sync(&card->packet_worker); + cancel_work_sync(&card->reset_worker); destroy_workqueue(card->workqueue); err_queue: - while (card->packets) { - packet = card->packets; - card->packets = card->packets->next; + list_for_each_entry_safe(packet, tmp, &card->packets, list) kfree(packet); - } kfree(card); @@ -1249,7 +1253,7 @@ err_queue: static void if_sdio_remove(struct sdio_func *func) { struct if_sdio_card *card; - struct if_sdio_packet *packet; + struct if_sdio_packet *packet, *tmp; card = sdio_get_drvdata(func); @@ -1277,13 +1281,12 @@ static void if_sdio_remove(struct sdio_func *func) lbs_stop_card(card->priv); lbs_remove_card(card->priv); + cancel_work_sync(&card->packet_worker); + cancel_work_sync(&card->reset_worker); destroy_workqueue(card->workqueue); - while (card->packets) { - packet = card->packets; - card->packets = card->packets->next; + list_for_each_entry_safe(packet, tmp, &card->packets, list) kfree(packet); - } kfree(card); } @@ -1403,8 +1406,6 @@ static void __exit if_sdio_exit_module(void) /* Set the flag as user is removing this module. 
*/ user_rmmod = 1; - cancel_work_sync(&card_reset_work); - sdio_unregister_driver(&if_sdio_driver); } diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c index 1225fc0e3352..8690b0114e23 100644 --- a/drivers/net/wireless/marvell/libertas/if_spi.c +++ b/drivers/net/wireless/marvell/libertas/if_spi.c @@ -76,16 +76,13 @@ struct if_spi_card { static void free_if_spi_card(struct if_spi_card *card) { - struct list_head *cursor, *next; - struct if_spi_packet *packet; + struct if_spi_packet *packet, *tmp; - list_for_each_safe(cursor, next, &card->cmd_packet_list) { - packet = container_of(cursor, struct if_spi_packet, list); + list_for_each_entry_safe(packet, tmp, &card->cmd_packet_list, list) { list_del(&packet->list); kfree(packet); } - list_for_each_safe(cursor, next, &card->data_packet_list) { - packet = container_of(cursor, struct if_spi_packet, list); + list_for_each_entry_safe(packet, tmp, &card->data_packet_list, list) { list_del(&packet->list); kfree(packet); } @@ -829,11 +826,16 @@ static void if_spi_e2h(struct if_spi_card *card) goto out; /* re-enable the card event interrupt */ - spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, - ~IF_SPI_HICU_CARD_EVENT); + err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, + ~IF_SPI_HICU_CARD_EVENT); + if (err) + goto out; /* generate a card interrupt */ - spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG, IF_SPI_CIC_HOST_EVENT); + err = spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG, + IF_SPI_CIC_HOST_EVENT); + if (err) + goto out; lbs_queue_event(priv, cause & 0xff); out: diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c index 90ffe8d1e0e8..2dd635935448 100644 --- a/drivers/net/wireless/marvell/libertas/mesh.c +++ b/drivers/net/wireless/marvell/libertas/mesh.c @@ -188,8 +188,11 @@ static ssize_t anycast_mask_store(struct device *dev, uint32_t datum; int ret; + ret = kstrtouint(buf, 16, &datum); + if (ret) + return ret; + memset(&mesh_access, 0, sizeof(mesh_access)); - sscanf(buf, "%x", &datum); mesh_access.data[0] = cpu_to_le32(datum); ret = lbs_mesh_access(priv, CMD_ACT_MESH_SET_ANYCAST, &mesh_access); @@ -241,15 +244,14 @@ static ssize_t prb_rsp_limit_store(struct device *dev, int ret; unsigned long retry_limit; - memset(&mesh_access, 0, sizeof(mesh_access)); - mesh_access.data[0] = cpu_to_le32(CMD_ACT_SET); - ret = kstrtoul(buf, 10, &retry_limit); if (ret) return ret; if (retry_limit > 15) return -ENOTSUPP; + memset(&mesh_access, 0, sizeof(mesh_access)); + mesh_access.data[0] = cpu_to_le32(CMD_ACT_SET); mesh_access.data[1] = cpu_to_le32(retry_limit); ret = lbs_mesh_access(priv, CMD_ACT_MESH_SET_GET_PRB_RSP_LIMIT, @@ -285,9 +287,12 @@ static ssize_t lbs_mesh_store(struct device *dev, const char *buf, size_t count) { struct lbs_private *priv = to_net_dev(dev)->ml_priv; - int enable; + int ret, enable; + + ret = kstrtoint(buf, 16, &enable); + if (ret) + return ret; - sscanf(buf, "%x", &enable); enable = !!enable; if (enable == !!priv->mesh_dev) return count; @@ -387,11 +392,13 @@ static ssize_t bootflag_store(struct device *dev, struct device_attribute *attr, uint32_t datum; int ret; - memset(&cmd, 0, sizeof(cmd)); - ret = sscanf(buf, "%d", &datum); - if ((ret != 1) || (datum > 1)) + ret = kstrtouint(buf, 10, &datum); + if (ret) + return ret; + if (datum > 1) return -EINVAL; + memset(&cmd, 0, sizeof(cmd)); *((__le32 *)&cmd.data[0]) = cpu_to_le32(!!datum); cmd.length = cpu_to_le16(sizeof(uint32_t)); ret = lbs_mesh_config_send(priv, &cmd, 
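The mesh.c hunks replace bare sscanf() parsing of sysfs input with kstrtouint()/kstrtoul(), which reject empty strings, trailing garbage and out-of-range values, and whose return code is actually checked. A userspace approximation of that contract (negative-errno returns mimic the kernel convention; '-' sign handling is elided):

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_uint(const char *s, int base, unsigned int *res)
{
        char *end;
        unsigned long val;

        errno = 0;
        val = strtoul(s, &end, base);
        if (end == s)
                return -EINVAL;            /* no digits at all */
        if (*end != '\0' && *end != '\n')
                return -EINVAL;            /* trailing garbage */
        if (errno == ERANGE || val > UINT_MAX)
                return -ERANGE;            /* overflow */
        *res = (unsigned int)val;
        return 0;
}

int main(void)
{
        unsigned int v;

        printf("%d\n", parse_uint("1f\n", 16, &v));      /* 0, v = 0x1f */
        printf("%d\n", parse_uint("1fzz", 16, &v));      /* -EINVAL     */
        printf("%d\n", parse_uint("fffffffff", 16, &v)); /* -ERANGE     */
        return 0;
}

Accepting a single trailing newline matches kstrtouint(), since sysfs writes usually arrive newline-terminated; sscanf("%x") would happily parse "1fzz" and report success.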
CMD_ACT_MESH_CONFIG_SET, @@ -438,11 +445,14 @@ static ssize_t boottime_store(struct device *dev, uint32_t datum; int ret; - memset(&cmd, 0, sizeof(cmd)); - ret = sscanf(buf, "%d", &datum); - if ((ret != 1) || (datum > 255)) + ret = kstrtouint(buf, 10, &datum); + if (ret) + return ret; + if (datum > 255) return -EINVAL; + memset(&cmd, 0, sizeof(cmd)); + /* A too small boot time will result in the device booting into * standalone (no-host) mode before the host can take control of it, * so the change will be hard to revert. This may be a desired @@ -497,11 +507,13 @@ static ssize_t channel_store(struct device *dev, struct device_attribute *attr, uint32_t datum; int ret; - memset(&cmd, 0, sizeof(cmd)); - ret = sscanf(buf, "%d", &datum); - if (ret != 1 || datum < 1 || datum > 11) + ret = kstrtouint(buf, 10, &datum); + if (ret) + return ret; + if (datum < 1 || datum > 11) return -EINVAL; + memset(&cmd, 0, sizeof(cmd)); *((__le16 *)&cmd.data[0]) = cpu_to_le16(datum); cmd.length = cpu_to_le16(sizeof(uint16_t)); ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET, @@ -626,11 +638,14 @@ static ssize_t protocol_id_store(struct device *dev, uint32_t datum; int ret; - memset(&cmd, 0, sizeof(cmd)); - ret = sscanf(buf, "%d", &datum); - if ((ret != 1) || (datum > 255)) + ret = kstrtouint(buf, 10, &datum); + if (ret) + return ret; + if (datum > 255) return -EINVAL; + memset(&cmd, 0, sizeof(cmd)); + /* fetch all other Information Element parameters */ ret = mesh_get_default_parameters(dev, &defs); diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c index 2ea03725f188..da211372a481 100644 --- a/drivers/net/wireless/marvell/mwifiex/11h.c +++ b/drivers/net/wireless/marvell/mwifiex/11h.c @@ -287,7 +287,7 @@ void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work) mwifiex_dbg(priv->adapter, MSG, "indicating channel switch completion to kernel\n"); - mutex_lock(&priv->wdev.mtx); + wiphy_lock(priv->wdev.wiphy); cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef, 0, 0); - mutex_unlock(&priv->wdev.mtx); + wiphy_unlock(priv->wdev.wiphy); } diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c index 391793a16adc..10690e82358b 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c @@ -918,9 +918,17 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv, mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:", event_buf, len); - while (tlv_buf_left >= sizeof(*tlv_rxba)) { + while (tlv_buf_left > sizeof(*tlv_rxba)) { tlv_type = le16_to_cpu(tlv_rxba->header.type); tlv_len = le16_to_cpu(tlv_rxba->header.len); + if (size_add(sizeof(tlv_rxba->header), tlv_len) > tlv_buf_left) { + mwifiex_dbg(priv->adapter, WARN, + "TLV size (%zu) overflows event_buf buf_left=%d\n", + size_add(sizeof(tlv_rxba->header), tlv_len), + tlv_buf_left); + return; + } + if (tlv_type != TLV_TYPE_RXBA_SYNC) { mwifiex_dbg(priv->adapter, ERROR, "Wrong TLV id=0x%x\n", tlv_type); @@ -929,6 +937,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv, tlv_seq_num = le16_to_cpu(tlv_rxba->seq_num); tlv_bitmap_len = le16_to_cpu(tlv_rxba->bitmap_len); + if (size_add(sizeof(*tlv_rxba), tlv_bitmap_len) > tlv_buf_left) { + mwifiex_dbg(priv->adapter, WARN, + "TLV size (%zu) overflows event_buf buf_left=%d\n", + size_add(sizeof(*tlv_rxba), tlv_bitmap_len), + tlv_buf_left); + return; + } + mwifiex_dbg(priv->adapter, INFO, "%pM tid=%d 
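The rxba_sync fix above is a classic TLV-walk hardening: the loop condition becomes strictly greater-than so a bare header is never dereferenced past the buffer, and each encoded length is validated against the bytes actually remaining (via size_add() to avoid wraparound) before being trusted. The same shape in standalone C (a little-endian host is assumed for the literal test buffer):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr {
        uint16_t type;
        uint16_t len;    /* payload bytes after this header */
};

static void walk_tlvs(const uint8_t *buf, size_t left)
{
        while (left > sizeof(struct tlv_hdr)) {
                struct tlv_hdr hdr;

                memcpy(&hdr, buf, sizeof(hdr));   /* no alignment assumption */
                if (sizeof(hdr) + (size_t)hdr.len > left) {
                        printf("truncated TLV (type %u, len %u, left %zu)\n",
                               hdr.type, hdr.len, left);
                        return;            /* stop, do not read past the end */
                }
                printf("TLV type=%u len=%u\n", hdr.type, hdr.len);
                buf  += sizeof(hdr) + hdr.len;
                left -= sizeof(hdr) + hdr.len;
        }
}

int main(void)
{
        /* Two well-formed TLVs, then one that claims 200 payload bytes. */
        uint8_t buf[] = { 1, 0, 2, 0, 0xAA, 0xBB,
                          2, 0, 1, 0, 0xCC,
                          3, 0, 200, 0, 0xDD };

        walk_tlvs(buf, sizeof(buf));
        return 0;
}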
seq_num=%d bitmap_len=%d\n", tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num, @@ -965,8 +981,8 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv, } } - tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len); - tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba); + tlv_buf_left -= (sizeof(tlv_rxba->header) + tlv_len); + tmp = (u8 *)tlv_rxba + sizeof(tlv_rxba->header) + tlv_len; tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp; } } diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 813d1cbebe19..7a15ea8072e6 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -1835,10 +1835,11 @@ static int mwifiex_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy, */ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *dev, - struct cfg80211_beacon_data *data) + struct cfg80211_ap_update *params) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); struct mwifiex_adapter *adapter = priv->adapter; + struct cfg80211_beacon_data *data = ¶ms->beacon; mwifiex_cancel_scan(adapter); @@ -4395,6 +4396,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) WIPHY_FLAG_AP_UAPSD | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | WIPHY_FLAG_HAS_CHANNEL_SWITCH | + WIPHY_FLAG_NETNS_OK | WIPHY_FLAG_PS_ON_BY_DEFAULT; if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c index 52b18f4a774b..f9c9fec7c792 100644 --- a/drivers/net/wireless/marvell/mwifiex/debugfs.c +++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c @@ -253,8 +253,11 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf, if (!p) return -ENOMEM; - if (!priv || !priv->hist_data) - return -EFAULT; + if (!priv || !priv->hist_data) { + ret = -EFAULT; + goto free_and_exit; + } + phist_data = priv->hist_data; p += sprintf(p, "\n" @@ -309,6 +312,8 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf, ret = simple_read_from_buffer(ubuf, count, ppos, (char *)page, (unsigned long)p - page); +free_and_exit: + free_page(page); return ret; } @@ -420,7 +425,10 @@ mwifiex_regrdwr_write(struct file *file, if (IS_ERR(buf)) return PTR_ERR(buf); - sscanf(buf, "%u %x %x", ®_type, ®_offset, ®_value); + if (sscanf(buf, "%u %x %x", ®_type, ®_offset, ®_value) != 3) { + ret = -EINVAL; + goto done; + } if (reg_type == 0 || reg_offset == 0) { ret = -EINVAL; @@ -686,7 +694,10 @@ mwifiex_rdeeprom_write(struct file *file, if (IS_ERR(buf)) return PTR_ERR(buf); - sscanf(buf, "%d %d", &offset, &bytes); + if (sscanf(buf, "%d %d", &offset, &bytes) != 2) { + ret = -EINVAL; + goto done; + } if (offset == -1 || bytes == -1) { ret = -EINVAL; diff --git a/drivers/net/wireless/marvell/mwifiex/decl.h b/drivers/net/wireless/marvell/mwifiex/decl.h index 88648c062713..326ffb05d791 100644 --- a/drivers/net/wireless/marvell/mwifiex/decl.h +++ b/drivers/net/wireless/marvell/mwifiex/decl.h @@ -180,7 +180,6 @@ struct mwifiex_rxinfo { }; struct mwifiex_txinfo { - u32 status_code; u8 flags; u8 bss_num; u8 bss_type; diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index f2168fac95ed..8e6db904e5b2 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -779,7 +779,7 @@ struct mwifiex_ie_types_rxba_sync { u8 reserved; __le16 seq_num; __le16 bitmap_len; - u8 bitmap[1]; + u8 bitmap[]; } __packed; struct chan_band_param_set { diff 
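The mwifiex_histogram_read() fix routes the early '!priv || !priv->hist_data' return through a label so the scratch page allocated at the top is freed on every exit path. A minimal model of that single-exit pattern (error values are illustrative):

#include <stdio.h>
#include <stdlib.h>

static int histogram_read(const char *hist_data, char *out, size_t outlen)
{
        int ret;
        char *page = malloc(4096);

        if (!page)
                return -1;               /* nothing allocated yet */

        if (!hist_data) {
                ret = -2;                /* was: early return leaking 'page' */
                goto free_and_exit;
        }

        snprintf(page, 4096, "histogram: %s\n", hist_data);
        snprintf(out, outlen, "%s", page);
        ret = 0;

free_and_exit:
        free(page);                      /* runs on success and failure */
        return ret;
}

int main(void)
{
        char buf[128];

        printf("%d\n", histogram_read(NULL, buf, sizeof(buf)));
        printf("%d %s", histogram_read("rssi", buf, sizeof(buf)), buf);
        return 0;
}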
--git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index 7dddb4b5dea1..c9c58419c37b 100644 --- a/drivers/net/wireless/marvell/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -282,14 +282,12 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter) sleep_cfm_buf->action = cpu_to_le16(SLEEP_CONFIRM); sleep_cfm_buf->resp_ctrl = cpu_to_le16(RESP_NEEDED); - memset(&adapter->sleep_params, 0, sizeof(adapter->sleep_params)); memset(&adapter->sleep_period, 0, sizeof(adapter->sleep_period)); adapter->tx_lock_flag = false; adapter->null_pkt_interval = 0; adapter->fw_bands = 0; adapter->config_bands = 0; adapter->adhoc_start_band = 0; - adapter->scan_channels = NULL; adapter->fw_release_number = 0; adapter->fw_cap_info = 0; memset(&adapter->upld_buf, 0, sizeof(adapter->upld_buf)); diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 1cd9d20cca16..d99127dc466e 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -724,14 +724,9 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter, /* Override default firmware with manufacturing one if * manufacturing mode is enabled */ - if (mfg_mode) { - if (strlcpy(adapter->fw_name, MFG_FIRMWARE, - sizeof(adapter->fw_name)) >= - sizeof(adapter->fw_name)) { - pr_err("%s: fw_name too long!\n", __func__); - return -1; - } - } + if (mfg_mode) + strscpy(adapter->fw_name, MFG_FIRMWARE, + sizeof(adapter->fw_name)); if (req_fw_nowait) { ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name, diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index b95886e1413e..d263eae6078c 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -444,15 +444,6 @@ struct mwifiex_current_bss_params { u8 data_rates[MWIFIEX_SUPPORTED_RATES]; }; -struct mwifiex_sleep_params { - u16 sp_error; - u16 sp_offset; - u16 sp_stable_time; - u8 sp_cal_control; - u8 sp_ext_sleep_clk; - u16 sp_reserved; -}; - struct mwifiex_sleep_period { u16 period; u16 reserved; @@ -681,7 +672,6 @@ struct mwifiex_private { struct cfg80211_chan_def dfs_chandef; struct workqueue_struct *dfs_cac_workqueue; struct delayed_work dfs_cac_work; - struct timer_list dfs_chan_switch_timer; struct workqueue_struct *dfs_chan_sw_workqueue; struct delayed_work dfs_chan_sw_work; struct cfg80211_beacon_data beacon_after; @@ -844,12 +834,12 @@ struct mwifiex_if_ops { void (*cleanup_mpa_buf) (struct mwifiex_adapter *); int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *); int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *); - int (*init_fw_port) (struct mwifiex_adapter *); + void (*init_fw_port)(struct mwifiex_adapter *adapter); int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *); void (*card_reset) (struct mwifiex_adapter *); int (*reg_dump)(struct mwifiex_adapter *, char *); void (*device_dump)(struct mwifiex_adapter *); - int (*clean_pcie_ring) (struct mwifiex_adapter *adapter); + void (*clean_pcie_ring)(struct mwifiex_adapter *adapter); void (*iface_work)(struct work_struct *work); void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter); void (*deaggr_pkt)(struct mwifiex_adapter *, struct sk_buff *); @@ -888,8 +878,6 @@ struct mwifiex_adapter { struct work_struct main_work; struct workqueue_struct *rx_workqueue; struct work_struct rx_work; - struct workqueue_struct 
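The main.c hunk can drop the manual truncation check because strscpy() always NUL-terminates and reports truncation directly as -E2BIG, rather than returning the source length the way strlcpy() does; when the string is known to fit, no extra check is needed. A sketch of that contract (my_strscpy is a hypothetical userspace stand-in):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static ssize_t my_strscpy(char *dst, const char *src, size_t size)
{
        size_t len = strnlen(src, size);

        if (size == 0)
                return -E2BIG;
        if (len == size) {               /* src does not fit */
                memcpy(dst, src, size - 1);
                dst[size - 1] = '\0';
                return -E2BIG;           /* unambiguous truncation signal */
        }
        memcpy(dst, src, len + 1);       /* includes the NUL */
        return (ssize_t)len;
}

int main(void)
{
        char name[8];

        printf("%zd [%s]\n", my_strscpy(name, "mfg_fw", sizeof(name)), name);
        printf("%zd [%s]\n",
               my_strscpy(name, "a-much-too-long-name", sizeof(name)), name);
        return 0;
}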
*dfs_workqueue; - struct work_struct dfs_work; bool rx_work_enabled; bool rx_processing; bool delay_main_work; @@ -953,9 +941,7 @@ struct mwifiex_adapter { u8 fw_bands; u8 adhoc_start_band; u8 config_bands; - struct mwifiex_chan_scan_param_set *scan_channels; u8 tx_lock_flag; - struct mwifiex_sleep_params sleep_params; struct mwifiex_sleep_period sleep_period; u16 ps_mode; u32 ps_state; @@ -1155,8 +1141,10 @@ int mwifiex_process_uap_event(struct mwifiex_private *); void mwifiex_delete_all_station_list(struct mwifiex_private *priv); void mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv, const u8 *ra_addr); -void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb); -void *mwifiex_process_uap_txpd(struct mwifiex_private *, struct sk_buff *skb); +void mwifiex_process_sta_txpd(struct mwifiex_private *priv, + struct sk_buff *skb); +void mwifiex_process_uap_txpd(struct mwifiex_private *priv, + struct sk_buff *skb); int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta, bool init); int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd, struct mwifiex_scan_cmd_config *scan_cfg); diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 9a698a16a8f3..5f997becdbaa 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -189,6 +189,8 @@ static int mwifiex_pcie_probe_of(struct device *dev) } static void mwifiex_pcie_work(struct work_struct *work); +static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter); +static int mwifiex_pcie_delete_evtbd_ring(struct mwifiex_adapter *adapter); static int mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb, @@ -220,12 +222,19 @@ static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter, /* * This function writes data into PCIE card register. */ -static int mwifiex_write_reg(struct mwifiex_adapter *adapter, int reg, u32 data) +static inline void +mwifiex_write_reg(struct mwifiex_adapter *adapter, int reg, u32 data) { struct pcie_service_card *card = adapter->card; iowrite32(data, card->pci_mmap1 + reg); +} +/* Non-void wrapper needed for read_poll_timeout(). */ +static inline int +mwifiex_write_reg_rpt(struct mwifiex_adapter *adapter, int reg, u32 data) +{ + mwifiex_write_reg(adapter, reg, data); return 0; } @@ -656,12 +665,12 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) * appears to ignore or miss our wakeup request, so we continue trying * until we receive an interrupt from the card. */ - if (read_poll_timeout(mwifiex_write_reg, retval, + if (read_poll_timeout(mwifiex_write_reg_rpt, retval, READ_ONCE(adapter->int_status) != 0, 500, 500 * N_WAKEUP_TRIES_SHORT_INTERVAL, false, adapter, reg->fw_status, FIRMWARE_READY_PCIE)) { - if (read_poll_timeout(mwifiex_write_reg, retval, + if (read_poll_timeout(mwifiex_write_reg_rpt, retval, READ_ONCE(adapter->int_status) != 0, 10000, 10000 * N_WAKEUP_TRIES_LONG_INTERVAL, false, @@ -701,24 +710,12 @@ static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter) * The host interrupt mask is read, the disable bit is reset and * written back to the card host interrupt mask register. 
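mwifiex_write_reg() becomes void because iowrite32() cannot fail, but read_poll_timeout() requires an int-returning op so it can abort the loop on I/O errors; hence the one-line mwifiex_write_reg_rpt() wrapper. A standalone model of that polling contract (function names and the microsecond bookkeeping are illustrative):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

typedef int (*poll_op_t)(void *ctx);
typedef bool (*done_t)(void *ctx);

/* Run op(), then test the condition; sleep between attempts. The op's
 * int return lets the loop bail out early on a real I/O failure. */
static int poll_timeout(poll_op_t op, done_t done, void *ctx,
                        long sleep_us, long timeout_us)
{
        long waited = 0;

        for (;;) {
                int err = op(ctx);

                if (err)
                        return err;         /* propagate I/O failure */
                if (done(ctx))
                        return 0;
                if (waited >= timeout_us)
                        return -ETIMEDOUT;
                struct timespec ts = { 0, sleep_us * 1000L };
                nanosleep(&ts, NULL);
                waited += sleep_us;
        }
}

static int fake_wakeup_write(void *ctx) { (*(int *)ctx)++; return 0; }
static bool got_interrupt(void *ctx)    { return *(int *)ctx >= 5; }

int main(void)
{
        int tries = 0;

        printf("%d after %d tries\n",
               poll_timeout(fake_wakeup_write, got_interrupt, &tries,
                            500, 5000),
               tries);
        return 0;
}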
*/ -static int mwifiex_pcie_disable_host_int(struct mwifiex_adapter *adapter) +static void mwifiex_pcie_disable_host_int(struct mwifiex_adapter *adapter) { - if (mwifiex_pcie_ok_to_access_hw(adapter)) { - if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK, - 0x00000000)) { - mwifiex_dbg(adapter, ERROR, - "Disable host interrupt failed\n"); - return -1; - } - } + if (mwifiex_pcie_ok_to_access_hw(adapter)) + mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK, 0x00000000); atomic_set(&adapter->tx_hw_pending, 0); - return 0; -} - -static void mwifiex_pcie_disable_host_int_noerr(struct mwifiex_adapter *adapter) -{ - WARN_ON(mwifiex_pcie_disable_host_int(adapter)); } /* @@ -729,15 +726,9 @@ static void mwifiex_pcie_disable_host_int_noerr(struct mwifiex_adapter *adapter) */ static int mwifiex_pcie_enable_host_int(struct mwifiex_adapter *adapter) { - if (mwifiex_pcie_ok_to_access_hw(adapter)) { + if (mwifiex_pcie_ok_to_access_hw(adapter)) /* Simply write the mask to the register */ - if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK, - HOST_INTR_MASK)) { - mwifiex_dbg(adapter, ERROR, - "Enable host interrupt failed\n"); - return -1; - } - } + mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK, HOST_INTR_MASK); return 0; } @@ -792,14 +783,15 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter) if (!skb) { mwifiex_dbg(adapter, ERROR, "Unable to allocate skb for RX ring.\n"); - kfree(card->rxbd_ring_vbase); return -ENOMEM; } if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_RX_DATA_BUF_SIZE, - DMA_FROM_DEVICE)) - return -1; + DMA_FROM_DEVICE)) { + kfree_skb(skb); + return -ENOMEM; + } buf_pa = MWIFIEX_SKB_DMA_ADDR(skb); @@ -849,7 +841,6 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter) if (!skb) { mwifiex_dbg(adapter, ERROR, "Unable to allocate skb for EVENT buf.\n"); - kfree(card->evtbd_ring_vbase); return -ENOMEM; } skb_put(skb, MAX_EVENT_SIZE); @@ -857,8 +848,7 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter) if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE, DMA_FROM_DEVICE)) { kfree_skb(skb); - kfree(card->evtbd_ring_vbase); - return -1; + return -ENOMEM; } buf_pa = MWIFIEX_SKB_DMA_ADDR(skb); @@ -1058,6 +1048,7 @@ static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter) */ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter) { + int ret; struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; @@ -1096,7 +1087,10 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter) (u32)((u64)card->rxbd_ring_pbase >> 32), card->rxbd_ring_size); - return mwifiex_init_rxq_ring(adapter); + ret = mwifiex_init_rxq_ring(adapter); + if (ret) + mwifiex_pcie_delete_rxbd_ring(adapter); + return ret; } /* @@ -1127,6 +1121,7 @@ static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter) */ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter) { + int ret; struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; @@ -1161,7 +1156,10 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter) (u32)((u64)card->evtbd_ring_pbase >> 32), card->evtbd_ring_size); - return mwifiex_pcie_init_evt_ring(adapter); + ret = mwifiex_pcie_init_evt_ring(adapter); + if (ret) + mwifiex_pcie_delete_evtbd_ring(adapter); + return ret; } /* @@ -1294,7 +1292,7 @@ static int mwifiex_pcie_delete_sleep_cookie_buf(struct mwifiex_adapter *adapter) * This function defined as handler is 
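The ring-setup fixes above straighten out allocation ownership: mwifiex_init_rxq_ring() and mwifiex_pcie_init_evt_ring() no longer free the descriptor ring they did not allocate (on a mapping failure they only drop the skb they just created), while the create_*_ring() callers delete the whole ring when the init helper fails. A compact model of that rule (names illustrative):

#include <stdio.h>
#include <stdlib.h>

struct ring {
        void **bufs;
        int n;
};

static int fill_ring(struct ring *r)
{
        for (int i = 0; i < r->n; i++) {
                void *buf = malloc(2048);

                if (!buf)
                        return -1;   /* do NOT free r->bufs here: not ours */
                r->bufs[i] = buf;
        }
        return 0;
}

static void delete_ring(struct ring *r)
{
        for (int i = 0; i < r->n; i++)
                free(r->bufs[i]);    /* free(NULL) is a no-op */
        free(r->bufs);
        r->bufs = NULL;
}

static int create_ring(struct ring *r, int n)
{
        r->n = n;
        r->bufs = calloc(n, sizeof(*r->bufs));
        if (!r->bufs)
                return -1;
        if (fill_ring(r)) {
                delete_ring(r);      /* creator unwinds on helper failure */
                return -1;
        }
        return 0;
}

int main(void)
{
        struct ring r;

        if (create_ring(&r, 4))
                return 1;
        puts("ring ready");
        delete_ring(&r);
        return 0;
}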
also called while cleaning TXRX * during disconnect/ bss stop. */ -static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter) +static void mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; @@ -1303,14 +1301,9 @@ static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter) /* write pointer already set at last send * send dnld-rdy intr again, wait for completion. */ - if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, - CPU_INTR_DNLD_RDY)) { - mwifiex_dbg(adapter, ERROR, - "failed to assert dnld-rdy interrupt.\n"); - return -1; - } + mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, + CPU_INTR_DNLD_RDY); } - return 0; } /* @@ -1420,7 +1413,6 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; u32 wrindx, num_tx_buffs, rx_val; - int ret; dma_addr_t buf_pa; struct mwifiex_pcie_buf_desc *desc = NULL; struct mwifiex_pfu_buf_desc *desc2 = NULL; @@ -1489,13 +1481,8 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, rx_val = card->rxbd_rdptr & reg->rx_wrap_mask; /* Write the TX ring write pointer in to reg->tx_wrptr */ - if (mwifiex_write_reg(adapter, reg->tx_wrptr, - card->txbd_wrptr | rx_val)) { - mwifiex_dbg(adapter, ERROR, - "SEND DATA: failed to write reg->tx_wrptr\n"); - ret = -1; - goto done_unmap; - } + mwifiex_write_reg(adapter, reg->tx_wrptr, + card->txbd_wrptr | rx_val); /* The firmware (latest version 15.68.19.p21) of the 88W8897 PCIe+USB card * seems to crash randomly after setting the TX ring write pointer when @@ -1512,13 +1499,8 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, adapter->data_sent = false; } else { /* Send the TX ready interrupt */ - if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, - CPU_INTR_DNLD_RDY)) { - mwifiex_dbg(adapter, ERROR, - "SEND DATA: failed to assert dnld-rdy interrupt.\n"); - ret = -1; - goto done_unmap; - } + mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, + CPU_INTR_DNLD_RDY); } mwifiex_dbg(adapter, DATA, "info: SEND DATA: Updated <Rd: %#x, Wr:\t" @@ -1529,24 +1511,12 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, "info: TX Ring full, can't send packets to fw\n"); adapter->data_sent = true; /* Send the TX ready interrupt */ - if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, - CPU_INTR_DNLD_RDY)) - mwifiex_dbg(adapter, ERROR, - "SEND DATA: failed to assert door-bell intr\n"); + mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, + CPU_INTR_DNLD_RDY); return -EBUSY; } return -EINPROGRESS; -done_unmap: - mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE); - card->tx_buf_list[wrindx] = NULL; - atomic_dec(&adapter->tx_hw_pending); - if (reg->pfu_enabled) - memset(desc2, 0, sizeof(*desc2)); - else - memset(desc, 0, sizeof(*desc)); - - return ret; } /* @@ -1666,13 +1636,8 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) tx_val = card->txbd_wrptr & reg->tx_wrap_mask; /* Write the RX ring read pointer in to reg->rx_rdptr */ - if (mwifiex_write_reg(adapter, reg->rx_rdptr, - card->rxbd_rdptr | tx_val)) { - mwifiex_dbg(adapter, DATA, - "RECV DATA: failed to write reg->rx_rdptr\n"); - ret = -1; - goto done; - } + mwifiex_write_reg(adapter, reg->rx_rdptr, + card->rxbd_rdptr | tx_val); /* Read the RX ring Write pointer set by firmware */ if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) { @@ -1715,43 +1680,18 @@ 
mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) /* Write the lower 32bits of the physical address to low command * address scratch register */ - if (mwifiex_write_reg(adapter, reg->cmd_addr_lo, (u32)buf_pa)) { - mwifiex_dbg(adapter, ERROR, - "%s: failed to write download command to boot code.\n", - __func__); - mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE); - return -1; - } + mwifiex_write_reg(adapter, reg->cmd_addr_lo, (u32)buf_pa); /* Write the upper 32bits of the physical address to high command * address scratch register */ - if (mwifiex_write_reg(adapter, reg->cmd_addr_hi, - (u32)((u64)buf_pa >> 32))) { - mwifiex_dbg(adapter, ERROR, - "%s: failed to write download command to boot code.\n", - __func__); - mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE); - return -1; - } + mwifiex_write_reg(adapter, reg->cmd_addr_hi, (u32)((u64)buf_pa >> 32)); /* Write the command length to cmd_size scratch register */ - if (mwifiex_write_reg(adapter, reg->cmd_size, skb->len)) { - mwifiex_dbg(adapter, ERROR, - "%s: failed to write command len to cmd_size scratch reg\n", - __func__); - mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE); - return -1; - } + mwifiex_write_reg(adapter, reg->cmd_size, skb->len); /* Ring the door bell */ - if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, - CPU_INTR_DOOR_BELL)) { - mwifiex_dbg(adapter, ERROR, - "%s: failed to assert door-bell intr\n", __func__); - mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE); - return -1; - } + mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_DOOR_BELL); return 0; } @@ -1759,20 +1699,14 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) /* This function init rx port in firmware which in turn enables to receive data * from device before transmitting any packet. 
*/ -static int mwifiex_pcie_init_fw_port(struct mwifiex_adapter *adapter) +static void mwifiex_pcie_init_fw_port(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; int tx_wrap = card->txbd_wrptr & reg->tx_wrap_mask; /* Write the RX ring read pointer in to reg->rx_rdptr */ - if (mwifiex_write_reg(adapter, reg->rx_rdptr, card->rxbd_rdptr | - tx_wrap)) { - mwifiex_dbg(adapter, ERROR, - "RECV DATA: failed to write reg->rx_rdptr\n"); - return -1; - } - return 0; + mwifiex_write_reg(adapter, reg->rx_rdptr, card->rxbd_rdptr | tx_wrap); } /* This function downloads commands to the device @@ -1782,7 +1716,6 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; - int ret = 0; dma_addr_t cmd_buf_pa, cmdrsp_buf_pa; u8 *payload = (u8 *)skb->data; @@ -1832,63 +1765,29 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) cmdrsp_buf_pa = MWIFIEX_SKB_DMA_ADDR(card->cmdrsp_buf); /* Write the lower 32bits of the cmdrsp buffer physical address */ - if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, - (u32)cmdrsp_buf_pa)) { - mwifiex_dbg(adapter, ERROR, - "Failed to write download cmd to boot code.\n"); - ret = -1; - goto done; - } + mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, + (u32)cmdrsp_buf_pa); + /* Write the upper 32bits of the cmdrsp buffer physical address */ - if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, - (u32)((u64)cmdrsp_buf_pa >> 32))) { - mwifiex_dbg(adapter, ERROR, - "Failed to write download cmd to boot code.\n"); - ret = -1; - goto done; - } + mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, + (u32)((u64)cmdrsp_buf_pa >> 32)); } cmd_buf_pa = MWIFIEX_SKB_DMA_ADDR(card->cmd_buf); + /* Write the lower 32bits of the physical address to reg->cmd_addr_lo */ - if (mwifiex_write_reg(adapter, reg->cmd_addr_lo, - (u32)cmd_buf_pa)) { - mwifiex_dbg(adapter, ERROR, - "Failed to write download cmd to boot code.\n"); - ret = -1; - goto done; - } + mwifiex_write_reg(adapter, reg->cmd_addr_lo, (u32)cmd_buf_pa); + /* Write the upper 32bits of the physical address to reg->cmd_addr_hi */ - if (mwifiex_write_reg(adapter, reg->cmd_addr_hi, - (u32)((u64)cmd_buf_pa >> 32))) { - mwifiex_dbg(adapter, ERROR, - "Failed to write download cmd to boot code.\n"); - ret = -1; - goto done; - } + mwifiex_write_reg(adapter, reg->cmd_addr_hi, + (u32)((u64)cmd_buf_pa >> 32)); /* Write the command length to reg->cmd_size */ - if (mwifiex_write_reg(adapter, reg->cmd_size, - card->cmd_buf->len)) { - mwifiex_dbg(adapter, ERROR, - "Failed to write cmd len to reg->cmd_size\n"); - ret = -1; - goto done; - } + mwifiex_write_reg(adapter, reg->cmd_size, card->cmd_buf->len); /* Ring the door bell */ - if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, - CPU_INTR_DOOR_BELL)) { - mwifiex_dbg(adapter, ERROR, - "Failed to assert door-bell intr\n"); - ret = -1; - goto done; - } - -done: - if (ret) - adapter->cmd_sent = false; + mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_DOOR_BELL); return 0; } @@ -1932,13 +1831,9 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) MWIFIEX_SKB_DMA_ADDR(skb), MWIFIEX_SLEEP_COOKIE_SIZE, DMA_FROM_DEVICE); - if (mwifiex_write_reg(adapter, - PCIE_CPU_INT_EVENT, - CPU_INTR_SLEEP_CFM_DONE)) { - mwifiex_dbg(adapter, ERROR, - "Write register failed\n"); - return -1; - } + mwifiex_write_reg(adapter, + PCIE_CPU_INT_EVENT, + 
CPU_INTR_SLEEP_CFM_DONE); mwifiex_delay_for_sleep_cookie(adapter, MWIFIEX_MAX_DELAY_COUNT); mwifiex_unmap_pci_memory(adapter, skb, @@ -1971,18 +1866,11 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) /* Clear the cmd-rsp buffer address in scratch registers. This will prevent firmware from writing to the same response buffer again. */ - if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, 0)) { - mwifiex_dbg(adapter, ERROR, - "cmd_done: failed to clear cmd_rsp_addr_lo\n"); - return -1; - } + mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, 0); + /* Write the upper 32bits of the cmdrsp buffer physical address */ - if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, 0)) { - mwifiex_dbg(adapter, ERROR, - "cmd_done: failed to clear cmd_rsp_addr_hi\n"); - return -1; - } + mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, 0); } return 0; @@ -2089,12 +1977,8 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter) we need to find a better method of managing these buffers. */ } else { - if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, - CPU_INTR_EVENT_DONE)) { - mwifiex_dbg(adapter, ERROR, - "Write register failed\n"); - return -1; - } + mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, + CPU_INTR_EVENT_DONE); } return 0; @@ -2108,7 +1992,6 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter, { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; - int ret = 0; u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK; u32 wrptr; struct mwifiex_evt_buf_desc *desc; @@ -2160,18 +2043,11 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter, card->evtbd_rdptr, wrptr); /* Write the event ring read pointer in to reg->evt_rdptr */ - if (mwifiex_write_reg(adapter, reg->evt_rdptr, - card->evtbd_rdptr)) { - mwifiex_dbg(adapter, ERROR, - "event_complete: failed to read reg->evt_rdptr\n"); - return -1; - } + mwifiex_write_reg(adapter, reg->evt_rdptr, card->evtbd_rdptr); mwifiex_dbg(adapter, EVENT, "info: Check Events Again\n"); - ret = mwifiex_pcie_process_event_ready(adapter); - - return ret; + return mwifiex_pcie_process_event_ready(adapter); } /* Combo firmware image is a combination of @@ -2304,11 +2180,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, "info: Downloading FW image (%d bytes)\n", firmware_len); - if (mwifiex_pcie_disable_host_int(adapter)) { - mwifiex_dbg(adapter, ERROR, - "%s: Disabling interrupts failed.\n", __func__); - return -1; - } + mwifiex_pcie_disable_host_int(adapter); skb = dev_alloc_skb(MWIFIEX_UPLD_SIZE); if (!skb) { @@ -2462,21 +2334,12 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num) u32 tries; /* Mask spurios interrupts */ - if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS_MASK, - HOST_INTR_MASK)) { - mwifiex_dbg(adapter, ERROR, - "Write register failed\n"); - return -1; - } + mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS_MASK, HOST_INTR_MASK); mwifiex_dbg(adapter, INFO, "Setting driver ready signature\n"); - if (mwifiex_write_reg(adapter, reg->drv_rdy, - FIRMWARE_READY_PCIE)) { - mwifiex_dbg(adapter, ERROR, - "Failed to write driver ready signature\n"); - return -1; - } + + mwifiex_write_reg(adapter, reg->drv_rdy, FIRMWARE_READY_PCIE); /* Wait for firmware initialization event */ for (tries = 0; tries < poll_num; tries++) { @@ -2562,12 +2425,7 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter, mwifiex_pcie_disable_host_int(adapter); /* Clear the pending 
interrupts */ - if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS, - ~pcie_ireg)) { - mwifiex_dbg(adapter, ERROR, - "Write register failed\n"); - return; - } + mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS, ~pcie_ireg); } if (!adapter->pps_uapsd_mode && @@ -2662,13 +2520,9 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) } if ((pcie_ireg != 0xFFFFFFFF) && (pcie_ireg)) { - if (mwifiex_write_reg(adapter, - PCIE_HOST_INT_STATUS, - ~pcie_ireg)) { - mwifiex_dbg(adapter, ERROR, - "Write register failed\n"); - return -1; - } + mwifiex_write_reg(adapter, + PCIE_HOST_INT_STATUS, + ~pcie_ireg); if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP) { adapter->ps_state = PS_STATE_AWAKE; @@ -2792,7 +2646,7 @@ mwifiex_pcie_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf) static enum rdwr_status mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag) { - int ret, tries; + int tries; u8 ctrl_data; u32 fw_status; struct pcie_service_card *card = adapter->card; @@ -2801,13 +2655,7 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag) if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status)) return RDWR_STATUS_FAILURE; - ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, - reg->fw_dump_host_ready); - if (ret) { - mwifiex_dbg(adapter, ERROR, - "PCIE write err\n"); - return RDWR_STATUS_FAILURE; - } + mwifiex_write_reg(adapter, reg->fw_dump_ctrl, reg->fw_dump_host_ready); for (tries = 0; tries < MAX_POLL_TRIES; tries++) { mwifiex_read_reg_byte(adapter, reg->fw_dump_ctrl, &ctrl_data); @@ -2818,13 +2666,8 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag) if (ctrl_data != reg->fw_dump_host_ready) { mwifiex_dbg(adapter, WARN, "The ctrl reg was changed, re-try again!\n"); - ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, - reg->fw_dump_host_ready); - if (ret) { - mwifiex_dbg(adapter, ERROR, - "PCIE write err\n"); - return RDWR_STATUS_FAILURE; - } + mwifiex_write_reg(adapter, reg->fw_dump_ctrl, + reg->fw_dump_host_ready); } usleep_range(100, 200); } @@ -2843,7 +2686,6 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) u8 idx, i, read_reg, doneflag = 0; enum rdwr_status stat; u32 memory_size; - int ret; if (!card->pcie.can_dump_fw) return; @@ -2897,12 +2739,8 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) if (memory_size == 0) { mwifiex_dbg(adapter, MSG, "Firmware dump Finished!\n"); - ret = mwifiex_write_reg(adapter, creg->fw_dump_ctrl, - creg->fw_dump_read_done); - if (ret) { - mwifiex_dbg(adapter, ERROR, "PCIE write err\n"); - return; - } + mwifiex_write_reg(adapter, creg->fw_dump_ctrl, + creg->fw_dump_read_done); break; } @@ -3188,9 +3026,7 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter) if (fw_status == FIRMWARE_READY_PCIE) { mwifiex_dbg(adapter, INFO, "Clearing driver ready signature\n"); - if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000)) - mwifiex_dbg(adapter, ERROR, - "Failed to write driver not-ready signature\n"); + mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000); } pci_disable_device(pdev); @@ -3395,8 +3231,7 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter) const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; struct pci_dev *pdev = card->dev; - if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000)) - mwifiex_dbg(adapter, ERROR, "Failed to write driver not-ready signature\n"); + mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000); pci_clear_master(pdev); @@ -3414,7 +3249,7 @@ static 
struct mwifiex_if_ops pcie_ops = { .register_dev = mwifiex_register_dev, .unregister_dev = mwifiex_unregister_dev, .enable_int = mwifiex_pcie_enable_host_int, - .disable_int = mwifiex_pcie_disable_host_int_noerr, + .disable_int = mwifiex_pcie_disable_host_int, .process_int_status = mwifiex_process_int_status, .host_to_card = mwifiex_pcie_host_to_card, .wakeup = mwifiex_pm_wakeup_card, @@ -3440,3 +3275,9 @@ MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell WiFi-Ex PCI-Express Driver version " PCIE_VERSION); MODULE_VERSION(PCIE_VERSION); MODULE_LICENSE("GPL v2"); +MODULE_FIRMWARE(PCIE8766_DEFAULT_FW_NAME); +MODULE_FIRMWARE(PCIE8897_DEFAULT_FW_NAME); +MODULE_FIRMWARE(PCIE8897_A0_FW_NAME); +MODULE_FIRMWARE(PCIE8897_B0_FW_NAME); +MODULE_FIRMWARE(PCIEUART8997_FW_NAME_V4); +MODULE_FIRMWARE(PCIEUSB8997_FW_NAME_V4); diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 644b1e134b01..72904c275461 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -612,7 +612,6 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv, struct mwifiex_adapter *adapter = priv->adapter; int ret = 0; struct mwifiex_chan_scan_param_set *tmp_chan_list; - struct mwifiex_chan_scan_param_set *start_chan; u32 tlv_idx, rates_size, cmd_no; u32 total_scan_time; u32 done_early; @@ -643,7 +642,6 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv, total_scan_time = 0; radio_type = 0; chan_tlv_out->header.len = 0; - start_chan = tmp_chan_list; done_early = false; /* @@ -750,8 +748,6 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv, rates_size = mwifiex_append_rate_tlv(priv, scan_cfg_out, radio_type); - priv->adapter->scan_channels = start_chan; - /* Send the scan command to the firmware with the specified cfg */ if (priv->adapter->ext_scan) @@ -828,7 +824,6 @@ mwifiex_config_scan(struct mwifiex_private *priv, u8 ssid_filter; struct mwifiex_ie_types_htcap *ht_cap; struct mwifiex_ie_types_bss_mode *bss_mode; - const u8 zero_mac[6] = {0, 0, 0, 0, 0, 0}; /* The tlv_buf_len is calculated for each scan command. 
The TLVs added in this routine will be preserved since the routine that sends the @@ -966,7 +961,7 @@ mwifiex_config_scan(struct mwifiex_private *priv, sizeof(struct mwifiex_ie_types_scan_chan_gap); } - if (!ether_addr_equal(user_scan_in->random_mac, zero_mac)) { + if (!is_zero_ether_addr(user_scan_in->random_mac)) { random_mac_tlv = (void *)tlv_pos; random_mac_tlv->header.type = cpu_to_le16(TLV_TYPE_RANDOM_MAC); diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index a24bd40dd41a..6462a0ffe698 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -1083,17 +1083,17 @@ cont: "info: SDIO FUNC1 IO port: %#x\n", adapter->ioport); /* Set Host interrupt reset to read to clear */ - if (!mwifiex_read_reg(adapter, card->reg->host_int_rsr_reg, &reg)) - mwifiex_write_reg(adapter, card->reg->host_int_rsr_reg, - reg | card->reg->sdio_int_mask); - else + if (mwifiex_read_reg(adapter, card->reg->host_int_rsr_reg, &reg)) + return -1; + if (mwifiex_write_reg(adapter, card->reg->host_int_rsr_reg, + reg | card->reg->sdio_int_mask)) return -1; /* Dnld/Upld ready set to auto reset */ - if (!mwifiex_read_reg(adapter, card->reg->card_misc_cfg_reg, &reg)) - mwifiex_write_reg(adapter, card->reg->card_misc_cfg_reg, - reg | AUTO_RE_ENABLE_INT); - else + if (mwifiex_read_reg(adapter, card->reg->card_misc_cfg_reg, &reg)) + return -1; + if (mwifiex_write_reg(adapter, card->reg->card_misc_cfg_reg, + reg | AUTO_RE_ENABLE_INT)) return -1; return 0; @@ -1556,7 +1556,7 @@ done: } /* - * This function decode sdio aggreation pkt. + * This function decodes sdio aggregation pkt. * * Based on the data block size and pkt_len, * skb data will be decoded to few packets. @@ -2266,7 +2266,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, ret = mwifiex_write_data_to_card(adapter, card->mpa_tx.buf, card->mpa_tx.buf_len, mport); - /* Save the last multi port tx aggreagation info to debug log */ + /* Save the last multi port tx aggregation info to debug log.
*/ index = adapter->dbg.last_sdio_mp_index; index = (index + 1) % MWIFIEX_DBG_SDIO_MP_NUM; adapter->dbg.last_sdio_mp_index = index; @@ -2525,7 +2525,8 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter) mwifiex_read_reg(adapter, card->reg->host_int_status_reg, &sdio_ireg); /* Get SDIO ioport */ - mwifiex_init_sdio_ioport(adapter); + if (mwifiex_init_sdio_ioport(adapter)) + return -EIO; /* Initialize SDIO variables in card */ card->mp_rd_bitmap = 0; @@ -2554,20 +2555,11 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter) if (!card->mp_regs) return -ENOMEM; - /* Allocate skb pointer buffers */ - card->mpa_rx.skb_arr = kcalloc(card->mp_agg_pkt_limit, sizeof(void *), - GFP_KERNEL); - if (!card->mpa_rx.skb_arr) { - kfree(card->mp_regs); - return -ENOMEM; - } - card->mpa_rx.len_arr = kcalloc(card->mp_agg_pkt_limit, sizeof(*card->mpa_rx.len_arr), GFP_KERNEL); if (!card->mpa_rx.len_arr) { kfree(card->mp_regs); - kfree(card->mpa_rx.skb_arr); return -ENOMEM; } @@ -2622,7 +2614,6 @@ static void mwifiex_cleanup_sdio(struct mwifiex_adapter *adapter) cancel_work_sync(&card->work); kfree(card->mp_regs); - kfree(card->mpa_rx.skb_arr); kfree(card->mpa_rx.len_arr); kfree(card->mpa_tx.buf); kfree(card->mpa_rx.buf); @@ -3141,7 +3132,8 @@ static void mwifiex_sdio_up_dev(struct mwifiex_adapter *adapter) */ mwifiex_read_reg(adapter, card->reg->host_int_status_reg, &sdio_ireg); - mwifiex_init_sdio_ioport(adapter); + if (mwifiex_init_sdio_ioport(adapter)) + dev_err(&card->func->dev, "error enabling SDIO port\n"); } static struct mwifiex_if_ops sdio_ops = { diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h index ae94c172310f..b86a9263a6a8 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.h +++ b/drivers/net/wireless/marvell/mwifiex/sdio.h @@ -164,10 +164,7 @@ struct mwifiex_sdio_mpa_rx { u32 pkt_cnt; u32 ports; u16 start_port; - - struct sk_buff **skb_arr; u32 *len_arr; - u8 enabled; u32 buf_size; u32 pkt_aggr_limit; @@ -372,7 +369,6 @@ static inline void mp_rx_aggr_setup(struct sdio_mmc_card *card, else card->mpa_rx.ports |= 1 << (card->mpa_rx.pkt_cnt + 1); } - card->mpa_rx.skb_arr[card->mpa_rx.pkt_cnt] = NULL; card->mpa_rx.len_arr[card->mpa_rx.pkt_cnt] = rx_len; card->mpa_rx.pkt_cnt++; } diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c index 13659b02ba88..257737137cd7 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c @@ -86,12 +86,23 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv, rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length); rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off; - if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header, - sizeof(bridge_tunnel_header))) || - (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header, - sizeof(rfc1042_header)) && - ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP && - ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) { + if (sizeof(rx_pkt_hdr->eth803_hdr) + sizeof(rfc1042_header) + + rx_pkt_off > skb->len) { + mwifiex_dbg(priv->adapter, ERROR, + "wrong rx packet offset: len=%d, rx_pkt_off=%d\n", + skb->len, rx_pkt_off); + priv->stats.rx_dropped++; + dev_kfree_skb_any(skb); + return -1; + } + + if (sizeof(*rx_pkt_hdr) + rx_pkt_off <= skb->len && + ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header, + sizeof(bridge_tunnel_header))) || + (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header, + sizeof(rfc1042_header)) && + 
ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP && + ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX))) { /* * Replace the 803 header and rfc1042 header (llc/snap) with an * EthernetII header, keep the src/dst and snap_type @@ -194,7 +205,8 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv, rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset; - if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) { + if ((rx_pkt_offset + rx_pkt_length) > skb->len || + sizeof(rx_pkt_hdr->eth803_hdr) + rx_pkt_offset > skb->len) { mwifiex_dbg(adapter, ERROR, "wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n", skb->len, rx_pkt_offset, rx_pkt_length); diff --git a/drivers/net/wireless/marvell/mwifiex/sta_tx.c b/drivers/net/wireless/marvell/mwifiex/sta_tx.c index 13c0e67ededf..70c2790b8e35 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_tx.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_tx.c @@ -29,8 +29,8 @@ * - Priority specific Tx control * - Flags */ -void *mwifiex_process_sta_txpd(struct mwifiex_private *priv, - struct sk_buff *skb) +void mwifiex_process_sta_txpd(struct mwifiex_private *priv, + struct sk_buff *skb) { struct mwifiex_adapter *adapter = priv->adapter; struct txpd *local_tx_pd; @@ -39,15 +39,6 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv, u16 pkt_type, pkt_offset; int hroom = adapter->intf_hdr_len; - if (!skb->len) { - mwifiex_dbg(adapter, ERROR, - "Tx: bad packet length: %d\n", skb->len); - tx_info->status_code = -1; - return skb->data; - } - - BUG_ON(skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN); - pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0; pad = ((uintptr_t)skb->data - (sizeof(*local_tx_pd) + hroom)) & @@ -109,8 +100,6 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv, if (!local_tx_pd->tx_control) /* TxCtrl set by user or default */ local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl); - - return skb->data; } /* diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c index 97bb87c3676b..6c60621b6ccc 100644 --- a/drivers/net/wireless/marvell/mwifiex/tdls.c +++ b/drivers/net/wireless/marvell/mwifiex/tdls.c @@ -735,6 +735,7 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, int ret; u16 capab; struct ieee80211_ht_cap *ht_cap; + unsigned int extra; u8 radio, *pos; capab = priv->curr_bss_params.bss_descriptor.cap_info_bitmap; @@ -753,7 +754,10 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, switch (action_code) { case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: - skb_put(skb, sizeof(mgmt->u.action.u.tdls_discover_resp) + 1); + /* See the layout of 'struct ieee80211_mgmt'. 
*/ + extra = sizeof(mgmt->u.action.u.tdls_discover_resp) + + sizeof(mgmt->u.action.category); + skb_put(skb, extra); mgmt->u.action.category = WLAN_CATEGORY_PUBLIC; mgmt->u.action.u.tdls_discover_resp.action_code = WLAN_PUB_ACTION_TDLS_DISCOVER_RES; @@ -762,8 +766,7 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, mgmt->u.action.u.tdls_discover_resp.capability = cpu_to_le16(capab); /* move back for addr4 */ - memmove(pos + ETH_ALEN, &mgmt->u.action.category, - sizeof(mgmt->u.action.u.tdls_discover_resp)); + memmove(pos + ETH_ALEN, &mgmt->u.action, extra); /* init address 4 */ eth_broadcast_addr(pos); diff --git a/drivers/net/wireless/marvell/mwifiex/txrx.c b/drivers/net/wireless/marvell/mwifiex/txrx.c index 54c204608dab..bd91678d26b4 100644 --- a/drivers/net/wireless/marvell/mwifiex/txrx.c +++ b/drivers/net/wireless/marvell/mwifiex/txrx.c @@ -72,13 +72,18 @@ EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet); int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb, struct mwifiex_tx_param *tx_param) { - int hroom, ret = -1; + int hroom, ret; struct mwifiex_adapter *adapter = priv->adapter; - u8 *head_ptr; struct txpd *local_tx_pd = NULL; struct mwifiex_sta_node *dest_node; struct ethhdr *hdr = (void *)skb->data; + if (unlikely(!skb->len || + skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN)) { + ret = -EINVAL; + goto out; + } + hroom = adapter->intf_hdr_len; if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) { @@ -88,33 +93,31 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb, dest_node->stats.tx_packets++; } - head_ptr = mwifiex_process_uap_txpd(priv, skb); + mwifiex_process_uap_txpd(priv, skb); } else { - head_ptr = mwifiex_process_sta_txpd(priv, skb); + mwifiex_process_sta_txpd(priv, skb); } - if ((adapter->data_sent || adapter->tx_lock_flag) && head_ptr) { + if (adapter->data_sent || adapter->tx_lock_flag) { skb_queue_tail(&adapter->tx_data_q, skb); atomic_inc(&adapter->tx_queued); return 0; } - if (head_ptr) { - if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) - local_tx_pd = (struct txpd *)(head_ptr + hroom); - if (adapter->iface_type == MWIFIEX_USB) { - ret = adapter->if_ops.host_to_card(adapter, - priv->usb_port, - skb, tx_param); - } else { - ret = adapter->if_ops.host_to_card(adapter, - MWIFIEX_TYPE_DATA, - skb, tx_param); - } + if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) + local_tx_pd = (struct txpd *)(skb->data + hroom); + if (adapter->iface_type == MWIFIEX_USB) { + ret = adapter->if_ops.host_to_card(adapter, + priv->usb_port, + skb, tx_param); + } else { + ret = adapter->if_ops.host_to_card(adapter, + MWIFIEX_TYPE_DATA, + skb, tx_param); } mwifiex_dbg_dump(adapter, DAT_D, "tx pkt:", skb->data, min_t(size_t, skb->len, DEBUG_DUMP_DATA_MAX_LEN)); - +out: switch (ret) { case -ENOSR: mwifiex_dbg(adapter, DATA, "data: -ENOSR is returned\n"); @@ -137,6 +140,11 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb, break; case -EINPROGRESS: break; + case -EINVAL: + mwifiex_dbg(adapter, ERROR, + "malformed skb (length: %u, headroom: %u)\n", + skb->len, skb_headroom(skb)); + fallthrough; case 0: mwifiex_write_data_complete(adapter, skb, 0, ret); break; diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c index e495f7eaea03..318bd4ed8399 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c @@ -103,6 +103,16 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, return; } + if 
(sizeof(*rx_pkt_hdr) + + le16_to_cpu(uap_rx_pd->rx_pkt_offset) > skb->len) { + mwifiex_dbg(adapter, ERROR, + "wrong rx packet offset: len=%d,rx_pkt_offset=%d\n", + skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset)); + priv->stats.rx_dropped++; + dev_kfree_skb_any(skb); + return; + } + if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header, sizeof(bridge_tunnel_header))) || (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header, @@ -243,7 +253,15 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv, if (is_multicast_ether_addr(ra)) { skb_uap = skb_copy(skb, GFP_ATOMIC); - mwifiex_uap_queue_bridged_pkt(priv, skb_uap); + if (likely(skb_uap)) { + mwifiex_uap_queue_bridged_pkt(priv, skb_uap); + } else { + mwifiex_dbg(adapter, ERROR, + "failed to copy skb for uAP\n"); + priv->stats.rx_dropped++; + dev_kfree_skb_any(skb); + return -1; + } } else { if (mwifiex_get_sta_entry(priv, ra)) { /* Requeue Intra-BSS packet */ @@ -367,6 +385,16 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv, rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type); rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset); + if (le16_to_cpu(uap_rx_pd->rx_pkt_offset) + + sizeof(rx_pkt_hdr->eth803_hdr) > skb->len) { + mwifiex_dbg(adapter, ERROR, + "wrong rx packet for struct ethhdr: len=%d, offset=%d\n", + skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset)); + priv->stats.rx_dropped++; + dev_kfree_skb_any(skb); + return 0; + } + ether_addr_copy(ta, rx_pkt_hdr->eth803_hdr.h_source); if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) + @@ -442,8 +470,8 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv, * - Priority specific Tx control * - Flags */ -void *mwifiex_process_uap_txpd(struct mwifiex_private *priv, - struct sk_buff *skb) +void mwifiex_process_uap_txpd(struct mwifiex_private *priv, + struct sk_buff *skb) { struct mwifiex_adapter *adapter = priv->adapter; struct uap_txpd *txpd; @@ -452,15 +480,6 @@ void *mwifiex_process_uap_txpd(struct mwifiex_private *priv, u16 pkt_type, pkt_offset; int hroom = adapter->intf_hdr_len; - if (!skb->len) { - mwifiex_dbg(adapter, ERROR, - "Tx: bad packet length: %d\n", skb->len); - tx_info->status_code = -1; - return skb->data; - } - - BUG_ON(skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN); - pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? 
PKT_TYPE_MGMT : 0; pad = ((uintptr_t)skb->data - (sizeof(*txpd) + hroom)) & @@ -508,6 +527,4 @@ void *mwifiex_process_uap_txpd(struct mwifiex_private *priv, if (!txpd->tx_control) /* TxCtrl set by user or default */ txpd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl); - - return skb->data; } diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c index 94c2d219835d..745b1d925b21 100644 --- a/drivers/net/wireless/marvell/mwifiex/util.c +++ b/drivers/net/wireless/marvell/mwifiex/util.c @@ -393,11 +393,15 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv, } rx_pd = (struct rxpd *)skb->data; + pkt_len = le16_to_cpu(rx_pd->rx_pkt_length); + if (pkt_len < sizeof(struct ieee80211_hdr) + sizeof(pkt_len)) { + mwifiex_dbg(priv->adapter, ERROR, "invalid rx_pkt_length"); + return -1; + } skb_pull(skb, le16_to_cpu(rx_pd->rx_pkt_offset)); skb_pull(skb, sizeof(pkt_len)); - - pkt_len = le16_to_cpu(rx_pd->rx_pkt_length); + pkt_len -= sizeof(pkt_len); ieee_hdr = (void *)skb->data; if (ieee80211_is_mgmt(ieee_hdr->frame_control)) { @@ -410,7 +414,7 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv, skb->data + sizeof(struct ieee80211_hdr), pkt_len - sizeof(struct ieee80211_hdr)); - pkt_len -= ETH_ALEN + sizeof(pkt_len); + pkt_len -= ETH_ALEN; rx_pd->rx_pkt_length = cpu_to_le16(pkt_len); cfg80211_rx_mgmt(&priv->wdev, priv->roc_cfg.chan.center_freq, diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig index 18152c16c36f..a86f800b8bf5 100644 --- a/drivers/net/wireless/mediatek/mt76/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/Kconfig @@ -29,6 +29,14 @@ config MT76_CONNAC_LIB tristate select MT76_CORE +config MT792x_LIB + tristate + select MT76_CONNAC_LIB + +config MT792x_USB + tristate + select MT76_USB + source "drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig" source "drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig" source "drivers/net/wireless/mediatek/mt76/mt7603/Kconfig" @@ -36,3 +44,4 @@ source "drivers/net/wireless/mediatek/mt76/mt7615/Kconfig" source "drivers/net/wireless/mediatek/mt76/mt7915/Kconfig" source "drivers/net/wireless/mediatek/mt76/mt7921/Kconfig" source "drivers/net/wireless/mediatek/mt76/mt7996/Kconfig" +source "drivers/net/wireless/mediatek/mt76/mt7925/Kconfig" diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile index 84c99b7e57f9..d6575fe18c6b 100644 --- a/drivers/net/wireless/mediatek/mt76/Makefile +++ b/drivers/net/wireless/mediatek/mt76/Makefile @@ -5,6 +5,8 @@ obj-$(CONFIG_MT76_SDIO) += mt76-sdio.o obj-$(CONFIG_MT76x02_LIB) += mt76x02-lib.o obj-$(CONFIG_MT76x02_USB) += mt76x02-usb.o obj-$(CONFIG_MT76_CONNAC_LIB) += mt76-connac-lib.o +obj-$(CONFIG_MT792x_LIB) += mt792x-lib.o +obj-$(CONFIG_MT792x_USB) += mt792x-usb.o mt76-y := \ mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \ @@ -19,6 +21,7 @@ mt76-sdio-y := sdio.o sdio_txrx.o CFLAGS_trace.o := -I$(src) CFLAGS_usb_trace.o := -I$(src) CFLAGS_mt76x02_trace.o := -I$(src) +CFLAGS_mt792x_trace.o := -I$(src) mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o mt76x02_mcu.o \ mt76x02_eeprom.o mt76x02_phy.o mt76x02_mmio.o \ @@ -27,7 +30,12 @@ mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o mt76x02_mcu.o \ mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o -mt76-connac-lib-y := mt76_connac_mcu.o mt76_connac_mac.o +mt76-connac-lib-y := mt76_connac_mcu.o mt76_connac_mac.o mt76_connac3_mac.o + +mt792x-lib-y := mt792x_core.o mt792x_mac.o mt792x_trace.o \ + mt792x_debugfs.o 
mt792x_dma.o +mt792x-lib-$(CONFIG_ACPI) += mt792x_acpi_sar.o +mt792x-usb-y := mt792x_usb.o obj-$(CONFIG_MT76x0_COMMON) += mt76x0/ obj-$(CONFIG_MT76x2_COMMON) += mt76x2/ @@ -36,3 +44,4 @@ obj-$(CONFIG_MT7615_COMMON) += mt7615/ obj-$(CONFIG_MT7915E) += mt7915/ obj-$(CONFIG_MT7921_COMMON) += mt7921/ obj-$(CONFIG_MT7996E) += mt7996/ +obj-$(CONFIG_MT7925_COMMON) += mt7925/ diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c index 57fbcc83e074..ae83be572b94 100644 --- a/drivers/net/wireless/mediatek/mt76/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/debugfs.c @@ -109,8 +109,6 @@ mt76_register_debugfs_fops(struct mt76_phy *phy, struct dentry *dir; dir = debugfs_create_dir("mt76", phy->hw->wiphy->debugfsdir); - if (!dir) - return NULL; debugfs_create_u8("led_pin", 0600, dir, &phy->leds.pin); debugfs_create_u32("regidx", 0600, dir, &dev->debugfs_reg); diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 465190ebaf1c..68ad915203aa 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -53,6 +53,11 @@ mt76_alloc_txwi(struct mt76_dev *dev) addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev->dma_dev, addr))) { + kfree(txwi); + return NULL; + } + t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size); t->dma_addr = addr; @@ -93,13 +98,13 @@ __mt76_get_rxwi(struct mt76_dev *dev) { struct mt76_txwi_cache *t = NULL; - spin_lock(&dev->wed_lock); + spin_lock_bh(&dev->wed_lock); if (!list_empty(&dev->rxwi_cache)) { t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache, list); list_del(&t->list); } - spin_unlock(&dev->wed_lock); + spin_unlock_bh(&dev->wed_lock); return t; } @@ -145,9 +150,9 @@ mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t) if (!t) return; - spin_lock(&dev->wed_lock); + spin_lock_bh(&dev->wed_lock); list_add(&t->list, &dev->rxwi_cache); - spin_unlock(&dev->wed_lock); + spin_unlock_bh(&dev->wed_lock); } EXPORT_SYMBOL_GPL(mt76_put_rxwi); @@ -330,9 +335,6 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx, if (e->txwi == DMA_DUMMY_DATA) e->txwi = NULL; - if (e->skb == DMA_DUMMY_DATA) - e->skb = NULL; - *prev_e = *e; memset(e, 0, sizeof(*e)); } @@ -466,6 +468,9 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q, struct mt76_queue_buf buf = {}; dma_addr_t addr; + if (test_bit(MT76_MCU_RESET, &dev->phy.state)) + goto error; + if (q->queued + 1 >= q->ndesc - 1) goto error; @@ -507,6 +512,9 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, dma_addr_t addr; u8 *txwi; + if (test_bit(MT76_RESET, &dev->phy.state)) + goto free_skb; + t = mt76_get_txwi(dev); if (!t) goto free_skb; @@ -731,16 +739,18 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q) if (!q->ndesc) return; - spin_lock_bh(&q->lock); - do { + spin_lock_bh(&q->lock); buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL); + spin_unlock_bh(&q->lock); + if (!buf) break; mt76_put_page_pool_buf(buf, false); } while (1); + spin_lock_bh(&q->lock); if (q->rx_head) { dev_kfree_skb(q->rx_head); q->rx_head = NULL; @@ -773,7 +783,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid) static void mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, - int len, bool more, u32 info) + int len, bool more, u32 info, bool allow_direct) { struct sk_buff *skb = q->rx_head; struct skb_shared_info 
*shinfo = skb_shinfo(skb); @@ -785,7 +795,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size); } else { - mt76_put_page_pool_buf(data, true); + mt76_put_page_pool_buf(data, allow_direct); } if (more) @@ -805,6 +815,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) struct sk_buff *skb; unsigned char *data; bool check_ddone = false; + bool allow_direct = !mt76_queue_is_wed_rx(q); bool more; if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) && @@ -845,7 +856,8 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) } if (q->rx_head) { - mt76_add_fragment(dev, q, data, len, more, info); + mt76_add_fragment(dev, q, data, len, more, info, + allow_direct); continue; } @@ -874,7 +886,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) continue; free_frag: - mt76_put_page_pool_buf(data, true); + mt76_put_page_pool_buf(data, allow_direct); } mt76_dma_rx_fill(dev, q, true); diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c index dce851d42e08..7725dd6763ef 100644 --- a/drivers/net/wireless/mediatek/mt76/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/eeprom.c @@ -6,34 +6,39 @@ #include <linux/of_net.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> +#include <linux/nvmem-consumer.h> #include <linux/etherdevice.h> #include "mt76.h" -int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int offset, int len) +static int mt76_get_of_eeprom_data(struct mt76_dev *dev, void *eep, int len) { -#if defined(CONFIG_OF) && defined(CONFIG_MTD) struct device_node *np = dev->dev->of_node; - struct mtd_info *mtd; - const __be32 *list; const void *data; - const char *part; - phandle phandle; int size; - size_t retlen; - int ret; - if (!np) + data = of_get_property(np, "mediatek,eeprom-data", &size); + if (!data) return -ENOENT; - data = of_get_property(np, "mediatek,eeprom-data", &size); - if (data) { - if (size > len) - return -EINVAL; + if (size > len) + return -EINVAL; - memcpy(eep, data, size); + memcpy(eep, data, size); - return 0; - } + return 0; +} + +static int mt76_get_of_epprom_from_mtd(struct mt76_dev *dev, void *eep, int offset, int len) +{ +#ifdef CONFIG_MTD + struct device_node *np = dev->dev->of_node; + struct mtd_info *mtd; + const __be32 *list; + const char *part; + phandle phandle; + size_t retlen; + int size; + int ret; list = of_get_property(np, "mediatek,mtd-eeprom", &size); if (!list) @@ -100,6 +105,56 @@ out_put_node: return -ENOENT; #endif } + +static int mt76_get_of_epprom_from_nvmem(struct mt76_dev *dev, void *eep, int len) +{ + struct device_node *np = dev->dev->of_node; + struct nvmem_cell *cell; + const void *data; + size_t retlen; + int ret = 0; + + cell = of_nvmem_cell_get(np, "eeprom"); + if (IS_ERR(cell)) + return PTR_ERR(cell); + + data = nvmem_cell_read(cell, &retlen); + nvmem_cell_put(cell); + + if (IS_ERR(data)) + return PTR_ERR(data); + + if (retlen < len) { + ret = -EINVAL; + goto exit; + } + + memcpy(eep, data, len); + +exit: + kfree(data); + + return ret; +} + +int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int offset, int len) +{ + struct device_node *np = dev->dev->of_node; + int ret; + + if (!np) + return -ENOENT; + + ret = mt76_get_of_eeprom_data(dev, eep, len); + if (!ret) + return 0; + + ret = mt76_get_of_epprom_from_mtd(dev, eep, offset, len); + if (!ret) + return 0; + + return mt76_get_of_epprom_from_nvmem(dev, eep, len); +} 
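The mt76 dma.c hunk above adds a dma_mapping_error() check after dma_map_single() in mt76_alloc_txwi(); the DMA API requires this, since a mapping can fail even when the buffer allocation succeeded. A minimal sketch of the pattern, with hypothetical buffer and helper names:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Allocate a buffer and map it for the device; unwind when the
 * mapping (not just the allocation) fails. */
static void *alloc_and_map(struct device *dev, size_t size, dma_addr_t *addr)
{
	void *buf = kzalloc(size, GFP_KERNEL);

	if (!buf)
		return NULL;

	*addr = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr)) {
		kfree(buf);
		return NULL;
	}
	return buf;
}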
EXPORT_SYMBOL_GPL(mt76_get_of_eeprom); void @@ -133,7 +188,7 @@ static bool mt76_string_prop_find(struct property *prop, const char *str) return false; } -static struct device_node * +struct device_node * mt76_find_power_limits_node(struct mt76_dev *dev) { struct device_node *np = dev->dev->of_node; @@ -172,6 +227,7 @@ mt76_find_power_limits_node(struct mt76_dev *dev) of_node_put(np); return fallback; } +EXPORT_SYMBOL_GPL(mt76_find_power_limits_node); static const __be32 * mt76_get_of_array(struct device_node *np, char *name, size_t *len, int min) @@ -186,7 +242,7 @@ mt76_get_of_array(struct device_node *np, char *name, size_t *len, int min) return prop->value; } -static struct device_node * +struct device_node * mt76_find_channel_node(struct device_node *np, struct ieee80211_channel *chan) { struct device_node *cur; @@ -210,6 +266,8 @@ mt76_find_channel_node(struct device_node *np, struct ieee80211_channel *chan) return NULL; } +EXPORT_SYMBOL_GPL(mt76_find_channel_node); + static s8 mt76_get_txs_delta(struct device_node *np, u8 nss) diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 467afef98ba2..51a767121b0d 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -4,7 +4,6 @@ */ #include <linux/sched.h> #include <linux/of.h> -#include <net/page_pool.h> #include "mt76.h" #define CHAN2G(_idx, _freq) { \ @@ -76,6 +75,7 @@ static const struct ieee80211_channel mt76_channels_5ghz[] = { CHAN5G(165, 5825), CHAN5G(169, 5845), CHAN5G(173, 5865), + CHAN5G(177, 5885), }; static const struct ieee80211_channel mt76_channels_6ghz[] = { @@ -415,6 +415,9 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw) struct mt76_dev *dev = phy->dev; struct wiphy *wiphy = hw->wiphy; + INIT_LIST_HEAD(&phy->tx_list); + spin_lock_init(&phy->tx_lock); + SET_IEEE80211_DEV(hw, dev->dev); SET_IEEE80211_PERM_ADDR(hw, phy->macaddr); @@ -452,7 +455,8 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw) ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); - if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) { + if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) && + hw->max_tx_fragments > 1) { ieee80211_hw_set(hw, TX_AMSDU); ieee80211_hw_set(hw, TX_FRAG_LIST); } @@ -566,7 +570,7 @@ int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q) { struct page_pool_params pp_params = { .order = 0, - .flags = PP_FLAG_PAGE_FRAG, + .flags = 0, .nid = NUMA_NO_NODE, .dev = dev->dma_dev, }; @@ -660,6 +664,8 @@ mt76_alloc_device(struct device *pdev, unsigned int size, idr_init(&dev->rx_token); INIT_LIST_HEAD(&dev->wcid_list); + INIT_LIST_HEAD(&dev->sta_poll_list); + spin_lock_init(&dev->sta_poll_lock); INIT_LIST_HEAD(&dev->txwi_cache); INIT_LIST_HEAD(&dev->rxwi_cache); @@ -686,6 +692,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht, int ret; dev_set_drvdata(dev->dev, dev); + mt76_wcid_init(&dev->global_wcid); ret = mt76_phy_init(phy, hw); if (ret) return ret; @@ -741,6 +748,7 @@ void mt76_unregister_device(struct mt76_dev *dev) if (IS_ENABLED(CONFIG_MT76_LEDS)) mt76_led_cleanup(&dev->phy); mt76_tx_status_check(dev, true); + mt76_wcid_cleanup(dev, &dev->global_wcid); ieee80211_unregister_hw(hw); } EXPORT_SYMBOL_GPL(mt76_unregister_device); @@ -1409,7 +1417,7 @@ mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif, wcid->phy_idx = phy->band_idx; rcu_assign_pointer(dev->wcid[wcid->idx], wcid); - mt76_packet_id_init(wcid); + 
mt76_wcid_init(wcid); out: mutex_unlock(&dev->mutex); @@ -1428,7 +1436,7 @@ void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, if (dev->drv->sta_remove) dev->drv->sta_remove(dev, vif, sta); - mt76_packet_id_flush(dev, wcid); + mt76_wcid_cleanup(dev, wcid); mt76_wcid_mask_clear(dev->wcid_mask, idx); mt76_wcid_mask_clear(dev->wcid_phy_mask, idx); @@ -1484,6 +1492,47 @@ void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, } EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove); +void mt76_wcid_init(struct mt76_wcid *wcid) +{ + INIT_LIST_HEAD(&wcid->tx_list); + skb_queue_head_init(&wcid->tx_pending); + + INIT_LIST_HEAD(&wcid->list); + idr_init(&wcid->pktid); +} +EXPORT_SYMBOL_GPL(mt76_wcid_init); + +void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid) +{ + struct mt76_phy *phy = dev->phys[wcid->phy_idx]; + struct ieee80211_hw *hw; + struct sk_buff_head list; + struct sk_buff *skb; + + mt76_tx_status_lock(dev, &list); + mt76_tx_status_skb_get(dev, wcid, -1, &list); + mt76_tx_status_unlock(dev, &list); + + idr_destroy(&wcid->pktid); + + spin_lock_bh(&phy->tx_lock); + + if (!list_empty(&wcid->tx_list)) + list_del_init(&wcid->tx_list); + + spin_lock(&wcid->tx_pending.lock); + skb_queue_splice_tail_init(&wcid->tx_pending, &list); + spin_unlock(&wcid->tx_pending.lock); + + spin_unlock_bh(&phy->tx_lock); + + while ((skb = __skb_dequeue(&list)) != NULL) { + hw = mt76_tx_status_get_hw(dev, skb); + ieee80211_free_txskb(hw, skb); + } +} +EXPORT_SYMBOL_GPL(mt76_wcid_cleanup); + int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm) { @@ -1695,11 +1744,16 @@ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc, } EXPORT_SYMBOL_GPL(mt76_init_queue); -u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx) +u16 mt76_calculate_default_rate(struct mt76_phy *phy, + struct ieee80211_vif *vif, int rateidx) { + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct cfg80211_chan_def *chandef = mvif->ctx ? + &mvif->ctx->def : + &phy->chandef; int offset = 0; - if (phy->chandef.chan->band != NL80211_BAND_2GHZ) + if (chandef->chan->band != NL80211_BAND_2GHZ) offset = 4; /* pick the lowest rate for hidden nodes */ @@ -1743,6 +1797,9 @@ void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi, for (i = 0; i < (eht ? 
14 : 12); i++) data[ei++] += stats->tx_mcs[i]; + for (i = 0; i < 4; i++) + data[ei++] += stats->tx_nss[i]; + wi->worker_stat_count = ei - wi->initial_stat_idx; } EXPORT_SYMBOL_GPL(mt76_ethtool_worker); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 6b07b8fafec2..ea828ba0b83a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -15,6 +15,7 @@ #include <linux/average.h> #include <linux/soc/mediatek/mtk_wed.h> #include <net/mac80211.h> +#include <net/page_pool/helpers.h> #include "util.h" #include "testmode.h" @@ -277,7 +278,7 @@ struct mt76_sta_stats { u64 tx_mcs[16]; /* mcs idx */ u64 tx_bytes; /* WED TX */ - u32 tx_packets; + u32 tx_packets; /* unit: MSDU */ u32 tx_retries; u32 tx_failed; /* WED RX */ @@ -316,6 +317,7 @@ struct mt76_wcid { int inactive_count; struct rate_info rate; + unsigned long ampdu_state; u16 idx; u8 hw_key_idx; @@ -332,10 +334,15 @@ struct mt76_wcid { u32 tx_info; bool sw_iv; + struct list_head tx_list; + struct sk_buff_head tx_pending; + struct list_head list; struct idr pktid; struct mt76_sta_stats stats; + + struct list_head poll_list; }; struct mt76_txq { @@ -372,7 +379,7 @@ struct mt76_rx_tid { u8 started:1, stopped:1, timer_pending:1; - struct sk_buff *reorder_buf[]; + struct sk_buff *reorder_buf[] __counted_by(size); }; #define MT_TX_CB_DMA_DONE BIT(0) @@ -702,6 +709,10 @@ struct mt76_vif { u8 wmm_idx; u8 scan_seq_num; u8 cipher; + u8 basic_rates_idx; + u8 mcast_rates_idx; + u8 beacon_rates_idx; + struct ieee80211_chanctx_conf *ctx; }; struct mt76_phy { @@ -712,6 +723,8 @@ struct mt76_phy { unsigned long state; u8 band_idx; + spinlock_t tx_lock; + struct list_head tx_list; struct mt76_queue *q_tx[__MT_TXQ_MAX]; struct cfg80211_chan_def chandef; @@ -823,6 +836,9 @@ struct mt76_dev { struct mt76_wcid __rcu *wcid[MT76_N_WCIDS]; struct list_head wcid_list; + struct list_head sta_poll_list; + spinlock_t sta_poll_lock; + u32 rev; struct tasklet_struct pre_tbtt_tasklet; @@ -857,11 +873,107 @@ struct mt76_dev { }; }; +/* per-phy stats. 
*/ +struct mt76_mib_stats { + u32 ack_fail_cnt; + u32 fcs_err_cnt; + u32 rts_cnt; + u32 rts_retries_cnt; + u32 ba_miss_cnt; + u32 tx_bf_cnt; + u32 tx_mu_bf_cnt; + u32 tx_mu_mpdu_cnt; + u32 tx_mu_acked_mpdu_cnt; + u32 tx_su_acked_mpdu_cnt; + u32 tx_bf_ibf_ppdu_cnt; + u32 tx_bf_ebf_ppdu_cnt; + + u32 tx_bf_rx_fb_all_cnt; + u32 tx_bf_rx_fb_eht_cnt; + u32 tx_bf_rx_fb_he_cnt; + u32 tx_bf_rx_fb_vht_cnt; + u32 tx_bf_rx_fb_ht_cnt; + + u32 tx_bf_rx_fb_bw; /* value of last sample, not cumulative */ + u32 tx_bf_rx_fb_nc_cnt; + u32 tx_bf_rx_fb_nr_cnt; + u32 tx_bf_fb_cpl_cnt; + u32 tx_bf_fb_trig_cnt; + + u32 tx_ampdu_cnt; + u32 tx_stop_q_empty_cnt; + u32 tx_mpdu_attempts_cnt; + u32 tx_mpdu_success_cnt; + u32 tx_pkt_ebf_cnt; + u32 tx_pkt_ibf_cnt; + + u32 tx_rwp_fail_cnt; + u32 tx_rwp_need_cnt; + + /* rx stats */ + u32 rx_fifo_full_cnt; + u32 channel_idle_cnt; + u32 primary_cca_busy_time; + u32 secondary_cca_busy_time; + u32 primary_energy_detect_time; + u32 cck_mdrdy_time; + u32 ofdm_mdrdy_time; + u32 green_mdrdy_time; + u32 rx_vector_mismatch_cnt; + u32 rx_delimiter_fail_cnt; + u32 rx_mrdy_cnt; + u32 rx_len_mismatch_cnt; + u32 rx_mpdu_cnt; + u32 rx_ampdu_cnt; + u32 rx_ampdu_bytes_cnt; + u32 rx_ampdu_valid_subframe_cnt; + u32 rx_ampdu_valid_subframe_bytes_cnt; + u32 rx_pfdrop_cnt; + u32 rx_vec_queue_overflow_drop_cnt; + u32 rx_ba_cnt; + + u32 tx_amsdu[8]; + u32 tx_amsdu_cnt; + + /* mcu_muru_stats */ + u32 dl_cck_cnt; + u32 dl_ofdm_cnt; + u32 dl_htmix_cnt; + u32 dl_htgf_cnt; + u32 dl_vht_su_cnt; + u32 dl_vht_2mu_cnt; + u32 dl_vht_3mu_cnt; + u32 dl_vht_4mu_cnt; + u32 dl_he_su_cnt; + u32 dl_he_ext_su_cnt; + u32 dl_he_2ru_cnt; + u32 dl_he_2mu_cnt; + u32 dl_he_3ru_cnt; + u32 dl_he_3mu_cnt; + u32 dl_he_4ru_cnt; + u32 dl_he_4mu_cnt; + u32 dl_he_5to8ru_cnt; + u32 dl_he_9to16ru_cnt; + u32 dl_he_gtr16ru_cnt; + + u32 ul_hetrig_su_cnt; + u32 ul_hetrig_2ru_cnt; + u32 ul_hetrig_3ru_cnt; + u32 ul_hetrig_4ru_cnt; + u32 ul_hetrig_5to8ru_cnt; + u32 ul_hetrig_9to16ru_cnt; + u32 ul_hetrig_gtr16ru_cnt; + u32 ul_hetrig_2mu_cnt; + u32 ul_hetrig_3mu_cnt; + u32 ul_hetrig_4mu_cnt; +}; + struct mt76_power_limits { s8 cck[4]; s8 ofdm[8]; s8 mcs[4][10]; s8 ru[7][12]; + s8 eht[16][16]; }; struct mt76_ethtool_worker_info { @@ -995,7 +1107,8 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len); struct mt76_queue * mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc, int ring_base, u32 flags); -u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx); +u16 mt76_calculate_default_rate(struct mt76_phy *phy, + struct ieee80211_vif *vif, int rateidx); static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx, int n_desc, int ring_base, u32 flags) { @@ -1424,6 +1537,11 @@ mt76_mcu_skb_send_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd, void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set); +struct device_node * +mt76_find_power_limits_node(struct mt76_dev *dev); +struct device_node * +mt76_find_channel_node(struct device_node *np, struct ieee80211_channel *chan); + s8 mt76_get_rate_power_limits(struct mt76_phy *phy, struct ieee80211_channel *chan, struct mt76_power_limits *dest, @@ -1493,22 +1611,7 @@ mt76_token_put(struct mt76_dev *dev, int token) return txwi; } -static inline void mt76_packet_id_init(struct mt76_wcid *wcid) -{ - INIT_LIST_HEAD(&wcid->list); - idr_init(&wcid->pktid); -} - -static inline void -mt76_packet_id_flush(struct mt76_dev *dev, struct mt76_wcid *wcid) -{ - struct sk_buff_head list; - - mt76_tx_status_lock(dev, &list); 
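The mt76.h hunk above annotates the reorder_buf[] flexible array with __counted_by(size), so FORTIFY_SOURCE and UBSAN can bounds-check accesses against the runtime element count. A minimal sketch of the annotation and the struct_size() allocation it pairs with, using a hypothetical struct (assumes a kernel and compiler with __counted_by support):

#include <linux/overflow.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

struct reorder_ring {
	u8 size;	/* element count; must be set before buf[] is indexed */
	struct sk_buff *buf[] __counted_by(size);
};

static struct reorder_ring *reorder_ring_alloc(u8 size)
{
	struct reorder_ring *r;

	r = kzalloc(struct_size(r, buf, size), GFP_KERNEL);
	if (r)
		r->size = size;	/* satisfies the counted_by contract */
	return r;
}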
- mt76_tx_status_skb_get(dev, wcid, -1, &list); - mt76_tx_status_unlock(dev, &list); - - idr_destroy(&wcid->pktid); -} +void mt76_wcid_init(struct mt76_wcid *wcid); +void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig index 6a0080f1d91c..dd16acfd9735 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig @@ -5,7 +5,7 @@ config MT7603E depends on MAC80211 depends on PCI help - This adds support for MT7603E wireless PCIe devices and the WLAN core + This adds support for MT7603E PCIe wireless devices and the WLAN core on MT7628/MT7688 SoC devices. This family supports IEEE 802.11n 2x2 to 300Mbps PHY rate diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c index b65b0a88c1de..c223f7c19e6d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c @@ -10,12 +10,31 @@ struct beacon_bc_data { }; static void +mt7603_mac_stuck_beacon_recovery(struct mt7603_dev *dev) +{ + if (dev->beacon_check % 5 != 4) + return; + + mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN); + mt76_set(dev, MT_SCH_4, MT_SCH_4_RESET); + mt76_clear(dev, MT_SCH_4, MT_SCH_4_RESET); + mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN); + + mt76_set(dev, MT_WF_CFG_OFF_WOCCR, MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS); + mt76_set(dev, MT_ARB_SCR, MT_ARB_SCR_TX_DISABLE); + mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_TX_DISABLE); + mt76_clear(dev, MT_WF_CFG_OFF_WOCCR, MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS); +} + +static void mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) { struct mt7603_dev *dev = (struct mt7603_dev *)priv; struct mt76_dev *mdev = &dev->mt76; struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv; struct sk_buff *skb = NULL; + u32 om_idx = mvif->idx; + u32 val; if (!(mdev->beacon_mask & BIT(mvif->idx))) return; @@ -24,20 +43,33 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) if (!skb) return; - mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON], - MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL); + if (om_idx) + om_idx |= 0x10; + val = MT_DMA_FQCR0_BUSY | MT_DMA_FQCR0_MODE | + FIELD_PREP(MT_DMA_FQCR0_TARGET_BSS, om_idx) | + FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) | + FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8); spin_lock_bh(&dev->ps_lock); - mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY | - FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) | - FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, - dev->mphy.q_tx[MT_TXQ_CAB]->hw_idx) | - FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) | - FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8)); - if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000)) + mt76_wr(dev, MT_DMA_FQCR0, val | + FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, MT_TX_HW_QUEUE_BCN)); + if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000)) { dev->beacon_check = MT7603_WATCHDOG_TIMEOUT; + goto out; + } + + mt76_wr(dev, MT_DMA_FQCR0, val | + FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, MT_TX_HW_QUEUE_BMC)); + if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000)) { + dev->beacon_check = MT7603_WATCHDOG_TIMEOUT; + goto out; + } + mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON], + MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL); + +out: spin_unlock_bh(&dev->ps_lock); } @@ -81,6 +113,18 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t) 
data.dev = dev; __skb_queue_head_init(&data.q); + /* Flush all previous CAB queue packets and beacons */ + mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0)); + + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_CAB], false); + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BEACON], false); + + if (dev->mphy.q_tx[MT_TXQ_BEACON]->queued > 0) + dev->beacon_check++; + else + dev->beacon_check = 0; + mt7603_mac_stuck_beacon_recovery(dev); + q = dev->mphy.q_tx[MT_TXQ_BEACON]; spin_lock(&q->lock); ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), @@ -89,14 +133,9 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t) mt76_queue_kick(dev, q); spin_unlock(&q->lock); - /* Flush all previous CAB queue packets */ - mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0)); - - mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_CAB], false); - mt76_csa_check(mdev); if (mdev->csa_complete) - goto out; + return; q = dev->mphy.q_tx[MT_TXQ_CAB]; do { @@ -108,7 +147,7 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t) skb_queue_len(&data.q) < 8); if (skb_queue_empty(&data.q)) - goto out; + return; for (i = 0; i < ARRAY_SIZE(data.tail); i++) { if (!data.tail[i]) @@ -136,11 +175,6 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t) MT_WF_ARB_CAB_START_BSSn(0) | (MT_WF_ARB_CAB_START_BSS0n(1) * ((1 << (MT7603_MAX_INTERFACES - 1)) - 1))); - -out: - mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BEACON], false); - if (dev->mphy.q_tx[MT_TXQ_BEACON]->queued > hweight8(mdev->beacon_mask)) - dev->beacon_check++; } void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval) @@ -161,7 +195,8 @@ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval) return; } - dev->mt76.beacon_int = intval; + if (intval) + dev->mt76.beacon_int = intval; mt76_wr(dev, MT_TBTT, FIELD_PREP(MT_TBTT_PERIOD, intval) | MT_TBTT_CAL_ENABLE); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/core.c b/drivers/net/wireless/mediatek/mt76/mt7603/core.c index 60a996b63c0c..915b8349146a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/core.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/core.c @@ -42,11 +42,13 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance) } if (intr & MT_INT_RX_DONE(0)) { + dev->rx_pse_check = 0; mt7603_irq_disable(dev, MT_INT_RX_DONE(0)); napi_schedule(&dev->mt76.napi[0]); } if (intr & MT_INT_RX_DONE(1)) { + dev->rx_pse_check = 0; mt7603_irq_disable(dev, MT_INT_RX_DONE(1)); napi_schedule(&dev->mt76.napi[1]); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c index 9a2e632d577a..6c55c72f28a2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c @@ -184,6 +184,13 @@ mt7603_mac_init(struct mt7603_dev *dev) mt76_set(dev, MT_TMAC_TCR, MT_TMAC_TCR_RX_RIFS_MODE); + if (is_mt7628(dev)) { + mt76_set(dev, MT_TMAC_TCR, + MT_TMAC_TCR_TXOP_BURST_STOP | BIT(1) | BIT(0)); + mt76_set(dev, MT_TXREQ, BIT(27)); + mt76_set(dev, MT_AGG_TMP, GENMASK(4, 2)); + } + mt7603_set_tmac_template(dev); /* Enable RX group to HIF */ @@ -500,8 +507,6 @@ int mt7603_register_device(struct mt7603_dev *dev) bus_ops->rmw = mt7603_rmw; dev->mt76.bus = bus_ops; - INIT_LIST_HEAD(&dev->sta_poll_list); - spin_lock_init(&dev->sta_poll_lock); spin_lock_init(&dev->ps_lock); INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7603_mac_work); @@ -519,6 +524,7 @@ int mt7603_register_device(struct mt7603_dev *dev) hw->max_rates = 3; hw->max_report_rates = 7; 
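The mt7603 beacon.c hunks above turn dev->beacon_check into an escalating watchdog: the counter resets whenever the beacon queue drains, increments on each pre-TBTT period where the queue is still backed up, and the recovery sequence (SCH reset plus TX-DMA and ARB toggling) only fires on the "% 5 == 4" ticks. The counting idiom in miniature, with a hypothetical struct and recovery callback:

/* Reset on progress; kick recovery after four consecutive stuck
 * periods and then every five periods thereafter (4, 9, 14, ...). */
struct beacon_wd {
	unsigned int stuck_count;
};

static void beacon_wd_tick(struct beacon_wd *wd, bool queue_empty,
			   void (*recover)(void))
{
	if (queue_empty) {
		wd->stuck_count = 0;
		return;
	}
	if (++wd->stuck_count % 5 == 4)
		recover();
}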
hw->max_rate_tries = 11; + hw->max_tx_fragments = 1; hw->radiotap_timestamp.units_pos = IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c index 12e0af52082a..cf21d06257e5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c @@ -178,8 +178,9 @@ mt7603_wtbl_set_skip_tx(struct mt7603_dev *dev, int idx, bool enabled) mt76_wr(dev, addr + 3 * 4, val); } -void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort) +void mt7603_filter_tx(struct mt7603_dev *dev, int mac_idx, int idx, bool abort) { + u32 flush_mask; int i, port, queue; if (abort) { @@ -195,6 +196,18 @@ void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort) mt76_wr(dev, MT_TX_ABORT, MT_TX_ABORT_EN | FIELD_PREP(MT_TX_ABORT_WCID, idx)); + flush_mask = MT_WF_ARB_TX_FLUSH_AC0 | + MT_WF_ARB_TX_FLUSH_AC1 | + MT_WF_ARB_TX_FLUSH_AC2 | + MT_WF_ARB_TX_FLUSH_AC3; + flush_mask <<= mac_idx; + + mt76_wr(dev, MT_WF_ARB_TX_FLUSH_0, flush_mask); + mt76_poll(dev, MT_WF_ARB_TX_FLUSH_0, flush_mask, 0, 20000); + mt76_wr(dev, MT_WF_ARB_TX_START_0, flush_mask); + + mt76_wr(dev, MT_TX_ABORT, 0); + for (i = 0; i < 4; i++) { mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY | FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, idx) | @@ -202,13 +215,11 @@ void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort) FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, port) | FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, queue)); - mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 15000); + mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000); } WARN_ON_ONCE(mt76_rr(dev, MT_DMA_FQCR0) & MT_DMA_FQCR0_BUSY); - mt76_wr(dev, MT_TX_ABORT, 0); - mt7603_wtbl_set_skip_tx(dev, idx, false); } @@ -245,7 +256,7 @@ void mt7603_wtbl_set_ps(struct mt7603_dev *dev, struct mt7603_sta *sta, mt76_poll(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY, 0, 5000); if (enabled) - mt7603_filter_tx(dev, idx, false); + mt7603_filter_tx(dev, sta->vif->idx, idx, false); addr = mt7603_wtbl1_addr(idx); mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE); @@ -412,16 +423,16 @@ void mt7603_mac_sta_poll(struct mt7603_dev *dev) while (1) { bool clear = false; - spin_lock_bh(&dev->sta_poll_lock); - if (list_empty(&dev->sta_poll_list)) { - spin_unlock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); + if (list_empty(&dev->mt76.sta_poll_list)) { + spin_unlock_bh(&dev->mt76.sta_poll_lock); break; } - msta = list_first_entry(&dev->sta_poll_list, struct mt7603_sta, - poll_list); - list_del_init(&msta->poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + msta = list_first_entry(&dev->mt76.sta_poll_list, + struct mt7603_sta, wcid.poll_list); + list_del_init(&msta->wcid.poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); addr = mt7603_wtbl4_addr(msta->wcid.idx); for (i = 0; i < 4; i++) { @@ -1267,10 +1278,10 @@ void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data) msta = container_of(wcid, struct mt7603_sta, wcid); sta = wcid_to_sta(wcid); - if (list_empty(&msta->poll_list)) { - spin_lock_bh(&dev->sta_poll_lock); - list_add_tail(&msta->poll_list, &dev->sta_poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + if (list_empty(&msta->wcid.poll_list)) { + spin_lock_bh(&dev->mt76.sta_poll_lock); + list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); } if (mt7603_mac_add_txs_skb(dev, msta, pid, txs_data)) @@ -1430,15 +1441,6 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev) 
mt7603_beacon_set_timer(dev, -1, 0); - if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] || - dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY || - dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK || - dev->cur_reset_cause == RESET_CAUSE_TX_HANG) - mt7603_pse_reset(dev); - - if (dev->reset_cause[RESET_CAUSE_RESET_FAILED]) - goto skip_dma_reset; - mt7603_mac_stop(dev); mt76_clear(dev, MT_WPDMA_GLO_CFG, @@ -1448,28 +1450,32 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev) mt7603_irq_disable(dev, mask); - mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF); - mt7603_pse_client_reset(dev); mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true); for (i = 0; i < __MT_TXQ_MAX; i++) mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true); + mt7603_dma_sched_reset(dev); + + mt76_tx_status_check(&dev->mt76, true); + mt76_for_each_q_rx(&dev->mt76, i) { mt76_queue_rx_reset(dev, i); } - mt76_tx_status_check(&dev->mt76, true); + if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] || + dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY) + mt7603_pse_reset(dev); - mt7603_dma_sched_reset(dev); + if (!dev->reset_cause[RESET_CAUSE_RESET_FAILED]) { + mt7603_mac_dma_start(dev); - mt7603_mac_dma_start(dev); + mt7603_irq_enable(dev, mask); - mt7603_irq_enable(dev, mask); + clear_bit(MT76_RESET, &dev->mphy.state); + } -skip_dma_reset: - clear_bit(MT76_RESET, &dev->mphy.state); mutex_unlock(&dev->mt76.mutex); mt76_worker_enable(&dev->mt76.tx_worker); @@ -1559,20 +1565,29 @@ static bool mt7603_rx_pse_busy(struct mt7603_dev *dev) { u32 addr, val; - if (mt76_rr(dev, MT_MCU_DEBUG_RESET) & MT_MCU_DEBUG_RESET_QUEUES) - return true; - if (mt7603_rx_fifo_busy(dev)) - return false; + goto out; addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS); mt76_wr(dev, addr, 3); val = mt76_rr(dev, addr) >> 16; - if (is_mt7628(dev) && (val & 0x4001) == 0x4001) - return true; + if (!(val & BIT(0))) + return false; + + if (is_mt7628(dev)) + val &= 0xa000; + else + val &= 0x8000; + if (!val) + return false; + +out: + if (mt76_rr(dev, MT_INT_SOURCE_CSR) & + (MT_INT_RX_DONE(0) | MT_INT_RX_DONE(1))) + return false; - return (val & 0x8001) == 0x8001 || (val & 0xe001) == 0xe001; + return true; } static bool diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index 1b1358c6bb46..89d738deea62 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -66,10 +66,11 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) idx = MT7603_WTBL_RESERVED - 1 - mvif->idx; dev->mt76.vif_mask |= BIT_ULL(mvif->idx); - INIT_LIST_HEAD(&mvif->sta.poll_list); + INIT_LIST_HEAD(&mvif->sta.wcid.poll_list); mvif->sta.wcid.idx = idx; mvif->sta.wcid.hw_key_idx = -1; - mt76_packet_id_init(&mvif->sta.wcid); + mvif->sta.vif = mvif; + mt76_wcid_init(&mvif->sta.wcid); eth_broadcast_addr(bc_addr); mt7603_wtbl_init(dev, idx, mvif->idx, bc_addr); @@ -100,16 +101,16 @@ mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) rcu_assign_pointer(dev->mt76.wcid[idx], NULL); - spin_lock_bh(&dev->sta_poll_lock); - if (!list_empty(&msta->poll_list)) - list_del_init(&msta->poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); + if (!list_empty(&msta->wcid.poll_list)) + list_del_init(&msta->wcid.poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); mutex_lock(&dev->mt76.mutex); dev->mt76.vif_mask &= ~BIT_ULL(mvif->idx); 
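A recurring change in the hunks above and below is the move of the per-driver sta_poll_list/sta_poll_lock pair into the shared struct mt76_dev, with the list node relocated into struct mt76_wcid as wcid.poll_list. Every call site now follows the same queue-once idiom, condensed here as a sketch (assumes the mt76.h definitions added in this series):

#include "mt76.h"

/* Queue a station for airtime/stats polling exactly once; a worker
 * later splices the whole list and processes it in one pass. */
static void sta_poll_queue(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&wcid->poll_list))
		list_add_tail(&wcid->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);
}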
mutex_unlock(&dev->mt76.mutex); - mt76_packet_id_flush(&dev->mt76, &mvif->sta.wcid); + mt76_wcid_cleanup(&dev->mt76, &mvif->sta.wcid); } void mt7603_init_edcca(struct mt7603_dev *dev) @@ -351,12 +352,13 @@ mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, if (idx < 0) return -ENOSPC; - INIT_LIST_HEAD(&msta->poll_list); + INIT_LIST_HEAD(&msta->wcid.poll_list); __skb_queue_head_init(&msta->psq); msta->ps = ~0; msta->smps = ~0; msta->wcid.sta = 1; msta->wcid.idx = idx; + msta->vif = mvif; mt7603_wtbl_init(dev, idx, mvif->idx, sta->addr); mt7603_wtbl_set_ps(dev, msta, false); @@ -380,18 +382,19 @@ mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); + struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv; struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv; struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; spin_lock_bh(&dev->ps_lock); __skb_queue_purge(&msta->psq); - mt7603_filter_tx(dev, wcid->idx, true); + mt7603_filter_tx(dev, mvif->idx, wcid->idx, true); spin_unlock_bh(&dev->ps_lock); - spin_lock_bh(&dev->sta_poll_lock); - if (!list_empty(&msta->poll_list)) - list_del_init(&msta->poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_lock_bh(&mdev->sta_poll_lock); + if (!list_empty(&msta->wcid.poll_list)) + list_del_init(&msta->wcid.poll_list); + spin_unlock_bh(&mdev->sta_poll_lock); mt7603_wtbl_clear(dev, wcid->idx); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h index 7c3be596da09..9e58df7042ad 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h @@ -64,7 +64,6 @@ struct mt7603_sta { struct mt7603_vif *vif; - struct list_head poll_list; u32 tx_airtime_ac[4]; struct sk_buff_head psq; @@ -110,9 +109,6 @@ struct mt7603_dev { u32 rxfilter; - struct list_head sta_poll_list; - spinlock_t sta_poll_lock; - struct mt7603_sta global_sta; u32 agc0, agc3; @@ -234,7 +230,7 @@ void mt7603_wtbl_set_ps(struct mt7603_dev *dev, struct mt7603_sta *sta, bool enabled); void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta, bool enabled); -void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort); +void mt7603_filter_tx(struct mt7603_dev *dev, int mac_idx, int idx, bool abort); int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, enum mt76_txq_id qid, struct mt76_wcid *wcid, diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h index 3b901090b29c..524bceb8e958 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h @@ -309,6 +309,13 @@ enum { #define MT_WF_ARB_TX_STOP_0 MT_WF_ARB(0x110) #define MT_WF_ARB_TX_STOP_1 MT_WF_ARB(0x114) +#define MT_WF_ARB_TX_FLUSH_AC0 BIT(0) +#define MT_WF_ARB_TX_FLUSH_AC1 BIT(5) +#define MT_WF_ARB_TX_FLUSH_AC2 BIT(10) +#define MT_WF_ARB_TX_FLUSH_AC3 BIT(16) +#define MT_WF_ARB_TX_FLUSH_AC4 BIT(21) +#define MT_WF_ARB_TX_FLUSH_AC5 BIT(26) + #define MT_WF_ARB_BCN_START MT_WF_ARB(0x118) #define MT_WF_ARB_BCN_START_BSSn(n) BIT(0 + (n)) #define MT_WF_ARB_BCN_START_T_PRE_TTTT BIT(10) @@ -462,6 +469,11 @@ enum { #define MT_WF_SEC_BASE 0x21a00 #define MT_WF_SEC(ofs) (MT_WF_SEC_BASE + (ofs)) +#define MT_WF_CFG_OFF_BASE 0x21e00 +#define MT_WF_CFG_OFF(ofs) (MT_WF_CFG_OFF_BASE + (ofs)) +#define MT_WF_CFG_OFF_WOCCR MT_WF_CFG_OFF(0x004) +#define 
MT_WF_CFG_OFF_WOCCR_TMAC_GC_DIS BIT(4) + #define MT_SEC_SCR MT_WF_SEC(0x004) #define MT_SEC_SCR_MASK_ORDER GENMASK(1, 0) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig index 30fba36ff46b..1ab1439143f4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig @@ -11,7 +11,7 @@ config MT7615E depends on MAC80211 depends on PCI help - This adds support for MT7615-based wireless PCIe devices, + This adds support for MT7615-based PCIe wireless devices, which support concurrent dual-band operation at both 5GHz and 2.4GHz, IEEE 802.11ac 4x4:4SS 1733Mbps PHY rate, wave2 MU-MIMO up to 4 users/group and 160MHz channels. diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c index 621e69f07e3c..f7722f67db57 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c @@ -58,10 +58,7 @@ int mt7615_thermal_init(struct mt7615_dev *dev) wiphy_name(wiphy)); hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, dev, mt7615_hwmon_groups); - if (IS_ERR(hwmon)) - return PTR_ERR(hwmon); - - return 0; + return PTR_ERR_OR_ZERO(hwmon); } EXPORT_SYMBOL_GPL(mt7615_thermal_init); @@ -397,6 +394,8 @@ mt7615_init_wiphy(struct ieee80211_hw *hw) wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0); + if (!is_mt7622(&phy->dev->mt76)) + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER); ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN); @@ -626,8 +625,6 @@ void mt7615_init_device(struct mt7615_dev *dev) INIT_DELAYED_WORK(&dev->coredump.work, mt7615_coredump_work); skb_queue_head_init(&dev->phy.scan_event_list); skb_queue_head_init(&dev->coredump.msg_list); - INIT_LIST_HEAD(&dev->sta_poll_list); - spin_lock_init(&dev->sta_poll_lock); init_waitqueue_head(&dev->reset_wait); init_waitqueue_head(&dev->phy.roc_wait); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 64002484ccad..7ba789834e8d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -387,10 +387,11 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) struct mt7615_sta *msta; msta = container_of(status->wcid, struct mt7615_sta, wcid); - spin_lock_bh(&dev->sta_poll_lock); - if (list_empty(&msta->poll_list)) - list_add_tail(&msta->poll_list, &dev->sta_poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); + if (list_empty(&msta->wcid.poll_list)) + list_add_tail(&msta->wcid.poll_list, + &dev->mt76.sta_poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); } if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask && @@ -905,19 +906,19 @@ void mt7615_mac_sta_poll(struct mt7615_dev *dev) int i; INIT_LIST_HEAD(&sta_poll_list); - spin_lock_bh(&dev->sta_poll_lock); - list_splice_init(&dev->sta_poll_list, &sta_poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); + list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); while (!list_empty(&sta_poll_list)) { bool clear = false; msta = list_first_entry(&sta_poll_list, struct mt7615_sta, - 
poll_list); + wcid.poll_list); - spin_lock_bh(&dev->sta_poll_lock); - list_del_init(&msta->poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); + list_del_init(&msta->wcid.poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4; @@ -1514,10 +1515,10 @@ static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data) msta = container_of(wcid, struct mt7615_sta, wcid); sta = wcid_to_sta(wcid); - spin_lock_bh(&dev->sta_poll_lock); - if (list_empty(&msta->poll_list)) - list_add_tail(&msta->poll_list, &dev->sta_poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); + if (list_empty(&msta->wcid.poll_list)) + list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data)) goto out; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index dadb13f2ca09..dab16b5fc386 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -222,11 +222,11 @@ static int mt7615_add_interface(struct ieee80211_hw *hw, idx = MT7615_WTBL_RESERVED - mvif->mt76.idx; - INIT_LIST_HEAD(&mvif->sta.poll_list); + INIT_LIST_HEAD(&mvif->sta.wcid.poll_list); mvif->sta.wcid.idx = idx; mvif->sta.wcid.phy_idx = mvif->mt76.band_idx; mvif->sta.wcid.hw_key_idx = -1; - mt76_packet_id_init(&mvif->sta.wcid); + mt76_wcid_init(&mvif->sta.wcid); mt7615_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); @@ -274,12 +274,12 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw, mt7615_mutex_release(dev); - spin_lock_bh(&dev->sta_poll_lock); - if (!list_empty(&msta->poll_list)) - list_del_init(&msta->poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); + if (!list_empty(&msta->wcid.poll_list)) + list_del_init(&msta->wcid.poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); - mt76_packet_id_flush(&dev->mt76, &mvif->sta.wcid); + mt76_wcid_cleanup(&dev->mt76, &mvif->sta.wcid); } int mt7615_set_channel(struct mt7615_phy *phy) @@ -552,6 +552,32 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw, mt7615_mutex_release(dev); } +static void +mt7615_update_mu_group(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct mt7615_dev *dev = mt7615_hw_dev(hw); + u8 i, band = mvif->mt76.band_idx; + u32 *mu; + + mu = (u32 *)info->mu_group.membership; + for (i = 0; i < WLAN_MEMBERSHIP_LEN / sizeof(*mu); i++) { + if (is_mt7663(&dev->mt76)) + mt76_wr(dev, MT7663_WF_PHY_GID_TAB_VLD(band, i), mu[i]); + else + mt76_wr(dev, MT_WF_PHY_GID_TAB_VLD(band, i), mu[i]); + } + + mu = (u32 *)info->mu_group.position; + for (i = 0; i < WLAN_USER_POSITION_LEN / sizeof(*mu); i++) { + if (is_mt7663(&dev->mt76)) + mt76_wr(dev, MT7663_WF_PHY_GID_TAB_POS(band, i), mu[i]); + else + mt76_wr(dev, MT_WF_PHY_GID_TAB_POS(band, i), mu[i]); + } +} + static void mt7615_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, @@ -600,6 +626,9 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_ASSOC) mt7615_mac_set_beacon_filter(phy, vif, vif->cfg.assoc); + if (changed & BSS_CHANGED_MU_GROUPS) + mt7615_update_mu_group(hw, vif, info); + mt7615_mutex_release(dev); } @@ -628,7 +657,7 @@ 
int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, if (idx < 0) return -ENOSPC; - INIT_LIST_HEAD(&msta->poll_list); + INIT_LIST_HEAD(&msta->wcid.poll_list); msta->vif = mvif; msta->wcid.sta = 1; msta->wcid.idx = idx; @@ -676,10 +705,10 @@ void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) mt7615_mcu_add_bss_info(phy, vif, sta, false); - spin_lock_bh(&dev->sta_poll_lock); - if (!list_empty(&msta->poll_list)) - list_del_init(&msta->poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_lock_bh(&mdev->sta_poll_lock); + if (!list_empty(&msta->wcid.poll_list)) + list_del_init(&msta->wcid.poll_list); + spin_unlock_bh(&mdev->sta_poll_lock); mt76_connac_power_save_sched(phy->mt76, &dev->pm); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index 8d745c9730c7..955974a82180 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -2147,7 +2147,7 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd) }; if (cmd == MCU_EXT_CMD(SET_RX_PATH) || - dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR) + phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR) req.switch_reason = CH_SWITCH_NORMAL; else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index 582d1b5b7cb3..a20322aae967 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -125,7 +125,6 @@ struct mt7615_sta { struct mt7615_vif *vif; - struct list_head poll_list; u32 airtime_ac[8]; struct ieee80211_tx_rate rates[4]; @@ -262,9 +261,6 @@ struct mt7615_dev { wait_queue_head_t reset_wait; u32 reset_state; - struct list_head sta_poll_list; - spinlock_t sta_poll_lock; - struct { u8 n_pulses; u32 period; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h index d3eb49d83b98..9be5a58a4e6d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h @@ -14,7 +14,7 @@ #define MAXNAME 32 #define DEV_ENTRY __array(char, wiphy_name, 32) -#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \ +#define DEV_ASSIGN strscpy(__entry->wiphy_name, \ wiphy_name(mt76_hw(dev)->wiphy), MAXNAME) #define DEV_PR_FMT "%s" #define DEV_PR_ARG __entry->wiphy_name diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c index 0019890fdb78..fbb1181c58ff 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c @@ -106,7 +106,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, else mt76_connac_write_hw_txp(mdev, tx_info, txp, id); - tx_info->skb = DMA_DUMMY_DATA; + tx_info->skb = NULL; return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h index 7cecb22c569e..806b3887c541 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h @@ -212,6 +212,15 @@ enum mt7615_reg_base { #define MT7663_WF_PHY_R0_PHYCTRL_STS5(_phy) MT_WF_PHY(0x0224 + ((_phy) << 12)) +#define MT_WF_PHY_GID_TAB_VLD(_phy, i) 
MT_WF_PHY(0x0254 + (i) * 4 + \ + ((_phy) << 9)) +#define MT7663_WF_PHY_GID_TAB_VLD(_phy, i) MT_WF_PHY(0x0254 + (i) * 4 + \ + ((_phy) << 12)) +#define MT_WF_PHY_GID_TAB_POS(_phy, i) MT_WF_PHY(0x025c + (i) * 4 + \ + ((_phy) << 9)) +#define MT7663_WF_PHY_GID_TAB_POS(_phy, i) MT_WF_PHY(0x025c + (i) * 4 + \ + ((_phy) << 12)) + #define MT_WF_PHY_MIN_PRI_PWR(_phy) MT_WF_PHY((_phy) ? 0x084 : 0x229c) #define MT_WF_PHY_PD_OFDM_MASK(_phy) ((_phy) ? GENMASK(24, 16) : \ GENMASK(28, 20)) diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h index 15653b274f83..1f29d8cd900c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h @@ -172,6 +172,11 @@ struct mt76_connac_tx_free { extern const struct wiphy_wowlan_support mt76_connac_wowlan_support; +static inline bool is_mt7925(struct mt76_dev *dev) +{ + return mt76_chip(dev) == 0x7925; +} + static inline bool is_mt7922(struct mt76_dev *dev) { return mt76_chip(dev) == 0x7922; @@ -197,11 +202,21 @@ static inline bool is_mt7916(struct mt76_dev *dev) return mt76_chip(dev) == 0x7906; } +static inline bool is_mt7981(struct mt76_dev *dev) +{ + return mt76_chip(dev) == 0x7981; +} + static inline bool is_mt7986(struct mt76_dev *dev) { return mt76_chip(dev) == 0x7986; } +static inline bool is_mt798x(struct mt76_dev *dev) +{ + return is_mt7981(dev) || is_mt7986(dev); +} + static inline bool is_mt7996(struct mt76_dev *dev) { return mt76_chip(dev) == 0x7990; @@ -235,6 +250,7 @@ static inline bool is_mt76_fw_txp(struct mt76_dev *dev) switch (mt76_chip(dev)) { case 0x7961: case 0x7922: + case 0x7925: case 0x7663: case 0x7622: return false; @@ -409,5 +425,13 @@ int mt76_connac2_mac_fill_rx_rate(struct mt76_dev *dev, struct mt76_rx_status *status, struct ieee80211_supported_band *sband, __le32 *rxv, u8 *mode); - +void mt76_connac2_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi); +void mt76_connac2_txwi_free(struct mt76_dev *dev, struct mt76_txwi_cache *t, + struct ieee80211_sta *sta, + struct list_head *free_list); +void mt76_connac2_tx_token_put(struct mt76_dev *dev); + +/* connac3 */ +void mt76_connac3_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, + u8 mode); #endif /* __MT76_CONNAC_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h index fabf637bdf7f..bd2a92467a97 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h @@ -34,7 +34,7 @@ enum { #define MT_TX_FREE_MSDU_CNT GENMASK(9, 0) #define MT_TX_FREE_WLAN_ID GENMASK(23, 14) -#define MT_TX_FREE_LATENCY GENMASK(12, 0) +#define MT_TX_FREE_COUNT GENMASK(12, 0) /* 0: success, others: dropped */ #define MT_TX_FREE_STATUS GENMASK(14, 13) #define MT_TX_FREE_MSDU_ID GENMASK(30, 16) diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c new file mode 100644 index 000000000000..73e9f283d0ae --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. 
*/ + +#include "mt76_connac.h" +#include "mt76_connac3_mac.h" +#include "dma.h" + +#define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f) +#define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\ + IEEE80211_RADIOTAP_HE_##f) + +static void +mt76_connac3_mac_decode_he_radiotap_ru(struct mt76_rx_status *status, + struct ieee80211_radiotap_he *he, + __le32 *rxv) +{ + u32 ru = le32_get_bits(rxv[0], MT_PRXV_HE_RU_ALLOC), offs = 0; + + status->bw = RATE_INFO_BW_HE_RU; + + switch (ru) { + case 0 ... 36: + status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26; + offs = ru; + break; + case 37 ... 52: + status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52; + offs = ru - 37; + break; + case 53 ... 60: + status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; + offs = ru - 53; + break; + case 61 ... 64: + status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242; + offs = ru - 61; + break; + case 65 ... 66: + status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484; + offs = ru - 65; + break; + case 67: + status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996; + break; + case 68: + status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996; + break; + } + + he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN); + he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) | + le16_encode_bits(offs, + IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET); +} + +#define MU_PREP(f, v) le16_encode_bits(v, IEEE80211_RADIOTAP_HE_MU_##f) +static void +mt76_connac3_mac_decode_he_mu_radiotap(struct sk_buff *skb, __le32 *rxv) +{ + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + static const struct ieee80211_radiotap_he_mu mu_known = { + .flags1 = HE_BITS(MU_FLAGS1_SIG_B_MCS_KNOWN) | + HE_BITS(MU_FLAGS1_SIG_B_DCM_KNOWN) | + HE_BITS(MU_FLAGS1_CH1_RU_KNOWN) | + HE_BITS(MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN), + .flags2 = HE_BITS(MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN), + }; + struct ieee80211_radiotap_he_mu *he_mu; + + status->flag |= RX_FLAG_RADIOTAP_HE_MU; + + he_mu = skb_push(skb, sizeof(mu_known)); + memcpy(he_mu, &mu_known, sizeof(mu_known)); + + he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_MCS, status->rate_idx); + if (status->he_dcm) + he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_DCM, status->he_dcm); + + he_mu->flags2 |= MU_PREP(FLAGS2_BW_FROM_SIG_A_BW, status->bw) | + MU_PREP(FLAGS2_SIG_B_SYMS_USERS, + le32_get_bits(rxv[4], MT_CRXV_HE_NUM_USER)); + + he_mu->ru_ch1[0] = le32_get_bits(rxv[16], MT_CRXV_HE_RU0) & 0xff; + + if (status->bw >= RATE_INFO_BW_40) { + he_mu->flags1 |= HE_BITS(MU_FLAGS1_CH2_RU_KNOWN); + he_mu->ru_ch2[0] = le32_get_bits(rxv[16], MT_CRXV_HE_RU1) & 0xff; + } + + if (status->bw >= RATE_INFO_BW_80) { + u32 ru_h, ru_l; + + he_mu->ru_ch1[1] = le32_get_bits(rxv[16], MT_CRXV_HE_RU2) & 0xff; + + ru_l = le32_get_bits(rxv[16], MT_CRXV_HE_RU3_L); + ru_h = le32_get_bits(rxv[17], MT_CRXV_HE_RU3_H) & 0x7; + he_mu->ru_ch2[1] = (u8)(ru_l | ru_h << 4); + } +} + +void mt76_connac3_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, + u8 mode) +{ + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + static const struct ieee80211_radiotap_he known = { + .data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) | + HE_BITS(DATA1_DATA_DCM_KNOWN) | + HE_BITS(DATA1_STBC_KNOWN) | + HE_BITS(DATA1_CODING_KNOWN) | + HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) | + HE_BITS(DATA1_DOPPLER_KNOWN) | + HE_BITS(DATA1_SPTL_REUSE_KNOWN) | + HE_BITS(DATA1_BSS_COLOR_KNOWN), + .data2 = HE_BITS(DATA2_GI_KNOWN) | + HE_BITS(DATA2_TXBF_KNOWN) | + HE_BITS(DATA2_PE_DISAMBIG_KNOWN) | + HE_BITS(DATA2_TXOP_KNOWN), + }; + u32 ltf_size = le32_get_bits(rxv[4], MT_CRXV_HE_LTF_SIZE) + 1; + struct 
ieee80211_radiotap_he *he; + + status->flag |= RX_FLAG_RADIOTAP_HE; + + he = skb_push(skb, sizeof(known)); + memcpy(he, &known, sizeof(known)); + + he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[9]) | + HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[4]); + he->data4 = HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[13]); + he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[5]) | + le16_encode_bits(ltf_size, + IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE); + if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF) + he->data5 |= HE_BITS(DATA5_TXBF); + he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[9]) | + HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[9]); + + switch (mode) { + case MT_PHY_TYPE_HE_SU: + he->data1 |= HE_BITS(DATA1_FORMAT_SU) | + HE_BITS(DATA1_UL_DL_KNOWN) | + HE_BITS(DATA1_BEAM_CHANGE_KNOWN) | + HE_BITS(DATA1_BW_RU_ALLOC_KNOWN); + + he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[8]) | + HE_PREP(DATA3_UL_DL, UPLINK, rxv[5]); + break; + case MT_PHY_TYPE_HE_EXT_SU: + he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) | + HE_BITS(DATA1_UL_DL_KNOWN) | + HE_BITS(DATA1_BW_RU_ALLOC_KNOWN); + + he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[5]); + break; + case MT_PHY_TYPE_HE_MU: + he->data1 |= HE_BITS(DATA1_FORMAT_MU) | + HE_BITS(DATA1_UL_DL_KNOWN); + + he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[5]); + he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[8]); + + mt76_connac3_mac_decode_he_radiotap_ru(status, he, rxv); + mt76_connac3_mac_decode_he_mu_radiotap(skb, rxv); + break; + case MT_PHY_TYPE_HE_TB: + he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) | + HE_BITS(DATA1_SPTL_REUSE2_KNOWN) | + HE_BITS(DATA1_SPTL_REUSE3_KNOWN) | + HE_BITS(DATA1_SPTL_REUSE4_KNOWN); + + he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[13]) | + HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[13]) | + HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[13]) | + HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[13]); + + mt76_connac3_mac_decode_he_radiotap_ru(status, he, rxv); + break; + default: + break; + } +} +EXPORT_SYMBOL_GPL(mt76_connac3_mac_decode_he_radiotap); diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h new file mode 100644 index 000000000000..2250252b2047 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h @@ -0,0 +1,355 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2023 MediaTek Inc. 
*/ + +#ifndef __MT76_CONNAC3_MAC_H +#define __MT76_CONNAC3_MAC_H + +enum { + MT_CTX0, + MT_HIF0 = 0x0, + + MT_LMAC_AC00 = 0x0, + MT_LMAC_AC01, + MT_LMAC_AC02, + MT_LMAC_AC03, + MT_LMAC_ALTX0 = 0x10, + MT_LMAC_BMC0, + MT_LMAC_BCN0, + MT_LMAC_PSMP0, +}; + +#define MT_CT_PARSE_LEN 72 +#define MT_CT_DMA_BUF_NUM 2 + +#define MT_RXD0_LENGTH GENMASK(15, 0) +#define MT_RXD0_PKT_FLAG GENMASK(19, 16) +#define MT_RXD0_PKT_TYPE GENMASK(31, 27) + +#define MT_RXD0_MESH BIT(18) +#define MT_RXD0_MHCP BIT(19) +#define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16) +#define MT_RXD0_NORMAL_IP_SUM BIT(23) +#define MT_RXD0_NORMAL_UDP_TCP_SUM BIT(24) + +#define MT_RXD0_SW_PKT_TYPE_MASK GENMASK(31, 16) +#define MT_RXD0_SW_PKT_TYPE_MAP 0x380F +#define MT_RXD0_SW_PKT_TYPE_FRAME 0x3801 + +/* RXD DW1 */ +#define MT_RXD1_NORMAL_WLAN_IDX GENMASK(11, 0) +#define MT_RXD1_NORMAL_GROUP_1 BIT(16) +#define MT_RXD1_NORMAL_GROUP_2 BIT(17) +#define MT_RXD1_NORMAL_GROUP_3 BIT(18) +#define MT_RXD1_NORMAL_GROUP_4 BIT(19) +#define MT_RXD1_NORMAL_GROUP_5 BIT(20) +#define MT_RXD1_NORMAL_KEY_ID GENMASK(22, 21) +#define MT_RXD1_NORMAL_CM BIT(23) +#define MT_RXD1_NORMAL_CLM BIT(24) +#define MT_RXD1_NORMAL_ICV_ERR BIT(25) +#define MT_RXD1_NORMAL_TKIP_MIC_ERR BIT(26) +#define MT_RXD1_NORMAL_BAND_IDX GENMASK(28, 27) +#define MT_RXD1_NORMAL_SPP_EN BIT(29) +#define MT_RXD1_NORMAL_ADD_OM BIT(30) +#define MT_RXD1_NORMAL_SEC_DONE BIT(31) + +/* RXD DW2 */ +#define MT_RXD2_NORMAL_BSSID GENMASK(5, 0) +#define MT_RXD2_NORMAL_MAC_HDR_LEN GENMASK(12, 8) +#define MT_RXD2_NORMAL_HDR_TRANS BIT(7) +#define MT_RXD2_NORMAL_HDR_OFFSET GENMASK(15, 13) +#define MT_RXD2_NORMAL_SEC_MODE GENMASK(20, 16) +#define MT_RXD2_NORMAL_MU_BAR BIT(21) +#define MT_RXD2_NORMAL_SW_BIT BIT(22) +#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23) +#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24) +#define MT_RXD2_NORMAL_HDR_TRANS_ERROR BIT(25) +#define MT_RXD2_NORMAL_INT_FRAME BIT(26) +#define MT_RXD2_NORMAL_FRAG BIT(27) +#define MT_RXD2_NORMAL_NULL_FRAME BIT(28) +#define MT_RXD2_NORMAL_NDATA BIT(29) +#define MT_RXD2_NORMAL_NON_AMPDU BIT(30) +#define MT_RXD2_NORMAL_BF_REPORT BIT(31) + +/* RXD DW3 */ +#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0) +#define MT_RXD3_NORMAL_CH_FREQ GENMASK(15, 8) +#define MT_RXD3_NORMAL_ADDR_TYPE GENMASK(17, 16) +#define MT_RXD3_NORMAL_U2M BIT(0) +#define MT_RXD3_NORMAL_HTC_VLD BIT(18) +#define MT_RXD3_NORMAL_BEACON_MC BIT(20) +#define MT_RXD3_NORMAL_BEACON_UC BIT(21) +#define MT_RXD3_NORMAL_CO_ANT BIT(22) +#define MT_RXD3_NORMAL_FCS_ERR BIT(24) +#define MT_RXD3_NORMAL_VLAN2ETH BIT(31) + +/* RXD DW4 */ +#define MT_RXD4_NORMAL_PAYLOAD_FORMAT GENMASK(1, 0) +#define MT_RXD4_FIRST_AMSDU_FRAME GENMASK(1, 0) +#define MT_RXD4_MID_AMSDU_FRAME BIT(1) +#define MT_RXD4_LAST_AMSDU_FRAME BIT(0) + +#define MT_RXV_HDR_BAND_IDX BIT(24) + +/* RXD GROUP4 */ +#define MT_RXD8_FRAME_CONTROL GENMASK(15, 0) + +#define MT_RXD10_SEQ_CTRL GENMASK(15, 0) +#define MT_RXD10_QOS_CTL GENMASK(31, 16) + +#define MT_RXD11_HT_CONTROL GENMASK(31, 0) + +/* P-RXV */ +#define MT_PRXV_TX_RATE GENMASK(6, 0) +#define MT_PRXV_TX_DCM BIT(4) +#define MT_PRXV_TX_ER_SU_106T BIT(5) +#define MT_PRXV_NSTS GENMASK(10, 7) +#define MT_PRXV_TXBF BIT(11) +#define MT_PRXV_HT_AD_CODE BIT(12) +#define MT_PRXV_HE_RU_ALLOC GENMASK(30, 22) +#define MT_PRXV_RCPI3 GENMASK(31, 24) +#define MT_PRXV_RCPI2 GENMASK(23, 16) +#define MT_PRXV_RCPI1 GENMASK(15, 8) +#define MT_PRXV_RCPI0 GENMASK(7, 0) +#define MT_PRXV_HT_SHORT_GI GENMASK(4, 3) +#define MT_PRXV_HT_STBC GENMASK(10, 9) +#define MT_PRXV_TX_MODE GENMASK(14, 11) +#define 
MT_PRXV_FRAME_MODE GENMASK(2, 0) +#define MT_PRXV_DCM BIT(5) + +/* C-RXV */ +#define MT_CRXV_HE_NUM_USER GENMASK(26, 20) +#define MT_CRXV_HE_LTF_SIZE GENMASK(28, 27) +#define MT_CRXV_HE_LDPC_EXT_SYM BIT(30) + +#define MT_CRXV_HE_PE_DISAMBIG BIT(1) +#define MT_CRXV_HE_UPLINK BIT(2) + +#define MT_CRXV_HE_MU_AID GENMASK(27, 17) +#define MT_CRXV_HE_BEAM_CHNG BIT(29) + +#define MT_CRXV_HE_DOPPLER BIT(0) +#define MT_CRXV_HE_BSS_COLOR GENMASK(15, 10) +#define MT_CRXV_HE_TXOP_DUR GENMASK(19, 17) + +#define MT_CRXV_HE_SR_MASK GENMASK(11, 8) +#define MT_CRXV_HE_SR1_MASK GENMASK(16, 12) +#define MT_CRXV_HE_SR2_MASK GENMASK(20, 17) +#define MT_CRXV_HE_SR3_MASK GENMASK(24, 21) + +#define MT_CRXV_HE_RU0 GENMASK(8, 0) +#define MT_CRXV_HE_RU1 GENMASK(17, 9) +#define MT_CRXV_HE_RU2 GENMASK(26, 18) +#define MT_CRXV_HE_RU3_L GENMASK(31, 27) +#define MT_CRXV_HE_RU3_H GENMASK(3, 0) + +enum tx_header_format { + MT_HDR_FORMAT_802_3, + MT_HDR_FORMAT_CMD, + MT_HDR_FORMAT_802_11, + MT_HDR_FORMAT_802_11_EXT, +}; + +enum tx_pkt_type { + MT_TX_TYPE_CT, + MT_TX_TYPE_SF, + MT_TX_TYPE_CMD, + MT_TX_TYPE_FW, +}; + +enum tx_port_idx { + MT_TX_PORT_IDX_LMAC, + MT_TX_PORT_IDX_MCU +}; + +enum tx_mcu_port_q_idx { + MT_TX_MCU_PORT_RX_Q0 = 0x20, + MT_TX_MCU_PORT_RX_Q1, + MT_TX_MCU_PORT_RX_Q2, + MT_TX_MCU_PORT_RX_Q3, + MT_TX_MCU_PORT_RX_FWDL = 0x3e +}; + +enum tx_mgnt_type { + MT_TX_NORMAL, + MT_TX_TIMING, + MT_TX_ADDBA, +}; + +#define MT_CT_INFO_APPLY_TXD BIT(0) +#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1) +#define MT_CT_INFO_MGMT_FRAME BIT(2) +#define MT_CT_INFO_NONE_CIPHER_FRAME BIT(3) +#define MT_CT_INFO_HSR2_TX BIT(4) +#define MT_CT_INFO_FROM_HOST BIT(7) + +#define MT_TXD_SIZE (8 * 4) + +#define MT_TXD0_Q_IDX GENMASK(31, 25) +#define MT_TXD0_PKT_FMT GENMASK(24, 23) +#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16) +#define MT_TXD0_TX_BYTES GENMASK(15, 0) + +#define MT_TXD1_FIXED_RATE BIT(31) +#define MT_TXD1_OWN_MAC GENMASK(30, 25) +#define MT_TXD1_TID GENMASK(24, 21) +#define MT_TXD1_BIP BIT(24) +#define MT_TXD1_ETH_802_3 BIT(20) +#define MT_TXD1_HDR_INFO GENMASK(20, 16) +#define MT_TXD1_HDR_FORMAT GENMASK(15, 14) +#define MT_TXD1_TGID GENMASK(13, 12) +#define MT_TXD1_WLAN_IDX GENMASK(11, 0) + +#define MT_TXD2_POWER_OFFSET GENMASK(31, 26) +#define MT_TXD2_MAX_TX_TIME GENMASK(25, 16) +#define MT_TXD2_FRAG GENMASK(15, 14) +#define MT_TXD2_HTC_VLD BIT(13) +#define MT_TXD2_DURATION BIT(12) +#define MT_TXD2_HDR_PAD GENMASK(11, 10) +#define MT_TXD2_RTS BIT(9) +#define MT_TXD2_OWN_MAC_MAP BIT(8) +#define MT_TXD2_BF_TYPE GENMASK(6, 7) +#define MT_TXD2_FRAME_TYPE GENMASK(5, 4) +#define MT_TXD2_SUB_TYPE GENMASK(3, 0) + +#define MT_TXD3_SN_VALID BIT(31) +#define MT_TXD3_PN_VALID BIT(30) +#define MT_TXD3_SW_POWER_MGMT BIT(29) +#define MT_TXD3_BA_DISABLE BIT(28) +#define MT_TXD3_SEQ GENMASK(27, 16) +#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11) +#define MT_TXD3_TX_COUNT GENMASK(10, 6) +#define MT_TXD3_HW_AMSDU BIT(5) +#define MT_TXD3_BCM BIT(4) +#define MT_TXD3_EEOSP BIT(3) +#define MT_TXD3_EMRD BIT(2) +#define MT_TXD3_PROTECT_FRAME BIT(1) +#define MT_TXD3_NO_ACK BIT(0) + +#define MT_TXD4_PN_LOW GENMASK(31, 0) + +#define MT_TXD5_PN_HIGH GENMASK(31, 16) +#define MT_TXD5_FL BIT(15) +#define MT_TXD5_BYPASS_TBB BIT(14) +#define MT_TXD5_BYPASS_RBB BIT(13) +#define MT_TXD5_BSS_COLOR_ZERO BIT(12) +#define MT_TXD5_TX_STATUS_HOST BIT(10) +#define MT_TXD5_TX_STATUS_MCU BIT(9) +#define MT_TXD5_TX_STATUS_FMT BIT(8) +#define MT_TXD5_PID GENMASK(7, 0) + +#define MT_TXD6_TX_SRC GENMASK(31, 30) +#define MT_TXD6_VTA BIT(28) +#define MT_TXD6_BW GENMASK(25, 
22) +#define MT_TXD6_TX_RATE GENMASK(21, 16) +#define MT_TXD6_TIMESTAMP_OFS_EN BIT(15) +#define MT_TXD6_TIMESTAMP_OFS_IDX GENMASK(14, 10) +#define MT_TXD6_MSDU_CNT GENMASK(9, 4) +#define MT_TXD6_DIS_MAT BIT(3) +#define MT_TXD6_DAS BIT(2) +#define MT_TXD6_AMSDU_CAP BIT(1) + +#define MT_TXD7_TXD_LEN GENMASK(31, 30) +#define MT_TXD7_IP_SUM BIT(29) +#define MT_TXD7_DROP_BY_SDO BIT(28) +#define MT_TXD7_MAC_TXD BIT(27) +#define MT_TXD7_CTXD BIT(26) +#define MT_TXD7_CTXD_CNT GENMASK(25, 22) +#define MT_TXD7_UDP_TCP_SUM BIT(15) +#define MT_TXD7_TX_TIME GENMASK(9, 0) + +#define MT_TXD9_WLAN_IDX GENMASK(23, 8) + +#define MT_TX_RATE_STBC BIT(14) +#define MT_TX_RATE_NSS GENMASK(13, 10) +#define MT_TX_RATE_MODE GENMASK(9, 6) +#define MT_TX_RATE_SU_EXT_TONE BIT(5) +#define MT_TX_RATE_DCM BIT(4) +/* VHT/HE only use bits 0-3 */ +#define MT_TX_RATE_IDX GENMASK(5, 0) + +#define MT_TXFREE0_PKT_TYPE GENMASK(31, 27) +#define MT_TXFREE0_MSDU_CNT GENMASK(25, 16) +#define MT_TXFREE0_RX_BYTE GENMASK(15, 0) + +#define MT_TXFREE1_VER GENMASK(19, 16) + +#define MT_TXFREE_INFO_PAIR BIT(31) +#define MT_TXFREE_INFO_HEADER BIT(30) +#define MT_TXFREE_INFO_WLAN_ID GENMASK(23, 12) +#define MT_TXFREE_INFO_MSDU_ID GENMASK(14, 0) +#define MT_TXFREE_INFO_COUNT GENMASK(27, 24) +#define MT_TXFREE_INFO_STAT GENMASK(29, 28) + +#define MT_TXS0_BW GENMASK(31, 29) +#define MT_TXS0_TID GENMASK(28, 26) +#define MT_TXS0_AMPDU BIT(25) +#define MT_TXS0_TXS_FORMAT GENMASK(24, 23) +#define MT_TXS0_BA_ERROR BIT(22) +#define MT_TXS0_PS_FLAG BIT(21) +#define MT_TXS0_TXOP_TIMEOUT BIT(20) +#define MT_TXS0_BIP_ERROR BIT(19) + +#define MT_TXS0_QUEUE_TIMEOUT BIT(18) +#define MT_TXS0_RTS_TIMEOUT BIT(17) +#define MT_TXS0_ACK_TIMEOUT BIT(16) +#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16) + +#define MT_TXS0_TX_STATUS_HOST BIT(15) +#define MT_TXS0_TX_STATUS_MCU BIT(14) +#define MT_TXS0_TX_RATE GENMASK(13, 0) + +#define MT_TXS1_SEQNO GENMASK(31, 20) +#define MT_TXS1_RESP_RATE GENMASK(19, 16) +#define MT_TXS1_RXV_SEQNO GENMASK(15, 8) +#define MT_TXS1_TX_POWER_DBM GENMASK(7, 0) + +#define MT_TXS2_BF_STATUS GENMASK(31, 30) +#define MT_TXS2_BAND GENMASK(29, 28) +#define MT_TXS2_WCID GENMASK(27, 16) +#define MT_TXS2_TX_DELAY GENMASK(15, 0) + +#define MT_TXS3_PID GENMASK(31, 24) +#define MT_TXS3_RATE_STBC BIT(7) +#define MT_TXS3_FIXED_RATE BIT(6) +#define MT_TXS3_SRC GENMASK(5, 4) +#define MT_TXS3_SHARED_ANTENNA BIT(3) +#define MT_TXS3_LAST_TX_RATE GENMASK(2, 0) + +#define MT_TXS4_TIMESTAMP GENMASK(31, 0) + +/* MPDU based TXS */ +#define MT_TXS5_F0_FINAL_MPDU BIT(31) +#define MT_TXS5_F0_QOS BIT(30) +#define MT_TXS5_F0_TX_COUNT GENMASK(29, 25) +#define MT_TXS5_F0_FRONT_TIME GENMASK(24, 0) +#define MT_TXS5_F1_MPDU_TX_COUNT GENMASK(31, 24) +#define MT_TXS5_F1_MPDU_TX_BYTES GENMASK(23, 0) + +#define MT_TXS6_F0_NOISE_3 GENMASK(31, 24) +#define MT_TXS6_F0_NOISE_2 GENMASK(23, 16) +#define MT_TXS6_F0_NOISE_1 GENMASK(15, 8) +#define MT_TXS6_F0_NOISE_0 GENMASK(7, 0) +#define MT_TXS6_F1_MPDU_FAIL_COUNT GENMASK(31, 24) +#define MT_TXS6_F1_MPDU_FAIL_BYTES GENMASK(23, 0) + +#define MT_TXS7_F0_RCPI_3 GENMASK(31, 24) +#define MT_TXS7_F0_RCPI_2 GENMASK(23, 16) +#define MT_TXS7_F0_RCPI_1 GENMASK(15, 8) +#define MT_TXS7_F0_RCPI_0 GENMASK(7, 0) +#define MT_TXS7_F1_MPDU_RETRY_COUNT GENMASK(31, 24) +#define MT_TXS7_F1_MPDU_RETRY_BYTES GENMASK(23, 0) + +/* PPDU based TXS */ +#define MT_TXS5_MPDU_TX_CNT GENMASK(30, 20) +#define MT_TXS5_MPDU_TX_BYTE_SCALE BIT(15) +#define MT_TXS5_MPDU_TX_BYTE GENMASK(14, 0) + +#define MT_TXS6_MPDU_FAIL_CNT GENMASK(30, 20) +#define 
MT_TXS6_MPDU_FAIL_BYTE_SCALE BIT(15) +#define MT_TXS6_MPDU_FAIL_BYTE GENMASK(14, 0) + +#define MT_TXS7_MPDU_RETRY_CNT GENMASK(30, 20) +#define MT_TXS7_MPDU_RETRY_BYTE_SCALE BIT(15) +#define MT_TXS7_MPDU_RETRY_BYTE GENMASK(14, 0) + +#endif /* __MT76_CONNAC3_MAC_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c index d39a3cc5e381..93402d2c2538 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c @@ -151,23 +151,6 @@ void mt76_connac_tx_complete_skb(struct mt76_dev *mdev, return; } - /* error path */ - if (e->skb == DMA_DUMMY_DATA) { - struct mt76_connac_txp_common *txp; - struct mt76_txwi_cache *t; - u16 token; - - txp = mt76_connac_txwi_to_txp(mdev, e->txwi); - if (is_mt76_fw_txp(mdev)) - token = le16_to_cpu(txp->fw.token); - else - token = le16_to_cpu(txp->hw.msdu_id[0]) & - ~MT_MSDU_ID_VALID; - - t = mt76_token_put(mdev, token); - e->skb = t ? t->skb : NULL; - } - if (e->skb) mt76_tx_complete_skb(mdev, e->wcid, e->skb); } @@ -187,7 +170,7 @@ void mt76_connac_write_hw_txp(struct mt76_dev *dev, txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID); - if (is_mt7663(dev) || is_mt7921(dev)) + if (is_mt7663(dev) || is_mt7921(dev) || is_mt7925(dev)) last_mask = MT_TXD_LEN_LAST; else last_mask = MT_TXD_LEN_AMSDU_LAST | @@ -231,7 +214,7 @@ mt76_connac_txp_skb_unmap_hw(struct mt76_dev *dev, u32 last_mask; int i; - if (is_mt7663(dev) || is_mt7921(dev)) + if (is_mt7663(dev) || is_mt7921(dev) || is_mt7925(dev)) last_mask = MT_TXD_LEN_LAST; else last_mask = MT_TXD_LEN_MSDU_LAST; @@ -310,7 +293,10 @@ u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy, struct ieee80211_vif *vif, bool beacon, bool mcast) { - u8 nss = 0, mode = 0, band = mphy->chandef.chan->band; + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct cfg80211_chan_def *chandef = mvif->ctx ? + &mvif->ctx->def : &mphy->chandef; + u8 nss = 0, mode = 0, band = chandef->chan->band; int rateidx = 0, mcast_rate; if (!vif) @@ -343,7 +329,7 @@ u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy, rateidx = ffs(vif->bss_conf.basic_rates) - 1; legacy: - rateidx = mt76_calculate_default_rate(mphy, rateidx); + rateidx = mt76_calculate_default_rate(mphy, vif, rateidx); mode = rateidx >> 8; rateidx &= GENMASK(7, 0); out: @@ -495,6 +481,7 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi, BSS_CHANGED_BEACON_ENABLED)); bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP | BSS_CHANGED_FILS_DISCOVERY)); + bool amsdu_en = wcid->amsdu; if (vif) { struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; @@ -521,9 +508,9 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi, q_idx = wmm_idx * MT76_CONNAC_MAX_WMM_SETS + mt76_connac_lmac_mapping(skb_get_queue_mapping(skb)); - /* counting non-offloading skbs */ - wcid->stats.tx_bytes += skb->len; - wcid->stats.tx_packets++; + /* mt7915 WA only counts WED path */ + if (is_mt7915(dev) && mtk_wed_device_active(&dev->mmio.wed)) + wcid->stats.tx_packets++; } val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) | @@ -554,12 +541,14 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi, txwi[4] = 0; val = FIELD_PREP(MT_TXD5_PID, pid); - if (pid >= MT_PACKET_ID_FIRST) + if (pid >= MT_PACKET_ID_FIRST) { val |= MT_TXD5_TX_STATUS_HOST; + amsdu_en = amsdu_en && !is_mt7921(dev); + } txwi[5] = cpu_to_le32(val); txwi[6] = 0; - txwi[7] = wcid->amsdu ? 
cpu_to_le32(MT_TXD7_HW_AMSDU) : 0; + txwi[7] = amsdu_en ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0; if (is_8023) mt76_connac2_mac_write_txwi_8023(txwi, skb, wcid); @@ -606,12 +595,11 @@ bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid, txs = le32_to_cpu(txs_data[0]); /* PPDU based reporting */ - if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1) { + if (mtk_wed_device_active(&dev->mmio.wed) && + FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1) { stats->tx_bytes += le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_BYTE) - le32_get_bits(txs_data[7], MT_TXS7_MPDU_RETRY_BYTE); - stats->tx_packets += - le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_CNT); stats->tx_failed += le32_get_bits(txs_data[6], MT_TXS6_MPDU_FAIL_CNT); stats->tx_retries += @@ -729,17 +717,15 @@ bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid, skb = mt76_tx_status_skb_get(dev, wcid, pid, &list); if (skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - bool noacked = !(info->flags & IEEE80211_TX_STAT_ACK); if (!(le32_to_cpu(txs_data[0]) & MT_TXS0_ACK_ERROR_MASK)) info->flags |= IEEE80211_TX_STAT_ACK; info->status.ampdu_len = 1; - info->status.ampdu_ack_len = !noacked; + info->status.ampdu_ack_len = + !!(info->flags & IEEE80211_TX_STAT_ACK); info->status.rates[0].idx = -1; - wcid->stats.tx_failed += noacked; - mt76_connac2_mac_fill_txs(dev, wcid, txs_data); mt76_tx_status_skb_done(dev, skb, &list); } @@ -1112,3 +1098,85 @@ int mt76_connac2_mac_fill_rx_rate(struct mt76_dev *dev, return 0; } EXPORT_SYMBOL_GPL(mt76_connac2_mac_fill_rx_rate); + +void mt76_connac2_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi) +{ + struct mt76_wcid *wcid; + u16 fc, tid; + u32 val; + + if (!sta || + !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he)) + return; + + tid = le32_get_bits(txwi[1], MT_TXD1_TID); + if (tid >= 6) /* skip VO queue */ + return; + + val = le32_to_cpu(txwi[2]); + fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 | + FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4; + if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA))) + return; + + wcid = (struct mt76_wcid *)sta->drv_priv; + if (!test_and_set_bit(tid, &wcid->ampdu_state)) + ieee80211_start_tx_ba_session(sta, tid, 0); +} +EXPORT_SYMBOL_GPL(mt76_connac2_tx_check_aggr); + +void mt76_connac2_txwi_free(struct mt76_dev *dev, struct mt76_txwi_cache *t, + struct ieee80211_sta *sta, + struct list_head *free_list) +{ + struct mt76_wcid *wcid; + __le32 *txwi; + u16 wcid_idx; + + mt76_connac_txp_skb_unmap(dev, t); + if (!t->skb) + goto out; + + txwi = (__le32 *)mt76_get_txwi_ptr(dev, t); + if (sta) { + wcid = (struct mt76_wcid *)sta->drv_priv; + wcid_idx = wcid->idx; + } else { + wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX); + wcid = rcu_dereference(dev->wcid[wcid_idx]); + + if (wcid && wcid->sta) { + sta = container_of((void *)wcid, struct ieee80211_sta, + drv_priv); + spin_lock_bh(&dev->sta_poll_lock); + if (list_empty(&wcid->poll_list)) + list_add_tail(&wcid->poll_list, + &dev->sta_poll_list); + spin_unlock_bh(&dev->sta_poll_lock); + } + } + + if (sta && likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) + mt76_connac2_tx_check_aggr(sta, txwi); + + __mt76_tx_complete_skb(dev, wcid_idx, t->skb, free_list); +out: + t->skb = NULL; + mt76_put_txwi(dev, t); +} +EXPORT_SYMBOL_GPL(mt76_connac2_txwi_free); + +void mt76_connac2_tx_token_put(struct mt76_dev *dev) +{ + struct mt76_txwi_cache *txwi; + int id; + + spin_lock_bh(&dev->token_lock); + idr_for_each_entry(&dev->token, txwi, id) { + mt76_connac2_txwi_free(dev, txwi, 
NULL, NULL); + dev->token_count--; + } + spin_unlock_bh(&dev->token_lock); + idr_destroy(&dev->token); +} +EXPORT_SYMBOL_GPL(mt76_connac2_tx_token_put); diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index 0f0a519f956f..ae6bf3c968df 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -66,6 +66,7 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len, if ((!is_connac_v1(dev) && addr == MCU_PATCH_ADDRESS) || (is_mt7921(dev) && addr == 0x900000) || + (is_mt7925(dev) && addr == 0x900000) || (is_mt7996(dev) && addr == 0x900000)) cmd = MCU_CMD(PATCH_START_REQ); else @@ -745,7 +746,7 @@ mt76_connac_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) he->pkt_ext = 2; } -static void +void mt76_connac_mcu_sta_he_tlv_v2(struct sk_buff *skb, struct ieee80211_sta *sta) { struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; @@ -777,20 +778,23 @@ mt76_connac_mcu_sta_he_tlv_v2(struct sk_buff *skb, struct ieee80211_sta *sta) he->pkt_ext = IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US; } +EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_he_tlv_v2); -static u8 +u8 mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif, enum nl80211_band band, struct ieee80211_sta *sta) { struct ieee80211_sta_ht_cap *ht_cap; struct ieee80211_sta_vht_cap *vht_cap; const struct ieee80211_sta_he_cap *he_cap; + const struct ieee80211_sta_eht_cap *eht_cap; u8 mode = 0; if (sta) { ht_cap = &sta->deflink.ht_cap; vht_cap = &sta->deflink.vht_cap; he_cap = &sta->deflink.he_cap; + eht_cap = &sta->deflink.eht_cap; } else { struct ieee80211_supported_band *sband; @@ -798,6 +802,7 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif, ht_cap = &sband->ht_cap; vht_cap = &sband->vht_cap; he_cap = ieee80211_get_he_iftype_cap(sband, vif->type); + eht_cap = ieee80211_get_eht_iftype_cap(sband, vif->type); } if (band == NL80211_BAND_2GHZ) { @@ -808,6 +813,9 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif, if (he_cap && he_cap->has_he) mode |= PHY_TYPE_BIT_HE; + + if (eht_cap && eht_cap->has_eht) + mode |= PHY_TYPE_BIT_BE; } else if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) { mode |= PHY_TYPE_BIT_OFDM; @@ -819,17 +827,23 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif, if (he_cap && he_cap->has_he) mode |= PHY_TYPE_BIT_HE; + + if (eht_cap && eht_cap->has_eht) + mode |= PHY_TYPE_BIT_BE; } return mode; } +EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode_v2); void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb, struct ieee80211_sta *sta, struct ieee80211_vif *vif, u8 rcpi, u8 sta_state) { - struct cfg80211_chan_def *chandef = &mphy->chandef; + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct cfg80211_chan_def *chandef = mvif->ctx ? + &mvif->ctx->def : &mphy->chandef; enum nl80211_band band = chandef->chan->band; struct mt76_dev *dev = mphy->dev; struct sta_rec_ra_info *ra_info; @@ -1369,7 +1383,10 @@ EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode_ext); const struct ieee80211_sta_he_cap * mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif) { - enum nl80211_band band = phy->chandef.chan->band; + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct cfg80211_chan_def *chandef = mvif->ctx ? 
+ &mvif->ctx->def : &phy->chandef; + enum nl80211_band band = chandef->chan->band; struct ieee80211_supported_band *sband; sband = phy->hw->wiphy->bands[band]; @@ -1924,126 +1941,6 @@ void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb, } EXPORT_SYMBOL_GPL(mt76_connac_mcu_coredump_event); -static void mt76_connac_mcu_parse_tx_resource(struct mt76_dev *dev, - struct sk_buff *skb) -{ - struct mt76_sdio *sdio = &dev->sdio; - struct mt76_connac_tx_resource { - __le32 version; - __le32 pse_data_quota; - __le32 pse_mcu_quota; - __le32 ple_data_quota; - __le32 ple_mcu_quota; - __le16 pse_page_size; - __le16 ple_page_size; - u8 pp_padding; - u8 pad[3]; - } __packed * tx_res; - - tx_res = (struct mt76_connac_tx_resource *)skb->data; - sdio->sched.pse_data_quota = le32_to_cpu(tx_res->pse_data_quota); - sdio->sched.pse_mcu_quota = le32_to_cpu(tx_res->pse_mcu_quota); - sdio->sched.ple_data_quota = le32_to_cpu(tx_res->ple_data_quota); - sdio->sched.pse_page_size = le16_to_cpu(tx_res->pse_page_size); - sdio->sched.deficit = tx_res->pp_padding; -} - -static void mt76_connac_mcu_parse_phy_cap(struct mt76_dev *dev, - struct sk_buff *skb) -{ - struct mt76_connac_phy_cap { - u8 ht; - u8 vht; - u8 _5g; - u8 max_bw; - u8 nss; - u8 dbdc; - u8 tx_ldpc; - u8 rx_ldpc; - u8 tx_stbc; - u8 rx_stbc; - u8 hw_path; - u8 he; - } __packed * cap; - - enum { - WF0_24G, - WF0_5G - }; - - cap = (struct mt76_connac_phy_cap *)skb->data; - - dev->phy.antenna_mask = BIT(cap->nss) - 1; - dev->phy.chainmask = dev->phy.antenna_mask; - dev->phy.cap.has_2ghz = cap->hw_path & BIT(WF0_24G); - dev->phy.cap.has_5ghz = cap->hw_path & BIT(WF0_5G); -} - -int mt76_connac_mcu_get_nic_capability(struct mt76_phy *phy) -{ - struct mt76_connac_cap_hdr { - __le16 n_element; - u8 rsv[2]; - } __packed * hdr; - struct sk_buff *skb; - int ret, i; - - ret = mt76_mcu_send_and_get_msg(phy->dev, MCU_CE_CMD(GET_NIC_CAPAB), - NULL, 0, true, &skb); - if (ret) - return ret; - - hdr = (struct mt76_connac_cap_hdr *)skb->data; - if (skb->len < sizeof(*hdr)) { - ret = -EINVAL; - goto out; - } - - skb_pull(skb, sizeof(*hdr)); - - for (i = 0; i < le16_to_cpu(hdr->n_element); i++) { - struct tlv_hdr { - __le32 type; - __le32 len; - } __packed * tlv = (struct tlv_hdr *)skb->data; - int len; - - if (skb->len < sizeof(*tlv)) - break; - - skb_pull(skb, sizeof(*tlv)); - - len = le32_to_cpu(tlv->len); - if (skb->len < len) - break; - - switch (le32_to_cpu(tlv->type)) { - case MT_NIC_CAP_6G: - phy->cap.has_6ghz = skb->data[0]; - break; - case MT_NIC_CAP_MAC_ADDR: - memcpy(phy->macaddr, (void *)skb->data, ETH_ALEN); - break; - case MT_NIC_CAP_PHY: - mt76_connac_mcu_parse_phy_cap(phy->dev, skb); - break; - case MT_NIC_CAP_TX_RESOURCE: - if (mt76_is_sdio(phy->dev)) - mt76_connac_mcu_parse_tx_resource(phy->dev, - skb); - break; - default: - break; - } - skb_pull(skb, len); - } -out: - dev_kfree_skb(skb); - - return ret; -} -EXPORT_SYMBOL_GPL(mt76_connac_mcu_get_nic_capability); - static void mt76_connac_mcu_build_sku(struct mt76_dev *dev, s8 *sku, struct mt76_power_limits *limits, @@ -2087,9 +1984,9 @@ mt76_connac_mcu_build_sku(struct mt76_dev *dev, s8 *sku, } } -static s8 mt76_connac_get_ch_power(struct mt76_phy *phy, - struct ieee80211_channel *chan, - s8 target_power) +s8 mt76_connac_get_ch_power(struct mt76_phy *phy, + struct ieee80211_channel *chan, + s8 target_power) { struct mt76_dev *dev = phy->dev; struct ieee80211_supported_band *sband; @@ -2126,6 +2023,7 @@ static s8 mt76_connac_get_ch_power(struct mt76_phy *phy, return target_power; } 
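The large block deleted above, mt76_connac_mcu_get_nic_capability() with its two helper parsers, is a typical firmware TLV walk: a short header carries an element count, then each element is a (type, len) pair followed by a payload, with both lengths bounds-checked before anything is read. Below is a minimal buffer-based restatement of that walk, assuming a little-endian host; parse_caps(), cap_hdr and tlv_hdr are local illustrative names, not mt76 API.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct cap_hdr {
	uint16_t n_element;	/* number of TLV records that follow */
	uint8_t rsv[2];
};

struct tlv_hdr {
	uint32_t type;
	uint32_t len;		/* payload length, header excluded */
};

static int parse_caps(const uint8_t *buf, size_t size)
{
	struct cap_hdr hdr;
	size_t off = sizeof(hdr);
	uint16_t i;

	if (size < sizeof(hdr))
		return -1;
	memcpy(&hdr, buf, sizeof(hdr));

	for (i = 0; i < hdr.n_element; i++) {
		struct tlv_hdr tlv;

		/* never read a TLV header past the end of the message */
		if (size - off < sizeof(tlv))
			break;
		memcpy(&tlv, buf + off, sizeof(tlv));
		off += sizeof(tlv);

		/* ...or a payload longer than what remains */
		if (tlv.len > size - off)
			break;

		switch (tlv.type) {
		case 0x20:	/* e.g. MT_NIC_CAP_CHIP_CAP */
			/* payload is buf + off .. buf + off + tlv.len */
			break;
		default:	/* unknown tags are skipped, not fatal */
			break;
		}
		off += tlv.len;
	}
	return 0;
}

The deleted kernel version does the same walk with skb_pull(), which is why each case branch only ever looks at skb->data after the two length checks have passed.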
+EXPORT_SYMBOL_GPL(mt76_connac_get_ch_power); static int mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy, @@ -2144,7 +2042,7 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy, 112, 114, 116, 118, 120, 122, 124, 126, 128, 132, 134, 136, 138, 140, 142, 144, 149, 151, 153, 155, 157, - 159, 161, 165 + 159, 161, 165, 169, 173, 177 }; static const u8 chan_list_6ghz[] = { 1, 3, 5, 7, 9, 11, 13, @@ -2164,11 +2062,15 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy, 209, 211, 213, 215, 217, 219, 221, 225, 227, 229, 233 }; - int i, n_chan, batch_size, idx = 0, tx_power, last_ch; + int i, n_chan, batch_size, idx = 0, tx_power, last_ch, err = 0; struct mt76_connac_sku_tlv sku_tlbv; - struct mt76_power_limits limits; + struct mt76_power_limits *limits; const u8 *ch_list; + limits = devm_kmalloc(dev->dev, sizeof(*limits), GFP_KERNEL); + if (!limits) + return -ENOMEM; + sku_len = is_mt7921(dev) ? sizeof(sku_tlbv) : sizeof(sku_tlbv) - 92; tx_power = 2 * phy->hw->conf.power_level; if (!tx_power) @@ -2195,14 +2097,16 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy, for (i = 0; i < batch_size; i++) { struct mt76_connac_tx_power_limit_tlv tx_power_tlv = {}; - int j, err, msg_len, num_ch; + int j, msg_len, num_ch; struct sk_buff *skb; num_ch = i == batch_size - 1 ? n_chan % batch_len : batch_len; msg_len = sizeof(tx_power_tlv) + num_ch * sizeof(sku_tlbv); skb = mt76_mcu_msg_alloc(dev, NULL, msg_len); - if (!skb) - return -ENOMEM; + if (!skb) { + err = -ENOMEM; + goto out; + } skb_reserve(skb, sizeof(tx_power_tlv)); @@ -2233,14 +2137,14 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy, tx_power); sar_power = mt76_get_sar_power(phy, &chan, reg_power); - mt76_get_rate_power_limits(phy, &chan, &limits, + mt76_get_rate_power_limits(phy, &chan, limits, sar_power); tx_power_tlv.last_msg = ch_list[idx] == last_ch; sku_tlbv.channel = ch_list[idx]; mt76_connac_mcu_build_sku(dev, sku_tlbv.pwr_limit, - &limits, band); + limits, band); skb_put_data(skb, &sku_tlbv, sku_len); } __skb_push(skb, sizeof(tx_power_tlv)); @@ -2250,10 +2154,12 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy, MCU_CE_CMD(SET_RATE_TX_POWER), false); if (err < 0) - return err; + goto out; } - return 0; +out: + devm_kfree(dev->dev, limits); + return err; } int mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy) @@ -2457,7 +2363,7 @@ mt76_connac_mcu_set_arp_filter(struct mt76_dev *dev, struct ieee80211_vif *vif, sizeof(req), true); } -static int +int mt76_connac_mcu_set_gtk_rekey(struct mt76_dev *dev, struct ieee80211_vif *vif, bool suspend) { @@ -2482,8 +2388,9 @@ mt76_connac_mcu_set_gtk_rekey(struct mt76_dev *dev, struct ieee80211_vif *vif, return mt76_mcu_send_msg(dev, MCU_UNI_CMD(OFFLOAD), &req, sizeof(req), true); } +EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_gtk_rekey); -static int +int mt76_connac_mcu_set_suspend_mode(struct mt76_dev *dev, struct ieee80211_vif *vif, bool enable, u8 mdtim, @@ -2512,6 +2419,7 @@ mt76_connac_mcu_set_suspend_mode(struct mt76_dev *dev, return mt76_mcu_send_msg(dev, MCU_UNI_CMD(SUSPEND), &req, sizeof(req), true); } +EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_suspend_mode); static int mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev, @@ -2547,7 +2455,7 @@ mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev, return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD(SUSPEND), true); } -static int +int mt76_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif, bool suspend, struct cfg80211_wowlan *wowlan) { @@ -2599,6 +2507,7 @@ 
mt76_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif, return mt76_mcu_send_msg(dev, MCU_UNI_CMD(SUSPEND), &req, sizeof(req), true); } +EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_wow_ctrl); int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend) { @@ -3064,7 +2973,7 @@ static u32 mt76_connac2_get_data_mode(struct mt76_dev *dev, u32 info) { u32 mode = DL_MODE_NEED_RSP; - if (!is_mt7921(dev) || info == PATCH_SEC_NOT_SUPPORT) + if ((!is_mt7921(dev) && !is_mt7925(dev)) || info == PATCH_SEC_NOT_SUPPORT) return mode; switch (FIELD_GET(PATCH_SEC_ENC_TYPE_MASK, info)) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h index ca1ce97a6d2f..0563b1b22f48 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h @@ -22,6 +22,7 @@ #define FW_START_OVERRIDE BIT(0) #define FW_START_WORKING_PDA_CR4 BIT(2) +#define FW_START_WORKING_PDA_DSP BIT(3) #define PATCH_SEC_NOT_SUPPORT GENMASK(31, 0) #define PATCH_SEC_TYPE_MASK GENMASK(15, 0) @@ -190,6 +191,7 @@ struct mt76_connac2_fw_region { struct tlv { __le16 tag; __le16 len; + u8 data[]; } __packed; struct bss_info_omac { @@ -518,7 +520,8 @@ struct sta_rec_muru { u8 uo_ra; u8 he_2x996_tone; u8 rx_t_frame_11ac; - u8 rsv[3]; + u8 rx_ctrl_frame_to_mbss; + u8 rsv[2]; } ofdma_ul; struct { @@ -793,6 +796,7 @@ enum { STA_REC_PHY = 0x15, STA_REC_HE_6G = 0x17, STA_REC_HE_V2 = 0x19, + STA_REC_MLD = 0x20, STA_REC_EHT = 0x22, STA_REC_HDRT = 0x28, STA_REC_HDR_TRANS = 0x2B, @@ -917,6 +921,7 @@ enum { PHY_TYPE_HT_INDEX, PHY_TYPE_VHT_INDEX, PHY_TYPE_HE_INDEX, + PHY_TYPE_BE_INDEX, PHY_TYPE_INDEX_NUM }; @@ -926,6 +931,7 @@ enum { #define PHY_TYPE_BIT_HT BIT(PHY_TYPE_HT_INDEX) #define PHY_TYPE_BIT_VHT BIT(PHY_TYPE_VHT_INDEX) #define PHY_TYPE_BIT_HE BIT(PHY_TYPE_HE_INDEX) +#define PHY_TYPE_BIT_BE BIT(PHY_TYPE_BE_INDEX) #define MT_WTBL_RATE_TX_MODE GENMASK(9, 6) #define MT_WTBL_RATE_MCS GENMASK(5, 0) @@ -998,6 +1004,7 @@ enum { MCU_EXT_EVENT_ASSERT_DUMP = 0x23, MCU_EXT_EVENT_RDD_REPORT = 0x3a, MCU_EXT_EVENT_CSA_NOTIFY = 0x4f, + MCU_EXT_EVENT_WA_TX_STAT = 0x74, MCU_EXT_EVENT_BCC_NOTIFY = 0x75, MCU_EXT_EVENT_MURU_CTRL = 0x9f, }; @@ -1006,8 +1013,17 @@ enum { enum { MCU_UNI_EVENT_RESULT = 0x01, MCU_UNI_EVENT_FW_LOG_2_HOST = 0x04, + MCU_UNI_EVENT_ACCESS_REG = 0x6, MCU_UNI_EVENT_IE_COUNTDOWN = 0x09, + MCU_UNI_EVENT_COREDUMP = 0x0a, + MCU_UNI_EVENT_BSS_BEACON_LOSS = 0x0c, + MCU_UNI_EVENT_SCAN_DONE = 0x0e, MCU_UNI_EVENT_RDD_REPORT = 0x11, + MCU_UNI_EVENT_ROC = 0x27, + MCU_UNI_EVENT_TX_DONE = 0x2d, + MCU_UNI_EVENT_NIC_CAPAB = 0x43, + MCU_UNI_EVENT_PER_STA_INFO = 0x6d, + MCU_UNI_EVENT_ALL_STA_INFO = 0x6e, }; #define MCU_UNI_CMD_EVENT BIT(1) @@ -1206,12 +1222,17 @@ enum { MCU_UNI_CMD_RX_HDR_TRANS = 0x12, MCU_UNI_CMD_SER = 0x13, MCU_UNI_CMD_TWT = 0x14, + MCU_UNI_CMD_SET_DOMAIN_INFO = 0x15, + MCU_UNI_CMD_SCAN_REQ = 0x16, MCU_UNI_CMD_RDD_CTRL = 0x19, MCU_UNI_CMD_GET_MIB_INFO = 0x22, + MCU_UNI_CMD_GET_STAT_INFO = 0x23, MCU_UNI_CMD_SNIFFER = 0x24, MCU_UNI_CMD_SR = 0x25, MCU_UNI_CMD_ROC = 0x27, + MCU_UNI_CMD_SET_DBDC_PARMS = 0x28, MCU_UNI_CMD_TXPOWER = 0x2b, + MCU_UNI_CMD_SET_POWER_LIMIT = 0x2c, MCU_UNI_CMD_EFUSE_CTRL = 0x2d, MCU_UNI_CMD_RA = 0x2f, MCU_UNI_CMD_MURU = 0x31, @@ -1221,6 +1242,8 @@ enum { MCU_UNI_CMD_VOW = 0x37, MCU_UNI_CMD_RRO = 0x57, MCU_UNI_CMD_OFFCH_SCAN_CTRL = 0x58, + MCU_UNI_CMD_PER_STA_INFO = 0x6d, + MCU_UNI_CMD_ALL_STA_INFO = 0x6e, MCU_UNI_CMD_ASSERT_DUMP = 0x6f, }; @@ -1276,6 +1299,7 @@ enum { 
UNI_BSS_INFO_RLM = 2, UNI_BSS_INFO_BSS_COLOR = 4, UNI_BSS_INFO_HE_BASIC = 5, + UNI_BSS_INFO_11V_MBSSID = 6, UNI_BSS_INFO_BCN_CONTENT = 7, UNI_BSS_INFO_BCN_CSA = 8, UNI_BSS_INFO_BCN_BCC = 9, @@ -1287,8 +1311,10 @@ enum { UNI_BSS_INFO_UAPSD = 19, UNI_BSS_INFO_PS = 21, UNI_BSS_INFO_BCNFT = 22, + UNI_BSS_INFO_IFS_TIME = 23, UNI_BSS_INFO_OFFLOAD = 25, UNI_BSS_INFO_MLD = 26, + UNI_BSS_INFO_PM_DISABLE = 27, }; enum { @@ -1298,6 +1324,17 @@ enum { UNI_OFFLOAD_OFFLOAD_BMC_RPY_DETECT, }; +enum UNI_ALL_STA_INFO_TAG { + UNI_ALL_STA_TX_RATE, + UNI_ALL_STA_TX_STAT, + UNI_ALL_STA_TXRX_ADM_STAT, + UNI_ALL_STA_TXRX_AIR_TIME, + UNI_ALL_STA_DATA_TX_RETRY_COUNT, + UNI_ALL_STA_GI_MODE, + UNI_ALL_STA_TXRX_MSDU_COUNT, + UNI_ALL_STA_MAX_NUM +}; + enum { MT_NIC_CAP_TX_RESOURCE, MT_NIC_CAP_TX_EFUSE_ADDR, @@ -1318,6 +1355,7 @@ enum { MT_NIC_CAP_ANTSWP = 0x16, MT_NIC_CAP_WFDMA_REALLOC, MT_NIC_CAP_6G, + MT_NIC_CAP_CHIP_CAP = 0x20, }; #define UNI_WOW_DETECT_TYPE_MAGIC BIT(0) @@ -1545,6 +1583,15 @@ struct bss_info_uni_he { u8 rsv[2]; } __packed; +struct bss_info_uni_mbssid { + __le16 tag; + __le16 len; + u8 max_indicator; + u8 mbss_idx; + u8 tx_bss_omac_idx; + u8 rsv; +} __packed; + struct mt76_connac_gtk_rekey_tlv { __le16 tag; __le16 len; @@ -1735,7 +1782,7 @@ mt76_connac_mcu_gen_dl_mode(struct mt76_dev *dev, u8 feature_set, bool is_wa) ret |= feature_set & FW_FEATURE_SET_ENCRYPT ? DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV : 0; - if (is_mt7921(dev)) + if (is_mt7921(dev) || is_mt7925(dev)) ret |= feature_set & FW_FEATURE_ENCRY_MODE ? DL_CONFIG_ENCRY_MODE_SEL : 0; ret |= FIELD_PREP(DL_MODE_KEY_IDX, @@ -1803,6 +1850,9 @@ void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb, int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev, struct ieee80211_vif *vif, struct mt76_wcid *wcid, int cmd); +void mt76_connac_mcu_sta_he_tlv_v2(struct sk_buff *skb, struct ieee80211_sta *sta); +u8 mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif, + enum nl80211_band band, struct ieee80211_sta *sta); int mt76_connac_mcu_wtbl_update_hdr_trans(struct mt76_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); @@ -1847,7 +1897,6 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len, int mt76_connac_mcu_start_patch(struct mt76_dev *dev); int mt76_connac_mcu_patch_sem_ctrl(struct mt76_dev *dev, bool get); int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option); -int mt76_connac_mcu_get_nic_capability(struct mt76_phy *phy); int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, struct ieee80211_scan_request *scan_req); @@ -1862,9 +1911,17 @@ int mt76_connac_mcu_sched_scan_enable(struct mt76_phy *phy, int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev, struct mt76_vif *vif, struct ieee80211_bss_conf *info); +int mt76_connac_mcu_set_gtk_rekey(struct mt76_dev *dev, struct ieee80211_vif *vif, + bool suspend); +int mt76_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif, + bool suspend, struct cfg80211_wowlan *wowlan); int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_gtk_rekey_data *key); +int mt76_connac_mcu_set_suspend_mode(struct mt76_dev *dev, + struct ieee80211_vif *vif, + bool enable, u8 mdtim, + bool wow_suspend); int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend); void mt76_connac_mcu_set_suspend_iter(void *priv, u8 *mac, struct ieee80211_vif *vif); @@ -1875,6 +1932,9 @@ int mt76_connac_mcu_chip_config(struct mt76_dev *dev); 
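Every descriptor header in this series (the connac3 RXD/TXD/TXS words above, the WTBL and PHY registers earlier) leans on the same three kernel helpers: GENMASK() names a bit range, FIELD_PREP() shifts a value into it, and FIELD_GET() pulls it back out. Here is a self-contained 32-bit illustration using the TXD DW0 fields from mt76_connac3_mac.h; the userspace macro stand-ins assume a GCC/Clang __builtin_ctz, and the packed values are made up.

#include <assert.h>
#include <stdint.h>

/* userspace stand-ins for the kernel helpers, 32-bit only;
 * GENMASK32(h, l) sets bits l..h inclusive */
#define GENMASK32(h, l)		((~0u >> (31 - (h))) & (~0u << (l)))
#define FIELD_PREP32(mask, v)	(((uint32_t)(v) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET32(mask, r)	(((r) & (mask)) >> __builtin_ctz(mask))

/* TXD DW0 layout, copied from mt76_connac3_mac.h above */
#define MT_TXD0_Q_IDX		GENMASK32(31, 25)
#define MT_TXD0_PKT_FMT		GENMASK32(24, 23)
#define MT_TXD0_TX_BYTES	GENMASK32(15, 0)

int main(void)
{
	/* mirrors the shape of "val = FIELD_PREP(MT_TXD0_TX_BYTES,
	 * skb->len + sz_txd) | ..." in mt76_connac2_mac_write_txwi() */
	uint32_t dw0 = FIELD_PREP32(MT_TXD0_TX_BYTES, 1540) |
		       FIELD_PREP32(MT_TXD0_PKT_FMT, 1) |
		       FIELD_PREP32(MT_TXD0_Q_IDX, 0x20);

	/* the mask constant encodes both position and width, so
	 * extraction needs no separate shift define */
	assert(FIELD_GET32(MT_TXD0_TX_BYTES, dw0) == 1540);
	assert(FIELD_GET32(MT_TXD0_Q_IDX, dw0) == 0x20);
	return 0;
}

This is also why a rename like MT_TX_FREE_LATENCY to MT_TX_FREE_COUNT earlier in the series is a pure relabel: only the mask's name changes, never any shift arithmetic at the use sites.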
int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable); void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb, struct mt76_connac_coredump *coredump); +s8 mt76_connac_get_ch_power(struct mt76_phy *phy, + struct ieee80211_channel *chan, + s8 target_power); int mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy); int mt76_connac_mcu_set_p2p_oppps(struct ieee80211_hw *hw, struct ieee80211_vif *vif); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig b/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig index 7c88ed8b8f1e..3ed888782a70 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig @@ -10,7 +10,7 @@ config MT76x0U depends on MAC80211 depends on USB help - This adds support for MT7610U-based wireless USB 2.0 dongles, + This adds support for MT7610U-based USB 2.0 wireless dongles, which comply with IEEE 802.11ac standards and support 1x1 433Mbps PHY rate. @@ -22,7 +22,7 @@ config MT76x0E depends on MAC80211 depends on PCI help - This adds support for MT7610/MT7630-based wireless PCIe devices, + This adds support for MT7610/MT7630-based PCIe wireless devices, which comply with IEEE 802.11ac standards and support 1x1 433Mbps PHY rate. diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c index ad4dc8e17b58..d570b99bccb9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c @@ -136,7 +136,8 @@ EXPORT_SYMBOL_GPL(mt76x02_resync_beacon_timer); void mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) { - struct mt76x02_dev *dev = (struct mt76x02_dev *)priv; + struct beacon_bc_data *data = priv; + struct mt76x02_dev *dev = data->dev; struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; struct sk_buff *skb = NULL; @@ -147,7 +148,7 @@ mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) if (!skb) return; - mt76x02_mac_set_beacon(dev, skb); + __skb_queue_tail(&data->q, skb); } EXPORT_SYMBOL_GPL(mt76x02_update_beacon_iter); @@ -182,9 +183,6 @@ mt76x02_enqueue_buffered_bc(struct mt76x02_dev *dev, { int i, nframes; - data->dev = dev; - __skb_queue_head_init(&data->q); - do { nframes = skb_queue_len(&data->q); ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c index 0acabba2d1a5..5d402cf2951c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c @@ -131,15 +131,8 @@ u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev, s8 *lna_2g, s8 *lna_5g, struct ieee80211_channel *chan) { - u16 val; u8 lna; - val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1); - if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G) - *lna_2g = 0; - if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G) - memset(lna_5g, 0, sizeof(s8) * 3); - if (chan->band == NL80211_BAND_2GHZ) lna = *lna_2g; else if (chan->hw_value <= 64) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c index 3e41d809ade3..d5db6ffd6d36 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c @@ -853,7 +853,8 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb, if (WARN_ON_ONCE(len > skb->len)) return -EINVAL; - pskb_trim(skb, len); + if (pskb_trim(skb, len)) + return 
-EINVAL; status->chains = BIT(0); signal = mt76x02_mac_get_rssi(dev, rxwi->rssi[0], 0); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index e9c5e85ec07c..9b5e3fb7b0df 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ -16,13 +16,17 @@ static void mt76x02_pre_tbtt_tasklet(struct tasklet_struct *t) struct mt76x02_dev *dev = from_tasklet(dev, t, mt76.pre_tbtt_tasklet); struct mt76_dev *mdev = &dev->mt76; struct mt76_queue *q = dev->mphy.q_tx[MT_TXQ_PSD]; - struct beacon_bc_data data = {}; + struct beacon_bc_data data = { + .dev = dev, + }; struct sk_buff *skb; int i; if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL) return; + __skb_queue_head_init(&data.q); + mt76x02_resync_beacon_timer(dev); /* Prevent corrupt transmissions during update */ @@ -31,7 +35,10 @@ static void mt76x02_pre_tbtt_tasklet(struct tasklet_struct *t) ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), IEEE80211_IFACE_ITER_RESUME_ALL, - mt76x02_update_beacon_iter, dev); + mt76x02_update_beacon_iter, &data); + + while ((skb = __skb_dequeue(&data.q)) != NULL) + mt76x02_mac_set_beacon(dev, skb); mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~(0xff00 >> dev->beacon_data_count)); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h index 6a98092e996b..11d119cd0f6f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h @@ -14,7 +14,7 @@ #define MAXNAME 32 #define DEV_ENTRY __array(char, wiphy_name, 32) -#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \ +#define DEV_ASSIGN strscpy(__entry->wiphy_name, \ wiphy_name(mt76_hw(dev)->wiphy), MAXNAME) #define DEV_PR_FMT "%s" #define DEV_PR_ARG __entry->wiphy_name diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c index 2c6c03809b20..85a78dea4085 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c @@ -182,7 +182,9 @@ static void mt76x02u_pre_tbtt_work(struct work_struct *work) { struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev, pre_tbtt_work); - struct beacon_bc_data data = {}; + struct beacon_bc_data data = { + .dev = dev, + }; struct sk_buff *skb; int nbeacons; @@ -192,15 +194,20 @@ static void mt76x02u_pre_tbtt_work(struct work_struct *work) if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL) return; + __skb_queue_head_init(&data.q); + mt76x02_resync_beacon_timer(dev); /* Prevent corrupt transmissions during update */ mt76_set(dev, MT_BCN_BYPASS_MASK, 0xffff); dev->beacon_data_count = 0; - ieee80211_iterate_active_interfaces(mt76_hw(dev), + ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), IEEE80211_IFACE_ITER_RESUME_ALL, - mt76x02_update_beacon_iter, dev); + mt76x02_update_beacon_iter, &data); + + while ((skb = __skb_dequeue(&data.q)) != NULL) + mt76x02_mac_set_beacon(dev, skb); mt76_csa_check(&dev->mt76); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c index dcbb5c605dfe..8a0e8124b894 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c @@ -288,7 +288,7 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif, mvif->idx = idx; mvif->group_wcid.idx = MT_VIF_WCID(idx); 
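The two pre-TBTT hunks above (mt76x02_mmio.c and mt76x02_usb_core.c) converge on one shape: ieee80211_iterate_active_interfaces_atomic() runs its callback in atomic context, where sleeping or long hardware access is illegal, so the callback now only queues each beacon skb and the device is programmed after the iteration ends. The following is a schematic sketch of that pattern with the mt76x02 specifics stripped out; program_beacon() is a hypothetical stand-in for mt76x02_mac_set_beacon().

#include <linux/skbuff.h>
#include <net/mac80211.h>

struct beacon_collect {
	struct ieee80211_hw *hw;
	struct sk_buff_head q;		/* beacons gathered atomically */
};

/* device-specific beacon write; placeholder body so the sketch
 * stands alone */
static void program_beacon(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}

/* runs under the atomic iterator: queue only, touch no hardware */
static void beacon_collect_iter(void *priv, u8 *mac,
				struct ieee80211_vif *vif)
{
	struct beacon_collect *bc = priv;
	struct sk_buff *skb = ieee80211_beacon_get(bc->hw, vif, 0);

	if (skb)
		__skb_queue_tail(&bc->q, skb);
}

static void pre_tbtt(struct ieee80211_hw *hw)
{
	struct beacon_collect bc = { .hw = hw };
	struct sk_buff *skb;

	__skb_queue_head_init(&bc.q);
	ieee80211_iterate_active_interfaces_atomic(hw,
			IEEE80211_IFACE_ITER_RESUME_ALL,
			beacon_collect_iter, &bc);

	/* out of atomic context: safe to hit MMIO or USB paths */
	while ((skb = __skb_dequeue(&bc.q)) != NULL)
		program_beacon(hw, skb);
}

For the USB variant this also lets the driver move from the sleeping iterator to the atomic one, keeping both buses on the same code shape.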
mvif->group_wcid.hw_key_idx = -1; - mt76_packet_id_init(&mvif->group_wcid); + mt76_wcid_init(&mvif->group_wcid); mtxq = (struct mt76_txq *)vif->txq->drv_priv; rcu_assign_pointer(dev->mt76.wcid[MT_VIF_WCID(idx)], &mvif->group_wcid); @@ -346,7 +346,7 @@ void mt76x02_remove_interface(struct ieee80211_hw *hw, dev->mt76.vif_mask &= ~BIT_ULL(mvif->idx); rcu_assign_pointer(dev->mt76.wcid[mvif->group_wcid.idx], NULL); - mt76_packet_id_flush(&dev->mt76, &mvif->group_wcid); + mt76_wcid_cleanup(&dev->mt76, &mvif->group_wcid); } EXPORT_SYMBOL_GPL(mt76x02_remove_interface); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig b/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig index 5fd4973e32df..482a32b70ddf 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig @@ -9,7 +9,7 @@ config MT76x2E depends on MAC80211 depends on PCI help - This adds support for MT7612/MT7602/MT7662-based wireless PCIe + This adds support for MT7612/MT7602/MT7662-based PCIe wireless devices, which comply with IEEE 802.11ac standards and support 2SS to 866Mbit/s PHY rate. @@ -22,7 +22,7 @@ config MT76x2U depends on MAC80211 depends on USB help - This adds support for MT7612U-based wireless USB 3.0 dongles, + This adds support for MT7612U-based USB 3.0 wireless dongles, which comply with IEEE 802.11ac standards and support 2SS to 866Mbit/s PHY rate. diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c index d5809408d1d3..8c01855885ce 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c @@ -256,7 +256,8 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev) struct ieee80211_channel *chan = dev->mphy.chandef.chan; int channel = chan->hw_value; s8 lna_5g[3], lna_2g; - u8 lna; + bool use_lna; + u8 lna = 0; u16 val; if (chan->band == NL80211_BAND_2GHZ) @@ -275,7 +276,15 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev) dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16; dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24; - lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan); + val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1); + if (chan->band == NL80211_BAND_2GHZ) + use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_2G); + else + use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_5G); + + if (use_lna) + lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan); + dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8); } EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig index d710726d47bf..193112c49bd1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig @@ -7,19 +7,19 @@ config MT7915E depends on PCI select RELAY help - This adds support for MT7915-based wireless PCIe devices, + This adds support for MT7915-based PCIe wireless devices, which support concurrent dual-band operation at both 5GHz and 2.4GHz IEEE 802.11ax 4x4:4SS 1024-QAM, 160MHz channels, OFDMA, spatial reuse and dual carrier modulation. To compile this driver as a module, choose M here. 
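
A hedged sketch (not driver code) of the pattern the mt76x02_mmio.c and mt76x02_usb_core.c pre-TBTT hunks above switch to: beacon skbs are collected into a caller-owned queue inside the atomic interface iterator, and the hardware is programmed only after the iteration has finished. All demo_* names are hypothetical stand-ins, not part of mt76.

/*
 * Collect-then-program beacon update, as in the hunks above.
 * demo_dev / demo_hw() / demo_mac_set_beacon() are illustrative only.
 */
#include <net/mac80211.h>

struct demo_dev;                                        /* stand-in for the driver device */
struct ieee80211_hw *demo_hw(struct demo_dev *dev);     /* stand-in for mt76_hw() */
void demo_mac_set_beacon(struct demo_dev *dev, struct sk_buff *skb);

struct demo_beacon_data {
        struct demo_dev *dev;
        struct sk_buff_head q;
};

static void demo_update_beacon_iter(void *priv, u8 *mac,
                                    struct ieee80211_vif *vif)
{
        struct demo_beacon_data *data = priv;
        struct sk_buff *skb;

        skb = ieee80211_beacon_get(demo_hw(data->dev), vif, 0);
        if (!skb)
                return;

        /* no register access here: the iterator runs atomically */
        __skb_queue_tail(&data->q, skb);
}

static void demo_pre_tbtt(struct demo_dev *dev)
{
        struct demo_beacon_data data = { .dev = dev };
        struct sk_buff *skb;

        __skb_queue_head_init(&data.q);

        ieee80211_iterate_active_interfaces_atomic(demo_hw(dev),
                        IEEE80211_IFACE_ITER_RESUME_ALL,
                        demo_update_beacon_iter, &data);

        /* safe to touch hardware now, outside the atomic iterator */
        while ((skb = __skb_dequeue(&data.q)) != NULL)
                demo_mac_set_beacon(dev, skb);
}

Deferring mt76x02_mac_set_beacon() until after the atomic walk also lets the USB path share the same iterator as the MMIO path, which is why the usb_core hunk above can move from ieee80211_iterate_active_interfaces() to the _atomic variant.
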
-config MT7986_WMAC - bool "MT7986 (SoC) WMAC support" +config MT798X_WMAC + bool "MT798x (SoC) WMAC support" depends on MT7915E depends on ARCH_MEDIATEK || COMPILE_TEST select REGMAP help - This adds support for the built-in WMAC on MT7986 SoC device + This adds support for the built-in WMAC on MT7981 and MT7986 SoC device which has the same feature set as a MT7915, but enables 6E support. diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile index 797ae49805c3..e0ca638c91a5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile +++ b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile @@ -6,5 +6,5 @@ mt7915e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o \ debugfs.o mmio.o mt7915e-$(CONFIG_NL80211_TESTMODE) += testmode.o -mt7915e-$(CONFIG_MT7986_WMAC) += soc.o +mt7915e-$(CONFIG_MT798X_WMAC) += soc.o mt7915e-$(CONFIG_DEV_COREDUMP) += coredump.o diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/coredump.c b/drivers/net/wireless/mediatek/mt76/mt7915/coredump.c index d097a56dd33d..5daf2258dfe6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/coredump.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/coredump.c @@ -52,7 +52,7 @@ static const struct mt7915_mem_region mt7916_mem_regions[] = { }, }; -static const struct mt7915_mem_region mt7986_mem_regions[] = { +static const struct mt7915_mem_region mt798x_mem_regions[] = { { .start = 0x00800000, .len = 0x0005ffff, @@ -92,9 +92,10 @@ mt7915_coredump_get_mem_layout(struct mt7915_dev *dev, u32 *num) case 0x7915: *num = ARRAY_SIZE(mt7915_mem_regions); return &mt7915_mem_regions[0]; + case 0x7981: case 0x7986: - *num = ARRAY_SIZE(mt7986_mem_regions); - return &mt7986_mem_regions[0]; + *num = ARRAY_SIZE(mt798x_mem_regions); + return &mt798x_mem_regions[0]; case 0x7916: *num = ARRAY_SIZE(mt7916_mem_regions); return &mt7916_mem_regions[0]; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c index 879884ead660..6c3696c8c700 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c @@ -251,7 +251,6 @@ static int mt7915_muru_stats_show(struct seq_file *file, void *data) { struct mt7915_phy *phy = file->private; struct mt7915_dev *dev = phy->dev; - struct mt7915_mcu_muru_stats mu_stats = {}; static const char * const dl_non_he_type[] = { "CCK", "OFDM", "HT MIX", "HT GF", "VHT SU", "VHT 2MU", "VHT 3MU", "VHT 4MU" @@ -275,7 +274,7 @@ static int mt7915_muru_stats_show(struct seq_file *file, void *data) mutex_lock(&dev->mt76.mutex); - ret = mt7915_mcu_muru_debug_get(phy, &mu_stats); + ret = mt7915_mcu_muru_debug_get(phy); if (ret) goto exit; @@ -285,14 +284,13 @@ static int mt7915_muru_stats_show(struct seq_file *file, void *data) for (i = 0; i < 5; i++) seq_printf(file, "%8s | ", dl_non_he_type[i]); -#define __dl_u32(s) le32_to_cpu(mu_stats.dl.s) seq_puts(file, "\nTotal Count:"); seq_printf(file, "%8u | %8u | %8u | %8u | %8u | ", - __dl_u32(cck_cnt), - __dl_u32(ofdm_cnt), - __dl_u32(htmix_cnt), - __dl_u32(htgf_cnt), - __dl_u32(vht_su_cnt)); + phy->mib.dl_cck_cnt, + phy->mib.dl_ofdm_cnt, + phy->mib.dl_htmix_cnt, + phy->mib.dl_htgf_cnt, + phy->mib.dl_vht_su_cnt); seq_puts(file, "\nDownlink MU-MIMO\nData Type: "); @@ -301,23 +299,23 @@ static int mt7915_muru_stats_show(struct seq_file *file, void *data) seq_puts(file, "\nTotal Count:"); seq_printf(file, "%8u | %8u | %8u | ", - __dl_u32(vht_2mu_cnt), - __dl_u32(vht_3mu_cnt), - 
__dl_u32(vht_4mu_cnt)); + phy->mib.dl_vht_2mu_cnt, + phy->mib.dl_vht_3mu_cnt, + phy->mib.dl_vht_4mu_cnt); - sub_total_cnt = __dl_u32(vht_2mu_cnt) + - __dl_u32(vht_3mu_cnt) + - __dl_u32(vht_4mu_cnt); + sub_total_cnt = phy->mib.dl_vht_2mu_cnt + + phy->mib.dl_vht_3mu_cnt + + phy->mib.dl_vht_4mu_cnt; seq_printf(file, "\nTotal non-HE MU-MIMO DL PPDU count: %lld", sub_total_cnt); total_ppdu_cnt = sub_total_cnt + - __dl_u32(cck_cnt) + - __dl_u32(ofdm_cnt) + - __dl_u32(htmix_cnt) + - __dl_u32(htgf_cnt) + - __dl_u32(vht_su_cnt); + phy->mib.dl_cck_cnt + + phy->mib.dl_ofdm_cnt + + phy->mib.dl_htmix_cnt + + phy->mib.dl_htgf_cnt + + phy->mib.dl_vht_su_cnt; seq_printf(file, "\nAll non-HE DL PPDU count: %lld", total_ppdu_cnt); @@ -329,8 +327,7 @@ static int mt7915_muru_stats_show(struct seq_file *file, void *data) seq_puts(file, "\nTotal Count:"); seq_printf(file, "%8u | %8u | ", - __dl_u32(he_su_cnt), - __dl_u32(he_ext_su_cnt)); + phy->mib.dl_he_su_cnt, phy->mib.dl_he_ext_su_cnt); seq_puts(file, "\nDownlink MU-MIMO\nData Type: "); @@ -339,9 +336,8 @@ static int mt7915_muru_stats_show(struct seq_file *file, void *data) seq_puts(file, "\nTotal Count:"); seq_printf(file, "%8u | %8u | %8u | ", - __dl_u32(he_2mu_cnt), - __dl_u32(he_3mu_cnt), - __dl_u32(he_4mu_cnt)); + phy->mib.dl_he_2mu_cnt, phy->mib.dl_he_3mu_cnt, + phy->mib.dl_he_4mu_cnt); seq_puts(file, "\nDownlink OFDMA\nData Type: "); @@ -350,37 +346,35 @@ static int mt7915_muru_stats_show(struct seq_file *file, void *data) seq_puts(file, "\nTotal Count:"); seq_printf(file, "%8u | %8u | %8u | %8u | %9u | %8u | ", - __dl_u32(he_2ru_cnt), - __dl_u32(he_3ru_cnt), - __dl_u32(he_4ru_cnt), - __dl_u32(he_5to8ru_cnt), - __dl_u32(he_9to16ru_cnt), - __dl_u32(he_gtr16ru_cnt)); - - sub_total_cnt = __dl_u32(he_2mu_cnt) + - __dl_u32(he_3mu_cnt) + - __dl_u32(he_4mu_cnt); + phy->mib.dl_he_2ru_cnt, + phy->mib.dl_he_3ru_cnt, + phy->mib.dl_he_4ru_cnt, + phy->mib.dl_he_5to8ru_cnt, + phy->mib.dl_he_9to16ru_cnt, + phy->mib.dl_he_gtr16ru_cnt); + + sub_total_cnt = phy->mib.dl_he_2mu_cnt + + phy->mib.dl_he_3mu_cnt + + phy->mib.dl_he_4mu_cnt; total_ppdu_cnt = sub_total_cnt; seq_printf(file, "\nTotal HE MU-MIMO DL PPDU count: %lld", sub_total_cnt); - sub_total_cnt = __dl_u32(he_2ru_cnt) + - __dl_u32(he_3ru_cnt) + - __dl_u32(he_4ru_cnt) + - __dl_u32(he_5to8ru_cnt) + - __dl_u32(he_9to16ru_cnt) + - __dl_u32(he_gtr16ru_cnt); + sub_total_cnt = phy->mib.dl_he_2ru_cnt + + phy->mib.dl_he_3ru_cnt + + phy->mib.dl_he_4ru_cnt + + phy->mib.dl_he_5to8ru_cnt + + phy->mib.dl_he_9to16ru_cnt + + phy->mib.dl_he_gtr16ru_cnt; total_ppdu_cnt += sub_total_cnt; seq_printf(file, "\nTotal HE OFDMA DL PPDU count: %lld", sub_total_cnt); - total_ppdu_cnt += __dl_u32(he_su_cnt) + - __dl_u32(he_ext_su_cnt); + total_ppdu_cnt += phy->mib.dl_he_su_cnt + phy->mib.dl_he_ext_su_cnt; seq_printf(file, "\nAll HE DL PPDU count: %lld", total_ppdu_cnt); -#undef __dl_u32 /* HE Uplink */ seq_puts(file, "\n\nUplink"); @@ -389,12 +383,11 @@ static int mt7915_muru_stats_show(struct seq_file *file, void *data) for (i = 0; i < 3; i++) seq_printf(file, "%8s | ", ul_he_type[i]); -#define __ul_u32(s) le32_to_cpu(mu_stats.ul.s) seq_puts(file, "\nTotal Count:"); seq_printf(file, "%8u | %8u | %8u | ", - __ul_u32(hetrig_2mu_cnt), - __ul_u32(hetrig_3mu_cnt), - __ul_u32(hetrig_4mu_cnt)); + phy->mib.ul_hetrig_2mu_cnt, + phy->mib.ul_hetrig_3mu_cnt, + phy->mib.ul_hetrig_4mu_cnt); seq_puts(file, "\nTrigger-based Uplink OFDMA\nData Type: "); @@ -403,37 +396,36 @@ static int mt7915_muru_stats_show(struct seq_file *file, void *data) 
seq_puts(file, "\nTotal Count:"); seq_printf(file, "%8u | %8u | %8u | %8u | %8u | %9u | %7u | ", - __ul_u32(hetrig_su_cnt), - __ul_u32(hetrig_2ru_cnt), - __ul_u32(hetrig_3ru_cnt), - __ul_u32(hetrig_4ru_cnt), - __ul_u32(hetrig_5to8ru_cnt), - __ul_u32(hetrig_9to16ru_cnt), - __ul_u32(hetrig_gtr16ru_cnt)); - - sub_total_cnt = __ul_u32(hetrig_2mu_cnt) + - __ul_u32(hetrig_3mu_cnt) + - __ul_u32(hetrig_4mu_cnt); + phy->mib.ul_hetrig_su_cnt, + phy->mib.ul_hetrig_2ru_cnt, + phy->mib.ul_hetrig_3ru_cnt, + phy->mib.ul_hetrig_4ru_cnt, + phy->mib.ul_hetrig_5to8ru_cnt, + phy->mib.ul_hetrig_9to16ru_cnt, + phy->mib.ul_hetrig_gtr16ru_cnt); + + sub_total_cnt = phy->mib.ul_hetrig_2mu_cnt + + phy->mib.ul_hetrig_3mu_cnt + + phy->mib.ul_hetrig_4mu_cnt; total_ppdu_cnt = sub_total_cnt; seq_printf(file, "\nTotal HE MU-MIMO UL TB PPDU count: %lld", sub_total_cnt); - sub_total_cnt = __ul_u32(hetrig_2ru_cnt) + - __ul_u32(hetrig_3ru_cnt) + - __ul_u32(hetrig_4ru_cnt) + - __ul_u32(hetrig_5to8ru_cnt) + - __ul_u32(hetrig_9to16ru_cnt) + - __ul_u32(hetrig_gtr16ru_cnt); + sub_total_cnt = phy->mib.ul_hetrig_2ru_cnt + + phy->mib.ul_hetrig_3ru_cnt + + phy->mib.ul_hetrig_4ru_cnt + + phy->mib.ul_hetrig_5to8ru_cnt + + phy->mib.ul_hetrig_9to16ru_cnt + + phy->mib.ul_hetrig_gtr16ru_cnt; total_ppdu_cnt += sub_total_cnt; seq_printf(file, "\nTotal HE OFDMA UL TB PPDU count: %lld", sub_total_cnt); - total_ppdu_cnt += __ul_u32(hetrig_su_cnt); + total_ppdu_cnt += phy->mib.ul_hetrig_su_cnt; seq_printf(file, "\nAll HE UL TB PPDU count: %lld\n", total_ppdu_cnt); -#undef __ul_u32 exit: mutex_unlock(&dev->mt76.mutex); @@ -719,10 +711,10 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy, static void mt7915_txbf_stat_read_phy(struct mt7915_phy *phy, struct seq_file *s) { + struct mt76_mib_stats *mib = &phy->mib; static const char * const bw[] = { "BW20", "BW40", "BW80", "BW160" }; - struct mib_stats *mib = &phy->mib; /* Tx Beamformer monitor */ seq_puts(s, "\nTx Beamformer applied PPDU counts: "); @@ -768,7 +760,7 @@ mt7915_tx_stats_show(struct seq_file *file, void *data) { struct mt7915_phy *phy = file->private; struct mt7915_dev *dev = phy->dev; - struct mib_stats *mib = &phy->mib; + struct mt76_mib_stats *mib = &phy->mib; int i; mutex_lock(&dev->mt76.mutex); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c index 43a5456d4b97..59a44d79aaed 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c @@ -11,7 +11,7 @@ mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base struct mt7915_dev *dev = phy->dev; if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) { - if (is_mt7986(&dev->mt76)) + if (is_mt798x(&dev->mt76)) ring_base += MT_TXQ_ID(0) * MT_RING_SIZE; else ring_base = MT_WED_TX_RING_BASE; @@ -250,7 +250,7 @@ static void mt7915_dma_disable(struct mt7915_dev *dev, bool rst) } } -static int mt7915_dma_enable(struct mt7915_dev *dev) +int mt7915_dma_start(struct mt7915_dev *dev, bool reset, bool wed_reset) { struct mt76_dev *mdev = &dev->mt76; u32 hif1_ofs = 0; @@ -259,6 +259,84 @@ static int mt7915_dma_enable(struct mt7915_dev *dev) if (dev->hif2) hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0); + /* enable wpdma tx/rx */ + if (!reset) { + mt76_set(dev, MT_WFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_TX_DMA_EN | + MT_WFDMA0_GLO_CFG_RX_DMA_EN | + MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | + MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); + + if (is_mt7915(mdev)) + mt76_set(dev, MT_WFDMA1_GLO_CFG, + MT_WFDMA1_GLO_CFG_TX_DMA_EN | + 
MT_WFDMA1_GLO_CFG_RX_DMA_EN | + MT_WFDMA1_GLO_CFG_OMIT_TX_INFO | + MT_WFDMA1_GLO_CFG_OMIT_RX_INFO); + + if (dev->hif2) { + mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs, + MT_WFDMA0_GLO_CFG_TX_DMA_EN | + MT_WFDMA0_GLO_CFG_RX_DMA_EN | + MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | + MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); + + if (is_mt7915(mdev)) + mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs, + MT_WFDMA1_GLO_CFG_TX_DMA_EN | + MT_WFDMA1_GLO_CFG_RX_DMA_EN | + MT_WFDMA1_GLO_CFG_OMIT_TX_INFO | + MT_WFDMA1_GLO_CFG_OMIT_RX_INFO); + + mt76_set(dev, MT_WFDMA_HOST_CONFIG, + MT_WFDMA_HOST_CONFIG_PDMA_BAND); + } + } + + /* enable interrupts for TX/RX rings */ + irq_mask = MT_INT_RX_DONE_MCU | + MT_INT_TX_DONE_MCU | + MT_INT_MCU_CMD; + + if (!dev->phy.mt76->band_idx) + irq_mask |= MT_INT_BAND0_RX_DONE; + + if (dev->dbdc_support || dev->phy.mt76->band_idx) + irq_mask |= MT_INT_BAND1_RX_DONE; + + if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wed_reset) { + u32 wed_irq_mask = irq_mask; + int ret; + + wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1; + if (!is_mt798x(&dev->mt76)) + mt76_wr(dev, MT_INT_WED_MASK_CSR, wed_irq_mask); + else + mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask); + + ret = mt7915_mcu_wed_enable_rx_stats(dev); + if (ret) + return ret; + + mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask); + } + + irq_mask = reset ? MT_INT_MCU_CMD : irq_mask; + + mt7915_irq_enable(dev, irq_mask); + mt7915_irq_disable(dev, 0); + + return 0; +} + +static int mt7915_dma_enable(struct mt7915_dev *dev, bool reset) +{ + struct mt76_dev *mdev = &dev->mt76; + u32 hif1_ofs = 0; + + if (dev->hif2) + hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0); + /* reset dma idx */ mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0); if (is_mt7915(mdev)) @@ -322,69 +400,7 @@ static int mt7915_dma_enable(struct mt7915_dev *dev) mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC, MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000); - /* set WFDMA Tx/Rx */ - mt76_set(dev, MT_WFDMA0_GLO_CFG, - MT_WFDMA0_GLO_CFG_TX_DMA_EN | - MT_WFDMA0_GLO_CFG_RX_DMA_EN | - MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | - MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); - - if (is_mt7915(mdev)) - mt76_set(dev, MT_WFDMA1_GLO_CFG, - MT_WFDMA1_GLO_CFG_TX_DMA_EN | - MT_WFDMA1_GLO_CFG_RX_DMA_EN | - MT_WFDMA1_GLO_CFG_OMIT_TX_INFO | - MT_WFDMA1_GLO_CFG_OMIT_RX_INFO); - - if (dev->hif2) { - mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs, - MT_WFDMA0_GLO_CFG_TX_DMA_EN | - MT_WFDMA0_GLO_CFG_RX_DMA_EN | - MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | - MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); - - if (is_mt7915(mdev)) - mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs, - MT_WFDMA1_GLO_CFG_TX_DMA_EN | - MT_WFDMA1_GLO_CFG_RX_DMA_EN | - MT_WFDMA1_GLO_CFG_OMIT_TX_INFO | - MT_WFDMA1_GLO_CFG_OMIT_RX_INFO); - - mt76_set(dev, MT_WFDMA_HOST_CONFIG, - MT_WFDMA_HOST_CONFIG_PDMA_BAND); - } - - /* enable interrupts for TX/RX rings */ - irq_mask = MT_INT_RX_DONE_MCU | - MT_INT_TX_DONE_MCU | - MT_INT_MCU_CMD; - - if (!dev->phy.mt76->band_idx) - irq_mask |= MT_INT_BAND0_RX_DONE; - - if (dev->dbdc_support || dev->phy.mt76->band_idx) - irq_mask |= MT_INT_BAND1_RX_DONE; - - if (mtk_wed_device_active(&dev->mt76.mmio.wed)) { - u32 wed_irq_mask = irq_mask; - int ret; - - wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1; - if (!is_mt7986(&dev->mt76)) - mt76_wr(dev, MT_INT_WED_MASK_CSR, wed_irq_mask); - else - mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask); - - ret = mt7915_mcu_wed_enable_rx_stats(dev); - if (ret) - return ret; - - mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask); - } - - mt7915_irq_enable(dev, irq_mask); - - return 0; + return 
mt7915_dma_start(dev, reset, true); } int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2) @@ -404,7 +420,7 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2) mt7915_dma_disable(dev, true); if (mtk_wed_device_active(&mdev->mmio.wed)) { - if (!is_mt7986(mdev)) { + if (!is_mt798x(mdev)) { u8 wed_control_rx1 = is_mt7915(mdev) ? 1 : 2; mt76_set(dev, MT_WFDMA_HOST_CONFIG, @@ -560,7 +576,7 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2) mt7915_poll_tx); napi_enable(&dev->mt76.tx_napi); - mt7915_dma_enable(dev); + mt7915_dma_enable(dev, false); return 0; } @@ -642,7 +658,7 @@ int mt7915_dma_reset(struct mt7915_dev *dev, bool force) mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP, MT_WFDMA0_EXT0_RXWB_KEEP); - mt7915_dma_enable(dev); + mt7915_dma_enable(dev, !force); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c index a79628933948..76be7308460b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c @@ -39,6 +39,8 @@ static int mt7915_check_eeprom(struct mt7915_dev *dev) return CHECK_EEPROM_ERR(is_mt7915(&dev->mt76)); case 0x7916: return CHECK_EEPROM_ERR(is_mt7916(&dev->mt76)); + case 0x7981: + return CHECK_EEPROM_ERR(is_mt7981(&dev->mt76)); case 0x7986: return CHECK_EEPROM_ERR(is_mt7986(&dev->mt76)); default: @@ -52,6 +54,9 @@ static char *mt7915_eeprom_name(struct mt7915_dev *dev) case 0x7915: return dev->dbdc_support ? MT7915_EEPROM_DEFAULT_DBDC : MT7915_EEPROM_DEFAULT; + case 0x7981: + /* mt7981 only supports mt7976 and only in DBDC mode */ + return MT7981_EEPROM_MT7976_DEFAULT_DBDC; case 0x7986: switch (mt7915_check_adie(dev, true)) { case MT7976_ONE_ADIE_DBDC: @@ -215,7 +220,7 @@ void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev, eeprom[MT_EE_WIFI_CONF + 2 + band]); } - if (!is_mt7986(&dev->mt76)) + if (!is_mt798x(&dev->mt76)) nss_max = 2; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c index ac2049f49bb3..81478289f17e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -4,6 +4,7 @@ #include <linux/etherdevice.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> +#include <linux/of.h> #include <linux/thermal.h> #include "mt7915.h" #include "mac.h" @@ -212,10 +213,7 @@ static int mt7915_thermal_init(struct mt7915_phy *phy) hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy, mt7915_hwmon_groups); - if (IS_ERR(hwmon)) - return PTR_ERR(hwmon); - - return 0; + return PTR_ERR_OR_ZERO(hwmon); } static void mt7915_led_set_config(struct led_classdev *led_cdev, @@ -346,6 +344,9 @@ mt7915_init_wiphy(struct mt7915_phy *phy) hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; hw->netdev_features = NETIF_F_RXCSUM; + if (mtk_wed_device_active(&mdev->mmio.wed)) + hw->netdev_features |= NETIF_F_HW_TC; + hw->radiotap_timestamp.units_pos = IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US; @@ -392,8 +393,12 @@ mt7915_init_wiphy(struct mt7915_phy *phy) phy->mt76->sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING | IEEE80211_HT_CAP_MAX_AMSDU; - phy->mt76->sband_2g.sband.ht_cap.ampdu_density = - IEEE80211_HT_MPDU_DENSITY_4; + if (is_mt7915(&dev->mt76)) + phy->mt76->sband_2g.sband.ht_cap.ampdu_density = + IEEE80211_HT_MPDU_DENSITY_4; + else + phy->mt76->sband_2g.sband.ht_cap.ampdu_density = + IEEE80211_HT_MPDU_DENSITY_2; } if 
(phy->mt76->cap.has_5ghz) { @@ -403,10 +408,11 @@ mt7915_init_wiphy(struct mt7915_phy *phy) phy->mt76->sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING | IEEE80211_HT_CAP_MAX_AMSDU; - phy->mt76->sband_5g.sband.ht_cap.ampdu_density = - IEEE80211_HT_MPDU_DENSITY_4; if (is_mt7915(&dev->mt76)) { + phy->mt76->sband_5g.sband.ht_cap.ampdu_density = + IEEE80211_HT_MPDU_DENSITY_4; + vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; @@ -414,9 +420,11 @@ mt7915_init_wiphy(struct mt7915_phy *phy) if (!dev->dbdc_support) vht_cap->cap |= IEEE80211_VHT_CAP_SHORT_GI_160 | - IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ | FIELD_PREP(IEEE80211_VHT_CAP_EXT_NSS_BW_MASK, 1); } else { + phy->mt76->sband_5g.sband.ht_cap.ampdu_density = + IEEE80211_HT_MPDU_DENSITY_2; + vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; @@ -499,6 +507,12 @@ mt7915_mac_init_band(struct mt7915_dev *dev, u8 band) set = FIELD_PREP(MT_WTBLOFF_TOP_RSCR_RCPI_MODE, 0) | FIELD_PREP(MT_WTBLOFF_TOP_RSCR_RCPI_PARAM, 0x3); mt76_rmw(dev, MT_WTBLOFF_TOP_RSCR(band), mask, set); + + /* MT_TXD5_TX_STATUS_HOST (MPDU format) has higher priority than + * MT_AGG_ACR_PPDU_TXS2H (PPDU format) even though ACR bit is set. + */ + if (mtk_wed_device_active(&dev->mt76.mmio.wed)) + mt76_set(dev, MT_AGG_ACR4(band), MT_AGG_ACR_PPDU_TXS2H); } static void @@ -581,6 +595,8 @@ void mt7915_mac_init(struct mt7915_dev *dev) if (!is_mt7915(&dev->mt76)) mt76_clear(dev, MT_MDP_DCR2, MT_MDP_DCR2_RX_TRANS_SHORT); + else + mt76_clear(dev, MT_PLE_HOST_RPT0, MT_PLE_HOST_RPT0_TX_LATENCY); /* enable hardware de-agg */ mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN); @@ -732,7 +748,7 @@ void mt7915_wfsys_reset(struct mt7915_dev *dev) mt76_clear(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE); msleep(100); - } else if (is_mt7986(&dev->mt76)) { + } else if (is_mt798x(&dev->mt76)) { mt7986_wmac_disable(dev); msleep(20); @@ -753,7 +769,7 @@ static bool mt7915_band_config(struct mt7915_dev *dev) dev->phy.mt76->band_idx = 0; - if (is_mt7986(&dev->mt76)) { + if (is_mt798x(&dev->mt76)) { u32 sku = mt7915_check_adie(dev, true); /* @@ -1119,8 +1135,7 @@ void mt7915_set_stream_he_caps(struct mt7915_phy *phy) n = mt7915_init_he_caps(phy, NL80211_BAND_2GHZ, data); band = &phy->mt76->sband_2g.sband; - band->iftype_data = data; - band->n_iftype_data = n; + _ieee80211_set_sband_iftype_data(band, data, n); } if (phy->mt76->cap.has_5ghz) { @@ -1128,8 +1143,7 @@ void mt7915_set_stream_he_caps(struct mt7915_phy *phy) n = mt7915_init_he_caps(phy, NL80211_BAND_5GHZ, data); band = &phy->mt76->sband_5g.sband; - band->iftype_data = data; - band->n_iftype_data = n; + _ieee80211_set_sband_iftype_data(band, data, n); } if (phy->mt76->cap.has_6ghz) { @@ -1137,8 +1151,7 @@ void mt7915_set_stream_he_caps(struct mt7915_phy *phy) n = mt7915_init_he_caps(phy, NL80211_BAND_6GHZ, data); band = &phy->mt76->sband_6g.sband; - band->iftype_data = data; - band->n_iftype_data = n; + _ieee80211_set_sband_iftype_data(band, data, n); } } @@ -1158,11 +1171,11 @@ static void mt7915_unregister_ext_phy(struct mt7915_dev *dev) static void mt7915_stop_hardware(struct mt7915_dev *dev) { mt7915_mcu_exit(dev); - mt7915_tx_token_put(dev); + mt76_connac2_tx_token_put(&dev->mt76); mt7915_dma_cleanup(dev); tasklet_disable(&dev->mt76.irq_tasklet); - if (is_mt7986(&dev->mt76)) + if (is_mt798x(&dev->mt76)) mt7986_wmac_disable(dev); } @@ -1177,9 +1190,7 @@ int mt7915_register_device(struct mt7915_dev *dev) 
INIT_WORK(&dev->rc_work, mt7915_mac_sta_rc_work); INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7915_mac_work); INIT_LIST_HEAD(&dev->sta_rc_list); - INIT_LIST_HEAD(&dev->sta_poll_list); INIT_LIST_HEAD(&dev->twt_list); - spin_lock_init(&dev->sta_poll_lock); init_waitqueue_head(&dev->reset_wait); INIT_WORK(&dev->reset_work, mt7915_mac_reset_work); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index 7df8d95fc3fb..2222fb9aa103 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -105,9 +105,9 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev) LIST_HEAD(sta_poll_list); int i; - spin_lock_bh(&dev->sta_poll_lock); - list_splice_init(&dev->sta_poll_list, &sta_poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); + list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); rcu_read_lock(); @@ -118,15 +118,15 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev) s8 rssi[4]; u8 bw; - spin_lock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); if (list_empty(&sta_poll_list)) { - spin_unlock_bh(&dev->sta_poll_lock); + spin_unlock_bh(&dev->mt76.sta_poll_lock); break; } msta = list_first_entry(&sta_poll_list, - struct mt7915_sta, poll_list); - list_del_init(&msta->poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + struct mt7915_sta, wcid.poll_list); + list_del_init(&msta->wcid.poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); idx = msta->wcid.idx; @@ -326,10 +326,11 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb, if (status->wcid) { msta = container_of(status->wcid, struct mt7915_sta, wcid); - spin_lock_bh(&dev->sta_poll_lock); - if (list_empty(&msta->poll_list)) - list_add_tail(&msta->poll_list, &dev->sta_poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); + if (list_empty(&msta->wcid.poll_list)) + list_add_tail(&msta->wcid.poll_list, + &dev->mt76.sta_poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); } status->freq = mphy->chandef.chan->center_freq; @@ -808,7 +809,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, txp->rept_wds_wcid = cpu_to_le16(wcid->idx); else txp->rept_wds_wcid = cpu_to_le16(0x3ff); - tx_info->skb = DMA_DUMMY_DATA; + tx_info->skb = NULL; /* pass partial skb header to fw */ tx_info->buf[1].len = MT_CT_PARSE_LEN; @@ -842,74 +843,6 @@ u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id) } static void -mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi) -{ - struct mt7915_sta *msta; - u16 fc, tid; - u32 val; - - if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he)) - return; - - tid = le32_get_bits(txwi[1], MT_TXD1_TID); - if (tid >= 6) /* skip VO queue */ - return; - - val = le32_to_cpu(txwi[2]); - fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 | - FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4; - if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA))) - return; - - msta = (struct mt7915_sta *)sta->drv_priv; - if (!test_and_set_bit(tid, &msta->ampdu_state)) - ieee80211_start_tx_ba_session(sta, tid, 0); -} - -static void -mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t, - struct ieee80211_sta *sta, struct list_head *free_list) -{ - struct mt76_dev *mdev = &dev->mt76; - struct mt7915_sta *msta; - struct mt76_wcid *wcid; - __le32 *txwi; - u16 wcid_idx; - - mt76_connac_txp_skb_unmap(mdev, t); - 
if (!t->skb) - goto out; - - txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t); - if (sta) { - wcid = (struct mt76_wcid *)sta->drv_priv; - wcid_idx = wcid->idx; - } else { - wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX); - wcid = rcu_dereference(dev->mt76.wcid[wcid_idx]); - - if (wcid && wcid->sta) { - msta = container_of(wcid, struct mt7915_sta, wcid); - sta = container_of((void *)msta, struct ieee80211_sta, - drv_priv); - spin_lock_bh(&dev->sta_poll_lock); - if (list_empty(&msta->poll_list)) - list_add_tail(&msta->poll_list, &dev->sta_poll_list); - spin_unlock_bh(&dev->sta_poll_lock); - } - } - - if (sta && likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) - mt7915_tx_check_aggr(sta, txwi); - - __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list); - -out: - t->skb = NULL; - mt76_put_txwi(mdev, t); -} - -static void mt7915_mac_tx_free_prepare(struct mt7915_dev *dev) { struct mt76_dev *mdev = &dev->mt76; @@ -951,6 +884,7 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len) struct mt76_dev *mdev = &dev->mt76; struct mt76_txwi_cache *txwi; struct ieee80211_sta *sta = NULL; + struct mt76_wcid *wcid = NULL; LIST_HEAD(free_list); void *end = data + len; bool v3, wake = false; @@ -977,7 +911,6 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len) info = le32_to_cpu(*cur_info); if (info & MT_TX_FREE_PAIR) { struct mt7915_sta *msta; - struct mt76_wcid *wcid; u16 idx; idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info); @@ -987,14 +920,33 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len) continue; msta = container_of(wcid, struct mt7915_sta, wcid); - spin_lock_bh(&dev->sta_poll_lock); - if (list_empty(&msta->poll_list)) - list_add_tail(&msta->poll_list, &dev->sta_poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_lock_bh(&mdev->sta_poll_lock); + if (list_empty(&msta->wcid.poll_list)) + list_add_tail(&msta->wcid.poll_list, + &mdev->sta_poll_list); + spin_unlock_bh(&mdev->sta_poll_lock); continue; } - if (v3 && (info & MT_TX_FREE_MPDU_HEADER)) + if (!mtk_wed_device_active(&mdev->mmio.wed) && wcid) { + u32 tx_retries = 0, tx_failed = 0; + + if (v3 && (info & MT_TX_FREE_MPDU_HEADER_V3)) { + tx_retries = + FIELD_GET(MT_TX_FREE_COUNT_V3, info) - 1; + tx_failed = tx_retries + + !!FIELD_GET(MT_TX_FREE_STAT_V3, info); + } else if (!v3 && (info & MT_TX_FREE_MPDU_HEADER)) { + tx_retries = + FIELD_GET(MT_TX_FREE_COUNT, info) - 1; + tx_failed = tx_retries + + !!FIELD_GET(MT_TX_FREE_STAT, info); + } + wcid->stats.tx_retries += tx_retries; + wcid->stats.tx_failed += tx_failed; + } + + if (v3 && (info & MT_TX_FREE_MPDU_HEADER_V3)) continue; for (i = 0; i < 1 + v3; i++) { @@ -1010,7 +962,7 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len) if (!txwi) continue; - mt7915_txwi_free(dev, txwi, sta, &free_list); + mt76_connac2_txwi_free(mdev, txwi, sta, &free_list); } } @@ -1042,7 +994,7 @@ mt7915_mac_tx_free_v0(struct mt7915_dev *dev, void *data, int len) if (!txwi) continue; - mt7915_txwi_free(dev, txwi, NULL, &free_list); + mt76_connac2_txwi_free(mdev, txwi, NULL, &free_list); } mt7915_mac_tx_free_done(dev, &free_list, wake); @@ -1081,10 +1033,10 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data) if (!wcid->sta) goto out; - spin_lock_bh(&dev->sta_poll_lock); - if (list_empty(&msta->poll_list)) - list_add_tail(&msta->poll_list, &dev->sta_poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); + if (list_empty(&msta->wcid.poll_list)) + list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list); + 
spin_unlock_bh(&dev->mt76.sta_poll_lock); out: rcu_read_unlock(); @@ -1357,20 +1309,6 @@ mt7915_update_beacons(struct mt7915_dev *dev) mt7915_update_vif_beacon, mphy_ext->hw); } -void mt7915_tx_token_put(struct mt7915_dev *dev) -{ - struct mt76_txwi_cache *txwi; - int id; - - spin_lock_bh(&dev->mt76.token_lock); - idr_for_each_entry(&dev->mt76.token, txwi, id) { - mt7915_txwi_free(dev, txwi, NULL, NULL); - dev->mt76.token_count--; - } - spin_unlock_bh(&dev->mt76.token_lock); - idr_destroy(&dev->mt76.token); -} - static int mt7915_mac_restart(struct mt7915_dev *dev) { @@ -1389,8 +1327,12 @@ mt7915_mac_restart(struct mt7915_dev *dev) if (dev_is_pci(mdev->dev)) { mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); - if (dev->hif2) - mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0); + if (dev->hif2) { + if (is_mt7915(mdev)) + mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0); + else + mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE_MT7916, 0x0); + } } set_bit(MT76_RESET, &dev->mphy.state); @@ -1415,7 +1357,7 @@ mt7915_mac_restart(struct mt7915_dev *dev) napi_disable(&dev->mt76.tx_napi); /* token reinit */ - mt7915_tx_token_put(dev); + mt76_connac2_tx_token_put(&dev->mt76); idr_init(&dev->mt76.token); mt7915_dma_reset(dev, true); @@ -1440,8 +1382,12 @@ mt7915_mac_restart(struct mt7915_dev *dev) } if (dev_is_pci(mdev->dev)) { mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); - if (dev->hif2) - mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff); + if (dev->hif2) { + if (is_mt7915(mdev)) + mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff); + else + mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE_MT7916, 0xff); + } } /* load firmware */ @@ -1576,7 +1522,7 @@ void mt7915_mac_reset_work(struct work_struct *work) if (mtk_wed_device_active(&dev->mt76.mmio.wed)) { mtk_wed_device_stop(&dev->mt76.mmio.wed); - if (!is_mt7986(&dev->mt76)) + if (!is_mt798x(&dev->mt76)) mt76_wr(dev, MT_INT_WED_MASK_CSR, 0); } @@ -1604,13 +1550,19 @@ void mt7915_mac_reset_work(struct work_struct *work) if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) { mt7915_dma_reset(dev, false); - mt7915_tx_token_put(dev); + mt76_connac2_tx_token_put(&dev->mt76); idr_init(&dev->mt76.token); mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT); mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE); } + mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE); + mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE); + + /* enable DMA Tx/Rx and interrupt */ + mt7915_dma_start(dev, false, false); + clear_bit(MT76_MCU_RESET, &dev->mphy.state); clear_bit(MT76_RESET, &dev->mphy.state); if (phy2) @@ -1625,9 +1577,6 @@ void mt7915_mac_reset_work(struct work_struct *work) tasklet_schedule(&dev->mt76.irq_tasklet); - mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE); - mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE); - mt76_worker_enable(&dev->mt76.tx_worker); local_bh_disable(); @@ -1747,8 +1696,8 @@ void mt7915_reset(struct mt7915_dev *dev) void mt7915_mac_update_stats(struct mt7915_phy *phy) { + struct mt76_mib_stats *mib = &phy->mib; struct mt7915_dev *dev = phy->dev; - struct mib_stats *mib = &phy->mib; int i, aggr0 = 0, aggr1, cnt; u8 band = phy->mt76->band_idx; u32 val; @@ -2010,7 +1959,7 @@ void mt7915_mac_sta_rc_work(struct work_struct *work) u32 changed; LIST_HEAD(list); - spin_lock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); list_splice_init(&dev->sta_rc_list, &list); while (!list_empty(&list)) { @@ -2018,7 +1967,7 @@ void mt7915_mac_sta_rc_work(struct work_struct *work) list_del_init(&msta->rc_list); changed = msta->changed; msta->changed = 0; - 
spin_unlock_bh(&dev->sta_poll_lock); + spin_unlock_bh(&dev->mt76.sta_poll_lock); sta = container_of((void *)msta, struct ieee80211_sta, drv_priv); vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); @@ -2031,10 +1980,10 @@ void mt7915_mac_sta_rc_work(struct work_struct *work) if (changed & IEEE80211_RC_SMPS_CHANGED) mt7915_mcu_add_smps(dev, vif, sta); - spin_lock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); } - spin_unlock_bh(&dev->sta_poll_lock); + spin_unlock_bh(&dev->mt76.sta_poll_lock); } void mt7915_mac_work(struct work_struct *work) @@ -2054,6 +2003,9 @@ void mt7915_mac_work(struct work_struct *work) mt7915_mac_update_stats(phy); mt7915_mac_severe_check(phy); + + if (phy->dev->muru_debug) + mt7915_mcu_muru_debug_get(phy); } mutex_unlock(&mphy->dev->mutex); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h index ce94f87e2042..448b1b380190 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h @@ -9,7 +9,12 @@ #define MT_TX_FREE_VER GENMASK(18, 16) #define MT_TX_FREE_MSDU_CNT_V0 GENMASK(6, 0) /* 0: success, others: dropped */ -#define MT_TX_FREE_MPDU_HEADER BIT(30) +#define MT_TX_FREE_COUNT GENMASK(12, 0) +#define MT_TX_FREE_COUNT_V3 GENMASK(27, 24) +#define MT_TX_FREE_STAT GENMASK(14, 13) +#define MT_TX_FREE_STAT_V3 GENMASK(29, 28) +#define MT_TX_FREE_MPDU_HEADER BIT(15) +#define MT_TX_FREE_MPDU_HEADER_V3 BIT(30) #define MT_TX_FREE_MSDU_ID_V3 GENMASK(14, 0) #define MT_TXS5_F0_FINAL_MPDU BIT(31) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index 1b361199c061..a3fd54cc1911 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -248,12 +248,12 @@ static int mt7915_add_interface(struct ieee80211_hw *hw, idx = MT7915_WTBL_RESERVED - mvif->mt76.idx; INIT_LIST_HEAD(&mvif->sta.rc_list); - INIT_LIST_HEAD(&mvif->sta.poll_list); + INIT_LIST_HEAD(&mvif->sta.wcid.poll_list); mvif->sta.wcid.idx = idx; mvif->sta.wcid.phy_idx = ext_phy; mvif->sta.wcid.hw_key_idx = -1; mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET; - mt76_packet_id_init(&mvif->sta.wcid); + mt76_wcid_init(&mvif->sta.wcid); mt7915_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); @@ -269,6 +269,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw, vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR; mt7915_init_bitrate_mask(vif); + memset(&mvif->cap, -1, sizeof(mvif->cap)); mt7915_mcu_add_bss_info(phy, vif, true); mt7915_mcu_add_sta(dev, vif, NULL, true); @@ -308,12 +309,12 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw, phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx); mutex_unlock(&dev->mt76.mutex); - spin_lock_bh(&dev->sta_poll_lock); - if (!list_empty(&msta->poll_list)) - list_del_init(&msta->poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); + if (!list_empty(&msta->wcid.poll_list)) + list_del_init(&msta->wcid.poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); - mt76_packet_id_flush(&dev->mt76, &msta->wcid); + mt76_wcid_cleanup(&dev->mt76, &msta->wcid); } int mt7915_set_channel(struct mt7915_phy *phy) @@ -470,7 +471,8 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed) ieee80211_wake_queues(hw); } - if (changed & IEEE80211_CONF_CHANGE_POWER) { + if (changed & (IEEE80211_CONF_CHANGE_POWER | + IEEE80211_CONF_CHANGE_CHANNEL)) { ret = 
mt7915_mcu_set_txpower_sku(phy); if (ret) return ret; @@ -481,16 +483,22 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed) if (changed & IEEE80211_CONF_CHANGE_MONITOR) { bool enabled = !!(hw->conf.flags & IEEE80211_CONF_MONITOR); bool band = phy->mt76->band_idx; - - if (!enabled) - phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC; - else - phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC; + u32 rxfilter = phy->rxfilter; + + if (!enabled) { + rxfilter |= MT_WF_RFCR_DROP_OTHER_UC; + dev->monitor_mask &= ~BIT(band); + } else { + rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC; + dev->monitor_mask |= BIT(band); + } mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN, enabled); + mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_MDP_DCR0_RX_HDR_TRANS_EN, + !dev->monitor_mask); mt76_testmode_reset(phy->mt76, true); - mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter); + mt76_wr(dev, MT_WF_RFCR(band), rxfilter); } mutex_unlock(&dev->mt76.mutex); @@ -525,6 +533,7 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw, MT_WF_RFCR1_DROP_BA | MT_WF_RFCR1_DROP_CFEND | MT_WF_RFCR1_DROP_CFACK; + u32 rxfilter; u32 flags = 0; #define MT76_FILTER(_flag, _hw) do { \ @@ -559,7 +568,12 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw, MT_WF_RFCR_DROP_NDPA); *total_flags = flags; - mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter); + rxfilter = phy->rxfilter; + if (hw->conf.flags & IEEE80211_CONF_MONITOR) + rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC; + else + rxfilter |= MT_WF_RFCR_DROP_OTHER_UC; + mt76_wr(dev, MT_WF_RFCR(band), rxfilter); if (*total_flags & FIF_CONTROL) mt76_clear(dev, MT_WF_RFCR1(band), ctl_flags); @@ -599,6 +613,7 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw, { struct mt7915_phy *phy = mt7915_hw_phy(hw); struct mt7915_dev *dev = mt7915_hw_dev(hw); + int set_bss_info = -1, set_sta = -1; mutex_lock(&dev->mt76.mutex); @@ -607,15 +622,18 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw, * and then peer references bss_info_rfch to set bandwidth cap. 
*/ if (changed & BSS_CHANGED_BSSID && - vif->type == NL80211_IFTYPE_STATION) { - bool join = !is_zero_ether_addr(info->bssid); - - mt7915_mcu_add_bss_info(phy, vif, join); - mt7915_mcu_add_sta(dev, vif, NULL, join); - } - + vif->type == NL80211_IFTYPE_STATION) + set_bss_info = set_sta = !is_zero_ether_addr(info->bssid); if (changed & BSS_CHANGED_ASSOC) - mt7915_mcu_add_bss_info(phy, vif, vif->cfg.assoc); + set_bss_info = vif->cfg.assoc; + if (changed & BSS_CHANGED_BEACON_ENABLED && + vif->type != NL80211_IFTYPE_AP) + set_bss_info = set_sta = info->enable_beacon; + + if (set_bss_info == 1) + mt7915_mcu_add_bss_info(phy, vif, true); + if (set_sta == 1) + mt7915_mcu_add_sta(dev, vif, NULL, true); if (changed & BSS_CHANGED_ERP_CTS_PROT) mt7915_mac_enable_rtscts(dev, vif, info->use_cts_prot); @@ -629,11 +647,6 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw, } } - if (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon) { - mt7915_mcu_add_bss_info(phy, vif, true); - mt7915_mcu_add_sta(dev, vif, NULL, true); - } - /* ensure that enable txcmd_mode after bss_info */ if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED)) mt7915_mcu_set_tx(dev, vif); @@ -645,11 +658,69 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw, mt7915_update_bss_color(hw, vif, &info->he_bss_color); if (changed & (BSS_CHANGED_BEACON | - BSS_CHANGED_BEACON_ENABLED | - BSS_CHANGED_UNSOL_BCAST_PROBE_RESP | - BSS_CHANGED_FILS_DISCOVERY)) + BSS_CHANGED_BEACON_ENABLED)) mt7915_mcu_add_beacon(hw, vif, info->enable_beacon, changed); + if (changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP | + BSS_CHANGED_FILS_DISCOVERY)) + mt7915_mcu_add_inband_discov(dev, vif, changed); + + if (set_bss_info == 0) + mt7915_mcu_add_bss_info(phy, vif, false); + if (set_sta == 0) + mt7915_mcu_add_sta(dev, vif, NULL, false); + + mutex_unlock(&dev->mt76.mutex); +} + +static void +mt7915_vif_check_caps(struct mt7915_phy *phy, struct ieee80211_vif *vif) +{ + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct mt7915_vif_cap *vc = &mvif->cap; + + vc->ht_ldpc = vif->bss_conf.ht_ldpc; + vc->vht_ldpc = vif->bss_conf.vht_ldpc; + vc->vht_su_ebfer = vif->bss_conf.vht_su_beamformer; + vc->vht_su_ebfee = vif->bss_conf.vht_su_beamformee; + vc->vht_mu_ebfer = vif->bss_conf.vht_mu_beamformer; + vc->vht_mu_ebfee = vif->bss_conf.vht_mu_beamformee; + vc->he_ldpc = vif->bss_conf.he_ldpc; + vc->he_su_ebfer = vif->bss_conf.he_su_beamformer; + vc->he_su_ebfee = vif->bss_conf.he_su_beamformee; + vc->he_mu_ebfer = vif->bss_conf.he_mu_beamformer; +} + +static int +mt7915_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct mt7915_phy *phy = mt7915_hw_phy(hw); + struct mt7915_dev *dev = mt7915_hw_dev(hw); + int err; + + mutex_lock(&dev->mt76.mutex); + + mt7915_vif_check_caps(phy, vif); + + err = mt7915_mcu_add_bss_info(phy, vif, true); + if (err) + goto out; + err = mt7915_mcu_add_sta(dev, vif, NULL, true); +out: + mutex_unlock(&dev->mt76.mutex); + + return err; +} + +static void +mt7915_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct mt7915_dev *dev = mt7915_hw_dev(hw); + + mutex_lock(&dev->mt76.mutex); + mt7915_mcu_add_sta(dev, vif, NULL, false); mutex_unlock(&dev->mt76.mutex); } @@ -679,7 +750,7 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, return -ENOSPC; INIT_LIST_HEAD(&msta->rc_list); - INIT_LIST_HEAD(&msta->poll_list); + INIT_LIST_HEAD(&msta->wcid.poll_list); 
msta->vif = mvif; msta->wcid.sta = 1; msta->wcid.idx = idx; @@ -714,12 +785,12 @@ void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++) mt7915_mac_twt_teardown_flow(dev, msta, i); - spin_lock_bh(&dev->sta_poll_lock); - if (!list_empty(&msta->poll_list)) - list_del_init(&msta->poll_list); + spin_lock_bh(&mdev->sta_poll_lock); + if (!list_empty(&msta->wcid.poll_list)) + list_del_init(&msta->wcid.poll_list); if (!list_empty(&msta->rc_list)) list_del_init(&msta->rc_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_unlock_bh(&mdev->sta_poll_lock); } static void mt7915_tx(struct ieee80211_hw *hw, @@ -801,16 +872,16 @@ mt7915_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: mtxq->aggr = false; - clear_bit(tid, &msta->ampdu_state); + clear_bit(tid, &msta->wcid.ampdu_state); ret = mt7915_mcu_add_tx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_START: - set_bit(tid, &msta->ampdu_state); + set_bit(tid, &msta->wcid.ampdu_state); ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; break; case IEEE80211_AMPDU_TX_STOP_CONT: mtxq->aggr = false; - clear_bit(tid, &msta->ampdu_state); + clear_bit(tid, &msta->wcid.ampdu_state); ret = mt7915_mcu_add_tx_ba(dev, params, false); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; @@ -842,7 +913,7 @@ mt7915_get_stats(struct ieee80211_hw *hw, { struct mt7915_phy *phy = mt7915_hw_phy(hw); struct mt7915_dev *dev = mt7915_hw_dev(hw); - struct mib_stats *mib = &phy->mib; + struct mt76_mib_stats *mib = &phy->mib; mutex_lock(&dev->mt76.mutex); @@ -1019,21 +1090,20 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw, sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); } - if (!txrate->legacy && !txrate->flags) - return; - - if (txrate->legacy) { - sinfo->txrate.legacy = txrate->legacy; - } else { - sinfo->txrate.mcs = txrate->mcs; - sinfo->txrate.nss = txrate->nss; - sinfo->txrate.bw = txrate->bw; - sinfo->txrate.he_gi = txrate->he_gi; - sinfo->txrate.he_dcm = txrate->he_dcm; - sinfo->txrate.he_ru_alloc = txrate->he_ru_alloc; + if (txrate->legacy || txrate->flags) { + if (txrate->legacy) { + sinfo->txrate.legacy = txrate->legacy; + } else { + sinfo->txrate.mcs = txrate->mcs; + sinfo->txrate.nss = txrate->nss; + sinfo->txrate.bw = txrate->bw; + sinfo->txrate.he_gi = txrate->he_gi; + sinfo->txrate.he_dcm = txrate->he_dcm; + sinfo->txrate.he_ru_alloc = txrate->he_ru_alloc; + } + sinfo->txrate.flags = txrate->flags; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); } - sinfo->txrate.flags = txrate->flags; - sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); /* offloading flows bypass networking stack, so driver counts and * reports sta statistics via NL80211_STA_INFO when WED is active. 
@@ -1042,14 +1112,10 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw, sinfo->tx_bytes = msta->wcid.stats.tx_bytes; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64); - sinfo->tx_packets = msta->wcid.stats.tx_packets; - sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); - - sinfo->tx_failed = msta->wcid.stats.tx_failed; - sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); - - sinfo->tx_retries = msta->wcid.stats.tx_retries; - sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); + if (!mt7915_mcu_wed_wa_tx_stats(phy->dev, msta->wcid.idx)) { + sinfo->tx_packets = msta->wcid.stats.tx_packets; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); + } if (mtk_wed_get_rx_capa(&phy->dev->mt76.mmio.wed)) { sinfo->rx_bytes = msta->wcid.stats.rx_bytes; @@ -1060,6 +1126,12 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw, } } + sinfo->tx_failed = msta->wcid.stats.tx_failed; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); + + sinfo->tx_retries = msta->wcid.stats.tx_retries; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); + sinfo->ack_signal = (s8)msta->ack_signal; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL); @@ -1073,11 +1145,11 @@ static void mt7915_sta_rc_work(void *data, struct ieee80211_sta *sta) struct mt7915_dev *dev = msta->vif->phy->dev; u32 *changed = data; - spin_lock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); msta->changed |= *changed; if (list_empty(&msta->rc_list)) list_add_tail(&msta->rc_list, &dev->sta_rc_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_unlock_bh(&dev->mt76.sta_poll_lock); } static void mt7915_sta_rc_update(struct ieee80211_hw *hw, @@ -1253,6 +1325,38 @@ static const char mt7915_gstrings_stats[][ETH_GSTRING_LEN] = { "rx_vec_queue_overflow_drop_cnt", "rx_ba_cnt", + /* muru mu-mimo and ofdma related stats */ + "dl_cck_cnt", + "dl_ofdm_cnt", + "dl_htmix_cnt", + "dl_htgf_cnt", + "dl_vht_su_cnt", + "dl_vht_2mu_cnt", + "dl_vht_3mu_cnt", + "dl_vht_4mu_cnt", + "dl_he_su_cnt", + "dl_he_ext_su_cnt", + "dl_he_2ru_cnt", + "dl_he_2mu_cnt", + "dl_he_3ru_cnt", + "dl_he_3mu_cnt", + "dl_he_4ru_cnt", + "dl_he_4mu_cnt", + "dl_he_5to8ru_cnt", + "dl_he_9to16ru_cnt", + "dl_he_gtr16ru_cnt", + + "ul_hetrig_su_cnt", + "ul_hetrig_2ru_cnt", + "ul_hetrig_3ru_cnt", + "ul_hetrig_4ru_cnt", + "ul_hetrig_5to8ru_cnt", + "ul_hetrig_9to16ru_cnt", + "ul_hetrig_gtr16ru_cnt", + "ul_hetrig_2mu_cnt", + "ul_hetrig_3mu_cnt", + "ul_hetrig_4mu_cnt", + /* per vif counters */ "v_tx_mode_cck", "v_tx_mode_ofdm", @@ -1279,6 +1383,10 @@ static const char mt7915_gstrings_stats[][ETH_GSTRING_LEN] = { "v_tx_mcs_9", "v_tx_mcs_10", "v_tx_mcs_11", + "v_tx_nss_1", + "v_tx_nss_2", + "v_tx_nss_3", + "v_tx_nss_4", }; #define MT7915_SSTATS_LEN ARRAY_SIZE(mt7915_gstrings_stats) @@ -1292,7 +1400,7 @@ void mt7915_get_et_strings(struct ieee80211_hw *hw, if (sset != ETH_SS_STATS) return; - memcpy(data, *mt7915_gstrings_stats, sizeof(mt7915_gstrings_stats)); + memcpy(data, mt7915_gstrings_stats, sizeof(mt7915_gstrings_stats)); data += sizeof(mt7915_gstrings_stats); page_pool_ethtool_stats_get_strings(data); } @@ -1326,11 +1434,11 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw, struct mt7915_dev *dev = mt7915_hw_dev(hw); struct mt7915_phy *phy = mt7915_hw_phy(hw); struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct mt76_mib_stats *mib = &phy->mib; struct mt76_ethtool_worker_info wi = { .data = data, .idx = mvif->mt76.idx, }; - struct mib_stats *mib = &phy->mib; /* See mt7915_ampdu_stat_read_phy, etc */ int i, ei = 0, 
stats_size; @@ -1403,6 +1511,37 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw, data[ei++] = mib->rx_vec_queue_overflow_drop_cnt; data[ei++] = mib->rx_ba_cnt; + data[ei++] = mib->dl_cck_cnt; + data[ei++] = mib->dl_ofdm_cnt; + data[ei++] = mib->dl_htmix_cnt; + data[ei++] = mib->dl_htgf_cnt; + data[ei++] = mib->dl_vht_su_cnt; + data[ei++] = mib->dl_vht_2mu_cnt; + data[ei++] = mib->dl_vht_3mu_cnt; + data[ei++] = mib->dl_vht_4mu_cnt; + data[ei++] = mib->dl_he_su_cnt; + data[ei++] = mib->dl_he_ext_su_cnt; + data[ei++] = mib->dl_he_2ru_cnt; + data[ei++] = mib->dl_he_2mu_cnt; + data[ei++] = mib->dl_he_3ru_cnt; + data[ei++] = mib->dl_he_3mu_cnt; + data[ei++] = mib->dl_he_4ru_cnt; + data[ei++] = mib->dl_he_4mu_cnt; + data[ei++] = mib->dl_he_5to8ru_cnt; + data[ei++] = mib->dl_he_9to16ru_cnt; + data[ei++] = mib->dl_he_gtr16ru_cnt; + + data[ei++] = mib->ul_hetrig_su_cnt; + data[ei++] = mib->ul_hetrig_2ru_cnt; + data[ei++] = mib->ul_hetrig_3ru_cnt; + data[ei++] = mib->ul_hetrig_4ru_cnt; + data[ei++] = mib->ul_hetrig_5to8ru_cnt; + data[ei++] = mib->ul_hetrig_9to16ru_cnt; + data[ei++] = mib->ul_hetrig_gtr16ru_cnt; + data[ei++] = mib->ul_hetrig_2mu_cnt; + data[ei++] = mib->ul_hetrig_3mu_cnt; + data[ei++] = mib->ul_hetrig_4mu_cnt; + /* Add values for all stations owned by this vif */ wi.initial_stat_idx = ei; ieee80211_iterate_stations_atomic(hw, mt7915_ethtool_worker, &wi); @@ -1514,6 +1653,20 @@ mt7915_net_fill_forward_path(struct ieee80211_hw *hw, return 0; } + +static int +mt7915_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct net_device *netdev, enum tc_setup_type type, + void *type_data) +{ + struct mt7915_dev *dev = mt7915_hw_dev(hw); + struct mtk_wed_device *wed = &dev->mt76.mmio.wed; + + if (!mtk_wed_device_active(wed)) + return -EOPNOTSUPP; + + return mtk_wed_device_setup_tc(wed, netdev, type, type_data); +} #endif const struct ieee80211_ops mt7915_ops = { @@ -1526,6 +1679,8 @@ const struct ieee80211_ops mt7915_ops = { .conf_tx = mt7915_conf_tx, .configure_filter = mt7915_configure_filter, .bss_info_changed = mt7915_bss_info_changed, + .start_ap = mt7915_start_ap, + .stop_ap = mt7915_stop_ap, .sta_add = mt7915_sta_add, .sta_remove = mt7915_sta_remove, .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove, @@ -1566,5 +1721,6 @@ const struct ieee80211_ops mt7915_ops = { .set_radar_background = mt7915_set_radar_background, #ifdef CONFIG_NET_MEDIATEK_SOC_WED .net_fill_forward_path = mt7915_net_fill_forward_path, + .net_setup_tc = mt7915_net_setup_tc, #endif }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 9fcb22fa1f97..b22f06d4411a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -13,6 +13,9 @@ case 0x7915: \ _fw = MT7915_##name; \ break; \ + case 0x7981: \ + _fw = MT7981_##name; \ + break; \ case 0x7986: \ _fw = MT7986_##name##__VA_ARGS__; \ break; \ @@ -164,7 +167,9 @@ mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd, } rxd = (struct mt76_connac2_mcu_rxd *)skb->data; - if (seq != rxd->seq) + if (seq != rxd->seq && + !(rxd->eid == MCU_CMD_EXT_CID && + rxd->ext_eid == MCU_EXT_EVENT_WA_TX_STAT)) return -EAGAIN; if (cmd == MCU_CMD(PATCH_SEM_CONTROL)) { @@ -220,8 +225,10 @@ int mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3) static void mt7915_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) { - if (vif->bss_conf.csa_active) - ieee80211_csa_finish(vif); + if (!vif->bss_conf.csa_active || 
vif->type == NL80211_IFTYPE_STATION) + return; + + ieee80211_csa_finish(vif); } static void @@ -274,7 +281,7 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb) r = (struct mt7915_mcu_rdd_report *)skb->data; - if (r->band_idx > MT_BAND1) + if (r->band_idx > MT_RX_SEL2) return; if ((r->band_idx && !dev->phy.mt76->band_idx) && @@ -321,7 +328,7 @@ mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb) static void mt7915_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) { - if (!vif->bss_conf.color_change_active) + if (!vif->bss_conf.color_change_active || vif->type == NL80211_IFTYPE_STATION) return; ieee80211_color_change_finish(vif); @@ -395,12 +402,14 @@ void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb) struct mt76_connac2_mcu_rxd *rxd; rxd = (struct mt76_connac2_mcu_rxd *)skb->data; - if (rxd->ext_eid == MCU_EXT_EVENT_THERMAL_PROTECT || - rxd->ext_eid == MCU_EXT_EVENT_FW_LOG_2_HOST || - rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP || - rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC || - rxd->ext_eid == MCU_EXT_EVENT_BCC_NOTIFY || - !rxd->seq) + if ((rxd->ext_eid == MCU_EXT_EVENT_THERMAL_PROTECT || + rxd->ext_eid == MCU_EXT_EVENT_FW_LOG_2_HOST || + rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP || + rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC || + rxd->ext_eid == MCU_EXT_EVENT_BCC_NOTIFY || + !rxd->seq) && + !(rxd->eid == MCU_CMD_EXT_CID && + rxd->ext_eid == MCU_EXT_EVENT_WA_TX_STAT)) mt7915_mcu_rx_unsolicited_event(dev, skb); else mt76_mcu_rx_event(&dev->mt76, skb); @@ -706,6 +715,7 @@ static void mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, struct ieee80211_vif *vif) { + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem; struct ieee80211_he_mcs_nss_supp mcs_map; struct sta_rec_he *he; @@ -739,7 +749,7 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G)) cap |= STA_REC_HE_CAP_BW20_RU242_SUPPORT; - if (vif->bss_conf.he_ldpc && + if (mvif->cap.he_ldpc && (elem->phy_cap_info[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)) cap |= STA_REC_HE_CAP_LDPC; @@ -848,6 +858,7 @@ static void mt7915_mcu_sta_muru_tlv(struct mt7915_dev *dev, struct sk_buff *skb, struct ieee80211_sta *sta, struct ieee80211_vif *vif) { + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem; struct sta_rec_muru *muru; struct tlv *tlv; @@ -860,9 +871,9 @@ mt7915_mcu_sta_muru_tlv(struct mt7915_dev *dev, struct sk_buff *skb, muru = (struct sta_rec_muru *)tlv; - muru->cfg.mimo_dl_en = vif->bss_conf.he_mu_beamformer || - vif->bss_conf.vht_mu_beamformer || - vif->bss_conf.vht_mu_beamformee; + muru->cfg.mimo_dl_en = mvif->cap.he_mu_ebfer || + mvif->cap.vht_mu_ebfer || + mvif->cap.vht_mu_ebfee; if (!is_mt7915(&dev->mt76)) muru->cfg.mimo_ul_en = true; muru->cfg.ofdma_dl_en = true; @@ -897,6 +908,8 @@ mt7915_mcu_sta_muru_tlv(struct mt7915_dev *dev, struct sk_buff *skb, HE_MAC(CAP2_MU_CASCADING, elem->mac_cap_info[2]); muru->ofdma_ul.uo_ra = HE_MAC(CAP3_OFDMA_RA, elem->mac_cap_info[3]); + muru->ofdma_ul.rx_ctrl_frame_to_mbss = + HE_MAC(CAP3_RX_CTRL_FRAME_TO_MULTIBSS, elem->mac_cap_info[3]); } static void @@ -995,8 +1008,8 @@ mt7915_mcu_sta_wtbl_tlv(struct mt7915_dev *dev, struct sk_buff *skb, mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, wcid, tlv, wtbl_hdr); if (sta) mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, skb, sta, tlv, - 
wtbl_hdr, vif->bss_conf.ht_ldpc, - vif->bss_conf.vht_ldpc); + wtbl_hdr, mvif->cap.ht_ldpc, + mvif->cap.vht_ldpc); return 0; } @@ -1005,23 +1018,24 @@ static inline bool mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool bfee) { - int tx_ant = hweight8(phy->mt76->chainmask) - 1; + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + int sts = hweight16(phy->mt76->chainmask); if (vif->type != NL80211_IFTYPE_STATION && vif->type != NL80211_IFTYPE_AP) return false; - if (!bfee && tx_ant < 2) + if (!bfee && sts < 2) return false; if (sta->deflink.he_cap.has_he) { struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem; if (bfee) - return vif->bss_conf.he_su_beamformee && + return mvif->cap.he_su_ebfee && HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]); else - return vif->bss_conf.he_su_beamformer && + return mvif->cap.he_su_ebfer && HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]); } @@ -1029,10 +1043,10 @@ mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif, u32 cap = sta->deflink.vht_cap.cap; if (bfee) - return vif->bss_conf.vht_su_beamformee && + return mvif->cap.vht_su_ebfee && (cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE); else - return vif->bss_conf.vht_su_beamformer && + return mvif->cap.vht_su_ebfer && (cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE); } @@ -1527,7 +1541,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev, cap |= STA_CAP_TX_STBC; if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) cap |= STA_CAP_RX_STBC; - if (vif->bss_conf.ht_ldpc && + if (mvif->cap.ht_ldpc && (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)) cap |= STA_CAP_LDPC; @@ -1553,7 +1567,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev, cap |= STA_CAP_VHT_TX_STBC; if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1) cap |= STA_CAP_VHT_RX_STBC; - if (vif->bss_conf.vht_ldpc && + if (mvif->cap.vht_ldpc && (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)) cap |= STA_CAP_VHT_LDPC; @@ -1872,10 +1886,9 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif, memcpy(buf + MT_TXD_SIZE, skb->data, skb->len); } -static void -mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif, - struct sk_buff *rskb, struct bss_info_bcn *bcn, - u32 changed) +int +mt7915_mcu_add_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif, + u32 changed) { #define OFFLOAD_TX_MODE_SU BIT(0) #define OFFLOAD_TX_MODE_MU BIT(1) @@ -1885,14 +1898,27 @@ mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vi struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef; enum nl80211_band band = chandef->chan->band; struct mt76_wcid *wcid = &dev->mt76.global_wcid; + struct bss_info_bcn *bcn; struct bss_info_inband_discovery *discov; struct ieee80211_tx_info *info; - struct sk_buff *skb = NULL; - struct tlv *tlv; + struct sk_buff *rskb, *skb = NULL; + struct tlv *tlv, *sub_tlv; bool ext_phy = phy != &dev->phy; u8 *buf, interval; int len; + if (vif->bss_conf.nontransmitted) + return 0; + + rskb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, NULL, + MT7915_MAX_BSS_OFFLOAD_SIZE); + if (IS_ERR(rskb)) + return PTR_ERR(rskb); + + tlv = mt76_connac_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn)); + bcn = (struct bss_info_bcn *)tlv; + bcn->enable = true; + if (changed & BSS_CHANGED_FILS_DISCOVERY && vif->bss_conf.fils_discovery.max_interval) { interval = 
vif->bss_conf.fils_discovery.max_interval; @@ -1903,27 +1929,29 @@ mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vi skb = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif); } - if (!skb) - return; + if (!skb) { + dev_kfree_skb(rskb); + return -EINVAL; + } info = IEEE80211_SKB_CB(skb); info->control.vif = vif; info->band = band; - info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, ext_phy); len = sizeof(*discov) + MT_TXD_SIZE + skb->len; len = (len & 0x3) ? ((len | 0x3) + 1) : len; - if (len > (MT7915_MAX_BSS_OFFLOAD_SIZE - rskb->len)) { + if (skb->len > MT7915_MAX_BEACON_SIZE) { dev_err(dev->mt76.dev, "inband discovery size limit exceed\n"); + dev_kfree_skb(rskb); dev_kfree_skb(skb); - return; + return -EINVAL; } - tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_DISCOV, - len, &bcn->sub_ntlv, &bcn->len); - discov = (struct bss_info_inband_discovery *)tlv; + sub_tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_DISCOV, + len, &bcn->sub_ntlv, &bcn->len); + discov = (struct bss_info_inband_discovery *)sub_tlv; discov->tx_mode = OFFLOAD_TX_MODE_SU; /* 0: UNSOL PROBE RESP, 1: FILS DISCOV */ discov->tx_type = !!(changed & BSS_CHANGED_FILS_DISCOVERY); @@ -1931,13 +1959,16 @@ mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vi discov->prob_rsp_len = cpu_to_le16(MT_TXD_SIZE + skb->len); discov->enable = true; - buf = (u8 *)tlv + sizeof(*discov); + buf = (u8 *)sub_tlv + sizeof(*discov); mt7915_mac_write_txwi(&dev->mt76, (__le32 *)buf, skb, wcid, 0, NULL, 0, changed); memcpy(buf + MT_TXD_SIZE, skb->data, skb->len); dev_kfree_skb(skb); + + return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb, + MCU_EXT_CMD(BSS_INFO_UPDATE), true); } int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -1970,11 +2001,14 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, goto out; skb = ieee80211_beacon_get_template(hw, vif, &offs, 0); - if (!skb) + if (!skb) { + dev_kfree_skb(rskb); return -EINVAL; + } - if (skb->len > MT7915_MAX_BEACON_SIZE - MT_TXD_SIZE) { + if (skb->len > MT7915_MAX_BEACON_SIZE) { dev_err(dev->mt76.dev, "Bcn size limit exceed\n"); + dev_kfree_skb(rskb); dev_kfree_skb(skb); return -EINVAL; } @@ -1987,11 +2021,6 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mt7915_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs); dev_kfree_skb(skb); - if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP || - changed & BSS_CHANGED_FILS_DISCOVERY) - mt7915_mcu_beacon_inband_discov(dev, vif, rskb, - bcn, changed); - out: return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb, MCU_EXT_CMD(BSS_INFO_UPDATE), true); @@ -2112,12 +2141,11 @@ int mt7915_mcu_muru_debug_set(struct mt7915_dev *dev, bool enabled) sizeof(data), false); } -int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy, void *ms) +int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy) { struct mt7915_dev *dev = phy->dev; struct sk_buff *skb; - struct mt7915_mcu_muru_stats *mu_stats = - (struct mt7915_mcu_muru_stats *)ms; + struct mt7915_mcu_muru_stats *mu_stats; int ret; struct { @@ -2133,7 +2161,43 @@ int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy, void *ms) if (ret) return ret; - memcpy(mu_stats, skb->data, sizeof(struct mt7915_mcu_muru_stats)); + mu_stats = (struct mt7915_mcu_muru_stats *)(skb->data); + + /* accumulate stats, these are clear-on-read */ +#define __dl_u32(s) phy->mib.dl_##s += le32_to_cpu(mu_stats->dl.s) +#define __ul_u32(s) phy->mib.ul_##s += le32_to_cpu(mu_stats->ul.s) + 
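/* For reference: each __dl_u32()/__ul_u32() use below expands to an
 * accumulate such as
 *
 *      phy->mib.dl_cck_cnt += le32_to_cpu(mu_stats->dl.cck_cnt);
 *
 * The firmware counters are clear-on-read (see the comment above), so
 * the driver now folds every sample into its persistent mib copy
 * instead of overwriting it as the old memcpy() did; a sample that is
 * not accumulated is lost for good.  The long field list that follows
 * is simply that expansion applied to each DL/UL counter.
 */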
__dl_u32(cck_cnt); + __dl_u32(ofdm_cnt); + __dl_u32(htmix_cnt); + __dl_u32(htgf_cnt); + __dl_u32(vht_su_cnt); + __dl_u32(vht_2mu_cnt); + __dl_u32(vht_3mu_cnt); + __dl_u32(vht_4mu_cnt); + __dl_u32(he_su_cnt); + __dl_u32(he_2ru_cnt); + __dl_u32(he_2mu_cnt); + __dl_u32(he_3ru_cnt); + __dl_u32(he_3mu_cnt); + __dl_u32(he_4ru_cnt); + __dl_u32(he_4mu_cnt); + __dl_u32(he_5to8ru_cnt); + __dl_u32(he_9to16ru_cnt); + __dl_u32(he_gtr16ru_cnt); + + __ul_u32(hetrig_su_cnt); + __ul_u32(hetrig_2ru_cnt); + __ul_u32(hetrig_3ru_cnt); + __ul_u32(hetrig_4ru_cnt); + __ul_u32(hetrig_5to8ru_cnt); + __ul_u32(hetrig_9to16ru_cnt); + __ul_u32(hetrig_gtr16ru_cnt); + __ul_u32(hetrig_2mu_cnt); + __ul_u32(hetrig_3mu_cnt); + __ul_u32(hetrig_4mu_cnt); +#undef __dl_u32 +#undef __ul_u32 + dev_kfree_skb(skb); return 0; @@ -2680,10 +2744,10 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd) if (mt76_connac_spe_idx(phy->mt76->antenna_mask)) req.tx_path_num = fls(phy->mt76->antenna_mask); - if (cmd == MCU_EXT_CMD(SET_RX_PATH) || - dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR) + if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR) req.switch_reason = CH_SWITCH_NORMAL; - else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) + else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL || + phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE) req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef, NL80211_IFTYPE_AP)) @@ -2993,7 +3057,7 @@ int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch) } ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(GET_MIB_INFO), - req, sizeof(req), true, &skb); + req, len * sizeof(req[0]), true, &skb); if (ret) return ret; @@ -3733,6 +3797,62 @@ int mt7915_mcu_twt_agrt_update(struct mt7915_dev *dev, &req, sizeof(req), true); } +int mt7915_mcu_wed_wa_tx_stats(struct mt7915_dev *dev, u16 wlan_idx) +{ + struct { + __le32 cmd; + __le32 num; + __le32 __rsv; + __le16 wlan_idx; + } req = { + .cmd = cpu_to_le32(0x15), + .num = cpu_to_le32(1), + .wlan_idx = cpu_to_le16(wlan_idx), + }; + struct mt7915_mcu_wa_tx_stat { + __le16 wlan_idx; + u8 __rsv[2]; + + /* tx_bytes is deprecated since WA byte counter uses u32, + * which easily leads to overflow. 
+ */ + __le32 tx_bytes; + __le32 tx_packets; + } *res; + struct mt76_wcid *wcid; + struct sk_buff *skb; + int ret; + + ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_WA_PARAM_CMD(QUERY), + &req, sizeof(req), true, &skb); + if (ret) + return ret; + + if (!is_mt7915(&dev->mt76)) + skb_pull(skb, 4); + + res = (struct mt7915_mcu_wa_tx_stat *)skb->data; + + if (le16_to_cpu(res->wlan_idx) != wlan_idx) { + ret = -EINVAL; + goto out; + } + + rcu_read_lock(); + + wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]); + if (wcid) + wcid->stats.tx_packets += le32_to_cpu(res->tx_packets); + else + ret = -EINVAL; + + rcu_read_unlock(); +out: + dev_kfree_skb(skb); + + return ret; +} + int mt7915_mcu_rf_regval(struct mt7915_dev *dev, u32 regidx, u32 *val, bool set) { struct { diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h index b9ea297f382c..1592b5d6751a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h @@ -495,10 +495,14 @@ enum { SER_RECOVER }; -#define MT7915_MAX_BEACON_SIZE 512 -#define MT7915_MAX_INBAND_FRAME_SIZE 256 -#define MT7915_MAX_BSS_OFFLOAD_SIZE (MT7915_MAX_BEACON_SIZE + \ - MT7915_MAX_INBAND_FRAME_SIZE + \ +#define MT7915_MAX_BEACON_SIZE 1308 +#define MT7915_BEACON_UPDATE_SIZE (sizeof(struct sta_req_hdr) + \ + sizeof(struct bss_info_bcn) + \ + sizeof(struct bss_info_bcn_cntdwn) + \ + sizeof(struct bss_info_bcn_mbss) + \ + MT_TXD_SIZE + \ + sizeof(struct bss_info_bcn_cont)) +#define MT7915_MAX_BSS_OFFLOAD_SIZE (MT7915_MAX_BEACON_SIZE + \ MT7915_BEACON_UPDATE_SIZE) #define MT7915_BSS_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \ @@ -511,12 +515,6 @@ enum { sizeof(struct bss_info_bmc_rate) +\ sizeof(struct bss_info_ext_bss)) -#define MT7915_BEACON_UPDATE_SIZE (sizeof(struct sta_req_hdr) + \ - sizeof(struct bss_info_bcn_cntdwn) + \ - sizeof(struct bss_info_bcn_mbss) + \ - sizeof(struct bss_info_bcn_cont) + \ - sizeof(struct bss_info_inband_discovery)) - static inline s8 mt7915_get_power_bound(struct mt7915_phy *phy, s8 txpower) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c index 45f3558bf31c..e7d8e03f826f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c @@ -417,7 +417,7 @@ static u32 mt7915_reg_map_l1(struct mt7915_dev *dev, u32 addr) u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr); u32 l1_remap; - if (is_mt7986(&dev->mt76)) + if (is_mt798x(&dev->mt76)) return MT_CONN_INFRA_OFFSET(addr); l1_remap = is_mt7915(&dev->mt76) ? @@ -447,7 +447,7 @@ static u32 mt7915_reg_map_l2(struct mt7915_dev *dev, u32 addr) /* use read to push write */ dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2); } else { - u32 ofs = is_mt7986(&dev->mt76) ? 0x400000 : 0; + u32 ofs = is_mt798x(&dev->mt76) ? 
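The new mt7915_mcu_wed_wa_tx_stats() above folds the firmware reply into the per-station counters under rcu_read_lock(), since the wcid table entry can be freed concurrently when a station is removed (and it deliberately skips tx_bytes, whose 32-bit WA counter wraps too easily). A generic sketch of that RCU update pattern; the table name, entry type, and bounds are hypothetical.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct sta_entry {
        u64 tx_packets;
};

static struct sta_entry __rcu *sta_table[288];  /* hypothetical table */

static int sta_add_tx_packets(u16 idx, u32 delta)
{
        struct sta_entry *sta;
        int ret = 0;

        if (idx >= ARRAY_SIZE(sta_table))
                return -EINVAL;

        rcu_read_lock();
        sta = rcu_dereference(sta_table[idx]);
        if (sta)
                sta->tx_packets += delta;  /* entry stays valid inside the lock */
        else
                ret = -EINVAL;             /* station was removed meanwhile */
        rcu_read_unlock();

        return ret;
}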
0x400000 : 0; offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET_MT7916, addr); base = FIELD_GET(MT_HIF_REMAP_L2_BASE_MT7916, addr); @@ -545,8 +545,6 @@ static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val) static int mt7915_mmio_wed_offload_enable(struct mtk_wed_device *wed) { struct mt7915_dev *dev; - struct mt7915_phy *phy; - int ret; dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed); @@ -554,43 +552,19 @@ static int mt7915_mmio_wed_offload_enable(struct mtk_wed_device *wed) dev->mt76.token_size = wed->wlan.token_start; spin_unlock_bh(&dev->mt76.token_lock); - ret = wait_event_timeout(dev->mt76.tx_wait, - !dev->mt76.wed_token_count, HZ); - if (!ret) - return -EAGAIN; - - phy = &dev->phy; - mt76_set(dev, MT_AGG_ACR4(phy->mt76->band_idx), MT_AGG_ACR_PPDU_TXS2H); - - phy = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL; - if (phy) - mt76_set(dev, MT_AGG_ACR4(phy->mt76->band_idx), - MT_AGG_ACR_PPDU_TXS2H); - - return 0; + return !wait_event_timeout(dev->mt76.tx_wait, + !dev->mt76.wed_token_count, HZ); } static void mt7915_mmio_wed_offload_disable(struct mtk_wed_device *wed) { struct mt7915_dev *dev; - struct mt7915_phy *phy; dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed); spin_lock_bh(&dev->mt76.token_lock); dev->mt76.token_size = MT7915_TOKEN_SIZE; spin_unlock_bh(&dev->mt76.token_lock); - - /* MT_TXD5_TX_STATUS_HOST (MPDU format) has higher priority than - * MT_AGG_ACR_PPDU_TXS2H (PPDU format) even though ACR bit is set. - */ - phy = &dev->phy; - mt76_clear(dev, MT_AGG_ACR4(phy->mt76->band_idx), MT_AGG_ACR_PPDU_TXS2H); - - phy = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL; - if (phy) - mt76_clear(dev, MT_AGG_ACR4(phy->mt76->band_idx), - MT_AGG_ACR_PPDU_TXS2H); } static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed) @@ -617,7 +591,7 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed) static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size) { - struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc; + struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc; struct mt76_txwi_cache *t = NULL; struct mt7915_dev *dev; struct mt76_queue *q; @@ -785,7 +759,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr, wed->wlan.nbuf = MT7915_HW_TOKEN_SIZE; wed->wlan.tx_tbit[0] = is_mt7915(&dev->mt76) ? 4 : 30; wed->wlan.tx_tbit[1] = is_mt7915(&dev->mt76) ? 5 : 31; - wed->wlan.txfree_tbit = is_mt7986(&dev->mt76) ? 2 : 1; + wed->wlan.txfree_tbit = is_mt798x(&dev->mt76) ? 
2 : 1; wed->wlan.token_start = MT7915_TOKEN_SIZE - wed->wlan.nbuf; wed->wlan.wcid_512 = !is_mt7915(&dev->mt76); @@ -795,7 +769,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr, if (is_mt7915(&dev->mt76)) { wed->wlan.rx_tbit[0] = 16; wed->wlan.rx_tbit[1] = 17; - } else if (is_mt7986(&dev->mt76)) { + } else if (is_mt798x(&dev->mt76)) { wed->wlan.rx_tbit[0] = 22; wed->wlan.rx_tbit[1] = 23; } else { @@ -853,6 +827,7 @@ static int mt7915_mmio_init(struct mt76_dev *mdev, dev->reg.map = mt7916_reg_map; dev->reg.map_size = ARRAY_SIZE(mt7916_reg_map); break; + case 0x7981: case 0x7986: dev->reg.reg_rev = mt7986_reg; dev->reg.offs_rev = mt7916_offs; @@ -1062,8 +1037,8 @@ static int __init mt7915_init(void) if (ret) goto error_pci; - if (IS_ENABLED(CONFIG_MT7986_WMAC)) { - ret = platform_driver_register(&mt7986_wmac_driver); + if (IS_ENABLED(CONFIG_MT798X_WMAC)) { + ret = platform_driver_register(&mt798x_wmac_driver); if (ret) goto error_wmac; } @@ -1080,8 +1055,8 @@ error_pci: static void __exit mt7915_exit(void) { - if (IS_ENABLED(CONFIG_MT7986_WMAC)) - platform_driver_unregister(&mt7986_wmac_driver); + if (IS_ENABLED(CONFIG_MT798X_WMAC)) + platform_driver_unregister(&mt798x_wmac_driver); pci_unregister_driver(&mt7915_pci_driver); pci_unregister_driver(&mt7915_hif_driver); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index b3ead3530740..d317c523b23f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -34,6 +34,10 @@ #define MT7916_FIRMWARE_WM "mediatek/mt7916_wm.bin" #define MT7916_ROM_PATCH "mediatek/mt7916_rom_patch.bin" +#define MT7981_FIRMWARE_WA "mediatek/mt7981_wa.bin" +#define MT7981_FIRMWARE_WM "mediatek/mt7981_wm.bin" +#define MT7981_ROM_PATCH "mediatek/mt7981_rom_patch.bin" + #define MT7986_FIRMWARE_WA "mediatek/mt7986_wa.bin" #define MT7986_FIRMWARE_WM "mediatek/mt7986_wm.bin" #define MT7986_FIRMWARE_WM_MT7975 "mediatek/mt7986_wm_mt7975.bin" @@ -43,6 +47,9 @@ #define MT7915_EEPROM_DEFAULT "mediatek/mt7915_eeprom.bin" #define MT7915_EEPROM_DEFAULT_DBDC "mediatek/mt7915_eeprom_dbdc.bin" #define MT7916_EEPROM_DEFAULT "mediatek/mt7916_eeprom.bin" + +#define MT7981_EEPROM_MT7976_DEFAULT_DBDC "mediatek/mt7981_eeprom_mt7976_dbdc.bin" + #define MT7986_EEPROM_MT7975_DEFAULT "mediatek/mt7986_eeprom_mt7975.bin" #define MT7986_EEPROM_MT7975_DUAL_DEFAULT "mediatek/mt7986_eeprom_mt7975_dual.bin" #define MT7986_EEPROM_MT7976_DEFAULT "mediatek/mt7986_eeprom_mt7976.bin" @@ -129,7 +136,6 @@ struct mt7915_sta { struct mt7915_vif *vif; - struct list_head poll_list; struct list_head rc_list; u32 airtime_ac[8]; @@ -138,7 +144,6 @@ struct mt7915_sta { unsigned long changed; unsigned long jiffies; - unsigned long ampdu_state; struct mt76_connac_sta_key_conf bip; struct { @@ -147,9 +152,23 @@ struct mt7915_sta { } twt; }; +struct mt7915_vif_cap { + bool ht_ldpc:1; + bool vht_ldpc:1; + bool he_ldpc:1; + bool vht_su_ebfer:1; + bool vht_su_ebfee:1; + bool vht_mu_ebfer:1; + bool vht_mu_ebfee:1; + bool he_su_ebfer:1; + bool he_su_ebfee:1; + bool he_mu_ebfer:1; +}; + struct mt7915_vif { struct mt76_vif mt76; /* must be first */ + struct mt7915_vif_cap cap; struct mt7915_sta sta; struct mt7915_phy *phy; @@ -157,67 +176,6 @@ struct mt7915_vif { struct cfg80211_bitrate_mask bitrate_mask; }; -/* per-phy stats. 
*/ -struct mib_stats { - u32 ack_fail_cnt; - u32 fcs_err_cnt; - u32 rts_cnt; - u32 rts_retries_cnt; - u32 ba_miss_cnt; - u32 tx_bf_cnt; - u32 tx_mu_mpdu_cnt; - u32 tx_mu_acked_mpdu_cnt; - u32 tx_su_acked_mpdu_cnt; - u32 tx_bf_ibf_ppdu_cnt; - u32 tx_bf_ebf_ppdu_cnt; - - u32 tx_bf_rx_fb_all_cnt; - u32 tx_bf_rx_fb_he_cnt; - u32 tx_bf_rx_fb_vht_cnt; - u32 tx_bf_rx_fb_ht_cnt; - - u32 tx_bf_rx_fb_bw; /* value of last sample, not cumulative */ - u32 tx_bf_rx_fb_nc_cnt; - u32 tx_bf_rx_fb_nr_cnt; - u32 tx_bf_fb_cpl_cnt; - u32 tx_bf_fb_trig_cnt; - - u32 tx_ampdu_cnt; - u32 tx_stop_q_empty_cnt; - u32 tx_mpdu_attempts_cnt; - u32 tx_mpdu_success_cnt; - u32 tx_pkt_ebf_cnt; - u32 tx_pkt_ibf_cnt; - - u32 tx_rwp_fail_cnt; - u32 tx_rwp_need_cnt; - - /* rx stats */ - u32 rx_fifo_full_cnt; - u32 channel_idle_cnt; - u32 primary_cca_busy_time; - u32 secondary_cca_busy_time; - u32 primary_energy_detect_time; - u32 cck_mdrdy_time; - u32 ofdm_mdrdy_time; - u32 green_mdrdy_time; - u32 rx_vector_mismatch_cnt; - u32 rx_delimiter_fail_cnt; - u32 rx_mrdy_cnt; - u32 rx_len_mismatch_cnt; - u32 rx_mpdu_cnt; - u32 rx_ampdu_cnt; - u32 rx_ampdu_bytes_cnt; - u32 rx_ampdu_valid_subframe_cnt; - u32 rx_ampdu_valid_subframe_bytes_cnt; - u32 rx_pfdrop_cnt; - u32 rx_vec_queue_overflow_drop_cnt; - u32 rx_ba_cnt; - - u32 tx_amsdu[8]; - u32 tx_amsdu_cnt; -}; - /* crash-dump */ struct mt7915_crash_data { guid_t guid; @@ -263,7 +221,7 @@ struct mt7915_phy { u32 rx_ampdu_ts; u32 ampdu_ref; - struct mib_stats mib; + struct mt76_mib_stats mib; struct mt76_channel_state state_ts; #ifdef CONFIG_NL80211_TESTMODE @@ -328,9 +286,7 @@ struct mt7915_dev { #endif struct list_head sta_rc_list; - struct list_head sta_poll_list; struct list_head twt_list; - spinlock_t sta_poll_lock; u32 hw_pattern; @@ -339,6 +295,8 @@ struct mt7915_dev { bool muru_debug; bool ibf; + u8 monitor_mask; + struct dentry *debugfs_dir; struct rchan *relay_fwlog; @@ -420,8 +378,7 @@ mt7915_ext_phy(struct mt7915_dev *dev) static inline u32 mt7915_check_adie(struct mt7915_dev *dev, bool sku) { u32 mask = sku ? 
MT_CONNINFRA_SKU_MASK : MT_ADIE_TYPE_MASK; - - if (!is_mt7986(&dev->mt76)) + if (!is_mt798x(&dev->mt76)) return 0; return mt76_rr(dev, MT_CONNINFRA_SKU_DEC_ADDR) & mask; @@ -431,9 +388,9 @@ extern const struct ieee80211_ops mt7915_ops; extern const struct mt76_testmode_ops mt7915_testmode_ops; extern struct pci_driver mt7915_pci_driver; extern struct pci_driver mt7915_hif_driver; -extern struct platform_driver mt7986_wmac_driver; +extern struct platform_driver mt798x_wmac_driver; -#ifdef CONFIG_MT7986_WMAC +#ifdef CONFIG_MT798X_WMAC int mt7986_wmac_enable(struct mt7915_dev *dev); void mt7986_wmac_disable(struct mt7915_dev *dev); #else @@ -466,6 +423,7 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2); void mt7915_dma_prefetch(struct mt7915_dev *dev); void mt7915_dma_cleanup(struct mt7915_dev *dev); int mt7915_dma_reset(struct mt7915_dev *dev, bool force); +int mt7915_dma_start(struct mt7915_dev *dev, bool reset, bool wed_reset); int mt7915_txbf_init(struct mt7915_dev *dev); void mt7915_init_txpower(struct mt7915_dev *dev, struct ieee80211_supported_band *sband); @@ -491,6 +449,8 @@ int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev, bool add); int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif, struct cfg80211_he_bss_color *he_bss_color); +int mt7915_mcu_add_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif, + u32 changed); int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int enable, u32 changed); int mt7915_mcu_add_obss_spr(struct mt7915_phy *phy, struct ieee80211_vif *vif, @@ -539,6 +499,7 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct rate_info *rate); int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy, struct cfg80211_chan_def *chandef); +int mt7915_mcu_wed_wa_tx_stats(struct mt7915_dev *dev, u16 wcid); int mt7915_mcu_rf_regval(struct mt7915_dev *dev, u32 regidx, u32 *val, bool set); int mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3); int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl); @@ -612,7 +573,6 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, enum mt76_txq_id qid, struct mt76_wcid *wcid, struct ieee80211_sta *sta, struct mt76_tx_info *tx_info); -void mt7915_tx_token_put(struct mt7915_dev *dev); void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, struct sk_buff *skb, u32 *info); bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len); @@ -623,7 +583,7 @@ void mt7915_set_stream_he_caps(struct mt7915_phy *phy); void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy); void mt7915_update_channel(struct mt76_phy *mphy); int mt7915_mcu_muru_debug_set(struct mt7915_dev *dev, bool enable); -int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy, void *ms); +int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy); int mt7915_mcu_wed_enable_rx_stats(struct mt7915_dev *dev); int mt7915_init_debugfs(struct mt7915_phy *phy); void mt7915_debugfs_rx_fw_monitor(struct mt7915_dev *dev, const void *data, int len); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h index c8e478a55081..89ac8e6707b8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h @@ -145,6 +145,9 @@ enum offs_rev { #define MT_PLE_BASE 0x820c0000 #define MT_PLE(ofs) (MT_PLE_BASE + (ofs)) +#define MT_PLE_HOST_RPT0 MT_PLE(0x030) +#define 
MT_PLE_HOST_RPT0_TX_LATENCY BIT(3) + #define MT_FL_Q_EMPTY MT_PLE(__OFFS(PLE_FL_Q_EMPTY)) #define MT_FL_Q0_CTRL MT_PLE(__OFFS(PLE_FL_Q_CTRL)) #define MT_FL_Q2_CTRL MT_PLE(__OFFS(PLE_FL_Q_CTRL) + 0x8) @@ -169,6 +172,7 @@ enum offs_rev { #define MT_MDP_DCR0 MT_MDP(0x000) #define MT_MDP_DCR0_DAMSDU_EN BIT(15) +#define MT_MDP_DCR0_RX_HDR_TRANS_EN BIT(19) #define MT_MDP_DCR1 MT_MDP(0x004) #define MT_MDP_DCR1_MAX_RX_LEN GENMASK(15, 3) @@ -871,7 +875,12 @@ enum offs_rev { #define MT_AFE_RG_WBG_EN_WPLL_UP_MASK BIT(20) #define MT_AFE_RG_WBG_EN_PLL_UP_MASK (MT_AFE_RG_WBG_EN_BPLL_UP_MASK | \ MT_AFE_RG_WBG_EN_WPLL_UP_MASK) -#define MT_AFE_RG_WBG_EN_TXCAL_MASK GENMASK(21, 17) +#define MT_AFE_RG_WBG_EN_TXCAL_WF4 BIT(29) +#define MT_AFE_RG_WBG_EN_TXCAL_BT BIT(21) +#define MT_AFE_RG_WBG_EN_TXCAL_WF3 BIT(20) +#define MT_AFE_RG_WBG_EN_TXCAL_WF2 BIT(19) +#define MT_AFE_RG_WBG_EN_TXCAL_WF1 BIT(18) +#define MT_AFE_RG_WBG_EN_TXCAL_WF0 BIT(17) #define MT_ADIE_SLP_CTRL_BASE(_band) (0x18005000 + ((_band) << 19)) #define MT_ADIE_SLP_CTRL(_band, ofs) (MT_ADIE_SLP_CTRL_BASE(_band) + (ofs)) @@ -1096,6 +1105,12 @@ enum offs_rev { #define MT_TOP_MCU_EMI_BASE MT_TOP(0x1c4) #define MT_TOP_MCU_EMI_BASE_MASK GENMASK(19, 0) +#define MT_TOP_WF_AP_PERI_BASE MT_TOP(0x1c8) +#define MT_TOP_WF_AP_PERI_BASE_MASK GENMASK(19, 0) + +#define MT_TOP_EFUSE_BASE MT_TOP(0x1cc) +#define MT_TOP_EFUSE_BASE_MASK GENMASK(19, 0) + #define MT_TOP_CONN_INFRA_WAKEUP MT_TOP(0x1a0) #define MT_TOP_CONN_INFRA_WAKEUP_MASK BIT(0) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c index 32c137066e7f..06e3d9db996c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c @@ -6,7 +6,6 @@ #include <linux/platform_device.h> #include <linux/pinctrl/consumer.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/of_reserved_mem.h> #include <linux/of_gpio.h> #include <linux/iopoll.h> @@ -16,6 +15,9 @@ #include "mt7915.h" +#define MT7981_CON_INFRA_VERSION 0x02090000 +#define MT7986_CON_INFRA_VERSION 0x02070000 + /* INFRACFG */ #define MT_INFRACFG_CONN2AP_SLPPROT 0x0d0 #define MT_INFRACFG_AP2CONN_SLPPROT 0x0d4 @@ -167,10 +169,14 @@ static u32 mt76_wmac_rmw(void __iomem *base, u32 offset, u32 mask, u32 val) return val; } -static u8 mt7986_wmac_check_adie_type(struct mt7915_dev *dev) +static u8 mt798x_wmac_check_adie_type(struct mt7915_dev *dev) { u32 val; + /* Only DBDC A-die is used with MT7981 */ + if (is_mt7981(&dev->mt76)) + return ADIE_DBDC; + val = readl(dev->sku + MT_TOP_POS_SKU); return FIELD_GET(MT_TOP_POS_SKU_ADIE_DBDC_MASK, val); @@ -195,7 +201,7 @@ static int mt7986_wmac_gpio_setup(struct mt7915_dev *dev) int ret; u8 type; - type = mt7986_wmac_check_adie_type(dev); + type = mt798x_wmac_check_adie_type(dev); pinctrl = devm_pinctrl_get(dev->mt76.dev); if (IS_ERR(pinctrl)) return PTR_ERR(pinctrl); @@ -257,16 +263,26 @@ static int mt7986_wmac_consys_lockup(struct mt7915_dev *dev, bool enable) return 0; } -static int mt7986_wmac_coninfra_check(struct mt7915_dev *dev) +static int mt798x_wmac_coninfra_check(struct mt7915_dev *dev) { u32 cur; + u32 con_infra_version; + + if (is_mt7981(&dev->mt76)) { + con_infra_version = MT7981_CON_INFRA_VERSION; + } else if (is_mt7986(&dev->mt76)) { + con_infra_version = MT7986_CON_INFRA_VERSION; + } else { + WARN_ON(1); + return -EINVAL; + } - return read_poll_timeout(mt76_rr, cur, (cur == 0x02070000), + return read_poll_timeout(mt76_rr, cur, (cur == con_infra_version), USEC_PER_MSEC, 50 * 
USEC_PER_MSEC, false, dev, MT_CONN_INFRA_BASE); } -static int mt7986_wmac_coninfra_setup(struct mt7915_dev *dev) +static int mt798x_wmac_coninfra_setup(struct mt7915_dev *dev) { struct device *pdev = dev->mt76.dev; struct reserved_mem *rmem; @@ -284,15 +300,25 @@ static int mt7986_wmac_coninfra_setup(struct mt7915_dev *dev) val = (rmem->base >> 16) & MT_TOP_MCU_EMI_BASE_MASK; - /* Set conninfra subsys PLL check */ - mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS, - MT_INFRA_CKGEN_BUS_RDY_SEL_MASK, 0x1); - mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS, - MT_INFRA_CKGEN_BUS_RDY_SEL_MASK, 0x1); + if (is_mt7986(&dev->mt76)) { + /* Set conninfra subsys PLL check */ + mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS, + MT_INFRA_CKGEN_BUS_RDY_SEL_MASK, 0x1); + mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS, + MT_INFRA_CKGEN_BUS_RDY_SEL_MASK, 0x1); + } mt76_rmw_field(dev, MT_TOP_MCU_EMI_BASE, MT_TOP_MCU_EMI_BASE_MASK, val); + if (is_mt7981(&dev->mt76)) { + mt76_rmw_field(dev, MT_TOP_WF_AP_PERI_BASE, + MT_TOP_WF_AP_PERI_BASE_MASK, 0x300d0000 >> 16); + + mt76_rmw_field(dev, MT_TOP_EFUSE_BASE, + MT_TOP_EFUSE_BASE_MASK, 0x11f20000 >> 16); + } + mt76_wr(dev, MT_INFRA_BUS_EMI_START, rmem->base); mt76_wr(dev, MT_INFRA_BUS_EMI_END, rmem->size); @@ -305,15 +331,18 @@ static int mt7986_wmac_coninfra_setup(struct mt7915_dev *dev) return 0; } -static int mt7986_wmac_sku_setup(struct mt7915_dev *dev, u32 *adie_type) +static int mt798x_wmac_sku_setup(struct mt7915_dev *dev, u32 *adie_type) { int ret; - u32 adie_main, adie_ext; + u32 adie_main = 0, adie_ext = 0; mt76_rmw_field(dev, MT_CONN_INFRA_ADIE_RESET, MT_CONN_INFRA_ADIE1_RESET_MASK, 0x1); - mt76_rmw_field(dev, MT_CONN_INFRA_ADIE_RESET, - MT_CONN_INFRA_ADIE2_RESET_MASK, 0x1); + + if (is_mt7986(&dev->mt76)) { + mt76_rmw_field(dev, MT_CONN_INFRA_ADIE_RESET, + MT_CONN_INFRA_ADIE2_RESET_MASK, 0x1); + } mt76_wmac_spi_lock(dev); @@ -321,9 +350,11 @@ static int mt7986_wmac_sku_setup(struct mt7915_dev *dev, u32 *adie_type) if (ret) goto out; - ret = mt76_wmac_spi_read(dev, 1, MT_ADIE_CHIP_ID, &adie_ext); - if (ret) - goto out; + if (is_mt7986(&dev->mt76)) { + ret = mt76_wmac_spi_read(dev, 1, MT_ADIE_CHIP_ID, &adie_ext); + if (ret) + goto out; + } *adie_type = FIELD_GET(MT_ADIE_CHIP_ID_MASK, adie_main) | (MT_ADIE_CHIP_ID_MASK & adie_ext); @@ -470,7 +501,7 @@ static int mt7986_wmac_adie_xtal_trim_7976(struct mt7915_dev *dev, u8 adie) return ret; } -static int mt7986_wmac_adie_patch_7976(struct mt7915_dev *dev, u8 adie) +static int mt798x_wmac_adie_patch_7976(struct mt7915_dev *dev, u8 adie) { u32 id, version, rg_xo_01, rg_xo_03; int ret; @@ -489,7 +520,14 @@ static int mt7986_wmac_adie_patch_7976(struct mt7915_dev *dev, u8 adie) rg_xo_01 = 0x1d59080f; rg_xo_03 = 0x34c00fe0; } else { - rg_xo_01 = 0x1959f80f; + if (is_mt7981(&dev->mt76)) { + rg_xo_01 = 0x1959c80f; + } else if (is_mt7986(&dev->mt76)) { + rg_xo_01 = 0x1959f80f; + } else { + WARN_ON(1); + return -EINVAL; + } rg_xo_03 = 0x34d00fe0; } @@ -611,7 +649,15 @@ static int mt7986_wmac_adie_patch_7975(struct mt7915_dev *dev, u8 adie) return ret; /* turn on SX0 LTBUF */ - ret = mt76_wmac_spi_write(dev, adie, 0x074, 0x00000002); + if (is_mt7981(&dev->mt76)) { + ret = mt76_wmac_spi_write(dev, adie, 0x074, 0x00000007); + } else if (is_mt7986(&dev->mt76)) { + ret = mt76_wmac_spi_write(dev, adie, 0x074, 0x00000002); + } else { + WARN_ON(1); + return -EINVAL; + } + if (ret) return ret; @@ -658,7 +704,10 @@ static int mt7986_wmac_adie_patch_7975(struct mt7915_dev *dev, u8 adie) return ret; /* set CKB driving and filter */ - return mt76_wmac_spi_write(dev, 
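mt798x_wmac_coninfra_check() above selects the expected CONN_INFRA version per chip and then uses read_poll_timeout() to spin until the register reports it. A minimal sketch of that polling idiom from <linux/iopoll.h>, assuming a plain readl() accessor and a made-up register base and version:

#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/time64.h>

#define EXPECTED_VERSION 0x02090000     /* hypothetical */

static int wait_for_version(void __iomem *base)
{
        u32 cur;

        /* readl(base) every ~1 ms until it matches, 50 ms budget. */
        return read_poll_timeout(readl, cur, cur == EXPECTED_VERSION,
                                 USEC_PER_MSEC, 50 * USEC_PER_MSEC,
                                 false, base);
}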
adie, 0x2c8, 0x00000072); + if (is_mt7986(&dev->mt76)) + return mt76_wmac_spi_write(dev, adie, 0x2c8, 0x00000072); + + return ret; } static int mt7986_wmac_adie_cfg(struct mt7915_dev *dev, u8 adie, u32 adie_type) @@ -686,7 +735,7 @@ static int mt7986_wmac_adie_cfg(struct mt7915_dev *dev, u8 adie, u32 adie_type) ret = mt7986_wmac_adie_patch_7975(dev, adie); } else if (is_7976(dev, adie, adie_type)) { - if (mt7986_wmac_check_adie_type(dev) == ADIE_DBDC) { + if (mt798x_wmac_check_adie_type(dev) == ADIE_DBDC) { ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_WRI_CK_SEL, 0x1c); if (ret) @@ -701,7 +750,7 @@ static int mt7986_wmac_adie_cfg(struct mt7915_dev *dev, u8 adie, u32 adie_type) if (ret) goto out; - ret = mt7986_wmac_adie_patch_7976(dev, adie); + ret = mt798x_wmac_adie_patch_7976(dev, adie); } out: mt76_wmac_spi_unlock(dev); @@ -714,6 +763,7 @@ mt7986_wmac_afe_cal(struct mt7915_dev *dev, u8 adie, bool dbdc, u32 adie_type) { int ret; u8 idx; + u32 txcal; mt76_wmac_spi_lock(dev); if (is_7975(dev, adie, adie_type)) @@ -744,12 +794,18 @@ mt7986_wmac_afe_cal(struct mt7915_dev *dev, u8 adie, bool dbdc, u32 adie_type) MT_AFE_RG_WBG_EN_WPLL_UP_MASK, 0x1); usleep_range(60, 100); - mt76_rmw_field(dev, MT_AFE_DIG_EN_01(idx), - MT_AFE_RG_WBG_EN_TXCAL_MASK, 0x1f); + txcal = (MT_AFE_RG_WBG_EN_TXCAL_BT | + MT_AFE_RG_WBG_EN_TXCAL_WF0 | + MT_AFE_RG_WBG_EN_TXCAL_WF1 | + MT_AFE_RG_WBG_EN_TXCAL_WF2 | + MT_AFE_RG_WBG_EN_TXCAL_WF3); + if (is_mt7981(&dev->mt76)) + txcal |= MT_AFE_RG_WBG_EN_TXCAL_WF4; + + mt76_set(dev, MT_AFE_DIG_EN_01(idx), txcal); usleep_range(800, 1000); - mt76_rmw(dev, MT_AFE_DIG_EN_01(idx), - MT_AFE_RG_WBG_EN_TXCAL_MASK, 0x0); + mt76_clear(dev, MT_AFE_DIG_EN_01(idx), txcal); mt76_rmw(dev, MT_AFE_DIG_EN_03(idx), MT_AFE_RG_WBG_EN_PLL_UP_MASK, 0x0); @@ -806,7 +862,7 @@ static int mt7986_wmac_bus_timeout(struct mt7915_dev *dev) mt76_rmw_field(dev, MT_INFRA_BUS_ON_TIMEOUT, MT_INFRA_BUS_TIMEOUT_EN_MASK, 0xf); - return mt7986_wmac_coninfra_check(dev); + return mt798x_wmac_coninfra_check(dev); } static void mt7986_wmac_clock_enable(struct mt7915_dev *dev, u32 adie_type) @@ -876,14 +932,15 @@ static int mt7986_wmac_top_wfsys_wakeup(struct mt7915_dev *dev, bool enable) if (!enable) return 0; - return mt7986_wmac_coninfra_check(dev); + return mt798x_wmac_coninfra_check(dev); } static int mt7986_wmac_wm_enable(struct mt7915_dev *dev, bool enable) { u32 cur; - mt76_wr(dev, MT_CONNINFRA_SKU_DEC_ADDR, 0); + if (is_mt7986(&dev->mt76)) + mt76_wr(dev, MT_CONNINFRA_SKU_DEC_ADDR, 0); mt76_rmw_field(dev, MT7986_TOP_WM_RESET, MT7986_TOP_WM_RESET_MASK, enable); @@ -1006,7 +1063,7 @@ mt7986_wmac_adie_setup(struct mt7915_dev *dev, u8 adie, u32 adie_type) if (ret) return ret; - if (!adie && (mt7986_wmac_check_adie_type(dev) == ADIE_DBDC)) + if (!adie && (mt798x_wmac_check_adie_type(dev) == ADIE_DBDC)) ret = mt7986_wmac_afe_cal(dev, adie, true, adie_type); return ret; @@ -1061,15 +1118,15 @@ int mt7986_wmac_enable(struct mt7915_dev *dev) if (ret) return ret; - ret = mt7986_wmac_coninfra_check(dev); + ret = mt798x_wmac_coninfra_check(dev); if (ret) return ret; - ret = mt7986_wmac_coninfra_setup(dev); + ret = mt798x_wmac_coninfra_setup(dev); if (ret) return ret; - ret = mt7986_wmac_sku_setup(dev, &adie_type); + ret = mt798x_wmac_sku_setup(dev, &adie_type); if (ret) return ret; @@ -1077,9 +1134,12 @@ int mt7986_wmac_enable(struct mt7915_dev *dev) if (ret) return ret; - ret = mt7986_wmac_adie_setup(dev, 1, adie_type); - if (ret) - return ret; + /* mt7981 doesn't support a second a-die */ + if (is_mt7986(&dev->mt76)) { + ret 
= mt7986_wmac_adie_setup(dev, 1, adie_type); + if (ret) + return ret; + } ret = mt7986_wmac_subsys_powerup(dev, adie_type); if (ret) @@ -1132,7 +1192,7 @@ void mt7986_wmac_disable(struct mt7915_dev *dev) mt7986_wmac_consys_reset(dev, false); } -static int mt7986_wmac_init(struct mt7915_dev *dev) +static int mt798x_wmac_init(struct mt7915_dev *dev) { struct device *pdev = dev->mt76.dev; struct platform_device *pfdev = to_platform_device(pdev); @@ -1159,13 +1219,10 @@ static int mt7986_wmac_init(struct mt7915_dev *dev) return PTR_ERR(dev->sku); dev->rstc = devm_reset_control_get(pdev, "consys"); - if (IS_ERR(dev->rstc)) - return PTR_ERR(dev->rstc); - - return 0; + return PTR_ERR_OR_ZERO(dev->rstc); } -static int mt7986_wmac_probe(struct platform_device *pdev) +static int mt798x_wmac_probe(struct platform_device *pdev) { void __iomem *mem_base; struct mt7915_dev *dev; @@ -1203,7 +1260,7 @@ static int mt7986_wmac_probe(struct platform_device *pdev) if (ret) goto free_device; - ret = mt7986_wmac_init(dev); + ret = mt798x_wmac_init(dev); if (ret) goto free_irq; @@ -1225,7 +1282,7 @@ free_device: return ret; } -static int mt7986_wmac_remove(struct platform_device *pdev) +static int mt798x_wmac_remove(struct platform_device *pdev) { struct mt7915_dev *dev = platform_get_drvdata(pdev); @@ -1234,20 +1291,21 @@ static int mt7986_wmac_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id mt7986_wmac_of_match[] = { +static const struct of_device_id mt798x_wmac_of_match[] = { + { .compatible = "mediatek,mt7981-wmac", .data = (u32 *)0x7981 }, { .compatible = "mediatek,mt7986-wmac", .data = (u32 *)0x7986 }, {}, }; -MODULE_DEVICE_TABLE(of, mt7986_wmac_of_match); +MODULE_DEVICE_TABLE(of, mt798x_wmac_of_match); -struct platform_driver mt7986_wmac_driver = { +struct platform_driver mt798x_wmac_driver = { .driver = { - .name = "mt7986-wmac", - .of_match_table = mt7986_wmac_of_match, + .name = "mt798x-wmac", + .of_match_table = mt798x_wmac_of_match, }, - .probe = mt7986_wmac_probe, - .remove = mt7986_wmac_remove, + .probe = mt798x_wmac_probe, + .remove = mt798x_wmac_remove, }; MODULE_FIRMWARE(MT7986_FIRMWARE_WA); @@ -1255,3 +1313,7 @@ MODULE_FIRMWARE(MT7986_FIRMWARE_WM); MODULE_FIRMWARE(MT7986_FIRMWARE_WM_MT7975); MODULE_FIRMWARE(MT7986_ROM_PATCH); MODULE_FIRMWARE(MT7986_ROM_PATCH_MT7975); + +MODULE_FIRMWARE(MT7981_FIRMWARE_WA); +MODULE_FIRMWARE(MT7981_FIRMWARE_WM); +MODULE_FIRMWARE(MT7981_ROM_PATCH); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig index adff2d7350b5..7ed51e057857 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: ISC config MT7921_COMMON tristate - select MT76_CONNAC_LIB + select MT792x_LIB select WANT_DEV_COREDUMP config MT7921E @@ -27,7 +27,7 @@ config MT7921S config MT7921U tristate "MediaTek MT7921U (USB) support" - select MT76_USB + select MT792x_USB select MT7921_COMMON depends on MAC80211 depends on USB diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/Makefile b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile index e5d2d2e131a2..849be9e848e0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/Makefile +++ b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile @@ -5,11 +5,8 @@ obj-$(CONFIG_MT7921E) += mt7921e.o obj-$(CONFIG_MT7921S) += mt7921s.o obj-$(CONFIG_MT7921U) += mt7921u.o -CFLAGS_trace.o := -I$(src) - -mt7921-common-y := mac.o mcu.o main.o init.o debugfs.o trace.o 
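The mt798x of_match table earlier in this hunk stores the chip id directly in the .data pointer ((u32 *)0x7981 and (u32 *)0x7986), so one platform driver can serve both SoCs. A generic sketch of reading such match data back in probe, using device_get_match_data(); the driver's own retrieval may differ, and the names here are hypothetical.

#include <linux/platform_device.h>
#include <linux/property.h>

static int example_probe(struct platform_device *pdev)
{
        /* The pointer value itself encodes the chip id, e.g. 0x7981. */
        unsigned long chip_id =
                (unsigned long)device_get_match_data(&pdev->dev);

        dev_info(&pdev->dev, "chip id 0x%lx\n", chip_id);
        return 0;
}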
+mt7921-common-y := mac.o mcu.o main.o init.o debugfs.o mt7921-common-$(CONFIG_NL80211_TESTMODE) += testmode.o -mt7921-common-$(CONFIG_ACPI) += acpi_sar.o -mt7921e-y := pci.o pci_mac.o pci_mcu.o dma.o +mt7921e-y := pci.o pci_mac.o pci_mcu.o mt7921s-y := sdio.o sdio_mac.o sdio_mcu.o -mt7921u-y := usb.o usb_mac.o +mt7921u-y := usb.o diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h deleted file mode 100644 index 6f2c4a572572..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h +++ /dev/null @@ -1,105 +0,0 @@ -/* SPDX-License-Identifier: ISC */ -/* Copyright (C) 2022 MediaTek Inc. */ - -#ifndef __MT7921_ACPI_SAR_H -#define __MT7921_ACPI_SAR_H - -#define MT7921_ASAR_MIN_DYN 1 -#define MT7921_ASAR_MAX_DYN 8 -#define MT7921_ASAR_MIN_GEO 3 -#define MT7921_ASAR_MAX_GEO 8 -#define MT7921_ASAR_MIN_FG 8 - -#define MT7921_ACPI_MTCL "MTCL" -#define MT7921_ACPI_MTDS "MTDS" -#define MT7921_ACPI_MTGS "MTGS" -#define MT7921_ACPI_MTFG "MTFG" - -struct mt7921_asar_dyn_limit { - u8 idx; - u8 frp[5]; -} __packed; - -struct mt7921_asar_dyn { - u8 names[4]; - u8 enable; - u8 nr_tbl; - DECLARE_FLEX_ARRAY(struct mt7921_asar_dyn_limit, tbl); -} __packed; - -struct mt7921_asar_dyn_limit_v2 { - u8 idx; - u8 frp[11]; -} __packed; - -struct mt7921_asar_dyn_v2 { - u8 names[4]; - u8 enable; - u8 rsvd; - u8 nr_tbl; - DECLARE_FLEX_ARRAY(struct mt7921_asar_dyn_limit_v2, tbl); -} __packed; - -struct mt7921_asar_geo_band { - u8 pwr; - u8 offset; -} __packed; - -struct mt7921_asar_geo_limit { - u8 idx; - /* 0:2G, 1:5G */ - struct mt7921_asar_geo_band band[2]; -} __packed; - -struct mt7921_asar_geo { - u8 names[4]; - u8 version; - u8 nr_tbl; - DECLARE_FLEX_ARRAY(struct mt7921_asar_geo_limit, tbl); -} __packed; - -struct mt7921_asar_geo_limit_v2 { - u8 idx; - /* 0:2G, 1:5G, 2:6G */ - struct mt7921_asar_geo_band band[3]; -} __packed; - -struct mt7921_asar_geo_v2 { - u8 names[4]; - u8 version; - u8 rsvd; - u8 nr_tbl; - DECLARE_FLEX_ARRAY(struct mt7921_asar_geo_limit_v2, tbl); -} __packed; - -struct mt7921_asar_cl { - u8 names[4]; - u8 version; - u8 mode_6g; - u8 cl6g[6]; -} __packed; - -struct mt7921_asar_fg { - u8 names[4]; - u8 version; - u8 rsvd; - u8 nr_flag; - u8 rsvd1; - u8 flag[]; -} __packed; - -struct mt7921_acpi_sar { - u8 ver; - union { - struct mt7921_asar_dyn *dyn; - struct mt7921_asar_dyn_v2 *dyn_v2; - }; - union { - struct mt7921_asar_geo *geo; - struct mt7921_asar_geo_v2 *geo_v2; - }; - struct mt7921_asar_cl *countrylist; - struct mt7921_asar_fg *fg; -}; - -#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c index d6b6edba2fec..616b66a3fde2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c @@ -6,11 +6,11 @@ static int mt7921_reg_set(void *data, u64 val) { - struct mt7921_dev *dev = data; + struct mt792x_dev *dev = data; - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); mt76_wr(dev, dev->mt76.debugfs_reg, val); - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); return 0; } @@ -18,11 +18,11 @@ mt7921_reg_set(void *data, u64 val) static int mt7921_reg_get(void *data, u64 *val) { - struct mt7921_dev *dev = data; + struct mt792x_dev *dev = data; - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); *val = mt76_rr(dev, dev->mt76.debugfs_reg); - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); return 0; } @@ -32,14 +32,14 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, 
mt7921_reg_get, mt7921_reg_set, static int mt7921_fw_debug_set(void *data, u64 val) { - struct mt7921_dev *dev = data; + struct mt792x_dev *dev = data; - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); dev->fw_debug = (u8)val; mt7921_mcu_fw_log_2_host(dev, dev->fw_debug); - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); return 0; } @@ -47,7 +47,7 @@ mt7921_fw_debug_set(void *data, u64 val) static int mt7921_fw_debug_get(void *data, u64 *val) { - struct mt7921_dev *dev = data; + struct mt792x_dev *dev = data; *val = dev->fw_debug; @@ -57,128 +57,7 @@ mt7921_fw_debug_get(void *data, u64 *val) DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug, mt7921_fw_debug_get, mt7921_fw_debug_set, "%lld\n"); -static void -mt7921_ampdu_stat_read_phy(struct mt7921_phy *phy, - struct seq_file *file) -{ - struct mt7921_dev *dev = file->private; - int bound[15], range[4], i; - - if (!phy) - return; - - mt7921_mac_update_mib_stats(phy); - - /* Tx ampdu stat */ - for (i = 0; i < ARRAY_SIZE(range); i++) - range[i] = mt76_rr(dev, MT_MIB_ARNG(0, i)); - - for (i = 0; i < ARRAY_SIZE(bound); i++) - bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1; - - seq_printf(file, "\nPhy0\n"); - - seq_printf(file, "Length: %8d | ", bound[0]); - for (i = 0; i < ARRAY_SIZE(bound) - 1; i++) - seq_printf(file, "%3d %3d | ", bound[i] + 1, bound[i + 1]); - - seq_puts(file, "\nCount: "); - for (i = 0; i < ARRAY_SIZE(bound); i++) - seq_printf(file, "%8d | ", phy->mt76->aggr_stats[i]); - seq_puts(file, "\n"); - - seq_printf(file, "BA miss count: %d\n", phy->mib.ba_miss_cnt); -} - -static int -mt7921_tx_stats_show(struct seq_file *file, void *data) -{ - struct mt7921_dev *dev = file->private; - struct mt7921_phy *phy = &dev->phy; - struct mib_stats *mib = &phy->mib; - int i; - - mt7921_mutex_acquire(dev); - - mt7921_ampdu_stat_read_phy(phy, file); - - seq_puts(file, "Tx MSDU stat:\n"); - for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) { - seq_printf(file, "AMSDU pack count of %d MSDU in TXD: %8d ", - i + 1, mib->tx_amsdu[i]); - if (mib->tx_amsdu_cnt) - seq_printf(file, "(%3d%%)\n", - mib->tx_amsdu[i] * 100 / mib->tx_amsdu_cnt); - else - seq_puts(file, "\n"); - } - - mt7921_mutex_release(dev); - - return 0; -} - -DEFINE_SHOW_ATTRIBUTE(mt7921_tx_stats); - -static int -mt7921_queues_acq(struct seq_file *s, void *data) -{ - struct mt7921_dev *dev = dev_get_drvdata(s->private); - int i; - - mt7921_mutex_acquire(dev); - - for (i = 0; i < 4; i++) { - u32 ctrl, val, qlen = 0; - int j; - - val = mt76_rr(dev, MT_PLE_AC_QEMPTY(i)); - ctrl = BIT(31) | BIT(11) | (i << 24); - - for (j = 0; j < 32; j++) { - if (val & BIT(j)) - continue; - - mt76_wr(dev, MT_PLE_FL_Q0_CTRL, ctrl | j); - qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL, - GENMASK(11, 0)); - } - seq_printf(s, "AC%d: queued=%d\n", i, qlen); - } - - mt7921_mutex_release(dev); - - return 0; -} - -static int -mt7921_queues_read(struct seq_file *s, void *data) -{ - struct mt7921_dev *dev = dev_get_drvdata(s->private); - struct { - struct mt76_queue *q; - char *queue; - } queue_map[] = { - { dev->mphy.q_tx[MT_TXQ_BE], "WFDMA0" }, - { dev->mt76.q_mcu[MT_MCUQ_WM], "MCUWM" }, - { dev->mt76.q_mcu[MT_MCUQ_FWDL], "MCUFWQ" }, - }; - int i; - - for (i = 0; i < ARRAY_SIZE(queue_map); i++) { - struct mt76_queue *q = queue_map[i].q; - - if (!q) - continue; - - seq_printf(s, - "%s: queued=%d head=%d tail=%d\n", - queue_map[i].queue, q->queued, q->head, - q->tail); - } - - return 0; -} +DEFINE_SHOW_ATTRIBUTE(mt792x_tx_stats); static void mt7921_seq_puts_array(struct seq_file *file, const char *str, @@ 
-211,13 +90,13 @@ mt7921_seq_puts_array(struct seq_file *file, const char *str, static int mt7921_txpwr(struct seq_file *s, void *data) { - struct mt7921_dev *dev = dev_get_drvdata(s->private); + struct mt792x_dev *dev = dev_get_drvdata(s->private); struct mt7921_txpwr txpwr; int ret; - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); ret = mt7921_get_txpwr_info(dev, &txpwr); - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); if (ret) return ret; @@ -263,7 +142,7 @@ mt7921_txpwr(struct seq_file *s, void *data) static int mt7921_pm_set(void *data, u64 val) { - struct mt7921_dev *dev = data; + struct mt792x_dev *dev = data; struct mt76_connac_pm *pm = &dev->pm; if (mt76_is_usb(&dev->mt76)) @@ -296,7 +175,7 @@ out: static int mt7921_pm_get(void *data, u64 *val) { - struct mt7921_dev *dev = data; + struct mt792x_dev *dev = data; *val = dev->pm.enable_user; @@ -308,7 +187,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_pm, mt7921_pm_get, mt7921_pm_set, "%lld\n"); static int mt7921_deep_sleep_set(void *data, u64 val) { - struct mt7921_dev *dev = data; + struct mt792x_dev *dev = data; struct mt76_connac_pm *pm = &dev->pm; bool monitor = !!(dev->mphy.hw->conf.flags & IEEE80211_CONF_MONITOR); bool enable = !!val; @@ -316,7 +195,7 @@ mt7921_deep_sleep_set(void *data, u64 val) if (mt76_is_usb(&dev->mt76)) return -EOPNOTSUPP; - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); if (pm->ds_enable_user == enable) goto out; @@ -324,7 +203,7 @@ mt7921_deep_sleep_set(void *data, u64 val) pm->ds_enable = enable && !monitor; mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable); out: - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); return 0; } @@ -332,7 +211,7 @@ out: static int mt7921_deep_sleep_get(void *data, u64 *val) { - struct mt7921_dev *dev = data; + struct mt792x_dev *dev = data; *val = dev->pm.ds_enable_user; @@ -342,67 +221,24 @@ mt7921_deep_sleep_get(void *data, u64 *val) DEFINE_DEBUGFS_ATTRIBUTE(fops_ds, mt7921_deep_sleep_get, mt7921_deep_sleep_set, "%lld\n"); -static int -mt7921_pm_stats(struct seq_file *s, void *data) -{ - struct mt7921_dev *dev = dev_get_drvdata(s->private); - struct mt76_connac_pm *pm = &dev->pm; - - unsigned long awake_time = pm->stats.awake_time; - unsigned long doze_time = pm->stats.doze_time; - - if (!test_bit(MT76_STATE_PM, &dev->mphy.state)) - awake_time += jiffies - pm->stats.last_wake_event; - else - doze_time += jiffies - pm->stats.last_doze_event; - - seq_printf(s, "awake time: %14u\ndoze time: %15u\n", - jiffies_to_msecs(awake_time), - jiffies_to_msecs(doze_time)); - - seq_printf(s, "low power wakes: %9d\n", pm->stats.lp_wake); - - return 0; -} - -static int -mt7921_pm_idle_timeout_set(void *data, u64 val) -{ - struct mt7921_dev *dev = data; - - dev->pm.idle_timeout = msecs_to_jiffies(val); - - return 0; -} - -static int -mt7921_pm_idle_timeout_get(void *data, u64 *val) -{ - struct mt7921_dev *dev = data; - - *val = jiffies_to_msecs(dev->pm.idle_timeout); - - return 0; -} - -DEFINE_DEBUGFS_ATTRIBUTE(fops_pm_idle_timeout, mt7921_pm_idle_timeout_get, - mt7921_pm_idle_timeout_set, "%lld\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_pm_idle_timeout, mt792x_pm_idle_timeout_get, + mt792x_pm_idle_timeout_set, "%lld\n"); static int mt7921_chip_reset(void *data, u64 val) { - struct mt7921_dev *dev = data; + struct mt792x_dev *dev = data; int ret = 0; switch (val) { case 1: /* Reset wifisys directly. */ - mt7921_reset(&dev->mt76); + mt792x_reset(&dev->mt76); break; default: /* Collect the core dump before reset wifisys. 
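The conversions in this hunk keep leaning on the same debugfs boilerplate: a get/set pair wrapped by DEFINE_DEBUGFS_ATTRIBUTE() and registered with debugfs_create_file(). A stripped-down, self-contained sketch of that pattern, with hypothetical names:

#include <linux/debugfs.h>

static u64 example_value;

static int example_get(void *data, u64 *val)
{
        *val = example_value;
        return 0;
}

static int example_set(void *data, u64 val)
{
        example_value = val;
        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_example, example_get, example_set, "%lld\n");

static void example_debugfs_init(struct dentry *dir)
{
        /* 0600: only root may read and write the knob. */
        debugfs_create_file("example", 0600, dir, NULL, &fops_example);
}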
*/ - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); ret = mt76_connac_mcu_chip_config(&dev->mt76); - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); break; } @@ -414,7 +250,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_reset, NULL, mt7921_chip_reset, "%lld\n"); static int mt7921s_sched_quota_read(struct seq_file *s, void *data) { - struct mt7921_dev *dev = dev_get_drvdata(s->private); + struct mt792x_dev *dev = dev_get_drvdata(s->private); struct mt76_sdio *sdio = &dev->mt76.sdio; seq_printf(s, "pse_data_quota\t%d\n", sdio->sched.pse_data_quota); @@ -425,7 +261,7 @@ mt7921s_sched_quota_read(struct seq_file *s, void *data) return 0; } -int mt7921_init_debugfs(struct mt7921_dev *dev) +int mt7921_init_debugfs(struct mt792x_dev *dev) { struct dentry *dir; @@ -435,23 +271,23 @@ int mt7921_init_debugfs(struct mt7921_dev *dev) if (mt76_is_mmio(&dev->mt76)) debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", - dir, mt7921_queues_read); + dir, mt792x_queues_read); else debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", dir, mt76_queues_read); debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir, - mt7921_queues_acq); + mt792x_queues_acq); debugfs_create_devm_seqfile(dev->mt76.dev, "txpower_sku", dir, mt7921_txpwr); - debugfs_create_file("tx_stats", 0400, dir, dev, &mt7921_tx_stats_fops); + debugfs_create_file("tx_stats", 0400, dir, dev, &mt792x_tx_stats_fops); debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug); debugfs_create_file("runtime-pm", 0600, dir, dev, &fops_pm); debugfs_create_file("idle-timeout", 0600, dir, dev, &fops_pm_idle_timeout); debugfs_create_file("chip_reset", 0600, dir, dev, &fops_reset); debugfs_create_devm_seqfile(dev->mt76.dev, "runtime_pm_stats", dir, - mt7921_pm_stats); + mt792x_pm_stats); debugfs_create_file("deep-sleep", 0600, dir, dev, &fops_ds); if (mt76_is_sdio(&dev->mt76)) debugfs_create_devm_seqfile(dev->mt76.dev, "sched-quota", dir, diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c deleted file mode 100644 index 4153cd6c2a01..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c +++ /dev/null @@ -1,314 +0,0 @@ -// SPDX-License-Identifier: ISC -/* Copyright (C) 2020 MediaTek Inc. 
*/ - -#include "mt7921.h" -#include "../dma.h" -#include "../mt76_connac2_mac.h" - -static int mt7921_poll_tx(struct napi_struct *napi, int budget) -{ - struct mt7921_dev *dev; - - dev = container_of(napi, struct mt7921_dev, mt76.tx_napi); - - if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { - napi_complete(napi); - queue_work(dev->mt76.wq, &dev->pm.wake_work); - return 0; - } - - mt76_connac_tx_cleanup(&dev->mt76); - if (napi_complete(napi)) - mt76_connac_irq_enable(&dev->mt76, MT_INT_TX_DONE_ALL); - mt76_connac_pm_unref(&dev->mphy, &dev->pm); - - return 0; -} - -static int mt7921_poll_rx(struct napi_struct *napi, int budget) -{ - struct mt7921_dev *dev; - int done; - - dev = container_of(napi->dev, struct mt7921_dev, mt76.napi_dev); - - if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { - napi_complete(napi); - queue_work(dev->mt76.wq, &dev->pm.wake_work); - return 0; - } - done = mt76_dma_rx_poll(napi, budget); - mt76_connac_pm_unref(&dev->mphy, &dev->pm); - - return done; -} - -static void mt7921_dma_prefetch(struct mt7921_dev *dev) -{ -#define PREFETCH(base, depth) ((base) << 16 | (depth)) - - mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0, 0x4)); - mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x40, 0x4)); - mt76_wr(dev, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x80, 0x4)); - mt76_wr(dev, MT_WFDMA0_RX_RING4_EXT_CTRL, PREFETCH(0xc0, 0x4)); - mt76_wr(dev, MT_WFDMA0_RX_RING5_EXT_CTRL, PREFETCH(0x100, 0x4)); - - mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x140, 0x4)); - mt76_wr(dev, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x180, 0x4)); - mt76_wr(dev, MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x1c0, 0x4)); - mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x200, 0x4)); - mt76_wr(dev, MT_WFDMA0_TX_RING4_EXT_CTRL, PREFETCH(0x240, 0x4)); - mt76_wr(dev, MT_WFDMA0_TX_RING5_EXT_CTRL, PREFETCH(0x280, 0x4)); - mt76_wr(dev, MT_WFDMA0_TX_RING6_EXT_CTRL, PREFETCH(0x2c0, 0x4)); - mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x340, 0x4)); - mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4)); -} - -static int mt7921_dma_disable(struct mt7921_dev *dev, bool force) -{ - /* disable WFDMA0 */ - mt76_clear(dev, MT_WFDMA0_GLO_CFG, - MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN | - MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN | - MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | - MT_WFDMA0_GLO_CFG_OMIT_RX_INFO | - MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); - - if (!mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG, - MT_WFDMA0_GLO_CFG_TX_DMA_BUSY | - MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1)) - return -ETIMEDOUT; - - /* disable dmashdl */ - mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0, - MT_WFDMA0_CSR_TX_DMASHDL_ENABLE); - mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS); - - if (force) { - /* reset */ - mt76_clear(dev, MT_WFDMA0_RST, - MT_WFDMA0_RST_DMASHDL_ALL_RST | - MT_WFDMA0_RST_LOGIC_RST); - - mt76_set(dev, MT_WFDMA0_RST, - MT_WFDMA0_RST_DMASHDL_ALL_RST | - MT_WFDMA0_RST_LOGIC_RST); - } - - return 0; -} - -static int mt7921_dma_enable(struct mt7921_dev *dev) -{ - /* configure perfetch settings */ - mt7921_dma_prefetch(dev); - - /* reset dma idx */ - mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0); - - /* configure delay interrupt */ - mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0); - - mt76_set(dev, MT_WFDMA0_GLO_CFG, - MT_WFDMA0_GLO_CFG_TX_WB_DDONE | - MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN | - MT_WFDMA0_GLO_CFG_CLK_GAT_DIS | - MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | - MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN | - MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); - - mt76_set(dev, MT_WFDMA0_GLO_CFG, - 
MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN); - - mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT); - - /* enable interrupts for TX/RX rings */ - mt76_connac_irq_enable(&dev->mt76, - MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL | - MT_INT_MCU_CMD); - mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE); - - return 0; -} - -static int mt7921_dma_reset(struct mt7921_dev *dev, bool force) -{ - int i, err; - - err = mt7921_dma_disable(dev, force); - if (err) - return err; - - /* reset hw queues */ - for (i = 0; i < __MT_TXQ_MAX; i++) - mt76_queue_reset(dev, dev->mphy.q_tx[i]); - - for (i = 0; i < __MT_MCUQ_MAX; i++) - mt76_queue_reset(dev, dev->mt76.q_mcu[i]); - - mt76_for_each_q_rx(&dev->mt76, i) - mt76_queue_reset(dev, &dev->mt76.q_rx[i]); - - mt76_tx_status_check(&dev->mt76, true); - - return mt7921_dma_enable(dev); -} - -int mt7921_wfsys_reset(struct mt7921_dev *dev) -{ - mt76_clear(dev, MT_WFSYS_SW_RST_B, WFSYS_SW_RST_B); - msleep(50); - mt76_set(dev, MT_WFSYS_SW_RST_B, WFSYS_SW_RST_B); - - if (!__mt76_poll_msec(&dev->mt76, MT_WFSYS_SW_RST_B, - WFSYS_SW_INIT_DONE, WFSYS_SW_INIT_DONE, 500)) - return -ETIMEDOUT; - - return 0; -} - -int mt7921_wpdma_reset(struct mt7921_dev *dev, bool force) -{ - int i, err; - - /* clean up hw queues */ - for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++) - mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true); - - for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++) - mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true); - - mt76_for_each_q_rx(&dev->mt76, i) - mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]); - - if (force) { - err = mt7921_wfsys_reset(dev); - if (err) - return err; - } - err = mt7921_dma_reset(dev, force); - if (err) - return err; - - mt76_for_each_q_rx(&dev->mt76, i) - mt76_queue_rx_reset(dev, i); - - return 0; -} - -int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev) -{ - struct mt76_connac_pm *pm = &dev->pm; - int err; - - /* check if the wpdma must be reinitialized */ - if (mt7921_dma_need_reinit(dev)) { - /* disable interrutpts */ - mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0); - mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); - - err = mt7921_wpdma_reset(dev, false); - if (err) { - dev_err(dev->mt76.dev, "wpdma reset failed\n"); - return err; - } - - /* enable interrutpts */ - mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); - pm->stats.lp_wake++; - } - - return 0; -} - -int mt7921_dma_init(struct mt7921_dev *dev) -{ - int ret; - - mt76_dma_attach(&dev->mt76); - - ret = mt7921_dma_disable(dev, true); - if (ret) - return ret; - - /* init tx queue */ - ret = mt76_connac_init_tx_queues(dev->phy.mt76, MT7921_TXQ_BAND0, - MT7921_TX_RING_SIZE, - MT_TX_RING_BASE, 0); - if (ret) - return ret; - - mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, 0x4); - - /* command to WM */ - ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7921_TXQ_MCU_WM, - MT7921_TX_MCU_RING_SIZE, MT_TX_RING_BASE); - if (ret) - return ret; - - /* firmware download */ - ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, MT7921_TXQ_FWDL, - MT7921_TX_FWDL_RING_SIZE, MT_TX_RING_BASE); - if (ret) - return ret; - - /* event from WM before firmware download */ - ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU], - MT7921_RXQ_MCU_WM, - MT7921_RX_MCU_RING_SIZE, - MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE); - if (ret) - return ret; - - /* Change mcu queue after firmware download */ - ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA], - MT7921_RXQ_MCU_WM, - MT7921_RX_MCU_RING_SIZE, - MT_RX_BUF_SIZE, MT_WFDMA0(0x540)); - if (ret) - return ret; - - /* rx data */ - ret = 
mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], - MT7921_RXQ_BAND0, MT7921_RX_RING_SIZE, - MT_RX_BUF_SIZE, MT_RX_DATA_RING_BASE); - if (ret) - return ret; - - ret = mt76_init_queues(dev, mt7921_poll_rx); - if (ret < 0) - return ret; - - netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, - mt7921_poll_tx); - napi_enable(&dev->mt76.tx_napi); - - return mt7921_dma_enable(dev); -} - -void mt7921_dma_cleanup(struct mt7921_dev *dev) -{ - /* disable */ - mt76_clear(dev, MT_WFDMA0_GLO_CFG, - MT_WFDMA0_GLO_CFG_TX_DMA_EN | - MT_WFDMA0_GLO_CFG_RX_DMA_EN | - MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN | - MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | - MT_WFDMA0_GLO_CFG_OMIT_RX_INFO | - MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); - - mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG, - MT_WFDMA0_GLO_CFG_TX_DMA_BUSY | - MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1); - - /* reset */ - mt76_clear(dev, MT_WFDMA0_RST, - MT_WFDMA0_RST_DMASHDL_ALL_RST | - MT_WFDMA0_RST_LOGIC_RST); - - mt76_set(dev, MT_WFDMA0_RST, - MT_WFDMA0_RST_DMASHDL_ALL_RST | - MT_WFDMA0_RST_LOGIC_RST); - - mt76_dma_cleanup(&dev->mt76); -} diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c index bf1da9fddfab..7d6a9d746011 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c @@ -2,259 +2,135 @@ /* Copyright (C) 2020 MediaTek Inc. */ #include <linux/etherdevice.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/thermal.h> #include <linux/firmware.h> #include "mt7921.h" #include "../mt76_connac2_mac.h" #include "mcu.h" -static const struct ieee80211_iface_limit if_limits[] = { - { - .max = MT7921_MAX_INTERFACES, - .types = BIT(NL80211_IFTYPE_STATION) - }, - { - .max = 1, - .types = BIT(NL80211_IFTYPE_AP) +static ssize_t mt7921_thermal_temp_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + switch (to_sensor_dev_attr(attr)->index) { + case 0: { + struct mt792x_phy *phy = dev_get_drvdata(dev); + struct mt792x_dev *mdev = phy->dev; + int temperature; + + mt792x_mutex_acquire(mdev); + temperature = mt7921_mcu_get_temperature(phy); + mt792x_mutex_release(mdev); + + if (temperature < 0) + return temperature; + /* display in millidegree Celsius */ + return sprintf(buf, "%u\n", temperature * 1000); } -}; - -static const struct ieee80211_iface_combination if_comb[] = { - { - .limits = if_limits, - .n_limits = ARRAY_SIZE(if_limits), - .max_interfaces = MT7921_MAX_INTERFACES, - .num_different_channels = 1, - .beacon_int_infra_match = true, - }, -}; - -static const struct ieee80211_iface_limit if_limits_chanctx[] = { - { - .max = 2, - .types = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_P2P_CLIENT) - }, - { - .max = 1, - .types = BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_P2P_GO) + default: + return -EINVAL; } -}; +} +static SENSOR_DEVICE_ATTR_RO(temp1_input, mt7921_thermal_temp, 0); -static const struct ieee80211_iface_combination if_comb_chanctx[] = { - { - .limits = if_limits_chanctx, - .n_limits = ARRAY_SIZE(if_limits_chanctx), - .max_interfaces = 2, - .num_different_channels = 2, - .beacon_int_infra_match = false, - } +static struct attribute *mt7921_hwmon_attrs[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + NULL, }; +ATTRIBUTE_GROUPS(mt7921_hwmon); -static void -mt7921_regd_notifier(struct wiphy *wiphy, - struct regulatory_request *request) +static int mt7921_thermal_init(struct mt792x_phy *phy) { - struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); - struct 
mt7921_dev *dev = mt7921_hw_dev(hw); + struct wiphy *wiphy = phy->mt76->hw->wiphy; + struct device *hwmon; + const char *name; - memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2)); - dev->mt76.region = request->dfs_region; - dev->country_ie_env = request->country_ie_env; + if (!IS_REACHABLE(CONFIG_HWMON)) + return 0; - mt7921_mutex_acquire(dev); - mt7921_mcu_set_clc(dev, request->alpha2, request->country_ie_env); - mt76_connac_mcu_set_channel_domain(hw->priv); - mt7921_set_tx_sar_pwr(hw, NULL); - mt7921_mutex_release(dev); -} - -static int -mt7921_init_wiphy(struct ieee80211_hw *hw) -{ - struct mt7921_phy *phy = mt7921_hw_phy(hw); - struct mt7921_dev *dev = phy->dev; - struct wiphy *wiphy = hw->wiphy; - - hw->queues = 4; - hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; - hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; - hw->netdev_features = NETIF_F_RXCSUM; - - hw->radiotap_timestamp.units_pos = - IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US; - - phy->slottime = 9; - - hw->sta_data_size = sizeof(struct mt7921_sta); - hw->vif_data_size = sizeof(struct mt7921_vif); - - if (dev->fw_features & MT7921_FW_CAP_CNM) { - wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; - wiphy->iface_combinations = if_comb_chanctx; - wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_chanctx); - } else { - wiphy->flags &= ~WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; - wiphy->iface_combinations = if_comb; - wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); - } - wiphy->flags &= ~(WIPHY_FLAG_IBSS_RSN | WIPHY_FLAG_4ADDR_AP | - WIPHY_FLAG_4ADDR_STATION); - wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_P2P_CLIENT) | - BIT(NL80211_IFTYPE_P2P_GO); - wiphy->max_remain_on_channel_duration = 5000; - wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN; - wiphy->max_scan_ssids = 4; - wiphy->max_sched_scan_plan_interval = - MT76_CONNAC_MAX_TIME_SCHED_SCAN_INTERVAL; - wiphy->max_sched_scan_ie_len = IEEE80211_MAX_DATA_LEN; - wiphy->max_sched_scan_ssids = MT76_CONNAC_MAX_SCHED_SCAN_SSID; - wiphy->max_match_sets = MT76_CONNAC_MAX_SCAN_MATCH; - wiphy->max_sched_scan_reqs = 1; - wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; - wiphy->reg_notifier = mt7921_regd_notifier; - - wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | - NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; - wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL); - wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_LEGACY); - wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HT); - wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT); - wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE); - wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT); - wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0); - - ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); - ieee80211_hw_set(hw, HAS_RATE_CONTROL); - ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD); - ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD); - ieee80211_hw_set(hw, WANT_MONITOR_VIF); - ieee80211_hw_set(hw, SUPPORTS_PS); - ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); - ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); - ieee80211_hw_set(hw, CONNECTION_MONITOR); - - if (dev->pm.enable) - ieee80211_hw_set(hw, CONNECTION_MONITOR); - - hw->max_tx_fragments = 4; + name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7921_%s", + wiphy_name(wiphy)); - return 0; + hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy, + mt7921_hwmon_groups); + 
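/* devm_hwmon_device_register_with_groups() ties the sensor's lifetime to the wiphy device, so no explicit unregister is needed on teardown; PTR_ERR_OR_ZERO() below then collapses the usual IS_ERR()/PTR_ERR() error handling into a single return. */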
return PTR_ERR_OR_ZERO(hwmon); } static void -mt7921_mac_init_band(struct mt7921_dev *dev, u8 band) -{ - u32 mask, set; - - mt76_rmw_field(dev, MT_TMAC_CTCR0(band), - MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f); - mt76_set(dev, MT_TMAC_CTCR0(band), - MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN | - MT_TMAC_CTCR0_INS_DDLMT_EN); - - mt76_set(dev, MT_WF_RMAC_MIB_TIME0(band), MT_WF_RMAC_MIB_RXTIME_EN); - mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band), MT_WF_RMAC_MIB_RXTIME_EN); - - /* enable MIB tx-rx time reporting */ - mt76_set(dev, MT_MIB_SCR1(band), MT_MIB_TXDUR_EN); - mt76_set(dev, MT_MIB_SCR1(band), MT_MIB_RXDUR_EN); - - mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 1536); - /* disable rx rate report by default due to hw issues */ - mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN); - - /* filter out non-resp frames and get instantaneous signal reporting */ - mask = MT_WTBLOFF_TOP_RSCR_RCPI_MODE | MT_WTBLOFF_TOP_RSCR_RCPI_PARAM; - set = FIELD_PREP(MT_WTBLOFF_TOP_RSCR_RCPI_MODE, 0) | - FIELD_PREP(MT_WTBLOFF_TOP_RSCR_RCPI_PARAM, 0x3); - mt76_rmw(dev, MT_WTBLOFF_TOP_RSCR(band), mask, set); -} - -static u8 -mt7921_get_offload_capability(struct device *dev, const char *fw_wm) +mt7921_regd_channel_update(struct wiphy *wiphy, struct mt792x_dev *dev) { - const struct mt76_connac2_fw_trailer *hdr; - struct mt7921_realease_info *rel_info; - const struct firmware *fw; - int ret, i, offset = 0; - const u8 *data, *end; - u8 offload_caps = 0; - - ret = request_firmware(&fw, fw_wm, dev); - if (ret) - return ret; - - if (!fw || !fw->data || fw->size < sizeof(*hdr)) { - dev_err(dev, "Invalid firmware\n"); - goto out; - } - - data = fw->data; - hdr = (const void *)(fw->data + fw->size - sizeof(*hdr)); - - for (i = 0; i < hdr->n_region; i++) { - const struct mt76_connac2_fw_region *region; +#define IS_UNII_INVALID(idx, sfreq, efreq) \ + (!(dev->phy.clc_chan_conf & BIT(idx)) && (cfreq) >= (sfreq) && (cfreq) <= (efreq)) + struct ieee80211_supported_band *sband; + struct mt76_dev *mdev = &dev->mt76; + struct device_node *np, *band_np; + struct ieee80211_channel *ch; + int i, cfreq; + + np = mt76_find_power_limits_node(mdev); + + sband = wiphy->bands[NL80211_BAND_5GHZ]; + band_np = np ? of_get_child_by_name(np, "txpower-5g") : NULL; + for (i = 0; i < sband->n_channels; i++) { + ch = &sband->channels[i]; + cfreq = ch->center_freq; + + if (np && (!band_np || !mt76_find_channel_node(band_np, ch))) { + ch->flags |= IEEE80211_CHAN_DISABLED; + continue; + } - region = (const void *)((const u8 *)hdr - - (hdr->n_region - i) * sizeof(*region)); - offset += le32_to_cpu(region->len); + /* UNII-4 */ + if (IS_UNII_INVALID(0, 5850, 5925)) + ch->flags |= IEEE80211_CHAN_DISABLED; } - data += offset + 16; - rel_info = (struct mt7921_realease_info *)data; - data += sizeof(*rel_info); - end = data + le16_to_cpu(rel_info->len); - - while (data < end) { - rel_info = (struct mt7921_realease_info *)data; - data += sizeof(*rel_info); + sband = wiphy->bands[NL80211_BAND_6GHZ]; + if (!sband) + return; - if (rel_info->tag == MT7921_FW_TAG_FEATURE) { - struct mt7921_fw_features *features; + band_np = np ? 
of_get_child_by_name(np, "txpower-6g") : NULL; + for (i = 0; i < sband->n_channels; i++) { + ch = &sband->channels[i]; + cfreq = ch->center_freq; - features = (struct mt7921_fw_features *)data; - offload_caps = features->data; - break; + if (np && (!band_np || !mt76_find_channel_node(band_np, ch))) { + ch->flags |= IEEE80211_CHAN_DISABLED; + continue; } - data += le16_to_cpu(rel_info->len) + rel_info->pad_len; + /* UNII-5/6/7/8 */ + if (IS_UNII_INVALID(1, 5925, 6425) || + IS_UNII_INVALID(2, 6425, 6525) || + IS_UNII_INVALID(3, 6525, 6875) || + IS_UNII_INVALID(4, 6875, 7125)) + ch->flags |= IEEE80211_CHAN_DISABLED; } - -out: - release_firmware(fw); - - return offload_caps; } -struct ieee80211_ops * -mt7921_get_mac80211_ops(struct device *dev, void *drv_data, u8 *fw_features) +static void +mt7921_regd_notifier(struct wiphy *wiphy, + struct regulatory_request *request) { - struct ieee80211_ops *ops; - - ops = devm_kmemdup(dev, &mt7921_ops, sizeof(mt7921_ops), GFP_KERNEL); - if (!ops) - return NULL; - - *fw_features = mt7921_get_offload_capability(dev, drv_data); - if (!(*fw_features & MT7921_FW_CAP_CNM)) { - ops->remain_on_channel = NULL; - ops->cancel_remain_on_channel = NULL; - ops->add_chanctx = NULL; - ops->remove_chanctx = NULL; - ops->change_chanctx = NULL; - ops->assign_vif_chanctx = NULL; - ops->unassign_vif_chanctx = NULL; - ops->mgd_prepare_tx = NULL; - ops->mgd_complete_tx = NULL; - } - return ops; + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct mt792x_dev *dev = mt792x_hw_dev(hw); + + memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2)); + dev->mt76.region = request->dfs_region; + dev->country_ie_env = request->country_ie_env; + + mt792x_mutex_acquire(dev); + mt7921_mcu_set_clc(dev, request->alpha2, request->country_ie_env); + mt76_connac_mcu_set_channel_domain(hw->priv); + mt7921_set_tx_sar_pwr(hw, NULL); + mt792x_mutex_release(dev); + + mt7921_regd_channel_update(wiphy, dev); } -EXPORT_SYMBOL_GPL(mt7921_get_mac80211_ops); -int mt7921_mac_init(struct mt7921_dev *dev) +int mt7921_mac_init(struct mt792x_dev *dev) { int i; @@ -264,17 +140,17 @@ int mt7921_mac_init(struct mt7921_dev *dev) /* enable hardware rx header translation */ mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_RX_HDR_TRANS_EN); - for (i = 0; i < MT7921_WTBL_SIZE; i++) + for (i = 0; i < MT792x_WTBL_SIZE; i++) mt7921_mac_wtbl_update(dev, i, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); for (i = 0; i < 2; i++) - mt7921_mac_init_band(dev, i); + mt792x_mac_init_band(dev, i); return mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b, 0); } EXPORT_SYMBOL_GPL(mt7921_mac_init); -static int __mt7921_init_hardware(struct mt7921_dev *dev) +static int __mt7921_init_hardware(struct mt792x_dev *dev) { int ret; @@ -282,7 +158,7 @@ static int __mt7921_init_hardware(struct mt7921_dev *dev) * which should be set before firmware download stage. 
*/ mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE); - ret = mt7921_mcu_init(dev); + ret = mt792x_mcu_init(dev); if (ret) goto out; @@ -297,21 +173,21 @@ out: return ret; } -static int mt7921_init_hardware(struct mt7921_dev *dev) +static int mt7921_init_hardware(struct mt792x_dev *dev) { int ret, i; set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); - for (i = 0; i < MT7921_MCU_INIT_RETRY_COUNT; i++) { + for (i = 0; i < MT792x_MCU_INIT_RETRY_COUNT; i++) { ret = __mt7921_init_hardware(dev); if (!ret) break; - mt7921_init_reset(dev); + mt792x_init_reset(dev); } - if (i == MT7921_MCU_INIT_RETRY_COUNT) { + if (i == MT792x_MCU_INIT_RETRY_COUNT) { dev_err(dev->mt76.dev, "hardware init failed\n"); return ret; } @@ -319,26 +195,9 @@ static int mt7921_init_hardware(struct mt7921_dev *dev) return 0; } -static int mt7921_init_wcid(struct mt7921_dev *dev) -{ - int idx; - - /* Beacon and mgmt frames should occupy wcid 0 */ - idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7921_WTBL_STA - 1); - if (idx) - return -ENOSPC; - - dev->mt76.global_wcid.idx = idx; - dev->mt76.global_wcid.hw_key_idx = -1; - dev->mt76.global_wcid.tx_info |= MT_WCID_TX_INFO_SET; - rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid); - - return 0; -} - static void mt7921_init_work(struct work_struct *work) { - struct mt7921_dev *dev = container_of(work, struct mt7921_dev, + struct mt792x_dev *dev = container_of(work, struct mt792x_dev, init_work); int ret; @@ -362,13 +221,19 @@ static void mt7921_init_work(struct work_struct *work) return; } + ret = mt7921_thermal_init(&dev->phy); + if (ret) { + dev_err(dev->mt76.dev, "thermal init failed\n"); + return; + } + /* we support chip reset now */ dev->hw_init_done = true; mt76_connac_mcu_set_deep_sleep(&dev->mt76, dev->pm.ds_enable); } -int mt7921_register_device(struct mt7921_dev *dev) +int mt7921_register_device(struct mt792x_dev *dev) { struct ieee80211_hw *hw = mt76_hw(dev); int ret; @@ -376,17 +241,17 @@ int mt7921_register_device(struct mt7921_dev *dev) dev->phy.dev = dev; dev->phy.mt76 = &dev->mt76.phy; dev->mt76.phy.priv = &dev->phy; - dev->mt76.tx_worker.fn = mt7921_tx_worker; + dev->mt76.tx_worker.fn = mt792x_tx_worker; - INIT_DELAYED_WORK(&dev->pm.ps_work, mt7921_pm_power_save_work); - INIT_WORK(&dev->pm.wake_work, mt7921_pm_wake_work); + INIT_DELAYED_WORK(&dev->pm.ps_work, mt792x_pm_power_save_work); + INIT_WORK(&dev->pm.wake_work, mt792x_pm_wake_work); spin_lock_init(&dev->pm.wake.lock); mutex_init(&dev->pm.mutex); init_waitqueue_head(&dev->pm.wait); if (mt76_is_sdio(&dev->mt76)) init_waitqueue_head(&dev->mt76.sdio.wait); spin_lock_init(&dev->pm.txq_lock); - INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7921_mac_work); + INIT_DELAYED_WORK(&dev->mphy.mac_work, mt792x_mac_work); INIT_DELAYED_WORK(&dev->phy.scan_work, mt7921_scan_work); INIT_DELAYED_WORK(&dev->coredump.work, mt7921_coredump_work); #if IS_ENABLED(CONFIG_IPV6) @@ -395,17 +260,15 @@ int mt7921_register_device(struct mt7921_dev *dev) #endif skb_queue_head_init(&dev->phy.scan_event_list); skb_queue_head_init(&dev->coredump.msg_list); - INIT_LIST_HEAD(&dev->sta_poll_list); - spin_lock_init(&dev->sta_poll_lock); INIT_WORK(&dev->reset_work, mt7921_mac_reset_work); INIT_WORK(&dev->init_work, mt7921_init_work); INIT_WORK(&dev->phy.roc_work, mt7921_roc_work); - timer_setup(&dev->phy.roc_timer, mt7921_roc_timer, 0); + timer_setup(&dev->phy.roc_timer, mt792x_roc_timer, 0); init_waitqueue_head(&dev->phy.roc_wait); - dev->pm.idle_timeout = MT7921_PM_TIMEOUT; + dev->pm.idle_timeout = MT792x_PM_TIMEOUT; 
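/* Hardware bring-up is retried up to MT792x_MCU_INIT_RETRY_COUNT times, with a chip reset (mt792x_init_reset()) between attempts; only when every attempt has failed is the last error propagated. The wake/doze timestamps below are seeded with the current jiffies so the first power-save accounting interval starts from a valid baseline. */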
dev->pm.stats.last_wake_event = jiffies; dev->pm.stats.last_doze_event = jiffies; if (!mt76_is_usb(&dev->mt76)) { @@ -418,16 +281,17 @@ int mt7921_register_device(struct mt7921_dev *dev) if (!mt76_is_mmio(&dev->mt76)) hw->extra_tx_headroom += MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE; - mt7921_init_acpi_sar(dev); + mt792x_init_acpi_sar(dev); - ret = mt7921_init_wcid(dev); + ret = mt792x_init_wcid(dev); if (ret) return ret; - ret = mt7921_init_wiphy(hw); + ret = mt792x_init_wiphy(hw); if (ret) return ret; + hw->wiphy->reg_notifier = mt7921_regd_notifier; dev->mphy.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING | IEEE80211_HT_CAP_MAX_AMSDU; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c index 1675bf520481..867e14f6b93a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c @@ -15,35 +15,7 @@ #define MT_WTBL_AC0_CTT_OFFSET 20 -static u32 mt7921_mac_wtbl_lmac_addr(int idx, u8 offset) -{ - return MT_WTBL_LMAC_OFFS(idx, 0) + offset * 4; -} - -static struct mt76_wcid *mt7921_rx_get_wcid(struct mt7921_dev *dev, - u16 idx, bool unicast) -{ - struct mt7921_sta *sta; - struct mt76_wcid *wcid; - - if (idx >= ARRAY_SIZE(dev->mt76.wcid)) - return NULL; - - wcid = rcu_dereference(dev->mt76.wcid[idx]); - if (unicast || !wcid) - return wcid; - - if (!wcid->sta) - return NULL; - - sta = container_of(wcid, struct mt7921_sta, wcid); - if (!sta->vif) - return NULL; - - return &sta->vif->sta.wcid; -} - -bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask) +bool mt7921_mac_wtbl_update(struct mt792x_dev *dev, int idx, u32 mask) { mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX, FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask); @@ -52,7 +24,12 @@ bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask) 0, 5000); } -void mt7921_mac_sta_poll(struct mt7921_dev *dev) +static u32 mt7921_mac_wtbl_lmac_addr(int idx, u8 offset) +{ + return MT_WTBL_LMAC_OFFS(idx, 0) + offset * 4; +} + +static void mt7921_mac_sta_poll(struct mt792x_dev *dev) { static const u8 ac_to_tid[] = { [IEEE80211_AC_BE] = 0, @@ -61,16 +38,16 @@ void mt7921_mac_sta_poll(struct mt7921_dev *dev) [IEEE80211_AC_VO] = 6 }; struct ieee80211_sta *sta; - struct mt7921_sta *msta; + struct mt792x_sta *msta; u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS]; LIST_HEAD(sta_poll_list); struct rate_info *rate; s8 rssi[4]; int i; - spin_lock_bh(&dev->sta_poll_lock); - list_splice_init(&dev->sta_poll_list, &sta_poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); + list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); while (true) { bool clear = false; @@ -78,15 +55,15 @@ void mt7921_mac_sta_poll(struct mt7921_dev *dev) u16 idx; u8 bw; - spin_lock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); if (list_empty(&sta_poll_list)) { - spin_unlock_bh(&dev->sta_poll_lock); + spin_unlock_bh(&dev->mt76.sta_poll_lock); break; } msta = list_first_entry(&sta_poll_list, - struct mt7921_sta, poll_list); - list_del_init(&msta->poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + struct mt792x_sta, wcid.poll_list); + list_del_init(&msta->wcid.poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); idx = msta->wcid.idx; addr = mt7921_mac_wtbl_lmac_addr(idx, MT_WTBL_AC0_CTT_OFFSET); @@ -183,56 +160,9 @@ void mt7921_mac_sta_poll(struct mt7921_dev *dev) ewma_avg_signal_add(&msta->avg_ack_signal, 
-msta->ack_signal); } } -EXPORT_SYMBOL_GPL(mt7921_mac_sta_poll); - -static void -mt7921_get_status_freq_info(struct mt7921_dev *dev, struct mt76_phy *mphy, - struct mt76_rx_status *status, u8 chfreq) -{ - if (chfreq > 180) { - status->band = NL80211_BAND_6GHZ; - chfreq = (chfreq - 181) * 4 + 1; - } else if (chfreq > 14) { - status->band = NL80211_BAND_5GHZ; - } else { - status->band = NL80211_BAND_2GHZ; - } - status->freq = ieee80211_channel_to_frequency(chfreq, status->band); -} - -static void -mt7921_mac_rssi_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) -{ - struct sk_buff *skb = priv; - struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb); - - if (status->signal > 0) - return; - - if (!ether_addr_equal(vif->addr, hdr->addr1)) - return; - - ewma_rssi_add(&mvif->rssi, -status->signal); -} - -static void -mt7921_mac_assoc_rssi(struct mt7921_dev *dev, struct sk_buff *skb) -{ - struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb); - - if (!ieee80211_is_assoc_resp(hdr->frame_control) && - !ieee80211_is_auth(hdr->frame_control)) - return; - - ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), - IEEE80211_IFACE_ITER_RESUME_ALL, - mt7921_mac_rssi_iter, skb); -} static int -mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) +mt7921_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb) { u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM; struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; @@ -241,7 +171,7 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) u16 hdr_gap; __le32 *rxv = NULL, *rxd = (__le32 *)skb->data; struct mt76_phy *mphy = &dev->mt76.phy; - struct mt7921_phy *phy = &dev->phy; + struct mt792x_phy *phy = &dev->phy; struct ieee80211_supported_band *sband; u32 csum_status = *(u32 *)skb->cb; u32 rxd0 = le32_to_cpu(rxd[0]); @@ -249,7 +179,7 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) u32 rxd2 = le32_to_cpu(rxd[2]); u32 rxd3 = le32_to_cpu(rxd[3]); u32 rxd4 = le32_to_cpu(rxd[4]); - struct mt7921_sta *msta = NULL; + struct mt792x_sta *msta = NULL; u16 seq_ctrl = 0; __le16 fc = 0; u8 mode = 0; @@ -277,17 +207,18 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) chfreq = FIELD_GET(MT_RXD3_NORMAL_CH_FREQ, rxd3); unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M; idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1); - status->wcid = mt7921_rx_get_wcid(dev, idx, unicast); + status->wcid = mt792x_rx_get_wcid(dev, idx, unicast); if (status->wcid) { - msta = container_of(status->wcid, struct mt7921_sta, wcid); - spin_lock_bh(&dev->sta_poll_lock); - if (list_empty(&msta->poll_list)) - list_add_tail(&msta->poll_list, &dev->sta_poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + msta = container_of(status->wcid, struct mt792x_sta, wcid); + spin_lock_bh(&dev->mt76.sta_poll_lock); + if (list_empty(&msta->wcid.poll_list)) + list_add_tail(&msta->wcid.poll_list, + &dev->mt76.sta_poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); } - mt7921_get_status_freq_info(dev, mphy, status, chfreq); + mt792x_get_status_freq_info(status, chfreq); switch (status->band) { case NL80211_BAND_5GHZ: @@ -496,7 +427,7 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) status->flag |= RX_FLAG_8023; } - mt7921_mac_assoc_rssi(dev, skb); + mt792x_mac_assoc_rssi(dev, skb); if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023)) 
mt76_connac2_mac_decode_he_radiotap(&dev->mt76, skb, rxv, mode); @@ -511,33 +442,9 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) return 0; } -static void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi) +void mt7921_mac_add_txs(struct mt792x_dev *dev, void *data) { - struct mt7921_sta *msta; - u16 fc, tid; - u32 val; - - if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he)) - return; - - tid = le32_get_bits(txwi[1], MT_TXD1_TID); - if (tid >= 6) /* skip VO queue */ - return; - - val = le32_to_cpu(txwi[2]); - fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 | - FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4; - if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA))) - return; - - msta = (struct mt7921_sta *)sta->drv_priv; - if (!test_and_set_bit(tid, &msta->ampdu_state)) - ieee80211_start_tx_ba_session(sta, tid, 0); -} - -void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data) -{ - struct mt7921_sta *msta = NULL; + struct mt792x_sta *msta = NULL; struct mt76_wcid *wcid; __le32 *txs_data = data; u16 wcidx; @@ -552,7 +459,7 @@ void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data) if (pid < MT_PACKET_ID_FIRST) return; - if (wcidx >= MT7921_WTBL_SIZE) + if (wcidx >= MT792x_WTBL_SIZE) return; rcu_read_lock(); @@ -561,59 +468,29 @@ void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data) if (!wcid) goto out; - msta = container_of(wcid, struct mt7921_sta, wcid); + msta = container_of(wcid, struct mt792x_sta, wcid); mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data); if (!wcid->sta) goto out; - spin_lock_bh(&dev->sta_poll_lock); - if (list_empty(&msta->poll_list)) - list_add_tail(&msta->poll_list, &dev->sta_poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); + if (list_empty(&msta->wcid.poll_list)) + list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); out: rcu_read_unlock(); } -void mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t, - struct ieee80211_sta *sta, bool clear_status, - struct list_head *free_list) -{ - struct mt76_dev *mdev = &dev->mt76; - __le32 *txwi; - u16 wcid_idx; - - mt76_connac_txp_skb_unmap(mdev, t); - if (!t->skb) - goto out; - - txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t); - if (sta) { - struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; - - if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) - mt7921_tx_check_aggr(sta, txwi); - - wcid_idx = wcid->idx; - } else { - wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX); - } - - __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list); -out: - t->skb = NULL; - mt76_put_txwi(mdev, t); -} -EXPORT_SYMBOL_GPL(mt7921_txwi_free); - -static void mt7921_mac_tx_free(struct mt7921_dev *dev, void *data, int len) +static void mt7921_mac_tx_free(struct mt792x_dev *dev, void *data, int len) { struct mt76_connac_tx_free *free = data; __le32 *tx_info = (__le32 *)(data + sizeof(*free)); struct mt76_dev *mdev = &dev->mt76; struct mt76_txwi_cache *txwi; struct ieee80211_sta *sta = NULL; + struct mt76_wcid *wcid = NULL; struct sk_buff *skb, *tmp; void *end = data + len; LIST_HEAD(free_list); @@ -636,8 +513,7 @@ static void mt7921_mac_tx_free(struct mt7921_dev *dev, void *data, int len) * 1'b0: msdu_id with the same 'wcid pair' as above. 
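 * In the loop below this plays out as: entries flagged MT_TX_FREE_PAIR resolve a new wcid/sta pair and queue the station for polling, while the remaining entries carry an msdu token that is released via mt76_token_release() and completed through mt76_connac2_txwi_free().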
*/ if (info & MT_TX_FREE_PAIR) { - struct mt7921_sta *msta; - struct mt76_wcid *wcid; + struct mt792x_sta *msta; u16 idx; count++; @@ -647,22 +523,29 @@ static void mt7921_mac_tx_free(struct mt7921_dev *dev, void *data, int len) if (!sta) continue; - msta = container_of(wcid, struct mt7921_sta, wcid); - spin_lock_bh(&dev->sta_poll_lock); - if (list_empty(&msta->poll_list)) - list_add_tail(&msta->poll_list, &dev->sta_poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + msta = container_of(wcid, struct mt792x_sta, wcid); + spin_lock_bh(&mdev->sta_poll_lock); + if (list_empty(&msta->wcid.poll_list)) + list_add_tail(&msta->wcid.poll_list, + &mdev->sta_poll_list); + spin_unlock_bh(&mdev->sta_poll_lock); continue; } msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info); stat = FIELD_GET(MT_TX_FREE_STATUS, info); + if (wcid) { + wcid->stats.tx_retries += + FIELD_GET(MT_TX_FREE_COUNT, info) - 1; + wcid->stats.tx_failed += !!stat; + } + txwi = mt76_token_release(mdev, msdu, &wake); if (!txwi) continue; - mt7921_txwi_free(dev, txwi, sta, stat, &free_list); + mt76_connac2_txwi_free(mdev, txwi, sta, &free_list); } if (wake) @@ -682,7 +565,7 @@ static void mt7921_mac_tx_free(struct mt7921_dev *dev, void *data, int len) bool mt7921_rx_check(struct mt76_dev *mdev, void *data, int len) { - struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); __le32 *rxd = (__le32 *)data; __le32 *end = (__le32 *)&rxd[len / 4]; enum rx_pkt_type type; @@ -707,7 +590,7 @@ EXPORT_SYMBOL_GPL(mt7921_rx_check); void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, struct sk_buff *skb, u32 *info) { - struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); __le32 *rxd = (__le32 *)skb->data; __le32 *end = (__le32 *)&skb->data[skb->len]; enum rx_pkt_type type; @@ -747,128 +630,12 @@ void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, } EXPORT_SYMBOL_GPL(mt7921_queue_rx_skb); -void mt7921_mac_reset_counters(struct mt7921_phy *phy) -{ - struct mt7921_dev *dev = phy->dev; - int i; - - for (i = 0; i < 4; i++) { - mt76_rr(dev, MT_TX_AGG_CNT(0, i)); - mt76_rr(dev, MT_TX_AGG_CNT2(0, i)); - } - - dev->mt76.phy.survey_time = ktime_get_boottime(); - memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats)); - - /* reset airtime counters */ - mt76_rr(dev, MT_MIB_SDR9(0)); - mt76_rr(dev, MT_MIB_SDR36(0)); - mt76_rr(dev, MT_MIB_SDR37(0)); - - mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR); - mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR); -} - -void mt7921_mac_set_timing(struct mt7921_phy *phy) -{ - s16 coverage_class = phy->coverage_class; - struct mt7921_dev *dev = phy->dev; - u32 val, reg_offset; - u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) | - FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48); - u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) | - FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28); - bool is_2ghz = phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ; - int sifs = is_2ghz ? 
10 : 16, offset; - - if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state)) - return; - - mt76_set(dev, MT_ARB_SCR(0), - MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE); - udelay(1); - - offset = 3 * coverage_class; - reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) | - FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset); - - mt76_wr(dev, MT_TMAC_CDTR(0), cck + reg_offset); - mt76_wr(dev, MT_TMAC_ODTR(0), ofdm + reg_offset); - mt76_wr(dev, MT_TMAC_ICR0(0), - FIELD_PREP(MT_IFS_EIFS, 360) | - FIELD_PREP(MT_IFS_RIFS, 2) | - FIELD_PREP(MT_IFS_SIFS, sifs) | - FIELD_PREP(MT_IFS_SLOT, phy->slottime)); - - if (phy->slottime < 20 || !is_2ghz) - val = MT7921_CFEND_RATE_DEFAULT; - else - val = MT7921_CFEND_RATE_11B; - - mt76_rmw_field(dev, MT_AGG_ACR0(0), MT_AGG_ACR_CFEND_RATE, val); - mt76_clear(dev, MT_ARB_SCR(0), - MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE); -} - -static u8 -mt7921_phy_get_nf(struct mt7921_phy *phy, int idx) -{ - return 0; -} - -static void -mt7921_phy_update_channel(struct mt76_phy *mphy, int idx) -{ - struct mt7921_dev *dev = container_of(mphy->dev, struct mt7921_dev, mt76); - struct mt7921_phy *phy = (struct mt7921_phy *)mphy->priv; - struct mt76_channel_state *state; - u64 busy_time, tx_time, rx_time, obss_time; - int nf; - - busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx), - MT_MIB_SDR9_BUSY_MASK); - tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx), - MT_MIB_SDR36_TXTIME_MASK); - rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx), - MT_MIB_SDR37_RXTIME_MASK); - obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_AIRTIME14(idx), - MT_MIB_OBSSTIME_MASK); - - nf = mt7921_phy_get_nf(phy, idx); - if (!phy->noise) - phy->noise = nf << 4; - else if (nf) - phy->noise += nf - (phy->noise >> 4); - - state = mphy->chan_state; - state->cc_busy += busy_time; - state->cc_tx += tx_time; - state->cc_rx += rx_time + obss_time; - state->cc_bss_rx += rx_time; - state->noise = -(phy->noise >> 4); -} - -void mt7921_update_channel(struct mt76_phy *mphy) -{ - struct mt7921_dev *dev = container_of(mphy->dev, struct mt7921_dev, mt76); - - if (mt76_connac_pm_wake(mphy, &dev->pm)) - return; - - mt7921_phy_update_channel(mphy, 0); - /* reset obss airtime */ - mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR); - - mt76_connac_power_save_sched(mphy, &dev->pm); -} -EXPORT_SYMBOL_GPL(mt7921_update_channel); - static void mt7921_vif_connect_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) { - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct mt7921_dev *dev = mvif->phy->dev; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mvif->phy->dev; struct ieee80211_hw *hw = mt76_hw(dev); if (vif->type == NL80211_IFTYPE_STATION) @@ -889,7 +656,7 @@ mt7921_vif_connect_iter(void *priv, u8 *mac, /* system error recovery */ void mt7921_mac_reset_work(struct work_struct *work) { - struct mt7921_dev *dev = container_of(work, struct mt7921_dev, + struct mt792x_dev *dev = container_of(work, struct mt792x_dev, reset_work); struct ieee80211_hw *hw = mt76_hw(dev); struct mt76_connac_pm *pm = &dev->pm; @@ -905,7 +672,7 @@ void mt7921_mac_reset_work(struct work_struct *work) for (i = 0; i < 10; i++) { mutex_lock(&dev->mt76.mutex); - ret = mt7921_dev_reset(dev); + ret = mt792x_dev_reset(dev); mutex_unlock(&dev->mt76.mutex); if (!ret) @@ -932,185 +699,12 @@ void mt7921_mac_reset_work(struct work_struct *work) mt76_connac_power_save_sched(&dev->mt76.phy, pm); } -void mt7921_reset(struct mt76_dev *mdev) -{ - struct mt7921_dev *dev = container_of(mdev, struct 
mt7921_dev, mt76); - struct mt76_connac_pm *pm = &dev->pm; - - if (!dev->hw_init_done) - return; - - if (dev->hw_full_reset) - return; - - if (pm->suspended) - return; - - queue_work(dev->mt76.wq, &dev->reset_work); -} -EXPORT_SYMBOL_GPL(mt7921_reset); - -void mt7921_mac_update_mib_stats(struct mt7921_phy *phy) -{ - struct mt7921_dev *dev = phy->dev; - struct mib_stats *mib = &phy->mib; - int i, aggr0 = 0, aggr1; - u32 val; - - mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(0), - MT_MIB_SDR3_FCS_ERR_MASK); - mib->ack_fail_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR3(0), - MT_MIB_ACK_FAIL_COUNT_MASK); - mib->ba_miss_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR2(0), - MT_MIB_BA_FAIL_COUNT_MASK); - mib->rts_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR0(0), - MT_MIB_RTS_COUNT_MASK); - mib->rts_retries_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR1(0), - MT_MIB_RTS_FAIL_COUNT_MASK); - - mib->tx_ampdu_cnt += mt76_rr(dev, MT_MIB_SDR12(0)); - mib->tx_mpdu_attempts_cnt += mt76_rr(dev, MT_MIB_SDR14(0)); - mib->tx_mpdu_success_cnt += mt76_rr(dev, MT_MIB_SDR15(0)); - - val = mt76_rr(dev, MT_MIB_SDR32(0)); - mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR9_EBF_CNT_MASK, val); - mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR9_IBF_CNT_MASK, val); - - val = mt76_rr(dev, MT_ETBF_TX_APP_CNT(0)); - mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, val); - mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, val); - - val = mt76_rr(dev, MT_ETBF_RX_FB_CNT(0)); - mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, val); - mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, val); - mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, val); - mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, val); - - mib->rx_mpdu_cnt += mt76_rr(dev, MT_MIB_SDR5(0)); - mib->rx_ampdu_cnt += mt76_rr(dev, MT_MIB_SDR22(0)); - mib->rx_ampdu_bytes_cnt += mt76_rr(dev, MT_MIB_SDR23(0)); - mib->rx_ba_cnt += mt76_rr(dev, MT_MIB_SDR31(0)); - - for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) { - val = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i)); - mib->tx_amsdu[i] += val; - mib->tx_amsdu_cnt += val; - } - - for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) { - u32 val2; - - val = mt76_rr(dev, MT_TX_AGG_CNT(0, i)); - val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i)); - - phy->mt76->aggr_stats[aggr0++] += val & 0xffff; - phy->mt76->aggr_stats[aggr0++] += val >> 16; - phy->mt76->aggr_stats[aggr1++] += val2 & 0xffff; - phy->mt76->aggr_stats[aggr1++] += val2 >> 16; - } -} - -void mt7921_mac_work(struct work_struct *work) -{ - struct mt7921_phy *phy; - struct mt76_phy *mphy; - - mphy = (struct mt76_phy *)container_of(work, struct mt76_phy, - mac_work.work); - phy = mphy->priv; - - mt7921_mutex_acquire(phy->dev); - - mt76_update_survey(mphy); - if (++mphy->mac_work_count == 2) { - mphy->mac_work_count = 0; - - mt7921_mac_update_mib_stats(phy); - } - - mt7921_mutex_release(phy->dev); - - mt76_tx_status_check(mphy->dev, false); - ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work, - MT7921_WATCHDOG_TIME); -} - -void mt7921_pm_wake_work(struct work_struct *work) -{ - struct mt7921_dev *dev; - struct mt76_phy *mphy; - - dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev, - pm.wake_work); - mphy = dev->phy.mt76; - - if (!mt7921_mcu_drv_pmctrl(dev)) { - struct mt76_dev *mdev = &dev->mt76; - int i; - - if (mt76_is_sdio(mdev)) { - mt76_connac_pm_dequeue_skbs(mphy, &dev->pm); - mt76_worker_schedule(&mdev->sdio.txrx_worker); - } else { - local_bh_disable(); - mt76_for_each_q_rx(mdev, i) - napi_schedule(&mdev->napi[i]); - 
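/* napi_schedule() may only be called with bottom halves disabled, which is why this loop is bracketed by local_bh_disable()/local_bh_enable() when kicking the RX queues from a process-context worker. */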
local_bh_enable(); - mt76_connac_pm_dequeue_skbs(mphy, &dev->pm); - mt76_connac_tx_cleanup(mdev); - } - if (test_bit(MT76_STATE_RUNNING, &mphy->state)) - ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, - MT7921_WATCHDOG_TIME); - } - - ieee80211_wake_queues(mphy->hw); - wake_up(&dev->pm.wait); -} - -void mt7921_pm_power_save_work(struct work_struct *work) -{ - struct mt7921_dev *dev; - unsigned long delta; - struct mt76_phy *mphy; - - dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev, - pm.ps_work.work); - mphy = dev->phy.mt76; - - delta = dev->pm.idle_timeout; - if (test_bit(MT76_HW_SCANNING, &mphy->state) || - test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) || - dev->fw_assert) - goto out; - - if (mutex_is_locked(&dev->mt76.mutex)) - /* if mt76 mutex is held we should not put the device - * to sleep since we are currently accessing device - * register map. We need to wait for the next power_save - * trigger. - */ - goto out; - - if (time_is_after_jiffies(dev->pm.last_activity + delta)) { - delta = dev->pm.last_activity + delta - jiffies; - goto out; - } - - if (!mt7921_mcu_fw_pmctrl(dev)) { - cancel_delayed_work_sync(&mphy->mac_work); - return; - } -out: - queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta); -} - void mt7921_coredump_work(struct work_struct *work) { - struct mt7921_dev *dev; + struct mt792x_dev *dev; char *dump, *data; - dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev, + dev = (struct mt792x_dev *)container_of(work, struct mt792x_dev, coredump.work.work); if (time_is_after_jiffies(dev->coredump.last_activity + @@ -1149,12 +743,12 @@ void mt7921_coredump_work(struct work_struct *work) dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ, GFP_KERNEL); - mt7921_reset(&dev->mt76); + mt792x_reset(&dev->mt76); } /* usb_sdio */ static void -mt7921_usb_sdio_write_txwi(struct mt7921_dev *dev, struct mt76_wcid *wcid, +mt7921_usb_sdio_write_txwi(struct mt792x_dev *dev, struct mt76_wcid *wcid, enum mt76_txq_id qid, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, int pid, struct sk_buff *skb) @@ -1171,7 +765,7 @@ int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, struct ieee80211_sta *sta, struct mt76_tx_info *tx_info) { - struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); struct ieee80211_key_conf *key = info->control.hw_key; struct sk_buff *skb = tx_info->skb; @@ -1180,11 +774,15 @@ int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, if (unlikely(tx_info->skb->len <= ETH_HLEN)) return -EINVAL; + err = skb_cow_head(skb, MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE); + if (err) + return err; + if (!wcid) wcid = &dev->mt76.global_wcid; if (sta) { - struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv; + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; if (time_after(jiffies, msta->last_txs + HZ / 4)) { info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; @@ -1196,7 +794,7 @@ int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, mt7921_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb); type = mt76_is_sdio(mdev) ? 
MT7921_SDIO_DATA : 0; - mt7921_skb_add_usb_sdio_hdr(dev, skb, type); + mt792x_skb_add_usb_sdio_hdr(dev, skb, type); pad = round_up(skb->len, 4) - skb->len; if (mt76_is_usb(mdev)) pad += 4; @@ -1224,7 +822,7 @@ void mt7921_usb_sdio_tx_complete_skb(struct mt76_dev *mdev, sta = wcid_to_sta(wcid); if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE))) - mt7921_tx_check_aggr(sta, txwi); + mt76_connac2_tx_check_aggr(sta, txwi); skb_pull(e->skb, headroom); mt76_tx_complete_skb(mdev, e->wcid, e->skb); @@ -1233,11 +831,11 @@ EXPORT_SYMBOL_GPL(mt7921_usb_sdio_tx_complete_skb); bool mt7921_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update) { - struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); mt7921_mac_sta_poll(dev); - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); return false; } @@ -1246,8 +844,8 @@ EXPORT_SYMBOL_GPL(mt7921_usb_sdio_tx_status_data); #if IS_ENABLED(CONFIG_IPV6) void mt7921_set_ipv6_ns_work(struct work_struct *work) { - struct mt7921_dev *dev = container_of(work, struct mt7921_dev, - ipv6_ns_work); + struct mt792x_dev *dev = container_of(work, struct mt792x_dev, + ipv6_ns_work); struct sk_buff *skb; int ret = 0; @@ -1257,10 +855,10 @@ void mt7921_set_ipv6_ns_work(struct work_struct *work) if (!skb) break; - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_UNI_CMD(OFFLOAD), true); - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); } while (!ret); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 3b6adb29cbef..510a575a973b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -10,7 +10,7 @@ #include "mcu.h" static int -mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band, +mt7921_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band, struct ieee80211_sband_iftype_data *data) { int i, idx = 0; @@ -185,7 +185,7 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band, return idx; } -void mt7921_set_stream_he_caps(struct mt7921_phy *phy) +void mt7921_set_stream_he_caps(struct mt792x_phy *phy) { struct ieee80211_sband_iftype_data *data; struct ieee80211_supported_band *band; @@ -196,8 +196,7 @@ void mt7921_set_stream_he_caps(struct mt7921_phy *phy) n = mt7921_init_he_caps(phy, NL80211_BAND_2GHZ, data); band = &phy->mt76->sband_2g.sband; - band->iftype_data = data; - band->n_iftype_data = n; + _ieee80211_set_sband_iftype_data(band, data, n); } if (phy->mt76->cap.has_5ghz) { @@ -205,21 +204,19 @@ void mt7921_set_stream_he_caps(struct mt7921_phy *phy) n = mt7921_init_he_caps(phy, NL80211_BAND_5GHZ, data); band = &phy->mt76->sband_5g.sband; - band->iftype_data = data; - band->n_iftype_data = n; + _ieee80211_set_sband_iftype_data(band, data, n); if (phy->mt76->cap.has_6ghz) { data = phy->iftype[NL80211_BAND_6GHZ]; n = mt7921_init_he_caps(phy, NL80211_BAND_6GHZ, data); band = &phy->mt76->sband_6g.sband; - band->iftype_data = data; - band->n_iftype_data = n; + _ieee80211_set_sband_iftype_data(band, data, n); } } } -int __mt7921_start(struct mt7921_phy *phy) +int __mt7921_start(struct mt792x_phy *phy) { struct mt76_phy *mphy = phy->mt76; int err; @@ -240,11 +237,11 @@ int __mt7921_start(struct mt7921_phy *phy) if (err) return err; - mt7921_mac_reset_counters(phy); + mt792x_mac_reset_counters(phy); 
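/* The counters are cleared before MT76_STATE_RUNNING is set, so the first mac_work pass scheduled below starts from a fresh survey/aggregation baseline. */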
set_bit(MT76_STATE_RUNNING, &mphy->state); ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, - MT7921_WATCHDOG_TIME); + MT792x_WATCHDOG_TIME); return 0; } @@ -252,48 +249,29 @@ EXPORT_SYMBOL_GPL(__mt7921_start); static int mt7921_start(struct ieee80211_hw *hw) { - struct mt7921_phy *phy = mt7921_hw_phy(hw); + struct mt792x_phy *phy = mt792x_hw_phy(hw); int err; - mt7921_mutex_acquire(phy->dev); + mt792x_mutex_acquire(phy->dev); err = __mt7921_start(phy); - mt7921_mutex_release(phy->dev); + mt792x_mutex_release(phy->dev); return err; } -void mt7921_stop(struct ieee80211_hw *hw) -{ - struct mt7921_dev *dev = mt7921_hw_dev(hw); - struct mt7921_phy *phy = mt7921_hw_phy(hw); - - cancel_delayed_work_sync(&phy->mt76->mac_work); - - cancel_delayed_work_sync(&dev->pm.ps_work); - cancel_work_sync(&dev->pm.wake_work); - cancel_work_sync(&dev->reset_work); - mt76_connac_free_pending_tx_skbs(&dev->pm, NULL); - - mt7921_mutex_acquire(dev); - clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); - mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, false, false); - mt7921_mutex_release(dev); -} -EXPORT_SYMBOL_GPL(mt7921_stop); - -static int mt7921_add_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +static int +mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct mt7921_dev *dev = mt7921_hw_dev(hw); - struct mt7921_phy *phy = mt7921_hw_phy(hw); + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_phy *phy = mt792x_hw_phy(hw); struct mt76_txq *mtxq; int idx, ret = 0; - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask); - if (mvif->mt76.idx >= MT7921_MAX_INTERFACES) { + if (mvif->mt76.idx >= MT792x_MAX_INTERFACES) { ret = -ENOSPC; goto out; } @@ -311,14 +289,14 @@ static int mt7921_add_interface(struct ieee80211_hw *hw, dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx); phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx); - idx = MT7921_WTBL_RESERVED - mvif->mt76.idx; + idx = MT792x_WTBL_RESERVED - mvif->mt76.idx; - INIT_LIST_HEAD(&mvif->sta.poll_list); + INIT_LIST_HEAD(&mvif->sta.wcid.poll_list); mvif->sta.wcid.idx = idx; mvif->sta.wcid.phy_idx = mvif->mt76.band_idx; mvif->sta.wcid.hw_key_idx = -1; mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET; - mt76_packet_id_init(&mvif->sta.wcid); + mt76_wcid_init(&mvif->sta.wcid); mt7921_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); @@ -333,89 +311,55 @@ static int mt7921_add_interface(struct ieee80211_hw *hw, vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; out: - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); return ret; } -static void mt7921_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct mt7921_sta *msta = &mvif->sta; - struct mt7921_dev *dev = mt7921_hw_dev(hw); - struct mt7921_phy *phy = mt7921_hw_phy(hw); - int idx = msta->wcid.idx; - - mt7921_mutex_acquire(dev); - mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid); - mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, false); - - rcu_assign_pointer(dev->mt76.wcid[idx], NULL); - - dev->mt76.vif_mask &= ~BIT_ULL(mvif->mt76.idx); - phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx); - mt7921_mutex_release(dev); - - spin_lock_bh(&dev->sta_poll_lock); - if (!list_empty(&msta->poll_list)) - list_del_init(&msta->poll_list); - spin_unlock_bh(&dev->sta_poll_lock); - - 
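/* The interface teardown path is deleted here without a visible replacement in this hunk; consistent with the mt7921_* -> mt792x_* renames throughout this patch, it presumably moves into the shared mt792x helpers. */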
mt76_packet_id_flush(&dev->mt76, &msta->wcid); -} - static void mt7921_roc_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) { - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct mt7921_phy *phy = priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_phy *phy = priv; mt7921_mcu_abort_roc(phy, mvif, phy->roc_token_id); } void mt7921_roc_work(struct work_struct *work) { - struct mt7921_phy *phy; + struct mt792x_phy *phy; - phy = (struct mt7921_phy *)container_of(work, struct mt7921_phy, + phy = (struct mt792x_phy *)container_of(work, struct mt792x_phy, roc_work); if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) return; - mt7921_mutex_acquire(phy->dev); + mt792x_mutex_acquire(phy->dev); ieee80211_iterate_active_interfaces(phy->mt76->hw, IEEE80211_IFACE_ITER_RESUME_ALL, mt7921_roc_iter, phy); - mt7921_mutex_release(phy->dev); + mt792x_mutex_release(phy->dev); ieee80211_remain_on_channel_expired(phy->mt76->hw); } -void mt7921_roc_timer(struct timer_list *timer) -{ - struct mt7921_phy *phy = from_timer(phy, timer, roc_timer); - - ieee80211_queue_work(phy->mt76->hw, &phy->roc_work); -} - -static int mt7921_abort_roc(struct mt7921_phy *phy, struct mt7921_vif *vif) +static int mt7921_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif) { int err = 0; del_timer_sync(&phy->roc_timer); cancel_work_sync(&phy->roc_work); - mt7921_mutex_acquire(phy->dev); + mt792x_mutex_acquire(phy->dev); if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) err = mt7921_mcu_abort_roc(phy, vif, phy->roc_token_id); - mt7921_mutex_release(phy->dev); + mt792x_mutex_release(phy->dev); return err; } -static int mt7921_set_roc(struct mt7921_phy *phy, - struct mt7921_vif *vif, +static int mt7921_set_roc(struct mt792x_phy *phy, + struct mt792x_vif *vif, struct ieee80211_channel *chan, int duration, enum mt7921_roc_req type) @@ -450,13 +394,13 @@ static int mt7921_remain_on_channel(struct ieee80211_hw *hw, int duration, enum ieee80211_roc_type type) { - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct mt7921_phy *phy = mt7921_hw_phy(hw); + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_phy *phy = mt792x_hw_phy(hw); int err; - mt7921_mutex_acquire(phy->dev); + mt792x_mutex_acquire(phy->dev); err = mt7921_set_roc(phy, mvif, chan, duration, MT7921_ROC_REQ_ROC); - mt7921_mutex_release(phy->dev); + mt792x_mutex_release(phy->dev); return err; } @@ -464,20 +408,20 @@ static int mt7921_remain_on_channel(struct ieee80211_hw *hw, static int mt7921_cancel_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct mt7921_phy *phy = mt7921_hw_phy(hw); + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_phy *phy = mt792x_hw_phy(hw); return mt7921_abort_roc(phy, mvif); } -static int mt7921_set_channel(struct mt7921_phy *phy) +static int mt7921_set_channel(struct mt792x_phy *phy) { - struct mt7921_dev *dev = phy->dev; + struct mt792x_dev *dev = phy->dev; int ret; cancel_delayed_work_sync(&phy->mt76->mac_work); - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); set_bit(MT76_RESET, &phy->mt76->state); mt76_set_channel(phy->mt76); @@ -486,18 +430,18 @@ static int mt7921_set_channel(struct mt7921_phy *phy) if (ret) goto out; - mt7921_mac_set_timing(phy); + mt792x_mac_set_timeing(phy); - mt7921_mac_reset_counters(phy); + mt792x_mac_reset_counters(phy); phy->noise = 0; out: clear_bit(MT76_RESET, 
&phy->mt76->state); - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); mt76_worker_schedule(&dev->mt76.tx_worker); ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mt76->mac_work, - MT7921_WATCHDOG_TIME); + MT792x_WATCHDOG_TIME); return ret; } @@ -506,9 +450,9 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { - struct mt7921_dev *dev = mt7921_hw_dev(hw); - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct mt7921_sta *msta = sta ? (struct mt7921_sta *)sta->drv_priv : + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_sta *msta = sta ? (struct mt792x_sta *)sta->drv_priv : &mvif->sta; struct mt76_wcid *wcid = &msta->wcid; u8 *wcid_keyidx = &wcid->hw_key_idx; @@ -546,7 +490,7 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, return -EOPNOTSUPP; } - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); if (cmd == SET_KEY) { *wcid_keyidx = idx; @@ -570,7 +514,7 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, key, MCU_UNI_CMD(STA_REC_UPDATE), &mvif->wep_sta->wcid, cmd); out: - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); return err; } @@ -578,7 +522,7 @@ out: static void mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) { - struct mt7921_dev *dev = priv; + struct mt792x_dev *dev = priv; struct ieee80211_hw *hw = mt76_hw(dev); bool pm_enable = dev->pm.enable; int err; @@ -599,7 +543,7 @@ mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) static void mt7921_sniffer_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) { - struct mt7921_dev *dev = priv; + struct mt792x_dev *dev = priv; struct ieee80211_hw *hw = mt76_hw(dev); struct mt76_connac_pm *pm = &dev->pm; bool monitor = !!(hw->conf.flags & IEEE80211_CONF_MONITOR); @@ -614,7 +558,7 @@ mt7921_sniffer_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) mt7921_mcu_set_beacon_filter(dev, vif, false); } -void mt7921_set_runtime_pm(struct mt7921_dev *dev) +void mt7921_set_runtime_pm(struct mt792x_dev *dev) { struct ieee80211_hw *hw = mt76_hw(dev); struct mt76_connac_pm *pm = &dev->pm; @@ -630,8 +574,8 @@ void mt7921_set_runtime_pm(struct mt7921_dev *dev) static int mt7921_config(struct ieee80211_hw *hw, u32 changed) { - struct mt7921_dev *dev = mt7921_hw_dev(hw); - struct mt7921_phy *phy = mt7921_hw_phy(hw); + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_phy *phy = mt792x_hw_phy(hw); int ret = 0; if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { @@ -642,7 +586,7 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed) ieee80211_wake_queues(hw); } - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); if (changed & IEEE80211_CONF_CHANGE_POWER) { ret = mt7921_set_tx_sar_pwr(hw, NULL); @@ -657,25 +601,11 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed) } out: - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); return ret; } -static int -mt7921_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - unsigned int link_id, u16 queue, - const struct ieee80211_tx_queue_params *params) -{ - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - - /* no need to update right away, we'll get BSS_CHANGED_QOS */ - queue = mt76_connac_lmac_mapping(queue); - mvif->queue_params[queue] = *params; - - return 0; -} - static void mt7921_configure_filter(struct 
ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, @@ -686,7 +616,7 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw, #define MT7921_FILTER_OTHER_BSS BIT(6) #define MT7921_FILTER_ENABLE BIT(31) - struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt792x_dev *dev = mt792x_hw_dev(hw); u32 flags = MT7921_FILTER_ENABLE; #define MT7921_FILTER(_fif, _type) do { \ @@ -698,9 +628,9 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw, MT7921_FILTER(FIF_CONTROL, CONTROL); MT7921_FILTER(FIF_OTHER_BSS, OTHER_BSS); - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); mt7921_mcu_set_rxfilter(dev, flags, 0, 0); - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); *total_flags &= (FIF_OTHER_BSS | FIF_FCSFAIL | FIF_CONTROL); } @@ -710,17 +640,17 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_bss_conf *info, u64 changed) { - struct mt7921_phy *phy = mt7921_hw_phy(hw); - struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt792x_phy *phy = mt792x_hw_phy(hw); + struct mt792x_dev *dev = mt792x_hw_dev(hw); - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); if (changed & BSS_CHANGED_ERP_SLOT) { int slottime = info->use_short_slot ? 9 : 20; if (slottime != phy->slottime) { phy->slottime = slottime; - mt7921_mac_set_timing(phy); + mt792x_mac_set_timeing(phy); } } @@ -743,28 +673,60 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw, } if (changed & BSS_CHANGED_ARP_FILTER) { - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; mt76_connac_mcu_update_arp_filter(&dev->mt76, &mvif->mt76, info); } - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); +} + +static void +mt7921_regd_set_6ghz_power_type(struct ieee80211_vif *vif) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_phy *phy = mvif->phy; + struct mt792x_dev *dev = phy->dev; + + if (hweight64(dev->mt76.vif_mask) > 1) { + phy->power_type = MT_AP_DEFAULT; + goto out; + } + + switch (vif->bss_conf.power_type) { + case IEEE80211_REG_SP_AP: + phy->power_type = MT_AP_SP; + break; + case IEEE80211_REG_VLP_AP: + phy->power_type = MT_AP_VLP; + break; + case IEEE80211_REG_LPI_AP: + phy->power_type = MT_AP_LPI; + break; + case IEEE80211_REG_UNSET_AP: + default: + phy->power_type = MT_AP_DEFAULT; + break; + } + +out: + mt7921_mcu_set_clc(dev, dev->mt76.alpha2, dev->country_ie_env); } int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); - struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv; - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; int ret, idx; - idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7921_WTBL_STA - 1); + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT792x_WTBL_STA - 1); if (idx < 0) return -ENOSPC; - INIT_LIST_HEAD(&msta->poll_list); + INIT_LIST_HEAD(&msta->wcid.poll_list); msta->vif = mvif; msta->wcid.sta = 1; msta->wcid.idx = idx; @@ -787,6 +749,8 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, if (ret) return ret; + mt7921_regd_set_6ghz_power_type(vif); + mt76_connac_power_save_sched(&dev->mphy, &dev->pm); return 0; @@ -796,15 +760,15 @@ 
EXPORT_SYMBOL_GPL(mt7921_mac_sta_add); void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); - struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv; - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid, - true, mvif->ctx); + true, mvif->mt76.ctx); ewma_avg_signal_init(&msta->avg_ack_signal); @@ -814,15 +778,15 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, mt7921_mcu_sta_update(dev, sta, vif, true, MT76_STA_INFO_STATE_ASSOC); - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); } EXPORT_SYMBOL_GPL(mt7921_mac_sta_assoc); void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); - struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv; + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid); mt76_connac_pm_wake(&dev->mphy, &dev->pm); @@ -832,86 +796,32 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); if (vif->type == NL80211_IFTYPE_STATION) { - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; mvif->wep_sta = NULL; ewma_rssi_init(&mvif->rssi); if (!sta->tdls) mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid, false, - mvif->ctx); + mvif->mt76.ctx); } - spin_lock_bh(&dev->sta_poll_lock); - if (!list_empty(&msta->poll_list)) - list_del_init(&msta->poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); + if (!list_empty(&msta->wcid.poll_list)) + list_del_init(&msta->wcid.poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); mt76_connac_power_save_sched(&dev->mphy, &dev->pm); } EXPORT_SYMBOL_GPL(mt7921_mac_sta_remove); -void mt7921_tx_worker(struct mt76_worker *w) -{ - struct mt7921_dev *dev = container_of(w, struct mt7921_dev, - mt76.tx_worker); - - if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { - queue_work(dev->mt76.wq, &dev->pm.wake_work); - return; - } - - mt76_txq_schedule_all(&dev->mphy); - mt76_connac_pm_unref(&dev->mphy, &dev->pm); -} - -static void mt7921_tx(struct ieee80211_hw *hw, - struct ieee80211_tx_control *control, - struct sk_buff *skb) -{ - struct mt7921_dev *dev = mt7921_hw_dev(hw); - struct mt76_phy *mphy = hw->priv; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_vif *vif = info->control.vif; - struct mt76_wcid *wcid = &dev->mt76.global_wcid; - int qid; - - if (control->sta) { - struct mt7921_sta *sta; - - sta = (struct mt7921_sta *)control->sta->drv_priv; - wcid = &sta->wcid; - } - - if (vif && !control->sta) { - struct mt7921_vif *mvif; - - mvif = (struct mt7921_vif *)vif->drv_priv; - wcid = &mvif->sta.wcid; - } - - if (mt76_connac_pm_ref(mphy, &dev->pm)) { - mt76_tx(mphy, control->sta, wcid, skb); - mt76_connac_pm_unref(mphy, &dev->pm); - return; - } - - 
qid = skb_get_queue_mapping(skb); - if (qid >= MT_TXQ_PSD) { - qid = IEEE80211_AC_BE; - skb_set_queue_mapping(skb, qid); - } - - mt76_connac_pm_queue_skb(hw, &dev->pm, wcid, skb); -} - static int mt7921_set_rts_threshold(struct ieee80211_hw *hw, u32 val) { - struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt792x_dev *dev = mt792x_hw_dev(hw); - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); mt76_connac_mcu_set_rts_thresh(&dev->mt76, val, 0); - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); return 0; } @@ -921,10 +831,10 @@ mt7921_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params) { enum ieee80211_ampdu_mlme_action action = params->action; - struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt792x_dev *dev = mt792x_hw_dev(hw); struct ieee80211_sta *sta = params->sta; struct ieee80211_txq *txq = sta->txq[params->tid]; - struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv; + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; u16 tid = params->tid; u16 ssn = params->ssn; struct mt76_txq *mtxq; @@ -935,7 +845,7 @@ mt7921_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mtxq = (struct mt76_txq *)txq->drv_priv; - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); switch (action) { case IEEE80211_AMPDU_RX_START: mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn, @@ -954,21 +864,21 @@ mt7921_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: mtxq->aggr = false; - clear_bit(tid, &msta->ampdu_state); + clear_bit(tid, &msta->wcid.ampdu_state); mt7921_mcu_uni_tx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_START: - set_bit(tid, &msta->ampdu_state); + set_bit(tid, &msta->wcid.ampdu_state); ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; break; case IEEE80211_AMPDU_TX_STOP_CONT: mtxq->aggr = false; - clear_bit(tid, &msta->ampdu_state); + clear_bit(tid, &msta->wcid.ampdu_state); mt7921_mcu_uni_tx_ba(dev, params, false); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; } - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); return ret; } @@ -979,289 +889,22 @@ static int mt7921_sta_state(struct ieee80211_hw *hw, enum ieee80211_sta_state old_state, enum ieee80211_sta_state new_state) { - struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt792x_dev *dev = mt792x_hw_dev(hw); if (dev->pm.ds_enable) { - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); mt76_connac_sta_state_dp(&dev->mt76, old_state, new_state); - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); } return mt76_sta_state(hw, vif, sta, old_state, new_state); } -static int -mt7921_get_stats(struct ieee80211_hw *hw, - struct ieee80211_low_level_stats *stats) -{ - struct mt7921_phy *phy = mt7921_hw_phy(hw); - struct mib_stats *mib = &phy->mib; - - mt7921_mutex_acquire(phy->dev); - - stats->dot11RTSSuccessCount = mib->rts_cnt; - stats->dot11RTSFailureCount = mib->rts_retries_cnt; - stats->dot11FCSErrorCount = mib->fcs_err_cnt; - stats->dot11ACKFailureCount = mib->ack_fail_cnt; - - mt7921_mutex_release(phy->dev); - - return 0; -} - -static const char mt7921_gstrings_stats[][ETH_GSTRING_LEN] = { - /* tx counters */ - "tx_ampdu_cnt", - "tx_mpdu_attempts", - "tx_mpdu_success", - "tx_pkt_ebf_cnt", - "tx_pkt_ibf_cnt", - "tx_ampdu_len:0-1", - "tx_ampdu_len:2-10", - "tx_ampdu_len:11-19", - "tx_ampdu_len:20-28", - "tx_ampdu_len:29-37", - "tx_ampdu_len:38-46", - "tx_ampdu_len:47-55", - "tx_ampdu_len:56-79", - 
"tx_ampdu_len:80-103", - "tx_ampdu_len:104-127", - "tx_ampdu_len:128-151", - "tx_ampdu_len:152-175", - "tx_ampdu_len:176-199", - "tx_ampdu_len:200-223", - "tx_ampdu_len:224-247", - "ba_miss_count", - "tx_beamformer_ppdu_iBF", - "tx_beamformer_ppdu_eBF", - "tx_beamformer_rx_feedback_all", - "tx_beamformer_rx_feedback_he", - "tx_beamformer_rx_feedback_vht", - "tx_beamformer_rx_feedback_ht", - "tx_msdu_pack_1", - "tx_msdu_pack_2", - "tx_msdu_pack_3", - "tx_msdu_pack_4", - "tx_msdu_pack_5", - "tx_msdu_pack_6", - "tx_msdu_pack_7", - "tx_msdu_pack_8", - /* rx counters */ - "rx_mpdu_cnt", - "rx_ampdu_cnt", - "rx_ampdu_bytes_cnt", - "rx_ba_cnt", - /* per vif counters */ - "v_tx_mode_cck", - "v_tx_mode_ofdm", - "v_tx_mode_ht", - "v_tx_mode_ht_gf", - "v_tx_mode_vht", - "v_tx_mode_he_su", - "v_tx_mode_he_ext_su", - "v_tx_mode_he_tb", - "v_tx_mode_he_mu", - "v_tx_bw_20", - "v_tx_bw_40", - "v_tx_bw_80", - "v_tx_bw_160", - "v_tx_mcs_0", - "v_tx_mcs_1", - "v_tx_mcs_2", - "v_tx_mcs_3", - "v_tx_mcs_4", - "v_tx_mcs_5", - "v_tx_mcs_6", - "v_tx_mcs_7", - "v_tx_mcs_8", - "v_tx_mcs_9", - "v_tx_mcs_10", - "v_tx_mcs_11", -}; - -static void -mt7921_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u32 sset, u8 *data) -{ - struct mt7921_dev *dev = mt7921_hw_dev(hw); - - if (sset != ETH_SS_STATS) - return; - - memcpy(data, *mt7921_gstrings_stats, sizeof(mt7921_gstrings_stats)); - - if (mt76_is_sdio(&dev->mt76)) - return; - - data += sizeof(mt7921_gstrings_stats); - page_pool_ethtool_stats_get_strings(data); -} - -static int -mt7921_get_et_sset_count(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - int sset) -{ - struct mt7921_dev *dev = mt7921_hw_dev(hw); - - if (sset != ETH_SS_STATS) - return 0; - - if (mt76_is_sdio(&dev->mt76)) - return ARRAY_SIZE(mt7921_gstrings_stats); - - return ARRAY_SIZE(mt7921_gstrings_stats) + - page_pool_ethtool_stats_get_count(); -} - -static void -mt7921_ethtool_worker(void *wi_data, struct ieee80211_sta *sta) -{ - struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv; - struct mt76_ethtool_worker_info *wi = wi_data; - - if (msta->vif->mt76.idx != wi->idx) - return; - - mt76_ethtool_worker(wi, &msta->wcid.stats, false); -} - -static -void mt7921_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ethtool_stats *stats, u64 *data) -{ - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - int stats_size = ARRAY_SIZE(mt7921_gstrings_stats); - struct mt7921_phy *phy = mt7921_hw_phy(hw); - struct mt7921_dev *dev = phy->dev; - struct mib_stats *mib = &phy->mib; - struct mt76_ethtool_worker_info wi = { - .data = data, - .idx = mvif->mt76.idx, - }; - int i, ei = 0; - - mt7921_mutex_acquire(dev); - - mt7921_mac_update_mib_stats(phy); - - data[ei++] = mib->tx_ampdu_cnt; - data[ei++] = mib->tx_mpdu_attempts_cnt; - data[ei++] = mib->tx_mpdu_success_cnt; - data[ei++] = mib->tx_pkt_ebf_cnt; - data[ei++] = mib->tx_pkt_ibf_cnt; - - /* Tx ampdu stat */ - for (i = 0; i < 15; i++) - data[ei++] = phy->mt76->aggr_stats[i]; - - data[ei++] = phy->mib.ba_miss_cnt; - - /* Tx Beamformer monitor */ - data[ei++] = mib->tx_bf_ibf_ppdu_cnt; - data[ei++] = mib->tx_bf_ebf_ppdu_cnt; - - /* Tx Beamformer Rx feedback monitor */ - data[ei++] = mib->tx_bf_rx_fb_all_cnt; - data[ei++] = mib->tx_bf_rx_fb_he_cnt; - data[ei++] = mib->tx_bf_rx_fb_vht_cnt; - data[ei++] = mib->tx_bf_rx_fb_ht_cnt; - - /* Tx amsdu info (pack-count histogram) */ - for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) - data[ei++] = mib->tx_amsdu[i]; - - /* rx counters */ - data[ei++] = 
mib->rx_mpdu_cnt; - data[ei++] = mib->rx_ampdu_cnt; - data[ei++] = mib->rx_ampdu_bytes_cnt; - data[ei++] = mib->rx_ba_cnt; - - /* Add values for all stations owned by this vif */ - wi.initial_stat_idx = ei; - ieee80211_iterate_stations_atomic(hw, mt7921_ethtool_worker, &wi); - - mt7921_mutex_release(dev); - - if (!wi.sta_count) - return; - - ei += wi.worker_stat_count; - - if (!mt76_is_sdio(&dev->mt76)) { - mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei); - stats_size += page_pool_ethtool_stats_get_count(); - } - - if (ei != stats_size) - dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %d", ei, stats_size); -} - -static u64 -mt7921_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) -{ - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct mt7921_dev *dev = mt7921_hw_dev(hw); - u8 omac_idx = mvif->mt76.omac_idx; - union { - u64 t64; - u32 t32[2]; - } tsf; - u16 n; - - mt7921_mutex_acquire(dev); - - n = omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : omac_idx; - /* TSF software read */ - mt76_set(dev, MT_LPON_TCR(0, n), MT_LPON_TCR_SW_MODE); - tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(0)); - tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(0)); - - mt7921_mutex_release(dev); - - return tsf.t64; -} - -static void -mt7921_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u64 timestamp) -{ - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct mt7921_dev *dev = mt7921_hw_dev(hw); - u8 omac_idx = mvif->mt76.omac_idx; - union { - u64 t64; - u32 t32[2]; - } tsf = { .t64 = timestamp, }; - u16 n; - - mt7921_mutex_acquire(dev); - - n = omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : omac_idx; - mt76_wr(dev, MT_LPON_UTTR0(0), tsf.t32[0]); - mt76_wr(dev, MT_LPON_UTTR1(0), tsf.t32[1]); - /* TSF software overwrite */ - mt76_set(dev, MT_LPON_TCR(0, n), MT_LPON_TCR_SW_WRITE); - - mt7921_mutex_release(dev); -} - -static void -mt7921_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) -{ - struct mt7921_phy *phy = mt7921_hw_phy(hw); - struct mt7921_dev *dev = phy->dev; - - mt7921_mutex_acquire(dev); - phy->coverage_class = max_t(s16, coverage_class, 0); - mt7921_mac_set_timing(phy); - mt7921_mutex_release(dev); -} - void mt7921_scan_work(struct work_struct *work) { - struct mt7921_phy *phy; + struct mt792x_phy *phy; - phy = (struct mt7921_phy *)container_of(work, struct mt7921_phy, + phy = (struct mt792x_phy *)container_of(work, struct mt792x_phy, scan_work.work); while (true) { @@ -1294,13 +937,13 @@ static int mt7921_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_scan_request *req) { - struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt792x_dev *dev = mt792x_hw_dev(hw); struct mt76_phy *mphy = hw->priv; int err; - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); err = mt76_connac_mcu_hw_scan(mphy, vif, req); - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); return err; } @@ -1308,12 +951,12 @@ mt7921_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, static void mt7921_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt792x_dev *dev = mt792x_hw_dev(hw); struct mt76_phy *mphy = hw->priv; - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); mt76_connac_mcu_cancel_hw_scan(mphy, vif); - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); } static int @@ -1321,11 +964,11 @@ mt7921_start_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies) { - 
struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt792x_dev *dev = mt792x_hw_dev(hw); struct mt76_phy *mphy = hw->priv; int err; - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); err = mt76_connac_mcu_sched_scan_req(mphy, vif, req); if (err < 0) @@ -1333,7 +976,7 @@ mt7921_start_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, err = mt76_connac_mcu_sched_scan_enable(mphy, vif, true); out: - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); return err; } @@ -1341,13 +984,13 @@ out: static int mt7921_stop_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt792x_dev *dev = mt792x_hw_dev(hw); struct mt76_phy *mphy = hw->priv; int err; - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); err = mt76_connac_mcu_sched_scan_enable(mphy, vif, false); - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); return err; } @@ -1355,17 +998,17 @@ mt7921_stop_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) static int mt7921_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) { - struct mt7921_dev *dev = mt7921_hw_dev(hw); - struct mt7921_phy *phy = mt7921_hw_phy(hw); + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_phy *phy = mt792x_hw_phy(hw); int max_nss = hweight8(hw->wiphy->available_antennas_tx); if (!tx_ant || tx_ant != rx_ant || ffs(tx_ant) > max_nss) return -EINVAL; if ((BIT(hweight8(tx_ant)) - 1) != tx_ant) - tx_ant = BIT(ffs(tx_ant) - 1) - 1; + return -EINVAL; - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); phy->mt76->antenna_mask = tx_ant; phy->mt76->chainmask = tx_ant; @@ -1373,48 +1016,17 @@ mt7921_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) mt76_set_stream_caps(phy->mt76, true); mt7921_set_stream_he_caps(phy); - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); return 0; } -static void mt7921_sta_statistics(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct station_info *sinfo) -{ - struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv; - struct rate_info *txrate = &msta->wcid.rate; - - if (!txrate->legacy && !txrate->flags) - return; - - if (txrate->legacy) { - sinfo->txrate.legacy = txrate->legacy; - } else { - sinfo->txrate.mcs = txrate->mcs; - sinfo->txrate.nss = txrate->nss; - sinfo->txrate.bw = txrate->bw; - sinfo->txrate.he_gi = txrate->he_gi; - sinfo->txrate.he_dcm = txrate->he_dcm; - sinfo->txrate.he_ru_alloc = txrate->he_ru_alloc; - } - sinfo->txrate.flags = txrate->flags; - sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); - - sinfo->ack_signal = (s8)msta->ack_signal; - sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL); - - sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->avg_ack_signal); - sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG); -} - #ifdef CONFIG_PM static int mt7921_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) { - struct mt7921_dev *dev = mt7921_hw_dev(hw); - struct mt7921_phy *phy = mt7921_hw_phy(hw); + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_phy *phy = mt792x_hw_phy(hw); cancel_delayed_work_sync(&phy->scan_work); cancel_delayed_work_sync(&phy->mt76->mac_work); @@ -1422,7 +1034,7 @@ static int mt7921_suspend(struct ieee80211_hw *hw, cancel_delayed_work_sync(&dev->pm.ps_work); mt76_connac_free_pending_tx_skbs(&dev->pm, NULL); - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); ieee80211_iterate_active_interfaces(hw, @@ 
-1430,17 +1042,17 @@ static int mt7921_suspend(struct ieee80211_hw *hw, mt7921_mcu_set_suspend_iter, &dev->mphy); - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); return 0; } static int mt7921_resume(struct ieee80211_hw *hw) { - struct mt7921_dev *dev = mt7921_hw_dev(hw); - struct mt7921_phy *phy = mt7921_hw_phy(hw); + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_phy *phy = mt792x_hw_phy(hw); - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); set_bit(MT76_STATE_RUNNING, &phy->mt76->state); ieee80211_iterate_active_interfaces(hw, @@ -1449,51 +1061,34 @@ static int mt7921_resume(struct ieee80211_hw *hw) &dev->mphy); ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work, - MT7921_WATCHDOG_TIME); + MT792x_WATCHDOG_TIME); - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); return 0; } -static void mt7921_set_wakeup(struct ieee80211_hw *hw, bool enabled) -{ - struct mt7921_dev *dev = mt7921_hw_dev(hw); - struct mt76_dev *mdev = &dev->mt76; - - device_set_wakeup_enable(mdev->dev, enabled); -} - static void mt7921_set_rekey_data(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_gtk_rekey_data *data) { - struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt792x_dev *dev = mt792x_hw_dev(hw); - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); mt76_connac_mcu_update_gtk_rekey(hw, vif, data); - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); } #endif /* CONFIG_PM */ -static void mt7921_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u32 queues, bool drop) -{ - struct mt7921_dev *dev = mt7921_hw_dev(hw); - - wait_event_timeout(dev->mt76.tx_wait, !mt76_has_tx_pending(&dev->mphy), - HZ / 2); -} - static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool enabled) { - struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv; - struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); if (enabled) set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); @@ -1503,7 +1098,7 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw, mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76, vif, &msta->wcid, MCU_UNI_CMD(STA_REC_UPDATE)); - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); } #if IS_ENABLED(CONFIG_IPV6) @@ -1511,8 +1106,8 @@ static void mt7921_ipv6_addr_change(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct inet6_dev *idev) { - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct mt7921_dev *dev = mvif->phy->dev; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mvif->phy->dev; struct inet6_ifaddr *ifa; struct in6_addr ns_addrs[IEEE80211_BSS_ARP_ADDR_LIST_LEN]; struct sk_buff *skb; @@ -1570,28 +1165,25 @@ int mt7921_set_tx_sar_pwr(struct ieee80211_hw *hw, const struct cfg80211_sar_specs *sar) { struct mt76_phy *mphy = hw->priv; - int err; if (sar) { - err = mt76_init_sar_power(hw, sar); + int err = mt76_init_sar_power(hw, sar); + if (err) return err; } + mt792x_init_acpi_sar_power(mt792x_hw_phy(hw), !sar); - mt7921_init_acpi_sar_power(mt7921_hw_phy(hw), !sar); - - err = mt76_connac_mcu_set_rate_txpower(mphy); - - return err; + return mt76_connac_mcu_set_rate_txpower(mphy); } static int mt7921_set_sar_specs(struct ieee80211_hw *hw, const struct cfg80211_sar_specs *sar) { - struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct 
mt792x_dev *dev = mt792x_hw_dev(hw); int err; - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); err = mt7921_mcu_set_clc(dev, dev->mt76.alpha2, dev->country_ie_env); if (err < 0) @@ -1599,7 +1191,7 @@ static int mt7921_set_sar_specs(struct ieee80211_hw *hw, err = mt7921_set_tx_sar_pwr(hw, sar); out: - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); return err; } @@ -1609,26 +1201,26 @@ mt7921_channel_switch_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_chan_def *chandef) { - struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt792x_dev *dev = mt792x_hw_dev(hw); - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); mt7921_mcu_uni_add_beacon_offload(dev, hw, vif, true); - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); } static int mt7921_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf) { - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct mt7921_phy *phy = mt7921_hw_phy(hw); - struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_phy *phy = mt792x_hw_phy(hw); + struct mt792x_dev *dev = mt792x_hw_dev(hw); int err; - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); err = mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, - true, mvif->ctx); + true, mvif->mt76.ctx); if (err) goto out; @@ -1639,7 +1231,7 @@ mt7921_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, err = mt7921_mcu_sta_update(dev, NULL, vif, true, MT76_STA_INFO_STATE_NONE); out: - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); return err; } @@ -1648,22 +1240,22 @@ static void mt7921_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf) { - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct mt7921_phy *phy = mt7921_hw_phy(hw); - struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_phy *phy = mt792x_hw_phy(hw); + struct mt792x_dev *dev = mt792x_hw_dev(hw); int err; - mt7921_mutex_acquire(dev); + mt792x_mutex_acquire(dev); err = mt7921_mcu_set_bss_pm(dev, vif, false); if (err) goto out; mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, false, - mvif->ctx); + mvif->mt76.ctx); out: - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); } static int @@ -1682,10 +1274,10 @@ mt7921_remove_chanctx(struct ieee80211_hw *hw, static void mt7921_ctx_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) { - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct ieee80211_chanctx_conf *ctx = priv; - if (ctx != mvif->ctx) + if (ctx != mvif->mt76.ctx) return; if (vif->type == NL80211_IFTYPE_MONITOR) @@ -1699,77 +1291,47 @@ mt7921_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx, u32 changed) { - struct mt7921_phy *phy = mt7921_hw_phy(hw); + struct mt792x_phy *phy = mt792x_hw_phy(hw); - mt7921_mutex_acquire(phy->dev); + mt792x_mutex_acquire(phy->dev); ieee80211_iterate_active_interfaces(phy->mt76->hw, IEEE80211_IFACE_ITER_ACTIVE, mt7921_ctx_iter, ctx); - mt7921_mutex_release(phy->dev); -} - -static int -mt7921_assign_vif_chanctx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *link_conf, - struct ieee80211_chanctx_conf *ctx) -{ - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct mt7921_dev *dev = mt7921_hw_dev(hw); 
- - mutex_lock(&dev->mt76.mutex); - mvif->ctx = ctx; - mutex_unlock(&dev->mt76.mutex); - - return 0; -} - -static void -mt7921_unassign_vif_chanctx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *link_conf, - struct ieee80211_chanctx_conf *ctx) -{ - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct mt7921_dev *dev = mt7921_hw_dev(hw); - - mutex_lock(&dev->mt76.mutex); - mvif->ctx = NULL; - mutex_unlock(&dev->mt76.mutex); + mt792x_mutex_release(phy->dev); } static void mt7921_mgd_prepare_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_prep_tx_info *info) { - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); u16 duration = info->duration ? info->duration : jiffies_to_msecs(HZ); - mt7921_mutex_acquire(dev); - mt7921_set_roc(mvif->phy, mvif, mvif->ctx->def.chan, duration, + mt792x_mutex_acquire(dev); + mt7921_set_roc(mvif->phy, mvif, mvif->mt76.ctx->def.chan, duration, MT7921_ROC_REQ_JOIN); - mt7921_mutex_release(dev); + mt792x_mutex_release(dev); } static void mt7921_mgd_complete_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_prep_tx_info *info) { - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; mt7921_abort_roc(mvif->phy, mvif); } const struct ieee80211_ops mt7921_ops = { - .tx = mt7921_tx, + .tx = mt792x_tx, .start = mt7921_start, - .stop = mt7921_stop, + .stop = mt792x_stop, .add_interface = mt7921_add_interface, - .remove_interface = mt7921_remove_interface, + .remove_interface = mt792x_remove_interface, .config = mt7921_config, - .conf_tx = mt7921_conf_tx, + .conf_tx = mt792x_conf_tx, .configure_filter = mt7921_configure_filter, .bss_info_changed = mt7921_bss_info_changed, .start_ap = mt7921_start_ap, @@ -1787,19 +1349,19 @@ const struct ieee80211_ops mt7921_ops = { .release_buffered_frames = mt76_release_buffered_frames, .channel_switch_beacon = mt7921_channel_switch_beacon, .get_txpower = mt76_get_txpower, - .get_stats = mt7921_get_stats, - .get_et_sset_count = mt7921_get_et_sset_count, - .get_et_strings = mt7921_get_et_strings, - .get_et_stats = mt7921_get_et_stats, - .get_tsf = mt7921_get_tsf, - .set_tsf = mt7921_set_tsf, + .get_stats = mt792x_get_stats, + .get_et_sset_count = mt792x_get_et_sset_count, + .get_et_strings = mt792x_get_et_strings, + .get_et_stats = mt792x_get_et_stats, + .get_tsf = mt792x_get_tsf, + .set_tsf = mt792x_set_tsf, .get_survey = mt76_get_survey, .get_antenna = mt76_get_antenna, .set_antenna = mt7921_set_antenna, - .set_coverage_class = mt7921_set_coverage_class, + .set_coverage_class = mt792x_set_coverage_class, .hw_scan = mt7921_hw_scan, .cancel_hw_scan = mt7921_cancel_hw_scan, - .sta_statistics = mt7921_sta_statistics, + .sta_statistics = mt792x_sta_statistics, .sched_scan_start = mt7921_start_sched_scan, .sched_scan_stop = mt7921_stop_sched_scan, CFG80211_TESTMODE_CMD(mt7921_testmode_cmd) @@ -1807,18 +1369,18 @@ const struct ieee80211_ops mt7921_ops = { #ifdef CONFIG_PM .suspend = mt7921_suspend, .resume = mt7921_resume, - .set_wakeup = mt7921_set_wakeup, + .set_wakeup = mt792x_set_wakeup, .set_rekey_data = mt7921_set_rekey_data, #endif /* CONFIG_PM */ - .flush = mt7921_flush, + .flush = mt792x_flush, .set_sar_specs = mt7921_set_sar_specs, .remain_on_channel = mt7921_remain_on_channel, .cancel_remain_on_channel 
= mt7921_cancel_remain_on_channel, .add_chanctx = mt7921_add_chanctx, .remove_chanctx = mt7921_remove_chanctx, .change_chanctx = mt7921_change_chanctx, - .assign_vif_chanctx = mt7921_assign_vif_chanctx, - .unassign_vif_chanctx = mt7921_unassign_vif_chanctx, + .assign_vif_chanctx = mt792x_assign_vif_chanctx, + .unassign_vif_chanctx = mt792x_unassign_vif_chanctx, .mgd_prepare_tx = mt7921_mgd_prepare_tx, .mgd_complete_tx = mt7921_mgd_complete_tx, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c index f55caa00ac69..2cc2d2788f83 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c @@ -4,9 +4,9 @@ #include <linux/fs.h> #include <linux/firmware.h> #include "mt7921.h" -#include "mt7921_trace.h" #include "mcu.h" #include "../mt76_connac2_mac.h" +#include "../mt792x_trace.h" #define MT_STA_BFER BIT(0) #define MT_STA_BFEE BIT(1) @@ -25,7 +25,7 @@ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd, if (!skb) { dev_err(mdev->dev, "Message %08x (seq %d) timeout\n", cmd, seq); - mt7921_reset(mdev); + mt792x_reset(mdev); return -ETIMEDOUT; } @@ -69,7 +69,7 @@ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd, } EXPORT_SYMBOL_GPL(mt7921_mcu_parse_response); -static int mt7921_mcu_read_eeprom(struct mt7921_dev *dev, u32 offset, u8 *val) +static int mt7921_mcu_read_eeprom(struct mt792x_dev *dev, u32 offset, u8 *val) { struct mt7921_mcu_eeprom_info *res, req = { .addr = cpu_to_le32(round_down(offset, @@ -96,7 +96,7 @@ static int mt7921_mcu_set_ipv6_ns_filter(struct mt76_dev *dev, struct ieee80211_vif *vif, bool suspend) { - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct { struct { u8 bss_idx; @@ -134,7 +134,7 @@ void mt7921_mcu_set_suspend_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) #endif /* CONFIG_PM */ static void -mt7921_mcu_uni_roc_event(struct mt7921_dev *dev, struct sk_buff *skb) +mt7921_mcu_uni_roc_event(struct mt792x_dev *dev, struct sk_buff *skb) { struct mt7921_roc_grant_tlv *grant; struct mt76_connac2_mcu_rxd *rxd; @@ -157,17 +157,17 @@ mt7921_mcu_uni_roc_event(struct mt7921_dev *dev, struct sk_buff *skb) } static void -mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb) +mt7921_mcu_scan_event(struct mt792x_dev *dev, struct sk_buff *skb) { struct mt76_phy *mphy = &dev->mt76.phy; - struct mt7921_phy *phy = (struct mt7921_phy *)mphy->priv; + struct mt792x_phy *phy = (struct mt792x_phy *)mphy->priv; spin_lock_bh(&dev->mt76.lock); __skb_queue_tail(&phy->scan_event_list, skb); spin_unlock_bh(&dev->mt76.lock); ieee80211_queue_delayed_work(mphy->hw, &phy->scan_work, - MT7921_HW_SCAN_TIMEOUT); + MT792x_HW_SCAN_TIMEOUT); } static void @@ -188,7 +188,7 @@ mt7921_mcu_connection_loss_iter(void *priv, u8 *mac, } static void -mt7921_mcu_connection_loss_event(struct mt7921_dev *dev, struct sk_buff *skb) +mt7921_mcu_connection_loss_event(struct mt792x_dev *dev, struct sk_buff *skb) { struct mt76_connac_beacon_loss_event *event; struct mt76_phy *mphy = &dev->mt76.phy; @@ -202,7 +202,7 @@ mt7921_mcu_connection_loss_event(struct mt7921_dev *dev, struct sk_buff *skb) } static void -mt7921_mcu_debug_msg_event(struct mt7921_dev *dev, struct sk_buff *skb) +mt7921_mcu_debug_msg_event(struct mt792x_dev *dev, struct sk_buff *skb) { struct mt7921_debug_msg { __le16 id; @@ -229,7 +229,7 @@ mt7921_mcu_debug_msg_event(struct mt7921_dev *dev, struct sk_buff *skb) } static 
void -mt7921_mcu_low_power_event(struct mt7921_dev *dev, struct sk_buff *skb) +mt7921_mcu_low_power_event(struct mt792x_dev *dev, struct sk_buff *skb) { struct mt7921_mcu_lp_event { u8 state; @@ -243,7 +243,7 @@ mt7921_mcu_low_power_event(struct mt7921_dev *dev, struct sk_buff *skb) } static void -mt7921_mcu_tx_done_event(struct mt7921_dev *dev, struct sk_buff *skb) +mt7921_mcu_tx_done_event(struct mt792x_dev *dev, struct sk_buff *skb) { struct mt7921_mcu_tx_done_event *event; @@ -254,7 +254,7 @@ mt7921_mcu_tx_done_event(struct mt7921_dev *dev, struct sk_buff *skb) } static void -mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb) +mt7921_mcu_rx_unsolicited_event(struct mt792x_dev *dev, struct sk_buff *skb) { struct mt76_connac2_mcu_rxd *rxd; @@ -288,7 +288,7 @@ mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb) } static void -mt7921_mcu_uni_rx_unsolicited_event(struct mt7921_dev *dev, +mt7921_mcu_uni_rx_unsolicited_event(struct mt792x_dev *dev, struct sk_buff *skb) { struct mt76_connac2_mcu_rxd *rxd; @@ -305,7 +305,7 @@ mt7921_mcu_uni_rx_unsolicited_event(struct mt7921_dev *dev, dev_kfree_skb(skb); } -void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb) +void mt7921_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb) { struct mt76_connac2_mcu_rxd *rxd; @@ -339,11 +339,11 @@ void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb) } /** starec & wtbl **/ -int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev, +int mt7921_mcu_uni_tx_ba(struct mt792x_dev *dev, struct ieee80211_ampdu_params *params, bool enable) { - struct mt7921_sta *msta = (struct mt7921_sta *)params->sta->drv_priv; + struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv; if (enable && !params->amsdu) msta->wcid.amsdu = false; @@ -353,52 +353,29 @@ int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev, enable, true); } -int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev, +int mt7921_mcu_uni_rx_ba(struct mt792x_dev *dev, struct ieee80211_ampdu_params *params, bool enable) { - struct mt7921_sta *msta = (struct mt7921_sta *)params->sta->drv_priv; + struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv; return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params, MCU_UNI_CMD(STA_REC_UPDATE), enable, false); } -static char *mt7921_patch_name(struct mt7921_dev *dev) -{ - char *ret; - - if (is_mt7922(&dev->mt76)) - ret = MT7922_ROM_PATCH; - else - ret = MT7921_ROM_PATCH; - - return ret; -} - -static char *mt7921_ram_name(struct mt7921_dev *dev) -{ - char *ret; - - if (is_mt7922(&dev->mt76)) - ret = MT7922_FIRMWARE_WM; - else - ret = MT7921_FIRMWARE_WM; - - return ret; -} - -static int mt7921_load_clc(struct mt7921_dev *dev, const char *fw_name) +static int mt7921_load_clc(struct mt792x_dev *dev, const char *fw_name) { const struct mt76_connac2_fw_trailer *hdr; const struct mt76_connac2_fw_region *region; const struct mt7921_clc *clc; struct mt76_dev *mdev = &dev->mt76; - struct mt7921_phy *phy = &dev->phy; + struct mt792x_phy *phy = &dev->phy; const struct firmware *fw; int ret, i, len, offset = 0; u8 *clc_base = NULL, hw_encap = 0; + dev->phy.clc_chan_conf = 0xff; if (mt7921_disable_clc || mt76_is_usb(&dev->mt76)) return 0; @@ -472,42 +449,130 @@ out: return ret; } -static int mt7921_load_firmware(struct mt7921_dev *dev) +static void mt7921_mcu_parse_tx_resource(struct mt76_dev *dev, + struct sk_buff *skb) { - int ret; + struct mt76_sdio *sdio = &dev->sdio; + struct mt7921_tx_resource { + __le32 version; + __le32 
pse_data_quota; + __le32 pse_mcu_quota; + __le32 ple_data_quota; + __le32 ple_mcu_quota; + __le16 pse_page_size; + __le16 ple_page_size; + u8 pp_padding; + u8 pad[3]; + } __packed * tx_res; + + tx_res = (struct mt7921_tx_resource *)skb->data; + sdio->sched.pse_data_quota = le32_to_cpu(tx_res->pse_data_quota); + sdio->sched.pse_mcu_quota = le32_to_cpu(tx_res->pse_mcu_quota); + sdio->sched.ple_data_quota = le32_to_cpu(tx_res->ple_data_quota); + sdio->sched.pse_page_size = le16_to_cpu(tx_res->pse_page_size); + sdio->sched.deficit = tx_res->pp_padding; +} + +static void mt7921_mcu_parse_phy_cap(struct mt76_dev *dev, + struct sk_buff *skb) +{ + struct mt7921_phy_cap { + u8 ht; + u8 vht; + u8 _5g; + u8 max_bw; + u8 nss; + u8 dbdc; + u8 tx_ldpc; + u8 rx_ldpc; + u8 tx_stbc; + u8 rx_stbc; + u8 hw_path; + u8 he; + } __packed * cap; + + enum { + WF0_24G, + WF0_5G + }; + + cap = (struct mt7921_phy_cap *)skb->data; + + dev->phy.antenna_mask = BIT(cap->nss) - 1; + dev->phy.chainmask = dev->phy.antenna_mask; + dev->phy.cap.has_2ghz = cap->hw_path & BIT(WF0_24G); + dev->phy.cap.has_5ghz = cap->hw_path & BIT(WF0_5G); +} + +static int mt7921_mcu_get_nic_capability(struct mt792x_phy *mphy) +{ + struct mt76_connac_cap_hdr { + __le16 n_element; + u8 rsv[2]; + } __packed * hdr; + struct sk_buff *skb; + struct mt76_phy *phy = mphy->mt76; + int ret, i; - ret = mt76_connac2_load_patch(&dev->mt76, mt7921_patch_name(dev)); + ret = mt76_mcu_send_and_get_msg(phy->dev, MCU_CE_CMD(GET_NIC_CAPAB), + NULL, 0, true, &skb); if (ret) return ret; - if (mt76_is_sdio(&dev->mt76)) { - /* activate again */ - ret = __mt7921_mcu_fw_pmctrl(dev); - if (!ret) - ret = __mt7921_mcu_drv_pmctrl(dev); + hdr = (struct mt76_connac_cap_hdr *)skb->data; + if (skb->len < sizeof(*hdr)) { + ret = -EINVAL; + goto out; } - ret = mt76_connac2_load_ram(&dev->mt76, mt7921_ram_name(dev), NULL); - if (ret) - return ret; + skb_pull(skb, sizeof(*hdr)); - if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY, - MT_TOP_MISC2_FW_N9_RDY, 1500)) { - dev_err(dev->mt76.dev, "Timeout for initializing firmware\n"); + for (i = 0; i < le16_to_cpu(hdr->n_element); i++) { + struct tlv_hdr { + __le32 type; + __le32 len; + } __packed * tlv = (struct tlv_hdr *)skb->data; + int len; - return -EIO; - } + if (skb->len < sizeof(*tlv)) + break; -#ifdef CONFIG_PM - dev->mt76.hw->wiphy->wowlan = &mt76_connac_wowlan_support; -#endif /* CONFIG_PM */ + skb_pull(skb, sizeof(*tlv)); - dev_dbg(dev->mt76.dev, "Firmware init done\n"); + len = le32_to_cpu(tlv->len); + if (skb->len < len) + break; - return 0; + switch (le32_to_cpu(tlv->type)) { + case MT_NIC_CAP_6G: + phy->cap.has_6ghz = skb->data[0]; + break; + case MT_NIC_CAP_MAC_ADDR: + memcpy(phy->macaddr, (void *)skb->data, ETH_ALEN); + break; + case MT_NIC_CAP_PHY: + mt7921_mcu_parse_phy_cap(phy->dev, skb); + break; + case MT_NIC_CAP_TX_RESOURCE: + if (mt76_is_sdio(phy->dev)) + mt7921_mcu_parse_tx_resource(phy->dev, + skb); + break; + case MT_NIC_CAP_CHIP_CAP: + memcpy(&mphy->chip_cap, (void *)skb->data, sizeof(u64)); + break; + default: + break; + } + skb_pull(skb, len); + } +out: + dev_kfree_skb(skb); + + return ret; } -int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl) +int mt7921_mcu_fw_log_2_host(struct mt792x_dev *dev, u8 ctrl) { struct { u8 ctrl_val; @@ -520,20 +585,20 @@ int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl) &data, sizeof(data), false); } -int mt7921_run_firmware(struct mt7921_dev *dev) +int mt7921_run_firmware(struct mt792x_dev *dev) { int err; - err = 
mt7921_load_firmware(dev); + err = mt792x_load_firmware(dev); if (err) return err; - err = mt76_connac_mcu_get_nic_capability(&dev->mphy); + err = mt7921_mcu_get_nic_capability(&dev->phy); if (err) return err; set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); - err = mt7921_load_clc(dev, mt7921_ram_name(dev)); + err = mt7921_load_clc(dev, mt792x_ram_name(dev)); if (err) return err; @@ -541,9 +606,9 @@ int mt7921_run_firmware(struct mt7921_dev *dev) } EXPORT_SYMBOL_GPL(mt7921_run_firmware); -int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif) +int mt7921_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif) { - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct edca { __le16 cw_min; __le16 cw_max; @@ -635,12 +700,12 @@ int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif) &req_mu, sizeof(req_mu), false); } -int mt7921_mcu_set_roc(struct mt7921_phy *phy, struct mt7921_vif *vif, +int mt7921_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, struct ieee80211_channel *chan, int duration, enum mt7921_roc_req type, u8 token_id) { int center_ch = ieee80211_frequency_to_channel(chan->center_freq); - struct mt7921_dev *dev = phy->dev; + struct mt792x_dev *dev = phy->dev; struct { struct { u8 rsv[4]; @@ -702,10 +767,10 @@ int mt7921_mcu_set_roc(struct mt7921_phy *phy, struct mt7921_vif *vif, &req, sizeof(req), false); } -int mt7921_mcu_abort_roc(struct mt7921_phy *phy, struct mt7921_vif *vif, +int mt7921_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, u8 token_id) { - struct mt7921_dev *dev = phy->dev; + struct mt792x_dev *dev = phy->dev; struct { struct { u8 rsv[4]; @@ -732,9 +797,9 @@ int mt7921_mcu_abort_roc(struct mt7921_phy *phy, struct mt7921_vif *vif, &req, sizeof(req), false); } -int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd) +int mt7921_mcu_set_chan_info(struct mt792x_phy *phy, int cmd) { - struct mt7921_dev *dev = phy->dev; + struct mt792x_dev *dev = phy->dev; struct cfg80211_chan_def *chandef = &phy->mt76->chandef; int freq1 = chandef->center_freq1; struct { @@ -791,7 +856,7 @@ int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd) return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true); } -int mt7921_mcu_set_eeprom(struct mt7921_dev *dev) +int mt7921_mcu_set_eeprom(struct mt792x_dev *dev) { struct req_hdr { u8 buffer_mode; @@ -807,9 +872,9 @@ int mt7921_mcu_set_eeprom(struct mt7921_dev *dev) } EXPORT_SYMBOL_GPL(mt7921_mcu_set_eeprom); -int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif) +int mt7921_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif) { - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct { struct { u8 bss_idx; @@ -845,10 +910,10 @@ int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif) } static int -mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif, +mt7921_mcu_uni_bss_bcnft(struct mt792x_dev *dev, struct ieee80211_vif *vif, bool enable) { - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct { struct { u8 bss_idx; @@ -881,10 +946,10 @@ mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif, } int -mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif, +mt7921_mcu_set_bss_pm(struct mt792x_dev *dev, struct 
ieee80211_vif *vif, bool enable) { - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct { u8 bss_idx; u8 dtim_period; @@ -918,11 +983,11 @@ mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif, &req, sizeof(req), false); } -int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta, +int mt7921_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta, struct ieee80211_vif *vif, bool enable, enum mt76_sta_info_state state) { - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; int rssi = -ewma_rssi_read(&mvif->rssi); struct mt76_sta_cmd_info info = { .sta = sta, @@ -933,60 +998,16 @@ int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta, .offload_fw = true, .rcpi = to_rcpi(rssi), }; - struct mt7921_sta *msta; + struct mt792x_sta *msta; - msta = sta ? (struct mt7921_sta *)sta->drv_priv : NULL; + msta = sta ? (struct mt792x_sta *)sta->drv_priv : NULL; info.wcid = msta ? &msta->wcid : &mvif->sta.wcid; info.newly = msta ? state != MT76_STA_INFO_STATE_ASSOC : true; return mt76_connac_mcu_sta_cmd(&dev->mphy, &info); } -int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev) -{ - struct mt76_phy *mphy = &dev->mt76.phy; - struct mt76_connac_pm *pm = &dev->pm; - int err = 0; - - mutex_lock(&pm->mutex); - - if (!test_bit(MT76_STATE_PM, &mphy->state)) - goto out; - - err = __mt7921_mcu_drv_pmctrl(dev); -out: - mutex_unlock(&pm->mutex); - - if (err) - mt7921_reset(&dev->mt76); - - return err; -} -EXPORT_SYMBOL_GPL(mt7921_mcu_drv_pmctrl); - -int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev) -{ - struct mt76_phy *mphy = &dev->mt76.phy; - struct mt76_connac_pm *pm = &dev->pm; - int err = 0; - - mutex_lock(&pm->mutex); - - if (mt76_connac_skip_fw_pmctrl(mphy, pm)) - goto out; - - err = __mt7921_mcu_fw_pmctrl(dev); -out: - mutex_unlock(&pm->mutex); - - if (err) - mt7921_reset(&dev->mt76); - - return err; -} -EXPORT_SYMBOL_GPL(mt7921_mcu_fw_pmctrl); - -int mt7921_mcu_set_beacon_filter(struct mt7921_dev *dev, +int mt7921_mcu_set_beacon_filter(struct mt792x_dev *dev, struct ieee80211_vif *vif, bool enable) { @@ -1021,7 +1042,7 @@ int mt7921_mcu_set_beacon_filter(struct mt7921_dev *dev, return 0; } -int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr) +int mt7921_get_txpwr_info(struct mt792x_dev *dev, struct mt7921_txpwr *txpwr) { struct mt7921_txpwr_event *event; struct mt7921_txpwr_req req = { @@ -1044,7 +1065,7 @@ int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr) return 0; } -int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif, +int mt7921_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif, bool enable) { struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; @@ -1074,7 +1095,7 @@ int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif, true); } -int mt7921_mcu_config_sniffer(struct mt7921_vif *vif, +int mt7921_mcu_config_sniffer(struct mt792x_vif *vif, struct ieee80211_chanctx_conf *ctx) { struct cfg80211_chan_def *chandef = &ctx->def; @@ -1143,12 +1164,12 @@ int mt7921_mcu_config_sniffer(struct mt7921_vif *vif, } int -mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev, +mt7921_mcu_uni_add_beacon_offload(struct mt792x_dev *dev, struct ieee80211_hw *hw, struct ieee80211_vif *vif, bool enable) { - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct 
mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt76_wcid *wcid = &dev->mt76.global_wcid; struct ieee80211_mutable_offsets offs; struct { @@ -1221,12 +1242,14 @@ mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev, } static -int __mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2, +int __mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2, enum environment_cap env_cap, struct mt7921_clc *clc, u8 idx) { - struct sk_buff *skb; +#define CLC_CAP_EVT_EN BIT(0) +#define CLC_CAP_DTS_EN BIT(1) + struct sk_buff *skb, *ret_skb = NULL; struct { u8 ver; u8 pad0; @@ -1234,14 +1257,16 @@ int __mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2, u8 idx; u8 env; u8 acpi_conf; - u8 pad1; + u8 cap; u8 alpha2[2]; u8 type[2]; - u8 rsvd[64]; + u8 env_6g; + u8 rsvd[63]; } __packed req = { .idx = idx, .env = env_cap, - .acpi_conf = mt7921_acpi_get_flags(&dev->phy), + .env_6g = dev->phy.power_type, + .acpi_conf = mt792x_acpi_get_flags(&dev->phy), }; int ret, valid_cnt = 0; u8 i, *pos; @@ -1249,6 +1274,11 @@ int __mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2, if (!clc) return 0; + if (dev->phy.chip_cap & MT792x_CHIP_CAP_CLC_EVT_EN) + req.cap |= CLC_CAP_EVT_EN; + if (mt76_find_power_limits_node(&dev->mt76)) + req.cap |= CLC_CAP_DTS_EN; + pos = clc->data; for (i = 0; i < clc->nr_country; i++) { struct mt7921_clc_rule *rule = (struct mt7921_clc_rule *)pos; @@ -1270,10 +1300,21 @@ int __mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2, return -ENOMEM; skb_put_data(skb, rule->data, len); - ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_CE_CMD(SET_CLC), false); + ret = mt76_mcu_skb_send_and_get_msg(&dev->mt76, skb, + MCU_CE_CMD(SET_CLC), + !!(req.cap & CLC_CAP_EVT_EN), + &ret_skb); if (ret < 0) return ret; + + if (ret_skb) { + struct mt7921_clc_info_tlv *info; + + info = (struct mt7921_clc_info_tlv *)(ret_skb->data + 4); + dev->phy.clc_chan_conf = info->chan_conf; + dev_kfree_skb(ret_skb); + } + valid_cnt++; } @@ -1283,10 +1324,10 @@ int __mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2, return 0; } -int mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2, +int mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2, enum environment_cap env_cap) { - struct mt7921_phy *phy = (struct mt7921_phy *)&dev->phy; + struct mt792x_phy *phy = (struct mt792x_phy *)&dev->phy; int i, ret; /* submit all clc config */ @@ -1305,7 +1346,24 @@ int mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2, return 0; } -int mt7921_mcu_set_rxfilter(struct mt7921_dev *dev, u32 fif, +int mt7921_mcu_get_temperature(struct mt792x_phy *phy) +{ + struct mt792x_dev *dev = phy->dev; + struct { + u8 ctrl_id; + u8 action; + u8 band_idx; + u8 rsv[5]; + } req = { + .ctrl_id = THERMAL_SENSOR_TEMP_QUERY, + .band_idx = phy->mt76->band_idx, + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_CTRL), &req, + sizeof(req), true); +} + +int mt7921_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif, u8 bit_op, u32 bit_map) { struct { diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h index 9b0aa3b70f0e..f9a259ee6b82 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h @@ -99,4 +99,17 @@ struct mt7921_rftest_evt { __le32 param0; __le32 param1; } __packed; + +struct mt7921_clc_info_tlv { + __le16 tag; + __le16 len; + + u8 chan_conf; /* BIT(0) : Enable UNII-4 + * BIT(1) : Enable UNII-5 + * BIT(2) : Enable UNII-6 + * BIT(3) : Enable UNII-7 + * BIT(4) : Enable UNII-8 + */ + u8 rsv[63]; +} 
__packed; #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h index 149acb1662d5..f28621121927 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h @@ -4,22 +4,8 @@ #ifndef __MT7921_H #define __MT7921_H -#include <linux/interrupt.h> -#include <linux/ktime.h> -#include "../mt76_connac_mcu.h" +#include "../mt792x.h" #include "regs.h" -#include "acpi_sar.h" - -#define MT7921_MAX_INTERFACES 4 -#define MT7921_WTBL_SIZE 20 -#define MT7921_WTBL_RESERVED (MT7921_WTBL_SIZE - 1) -#define MT7921_WTBL_STA (MT7921_WTBL_RESERVED - \ - MT7921_MAX_INTERFACES) - -#define MT7921_PM_TIMEOUT (HZ / 12) -#define MT7921_HW_SCAN_TIMEOUT (HZ / 10) -#define MT7921_WATCHDOG_TIME (HZ / 4) -#define MT7921_RESET_TIMEOUT (30 * HZ) #define MT7921_TX_RING_SIZE 2048 #define MT7921_TX_MCU_RING_SIZE 256 @@ -28,35 +14,17 @@ #define MT7921_RX_RING_SIZE 1536 #define MT7921_RX_MCU_RING_SIZE 512 -#define MT7921_DRV_OWN_RETRY_COUNT 10 -#define MT7921_MCU_INIT_RETRY_COUNT 10 -#define MT7921_WFSYS_INIT_RETRY_COUNT 2 - -#define MT7921_FW_TAG_FEATURE 4 -#define MT7921_FW_CAP_CNM BIT(7) - -#define MT7921_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7961_1.bin" -#define MT7921_ROM_PATCH "mediatek/WIFI_MT7961_patch_mcu_1_2_hdr.bin" - -#define MT7922_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7922_1.bin" -#define MT7922_ROM_PATCH "mediatek/WIFI_MT7922_patch_mcu_1_1_hdr.bin" - #define MT7921_EEPROM_SIZE 3584 #define MT7921_TOKEN_SIZE 8192 #define MT7921_EEPROM_BLOCK_SIZE 16 -#define MT7921_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */ -#define MT7921_CFEND_RATE_11B 0x03 /* 11B LP, 11M */ - #define MT7921_SKU_RATE_NUM 161 #define MT7921_SKU_MAX_DELTA_IDX MT7921_SKU_RATE_NUM #define MT7921_SKU_TABLE_SIZE (MT7921_SKU_RATE_NUM + 1) -#define MT7921_SDIO_HDR_TX_BYTES GENMASK(15, 0) -#define MT7921_SDIO_HDR_PKT_TYPE GENMASK(17, 16) - #define MCU_UNI_EVENT_ROC 0x27 +#define MCU_UNI_EVENT_CLC 0x80 enum { UNI_ROC_ACQUIRE, @@ -128,9 +96,6 @@ struct mt7921_sdio_intr { #define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2) #define to_rcpi(rssi) (2 * (rssi) + 220) -struct mt7921_vif; -struct mt7921_sta; - enum mt7921_txq_id { MT7921_TXQ_BAND0, MT7921_TXQ_BAND1, @@ -144,70 +109,6 @@ enum mt7921_rxq_id { MT7921_RXQ_MCU_WM = 0, }; -DECLARE_EWMA(avg_signal, 10, 8) - -struct mt7921_sta { - struct mt76_wcid wcid; /* must be first */ - - struct mt7921_vif *vif; - - struct list_head poll_list; - u32 airtime_ac[8]; - - int ack_signal; - struct ewma_avg_signal avg_ack_signal; - - unsigned long last_txs; - unsigned long ampdu_state; - - struct mt76_connac_sta_key_conf bip; -}; - -DECLARE_EWMA(rssi, 10, 8); - -struct mt7921_vif { - struct mt76_vif mt76; /* must be first */ - - struct mt7921_sta sta; - struct mt7921_sta *wep_sta; - - struct mt7921_phy *phy; - - struct ewma_rssi rssi; - - struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; - struct ieee80211_chanctx_conf *ctx; -}; - -struct mib_stats { - u32 ack_fail_cnt; - u32 fcs_err_cnt; - u32 rts_cnt; - u32 rts_retries_cnt; - u32 ba_miss_cnt; - - u32 tx_bf_ibf_ppdu_cnt; - u32 tx_bf_ebf_ppdu_cnt; - u32 tx_bf_rx_fb_all_cnt; - u32 tx_bf_rx_fb_he_cnt; - u32 tx_bf_rx_fb_vht_cnt; - u32 tx_bf_rx_fb_ht_cnt; - - u32 tx_ampdu_cnt; - u32 tx_mpdu_attempts_cnt; - u32 tx_mpdu_success_cnt; - u32 tx_pkt_ebf_cnt; - u32 tx_pkt_ibf_cnt; - - u32 rx_mpdu_cnt; - u32 rx_ampdu_cnt; - u32 rx_ampdu_bytes_cnt; - u32 rx_ba_cnt; - - u32 tx_amsdu[8]; - u32 tx_amsdu_cnt; -}; - enum { MT7921_CLC_POWER, 
MT7921_CLC_CHAN, @@ -231,41 +132,6 @@ struct mt7921_clc { u8 data[]; } __packed; -struct mt7921_phy { - struct mt76_phy *mt76; - struct mt7921_dev *dev; - - struct ieee80211_sband_iftype_data iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES]; - - u64 omac_mask; - - u16 noise; - - s16 coverage_class; - u8 slottime; - - u32 rx_ampdu_ts; - u32 ampdu_ref; - - struct mib_stats mib; - - u8 sta_work_count; - - struct sk_buff_head scan_event_list; - struct delayed_work scan_work; -#ifdef CONFIG_ACPI - struct mt7921_acpi_sar *acpisar; -#endif - - struct mt7921_clc *clc[MT7921_CLC_MAX_NUM]; - - struct work_struct roc_work; - struct timer_list roc_timer; - wait_queue_head_t roc_wait; - u8 roc_token_id; - bool roc_grant; -}; - enum mt7921_eeprom_field { MT_EE_CHIP_ID = 0x000, MT_EE_VERSION = 0x002, @@ -277,52 +143,6 @@ enum mt7921_eeprom_field { #define MT_EE_HW_TYPE_ENCAP BIT(0) -#define mt7921_init_reset(dev) ((dev)->hif_ops->init_reset(dev)) -#define mt7921_dev_reset(dev) ((dev)->hif_ops->reset(dev)) -#define mt7921_mcu_init(dev) ((dev)->hif_ops->mcu_init(dev)) -#define __mt7921_mcu_drv_pmctrl(dev) ((dev)->hif_ops->drv_own(dev)) -#define __mt7921_mcu_fw_pmctrl(dev) ((dev)->hif_ops->fw_own(dev)) -struct mt7921_hif_ops { - int (*init_reset)(struct mt7921_dev *dev); - int (*reset)(struct mt7921_dev *dev); - int (*mcu_init)(struct mt7921_dev *dev); - int (*drv_own)(struct mt7921_dev *dev); - int (*fw_own)(struct mt7921_dev *dev); -}; - -struct mt7921_dev { - union { /* must be first */ - struct mt76_dev mt76; - struct mt76_phy mphy; - }; - - const struct mt76_bus_ops *bus_ops; - struct mt7921_phy phy; - - struct work_struct reset_work; - bool hw_full_reset:1; - bool hw_init_done:1; - bool fw_assert:1; - - struct list_head sta_poll_list; - spinlock_t sta_poll_lock; - - struct work_struct init_work; - - u8 fw_debug; - u8 fw_features; - - struct mt76_connac_pm pm; - struct mt76_connac_coredump coredump; - const struct mt7921_hif_ops *hif_ops; - - struct work_struct ipv6_ns_work; - /* IPv6 addresses for WoWLAN */ - struct sk_buff_head ipv6_ns_list; - - enum environment_cap country_ie_env; -}; - enum { TXPWR_USER, TXPWR_EEPROM, @@ -353,56 +173,31 @@ struct mt7921_txpwr { } data[TXPWR_MAX_NUM]; }; -static inline struct mt7921_phy * -mt7921_hw_phy(struct ieee80211_hw *hw) -{ - struct mt76_phy *phy = hw->priv; - - return phy->priv; -} - -static inline struct mt7921_dev * -mt7921_hw_dev(struct ieee80211_hw *hw) -{ - struct mt76_phy *phy = hw->priv; - - return container_of(phy->dev, struct mt7921_dev, mt76); -} - -#define mt7921_mutex_acquire(dev) \ - mt76_connac_mutex_acquire(&(dev)->mt76, &(dev)->pm) -#define mt7921_mutex_release(dev) \ - mt76_connac_mutex_release(&(dev)->mt76, &(dev)->pm) - extern const struct ieee80211_ops mt7921_ops; -u32 mt7921_reg_map(struct mt7921_dev *dev, u32 addr); - -int __mt7921_start(struct mt7921_phy *phy); -int mt7921_register_device(struct mt7921_dev *dev); -void mt7921_unregister_device(struct mt7921_dev *dev); -int mt7921_dma_init(struct mt7921_dev *dev); -int mt7921_wpdma_reset(struct mt7921_dev *dev, bool force); -int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev); -void mt7921_dma_cleanup(struct mt7921_dev *dev); -int mt7921_run_firmware(struct mt7921_dev *dev); -int mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif, +u32 mt7921_reg_map(struct mt792x_dev *dev, u32 addr); + +int __mt7921_start(struct mt792x_phy *phy); +int mt7921_register_device(struct mt792x_dev *dev); +void mt7921_unregister_device(struct mt792x_dev *dev); +int 
mt7921_run_firmware(struct mt792x_dev *dev); +int mt7921_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif, bool enable); -int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta, +int mt7921_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta, struct ieee80211_vif *vif, bool enable, enum mt76_sta_info_state state); -int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd); -int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif); -int mt7921_mcu_set_eeprom(struct mt7921_dev *dev); -int mt7921_mcu_get_rx_rate(struct mt7921_phy *phy, struct ieee80211_vif *vif, +int mt7921_mcu_set_chan_info(struct mt792x_phy *phy, int cmd); +int mt7921_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif); +int mt7921_mcu_set_eeprom(struct mt792x_dev *dev); +int mt7921_mcu_get_rx_rate(struct mt792x_phy *phy, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct rate_info *rate); -int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl); -void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb); -int mt7921_mcu_set_rxfilter(struct mt7921_dev *dev, u32 fif, +int mt7921_mcu_fw_log_2_host(struct mt792x_dev *dev, u8 ctrl); +void mt7921_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb); +int mt7921_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif, u8 bit_op, u32 bit_map); static inline u32 -mt7921_reg_map_l1(struct mt7921_dev *dev, u32 addr) +mt7921_reg_map_l1(struct mt792x_dev *dev, u32 addr) { u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr); u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr); @@ -415,19 +210,19 @@ mt7921_reg_map_l1(struct mt7921_dev *dev, u32 addr) } static inline u32 -mt7921_l1_rr(struct mt7921_dev *dev, u32 addr) +mt7921_l1_rr(struct mt792x_dev *dev, u32 addr) { return mt76_rr(dev, mt7921_reg_map_l1(dev, addr)); } static inline void -mt7921_l1_wr(struct mt7921_dev *dev, u32 addr, u32 val) +mt7921_l1_wr(struct mt792x_dev *dev, u32 addr, u32 val) { mt76_wr(dev, mt7921_reg_map_l1(dev, addr), val); } static inline u32 -mt7921_l1_rmw(struct mt7921_dev *dev, u32 addr, u32 mask, u32 val) +mt7921_l1_rmw(struct mt792x_dev *dev, u32 addr, u32 mask, u32 val) { val |= mt7921_l1_rr(dev, addr) & ~mask; mt7921_l1_wr(dev, addr, val); @@ -438,108 +233,69 @@ mt7921_l1_rmw(struct mt7921_dev *dev, u32 addr, u32 mask, u32 val) #define mt7921_l1_set(dev, addr, val) mt7921_l1_rmw(dev, addr, 0, val) #define mt7921_l1_clear(dev, addr, val) mt7921_l1_rmw(dev, addr, val, 0) -static inline bool mt7921_dma_need_reinit(struct mt7921_dev *dev) -{ - return !mt76_get_field(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT); -} - -static inline void -mt7921_skb_add_usb_sdio_hdr(struct mt7921_dev *dev, struct sk_buff *skb, - int type) -{ - u32 hdr, len; - - len = mt76_is_usb(&dev->mt76) ? 
skb->len : skb->len + sizeof(hdr); - hdr = FIELD_PREP(MT7921_SDIO_HDR_TX_BYTES, len) | - FIELD_PREP(MT7921_SDIO_HDR_PKT_TYPE, type); - - put_unaligned_le32(hdr, skb_push(skb, sizeof(hdr))); -} - -void mt7921_stop(struct ieee80211_hw *hw); -int mt7921_mac_init(struct mt7921_dev *dev); -bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask); -void mt7921_mac_reset_counters(struct mt7921_phy *phy); -void mt7921_mac_set_timing(struct mt7921_phy *phy); +int mt7921_mac_init(struct mt792x_dev *dev); +bool mt7921_mac_wtbl_update(struct mt792x_dev *dev, int idx, u32 mask); int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); -void mt7921_mac_work(struct work_struct *work); void mt7921_mac_reset_work(struct work_struct *work); -void mt7921_mac_update_mib_stats(struct mt7921_phy *phy); -void mt7921_reset(struct mt76_dev *mdev); int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, enum mt76_txq_id qid, struct mt76_wcid *wcid, struct ieee80211_sta *sta, struct mt76_tx_info *tx_info); -void mt7921_tx_worker(struct mt76_worker *w); -void mt7921_tx_token_put(struct mt7921_dev *dev); bool mt7921_rx_check(struct mt76_dev *mdev, void *data, int len); void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, struct sk_buff *skb, u32 *info); void mt7921_stats_work(struct work_struct *work); -void mt7921_set_stream_he_caps(struct mt7921_phy *phy); -void mt7921_update_channel(struct mt76_phy *mphy); -int mt7921_init_debugfs(struct mt7921_dev *dev); +void mt7921_set_stream_he_caps(struct mt792x_phy *phy); +int mt7921_init_debugfs(struct mt792x_dev *dev); -int mt7921_mcu_set_beacon_filter(struct mt7921_dev *dev, +int mt7921_mcu_set_beacon_filter(struct mt792x_dev *dev, struct ieee80211_vif *vif, bool enable); -int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev, +int mt7921_mcu_uni_tx_ba(struct mt792x_dev *dev, struct ieee80211_ampdu_params *params, bool enable); -int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev, +int mt7921_mcu_uni_rx_ba(struct mt792x_dev *dev, struct ieee80211_ampdu_params *params, bool enable); void mt7921_scan_work(struct work_struct *work); void mt7921_roc_work(struct work_struct *work); -void mt7921_roc_timer(struct timer_list *timer); -int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif); -int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev); -int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev); -void mt7921_pm_wake_work(struct work_struct *work); -void mt7921_pm_power_save_work(struct work_struct *work); +int mt7921_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif); void mt7921_coredump_work(struct work_struct *work); -int mt7921_wfsys_reset(struct mt7921_dev *dev); -int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr); +int mt7921_get_txpwr_info(struct mt792x_dev *dev, struct mt7921_txpwr *txpwr); int mt7921_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void *data, int len); int mt7921_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg, struct netlink_callback *cb, void *data, int len); -void mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t, - struct ieee80211_sta *sta, bool clear_status, - struct list_head *free_list); -void mt7921_mac_sta_poll(struct mt7921_dev *dev); int 
mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd, struct sk_buff *skb, int seq); -int mt7921e_driver_own(struct mt7921_dev *dev); -int mt7921e_mac_reset(struct mt7921_dev *dev); -int mt7921e_mcu_init(struct mt7921_dev *dev); -int mt7921s_wfsys_reset(struct mt7921_dev *dev); -int mt7921s_mac_reset(struct mt7921_dev *dev); -int mt7921s_init_reset(struct mt7921_dev *dev); -int __mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev); -int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev); -int mt7921e_mcu_fw_pmctrl(struct mt7921_dev *dev); - -int mt7921s_mcu_init(struct mt7921_dev *dev); -int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev); -int mt7921s_mcu_fw_pmctrl(struct mt7921_dev *dev); -void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data); -void mt7921_set_runtime_pm(struct mt7921_dev *dev); +int mt7921e_driver_own(struct mt792x_dev *dev); +int mt7921e_mac_reset(struct mt792x_dev *dev); +int mt7921e_mcu_init(struct mt792x_dev *dev); +int mt7921s_wfsys_reset(struct mt792x_dev *dev); +int mt7921s_mac_reset(struct mt792x_dev *dev); +int mt7921s_init_reset(struct mt792x_dev *dev); + +int mt7921s_mcu_init(struct mt792x_dev *dev); +int mt7921s_mcu_drv_pmctrl(struct mt792x_dev *dev); +int mt7921s_mcu_fw_pmctrl(struct mt792x_dev *dev); +void mt7921_mac_add_txs(struct mt792x_dev *dev, void *data); +void mt7921_set_runtime_pm(struct mt792x_dev *dev); void mt7921_mcu_set_suspend_iter(void *priv, u8 *mac, struct ieee80211_vif *vif); void mt7921_set_ipv6_ns_work(struct work_struct *work); -int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif, +int mt7921_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif, bool enable); -int mt7921_mcu_config_sniffer(struct mt7921_vif *vif, +int mt7921_mcu_config_sniffer(struct mt792x_vif *vif, struct ieee80211_chanctx_conf *ctx); +int mt7921_mcu_get_temperature(struct mt792x_phy *phy); int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, enum mt76_txq_id qid, struct mt76_wcid *wcid, @@ -550,51 +306,18 @@ void mt7921_usb_sdio_tx_complete_skb(struct mt76_dev *mdev, bool mt7921_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update); /* usb */ -#define MT_USB_TYPE_VENDOR (USB_TYPE_VENDOR | 0x1f) -#define MT_USB_TYPE_UHW_VENDOR (USB_TYPE_VENDOR | 0x1e) - -int mt7921u_mcu_power_on(struct mt7921_dev *dev); -int mt7921u_wfsys_reset(struct mt7921_dev *dev); -int mt7921u_dma_init(struct mt7921_dev *dev, bool resume); -int mt7921u_init_reset(struct mt7921_dev *dev); -int mt7921u_mac_reset(struct mt7921_dev *dev); -int mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev, +int mt7921_mcu_uni_add_beacon_offload(struct mt792x_dev *dev, struct ieee80211_hw *hw, struct ieee80211_vif *vif, bool enable); -#ifdef CONFIG_ACPI -int mt7921_init_acpi_sar(struct mt7921_dev *dev); -int mt7921_init_acpi_sar_power(struct mt7921_phy *phy, bool set_default); -u8 mt7921_acpi_get_flags(struct mt7921_phy *phy); -#else -static inline int -mt7921_init_acpi_sar(struct mt7921_dev *dev) -{ - return 0; -} - -static inline int -mt7921_init_acpi_sar_power(struct mt7921_phy *phy, bool set_default) -{ - return 0; -} - -static inline u8 -mt7921_acpi_get_flags(struct mt7921_phy *phy) -{ - return 0; -} -#endif int mt7921_set_tx_sar_pwr(struct ieee80211_hw *hw, const struct cfg80211_sar_specs *sar); -int mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2, +int mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2, enum environment_cap env_cap); -int mt7921_mcu_set_roc(struct mt7921_phy *phy, struct mt7921_vif *vif, +int 
mt7921_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, struct ieee80211_channel *chan, int duration, enum mt7921_roc_req type, u8 token_id); -int mt7921_mcu_abort_roc(struct mt7921_phy *phy, struct mt7921_vif *vif, +int mt7921_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, u8 token_id); -struct ieee80211_ops *mt7921_get_mac80211_ops(struct device *dev, - void *drv_data, u8 *fw_features); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c index 95610a117d2f..f04e7095e181 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c @@ -9,14 +9,16 @@ #include "mt7921.h" #include "../mt76_connac2_mac.h" +#include "../dma.h" #include "mcu.h" -#include "../trace.h" static const struct pci_device_id mt7921_pci_device_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7961), .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM }, { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7922), .driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM }, + { PCI_DEVICE(PCI_VENDOR_ID_ITTIM, 0x7922), + .driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM }, { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0608), .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM }, { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616), @@ -28,81 +30,12 @@ static bool mt7921_disable_aspm; module_param_named(disable_aspm, mt7921_disable_aspm, bool, 0644); MODULE_PARM_DESC(disable_aspm, "disable PCI ASPM support"); -static void -mt7921_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q) -{ - if (q == MT_RXQ_MAIN) - mt76_connac_irq_enable(mdev, MT_INT_RX_DONE_DATA); - else if (q == MT_RXQ_MCU_WA) - mt76_connac_irq_enable(mdev, MT_INT_RX_DONE_WM2); - else - mt76_connac_irq_enable(mdev, MT_INT_RX_DONE_WM); -} - -static irqreturn_t mt7921_irq_handler(int irq, void *dev_instance) -{ - struct mt7921_dev *dev = dev_instance; - - mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0); - - if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state)) - return IRQ_NONE; - - tasklet_schedule(&dev->mt76.irq_tasklet); - - return IRQ_HANDLED; -} - -static void mt7921_irq_tasklet(unsigned long data) -{ - struct mt7921_dev *dev = (struct mt7921_dev *)data; - u32 intr, mask = 0; - - mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0); - - intr = mt76_rr(dev, MT_WFDMA0_HOST_INT_STA); - intr &= dev->mt76.mmio.irqmask; - mt76_wr(dev, MT_WFDMA0_HOST_INT_STA, intr); - - trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask); - - mask |= intr & MT_INT_RX_DONE_ALL; - if (intr & MT_INT_TX_DONE_MCU) - mask |= MT_INT_TX_DONE_MCU; - - if (intr & MT_INT_MCU_CMD) { - u32 intr_sw; - - intr_sw = mt76_rr(dev, MT_MCU_CMD); - /* ack MCU2HOST_SW_INT_STA */ - mt76_wr(dev, MT_MCU_CMD, intr_sw); - if (intr_sw & MT_MCU_CMD_WAKE_RX_PCIE) { - mask |= MT_INT_RX_DONE_DATA; - intr |= MT_INT_RX_DONE_DATA; - } - } - - mt76_set_irq_mask(&dev->mt76, MT_WFDMA0_HOST_INT_ENA, mask, 0); - - if (intr & MT_INT_TX_DONE_ALL) - napi_schedule(&dev->mt76.tx_napi); - - if (intr & MT_INT_RX_DONE_WM) - napi_schedule(&dev->mt76.napi[MT_RXQ_MCU]); - - if (intr & MT_INT_RX_DONE_WM2) - napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]); - - if (intr & MT_INT_RX_DONE_DATA) - napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]); -} - -static int mt7921e_init_reset(struct mt7921_dev *dev) +static int mt7921e_init_reset(struct mt792x_dev *dev) { - return mt7921_wpdma_reset(dev, true); + return mt792x_wpdma_reset(dev, true); } -static void mt7921e_unregister_device(struct mt7921_dev *dev) +static void mt7921e_unregister_device(struct 
mt792x_dev *dev) { int i; struct mt76_connac_pm *pm = &dev->pm; @@ -115,16 +48,16 @@ static void mt7921e_unregister_device(struct mt7921_dev *dev) cancel_work_sync(&pm->wake_work); cancel_work_sync(&dev->reset_work); - mt7921_tx_token_put(dev); - __mt7921_mcu_drv_pmctrl(dev); - mt7921_dma_cleanup(dev); - mt7921_wfsys_reset(dev); + mt76_connac2_tx_token_put(&dev->mt76); + __mt792x_mcu_drv_pmctrl(dev); + mt792x_dma_cleanup(dev); + mt792x_wfsys_reset(dev); skb_queue_purge(&dev->mt76.mcu.res_q); tasklet_disable(&dev->mt76.irq_tasklet); } -static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr) +static u32 __mt7921_reg_addr(struct mt792x_dev *dev, u32 addr) { static const struct mt76_connac_reg_map fixed_map[] = { { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */ @@ -203,7 +136,7 @@ static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr) static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset) { - struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); u32 addr = __mt7921_reg_addr(dev, offset); return dev->bus_ops->rr(mdev, addr); @@ -211,7 +144,7 @@ static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset) static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val) { - struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); u32 addr = __mt7921_reg_addr(dev, offset); dev->bus_ops->wr(mdev, addr, val); @@ -219,12 +152,77 @@ static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val) static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val) { - struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); u32 addr = __mt7921_reg_addr(dev, offset); return dev->bus_ops->rmw(mdev, addr, mask, val); } +static int mt7921_dma_init(struct mt792x_dev *dev) +{ + int ret; + + mt76_dma_attach(&dev->mt76); + + ret = mt792x_dma_disable(dev, true); + if (ret) + return ret; + + /* init tx queue */ + ret = mt76_connac_init_tx_queues(dev->phy.mt76, MT7921_TXQ_BAND0, + MT7921_TX_RING_SIZE, + MT_TX_RING_BASE, 0); + if (ret) + return ret; + + mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, 0x4); + + /* command to WM */ + ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7921_TXQ_MCU_WM, + MT7921_TX_MCU_RING_SIZE, MT_TX_RING_BASE); + if (ret) + return ret; + + /* firmware download */ + ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, MT7921_TXQ_FWDL, + MT7921_TX_FWDL_RING_SIZE, MT_TX_RING_BASE); + if (ret) + return ret; + + /* event from WM before firmware download */ + ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU], + MT7921_RXQ_MCU_WM, + MT7921_RX_MCU_RING_SIZE, + MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE); + if (ret) + return ret; + + /* Change mcu queue after firmware download */ + ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA], + MT7921_RXQ_MCU_WM, + MT7921_RX_MCU_RING_SIZE, + MT_RX_BUF_SIZE, MT_WFDMA0(0x540)); + if (ret) + return ret; + + /* rx data */ + ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], + MT7921_RXQ_BAND0, MT7921_RX_RING_SIZE, + MT_RX_BUF_SIZE, MT_RX_DATA_RING_BASE); + if (ret) + return ret; + + ret = mt76_init_queues(dev, mt792x_poll_rx); + if (ret < 0) + return ret; + + netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, + mt792x_poll_tx); + napi_enable(&dev->mt76.tx_napi); + + return mt792x_dma_enable(dev); +} + static int mt7921_pci_probe(struct pci_dev 
*pdev, const struct pci_device_id *id) { @@ -241,22 +239,34 @@ static int mt7921_pci_probe(struct pci_dev *pdev, .tx_complete_skb = mt76_connac_tx_complete_skb, .rx_check = mt7921_rx_check, .rx_skb = mt7921_queue_rx_skb, - .rx_poll_complete = mt7921_rx_poll_complete, + .rx_poll_complete = mt792x_rx_poll_complete, .sta_add = mt7921_mac_sta_add, .sta_assoc = mt7921_mac_sta_assoc, .sta_remove = mt7921_mac_sta_remove, - .update_survey = mt7921_update_channel, + .update_survey = mt792x_update_channel, }; - static const struct mt7921_hif_ops mt7921_pcie_ops = { + static const struct mt792x_hif_ops mt7921_pcie_ops = { .init_reset = mt7921e_init_reset, .reset = mt7921e_mac_reset, .mcu_init = mt7921e_mcu_init, - .drv_own = mt7921e_mcu_drv_pmctrl, - .fw_own = mt7921e_mcu_fw_pmctrl, + .drv_own = mt792xe_mcu_drv_pmctrl, + .fw_own = mt792xe_mcu_fw_pmctrl, + }; + static const struct mt792x_irq_map irq_map = { + .host_irq_enable = MT_WFDMA0_HOST_INT_ENA, + .tx = { + .all_complete_mask = MT_INT_TX_DONE_ALL, + .mcu_complete_mask = MT_INT_TX_DONE_MCU, + }, + .rx = { + .data_complete_mask = MT_INT_RX_DONE_DATA, + .wm_complete_mask = MT_INT_RX_DONE_WM, + .wm2_complete_mask = MT_INT_RX_DONE_WM2, + }, }; struct ieee80211_ops *ops; struct mt76_bus_ops *bus_ops; - struct mt7921_dev *dev; + struct mt792x_dev *dev; struct mt76_dev *mdev; u8 features; int ret; @@ -288,8 +298,8 @@ static int mt7921_pci_probe(struct pci_dev *pdev, if (mt7921_disable_aspm) mt76_pci_disable_aspm(pdev); - ops = mt7921_get_mac80211_ops(&pdev->dev, (void *)id->driver_data, - &features); + ops = mt792x_get_mac80211_ops(&pdev->dev, &mt7921_ops, + (void *)id->driver_data, &features); if (!ops) { ret = -ENOMEM; goto err_free_pci_vec; @@ -303,11 +313,12 @@ static int mt7921_pci_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, mdev); - dev = container_of(mdev, struct mt7921_dev, mt76); + dev = container_of(mdev, struct mt792x_dev, mt76); dev->fw_features = features; dev->hif_ops = &mt7921_pcie_ops; + dev->irq_map = &irq_map; mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]); - tasklet_init(&mdev->irq_tasklet, mt7921_irq_tasklet, (unsigned long)dev); + tasklet_init(&mdev->irq_tasklet, mt792x_irq_tasklet, (unsigned long)dev); dev->phy.dev = dev; dev->phy.mt76 = &dev->mt76.phy; @@ -325,11 +336,11 @@ static int mt7921_pci_probe(struct pci_dev *pdev, bus_ops->rmw = mt7921_rmw; dev->mt76.bus = bus_ops; - ret = mt7921e_mcu_fw_pmctrl(dev); + ret = mt792xe_mcu_fw_pmctrl(dev); if (ret) goto err_free_dev; - ret = __mt7921e_mcu_drv_pmctrl(dev); + ret = __mt792xe_mcu_drv_pmctrl(dev); if (ret) goto err_free_dev; @@ -337,15 +348,15 @@ static int mt7921_pci_probe(struct pci_dev *pdev, (mt7921_l1_rr(dev, MT_HW_REV) & 0xff); dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev); - ret = mt7921_wfsys_reset(dev); + ret = mt792x_wfsys_reset(dev); if (ret) goto err_free_dev; - mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0); + mt76_wr(dev, irq_map.host_irq_enable, 0); mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); - ret = devm_request_irq(mdev->dev, pdev->irq, mt7921_irq_handler, + ret = devm_request_irq(mdev->dev, pdev->irq, mt792x_irq_handler, IRQF_SHARED, KBUILD_MODNAME, dev); if (ret) goto err_free_dev; @@ -373,7 +384,7 @@ err_free_pci_vec: static void mt7921_pci_remove(struct pci_dev *pdev) { struct mt76_dev *mdev = pci_get_drvdata(pdev); - struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); mt7921e_unregister_device(dev); devm_free_irq(&pdev->dev, pdev->irq, dev); @@ -385,7 
+396,7 @@ static int mt7921_pci_suspend(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct mt76_dev *mdev = pci_get_drvdata(pdev); - struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); struct mt76_connac_pm *pm = &dev->pm; int i, err; @@ -394,7 +405,7 @@ static int mt7921_pci_suspend(struct device *device) cancel_delayed_work_sync(&pm->ps_work); cancel_work_sync(&pm->wake_work); - err = mt7921_mcu_drv_pmctrl(dev); + err = mt792x_mcu_drv_pmctrl(dev); if (err < 0) goto restore_suspend; @@ -424,12 +435,12 @@ static int mt7921_pci_suspend(struct device *device) MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN); /* disable interrupt */ - mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0); + mt76_wr(dev, dev->irq_map->host_irq_enable, 0); mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); synchronize_irq(pdev->irq); tasklet_kill(&mdev->irq_tasklet); - err = mt7921_mcu_fw_pmctrl(dev); + err = mt792x_mcu_fw_pmctrl(dev); if (err) goto restore_napi; @@ -450,7 +461,7 @@ restore_suspend: pm->suspended = false; if (err < 0) - mt7921_reset(&dev->mt76); + mt792x_reset(&dev->mt76); return err; } @@ -459,21 +470,21 @@ static int mt7921_pci_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct mt76_dev *mdev = pci_get_drvdata(pdev); - struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); struct mt76_connac_pm *pm = &dev->pm; int i, err; - err = mt7921_mcu_drv_pmctrl(dev); + err = mt792x_mcu_drv_pmctrl(dev); if (err < 0) goto failed; - mt7921_wpdma_reinit_cond(dev); + mt792x_wpdma_reinit_cond(dev); /* enable interrupt */ mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); mt76_connac_irq_enable(&dev->mt76, - MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL | - MT_INT_MCU_CMD); + dev->irq_map->tx.all_complete_mask | + MT_INT_RX_DONE_ALL | MT_INT_MCU_CMD); mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE); /* put dma enabled */ @@ -500,7 +511,7 @@ failed: pm->suspended = false; if (err < 0) - mt7921_reset(&dev->mt76); + mt792x_reset(&dev->mt76); return err; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c index 6053a2556c20..c866144ff061 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c @@ -10,7 +10,7 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, struct ieee80211_sta *sta, struct mt76_tx_info *tx_info) { - struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); struct ieee80211_key_conf *key = info->control.hw_key; struct mt76_connac_hw_txp *txp; @@ -32,7 +32,7 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, return id; if (sta) { - struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv; + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; if (time_after(jiffies, msta->last_txs + HZ / 4)) { info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; @@ -48,34 +48,20 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, memset(txp, 0, sizeof(struct mt76_connac_hw_txp)); mt76_connac_write_hw_txp(mdev, tx_info, txp, id); - tx_info->skb = DMA_DUMMY_DATA; + tx_info->skb = NULL; return 0; } -void mt7921_tx_token_put(struct mt7921_dev *dev) -{ - struct 
mt76_txwi_cache *txwi; - int id; - - spin_lock_bh(&dev->mt76.token_lock); - idr_for_each_entry(&dev->mt76.token, txwi, id) { - mt7921_txwi_free(dev, txwi, NULL, false, NULL); - dev->mt76.token_count--; - } - spin_unlock_bh(&dev->mt76.token_lock); - idr_destroy(&dev->mt76.token); -} - -int mt7921e_mac_reset(struct mt7921_dev *dev) +int mt7921e_mac_reset(struct mt792x_dev *dev) { int i, err; - mt7921e_mcu_drv_pmctrl(dev); + mt792xe_mcu_drv_pmctrl(dev); mt76_connac_free_pending_tx_skbs(&dev->pm, NULL); - mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0); + mt76_wr(dev, dev->irq_map->host_irq_enable, 0); mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); set_bit(MT76_RESET, &dev->mphy.state); @@ -91,10 +77,10 @@ int mt7921e_mac_reset(struct mt7921_dev *dev) napi_disable(&dev->mt76.napi[MT_RXQ_MCU_WA]); napi_disable(&dev->mt76.tx_napi); - mt7921_tx_token_put(dev); + mt76_connac2_tx_token_put(&dev->mt76); idr_init(&dev->mt76.token); - mt7921_wpdma_reset(dev, true); + mt792x_wpdma_reset(dev, true); local_bh_disable(); mt76_for_each_q_rx(&dev->mt76, i) { @@ -106,9 +92,9 @@ int mt7921e_mac_reset(struct mt7921_dev *dev) dev->fw_assert = false; clear_bit(MT76_MCU_RESET, &dev->mphy.state); - mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, - MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL | - MT_INT_MCU_CMD); + mt76_wr(dev, dev->irq_map->host_irq_enable, + dev->irq_map->tx.all_complete_mask | + MT_INT_RX_DONE_ALL | MT_INT_MCU_CMD); mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); err = mt7921e_driver_own(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c index 1aefbb6cf0ab..4cf1f2f0f968 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c @@ -4,7 +4,7 @@ #include "mt7921.h" #include "mcu.h" -int mt7921e_driver_own(struct mt7921_dev *dev) +int mt7921e_driver_own(struct mt792x_dev *dev) { u32 reg = mt7921_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0); @@ -22,7 +22,7 @@ static int mt7921_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, int cmd, int *seq) { - struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); enum mt76_mcuq_id txq = MT_MCUQ_WM; int ret; @@ -38,7 +38,7 @@ mt7921_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[txq], skb, 0); } -int mt7921e_mcu_init(struct mt7921_dev *dev) +int mt7921e_mcu_init(struct mt792x_dev *dev) { static const struct mt76_mcu_ops mt7921_mcu_ops = { .headroom = sizeof(struct mt76_connac2_mcu_txd), @@ -61,68 +61,3 @@ int mt7921e_mcu_init(struct mt7921_dev *dev) return err; } - -int __mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev) -{ - int i, err = 0; - - for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) { - mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_CLR_OWN); - if (mt76_poll_msec_tick(dev, MT_CONN_ON_LPCTL, - PCIE_LPCR_HOST_OWN_SYNC, 0, 50, 1)) - break; - } - - if (i == MT7921_DRV_OWN_RETRY_COUNT) { - dev_err(dev->mt76.dev, "driver own failed\n"); - err = -EIO; - } - - return err; -} - -int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev) -{ - struct mt76_phy *mphy = &dev->mt76.phy; - struct mt76_connac_pm *pm = &dev->pm; - int err; - - err = __mt7921e_mcu_drv_pmctrl(dev); - if (err < 0) - goto out; - - mt7921_wpdma_reinit_cond(dev); - clear_bit(MT76_STATE_PM, &mphy->state); - - pm->stats.last_wake_event = jiffies; - pm->stats.doze_time += pm->stats.last_wake_event - - pm->stats.last_doze_event; -out: - return err; -} 
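The drv_own/fw_own helpers removed here are re-homed in the shared mt792x layer, but the handshake itself does not change: the host writes a clear-own or set-own request to MT_CONN_ON_LPCTL, then polls the OWN_SYNC bit until the firmware acknowledges, retrying a bounded number of times. A minimal sketch of that pattern, reusing the register and bit names visible in this diff (the ownership_handshake() wrapper itself is hypothetical, not a symbol in the driver):

static int ownership_handshake(struct mt792x_dev *dev, bool driver_own)
{
	/* request: clear FW ownership to take the device, set it to hand it back */
	u32 req = driver_own ? PCIE_LPCR_HOST_CLR_OWN : PCIE_LPCR_HOST_SET_OWN;
	/* OWN_SYNC level expected once the firmware has acknowledged */
	u32 ack = driver_own ? 0 : PCIE_LPCR_HOST_OWN_SYNC;
	int i;

	for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
		mt76_wr(dev, MT_CONN_ON_LPCTL, req);
		/* poll up to 50ms per attempt, in 1ms ticks, as in the removed code */
		if (mt76_poll_msec_tick(dev, MT_CONN_ON_LPCTL,
					PCIE_LPCR_HOST_OWN_SYNC, ack, 50, 1))
			return 0;
	}

	dev_err(dev->mt76.dev, "%s own failed\n", driver_own ? "driver" : "firmware");
	return -EIO;
}

On success the callers still do the same power-management bookkeeping as the removed bodies here: update last_wake_event/doze_time (or last_doze_event/awake_time) in the mt76_connac_pm stats and flip MT76_STATE_PM in mphy->state.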
- -int mt7921e_mcu_fw_pmctrl(struct mt7921_dev *dev) -{ - struct mt76_phy *mphy = &dev->mt76.phy; - struct mt76_connac_pm *pm = &dev->pm; - int i; - - for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) { - mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_SET_OWN); - if (mt76_poll_msec_tick(dev, MT_CONN_ON_LPCTL, - PCIE_LPCR_HOST_OWN_SYNC, 4, 50, 1)) - break; - } - - if (i == MT7921_DRV_OWN_RETRY_COUNT) { - dev_err(dev->mt76.dev, "firmware own failed\n"); - clear_bit(MT76_STATE_PM, &mphy->state); - return -EIO; - } - - pm->stats.last_doze_event = jiffies; - pm->stats.awake_time += pm->stats.last_doze_event - - pm->stats.last_wake_event; - - return 0; -} diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h index e52977ff3349..43427a3a48af 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h @@ -4,26 +4,7 @@ #ifndef __MT7921_REGS_H #define __MT7921_REGS_H -/* MCU WFDMA1 */ -#define MT_MCU_WFDMA1_BASE 0x3000 -#define MT_MCU_WFDMA1(ofs) (MT_MCU_WFDMA1_BASE + (ofs)) - -#define MT_MCU_INT_EVENT MT_MCU_WFDMA1(0x108) -#define MT_MCU_INT_EVENT_DMA_STOPPED BIT(0) -#define MT_MCU_INT_EVENT_DMA_INIT BIT(1) -#define MT_MCU_INT_EVENT_SER_TRIGGER BIT(2) -#define MT_MCU_INT_EVENT_RESET_DONE BIT(3) - -#define MT_PLE_BASE 0x820c0000 -#define MT_PLE(ofs) (MT_PLE_BASE + (ofs)) - -#define MT_PLE_FL_Q0_CTRL MT_PLE(0x3e0) -#define MT_PLE_FL_Q1_CTRL MT_PLE(0x3e4) -#define MT_PLE_FL_Q2_CTRL MT_PLE(0x3e8) -#define MT_PLE_FL_Q3_CTRL MT_PLE(0x3ec) - -#define MT_PLE_AC_QEMPTY(_n) MT_PLE(0x500 + 0x40 * (_n)) -#define MT_PLE_AMSDU_PACK_MSDU_CNT(n) MT_PLE(0x10e0 + ((n) << 2)) +#include "../mt792x_regs.h" #define MT_MDP_BASE 0x820cd000 #define MT_MDP(ofs) (MT_MDP_BASE + (ofs)) @@ -47,280 +28,7 @@ #define MT_MDP_TO_HIF 0 #define MT_MDP_TO_WM 1 -/* TMAC: band 0(0x21000), band 1(0xa1000) */ -#define MT_WF_TMAC_BASE(_band) ((_band) ? 0x820f4000 : 0x820e4000) -#define MT_WF_TMAC(_band, ofs) (MT_WF_TMAC_BASE(_band) + (ofs)) - -#define MT_TMAC_TCR0(_band) MT_WF_TMAC(_band, 0) -#define MT_TMAC_TCR0_TBTT_STOP_CTRL BIT(25) - -#define MT_TMAC_CDTR(_band) MT_WF_TMAC(_band, 0x090) -#define MT_TMAC_ODTR(_band) MT_WF_TMAC(_band, 0x094) -#define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0) -#define MT_TIMEOUT_VAL_CCA GENMASK(31, 16) - -#define MT_TMAC_ICR0(_band) MT_WF_TMAC(_band, 0x0a4) -#define MT_IFS_EIFS GENMASK(8, 0) -#define MT_IFS_RIFS GENMASK(14, 10) -#define MT_IFS_SIFS GENMASK(22, 16) -#define MT_IFS_SLOT GENMASK(30, 24) - -#define MT_TMAC_CTCR0(_band) MT_WF_TMAC(_band, 0x0f4) -#define MT_TMAC_CTCR0_INS_DDLMT_REFTIME GENMASK(5, 0) -#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17) -#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18) - -#define MT_TMAC_TRCR0(_band) MT_WF_TMAC(_band, 0x09c) -#define MT_TMAC_TFCR0(_band) MT_WF_TMAC(_band, 0x1e0) - -#define MT_WF_DMA_BASE(_band) ((_band) ? 0x820f7000 : 0x820e7000) -#define MT_WF_DMA(_band, ofs) (MT_WF_DMA_BASE(_band) + (ofs)) - -#define MT_DMA_DCR0(_band) MT_WF_DMA(_band, 0x000) -#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 3) -#define MT_DMA_DCR0_RXD_G5_EN BIT(23) - -/* WTBLOFF TOP: band 0(0x820e9000),band 1(0x820f9000) */ -#define MT_WTBLOFF_TOP_BASE(_band) ((_band) ? 
0x820f9000 : 0x820e9000) -#define MT_WTBLOFF_TOP(_band, ofs) (MT_WTBLOFF_TOP_BASE(_band) + (ofs)) - -#define MT_WTBLOFF_TOP_RSCR(_band) MT_WTBLOFF_TOP(_band, 0x008) -#define MT_WTBLOFF_TOP_RSCR_RCPI_MODE GENMASK(31, 30) -#define MT_WTBLOFF_TOP_RSCR_RCPI_PARAM GENMASK(25, 24) - -/* LPON: band 0(0x24200), band 1(0xa4200) */ -#define MT_WF_LPON_BASE(_band) ((_band) ? 0x820fb000 : 0x820eb000) -#define MT_WF_LPON(_band, ofs) (MT_WF_LPON_BASE(_band) + (ofs)) - -#define MT_LPON_UTTR0(_band) MT_WF_LPON(_band, 0x080) -#define MT_LPON_UTTR1(_band) MT_WF_LPON(_band, 0x084) - -#define MT_LPON_TCR(_band, n) MT_WF_LPON(_band, 0x0a8 + (n) * 4) -#define MT_LPON_TCR_SW_MODE GENMASK(1, 0) -#define MT_LPON_TCR_SW_WRITE BIT(0) - -/* ETBF: band 0(0x24000), band 1(0xa4000) */ -#define MT_WF_ETBF_BASE(_band) ((_band) ? 0x820fa000 : 0x820ea000) -#define MT_WF_ETBF(_band, ofs) (MT_WF_ETBF_BASE(_band) + (ofs)) - -#define MT_ETBF_TX_APP_CNT(_band) MT_WF_ETBF(_band, 0x150) -#define MT_ETBF_TX_IBF_CNT GENMASK(31, 16) -#define MT_ETBF_TX_EBF_CNT GENMASK(15, 0) - -#define MT_ETBF_RX_FB_CNT(_band) MT_WF_ETBF(_band, 0x158) -#define MT_ETBF_RX_FB_ALL GENMASK(31, 24) -#define MT_ETBF_RX_FB_HE GENMASK(23, 16) -#define MT_ETBF_RX_FB_VHT GENMASK(15, 8) -#define MT_ETBF_RX_FB_HT GENMASK(7, 0) - -/* MIB: band 0(0x24800), band 1(0xa4800) */ -#define MT_WF_MIB_BASE(_band) ((_band) ? 0x820fd000 : 0x820ed000) -#define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE(_band) + (ofs)) - -#define MT_MIB_SCR1(_band) MT_WF_MIB(_band, 0x004) -#define MT_MIB_TXDUR_EN BIT(8) -#define MT_MIB_RXDUR_EN BIT(9) - -#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x698) -#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(31, 16) - -#define MT_MIB_SDR5(_band) MT_WF_MIB(_band, 0x780) - -#define MT_MIB_SDR9(_band) MT_WF_MIB(_band, 0x02c) -#define MT_MIB_SDR9_BUSY_MASK GENMASK(23, 0) - -#define MT_MIB_SDR12(_band) MT_WF_MIB(_band, 0x558) -#define MT_MIB_SDR14(_band) MT_WF_MIB(_band, 0x564) -#define MT_MIB_SDR15(_band) MT_WF_MIB(_band, 0x568) - -#define MT_MIB_SDR16(_band) MT_WF_MIB(_band, 0x048) -#define MT_MIB_SDR16_BUSY_MASK GENMASK(23, 0) - -#define MT_MIB_SDR22(_band) MT_WF_MIB(_band, 0x770) -#define MT_MIB_SDR23(_band) MT_WF_MIB(_band, 0x774) -#define MT_MIB_SDR31(_band) MT_WF_MIB(_band, 0x55c) - -#define MT_MIB_SDR32(_band) MT_WF_MIB(_band, 0x7a8) -#define MT_MIB_SDR9_IBF_CNT_MASK GENMASK(31, 16) -#define MT_MIB_SDR9_EBF_CNT_MASK GENMASK(15, 0) - -#define MT_MIB_SDR34(_band) MT_WF_MIB(_band, 0x090) -#define MT_MIB_MU_BF_TX_CNT GENMASK(15, 0) - -#define MT_MIB_SDR36(_band) MT_WF_MIB(_band, 0x054) -#define MT_MIB_SDR36_TXTIME_MASK GENMASK(23, 0) -#define MT_MIB_SDR37(_band) MT_WF_MIB(_band, 0x058) -#define MT_MIB_SDR37_RXTIME_MASK GENMASK(23, 0) - -#define MT_MIB_DR8(_band) MT_WF_MIB(_band, 0x0c0) -#define MT_MIB_DR9(_band) MT_WF_MIB(_band, 0x0c4) -#define MT_MIB_DR11(_band) MT_WF_MIB(_band, 0x0cc) - -#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, 0x100 + ((n) << 4)) -#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16) -#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0) - -#define MT_MIB_MB_BSDR0(_band) MT_WF_MIB(_band, 0x688) -#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0) -#define MT_MIB_MB_BSDR1(_band) MT_WF_MIB(_band, 0x690) -#define MT_MIB_RTS_FAIL_COUNT_MASK GENMASK(15, 0) -#define MT_MIB_MB_BSDR2(_band) MT_WF_MIB(_band, 0x518) -#define MT_MIB_BA_FAIL_COUNT_MASK GENMASK(15, 0) -#define MT_MIB_MB_BSDR3(_band) MT_WF_MIB(_band, 0x520) -#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(15, 0) - -#define MT_MIB_MB_SDR2(_band, n) MT_WF_MIB(_band, 0x108 + ((n) << 4)) -#define 
MT_MIB_FRAME_RETRIES_COUNT_MASK GENMASK(15, 0) - -#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0x7dc + ((n) << 2)) -#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, 0x7ec + ((n) << 2)) -#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, 0x0b0 + ((n) << 2)) -#define MT_MIB_ARNCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(7, 0)) - -#define MT_WTBLON_TOP_BASE 0x820d4000 -#define MT_WTBLON_TOP(ofs) (MT_WTBLON_TOP_BASE + (ofs)) -#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x200) -#define MT_WTBLON_TOP_WDUCR_GROUP GENMASK(2, 0) - -#define MT_WTBL_UPDATE MT_WTBLON_TOP(0x230) -#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(9, 0) -#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12) -#define MT_WTBL_UPDATE_BUSY BIT(31) - -#define MT_WTBL_BASE 0x820d8000 -#define MT_WTBL_LMAC_ID GENMASK(14, 8) -#define MT_WTBL_LMAC_DW GENMASK(7, 2) -#define MT_WTBL_LMAC_OFFS(_id, _dw) (MT_WTBL_BASE | \ - FIELD_PREP(MT_WTBL_LMAC_ID, _id) | \ - FIELD_PREP(MT_WTBL_LMAC_DW, _dw)) - -/* AGG: band 0(0x20800), band 1(0xa0800) */ -#define MT_WF_AGG_BASE(_band) ((_band) ? 0x820f2000 : 0x820e2000) -#define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs)) - -#define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, 0x05c + (_n) * 4) -#define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, 0x06c + (_n) * 4) -#define MT_AGG_PCR0_MM_PROT BIT(0) -#define MT_AGG_PCR0_GF_PROT BIT(1) -#define MT_AGG_PCR0_BW20_PROT BIT(2) -#define MT_AGG_PCR0_BW40_PROT BIT(4) -#define MT_AGG_PCR0_BW80_PROT BIT(6) -#define MT_AGG_PCR0_ERP_PROT GENMASK(12, 8) -#define MT_AGG_PCR0_VHT_PROT BIT(13) -#define MT_AGG_PCR0_PTA_WIN_DIS BIT(15) - -#define MT_AGG_PCR1_RTS0_NUM_THRES GENMASK(31, 23) -#define MT_AGG_PCR1_RTS0_LEN_THRES GENMASK(19, 0) - -#define MT_AGG_ACR0(_band) MT_WF_AGG(_band, 0x084) -#define MT_AGG_ACR_CFEND_RATE GENMASK(13, 0) -#define MT_AGG_ACR_BAR_RATE GENMASK(29, 16) - -#define MT_AGG_MRCR(_band) MT_WF_AGG(_band, 0x098) -#define MT_AGG_MRCR_BAR_CNT_LIMIT GENMASK(15, 12) -#define MT_AGG_MRCR_LAST_RTS_CTS_RN BIT(6) -#define MT_AGG_MRCR_RTS_FAIL_LIMIT GENMASK(11, 7) -#define MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT GENMASK(28, 24) - -#define MT_AGG_ATCR1(_band) MT_WF_AGG(_band, 0x0f0) -#define MT_AGG_ATCR3(_band) MT_WF_AGG(_band, 0x0f4) - -/* ARB: band 0(0x20c00), band 1(0xa0c00) */ -#define MT_WF_ARB_BASE(_band) ((_band) ? 0x820f3000 : 0x820e3000) -#define MT_WF_ARB(_band, ofs) (MT_WF_ARB_BASE(_band) + (ofs)) - -#define MT_ARB_SCR(_band) MT_WF_ARB(_band, 0x080) -#define MT_ARB_SCR_TX_DISABLE BIT(8) -#define MT_ARB_SCR_RX_DISABLE BIT(9) - -#define MT_ARB_DRNGR0(_band, _n) MT_WF_ARB(_band, 0x194 + (_n) * 4) - -/* RMAC: band 0(0x21400), band 1(0xa1400) */ -#define MT_WF_RMAC_BASE(_band) ((_band) ? 
0x820f5000 : 0x820e5000) -#define MT_WF_RMAC(_band, ofs) (MT_WF_RMAC_BASE(_band) + (ofs)) - -#define MT_WF_RFCR(_band) MT_WF_RMAC(_band, 0x000) -#define MT_WF_RFCR_DROP_STBC_MULTI BIT(0) -#define MT_WF_RFCR_DROP_FCSFAIL BIT(1) -#define MT_WF_RFCR_DROP_VERSION BIT(3) -#define MT_WF_RFCR_DROP_PROBEREQ BIT(4) -#define MT_WF_RFCR_DROP_MCAST BIT(5) -#define MT_WF_RFCR_DROP_BCAST BIT(6) -#define MT_WF_RFCR_DROP_MCAST_FILTERED BIT(7) -#define MT_WF_RFCR_DROP_A3_MAC BIT(8) -#define MT_WF_RFCR_DROP_A3_BSSID BIT(9) -#define MT_WF_RFCR_DROP_A2_BSSID BIT(10) -#define MT_WF_RFCR_DROP_OTHER_BEACON BIT(11) -#define MT_WF_RFCR_DROP_FRAME_REPORT BIT(12) -#define MT_WF_RFCR_DROP_CTL_RSV BIT(13) -#define MT_WF_RFCR_DROP_CTS BIT(14) -#define MT_WF_RFCR_DROP_RTS BIT(15) -#define MT_WF_RFCR_DROP_DUPLICATE BIT(16) -#define MT_WF_RFCR_DROP_OTHER_BSS BIT(17) -#define MT_WF_RFCR_DROP_OTHER_UC BIT(18) -#define MT_WF_RFCR_DROP_OTHER_TIM BIT(19) -#define MT_WF_RFCR_DROP_NDPA BIT(20) -#define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21) - -#define MT_WF_RFCR1(_band) MT_WF_RMAC(_band, 0x004) -#define MT_WF_RFCR1_DROP_ACK BIT(4) -#define MT_WF_RFCR1_DROP_BF_POLL BIT(5) -#define MT_WF_RFCR1_DROP_BA BIT(6) -#define MT_WF_RFCR1_DROP_CFEND BIT(7) -#define MT_WF_RFCR1_DROP_CFACK BIT(8) - -#define MT_WF_RMAC_MIB_TIME0(_band) MT_WF_RMAC(_band, 0x03c4) -#define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31) -#define MT_WF_RMAC_MIB_RXTIME_EN BIT(30) - -#define MT_WF_RMAC_MIB_AIRTIME14(_band) MT_WF_RMAC(_band, 0x03b8) -#define MT_MIB_OBSSTIME_MASK GENMASK(23, 0) -#define MT_WF_RMAC_MIB_AIRTIME0(_band) MT_WF_RMAC(_band, 0x0380) - -/* WFDMA0 */ -#define MT_WFDMA0_BASE 0xd4000 -#define MT_WFDMA0(ofs) (MT_WFDMA0_BASE + (ofs)) - -#define MT_WFDMA0_RST MT_WFDMA0(0x100) -#define MT_WFDMA0_RST_LOGIC_RST BIT(4) -#define MT_WFDMA0_RST_DMASHDL_ALL_RST BIT(5) - -#define MT_WFDMA0_BUSY_ENA MT_WFDMA0(0x13c) -#define MT_WFDMA0_BUSY_ENA_TX_FIFO0 BIT(0) -#define MT_WFDMA0_BUSY_ENA_TX_FIFO1 BIT(1) -#define MT_WFDMA0_BUSY_ENA_RX_FIFO BIT(2) - -#define MT_MCU_CMD MT_WFDMA0(0x1f0) -#define MT_MCU_CMD_WAKE_RX_PCIE BIT(0) -#define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1) -#define MT_MCU_CMD_STOP_DMA BIT(2) -#define MT_MCU_CMD_RESET_DONE BIT(3) -#define MT_MCU_CMD_RECOVERY_DONE BIT(4) -#define MT_MCU_CMD_NORMAL_STATE BIT(5) -#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1) - -#define MT_MCU2HOST_SW_INT_ENA MT_WFDMA0(0x1f4) - -#define MT_WFDMA0_HOST_INT_STA MT_WFDMA0(0x200) -#define HOST_RX_DONE_INT_STS0 BIT(0) /* Rx mcu */ -#define HOST_RX_DONE_INT_STS2 BIT(2) /* Rx data */ -#define HOST_RX_DONE_INT_STS4 BIT(22) /* Rx mcu after fw downloaded */ -#define HOST_TX_DONE_INT_STS16 BIT(26) -#define HOST_TX_DONE_INT_STS17 BIT(27) /* MCU tx done*/ - #define MT_WFDMA0_HOST_INT_ENA MT_WFDMA0(0x204) -#define HOST_RX_DONE_INT_ENA0 BIT(0) -#define HOST_RX_DONE_INT_ENA1 BIT(1) -#define HOST_RX_DONE_INT_ENA2 BIT(2) -#define HOST_RX_DONE_INT_ENA3 BIT(3) -#define HOST_TX_DONE_INT_ENA0 BIT(4) -#define HOST_TX_DONE_INT_ENA1 BIT(5) -#define HOST_TX_DONE_INT_ENA2 BIT(6) -#define HOST_TX_DONE_INT_ENA3 BIT(7) -#define HOST_TX_DONE_INT_ENA4 BIT(8) -#define HOST_TX_DONE_INT_ENA5 BIT(9) -#define HOST_TX_DONE_INT_ENA6 BIT(10) -#define HOST_TX_DONE_INT_ENA7 BIT(11) #define HOST_TX_DONE_INT_ENA8 BIT(12) #define HOST_TX_DONE_INT_ENA9 BIT(13) #define HOST_TX_DONE_INT_ENA10 BIT(14) @@ -328,14 +36,10 @@ #define HOST_TX_DONE_INT_ENA12 BIT(16) #define HOST_TX_DONE_INT_ENA13 BIT(17) #define HOST_TX_DONE_INT_ENA14 BIT(18) -#define HOST_RX_COHERENT_EN BIT(20) -#define HOST_TX_COHERENT_EN BIT(21) #define 
HOST_RX_DONE_INT_ENA4 BIT(22) #define HOST_RX_DONE_INT_ENA5 BIT(23) #define HOST_TX_DONE_INT_ENA16 BIT(26) #define HOST_TX_DONE_INT_ENA17 BIT(27) -#define MCU2HOST_SW_INT_ENA BIT(29) -#define HOST_TX_DONE_INT_ENA18 BIT(30) /* WFDMA interrupt */ #define MT_INT_RX_DONE_DATA HOST_RX_DONE_INT_ENA2 @@ -347,7 +51,6 @@ #define MT_INT_TX_DONE_MCU_WM HOST_TX_DONE_INT_ENA17 #define MT_INT_TX_DONE_FWDL HOST_TX_DONE_INT_ENA16 #define MT_INT_TX_DONE_BAND0 HOST_TX_DONE_INT_ENA0 -#define MT_INT_MCU_CMD MCU2HOST_SW_INT_ENA #define MT_INT_TX_DONE_MCU (MT_INT_TX_DONE_MCU_WM | \ MT_INT_TX_DONE_FWDL) @@ -355,56 +58,8 @@ MT_INT_TX_DONE_BAND0 | \ GENMASK(18, 4)) -#define MT_WFDMA0_GLO_CFG MT_WFDMA0(0x208) -#define MT_WFDMA0_GLO_CFG_TX_DMA_EN BIT(0) -#define MT_WFDMA0_GLO_CFG_TX_DMA_BUSY BIT(1) -#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2) -#define MT_WFDMA0_GLO_CFG_RX_DMA_BUSY BIT(3) -#define MT_WFDMA0_GLO_CFG_TX_WB_DDONE BIT(6) -#define MT_WFDMA0_GLO_CFG_FW_DWLD_BYPASS_DMASHDL BIT(9) -#define MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12) -#define MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN BIT(15) -#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21) -#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO BIT(27) -#define MT_WFDMA0_GLO_CFG_OMIT_TX_INFO BIT(28) -#define MT_WFDMA0_GLO_CFG_CLK_GAT_DIS BIT(30) - -#define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c) -#define MT_WFDMA0_GLO_CFG_EXT0 MT_WFDMA0(0x2b0) -#define MT_WFDMA0_CSR_TX_DMASHDL_ENABLE BIT(6) -#define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0) - #define MT_RX_DATA_RING_BASE MT_WFDMA0(0x520) -#define MT_WFDMA0_TX_RING0_EXT_CTRL MT_WFDMA0(0x600) -#define MT_WFDMA0_TX_RING1_EXT_CTRL MT_WFDMA0(0x604) -#define MT_WFDMA0_TX_RING2_EXT_CTRL MT_WFDMA0(0x608) -#define MT_WFDMA0_TX_RING3_EXT_CTRL MT_WFDMA0(0x60c) -#define MT_WFDMA0_TX_RING4_EXT_CTRL MT_WFDMA0(0x610) -#define MT_WFDMA0_TX_RING5_EXT_CTRL MT_WFDMA0(0x614) -#define MT_WFDMA0_TX_RING6_EXT_CTRL MT_WFDMA0(0x618) -#define MT_WFDMA0_TX_RING16_EXT_CTRL MT_WFDMA0(0x640) -#define MT_WFDMA0_TX_RING17_EXT_CTRL MT_WFDMA0(0x644) - -#define MT_WPDMA0_MAX_CNT_MASK GENMASK(7, 0) -#define MT_WPDMA0_BASE_PTR_MASK GENMASK(31, 16) - -#define MT_WFDMA0_RX_RING0_EXT_CTRL MT_WFDMA0(0x680) -#define MT_WFDMA0_RX_RING1_EXT_CTRL MT_WFDMA0(0x684) -#define MT_WFDMA0_RX_RING2_EXT_CTRL MT_WFDMA0(0x688) -#define MT_WFDMA0_RX_RING3_EXT_CTRL MT_WFDMA0(0x68c) -#define MT_WFDMA0_RX_RING4_EXT_CTRL MT_WFDMA0(0x690) -#define MT_WFDMA0_RX_RING5_EXT_CTRL MT_WFDMA0(0x694) - -#define MT_TX_RING_BASE MT_WFDMA0(0x300) -#define MT_RX_EVENT_RING_BASE MT_WFDMA0(0x500) - -/* WFDMA CSR */ -#define MT_WFDMA_EXT_CSR_BASE 0xd7000 -#define MT_WFDMA_EXT_CSR(ofs) (MT_WFDMA_EXT_CSR_BASE + (ofs)) -#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44) -#define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0) - #define MT_INFRA_CFG_BASE 0xfe000 #define MT_INFRA(ofs) (MT_INFRA_CFG_BASE + (ofs)) @@ -414,121 +69,13 @@ #define MT_HIF_REMAP_L1_BASE GENMASK(31, 16) #define MT_HIF_REMAP_BASE_L1 0x40000 -#define MT_SWDEF_BASE 0x41f200 -#define MT_SWDEF(ofs) (MT_SWDEF_BASE + (ofs)) -#define MT_SWDEF_MODE MT_SWDEF(0x3c) -#define MT_SWDEF_NORMAL_MODE 0 -#define MT_SWDEF_ICAP_MODE 1 -#define MT_SWDEF_SPECTRUM_MODE 2 - -#define MT_TOP_BASE 0x18060000 -#define MT_TOP(ofs) (MT_TOP_BASE + (ofs)) - -#define MT_TOP_LPCR_HOST_BAND0 MT_TOP(0x10) -#define MT_TOP_LPCR_HOST_FW_OWN BIT(0) -#define MT_TOP_LPCR_HOST_DRV_OWN BIT(1) - -#define MT_TOP_MISC MT_TOP(0xf0) -#define MT_TOP_MISC_FW_STATE GENMASK(2, 0) - -#define MT_MCU_WPDMA0_BASE 0x54000000 -#define MT_MCU_WPDMA0(ofs) (MT_MCU_WPDMA0_BASE + (ofs)) - 
-#define MT_WFDMA_DUMMY_CR MT_MCU_WPDMA0(0x120) -#define MT_WFDMA_NEED_REINIT BIT(1) - -#define MT_CBTOP_RGU(ofs) (0x70002000 + (ofs)) -#define MT_CBTOP_RGU_WF_SUBSYS_RST MT_CBTOP_RGU(0x600) -#define MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH BIT(0) - -#define MT_HW_BOUND 0x70010020 -#define MT_HW_CHIPID 0x70010200 -#define MT_HW_REV 0x70010204 - -#define MT_PCIE_MAC_BASE 0x10000 -#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs)) -#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188) -#define MT_PCIE_MAC_PM MT_PCIE_MAC(0x194) -#define MT_PCIE_MAC_PM_L0S_DIS BIT(8) - -#define MT_DMA_SHDL(ofs) (0x7c026000 + (ofs)) -#define MT_DMASHDL_SW_CONTROL MT_DMA_SHDL(0x004) -#define MT_DMASHDL_DMASHDL_BYPASS BIT(28) -#define MT_DMASHDL_OPTIONAL MT_DMA_SHDL(0x008) -#define MT_DMASHDL_PAGE MT_DMA_SHDL(0x00c) -#define MT_DMASHDL_GROUP_SEQ_ORDER BIT(16) -#define MT_DMASHDL_REFILL MT_DMA_SHDL(0x010) -#define MT_DMASHDL_REFILL_MASK GENMASK(31, 16) -#define MT_DMASHDL_PKT_MAX_SIZE MT_DMA_SHDL(0x01c) -#define MT_DMASHDL_PKT_MAX_SIZE_PLE GENMASK(11, 0) -#define MT_DMASHDL_PKT_MAX_SIZE_PSE GENMASK(27, 16) - -#define MT_DMASHDL_GROUP_QUOTA(_n) MT_DMA_SHDL(0x020 + ((_n) << 2)) -#define MT_DMASHDL_GROUP_QUOTA_MIN GENMASK(11, 0) -#define MT_DMASHDL_GROUP_QUOTA_MAX GENMASK(27, 16) - -#define MT_DMASHDL_Q_MAP(_n) MT_DMA_SHDL(0x060 + ((_n) << 2)) -#define MT_DMASHDL_Q_MAP_MASK GENMASK(3, 0) -#define MT_DMASHDL_Q_MAP_SHIFT(_n) (4 * ((_n) % 8)) - -#define MT_DMASHDL_SCHED_SET(_n) MT_DMA_SHDL(0x070 + ((_n) << 2)) - -#define MT_WFDMA_HOST_CONFIG 0x7c027030 -#define MT_WFDMA_HOST_CONFIG_USB_RXEVT_EP4_EN BIT(6) - -#define MT_UMAC(ofs) (0x74000000 + (ofs)) -#define MT_UDMA_TX_QSEL MT_UMAC(0x008) -#define MT_FW_DL_EN BIT(3) - -#define MT_UDMA_WLCFG_1 MT_UMAC(0x00c) -#define MT_WL_RX_AGG_PKT_LMT GENMASK(7, 0) -#define MT_WL_TX_TMOUT_LMT GENMASK(27, 8) - -#define MT_UDMA_WLCFG_0 MT_UMAC(0x18) -#define MT_WL_RX_AGG_TO GENMASK(7, 0) -#define MT_WL_RX_AGG_LMT GENMASK(15, 8) -#define MT_WL_TX_TMOUT_FUNC_EN BIT(16) -#define MT_WL_TX_DPH_CHK_EN BIT(17) -#define MT_WL_RX_MPSZ_PAD0 BIT(18) -#define MT_WL_RX_FLUSH BIT(19) -#define MT_TICK_1US_EN BIT(20) -#define MT_WL_RX_AGG_EN BIT(21) -#define MT_WL_RX_EN BIT(22) -#define MT_WL_TX_EN BIT(23) -#define MT_WL_RX_BUSY BIT(30) -#define MT_WL_TX_BUSY BIT(31) - -#define MT_UDMA_CONN_INFRA_STATUS MT_UMAC(0xa20) -#define MT_UDMA_CONN_WFSYS_INIT_DONE BIT(22) -#define MT_UDMA_CONN_INFRA_STATUS_SEL MT_UMAC(0xa24) - -#define MT_SSUSB_EPCTL_CSR(ofs) (0x74011800 + (ofs)) -#define MT_SSUSB_EPCTL_CSR_EP_RST_OPT MT_SSUSB_EPCTL_CSR(0x090) - -#define MT_UWFDMA0(ofs) (0x7c024000 + (ofs)) -#define MT_UWFDMA0_GLO_CFG MT_UWFDMA0(0x208) -#define MT_UWFDMA0_GLO_CFG_EXT0 MT_UWFDMA0(0x2b0) -#define MT_UWFDMA0_TX_RING_EXT_CTRL(_n) MT_UWFDMA0(0x600 + ((_n) << 2)) - -#define MT_CONN_STATUS 0x7c053c10 -#define MT_WIFI_PATCH_DL_STATE BIT(0) - -#define MT_CONN_ON_LPCTL 0x7c060010 -#define PCIE_LPCR_HOST_OWN_SYNC BIT(2) -#define PCIE_LPCR_HOST_CLR_OWN BIT(1) -#define PCIE_LPCR_HOST_SET_OWN BIT(0) - #define MT_WFSYS_SW_RST_B 0x18000140 -#define WFSYS_SW_RST_B BIT(0) -#define WFSYS_SW_INIT_DONE BIT(4) -#define MT_CONN_ON_MISC 0x7c0600f0 -#define MT_TOP_MISC2_FW_PWR_ON BIT(0) -#define MT_TOP_MISC2_FW_N9_RDY GENMASK(1, 0) +#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x200) +#define MT_WTBLON_TOP_WDUCR_GROUP GENMASK(2, 0) -#define MT_WF_SW_DEF_CR(ofs) (0x401a00 + (ofs)) -#define MT_WF_SW_DEF_CR_USB_MCU_EVENT MT_WF_SW_DEF_CR(0x028) -#define MT_WF_SW_SER_TRIGGER_SUSPEND BIT(6) -#define MT_WF_SW_SER_DONE_SUSPEND BIT(7) +#define 
MT_WTBL_UPDATE MT_WTBLON_TOP(0x230) +#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(9, 0) +#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12) #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c index a77a309c0d60..dc1beb76df3e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c @@ -27,7 +27,7 @@ static void mt7921s_txrx_worker(struct mt76_worker *w) struct mt76_sdio *sdio = container_of(w, struct mt76_sdio, txrx_worker); struct mt76_dev *mdev = container_of(sdio, struct mt76_dev, sdio); - struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { queue_work(mdev->wq, &dev->pm.wake_work); @@ -38,7 +38,7 @@ static void mt7921s_txrx_worker(struct mt76_worker *w) mt76_connac_pm_unref(&dev->mphy, &dev->pm); } -static void mt7921s_unregister_device(struct mt7921_dev *dev) +static void mt7921s_unregister_device(struct mt792x_dev *dev) { struct mt76_connac_pm *pm = &dev->pm; @@ -102,7 +102,7 @@ static int mt7921s_probe(struct sdio_func *func, .sta_add = mt7921_mac_sta_add, .sta_assoc = mt7921_mac_sta_assoc, .sta_remove = mt7921_mac_sta_remove, - .update_survey = mt7921_update_channel, + .update_survey = mt792x_update_channel, }; static const struct mt76_bus_ops mt7921s_ops = { .rr = mt76s_rr, @@ -114,7 +114,7 @@ static int mt7921s_probe(struct sdio_func *func, .rd_rp = mt76s_rd_rp, .type = MT76_BUS_SDIO, }; - static const struct mt7921_hif_ops mt7921_sdio_ops = { + static const struct mt792x_hif_ops mt7921_sdio_ops = { .init_reset = mt7921s_init_reset, .reset = mt7921s_mac_reset, .mcu_init = mt7921s_mcu_init, @@ -122,13 +122,13 @@ static int mt7921s_probe(struct sdio_func *func, .fw_own = mt7921s_mcu_fw_pmctrl, }; struct ieee80211_ops *ops; - struct mt7921_dev *dev; + struct mt792x_dev *dev; struct mt76_dev *mdev; u8 features; int ret; - ops = mt7921_get_mac80211_ops(&func->dev, (void *)id->driver_data, - &features); + ops = mt792x_get_mac80211_ops(&func->dev, &mt7921_ops, + (void *)id->driver_data, &features); if (!ops) return -ENOMEM; @@ -136,7 +136,7 @@ static int mt7921s_probe(struct sdio_func *func, if (!mdev) return -ENOMEM; - dev = container_of(mdev, struct mt7921_dev, mt76); + dev = container_of(mdev, struct mt792x_dev, mt76); dev->fw_features = features; dev->hif_ops = &mt7921_sdio_ops; sdio_set_drvdata(func, dev); @@ -196,7 +196,7 @@ error: static void mt7921s_remove(struct sdio_func *func) { - struct mt7921_dev *dev = sdio_get_drvdata(func); + struct mt792x_dev *dev = sdio_get_drvdata(func); mt7921s_unregister_device(dev); } @@ -204,7 +204,7 @@ static void mt7921s_remove(struct sdio_func *func) static int mt7921s_suspend(struct device *__dev) { struct sdio_func *func = dev_to_sdio_func(__dev); - struct mt7921_dev *dev = sdio_get_drvdata(func); + struct mt792x_dev *dev = sdio_get_drvdata(func); struct mt76_connac_pm *pm = &dev->pm; struct mt76_dev *mdev = &dev->mt76; int err; @@ -216,7 +216,7 @@ static int mt7921s_suspend(struct device *__dev) cancel_delayed_work_sync(&pm->ps_work); cancel_work_sync(&pm->wake_work); - err = mt7921_mcu_drv_pmctrl(dev); + err = mt792x_mcu_drv_pmctrl(dev); if (err < 0) goto restore_suspend; @@ -244,7 +244,7 @@ static int mt7921s_suspend(struct device *__dev) mt76_worker_disable(&mdev->sdio.txrx_worker); mt76_worker_disable(&mdev->sdio.net_worker); - err = mt7921_mcu_fw_pmctrl(dev); + err = 
mt792x_mcu_fw_pmctrl(dev); if (err) goto restore_txrx_worker; @@ -269,7 +269,7 @@ restore_suspend: pm->suspended = false; if (err < 0) - mt7921_reset(&dev->mt76); + mt792x_reset(&dev->mt76); return err; } @@ -277,14 +277,14 @@ restore_suspend: static int mt7921s_resume(struct device *__dev) { struct sdio_func *func = dev_to_sdio_func(__dev); - struct mt7921_dev *dev = sdio_get_drvdata(func); + struct mt792x_dev *dev = sdio_get_drvdata(func); struct mt76_connac_pm *pm = &dev->pm; struct mt76_dev *mdev = &dev->mt76; int err; clear_bit(MT76_STATE_SUSPEND, &mdev->phy.state); - err = mt7921_mcu_drv_pmctrl(dev); + err = mt792x_mcu_drv_pmctrl(dev); if (err < 0) goto failed; @@ -302,7 +302,7 @@ failed: pm->suspended = false; if (err < 0) - mt7921_reset(&dev->mt76); + mt792x_reset(&dev->mt76); return err; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c index cff9925c41ea..8edd0291c128 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c @@ -30,7 +30,7 @@ static u32 mt7921s_read_whcr(struct mt76_dev *dev) return sdio_readl(dev->sdio.func, MCR_WHCR, NULL); } -int mt7921s_wfsys_reset(struct mt7921_dev *dev) +int mt7921s_wfsys_reset(struct mt792x_dev *dev) { struct mt76_sdio *sdio = &dev->mt76.sdio; u32 val, status; @@ -71,7 +71,7 @@ int mt7921s_wfsys_reset(struct mt7921_dev *dev) return 0; } -int mt7921s_init_reset(struct mt7921_dev *dev) +int mt7921s_init_reset(struct mt792x_dev *dev) { set_bit(MT76_MCU_RESET, &dev->mphy.state); @@ -91,7 +91,7 @@ int mt7921s_init_reset(struct mt7921_dev *dev) return 0; } -int mt7921s_mac_reset(struct mt7921_dev *dev) +int mt7921s_mac_reset(struct mt792x_dev *dev) { int err; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c index 177679ce1c80..5e4501d7f1c0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c @@ -16,14 +16,14 @@ static int mt7921s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, int cmd, int *seq) { - struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); enum mt7921_sdio_pkt_type type = MT7921_SDIO_CMD; enum mt76_mcuq_id txq = MT_MCUQ_WM; int ret, pad; /* We just return in case of firmware assertion to avoid blocking the * common workqueue from running; for example, the coredump work might be - * blocked by mt7921_mac_work that is excuting register access via sdio + * blocked by mt792x_mac_work that is executing register access via sdio * bus. 
*/ if (dev->fw_assert) @@ -38,7 +38,7 @@ mt7921s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, if (cmd == MCU_CMD(FW_SCATTER)) type = MT7921_SDIO_FWDL; - mt7921_skb_add_usb_sdio_hdr(dev, skb, type); + mt792x_skb_add_usb_sdio_hdr(dev, skb, type); pad = round_up(skb->len, 4) - skb->len; __skb_put_zero(skb, pad); @@ -51,14 +51,14 @@ mt7921s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, return ret; } -static u32 mt7921s_read_rm3r(struct mt7921_dev *dev) +static u32 mt7921s_read_rm3r(struct mt792x_dev *dev) { struct mt76_sdio *sdio = &dev->mt76.sdio; return sdio_readl(sdio->func, MCR_D2HRM3R, NULL); } -static u32 mt7921s_clear_rm3r_drv_own(struct mt7921_dev *dev) +static u32 mt7921s_clear_rm3r_drv_own(struct mt792x_dev *dev) { struct mt76_sdio *sdio = &dev->mt76.sdio; u32 val; @@ -71,7 +71,7 @@ static u32 mt7921s_clear_rm3r_drv_own(struct mt7921_dev *dev) return val; } -int mt7921s_mcu_init(struct mt7921_dev *dev) +int mt7921s_mcu_init(struct mt792x_dev *dev) { static const struct mt76_mcu_ops mt7921s_mcu_ops = { .headroom = MT_SDIO_HDR_SIZE + @@ -97,7 +97,7 @@ int mt7921s_mcu_init(struct mt7921_dev *dev) return 0; } -int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev) +int mt7921s_mcu_drv_pmctrl(struct mt792x_dev *dev) { struct sdio_func *func = dev->mt76.sdio.func; struct mt76_phy *mphy = &dev->mt76.phy; @@ -133,7 +133,7 @@ int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev) return 0; } -int mt7921s_mcu_fw_pmctrl(struct mt7921_dev *dev) +int mt7921s_mcu_fw_pmctrl(struct mt792x_dev *dev) { struct sdio_func *func = dev->mt76.sdio.func; struct mt76_phy *mphy = &dev->mt76.phy; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c index 7f408212e716..e838d93477c1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c @@ -31,7 +31,7 @@ static const struct nla_policy mt7921_tm_policy[NUM_MT7921_TM_ATTRS] = { }; static int -mt7921_tm_set(struct mt7921_dev *dev, struct mt7921_tm_cmd *req) +mt7921_tm_set(struct mt792x_dev *dev, struct mt7921_tm_cmd *req) { struct mt7921_rftest_cmd cmd = { .action = req->action, @@ -57,7 +57,7 @@ mt7921_tm_set(struct mt7921_dev *dev, struct mt7921_tm_cmd *req) pm->enable = false; cancel_delayed_work_sync(&pm->ps_work); cancel_work_sync(&pm->wake_work); - __mt7921_mcu_drv_pmctrl(dev); + __mt792x_mcu_drv_pmctrl(dev); phy->test.state = MT76_TM_STATE_ON; } @@ -82,7 +82,7 @@ out: } static int -mt7921_tm_query(struct mt7921_dev *dev, struct mt7921_tm_cmd *req, +mt7921_tm_query(struct mt792x_dev *dev, struct mt7921_tm_cmd *req, struct mt7921_tm_evt *evt_resp) { struct mt7921_rftest_cmd cmd = { @@ -113,7 +113,7 @@ int mt7921_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, { struct nlattr *tb[NUM_MT76_TM_ATTRS]; struct mt76_phy *mphy = hw->priv; - struct mt7921_phy *phy = mphy->priv; + struct mt792x_phy *phy = mphy->priv; int err; if (!test_bit(MT76_STATE_RUNNING, &mphy->state) || @@ -150,7 +150,7 @@ int mt7921_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg, { struct nlattr *tb[NUM_MT76_TM_ATTRS]; struct mt76_phy *mphy = hw->priv; - struct mt7921_phy *phy = mphy->priv; + struct mt792x_phy *phy = mphy->priv; int err; if (!test_bit(MT76_STATE_RUNNING, &mphy->state) || diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/trace.c b/drivers/net/wireless/mediatek/mt76/mt7921/trace.c deleted file mode 100644 index 4dc3c7b89ebd..000000000000 --- 
a/drivers/net/wireless/mediatek/mt76/mt7921/trace.c +++ /dev/null @@ -1,12 +0,0 @@ -// SPDX-License-Identifier: ISC -/* - * Copyright (C) 2021 Lorenzo Bianconi <lorenzo@kernel.org> - */ - -#include <linux/module.h> - -#ifndef __CHECKER__ -#define CREATE_TRACE_POINTS -#include "mt7921_trace.h" - -#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c index 1f302c430339..e5258c74fc07 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c @@ -24,88 +24,11 @@ static const struct usb_device_id mt7921u_device_table[] = { { }, }; -static u32 mt7921u_rr(struct mt76_dev *dev, u32 addr) -{ - u32 ret; - - mutex_lock(&dev->usb.usb_ctrl_mtx); - ret = ___mt76u_rr(dev, MT_VEND_READ_EXT, - USB_DIR_IN | MT_USB_TYPE_VENDOR, addr); - mutex_unlock(&dev->usb.usb_ctrl_mtx); - - return ret; -} - -static void mt7921u_wr(struct mt76_dev *dev, u32 addr, u32 val) -{ - mutex_lock(&dev->usb.usb_ctrl_mtx); - ___mt76u_wr(dev, MT_VEND_WRITE_EXT, - USB_DIR_OUT | MT_USB_TYPE_VENDOR, addr, val); - mutex_unlock(&dev->usb.usb_ctrl_mtx); -} - -static u32 mt7921u_rmw(struct mt76_dev *dev, u32 addr, - u32 mask, u32 val) -{ - mutex_lock(&dev->usb.usb_ctrl_mtx); - val |= ___mt76u_rr(dev, MT_VEND_READ_EXT, - USB_DIR_IN | MT_USB_TYPE_VENDOR, addr) & ~mask; - ___mt76u_wr(dev, MT_VEND_WRITE_EXT, - USB_DIR_OUT | MT_USB_TYPE_VENDOR, addr, val); - mutex_unlock(&dev->usb.usb_ctrl_mtx); - - return val; -} - -static void mt7921u_copy(struct mt76_dev *dev, u32 offset, - const void *data, int len) -{ - struct mt76_usb *usb = &dev->usb; - int ret, i = 0, batch_len; - const u8 *val = data; - - len = round_up(len, 4); - - mutex_lock(&usb->usb_ctrl_mtx); - while (i < len) { - batch_len = min_t(int, usb->data_len, len - i); - memcpy(usb->data, val + i, batch_len); - ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT, - USB_DIR_OUT | MT_USB_TYPE_VENDOR, - (offset + i) >> 16, offset + i, - usb->data, batch_len); - if (ret < 0) - break; - - i += batch_len; - } - mutex_unlock(&usb->usb_ctrl_mtx); -} - -int mt7921u_mcu_power_on(struct mt7921_dev *dev) -{ - int ret; - - ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON, - USB_DIR_OUT | MT_USB_TYPE_VENDOR, - 0x0, 0x1, NULL, 0); - if (ret) - return ret; - - if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON, - MT_TOP_MISC2_FW_PWR_ON, 500)) { - dev_err(dev->mt76.dev, "Timeout for power on\n"); - ret = -EIO; - } - - return ret; -} - static int mt7921u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, int cmd, int *seq) { - struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); u32 pad, ep; int ret; @@ -120,7 +43,7 @@ mt7921u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, else ep = MT_EP_OUT_AC_BE; - mt7921_skb_add_usb_sdio_hdr(dev, skb, 0); + mt792x_skb_add_usb_sdio_hdr(dev, skb, 0); pad = round_up(skb->len, 4) + 4 - skb->len; __skb_put_zero(skb, pad); @@ -131,7 +54,7 @@ mt7921u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, return ret; } -static int mt7921u_mcu_init(struct mt7921_dev *dev) +static int mt7921u_mcu_init(struct mt792x_dev *dev) { static const struct mt76_mcu_ops mcu_ops = { .headroom = MT_SDIO_HDR_SIZE + @@ -155,20 +78,61 @@ static int mt7921u_mcu_init(struct mt7921_dev *dev) return 0; } -static void mt7921u_stop(struct ieee80211_hw *hw) +static int mt7921u_mac_reset(struct mt792x_dev *dev) { - struct mt7921_dev *dev = 
mt7921_hw_dev(hw); + int err; - mt76u_stop_tx(&dev->mt76); - mt7921_stop(hw); -} + mt76_txq_schedule_all(&dev->mphy); + mt76_worker_disable(&dev->mt76.tx_worker); -static void mt7921u_cleanup(struct mt7921_dev *dev) -{ - clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); - mt7921u_wfsys_reset(dev); + set_bit(MT76_RESET, &dev->mphy.state); + set_bit(MT76_MCU_RESET, &dev->mphy.state); + + wake_up(&dev->mt76.mcu.wait); skb_queue_purge(&dev->mt76.mcu.res_q); - mt76u_queues_deinit(&dev->mt76); + + mt76u_stop_rx(&dev->mt76); + mt76u_stop_tx(&dev->mt76); + + mt792xu_wfsys_reset(dev); + + clear_bit(MT76_MCU_RESET, &dev->mphy.state); + err = mt76u_resume_rx(&dev->mt76); + if (err) + goto out; + + err = mt792xu_mcu_power_on(dev); + if (err) + goto out; + + err = mt792xu_dma_init(dev, false); + if (err) + goto out; + + mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE); + mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN); + + err = mt7921_run_firmware(dev); + if (err) + goto out; + + mt76_clear(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN); + + err = mt7921_mcu_set_eeprom(dev); + if (err) + goto out; + + err = mt7921_mac_init(dev); + if (err) + goto out; + + err = __mt7921_start(&dev->phy); +out: + clear_bit(MT76_RESET, &dev->mphy.state); + + mt76_worker_enable(&dev->mt76.tx_worker); + + return err; } static int mt7921u_probe(struct usb_interface *usb_intf, @@ -189,40 +153,40 @@ static int mt7921u_probe(struct usb_interface *usb_intf, .sta_add = mt7921_mac_sta_add, .sta_assoc = mt7921_mac_sta_assoc, .sta_remove = mt7921_mac_sta_remove, - .update_survey = mt7921_update_channel, + .update_survey = mt792x_update_channel, }; - static const struct mt7921_hif_ops hif_ops = { + static const struct mt792x_hif_ops hif_ops = { .mcu_init = mt7921u_mcu_init, - .init_reset = mt7921u_init_reset, + .init_reset = mt792xu_init_reset, .reset = mt7921u_mac_reset, }; static struct mt76_bus_ops bus_ops = { - .rr = mt7921u_rr, - .wr = mt7921u_wr, - .rmw = mt7921u_rmw, + .rr = mt792xu_rr, + .wr = mt792xu_wr, + .rmw = mt792xu_rmw, .read_copy = mt76u_read_copy, - .write_copy = mt7921u_copy, + .write_copy = mt792xu_copy, .type = MT76_BUS_USB, }; struct usb_device *udev = interface_to_usbdev(usb_intf); struct ieee80211_ops *ops; struct ieee80211_hw *hw; - struct mt7921_dev *dev; + struct mt792x_dev *dev; struct mt76_dev *mdev; u8 features; int ret; - ops = mt7921_get_mac80211_ops(&usb_intf->dev, (void *)id->driver_info, - &features); + ops = mt792x_get_mac80211_ops(&usb_intf->dev, &mt7921_ops, + (void *)id->driver_info, &features); if (!ops) return -ENOMEM; - ops->stop = mt7921u_stop; + ops->stop = mt792xu_stop; mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), ops, &drv_ops); if (!mdev) return -ENOMEM; - dev = container_of(mdev, struct mt7921_dev, mt76); + dev = container_of(mdev, struct mt792x_dev, mt76); dev->fw_features = features; dev->hif_ops = &hif_ops; @@ -240,12 +204,12 @@ static int mt7921u_probe(struct usb_interface *usb_intf, dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev); if (mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY)) { - ret = mt7921u_wfsys_reset(dev); + ret = mt792xu_wfsys_reset(dev); if (ret) goto error; } - ret = mt7921u_mcu_power_on(dev); + ret = mt792xu_mcu_power_on(dev); if (ret) goto error; @@ -257,7 +221,7 @@ static int mt7921u_probe(struct usb_interface *usb_intf, if (ret) goto error; - ret = mt7921u_dma_init(dev, false); + ret = mt792xu_dma_init(dev, false); if (ret) goto error; @@ -282,27 +246,10 @@ error: return ret; } -static void mt7921u_disconnect(struct usb_interface *usb_intf) -{ 
- struct mt7921_dev *dev = usb_get_intfdata(usb_intf); - - cancel_work_sync(&dev->init_work); - if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state)) - return; - - mt76_unregister_device(&dev->mt76); - mt7921u_cleanup(dev); - - usb_set_intfdata(usb_intf, NULL); - usb_put_dev(interface_to_usbdev(usb_intf)); - - mt76_free_device(&dev->mt76); -} - #ifdef CONFIG_PM static int mt7921u_suspend(struct usb_interface *intf, pm_message_t state) { - struct mt7921_dev *dev = usb_get_intfdata(intf); + struct mt792x_dev *dev = usb_get_intfdata(intf); struct mt76_connac_pm *pm = &dev->pm; int err; @@ -322,14 +269,14 @@ failed: pm->suspended = false; if (err < 0) - mt7921_reset(&dev->mt76); + mt792x_reset(&dev->mt76); return err; } static int mt7921u_resume(struct usb_interface *intf) { - struct mt7921_dev *dev = usb_get_intfdata(intf); + struct mt792x_dev *dev = usb_get_intfdata(intf); struct mt76_connac_pm *pm = &dev->pm; bool reinit = true; int err, i; @@ -349,8 +296,8 @@ static int mt7921u_resume(struct usb_interface *intf) msleep(20); } - if (reinit || mt7921_dma_need_reinit(dev)) { - err = mt7921u_dma_init(dev, true); + if (reinit || mt792x_dma_need_reinit(dev)) { + err = mt792xu_dma_init(dev, true); if (err) goto failed; } @@ -364,7 +311,7 @@ failed: pm->suspended = false; if (err < 0) - mt7921_reset(&dev->mt76); + mt792x_reset(&dev->mt76); return err; } @@ -378,7 +325,7 @@ static struct usb_driver mt7921u_driver = { .name = KBUILD_MODNAME, .id_table = mt7921u_device_table, .probe = mt7921u_probe, - .disconnect = mt7921u_disconnect, + .disconnect = mt792xu_disconnect, #ifdef CONFIG_PM .suspend = mt7921u_suspend, .resume = mt7921u_resume, diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c deleted file mode 100644 index 50eb6e7fd6b5..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c +++ /dev/null @@ -1,255 +0,0 @@ -// SPDX-License-Identifier: ISC -/* Copyright (C) 2022 MediaTek Inc. 
- * - * Author: Lorenzo Bianconi <lorenzo@kernel.org> - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/usb.h> - -#include "mt7921.h" -#include "mcu.h" -#include "../mt76_connac2_mac.h" - -static u32 mt7921u_uhw_rr(struct mt76_dev *dev, u32 addr) -{ - u32 ret; - - mutex_lock(&dev->usb.usb_ctrl_mtx); - ret = ___mt76u_rr(dev, MT_VEND_DEV_MODE, - USB_DIR_IN | MT_USB_TYPE_UHW_VENDOR, addr); - mutex_unlock(&dev->usb.usb_ctrl_mtx); - - return ret; -} - -static void mt7921u_uhw_wr(struct mt76_dev *dev, u32 addr, u32 val) -{ - mutex_lock(&dev->usb.usb_ctrl_mtx); - ___mt76u_wr(dev, MT_VEND_WRITE, - USB_DIR_OUT | MT_USB_TYPE_UHW_VENDOR, addr, val); - mutex_unlock(&dev->usb.usb_ctrl_mtx); -} - -static void mt7921u_dma_prefetch(struct mt7921_dev *dev) -{ - mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(0), - MT_WPDMA0_MAX_CNT_MASK, 4); - mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(0), - MT_WPDMA0_BASE_PTR_MASK, 0x80); - - mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(1), - MT_WPDMA0_MAX_CNT_MASK, 4); - mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(1), - MT_WPDMA0_BASE_PTR_MASK, 0xc0); - - mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(2), - MT_WPDMA0_MAX_CNT_MASK, 4); - mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(2), - MT_WPDMA0_BASE_PTR_MASK, 0x100); - - mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(3), - MT_WPDMA0_MAX_CNT_MASK, 4); - mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(3), - MT_WPDMA0_BASE_PTR_MASK, 0x140); - - mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(4), - MT_WPDMA0_MAX_CNT_MASK, 4); - mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(4), - MT_WPDMA0_BASE_PTR_MASK, 0x180); - - mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(16), - MT_WPDMA0_MAX_CNT_MASK, 4); - mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(16), - MT_WPDMA0_BASE_PTR_MASK, 0x280); - - mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(17), - MT_WPDMA0_MAX_CNT_MASK, 4); - mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(17), - MT_WPDMA0_BASE_PTR_MASK, 0x2c0); -} - -static void mt7921u_wfdma_init(struct mt7921_dev *dev) -{ - mt7921u_dma_prefetch(dev); - - mt76_clear(dev, MT_UWFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_OMIT_RX_INFO); - mt76_set(dev, MT_UWFDMA0_GLO_CFG, - MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | - MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 | - MT_WFDMA0_GLO_CFG_FW_DWLD_BYPASS_DMASHDL | - MT_WFDMA0_GLO_CFG_TX_DMA_EN | - MT_WFDMA0_GLO_CFG_RX_DMA_EN); - - /* disable dmashdl */ - mt76_clear(dev, MT_UWFDMA0_GLO_CFG_EXT0, - MT_WFDMA0_CSR_TX_DMASHDL_ENABLE); - mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS); - - mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT); -} - -static int mt7921u_dma_rx_evt_ep4(struct mt7921_dev *dev) -{ - if (!mt76_poll(dev, MT_UWFDMA0_GLO_CFG, - MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000)) - return -ETIMEDOUT; - - mt76_clear(dev, MT_UWFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_RX_DMA_EN); - mt76_set(dev, MT_WFDMA_HOST_CONFIG, - MT_WFDMA_HOST_CONFIG_USB_RXEVT_EP4_EN); - mt76_set(dev, MT_UWFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_RX_DMA_EN); - - return 0; -} - -static void mt7921u_epctl_rst_opt(struct mt7921_dev *dev, bool reset) -{ - u32 val; - - /* usb endpoint reset opt - * bits[4,9]: out blk ep 4-9 - * bits[20,21]: in blk ep 4-5 - * bits[22]: in int ep 6 - */ - val = mt7921u_uhw_rr(&dev->mt76, MT_SSUSB_EPCTL_CSR_EP_RST_OPT); - if (reset) - val |= GENMASK(9, 4) | GENMASK(22, 20); - else - val &= ~(GENMASK(9, 4) | GENMASK(22, 20)); - mt7921u_uhw_wr(&dev->mt76, MT_SSUSB_EPCTL_CSR_EP_RST_OPT, val); -} - -int mt7921u_dma_init(struct mt7921_dev *dev, bool resume) -{ - int err; - - mt7921u_wfdma_init(dev); - - mt76_clear(dev, MT_UDMA_WLCFG_0, 
MT_WL_RX_FLUSH); - - mt76_set(dev, MT_UDMA_WLCFG_0, - MT_WL_RX_EN | MT_WL_TX_EN | - MT_WL_RX_MPSZ_PAD0 | MT_TICK_1US_EN); - mt76_clear(dev, MT_UDMA_WLCFG_0, - MT_WL_RX_AGG_TO | MT_WL_RX_AGG_LMT); - mt76_clear(dev, MT_UDMA_WLCFG_1, MT_WL_RX_AGG_PKT_LMT); - - if (resume) - return 0; - - err = mt7921u_dma_rx_evt_ep4(dev); - if (err) - return err; - - mt7921u_epctl_rst_opt(dev, false); - - return 0; -} - -int mt7921u_wfsys_reset(struct mt7921_dev *dev) -{ - u32 val; - int i; - - mt7921u_epctl_rst_opt(dev, false); - - val = mt7921u_uhw_rr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST); - val |= MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH; - mt7921u_uhw_wr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST, val); - - usleep_range(10, 20); - - val = mt7921u_uhw_rr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST); - val &= ~MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH; - mt7921u_uhw_wr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST, val); - - mt7921u_uhw_wr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS_SEL, 0); - for (i = 0; i < MT7921_WFSYS_INIT_RETRY_COUNT; i++) { - val = mt7921u_uhw_rr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS); - if (val & MT_UDMA_CONN_WFSYS_INIT_DONE) - break; - - msleep(100); - } - - if (i == MT7921_WFSYS_INIT_RETRY_COUNT) - return -ETIMEDOUT; - - return 0; -} - -int mt7921u_init_reset(struct mt7921_dev *dev) -{ - set_bit(MT76_RESET, &dev->mphy.state); - - wake_up(&dev->mt76.mcu.wait); - skb_queue_purge(&dev->mt76.mcu.res_q); - - mt76u_stop_rx(&dev->mt76); - mt76u_stop_tx(&dev->mt76); - - mt7921u_wfsys_reset(dev); - - clear_bit(MT76_RESET, &dev->mphy.state); - - return mt76u_resume_rx(&dev->mt76); -} - -int mt7921u_mac_reset(struct mt7921_dev *dev) -{ - int err; - - mt76_txq_schedule_all(&dev->mphy); - mt76_worker_disable(&dev->mt76.tx_worker); - - set_bit(MT76_RESET, &dev->mphy.state); - set_bit(MT76_MCU_RESET, &dev->mphy.state); - - wake_up(&dev->mt76.mcu.wait); - skb_queue_purge(&dev->mt76.mcu.res_q); - - mt76u_stop_rx(&dev->mt76); - mt76u_stop_tx(&dev->mt76); - - mt7921u_wfsys_reset(dev); - - clear_bit(MT76_MCU_RESET, &dev->mphy.state); - err = mt76u_resume_rx(&dev->mt76); - if (err) - goto out; - - err = mt7921u_mcu_power_on(dev); - if (err) - goto out; - - err = mt7921u_dma_init(dev, false); - if (err) - goto out; - - mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE); - mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN); - - err = mt7921_run_firmware(dev); - if (err) - goto out; - - mt76_clear(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN); - - err = mt7921_mcu_set_eeprom(dev); - if (err) - goto out; - - err = mt7921_mac_init(dev); - if (err) - goto out; - - err = __mt7921_start(&dev->phy); -out: - clear_bit(MT76_RESET, &dev->mphy.state); - - mt76_worker_enable(&dev->mt76.tx_worker); - - return err; -} diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7925/Kconfig new file mode 100644 index 000000000000..5854e95e68a5 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/Kconfig @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: ISC +config MT7925_COMMON + tristate + select MT792x_LIB + select WANT_DEV_COREDUMP + +config MT7925E + tristate "MediaTek MT7925E (PCIe) support" + select MT7925_COMMON + depends on MAC80211 + depends on PCI + help + This adds support for MT7925-based wireless PCIe devices, + which support operation at 6GHz, 5GHz, and 2.4GHz IEEE 802.11be + 2x2:2SS 4096-QAM, 160MHz channels. + + To compile this driver as a module, choose M here. 
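The Kconfig above splits MT7925 support into a hidden MT7925_COMMON tristate plus user-visible bus front ends (MT7925E here, MT7925U next), matching the Makefile objects mt7925-common.o, mt7925e.o and mt7925u.o further down. The common module exports its shared helpers with EXPORT_SYMBOL_GPL() so the PCIe and USB modules can link against it at load time; a minimal sketch of that export/consume idiom, using hypothetical example_* names rather than symbols from this patch:

/* example-common.c -- built as example-common.ko when CONFIG_EXAMPLE_COMMON=m */
#include <linux/module.h>

int example_common_hw_init(void)
{
	/* shared bring-up logic used by every bus front end */
	return 0;
}
EXPORT_SYMBOL_GPL(example_common_hw_init);

MODULE_DESCRIPTION("Shared core for a hypothetical driver family");
MODULE_LICENSE("GPL");

A bus-specific module then calls example_common_hw_init() from its probe path, and because both front-end options select MT7925_COMMON, Kconfig guarantees the shared module is built whenever either bus driver is enabled.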
+ +config MT7925U + tristate "MediaTek MT7925U (USB) support" + select MT792x_USB + select MT7925_COMMON + depends on MAC80211 + depends on USB + help + This adds support for MT7925-based wireless USB devices, + which support operation at 6GHz, 5GHz, and 2.4GHz IEEE 802.11be + 2x2:2SS 4096-QAM, 160MHz channels. + + To compile this driver as a module, choose M here. diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/Makefile b/drivers/net/wireless/mediatek/mt76/mt7925/Makefile new file mode 100644 index 000000000000..d321e4ed732f --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: ISC + +obj-$(CONFIG_MT7925_COMMON) += mt7925-common.o +obj-$(CONFIG_MT7925E) += mt7925e.o +obj-$(CONFIG_MT7925U) += mt7925u.o + +mt7925-common-y := mac.o mcu.o main.o init.o debugfs.o +mt7925e-y := pci.o pci_mac.o pci_mcu.o +mt7925u-y := usb.o diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7925/debugfs.c new file mode 100644 index 000000000000..1e2fc6577e78 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/debugfs.c @@ -0,0 +1,319 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. */ + +#include "mt7925.h" +#include "mcu.h" + +static int +mt7925_reg_set(void *data, u64 val) +{ + struct mt792x_dev *dev = data; + u32 regval = val; + + mt792x_mutex_acquire(dev); + mt7925_mcu_regval(dev, dev->mt76.debugfs_reg, &regval, true); + mt792x_mutex_release(dev); + + return 0; +} + +static int +mt7925_reg_get(void *data, u64 *val) +{ + struct mt792x_dev *dev = data; + u32 regval; + int ret; + + mt792x_mutex_acquire(dev); + ret = mt7925_mcu_regval(dev, dev->mt76.debugfs_reg, &regval, false); + mt792x_mutex_release(dev); + if (!ret) + *val = regval; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt7925_reg_get, mt7925_reg_set, + "0x%08llx\n"); +static int +mt7925_fw_debug_set(void *data, u64 val) +{ + struct mt792x_dev *dev = data; + + mt792x_mutex_acquire(dev); + + dev->fw_debug = (u8)val; + mt7925_mcu_fw_log_2_host(dev, dev->fw_debug); + + mt792x_mutex_release(dev); + + return 0; +} + +static int +mt7925_fw_debug_get(void *data, u64 *val) +{ + struct mt792x_dev *dev = data; + + *val = dev->fw_debug; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug, mt7925_fw_debug_get, + mt7925_fw_debug_set, "%lld\n"); + +DEFINE_SHOW_ATTRIBUTE(mt792x_tx_stats); + +static void +mt7925_seq_puts_array(struct seq_file *file, const char *str, + s8 val[][2], int len, u8 band_idx) +{ + int i; + + seq_printf(file, "%-22s:", str); + for (i = 0; i < len; i++) + if (val[i][band_idx] == 127) + seq_printf(file, " %6s", "N.A"); + else + seq_printf(file, " %6d", val[i][band_idx]); + seq_puts(file, "\n"); +} + +#define mt7925_print_txpwr_entry(prefix, rate, idx) \ +({ \ + mt7925_seq_puts_array(s, #prefix " (tmac)", \ + txpwr->rate, \ + ARRAY_SIZE(txpwr->rate), \ + idx); \ +}) + +static inline void +mt7925_eht_txpwr(struct seq_file *s, struct mt7925_txpwr *txpwr, u8 band_idx) +{ + seq_printf(s, "%-22s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s\n", + " ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5", + "mcs6", "mcs7", "mcs8", "mcs9", "mcs10", "mcs11", + "mcs12", "mcs13", "mcs14", "mcs15"); + mt7925_print_txpwr_entry(EHT26, eht26, band_idx); + mt7925_print_txpwr_entry(EHT52, eht52, band_idx); + mt7925_print_txpwr_entry(EHT106, eht106, band_idx); + mt7925_print_txpwr_entry(EHT242, eht242, band_idx); + mt7925_print_txpwr_entry(EHT484, eht484, band_idx); + + 
mt7925_print_txpwr_entry(EHT996, eht996, band_idx); + mt7925_print_txpwr_entry(EHT996x2, eht996x2, band_idx); + mt7925_print_txpwr_entry(EHT996x4, eht996x4, band_idx); + mt7925_print_txpwr_entry(EHT26_52, eht26_52, band_idx); + mt7925_print_txpwr_entry(EHT26_106, eht26_106, band_idx); + mt7925_print_txpwr_entry(EHT484_242, eht484_242, band_idx); + mt7925_print_txpwr_entry(EHT996_484, eht996_484, band_idx); + mt7925_print_txpwr_entry(EHT996_484_242, eht996_484_242, band_idx); + mt7925_print_txpwr_entry(EHT996x2_484, eht996x2_484, band_idx); + mt7925_print_txpwr_entry(EHT996x3, eht996x3, band_idx); + mt7925_print_txpwr_entry(EHT996x3_484, eht996x3_484, band_idx); +} + +static int +mt7925_txpwr(struct seq_file *s, void *data) +{ + struct mt792x_dev *dev = dev_get_drvdata(s->private); + struct mt7925_txpwr *txpwr = NULL; + u8 band_idx = dev->mphy.band_idx; + int ret = 0; + + txpwr = devm_kmalloc(dev->mt76.dev, sizeof(*txpwr), GFP_KERNEL); + + if (!txpwr) + return -ENOMEM; + + mt792x_mutex_acquire(dev); + ret = mt7925_get_txpwr_info(dev, band_idx, txpwr); + mt792x_mutex_release(dev); + + if (ret) + goto out; + + seq_printf(s, "%-22s %6s %6s %6s %6s\n", + " ", "1m", "2m", "5m", "11m"); + mt7925_print_txpwr_entry(CCK, cck, band_idx); + + seq_printf(s, "%-22s %6s %6s %6s %6s %6s %6s %6s %6s\n", + " ", "6m", "9m", "12m", "18m", "24m", "36m", + "48m", "54m"); + mt7925_print_txpwr_entry(OFDM, ofdm, band_idx); + + seq_printf(s, "%-22s %6s %6s %6s %6s %6s %6s %6s %6s\n", + " ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5", + "mcs6", "mcs7"); + mt7925_print_txpwr_entry(HT20, ht20, band_idx); + + seq_printf(s, "%-22s %6s %6s %6s %6s %6s %6s %6s %6s %6s\n", + " ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5", + "mcs6", "mcs7", "mcs32"); + mt7925_print_txpwr_entry(HT40, ht40, band_idx); + + seq_printf(s, "%-22s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s\n", + " ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5", + "mcs6", "mcs7", "mcs8", "mcs9", "mcs10", "mcs11"); + mt7925_print_txpwr_entry(VHT20, vht20, band_idx); + mt7925_print_txpwr_entry(VHT40, vht40, band_idx); + + mt7925_print_txpwr_entry(VHT80, vht80, band_idx); + mt7925_print_txpwr_entry(VHT160, vht160, band_idx); + + mt7925_print_txpwr_entry(HE26, he26, band_idx); + mt7925_print_txpwr_entry(HE52, he52, band_idx); + mt7925_print_txpwr_entry(HE106, he106, band_idx); + mt7925_print_txpwr_entry(HE242, he242, band_idx); + mt7925_print_txpwr_entry(HE484, he484, band_idx); + + mt7925_print_txpwr_entry(HE996, he996, band_idx); + mt7925_print_txpwr_entry(HE996x2, he996x2, band_idx); + + mt7925_eht_txpwr(s, txpwr, band_idx); + +out: + devm_kfree(dev->mt76.dev, txpwr); + return ret; +} + +static int +mt7925_pm_set(void *data, u64 val) +{ + struct mt792x_dev *dev = data; + struct mt76_connac_pm *pm = &dev->pm; + + if (mt76_is_usb(&dev->mt76)) + return -EOPNOTSUPP; + + mutex_lock(&dev->mt76.mutex); + + if (val == pm->enable_user) + goto out; + + if (!pm->enable_user) { + pm->stats.last_wake_event = jiffies; + pm->stats.last_doze_event = jiffies; + } + /* make sure the chip is awake here and ps_work is scheduled + * just at the end of this routine. 
+ */ + pm->enable = false; + mt76_connac_pm_wake(&dev->mphy, pm); + + pm->enable_user = val; + mt7925_set_runtime_pm(dev); + mt76_connac_power_save_sched(&dev->mphy, pm); +out: + mutex_unlock(&dev->mt76.mutex); + + return 0; +} + +static int +mt7925_pm_get(void *data, u64 *val) +{ + struct mt792x_dev *dev = data; + + *val = dev->pm.enable_user; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_pm, mt7925_pm_get, mt7925_pm_set, "%lld\n"); + +static int +mt7925_deep_sleep_set(void *data, u64 val) +{ + struct mt792x_dev *dev = data; + struct mt76_connac_pm *pm = &dev->pm; + bool monitor = !!(dev->mphy.hw->conf.flags & IEEE80211_CONF_MONITOR); + bool enable = !!val; + + if (mt76_is_usb(&dev->mt76)) + return -EOPNOTSUPP; + + mt792x_mutex_acquire(dev); + if (pm->ds_enable_user == enable) + goto out; + + pm->ds_enable_user = enable; + pm->ds_enable = enable && !monitor; + mt7925_mcu_set_deep_sleep(dev, pm->ds_enable); +out: + mt792x_mutex_release(dev); + + return 0; +} + +static int +mt7925_deep_sleep_get(void *data, u64 *val) +{ + struct mt792x_dev *dev = data; + + *val = dev->pm.ds_enable_user; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_ds, mt7925_deep_sleep_get, + mt7925_deep_sleep_set, "%lld\n"); + +DEFINE_DEBUGFS_ATTRIBUTE(fops_pm_idle_timeout, mt792x_pm_idle_timeout_get, + mt792x_pm_idle_timeout_set, "%lld\n"); + +static int mt7925_chip_reset(void *data, u64 val) +{ + struct mt792x_dev *dev = data; + int ret = 0; + + switch (val) { + case 1: + /* Reset wifisys directly. */ + mt792x_reset(&dev->mt76); + break; + default: + /* Collect the core dump before reset wifisys. */ + mt792x_mutex_acquire(dev); + ret = mt7925_mcu_chip_config(dev, "assert"); + mt792x_mutex_release(dev); + break; + } + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_reset, NULL, mt7925_chip_reset, "%lld\n"); + +int mt7925_init_debugfs(struct mt792x_dev *dev) +{ + struct dentry *dir; + + dir = mt76_register_debugfs_fops(&dev->mphy, &fops_regval); + if (!dir) + return -ENOMEM; + + if (mt76_is_mmio(&dev->mt76)) + debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", + dir, mt792x_queues_read); + else + debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", + dir, mt76_queues_read); + + debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir, + mt792x_queues_acq); + debugfs_create_devm_seqfile(dev->mt76.dev, "txpower_sku", dir, + mt7925_txpwr); + debugfs_create_file("tx_stats", 0400, dir, dev, &mt792x_tx_stats_fops); + debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug); + debugfs_create_file("runtime-pm", 0600, dir, dev, &fops_pm); + debugfs_create_file("idle-timeout", 0600, dir, dev, + &fops_pm_idle_timeout); + debugfs_create_file("chip_reset", 0600, dir, dev, &fops_reset); + debugfs_create_devm_seqfile(dev->mt76.dev, "runtime_pm_stats", dir, + mt792x_pm_stats); + debugfs_create_file("deep-sleep", 0600, dir, dev, &fops_ds); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c new file mode 100644 index 000000000000..8f9b7a2f376c --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. 
*/ + +#include <linux/etherdevice.h> +#include <linux/firmware.h> +#include "mt7925.h" +#include "mac.h" +#include "mcu.h" + +static void +mt7925_regd_notifier(struct wiphy *wiphy, + struct regulatory_request *req) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt76_dev *mdev = &dev->mt76; + + /* allow world regdom at the first boot only */ + if (!memcmp(req->alpha2, "00", 2) && + mdev->alpha2[0] && mdev->alpha2[1]) + return; + + /* do not need to update the same country twice */ + if (!memcmp(req->alpha2, mdev->alpha2, 2) && + dev->country_ie_env == req->country_ie_env) + return; + + memcpy(mdev->alpha2, req->alpha2, 2); + mdev->region = req->dfs_region; + dev->country_ie_env = req->country_ie_env; + + mt792x_mutex_acquire(dev); + mt7925_mcu_set_clc(dev, req->alpha2, req->country_ie_env); + mt7925_mcu_set_channel_domain(hw->priv); + mt7925_set_tx_sar_pwr(hw, NULL); + mt792x_mutex_release(dev); +} + +static void mt7925_mac_init_basic_rates(struct mt792x_dev *dev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mt76_rates); i++) { + u16 rate = mt76_rates[i].hw_value; + u16 idx = MT792x_BASIC_RATES_TBL + i; + + rate = FIELD_PREP(MT_TX_RATE_MODE, rate >> 8) | + FIELD_PREP(MT_TX_RATE_IDX, rate & GENMASK(7, 0)); + mt7925_mac_set_fixed_rate_table(dev, idx, rate); + } +} + +int mt7925_mac_init(struct mt792x_dev *dev) +{ + int i; + + mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, 1536); + /* enable hardware de-agg */ + mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN); + + for (i = 0; i < MT792x_WTBL_SIZE; i++) + mt7925_mac_wtbl_update(dev, i, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + for (i = 0; i < 2; i++) + mt792x_mac_init_band(dev, i); + + mt7925_mac_init_basic_rates(dev); + + memzero_explicit(&dev->mt76.alpha2, sizeof(dev->mt76.alpha2)); + + return 0; +} +EXPORT_SYMBOL_GPL(mt7925_mac_init); + +static int __mt7925_init_hardware(struct mt792x_dev *dev) +{ + int ret; + + ret = mt792x_mcu_init(dev); + if (ret) + goto out; + + mt76_eeprom_override(&dev->mphy); + + ret = mt7925_mcu_set_eeprom(dev); + if (ret) + goto out; + + ret = mt7925_mac_init(dev); + if (ret) + goto out; + +out: + return ret; +} + +static int mt7925_init_hardware(struct mt792x_dev *dev) +{ + int ret, i; + + set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); + + for (i = 0; i < MT792x_MCU_INIT_RETRY_COUNT; i++) { + ret = __mt7925_init_hardware(dev); + if (!ret) + break; + + mt792x_init_reset(dev); + } + + if (i == MT792x_MCU_INIT_RETRY_COUNT) { + dev_err(dev->mt76.dev, "hardware init failed\n"); + return ret; + } + + return 0; +} + +static void mt7925_init_work(struct work_struct *work) +{ + struct mt792x_dev *dev = container_of(work, struct mt792x_dev, + init_work); + int ret; + + ret = mt7925_init_hardware(dev); + if (ret) + return; + + mt76_set_stream_caps(&dev->mphy, true); + mt7925_set_stream_he_eht_caps(&dev->phy); + + ret = mt76_register_device(&dev->mt76, true, mt76_rates, + ARRAY_SIZE(mt76_rates)); + if (ret) { + dev_err(dev->mt76.dev, "register device failed\n"); + return; + } + + ret = mt7925_init_debugfs(dev); + if (ret) { + dev_err(dev->mt76.dev, "register debugfs failed\n"); + return; + } + + /* we support chip reset now */ + dev->hw_init_done = true; + + mt7925_mcu_set_deep_sleep(dev, dev->pm.ds_enable); +} + +int mt7925_register_device(struct mt792x_dev *dev) +{ + struct ieee80211_hw *hw = mt76_hw(dev); + int ret; + + dev->phy.dev = dev; + dev->phy.mt76 = &dev->mt76.phy; + dev->mt76.phy.priv = &dev->phy; + dev->mt76.tx_worker.fn = 
mt792x_tx_worker; + + INIT_DELAYED_WORK(&dev->pm.ps_work, mt792x_pm_power_save_work); + INIT_WORK(&dev->pm.wake_work, mt792x_pm_wake_work); + spin_lock_init(&dev->pm.wake.lock); + mutex_init(&dev->pm.mutex); + init_waitqueue_head(&dev->pm.wait); + spin_lock_init(&dev->pm.txq_lock); + INIT_DELAYED_WORK(&dev->mphy.mac_work, mt792x_mac_work); + INIT_DELAYED_WORK(&dev->phy.scan_work, mt7925_scan_work); + INIT_DELAYED_WORK(&dev->coredump.work, mt7925_coredump_work); +#if IS_ENABLED(CONFIG_IPV6) + INIT_WORK(&dev->ipv6_ns_work, mt7925_set_ipv6_ns_work); + skb_queue_head_init(&dev->ipv6_ns_list); +#endif + skb_queue_head_init(&dev->phy.scan_event_list); + skb_queue_head_init(&dev->coredump.msg_list); + + INIT_WORK(&dev->reset_work, mt7925_mac_reset_work); + INIT_WORK(&dev->init_work, mt7925_init_work); + + INIT_WORK(&dev->phy.roc_work, mt7925_roc_work); + timer_setup(&dev->phy.roc_timer, mt792x_roc_timer, 0); + init_waitqueue_head(&dev->phy.roc_wait); + + dev->pm.idle_timeout = MT792x_PM_TIMEOUT; + dev->pm.stats.last_wake_event = jiffies; + dev->pm.stats.last_doze_event = jiffies; + if (!mt76_is_usb(&dev->mt76)) { + dev->pm.enable_user = true; + dev->pm.enable = true; + dev->pm.ds_enable_user = true; + dev->pm.ds_enable = true; + } + + if (!mt76_is_mmio(&dev->mt76)) + hw->extra_tx_headroom += MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE; + + mt792x_init_acpi_sar(dev); + + ret = mt792x_init_wcid(dev); + if (ret) + return ret; + + ret = mt792x_init_wiphy(hw); + if (ret) + return ret; + + hw->wiphy->reg_notifier = mt7925_regd_notifier; + dev->mphy.sband_2g.sband.ht_cap.cap |= + IEEE80211_HT_CAP_LDPC_CODING | + IEEE80211_HT_CAP_MAX_AMSDU; + dev->mphy.sband_2g.sband.ht_cap.ampdu_density = + IEEE80211_HT_MPDU_DENSITY_2; + dev->mphy.sband_5g.sband.ht_cap.cap |= + IEEE80211_HT_CAP_LDPC_CODING | + IEEE80211_HT_CAP_MAX_AMSDU; + dev->mphy.sband_5g.sband.ht_cap.ampdu_density = + IEEE80211_HT_MPDU_DENSITY_1; + dev->mphy.sband_5g.sband.vht_cap.cap |= + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK | + IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | + (3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT); + dev->mphy.sband_5g.sband.vht_cap.cap |= + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ | + IEEE80211_VHT_CAP_SHORT_GI_160; + + dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask; + dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask; + + queue_work(system_wq, &dev->init_work); + + return 0; +} +EXPORT_SYMBOL_GPL(mt7925_register_device); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c new file mode 100644 index 000000000000..1b9fbd9a140d --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c @@ -0,0 +1,1452 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. 
*/ + +#include <linux/devcoredump.h> +#include <linux/etherdevice.h> +#include <linux/timekeeping.h> +#include "mt7925.h" +#include "../dma.h" +#include "mac.h" +#include "mcu.h" + +bool mt7925_mac_wtbl_update(struct mt792x_dev *dev, int idx, u32 mask) +{ + mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX, + FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask); + + return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, + 0, 5000); +} + +static void mt7925_mac_sta_poll(struct mt792x_dev *dev) +{ + static const u8 ac_to_tid[] = { + [IEEE80211_AC_BE] = 0, + [IEEE80211_AC_BK] = 1, + [IEEE80211_AC_VI] = 4, + [IEEE80211_AC_VO] = 6 + }; + struct ieee80211_sta *sta; + struct mt792x_sta *msta; + u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS]; + LIST_HEAD(sta_poll_list); + struct rate_info *rate; + s8 rssi[4]; + int i; + + spin_lock_bh(&dev->mt76.sta_poll_lock); + list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); + + while (true) { + bool clear = false; + u32 addr, val; + u16 idx; + u8 bw; + + if (list_empty(&sta_poll_list)) + break; + msta = list_first_entry(&sta_poll_list, + struct mt792x_sta, wcid.poll_list); + spin_lock_bh(&dev->mt76.sta_poll_lock); + list_del_init(&msta->wcid.poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); + + idx = msta->wcid.idx; + addr = mt7925_mac_wtbl_lmac_addr(dev, idx, MT_WTBL_AC0_CTT_OFFSET); + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + u32 tx_last = msta->airtime_ac[i]; + u32 rx_last = msta->airtime_ac[i + 4]; + + msta->airtime_ac[i] = mt76_rr(dev, addr); + msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4); + + tx_time[i] = msta->airtime_ac[i] - tx_last; + rx_time[i] = msta->airtime_ac[i + 4] - rx_last; + + if ((tx_last | rx_last) & BIT(30)) + clear = true; + + addr += 8; + } + + if (clear) { + mt7925_mac_wtbl_update(dev, idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac)); + } + + if (!msta->wcid.sta) + continue; + + sta = container_of((void *)msta, struct ieee80211_sta, + drv_priv); + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + u8 q = mt76_connac_lmac_mapping(i); + u32 tx_cur = tx_time[q]; + u32 rx_cur = rx_time[q]; + u8 tid = ac_to_tid[i]; + + if (!tx_cur && !rx_cur) + continue; + + ieee80211_sta_register_airtime(sta, tid, tx_cur, + rx_cur); + } + + /* We don't support reading GI info from txs packets. + * For accurate tx status reporting and AQL improvement, + * we need to make sure that flags match so polling GI + * from per-sta counters directly. 
+ */ + rate = &msta->wcid.rate; + + switch (rate->bw) { + case RATE_INFO_BW_160: + bw = IEEE80211_STA_RX_BW_160; + break; + case RATE_INFO_BW_80: + bw = IEEE80211_STA_RX_BW_80; + break; + case RATE_INFO_BW_40: + bw = IEEE80211_STA_RX_BW_40; + break; + default: + bw = IEEE80211_STA_RX_BW_20; + break; + } + + addr = mt7925_mac_wtbl_lmac_addr(dev, idx, 6); + val = mt76_rr(dev, addr); + if (rate->flags & RATE_INFO_FLAGS_EHT_MCS) { + addr = mt7925_mac_wtbl_lmac_addr(dev, idx, 5); + val = mt76_rr(dev, addr); + rate->eht_gi = FIELD_GET(GENMASK(25, 24), val); + } else if (rate->flags & RATE_INFO_FLAGS_HE_MCS) { + u8 offs = MT_WTBL_TXRX_RATE_G2_HE + 2 * bw; + + rate->he_gi = (val & (0x3 << offs)) >> offs; + } else if (rate->flags & + (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) { + if (val & BIT(MT_WTBL_TXRX_RATE_G2 + bw)) + rate->flags |= RATE_INFO_FLAGS_SHORT_GI; + else + rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI; + } + + /* get signal strength of resp frames (CTS/BA/ACK) */ + addr = mt7925_mac_wtbl_lmac_addr(dev, idx, 34); + val = mt76_rr(dev, addr); + + rssi[0] = to_rssi(GENMASK(7, 0), val); + rssi[1] = to_rssi(GENMASK(15, 8), val); + rssi[2] = to_rssi(GENMASK(23, 16), val); + rssi[3] = to_rssi(GENMASK(31, 24), val); + + msta->ack_signal = + mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi); + + ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal); + } +} + +void mt7925_mac_set_fixed_rate_table(struct mt792x_dev *dev, + u8 tbl_idx, u16 rate_idx) +{ + u32 ctrl = MT_WTBL_ITCR_WR | MT_WTBL_ITCR_EXEC | tbl_idx; + + mt76_wr(dev, MT_WTBL_ITDR0, rate_idx); + /* use wtbl spe idx */ + mt76_wr(dev, MT_WTBL_ITDR1, MT_WTBL_SPE_IDX_SEL); + mt76_wr(dev, MT_WTBL_ITCR, ctrl); +} + +/* The HW does not translate the mac header to 802.3 for mesh point */ +static int mt7925_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap) +{ + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap); + struct mt792x_sta *msta = (struct mt792x_sta *)status->wcid; + __le32 *rxd = (__le32 *)skb->data; + struct ieee80211_sta *sta; + struct ieee80211_vif *vif; + struct ieee80211_hdr hdr; + u16 frame_control; + + if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) != + MT_RXD3_NORMAL_U2M) + return -EINVAL; + + if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4)) + return -EINVAL; + + if (!msta || !msta->vif) + return -EINVAL; + + sta = container_of((void *)msta, struct ieee80211_sta, drv_priv); + vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); + + /* store the info from RXD and ethhdr to avoid being overridden */ + frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL); + hdr.frame_control = cpu_to_le16(frame_control); + hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL)); + hdr.duration_id = 0; + + ether_addr_copy(hdr.addr1, vif->addr); + ether_addr_copy(hdr.addr2, sta->addr); + switch (frame_control & (IEEE80211_FCTL_TODS | + IEEE80211_FCTL_FROMDS)) { + case 0: + ether_addr_copy(hdr.addr3, vif->bss_conf.bssid); + break; + case IEEE80211_FCTL_FROMDS: + ether_addr_copy(hdr.addr3, eth_hdr->h_source); + break; + case IEEE80211_FCTL_TODS: + ether_addr_copy(hdr.addr3, eth_hdr->h_dest); + break; + case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS: + ether_addr_copy(hdr.addr3, eth_hdr->h_dest); + ether_addr_copy(hdr.addr4, eth_hdr->h_source); + break; + default: + break; + } + + skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2); + if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) || + 
eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX)) + ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header); + else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN) + ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header); + else + skb_pull(skb, 2); + + if (ieee80211_has_order(hdr.frame_control)) + memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11], + IEEE80211_HT_CTL_LEN); + if (ieee80211_is_data_qos(hdr.frame_control)) { + __le16 qos_ctrl; + + qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL)); + memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl, + IEEE80211_QOS_CTL_LEN); + } + + if (ieee80211_has_a4(hdr.frame_control)) + memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr)); + else + memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6); + + return 0; +} + +static int +mt7925_mac_fill_rx_rate(struct mt792x_dev *dev, + struct mt76_rx_status *status, + struct ieee80211_supported_band *sband, + __le32 *rxv, u8 *mode) +{ + u32 v0, v2; + u8 stbc, gi, bw, dcm, nss; + int i, idx; + bool cck = false; + + v0 = le32_to_cpu(rxv[0]); + v2 = le32_to_cpu(rxv[2]); + + idx = FIELD_GET(MT_PRXV_TX_RATE, v0); + i = idx; + nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1; + + stbc = FIELD_GET(MT_PRXV_HT_STBC, v2); + gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2); + *mode = FIELD_GET(MT_PRXV_TX_MODE, v2); + dcm = FIELD_GET(MT_PRXV_DCM, v2); + bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2); + + switch (*mode) { + case MT_PHY_TYPE_CCK: + cck = true; + fallthrough; + case MT_PHY_TYPE_OFDM: + i = mt76_get_rate(&dev->mt76, sband, i, cck); + break; + case MT_PHY_TYPE_HT_GF: + case MT_PHY_TYPE_HT: + status->encoding = RX_ENC_HT; + if (gi) + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + if (i > 31) + return -EINVAL; + break; + case MT_PHY_TYPE_VHT: + status->nss = nss; + status->encoding = RX_ENC_VHT; + if (gi) + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + if (i > 11) + return -EINVAL; + break; + case MT_PHY_TYPE_HE_MU: + case MT_PHY_TYPE_HE_SU: + case MT_PHY_TYPE_HE_EXT_SU: + case MT_PHY_TYPE_HE_TB: + status->nss = nss; + status->encoding = RX_ENC_HE; + i &= GENMASK(3, 0); + + if (gi <= NL80211_RATE_INFO_HE_GI_3_2) + status->he_gi = gi; + + status->he_dcm = dcm; + break; + case MT_PHY_TYPE_EHT_SU: + case MT_PHY_TYPE_EHT_TRIG: + case MT_PHY_TYPE_EHT_MU: + status->nss = nss; + status->encoding = RX_ENC_EHT; + i &= GENMASK(3, 0); + + if (gi <= NL80211_RATE_INFO_EHT_GI_3_2) + status->eht.gi = gi; + break; + default: + return -EINVAL; + } + status->rate_idx = i; + + switch (bw) { + case IEEE80211_STA_RX_BW_20: + break; + case IEEE80211_STA_RX_BW_40: + if (*mode & MT_PHY_TYPE_HE_EXT_SU && + (idx & MT_PRXV_TX_ER_SU_106T)) { + status->bw = RATE_INFO_BW_HE_RU; + status->he_ru = + NL80211_RATE_INFO_HE_RU_ALLOC_106; + } else { + status->bw = RATE_INFO_BW_40; + } + break; + case IEEE80211_STA_RX_BW_80: + status->bw = RATE_INFO_BW_80; + break; + case IEEE80211_STA_RX_BW_160: + status->bw = RATE_INFO_BW_160; + break; + default: + return -EINVAL; + } + + status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc; + if (*mode < MT_PHY_TYPE_HE_SU && gi) + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + + return 0; +} + +static int +mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb) +{ + u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM; + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + bool hdr_trans, unicast, insert_ccmp_hdr = false; + u8 chfreq, qos_ctl = 0, remove_pad, amsdu_info; + u16 hdr_gap; + __le32 *rxv = NULL, *rxd = (__le32 *)skb->data; + struct mt76_phy *mphy = 
&dev->mt76.phy; + struct mt792x_phy *phy = &dev->phy; + struct ieee80211_supported_band *sband; + u32 csum_status = *(u32 *)skb->cb; + u32 rxd0 = le32_to_cpu(rxd[0]); + u32 rxd1 = le32_to_cpu(rxd[1]); + u32 rxd2 = le32_to_cpu(rxd[2]); + u32 rxd3 = le32_to_cpu(rxd[3]); + u32 rxd4 = le32_to_cpu(rxd[4]); + struct mt792x_sta *msta = NULL; + u8 mode = 0; /* , band_idx; */ + u16 seq_ctrl = 0; + __le16 fc = 0; + int idx; + + memset(status, 0, sizeof(*status)); + + if (!test_bit(MT76_STATE_RUNNING, &mphy->state)) + return -EINVAL; + + if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR) + return -EINVAL; + + hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS; + if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM)) + return -EINVAL; + + /* ICV error or CCMP/BIP/WPI MIC error */ + if (rxd1 & MT_RXD1_NORMAL_ICV_ERR) + status->flag |= RX_FLAG_ONLY_MONITOR; + + chfreq = FIELD_GET(MT_RXD3_NORMAL_CH_FREQ, rxd3); + unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M; + idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1); + status->wcid = mt792x_rx_get_wcid(dev, idx, unicast); + + if (status->wcid) { + msta = container_of(status->wcid, struct mt792x_sta, wcid); + spin_lock_bh(&dev->mt76.sta_poll_lock); + if (list_empty(&msta->wcid.poll_list)) + list_add_tail(&msta->wcid.poll_list, + &dev->mt76.sta_poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); + } + + mt792x_get_status_freq_info(status, chfreq); + + switch (status->band) { + case NL80211_BAND_5GHZ: + sband = &mphy->sband_5g.sband; + break; + case NL80211_BAND_6GHZ: + sband = &mphy->sband_6g.sband; + break; + default: + sband = &mphy->sband_2g.sband; + break; + } + + if (!sband->channels) + return -EINVAL; + + if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask && + !(csum_status & (BIT(0) | BIT(2) | BIT(3)))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (rxd3 & MT_RXD3_NORMAL_FCS_ERR) + status->flag |= RX_FLAG_FAILED_FCS_CRC; + + if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR) + status->flag |= RX_FLAG_MMIC_ERROR; + + if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 && + !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) { + status->flag |= RX_FLAG_DECRYPTED; + status->flag |= RX_FLAG_IV_STRIPPED; + status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED; + } + + remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2); + + if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR) + return -EINVAL; + + rxd += 8; + if (rxd1 & MT_RXD1_NORMAL_GROUP_4) { + u32 v0 = le32_to_cpu(rxd[0]); + u32 v2 = le32_to_cpu(rxd[2]); + + /* TODO: need to map rxd address */ + fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0)); + seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2); + qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2); + + rxd += 4; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; + } + + if (rxd1 & MT_RXD1_NORMAL_GROUP_1) { + u8 *data = (u8 *)rxd; + + if (status->flag & RX_FLAG_DECRYPTED) { + switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) { + case MT_CIPHER_AES_CCMP: + case MT_CIPHER_CCMP_CCX: + case MT_CIPHER_CCMP_256: + insert_ccmp_hdr = + FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2); + fallthrough; + case MT_CIPHER_TKIP: + case MT_CIPHER_TKIP_NO_MIC: + case MT_CIPHER_GCMP: + case MT_CIPHER_GCMP_256: + status->iv[0] = data[5]; + status->iv[1] = data[4]; + status->iv[2] = data[3]; + status->iv[3] = data[2]; + status->iv[4] = data[1]; + status->iv[5] = data[0]; + break; + default: + break; + } + } + rxd += 4; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; + } + + if (rxd1 & MT_RXD1_NORMAL_GROUP_2) { + status->timestamp = le32_to_cpu(rxd[0]); + status->flag |= 
RX_FLAG_MACTIME_START; + + if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) { + status->flag |= RX_FLAG_AMPDU_DETAILS; + + /* all subframes of an A-MPDU have the same timestamp */ + if (phy->rx_ampdu_ts != status->timestamp) { + if (!++phy->ampdu_ref) + phy->ampdu_ref++; + } + phy->rx_ampdu_ts = status->timestamp; + + status->ampdu_ref = phy->ampdu_ref; + } + + rxd += 4; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; + } + + /* RXD Group 3 - P-RXV */ + if (rxd1 & MT_RXD1_NORMAL_GROUP_3) { + u32 v3; + int ret; + + rxv = rxd; + rxd += 4; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; + + v3 = le32_to_cpu(rxv[3]); + + status->chains = mphy->antenna_mask; + status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3); + status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3); + status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3); + status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3); + + /* RXD Group 5 - C-RXV */ + if (rxd1 & MT_RXD1_NORMAL_GROUP_5) { + rxd += 24; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; + } + + ret = mt7925_mac_fill_rx_rate(dev, status, sband, rxv, &mode); + if (ret < 0) + return ret; + } + + amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4); + status->amsdu = !!amsdu_info; + if (status->amsdu) { + status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME; + status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME; + } + + hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad; + if (hdr_trans && ieee80211_has_morefrags(fc)) { + if (mt7925_reverse_frag0_hdr_trans(skb, hdr_gap)) + return -EINVAL; + hdr_trans = false; + } else { + int pad_start = 0; + + skb_pull(skb, hdr_gap); + if (!hdr_trans && status->amsdu) { + pad_start = ieee80211_get_hdrlen_from_skb(skb); + } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) { + /* When header translation failure is indicated, + * the hardware will insert an extra 2-byte field + * containing the data length after the protocol + * type field. 
+ */ + pad_start = 12; + if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q) + pad_start += 4; + else + pad_start = 0; + } + + if (pad_start) { + memmove(skb->data + 2, skb->data, pad_start); + skb_pull(skb, 2); + } + } + + if (!hdr_trans) { + struct ieee80211_hdr *hdr; + + if (insert_ccmp_hdr) { + u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1); + + mt76_insert_ccmp_hdr(skb, key_id); + } + + hdr = mt76_skb_get_hdr(skb); + fc = hdr->frame_control; + if (ieee80211_is_data_qos(fc)) { + seq_ctrl = le16_to_cpu(hdr->seq_ctrl); + qos_ctl = *ieee80211_get_qos_ctl(hdr); + } + } else { + status->flag |= RX_FLAG_8023; + } + + mt792x_mac_assoc_rssi(dev, skb); + + if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023)) + mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode); + + if (!status->wcid || !ieee80211_is_data_qos(fc)) + return 0; + + status->aggr = unicast && !ieee80211_is_qos_nullfunc(fc); + status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl); + status->qos_ctl = qos_ctl; + + return 0; +} + +static void +mt7925_mac_write_txwi_8023(__le32 *txwi, struct sk_buff *skb, + struct mt76_wcid *wcid) +{ + u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; + u8 fc_type, fc_stype; + u16 ethertype; + bool wmm = false; + u32 val; + + if (wcid->sta) { + struct ieee80211_sta *sta; + + sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv); + wmm = sta->wme; + } + + val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) | + FIELD_PREP(MT_TXD1_TID, tid); + + ethertype = get_unaligned_be16(&skb->data[12]); + if (ethertype >= ETH_P_802_3_MIN) + val |= MT_TXD1_ETH_802_3; + + txwi[1] |= cpu_to_le32(val); + + fc_type = IEEE80211_FTYPE_DATA >> 2; + fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0; + + val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) | + FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype); + + txwi[2] |= cpu_to_le32(val); +} + +static void +mt7925_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi, + struct sk_buff *skb, + struct ieee80211_key_conf *key) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + bool multicast = is_multicast_ether_addr(hdr->addr1); + u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; + __le16 fc = hdr->frame_control; + u8 fc_type, fc_stype; + u32 val; + + if (ieee80211_is_action(fc) && + mgmt->u.action.category == WLAN_CATEGORY_BACK && + mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) + tid = MT_TX_ADDBA; + else if (ieee80211_is_mgmt(hdr->frame_control)) + tid = MT_TX_NORMAL; + + val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) | + FIELD_PREP(MT_TXD1_HDR_INFO, + ieee80211_get_hdrlen_from_skb(skb) / 2) | + FIELD_PREP(MT_TXD1_TID, tid); + + if (!ieee80211_is_data(fc) || multicast || + info->flags & IEEE80211_TX_CTL_USE_MINRATE) + val |= MT_TXD1_FIXED_RATE; + + if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) && + key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { + val |= MT_TXD1_BIP; + txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME); + } + + txwi[1] |= cpu_to_le32(val); + + fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2; + fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4; + + val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) | + FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype); + + txwi[2] |= cpu_to_le32(val); + + txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast)); + if (ieee80211_is_beacon(fc)) + txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT); + + if (info->flags 
& IEEE80211_TX_CTL_INJECTED) { + u16 seqno = le16_to_cpu(hdr->seq_ctrl); + + if (ieee80211_is_back_req(hdr->frame_control)) { + struct ieee80211_bar *bar; + + bar = (struct ieee80211_bar *)skb->data; + seqno = le16_to_cpu(bar->start_seq_num); + } + + val = MT_TXD3_SN_VALID | + FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno)); + txwi[3] |= cpu_to_le32(val); + txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU); + } +} + +void +mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_key_conf *key, int pid, + enum mt76_txq_id qid, u32 changed) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_vif *vif = info->control.vif; + u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0, band_idx = 0; + u32 val, sz_txd = mt76_is_mmio(dev) ? MT_TXD_SIZE : MT_SDIO_TXD_SIZE; + bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP; + struct mt76_vif *mvif; + bool beacon = !!(changed & (BSS_CHANGED_BEACON | + BSS_CHANGED_BEACON_ENABLED)); + bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP | + BSS_CHANGED_FILS_DISCOVERY)); + + mvif = vif ? (struct mt76_vif *)vif->drv_priv : NULL; + if (mvif) { + omac_idx = mvif->omac_idx; + wmm_idx = mvif->wmm_idx; + band_idx = mvif->band_idx; + } + + if (inband_disc) { + p_fmt = MT_TX_TYPE_FW; + q_idx = MT_LMAC_ALTX0; + } else if (beacon) { + p_fmt = MT_TX_TYPE_FW; + q_idx = MT_LMAC_BCN0; + } else if (qid >= MT_TXQ_PSD) { + p_fmt = mt76_is_mmio(dev) ? MT_TX_TYPE_CT : MT_TX_TYPE_SF; + q_idx = MT_LMAC_ALTX0; + } else { + p_fmt = mt76_is_mmio(dev) ? MT_TX_TYPE_CT : MT_TX_TYPE_SF; + q_idx = wmm_idx * MT76_CONNAC_MAX_WMM_SETS + + mt76_connac_lmac_mapping(skb_get_queue_mapping(skb)); + + /* counting non-offloading skbs */ + wcid->stats.tx_bytes += skb->len; + wcid->stats.tx_packets++; + } + + val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) | + FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) | + FIELD_PREP(MT_TXD0_Q_IDX, q_idx); + txwi[0] = cpu_to_le32(val); + + val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) | + FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx); + + if (band_idx) + val |= FIELD_PREP(MT_TXD1_TGID, band_idx); + + txwi[1] = cpu_to_le32(val); + txwi[2] = 0; + + val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, 15); + + if (key) + val |= MT_TXD3_PROTECT_FRAME; + if (info->flags & IEEE80211_TX_CTL_NO_ACK) + val |= MT_TXD3_NO_ACK; + if (wcid->amsdu) + val |= MT_TXD3_HW_AMSDU; + + txwi[3] = cpu_to_le32(val); + txwi[4] = 0; + + val = FIELD_PREP(MT_TXD5_PID, pid); + if (pid >= MT_PACKET_ID_FIRST) { + val |= MT_TXD5_TX_STATUS_HOST; + txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE); + txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU); + } + + txwi[5] = cpu_to_le32(val); + + val = MT_TXD6_DIS_MAT | MT_TXD6_DAS | + FIELD_PREP(MT_TXD6_MSDU_CNT, 1); + txwi[6] = cpu_to_le32(val); + txwi[7] = 0; + + if (is_8023) + mt7925_mac_write_txwi_8023(txwi, skb, wcid); + else + mt7925_mac_write_txwi_80211(dev, txwi, skb, key); + + if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) { + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + bool mcast = ieee80211_is_data(hdr->frame_control) && + is_multicast_ether_addr(hdr->addr1); + u8 idx = MT792x_BASIC_RATES_TBL; + + if (mvif) { + if (mcast && mvif->mcast_rates_idx) + idx = mvif->mcast_rates_idx; + else if (beacon && mvif->beacon_rates_idx) + idx = mvif->beacon_rates_idx; + else + idx = mvif->basic_rates_idx; + } + + txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TX_RATE, idx)); + txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE); + } +} +EXPORT_SYMBOL_GPL(mt7925_mac_write_txwi); + 
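mt7925_mac_write_txwi() above builds each 32-bit descriptor word by OR-ing FIELD_PREP() values into GENMASK()-defined fields and converting to little endian only at the end, which is what lets later code safely do txwi[n] |= cpu_to_le32(...). A self-contained sketch of the idiom with made-up field masks (the real TXD layout lives in the mt7925/mt792x headers, not here):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* hypothetical descriptor word 0 layout, for illustration only */
#define EX_TXD0_TX_BYTES	GENMASK(15, 0)
#define EX_TXD0_PKT_FMT		GENMASK(24, 23)
#define EX_TXD0_Q_IDX		GENMASK(31, 25)

static __le32 ex_build_txd0(u16 tx_bytes, u8 pkt_fmt, u8 q_idx)
{
	/* pack each field into its bit range, then fix the byte order once */
	u32 val = FIELD_PREP(EX_TXD0_TX_BYTES, tx_bytes) |
		  FIELD_PREP(EX_TXD0_PKT_FMT, pkt_fmt) |
		  FIELD_PREP(EX_TXD0_Q_IDX, q_idx);

	return cpu_to_le32(val);
}

FIELD_PREP() shifts each value into its mask's bit position (and type-checks the mask at compile time), so the descriptor setup stays readable even when, as in the function above, a dozen fields spread across several words are packed back to back.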
+static void mt7925_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi) +{ + struct mt792x_sta *msta; + u16 fc, tid; + u32 val; + + if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he)) + return; + + tid = le32_get_bits(txwi[1], MT_TXD1_TID); + if (tid >= 6) /* skip VO queue */ + return; + + val = le32_to_cpu(txwi[2]); + fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 | + FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4; + if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA))) + return; + + msta = (struct mt792x_sta *)sta->drv_priv; + if (!test_and_set_bit(tid, &msta->wcid.ampdu_state)) + ieee80211_start_tx_ba_session(sta, tid, 0); +} + +static bool +mt7925_mac_add_txs_skb(struct mt792x_dev *dev, struct mt76_wcid *wcid, + int pid, __le32 *txs_data) +{ + struct mt76_sta_stats *stats = &wcid->stats; + struct ieee80211_supported_band *sband; + struct mt76_dev *mdev = &dev->mt76; + struct mt76_phy *mphy; + struct ieee80211_tx_info *info; + struct sk_buff_head list; + struct rate_info rate = {}; + struct sk_buff *skb; + bool cck = false; + u32 txrate, txs, mode, stbc; + + mt76_tx_status_lock(mdev, &list); + skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list); + if (!skb) + goto out_no_skb; + + txs = le32_to_cpu(txs_data[0]); + + info = IEEE80211_SKB_CB(skb); + if (!(txs & MT_TXS0_ACK_ERROR_MASK)) + info->flags |= IEEE80211_TX_STAT_ACK; + + info->status.ampdu_len = 1; + info->status.ampdu_ack_len = !!(info->flags & + IEEE80211_TX_STAT_ACK); + + info->status.rates[0].idx = -1; + + txrate = FIELD_GET(MT_TXS0_TX_RATE, txs); + + rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate); + rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1; + stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC); + + if (stbc && rate.nss > 1) + rate.nss >>= 1; + + if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss)) + stats->tx_nss[rate.nss - 1]++; + if (rate.mcs < ARRAY_SIZE(stats->tx_mcs)) + stats->tx_mcs[rate.mcs]++; + + mode = FIELD_GET(MT_TX_RATE_MODE, txrate); + switch (mode) { + case MT_PHY_TYPE_CCK: + cck = true; + fallthrough; + case MT_PHY_TYPE_OFDM: + mphy = mt76_dev_phy(mdev, wcid->phy_idx); + + if (mphy->chandef.chan->band == NL80211_BAND_5GHZ) + sband = &mphy->sband_5g.sband; + else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ) + sband = &mphy->sband_6g.sband; + else + sband = &mphy->sband_2g.sband; + + rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck); + rate.legacy = sband->bitrates[rate.mcs].bitrate; + break; + case MT_PHY_TYPE_HT: + case MT_PHY_TYPE_HT_GF: + if (rate.mcs > 31) + goto out; + + rate.flags = RATE_INFO_FLAGS_MCS; + if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI) + rate.flags |= RATE_INFO_FLAGS_SHORT_GI; + break; + case MT_PHY_TYPE_VHT: + if (rate.mcs > 9) + goto out; + + rate.flags = RATE_INFO_FLAGS_VHT_MCS; + break; + case MT_PHY_TYPE_HE_SU: + case MT_PHY_TYPE_HE_EXT_SU: + case MT_PHY_TYPE_HE_TB: + case MT_PHY_TYPE_HE_MU: + if (rate.mcs > 11) + goto out; + + rate.he_gi = wcid->rate.he_gi; + rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate); + rate.flags = RATE_INFO_FLAGS_HE_MCS; + break; + case MT_PHY_TYPE_EHT_SU: + case MT_PHY_TYPE_EHT_TRIG: + case MT_PHY_TYPE_EHT_MU: + if (rate.mcs > 13) + goto out; + + rate.eht_gi = wcid->rate.eht_gi; + rate.flags = RATE_INFO_FLAGS_EHT_MCS; + break; + default: + goto out; + } + + stats->tx_mode[mode]++; + + switch (FIELD_GET(MT_TXS0_BW, txs)) { + case IEEE80211_STA_RX_BW_160: + rate.bw = RATE_INFO_BW_160; + stats->tx_bw[3]++; + break; + case IEEE80211_STA_RX_BW_80: + rate.bw = RATE_INFO_BW_80; + stats->tx_bw[2]++; + break; + case 
IEEE80211_STA_RX_BW_40: + rate.bw = RATE_INFO_BW_40; + stats->tx_bw[1]++; + break; + default: + rate.bw = RATE_INFO_BW_20; + stats->tx_bw[0]++; + break; + } + wcid->rate = rate; + +out: + mt76_tx_status_skb_done(mdev, skb, &list); + +out_no_skb: + mt76_tx_status_unlock(mdev, &list); + + return !!skb; +} + +void mt7925_mac_add_txs(struct mt792x_dev *dev, void *data) +{ + struct mt792x_sta *msta = NULL; + struct mt76_wcid *wcid; + __le32 *txs_data = data; + u16 wcidx; + u8 pid; + + if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1) + return; + + wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID); + pid = le32_get_bits(txs_data[3], MT_TXS3_PID); + + if (pid < MT_PACKET_ID_FIRST) + return; + + if (wcidx >= MT792x_WTBL_SIZE) + return; + + rcu_read_lock(); + + wcid = rcu_dereference(dev->mt76.wcid[wcidx]); + if (!wcid) + goto out; + + msta = container_of(wcid, struct mt792x_sta, wcid); + + mt7925_mac_add_txs_skb(dev, wcid, pid, txs_data); + if (!wcid->sta) + goto out; + + spin_lock_bh(&dev->mt76.sta_poll_lock); + if (list_empty(&msta->wcid.poll_list)) + list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); + +out: + rcu_read_unlock(); +} + +void mt7925_txwi_free(struct mt792x_dev *dev, struct mt76_txwi_cache *t, + struct ieee80211_sta *sta, bool clear_status, + struct list_head *free_list) +{ + struct mt76_dev *mdev = &dev->mt76; + __le32 *txwi; + u16 wcid_idx; + + mt76_connac_txp_skb_unmap(mdev, t); + if (!t->skb) + goto out; + + txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t); + if (sta) { + struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; + + if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) + mt7925_tx_check_aggr(sta, txwi); + + wcid_idx = wcid->idx; + } else { + wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX); + } + + __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list); +out: + t->skb = NULL; + mt76_put_txwi(mdev, t); +} +EXPORT_SYMBOL_GPL(mt7925_txwi_free); + +static void +mt7925_mac_tx_free(struct mt792x_dev *dev, void *data, int len) +{ + __le32 *tx_free = (__le32 *)data, *cur_info; + struct mt76_dev *mdev = &dev->mt76; + struct mt76_txwi_cache *txwi; + struct ieee80211_sta *sta = NULL; + struct mt76_wcid *wcid = NULL; + LIST_HEAD(free_list); + struct sk_buff *skb, *tmp; + void *end = data + len; + bool wake = false; + u16 total, count = 0; + + /* clean DMA queues and unmap buffers first */ + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false); + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false); + + if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 4)) + return; + + total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT); + for (cur_info = &tx_free[2]; count < total; cur_info++) { + u32 msdu, info; + u8 i; + + if (WARN_ON_ONCE((void *)cur_info >= end)) + return; + /* 1'b1: new wcid pair. + * 1'b0: msdu_id with the same 'wcid pair' as above. 
+ */ + info = le32_to_cpu(*cur_info); + if (info & MT_TXFREE_INFO_PAIR) { + struct mt792x_sta *msta; + u16 idx; + + idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info); + wcid = rcu_dereference(dev->mt76.wcid[idx]); + sta = wcid_to_sta(wcid); + if (!sta) + continue; + + msta = container_of(wcid, struct mt792x_sta, wcid); + spin_lock_bh(&mdev->sta_poll_lock); + if (list_empty(&msta->wcid.poll_list)) + list_add_tail(&msta->wcid.poll_list, + &mdev->sta_poll_list); + spin_unlock_bh(&mdev->sta_poll_lock); + continue; + } + + if (info & MT_TXFREE_INFO_HEADER) { + if (wcid) { + wcid->stats.tx_retries += + FIELD_GET(MT_TXFREE_INFO_COUNT, info) - 1; + wcid->stats.tx_failed += + !!FIELD_GET(MT_TXFREE_INFO_STAT, info); + } + continue; + } + + for (i = 0; i < 2; i++) { + msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID; + if (msdu == MT_TXFREE_INFO_MSDU_ID) + continue; + + count++; + txwi = mt76_token_release(mdev, msdu, &wake); + if (!txwi) + continue; + + mt7925_txwi_free(dev, txwi, sta, 0, &free_list); + } + } + + mt7925_mac_sta_poll(dev); + + if (wake) + mt76_set_tx_blocked(&dev->mt76, false); + + mt76_worker_schedule(&dev->mt76.tx_worker); + + list_for_each_entry_safe(skb, tmp, &free_list, list) { + skb_list_del_init(skb); + napi_consume_skb(skb, 1); + } +} + +bool mt7925_rx_check(struct mt76_dev *mdev, void *data, int len) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + __le32 *rxd = (__le32 *)data; + __le32 *end = (__le32 *)&rxd[len / 4]; + enum rx_pkt_type type; + + type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE); + if (type != PKT_TYPE_NORMAL) { + u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK); + + if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) == + MT_RXD0_SW_PKT_TYPE_FRAME)) + return true; + } + + switch (type) { + case PKT_TYPE_TXRX_NOTIFY: + /* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */ + mt7925_mac_tx_free(dev, data, len); /* mmio */ + return false; + case PKT_TYPE_TXS: + for (rxd += 4; rxd + 12 <= end; rxd += 12) + mt7925_mac_add_txs(dev, rxd); + return false; + default: + return true; + } +} +EXPORT_SYMBOL_GPL(mt7925_rx_check); + +void mt7925_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + struct sk_buff *skb, u32 *info) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + __le32 *rxd = (__le32 *)skb->data; + __le32 *end = (__le32 *)&skb->data[skb->len]; + enum rx_pkt_type type; + u16 flag; + + type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE); + flag = le32_get_bits(rxd[0], MT_RXD0_PKT_FLAG); + if (type != PKT_TYPE_NORMAL) { + u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK); + + if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) == + MT_RXD0_SW_PKT_TYPE_FRAME)) + type = PKT_TYPE_NORMAL; + } + + if (type == PKT_TYPE_RX_EVENT && flag == 0x1) + type = PKT_TYPE_NORMAL_MCU; + + switch (type) { + case PKT_TYPE_TXRX_NOTIFY: + /* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */ + mt7925_mac_tx_free(dev, skb->data, skb->len); + napi_consume_skb(skb, 1); + break; + case PKT_TYPE_RX_EVENT: + mt7925_mcu_rx_event(dev, skb); + break; + case PKT_TYPE_TXS: + for (rxd += 2; rxd + 8 <= end; rxd += 8) + mt7925_mac_add_txs(dev, rxd); + dev_kfree_skb(skb); + break; + case PKT_TYPE_NORMAL_MCU: + case PKT_TYPE_NORMAL: + if (!mt7925_mac_fill_rx(dev, skb)) { + mt76_rx(&dev->mt76, q, skb); + return; + } + fallthrough; + default: + dev_kfree_skb(skb); + break; + } +} +EXPORT_SYMBOL_GPL(mt7925_queue_rx_skb); + +static void +mt7925_vif_connect_iter(void *priv, u8 *mac, + struct ieee80211_vif *vif) +{ 
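+ /* Reset-recovery helper, run for each active interface from + * mt7925_mac_reset_work(): force station vifs to disconnect, then + * re-create device, TX and (for AP vifs) BSS/beacon state in firmware + * so the stack can bring the link back up. + */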
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mvif->phy->dev; + struct ieee80211_hw *hw = mt76_hw(dev); + + if (vif->type == NL80211_IFTYPE_STATION) + ieee80211_disconnect(vif, true); + + mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true); + mt7925_mcu_set_tx(dev, vif); + + if (vif->type == NL80211_IFTYPE_AP) { + mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.wcid, + true, NULL); + mt7925_mcu_sta_update(dev, NULL, vif, true, + MT76_STA_INFO_STATE_NONE); + mt7925_mcu_uni_add_beacon_offload(dev, hw, vif, true); + } +} + +/* system error recovery */ +void mt7925_mac_reset_work(struct work_struct *work) +{ + struct mt792x_dev *dev = container_of(work, struct mt792x_dev, + reset_work); + struct ieee80211_hw *hw = mt76_hw(dev); + struct mt76_connac_pm *pm = &dev->pm; + int i, ret; + + dev_dbg(dev->mt76.dev, "chip reset\n"); + dev->hw_full_reset = true; + ieee80211_stop_queues(hw); + + cancel_delayed_work_sync(&dev->mphy.mac_work); + cancel_delayed_work_sync(&pm->ps_work); + cancel_work_sync(&pm->wake_work); + + for (i = 0; i < 10; i++) { + mutex_lock(&dev->mt76.mutex); + ret = mt792x_dev_reset(dev); + mutex_unlock(&dev->mt76.mutex); + + if (!ret) + break; + } + + if (i == 10) + dev_err(dev->mt76.dev, "chip reset failed\n"); + + if (test_and_clear_bit(MT76_HW_SCANNING, &dev->mphy.state)) { + struct cfg80211_scan_info info = { + .aborted = true, + }; + + ieee80211_scan_completed(dev->mphy.hw, &info); + } + + dev->hw_full_reset = false; + pm->suspended = false; + ieee80211_wake_queues(hw); + ieee80211_iterate_active_interfaces(hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7925_vif_connect_iter, NULL); + mt76_connac_power_save_sched(&dev->mt76.phy, pm); +} + +void mt7925_coredump_work(struct work_struct *work) +{ + struct mt792x_dev *dev; + char *dump, *data; + + dev = (struct mt792x_dev *)container_of(work, struct mt792x_dev, + coredump.work.work); + + if (time_is_after_jiffies(dev->coredump.last_activity + + 4 * MT76_CONNAC_COREDUMP_TIMEOUT)) { + queue_delayed_work(dev->mt76.wq, &dev->coredump.work, + MT76_CONNAC_COREDUMP_TIMEOUT); + return; + } + + dump = vzalloc(MT76_CONNAC_COREDUMP_SZ); + data = dump; + + while (true) { + struct sk_buff *skb; + + spin_lock_bh(&dev->mt76.lock); + skb = __skb_dequeue(&dev->coredump.msg_list); + spin_unlock_bh(&dev->mt76.lock); + + if (!skb) + break; + + skb_pull(skb, sizeof(struct mt7925_mcu_rxd) + 8); + if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) { + dev_kfree_skb(skb); + continue; + } + + memcpy(data, skb->data, skb->len); + data += skb->len; + + dev_kfree_skb(skb); + } + + if (dump) + dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ, + GFP_KERNEL); + + mt792x_reset(&dev->mt76); +} + +/* usb_sdio */ +static void +mt7925_usb_sdio_write_txwi(struct mt792x_dev *dev, struct mt76_wcid *wcid, + enum mt76_txq_id qid, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, int pid, + struct sk_buff *skb) +{ + __le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE); + + memset(txwi, 0, MT_SDIO_TXD_SIZE); + mt7925_mac_write_txwi(&dev->mt76, txwi, skb, wcid, key, pid, qid, 0); + skb_push(skb, MT_SDIO_TXD_SIZE); +} + +int mt7925_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); + struct ieee80211_key_conf *key 
= info->control.hw_key; + struct sk_buff *skb = tx_info->skb; + int err, pad, pktid; + + if (unlikely(tx_info->skb->len <= ETH_HLEN)) + return -EINVAL; + + if (!wcid) + wcid = &dev->mt76.global_wcid; + + if (sta) { + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + + if (time_after(jiffies, msta->last_txs + HZ / 4)) { + info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; + msta->last_txs = jiffies; + } + } + + pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb); + mt7925_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb); + + mt792x_skb_add_usb_sdio_hdr(dev, skb, 0); + pad = round_up(skb->len, 4) - skb->len; + if (mt76_is_usb(mdev)) + pad += 4; + + err = mt76_skb_adjust_pad(skb, pad); + if (err) + /* Release pktid in case of error. */ + idr_remove(&wcid->pktid, pktid); + + return err; +} +EXPORT_SYMBOL_GPL(mt7925_usb_sdio_tx_prepare_skb); + +void mt7925_usb_sdio_tx_complete_skb(struct mt76_dev *mdev, + struct mt76_queue_entry *e) +{ + __le32 *txwi = (__le32 *)(e->skb->data + MT_SDIO_HDR_SIZE); + unsigned int headroom = MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE; + struct ieee80211_sta *sta; + struct mt76_wcid *wcid; + u16 idx; + + idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX); + wcid = rcu_dereference(mdev->wcid[idx]); + sta = wcid_to_sta(wcid); + + if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE))) + mt7925_tx_check_aggr(sta, txwi); + + skb_pull(e->skb, headroom); + mt76_tx_complete_skb(mdev, e->wcid, e->skb); +} +EXPORT_SYMBOL_GPL(mt7925_usb_sdio_tx_complete_skb); + +bool mt7925_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + + mt792x_mutex_acquire(dev); + mt7925_mac_sta_poll(dev); + mt792x_mutex_release(dev); + + return false; +} +EXPORT_SYMBOL_GPL(mt7925_usb_sdio_tx_status_data); + +#if IS_ENABLED(CONFIG_IPV6) +void mt7925_set_ipv6_ns_work(struct work_struct *work) +{ + struct mt792x_dev *dev = container_of(work, struct mt792x_dev, + ipv6_ns_work); + struct sk_buff *skb; + int ret = 0; + + do { + skb = skb_dequeue(&dev->ipv6_ns_list); + + if (!skb) + break; + + mt792x_mutex_acquire(dev); + ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_UNI_CMD(OFFLOAD), true); + mt792x_mutex_release(dev); + + } while (!ret); + + if (ret) + skb_queue_purge(&dev->ipv6_ns_list); +} +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.h b/drivers/net/wireless/mediatek/mt76/mt7925/mac.h new file mode 100644 index 000000000000..b10a993326b9 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2023 MediaTek Inc. */ + +#ifndef __MT7925_MAC_H +#define __MT7925_MAC_H + +#include "../mt76_connac3_mac.h" + +#define MT_WTBL_TXRX_CAP_RATE_OFFSET 7 +#define MT_WTBL_TXRX_RATE_G2_HE 24 +#define MT_WTBL_TXRX_RATE_G2 12 + +#define MT_WTBL_AC0_CTT_OFFSET 20 + +static inline u32 mt7925_mac_wtbl_lmac_addr(struct mt792x_dev *dev, u16 wcid, u8 dw) +{ + mt76_wr(dev, MT_WTBLON_TOP_WDUCR, + FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7))); + + return MT_WTBL_LMAC_OFFS(wcid, dw); +} + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c new file mode 100644 index 000000000000..aa918b9b0469 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c @@ -0,0 +1,1454 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. 
*/ + +#include <linux/etherdevice.h> +#include <linux/platform_device.h> +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/ctype.h> +#include <net/ipv6.h> +#include "mt7925.h" +#include "mcu.h" +#include "mac.h" + +static void +mt7925_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band, + struct ieee80211_sband_iftype_data *data, + enum nl80211_iftype iftype) +{ + struct ieee80211_sta_he_cap *he_cap = &data->he_cap; + struct ieee80211_he_cap_elem *he_cap_elem = &he_cap->he_cap_elem; + struct ieee80211_he_mcs_nss_supp *he_mcs = &he_cap->he_mcs_nss_supp; + int i, nss = hweight8(phy->mt76->antenna_mask); + u16 mcs_map = 0; + + for (i = 0; i < 8; i++) { + if (i < nss) + mcs_map |= (IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2)); + else + mcs_map |= (IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2)); + } + + he_cap->has_he = true; + + he_cap_elem->mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE; + he_cap_elem->mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3; + he_cap_elem->mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU; + + if (band == NL80211_BAND_2GHZ) + he_cap_elem->phy_cap_info[0] = + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; + else + he_cap_elem->phy_cap_info[0] = + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; + + he_cap_elem->phy_cap_info[1] = + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD; + he_cap_elem->phy_cap_info[2] = + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | + IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO; + + switch (iftype) { + case NL80211_IFTYPE_AP: + he_cap_elem->mac_cap_info[2] |= + IEEE80211_HE_MAC_CAP2_BSR; + he_cap_elem->mac_cap_info[4] |= + IEEE80211_HE_MAC_CAP4_BQR; + he_cap_elem->mac_cap_info[5] |= + IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX; + he_cap_elem->phy_cap_info[3] |= + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK | + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK; + he_cap_elem->phy_cap_info[6] |= + IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE | + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT; + he_cap_elem->phy_cap_info[9] |= + IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU | + IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU; + break; + case NL80211_IFTYPE_STATION: + he_cap_elem->mac_cap_info[1] |= + IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US; + + if (band == NL80211_BAND_2GHZ) + he_cap_elem->phy_cap_info[0] |= + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G; + else + he_cap_elem->phy_cap_info[0] |= + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G; + + he_cap_elem->phy_cap_info[1] |= + IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | + IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US; + he_cap_elem->phy_cap_info[3] |= + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK | + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK; + he_cap_elem->phy_cap_info[4] |= + IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE | + IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 | + IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4; + he_cap_elem->phy_cap_info[5] |= + IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK | + IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK; + he_cap_elem->phy_cap_info[6] |= + IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU | + IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU | + IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB | + IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE | + 
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT; + he_cap_elem->phy_cap_info[7] |= + IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP | + IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI; + he_cap_elem->phy_cap_info[8] |= + IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | + IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | + IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU | + IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484; + he_cap_elem->phy_cap_info[9] |= + IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM | + IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK | + IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU | + IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU | + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB; + break; + default: + break; + } + + he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map); + he_mcs->tx_mcs_80 = cpu_to_le16(mcs_map); + he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map); + he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map); + + memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres)); + + if (he_cap_elem->phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { + mt76_connac_gen_ppe_thresh(he_cap->ppe_thres, nss); + } else { + he_cap_elem->phy_cap_info[9] |= + u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US, + IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK); + } + + if (band == NL80211_BAND_6GHZ) { + u16 cap = IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS | + IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS; + + cap |= u16_encode_bits(IEEE80211_HT_MPDU_DENSITY_0_5, + IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) | + u16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K, + IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) | + u16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454, + IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN); + + data->he_6ghz_capa.capa = cpu_to_le16(cap); + } +} + +static void +mt7925_init_eht_caps(struct mt792x_phy *phy, enum nl80211_band band, + struct ieee80211_sband_iftype_data *data, + enum nl80211_iftype iftype) +{ + struct ieee80211_sta_eht_cap *eht_cap = &data->eht_cap; + struct ieee80211_eht_cap_elem_fixed *eht_cap_elem = &eht_cap->eht_cap_elem; + struct ieee80211_eht_mcs_nss_supp *eht_nss = &eht_cap->eht_mcs_nss_supp; + enum nl80211_chan_width width = phy->mt76->chandef.width; + int nss = hweight8(phy->mt76->antenna_mask); + int sts = hweight16(phy->mt76->chainmask); + u8 val; + + if (!phy->dev->has_eht) + return; + + eht_cap->has_eht = true; + + eht_cap_elem->mac_cap_info[0] = + IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | + IEEE80211_EHT_MAC_CAP0_OM_CONTROL; + + eht_cap_elem->phy_cap_info[0] = + IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI | + IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER | + IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE; + + eht_cap_elem->phy_cap_info[0] |= + u8_encode_bits(u8_get_bits(sts - 1, BIT(0)), + IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK); + + eht_cap_elem->phy_cap_info[1] = + u8_encode_bits(u8_get_bits(sts - 1, GENMASK(2, 1)), + IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) | + u8_encode_bits(sts - 1, + IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK); + + eht_cap_elem->phy_cap_info[2] = + u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK) | + u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK); + + eht_cap_elem->phy_cap_info[3] = + IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK | + IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK | + IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK | + IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | + 
IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK; + + eht_cap_elem->phy_cap_info[4] = + u8_encode_bits(min_t(int, sts - 1, 2), + IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK); + + eht_cap_elem->phy_cap_info[5] = + IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK | + u8_encode_bits(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US, + IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK) | + u8_encode_bits(u8_get_bits(0x11, GENMASK(1, 0)), + IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK); + + val = width == NL80211_CHAN_WIDTH_160 ? 0x7 : + width == NL80211_CHAN_WIDTH_80 ? 0x3 : 0x1; + eht_cap_elem->phy_cap_info[6] = + u8_encode_bits(u8_get_bits(0x11, GENMASK(4, 2)), + IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK) | + u8_encode_bits(val, IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK); + + eht_cap_elem->phy_cap_info[7] = + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ; + + val = u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_RX) | + u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_TX); + + eht_nss->bw._80.rx_tx_mcs9_max_nss = val; + eht_nss->bw._80.rx_tx_mcs11_max_nss = val; + eht_nss->bw._80.rx_tx_mcs13_max_nss = val; + eht_nss->bw._160.rx_tx_mcs9_max_nss = val; + eht_nss->bw._160.rx_tx_mcs11_max_nss = val; + eht_nss->bw._160.rx_tx_mcs13_max_nss = val; +} + +static void +__mt7925_set_stream_he_eht_caps(struct mt792x_phy *phy, + struct ieee80211_supported_band *sband, + enum nl80211_band band) +{ + struct ieee80211_sband_iftype_data *data = phy->iftype[band]; + int i, n = 0; + + for (i = 0; i < NUM_NL80211_IFTYPES; i++) { + switch (i) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_AP: + break; + default: + continue; + } + + data[n].types_mask = BIT(i); + mt7925_init_he_caps(phy, band, &data[n], i); + mt7925_init_eht_caps(phy, band, &data[n], i); + + n++; + } + + _ieee80211_set_sband_iftype_data(sband, data, n); +} + +void mt7925_set_stream_he_eht_caps(struct mt792x_phy *phy) +{ + if (phy->mt76->cap.has_2ghz) + __mt7925_set_stream_he_eht_caps(phy, &phy->mt76->sband_2g.sband, + NL80211_BAND_2GHZ); + + if (phy->mt76->cap.has_5ghz) + __mt7925_set_stream_he_eht_caps(phy, &phy->mt76->sband_5g.sband, + NL80211_BAND_5GHZ); + + if (phy->mt76->cap.has_6ghz) + __mt7925_set_stream_he_eht_caps(phy, &phy->mt76->sband_6g.sband, + NL80211_BAND_6GHZ); +} + +int __mt7925_start(struct mt792x_phy *phy) +{ + struct mt76_phy *mphy = phy->mt76; + int err; + + err = mt7925_mcu_set_channel_domain(mphy); + if (err) + return err; + + err = mt7925_mcu_set_rts_thresh(phy, 0x92b); + if (err) + return err; + + err = mt7925_set_tx_sar_pwr(mphy->hw, NULL); + if (err) + return err; + + mt792x_mac_reset_counters(phy); + set_bit(MT76_STATE_RUNNING, &mphy->state); + + ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, + MT792x_WATCHDOG_TIME); + + return 0; +} +EXPORT_SYMBOL_GPL(__mt7925_start); + +static int mt7925_start(struct ieee80211_hw *hw) +{ + struct mt792x_phy *phy = mt792x_hw_phy(hw); + int err; + + mt792x_mutex_acquire(phy->dev); + err = __mt7925_start(phy); + mt792x_mutex_release(phy->dev); + + return err; +} + +static int +mt7925_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_phy *phy = mt792x_hw_phy(hw); + struct mt76_txq *mtxq; + int idx, ret = 0; + + 
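+ /* The interface index doubles as the firmware OMAC index; claim the + * lowest free bit in vif_mask and bind the reserved WTBL entry + * (MT792x_WTBL_RESERVED - idx) to this vif. + */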
mt792x_mutex_acquire(dev); + + mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask); + if (mvif->mt76.idx >= MT792x_MAX_INTERFACES) { + ret = -ENOSPC; + goto out; + } + + mvif->mt76.omac_idx = mvif->mt76.idx; + mvif->phy = phy; + mvif->mt76.band_idx = 0; + mvif->mt76.wmm_idx = mvif->mt76.idx % MT76_CONNAC_MAX_WMM_SETS; + + if (phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ) + mvif->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL + 4; + else + mvif->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL; + + ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, + true); + if (ret) + goto out; + + dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx); + phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx); + + idx = MT792x_WTBL_RESERVED - mvif->mt76.idx; + + INIT_LIST_HEAD(&mvif->sta.wcid.poll_list); + mvif->sta.wcid.idx = idx; + mvif->sta.wcid.phy_idx = mvif->mt76.band_idx; + mvif->sta.wcid.hw_key_idx = -1; + mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET; + mt76_wcid_init(&mvif->sta.wcid); + + mt7925_mac_wtbl_update(dev, idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + + ewma_rssi_init(&mvif->rssi); + + rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid); + if (vif->txq) { + mtxq = (struct mt76_txq *)vif->txq->drv_priv; + mtxq->wcid = idx; + } + + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; +out: + mt792x_mutex_release(dev); + + return ret; +} + +static void mt7925_roc_iter(void *priv, u8 *mac, + struct ieee80211_vif *vif) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_phy *phy = priv; + + mt7925_mcu_abort_roc(phy, mvif, phy->roc_token_id); +} + +void mt7925_roc_work(struct work_struct *work) +{ + struct mt792x_phy *phy; + + phy = (struct mt792x_phy *)container_of(work, struct mt792x_phy, + roc_work); + + if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) + return; + + mt792x_mutex_acquire(phy->dev); + ieee80211_iterate_active_interfaces(phy->mt76->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7925_roc_iter, phy); + mt792x_mutex_release(phy->dev); + ieee80211_remain_on_channel_expired(phy->mt76->hw); +} + +static int mt7925_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif) +{ + int err = 0; + + del_timer_sync(&phy->roc_timer); + cancel_work_sync(&phy->roc_work); + + mt792x_mutex_acquire(phy->dev); + if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) + err = mt7925_mcu_abort_roc(phy, vif, phy->roc_token_id); + mt792x_mutex_release(phy->dev); + + return err; +} + +static int mt7925_set_roc(struct mt792x_phy *phy, + struct mt792x_vif *vif, + struct ieee80211_channel *chan, + int duration, + enum mt7925_roc_req type) +{ + int err; + + if (test_and_set_bit(MT76_STATE_ROC, &phy->mt76->state)) + return -EBUSY; + + phy->roc_grant = false; + + err = mt7925_mcu_set_roc(phy, vif, chan, duration, type, + ++phy->roc_token_id); + if (err < 0) { + clear_bit(MT76_STATE_ROC, &phy->mt76->state); + goto out; + } + + if (!wait_event_timeout(phy->roc_wait, phy->roc_grant, 4 * HZ)) { + mt7925_mcu_abort_roc(phy, vif, phy->roc_token_id); + clear_bit(MT76_STATE_ROC, &phy->mt76->state); + err = -ETIMEDOUT; + } + +out: + return err; +} + +static int mt7925_remain_on_channel(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel *chan, + int duration, + enum ieee80211_roc_type type) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_phy *phy = mt792x_hw_phy(hw); + int err; + + mt792x_mutex_acquire(phy->dev); + err = mt7925_set_roc(phy, mvif, chan, duration, MT7925_ROC_REQ_ROC); + mt792x_mutex_release(phy->dev); + 
+ return err; +} + +static int mt7925_cancel_remain_on_channel(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_phy *phy = mt792x_hw_phy(hw); + + return mt7925_abort_roc(phy, mvif); +} + +static int mt7925_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_sta *msta = sta ? (struct mt792x_sta *)sta->drv_priv : + &mvif->sta; + struct mt76_wcid *wcid = &msta->wcid; + u8 *wcid_keyidx = &wcid->hw_key_idx; + int idx = key->keyidx, err = 0; + + /* The hardware does not support per-STA RX GTK; fall back + * to software mode for these. + */ + if ((vif->type == NL80211_IFTYPE_ADHOC || + vif->type == NL80211_IFTYPE_MESH_POINT) && + (key->cipher == WLAN_CIPHER_SUITE_TKIP || + key->cipher == WLAN_CIPHER_SUITE_CCMP) && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + return -EOPNOTSUPP; + + /* fall back to sw encryption for unsupported ciphers */ + switch (key->cipher) { + case WLAN_CIPHER_SUITE_AES_CMAC: + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE; + wcid_keyidx = &wcid->hw_key_idx2; + break; + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + if (!mvif->wep_sta) + return -EOPNOTSUPP; + break; + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + case WLAN_CIPHER_SUITE_SMS4: + break; + default: + return -EOPNOTSUPP; + } + + mt792x_mutex_acquire(dev); + + if (cmd == SET_KEY && !mvif->mt76.cipher) { + struct mt792x_phy *phy = mt792x_hw_phy(hw); + + mvif->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher); + mt7925_mcu_add_bss_info(phy, mvif->mt76.ctx, vif, sta, true); + } + + if (cmd == SET_KEY) + *wcid_keyidx = idx; + else if (idx == *wcid_keyidx) + *wcid_keyidx = -1; + else + goto out; + + mt76_wcid_key_setup(&dev->mt76, wcid, + cmd == SET_KEY ?
key : NULL); + + err = mt7925_mcu_add_key(&dev->mt76, vif, &msta->bip, + key, MCU_UNI_CMD(STA_REC_UPDATE), + &msta->wcid, cmd); + + if (err) + goto out; + + if (key->cipher == WLAN_CIPHER_SUITE_WEP104 || + key->cipher == WLAN_CIPHER_SUITE_WEP40) + err = mt7925_mcu_add_key(&dev->mt76, vif, &mvif->wep_sta->bip, + key, MCU_WMWA_UNI_CMD(STA_REC_UPDATE), + &mvif->wep_sta->wcid, cmd); + +out: + mt792x_mutex_release(dev); + + return err; +} + +static void +mt7925_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct mt792x_dev *dev = priv; + struct ieee80211_hw *hw = mt76_hw(dev); + bool pm_enable = dev->pm.enable; + int err; + + err = mt7925_mcu_set_beacon_filter(dev, vif, pm_enable); + if (err < 0) + return; + + if (pm_enable) { + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; + ieee80211_hw_set(hw, CONNECTION_MONITOR); + } else { + vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER; + __clear_bit(IEEE80211_HW_CONNECTION_MONITOR, hw->flags); + } +} + +static void +mt7925_sniffer_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct mt792x_dev *dev = priv; + struct ieee80211_hw *hw = mt76_hw(dev); + struct mt76_connac_pm *pm = &dev->pm; + bool monitor = !!(hw->conf.flags & IEEE80211_CONF_MONITOR); + + mt7925_mcu_set_sniffer(dev, vif, monitor); + pm->enable = pm->enable_user && !monitor; + pm->ds_enable = pm->ds_enable_user && !monitor; + + mt7925_mcu_set_deep_sleep(dev, pm->ds_enable); + + if (monitor) + mt7925_mcu_set_beacon_filter(dev, vif, false); +} + +void mt7925_set_runtime_pm(struct mt792x_dev *dev) +{ + struct ieee80211_hw *hw = mt76_hw(dev); + struct mt76_connac_pm *pm = &dev->pm; + bool monitor = !!(hw->conf.flags & IEEE80211_CONF_MONITOR); + + pm->enable = pm->enable_user && !monitor; + ieee80211_iterate_active_interfaces(hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7925_pm_interface_iter, dev); + pm->ds_enable = pm->ds_enable_user && !monitor; + mt7925_mcu_set_deep_sleep(dev, pm->ds_enable); +} + +static int mt7925_config(struct ieee80211_hw *hw, u32 changed) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + int ret = 0; + + mt792x_mutex_acquire(dev); + + if (changed & IEEE80211_CONF_CHANGE_POWER) { + ret = mt7925_set_tx_sar_pwr(hw, NULL); + if (ret) + goto out; + } + + if (changed & IEEE80211_CONF_CHANGE_MONITOR) { + ieee80211_iterate_active_interfaces(hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7925_sniffer_interface_iter, dev); + } + +out: + mt792x_mutex_release(dev); + + return ret; +} + +static void mt7925_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ +#define MT7925_FILTER_FCSFAIL BIT(2) +#define MT7925_FILTER_CONTROL BIT(5) +#define MT7925_FILTER_OTHER_BSS BIT(6) +#define MT7925_FILTER_ENABLE BIT(31) + struct mt792x_dev *dev = mt792x_hw_dev(hw); + u32 flags = MT7925_FILTER_ENABLE; + +#define MT7925_FILTER(_fif, _type) do { \ + if (*total_flags & (_fif)) \ + flags |= MT7925_FILTER_##_type; \ + } while (0) + + MT7925_FILTER(FIF_FCSFAIL, FCSFAIL); + MT7925_FILTER(FIF_CONTROL, CONTROL); + MT7925_FILTER(FIF_OTHER_BSS, OTHER_BSS); + + mt792x_mutex_acquire(dev); + mt7925_mcu_set_rxfilter(dev, flags, 0, 0); + mt792x_mutex_release(dev); + + *total_flags &= (FIF_OTHER_BSS | FIF_FCSFAIL | FIF_CONTROL); +} + +static u8 +mt7925_get_rates_table(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + bool beacon, bool mcast) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct mt76_phy *mphy = hw->priv; + u16 rate; + u8 i, idx, ht; + + rate = 
mt76_connac2_mac_tx_rate_val(mphy, vif, beacon, mcast); + ht = FIELD_GET(MT_TX_RATE_MODE, rate) > MT_PHY_TYPE_OFDM; + + if (beacon && ht) { + struct mt792x_dev *dev = mt792x_hw_dev(hw); + + /* beacon rates must be written at an odd table index */ + idx = MT7925_BEACON_RATES_TBL + 2 * (mvif->idx % 20); + mt7925_mac_set_fixed_rate_table(dev, idx, rate); + return idx; + } + + idx = FIELD_GET(MT_TX_RATE_IDX, rate); + for (i = 0; i < ARRAY_SIZE(mt76_rates); i++) + if ((mt76_rates[i].hw_value & GENMASK(7, 0)) == idx) + return MT792x_BASIC_RATES_TBL + i; + + return mvif->basic_rates_idx; +} + +static void mt7925_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u64 changed) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct mt792x_phy *phy = mt792x_hw_phy(hw); + struct mt792x_dev *dev = mt792x_hw_dev(hw); + + mt792x_mutex_acquire(dev); + + if (changed & BSS_CHANGED_ERP_SLOT) { + int slottime = info->use_short_slot ? 9 : 20; + + if (slottime != phy->slottime) { + phy->slottime = slottime; + mt792x_mac_set_timeing(phy); + } + } + + if (changed & BSS_CHANGED_MCAST_RATE) + mvif->mcast_rates_idx = + mt7925_get_rates_table(hw, vif, false, true); + + if (changed & BSS_CHANGED_BASIC_RATES) + mvif->basic_rates_idx = + mt7925_get_rates_table(hw, vif, false, false); + + if (changed & (BSS_CHANGED_BEACON | + BSS_CHANGED_BEACON_ENABLED)) { + mvif->beacon_rates_idx = + mt7925_get_rates_table(hw, vif, true, false); + + mt7925_mcu_uni_add_beacon_offload(dev, hw, vif, + info->enable_beacon); + } + + /* make sure txcmd_mode is enabled after the bss_info update */ + if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED)) + mt7925_mcu_set_tx(dev, vif); + + if (changed & BSS_CHANGED_PS) + mt7925_mcu_uni_bss_ps(dev, vif); + + if (changed & BSS_CHANGED_ASSOC) { + mt7925_mcu_sta_update(dev, NULL, vif, true, + MT76_STA_INFO_STATE_ASSOC); + mt7925_mcu_set_beacon_filter(dev, vif, vif->cfg.assoc); + } + + if (changed & BSS_CHANGED_ARP_FILTER) { + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + + mt7925_mcu_update_arp_filter(&dev->mt76, &mvif->mt76, info); + } + + mt792x_mutex_release(dev); +} + +int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + int ret, idx; + + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT792x_WTBL_STA - 1); + if (idx < 0) + return -ENOSPC; + + INIT_LIST_HEAD(&msta->wcid.poll_list); + msta->vif = mvif; + msta->wcid.sta = 1; + msta->wcid.idx = idx; + msta->wcid.phy_idx = mvif->mt76.band_idx; + msta->wcid.tx_info |= MT_WCID_TX_INFO_SET; + msta->last_txs = jiffies; + + ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm); + if (ret) + return ret; + + if (vif->type == NL80211_IFTYPE_STATION) + mvif->wep_sta = msta; + + mt7925_mac_wtbl_update(dev, idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + + /* should update bss info before STA add */ + if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) + mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, sta, + false); + + ret = mt7925_mcu_sta_update(dev, sta, vif, true, + MT76_STA_INFO_STATE_NONE); + if (ret) + return ret; + + mt76_connac_power_save_sched(&dev->mphy, &dev->pm); + + return 0; +} +EXPORT_SYMBOL_GPL(mt7925_mac_sta_add); + +void mt7925_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt792x_dev *dev =
container_of(mdev, struct mt792x_dev, mt76); + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + + mt792x_mutex_acquire(dev); + + if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) + mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, sta, + true); + + ewma_avg_signal_init(&msta->avg_ack_signal); + + mt7925_mac_wtbl_update(dev, msta->wcid.idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac)); + + mt7925_mcu_sta_update(dev, sta, vif, true, MT76_STA_INFO_STATE_ASSOC); + + mt792x_mutex_release(dev); +} +EXPORT_SYMBOL_GPL(mt7925_mac_sta_assoc); + +void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + + mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid); + mt76_connac_pm_wake(&dev->mphy, &dev->pm); + + mt7925_mcu_sta_update(dev, sta, vif, false, MT76_STA_INFO_STATE_NONE); + mt7925_mac_wtbl_update(dev, msta->wcid.idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + + if (vif->type == NL80211_IFTYPE_STATION) { + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + + mvif->wep_sta = NULL; + ewma_rssi_init(&mvif->rssi); + if (!sta->tdls) + mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, sta, + false); + } + + spin_lock_bh(&mdev->sta_poll_lock); + if (!list_empty(&msta->wcid.poll_list)) + list_del_init(&msta->wcid.poll_list); + spin_unlock_bh(&mdev->sta_poll_lock); + + mt76_connac_power_save_sched(&dev->mphy, &dev->pm); +} +EXPORT_SYMBOL_GPL(mt7925_mac_sta_remove); + +static int mt7925_set_rts_threshold(struct ieee80211_hw *hw, u32 val) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + + mt792x_mutex_acquire(dev); + mt7925_mcu_set_rts_thresh(&dev->phy, val); + mt792x_mutex_release(dev); + + return 0; +} + +static int +mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params) +{ + enum ieee80211_ampdu_mlme_action action = params->action; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct ieee80211_sta *sta = params->sta; + struct ieee80211_txq *txq = sta->txq[params->tid]; + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + u16 tid = params->tid; + u16 ssn = params->ssn; + struct mt76_txq *mtxq; + int ret = 0; + + if (!txq) + return -EINVAL; + + mtxq = (struct mt76_txq *)txq->drv_priv; + + mt792x_mutex_acquire(dev); + switch (action) { + case IEEE80211_AMPDU_RX_START: + mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn, + params->buf_size); + mt7925_mcu_uni_rx_ba(dev, params, true); + break; + case IEEE80211_AMPDU_RX_STOP: + mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid); + mt7925_mcu_uni_rx_ba(dev, params, false); + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + mtxq->aggr = true; + mtxq->send_bar = false; + mt7925_mcu_uni_tx_ba(dev, params, true); + break; + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + mtxq->aggr = false; + clear_bit(tid, &msta->wcid.ampdu_state); + mt7925_mcu_uni_tx_ba(dev, params, false); + break; + case IEEE80211_AMPDU_TX_START: + set_bit(tid, &msta->wcid.ampdu_state); + ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; + break; + case IEEE80211_AMPDU_TX_STOP_CONT: + mtxq->aggr = false; + clear_bit(tid, &msta->wcid.ampdu_state); + mt7925_mcu_uni_tx_ba(dev, params, false); + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + } + 
mt792x_mutex_release(dev); + + return ret; +} + +static bool is_valid_alpha2(const char *alpha2) +{ + if (!alpha2) + return false; + + if (alpha2[0] == '0' && alpha2[1] == '0') + return true; + + if (isalpha(alpha2[0]) && isalpha(alpha2[1])) + return true; + + return false; +} + +void mt7925_scan_work(struct work_struct *work) +{ + struct mt792x_phy *phy; + + phy = (struct mt792x_phy *)container_of(work, struct mt792x_phy, + scan_work.work); + + while (true) { + struct mt76_dev *mdev = &phy->dev->mt76; + struct sk_buff *skb; + struct tlv *tlv; + int tlv_len; + + spin_lock_bh(&phy->dev->mt76.lock); + skb = __skb_dequeue(&phy->scan_event_list); + spin_unlock_bh(&phy->dev->mt76.lock); + + if (!skb) + break; + + skb_pull(skb, sizeof(struct mt7925_mcu_rxd) + 4); + tlv = (struct tlv *)skb->data; + tlv_len = skb->len; + + while (tlv_len > 0 && le16_to_cpu(tlv->len) <= tlv_len) { + struct mt7925_mcu_scan_chinfo_event *evt; + + switch (le16_to_cpu(tlv->tag)) { + case UNI_EVENT_SCAN_DONE_BASIC: + if (test_and_clear_bit(MT76_HW_SCANNING, &phy->mt76->state)) { + struct cfg80211_scan_info info = { + .aborted = false, + }; + ieee80211_scan_completed(phy->mt76->hw, &info); + } + break; + case UNI_EVENT_SCAN_DONE_CHNLINFO: + evt = (struct mt7925_mcu_scan_chinfo_event *)tlv->data; + + if (!is_valid_alpha2(evt->alpha2)) + break; + + if (mdev->alpha2[0] != '0' && mdev->alpha2[1] != '0') + break; + + mt7925_mcu_set_clc(phy->dev, evt->alpha2, ENVIRON_INDOOR); + + break; + case UNI_EVENT_SCAN_DONE_NLO: + ieee80211_sched_scan_results(phy->mt76->hw); + break; + default: + break; + } + + tlv_len -= le16_to_cpu(tlv->len); + tlv = (struct tlv *)((char *)(tlv) + le16_to_cpu(tlv->len)); + } + + dev_kfree_skb(skb); + } +} + +static int +mt7925_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_scan_request *req) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt76_phy *mphy = hw->priv; + int err; + + mt792x_mutex_acquire(dev); + err = mt7925_mcu_hw_scan(mphy, vif, req); + mt792x_mutex_release(dev); + + return err; +} + +static void +mt7925_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt76_phy *mphy = hw->priv; + + mt792x_mutex_acquire(dev); + mt7925_mcu_cancel_hw_scan(mphy, vif); + mt792x_mutex_release(dev); +} + +static int +mt7925_start_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_scan_ies *ies) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt76_phy *mphy = hw->priv; + int err; + + mt792x_mutex_acquire(dev); + + err = mt7925_mcu_sched_scan_req(mphy, vif, req); + if (err < 0) + goto out; + + err = mt7925_mcu_sched_scan_enable(mphy, vif, true); +out: + mt792x_mutex_release(dev); + + return err; +} + +static int +mt7925_stop_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt76_phy *mphy = hw->priv; + int err; + + mt792x_mutex_acquire(dev); + err = mt7925_mcu_sched_scan_enable(mphy, vif, false); + mt792x_mutex_release(dev); + + return err; +} + +static int +mt7925_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_phy *phy = mt792x_hw_phy(hw); + int max_nss = hweight8(hw->wiphy->available_antennas_tx); + + if (!tx_ant || tx_ant != rx_ant || ffs(tx_ant) > max_nss) + return -EINVAL; + + if ((BIT(hweight8(tx_ant)) - 1) != tx_ant) + tx_ant = BIT(ffs(tx_ant) - 1) - 1; + + 
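+ /* Apply the new antenna mask and re-advertise the stream-dependent + * HT/VHT and HE/EHT capabilities under the device mutex. + */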
mt792x_mutex_acquire(dev); + + phy->mt76->antenna_mask = tx_ant; + phy->mt76->chainmask = tx_ant; + + mt76_set_stream_caps(phy->mt76, true); + mt7925_set_stream_he_eht_caps(phy); + + /* TODO: update bmc_wtbl spe_idx when antenna changes */ + mt792x_mutex_release(dev); + + return 0; +} + +#ifdef CONFIG_PM +static int mt7925_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_phy *phy = mt792x_hw_phy(hw); + + cancel_delayed_work_sync(&phy->scan_work); + cancel_delayed_work_sync(&phy->mt76->mac_work); + + cancel_delayed_work_sync(&dev->pm.ps_work); + mt76_connac_free_pending_tx_skbs(&dev->pm, NULL); + + mt792x_mutex_acquire(dev); + + clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); + ieee80211_iterate_active_interfaces(hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7925_mcu_set_suspend_iter, + &dev->mphy); + + mt792x_mutex_release(dev); + + return 0; +} + +static int mt7925_resume(struct ieee80211_hw *hw) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_phy *phy = mt792x_hw_phy(hw); + + mt792x_mutex_acquire(dev); + + set_bit(MT76_STATE_RUNNING, &phy->mt76->state); + ieee80211_iterate_active_interfaces(hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7925_mcu_set_suspend_iter, + &dev->mphy); + + ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work, + MT792x_WATCHDOG_TIME); + + mt792x_mutex_release(dev); + + return 0; +} + +static void mt7925_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + + mt792x_mutex_acquire(dev); + mt76_connac_mcu_update_gtk_rekey(hw, vif, data); + mt792x_mutex_release(dev); +} +#endif /* CONFIG_PM */ + +static void mt7925_sta_set_decap_offload(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + bool enabled) +{ + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + + mt792x_mutex_acquire(dev); + + if (enabled) + set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); + else + clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); + + mt7925_mcu_wtbl_update_hdr_trans(dev, vif, sta); + + mt792x_mutex_release(dev); +} + +#if IS_ENABLED(CONFIG_IPV6) +static void mt7925_ipv6_addr_change(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct inet6_dev *idev) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mvif->phy->dev; + struct inet6_ifaddr *ifa; + struct sk_buff *skb; + u8 idx = 0; + + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct mt7925_arpns_tlv arpns; + struct in6_addr ns_addrs[IEEE80211_BSS_ARP_ADDR_LIST_LEN]; + } req_hdr = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .arpns = { + .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND), + .len = cpu_to_le16(sizeof(req_hdr) - 4), + .enable = true, + }, + }; + + read_lock_bh(&idev->lock); + list_for_each_entry(ifa, &idev->addr_list, if_list) { + if (ifa->flags & IFA_F_TENTATIVE) + continue; + req_hdr.ns_addrs[idx] = ifa->addr; + if (++idx >= IEEE80211_BSS_ARP_ADDR_LIST_LEN) + break; + } + read_unlock_bh(&idev->lock); + + if (!idx) + return; + + req_hdr.arpns.ips_num = idx; + + skb = __mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req_hdr), + 0, GFP_ATOMIC); + if (!skb) + return; + + skb_put_data(skb, &req_hdr, sizeof(req_hdr)); + + skb_queue_tail(&dev->ipv6_ns_list, skb); + + ieee80211_queue_work(dev->mt76.hw, &dev->ipv6_ns_work); +} +#endif + +int 
mt7925_set_tx_sar_pwr(struct ieee80211_hw *hw, + const struct cfg80211_sar_specs *sar) +{ + struct mt76_phy *mphy = hw->priv; + + if (sar) { + int err = mt76_init_sar_power(hw, sar); + + if (err) + return err; + } + mt792x_init_acpi_sar_power(mt792x_hw_phy(hw), !sar); + + return mt7925_mcu_set_rate_txpower(mphy); +} + +static int mt7925_set_sar_specs(struct ieee80211_hw *hw, + const struct cfg80211_sar_specs *sar) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + int err; + + mt792x_mutex_acquire(dev); + err = mt7925_mcu_set_clc(dev, dev->mt76.alpha2, + dev->country_ie_env); + if (err < 0) + goto out; + + err = mt7925_set_tx_sar_pwr(hw, sar); +out: + mt792x_mutex_release(dev); + + return err; +} + +static void +mt7925_channel_switch_beacon(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_chan_def *chandef) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + + mt792x_mutex_acquire(dev); + mt7925_mcu_uni_add_beacon_offload(dev, hw, vif, true); + mt792x_mutex_release(dev); +} + +static int +mt7925_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + int err; + + mt792x_mutex_acquire(dev); + + err = mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, NULL, + true); + if (err) + goto out; + + err = mt7925_mcu_set_bss_pm(dev, vif, true); + if (err) + goto out; + + err = mt7925_mcu_sta_update(dev, NULL, vif, true, + MT76_STA_INFO_STATE_NONE); +out: + mt792x_mutex_release(dev); + + return err; +} + +static void +mt7925_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + int err; + + mt792x_mutex_acquire(dev); + + err = mt7925_mcu_set_bss_pm(dev, vif, false); + if (err) + goto out; + + mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, NULL, + false); + +out: + mt792x_mutex_release(dev); +} + +static int +mt7925_add_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) +{ + return 0; +} + +static void +mt7925_remove_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) +{ +} + +static void mt7925_ctx_iter(void *priv, u8 *mac, + struct ieee80211_vif *vif) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct ieee80211_chanctx_conf *ctx = priv; + + if (ctx != mvif->mt76.ctx) + return; + + if (vif->type == NL80211_IFTYPE_MONITOR) { + mt7925_mcu_set_sniffer(mvif->phy->dev, vif, true); + mt7925_mcu_config_sniffer(mvif, ctx); + } else { + mt7925_mcu_set_chctx(mvif->phy->mt76, &mvif->mt76, ctx); + } +} + +static void +mt7925_change_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx, + u32 changed) +{ + struct mt792x_phy *phy = mt792x_hw_phy(hw); + + mt792x_mutex_acquire(phy->dev); + ieee80211_iterate_active_interfaces(phy->mt76->hw, + IEEE80211_IFACE_ITER_ACTIVE, + mt7925_ctx_iter, ctx); + mt792x_mutex_release(phy->dev); +} + +static void mt7925_mgd_prepare_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_prep_tx_info *info) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + u16 duration = info->duration ? 
info->duration : + jiffies_to_msecs(HZ); + + mt792x_mutex_acquire(dev); + mt7925_set_roc(mvif->phy, mvif, mvif->mt76.ctx->def.chan, duration, + MT7925_ROC_REQ_JOIN); + mt792x_mutex_release(dev); +} + +static void mt7925_mgd_complete_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_prep_tx_info *info) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + + mt7925_abort_roc(mvif->phy, mvif); +} + +const struct ieee80211_ops mt7925_ops = { + .tx = mt792x_tx, + .start = mt7925_start, + .stop = mt792x_stop, + .add_interface = mt7925_add_interface, + .remove_interface = mt792x_remove_interface, + .config = mt7925_config, + .conf_tx = mt792x_conf_tx, + .configure_filter = mt7925_configure_filter, + .bss_info_changed = mt7925_bss_info_changed, + .start_ap = mt7925_start_ap, + .stop_ap = mt7925_stop_ap, + .sta_state = mt76_sta_state, + .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove, + .set_key = mt7925_set_key, + .sta_set_decap_offload = mt7925_sta_set_decap_offload, +#if IS_ENABLED(CONFIG_IPV6) + .ipv6_addr_change = mt7925_ipv6_addr_change, +#endif /* CONFIG_IPV6 */ + .ampdu_action = mt7925_ampdu_action, + .set_rts_threshold = mt7925_set_rts_threshold, + .wake_tx_queue = mt76_wake_tx_queue, + .release_buffered_frames = mt76_release_buffered_frames, + .channel_switch_beacon = mt7925_channel_switch_beacon, + .get_txpower = mt76_get_txpower, + .get_stats = mt792x_get_stats, + .get_et_sset_count = mt792x_get_et_sset_count, + .get_et_strings = mt792x_get_et_strings, + .get_et_stats = mt792x_get_et_stats, + .get_tsf = mt792x_get_tsf, + .set_tsf = mt792x_set_tsf, + .get_survey = mt76_get_survey, + .get_antenna = mt76_get_antenna, + .set_antenna = mt7925_set_antenna, + .set_coverage_class = mt792x_set_coverage_class, + .hw_scan = mt7925_hw_scan, + .cancel_hw_scan = mt7925_cancel_hw_scan, + .sta_statistics = mt792x_sta_statistics, + .sched_scan_start = mt7925_start_sched_scan, + .sched_scan_stop = mt7925_stop_sched_scan, +#ifdef CONFIG_PM + .suspend = mt7925_suspend, + .resume = mt7925_resume, + .set_wakeup = mt792x_set_wakeup, + .set_rekey_data = mt7925_set_rekey_data, +#endif /* CONFIG_PM */ + .flush = mt792x_flush, + .set_sar_specs = mt7925_set_sar_specs, + .remain_on_channel = mt7925_remain_on_channel, + .cancel_remain_on_channel = mt7925_cancel_remain_on_channel, + .add_chanctx = mt7925_add_chanctx, + .remove_chanctx = mt7925_remove_chanctx, + .change_chanctx = mt7925_change_chanctx, + .assign_vif_chanctx = mt792x_assign_vif_chanctx, + .unassign_vif_chanctx = mt792x_unassign_vif_chanctx, + .mgd_prepare_tx = mt7925_mgd_prepare_tx, + .mgd_complete_tx = mt7925_mgd_complete_tx, +}; +EXPORT_SYMBOL_GPL(mt7925_ops); + +MODULE_AUTHOR("Deren Wu <deren.wu@mediatek.com>"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c new file mode 100644 index 000000000000..9c0e397537ac --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c @@ -0,0 +1,3174 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. 
*/ + +#include <linux/fs.h> +#include <linux/firmware.h> +#include "mt7925.h" +#include "mcu.h" +#include "mac.h" + +#define MT_STA_BFER BIT(0) +#define MT_STA_BFEE BIT(1) + +static bool mt7925_disable_clc; +module_param_named(disable_clc, mt7925_disable_clc, bool, 0644); +MODULE_PARM_DESC(disable_clc, "disable CLC support"); + +int mt7925_mcu_parse_response(struct mt76_dev *mdev, int cmd, + struct sk_buff *skb, int seq) +{ + int mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd); + struct mt7925_mcu_rxd *rxd; + int ret = 0; + + if (!skb) { + dev_err(mdev->dev, "Message %08x (seq %d) timeout\n", cmd, seq); + mt792x_reset(mdev); + + return -ETIMEDOUT; + } + + rxd = (struct mt7925_mcu_rxd *)skb->data; + if (seq != rxd->seq) + return -EAGAIN; + + if (cmd == MCU_CMD(PATCH_SEM_CONTROL) || + cmd == MCU_CMD(PATCH_FINISH_REQ)) { + skb_pull(skb, sizeof(*rxd) - 4); + ret = *skb->data; + } else if (cmd == MCU_UNI_CMD(DEV_INFO_UPDATE) || + cmd == MCU_UNI_CMD(BSS_INFO_UPDATE) || + cmd == MCU_UNI_CMD(STA_REC_UPDATE) || + cmd == MCU_UNI_CMD(HIF_CTRL) || + cmd == MCU_UNI_CMD(OFFLOAD) || + cmd == MCU_UNI_CMD(SUSPEND)) { + struct mt7925_mcu_uni_event *event; + + skb_pull(skb, sizeof(*rxd)); + event = (struct mt7925_mcu_uni_event *)skb->data; + ret = le32_to_cpu(event->status); + /* skip invalid event */ + if (mcu_cmd != event->cid) + ret = -EAGAIN; + } else { + skb_pull(skb, sizeof(*rxd)); + } + + return ret; +} +EXPORT_SYMBOL_GPL(mt7925_mcu_parse_response); + +int mt7925_mcu_regval(struct mt792x_dev *dev, u32 regidx, u32 *val, bool set) +{ +#define MT_RF_REG_HDR GENMASK(31, 24) +#define MT_RF_REG_ANT GENMASK(23, 16) +#define RF_REG_PREFIX 0x99 + struct { + u8 __rsv[4]; + union { + struct uni_cmd_access_reg_basic { + __le16 tag; + __le16 len; + __le32 idx; + __le32 data; + } __packed reg; + struct uni_cmd_access_rf_reg_basic { + __le16 tag; + __le16 len; + __le16 ant; + u8 __rsv[2]; + __le32 idx; + __le32 data; + } __packed rf_reg; + }; + } __packed * res, req; + struct sk_buff *skb; + int ret; + + if (u32_get_bits(regidx, MT_RF_REG_HDR) == RF_REG_PREFIX) { + req.rf_reg.tag = cpu_to_le16(UNI_CMD_ACCESS_RF_REG_BASIC); + req.rf_reg.len = cpu_to_le16(sizeof(req.rf_reg)); + req.rf_reg.ant = cpu_to_le16(u32_get_bits(regidx, MT_RF_REG_ANT)); + req.rf_reg.idx = cpu_to_le32(regidx); + req.rf_reg.data = set ? cpu_to_le32(*val) : 0; + } else { + req.reg.tag = cpu_to_le16(UNI_CMD_ACCESS_REG_BASIC); + req.reg.len = cpu_to_le16(sizeof(req.reg)); + req.reg.idx = cpu_to_le32(regidx); + req.reg.data = set ? 
cpu_to_le32(*val) : 0; + } + + if (set) + return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(REG_ACCESS), + &req, sizeof(req), true); + + ret = mt76_mcu_send_and_get_msg(&dev->mt76, + MCU_WM_UNI_CMD_QUERY(REG_ACCESS), + &req, sizeof(req), true, &skb); + if (ret) + return ret; + + res = (void *)skb->data; + if (u32_get_bits(regidx, MT_RF_REG_HDR) == RF_REG_PREFIX) + *val = le32_to_cpu(res->rf_reg.data); + else + *val = le32_to_cpu(res->reg.data); + + dev_kfree_skb(skb); + + return 0; +} +EXPORT_SYMBOL_GPL(mt7925_mcu_regval); + +int mt7925_mcu_update_arp_filter(struct mt76_dev *dev, + struct mt76_vif *vif, + struct ieee80211_bss_conf *info) +{ + struct ieee80211_vif *mvif = container_of(info, struct ieee80211_vif, + bss_conf); + struct sk_buff *skb; + int i, len = min_t(int, mvif->cfg.arp_addr_cnt, + IEEE80211_BSS_ARP_ADDR_LIST_LEN); + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct mt7925_arpns_tlv arp; + } req = { + .hdr = { + .bss_idx = vif->idx, + }, + .arp = { + .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP), + .len = cpu_to_le16(sizeof(req) - 4 + len * 2 * sizeof(__be32)), + .ips_num = len, + .enable = true, + }, + }; + + skb = mt76_mcu_msg_alloc(dev, NULL, sizeof(req) + len * 2 * sizeof(__be32)); + if (!skb) + return -ENOMEM; + + skb_put_data(skb, &req, sizeof(req)); + for (i = 0; i < len; i++) { + skb_put_data(skb, &mvif->cfg.arp_addr_list[i], sizeof(__be32)); + skb_put_zero(skb, sizeof(__be32)); + } + + return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD(OFFLOAD), true); +} + +#ifdef CONFIG_PM +static int +mt7925_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif, + bool suspend, struct cfg80211_wowlan *wowlan) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct mt76_dev *dev = phy->dev; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct mt76_connac_wow_ctrl_tlv wow_ctrl_tlv; + struct mt76_connac_wow_gpio_param_tlv gpio_tlv; + } req = { + .hdr = { + .bss_idx = mvif->idx, + }, + .wow_ctrl_tlv = { + .tag = cpu_to_le16(UNI_SUSPEND_WOW_CTRL), + .len = cpu_to_le16(sizeof(struct mt76_connac_wow_ctrl_tlv)), + .cmd = suspend ? 
1 : 2, + }, + .gpio_tlv = { + .tag = cpu_to_le16(UNI_SUSPEND_WOW_GPIO_PARAM), + .len = cpu_to_le16(sizeof(struct mt76_connac_wow_gpio_param_tlv)), + .gpio_pin = 0xff, /* follow fw about GPIO pin */ + }, + }; + + if (wowlan->magic_pkt) + req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_MAGIC; + if (wowlan->disconnect) + req.wow_ctrl_tlv.trigger |= (UNI_WOW_DETECT_TYPE_DISCONNECT | + UNI_WOW_DETECT_TYPE_BCN_LOST); + if (wowlan->nd_config) { + mt7925_mcu_sched_scan_req(phy, vif, wowlan->nd_config); + req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_SCH_SCAN_HIT; + mt7925_mcu_sched_scan_enable(phy, vif, suspend); + } + if (wowlan->n_patterns) + req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_BITMAP; + + if (mt76_is_mmio(dev)) + req.wow_ctrl_tlv.wakeup_hif = WOW_PCIE; + else if (mt76_is_usb(dev)) + req.wow_ctrl_tlv.wakeup_hif = WOW_USB; + else if (mt76_is_sdio(dev)) + req.wow_ctrl_tlv.wakeup_hif = WOW_GPIO; + + return mt76_mcu_send_msg(dev, MCU_UNI_CMD(SUSPEND), &req, + sizeof(req), true); +} + +static int +mt7925_mcu_set_wow_pattern(struct mt76_dev *dev, + struct ieee80211_vif *vif, + u8 index, bool enable, + struct cfg80211_pkt_pattern *pattern) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct mt7925_wow_pattern_tlv *tlv; + struct sk_buff *skb; + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr = { + .bss_idx = mvif->idx, + }; + + skb = mt76_mcu_msg_alloc(dev, NULL, sizeof(hdr) + sizeof(*tlv)); + if (!skb) + return -ENOMEM; + + skb_put_data(skb, &hdr, sizeof(hdr)); + tlv = (struct mt7925_wow_pattern_tlv *)skb_put(skb, sizeof(*tlv)); + tlv->tag = cpu_to_le16(UNI_SUSPEND_WOW_PATTERN); + tlv->len = cpu_to_le16(sizeof(*tlv)); + tlv->bss_idx = 0xF; + tlv->data_len = pattern->pattern_len; + tlv->enable = enable; + tlv->index = index; + tlv->offset = 0; + + memcpy(tlv->pattern, pattern->pattern, pattern->pattern_len); + memcpy(tlv->mask, pattern->mask, DIV_ROUND_UP(pattern->pattern_len, 8)); + + return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD(SUSPEND), true); +} + +void mt7925_mcu_set_suspend_iter(void *priv, u8 *mac, + struct ieee80211_vif *vif) +{ + struct mt76_phy *phy = priv; + bool suspend = !test_bit(MT76_STATE_RUNNING, &phy->state); + struct ieee80211_hw *hw = phy->hw; + struct cfg80211_wowlan *wowlan = hw->wiphy->wowlan_config; + int i; + + mt76_connac_mcu_set_gtk_rekey(phy->dev, vif, suspend); + + mt76_connac_mcu_set_suspend_mode(phy->dev, vif, suspend, 1, true); + + for (i = 0; i < wowlan->n_patterns; i++) + mt7925_mcu_set_wow_pattern(phy->dev, vif, i, suspend, + &wowlan->patterns[i]); + mt7925_connac_mcu_set_wow_ctrl(phy, vif, suspend, wowlan); +} + +#endif /* CONFIG_PM */ + +static void +mt7925_mcu_connection_loss_iter(void *priv, u8 *mac, + struct ieee80211_vif *vif) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct mt7925_uni_beacon_loss_event *event = priv; + + if (mvif->idx != event->hdr.bss_idx) + return; + + if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER) || + vif->type != NL80211_IFTYPE_STATION) + return; + + ieee80211_connection_loss(vif); +} + +static void +mt7925_mcu_connection_loss_event(struct mt792x_dev *dev, struct sk_buff *skb) +{ + struct mt7925_uni_beacon_loss_event *event; + struct mt76_phy *mphy = &dev->mt76.phy; + + skb_pull(skb, sizeof(struct mt7925_mcu_rxd)); + event = (struct mt7925_uni_beacon_loss_event *)skb->data; + + ieee80211_iterate_active_interfaces_atomic(mphy->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7925_mcu_connection_loss_iter, event); +} + +static void +mt7925_mcu_roc_iter(void *priv, u8 *mac, 
struct ieee80211_vif *vif) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct mt7925_roc_grant_tlv *grant = priv; + + if (mvif->idx != grant->bss_idx) + return; + + mvif->band_idx = grant->dbdcband; +} + +static void +mt7925_mcu_uni_roc_event(struct mt792x_dev *dev, struct sk_buff *skb) +{ + struct ieee80211_hw *hw = dev->mt76.hw; + struct mt7925_roc_grant_tlv *grant; + struct mt7925_mcu_rxd *rxd; + int duration; + + rxd = (struct mt7925_mcu_rxd *)skb->data; + grant = (struct mt7925_roc_grant_tlv *)(rxd->tlv + 4); + + /* should never happen */ + WARN_ON_ONCE((le16_to_cpu(grant->tag) != UNI_EVENT_ROC_GRANT)); + + if (grant->reqtype == MT7925_ROC_REQ_ROC) + ieee80211_ready_on_channel(hw); + else if (grant->reqtype == MT7925_ROC_REQ_JOIN) + ieee80211_iterate_active_interfaces_atomic(hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7925_mcu_roc_iter, grant); + dev->phy.roc_grant = true; + wake_up(&dev->phy.roc_wait); + duration = le32_to_cpu(grant->max_interval); + mod_timer(&dev->phy.roc_timer, + jiffies + msecs_to_jiffies(duration)); +} + +static void +mt7925_mcu_scan_event(struct mt792x_dev *dev, struct sk_buff *skb) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt792x_phy *phy = (struct mt792x_phy *)mphy->priv; + + spin_lock_bh(&dev->mt76.lock); + __skb_queue_tail(&phy->scan_event_list, skb); + spin_unlock_bh(&dev->mt76.lock); + + ieee80211_queue_delayed_work(mphy->hw, &phy->scan_work, + MT792x_HW_SCAN_TIMEOUT); +} + +static void +mt7925_mcu_tx_done_event(struct mt792x_dev *dev, struct sk_buff *skb) +{ +#define UNI_EVENT_TX_DONE_MSG 0 +#define UNI_EVENT_TX_DONE_RAW 1 + struct mt7925_mcu_txs_event { + u8 ver; + u8 rsv[3]; + u8 data[0]; + } __packed * txs; + struct tlv *tlv; + u32 tlv_len; + + skb_pull(skb, sizeof(struct mt7925_mcu_rxd) + 4); + tlv = (struct tlv *)skb->data; + tlv_len = skb->len; + + while (tlv_len > 0 && le16_to_cpu(tlv->len) <= tlv_len) { + switch (le16_to_cpu(tlv->tag)) { + case UNI_EVENT_TX_DONE_RAW: + txs = (struct mt7925_mcu_txs_event *)tlv->data; + mt7925_mac_add_txs(dev, txs->data); + break; + default: + break; + } + tlv_len -= le16_to_cpu(tlv->len); + tlv = (struct tlv *)((char *)(tlv) + le16_to_cpu(tlv->len)); + } +} + +static void +mt7925_mcu_uni_debug_msg_event(struct mt792x_dev *dev, struct sk_buff *skb) +{ + struct mt7925_uni_debug_msg { + __le16 tag; + __le16 len; + u8 fmt; + u8 rsv[3]; + u8 id; + u8 type:3; + u8 nr_args:5; + union { + struct idxlog { + __le16 rsv; + __le32 ts; + __le32 idx; + u8 data[]; + } __packed idx; + struct txtlog { + u8 len; + u8 rsv; + __le32 ts; + u8 data[]; + } __packed txt; + }; + } __packed * hdr; + + skb_pull(skb, sizeof(struct mt7925_mcu_rxd) + 4); + hdr = (struct mt7925_uni_debug_msg *)skb->data; + + if (hdr->id == 0x28) { + skb_pull(skb, offsetof(struct mt7925_uni_debug_msg, id)); + wiphy_info(mt76_hw(dev)->wiphy, "%.*s", skb->len, skb->data); + return; + } else if (hdr->id != 0xa8) { + return; + } + + if (hdr->type == 0) { /* idx log */ + int i, ret, len = PAGE_SIZE - 1, nr_val; + struct page *page = dev_alloc_pages(get_order(len)); + __le32 *val; + char *buf, *cur; + + if (!page) + return; + + buf = page_address(page); + cur = buf; + + nr_val = (le16_to_cpu(hdr->len) - sizeof(*hdr)) / 4; + val = (__le32 *)hdr->idx.data; + for (i = 0; i < nr_val && len > 0; i++) { + ret = snprintf(cur, len, "0x%x,", le32_to_cpu(val[i])); + if (ret <= 0) + break; + + cur += ret; + len -= ret; + } + if (cur > buf) + wiphy_info(mt76_hw(dev)->wiphy, "idx: 0x%X,%d,%s", + le32_to_cpu(hdr->idx.idx), nr_val, buf); + 
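/*
 * The TX-done handler above walks TLVs laid out back to back. A hedged
 * sketch of that walk, assuming the mt76 struct tlv layout of a
 * little-endian tag followed by a little-endian length that covers the
 * header itself; unlike the loop above, the sketch also rejects
 * zero-length entries so a malformed chain cannot spin forever.
 *
 *	struct example_tlv {		// illustrative, mirrors struct tlv
 *		__le16 tag;
 *		__le16 len;		// total length, header included
 *		u8 data[];
 *	};
 *
 *	static void walk_tlvs(const u8 *buf, u32 buf_len,
 *			      void (*cb)(u16 tag, const u8 *data, u16 len))
 *	{
 *		while (buf_len >= sizeof(struct example_tlv)) {
 *			const struct example_tlv *tlv = (const void *)buf;
 *			u16 len = le16_to_cpu(tlv->len);
 *
 *			if (len < sizeof(*tlv) || len > buf_len)
 *				break;	// malformed entry, stop
 *
 *			cb(le16_to_cpu(tlv->tag), tlv->data,
 *			   len - sizeof(*tlv));
 *			buf += len;
 *			buf_len -= len;
 *		}
 *	}
 */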
put_page(page); + } else if (hdr->type == 2) { /* str log */ + wiphy_info(mt76_hw(dev)->wiphy, "%.*s", hdr->txt.len, hdr->txt.data); + } +} + +static void +mt7925_mcu_uni_rx_unsolicited_event(struct mt792x_dev *dev, + struct sk_buff *skb) +{ + struct mt7925_mcu_rxd *rxd; + + rxd = (struct mt7925_mcu_rxd *)skb->data; + + switch (rxd->eid) { + case MCU_UNI_EVENT_FW_LOG_2_HOST: + mt7925_mcu_uni_debug_msg_event(dev, skb); + break; + case MCU_UNI_EVENT_ROC: + mt7925_mcu_uni_roc_event(dev, skb); + break; + case MCU_UNI_EVENT_SCAN_DONE: + mt7925_mcu_scan_event(dev, skb); + return; + case MCU_UNI_EVENT_TX_DONE: + mt7925_mcu_tx_done_event(dev, skb); + break; + case MCU_UNI_EVENT_BSS_BEACON_LOSS: + mt7925_mcu_connection_loss_event(dev, skb); + break; + case MCU_UNI_EVENT_COREDUMP: + dev->fw_assert = true; + mt76_connac_mcu_coredump_event(&dev->mt76, skb, &dev->coredump); + return; + default: + break; + } + dev_kfree_skb(skb); +} + +void mt7925_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb) +{ + struct mt7925_mcu_rxd *rxd = (struct mt7925_mcu_rxd *)skb->data; + + if (skb_linearize(skb)) + return; + + if (rxd->option & MCU_UNI_CMD_UNSOLICITED_EVENT) { + mt7925_mcu_uni_rx_unsolicited_event(dev, skb); + return; + } + + mt76_mcu_rx_event(&dev->mt76, skb); +} + +static int +mt7925_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif, + struct ieee80211_ampdu_params *params, + bool enable, bool tx) +{ + struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv; + struct sta_rec_ba_uni *ba; + struct sk_buff *skb; + struct tlv *tlv; + int len; + + len = sizeof(struct sta_req_hdr) + sizeof(*ba); + skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid, + len); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BA, sizeof(*ba)); + + ba = (struct sta_rec_ba_uni *)tlv; + ba->ba_type = tx ? 
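/*
 * One TLV serves both aggregation directions; only ba_type differs. A
 * hedged sketch of how mac80211's ampdu_action callback would route into
 * the tx/rx wrappers that follow, assuming the usual surrounding context
 * (dev, params, ret; action names from mac80211):
 *
 *	switch (params->action) {
 *	case IEEE80211_AMPDU_TX_OPERATIONAL:	// we are the originator
 *		ret = mt7925_mcu_uni_tx_ba(dev, params, true);
 *		break;
 *	case IEEE80211_AMPDU_RX_START:		// we are the recipient
 *		ret = mt7925_mcu_uni_rx_ba(dev, params, true);
 *		break;
 *	case IEEE80211_AMPDU_RX_STOP:
 *		ret = mt7925_mcu_uni_rx_ba(dev, params, false);
 *		break;
 *	default:
 *		break;
 *	}
 */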
MT_BA_TYPE_ORIGINATOR : MT_BA_TYPE_RECIPIENT; + ba->winsize = cpu_to_le16(params->buf_size); + ba->ssn = cpu_to_le16(params->ssn); + ba->ba_en = enable << params->tid; + ba->amsdu = params->amsdu; + ba->tid = params->tid; + + return mt76_mcu_skb_send_msg(dev, skb, + MCU_UNI_CMD(STA_REC_UPDATE), true); +} + +/** starec & wtbl **/ +int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable) +{ + struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv; + struct mt792x_vif *mvif = msta->vif; + + if (enable && !params->amsdu) + msta->wcid.amsdu = false; + + return mt7925_mcu_sta_ba(&dev->mt76, &mvif->mt76, params, + enable, true); +} + +int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable) +{ + struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv; + struct mt792x_vif *mvif = msta->vif; + + return mt7925_mcu_sta_ba(&dev->mt76, &mvif->mt76, params, + enable, false); +} + +static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name) +{ + const struct mt76_connac2_fw_trailer *hdr; + const struct mt76_connac2_fw_region *region; + const struct mt7925_clc *clc; + struct mt76_dev *mdev = &dev->mt76; + struct mt792x_phy *phy = &dev->phy; + const struct firmware *fw; + int ret, i, len, offset = 0; + u8 *clc_base = NULL; + + if (mt7925_disable_clc || + mt76_is_usb(&dev->mt76)) + return 0; + + ret = request_firmware(&fw, fw_name, mdev->dev); + if (ret) + return ret; + + if (!fw || !fw->data || fw->size < sizeof(*hdr)) { + dev_err(mdev->dev, "Invalid firmware\n"); + ret = -EINVAL; + goto out; + } + + hdr = (const void *)(fw->data + fw->size - sizeof(*hdr)); + for (i = 0; i < hdr->n_region; i++) { + region = (const void *)((const u8 *)hdr - + (hdr->n_region - i) * sizeof(*region)); + len = le32_to_cpu(region->len); + + /* check if we have valid buffer size */ + if (offset + len > fw->size) { + dev_err(mdev->dev, "Invalid firmware region\n"); + ret = -EINVAL; + goto out; + } + + if ((region->feature_set & FW_FEATURE_NON_DL) && + region->type == FW_TYPE_CLC) { + clc_base = (u8 *)(fw->data + offset); + break; + } + offset += len; + } + + if (!clc_base) + goto out; + + for (offset = 0; offset < len; offset += le32_to_cpu(clc->len)) { + clc = (const struct mt7925_clc *)(clc_base + offset); + + /* do not init buf again if chip reset triggered */ + if (phy->clc[clc->idx]) + continue; + + phy->clc[clc->idx] = devm_kmemdup(mdev->dev, clc, + le32_to_cpu(clc->len), + GFP_KERNEL); + + if (!phy->clc[clc->idx]) { + ret = -ENOMEM; + goto out; + } + } + + ret = mt7925_mcu_set_clc(dev, "00", ENVIRON_INDOOR); +out: + release_firmware(fw); + + return ret; +} + +int mt7925_mcu_fw_log_2_host(struct mt792x_dev *dev, u8 ctrl) +{ + struct { + u8 _rsv[4]; + + __le16 tag; + __le16 len; + u8 ctrl; + u8 interval; + u8 _rsv2[2]; + } __packed req = { + .tag = cpu_to_le16(UNI_WSYS_CONFIG_FW_LOG_CTRL), + .len = cpu_to_le16(sizeof(req) - 4), + .ctrl = ctrl, + }; + int ret; + + ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_CMD(WSYS_CONFIG), + &req, sizeof(req), false, NULL); + return ret; +} + +static void +mt7925_mcu_parse_phy_cap(struct mt792x_dev *dev, char *data) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt76_dev *mdev = mphy->dev; + struct mt7925_mcu_phy_cap { + u8 ht; + u8 vht; + u8 _5g; + u8 max_bw; + u8 nss; + u8 dbdc; + u8 tx_ldpc; + u8 rx_ldpc; + u8 tx_stbc; + u8 rx_stbc; + u8 hw_path; + u8 he; + u8 eht; + } __packed * cap; + enum { + WF0_24G, + WF0_5G + }; + + cap = (struct 
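/*
 * A recurring shape in this file: UNI requests start with four reserved
 * bytes consumed by the MCU transport, followed by a TLV whose length
 * counts everything except that preamble, hence the repeated
 * cpu_to_le16(sizeof(req) - 4). Sketched with a hypothetical struct:
 *
 *	struct uni_req {
 *		u8 _rsv[4];	// transport header, excluded from len
 *		__le16 tag;	// e.g. UNI_WSYS_CONFIG_FW_LOG_CTRL
 *		__le16 len;	// cpu_to_le16(sizeof(req) - 4)
 *		u8 payload[];	// tag-specific body
 *	} __packed;
 */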
mt7925_mcu_phy_cap *)data; + + mdev->phy.antenna_mask = BIT(cap->nss) - 1; + mdev->phy.chainmask = mdev->phy.antenna_mask; + mdev->phy.cap.has_2ghz = cap->hw_path & BIT(WF0_24G); + mdev->phy.cap.has_5ghz = cap->hw_path & BIT(WF0_5G); + dev->has_eht = cap->eht; +} + +static int +mt7925_mcu_get_nic_capability(struct mt792x_dev *dev) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + struct { + u8 _rsv[4]; + + __le16 tag; + __le16 len; + } __packed req = { + .tag = cpu_to_le16(UNI_CHIP_CONFIG_NIC_CAPA), + .len = cpu_to_le16(sizeof(req) - 4), + }; + struct mt76_connac_cap_hdr { + __le16 n_element; + u8 rsv[2]; + } __packed * hdr; + struct sk_buff *skb; + int ret, i; + + ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_CMD(CHIP_CONFIG), + &req, sizeof(req), true, &skb); + if (ret) + return ret; + + hdr = (struct mt76_connac_cap_hdr *)skb->data; + if (skb->len < sizeof(*hdr)) { + ret = -EINVAL; + goto out; + } + + skb_pull(skb, sizeof(*hdr)); + + for (i = 0; i < le16_to_cpu(hdr->n_element); i++) { + struct tlv *tlv = (struct tlv *)skb->data; + int len; + + if (skb->len < sizeof(*tlv)) + break; + + len = le16_to_cpu(tlv->len); + if (skb->len < len) + break; + + switch (le16_to_cpu(tlv->tag)) { + case MT_NIC_CAP_6G: + mphy->cap.has_6ghz = !!tlv->data[0]; + break; + case MT_NIC_CAP_MAC_ADDR: + memcpy(mphy->macaddr, (void *)tlv->data, ETH_ALEN); + break; + case MT_NIC_CAP_PHY: + mt7925_mcu_parse_phy_cap(dev, tlv->data); + break; + default: + break; + } + skb_pull(skb, len); + } +out: + dev_kfree_skb(skb); + return ret; +} + +int mt7925_mcu_chip_config(struct mt792x_dev *dev, const char *cmd) +{ + u16 len = strlen(cmd) + 1; + struct { + u8 _rsv[4]; + __le16 tag; + __le16 len; + struct mt76_connac_config config; + } __packed req = { + .tag = cpu_to_le16(UNI_CHIP_CONFIG_CHIP_CFG), + .len = cpu_to_le16(sizeof(req) - 4), + .config = { + .resp_type = 0, + .type = 0, + .data_size = cpu_to_le16(len), + }, + }; + + memcpy(req.config.data, cmd, len); + + return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(CHIP_CONFIG), + &req, sizeof(req), false); +} + +int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable) +{ + char cmd[16]; + + snprintf(cmd, sizeof(cmd), "KeepFullPwr %d", !enable); + + return mt7925_mcu_chip_config(dev, cmd); +} +EXPORT_SYMBOL_GPL(mt7925_mcu_set_deep_sleep); + +int mt7925_run_firmware(struct mt792x_dev *dev) +{ + int err; + + err = mt792x_load_firmware(dev); + if (err) + return err; + + err = mt7925_mcu_get_nic_capability(dev); + if (err) + return err; + + set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); + err = mt7925_load_clc(dev, mt792x_ram_name(dev)); + if (err) + return err; + + return mt7925_mcu_fw_log_2_host(dev, 1); +} +EXPORT_SYMBOL_GPL(mt7925_run_firmware); + +static void +mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct sta_rec_hdr_trans *hdr_trans; + struct mt76_wcid *wcid; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HDR_TRANS, sizeof(*hdr_trans)); + hdr_trans = (struct sta_rec_hdr_trans *)tlv; + hdr_trans->dis_rx_hdr_tran = true; + + if (vif->type == NL80211_IFTYPE_STATION) + hdr_trans->to_ds = true; + else + hdr_trans->from_ds = true; + + wcid = (struct mt76_wcid *)sta->drv_priv; + if (!wcid) + return; + + hdr_trans->dis_rx_hdr_tran = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags); + if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) { + hdr_trans->to_ds = true; + hdr_trans->from_ds = true; + } +} + +int mt7925_mcu_wtbl_update_hdr_trans(struct mt792x_dev *dev, + struct 
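/*
 * mt7925_mcu_chip_config() above ships a NUL-terminated ASCII command
 * string to the firmware, and mt7925_mcu_set_deep_sleep() is a small
 * formatter on top of it. Note the inverted sense: the firmware knob is
 * "KeepFullPwr", so enabling deep sleep sends a 0. A usage sketch:
 *
 *	err = mt7925_mcu_set_deep_sleep(dev, true);	// "KeepFullPwr 0"
 *	if (err)
 *		return err;
 *	// ... low-power period ...
 *	err = mt7925_mcu_set_deep_sleep(dev, false);	// "KeepFullPwr 1"
 */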
ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_sta *msta; + struct sk_buff *skb; + + msta = sta ? (struct mt792x_sta *)sta->drv_priv : &mvif->sta; + + skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, + &msta->wcid, + MT7925_STA_UPDATE_MAX_SIZE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + /* starec hdr trans */ + mt7925_mcu_sta_hdr_trans_tlv(skb, vif, sta); + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true); +} + +int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif) +{ +#define MCU_EDCA_AC_PARAM 0 +#define WMM_AIFS_SET BIT(0) +#define WMM_CW_MIN_SET BIT(1) +#define WMM_CW_MAX_SET BIT(2) +#define WMM_TXOP_SET BIT(3) +#define WMM_PARAM_SET (WMM_AIFS_SET | WMM_CW_MIN_SET | \ + WMM_CW_MAX_SET | WMM_TXOP_SET) + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct { + u8 bss_idx; + u8 __rsv[3]; + } __packed hdr = { + .bss_idx = mvif->mt76.idx, + }; + struct sk_buff *skb; + int len = sizeof(hdr) + IEEE80211_NUM_ACS * sizeof(struct edca); + int ac; + + skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, len); + if (!skb) + return -ENOMEM; + + skb_put_data(skb, &hdr, sizeof(hdr)); + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac]; + struct edca *e; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, MCU_EDCA_AC_PARAM, sizeof(*e)); + + e = (struct edca *)tlv; + e->set = WMM_PARAM_SET; + e->queue = ac + mvif->mt76.wmm_idx * MT76_CONNAC_MAX_WMM_SETS; + e->aifs = q->aifs; + e->txop = cpu_to_le16(q->txop); + + if (q->cw_min) + e->cw_min = fls(q->cw_min); + else + e->cw_min = 5; + + if (q->cw_max) + e->cw_max = fls(q->cw_max); + else + e->cw_max = 10; + } + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_UNI_CMD(EDCA_UPDATE), true); +} + +static int +mt7925_mcu_sta_key_tlv(struct mt76_wcid *wcid, + struct mt76_connac_sta_key_conf *sta_key_conf, + struct sk_buff *skb, + struct ieee80211_key_conf *key, + enum set_key_cmd cmd) +{ + struct sta_rec_sec_uni *sec; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec)); + sec = (struct sta_rec_sec_uni *)tlv; + sec->add = cmd; + + if (cmd == SET_KEY) { + struct sec_key_uni *sec_key; + u8 cipher; + + cipher = mt76_connac_mcu_get_cipher(key->cipher); + if (cipher == MCU_CIPHER_NONE) + return -EOPNOTSUPP; + + sec_key = &sec->key[0]; + sec_key->cipher_len = sizeof(*sec_key); + + if (cipher == MCU_CIPHER_BIP_CMAC_128) { + sec_key->wlan_idx = cpu_to_le16(wcid->idx); + sec_key->cipher_id = MCU_CIPHER_AES_CCMP; + sec_key->key_id = sta_key_conf->keyidx; + sec_key->key_len = 16; + memcpy(sec_key->key, sta_key_conf->key, 16); + + sec_key = &sec->key[1]; + sec_key->wlan_idx = cpu_to_le16(wcid->idx); + sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128; + sec_key->cipher_len = sizeof(*sec_key); + sec_key->key_len = 16; + memcpy(sec_key->key, key->key, 16); + sec->n_cipher = 2; + } else { + sec_key->wlan_idx = cpu_to_le16(wcid->idx); + sec_key->cipher_id = cipher; + sec_key->key_id = key->keyidx; + sec_key->key_len = key->keylen; + memcpy(sec_key->key, key->key, key->keylen); + + if (cipher == MCU_CIPHER_TKIP) { + /* Rx/Tx MIC keys are swapped */ + memcpy(sec_key->key + 16, key->key + 24, 8); + memcpy(sec_key->key + 24, key->key + 16, 8); + } + + /* store key_conf for BIP batch update */ + if (cipher == MCU_CIPHER_AES_CCMP) { + memcpy(sta_key_conf->key, key->key, key->keylen); + sta_key_conf->keyidx = 
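/*
 * mac80211 hands TKIP key material as TK (bytes 0..15), TX MIC (16..23),
 * RX MIC (24..31); this firmware expects the MIC halves in the opposite
 * order, which is what the swapped memcpy()s above produce:
 *
 *	sec_key->key[ 0..15] = key->key[ 0..15]	// temporal key
 *	sec_key->key[16..23] = key->key[24..31]	// RX MIC first
 *	sec_key->key[24..31] = key->key[16..23]	// then TX MIC
 */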
key->keyidx; + } + + sec->n_cipher = 1; + } + } else { + sec->n_cipher = 0; + } + + return 0; +} + +int mt7925_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif, + struct mt76_connac_sta_key_conf *sta_key_conf, + struct ieee80211_key_conf *key, int mcu_cmd, + struct mt76_wcid *wcid, enum set_key_cmd cmd) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct sk_buff *skb; + int ret; + + skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid, + MT7925_STA_UPDATE_MAX_SIZE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + ret = mt7925_mcu_sta_key_tlv(wcid, sta_key_conf, skb, key, cmd); + if (ret) + return ret; + + return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true); +} + +int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, + struct ieee80211_channel *chan, int duration, + enum mt7925_roc_req type, u8 token_id) +{ + int center_ch = ieee80211_frequency_to_channel(chan->center_freq); + struct mt792x_dev *dev = phy->dev; + struct { + struct { + u8 rsv[4]; + } __packed hdr; + struct roc_acquire_tlv { + __le16 tag; + __le16 len; + u8 bss_idx; + u8 tokenid; + u8 control_channel; + u8 sco; + u8 band; + u8 bw; + u8 center_chan; + u8 center_chan2; + u8 bw_from_ap; + u8 center_chan_from_ap; + u8 center_chan2_from_ap; + u8 reqtype; + __le32 maxinterval; + u8 dbdcband; + u8 rsv[3]; + } __packed roc; + } __packed req = { + .roc = { + .tag = cpu_to_le16(UNI_ROC_ACQUIRE), + .len = cpu_to_le16(sizeof(struct roc_acquire_tlv)), + .tokenid = token_id, + .reqtype = type, + .maxinterval = cpu_to_le32(duration), + .bss_idx = vif->mt76.idx, + .control_channel = chan->hw_value, + .bw = CMD_CBW_20MHZ, + .bw_from_ap = CMD_CBW_20MHZ, + .center_chan = center_ch, + .center_chan_from_ap = center_ch, + .dbdcband = 0xff, /* auto */ + }, + }; + + if (chan->hw_value < center_ch) + req.roc.sco = 1; /* SCA */ + else if (chan->hw_value > center_ch) + req.roc.sco = 3; /* SCB */ + + switch (chan->band) { + case NL80211_BAND_6GHZ: + req.roc.band = 3; + break; + case NL80211_BAND_5GHZ: + req.roc.band = 2; + break; + default: + req.roc.band = 1; + break; + } + + return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC), + &req, sizeof(req), false); +} + +int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, + u8 token_id) +{ + struct mt792x_dev *dev = phy->dev; + struct { + struct { + u8 rsv[4]; + } __packed hdr; + struct roc_abort_tlv { + __le16 tag; + __le16 len; + u8 bss_idx; + u8 tokenid; + u8 dbdcband; + u8 rsv[5]; + } __packed abort; + } __packed req = { + .abort = { + .tag = cpu_to_le16(UNI_ROC_ABORT), + .len = cpu_to_le16(sizeof(struct roc_abort_tlv)), + .tokenid = token_id, + .bss_idx = vif->mt76.idx, + .dbdcband = 0xff, /* auto*/ + }, + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC), + &req, sizeof(req), false); +} + +int mt7925_mcu_set_chan_info(struct mt792x_phy *phy, u16 tag) +{ + static const u8 ch_band[] = { + [NL80211_BAND_2GHZ] = 0, + [NL80211_BAND_5GHZ] = 1, + [NL80211_BAND_6GHZ] = 2, + }; + struct mt792x_dev *dev = phy->dev; + struct cfg80211_chan_def *chandef = &phy->mt76->chandef; + int freq1 = chandef->center_freq1; + u8 band_idx = chandef->chan->band != NL80211_BAND_2GHZ; + struct { + /* fixed field */ + u8 __rsv[4]; + + __le16 tag; + __le16 len; + u8 control_ch; + u8 center_ch; + u8 bw; + u8 tx_path_num; + u8 rx_path; /* mask or num */ + u8 switch_reason; + u8 band_idx; + u8 center_ch2; /* for 80+80 only */ + __le16 cac_case; + u8 channel_band; + u8 rsv0; + __le32 outband_freq; + u8 txpower_drop; + u8 ap_bw; + u8 ap_center_ch; + u8 
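/*
 * The sco fields set in mt7925_mcu_set_roc() above (and again in the
 * sniffer and RLM paths below) encode the secondary-channel offset from
 * nothing more than the control vs. center channel numbers. Factored
 * out as a sketch with a hypothetical helper name:
 *
 *	static u8 mcu_sco(u8 control_ch, u8 center_ch)
 *	{
 *		if (control_ch < center_ch)
 *			return 1;	// SCA: secondary above
 *		if (control_ch > center_ch)
 *			return 3;	// SCB: secondary below
 *		return 0;		// SCN: no secondary channel
 *	}
 */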
rsv1[53]; + } __packed req = { + .tag = cpu_to_le16(tag), + .len = cpu_to_le16(sizeof(req) - 4), + .control_ch = chandef->chan->hw_value, + .center_ch = ieee80211_frequency_to_channel(freq1), + .bw = mt76_connac_chan_bw(chandef), + .tx_path_num = hweight8(phy->mt76->antenna_mask), + .rx_path = phy->mt76->antenna_mask, + .band_idx = band_idx, + .channel_band = ch_band[chandef->chan->band], + }; + + if (chandef->chan->band == NL80211_BAND_6GHZ) + req.channel_band = 2; + else + req.channel_band = chandef->chan->band; + + if (tag == UNI_CHANNEL_RX_PATH || + dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR) + req.switch_reason = CH_SWITCH_NORMAL; + else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) + req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; + else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef, + NL80211_IFTYPE_AP)) + req.switch_reason = CH_SWITCH_DFS; + else + req.switch_reason = CH_SWITCH_NORMAL; + + if (tag == UNI_CHANNEL_SWITCH) + req.rx_path = hweight8(req.rx_path); + + if (chandef->width == NL80211_CHAN_WIDTH_80P80) { + int freq2 = chandef->center_freq2; + + req.center_ch2 = ieee80211_frequency_to_channel(freq2); + } + + return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(CHANNEL_SWITCH), + &req, sizeof(req), true); +} + +int mt7925_mcu_set_eeprom(struct mt792x_dev *dev) +{ + struct { + u8 _rsv[4]; + + __le16 tag; + __le16 len; + u8 buffer_mode; + u8 format; + __le16 buf_len; + } __packed req = { + .tag = cpu_to_le16(UNI_EFUSE_BUFFER_MODE), + .len = cpu_to_le16(sizeof(req) - 4), + .buffer_mode = EE_MODE_EFUSE, + .format = EE_FORMAT_WHOLE + }; + + return mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_CMD(EFUSE_CTRL), + &req, sizeof(req), false, NULL); +} +EXPORT_SYMBOL_GPL(mt7925_mcu_set_eeprom); + +int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct ps_tlv { + __le16 tag; + __le16 len; + u8 ps_state; /* 0: device awake + * 1: static power save + * 2: dynamic power saving + * 3: enter TWT power saving + * 4: leave TWT power saving + */ + u8 pad[3]; + } __packed ps; + } __packed ps_req = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .ps = { + .tag = cpu_to_le16(UNI_BSS_INFO_PS), + .len = cpu_to_le16(sizeof(struct ps_tlv)), + .ps_state = vif->cfg.ps ? 
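/*
 * Per the firmware scheme quoted in the struct comment, only two of the
 * five ps_state values are driven from here: 2 (dynamic power saving)
 * when mac80211 has PS enabled on the interface, 0 (awake) otherwise;
 * the TWT states (3 and 4) are not set from this path.
 */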
2 : 0, + }, + }; + + if (vif->type != NL80211_IFTYPE_STATION) + return -EOPNOTSUPP; + + return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE), + &ps_req, sizeof(ps_req), true); +} + +static int +mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev, struct ieee80211_vif *vif, + bool enable) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct bcnft_tlv { + __le16 tag; + __le16 len; + __le16 bcn_interval; + u8 dtim_period; + u8 bmc_delivered_ac; + u8 bmc_triggered_ac; + u8 pad[3]; + } __packed bcnft; + } __packed bcnft_req = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .bcnft = { + .tag = cpu_to_le16(UNI_BSS_INFO_BCNFT), + .len = cpu_to_le16(sizeof(struct bcnft_tlv)), + .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int), + .dtim_period = vif->bss_conf.dtim_period, + }, + }; + + if (vif->type != NL80211_IFTYPE_STATION) + return 0; + + return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE), + &bcnft_req, sizeof(bcnft_req), true); +} + +int +mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif, + bool enable) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct bcnft_tlv { + __le16 tag; + __le16 len; + __le16 bcn_interval; + u8 dtim_period; + u8 bmc_delivered_ac; + u8 bmc_triggered_ac; + u8 pad[3]; + } __packed enable; + } req = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .enable = { + .tag = cpu_to_le16(UNI_BSS_INFO_BCNFT), + .len = cpu_to_le16(sizeof(struct bcnft_tlv)), + .dtim_period = vif->bss_conf.dtim_period, + .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int), + }, + }; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct pm_disable { + __le16 tag; + __le16 len; + } __packed disable; + } req1 = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .disable = { + .tag = cpu_to_le16(UNI_BSS_INFO_PM_DISABLE), + .len = cpu_to_le16(sizeof(struct pm_disable)) + }, + }; + int err; + + err = mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE), + &req1, sizeof(req1), false); + if (err < 0 || !enable) + return err; + + return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE), + &req, sizeof(req), false); +} + +static void +mt7925_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +{ + if (!sta->deflink.he_cap.has_he) + return; + + mt76_connac_mcu_sta_he_tlv_v2(skb, sta); +} + +static void +mt7925_mcu_sta_he_6g_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +{ + struct sta_rec_he_6g_capa *he_6g; + struct tlv *tlv; + + if (!sta->deflink.he_6ghz_capa.capa) + return; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE_6G, sizeof(*he_6g)); + + he_6g = (struct sta_rec_he_6g_capa *)tlv; + he_6g->capa = sta->deflink.he_6ghz_capa.capa; +} + +static void +mt7925_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +{ + struct ieee80211_eht_mcs_nss_supp *mcs_map; + struct ieee80211_eht_cap_elem_fixed *elem; + struct sta_rec_eht *eht; + struct tlv *tlv; + + if (!sta->deflink.eht_cap.has_eht) + return; + + mcs_map = &sta->deflink.eht_cap.eht_mcs_nss_supp; + elem = &sta->deflink.eht_cap.eht_cap_elem; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_EHT, sizeof(*eht)); + + eht = (struct sta_rec_eht *)tlv; + eht->tid_bitmap = 0xff; + eht->mac_cap = cpu_to_le16(*(u16 *)elem->mac_cap_info); + eht->phy_cap = cpu_to_le64(*(u64 *)elem->phy_cap_info); + eht->phy_cap_ext = cpu_to_le64(elem->phy_cap_info[8]); + + if 
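/*
 * In struct ieee80211_eht_mcs_nss_supp the 20 MHz-only MCS set and the
 * per-bandwidth sets share storage, so only one view is valid for a
 * given peer; the bw20 copy below is therefore gated on the station
 * currently operating at 20 MHz, while the 80/160 MHz maps are copied
 * unconditionally.
 */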
(sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) + memcpy(eht->mcs_map_bw20, &mcs_map->only_20mhz, sizeof(eht->mcs_map_bw20)); + memcpy(eht->mcs_map_bw80, &mcs_map->bw._80, sizeof(eht->mcs_map_bw80)); + memcpy(eht->mcs_map_bw160, &mcs_map->bw._160, sizeof(eht->mcs_map_bw160)); +} + +static void +mt7925_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +{ + struct sta_rec_ht *ht; + struct tlv *tlv; + + if (!sta->deflink.ht_cap.ht_supported) + return; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht)); + + ht = (struct sta_rec_ht *)tlv; + ht->ht_cap = cpu_to_le16(sta->deflink.ht_cap.cap); +} + +static void +mt7925_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +{ + struct sta_rec_vht *vht; + struct tlv *tlv; + + /* For 6G band, this tlv is necessary to let hw work normally */ + if (!sta->deflink.he_6ghz_capa.capa && !sta->deflink.vht_cap.vht_supported) + return; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht)); + + vht = (struct sta_rec_vht *)tlv; + vht->vht_cap = cpu_to_le32(sta->deflink.vht_cap.cap); + vht->vht_rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map; + vht->vht_tx_mcs_map = sta->deflink.vht_cap.vht_mcs.tx_mcs_map; +} + +static void +mt7925_mcu_sta_amsdu_tlv(struct sk_buff *skb, + struct ieee80211_vif *vif, struct ieee80211_sta *sta) +{ + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + struct sta_rec_amsdu *amsdu; + struct tlv *tlv; + + if (vif->type != NL80211_IFTYPE_STATION && + vif->type != NL80211_IFTYPE_AP) + return; + + if (!sta->deflink.agg.max_amsdu_len) + return; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu)); + amsdu = (struct sta_rec_amsdu *)tlv; + amsdu->max_amsdu_num = 8; + amsdu->amsdu_en = true; + msta->wcid.amsdu = true; + + switch (sta->deflink.agg.max_amsdu_len) { + case IEEE80211_MAX_MPDU_LEN_VHT_11454: + amsdu->max_mpdu_size = + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; + return; + case IEEE80211_MAX_MPDU_LEN_HT_7935: + case IEEE80211_MAX_MPDU_LEN_VHT_7991: + amsdu->max_mpdu_size = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991; + return; + default: + amsdu->max_mpdu_size = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895; + return; + } +} + +static void +mt7925_mcu_sta_phy_tlv(struct sk_buff *skb, + struct ieee80211_vif *vif, struct ieee80211_sta *sta) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct cfg80211_chan_def *chandef = &mvif->mt76.ctx->def; + struct sta_rec_phy *phy; + struct tlv *tlv; + u8 af = 0, mm = 0; + + if (!sta->deflink.ht_cap.ht_supported && !sta->deflink.he_6ghz_capa.capa) + return; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PHY, sizeof(*phy)); + phy = (struct sta_rec_phy *)tlv; + phy->phy_type = mt76_connac_get_phy_mode_v2(mvif->phy->mt76, vif, chandef->chan->band, sta); + if (sta->deflink.ht_cap.ht_supported) { + af = sta->deflink.ht_cap.ampdu_factor; + mm = sta->deflink.ht_cap.ampdu_density; + } + + if (sta->deflink.vht_cap.vht_supported) { + u8 vht_af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK, + sta->deflink.vht_cap.cap); + + af = max_t(u8, af, vht_af); + } + + if (sta->deflink.he_6ghz_capa.capa) { + af = le16_get_bits(sta->deflink.he_6ghz_capa.capa, + IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); + mm = le16_get_bits(sta->deflink.he_6ghz_capa.capa, + IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START); + } + + phy->ampdu = FIELD_PREP(IEEE80211_HT_AMPDU_PARM_FACTOR, af) | + FIELD_PREP(IEEE80211_HT_AMPDU_PARM_DENSITY, mm); + phy->max_ampdu_len = af; +} + +static void +mt7925_mcu_sta_state_v2_tlv(struct mt76_phy *mphy, 
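/*
 * The A-MPDU parameters assembled above take the most capable values the
 * peer advertised: HT ampdu_factor/density as the baseline, a larger VHT
 * exponent may raise the factor, and on 6 GHz, where HT/VHT elements are
 * absent, both values come from the HE 6 GHz capability field instead.
 */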
struct sk_buff *skb, + struct ieee80211_sta *sta, + struct ieee80211_vif *vif, + u8 rcpi, u8 sta_state) +{ + struct sta_rec_state_v2 { + __le16 tag; + __le16 len; + u8 state; + u8 rsv[3]; + __le32 flags; + u8 vht_opmode; + u8 action; + u8 rsv2[2]; + } __packed * state; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_STATE, sizeof(*state)); + state = (struct sta_rec_state_v2 *)tlv; + state->state = sta_state; + + if (sta->deflink.vht_cap.vht_supported) { + state->vht_opmode = sta->deflink.bandwidth; + state->vht_opmode |= sta->deflink.rx_nss << + IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT; + } +} + +static void +mt7925_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, + struct ieee80211_vif *vif, struct ieee80211_sta *sta) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct cfg80211_chan_def *chandef = &mvif->mt76.ctx->def; + enum nl80211_band band = chandef->chan->band; + struct sta_rec_ra_info *ra_info; + struct tlv *tlv; + u16 supp_rates; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info)); + ra_info = (struct sta_rec_ra_info *)tlv; + + supp_rates = sta->deflink.supp_rates[band]; + if (band == NL80211_BAND_2GHZ) + supp_rates = FIELD_PREP(RA_LEGACY_OFDM, supp_rates >> 4) | + FIELD_PREP(RA_LEGACY_CCK, supp_rates & 0xf); + else + supp_rates = FIELD_PREP(RA_LEGACY_OFDM, supp_rates); + + ra_info->legacy = cpu_to_le16(supp_rates); + + if (sta->deflink.ht_cap.ht_supported) + memcpy(ra_info->rx_mcs_bitmask, + sta->deflink.ht_cap.mcs.rx_mask, + HT_MCS_MASK_NUM); +} + +static void +mt7925_mcu_sta_mld_tlv(struct sk_buff *skb, + struct ieee80211_vif *vif, struct ieee80211_sta *sta) +{ + struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; + struct sta_rec_mld *mld; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_MLD, sizeof(*mld)); + mld = (struct sta_rec_mld *)tlv; + memcpy(mld->mac_addr, vif->addr, ETH_ALEN); + mld->primary_id = cpu_to_le16(wcid->idx); + mld->wlan_id = cpu_to_le16(wcid->idx); + + /* TODO: 0 means deflink only, add secondary link(1) later */ + mld->link_num = !!(hweight8(vif->active_links) > 1); + WARN_ON_ONCE(mld->link_num); +} + +static int +mt7925_mcu_sta_cmd(struct mt76_phy *phy, + struct mt76_sta_cmd_info *info) +{ + struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv; + struct mt76_dev *dev = phy->dev; + struct wtbl_req_hdr *wtbl_hdr; + struct tlv *sta_wtbl; + struct sk_buff *skb; + + skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid, + MT7925_STA_UPDATE_MAX_SIZE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + if (info->sta || !info->offload_fw) + mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif, info->sta, + info->enable, info->newly); + if (info->sta && info->enable) { + mt7925_mcu_sta_phy_tlv(skb, info->vif, info->sta); + mt7925_mcu_sta_ht_tlv(skb, info->sta); + mt7925_mcu_sta_vht_tlv(skb, info->sta); + mt76_connac_mcu_sta_uapsd(skb, info->vif, info->sta); + mt7925_mcu_sta_amsdu_tlv(skb, info->vif, info->sta); + mt7925_mcu_sta_he_tlv(skb, info->sta); + mt7925_mcu_sta_he_6g_tlv(skb, info->sta); + mt7925_mcu_sta_eht_tlv(skb, info->sta); + mt7925_mcu_sta_rate_ctrl_tlv(skb, info->vif, info->sta); + mt7925_mcu_sta_state_v2_tlv(phy, skb, info->sta, + info->vif, info->rcpi, + info->state); + mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->sta); + mt7925_mcu_sta_mld_tlv(skb, info->vif, info->sta); + } + + sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL, + sizeof(struct tlv)); + + wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, info->wcid, + WTBL_RESET_AND_SET, + sta_wtbl, &skb); + if 
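/*
 * Assembly order in the STA_REC message above matters: the basic TLV
 * leads, the capability TLVs (HT/VHT/HE/EHT, rate control, state, MLD)
 * are appended only for a live station entry, and the legacy WTBL TLVs
 * ride inside a trailing STA_REC_WTBL container allocated via
 * mt76_connac_mcu_alloc_wtbl_req().
 */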
(IS_ERR(wtbl_hdr)) + return PTR_ERR(wtbl_hdr); + + if (info->enable) { + mt76_connac_mcu_wtbl_generic_tlv(dev, skb, info->vif, + info->sta, sta_wtbl, + wtbl_hdr); + mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, info->vif, info->wcid, + sta_wtbl, wtbl_hdr); + if (info->sta) + mt76_connac_mcu_wtbl_ht_tlv(dev, skb, info->sta, + sta_wtbl, wtbl_hdr, + true, true); + } + + return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true); +} + +int mt7925_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta, + struct ieee80211_vif *vif, bool enable, + enum mt76_sta_info_state state) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + int rssi = -ewma_rssi_read(&mvif->rssi); + struct mt76_sta_cmd_info info = { + .sta = sta, + .vif = vif, + .enable = enable, + .cmd = MCU_UNI_CMD(STA_REC_UPDATE), + .state = state, + .offload_fw = true, + .rcpi = to_rcpi(rssi), + }; + struct mt792x_sta *msta; + + msta = sta ? (struct mt792x_sta *)sta->drv_priv : NULL; + info.wcid = msta ? &msta->wcid : &mvif->sta.wcid; + info.newly = msta ? state != MT76_STA_INFO_STATE_ASSOC : true; + + return mt7925_mcu_sta_cmd(&dev->mphy, &info); +} + +int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev, + struct ieee80211_vif *vif, + bool enable) +{ +#define MT7925_FIF_BIT_CLR BIT(1) +#define MT7925_FIF_BIT_SET BIT(0) + int err = 0; + + if (enable) { + err = mt7925_mcu_uni_bss_bcnft(dev, vif, true); + if (err) + return err; + + return mt7925_mcu_set_rxfilter(dev, 0, + MT7925_FIF_BIT_SET, + MT_WF_RFCR_DROP_OTHER_BEACON); + } + + err = mt7925_mcu_set_bss_pm(dev, vif, false); + if (err) + return err; + + return mt7925_mcu_set_rxfilter(dev, 0, + MT7925_FIF_BIT_CLR, + MT_WF_RFCR_DROP_OTHER_BEACON); +} + +int mt7925_get_txpwr_info(struct mt792x_dev *dev, u8 band_idx, struct mt7925_txpwr *txpwr) +{ +#define TX_POWER_SHOW_INFO 0x7 +#define TXPOWER_ALL_RATE_POWER_INFO 0x2 + struct mt7925_txpwr_event *event; + struct mt7925_txpwr_req req = { + .tag = cpu_to_le16(TX_POWER_SHOW_INFO), + .len = cpu_to_le16(sizeof(req) - 4), + .catg = TXPOWER_ALL_RATE_POWER_INFO, + .band_idx = band_idx, + }; + struct sk_buff *skb; + int ret; + + ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_CMD(TXPOWER), + &req, sizeof(req), true, &skb); + if (ret) + return ret; + + event = (struct mt7925_txpwr_event *)skb->data; + memcpy(txpwr, &event->txpwr, sizeof(event->txpwr)); + + dev_kfree_skb(skb); + + return 0; +} + +int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif, + bool enable) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + + struct { + struct { + u8 band_idx; + u8 pad[3]; + } __packed hdr; + struct sniffer_enable_tlv { + __le16 tag; + __le16 len; + u8 enable; + u8 pad[3]; + } __packed enable; + } __packed req = { + .hdr = { + .band_idx = mvif->mt76.band_idx, + }, + .enable = { + .tag = cpu_to_le16(UNI_SNIFFER_ENABLE), + .len = cpu_to_le16(sizeof(struct sniffer_enable_tlv)), + .enable = enable, + }, + }; + + mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req), true); + + return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req), + true); +} + +int mt7925_mcu_config_sniffer(struct mt792x_vif *vif, + struct ieee80211_chanctx_conf *ctx) +{ + struct mt76_phy *mphy = vif->phy->mt76; + struct cfg80211_chan_def *chandef = ctx ? 
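/*
 * mt7925_mcu_set_beacon_filter() above pairs two steps: beacon-timing
 * offload (bcnft / bss_pm) and an RX-filter update. The filter opcode is
 * a two-bit selector: MT7925_FIF_BIT_SET applies the given RFCR bits and
 * MT7925_FIF_BIT_CLR removes them, so enabling the filter drops other
 * BSSes' beacons in hardware and disabling it restores delivery.
 */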
&ctx->def : &mphy->chandef; + int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2; + + const u8 ch_band[] = { + [NL80211_BAND_2GHZ] = 1, + [NL80211_BAND_5GHZ] = 2, + [NL80211_BAND_6GHZ] = 3, + }; + const u8 ch_width[] = { + [NL80211_CHAN_WIDTH_20_NOHT] = 0, + [NL80211_CHAN_WIDTH_20] = 0, + [NL80211_CHAN_WIDTH_40] = 0, + [NL80211_CHAN_WIDTH_80] = 1, + [NL80211_CHAN_WIDTH_160] = 2, + [NL80211_CHAN_WIDTH_80P80] = 3, + [NL80211_CHAN_WIDTH_5] = 4, + [NL80211_CHAN_WIDTH_10] = 5, + [NL80211_CHAN_WIDTH_320] = 6, + }; + + struct { + struct { + u8 band_idx; + u8 pad[3]; + } __packed hdr; + struct config_tlv { + __le16 tag; + __le16 len; + u16 aid; + u8 ch_band; + u8 bw; + u8 control_ch; + u8 sco; + u8 center_ch; + u8 center_ch2; + u8 drop_err; + u8 pad[3]; + } __packed tlv; + } __packed req = { + .hdr = { + .band_idx = vif->mt76.band_idx, + }, + .tlv = { + .tag = cpu_to_le16(UNI_SNIFFER_CONFIG), + .len = cpu_to_le16(sizeof(req.tlv)), + .control_ch = chandef->chan->hw_value, + .center_ch = ieee80211_frequency_to_channel(freq1), + .drop_err = 1, + }, + }; + + if (chandef->chan->band < ARRAY_SIZE(ch_band)) + req.tlv.ch_band = ch_band[chandef->chan->band]; + if (chandef->width < ARRAY_SIZE(ch_width)) + req.tlv.bw = ch_width[chandef->width]; + + if (freq2) + req.tlv.center_ch2 = ieee80211_frequency_to_channel(freq2); + + if (req.tlv.control_ch < req.tlv.center_ch) + req.tlv.sco = 1; /* SCA */ + else if (req.tlv.control_ch > req.tlv.center_ch) + req.tlv.sco = 3; /* SCB */ + + return mt76_mcu_send_msg(mphy->dev, MCU_UNI_CMD(SNIFFER), + &req, sizeof(req), true); +} + +int +mt7925_mcu_uni_add_beacon_offload(struct mt792x_dev *dev, + struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + bool enable) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct ieee80211_mutable_offsets offs; + struct { + struct req_hdr { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct bcn_content_tlv { + __le16 tag; + __le16 len; + __le16 tim_ie_pos; + __le16 csa_ie_pos; + __le16 bcc_ie_pos; + /* 0: disable beacon offload + * 1: enable beacon offload + * 2: update probe respond offload + */ + u8 enable; + /* 0: legacy format (TXD + payload) + * 1: only cap field IE + */ + u8 type; + __le16 pkt_len; + u8 pkt[512]; + } __packed beacon_tlv; + } req = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .beacon_tlv = { + .tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT), + .len = cpu_to_le16(sizeof(struct bcn_content_tlv)), + .enable = enable, + .type = 1, + }, + }; + struct sk_buff *skb; + u8 cap_offs; + + /* support enable/update process only + * disable flow would be handled in bss stop handler automatically + */ + if (!enable) + return -EOPNOTSUPP; + + skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs, 0); + if (!skb) + return -EINVAL; + + cap_offs = offsetof(struct ieee80211_mgmt, u.beacon.capab_info); + if (!skb_pull(skb, cap_offs)) { + dev_err(dev->mt76.dev, "beacon format err\n"); + dev_kfree_skb(skb); + return -EINVAL; + } + + if (skb->len > 512) { + dev_err(dev->mt76.dev, "beacon size limit exceed\n"); + dev_kfree_skb(skb); + return -EINVAL; + } + + memcpy(req.beacon_tlv.pkt, skb->data, skb->len); + req.beacon_tlv.pkt_len = cpu_to_le16(skb->len); + offs.tim_offset -= cap_offs; + req.beacon_tlv.tim_ie_pos = cpu_to_le16(offs.tim_offset); + + if (offs.cntdwn_counter_offs[0]) { + u16 csa_offs; + + csa_offs = offs.cntdwn_counter_offs[0] - cap_offs - 4; + req.beacon_tlv.csa_ie_pos = cpu_to_le16(csa_offs); + } + dev_kfree_skb(skb); + + return mt76_mcu_send_msg(&dev->mt76, 
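/*
 * offs.cntdwn_counter_offs[] points at the countdown byte itself, which
 * sits four bytes into the CSA element (element ID, length, switch mode,
 * new channel, then the count), so the element position handed to the
 * firmware above is recovered by subtracting the stripped
 * capability-field offset and those four bytes.
 */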
MCU_UNI_CMD(BSS_INFO_UPDATE), + &req, sizeof(req), true); +} + +int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif, + struct ieee80211_chanctx_conf *ctx) +{ + struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->chandef; + int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2; + enum nl80211_band band = chandef->chan->band; + struct mt76_dev *mdev = phy->dev; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct rlm_tlv { + __le16 tag; + __le16 len; + u8 control_channel; + u8 center_chan; + u8 center_chan2; + u8 bw; + u8 tx_streams; + u8 rx_streams; + u8 ht_op_info; + u8 sco; + u8 band; + u8 pad[3]; + } __packed rlm; + } __packed rlm_req = { + .hdr = { + .bss_idx = mvif->idx, + }, + .rlm = { + .tag = cpu_to_le16(UNI_BSS_INFO_RLM), + .len = cpu_to_le16(sizeof(struct rlm_tlv)), + .control_channel = chandef->chan->hw_value, + .center_chan = ieee80211_frequency_to_channel(freq1), + .center_chan2 = ieee80211_frequency_to_channel(freq2), + .tx_streams = hweight8(phy->antenna_mask), + .ht_op_info = 4, /* set HT 40M allowed */ + .rx_streams = hweight8(phy->antenna_mask), + .band = band, + }, + }; + + switch (chandef->width) { + case NL80211_CHAN_WIDTH_40: + rlm_req.rlm.bw = CMD_CBW_40MHZ; + break; + case NL80211_CHAN_WIDTH_80: + rlm_req.rlm.bw = CMD_CBW_80MHZ; + break; + case NL80211_CHAN_WIDTH_80P80: + rlm_req.rlm.bw = CMD_CBW_8080MHZ; + break; + case NL80211_CHAN_WIDTH_160: + rlm_req.rlm.bw = CMD_CBW_160MHZ; + break; + case NL80211_CHAN_WIDTH_5: + rlm_req.rlm.bw = CMD_CBW_5MHZ; + break; + case NL80211_CHAN_WIDTH_10: + rlm_req.rlm.bw = CMD_CBW_10MHZ; + break; + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: + default: + rlm_req.rlm.bw = CMD_CBW_20MHZ; + rlm_req.rlm.ht_op_info = 0; + break; + } + + if (rlm_req.rlm.control_channel < rlm_req.rlm.center_chan) + rlm_req.rlm.sco = 1; /* SCA */ + else if (rlm_req.rlm.control_channel > rlm_req.rlm.center_chan) + rlm_req.rlm.sco = 3; /* SCB */ + + return mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE), &rlm_req, + sizeof(rlm_req), true); +} + +static struct sk_buff * +__mt7925_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif *mvif, int len) +{ + struct bss_req_hdr hdr = { + .bss_idx = mvif->idx, + }; + struct sk_buff *skb; + + skb = mt76_mcu_msg_alloc(dev, NULL, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + skb_put_data(skb, &hdr, sizeof(hdr)); + + return skb; +} + +static u8 +mt7925_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif, + enum nl80211_band band, struct ieee80211_sta *sta) +{ + struct ieee80211_he_6ghz_capa *he_6ghz_capa; + const struct ieee80211_sta_eht_cap *eht_cap; + __le16 capa = 0; + u8 mode = 0; + + if (sta) { + he_6ghz_capa = &sta->deflink.he_6ghz_capa; + eht_cap = &sta->deflink.eht_cap; + } else { + struct ieee80211_supported_band *sband; + + sband = phy->hw->wiphy->bands[band]; + capa = ieee80211_get_he_6ghz_capa(sband, vif->type); + he_6ghz_capa = (struct ieee80211_he_6ghz_capa *)&capa; + + eht_cap = ieee80211_get_eht_iftype_cap(sband, vif->type); + } + + switch (band) { + case NL80211_BAND_2GHZ: + if (eht_cap && eht_cap->has_eht) + mode |= PHY_MODE_BE_24G; + break; + case NL80211_BAND_5GHZ: + if (eht_cap && eht_cap->has_eht) + mode |= PHY_MODE_BE_5G; + break; + case NL80211_BAND_6GHZ: + if (he_6ghz_capa && he_6ghz_capa->capa) + mode |= PHY_MODE_AX_6G; + + if (eht_cap && eht_cap->has_eht) + mode |= PHY_MODE_BE_6G; + break; + default: + break; + } + + return mode; +} + +static void +mt7925_mcu_bss_basic_tlv(struct sk_buff *skb, + 
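/*
 * mt7925_get_phy_mode_ext() above resolves HE/EHT capability from the
 * peer when a station entry is supplied; otherwise it falls back to our
 * own per-band, per-iftype capabilities via ieee80211_get_he_6ghz_capa()
 * and ieee80211_get_eht_iftype_cap(), covering the case where the BSS is
 * being brought up before any peer exists.
 */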
struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_chanctx_conf *ctx, + struct mt76_phy *phy, u16 wlan_idx, + bool enable) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_sta *msta = sta ? (struct mt792x_sta *)sta->drv_priv : + &mvif->sta; + struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->chandef; + enum nl80211_band band = chandef->chan->band; + struct mt76_connac_bss_basic_tlv *basic_req; + u8 idx, basic_phy; + struct tlv *tlv; + int conn_type; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_BASIC, sizeof(*basic_req)); + basic_req = (struct mt76_connac_bss_basic_tlv *)tlv; + + idx = mvif->mt76.omac_idx > EXT_BSSID_START ? HW_BSSID_0 : + mvif->mt76.omac_idx; + basic_req->hw_bss_idx = idx; + + basic_req->phymode_ext = mt7925_get_phy_mode_ext(phy, vif, band, sta); + + basic_phy = mt76_connac_get_phy_mode_v2(phy, vif, band, sta); + basic_req->nonht_basic_phy = cpu_to_le16(basic_phy); + + memcpy(basic_req->bssid, vif->bss_conf.bssid, ETH_ALEN); + basic_req->phymode = mt76_connac_get_phy_mode(phy, vif, band, sta); + basic_req->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int); + basic_req->dtim_period = vif->bss_conf.dtim_period; + basic_req->bmc_tx_wlan_idx = cpu_to_le16(wlan_idx); + basic_req->sta_idx = cpu_to_le16(msta->wcid.idx); + basic_req->omac_idx = mvif->mt76.omac_idx; + basic_req->band_idx = mvif->mt76.band_idx; + basic_req->wmm_idx = mvif->mt76.wmm_idx; + basic_req->conn_state = !enable; + + switch (vif->type) { + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_AP: + if (vif->p2p) + conn_type = CONNECTION_P2P_GO; + else + conn_type = CONNECTION_INFRA_AP; + basic_req->conn_type = cpu_to_le32(conn_type); + basic_req->active = enable; + break; + case NL80211_IFTYPE_STATION: + if (vif->p2p) + conn_type = CONNECTION_P2P_GC; + else + conn_type = CONNECTION_INFRA_STA; + basic_req->conn_type = cpu_to_le32(conn_type); + basic_req->active = true; + break; + case NL80211_IFTYPE_ADHOC: + basic_req->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC); + basic_req->active = true; + break; + default: + WARN_ON(1); + break; + } +} + +static void +mt7925_mcu_bss_sec_tlv(struct sk_buff *skb, struct ieee80211_vif *vif) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct bss_sec_tlv { + __le16 tag; + __le16 len; + u8 mode; + u8 status; + u8 cipher; + u8 __rsv; + } __packed * sec; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_SEC, sizeof(*sec)); + sec = (struct bss_sec_tlv *)tlv; + + switch (mvif->cipher) { + case MCU_CIPHER_GCMP_256: + case MCU_CIPHER_GCMP: + sec->mode = MODE_WPA3_SAE; + sec->status = 8; + break; + case MCU_CIPHER_AES_CCMP: + sec->mode = MODE_WPA2_PSK; + sec->status = 6; + break; + case MCU_CIPHER_TKIP: + sec->mode = MODE_WPA2_PSK; + sec->status = 4; + break; + case MCU_CIPHER_WEP104: + case MCU_CIPHER_WEP40: + sec->mode = MODE_SHARED; + sec->status = 0; + break; + default: + sec->mode = MODE_OPEN; + sec->status = 1; + break; + } + + sec->cipher = mvif->cipher; +} + +static void +mt7925_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt792x_phy *phy, + struct ieee80211_chanctx_conf *ctx, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->mt76->chandef; + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + enum nl80211_band band = chandef->chan->band; + struct bss_rate_tlv *bmc; + struct tlv *tlv; + u8 idx = mvif->mcast_rates_idx ? 
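/*
 * Broadcast and multicast traffic is pinned to a single fixed-rate
 * index: the configured multicast rate when the BSS has one, otherwise
 * the lowest basic rate, and the same index feeds both bc_fixed_rate
 * and mc_fixed_rate below.
 */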
+ mvif->mcast_rates_idx : mvif->basic_rates_idx; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_RATE, sizeof(*bmc)); + + bmc = (struct bss_rate_tlv *)tlv; + + bmc->short_preamble = (band == NL80211_BAND_2GHZ); + bmc->bc_fixed_rate = idx; + bmc->mc_fixed_rate = idx; +} + +static void +mt7925_mcu_bss_mld_tlv(struct sk_buff *skb, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + bool is_mld = ieee80211_vif_is_mld(vif); + struct bss_mld_tlv *mld; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_MLD, sizeof(*mld)); + mld = (struct bss_mld_tlv *)tlv; + + mld->link_id = sta ? (is_mld ? vif->bss_conf.link_id : 0) : 0xff; + mld->group_mld_id = is_mld ? mvif->mt76.idx : 0xff; + mld->own_mld_id = mvif->mt76.idx + 32; + mld->remap_idx = 0xff; + + if (sta) + memcpy(mld->mac_addr, sta->addr, ETH_ALEN); +} + +static void +mt7925_mcu_bss_qos_tlv(struct sk_buff *skb, struct ieee80211_vif *vif) +{ + struct mt76_connac_bss_qos_tlv *qos; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_QBSS, sizeof(*qos)); + qos = (struct mt76_connac_bss_qos_tlv *)tlv; + qos->qos = vif->bss_conf.qos; +} + +static void +mt7925_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, + struct mt792x_phy *phy) +{ +#define DEFAULT_HE_PE_DURATION 4 +#define DEFAULT_HE_DURATION_RTS_THRES 1023 + const struct ieee80211_sta_he_cap *cap; + struct bss_info_uni_he *he; + struct tlv *tlv; + + cap = mt76_connac_get_he_phy_cap(phy->mt76, vif); + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_HE_BASIC, sizeof(*he)); + + he = (struct bss_info_uni_he *)tlv; + he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext; + if (!he->he_pe_duration) + he->he_pe_duration = DEFAULT_HE_PE_DURATION; + + he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th); + if (!he->he_rts_thres) + he->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES); + + he->max_nss_mcs[CMD_HE_MCS_BW80] = cap->he_mcs_nss_supp.tx_mcs_80; + he->max_nss_mcs[CMD_HE_MCS_BW160] = cap->he_mcs_nss_supp.tx_mcs_160; + he->max_nss_mcs[CMD_HE_MCS_BW8080] = cap->he_mcs_nss_supp.tx_mcs_80p80; +} + +static void +mt7925_mcu_bss_color_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, + bool enable) +{ + struct bss_info_uni_bss_color *color; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_BSS_COLOR, sizeof(*color)); + color = (struct bss_info_uni_bss_color *)tlv; + + color->enable = enable ? + vif->bss_conf.he_bss_color.enabled : 0; + color->bss_color = enable ? 
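/*
 * On teardown (enable == false) the BSS color TLV is still sent, just
 * zeroed, so the firmware's color state is explicitly cleared rather
 * than left holding the previous association's value.
 */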
+ vif->bss_conf.he_bss_color.color : 0; +} + +int mt7925_mcu_add_bss_info(struct mt792x_phy *phy, + struct ieee80211_chanctx_conf *ctx, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + int enable) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = phy->dev; + struct sk_buff *skb; + int err; + + skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76, + MT7925_BSS_UPDATE_MAX_SIZE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + /* bss_basic must be first */ + mt7925_mcu_bss_basic_tlv(skb, vif, sta, ctx, phy->mt76, + mvif->sta.wcid.idx, enable); + mt7925_mcu_bss_sec_tlv(skb, vif); + + mt7925_mcu_bss_bmc_tlv(skb, phy, ctx, vif, sta); + mt7925_mcu_bss_qos_tlv(skb, vif); + mt7925_mcu_bss_mld_tlv(skb, vif, sta); + + if (vif->bss_conf.he_support) { + mt7925_mcu_bss_he_tlv(skb, vif, phy); + mt7925_mcu_bss_color_tlv(skb, vif, enable); + } + + err = mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_UNI_CMD(BSS_INFO_UPDATE), true); + if (err < 0) + return err; + + return mt7925_mcu_set_chctx(phy->mt76, &mvif->mt76, ctx); +} + +int mt7925_mcu_set_dbdc(struct mt76_phy *phy) +{ + struct mt76_dev *mdev = phy->dev; + + struct mbmc_conf_tlv *conf; + struct mbmc_set_req *hdr; + struct sk_buff *skb; + struct tlv *tlv; + int max_len, err; + + max_len = sizeof(*hdr) + sizeof(*conf); + skb = mt76_mcu_msg_alloc(mdev, NULL, max_len); + if (!skb) + return -ENOMEM; + + hdr = (struct mbmc_set_req *)skb_put(skb, sizeof(*hdr)); + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_MBMC_SETTING, sizeof(*conf)); + conf = (struct mbmc_conf_tlv *)tlv; + + conf->mbmc_en = 1; + conf->band = 0; /* unused */ + + err = mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SET_DBDC_PARMS), + false); + + return err; +} + +#define MT76_CONNAC_SCAN_CHANNEL_TIME 60 + +int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_scan_request *scan_req) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct cfg80211_scan_request *sreq = &scan_req->req; + int n_ssids = 0, err, i, duration; + struct ieee80211_channel **scan_list = sreq->channels; + struct mt76_dev *mdev = phy->dev; + struct mt76_connac_mcu_scan_channel *chan; + struct sk_buff *skb; + + struct scan_hdr_tlv *hdr; + struct scan_req_tlv *req; + struct scan_ssid_tlv *ssid; + struct scan_bssid_tlv *bssid; + struct scan_chan_info_tlv *chan_info; + struct scan_ie_tlv *ie; + struct scan_misc_tlv *misc; + struct tlv *tlv; + int max_len; + + max_len = sizeof(*hdr) + sizeof(*req) + sizeof(*ssid) + + sizeof(*bssid) + sizeof(*chan_info) + + sizeof(*misc) + sizeof(*ie); + + skb = mt76_mcu_msg_alloc(mdev, NULL, max_len); + if (!skb) + return -ENOMEM; + + set_bit(MT76_HW_SCANNING, &phy->state); + mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f; + + hdr = (struct scan_hdr_tlv *)skb_put(skb, sizeof(*hdr)); + hdr->seq_num = mvif->scan_seq_num | mvif->band_idx << 7; + hdr->bss_idx = mvif->idx; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_REQ, sizeof(*req)); + req = (struct scan_req_tlv *)tlv; + req->scan_type = sreq->n_ssids ? 1 : 0; + req->probe_req_num = sreq->n_ssids ? 
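/*
 * Any requested SSID makes this an active scan: two probe requests per
 * channel and the base dwell time. With no SSIDs the scan is passive,
 * sends no probes, and the per-channel dwell time just below is doubled
 * to improve the odds of catching a beacon.
 */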
2 : 0; + + duration = MT76_CONNAC_SCAN_CHANNEL_TIME; + /* increase channel time for passive scan */ + if (!sreq->n_ssids) + duration *= 2; + req->timeout_value = cpu_to_le16(sreq->n_channels * duration); + req->channel_min_dwell_time = cpu_to_le16(duration); + req->channel_dwell_time = cpu_to_le16(duration); + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_SSID, sizeof(*ssid)); + ssid = (struct scan_ssid_tlv *)tlv; + for (i = 0; i < sreq->n_ssids; i++) { + if (!sreq->ssids[i].ssid_len) + continue; + + ssid->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len); + memcpy(ssid->ssids[i].ssid, sreq->ssids[i].ssid, + sreq->ssids[i].ssid_len); + n_ssids++; + } + ssid->ssid_type = n_ssids ? BIT(2) : BIT(0); + ssid->ssids_num = n_ssids; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_BSSID, sizeof(*bssid)); + bssid = (struct scan_bssid_tlv *)tlv; + + memcpy(bssid->bssid, sreq->bssid, ETH_ALEN); + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_CHANNEL, sizeof(*chan_info)); + chan_info = (struct scan_chan_info_tlv *)tlv; + chan_info->channels_num = min_t(u8, sreq->n_channels, + ARRAY_SIZE(chan_info->channels)); + for (i = 0; i < chan_info->channels_num; i++) { + chan = &chan_info->channels[i]; + + switch (scan_list[i]->band) { + case NL80211_BAND_2GHZ: + chan->band = 1; + break; + case NL80211_BAND_6GHZ: + chan->band = 3; + break; + default: + chan->band = 2; + break; + } + chan->channel_num = scan_list[i]->hw_value; + } + chan_info->channel_type = sreq->n_channels ? 4 : 0; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_IE, sizeof(*ie)); + ie = (struct scan_ie_tlv *)tlv; + if (sreq->ie_len > 0) { + memcpy(ie->ies, sreq->ie, sreq->ie_len); + ie->ies_len = cpu_to_le16(sreq->ie_len); + } + + req->scan_func |= SCAN_FUNC_SPLIT_SCAN; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_MISC, sizeof(*misc)); + misc = (struct scan_misc_tlv *)tlv; + if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { + get_random_mask_addr(misc->random_mac, sreq->mac_addr, + sreq->mac_addr_mask); + req->scan_func |= SCAN_FUNC_RANDOM_MAC; + } + + err = mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SCAN_REQ), + false); + if (err < 0) + clear_bit(MT76_HW_SCANNING, &phy->state); + + return err; +} +EXPORT_SYMBOL_GPL(mt7925_mcu_hw_scan); + +int mt7925_mcu_sched_scan_req(struct mt76_phy *phy, + struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *sreq) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct ieee80211_channel **scan_list = sreq->channels; + struct mt76_connac_mcu_scan_channel *chan; + struct mt76_dev *mdev = phy->dev; + struct cfg80211_match_set *cfg_match; + struct cfg80211_ssid *cfg_ssid; + + struct scan_hdr_tlv *hdr; + struct scan_sched_req *req; + struct scan_ssid_tlv *ssid; + struct scan_chan_info_tlv *chan_info; + struct scan_ie_tlv *ie; + struct scan_sched_ssid_match_sets *match; + struct sk_buff *skb; + struct tlv *tlv; + int i, max_len; + + max_len = sizeof(*hdr) + sizeof(*req) + sizeof(*ssid) + + sizeof(*chan_info) + sizeof(*ie) + + sizeof(*match); + + skb = mt76_mcu_msg_alloc(mdev, NULL, max_len); + if (!skb) + return -ENOMEM; + + mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f; + + hdr = (struct scan_hdr_tlv *)skb_put(skb, sizeof(*hdr)); + hdr->seq_num = mvif->scan_seq_num | mvif->band_idx << 7; + hdr->bss_idx = mvif->idx; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_SCHED_REQ, sizeof(*req)); + req = (struct scan_sched_req *)tlv; + req->version = 1; + + if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) + req->scan_func |= SCAN_FUNC_RANDOM_MAC; + + req->intervals_num = 
sreq->n_scan_plans; + for (i = 0; i < req->intervals_num; i++) + req->intervals[i] = cpu_to_le16(sreq->scan_plans[i].interval); + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_SSID, sizeof(*ssid)); + ssid = (struct scan_ssid_tlv *)tlv; + + ssid->ssids_num = sreq->n_ssids; + ssid->ssid_type = BIT(2); + for (i = 0; i < ssid->ssids_num; i++) { + cfg_ssid = &sreq->ssids[i]; + memcpy(ssid->ssids[i].ssid, cfg_ssid->ssid, cfg_ssid->ssid_len); + ssid->ssids[i].ssid_len = cpu_to_le32(cfg_ssid->ssid_len); + } + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_SSID_MATCH_SETS, sizeof(*match)); + match = (struct scan_sched_ssid_match_sets *)tlv; + match->match_num = sreq->n_match_sets; + for (i = 0; i < match->match_num; i++) { + cfg_match = &sreq->match_sets[i]; + memcpy(match->match[i].ssid, cfg_match->ssid.ssid, + cfg_match->ssid.ssid_len); + match->match[i].rssi_th = cpu_to_le32(cfg_match->rssi_thold); + match->match[i].ssid_len = cfg_match->ssid.ssid_len; + } + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_CHANNEL, sizeof(*chan_info)); + chan_info = (struct scan_chan_info_tlv *)tlv; + chan_info->channels_num = min_t(u8, sreq->n_channels, + ARRAY_SIZE(chan_info->channels)); + for (i = 0; i < chan_info->channels_num; i++) { + chan = &chan_info->channels[i]; + + switch (scan_list[i]->band) { + case NL80211_BAND_2GHZ: + chan->band = 1; + break; + case NL80211_BAND_6GHZ: + chan->band = 3; + break; + default: + chan->band = 2; + break; + } + chan->channel_num = scan_list[i]->hw_value; + } + chan_info->channel_type = sreq->n_channels ? 4 : 0; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_IE, sizeof(*ie)); + ie = (struct scan_ie_tlv *)tlv; + if (sreq->ie_len > 0) { + memcpy(ie->ies, sreq->ie, sreq->ie_len); + ie->ies_len = cpu_to_le16(sreq->ie_len); + } + + return mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SCAN_REQ), + false); +} +EXPORT_SYMBOL_GPL(mt7925_mcu_sched_scan_req); + +int +mt7925_mcu_sched_scan_enable(struct mt76_phy *phy, + struct ieee80211_vif *vif, + bool enable) +{ + struct mt76_dev *mdev = phy->dev; + struct scan_sched_enable *req; + struct scan_hdr_tlv *hdr; + struct sk_buff *skb; + struct tlv *tlv; + int max_len; + + max_len = sizeof(*hdr) + sizeof(*req); + + skb = mt76_mcu_msg_alloc(mdev, NULL, max_len); + if (!skb) + return -ENOMEM; + + hdr = (struct scan_hdr_tlv *)skb_put(skb, sizeof(*hdr)); + hdr->seq_num = 0; + hdr->bss_idx = 0; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_SCHED_ENABLE, sizeof(*req)); + req = (struct scan_sched_enable *)tlv; + req->active = !enable; + + if (enable) + set_bit(MT76_HW_SCHED_SCANNING, &phy->state); + else + clear_bit(MT76_HW_SCHED_SCANNING, &phy->state); + + return mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SCAN_REQ), + false); +} + +int mt7925_mcu_cancel_hw_scan(struct mt76_phy *phy, + struct ieee80211_vif *vif) +{ + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct { + struct scan_hdr { + u8 seq_num; + u8 bss_idx; + u8 pad[2]; + } __packed hdr; + struct scan_cancel_tlv { + __le16 tag; + __le16 len; + u8 is_ext_channel; + u8 rsv[3]; + } __packed cancel; + } req = { + .hdr = { + .seq_num = mvif->scan_seq_num, + .bss_idx = mvif->idx, + }, + .cancel = { + .tag = cpu_to_le16(UNI_SCAN_CANCEL), + .len = cpu_to_le16(sizeof(struct scan_cancel_tlv)), + }, + }; + + if (test_and_clear_bit(MT76_HW_SCANNING, &phy->state)) { + struct cfg80211_scan_info info = { + .aborted = true, + }; + + ieee80211_scan_completed(phy->hw, &info); + } + + return mt76_mcu_send_msg(phy->dev, MCU_UNI_CMD(SCAN_REQ), + &req, sizeof(req), false); +} 
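/*
 * Cancellation completes the scan towards mac80211 first, flagging it
 * aborted, and only then tells the firmware; test_and_clear_bit() on
 * MT76_HW_SCANNING doubles as the "was a scan actually in flight" guard
 * so a stray cancel does not report a phantom scan.
 */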
+EXPORT_SYMBOL_GPL(mt7925_mcu_cancel_hw_scan); + +int mt7925_mcu_set_channel_domain(struct mt76_phy *phy) +{ + int len, i, n_max_channels, n_2ch = 0, n_5ch = 0, n_6ch = 0; + struct { + struct { + u8 alpha2[4]; /* regulatory_request.alpha2 */ + u8 bw_2g; /* BW_20_40M 0 + * BW_20M 1 + * BW_20_40_80M 2 + * BW_20_40_80_160M 3 + * BW_20_40_80_8080M 4 + */ + u8 bw_5g; + u8 bw_6g; + u8 pad; + } __packed hdr; + struct n_chan { + __le16 tag; + __le16 len; + u8 n_2ch; + u8 n_5ch; + u8 n_6ch; + u8 pad; + } __packed n_ch; + } req = { + .hdr = { + .bw_2g = 0, + .bw_5g = 3, /* BW_20_40_80_160M */ + .bw_6g = 3, + }, + .n_ch = { + .tag = cpu_to_le16(2), + }, + }; + struct mt76_connac_mcu_chan { + __le16 hw_value; + __le16 pad; + __le32 flags; + } __packed channel; + struct mt76_dev *dev = phy->dev; + struct ieee80211_channel *chan; + struct sk_buff *skb; + + n_max_channels = phy->sband_2g.sband.n_channels + + phy->sband_5g.sband.n_channels + + phy->sband_6g.sband.n_channels; + len = sizeof(req) + n_max_channels * sizeof(channel); + + skb = mt76_mcu_msg_alloc(dev, NULL, len); + if (!skb) + return -ENOMEM; + + skb_reserve(skb, sizeof(req)); + + for (i = 0; i < phy->sband_2g.sband.n_channels; i++) { + chan = &phy->sband_2g.sband.channels[i]; + if (chan->flags & IEEE80211_CHAN_DISABLED) + continue; + + channel.hw_value = cpu_to_le16(chan->hw_value); + channel.flags = cpu_to_le32(chan->flags); + channel.pad = 0; + + skb_put_data(skb, &channel, sizeof(channel)); + n_2ch++; + } + for (i = 0; i < phy->sband_5g.sband.n_channels; i++) { + chan = &phy->sband_5g.sband.channels[i]; + if (chan->flags & IEEE80211_CHAN_DISABLED) + continue; + + channel.hw_value = cpu_to_le16(chan->hw_value); + channel.flags = cpu_to_le32(chan->flags); + channel.pad = 0; + + skb_put_data(skb, &channel, sizeof(channel)); + n_5ch++; + } + for (i = 0; i < phy->sband_6g.sband.n_channels; i++) { + chan = &phy->sband_6g.sband.channels[i]; + if (chan->flags & IEEE80211_CHAN_DISABLED) + continue; + + channel.hw_value = cpu_to_le16(chan->hw_value); + channel.flags = cpu_to_le32(chan->flags); + channel.pad = 0; + + skb_put_data(skb, &channel, sizeof(channel)); + n_6ch++; + } + + BUILD_BUG_ON(sizeof(dev->alpha2) > sizeof(req.hdr.alpha2)); + memcpy(req.hdr.alpha2, dev->alpha2, sizeof(dev->alpha2)); + req.n_ch.n_2ch = n_2ch; + req.n_ch.n_5ch = n_5ch; + req.n_ch.n_6ch = n_6ch; + len = sizeof(struct n_chan) + (n_2ch + n_5ch + n_6ch) * sizeof(channel); + req.n_ch.len = cpu_to_le16(len); + memcpy(__skb_push(skb, sizeof(req)), &req, sizeof(req)); + + return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD(SET_DOMAIN_INFO), + false); +} +EXPORT_SYMBOL_GPL(mt7925_mcu_set_channel_domain); + +static int +__mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2, + enum environment_cap env_cap, + struct mt7925_clc *clc, u8 idx) +{ + struct mt7925_clc_segment *seg; + struct sk_buff *skb; + struct { + u8 rsv[4]; + __le16 tag; + __le16 len; + + u8 ver; + u8 pad0; + __le16 size; + u8 idx; + u8 env; + u8 acpi_conf; + u8 pad1; + u8 alpha2[2]; + u8 type[2]; + u8 rsvd[64]; + } __packed req = { + .tag = cpu_to_le16(0x3), + .len = cpu_to_le16(sizeof(req) - 4), + + .idx = idx, + .env = env_cap, + .acpi_conf = mt792x_acpi_get_flags(&dev->phy), + }; + int ret, valid_cnt = 0; + u8 i, *pos; + + if (!clc) + return 0; + + pos = clc->data + sizeof(*seg) * clc->nr_seg; + for (i = 0; i < clc->nr_country; i++) { + struct mt7925_clc_rule *rule = (struct mt7925_clc_rule *)pos; + + pos += sizeof(*rule); + if (rule->alpha2[0] != alpha2[0] || + rule->alpha2[1] != alpha2[1]) + continue; + + seg 
= (struct mt7925_clc_segment *)clc->data + + rule->seg_idx - 1; + + memcpy(req.alpha2, rule->alpha2, 2); + memcpy(req.type, rule->type, 2); + + req.size = cpu_to_le16(seg->len); + skb = __mt76_mcu_msg_alloc(&dev->mt76, &req, + le16_to_cpu(req.size) + sizeof(req), + sizeof(req), GFP_KERNEL); + if (!skb) + return -ENOMEM; + skb_put_data(skb, clc->data + seg->offset, seg->len); + + ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_UNI_CMD(SET_POWER_LIMIT), + true); + if (ret < 0) + return ret; + valid_cnt++; + } + + if (!valid_cnt) + return -ENOENT; + + return 0; +} + +int mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2, + enum environment_cap env_cap) +{ + struct mt792x_phy *phy = (struct mt792x_phy *)&dev->phy; + int i, ret; + + /* submit all clc config */ + for (i = 0; i < ARRAY_SIZE(phy->clc); i++) { + ret = __mt7925_mcu_set_clc(dev, alpha2, env_cap, + phy->clc[i], i); + + /* If no country found, set "00" as default */ + if (ret == -ENOENT) + ret = __mt7925_mcu_set_clc(dev, "00", + ENVIRON_INDOOR, + phy->clc[i], i); + if (ret < 0) + return ret; + } + return 0; +} + +int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb, + int cmd, int *wait_seq) +{ + int txd_len, mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd); + struct mt76_connac2_mcu_uni_txd *uni_txd; + struct mt76_connac2_mcu_txd *mcu_txd; + __le32 *txd; + u32 val; + u8 seq; + + /* TODO: make dynamic based on msg type */ + mdev->mcu.timeout = 20 * HZ; + + seq = ++mdev->mcu.msg_seq & 0xf; + if (!seq) + seq = ++mdev->mcu.msg_seq & 0xf; + + if (cmd == MCU_CMD(FW_SCATTER)) + goto exit; + + txd_len = cmd & __MCU_CMD_FIELD_UNI ? sizeof(*uni_txd) : sizeof(*mcu_txd); + txd = (__le32 *)skb_push(skb, txd_len); + + val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) | + FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CMD) | + FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_MCU_PORT_RX_Q0); + txd[0] = cpu_to_le32(val); + + val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_CMD); + txd[1] = cpu_to_le32(val); + + if (cmd & __MCU_CMD_FIELD_UNI) { + uni_txd = (struct mt76_connac2_mcu_uni_txd *)txd; + uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd)); + uni_txd->option = MCU_CMD_UNI_EXT_ACK; + uni_txd->cid = cpu_to_le16(mcu_cmd); + uni_txd->s2d_index = MCU_S2D_H2N; + uni_txd->pkt_type = MCU_PKT_ID; + uni_txd->seq = seq; + + goto exit; + } + + mcu_txd = (struct mt76_connac2_mcu_txd *)txd; + mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd)); + mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU, + MT_TX_MCU_PORT_RX_Q0)); + mcu_txd->pkt_type = MCU_PKT_ID; + mcu_txd->seq = seq; + mcu_txd->cid = mcu_cmd; + mcu_txd->ext_cid = FIELD_GET(__MCU_CMD_FIELD_EXT_ID, cmd); + + if (mcu_txd->ext_cid || (cmd & __MCU_CMD_FIELD_CE)) { + if (cmd & __MCU_CMD_FIELD_QUERY) + mcu_txd->set_query = MCU_Q_QUERY; + else + mcu_txd->set_query = MCU_Q_SET; + mcu_txd->ext_cid_ack = !!mcu_txd->ext_cid; + } else { + mcu_txd->set_query = MCU_Q_NA; + } + + if (cmd & __MCU_CMD_FIELD_WA) + mcu_txd->s2d_index = MCU_S2D_H2C; + else + mcu_txd->s2d_index = MCU_S2D_H2N; + +exit: + if (wait_seq) + *wait_seq = seq; + + return 0; +} +EXPORT_SYMBOL_GPL(mt7925_mcu_fill_message); + +int mt7925_mcu_set_rts_thresh(struct mt792x_phy *phy, u32 val) +{ + struct { + u8 band_idx; + u8 _rsv[3]; + + __le16 tag; + __le16 len; + __le32 len_thresh; + __le32 pkt_thresh; + } __packed req = { + .band_idx = phy->mt76->band_idx, + .tag = cpu_to_le16(UNI_BAND_CONFIG_RTS_THRESHOLD), + .len = cpu_to_le16(sizeof(req) - 4), + .len_thresh = cpu_to_le32(val), + .pkt_thresh = cpu_to_le32(0x2), + }; + + return 
mt76_mcu_send_msg(&phy->dev->mt76, MCU_UNI_CMD(BAND_CONFIG), + &req, sizeof(req), true); +} + +int mt7925_mcu_set_radio_en(struct mt792x_phy *phy, bool enable) +{ + struct { + u8 band_idx; + u8 _rsv[3]; + + __le16 tag; + __le16 len; + u8 enable; + u8 _rsv2[3]; + } __packed req = { + .band_idx = phy->mt76->band_idx, + .tag = cpu_to_le16(UNI_BAND_CONFIG_RADIO_ENABLE), + .len = cpu_to_le16(sizeof(req) - 4), + .enable = enable, + }; + + return mt76_mcu_send_msg(&phy->dev->mt76, MCU_UNI_CMD(BAND_CONFIG), + &req, sizeof(req), true); +} + +static void +mt7925_mcu_build_sku(struct mt76_dev *dev, s8 *sku, + struct mt76_power_limits *limits, + enum nl80211_band band) +{ + int i, offset = sizeof(limits->cck); + + memset(sku, 127, MT_CONNAC3_SKU_POWER_LIMIT); + + if (band == NL80211_BAND_2GHZ) { + /* cck */ + memcpy(sku, limits->cck, sizeof(limits->cck)); + } + + /* ofdm */ + memcpy(&sku[offset], limits->ofdm, sizeof(limits->ofdm)); + offset += (sizeof(limits->ofdm) * 5); + + /* ht */ + for (i = 0; i < 2; i++) { + memcpy(&sku[offset], limits->mcs[i], 8); + offset += 8; + } + sku[offset++] = limits->mcs[0][0]; + + /* vht */ + for (i = 0; i < ARRAY_SIZE(limits->mcs); i++) { + memcpy(&sku[offset], limits->mcs[i], + ARRAY_SIZE(limits->mcs[i])); + offset += 12; + } + + /* he */ + for (i = 0; i < ARRAY_SIZE(limits->ru); i++) { + memcpy(&sku[offset], limits->ru[i], ARRAY_SIZE(limits->ru[i])); + offset += ARRAY_SIZE(limits->ru[i]); + } + + /* eht */ + for (i = 0; i < ARRAY_SIZE(limits->eht); i++) { + memcpy(&sku[offset], limits->eht[i], ARRAY_SIZE(limits->eht[i])); + offset += ARRAY_SIZE(limits->eht[i]); + } +} + +static int +mt7925_mcu_rate_txpower_band(struct mt76_phy *phy, + enum nl80211_band band) +{ + int tx_power, n_chan, last_ch, err = 0, idx = 0; + int i, sku_len, batch_size, batch_len = 3; + struct mt76_dev *dev = phy->dev; + static const u8 chan_list_2ghz[] = { + 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14 + }; + static const u8 chan_list_5ghz[] = { + 36, 38, 40, 42, 44, 46, 48, + 50, 52, 54, 56, 58, 60, 62, + 64, 100, 102, 104, 106, 108, 110, + 112, 114, 116, 118, 120, 122, 124, + 126, 128, 132, 134, 136, 138, 140, + 142, 144, 149, 151, 153, 155, 157, + 159, 161, 165, 167 + }; + static const u8 chan_list_6ghz[] = { + 1, 3, 5, 7, 9, 11, 13, + 15, 17, 19, 21, 23, 25, 27, + 29, 33, 35, 37, 39, 41, 43, + 45, 47, 49, 51, 53, 55, 57, + 59, 61, 65, 67, 69, 71, 73, + 75, 77, 79, 81, 83, 85, 87, + 89, 91, 93, 97, 99, 101, 103, + 105, 107, 109, 111, 113, 115, 117, + 119, 121, 123, 125, 129, 131, 133, + 135, 137, 139, 141, 143, 145, 147, + 149, 151, 153, 155, 157, 161, 163, + 165, 167, 169, 171, 173, 175, 177, + 179, 181, 183, 185, 187, 189, 193, + 195, 197, 199, 201, 203, 205, 207, + 209, 211, 213, 215, 217, 219, 221, + 225, 227, 229, 233 + }; + struct mt76_power_limits *limits; + struct mt7925_sku_tlv *sku_tlbv; + const u8 *ch_list; + + sku_len = sizeof(*sku_tlbv); + tx_power = 2 * phy->hw->conf.power_level; + if (!tx_power) + tx_power = 127; + + if (band == NL80211_BAND_2GHZ) { + n_chan = ARRAY_SIZE(chan_list_2ghz); + ch_list = chan_list_2ghz; + last_ch = chan_list_2ghz[ARRAY_SIZE(chan_list_2ghz) - 1]; + } else if (band == NL80211_BAND_6GHZ) { + n_chan = ARRAY_SIZE(chan_list_6ghz); + ch_list = chan_list_6ghz; + last_ch = chan_list_6ghz[ARRAY_SIZE(chan_list_6ghz) - 1]; + } else { + n_chan = ARRAY_SIZE(chan_list_5ghz); + ch_list = chan_list_5ghz; + last_ch = chan_list_5ghz[ARRAY_SIZE(chan_list_5ghz) - 1]; + } + batch_size = DIV_ROUND_UP(n_chan, batch_len); + + limits = devm_kmalloc(dev->dev, 
sizeof(*limits), GFP_KERNEL); + if (!limits) + return -ENOMEM; + + sku_tlbv = devm_kmalloc(dev->dev, sku_len, GFP_KERNEL); + if (!sku_tlbv) { + devm_kfree(dev->dev, limits); + return -ENOMEM; + } + + for (i = 0; i < batch_size; i++) { + struct mt7925_tx_power_limit_tlv *tx_power_tlv; + int j, msg_len, num_ch; + struct sk_buff *skb; + + num_ch = i == batch_size - 1 ? n_chan % batch_len : batch_len; + msg_len = sizeof(*tx_power_tlv) + num_ch * sku_len; + skb = mt76_mcu_msg_alloc(dev, NULL, msg_len); + if (!skb) { + err = -ENOMEM; + goto out; + } + + tx_power_tlv = (struct mt7925_tx_power_limit_tlv *) + skb_put(skb, sizeof(*tx_power_tlv)); + + BUILD_BUG_ON(sizeof(dev->alpha2) > sizeof(tx_power_tlv->alpha2)); + memcpy(tx_power_tlv->alpha2, dev->alpha2, sizeof(dev->alpha2)); + tx_power_tlv->n_chan = num_ch; + tx_power_tlv->tag = cpu_to_le16(0x1); + tx_power_tlv->len = cpu_to_le16(sizeof(*tx_power_tlv)); + + switch (band) { + case NL80211_BAND_2GHZ: + tx_power_tlv->band = 1; + break; + case NL80211_BAND_6GHZ: + tx_power_tlv->band = 3; + break; + default: + tx_power_tlv->band = 2; + break; + } + + for (j = 0; j < num_ch; j++, idx++) { + struct ieee80211_channel chan = { + .hw_value = ch_list[idx], + .band = band, + }; + s8 reg_power, sar_power; + + reg_power = mt76_connac_get_ch_power(phy, &chan, + tx_power); + sar_power = mt76_get_sar_power(phy, &chan, reg_power); + + mt76_get_rate_power_limits(phy, &chan, limits, + sar_power); + + tx_power_tlv->last_msg = ch_list[idx] == last_ch; + sku_tlbv->channel = ch_list[idx]; + + mt7925_mcu_build_sku(dev, sku_tlbv->pwr_limit, + limits, band); + skb_put_data(skb, sku_tlbv, sku_len); + } + err = mt76_mcu_skb_send_msg(dev, skb, + MCU_UNI_CMD(SET_POWER_LIMIT), + true); + if (err < 0) + goto out; + } + +out: + devm_kfree(dev->dev, sku_tlbv); + devm_kfree(dev->dev, limits); + return err; +} + +int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy) +{ + int err; + + if (phy->cap.has_2ghz) { + err = mt7925_mcu_rate_txpower_band(phy, + NL80211_BAND_2GHZ); + if (err < 0) + return err; + } + + if (phy->cap.has_5ghz) { + err = mt7925_mcu_rate_txpower_band(phy, + NL80211_BAND_5GHZ); + if (err < 0) + return err; + } + + if (phy->cap.has_6ghz) { + err = mt7925_mcu_rate_txpower_band(phy, + NL80211_BAND_6GHZ); + if (err < 0) + return err; + } + + return 0; +} + +int mt7925_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif, + u8 bit_op, u32 bit_map) +{ + struct mt792x_phy *phy = &dev->phy; + struct { + u8 band_idx; + u8 rsv1[3]; + + __le16 tag; + __le16 len; + u8 mode; + u8 rsv2[3]; + __le32 fif; + __le32 bit_map; /* bit_* for bitmap update */ + u8 bit_op; + u8 pad[51]; + } __packed req = { + .band_idx = phy->mt76->band_idx, + .tag = cpu_to_le16(UNI_BAND_CONFIG_SET_MAC80211_RX_FILTER), + .len = cpu_to_le16(sizeof(req) - 4), + + .mode = fif ? 0 : 1, + .fif = cpu_to_le32(fif), + .bit_map = cpu_to_le32(bit_map), + .bit_op = bit_op, + }; + + return mt76_mcu_send_msg(&phy->dev->mt76, MCU_UNI_CMD(BAND_CONFIG), + &req, sizeof(req), true); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h new file mode 100644 index 000000000000..3c41e21303b1 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h @@ -0,0 +1,537 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2023 MediaTek Inc. 
*/ + +#ifndef __MT7925_MCU_H +#define __MT7925_MCU_H + +#include "../mt76_connac_mcu.h" + +/* ext event table */ +enum { + MCU_EXT_EVENT_RATE_REPORT = 0x87, +}; + +struct mt7925_mcu_eeprom_info { + __le32 addr; + __le32 valid; + u8 data[MT7925_EEPROM_BLOCK_SIZE]; +} __packed; + +#define MT_RA_RATE_NSS GENMASK(8, 6) +#define MT_RA_RATE_MCS GENMASK(3, 0) +#define MT_RA_RATE_TX_MODE GENMASK(12, 9) +#define MT_RA_RATE_DCM_EN BIT(4) +#define MT_RA_RATE_BW GENMASK(14, 13) + +struct mt7925_mcu_rxd { + __le32 rxd[8]; + + __le16 len; + __le16 pkt_type_id; + + u8 eid; + u8 seq; + u8 option; + u8 __rsv; + + u8 ext_eid; + u8 __rsv1[2]; + u8 s2d_index; + + u8 tlv[]; +}; + +struct mt7925_mcu_uni_event { + u8 cid; + u8 pad[3]; + __le32 status; /* 0: success, others: fail */ +} __packed; + +enum { + MT_EBF = BIT(0), /* explicit beamforming */ + MT_IBF = BIT(1) /* implicit beamforming */ +}; + +struct mt7925_mcu_reg_event { + __le32 reg; + __le32 val; +} __packed; + +struct mt7925_mcu_ant_id_config { + u8 ant_id[4]; +} __packed; + +struct mt7925_txpwr_req { + u8 _rsv[4]; + __le16 tag; + __le16 len; + + u8 format_id; + u8 catg; + u8 band_idx; + u8 _rsv1; +} __packed; + +struct mt7925_txpwr_event { + u8 rsv[4]; + __le16 tag; + __le16 len; + + u8 catg; + u8 band_idx; + u8 ch_band; + u8 format; /* 0:Legacy, 1:HE */ + + /* Rate power info */ + struct mt7925_txpwr txpwr; + + s8 pwr_max; + s8 pwr_min; + u8 rsv1; +} __packed; + +enum { + TM_SWITCH_MODE, + TM_SET_AT_CMD, + TM_QUERY_AT_CMD, +}; + +enum { + MT7925_TM_NORMAL, + MT7925_TM_TESTMODE, + MT7925_TM_ICAP, + MT7925_TM_ICAP_OVERLAP, + MT7925_TM_WIFISPECTRUM, +}; + +struct mt7925_rftest_cmd { + u8 action; + u8 rsv[3]; + __le32 param0; + __le32 param1; +} __packed; + +struct mt7925_rftest_evt { + __le32 param0; + __le32 param1; +} __packed; + +enum { + UNI_CHANNEL_SWITCH, + UNI_CHANNEL_RX_PATH, +}; + +enum { + UNI_CHIP_CONFIG_CHIP_CFG = 0x2, + UNI_CHIP_CONFIG_NIC_CAPA = 0x3, +}; + +enum { + UNI_BAND_CONFIG_RADIO_ENABLE, + UNI_BAND_CONFIG_RTS_THRESHOLD = 0x08, + UNI_BAND_CONFIG_SET_MAC80211_RX_FILTER = 0x0C, +}; + +enum { + UNI_WSYS_CONFIG_FW_LOG_CTRL, + UNI_WSYS_CONFIG_FW_DBG_CTRL, +}; + +enum { + UNI_EFUSE_ACCESS = 1, + UNI_EFUSE_BUFFER_MODE, + UNI_EFUSE_FREE_BLOCK, + UNI_EFUSE_BUFFER_RD, +}; + +enum { + UNI_CMD_ACCESS_REG_BASIC = 0x0, + UNI_CMD_ACCESS_RF_REG_BASIC, +}; + +enum { + UNI_MBMC_SETTING, +}; + +enum { + UNI_EVENT_SCAN_DONE_BASIC = 0, + UNI_EVENT_SCAN_DONE_CHNLINFO = 2, + UNI_EVENT_SCAN_DONE_NLO = 3, +}; + +struct mt7925_mcu_scan_chinfo_event { + u8 nr_chan; + u8 alpha2[3]; +} __packed; + +enum { + UNI_SCAN_REQ = 1, + UNI_SCAN_CANCEL = 2, + UNI_SCAN_SCHED_REQ = 3, + UNI_SCAN_SCHED_ENABLE = 4, + UNI_SCAN_SSID = 10, + UNI_SCAN_BSSID, + UNI_SCAN_CHANNEL, + UNI_SCAN_IE, + UNI_SCAN_MISC, + UNI_SCAN_SSID_MATCH_SETS, +}; + +enum { + UNI_SNIFFER_ENABLE, + UNI_SNIFFER_CONFIG, +}; + +struct scan_hdr_tlv { + /* fixed field */ + u8 seq_num; + u8 bss_idx; + u8 pad[2]; + /* tlv */ + u8 data[]; +} __packed; + +struct scan_req_tlv { + __le16 tag; + __le16 len; + + u8 scan_type; /* 0: PASSIVE SCAN + * 1: ACTIVE SCAN + */ + u8 probe_req_num; /* Number of probe request for each SSID */ + u8 scan_func; /* BIT(0) Enable random MAC scan + * BIT(1) Disable DBDC scan type 1~3. + * BIT(2) Use DBDC scan type 3 (dedicated one RF to scan). 
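+ * The SCAN_FUNC_* flags written into this field (SCAN_FUNC_RANDOM_MAC,
+ * SCAN_FUNC_SPLIT_SCAN in mcu.c above) are defined in mt76_connac_mcu.h.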
+ */ + u8 src_mask; + __le16 channel_min_dwell_time; + __le16 channel_dwell_time; /* channel Dwell interval */ + __le16 timeout_value; + __le16 probe_delay_time; + u8 func_mask_ext; +}; + +struct scan_ssid_tlv { + __le16 tag; + __le16 len; + + u8 ssid_type; /* BIT(0) wildcard SSID + * BIT(1) P2P wildcard SSID + * BIT(2) specified SSID + wildcard SSID + * BIT(2) + ssid_type_ext BIT(0) specified SSID only + */ + u8 ssids_num; + u8 pad[2]; + struct mt76_connac_mcu_scan_ssid ssids[4]; +}; + +struct scan_bssid_tlv { + __le16 tag; + __le16 len; + + u8 bssid[ETH_ALEN]; + u8 match_ch; + u8 match_ssid_ind; + u8 rcpi; + u8 pad[3]; +}; + +struct scan_chan_info_tlv { + __le16 tag; + __le16 len; + + u8 channel_type; /* 0: Full channels + * 1: Only 2.4GHz channels + * 2: Only 5GHz channels + * 3: P2P social channel only (channel #1, #6 and #11) + * 4: Specified channels + * Others: Reserved + */ + u8 channels_num; /* valid when channel_type is 4 */ + u8 pad[2]; + struct mt76_connac_mcu_scan_channel channels[64]; +}; + +struct scan_ie_tlv { + __le16 tag; + __le16 len; + + __le16 ies_len; + u8 band; + u8 pad; + u8 ies[MT76_CONNAC_SCAN_IE_LEN]; +}; + +struct scan_misc_tlv { + __le16 tag; + __le16 len; + + u8 random_mac[ETH_ALEN]; + u8 rsv[2]; +}; + +struct scan_sched_req { + __le16 tag; + __le16 len; + + u8 version; + u8 stop_on_match; + u8 intervals_num; + u8 scan_func; + __le16 intervals[MT76_CONNAC_MAX_NUM_SCHED_SCAN_INTERVAL]; +}; + +struct scan_sched_ssid_match_sets { + __le16 tag; + __le16 len; + + u8 match_num; + u8 rsv[3]; + + struct mt76_connac_mcu_scan_match match[MT76_CONNAC_MAX_SCAN_MATCH]; +}; + +struct scan_sched_enable { + __le16 tag; + __le16 len; + + u8 active; + u8 rsv[3]; +}; + +struct mbmc_set_req { + u8 pad[4]; + u8 data[]; +} __packed; + +struct mbmc_conf_tlv { + __le16 tag; + __le16 len; + + u8 mbmc_en; + u8 band; + u8 pad[2]; +} __packed; + +struct edca { + __le16 tag; + __le16 len; + + u8 queue; + u8 set; + u8 cw_min; + u8 cw_max; + __le16 txop; + u8 aifs; + u8 __rsv; +}; + +struct bss_req_hdr { + u8 bss_idx; + u8 __rsv[3]; +} __packed; + +struct bss_rate_tlv { + __le16 tag; + __le16 len; + u8 __rsv1[4]; + __le16 bc_trans; + __le16 mc_trans; + u8 short_preamble; + u8 bc_fixed_rate; + u8 mc_fixed_rate; + u8 __rsv2; +} __packed; + +struct bss_mld_tlv { + __le16 tag; + __le16 len; + u8 group_mld_id; + u8 own_mld_id; + u8 mac_addr[ETH_ALEN]; + u8 remap_idx; + u8 link_id; + u8 __rsv[2]; +} __packed; + +struct sta_rec_ba_uni { + __le16 tag; + __le16 len; + u8 tid; + u8 ba_type; + u8 amsdu; + u8 ba_en; + __le16 ssn; + __le16 winsize; + u8 ba_rdd_rro; + u8 __rsv[3]; +} __packed; + +struct sta_rec_eht { + __le16 tag; + __le16 len; + u8 tid_bitmap; + u8 _rsv; + __le16 mac_cap; + __le64 phy_cap; + __le64 phy_cap_ext; + u8 mcs_map_bw20[4]; + u8 mcs_map_bw80[3]; + u8 mcs_map_bw160[3]; + u8 mcs_map_bw320[3]; + u8 _rsv2[3]; +} __packed; + +struct sec_key_uni { + __le16 wlan_idx; + u8 mgmt_prot; + u8 cipher_id; + u8 cipher_len; + u8 key_id; + u8 key_len; + u8 need_resp; + u8 key[32]; +} __packed; + +struct sta_rec_sec_uni { + __le16 tag; + __le16 len; + u8 add; + u8 n_cipher; + u8 rsv[2]; + + struct sec_key_uni key[2]; +} __packed; + +struct sta_rec_hdr_trans { + __le16 tag; + __le16 len; + u8 from_ds; + u8 to_ds; + u8 dis_rx_hdr_tran; + u8 rsv; +} __packed; + +struct sta_rec_mld { + __le16 tag; + __le16 len; + u8 mac_addr[ETH_ALEN]; + __le16 primary_id; + __le16 secondary_id; + __le16 wlan_id; + u8 link_num; + u8 rsv[3]; + struct { + __le16 wlan_id; + u8 bss_idx; + u8 rsv; + } __packed link[2]; 
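+	/* link[] above: one wlan_id/bss_idx pair per affiliated link */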
+} __packed; + +#define MT7925_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \ + sizeof(struct sta_rec_basic) + \ + sizeof(struct sta_rec_bf) + \ + sizeof(struct sta_rec_ht) + \ + sizeof(struct sta_rec_he_v2) + \ + sizeof(struct sta_rec_ba_uni) + \ + sizeof(struct sta_rec_vht) + \ + sizeof(struct sta_rec_uapsd) + \ + sizeof(struct sta_rec_amsdu) + \ + sizeof(struct sta_rec_bfee) + \ + sizeof(struct sta_rec_phy) + \ + sizeof(struct sta_rec_ra) + \ + sizeof(struct sta_rec_sec) + \ + sizeof(struct sta_rec_ra_fixed) + \ + sizeof(struct sta_rec_he_6g_capa) + \ + sizeof(struct sta_rec_eht) + \ + sizeof(struct sta_rec_hdr_trans) + \ + sizeof(struct sta_rec_mld) + \ + sizeof(struct tlv)) + +#define MT7925_BSS_UPDATE_MAX_SIZE (sizeof(struct bss_req_hdr) + \ + sizeof(struct mt76_connac_bss_basic_tlv) + \ + sizeof(struct mt76_connac_bss_qos_tlv) + \ + sizeof(struct bss_rate_tlv) + \ + sizeof(struct bss_mld_tlv) + \ + sizeof(struct bss_info_uni_he) + \ + sizeof(struct bss_info_uni_bss_color) + \ + sizeof(struct tlv)) + +#define MT_CONNAC3_SKU_POWER_LIMIT 449 +struct mt7925_sku_tlv { + u8 channel; + s8 pwr_limit[MT_CONNAC3_SKU_POWER_LIMIT]; +} __packed; + +struct mt7925_tx_power_limit_tlv { + u8 rsv[4]; + + __le16 tag; + __le16 len; + + /* DW0 - common info*/ + u8 ver; + u8 pad0; + __le16 rsv1; + /* DW1 - cmd hint */ + u8 n_chan; /* # channel */ + u8 band; /* 2.4GHz - 5GHz - 6GHz */ + u8 last_msg; + u8 limit_type; + /* DW3 */ + u8 alpha2[4]; /* regulatory_request.alpha2 */ + u8 pad2[32]; + + u8 data[]; +} __packed; + +struct mt7925_arpns_tlv { + __le16 tag; + __le16 len; + + u8 enable; + u8 ips_num; + u8 rsv[2]; +} __packed; + +struct mt7925_wow_pattern_tlv { + __le16 tag; + __le16 len; + u8 bss_idx; + u8 index; /* pattern index */ + u8 enable; /* 0: disable + * 1: enable + */ + u8 data_len; /* pattern length */ + u8 offset; + u8 mask[MT76_CONNAC_WOW_MASK_MAX_LEN]; + u8 pattern[MT76_CONNAC_WOW_PATTEN_MAX_LEN]; + u8 rsv[4]; +} __packed; + +int mt7925_mcu_set_dbdc(struct mt76_phy *phy); +int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_scan_request *scan_req); +int mt7925_mcu_cancel_hw_scan(struct mt76_phy *phy, + struct ieee80211_vif *vif); +int mt7925_mcu_sched_scan_req(struct mt76_phy *phy, + struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *sreq); +int mt7925_mcu_sched_scan_enable(struct mt76_phy *phy, + struct ieee80211_vif *vif, + bool enable); +int mt7925_mcu_add_bss_info(struct mt792x_phy *phy, + struct ieee80211_chanctx_conf *ctx, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + int enable); +int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable); +int mt7925_mcu_set_channel_domain(struct mt76_phy *phy); +int mt7925_mcu_set_radio_en(struct mt792x_phy *phy, bool enable); +int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif, + struct ieee80211_chanctx_conf *ctx); +int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy); +int mt7925_mcu_update_arp_filter(struct mt76_dev *dev, + struct mt76_vif *vif, + struct ieee80211_bss_conf *info); +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h new file mode 100644 index 000000000000..33785f526acf --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h @@ -0,0 +1,309 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2023 MediaTek Inc. 
*/ + +#ifndef __MT7925_H +#define __MT7925_H + +#include "../mt792x.h" +#include "regs.h" + +#define MT7925_BEACON_RATES_TBL 25 + +#define MT7925_TX_RING_SIZE 2048 +#define MT7925_TX_MCU_RING_SIZE 256 +#define MT7925_TX_FWDL_RING_SIZE 128 + +#define MT7925_RX_RING_SIZE 1536 +#define MT7925_RX_MCU_RING_SIZE 512 + +#define MT7925_EEPROM_SIZE 3584 +#define MT7925_TOKEN_SIZE 8192 + +#define MT7925_EEPROM_BLOCK_SIZE 16 + +#define MT7925_SKU_RATE_NUM 161 +#define MT7925_SKU_MAX_DELTA_IDX MT7925_SKU_RATE_NUM +#define MT7925_SKU_TABLE_SIZE (MT7925_SKU_RATE_NUM + 1) + +#define MCU_UNI_EVENT_ROC 0x27 + +enum { + UNI_ROC_ACQUIRE, + UNI_ROC_ABORT, + UNI_ROC_NUM +}; + +enum mt7925_roc_req { + MT7925_ROC_REQ_JOIN, + MT7925_ROC_REQ_ROC, + MT7925_ROC_REQ_NUM +}; + +enum { + UNI_EVENT_ROC_GRANT = 0, + UNI_EVENT_ROC_TAG_NUM +}; + +struct mt7925_roc_grant_tlv { + __le16 tag; + __le16 len; + u8 bss_idx; + u8 tokenid; + u8 status; + u8 primarychannel; + u8 rfsco; + u8 rfband; + u8 channelwidth; + u8 centerfreqseg1; + u8 centerfreqseg2; + u8 reqtype; + u8 dbdcband; + u8 rsv[1]; + __le32 max_interval; +} __packed; + +struct mt7925_beacon_loss_tlv { + __le16 tag; + __le16 len; + u8 reason; + u8 nr_btolink; + u8 pad[2]; +} __packed; + +struct mt7925_uni_beacon_loss_event { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct mt7925_beacon_loss_tlv beacon_loss; +} __packed; + +#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2) +#define to_rcpi(rssi) (2 * (rssi) + 220) + +enum mt7925_txq_id { + MT7925_TXQ_BAND0, + MT7925_TXQ_BAND1, + MT7925_TXQ_MCU_WM = 15, + MT7925_TXQ_FWDL, +}; + +enum mt7925_rxq_id { + MT7925_RXQ_BAND0 = 2, + MT7925_RXQ_BAND1, + MT7925_RXQ_MCU_WM = 0, + MT7925_RXQ_MCU_WM2, /* for tx done */ +}; + +enum { + MODE_OPEN = 0, + MODE_SHARED = 1, + MODE_WPA = 3, + MODE_WPA_PSK = 4, + MODE_WPA_NONE = 5, + MODE_WPA2 = 6, + MODE_WPA2_PSK = 7, + MODE_WPA3_SAE = 11, +}; + +enum { + MT7925_CLC_POWER, + MT7925_CLC_CHAN, + MT7925_CLC_MAX_NUM, +}; + +struct mt7925_clc_rule { + u8 alpha2[2]; + u8 type[2]; + u8 seg_idx; + u8 rsv[3]; +} __packed; + +struct mt7925_clc_segment { + u8 idx; + u8 rsv1[3]; + u32 offset; + u32 len; + u8 rsv2[4]; +} __packed; + +struct mt7925_clc { + __le32 len; + u8 idx; + u8 ver; + u8 nr_country; + u8 type; + u8 nr_seg; + u8 rsv[7]; + u8 data[]; +} __packed; + +enum mt7925_eeprom_field { + MT_EE_CHIP_ID = 0x000, + MT_EE_VERSION = 0x002, + MT_EE_MAC_ADDR = 0x004, + __MT_EE_MAX = 0x9ff +}; + +enum { + TXPWR_USER, + TXPWR_EEPROM, + TXPWR_MAC, + TXPWR_MAX_NUM, +}; + +struct mt7925_txpwr { + s8 cck[4][2]; + s8 ofdm[8][2]; + s8 ht20[8][2]; + s8 ht40[9][2]; + s8 vht20[12][2]; + s8 vht40[12][2]; + s8 vht80[12][2]; + s8 vht160[12][2]; + s8 he26[12][2]; + s8 he52[12][2]; + s8 he106[12][2]; + s8 he242[12][2]; + s8 he484[12][2]; + s8 he996[12][2]; + s8 he996x2[12][2]; + s8 eht26[16][2]; + s8 eht52[16][2]; + s8 eht106[16][2]; + s8 eht242[16][2]; + s8 eht484[16][2]; + s8 eht996[16][2]; + s8 eht996x2[16][2]; + s8 eht996x4[16][2]; + s8 eht26_52[16][2]; + s8 eht26_106[16][2]; + s8 eht484_242[16][2]; + s8 eht996_484[16][2]; + s8 eht996_484_242[16][2]; + s8 eht996x2_484[16][2]; + s8 eht996x3[16][2]; + s8 eht996x3_484[16][2]; +}; + +extern const struct ieee80211_ops mt7925_ops; + +int __mt7925_start(struct mt792x_phy *phy); +int mt7925_register_device(struct mt792x_dev *dev); +void mt7925_unregister_device(struct mt792x_dev *dev); +int mt7925_run_firmware(struct mt792x_dev *dev); +int mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif, + bool enable); +int 
mt7925_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta, + struct ieee80211_vif *vif, bool enable, + enum mt76_sta_info_state state); +int mt7925_mcu_set_chan_info(struct mt792x_phy *phy, u16 tag); +int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif); +int mt7925_mcu_set_eeprom(struct mt792x_dev *dev); +int mt7925_mcu_get_rx_rate(struct mt792x_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct rate_info *rate); +int mt7925_mcu_fw_log_2_host(struct mt792x_dev *dev, u8 ctrl); +void mt7925_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb); +int mt7925_mcu_chip_config(struct mt792x_dev *dev, const char *cmd); +int mt7925_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif, + u8 bit_op, u32 bit_map); + +int mt7925_mac_init(struct mt792x_dev *dev); +int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +bool mt7925_mac_wtbl_update(struct mt792x_dev *dev, int idx, u32 mask); +void mt7925_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void mt7925_mac_reset_work(struct work_struct *work); +int mt7925e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info); + +void mt7925_tx_token_put(struct mt792x_dev *dev); +bool mt7925_rx_check(struct mt76_dev *mdev, void *data, int len); +void mt7925_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + struct sk_buff *skb, u32 *info); +void mt7925_stats_work(struct work_struct *work); +void mt7925_set_stream_he_eht_caps(struct mt792x_phy *phy); +int mt7925_init_debugfs(struct mt792x_dev *dev); + +int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev, + struct ieee80211_vif *vif, + bool enable); +int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable); +int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable); +void mt7925_scan_work(struct work_struct *work); +void mt7925_roc_work(struct work_struct *work); +int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif); +void mt7925_coredump_work(struct work_struct *work); +int mt7925_get_txpwr_info(struct mt792x_dev *dev, u8 band_idx, + struct mt7925_txpwr *txpwr); +void mt7925_mac_set_fixed_rate_table(struct mt792x_dev *dev, + u8 tbl_idx, u16 rate_idx); +void mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_key_conf *key, int pid, + enum mt76_txq_id qid, u32 changed); +void mt7925_txwi_free(struct mt792x_dev *dev, struct mt76_txwi_cache *t, + struct ieee80211_sta *sta, bool clear_status, + struct list_head *free_list); +int mt7925_mcu_parse_response(struct mt76_dev *mdev, int cmd, + struct sk_buff *skb, int seq); + +int mt7925e_mac_reset(struct mt792x_dev *dev); +int mt7925e_mcu_init(struct mt792x_dev *dev); +void mt7925_mac_add_txs(struct mt792x_dev *dev, void *data); +void mt7925_set_runtime_pm(struct mt792x_dev *dev); +void mt7925_mcu_set_suspend_iter(void *priv, u8 *mac, + struct ieee80211_vif *vif); +void mt7925_connac_mcu_set_suspend_iter(void *priv, u8 *mac, + struct ieee80211_vif *vif); +void mt7925_set_ipv6_ns_work(struct work_struct *work); + +int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif, + bool enable); +int 
mt7925_mcu_config_sniffer(struct mt792x_vif *vif, + struct ieee80211_chanctx_conf *ctx); + +int mt7925_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info); +void mt7925_usb_sdio_tx_complete_skb(struct mt76_dev *mdev, + struct mt76_queue_entry *e); +bool mt7925_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update); + +int mt7925_mcu_uni_add_beacon_offload(struct mt792x_dev *dev, + struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + bool enable); +int mt7925_set_tx_sar_pwr(struct ieee80211_hw *hw, + const struct cfg80211_sar_specs *sar); + +int mt7925_mcu_regval(struct mt792x_dev *dev, u32 regidx, u32 *val, bool set); +int mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2, + enum environment_cap env_cap); +int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, + struct ieee80211_channel *chan, int duration, + enum mt7925_roc_req type, u8 token_id); +int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, + u8 token_id); +int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb, + int cmd, int *wait_seq); +int mt7925_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif, + struct mt76_connac_sta_key_conf *sta_key_conf, + struct ieee80211_key_conf *key, int mcu_cmd, + struct mt76_wcid *wcid, enum set_key_cmd cmd); +int mt7925_mcu_set_rts_thresh(struct mt792x_phy *phy, u32 val); +int mt7925_mcu_wtbl_update_hdr_trans(struct mt792x_dev *dev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c new file mode 100644 index 000000000000..08ef75e24e1c --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c @@ -0,0 +1,586 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. 
*/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "mt7925.h" +#include "mac.h" +#include "mcu.h" +#include "../dma.h" + +static const struct pci_device_id mt7925_pci_device_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7925), + .driver_data = (kernel_ulong_t)MT7925_FIRMWARE_WM }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0717), + .driver_data = (kernel_ulong_t)MT7925_FIRMWARE_WM }, + { }, +}; + +static bool mt7925_disable_aspm; +module_param_named(disable_aspm, mt7925_disable_aspm, bool, 0644); +MODULE_PARM_DESC(disable_aspm, "disable PCI ASPM support"); + +static int mt7925e_init_reset(struct mt792x_dev *dev) +{ + return mt792x_wpdma_reset(dev, true); +} + +static void mt7925e_unregister_device(struct mt792x_dev *dev) +{ + int i; + struct mt76_connac_pm *pm = &dev->pm; + + cancel_work_sync(&dev->init_work); + mt76_unregister_device(&dev->mt76); + mt76_for_each_q_rx(&dev->mt76, i) + napi_disable(&dev->mt76.napi[i]); + cancel_delayed_work_sync(&pm->ps_work); + cancel_work_sync(&pm->wake_work); + cancel_work_sync(&dev->reset_work); + + mt7925_tx_token_put(dev); + __mt792x_mcu_drv_pmctrl(dev); + mt792x_dma_cleanup(dev); + mt792x_wfsys_reset(dev); + skb_queue_purge(&dev->mt76.mcu.res_q); + + tasklet_disable(&dev->mt76.irq_tasklet); +} + +static void mt7925_reg_remap_restore(struct mt792x_dev *dev) +{ + /* restore the original remap state */ + if (unlikely(dev->backup_l1)) { + dev->bus_ops->wr(&dev->mt76, MT_HIF_REMAP_L1, dev->backup_l1); + dev->backup_l1 = 0; + } + + if (dev->backup_l2) { + dev->bus_ops->wr(&dev->mt76, MT_HIF_REMAP_L2, dev->backup_l2); + dev->backup_l2 = 0; + } +} + +static u32 mt7925_reg_map_l1(struct mt792x_dev *dev, u32 addr) +{ + u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr); + u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr); + + dev->backup_l1 = dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L1); + + dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L1, + MT_HIF_REMAP_L1_MASK, + FIELD_PREP(MT_HIF_REMAP_L1_MASK, base)); + + /* use a read to flush the posted write */ + dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L1); + + return MT_HIF_REMAP_BASE_L1 + offset; +} + +static u32 mt7925_reg_map_l2(struct mt792x_dev *dev, u32 addr) +{ + u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, MT_HIF_REMAP_BASE_L2); + + dev->backup_l2 = dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L1); + + dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L1, + MT_HIF_REMAP_L1_MASK, + FIELD_PREP(MT_HIF_REMAP_L1_MASK, base)); + + dev->bus_ops->wr(&dev->mt76, MT_HIF_REMAP_L2, addr); + /* use a read to flush the posted write */ + dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L1); + + return MT_HIF_REMAP_BASE_L1; +} + +static u32 __mt7925_reg_addr(struct mt792x_dev *dev, u32 addr) +{ + static const struct mt76_connac_reg_map fixed_map[] = { + { 0x830c0000, 0x000000, 0x0001000 }, /* WF_MCU_BUS_CR_REMAP */ + { 0x54000000, 0x002000, 0x0001000 }, /* WFDMA PCIE0 MCU DMA0 */ + { 0x55000000, 0x003000, 0x0001000 }, /* WFDMA PCIE0 MCU DMA1 */ + { 0x56000000, 0x004000, 0x0001000 }, /* WFDMA reserved */ + { 0x57000000, 0x005000, 0x0001000 }, /* WFDMA MCU wrap CR */ + { 0x58000000, 0x006000, 0x0001000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */ + { 0x59000000, 0x007000, 0x0001000 }, /* WFDMA PCIE1 MCU DMA1 */ + { 0x820c0000, 0x008000, 0x0004000 }, /* WF_UMAC_TOP (PLE) */ + { 0x820c8000, 0x00c000, 0x0002000 }, /* WF_UMAC_TOP (PSE) */ + { 0x820cc000, 0x00e000, 0x0002000 }, /* WF_UMAC_TOP (PP) */ + { 0x74030000, 0x010000, 0x0001000 }, /* PCIe MAC */ + { 0x820e0000, 0x020000, 0x0000400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */ + { 0x820e1000, 0x020400, 
0x0000200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */ + { 0x820e2000, 0x020800, 0x0000400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */ + { 0x820e3000, 0x020c00, 0x0000400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */ + { 0x820e4000, 0x021000, 0x0000400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */ + { 0x820e5000, 0x021400, 0x0000800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */ + { 0x820ce000, 0x021c00, 0x0000200 }, /* WF_LMAC_TOP (WF_SEC) */ + { 0x820e7000, 0x021e00, 0x0000200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */ + { 0x820cf000, 0x022000, 0x0001000 }, /* WF_LMAC_TOP (WF_PF) */ + { 0x820e9000, 0x023400, 0x0000200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */ + { 0x820ea000, 0x024000, 0x0000200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */ + { 0x820eb000, 0x024200, 0x0000400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */ + { 0x820ec000, 0x024600, 0x0000200 }, /* WF_LMAC_TOP BN0 (WF_INT) */ + { 0x820ed000, 0x024800, 0x0000800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */ + { 0x820ca000, 0x026000, 0x0002000 }, /* WF_LMAC_TOP BN0 (WF_MUCOP) */ + { 0x820d0000, 0x030000, 0x0010000 }, /* WF_LMAC_TOP (WF_WTBLON) */ + { 0x40000000, 0x070000, 0x0010000 }, /* WF_UMAC_SYSRAM */ + { 0x00400000, 0x080000, 0x0010000 }, /* WF_MCU_SYSRAM */ + { 0x00410000, 0x090000, 0x0010000 }, /* WF_MCU_SYSRAM (configure register) */ + { 0x820f0000, 0x0a0000, 0x0000400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */ + { 0x820f1000, 0x0a0600, 0x0000200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */ + { 0x820f2000, 0x0a0800, 0x0000400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */ + { 0x820f3000, 0x0a0c00, 0x0000400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */ + { 0x820f4000, 0x0a1000, 0x0000400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */ + { 0x820f5000, 0x0a1400, 0x0000800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */ + { 0x820f7000, 0x0a1e00, 0x0000200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */ + { 0x820f9000, 0x0a3400, 0x0000200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */ + { 0x820fa000, 0x0a4000, 0x0000200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */ + { 0x820fb000, 0x0a4200, 0x0000400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */ + { 0x820fc000, 0x0a4600, 0x0000200 }, /* WF_LMAC_TOP BN1 (WF_INT) */ + { 0x820fd000, 0x0a4800, 0x0000800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */ + { 0x820c4000, 0x0a8000, 0x0004000 }, /* WF_LMAC_TOP BN1 (WF_MUCOP) */ + { 0x820b0000, 0x0ae000, 0x0001000 }, /* [APB2] WFSYS_ON */ + { 0x80020000, 0x0b0000, 0x0010000 }, /* WF_TOP_MISC_OFF */ + { 0x81020000, 0x0c0000, 0x0010000 }, /* WF_TOP_MISC_ON */ + { 0x7c020000, 0x0d0000, 0x0010000 }, /* CONN_INFRA, wfdma */ + { 0x7c060000, 0x0e0000, 0x0010000 }, /* CONN_INFRA, conn_host_csr_top */ + { 0x7c000000, 0x0f0000, 0x0010000 }, /* CONN_INFRA */ + { 0x70020000, 0x1f0000, 0x0010000 }, /* Reserved for CBTOP, can't switch */ + { 0x7c500000, 0x060000, 0x2000000 }, /* remap */ + { 0x0, 0x0, 0x0 } /* End */ + }; + int i; + + if (addr < 0x200000) + return addr; + + mt7925_reg_remap_restore(dev); + + for (i = 0; i < ARRAY_SIZE(fixed_map); i++) { + u32 ofs; + + if (addr < fixed_map[i].phys) + continue; + + ofs = addr - fixed_map[i].phys; + if (ofs > fixed_map[i].size) + continue; + + return fixed_map[i].maps + ofs; + } + + if ((addr >= 0x18000000 && addr < 0x18c00000) || + (addr >= 0x70000000 && addr < 0x78000000) || + (addr >= 0x7c000000 && addr < 0x7c400000)) + return mt7925_reg_map_l1(dev, addr); + + return mt7925_reg_map_l2(dev, addr); +} + +static u32 mt7925_rr(struct mt76_dev *mdev, u32 offset) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + u32 addr = __mt7925_reg_addr(dev, offset); + + return dev->bus_ops->rr(mdev, addr); +} + +static void mt7925_wr(struct mt76_dev *mdev, u32 offset, u32 val) +{ + struct mt792x_dev *dev = 
container_of(mdev, struct mt792x_dev, mt76); + u32 addr = __mt7925_reg_addr(dev, offset); + + dev->bus_ops->wr(mdev, addr, val); +} + +static u32 mt7925_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + u32 addr = __mt7925_reg_addr(dev, offset); + + return dev->bus_ops->rmw(mdev, addr, mask, val); +} + +static int mt7925_dma_init(struct mt792x_dev *dev) +{ + int ret; + + mt76_dma_attach(&dev->mt76); + + ret = mt792x_dma_disable(dev, true); + if (ret) + return ret; + + /* init tx queue */ + ret = mt76_connac_init_tx_queues(dev->phy.mt76, MT7925_TXQ_BAND0, + MT7925_TX_RING_SIZE, + MT_TX_RING_BASE, 0); + if (ret) + return ret; + + mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, 0x4); + + /* command to WM */ + ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7925_TXQ_MCU_WM, + MT7925_TX_MCU_RING_SIZE, MT_TX_RING_BASE); + if (ret) + return ret; + + /* firmware download */ + ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, MT7925_TXQ_FWDL, + MT7925_TX_FWDL_RING_SIZE, MT_TX_RING_BASE); + if (ret) + return ret; + + /* rx event */ + ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU], + MT7925_RXQ_MCU_WM, MT7925_RX_MCU_RING_SIZE, + MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE); + if (ret) + return ret; + + /* rx data */ + ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], + MT7925_RXQ_BAND0, MT7925_RX_RING_SIZE, + MT_RX_BUF_SIZE, MT_RX_DATA_RING_BASE); + if (ret) + return ret; + + ret = mt76_init_queues(dev, mt792x_poll_rx); + if (ret < 0) + return ret; + + netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, + mt792x_poll_tx); + napi_enable(&dev->mt76.tx_napi); + + return mt792x_dma_enable(dev); +} + +static int mt7925_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + static const struct mt76_driver_ops drv_ops = { + /* txwi_size = txd size + txp size */ + .txwi_size = MT_TXD_SIZE + sizeof(struct mt76_connac_hw_txp), + .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ | + MT_DRV_AMSDU_OFFLOAD, + .survey_flags = SURVEY_INFO_TIME_TX | + SURVEY_INFO_TIME_RX | + SURVEY_INFO_TIME_BSS_RX, + .token_size = MT7925_TOKEN_SIZE, + .tx_prepare_skb = mt7925e_tx_prepare_skb, + .tx_complete_skb = mt76_connac_tx_complete_skb, + .rx_check = mt7925_rx_check, + .rx_skb = mt7925_queue_rx_skb, + .rx_poll_complete = mt792x_rx_poll_complete, + .sta_add = mt7925_mac_sta_add, + .sta_assoc = mt7925_mac_sta_assoc, + .sta_remove = mt7925_mac_sta_remove, + .update_survey = mt792x_update_channel, + }; + static const struct mt792x_hif_ops mt7925_pcie_ops = { + .init_reset = mt7925e_init_reset, + .reset = mt7925e_mac_reset, + .mcu_init = mt7925e_mcu_init, + .drv_own = mt792xe_mcu_drv_pmctrl, + .fw_own = mt792xe_mcu_fw_pmctrl, + }; + static const struct mt792x_irq_map irq_map = { + .host_irq_enable = MT_WFDMA0_HOST_INT_ENA, + .tx = { + .all_complete_mask = MT_INT_TX_DONE_ALL, + .mcu_complete_mask = MT_INT_TX_DONE_MCU, + }, + .rx = { + .data_complete_mask = HOST_RX_DONE_INT_ENA2, + .wm_complete_mask = HOST_RX_DONE_INT_ENA0, + }, + }; + struct ieee80211_ops *ops; + struct mt76_bus_ops *bus_ops; + struct mt792x_dev *dev; + struct mt76_dev *mdev; + u8 features; + int ret; + u16 cmd; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); + if (ret) + return ret; + + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + if (!(cmd & PCI_COMMAND_MEMORY)) { + cmd |= PCI_COMMAND_MEMORY; + pci_write_config_word(pdev, PCI_COMMAND, cmd); + } + 
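+	/* enable bus mastering: required before the chip can DMA */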
pci_set_master(pdev); + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); + if (ret < 0) + return ret; + + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) + goto err_free_pci_vec; + + if (mt7925_disable_aspm) + mt76_pci_disable_aspm(pdev); + + ops = mt792x_get_mac80211_ops(&pdev->dev, &mt7925_ops, + (void *)id->driver_data, &features); + if (!ops) { + ret = -ENOMEM; + goto err_free_pci_vec; + } + + mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), ops, &drv_ops); + if (!mdev) { + ret = -ENOMEM; + goto err_free_pci_vec; + } + + pci_set_drvdata(pdev, mdev); + + dev = container_of(mdev, struct mt792x_dev, mt76); + dev->fw_features = features; + dev->hif_ops = &mt7925_pcie_ops; + dev->irq_map = &irq_map; + mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]); + tasklet_init(&mdev->irq_tasklet, mt792x_irq_tasklet, (unsigned long)dev); + + dev->phy.dev = dev; + dev->phy.mt76 = &dev->mt76.phy; + dev->mt76.phy.priv = &dev->phy; + dev->bus_ops = dev->mt76.bus; + bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops), + GFP_KERNEL); + if (!bus_ops) { + ret = -ENOMEM; + goto err_free_dev; + } + + bus_ops->rr = mt7925_rr; + bus_ops->wr = mt7925_wr; + bus_ops->rmw = mt7925_rmw; + dev->mt76.bus = bus_ops; + + ret = __mt792x_mcu_fw_pmctrl(dev); + if (ret) + goto err_free_dev; + + ret = __mt792xe_mcu_drv_pmctrl(dev); + if (ret) + goto err_free_dev; + + mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) | + (mt76_rr(dev, MT_HW_REV) & 0xff); + + dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev); + + ret = mt792x_wfsys_reset(dev); + if (ret) + goto err_free_dev; + + mt76_wr(dev, irq_map.host_irq_enable, 0); + + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); + + ret = devm_request_irq(mdev->dev, pdev->irq, mt792x_irq_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + if (ret) + goto err_free_dev; + + ret = mt7925_dma_init(dev); + if (ret) + goto err_free_irq; + + ret = mt7925_register_device(dev); + if (ret) + goto err_free_irq; + + return 0; + +err_free_irq: + devm_free_irq(&pdev->dev, pdev->irq, dev); +err_free_dev: + mt76_free_device(&dev->mt76); +err_free_pci_vec: + pci_free_irq_vectors(pdev); + + return ret; +} + +static void mt7925_pci_remove(struct pci_dev *pdev) +{ + struct mt76_dev *mdev = pci_get_drvdata(pdev); + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + + mt7925e_unregister_device(dev); + devm_free_irq(&pdev->dev, pdev->irq, dev); + mt76_free_device(&dev->mt76); + pci_free_irq_vectors(pdev); +} + +static int mt7925_pci_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mt76_dev *mdev = pci_get_drvdata(pdev); + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + struct mt76_connac_pm *pm = &dev->pm; + int i, err; + + pm->suspended = true; + flush_work(&dev->reset_work); + cancel_delayed_work_sync(&pm->ps_work); + cancel_work_sync(&pm->wake_work); + + err = mt792x_mcu_drv_pmctrl(dev); + if (err < 0) + goto restore_suspend; + + /* always enable deep sleep during suspend to reduce + * power consumption + */ + mt7925_mcu_set_deep_sleep(dev, true); + + err = mt76_connac_mcu_set_hif_suspend(mdev, true); + if (err) + goto restore_suspend; + + napi_disable(&mdev->tx_napi); + mt76_worker_disable(&mdev->tx_worker); + + mt76_for_each_q_rx(mdev, i) { + napi_disable(&mdev->napi[i]); + } + + /* wait until dma is idle */ + mt76_poll(dev, MT_WFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_TX_DMA_BUSY | + MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000); + + /* put dma disabled */ + mt76_clear(dev, 
MT_WFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN); + + /* disable interrupt */ + mt76_wr(dev, dev->irq_map->host_irq_enable, 0); + mt76_wr(dev, MT_WFDMA0_HOST_INT_DIS, + dev->irq_map->tx.all_complete_mask | + MT_INT_RX_DONE_ALL | MT_INT_MCU_CMD); + + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); + + synchronize_irq(pdev->irq); + tasklet_kill(&mdev->irq_tasklet); + + err = mt792x_mcu_fw_pmctrl(dev); + if (err) + goto restore_napi; + + return 0; + +restore_napi: + mt76_for_each_q_rx(mdev, i) { + napi_enable(&mdev->napi[i]); + } + napi_enable(&mdev->tx_napi); + + if (!pm->ds_enable) + mt7925_mcu_set_deep_sleep(dev, false); + + mt76_connac_mcu_set_hif_suspend(mdev, false); + +restore_suspend: + pm->suspended = false; + + if (err < 0) + mt792x_reset(&dev->mt76); + + return err; +} + +static int mt7925_pci_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mt76_dev *mdev = pci_get_drvdata(pdev); + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + struct mt76_connac_pm *pm = &dev->pm; + int i, err; + + err = mt792x_mcu_drv_pmctrl(dev); + if (err < 0) + goto failed; + + mt792x_wpdma_reinit_cond(dev); + + /* enable interrupt */ + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); + mt76_connac_irq_enable(&dev->mt76, + dev->irq_map->tx.all_complete_mask | + MT_INT_RX_DONE_ALL | MT_INT_MCU_CMD); + mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE); + + /* put dma enabled */ + mt76_set(dev, MT_WFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN); + + mt76_worker_enable(&mdev->tx_worker); + + local_bh_disable(); + mt76_for_each_q_rx(mdev, i) { + napi_enable(&mdev->napi[i]); + napi_schedule(&mdev->napi[i]); + } + napi_enable(&mdev->tx_napi); + napi_schedule(&mdev->tx_napi); + local_bh_enable(); + + err = mt76_connac_mcu_set_hif_suspend(mdev, false); + + /* restore previous ds setting */ + if (!pm->ds_enable) + mt7925_mcu_set_deep_sleep(dev, false); + +failed: + pm->suspended = false; + + if (err < 0) + mt792x_reset(&dev->mt76); + + return err; +} + +static void mt7925_pci_shutdown(struct pci_dev *pdev) +{ + mt7925_pci_remove(pdev); +} + +static DEFINE_SIMPLE_DEV_PM_OPS(mt7925_pm_ops, mt7925_pci_suspend, mt7925_pci_resume); + +static struct pci_driver mt7925_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = mt7925_pci_device_table, + .probe = mt7925_pci_probe, + .remove = mt7925_pci_remove, + .shutdown = mt7925_pci_shutdown, + .driver.pm = pm_sleep_ptr(&mt7925_pm_ops), +}; + +module_pci_driver(mt7925_pci_driver); + +MODULE_DEVICE_TABLE(pci, mt7925_pci_device_table); +MODULE_FIRMWARE(MT7925_FIRMWARE_WM); +MODULE_FIRMWARE(MT7925_ROM_PATCH); +MODULE_AUTHOR("Deren Wu <deren.wu@mediatek.com>"); +MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c new file mode 100644 index 000000000000..9fca887977d2 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. 
*/ + +#include "mt7925.h" +#include "../dma.h" +#include "mac.h" + +int mt7925e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); + struct ieee80211_key_conf *key = info->control.hw_key; + struct mt76_connac_hw_txp *txp; + struct mt76_txwi_cache *t; + int id, pid; + u8 *txwi = (u8 *)txwi_ptr; + + if (unlikely(tx_info->skb->len <= ETH_HLEN)) + return -EINVAL; + + if (!wcid) + wcid = &dev->mt76.global_wcid; + + t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); + t->skb = tx_info->skb; + + id = mt76_token_consume(mdev, &t); + if (id < 0) + return id; + + if (sta) { + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + + if (time_after(jiffies, msta->last_txs + HZ / 4)) { + info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; + msta->last_txs = jiffies; + } + } + + pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); + mt7925_mac_write_txwi(mdev, txwi_ptr, tx_info->skb, wcid, key, + pid, qid, 0); + + txp = (struct mt76_connac_hw_txp *)(txwi + MT_TXD_SIZE); + memset(txp, 0, sizeof(struct mt76_connac_hw_txp)); + mt76_connac_write_hw_txp(mdev, tx_info, txp, id); + + tx_info->skb = NULL; + + return 0; +} + +void mt7925_tx_token_put(struct mt792x_dev *dev) +{ + struct mt76_txwi_cache *txwi; + int id; + + spin_lock_bh(&dev->mt76.token_lock); + idr_for_each_entry(&dev->mt76.token, txwi, id) { + mt7925_txwi_free(dev, txwi, NULL, false, NULL); + dev->mt76.token_count--; + } + spin_unlock_bh(&dev->mt76.token_lock); + idr_destroy(&dev->mt76.token); +} + +int mt7925e_mac_reset(struct mt792x_dev *dev) +{ + const struct mt792x_irq_map *irq_map = dev->irq_map; + int i, err; + + mt792xe_mcu_drv_pmctrl(dev); + + mt76_connac_free_pending_tx_skbs(&dev->pm, NULL); + + mt76_wr(dev, dev->irq_map->host_irq_enable, 0); + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); + + set_bit(MT76_RESET, &dev->mphy.state); + set_bit(MT76_MCU_RESET, &dev->mphy.state); + wake_up(&dev->mt76.mcu.wait); + skb_queue_purge(&dev->mt76.mcu.res_q); + + mt76_txq_schedule_all(&dev->mphy); + + mt76_worker_disable(&dev->mt76.tx_worker); + if (irq_map->rx.data_complete_mask) + napi_disable(&dev->mt76.napi[MT_RXQ_MAIN]); + if (irq_map->rx.wm_complete_mask) + napi_disable(&dev->mt76.napi[MT_RXQ_MCU]); + if (irq_map->rx.wm2_complete_mask) + napi_disable(&dev->mt76.napi[MT_RXQ_MCU_WA]); + if (irq_map->tx.all_complete_mask) + napi_disable(&dev->mt76.tx_napi); + + mt7925_tx_token_put(dev); + idr_init(&dev->mt76.token); + + mt792x_wpdma_reset(dev, true); + + local_bh_disable(); + mt76_for_each_q_rx(&dev->mt76, i) { + napi_enable(&dev->mt76.napi[i]); + napi_schedule(&dev->mt76.napi[i]); + } + napi_enable(&dev->mt76.tx_napi); + napi_schedule(&dev->mt76.tx_napi); + local_bh_enable(); + + dev->fw_assert = false; + clear_bit(MT76_MCU_RESET, &dev->mphy.state); + + mt76_wr(dev, dev->irq_map->host_irq_enable, + dev->irq_map->tx.all_complete_mask | + MT_INT_RX_DONE_ALL | MT_INT_MCU_CMD); + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); + + err = mt792xe_mcu_fw_pmctrl(dev); + if (err) + return err; + + err = __mt792xe_mcu_drv_pmctrl(dev); + if (err) + goto out; + + err = mt7925_run_firmware(dev); + if (err) + goto out; + + err = mt7925_mcu_set_eeprom(dev); + if (err) + goto out; + + err = mt7925_mac_init(dev); + if (err) + goto out; + + err = __mt7925_start(&dev->phy); +out: + clear_bit(MT76_RESET, 
&dev->mphy.state); + + mt76_worker_enable(&dev->mt76.tx_worker); + + return err; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci_mcu.c new file mode 100644 index 000000000000..f95bc5dcd830 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci_mcu.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. */ + +#include "mt7925.h" +#include "mcu.h" + +static int +mt7925_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, + int cmd, int *seq) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + enum mt76_mcuq_id txq = MT_MCUQ_WM; + int ret; + + ret = mt7925_mcu_fill_message(mdev, skb, cmd, seq); + if (ret) + return ret; + + mdev->mcu.timeout = 3 * HZ; + + if (cmd == MCU_CMD(FW_SCATTER)) + txq = MT_MCUQ_FWDL; + + return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[txq], skb, 0); +} + +int mt7925e_mcu_init(struct mt792x_dev *dev) +{ + static const struct mt76_mcu_ops mt7925_mcu_ops = { + .headroom = sizeof(struct mt76_connac2_mcu_txd), + .mcu_skb_send_msg = mt7925_mcu_send_message, + .mcu_parse_response = mt7925_mcu_parse_response, + }; + int err; + + dev->mt76.mcu_ops = &mt7925_mcu_ops; + + err = mt792xe_mcu_fw_pmctrl(dev); + if (err) + return err; + + err = __mt792xe_mcu_drv_pmctrl(dev); + if (err) + return err; + + mt76_rmw_field(dev, MT_PCIE_MAC_PM, MT_PCIE_MAC_PM_L0S_DIS, 1); + + err = mt7925_run_firmware(dev); + + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false); + + return err; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/regs.h b/drivers/net/wireless/mediatek/mt76/mt7925/regs.h new file mode 100644 index 000000000000..985794a40c1a --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/regs.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2023 MediaTek Inc. 
*/ + +#ifndef __MT7925_REGS_H +#define __MT7925_REGS_H + +#include "../mt792x_regs.h" + +#define MT_MDP_BASE 0x820cc800 +#define MT_MDP(ofs) (MT_MDP_BASE + (ofs)) + +#define MT_MDP_DCR0 MT_MDP(0x000) +#define MT_MDP_DCR0_DAMSDU_EN BIT(15) +#define MT_MDP_DCR0_RX_HDR_TRANS_EN BIT(19) + +#define MT_MDP_DCR1 MT_MDP(0x004) +#define MT_MDP_DCR1_MAX_RX_LEN GENMASK(15, 3) + +#define MT_MDP_BNRCFR0(_band) MT_MDP(0x090 + ((_band) << 8)) +#define MT_MDP_RCFR0_MCU_RX_MGMT GENMASK(5, 4) +#define MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR GENMASK(7, 6) +#define MT_MDP_RCFR0_MCU_RX_CTL_BAR GENMASK(9, 8) + +#define MT_MDP_BNRCFR1(_band) MT_MDP(0x094 + ((_band) << 8)) +#define MT_MDP_RCFR1_MCU_RX_BYPASS GENMASK(23, 22) +#define MT_MDP_RCFR1_RX_DROPPED_UCAST GENMASK(28, 27) +#define MT_MDP_RCFR1_RX_DROPPED_MCAST GENMASK(30, 29) +#define MT_MDP_TO_HIF 0 +#define MT_MDP_TO_WM 1 + +#define MT_WFDMA0_HOST_INT_ENA MT_WFDMA0(0x228) +#define MT_WFDMA0_HOST_INT_DIS MT_WFDMA0(0x22c) +#define HOST_RX_DONE_INT_ENA4 BIT(12) +#define HOST_RX_DONE_INT_ENA5 BIT(13) +#define HOST_RX_DONE_INT_ENA6 BIT(14) +#define HOST_RX_DONE_INT_ENA7 BIT(15) +#define HOST_RX_DONE_INT_ENA8 BIT(16) +#define HOST_RX_DONE_INT_ENA9 BIT(17) +#define HOST_RX_DONE_INT_ENA10 BIT(18) +#define HOST_RX_DONE_INT_ENA11 BIT(19) +#define HOST_TX_DONE_INT_ENA15 BIT(25) +#define HOST_TX_DONE_INT_ENA16 BIT(26) +#define HOST_TX_DONE_INT_ENA17 BIT(27) + +/* WFDMA interrupt */ +#define MT_INT_RX_DONE_DATA HOST_RX_DONE_INT_ENA2 +#define MT_INT_RX_DONE_WM HOST_RX_DONE_INT_ENA0 +#define MT_INT_RX_DONE_WM2 HOST_RX_DONE_INT_ENA1 +#define MT_INT_RX_DONE_ALL (MT_INT_RX_DONE_DATA | \ + MT_INT_RX_DONE_WM | \ + MT_INT_RX_DONE_WM2) + +#define MT_INT_TX_DONE_MCU_WM (HOST_TX_DONE_INT_ENA15 | \ + HOST_TX_DONE_INT_ENA17) + +#define MT_INT_TX_DONE_FWDL HOST_TX_DONE_INT_ENA16 +#define MT_INT_TX_DONE_BAND0 HOST_TX_DONE_INT_ENA0 + +#define MT_INT_TX_DONE_MCU (MT_INT_TX_DONE_MCU_WM | \ + MT_INT_TX_DONE_FWDL) +#define MT_INT_TX_DONE_ALL (MT_INT_TX_DONE_MCU_WM | \ + MT_INT_TX_DONE_BAND0 | \ + GENMASK(18, 4)) + +#define MT_RX_DATA_RING_BASE MT_WFDMA0(0x500) + +#define MT_INFRA_CFG_BASE 0xd1000 +#define MT_INFRA(ofs) (MT_INFRA_CFG_BASE + (ofs)) + +#define MT_HIF_REMAP_L1 0x155024 +#define MT_HIF_REMAP_L1_MASK GENMASK(31, 16) +#define MT_HIF_REMAP_L1_OFFSET GENMASK(15, 0) +#define MT_HIF_REMAP_L1_BASE GENMASK(31, 16) +#define MT_HIF_REMAP_BASE_L1 0x130000 + +#define MT_HIF_REMAP_L2 0x0120 +#if IS_ENABLED(CONFIG_MT76_DEV) +#define MT_HIF_REMAP_BASE_L2 (0x7c500000 - (0x7c000000 - 0x18000000)) +#else +#define MT_HIF_REMAP_BASE_L2 0x18500000 +#endif + +#define MT_WFSYS_SW_RST_B 0x7c000140 + +#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x370) +#define MT_WTBLON_TOP_WDUCR_GROUP GENMASK(4, 0) + +#define MT_WTBL_UPDATE MT_WTBLON_TOP(0x380) +#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(11, 0) +#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(14) + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c new file mode 100644 index 000000000000..9b885c5b3ed5 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. 
*/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/usb.h> + +#include "mt7925.h" +#include "mcu.h" +#include "mac.h" + +static const struct usb_device_id mt7925u_device_table[] = { + { USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7925, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)MT7925_FIRMWARE_WM }, + { }, +}; + +static int +mt7925u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, + int cmd, int *seq) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + u32 pad, ep; + int ret; + + ret = mt7925_mcu_fill_message(mdev, skb, cmd, seq); + if (ret) + return ret; + + mdev->mcu.timeout = 3 * HZ; + + if (cmd != MCU_CMD(FW_SCATTER)) + ep = MT_EP_OUT_INBAND_CMD; + else + ep = MT_EP_OUT_AC_BE; + + mt792x_skb_add_usb_sdio_hdr(dev, skb, 0); + pad = round_up(skb->len, 4) + 4 - skb->len; + __skb_put_zero(skb, pad); + + ret = mt76u_bulk_msg(&dev->mt76, skb->data, skb->len, NULL, + 1000, ep); + dev_kfree_skb(skb); + + return ret; +} + +static int mt7925u_mcu_init(struct mt792x_dev *dev) +{ + static const struct mt76_mcu_ops mcu_ops = { + .headroom = MT_SDIO_HDR_SIZE + + sizeof(struct mt76_connac2_mcu_txd), + .tailroom = MT_USB_TAIL_SIZE, + .mcu_skb_send_msg = mt7925u_mcu_send_message, + .mcu_parse_response = mt7925_mcu_parse_response, + }; + int ret; + + dev->mt76.mcu_ops = &mcu_ops; + + mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN); + ret = mt7925_run_firmware(dev); + if (ret) + return ret; + + set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); + mt76_clear(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN); + + return 0; +} + +static int mt7925u_mac_reset(struct mt792x_dev *dev) +{ + int err; + + mt76_txq_schedule_all(&dev->mphy); + mt76_worker_disable(&dev->mt76.tx_worker); + + set_bit(MT76_RESET, &dev->mphy.state); + set_bit(MT76_MCU_RESET, &dev->mphy.state); + + wake_up(&dev->mt76.mcu.wait); + skb_queue_purge(&dev->mt76.mcu.res_q); + + mt76u_stop_rx(&dev->mt76); + mt76u_stop_tx(&dev->mt76); + + mt792xu_wfsys_reset(dev); + + clear_bit(MT76_MCU_RESET, &dev->mphy.state); + err = mt76u_resume_rx(&dev->mt76); + if (err) + goto out; + + err = mt792xu_mcu_power_on(dev); + if (err) + goto out; + + err = mt792xu_dma_init(dev, false); + if (err) + goto out; + + mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE); + mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN); + + err = mt7925_run_firmware(dev); + if (err) + goto out; + + mt76_clear(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN); + + err = mt7925_mcu_set_eeprom(dev); + if (err) + goto out; + + err = mt7925_mac_init(dev); + if (err) + goto out; + + err = __mt7925_start(&dev->phy); +out: + clear_bit(MT76_RESET, &dev->mphy.state); + + mt76_worker_enable(&dev->mt76.tx_worker); + + return err; +} + +static int mt7925u_probe(struct usb_interface *usb_intf, + const struct usb_device_id *id) +{ + static const struct mt76_driver_ops drv_ops = { + .txwi_size = MT_SDIO_TXD_SIZE, + .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ | + MT_DRV_AMSDU_OFFLOAD, + .survey_flags = SURVEY_INFO_TIME_TX | + SURVEY_INFO_TIME_RX | + SURVEY_INFO_TIME_BSS_RX, + .tx_prepare_skb = mt7925_usb_sdio_tx_prepare_skb, + .tx_complete_skb = mt7925_usb_sdio_tx_complete_skb, + .tx_status_data = mt7925_usb_sdio_tx_status_data, + .rx_skb = mt7925_queue_rx_skb, + .rx_check = mt7925_rx_check, + .sta_add = mt7925_mac_sta_add, + .sta_assoc = mt7925_mac_sta_assoc, + .sta_remove = mt7925_mac_sta_remove, + .update_survey = mt792x_update_channel, + }; + static const struct mt792x_hif_ops hif_ops = { + .mcu_init = mt7925u_mcu_init, + .init_reset = mt792xu_init_reset, + .reset 
= mt7925u_mac_reset, + }; + static struct mt76_bus_ops bus_ops = { + .rr = mt792xu_rr, + .wr = mt792xu_wr, + .rmw = mt792xu_rmw, + .read_copy = mt76u_read_copy, + .write_copy = mt792xu_copy, + .type = MT76_BUS_USB, + }; + struct usb_device *udev = interface_to_usbdev(usb_intf); + struct ieee80211_ops *ops; + struct ieee80211_hw *hw; + struct mt792x_dev *dev; + struct mt76_dev *mdev; + u8 features; + int ret; + + ops = mt792x_get_mac80211_ops(&usb_intf->dev, &mt7925_ops, + (void *)id->driver_info, &features); + if (!ops) + return -ENOMEM; + + ops->stop = mt792xu_stop; + + mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), ops, &drv_ops); + if (!mdev) + return -ENOMEM; + + dev = container_of(mdev, struct mt792x_dev, mt76); + dev->fw_features = features; + dev->hif_ops = &hif_ops; + + udev = usb_get_dev(udev); + usb_reset_device(udev); + + usb_set_intfdata(usb_intf, dev); + + ret = __mt76u_init(mdev, usb_intf, &bus_ops); + if (ret < 0) + goto error; + + mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) | + (mt76_rr(dev, MT_HW_REV) & 0xff); + dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev); + + if (mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY)) { + ret = mt792xu_wfsys_reset(dev); + if (ret) + goto error; + } + + ret = mt792xu_mcu_power_on(dev); + if (ret) + goto error; + + ret = mt76u_alloc_mcu_queue(&dev->mt76); + if (ret) + goto error; + + ret = mt76u_alloc_queues(&dev->mt76); + if (ret) + goto error; + + ret = mt792xu_dma_init(dev, false); + if (ret) + goto error; + + hw = mt76_hw(dev); + /* check hw sg support in order to enable AMSDU */ + hw->max_tx_fragments = mdev->usb.sg_en ? MT_HW_TXP_MAX_BUF_NUM : 1; + + ret = mt7925_register_device(dev); + if (ret) + goto error; + + return 0; + +error: + mt76u_queues_deinit(&dev->mt76); + + usb_set_intfdata(usb_intf, NULL); + usb_put_dev(interface_to_usbdev(usb_intf)); + + mt76_free_device(&dev->mt76); + + return ret; +} + +#ifdef CONFIG_PM +static int mt7925u_suspend(struct usb_interface *intf, pm_message_t state) +{ + struct mt792x_dev *dev = usb_get_intfdata(intf); + struct mt76_connac_pm *pm = &dev->pm; + int err; + + pm->suspended = true; + flush_work(&dev->reset_work); + + err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true); + if (err) + goto failed; + + mt76u_stop_rx(&dev->mt76); + mt76u_stop_tx(&dev->mt76); + + return 0; + +failed: + pm->suspended = false; + + if (err < 0) + mt792x_reset(&dev->mt76); + + return err; +} + +static int mt7925u_resume(struct usb_interface *intf) +{ + struct mt792x_dev *dev = usb_get_intfdata(intf); + struct mt76_connac_pm *pm = &dev->pm; + bool reinit = true; + int err, i; + + for (i = 0; i < 10; i++) { + u32 val = mt76_rr(dev, MT_WF_SW_DEF_CR_USB_MCU_EVENT); + + if (!(val & MT_WF_SW_SER_TRIGGER_SUSPEND)) { + reinit = false; + break; + } + if (val & MT_WF_SW_SER_DONE_SUSPEND) { + mt76_wr(dev, MT_WF_SW_DEF_CR_USB_MCU_EVENT, 0); + break; + } + + msleep(20); + } + + if (reinit || mt792x_dma_need_reinit(dev)) { + err = mt792xu_dma_init(dev, true); + if (err) + goto failed; + } + + err = mt76u_resume_rx(&dev->mt76); + if (err < 0) + goto failed; + + err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false); +failed: + pm->suspended = false; + + if (err < 0) + mt792x_reset(&dev->mt76); + + return err; +} +#endif /* CONFIG_PM */ + +MODULE_DEVICE_TABLE(usb, mt7925u_device_table); +MODULE_FIRMWARE(MT7925_FIRMWARE_WM); +MODULE_FIRMWARE(MT7925_ROM_PATCH); + +static struct usb_driver mt7925u_driver = { + .name = KBUILD_MODNAME, + .id_table = mt7925u_device_table, + .probe = mt7925u_probe, + 
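+	/*
+	 * .reset_resume below reuses mt7925u_resume(): the resume path
+	 * re-reads MT_WF_SW_DEF_CR_USB_MCU_EVENT and re-runs
+	 * mt792xu_dma_init() whenever the firmware flags a reinit, which
+	 * should also cover resume after a bus reset.
+	 */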
.disconnect = mt792xu_disconnect, +#ifdef CONFIG_PM + .suspend = mt7925u_suspend, + .resume = mt7925u_resume, + .reset_resume = mt7925u_resume, +#endif /* CONFIG_PM */ + .soft_unbind = 1, + .disable_hub_initiated_lpm = 1, +}; +module_usb_driver(mt7925u_driver); + +MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h new file mode 100644 index 000000000000..36fae736dd19 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt792x.h @@ -0,0 +1,403 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2023 MediaTek Inc. */ + +#ifndef __MT792X_H +#define __MT792X_H + +#include <linux/interrupt.h> +#include <linux/ktime.h> + +#include "mt76_connac_mcu.h" +#include "mt792x_regs.h" +#include "mt792x_acpi_sar.h" + +#define MT792x_PM_TIMEOUT (HZ / 12) +#define MT792x_HW_SCAN_TIMEOUT (HZ / 10) + +#define MT792x_MAX_INTERFACES 4 +#define MT792x_WTBL_SIZE 20 +#define MT792x_WTBL_RESERVED (MT792x_WTBL_SIZE - 1) +#define MT792x_WTBL_STA (MT792x_WTBL_RESERVED - MT792x_MAX_INTERFACES) + +#define MT792x_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */ +#define MT792x_CFEND_RATE_11B 0x03 /* 11B LP, 11M */ + +#define MT792x_FW_TAG_FEATURE 4 +#define MT792x_FW_CAP_CNM BIT(7) + +#define MT792x_CHIP_CAP_CLC_EVT_EN BIT(0) + +/* NOTE: used to map mt76_rates. idx may change if firmware expands table */ +#define MT792x_BASIC_RATES_TBL 11 + +#define MT792x_WATCHDOG_TIME (HZ / 4) + +#define MT792x_DRV_OWN_RETRY_COUNT 10 +#define MT792x_MCU_INIT_RETRY_COUNT 10 +#define MT792x_WFSYS_INIT_RETRY_COUNT 2 + +#define MT7921_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7961_1.bin" +#define MT7922_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7922_1.bin" +#define MT7925_FIRMWARE_WM "mediatek/mt7925/WIFI_RAM_CODE_MT7925_1_1.bin" + +#define MT7921_ROM_PATCH "mediatek/WIFI_MT7961_patch_mcu_1_2_hdr.bin" +#define MT7922_ROM_PATCH "mediatek/WIFI_MT7922_patch_mcu_1_1_hdr.bin" +#define MT7925_ROM_PATCH "mediatek/mt7925/WIFI_MT7925_PATCH_MCU_1_1_hdr.bin" + +#define MT792x_SDIO_HDR_TX_BYTES GENMASK(15, 0) +#define MT792x_SDIO_HDR_PKT_TYPE GENMASK(17, 16) + +struct mt792x_vif; +struct mt792x_sta; + +struct mt792x_realease_info { + __le16 len; + u8 pad_len; + u8 tag; +} __packed; + +struct mt792x_fw_features { + u8 segment; + u8 data; + u8 rsv[14]; +} __packed; + +enum { + MT792x_CLC_POWER, + MT792x_CLC_CHAN, + MT792x_CLC_MAX_NUM, +}; + +enum mt792x_reg_power_type { + MT_AP_UNSET = 0, + MT_AP_DEFAULT, + MT_AP_LPI, + MT_AP_SP, + MT_AP_VLP, +}; + +DECLARE_EWMA(avg_signal, 10, 8) + +struct mt792x_sta { + struct mt76_wcid wcid; /* must be first */ + + struct mt792x_vif *vif; + + u32 airtime_ac[8]; + + int ack_signal; + struct ewma_avg_signal avg_ack_signal; + + unsigned long last_txs; + + struct mt76_connac_sta_key_conf bip; +}; + +DECLARE_EWMA(rssi, 10, 8); + +struct mt792x_vif { + struct mt76_vif mt76; /* must be first */ + + struct mt792x_sta sta; + struct mt792x_sta *wep_sta; + + struct mt792x_phy *phy; + + struct ewma_rssi rssi; + + struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; +}; + +struct mt792x_phy { + struct mt76_phy *mt76; + struct mt792x_dev *dev; + + struct ieee80211_sband_iftype_data iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES]; + + u64 omac_mask; + + u16 noise; + + s16 coverage_class; + u8 slottime; + + u32 rx_ampdu_ts; + u32 ampdu_ref; + + struct mt76_mib_stats mib; + + u8 sta_work_count; + u8 clc_chan_conf; + enum mt792x_reg_power_type power_type; + + struct sk_buff_head scan_event_list; 
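+	/* hw scan results are queued on scan_event_list by the chip's
+	 * MCU event handler and drained from scan_work
+	 */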
+ struct delayed_work scan_work; +#ifdef CONFIG_ACPI + void *acpisar; +#endif + void *clc[MT792x_CLC_MAX_NUM]; + u64 chip_cap; + + struct work_struct roc_work; + struct timer_list roc_timer; + wait_queue_head_t roc_wait; + u8 roc_token_id; + bool roc_grant; +}; + +struct mt792x_irq_map { + u32 host_irq_enable; + struct { + u32 all_complete_mask; + u32 mcu_complete_mask; + } tx; + struct { + u32 data_complete_mask; + u32 wm_complete_mask; + u32 wm2_complete_mask; + } rx; +}; + +#define mt792x_init_reset(dev) ((dev)->hif_ops->init_reset(dev)) +#define mt792x_dev_reset(dev) ((dev)->hif_ops->reset(dev)) +#define mt792x_mcu_init(dev) ((dev)->hif_ops->mcu_init(dev)) +#define __mt792x_mcu_drv_pmctrl(dev) ((dev)->hif_ops->drv_own(dev)) +#define __mt792x_mcu_fw_pmctrl(dev) ((dev)->hif_ops->fw_own(dev)) + +struct mt792x_hif_ops { + int (*init_reset)(struct mt792x_dev *dev); + int (*reset)(struct mt792x_dev *dev); + int (*mcu_init)(struct mt792x_dev *dev); + int (*drv_own)(struct mt792x_dev *dev); + int (*fw_own)(struct mt792x_dev *dev); +}; + +struct mt792x_dev { + union { /* must be first */ + struct mt76_dev mt76; + struct mt76_phy mphy; + }; + + const struct mt76_bus_ops *bus_ops; + struct mt792x_phy phy; + + struct work_struct reset_work; + bool hw_full_reset:1; + bool hw_init_done:1; + bool fw_assert:1; + bool has_eht:1; + + struct work_struct init_work; + + u8 fw_debug; + u8 fw_features; + + struct mt76_connac_pm pm; + struct mt76_connac_coredump coredump; + const struct mt792x_hif_ops *hif_ops; + const struct mt792x_irq_map *irq_map; + + struct work_struct ipv6_ns_work; + /* IPv6 addresses for WoWLAN */ + struct sk_buff_head ipv6_ns_list; + + enum environment_cap country_ie_env; + u32 backup_l1; + u32 backup_l2; +}; + +static inline struct mt792x_dev * +mt792x_hw_dev(struct ieee80211_hw *hw) +{ + struct mt76_phy *phy = hw->priv; + + return container_of(phy->dev, struct mt792x_dev, mt76); +} + +static inline struct mt792x_phy * +mt792x_hw_phy(struct ieee80211_hw *hw) +{ + struct mt76_phy *phy = hw->priv; + + return phy->priv; +} + +static inline void +mt792x_get_status_freq_info(struct mt76_rx_status *status, u8 chfreq) +{ + if (chfreq > 180) { + status->band = NL80211_BAND_6GHZ; + chfreq = (chfreq - 181) * 4 + 1; + } else if (chfreq > 14) { + status->band = NL80211_BAND_5GHZ; + } else { + status->band = NL80211_BAND_2GHZ; + } + status->freq = ieee80211_channel_to_frequency(chfreq, status->band); +} + +static inline bool mt792x_dma_need_reinit(struct mt792x_dev *dev) +{ + return !mt76_get_field(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT); +} + +#define mt792x_mutex_acquire(dev) \ + mt76_connac_mutex_acquire(&(dev)->mt76, &(dev)->pm) +#define mt792x_mutex_release(dev) \ + mt76_connac_mutex_release(&(dev)->mt76, &(dev)->pm) + +void mt792x_stop(struct ieee80211_hw *hw); +void mt792x_pm_wake_work(struct work_struct *work); +void mt792x_pm_power_save_work(struct work_struct *work); +void mt792x_reset(struct mt76_dev *mdev); +void mt792x_update_channel(struct mt76_phy *mphy); +void mt792x_mac_reset_counters(struct mt792x_phy *phy); +void mt792x_mac_init_band(struct mt792x_dev *dev, u8 band); +void mt792x_mac_assoc_rssi(struct mt792x_dev *dev, struct sk_buff *skb); +struct mt76_wcid *mt792x_rx_get_wcid(struct mt792x_dev *dev, u16 idx, + bool unicast); +void mt792x_mac_update_mib_stats(struct mt792x_phy *phy); +void mt792x_mac_set_timeing(struct mt792x_phy *phy); +void mt792x_mac_work(struct work_struct *work); +void mt792x_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); 
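The RX status helper mt792x_get_status_freq_info() above decodes the packed channel field reported by the hardware: values above 180 are 6 GHz channels stored as (channel - 1) / 4 + 181, values above 14 are plain 5 GHz channel numbers, and the rest are 2.4 GHz. A minimal stand-alone sketch of the same decoding, assuming mac80211's usual channel-to-frequency formulas; the helper name chfreq_to_mhz is illustrative and not part of the driver:

#include <stdio.h>

static int chfreq_to_mhz(unsigned int chfreq)
{
	if (chfreq > 180)	/* 6 GHz: field holds (chan - 1) / 4 + 181 */
		return 5950 + 5 * ((chfreq - 181) * 4 + 1);
	if (chfreq > 14)	/* 5 GHz: field is the channel number */
		return 5000 + 5 * chfreq;
	return 2407 + 5 * chfreq;	/* 2.4 GHz, channels 1-13 */
}

int main(void)
{
	/* prints 2437 (ch 6), 5180 (ch 36), 6115 (6 GHz ch 33) */
	printf("%d %d %d\n", chfreq_to_mhz(6), chfreq_to_mhz(36),
	       chfreq_to_mhz(189));
	return 0;
}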
+void mt792x_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, + struct sk_buff *skb); +int mt792x_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + unsigned int link_id, u16 queue, + const struct ieee80211_tx_queue_params *params); +int mt792x_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats); +u64 mt792x_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +void mt792x_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u64 timestamp); +void mt792x_tx_worker(struct mt76_worker *w); +void mt792x_roc_timer(struct timer_list *timer); +void mt792x_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop); +int mt792x_assign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx); +void mt792x_unassign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx); +void mt792x_set_wakeup(struct ieee80211_hw *hw, bool enabled); +void mt792x_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 sset, u8 *data); +int mt792x_get_et_sset_count(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + int sset); +void mt792x_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ethtool_stats *stats, u64 *data); +void mt792x_sta_statistics(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo); +void mt792x_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class); +void mt792x_dma_cleanup(struct mt792x_dev *dev); +int mt792x_dma_enable(struct mt792x_dev *dev); +int mt792x_wpdma_reset(struct mt792x_dev *dev, bool force); +int mt792x_wpdma_reinit_cond(struct mt792x_dev *dev); +int mt792x_dma_disable(struct mt792x_dev *dev, bool force); +irqreturn_t mt792x_irq_handler(int irq, void *dev_instance); +void mt792x_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q); +int mt792x_poll_tx(struct napi_struct *napi, int budget); +int mt792x_poll_rx(struct napi_struct *napi, int budget); +void mt792x_irq_tasklet(unsigned long data); +int mt792x_wfsys_reset(struct mt792x_dev *dev); +int mt792x_tx_stats_show(struct seq_file *file, void *data); +int mt792x_queues_acq(struct seq_file *s, void *data); +int mt792x_queues_read(struct seq_file *s, void *data); +int mt792x_pm_stats(struct seq_file *s, void *data); +int mt792x_pm_idle_timeout_set(void *data, u64 val); +int mt792x_pm_idle_timeout_get(void *data, u64 *val); +int mt792x_init_wiphy(struct ieee80211_hw *hw); +struct ieee80211_ops * +mt792x_get_mac80211_ops(struct device *dev, + const struct ieee80211_ops *mac80211_ops, + void *drv_data, u8 *fw_features); +int mt792x_init_wcid(struct mt792x_dev *dev); +int mt792x_mcu_drv_pmctrl(struct mt792x_dev *dev); +int mt792x_mcu_fw_pmctrl(struct mt792x_dev *dev); + +static inline char *mt792x_ram_name(struct mt792x_dev *dev) +{ + switch (mt76_chip(&dev->mt76)) { + case 0x7922: + return MT7922_FIRMWARE_WM; + case 0x7925: + return MT7925_FIRMWARE_WM; + default: + return MT7921_FIRMWARE_WM; + } +} + +static inline char *mt792x_patch_name(struct mt792x_dev *dev) +{ + switch (mt76_chip(&dev->mt76)) { + case 0x7922: + return MT7922_ROM_PATCH; + case 0x7925: + return MT7925_ROM_PATCH; + default: + return MT7921_ROM_PATCH; + } +} + +int mt792x_load_firmware(struct mt792x_dev *dev); + +/* usb */ +#define MT_USB_TYPE_VENDOR (USB_TYPE_VENDOR | 0x1f) +#define 
MT_USB_TYPE_UHW_VENDOR (USB_TYPE_VENDOR | 0x1e) +int mt792xu_dma_init(struct mt792x_dev *dev, bool resume); +int mt792xu_mcu_power_on(struct mt792x_dev *dev); +int mt792xu_wfsys_reset(struct mt792x_dev *dev); +int mt792xu_init_reset(struct mt792x_dev *dev); +u32 mt792xu_rr(struct mt76_dev *dev, u32 addr); +void mt792xu_wr(struct mt76_dev *dev, u32 addr, u32 val); +u32 mt792xu_rmw(struct mt76_dev *dev, u32 addr, u32 mask, u32 val); +void mt792xu_copy(struct mt76_dev *dev, u32 offset, const void *data, int len); +void mt792xu_disconnect(struct usb_interface *usb_intf); +void mt792xu_stop(struct ieee80211_hw *hw); + +static inline void +mt792x_skb_add_usb_sdio_hdr(struct mt792x_dev *dev, struct sk_buff *skb, + int type) +{ + u32 hdr, len; + + len = mt76_is_usb(&dev->mt76) ? skb->len : skb->len + sizeof(hdr); + hdr = FIELD_PREP(MT792x_SDIO_HDR_TX_BYTES, len) | + FIELD_PREP(MT792x_SDIO_HDR_PKT_TYPE, type); + + put_unaligned_le32(hdr, skb_push(skb, sizeof(hdr))); +} + +int __mt792xe_mcu_drv_pmctrl(struct mt792x_dev *dev); +int mt792xe_mcu_drv_pmctrl(struct mt792x_dev *dev); +int mt792xe_mcu_fw_pmctrl(struct mt792x_dev *dev); + +#ifdef CONFIG_ACPI +int mt792x_init_acpi_sar(struct mt792x_dev *dev); +int mt792x_init_acpi_sar_power(struct mt792x_phy *phy, bool set_default); +u8 mt792x_acpi_get_flags(struct mt792x_phy *phy); +#else +static inline int mt792x_init_acpi_sar(struct mt792x_dev *dev) +{ + return 0; +} + +static inline int mt792x_init_acpi_sar_power(struct mt792x_phy *phy, + bool set_default) +{ + return 0; +} + +static inline u8 mt792x_acpi_get_flags(struct mt792x_phy *phy) +{ + return 0; +} +#endif + +#endif /* __MT7925_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c index 48dd0decac5d..303c0f5c9c66 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c +++ b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c @@ -1,15 +1,15 @@ // SPDX-License-Identifier: ISC -/* Copyright (C) 2022 MediaTek Inc. */ +/* Copyright (C) 2023 MediaTek Inc. */ #include <linux/acpi.h> -#include "mt7921.h" +#include "mt792x.h" static int -mt7921_acpi_read(struct mt7921_dev *dev, u8 *method, u8 **tbl, u32 *len) +mt792x_acpi_read(struct mt792x_dev *dev, u8 *method, u8 **tbl, u32 *len) { struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *sar_root, *sar_unit; struct mt76_dev *mdev = &dev->mt76; + union acpi_object *sar_root; acpi_handle root, handle; acpi_status status; u32 i = 0; @@ -45,18 +45,20 @@ mt7921_acpi_read(struct mt7921_dev *dev, u8 *method, u8 **tbl, u32 *len) goto free; } } + if (len) *len = sar_root->package.count; for (i = 0; i < sar_root->package.count; i++) { - sar_unit = &sar_root->package.elements[i]; + union acpi_object *sar_unit = &sar_root->package.elements[i]; if (sar_unit->type != ACPI_TYPE_INTEGER) break; + *(*tbl + i) = (u8)sar_unit->integer.value; } - ret = (i == sar_root->package.count) ? 0 : -EINVAL; + ret = i == sar_root->package.count ? 0 : -EINVAL; free: kfree(sar_root); @@ -64,36 +66,37 @@ free: } /* MTCL : Country List Table for 6G band */ -static int -mt7921_asar_acpi_read_mtcl(struct mt7921_dev *dev, u8 **table, u8 *version) +static void +mt792x_asar_acpi_read_mtcl(struct mt792x_dev *dev, u8 **table, u8 *version) { - *version = (mt7921_acpi_read(dev, MT7921_ACPI_MTCL, table, NULL) < 0) - ? 
1 : 2; - return 0; + if (mt792x_acpi_read(dev, MT792x_ACPI_MTCL, table, NULL) < 0) + *version = 1; + else + *version = 2; } /* MTDS : Dynamic SAR Power Table */ static int -mt7921_asar_acpi_read_mtds(struct mt7921_dev *dev, u8 **table, u8 version) +mt792x_asar_acpi_read_mtds(struct mt792x_dev *dev, u8 **table, u8 version) { int len, ret, sarlen, prelen, tblcnt; bool enable; - ret = mt7921_acpi_read(dev, MT7921_ACPI_MTDS, table, &len); + ret = mt792x_acpi_read(dev, MT792x_ACPI_MTDS, table, &len); if (ret) return ret; /* Table content validation */ switch (version) { case 1: - enable = ((struct mt7921_asar_dyn *)*table)->enable; - sarlen = sizeof(struct mt7921_asar_dyn_limit); - prelen = sizeof(struct mt7921_asar_dyn); + enable = ((struct mt792x_asar_dyn *)*table)->enable; + sarlen = sizeof(struct mt792x_asar_dyn_limit); + prelen = sizeof(struct mt792x_asar_dyn); break; case 2: - enable = ((struct mt7921_asar_dyn_v2 *)*table)->enable; - sarlen = sizeof(struct mt7921_asar_dyn_limit_v2); - prelen = sizeof(struct mt7921_asar_dyn_v2); + enable = ((struct mt792x_asar_dyn_v2 *)*table)->enable; + sarlen = sizeof(struct mt792x_asar_dyn_limit_v2); + prelen = sizeof(struct mt792x_asar_dyn_v2); break; default: return -EINVAL; @@ -101,88 +104,89 @@ mt7921_asar_acpi_read_mtds(struct mt7921_dev *dev, u8 **table, u8 version) tblcnt = (len - prelen) / sarlen; if (!enable || - tblcnt > MT7921_ASAR_MAX_DYN || tblcnt < MT7921_ASAR_MIN_DYN) - ret = -EINVAL; + tblcnt > MT792x_ASAR_MAX_DYN || tblcnt < MT792x_ASAR_MIN_DYN) + return -EINVAL; - return ret; + return 0; } /* MTGS : Geo SAR Power Table */ static int -mt7921_asar_acpi_read_mtgs(struct mt7921_dev *dev, u8 **table, u8 version) +mt792x_asar_acpi_read_mtgs(struct mt792x_dev *dev, u8 **table, u8 version) { - int len, ret = 0, sarlen, prelen, tblcnt; + int len, ret, sarlen, prelen, tblcnt; - ret = mt7921_acpi_read(dev, MT7921_ACPI_MTGS, table, &len); + ret = mt792x_acpi_read(dev, MT792x_ACPI_MTGS, table, &len); if (ret) return ret; /* Table content validation */ switch (version) { case 1: - sarlen = sizeof(struct mt7921_asar_geo_limit); - prelen = sizeof(struct mt7921_asar_geo); + sarlen = sizeof(struct mt792x_asar_geo_limit); + prelen = sizeof(struct mt792x_asar_geo); break; case 2: - sarlen = sizeof(struct mt7921_asar_geo_limit_v2); - prelen = sizeof(struct mt7921_asar_geo_v2); + sarlen = sizeof(struct mt792x_asar_geo_limit_v2); + prelen = sizeof(struct mt792x_asar_geo_v2); break; default: return -EINVAL; } tblcnt = (len - prelen) / sarlen; - if (tblcnt > MT7921_ASAR_MAX_GEO || tblcnt < MT7921_ASAR_MIN_GEO) - ret = -EINVAL; + if (tblcnt > MT792x_ASAR_MAX_GEO || tblcnt < MT792x_ASAR_MIN_GEO) + return -EINVAL; - return ret; + return 0; } /* MTFG : Flag Table */ static int -mt7921_asar_acpi_read_mtfg(struct mt7921_dev *dev, u8 **table) +mt792x_asar_acpi_read_mtfg(struct mt792x_dev *dev, u8 **table) { int len, ret; - ret = mt7921_acpi_read(dev, MT7921_ACPI_MTFG, table, &len); + ret = mt792x_acpi_read(dev, MT792x_ACPI_MTFG, table, &len); if (ret) return ret; - if (len < MT7921_ASAR_MIN_FG) - ret = -EINVAL; + if (len < MT792x_ASAR_MIN_FG) + return -EINVAL; - return ret; + return 0; } -int mt7921_init_acpi_sar(struct mt7921_dev *dev) +int mt792x_init_acpi_sar(struct mt792x_dev *dev) { - struct mt7921_acpi_sar *asar; + struct mt792x_acpi_sar *asar; int ret; asar = devm_kzalloc(dev->mt76.dev, sizeof(*asar), GFP_KERNEL); if (!asar) return -ENOMEM; - mt7921_asar_acpi_read_mtcl(dev, (u8 **)&asar->countrylist, &asar->ver); + mt792x_asar_acpi_read_mtcl(dev, (u8 
**)&asar->countrylist, &asar->ver); /* MTDS is mandatory. Return error if table is invalid */ - ret = mt7921_asar_acpi_read_mtds(dev, (u8 **)&asar->dyn, asar->ver); + ret = mt792x_asar_acpi_read_mtds(dev, (u8 **)&asar->dyn, asar->ver); if (ret) { devm_kfree(dev->mt76.dev, asar->dyn); devm_kfree(dev->mt76.dev, asar->countrylist); devm_kfree(dev->mt76.dev, asar); + return ret; } /* MTGS is optional */ - ret = mt7921_asar_acpi_read_mtgs(dev, (u8 **)&asar->geo, asar->ver); + ret = mt792x_asar_acpi_read_mtgs(dev, (u8 **)&asar->geo, asar->ver); if (ret) { devm_kfree(dev->mt76.dev, asar->geo); asar->geo = NULL; } /* MTFG is optional */ - ret = mt7921_asar_acpi_read_mtfg(dev, (u8 **)&asar->fg); + ret = mt792x_asar_acpi_read_mtfg(dev, (u8 **)&asar->fg); if (ret) { devm_kfree(dev->mt76.dev, asar->fg); asar->fg = NULL; @@ -191,13 +195,14 @@ int mt7921_init_acpi_sar(struct mt7921_dev *dev) return 0; } +EXPORT_SYMBOL_GPL(mt792x_init_acpi_sar); static s8 -mt7921_asar_get_geo_pwr(struct mt7921_phy *phy, +mt792x_asar_get_geo_pwr(struct mt792x_phy *phy, enum nl80211_band band, s8 dyn_power) { - struct mt7921_acpi_sar *asar = phy->acpisar; - struct mt7921_asar_geo_band *band_pwr; + struct mt792x_acpi_sar *asar = phy->acpisar; + struct mt792x_asar_geo_band *band_pwr; s8 geo_power; u8 idx, max; @@ -248,12 +253,12 @@ mt7921_asar_get_geo_pwr(struct mt7921_phy *phy, } static s8 -mt7921_asar_range_pwr(struct mt7921_phy *phy, +mt792x_asar_range_pwr(struct mt792x_phy *phy, const struct cfg80211_sar_freq_ranges *range, u8 idx) { const struct cfg80211_sar_capa *capa = phy->mt76->hw->wiphy->sar_capa; - struct mt7921_acpi_sar *asar = phy->acpisar; + struct mt792x_acpi_sar *asar = phy->acpisar; u8 *limit, band, max; if (!capa) @@ -277,10 +282,10 @@ mt7921_asar_range_pwr(struct mt7921_phy *phy, else band = NL80211_BAND_2GHZ; - return mt7921_asar_get_geo_pwr(phy, band, limit[idx]); + return mt792x_asar_get_geo_pwr(phy, band, limit[idx]); } -int mt7921_init_acpi_sar_power(struct mt7921_phy *phy, bool set_default) +int mt792x_init_acpi_sar_power(struct mt792x_phy *phy, bool set_default) { const struct cfg80211_sar_capa *capa = phy->mt76->hw->wiphy->sar_capa; int i; @@ -300,41 +305,46 @@ int mt7921_init_acpi_sar_power(struct mt7921_phy *phy, bool set_default) continue; frp->power = min_t(s8, set_default ? 127 : frp->power, - mt7921_asar_range_pwr(phy, frp->range, i)); + mt792x_asar_range_pwr(phy, frp->range, i)); } return 0; } +EXPORT_SYMBOL_GPL(mt792x_init_acpi_sar_power); -u8 mt7921_acpi_get_flags(struct mt7921_phy *phy) +u8 mt792x_acpi_get_flags(struct mt792x_phy *phy) { - struct mt7921_asar_fg *fg; + struct mt792x_acpi_sar *acpisar = phy->acpisar; + struct mt792x_asar_fg *fg; struct { u8 acpi_idx; u8 chip_idx; } map[] = { - {1, 1}, - {4, 2}, + { 1, 1 }, + { 4, 2 }, }; u8 flags = BIT(0); int i, j; - if (!phy->acpisar) + if (!acpisar) return 0; - fg = phy->acpisar->fg; + fg = acpisar->fg; if (!fg) return flags; /* pickup necessary settings per device and * translate the index of bitmap for chip command. 
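+	 * e.g. with the map[] table above, an ACPI flag index of 1 sets
+	 * chip bit 1 and an index of 4 sets chip bit 2, on top of the
+	 * always-set BIT(0)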
*/ - for (i = 0; i < fg->nr_flag; i++) - for (j = 0; j < ARRAY_SIZE(map); j++) + for (i = 0; i < fg->nr_flag; i++) { + for (j = 0; j < ARRAY_SIZE(map); j++) { if (fg->flag[i] == map[j].acpi_idx) { flags |= BIT(map[j].chip_idx); break; } + } + } return flags; } +EXPORT_SYMBOL_GPL(mt792x_acpi_get_flags); diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h new file mode 100644 index 000000000000..d6d332e863ba --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2023 MediaTek Inc. */ + +#ifndef __MT7921_ACPI_SAR_H +#define __MT7921_ACPI_SAR_H + +#define MT792x_ASAR_MIN_DYN 1 +#define MT792x_ASAR_MAX_DYN 8 +#define MT792x_ASAR_MIN_GEO 3 +#define MT792x_ASAR_MAX_GEO 8 +#define MT792x_ASAR_MIN_FG 8 + +#define MT792x_ACPI_MTCL "MTCL" +#define MT792x_ACPI_MTDS "MTDS" +#define MT792x_ACPI_MTGS "MTGS" +#define MT792x_ACPI_MTFG "MTFG" + +struct mt792x_asar_dyn_limit { + u8 idx; + u8 frp[5]; +} __packed; + +struct mt792x_asar_dyn { + u8 names[4]; + u8 enable; + u8 nr_tbl; + DECLARE_FLEX_ARRAY(struct mt792x_asar_dyn_limit, tbl); +} __packed; + +struct mt792x_asar_dyn_limit_v2 { + u8 idx; + u8 frp[11]; +} __packed; + +struct mt792x_asar_dyn_v2 { + u8 names[4]; + u8 enable; + u8 rsvd; + u8 nr_tbl; + DECLARE_FLEX_ARRAY(struct mt792x_asar_dyn_limit_v2, tbl); +} __packed; + +struct mt792x_asar_geo_band { + u8 pwr; + u8 offset; +} __packed; + +struct mt792x_asar_geo_limit { + u8 idx; + /* 0:2G, 1:5G */ + struct mt792x_asar_geo_band band[2]; +} __packed; + +struct mt792x_asar_geo { + u8 names[4]; + u8 version; + u8 nr_tbl; + DECLARE_FLEX_ARRAY(struct mt792x_asar_geo_limit, tbl); +} __packed; + +struct mt792x_asar_geo_limit_v2 { + u8 idx; + /* 0:2G, 1:5G, 2:6G */ + struct mt792x_asar_geo_band band[3]; +} __packed; + +struct mt792x_asar_geo_v2 { + u8 names[4]; + u8 version; + u8 rsvd; + u8 nr_tbl; + DECLARE_FLEX_ARRAY(struct mt792x_asar_geo_limit_v2, tbl); +} __packed; + +struct mt792x_asar_cl { + u8 names[4]; + u8 version; + u8 mode_6g; + u8 cl6g[6]; +} __packed; + +struct mt792x_asar_fg { + u8 names[4]; + u8 version; + u8 rsvd; + u8 nr_flag; + u8 rsvd1; + u8 flag[]; +} __packed; + +struct mt792x_acpi_sar { + u8 ver; + union { + struct mt792x_asar_dyn *dyn; + struct mt792x_asar_dyn_v2 *dyn_v2; + }; + union { + struct mt792x_asar_geo *geo; + struct mt792x_asar_geo_v2 *geo_v2; + }; + struct mt792x_asar_cl *countrylist; + struct mt792x_asar_fg *fg; +}; + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c new file mode 100644 index 000000000000..502be22dbe36 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c @@ -0,0 +1,866 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. 
*/ + +#include <linux/module.h> +#include <linux/firmware.h> + +#include "mt792x.h" +#include "dma.h" + +static const struct ieee80211_iface_limit if_limits[] = { + { + .max = MT792x_MAX_INTERFACES, + .types = BIT(NL80211_IFTYPE_STATION) + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_AP) + } +}; + +static const struct ieee80211_iface_combination if_comb[] = { + { + .limits = if_limits, + .n_limits = ARRAY_SIZE(if_limits), + .max_interfaces = MT792x_MAX_INTERFACES, + .num_different_channels = 1, + .beacon_int_infra_match = true, + }, +}; + +static const struct ieee80211_iface_limit if_limits_chanctx[] = { + { + .max = 2, + .types = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_P2P_CLIENT) + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_GO) + } +}; + +static const struct ieee80211_iface_combination if_comb_chanctx[] = { + { + .limits = if_limits_chanctx, + .n_limits = ARRAY_SIZE(if_limits_chanctx), + .max_interfaces = 2, + .num_different_channels = 2, + .beacon_int_infra_match = false, + } +}; + +void mt792x_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt76_phy *mphy = hw->priv; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_vif *vif = info->control.vif; + struct mt76_wcid *wcid = &dev->mt76.global_wcid; + int qid; + + if (control->sta) { + struct mt792x_sta *sta; + + sta = (struct mt792x_sta *)control->sta->drv_priv; + wcid = &sta->wcid; + } + + if (vif && !control->sta) { + struct mt792x_vif *mvif; + + mvif = (struct mt792x_vif *)vif->drv_priv; + wcid = &mvif->sta.wcid; + } + + if (mt76_connac_pm_ref(mphy, &dev->pm)) { + mt76_tx(mphy, control->sta, wcid, skb); + mt76_connac_pm_unref(mphy, &dev->pm); + return; + } + + qid = skb_get_queue_mapping(skb); + if (qid >= MT_TXQ_PSD) { + qid = IEEE80211_AC_BE; + skb_set_queue_mapping(skb, qid); + } + + mt76_connac_pm_queue_skb(hw, &dev->pm, wcid, skb); +} +EXPORT_SYMBOL_GPL(mt792x_tx); + +void mt792x_stop(struct ieee80211_hw *hw) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_phy *phy = mt792x_hw_phy(hw); + + cancel_delayed_work_sync(&phy->mt76->mac_work); + + cancel_delayed_work_sync(&dev->pm.ps_work); + cancel_work_sync(&dev->pm.wake_work); + cancel_work_sync(&dev->reset_work); + mt76_connac_free_pending_tx_skbs(&dev->pm, NULL); + + if (is_mt7921(&dev->mt76)) { + mt792x_mutex_acquire(dev); + mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, false, false); + mt792x_mutex_release(dev); + } + + clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); +} +EXPORT_SYMBOL_GPL(mt792x_stop); + +void mt792x_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_sta *msta = &mvif->sta; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_phy *phy = mt792x_hw_phy(hw); + int idx = msta->wcid.idx; + + mt792x_mutex_acquire(dev); + mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid); + mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, false); + + rcu_assign_pointer(dev->mt76.wcid[idx], NULL); + + dev->mt76.vif_mask &= ~BIT_ULL(mvif->mt76.idx); + phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx); + mt792x_mutex_release(dev); + + spin_lock_bh(&dev->mt76.sta_poll_lock); + if (!list_empty(&msta->wcid.poll_list)) + list_del_init(&msta->wcid.poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); + + mt76_wcid_cleanup(&dev->mt76, &msta->wcid); +} 
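In mt792x_tx() above, the destination wcid (hardware station-table entry) is chosen with a fixed precedence: the station's own entry when control->sta is set, the owning vif's built-in entry for vif-addressed frames, and the device-global wcid otherwise; when the chip is in runtime PM sleep the frame is queued via mt76_connac_pm_queue_skb() instead of being sent directly. A condensed sketch of that precedence with simplified stand-in types (the real driver keeps the wcid inside the mt792x_sta/mt792x_vif drv_priv areas):

#include <stdio.h>
#include <stddef.h>

struct wcid { const char *owner; };	/* stand-in for struct mt76_wcid */

static const struct wcid *pick_wcid(const struct wcid *sta_wcid,
				    const struct wcid *vif_wcid,
				    const struct wcid *global_wcid)
{
	if (sta_wcid)		/* control->sta set: per-station entry */
		return sta_wcid;
	if (vif_wcid)		/* vif frame without a station */
		return vif_wcid;
	return global_wcid;	/* e.g. frames sent while scanning */
}

int main(void)
{
	struct wcid sta = { "sta" }, vif = { "vif" }, glob = { "global" };

	/* prints: sta vif global */
	printf("%s %s %s\n",
	       pick_wcid(&sta, &vif, &glob)->owner,
	       pick_wcid(NULL, &vif, &glob)->owner,
	       pick_wcid(NULL, NULL, &glob)->owner);
	return 0;
}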
+EXPORT_SYMBOL_GPL(mt792x_remove_interface); + +int mt792x_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + unsigned int link_id, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + + /* no need to update right away, we'll get BSS_CHANGED_QOS */ + queue = mt76_connac_lmac_mapping(queue); + mvif->queue_params[queue] = *params; + + return 0; +} +EXPORT_SYMBOL_GPL(mt792x_conf_tx); + +int mt792x_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + struct mt792x_phy *phy = mt792x_hw_phy(hw); + struct mt76_mib_stats *mib = &phy->mib; + + mt792x_mutex_acquire(phy->dev); + + stats->dot11RTSSuccessCount = mib->rts_cnt; + stats->dot11RTSFailureCount = mib->rts_retries_cnt; + stats->dot11FCSErrorCount = mib->fcs_err_cnt; + stats->dot11ACKFailureCount = mib->ack_fail_cnt; + + mt792x_mutex_release(phy->dev); + + return 0; +} +EXPORT_SYMBOL_GPL(mt792x_get_stats); + +u64 mt792x_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + u8 omac_idx = mvif->mt76.omac_idx; + union { + u64 t64; + u32 t32[2]; + } tsf; + u16 n; + + mt792x_mutex_acquire(dev); + + n = omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : omac_idx; + /* TSF software read */ + mt76_set(dev, MT_LPON_TCR(0, n), MT_LPON_TCR_SW_MODE); + tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(0)); + tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(0)); + + mt792x_mutex_release(dev); + + return tsf.t64; +} +EXPORT_SYMBOL_GPL(mt792x_get_tsf); + +void mt792x_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u64 timestamp) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + u8 omac_idx = mvif->mt76.omac_idx; + union { + u64 t64; + u32 t32[2]; + } tsf = { .t64 = timestamp, }; + u16 n; + + mt792x_mutex_acquire(dev); + + n = omac_idx > HW_BSSID_MAX ? 
HW_BSSID_0 : omac_idx; + mt76_wr(dev, MT_LPON_UTTR0(0), tsf.t32[0]); + mt76_wr(dev, MT_LPON_UTTR1(0), tsf.t32[1]); + /* TSF software overwrite */ + mt76_set(dev, MT_LPON_TCR(0, n), MT_LPON_TCR_SW_WRITE); + + mt792x_mutex_release(dev); +} +EXPORT_SYMBOL_GPL(mt792x_set_tsf); + +void mt792x_tx_worker(struct mt76_worker *w) +{ + struct mt792x_dev *dev = container_of(w, struct mt792x_dev, + mt76.tx_worker); + + if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { + queue_work(dev->mt76.wq, &dev->pm.wake_work); + return; + } + + mt76_txq_schedule_all(&dev->mphy); + mt76_connac_pm_unref(&dev->mphy, &dev->pm); +} +EXPORT_SYMBOL_GPL(mt792x_tx_worker); + +void mt792x_roc_timer(struct timer_list *timer) +{ + struct mt792x_phy *phy = from_timer(phy, timer, roc_timer); + + ieee80211_queue_work(phy->mt76->hw, &phy->roc_work); +} +EXPORT_SYMBOL_GPL(mt792x_roc_timer); + +void mt792x_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + + wait_event_timeout(dev->mt76.tx_wait, + !mt76_has_tx_pending(&dev->mphy), HZ / 2); +} +EXPORT_SYMBOL_GPL(mt792x_flush); + +int mt792x_assign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + + mutex_lock(&dev->mt76.mutex); + mvif->mt76.ctx = ctx; + mutex_unlock(&dev->mt76.mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(mt792x_assign_vif_chanctx); + +void mt792x_unassign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + + mutex_lock(&dev->mt76.mutex); + mvif->mt76.ctx = NULL; + mutex_unlock(&dev->mt76.mutex); +} +EXPORT_SYMBOL_GPL(mt792x_unassign_vif_chanctx); + +void mt792x_set_wakeup(struct ieee80211_hw *hw, bool enabled) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt76_dev *mdev = &dev->mt76; + + device_set_wakeup_enable(mdev->dev, enabled); +} +EXPORT_SYMBOL_GPL(mt792x_set_wakeup); + +static const char mt792x_gstrings_stats[][ETH_GSTRING_LEN] = { + /* tx counters */ + "tx_ampdu_cnt", + "tx_mpdu_attempts", + "tx_mpdu_success", + "tx_pkt_ebf_cnt", + "tx_pkt_ibf_cnt", + "tx_ampdu_len:0-1", + "tx_ampdu_len:2-10", + "tx_ampdu_len:11-19", + "tx_ampdu_len:20-28", + "tx_ampdu_len:29-37", + "tx_ampdu_len:38-46", + "tx_ampdu_len:47-55", + "tx_ampdu_len:56-79", + "tx_ampdu_len:80-103", + "tx_ampdu_len:104-127", + "tx_ampdu_len:128-151", + "tx_ampdu_len:152-175", + "tx_ampdu_len:176-199", + "tx_ampdu_len:200-223", + "tx_ampdu_len:224-247", + "ba_miss_count", + "tx_beamformer_ppdu_iBF", + "tx_beamformer_ppdu_eBF", + "tx_beamformer_rx_feedback_all", + "tx_beamformer_rx_feedback_he", + "tx_beamformer_rx_feedback_vht", + "tx_beamformer_rx_feedback_ht", + "tx_msdu_pack_1", + "tx_msdu_pack_2", + "tx_msdu_pack_3", + "tx_msdu_pack_4", + "tx_msdu_pack_5", + "tx_msdu_pack_6", + "tx_msdu_pack_7", + "tx_msdu_pack_8", + /* rx counters */ + "rx_mpdu_cnt", + "rx_ampdu_cnt", + "rx_ampdu_bytes_cnt", + "rx_ba_cnt", + /* per vif counters */ + "v_tx_mode_cck", + "v_tx_mode_ofdm", + "v_tx_mode_ht", + "v_tx_mode_ht_gf", + "v_tx_mode_vht", + "v_tx_mode_he_su", + "v_tx_mode_he_ext_su", + "v_tx_mode_he_tb", + "v_tx_mode_he_mu", + "v_tx_mode_eht_su", + "v_tx_mode_eht_trig", + "v_tx_mode_eht_mu", + "v_tx_bw_20", 
+ "v_tx_bw_40", + "v_tx_bw_80", + "v_tx_bw_160", + "v_tx_mcs_0", + "v_tx_mcs_1", + "v_tx_mcs_2", + "v_tx_mcs_3", + "v_tx_mcs_4", + "v_tx_mcs_5", + "v_tx_mcs_6", + "v_tx_mcs_7", + "v_tx_mcs_8", + "v_tx_mcs_9", + "v_tx_mcs_10", + "v_tx_mcs_11", + "v_tx_mcs_12", + "v_tx_mcs_13", + "v_tx_nss_1", + "v_tx_nss_2", + "v_tx_nss_3", + "v_tx_nss_4", +}; + +void mt792x_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 sset, u8 *data) +{ + if (sset != ETH_SS_STATS) + return; + + memcpy(data, mt792x_gstrings_stats, sizeof(mt792x_gstrings_stats)); + + data += sizeof(mt792x_gstrings_stats); + page_pool_ethtool_stats_get_strings(data); +} +EXPORT_SYMBOL_GPL(mt792x_get_et_strings); + +int mt792x_get_et_sset_count(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + int sset) +{ + if (sset != ETH_SS_STATS) + return 0; + + return ARRAY_SIZE(mt792x_gstrings_stats) + + page_pool_ethtool_stats_get_count(); +} +EXPORT_SYMBOL_GPL(mt792x_get_et_sset_count); + +static void +mt792x_ethtool_worker(void *wi_data, struct ieee80211_sta *sta) +{ + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + struct mt76_ethtool_worker_info *wi = wi_data; + + if (msta->vif->mt76.idx != wi->idx) + return; + + mt76_ethtool_worker(wi, &msta->wcid.stats, true); +} + +void mt792x_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ethtool_stats *stats, u64 *data) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + int stats_size = ARRAY_SIZE(mt792x_gstrings_stats); + struct mt792x_phy *phy = mt792x_hw_phy(hw); + struct mt792x_dev *dev = phy->dev; + struct mt76_mib_stats *mib = &phy->mib; + struct mt76_ethtool_worker_info wi = { + .data = data, + .idx = mvif->mt76.idx, + }; + int i, ei = 0; + + mt792x_mutex_acquire(dev); + + mt792x_mac_update_mib_stats(phy); + + data[ei++] = mib->tx_ampdu_cnt; + data[ei++] = mib->tx_mpdu_attempts_cnt; + data[ei++] = mib->tx_mpdu_success_cnt; + data[ei++] = mib->tx_pkt_ebf_cnt; + data[ei++] = mib->tx_pkt_ibf_cnt; + + /* Tx ampdu stat */ + for (i = 0; i < 15; i++) + data[ei++] = phy->mt76->aggr_stats[i]; + + data[ei++] = phy->mib.ba_miss_cnt; + + /* Tx Beamformer monitor */ + data[ei++] = mib->tx_bf_ibf_ppdu_cnt; + data[ei++] = mib->tx_bf_ebf_ppdu_cnt; + + /* Tx Beamformer Rx feedback monitor */ + data[ei++] = mib->tx_bf_rx_fb_all_cnt; + data[ei++] = mib->tx_bf_rx_fb_he_cnt; + data[ei++] = mib->tx_bf_rx_fb_vht_cnt; + data[ei++] = mib->tx_bf_rx_fb_ht_cnt; + + /* Tx amsdu info (pack-count histogram) */ + for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) + data[ei++] = mib->tx_amsdu[i]; + + /* rx counters */ + data[ei++] = mib->rx_mpdu_cnt; + data[ei++] = mib->rx_ampdu_cnt; + data[ei++] = mib->rx_ampdu_bytes_cnt; + data[ei++] = mib->rx_ba_cnt; + + /* Add values for all stations owned by this vif */ + wi.initial_stat_idx = ei; + ieee80211_iterate_stations_atomic(hw, mt792x_ethtool_worker, &wi); + + mt792x_mutex_release(dev); + + if (!wi.sta_count) + return; + + ei += wi.worker_stat_count; + + mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei); + stats_size += page_pool_ethtool_stats_get_count(); + + if (ei != stats_size) + dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %d", ei, + stats_size); +} +EXPORT_SYMBOL_GPL(mt792x_get_et_stats); + +void mt792x_sta_statistics(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo) +{ + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + struct rate_info *txrate = &msta->wcid.rate; + + if (!txrate->legacy && !txrate->flags) + 
return; + + if (txrate->legacy) { + sinfo->txrate.legacy = txrate->legacy; + } else { + sinfo->txrate.mcs = txrate->mcs; + sinfo->txrate.nss = txrate->nss; + sinfo->txrate.bw = txrate->bw; + sinfo->txrate.he_gi = txrate->he_gi; + sinfo->txrate.he_dcm = txrate->he_dcm; + sinfo->txrate.he_ru_alloc = txrate->he_ru_alloc; + } + sinfo->tx_failed = msta->wcid.stats.tx_failed; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); + + sinfo->tx_retries = msta->wcid.stats.tx_retries; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); + + sinfo->txrate.flags = txrate->flags; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); + + sinfo->ack_signal = (s8)msta->ack_signal; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL); + + sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->avg_ack_signal); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG); +} +EXPORT_SYMBOL_GPL(mt792x_sta_statistics); + +void mt792x_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) +{ + struct mt792x_phy *phy = mt792x_hw_phy(hw); + struct mt792x_dev *dev = phy->dev; + + mt792x_mutex_acquire(dev); + + phy->coverage_class = max_t(s16, coverage_class, 0); + mt792x_mac_set_timeing(phy); + + mt792x_mutex_release(dev); +} +EXPORT_SYMBOL_GPL(mt792x_set_coverage_class); + +int mt792x_init_wiphy(struct ieee80211_hw *hw) +{ + struct mt792x_phy *phy = mt792x_hw_phy(hw); + struct mt792x_dev *dev = phy->dev; + struct wiphy *wiphy = hw->wiphy; + + hw->queues = 4; + if (dev->has_eht) { + hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_EHT; + hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_EHT; + } else { + hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; + hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; + } + hw->netdev_features = NETIF_F_RXCSUM; + + hw->radiotap_timestamp.units_pos = + IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US; + + phy->slottime = 9; + + hw->sta_data_size = sizeof(struct mt792x_sta); + hw->vif_data_size = sizeof(struct mt792x_vif); + + if (dev->fw_features & MT792x_FW_CAP_CNM) { + wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; + wiphy->iface_combinations = if_comb_chanctx; + wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_chanctx); + } else { + wiphy->flags &= ~WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; + wiphy->iface_combinations = if_comb; + wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); + } + wiphy->flags &= ~(WIPHY_FLAG_IBSS_RSN | WIPHY_FLAG_4ADDR_AP | + WIPHY_FLAG_4ADDR_STATION); + wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO); + wiphy->max_remain_on_channel_duration = 5000; + wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN; + wiphy->max_scan_ssids = 4; + wiphy->max_sched_scan_plan_interval = + MT76_CONNAC_MAX_TIME_SCHED_SCAN_INTERVAL; + wiphy->max_sched_scan_ie_len = IEEE80211_MAX_DATA_LEN; + wiphy->max_sched_scan_ssids = MT76_CONNAC_MAX_SCHED_SCAN_SSID; + wiphy->max_match_sets = MT76_CONNAC_MAX_SCAN_MATCH; + wiphy->max_sched_scan_reqs = 1; + wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH | + WIPHY_FLAG_SPLIT_SCAN_6GHZ; + + wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | + NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_LEGACY); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HT); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT); + wiphy_ext_feature_set(wiphy, 
NL80211_EXT_FEATURE_BEACON_RATE_HE); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0); + + ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); + ieee80211_hw_set(hw, HAS_RATE_CONTROL); + ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD); + ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD); + ieee80211_hw_set(hw, WANT_MONITOR_VIF); + ieee80211_hw_set(hw, SUPPORTS_PS); + ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); + ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); + ieee80211_hw_set(hw, CONNECTION_MONITOR); + + if (dev->pm.enable) + ieee80211_hw_set(hw, CONNECTION_MONITOR); + + hw->max_tx_fragments = 4; + + return 0; +} +EXPORT_SYMBOL_GPL(mt792x_init_wiphy); + +static u8 +mt792x_get_offload_capability(struct device *dev, const char *fw_wm) +{ + const struct mt76_connac2_fw_trailer *hdr; + struct mt792x_realease_info *rel_info; + const struct firmware *fw; + int ret, i, offset = 0; + const u8 *data, *end; + u8 offload_caps = 0; + + ret = request_firmware(&fw, fw_wm, dev); + if (ret) + return ret; + + if (!fw || !fw->data || fw->size < sizeof(*hdr)) { + dev_err(dev, "Invalid firmware\n"); + goto out; + } + + data = fw->data; + hdr = (const void *)(fw->data + fw->size - sizeof(*hdr)); + + for (i = 0; i < hdr->n_region; i++) { + const struct mt76_connac2_fw_region *region; + + region = (const void *)((const u8 *)hdr - + (hdr->n_region - i) * sizeof(*region)); + offset += le32_to_cpu(region->len); + } + + data += offset + 16; + rel_info = (struct mt792x_realease_info *)data; + data += sizeof(*rel_info); + end = data + le16_to_cpu(rel_info->len); + + while (data < end) { + rel_info = (struct mt792x_realease_info *)data; + data += sizeof(*rel_info); + + if (rel_info->tag == MT792x_FW_TAG_FEATURE) { + struct mt792x_fw_features *features; + + features = (struct mt792x_fw_features *)data; + offload_caps = features->data; + break; + } + + data += le16_to_cpu(rel_info->len) + rel_info->pad_len; + } + +out: + release_firmware(fw); + + return offload_caps; +} + +struct ieee80211_ops * +mt792x_get_mac80211_ops(struct device *dev, + const struct ieee80211_ops *mac80211_ops, + void *drv_data, u8 *fw_features) +{ + struct ieee80211_ops *ops; + + ops = devm_kmemdup(dev, mac80211_ops, sizeof(struct ieee80211_ops), + GFP_KERNEL); + if (!ops) + return NULL; + + *fw_features = mt792x_get_offload_capability(dev, drv_data); + if (!(*fw_features & MT792x_FW_CAP_CNM)) { + ops->remain_on_channel = NULL; + ops->cancel_remain_on_channel = NULL; + ops->add_chanctx = NULL; + ops->remove_chanctx = NULL; + ops->change_chanctx = NULL; + ops->assign_vif_chanctx = NULL; + ops->unassign_vif_chanctx = NULL; + ops->mgd_prepare_tx = NULL; + ops->mgd_complete_tx = NULL; + } + return ops; +} +EXPORT_SYMBOL_GPL(mt792x_get_mac80211_ops); + +int mt792x_init_wcid(struct mt792x_dev *dev) +{ + int idx; + + /* Beacon and mgmt frames should occupy wcid 0 */ + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT792x_WTBL_STA - 1); + if (idx) + return -ENOSPC; + + dev->mt76.global_wcid.idx = idx; + dev->mt76.global_wcid.hw_key_idx = -1; + dev->mt76.global_wcid.tx_info |= MT_WCID_TX_INFO_SET; + rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid); + + return 0; +} +EXPORT_SYMBOL_GPL(mt792x_init_wcid); + +int mt792x_mcu_drv_pmctrl(struct mt792x_dev *dev) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt76_connac_pm *pm = &dev->pm; + int err = 0; + + mutex_lock(&pm->mutex); + + if (!test_bit(MT76_STATE_PM, &mphy->state)) + goto out; + + err = 
__mt792x_mcu_drv_pmctrl(dev); +out: + mutex_unlock(&pm->mutex); + + if (err) + mt792x_reset(&dev->mt76); + + return err; +} +EXPORT_SYMBOL_GPL(mt792x_mcu_drv_pmctrl); + +int mt792x_mcu_fw_pmctrl(struct mt792x_dev *dev) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt76_connac_pm *pm = &dev->pm; + int err = 0; + + mutex_lock(&pm->mutex); + + if (mt76_connac_skip_fw_pmctrl(mphy, pm)) + goto out; + + err = __mt792x_mcu_fw_pmctrl(dev); +out: + mutex_unlock(&pm->mutex); + + if (err) + mt792x_reset(&dev->mt76); + + return err; +} +EXPORT_SYMBOL_GPL(mt792x_mcu_fw_pmctrl); + +int __mt792xe_mcu_drv_pmctrl(struct mt792x_dev *dev) +{ + int i, err = 0; + + for (i = 0; i < MT792x_DRV_OWN_RETRY_COUNT; i++) { + mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_CLR_OWN); + if (mt76_poll_msec_tick(dev, MT_CONN_ON_LPCTL, + PCIE_LPCR_HOST_OWN_SYNC, 0, 50, 1)) + break; + } + + if (i == MT792x_DRV_OWN_RETRY_COUNT) { + dev_err(dev->mt76.dev, "driver own failed\n"); + err = -EIO; + } + + return err; +} +EXPORT_SYMBOL_GPL(__mt792xe_mcu_drv_pmctrl); + +int mt792xe_mcu_drv_pmctrl(struct mt792x_dev *dev) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt76_connac_pm *pm = &dev->pm; + int err; + + err = __mt792xe_mcu_drv_pmctrl(dev); + if (err < 0) + goto out; + + mt792x_wpdma_reinit_cond(dev); + clear_bit(MT76_STATE_PM, &mphy->state); + + pm->stats.last_wake_event = jiffies; + pm->stats.doze_time += pm->stats.last_wake_event - + pm->stats.last_doze_event; +out: + return err; +} +EXPORT_SYMBOL_GPL(mt792xe_mcu_drv_pmctrl); + +int mt792xe_mcu_fw_pmctrl(struct mt792x_dev *dev) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt76_connac_pm *pm = &dev->pm; + int i; + + for (i = 0; i < MT792x_DRV_OWN_RETRY_COUNT; i++) { + mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_SET_OWN); + if (mt76_poll_msec_tick(dev, MT_CONN_ON_LPCTL, + PCIE_LPCR_HOST_OWN_SYNC, 4, 50, 1)) + break; + } + + if (i == MT792x_DRV_OWN_RETRY_COUNT) { + dev_err(dev->mt76.dev, "firmware own failed\n"); + clear_bit(MT76_STATE_PM, &mphy->state); + return -EIO; + } + + pm->stats.last_doze_event = jiffies; + pm->stats.awake_time += pm->stats.last_doze_event - + pm->stats.last_wake_event; + + return 0; +} +EXPORT_SYMBOL_GPL(mt792xe_mcu_fw_pmctrl); + +int mt792x_load_firmware(struct mt792x_dev *dev) +{ + int ret; + + ret = mt76_connac2_load_patch(&dev->mt76, mt792x_patch_name(dev)); + if (ret) + return ret; + + if (mt76_is_sdio(&dev->mt76)) { + /* activate again */ + ret = __mt792x_mcu_fw_pmctrl(dev); + if (!ret) + ret = __mt792x_mcu_drv_pmctrl(dev); + } + + ret = mt76_connac2_load_ram(&dev->mt76, mt792x_ram_name(dev), NULL); + if (ret) + return ret; + + if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY, + MT_TOP_MISC2_FW_N9_RDY, 1500)) { + dev_err(dev->mt76.dev, "Timeout for initializing firmware\n"); + + return -EIO; + } + +#ifdef CONFIG_PM + dev->mt76.hw->wiphy->wowlan = &mt76_connac_wowlan_support; +#endif /* CONFIG_PM */ + + dev_dbg(dev->mt76.dev, "Firmware init done\n"); + + return 0; +} +EXPORT_SYMBOL_GPL(mt792x_load_firmware); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt792x_debugfs.c new file mode 100644 index 000000000000..9858d9a93851 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt792x_debugfs.c @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. 
*/ + +#include "mt792x.h" + +static void +mt792x_ampdu_stat_read_phy(struct mt792x_phy *phy, + struct seq_file *file) +{ + struct mt792x_dev *dev = file->private; + int bound[15], range[4], i; + + if (!phy) + return; + + mt792x_mac_update_mib_stats(phy); + + /* Tx ampdu stat */ + for (i = 0; i < ARRAY_SIZE(range); i++) + range[i] = mt76_rr(dev, MT_MIB_ARNG(0, i)); + + for (i = 0; i < ARRAY_SIZE(bound); i++) + bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1; + + seq_puts(file, "\nPhy0\n"); + + seq_printf(file, "Length: %8d | ", bound[0]); + for (i = 0; i < ARRAY_SIZE(bound) - 1; i++) + seq_printf(file, "%3d %3d | ", bound[i] + 1, bound[i + 1]); + + seq_puts(file, "\nCount: "); + for (i = 0; i < ARRAY_SIZE(bound); i++) + seq_printf(file, "%8d | ", phy->mt76->aggr_stats[i]); + seq_puts(file, "\n"); + + seq_printf(file, "BA miss count: %d\n", phy->mib.ba_miss_cnt); +} + +int mt792x_tx_stats_show(struct seq_file *file, void *data) +{ + struct mt792x_dev *dev = file->private; + struct mt792x_phy *phy = &dev->phy; + struct mt76_mib_stats *mib = &phy->mib; + int i; + + mt792x_mutex_acquire(dev); + + mt792x_ampdu_stat_read_phy(phy, file); + + seq_puts(file, "Tx MSDU stat:\n"); + for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) { + seq_printf(file, "AMSDU pack count of %d MSDU in TXD: %8d ", + i + 1, mib->tx_amsdu[i]); + if (mib->tx_amsdu_cnt) + seq_printf(file, "(%3d%%)\n", + mib->tx_amsdu[i] * 100 / mib->tx_amsdu_cnt); + else + seq_puts(file, "\n"); + } + + mt792x_mutex_release(dev); + + return 0; +} +EXPORT_SYMBOL_GPL(mt792x_tx_stats_show); + +int mt792x_queues_acq(struct seq_file *s, void *data) +{ + struct mt792x_dev *dev = dev_get_drvdata(s->private); + int i; + + mt792x_mutex_acquire(dev); + + for (i = 0; i < 4; i++) { + u32 ctrl, val, qlen = 0; + int j; + + val = mt76_rr(dev, MT_PLE_AC_QEMPTY(i)); + ctrl = BIT(31) | BIT(11) | (i << 24); + + for (j = 0; j < 32; j++) { + if (val & BIT(j)) + continue; + + mt76_wr(dev, MT_PLE_FL_Q0_CTRL, ctrl | j); + qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL, + GENMASK(11, 0)); + } + seq_printf(s, "AC%d: queued=%d\n", i, qlen); + } + + mt792x_mutex_release(dev); + + return 0; +} +EXPORT_SYMBOL_GPL(mt792x_queues_acq); + +int mt792x_queues_read(struct seq_file *s, void *data) +{ + struct mt792x_dev *dev = dev_get_drvdata(s->private); + struct { + struct mt76_queue *q; + char *queue; + } queue_map[] = { + { dev->mphy.q_tx[MT_TXQ_BE], "WFDMA0" }, + { dev->mt76.q_mcu[MT_MCUQ_WM], "MCUWM" }, + { dev->mt76.q_mcu[MT_MCUQ_FWDL], "MCUFWQ" }, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(queue_map); i++) { + struct mt76_queue *q = queue_map[i].q; + + if (!q) + continue; + + seq_printf(s, + "%s: queued=%d head=%d tail=%d\n", + queue_map[i].queue, q->queued, q->head, + q->tail); + } + + return 0; +} +EXPORT_SYMBOL_GPL(mt792x_queues_read); + +int mt792x_pm_stats(struct seq_file *s, void *data) +{ + struct mt792x_dev *dev = dev_get_drvdata(s->private); + struct mt76_connac_pm *pm = &dev->pm; + + unsigned long awake_time = pm->stats.awake_time; + unsigned long doze_time = pm->stats.doze_time; + + if (!test_bit(MT76_STATE_PM, &dev->mphy.state)) + awake_time += jiffies - pm->stats.last_wake_event; + else + doze_time += jiffies - pm->stats.last_doze_event; + + seq_printf(s, "awake time: %14u\ndoze time: %15u\n", + jiffies_to_msecs(awake_time), + jiffies_to_msecs(doze_time)); + + seq_printf(s, "low power wakes: %9d\n", pm->stats.lp_wake); + + return 0; +} +EXPORT_SYMBOL_GPL(mt792x_pm_stats); + +int mt792x_pm_idle_timeout_set(void *data, u64 val) +{ + struct mt792x_dev *dev 
= data; + + dev->pm.idle_timeout = msecs_to_jiffies(val); + + return 0; +} +EXPORT_SYMBOL_GPL(mt792x_pm_idle_timeout_set); + +int mt792x_pm_idle_timeout_get(void *data, u64 *val) +{ + struct mt792x_dev *dev = data; + + *val = jiffies_to_msecs(dev->pm.idle_timeout); + + return 0; +} +EXPORT_SYMBOL_GPL(mt792x_pm_idle_timeout_get); diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_dma.c b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c new file mode 100644 index 000000000000..488326ce5ed4 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c @@ -0,0 +1,363 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. */ + +#include <linux/module.h> +#include <linux/firmware.h> + +#include "mt792x.h" +#include "dma.h" +#include "trace.h" + +irqreturn_t mt792x_irq_handler(int irq, void *dev_instance) +{ + struct mt792x_dev *dev = dev_instance; + + mt76_wr(dev, dev->irq_map->host_irq_enable, 0); + + if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state)) + return IRQ_NONE; + + tasklet_schedule(&dev->mt76.irq_tasklet); + + return IRQ_HANDLED; +} +EXPORT_SYMBOL_GPL(mt792x_irq_handler); + +void mt792x_irq_tasklet(unsigned long data) +{ + struct mt792x_dev *dev = (struct mt792x_dev *)data; + const struct mt792x_irq_map *irq_map = dev->irq_map; + u32 intr, mask = 0; + + mt76_wr(dev, irq_map->host_irq_enable, 0); + + intr = mt76_rr(dev, MT_WFDMA0_HOST_INT_STA); + intr &= dev->mt76.mmio.irqmask; + mt76_wr(dev, MT_WFDMA0_HOST_INT_STA, intr); + + trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask); + + mask |= intr & (irq_map->rx.data_complete_mask | + irq_map->rx.wm_complete_mask | + irq_map->rx.wm2_complete_mask); + if (intr & dev->irq_map->tx.mcu_complete_mask) + mask |= dev->irq_map->tx.mcu_complete_mask; + + if (intr & MT_INT_MCU_CMD) { + u32 intr_sw; + + intr_sw = mt76_rr(dev, MT_MCU_CMD); + /* ack MCU2HOST_SW_INT_STA */ + mt76_wr(dev, MT_MCU_CMD, intr_sw); + if (intr_sw & MT_MCU_CMD_WAKE_RX_PCIE) { + mask |= irq_map->rx.data_complete_mask; + intr |= irq_map->rx.data_complete_mask; + } + } + + mt76_set_irq_mask(&dev->mt76, irq_map->host_irq_enable, mask, 0); + + if (intr & dev->irq_map->tx.all_complete_mask) + napi_schedule(&dev->mt76.tx_napi); + + if (intr & irq_map->rx.wm_complete_mask) + napi_schedule(&dev->mt76.napi[MT_RXQ_MCU]); + + if (intr & irq_map->rx.wm2_complete_mask) + napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]); + + if (intr & irq_map->rx.data_complete_mask) + napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]); +} +EXPORT_SYMBOL_GPL(mt792x_irq_tasklet); + +void mt792x_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + const struct mt792x_irq_map *irq_map = dev->irq_map; + + if (q == MT_RXQ_MAIN) + mt76_connac_irq_enable(mdev, irq_map->rx.data_complete_mask); + else if (q == MT_RXQ_MCU_WA) + mt76_connac_irq_enable(mdev, irq_map->rx.wm2_complete_mask); + else + mt76_connac_irq_enable(mdev, irq_map->rx.wm_complete_mask); +} +EXPORT_SYMBOL_GPL(mt792x_rx_poll_complete); + +#define PREFETCH(base, depth) ((base) << 16 | (depth)) +static void mt792x_dma_prefetch(struct mt792x_dev *dev) +{ + if (is_mt7925(&dev->mt76)) { + /* rx ring */ + mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0000, 0x4)); + mt76_wr(dev, MT_WFDMA0_RX_RING1_EXT_CTRL, PREFETCH(0x0040, 0x4)); + mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x0080, 0x4)); + mt76_wr(dev, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x00c0, 0x4)); + /* tx ring */ + mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x0100, 
0x10)); + mt76_wr(dev, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x0200, 0x10)); + mt76_wr(dev, MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x0300, 0x10)); + mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x0400, 0x10)); + mt76_wr(dev, MT_WFDMA0_TX_RING15_EXT_CTRL, PREFETCH(0x0500, 0x4)); + mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x0540, 0x4)); + } else { + /* rx ring */ + mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0, 0x4)); + mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x40, 0x4)); + mt76_wr(dev, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x80, 0x4)); + mt76_wr(dev, MT_WFDMA0_RX_RING4_EXT_CTRL, PREFETCH(0xc0, 0x4)); + mt76_wr(dev, MT_WFDMA0_RX_RING5_EXT_CTRL, PREFETCH(0x100, 0x4)); + /* tx ring */ + mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x140, 0x4)); + mt76_wr(dev, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x180, 0x4)); + mt76_wr(dev, MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x1c0, 0x4)); + mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x200, 0x4)); + mt76_wr(dev, MT_WFDMA0_TX_RING4_EXT_CTRL, PREFETCH(0x240, 0x4)); + mt76_wr(dev, MT_WFDMA0_TX_RING5_EXT_CTRL, PREFETCH(0x280, 0x4)); + mt76_wr(dev, MT_WFDMA0_TX_RING6_EXT_CTRL, PREFETCH(0x2c0, 0x4)); + mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x340, 0x4)); + mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4)); + } +} + +int mt792x_dma_enable(struct mt792x_dev *dev) +{ + if (is_mt7925(&dev->mt76)) + mt76_rmw(dev, MT_UWFDMA0_GLO_CFG_EXT1, BIT(28), BIT(28)); + + /* configure prefetch settings */ + mt792x_dma_prefetch(dev); + + /* reset dma idx */ + mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0); + + /* configure delay interrupt */ + mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0); + + mt76_set(dev, MT_WFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_TX_WB_DDONE | + MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN | + MT_WFDMA0_GLO_CFG_CLK_GAT_DIS | + MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | + MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN | + MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); + + mt76_set(dev, MT_WFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN); + + mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT); + + /* enable interrupts for TX/RX rings */ + mt76_connac_irq_enable(&dev->mt76, + dev->irq_map->tx.all_complete_mask | + dev->irq_map->rx.data_complete_mask | + dev->irq_map->rx.wm2_complete_mask | + dev->irq_map->rx.wm_complete_mask | + MT_INT_MCU_CMD); + mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE); + + return 0; +} +EXPORT_SYMBOL_GPL(mt792x_dma_enable); + +static int +mt792x_dma_reset(struct mt792x_dev *dev, bool force) +{ + int i, err; + + err = mt792x_dma_disable(dev, force); + if (err) + return err; + + /* reset hw queues */ + for (i = 0; i < __MT_TXQ_MAX; i++) + mt76_queue_reset(dev, dev->mphy.q_tx[i]); + + for (i = 0; i < __MT_MCUQ_MAX; i++) + mt76_queue_reset(dev, dev->mt76.q_mcu[i]); + + mt76_for_each_q_rx(&dev->mt76, i) + mt76_queue_reset(dev, &dev->mt76.q_rx[i]); + + mt76_tx_status_check(&dev->mt76, true); + + return mt792x_dma_enable(dev); +} + +int mt792x_wpdma_reset(struct mt792x_dev *dev, bool force) +{ + int i, err; + + /* clean up hw queues */ + for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++) + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true); + + for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++) + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true); + + mt76_for_each_q_rx(&dev->mt76, i) + mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]); + + if (force) { + err = mt792x_wfsys_reset(dev); + if (err) + return err; + } + err = mt792x_dma_reset(dev, force); + if 
(err) + return err; + + mt76_for_each_q_rx(&dev->mt76, i) + mt76_queue_rx_reset(dev, i); + + return 0; +} +EXPORT_SYMBOL_GPL(mt792x_wpdma_reset); + +int mt792x_wpdma_reinit_cond(struct mt792x_dev *dev) +{ + struct mt76_connac_pm *pm = &dev->pm; + int err; + + /* check if the wpdma must be reinitialized */ + if (mt792x_dma_need_reinit(dev)) { + /* disable interrupts */ + mt76_wr(dev, dev->irq_map->host_irq_enable, 0); + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); + + err = mt792x_wpdma_reset(dev, false); + if (err) { + dev_err(dev->mt76.dev, "wpdma reset failed\n"); + return err; + } + + /* enable interrupts */ + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); + pm->stats.lp_wake++; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mt792x_wpdma_reinit_cond); + +int mt792x_dma_disable(struct mt792x_dev *dev, bool force) +{ + /* disable WFDMA0 */ + mt76_clear(dev, MT_WFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN | + MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN | + MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | + MT_WFDMA0_GLO_CFG_OMIT_RX_INFO | + MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); + + if (!mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_TX_DMA_BUSY | + MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1)) + return -ETIMEDOUT; + + /* disable dmashdl */ + mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0, + MT_WFDMA0_CSR_TX_DMASHDL_ENABLE); + mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS); + + if (force) { + /* reset */ + mt76_clear(dev, MT_WFDMA0_RST, + MT_WFDMA0_RST_DMASHDL_ALL_RST | + MT_WFDMA0_RST_LOGIC_RST); + + mt76_set(dev, MT_WFDMA0_RST, + MT_WFDMA0_RST_DMASHDL_ALL_RST | + MT_WFDMA0_RST_LOGIC_RST); + } + + return 0; +} +EXPORT_SYMBOL_GPL(mt792x_dma_disable); + +void mt792x_dma_cleanup(struct mt792x_dev *dev) +{ + /* disable */ + mt76_clear(dev, MT_WFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_TX_DMA_EN | + MT_WFDMA0_GLO_CFG_RX_DMA_EN | + MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN | + MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | + MT_WFDMA0_GLO_CFG_OMIT_RX_INFO | + MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); + + mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_TX_DMA_BUSY | + MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1); + + /* reset */ + mt76_clear(dev, MT_WFDMA0_RST, + MT_WFDMA0_RST_DMASHDL_ALL_RST | + MT_WFDMA0_RST_LOGIC_RST); + + mt76_set(dev, MT_WFDMA0_RST, + MT_WFDMA0_RST_DMASHDL_ALL_RST | + MT_WFDMA0_RST_LOGIC_RST); + + mt76_dma_cleanup(&dev->mt76); +} +EXPORT_SYMBOL_GPL(mt792x_dma_cleanup); + +int mt792x_poll_tx(struct napi_struct *napi, int budget) +{ + struct mt792x_dev *dev; + + dev = container_of(napi, struct mt792x_dev, mt76.tx_napi); + + if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { + napi_complete(napi); + queue_work(dev->mt76.wq, &dev->pm.wake_work); + return 0; + } + + mt76_connac_tx_cleanup(&dev->mt76); + if (napi_complete(napi)) + mt76_connac_irq_enable(&dev->mt76, + dev->irq_map->tx.all_complete_mask); + mt76_connac_pm_unref(&dev->mphy, &dev->pm); + + return 0; +} +EXPORT_SYMBOL_GPL(mt792x_poll_tx); + +int mt792x_poll_rx(struct napi_struct *napi, int budget) +{ + struct mt792x_dev *dev; + int done; + + dev = container_of(napi->dev, struct mt792x_dev, mt76.napi_dev); + + if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { + napi_complete(napi); + queue_work(dev->mt76.wq, &dev->pm.wake_work); + return 0; + } + done = mt76_dma_rx_poll(napi, budget); + mt76_connac_pm_unref(&dev->mphy, &dev->pm); + + return done; +} +EXPORT_SYMBOL_GPL(mt792x_poll_rx); + +int mt792x_wfsys_reset(struct mt792x_dev *dev) +{ + u32 addr = is_mt7921(&dev->mt76) ? 
0x18000140 : 0x7c000140; + + mt76_clear(dev, addr, WFSYS_SW_RST_B); + msleep(50); + mt76_set(dev, addr, WFSYS_SW_RST_B); + + if (!__mt76_poll_msec(&dev->mt76, addr, WFSYS_SW_INIT_DONE, + WFSYS_SW_INIT_DONE, 500)) + return -ETIMEDOUT; + + return 0; +} +EXPORT_SYMBOL_GPL(mt792x_wfsys_reset); + diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_mac.c b/drivers/net/wireless/mediatek/mt76/mt792x_mac.c new file mode 100644 index 000000000000..5d1f8229fdc1 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt792x_mac.c @@ -0,0 +1,385 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. */ + +#include <linux/module.h> + +#include "mt792x.h" +#include "mt792x_regs.h" + +void mt792x_mac_work(struct work_struct *work) +{ + struct mt792x_phy *phy; + struct mt76_phy *mphy; + + mphy = (struct mt76_phy *)container_of(work, struct mt76_phy, + mac_work.work); + phy = mphy->priv; + + mt792x_mutex_acquire(phy->dev); + + mt76_update_survey(mphy); + if (++mphy->mac_work_count == 2) { + mphy->mac_work_count = 0; + + mt792x_mac_update_mib_stats(phy); + } + + mt792x_mutex_release(phy->dev); + + mt76_tx_status_check(mphy->dev, false); + ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work, + MT792x_WATCHDOG_TIME); +} +EXPORT_SYMBOL_GPL(mt792x_mac_work); + +void mt792x_mac_set_timeing(struct mt792x_phy *phy) +{ + s16 coverage_class = phy->coverage_class; + struct mt792x_dev *dev = phy->dev; + u32 val, reg_offset; + u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) | + FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48); + u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) | + FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28); + bool is_2ghz = phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ; + int sifs = is_2ghz ? 10 : 16, offset; + + if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state)) + return; + + mt76_set(dev, MT_ARB_SCR(0), + MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE); + udelay(1); + + offset = 3 * coverage_class; + reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) | + FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset); + + mt76_wr(dev, MT_TMAC_CDTR(0), cck + reg_offset); + mt76_wr(dev, MT_TMAC_ODTR(0), ofdm + reg_offset); + mt76_wr(dev, MT_TMAC_ICR0(0), + FIELD_PREP(MT_IFS_EIFS, 360) | + FIELD_PREP(MT_IFS_RIFS, 2) | + FIELD_PREP(MT_IFS_SIFS, sifs) | + FIELD_PREP(MT_IFS_SLOT, phy->slottime)); + + if (phy->slottime < 20 || !is_2ghz) + val = MT792x_CFEND_RATE_DEFAULT; + else + val = MT792x_CFEND_RATE_11B; + + mt76_rmw_field(dev, MT_AGG_ACR0(0), MT_AGG_ACR_CFEND_RATE, val); + mt76_clear(dev, MT_ARB_SCR(0), + MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE); +} +EXPORT_SYMBOL_GPL(mt792x_mac_set_timeing); + +void mt792x_mac_update_mib_stats(struct mt792x_phy *phy) +{ + struct mt76_mib_stats *mib = &phy->mib; + struct mt792x_dev *dev = phy->dev; + int i, aggr0 = 0, aggr1; + u32 val; + + mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(0), + MT_MIB_SDR3_FCS_ERR_MASK); + mib->ack_fail_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR3(0), + MT_MIB_ACK_FAIL_COUNT_MASK); + mib->ba_miss_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR2(0), + MT_MIB_BA_FAIL_COUNT_MASK); + mib->rts_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR0(0), + MT_MIB_RTS_COUNT_MASK); + mib->rts_retries_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR1(0), + MT_MIB_RTS_FAIL_COUNT_MASK); + + mib->tx_ampdu_cnt += mt76_rr(dev, MT_MIB_SDR12(0)); + mib->tx_mpdu_attempts_cnt += mt76_rr(dev, MT_MIB_SDR14(0)); + mib->tx_mpdu_success_cnt += mt76_rr(dev, MT_MIB_SDR15(0)); + + val = mt76_rr(dev, MT_MIB_SDR32(0)); + mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR9_EBF_CNT_MASK, val); + 
mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR9_IBF_CNT_MASK, val); + + val = mt76_rr(dev, MT_ETBF_TX_APP_CNT(0)); + mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, val); + mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, val); + + val = mt76_rr(dev, MT_ETBF_RX_FB_CNT(0)); + mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, val); + mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, val); + mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, val); + mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, val); + + mib->rx_mpdu_cnt += mt76_rr(dev, MT_MIB_SDR5(0)); + mib->rx_ampdu_cnt += mt76_rr(dev, MT_MIB_SDR22(0)); + mib->rx_ampdu_bytes_cnt += mt76_rr(dev, MT_MIB_SDR23(0)); + mib->rx_ba_cnt += mt76_rr(dev, MT_MIB_SDR31(0)); + + for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) { + val = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i)); + mib->tx_amsdu[i] += val; + mib->tx_amsdu_cnt += val; + } + + for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) { + u32 val2; + + val = mt76_rr(dev, MT_TX_AGG_CNT(0, i)); + val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i)); + + phy->mt76->aggr_stats[aggr0++] += val & 0xffff; + phy->mt76->aggr_stats[aggr0++] += val >> 16; + phy->mt76->aggr_stats[aggr1++] += val2 & 0xffff; + phy->mt76->aggr_stats[aggr1++] += val2 >> 16; + } +} +EXPORT_SYMBOL_GPL(mt792x_mac_update_mib_stats); + +struct mt76_wcid *mt792x_rx_get_wcid(struct mt792x_dev *dev, u16 idx, + bool unicast) +{ + struct mt792x_sta *sta; + struct mt76_wcid *wcid; + + if (idx >= ARRAY_SIZE(dev->mt76.wcid)) + return NULL; + + wcid = rcu_dereference(dev->mt76.wcid[idx]); + if (unicast || !wcid) + return wcid; + + if (!wcid->sta) + return NULL; + + sta = container_of(wcid, struct mt792x_sta, wcid); + if (!sta->vif) + return NULL; + + return &sta->vif->sta.wcid; +} +EXPORT_SYMBOL_GPL(mt792x_rx_get_wcid); + +static void +mt792x_mac_rssi_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct sk_buff *skb = priv; + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb); + + if (status->signal > 0) + return; + + if (!ether_addr_equal(vif->addr, hdr->addr1)) + return; + + ewma_rssi_add(&mvif->rssi, -status->signal); +} + +void mt792x_mac_assoc_rssi(struct mt792x_dev *dev, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb); + + if (!ieee80211_is_assoc_resp(hdr->frame_control) && + !ieee80211_is_auth(hdr->frame_control)) + return; + + ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), + IEEE80211_IFACE_ITER_RESUME_ALL, + mt792x_mac_rssi_iter, skb); +} +EXPORT_SYMBOL_GPL(mt792x_mac_assoc_rssi); + +void mt792x_mac_reset_counters(struct mt792x_phy *phy) +{ + struct mt792x_dev *dev = phy->dev; + int i; + + for (i = 0; i < 4; i++) { + mt76_rr(dev, MT_TX_AGG_CNT(0, i)); + mt76_rr(dev, MT_TX_AGG_CNT2(0, i)); + } + + dev->mt76.phy.survey_time = ktime_get_boottime(); + memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats)); + + /* reset airtime counters */ + mt76_rr(dev, MT_MIB_SDR9(0)); + mt76_rr(dev, MT_MIB_SDR36(0)); + mt76_rr(dev, MT_MIB_SDR37(0)); + + mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR); + mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR); +} +EXPORT_SYMBOL_GPL(mt792x_mac_reset_counters); + +static u8 +mt792x_phy_get_nf(struct mt792x_phy *phy, int idx) +{ + return 0; +} + +static void +mt792x_phy_update_channel(struct mt76_phy *mphy, int idx) +{ + struct mt792x_dev *dev = 
container_of(mphy->dev, struct mt792x_dev, mt76); + struct mt792x_phy *phy = (struct mt792x_phy *)mphy->priv; + struct mt76_channel_state *state; + u64 busy_time, tx_time, rx_time, obss_time; + int nf; + + busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx), + MT_MIB_SDR9_BUSY_MASK); + tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx), + MT_MIB_SDR36_TXTIME_MASK); + rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx), + MT_MIB_SDR37_RXTIME_MASK); + obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_AIRTIME14(idx), + MT_MIB_OBSSTIME_MASK); + + nf = mt792x_phy_get_nf(phy, idx); + if (!phy->noise) + phy->noise = nf << 4; + else if (nf) + phy->noise += nf - (phy->noise >> 4); + + state = mphy->chan_state; + state->cc_busy += busy_time; + state->cc_tx += tx_time; + state->cc_rx += rx_time + obss_time; + state->cc_bss_rx += rx_time; + state->noise = -(phy->noise >> 4); +} + +void mt792x_update_channel(struct mt76_phy *mphy) +{ + struct mt792x_dev *dev = container_of(mphy->dev, struct mt792x_dev, mt76); + + if (mt76_connac_pm_wake(mphy, &dev->pm)) + return; + + mt792x_phy_update_channel(mphy, 0); + /* reset obss airtime */ + mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR); + mt76_connac_power_save_sched(mphy, &dev->pm); +} +EXPORT_SYMBOL_GPL(mt792x_update_channel); + +void mt792x_reset(struct mt76_dev *mdev) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + struct mt76_connac_pm *pm = &dev->pm; + + if (!dev->hw_init_done) + return; + + if (dev->hw_full_reset) + return; + + if (pm->suspended) + return; + + queue_work(dev->mt76.wq, &dev->reset_work); +} +EXPORT_SYMBOL_GPL(mt792x_reset); + +void mt792x_mac_init_band(struct mt792x_dev *dev, u8 band) +{ + u32 mask, set; + + mt76_rmw_field(dev, MT_TMAC_CTCR0(band), + MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f); + mt76_set(dev, MT_TMAC_CTCR0(band), + MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN | + MT_TMAC_CTCR0_INS_DDLMT_EN); + + mt76_set(dev, MT_WF_RMAC_MIB_TIME0(band), MT_WF_RMAC_MIB_RXTIME_EN); + mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band), MT_WF_RMAC_MIB_RXTIME_EN); + + /* enable MIB tx-rx time reporting */ + mt76_set(dev, MT_MIB_SCR1(band), MT_MIB_TXDUR_EN); + mt76_set(dev, MT_MIB_SCR1(band), MT_MIB_RXDUR_EN); + + mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 1536); + /* disable rx rate report by default due to hw issues */ + mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN); + + /* filter out non-resp frames and get instantaneous signal reporting */ + mask = MT_WTBLOFF_TOP_RSCR_RCPI_MODE | MT_WTBLOFF_TOP_RSCR_RCPI_PARAM; + set = FIELD_PREP(MT_WTBLOFF_TOP_RSCR_RCPI_MODE, 0) | + FIELD_PREP(MT_WTBLOFF_TOP_RSCR_RCPI_PARAM, 0x3); + mt76_rmw(dev, MT_WTBLOFF_TOP_RSCR(band), mask, set); +} +EXPORT_SYMBOL_GPL(mt792x_mac_init_band); + +void mt792x_pm_wake_work(struct work_struct *work) +{ + struct mt792x_dev *dev; + struct mt76_phy *mphy; + + dev = (struct mt792x_dev *)container_of(work, struct mt792x_dev, + pm.wake_work); + mphy = dev->phy.mt76; + + if (!mt792x_mcu_drv_pmctrl(dev)) { + struct mt76_dev *mdev = &dev->mt76; + int i; + + if (mt76_is_sdio(mdev)) { + mt76_connac_pm_dequeue_skbs(mphy, &dev->pm); + mt76_worker_schedule(&mdev->sdio.txrx_worker); + } else { + local_bh_disable(); + mt76_for_each_q_rx(mdev, i) + napi_schedule(&mdev->napi[i]); + local_bh_enable(); + mt76_connac_pm_dequeue_skbs(mphy, &dev->pm); + mt76_connac_tx_cleanup(mdev); + } + if (test_bit(MT76_STATE_RUNNING, &mphy->state)) + ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, + MT792x_WATCHDOG_TIME); + } + + 
ieee80211_wake_queues(mphy->hw); + wake_up(&dev->pm.wait); +} +EXPORT_SYMBOL_GPL(mt792x_pm_wake_work); + +void mt792x_pm_power_save_work(struct work_struct *work) +{ + struct mt792x_dev *dev; + unsigned long delta; + struct mt76_phy *mphy; + + dev = (struct mt792x_dev *)container_of(work, struct mt792x_dev, + pm.ps_work.work); + mphy = dev->phy.mt76; + + delta = dev->pm.idle_timeout; + if (test_bit(MT76_HW_SCANNING, &mphy->state) || + test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) || + dev->fw_assert) + goto out; + + if (mutex_is_locked(&dev->mt76.mutex)) + /* if the mt76 mutex is held we should not put the device + * to sleep since we are currently accessing the device + * register map. We need to wait for the next power_save + * trigger. + */ + goto out; + + if (time_is_after_jiffies(dev->pm.last_activity + delta)) { + delta = dev->pm.last_activity + delta - jiffies; + goto out; + } + + if (!mt792x_mcu_fw_pmctrl(dev)) { + cancel_delayed_work_sync(&mphy->mac_work); + return; + } +out: + queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta); +} +EXPORT_SYMBOL_GPL(mt792x_pm_power_save_work); diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_regs.h b/drivers/net/wireless/mediatek/mt76/mt792x_regs.h new file mode 100644 index 000000000000..a99af23e4b56 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt792x_regs.h @@ -0,0 +1,479 @@ +/* SPDX-License-Identifier: ISC */ +/* Copyright (C) 2023 MediaTek Inc. */ + +#ifndef __MT792X_REGS_H +#define __MT792X_REGS_H + +/* MCU WFDMA1 */ +#define MT_MCU_WFDMA1_BASE 0x3000 +#define MT_MCU_WFDMA1(ofs) (MT_MCU_WFDMA1_BASE + (ofs)) + +#define MT_MCU_INT_EVENT MT_MCU_WFDMA1(0x108) +#define MT_MCU_INT_EVENT_DMA_STOPPED BIT(0) +#define MT_MCU_INT_EVENT_DMA_INIT BIT(1) +#define MT_MCU_INT_EVENT_SER_TRIGGER BIT(2) +#define MT_MCU_INT_EVENT_RESET_DONE BIT(3) + +#define MT_PLE_BASE 0x820c0000 +#define MT_PLE(ofs) (MT_PLE_BASE + (ofs)) + +#define MT_PLE_FL_Q0_CTRL MT_PLE(0x3e0) +#define MT_PLE_FL_Q1_CTRL MT_PLE(0x3e4) +#define MT_PLE_FL_Q2_CTRL MT_PLE(0x3e8) +#define MT_PLE_FL_Q3_CTRL MT_PLE(0x3ec) + +#define MT_PLE_AC_QEMPTY(_n) MT_PLE(0x500 + 0x40 * (_n)) +#define MT_PLE_AMSDU_PACK_MSDU_CNT(n) MT_PLE(0x10e0 + ((n) << 2)) + +/* TMAC: band 0(0x21000), band 1(0xa1000) */ +#define MT_WF_TMAC_BASE(_band) ((_band) ? 0x820f4000 : 0x820e4000) +#define MT_WF_TMAC(_band, ofs) (MT_WF_TMAC_BASE(_band) + (ofs)) + +#define MT_TMAC_TCR0(_band) MT_WF_TMAC(_band, 0) +#define MT_TMAC_TCR0_TBTT_STOP_CTRL BIT(25) + +#define MT_TMAC_CDTR(_band) MT_WF_TMAC(_band, 0x090) +#define MT_TMAC_ODTR(_band) MT_WF_TMAC(_band, 0x094) +#define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0) +#define MT_TIMEOUT_VAL_CCA GENMASK(31, 16) + +#define MT_TMAC_ICR0(_band) MT_WF_TMAC(_band, 0x0a4) +#define MT_IFS_EIFS GENMASK(8, 0) +#define MT_IFS_RIFS GENMASK(14, 10) +#define MT_IFS_SIFS GENMASK(22, 16) +#define MT_IFS_SLOT GENMASK(30, 24) + +#define MT_TMAC_CTCR0(_band) MT_WF_TMAC(_band, 0x0f4) +#define MT_TMAC_CTCR0_INS_DDLMT_REFTIME GENMASK(5, 0) +#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17) +#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18) + +#define MT_TMAC_TRCR0(_band) MT_WF_TMAC(_band, 0x09c) +#define MT_TMAC_TFCR0(_band) MT_WF_TMAC(_band, 0x1e0) + +#define MT_WF_DMA_BASE(_band) ((_band) ? 
0x820f7000 : 0x820e7000) +#define MT_WF_DMA(_band, ofs) (MT_WF_DMA_BASE(_band) + (ofs)) + +#define MT_DMA_DCR0(_band) MT_WF_DMA(_band, 0x000) +#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 3) +#define MT_DMA_DCR0_RXD_G5_EN BIT(23) + +/* WTBLOFF TOP: band 0(0x820e9000),band 1(0x820f9000) */ +#define MT_WTBLOFF_TOP_BASE(_band) ((_band) ? 0x820f9000 : 0x820e9000) +#define MT_WTBLOFF_TOP(_band, ofs) (MT_WTBLOFF_TOP_BASE(_band) + (ofs)) + +#define MT_WTBLOFF_TOP_RSCR(_band) MT_WTBLOFF_TOP(_band, 0x008) +#define MT_WTBLOFF_TOP_RSCR_RCPI_MODE GENMASK(31, 30) +#define MT_WTBLOFF_TOP_RSCR_RCPI_PARAM GENMASK(25, 24) + +/* LPON: band 0(0x24200), band 1(0xa4200) */ +#define MT_WF_LPON_BASE(_band) ((_band) ? 0x820fb000 : 0x820eb000) +#define MT_WF_LPON(_band, ofs) (MT_WF_LPON_BASE(_band) + (ofs)) + +#define MT_LPON_UTTR0(_band) MT_WF_LPON(_band, 0x080) +#define MT_LPON_UTTR1(_band) MT_WF_LPON(_band, 0x084) + +#define MT_LPON_TCR(_band, n) MT_WF_LPON(_band, 0x0a8 + (n) * 4) +#define MT_LPON_TCR_SW_MODE GENMASK(1, 0) +#define MT_LPON_TCR_SW_WRITE BIT(0) + +/* ETBF: band 0(0x24000), band 1(0xa4000) */ +#define MT_WF_ETBF_BASE(_band) ((_band) ? 0x820fa000 : 0x820ea000) +#define MT_WF_ETBF(_band, ofs) (MT_WF_ETBF_BASE(_band) + (ofs)) + +#define MT_ETBF_TX_APP_CNT(_band) MT_WF_ETBF(_band, 0x150) +#define MT_ETBF_TX_IBF_CNT GENMASK(31, 16) +#define MT_ETBF_TX_EBF_CNT GENMASK(15, 0) + +#define MT_ETBF_RX_FB_CNT(_band) MT_WF_ETBF(_band, 0x158) +#define MT_ETBF_RX_FB_ALL GENMASK(31, 24) +#define MT_ETBF_RX_FB_HE GENMASK(23, 16) +#define MT_ETBF_RX_FB_VHT GENMASK(15, 8) +#define MT_ETBF_RX_FB_HT GENMASK(7, 0) + +/* MIB: band 0(0x24800), band 1(0xa4800) */ +#define MT_WF_MIB_BASE(_band) ((_band) ? 0x820fd000 : 0x820ed000) +#define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE(_band) + (ofs)) + +#define MT_MIB_SCR1(_band) MT_WF_MIB(_band, 0x004) +#define MT_MIB_TXDUR_EN BIT(8) +#define MT_MIB_RXDUR_EN BIT(9) + +#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x698) +#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(31, 16) + +#define MT_MIB_SDR5(_band) MT_WF_MIB(_band, 0x780) + +#define MT_MIB_SDR9(_band) MT_WF_MIB(_band, 0x02c) +#define MT_MIB_SDR9_BUSY_MASK GENMASK(23, 0) + +#define MT_MIB_SDR12(_band) MT_WF_MIB(_band, 0x558) +#define MT_MIB_SDR14(_band) MT_WF_MIB(_band, 0x564) +#define MT_MIB_SDR15(_band) MT_WF_MIB(_band, 0x568) + +#define MT_MIB_SDR16(_band) MT_WF_MIB(_band, 0x048) +#define MT_MIB_SDR16_BUSY_MASK GENMASK(23, 0) + +#define MT_MIB_SDR22(_band) MT_WF_MIB(_band, 0x770) +#define MT_MIB_SDR23(_band) MT_WF_MIB(_band, 0x774) +#define MT_MIB_SDR31(_band) MT_WF_MIB(_band, 0x55c) + +#define MT_MIB_SDR32(_band) MT_WF_MIB(_band, 0x7a8) +#define MT_MIB_SDR9_IBF_CNT_MASK GENMASK(31, 16) +#define MT_MIB_SDR9_EBF_CNT_MASK GENMASK(15, 0) + +#define MT_MIB_SDR34(_band) MT_WF_MIB(_band, 0x090) +#define MT_MIB_MU_BF_TX_CNT GENMASK(15, 0) + +#define MT_MIB_SDR36(_band) MT_WF_MIB(_band, 0x054) +#define MT_MIB_SDR36_TXTIME_MASK GENMASK(23, 0) +#define MT_MIB_SDR37(_band) MT_WF_MIB(_band, 0x058) +#define MT_MIB_SDR37_RXTIME_MASK GENMASK(23, 0) + +#define MT_MIB_DR8(_band) MT_WF_MIB(_band, 0x0c0) +#define MT_MIB_DR9(_band) MT_WF_MIB(_band, 0x0c4) +#define MT_MIB_DR11(_band) MT_WF_MIB(_band, 0x0cc) + +#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, 0x100 + ((n) << 4)) +#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16) + +#define MT_MIB_MB_BSDR0(_band) MT_WF_MIB(_band, 0x688) +#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0) +#define MT_MIB_MB_BSDR1(_band) MT_WF_MIB(_band, 0x690) +#define MT_MIB_RTS_FAIL_COUNT_MASK GENMASK(15, 0) +#define 
MT_MIB_MB_BSDR2(_band) MT_WF_MIB(_band, 0x518) +#define MT_MIB_BA_FAIL_COUNT_MASK GENMASK(15, 0) +#define MT_MIB_MB_BSDR3(_band) MT_WF_MIB(_band, 0x520) +#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(15, 0) + +#define MT_MIB_MB_SDR2(_band, n) MT_WF_MIB(_band, 0x108 + ((n) << 4)) +#define MT_MIB_FRAME_RETRIES_COUNT_MASK GENMASK(15, 0) + +#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0x7dc + ((n) << 2)) +#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, 0x7ec + ((n) << 2)) +#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, 0x0b0 + ((n) << 2)) +#define MT_MIB_ARNCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(7, 0)) + +#define MT_WTBLON_TOP_BASE 0x820d4000 +#define MT_WTBLON_TOP(ofs) (MT_WTBLON_TOP_BASE + (ofs)) + +#define MT_WTBL_UPDATE_BUSY BIT(31) + +#define MT_WTBL_ITCR MT_WTBLON_TOP(0x3b0) +#define MT_WTBL_ITCR_WR BIT(16) +#define MT_WTBL_ITCR_EXEC BIT(31) +#define MT_WTBL_ITDR0 MT_WTBLON_TOP(0x3b8) +#define MT_WTBL_ITDR1 MT_WTBLON_TOP(0x3bc) +#define MT_WTBL_SPE_IDX_SEL BIT(6) + +#define MT_WTBL_BASE 0x820d8000 +#define MT_WTBL_LMAC_ID GENMASK(14, 8) +#define MT_WTBL_LMAC_DW GENMASK(7, 2) +#define MT_WTBL_LMAC_OFFS(_id, _dw) (MT_WTBL_BASE | \ + FIELD_PREP(MT_WTBL_LMAC_ID, _id) | \ + FIELD_PREP(MT_WTBL_LMAC_DW, _dw)) + +/* AGG: band 0(0x20800), band 1(0xa0800) */ +#define MT_WF_AGG_BASE(_band) ((_band) ? 0x820f2000 : 0x820e2000) +#define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs)) + +#define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, 0x05c + (_n) * 4) +#define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, 0x06c + (_n) * 4) +#define MT_AGG_PCR0_MM_PROT BIT(0) +#define MT_AGG_PCR0_GF_PROT BIT(1) +#define MT_AGG_PCR0_BW20_PROT BIT(2) +#define MT_AGG_PCR0_BW40_PROT BIT(4) +#define MT_AGG_PCR0_BW80_PROT BIT(6) +#define MT_AGG_PCR0_ERP_PROT GENMASK(12, 8) +#define MT_AGG_PCR0_VHT_PROT BIT(13) +#define MT_AGG_PCR0_PTA_WIN_DIS BIT(15) + +#define MT_AGG_PCR1_RTS0_NUM_THRES GENMASK(31, 23) +#define MT_AGG_PCR1_RTS0_LEN_THRES GENMASK(19, 0) + +#define MT_AGG_ACR0(_band) MT_WF_AGG(_band, 0x084) +#define MT_AGG_ACR_CFEND_RATE GENMASK(13, 0) +#define MT_AGG_ACR_BAR_RATE GENMASK(29, 16) + +#define MT_AGG_MRCR(_band) MT_WF_AGG(_band, 0x098) +#define MT_AGG_MRCR_BAR_CNT_LIMIT GENMASK(15, 12) +#define MT_AGG_MRCR_LAST_RTS_CTS_RN BIT(6) +#define MT_AGG_MRCR_RTS_FAIL_LIMIT GENMASK(11, 7) +#define MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT GENMASK(28, 24) + +#define MT_AGG_ATCR1(_band) MT_WF_AGG(_band, 0x0f0) +#define MT_AGG_ATCR3(_band) MT_WF_AGG(_band, 0x0f4) + +/* ARB: band 0(0x20c00), band 1(0xa0c00) */ +#define MT_WF_ARB_BASE(_band) ((_band) ? 0x820f3000 : 0x820e3000) +#define MT_WF_ARB(_band, ofs) (MT_WF_ARB_BASE(_band) + (ofs)) + +#define MT_ARB_SCR(_band) MT_WF_ARB(_band, 0x080) +#define MT_ARB_SCR_TX_DISABLE BIT(8) +#define MT_ARB_SCR_RX_DISABLE BIT(9) + +#define MT_ARB_DRNGR0(_band, _n) MT_WF_ARB(_band, 0x194 + (_n) * 4) + +/* RMAC: band 0(0x21400), band 1(0xa1400) */ +#define MT_WF_RMAC_BASE(_band) ((_band) ? 
0x820f5000 : 0x820e5000) +#define MT_WF_RMAC(_band, ofs) (MT_WF_RMAC_BASE(_band) + (ofs)) + +#define MT_WF_RFCR(_band) MT_WF_RMAC(_band, 0x000) +#define MT_WF_RFCR_DROP_STBC_MULTI BIT(0) +#define MT_WF_RFCR_DROP_FCSFAIL BIT(1) +#define MT_WF_RFCR_DROP_VERSION BIT(3) +#define MT_WF_RFCR_DROP_PROBEREQ BIT(4) +#define MT_WF_RFCR_DROP_MCAST BIT(5) +#define MT_WF_RFCR_DROP_BCAST BIT(6) +#define MT_WF_RFCR_DROP_MCAST_FILTERED BIT(7) +#define MT_WF_RFCR_DROP_A3_MAC BIT(8) +#define MT_WF_RFCR_DROP_A3_BSSID BIT(9) +#define MT_WF_RFCR_DROP_A2_BSSID BIT(10) +#define MT_WF_RFCR_DROP_OTHER_BEACON BIT(11) +#define MT_WF_RFCR_DROP_FRAME_REPORT BIT(12) +#define MT_WF_RFCR_DROP_CTL_RSV BIT(13) +#define MT_WF_RFCR_DROP_CTS BIT(14) +#define MT_WF_RFCR_DROP_RTS BIT(15) +#define MT_WF_RFCR_DROP_DUPLICATE BIT(16) +#define MT_WF_RFCR_DROP_OTHER_BSS BIT(17) +#define MT_WF_RFCR_DROP_OTHER_UC BIT(18) +#define MT_WF_RFCR_DROP_OTHER_TIM BIT(19) +#define MT_WF_RFCR_DROP_NDPA BIT(20) +#define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21) + +#define MT_WF_RFCR1(_band) MT_WF_RMAC(_band, 0x004) +#define MT_WF_RFCR1_DROP_ACK BIT(4) +#define MT_WF_RFCR1_DROP_BF_POLL BIT(5) +#define MT_WF_RFCR1_DROP_BA BIT(6) +#define MT_WF_RFCR1_DROP_CFEND BIT(7) +#define MT_WF_RFCR1_DROP_CFACK BIT(8) + +#define MT_WF_RMAC_MIB_TIME0(_band) MT_WF_RMAC(_band, 0x03c4) +#define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31) +#define MT_WF_RMAC_MIB_RXTIME_EN BIT(30) + +#define MT_WF_RMAC_MIB_AIRTIME14(_band) MT_WF_RMAC(_band, 0x03b8) +#define MT_MIB_OBSSTIME_MASK GENMASK(23, 0) +#define MT_WF_RMAC_MIB_AIRTIME0(_band) MT_WF_RMAC(_band, 0x0380) + +/* WFDMA0 */ +#define MT_WFDMA0_BASE 0xd4000 +#define MT_WFDMA0(ofs) (MT_WFDMA0_BASE + (ofs)) + +#define MT_WFDMA0_RST MT_WFDMA0(0x100) +#define MT_WFDMA0_RST_LOGIC_RST BIT(4) +#define MT_WFDMA0_RST_DMASHDL_ALL_RST BIT(5) + +#define MT_WFDMA0_BUSY_ENA MT_WFDMA0(0x13c) +#define MT_WFDMA0_BUSY_ENA_TX_FIFO0 BIT(0) +#define MT_WFDMA0_BUSY_ENA_TX_FIFO1 BIT(1) +#define MT_WFDMA0_BUSY_ENA_RX_FIFO BIT(2) + +#define MT_MCU_CMD MT_WFDMA0(0x1f0) +#define MT_MCU_CMD_WAKE_RX_PCIE BIT(0) +#define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1) +#define MT_MCU_CMD_STOP_DMA BIT(2) +#define MT_MCU_CMD_RESET_DONE BIT(3) +#define MT_MCU_CMD_RECOVERY_DONE BIT(4) +#define MT_MCU_CMD_NORMAL_STATE BIT(5) +#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1) + +#define MT_MCU2HOST_SW_INT_ENA MT_WFDMA0(0x1f4) + +#define MT_WFDMA0_HOST_INT_STA MT_WFDMA0(0x200) +#define HOST_RX_DONE_INT_STS0 BIT(0) /* Rx mcu */ +#define HOST_RX_DONE_INT_STS2 BIT(2) /* Rx data */ +#define HOST_RX_DONE_INT_STS4 BIT(22) /* Rx mcu after fw downloaded */ +#define HOST_TX_DONE_INT_STS16 BIT(26) +#define HOST_TX_DONE_INT_STS17 BIT(27) /* MCU tx done */ + +#define MT_WFDMA0_GLO_CFG MT_WFDMA0(0x208) +#define MT_WFDMA0_GLO_CFG_TX_DMA_EN BIT(0) +#define MT_WFDMA0_GLO_CFG_TX_DMA_BUSY BIT(1) +#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2) +#define MT_WFDMA0_GLO_CFG_RX_DMA_BUSY BIT(3) +#define MT_WFDMA0_GLO_CFG_TX_WB_DDONE BIT(6) +#define MT_WFDMA0_GLO_CFG_FW_DWLD_BYPASS_DMASHDL BIT(9) +#define MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12) +#define MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN BIT(15) +#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21) +#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO BIT(27) +#define MT_WFDMA0_GLO_CFG_OMIT_TX_INFO BIT(28) +#define MT_WFDMA0_GLO_CFG_CLK_GAT_DIS BIT(30) + +#define HOST_RX_DONE_INT_ENA0 BIT(0) +#define HOST_RX_DONE_INT_ENA1 BIT(1) +#define HOST_RX_DONE_INT_ENA2 BIT(2) +#define HOST_RX_DONE_INT_ENA3 BIT(3) +#define HOST_TX_DONE_INT_ENA0 BIT(4) +#define 
HOST_TX_DONE_INT_ENA1 BIT(5) +#define HOST_TX_DONE_INT_ENA2 BIT(6) +#define HOST_TX_DONE_INT_ENA3 BIT(7) +#define HOST_TX_DONE_INT_ENA4 BIT(8) +#define HOST_TX_DONE_INT_ENA5 BIT(9) +#define HOST_TX_DONE_INT_ENA6 BIT(10) +#define HOST_TX_DONE_INT_ENA7 BIT(11) +#define HOST_RX_COHERENT_EN BIT(20) +#define HOST_TX_COHERENT_EN BIT(21) +#define MCU2HOST_SW_INT_ENA BIT(29) +#define HOST_TX_DONE_INT_ENA18 BIT(30) + +#define MT_INT_MCU_CMD MCU2HOST_SW_INT_ENA + +#define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c) +#define MT_WFDMA0_RST_DRX_PTR MT_WFDMA0(0x280) +#define MT_WFDMA0_GLO_CFG_EXT0 MT_WFDMA0(0x2b0) +#define MT_WFDMA0_CSR_TX_DMASHDL_ENABLE BIT(6) +#define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0) + +#define MT_WFDMA0_TX_RING0_EXT_CTRL MT_WFDMA0(0x600) +#define MT_WFDMA0_TX_RING1_EXT_CTRL MT_WFDMA0(0x604) +#define MT_WFDMA0_TX_RING2_EXT_CTRL MT_WFDMA0(0x608) +#define MT_WFDMA0_TX_RING3_EXT_CTRL MT_WFDMA0(0x60c) +#define MT_WFDMA0_TX_RING4_EXT_CTRL MT_WFDMA0(0x610) +#define MT_WFDMA0_TX_RING5_EXT_CTRL MT_WFDMA0(0x614) +#define MT_WFDMA0_TX_RING6_EXT_CTRL MT_WFDMA0(0x618) +#define MT_WFDMA0_TX_RING15_EXT_CTRL MT_WFDMA0(0x63c) +#define MT_WFDMA0_TX_RING16_EXT_CTRL MT_WFDMA0(0x640) +#define MT_WFDMA0_TX_RING17_EXT_CTRL MT_WFDMA0(0x644) + +#define MT_WPDMA0_MAX_CNT_MASK GENMASK(7, 0) +#define MT_WPDMA0_BASE_PTR_MASK GENMASK(31, 16) + +#define MT_WFDMA0_RX_RING0_EXT_CTRL MT_WFDMA0(0x680) +#define MT_WFDMA0_RX_RING1_EXT_CTRL MT_WFDMA0(0x684) +#define MT_WFDMA0_RX_RING2_EXT_CTRL MT_WFDMA0(0x688) +#define MT_WFDMA0_RX_RING3_EXT_CTRL MT_WFDMA0(0x68c) +#define MT_WFDMA0_RX_RING4_EXT_CTRL MT_WFDMA0(0x690) +#define MT_WFDMA0_RX_RING5_EXT_CTRL MT_WFDMA0(0x694) +#define MT_WFDMA0_RX_RING6_EXT_CTRL MT_WFDMA0(0x698) +#define MT_WFDMA0_RX_RING7_EXT_CTRL MT_WFDMA0(0x69c) + +#define MT_TX_RING_BASE MT_WFDMA0(0x300) +#define MT_RX_EVENT_RING_BASE MT_WFDMA0(0x500) + +/* WFDMA CSR */ +#define MT_WFDMA_EXT_CSR_BASE 0xd7000 +#define MT_WFDMA_EXT_CSR(ofs) (MT_WFDMA_EXT_CSR_BASE + (ofs)) +#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44) +#define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0) + +#define MT_SWDEF_BASE 0x41f200 +#define MT_SWDEF(ofs) (MT_SWDEF_BASE + (ofs)) +#define MT_SWDEF_MODE MT_SWDEF(0x3c) +#define MT_SWDEF_NORMAL_MODE 0 +#define MT_SWDEF_ICAP_MODE 1 +#define MT_SWDEF_SPECTRUM_MODE 2 + +#define MT_TOP_BASE 0x18060000 +#define MT_TOP(ofs) (MT_TOP_BASE + (ofs)) + +#define MT_TOP_LPCR_HOST_BAND0 MT_TOP(0x10) +#define MT_TOP_LPCR_HOST_FW_OWN BIT(0) +#define MT_TOP_LPCR_HOST_DRV_OWN BIT(1) + +#define MT_TOP_MISC MT_TOP(0xf0) +#define MT_TOP_MISC_FW_STATE GENMASK(2, 0) + +#define MT_MCU_WPDMA0_BASE 0x54000000 +#define MT_MCU_WPDMA0(ofs) (MT_MCU_WPDMA0_BASE + (ofs)) + +#define MT_WFDMA_DUMMY_CR MT_MCU_WPDMA0(0x120) +#define MT_WFDMA_NEED_REINIT BIT(1) + +#define MT_CBTOP_RGU(ofs) (0x70002000 + (ofs)) +#define MT_CBTOP_RGU_WF_SUBSYS_RST MT_CBTOP_RGU(0x600) +#define MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH BIT(0) + +#define MT_HW_BOUND 0x70010020 +#define MT_HW_CHIPID 0x70010200 +#define MT_HW_REV 0x70010204 + +#define MT_PCIE_MAC_BASE 0x10000 +#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs)) +#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188) +#define MT_PCIE_MAC_PM MT_PCIE_MAC(0x194) +#define MT_PCIE_MAC_PM_L0S_DIS BIT(8) + +#define MT_DMA_SHDL(ofs) (0x7c026000 + (ofs)) +#define MT_DMASHDL_SW_CONTROL MT_DMA_SHDL(0x004) +#define MT_DMASHDL_DMASHDL_BYPASS BIT(28) +#define MT_DMASHDL_OPTIONAL MT_DMA_SHDL(0x008) +#define MT_DMASHDL_PAGE MT_DMA_SHDL(0x00c) +#define MT_DMASHDL_GROUP_SEQ_ORDER BIT(16) +#define 
MT_DMASHDL_REFILL MT_DMA_SHDL(0x010) +#define MT_DMASHDL_REFILL_MASK GENMASK(31, 16) +#define MT_DMASHDL_PKT_MAX_SIZE MT_DMA_SHDL(0x01c) +#define MT_DMASHDL_PKT_MAX_SIZE_PLE GENMASK(11, 0) +#define MT_DMASHDL_PKT_MAX_SIZE_PSE GENMASK(27, 16) + +#define MT_DMASHDL_GROUP_QUOTA(_n) MT_DMA_SHDL(0x020 + ((_n) << 2)) +#define MT_DMASHDL_GROUP_QUOTA_MIN GENMASK(11, 0) +#define MT_DMASHDL_GROUP_QUOTA_MAX GENMASK(27, 16) + +#define MT_DMASHDL_Q_MAP(_n) MT_DMA_SHDL(0x060 + ((_n) << 2)) +#define MT_DMASHDL_Q_MAP_MASK GENMASK(3, 0) +#define MT_DMASHDL_Q_MAP_SHIFT(_n) (4 * ((_n) % 8)) + +#define MT_DMASHDL_SCHED_SET(_n) MT_DMA_SHDL(0x070 + ((_n) << 2)) + +#define MT_WFDMA_HOST_CONFIG 0x7c027030 +#define MT_WFDMA_HOST_CONFIG_USB_RXEVT_EP4_EN BIT(6) + +#define MT_UMAC(ofs) (0x74000000 + (ofs)) +#define MT_UDMA_TX_QSEL MT_UMAC(0x008) +#define MT_FW_DL_EN BIT(3) + +#define MT_UDMA_WLCFG_1 MT_UMAC(0x00c) +#define MT_WL_RX_AGG_PKT_LMT GENMASK(7, 0) +#define MT_WL_TX_TMOUT_LMT GENMASK(27, 8) + +#define MT_UDMA_WLCFG_0 MT_UMAC(0x18) +#define MT_WL_RX_AGG_TO GENMASK(7, 0) +#define MT_WL_RX_AGG_LMT GENMASK(15, 8) +#define MT_WL_TX_TMOUT_FUNC_EN BIT(16) +#define MT_WL_TX_DPH_CHK_EN BIT(17) +#define MT_WL_RX_MPSZ_PAD0 BIT(18) +#define MT_WL_RX_FLUSH BIT(19) +#define MT_TICK_1US_EN BIT(20) +#define MT_WL_RX_AGG_EN BIT(21) +#define MT_WL_RX_EN BIT(22) +#define MT_WL_TX_EN BIT(23) +#define MT_WL_RX_BUSY BIT(30) +#define MT_WL_TX_BUSY BIT(31) + +#define MT_UDMA_CONN_INFRA_STATUS MT_UMAC(0xa20) +#define MT_UDMA_CONN_WFSYS_INIT_DONE BIT(22) +#define MT_UDMA_CONN_INFRA_STATUS_SEL MT_UMAC(0xa24) + +#define MT_SSUSB_EPCTL_CSR(ofs) (0x74011800 + (ofs)) +#define MT_SSUSB_EPCTL_CSR_EP_RST_OPT MT_SSUSB_EPCTL_CSR(0x090) + +#define MT_UWFDMA0(ofs) (0x7c024000 + (ofs)) +#define MT_UWFDMA0_GLO_CFG MT_UWFDMA0(0x208) +#define MT_UWFDMA0_GLO_CFG_EXT0 MT_UWFDMA0(0x2b0) +#define MT_UWFDMA0_GLO_CFG_EXT1 MT_UWFDMA0(0x2b4) +#define MT_UWFDMA0_TX_RING_EXT_CTRL(_n) MT_UWFDMA0(0x600 + ((_n) << 2)) + +#define MT_CONN_STATUS 0x7c053c10 +#define MT_WIFI_PATCH_DL_STATE BIT(0) + +#define MT_CONN_ON_LPCTL 0x7c060010 +#define PCIE_LPCR_HOST_SET_OWN BIT(0) +#define PCIE_LPCR_HOST_CLR_OWN BIT(1) +#define PCIE_LPCR_HOST_OWN_SYNC BIT(2) + +#define MT_CONN_ON_MISC 0x7c0600f0 +#define MT_TOP_MISC2_FW_PWR_ON BIT(0) +#define MT_TOP_MISC2_FW_N9_ON BIT(1) +#define MT_TOP_MISC2_FW_N9_RDY GENMASK(1, 0) + +#define MT_WF_SW_DEF_CR(ofs) (0x401a00 + (ofs)) +#define MT_WF_SW_DEF_CR_USB_MCU_EVENT MT_WF_SW_DEF_CR(0x028) +#define MT_WF_SW_SER_TRIGGER_SUSPEND BIT(6) +#define MT_WF_SW_SER_DONE_SUSPEND BIT(7) + +#define WFSYS_SW_RST_B BIT(0) +#define WFSYS_SW_INIT_DONE BIT(4) + +#endif /* __MT792X_REGS_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_trace.c b/drivers/net/wireless/mediatek/mt76/mt792x_trace.c new file mode 100644 index 000000000000..b6f284fb929d --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt792x_trace.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (C) 2023 Lorenzo Bianconi <lorenzo@kernel.org> + */ + +#include <linux/module.h> + +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "mt792x_trace.h" + +EXPORT_TRACEPOINT_SYMBOL_GPL(lp_event); + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921_trace.h b/drivers/net/wireless/mediatek/mt76/mt792x_trace.h index 9bc4db67f352..61f2aa260656 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921_trace.h +++ b/drivers/net/wireless/mediatek/mt76/mt792x_trace.h @@ -1,27 +1,27 @@ /* SPDX-License-Identifier: ISC */ /* - * Copyright (C) 2021 
Lorenzo Bianconi <lorenzo@kernel.org> + * Copyright (C) 2023 Lorenzo Bianconi <lorenzo@kernel.org> */ -#if !defined(__MT7921_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) -#define __MT7921_TRACE_H +#if !defined(__MT792X_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define __MT792X_TRACE_H #include <linux/tracepoint.h> -#include "mt7921.h" +#include "mt792x.h" #undef TRACE_SYSTEM -#define TRACE_SYSTEM mt7921 +#define TRACE_SYSTEM mt792x #define MAXNAME 32 #define DEV_ENTRY __array(char, wiphy_name, 32) -#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \ +#define DEV_ASSIGN strscpy(__entry->wiphy_name, \ wiphy_name(mt76_hw(dev)->wiphy), MAXNAME) #define DEV_PR_FMT "%s" #define DEV_PR_ARG __entry->wiphy_name #define LP_STATE_PR_ARG __entry->lp_state ? "lp ready" : "lp not ready" TRACE_EVENT(lp_event, - TP_PROTO(struct mt7921_dev *dev, u8 lp_state), + TP_PROTO(struct mt792x_dev *dev, u8 lp_state), TP_ARGS(dev, lp_state), @@ -46,6 +46,6 @@ TRACE_EVENT(lp_event, #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . #undef TRACE_INCLUDE_FILE -#define TRACE_INCLUDE_FILE mt7921_trace +#define TRACE_INCLUDE_FILE mt792x_trace #include <trace/define_trace.h> diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c new file mode 100644 index 000000000000..2dd283caed36 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2023 MediaTek Inc. + * + * Author: Lorenzo Bianconi <lorenzo@kernel.org> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/usb.h> + +#include "mt792x.h" +#include "mt76_connac2_mac.h" + +u32 mt792xu_rr(struct mt76_dev *dev, u32 addr) +{ + u32 ret; + + mutex_lock(&dev->usb.usb_ctrl_mtx); + ret = ___mt76u_rr(dev, MT_VEND_READ_EXT, + USB_DIR_IN | MT_USB_TYPE_VENDOR, addr); + mutex_unlock(&dev->usb.usb_ctrl_mtx); + + return ret; +} +EXPORT_SYMBOL_GPL(mt792xu_rr); + +void mt792xu_wr(struct mt76_dev *dev, u32 addr, u32 val) +{ + mutex_lock(&dev->usb.usb_ctrl_mtx); + ___mt76u_wr(dev, MT_VEND_WRITE_EXT, + USB_DIR_OUT | MT_USB_TYPE_VENDOR, addr, val); + mutex_unlock(&dev->usb.usb_ctrl_mtx); +} +EXPORT_SYMBOL_GPL(mt792xu_wr); + +u32 mt792xu_rmw(struct mt76_dev *dev, u32 addr, u32 mask, u32 val) +{ + mutex_lock(&dev->usb.usb_ctrl_mtx); + val |= ___mt76u_rr(dev, MT_VEND_READ_EXT, + USB_DIR_IN | MT_USB_TYPE_VENDOR, addr) & ~mask; + ___mt76u_wr(dev, MT_VEND_WRITE_EXT, + USB_DIR_OUT | MT_USB_TYPE_VENDOR, addr, val); + mutex_unlock(&dev->usb.usb_ctrl_mtx); + + return val; +} +EXPORT_SYMBOL_GPL(mt792xu_rmw); + +void mt792xu_copy(struct mt76_dev *dev, u32 offset, const void *data, int len) +{ + struct mt76_usb *usb = &dev->usb; + int ret, i = 0, batch_len; + const u8 *val = data; + + len = round_up(len, 4); + + mutex_lock(&usb->usb_ctrl_mtx); + while (i < len) { + batch_len = min_t(int, usb->data_len, len - i); + memcpy(usb->data, val + i, batch_len); + ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT, + USB_DIR_OUT | MT_USB_TYPE_VENDOR, + (offset + i) >> 16, offset + i, + usb->data, batch_len); + if (ret < 0) + break; + + i += batch_len; + } + mutex_unlock(&usb->usb_ctrl_mtx); +} +EXPORT_SYMBOL_GPL(mt792xu_copy); + +int mt792xu_mcu_power_on(struct mt792x_dev *dev) +{ + int ret; + + ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON, + USB_DIR_OUT | MT_USB_TYPE_VENDOR, + 0x0, 0x1, NULL, 0); + if (ret) + return ret; + + if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON, + MT_TOP_MISC2_FW_PWR_ON, 500)) { + 
dev_err(dev->mt76.dev, "Timeout for power on\n"); + ret = -EIO; + } + + return ret; +} +EXPORT_SYMBOL_GPL(mt792xu_mcu_power_on); + +static void mt792xu_cleanup(struct mt792x_dev *dev) +{ + clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); + mt792xu_wfsys_reset(dev); + skb_queue_purge(&dev->mt76.mcu.res_q); + mt76u_queues_deinit(&dev->mt76); +} + +static u32 mt792xu_uhw_rr(struct mt76_dev *dev, u32 addr) +{ + u32 ret; + + mutex_lock(&dev->usb.usb_ctrl_mtx); + ret = ___mt76u_rr(dev, MT_VEND_DEV_MODE, + USB_DIR_IN | MT_USB_TYPE_UHW_VENDOR, addr); + mutex_unlock(&dev->usb.usb_ctrl_mtx); + + return ret; +} + +static void mt792xu_uhw_wr(struct mt76_dev *dev, u32 addr, u32 val) +{ + mutex_lock(&dev->usb.usb_ctrl_mtx); + ___mt76u_wr(dev, MT_VEND_WRITE, + USB_DIR_OUT | MT_USB_TYPE_UHW_VENDOR, addr, val); + mutex_unlock(&dev->usb.usb_ctrl_mtx); +} + +static void mt792xu_dma_prefetch(struct mt792x_dev *dev) +{ + mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(0), + MT_WPDMA0_MAX_CNT_MASK, 4); + mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(0), + MT_WPDMA0_BASE_PTR_MASK, 0x80); + + mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(1), + MT_WPDMA0_MAX_CNT_MASK, 4); + mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(1), + MT_WPDMA0_BASE_PTR_MASK, 0xc0); + + mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(2), + MT_WPDMA0_MAX_CNT_MASK, 4); + mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(2), + MT_WPDMA0_BASE_PTR_MASK, 0x100); + + mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(3), + MT_WPDMA0_MAX_CNT_MASK, 4); + mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(3), + MT_WPDMA0_BASE_PTR_MASK, 0x140); + + mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(4), + MT_WPDMA0_MAX_CNT_MASK, 4); + mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(4), + MT_WPDMA0_BASE_PTR_MASK, 0x180); + + mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(16), + MT_WPDMA0_MAX_CNT_MASK, 4); + mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(16), + MT_WPDMA0_BASE_PTR_MASK, 0x280); + + mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(17), + MT_WPDMA0_MAX_CNT_MASK, 4); + mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(17), + MT_WPDMA0_BASE_PTR_MASK, 0x2c0); +} + +static void mt792xu_wfdma_init(struct mt792x_dev *dev) +{ + mt792xu_dma_prefetch(dev); + + mt76_clear(dev, MT_UWFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_OMIT_RX_INFO); + mt76_set(dev, MT_UWFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | + MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 | + MT_WFDMA0_GLO_CFG_FW_DWLD_BYPASS_DMASHDL | + MT_WFDMA0_GLO_CFG_TX_DMA_EN | + MT_WFDMA0_GLO_CFG_RX_DMA_EN); + + /* disable dmashdl */ + mt76_clear(dev, MT_UWFDMA0_GLO_CFG_EXT0, + MT_WFDMA0_CSR_TX_DMASHDL_ENABLE); + mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS); + + mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT); +} + +static int mt792xu_dma_rx_evt_ep4(struct mt792x_dev *dev) +{ + if (!mt76_poll(dev, MT_UWFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000)) + return -ETIMEDOUT; + + mt76_clear(dev, MT_UWFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_RX_DMA_EN); + mt76_set(dev, MT_WFDMA_HOST_CONFIG, + MT_WFDMA_HOST_CONFIG_USB_RXEVT_EP4_EN); + mt76_set(dev, MT_UWFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_RX_DMA_EN); + + return 0; +} + +static void mt792xu_epctl_rst_opt(struct mt792x_dev *dev, bool reset) +{ + u32 val; + + /* usb endpoint reset opt + * bits[4,9]: out blk ep 4-9 + * bits[20,21]: in blk ep 4-5 + * bits[22]: in int ep 6 + */ + val = mt792xu_uhw_rr(&dev->mt76, MT_SSUSB_EPCTL_CSR_EP_RST_OPT); + if (reset) + val |= GENMASK(9, 4) | GENMASK(22, 20); + else + val &= ~(GENMASK(9, 4) | GENMASK(22, 20)); + mt792xu_uhw_wr(&dev->mt76, MT_SSUSB_EPCTL_CSR_EP_RST_OPT, val); +} + +int 
mt792xu_dma_init(struct mt792x_dev *dev, bool resume) +{ + int err; + + mt792xu_wfdma_init(dev); + + mt76_clear(dev, MT_UDMA_WLCFG_0, MT_WL_RX_FLUSH); + + mt76_set(dev, MT_UDMA_WLCFG_0, + MT_WL_RX_EN | MT_WL_TX_EN | + MT_WL_RX_MPSZ_PAD0 | MT_TICK_1US_EN); + mt76_clear(dev, MT_UDMA_WLCFG_0, + MT_WL_RX_AGG_TO | MT_WL_RX_AGG_LMT); + mt76_clear(dev, MT_UDMA_WLCFG_1, MT_WL_RX_AGG_PKT_LMT); + + if (resume) + return 0; + + err = mt792xu_dma_rx_evt_ep4(dev); + if (err) + return err; + + mt792xu_epctl_rst_opt(dev, false); + + return 0; +} +EXPORT_SYMBOL_GPL(mt792xu_dma_init); + +int mt792xu_wfsys_reset(struct mt792x_dev *dev) +{ + u32 val; + int i; + + mt792xu_epctl_rst_opt(dev, false); + + val = mt792xu_uhw_rr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST); + val |= MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH; + mt792xu_uhw_wr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST, val); + + usleep_range(10, 20); + + val = mt792xu_uhw_rr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST); + val &= ~MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH; + mt792xu_uhw_wr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST, val); + + mt792xu_uhw_wr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS_SEL, 0); + for (i = 0; i < MT792x_WFSYS_INIT_RETRY_COUNT; i++) { + val = mt792xu_uhw_rr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS); + if (val & MT_UDMA_CONN_WFSYS_INIT_DONE) + break; + + msleep(100); + } + + if (i == MT792x_WFSYS_INIT_RETRY_COUNT) + return -ETIMEDOUT; + + return 0; +} +EXPORT_SYMBOL_GPL(mt792xu_wfsys_reset); + +int mt792xu_init_reset(struct mt792x_dev *dev) +{ + set_bit(MT76_RESET, &dev->mphy.state); + + wake_up(&dev->mt76.mcu.wait); + skb_queue_purge(&dev->mt76.mcu.res_q); + + mt76u_stop_rx(&dev->mt76); + mt76u_stop_tx(&dev->mt76); + + mt792xu_wfsys_reset(dev); + + clear_bit(MT76_RESET, &dev->mphy.state); + + return mt76u_resume_rx(&dev->mt76); +} +EXPORT_SYMBOL_GPL(mt792xu_init_reset); + +void mt792xu_stop(struct ieee80211_hw *hw) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + + mt76u_stop_tx(&dev->mt76); + mt792x_stop(hw); +} +EXPORT_SYMBOL_GPL(mt792xu_stop); + +void mt792xu_disconnect(struct usb_interface *usb_intf) +{ + struct mt792x_dev *dev = usb_get_intfdata(usb_intf); + + cancel_work_sync(&dev->init_work); + if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state)) + return; + + mt76_unregister_device(&dev->mt76); + mt792xu_cleanup(dev); + + usb_set_intfdata(usb_intf, NULL); + usb_put_dev(interface_to_usbdev(usb_intf)); + + mt76_free_device(&dev->mt76); +} +EXPORT_SYMBOL_GPL(mt792xu_disconnect); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7996/Kconfig index 1afa2f662e47..bb44d4a5e2dc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt7996/Kconfig @@ -7,7 +7,7 @@ config MT7996E depends on MAC80211 depends on PCI help - This adds support for MT7996-based wireless PCIe devices, + This adds support for MT7996-based PCIe wireless devices, which support concurrent tri-band operation at 6GHz, 5GHz, and 2.4GHz IEEE 802.11be 4x4:4SS 4096-QAM, 320MHz channels. 
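A note on the accessor pattern in mt792x_usb.c above: mt792xu_rmw() is a classic locked read-modify-write over the USB vendor-request channel. It takes the USB control mutex, reads the current register value, keeps the bits outside the caller's mask, ORs in the new value, and writes the result back before releasing the mutex; holding the mutex across both transfers is what prevents two concurrent callers from losing each other's bit updates. A minimal self-contained sketch of the same pattern follows; reg_read()/reg_write() are hypothetical stand-ins for the ___mt76u_rr()/___mt76u_wr() vendor-request helpers, not part of the mt76 API.

/* Sketch only: a locked read-modify-write in the style of mt792xu_rmw(),
 * assuming hypothetical bus primitives reg_read()/reg_write().
 */
#include <pthread.h>
#include <stdint.h>

uint32_t reg_read(uint32_t addr);		/* assumed bus read */
void reg_write(uint32_t addr, uint32_t val);	/* assumed bus write */

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

static uint32_t reg_rmw(uint32_t addr, uint32_t mask, uint32_t val)
{
	pthread_mutex_lock(&reg_lock);
	val |= reg_read(addr) & ~mask;	/* preserve the bits outside @mask */
	reg_write(addr, val);
	pthread_mutex_unlock(&reg_lock);

	return val;
}

Usage mirrors the driver's helper: reg_rmw(addr, 0xff, 0x4) replaces only the low byte of the register, leaving every other bit as it was.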
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c index 513ab4ba41c9..4d40ec7ff57f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c @@ -474,10 +474,10 @@ mt7996_ampdu_stat_read_phy(struct mt7996_phy *phy, struct seq_file *file) static void mt7996_txbf_stat_read_phy(struct mt7996_phy *phy, struct seq_file *s) { + struct mt76_mib_stats *mib = &phy->mib; static const char * const bw[] = { "BW20", "BW40", "BW80", "BW160" }; - struct mib_stats *mib = &phy->mib; /* Tx Beamformer monitor */ seq_puts(s, "\nTx Beamformer applied PPDU counts: "); @@ -523,7 +523,7 @@ mt7996_tx_stats_show(struct seq_file *file, void *data) { struct mt7996_phy *phy = file->private; struct mt7996_dev *dev = phy->dev; - struct mib_stats *mib = &phy->mib; + struct mt76_mib_stats *mib = &phy->mib; int i; u32 attempts, success, per; diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c index 534143465d9b..586e247a1e06 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c @@ -128,7 +128,7 @@ static void mt7996_dma_disable(struct mt7996_dev *dev, bool reset) } } -static int mt7996_dma_enable(struct mt7996_dev *dev) +void mt7996_dma_start(struct mt7996_dev *dev, bool reset) { u32 hif1_ofs = 0; u32 irq_mask; @@ -136,6 +136,50 @@ static int mt7996_dma_enable(struct mt7996_dev *dev) if (dev->hif2) hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0); + /* enable WFDMA Tx/Rx */ + if (!reset) { + mt76_set(dev, MT_WFDMA0_GLO_CFG, + MT_WFDMA0_GLO_CFG_TX_DMA_EN | + MT_WFDMA0_GLO_CFG_RX_DMA_EN | + MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | + MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); + + if (dev->hif2) + mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs, + MT_WFDMA0_GLO_CFG_TX_DMA_EN | + MT_WFDMA0_GLO_CFG_RX_DMA_EN | + MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | + MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); + } + + /* enable interrupts for TX/RX rings */ + irq_mask = MT_INT_MCU_CMD; + if (reset) + goto done; + + irq_mask = MT_INT_RX_DONE_MCU | MT_INT_TX_DONE_MCU; + + if (!dev->mphy.band_idx) + irq_mask |= MT_INT_BAND0_RX_DONE; + + if (dev->dbdc_support) + irq_mask |= MT_INT_BAND1_RX_DONE; + + if (dev->tbtc_support) + irq_mask |= MT_INT_BAND2_RX_DONE; + +done: + mt7996_irq_enable(dev, irq_mask); + mt7996_irq_disable(dev, 0); +} + +static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset) +{ + u32 hif1_ofs = 0; + + if (dev->hif2) + hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0); + /* reset dma idx */ mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0); if (dev->hif2) @@ -170,13 +214,6 @@ static int mt7996_dma_enable(struct mt7996_dev *dev) mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC, MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000); - /* set WFDMA Tx/Rx */ - mt76_set(dev, MT_WFDMA0_GLO_CFG, - MT_WFDMA0_GLO_CFG_TX_DMA_EN | - MT_WFDMA0_GLO_CFG_RX_DMA_EN | - MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | - MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); - /* GLO_CFG_EXT0 */ mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0, WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD | @@ -187,12 +224,6 @@ static int mt7996_dma_enable(struct mt7996_dev *dev) WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE); if (dev->hif2) { - mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs, - MT_WFDMA0_GLO_CFG_TX_DMA_EN | - MT_WFDMA0_GLO_CFG_RX_DMA_EN | - MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | - MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); - /* GLO_CFG_EXT0 */ mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs, WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD | @@ 
-216,23 +247,7 @@ static int mt7996_dma_enable(struct mt7996_dev *dev) /* TODO: redirect rx ring6 interrupt to pcie0 for wed function */ } - /* enable interrupts for TX/RX rings */ - irq_mask = MT_INT_RX_DONE_MCU | - MT_INT_TX_DONE_MCU | - MT_INT_MCU_CMD; - - if (!dev->mphy.band_idx) - irq_mask |= MT_INT_BAND0_RX_DONE; - - if (dev->dbdc_support) - irq_mask |= MT_INT_BAND1_RX_DONE; - - if (dev->tbtc_support) - irq_mask |= MT_INT_BAND2_RX_DONE; - - mt7996_irq_enable(dev, irq_mask); - - return 0; + mt7996_dma_start(dev, reset); } int mt7996_dma_init(struct mt7996_dev *dev) @@ -293,7 +308,7 @@ int mt7996_dma_init(struct mt7996_dev *dev) /* event from WA */ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA], MT_RXQ_ID(MT_RXQ_MCU_WA), - MT7996_RX_MCU_RING_SIZE, + MT7996_RX_MCU_RING_SIZE_WA, MT_RX_BUF_SIZE, MT_RXQ_RING_BASE(MT_RXQ_MCU_WA)); if (ret) @@ -347,7 +362,7 @@ int mt7996_dma_init(struct mt7996_dev *dev) mt7996_poll_tx); napi_enable(&dev->mt76.tx_napi); - mt7996_dma_enable(dev); + mt7996_dma_enable(dev, false); return 0; } @@ -413,7 +428,7 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force) mt76_for_each_q_rx(&dev->mt76, i) mt76_queue_rx_reset(dev, i); - mt7996_dma_enable(dev); + mt7996_dma_enable(dev, !force); } void mt7996_dma_cleanup(struct mt7996_dev *dev) diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c index f1b48cdda58f..55cb1770fa34 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c @@ -4,6 +4,7 @@ */ #include <linux/etherdevice.h> +#include <linux/of.h> #include <linux/thermal.h> #include "mt7996.h" #include "mac.h" @@ -53,23 +54,31 @@ static void mt7996_led_set_config(struct led_classdev *led_cdev, dev = container_of(mphy->dev, struct mt7996_dev, mt76); /* select TX blink mode, 2: only data frames */ - mt76_rmw_field(dev, MT_TMAC_TCR0(0), MT_TMAC_TCR0_TX_BLINK, 2); + mt76_rmw_field(dev, MT_TMAC_TCR0(mphy->band_idx), MT_TMAC_TCR0_TX_BLINK, 2); /* enable LED */ - mt76_wr(dev, MT_LED_EN(0), 1); + mt76_wr(dev, MT_LED_EN(mphy->band_idx), 1); /* set LED Tx blink on/off time */ val = FIELD_PREP(MT_LED_TX_BLINK_ON_MASK, delay_on) | FIELD_PREP(MT_LED_TX_BLINK_OFF_MASK, delay_off); - mt76_wr(dev, MT_LED_TX_BLINK(0), val); + mt76_wr(dev, MT_LED_TX_BLINK(mphy->band_idx), val); + + /* turn LED off */ + if (delay_off == 0xff && delay_on == 0x0) { + val = MT_LED_CTRL_POLARITY | MT_LED_CTRL_KICK; + } else { + /* control LED */ + val = MT_LED_CTRL_BLINK_MODE | MT_LED_CTRL_KICK; + if (mphy->band_idx == MT_BAND1) + val |= MT_LED_CTRL_BLINK_BAND_SEL; + } - /* control LED */ - val = MT_LED_CTRL_BLINK_MODE | MT_LED_CTRL_KICK; if (mphy->leds.al) val |= MT_LED_CTRL_POLARITY; - mt76_wr(dev, MT_LED_CTRL(0), val); - mt76_clear(dev, MT_LED_CTRL(0), MT_LED_CTRL_KICK); + mt76_wr(dev, MT_LED_CTRL(mphy->band_idx), val); + mt76_clear(dev, MT_LED_CTRL(mphy->band_idx), MT_LED_CTRL_KICK); } static int mt7996_led_set_blink(struct led_classdev *led_cdev, @@ -172,6 +181,7 @@ mt7996_init_wiphy(struct ieee80211_hw *hw) wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); wiphy->reg_notifier = mt7996_regd_notifier; wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; + wiphy->mbssid_max_interfaces = 16; wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BSS_COLOR); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS); @@ -183,6 +193,7 @@ mt7996_init_wiphy(struct ieee80211_hw *hw) wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_DISCOVERY); wiphy_ext_feature_set(wiphy, 
NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER); if (!mdev->dev->of_node || !of_property_read_bool(mdev->dev->of_node, @@ -194,6 +205,7 @@ mt7996_init_wiphy(struct ieee80211_hw *hw) ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD); ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD); ieee80211_hw_set(hw, WANT_MONITOR_VIF); + ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); hw->max_tx_fragments = 4; @@ -217,6 +229,14 @@ mt7996_init_wiphy(struct ieee80211_hw *hw) IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; phy->mt76->sband_5g.sband.ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_1; + + ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); + } + + /* init led callbacks */ + if (IS_ENABLED(CONFIG_MT76_LEDS)) { + phy->mt76->leds.cdev.brightness_set = mt7996_led_set_brightness; + phy->mt76->leds.cdev.blink_set = mt7996_led_set_blink; } mt76_set_stream_caps(phy->mt76, true); @@ -254,6 +274,11 @@ mt7996_mac_init_band(struct mt7996_dev *dev, u8 band) set = FIELD_PREP(MT_WTBLOFF_RSCR_RCPI_MODE, 0) | FIELD_PREP(MT_WTBLOFF_RSCR_RCPI_PARAM, 0x3); mt76_rmw(dev, MT_WTBLOFF_RSCR(band), mask, set); + + /* MT_TXD5_TX_STATUS_HOST (MPDU format) has higher priority than + * MT_AGG_ACR_PPDU_TXS2H (PPDU format) even though ACR bit is set. + */ + mt76_set(dev, MT_AGG_ACR4(band), MT_AGG_ACR_PPDU_TXS2H); } static void mt7996_mac_init_basic_rates(struct mt7996_dev *dev) @@ -729,16 +754,17 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band, IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER | IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE; + val = max_t(u8, sts - 1, 3); eht_cap_elem->phy_cap_info[0] |= - u8_encode_bits(u8_get_bits(sts - 1, BIT(0)), + u8_encode_bits(u8_get_bits(val, BIT(0)), IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK); eht_cap_elem->phy_cap_info[1] = - u8_encode_bits(u8_get_bits(sts - 1, GENMASK(2, 1)), + u8_encode_bits(u8_get_bits(val, GENMASK(2, 1)), IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) | - u8_encode_bits(sts - 1, + u8_encode_bits(val, IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK) | - u8_encode_bits(sts - 1, + u8_encode_bits(val, IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK); eht_cap_elem->phy_cap_info[2] = @@ -823,8 +849,7 @@ __mt7996_set_stream_he_eht_caps(struct mt7996_phy *phy, n++; } - sband->iftype_data = data; - sband->n_iftype_data = n; + _ieee80211_set_sband_iftype_data(sband, data, n); } void mt7996_set_stream_he_eht_caps(struct mt7996_phy *phy) @@ -853,9 +878,7 @@ int mt7996_register_device(struct mt7996_dev *dev) INIT_WORK(&dev->rc_work, mt7996_mac_sta_rc_work); INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7996_mac_work); INIT_LIST_HEAD(&dev->sta_rc_list); - INIT_LIST_HEAD(&dev->sta_poll_list); INIT_LIST_HEAD(&dev->twt_list); - spin_lock_init(&dev->sta_poll_lock); init_waitqueue_head(&dev->reset_wait); INIT_WORK(&dev->reset_work, mt7996_mac_reset_work); @@ -868,12 +891,6 @@ int mt7996_register_device(struct mt7996_dev *dev) mt7996_init_wiphy(hw); - /* init led callbacks */ - if (IS_ENABLED(CONFIG_MT76_LEDS)) { - dev->mphy.leds.cdev.brightness_set = mt7996_led_set_brightness; - dev->mphy.leds.cdev.blink_set = mt7996_led_set_blink; - } - ret = mt76_register_device(&dev->mt76, true, mt76_rates, ARRAY_SIZE(mt76_rates)); if (ret) diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c index 9b0f6053e0fa..04540833485f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c +++ 
b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c @@ -13,10 +13,6 @@ #define to_rssi(field, rcpi) ((FIELD_GET(field, rcpi) - 220) / 2) -#define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f) -#define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\ - IEEE80211_RADIOTAP_HE_##f) - static const struct mt7996_dfs_radar_spec etsi_radar_specs = { .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, .radar_pattern = { @@ -111,9 +107,9 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev) LIST_HEAD(sta_poll_list); int i; - spin_lock_bh(&dev->sta_poll_lock); - list_splice_init(&dev->sta_poll_list, &sta_poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); + list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); rcu_read_lock(); @@ -124,15 +120,15 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev) s8 rssi[4]; u8 bw; - spin_lock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); if (list_empty(&sta_poll_list)) { - spin_unlock_bh(&dev->sta_poll_lock); + spin_unlock_bh(&dev->mt76.sta_poll_lock); break; } msta = list_first_entry(&sta_poll_list, - struct mt7996_sta, poll_list); - list_del_init(&msta->poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + struct mt7996_sta, wcid.poll_list); + list_del_init(&msta->wcid.poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); idx = msta->wcid.idx; @@ -263,180 +259,6 @@ void mt7996_mac_set_fixed_rate_table(struct mt7996_dev *dev, mt76_wr(dev, MT_WTBL_ITCR, ctrl); } -static void -mt7996_mac_decode_he_radiotap_ru(struct mt76_rx_status *status, - struct ieee80211_radiotap_he *he, - __le32 *rxv) -{ - u32 ru, offs = 0; - - ru = le32_get_bits(rxv[0], MT_PRXV_HE_RU_ALLOC); - - status->bw = RATE_INFO_BW_HE_RU; - - switch (ru) { - case 0 ... 36: - status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26; - offs = ru; - break; - case 37 ... 52: - status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52; - offs = ru - 37; - break; - case 53 ... 60: - status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; - offs = ru - 53; - break; - case 61 ... 64: - status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242; - offs = ru - 61; - break; - case 65 ... 
66: - status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484; - offs = ru - 65; - break; - case 67: - status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996; - break; - case 68: - status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996; - break; - } - - he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN); - he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) | - le16_encode_bits(offs, - IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET); -} - -static void -mt7996_mac_decode_he_mu_radiotap(struct sk_buff *skb, __le32 *rxv) -{ - struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; - static const struct ieee80211_radiotap_he_mu mu_known = { - .flags1 = HE_BITS(MU_FLAGS1_SIG_B_MCS_KNOWN) | - HE_BITS(MU_FLAGS1_SIG_B_DCM_KNOWN) | - HE_BITS(MU_FLAGS1_CH1_RU_KNOWN) | - HE_BITS(MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN), - .flags2 = HE_BITS(MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN), - }; - struct ieee80211_radiotap_he_mu *he_mu = NULL; - - status->flag |= RX_FLAG_RADIOTAP_HE_MU; - - he_mu = skb_push(skb, sizeof(mu_known)); - memcpy(he_mu, &mu_known, sizeof(mu_known)); - -#define MU_PREP(f, v) le16_encode_bits(v, IEEE80211_RADIOTAP_HE_MU_##f) - - he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_MCS, status->rate_idx); - if (status->he_dcm) - he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_DCM, status->he_dcm); - - he_mu->flags2 |= MU_PREP(FLAGS2_BW_FROM_SIG_A_BW, status->bw) | - MU_PREP(FLAGS2_SIG_B_SYMS_USERS, - le32_get_bits(rxv[4], MT_CRXV_HE_NUM_USER)); - - he_mu->ru_ch1[0] = le32_get_bits(rxv[16], MT_CRXV_HE_RU0) & 0xff; - - if (status->bw >= RATE_INFO_BW_40) { - he_mu->flags1 |= HE_BITS(MU_FLAGS1_CH2_RU_KNOWN); - he_mu->ru_ch2[0] = le32_get_bits(rxv[16], MT_CRXV_HE_RU1) & 0xff; - } - - if (status->bw >= RATE_INFO_BW_80) { - u32 ru_h, ru_l; - - he_mu->ru_ch1[1] = le32_get_bits(rxv[16], MT_CRXV_HE_RU2) & 0xff; - - ru_l = le32_get_bits(rxv[16], MT_CRXV_HE_RU3_L); - ru_h = le32_get_bits(rxv[17], MT_CRXV_HE_RU3_H) & 0x7; - he_mu->ru_ch2[1] = (u8)(ru_l | ru_h << 4); - } -} - -static void -mt7996_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u8 mode) -{ - struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; - static const struct ieee80211_radiotap_he known = { - .data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) | - HE_BITS(DATA1_DATA_DCM_KNOWN) | - HE_BITS(DATA1_STBC_KNOWN) | - HE_BITS(DATA1_CODING_KNOWN) | - HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) | - HE_BITS(DATA1_DOPPLER_KNOWN) | - HE_BITS(DATA1_SPTL_REUSE_KNOWN) | - HE_BITS(DATA1_BSS_COLOR_KNOWN), - .data2 = HE_BITS(DATA2_GI_KNOWN) | - HE_BITS(DATA2_TXBF_KNOWN) | - HE_BITS(DATA2_PE_DISAMBIG_KNOWN) | - HE_BITS(DATA2_TXOP_KNOWN), - }; - struct ieee80211_radiotap_he *he = NULL; - u32 ltf_size = le32_get_bits(rxv[4], MT_CRXV_HE_LTF_SIZE) + 1; - - status->flag |= RX_FLAG_RADIOTAP_HE; - - he = skb_push(skb, sizeof(known)); - memcpy(he, &known, sizeof(known)); - - he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[9]) | - HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[4]); - he->data4 = HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[13]); - he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[5]) | - le16_encode_bits(ltf_size, - IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE); - if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF) - he->data5 |= HE_BITS(DATA5_TXBF); - he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[9]) | - HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[9]); - - switch (mode) { - case MT_PHY_TYPE_HE_SU: - he->data1 |= HE_BITS(DATA1_FORMAT_SU) | - HE_BITS(DATA1_UL_DL_KNOWN) | - HE_BITS(DATA1_BEAM_CHANGE_KNOWN) | - HE_BITS(DATA1_BW_RU_ALLOC_KNOWN); - - he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[8]) | - 
HE_PREP(DATA3_UL_DL, UPLINK, rxv[5]); - break; - case MT_PHY_TYPE_HE_EXT_SU: - he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) | - HE_BITS(DATA1_UL_DL_KNOWN) | - HE_BITS(DATA1_BW_RU_ALLOC_KNOWN); - - he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[5]); - break; - case MT_PHY_TYPE_HE_MU: - he->data1 |= HE_BITS(DATA1_FORMAT_MU) | - HE_BITS(DATA1_UL_DL_KNOWN); - - he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[5]); - he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[8]); - - mt7996_mac_decode_he_radiotap_ru(status, he, rxv); - mt7996_mac_decode_he_mu_radiotap(skb, rxv); - break; - case MT_PHY_TYPE_HE_TB: - he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) | - HE_BITS(DATA1_SPTL_REUSE2_KNOWN) | - HE_BITS(DATA1_SPTL_REUSE3_KNOWN) | - HE_BITS(DATA1_SPTL_REUSE4_KNOWN); - - he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[13]) | - HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[13]) | - HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[13]) | - HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[13]); - - mt7996_mac_decode_he_radiotap_ru(status, he, rxv); - break; - default: - break; - } -} - /* The HW does not translate the mac header to 802.3 for mesh point */ static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap) { @@ -611,7 +433,9 @@ mt7996_mac_fill_rx_rate(struct mt7996_dev *dev, case IEEE80211_STA_RX_BW_160: status->bw = RATE_INFO_BW_160; break; + /* rxv reports bw 320-1 and 320-2 separately */ case IEEE80211_STA_RX_BW_320: + case IEEE80211_STA_RX_BW_320 + 1: status->bw = RATE_INFO_BW_320; break; default: @@ -681,10 +505,11 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb) struct mt7996_sta *msta; msta = container_of(status->wcid, struct mt7996_sta, wcid); - spin_lock_bh(&dev->sta_poll_lock); - if (list_empty(&msta->poll_list)) - list_add_tail(&msta->poll_list, &dev->sta_poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); + if (list_empty(&msta->wcid.poll_list)) + list_add_tail(&msta->wcid.poll_list, + &dev->mt76.sta_poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); } status->freq = mphy->chandef.chan->center_freq; @@ -836,14 +661,19 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb) skb_pull(skb, hdr_gap); if (!hdr_trans && status->amsdu && !(ieee80211_has_a4(fc) && is_mesh)) { pad_start = ieee80211_get_hdrlen_from_skb(skb); - } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR) && - get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q) { + } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) { /* When header translation failure is indicated, * the hardware will insert an extra 2-byte field * containing the data length after the protocol - * type field. + * type field. This happens either when the LLC-SNAP + * pattern did not match, or if a VLAN header was + * detected. 
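As a standalone sketch of what that comment means for the buffer layout (plain C, invented helper name, not driver code), the new logic in the hunk below strips the inserted 2-byte word only when an 802.1Q tag is found at the EtherType offset:

	#include <stddef.h>
	#include <stdint.h>

	#define ETH_P_8021Q	0x8100

	/* Illustrative only (offsets per the comment above, names ours):
	 * find where the hardware-inserted 2-byte length word sits.
	 * Offset 12 is the EtherType field of the untranslated Ethernet
	 * header; a VLAN tag pushes the inserted word 4 bytes further
	 * out, and without a VLAN tag no fixed-offset stripping is done
	 * on this path.
	 */
	static size_t hw_pad_offset(const uint8_t *data)
	{
		uint16_t proto = (uint16_t)(data[12] << 8 | data[13]);

		return proto == ETH_P_8021Q ? 12 + 4 : 0;
	}

In the hunk that follows, pad_start carries exactly this offset, with 0 meaning the frame is left untouched.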
*/ - pad_start = 16; + pad_start = 12; + if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q) + pad_start += 4; + else + pad_start = 0; } if (pad_start) { @@ -881,7 +711,7 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb) } if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023)) - mt7996_mac_decode_he_radiotap(skb, rxv, mode); + mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode); if (!status->wcid || !ieee80211_is_data_qos(fc)) return 0; @@ -1007,7 +837,7 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi, u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2; u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP; - struct mt7996_vif *mvif; + struct mt76_vif *mvif; u16 tx_count = 15; u32 val; bool beacon = !!(changed & (BSS_CHANGED_BEACON | @@ -1015,11 +845,11 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi, bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP | BSS_CHANGED_FILS_DISCOVERY)); - mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL; + mvif = vif ? (struct mt76_vif *)vif->drv_priv : NULL; if (mvif) { - omac_idx = mvif->mt76.omac_idx; - wmm_idx = mvif->mt76.wmm_idx; - band_idx = mvif->mt76.band_idx; + omac_idx = mvif->omac_idx; + wmm_idx = mvif->wmm_idx; + band_idx = mvif->band_idx; } if (inband_disc) { @@ -1120,15 +950,6 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, if (!wcid) wcid = &dev->mt76.global_wcid; - if (sta) { - struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; - - if (time_after(jiffies, msta->jiffies + HZ / 4)) { - info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; - msta->jiffies = jiffies; - } - } - t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); t->skb = tx_info->skb; @@ -1163,11 +984,9 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, } txp->fw.token = cpu_to_le16(id); - if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) - txp->fw.rept_wds_wcid = cpu_to_le16(wcid->idx); - else - txp->fw.rept_wds_wcid = cpu_to_le16(0xfff); - tx_info->skb = DMA_DUMMY_DATA; + txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff); + + tx_info->skb = NULL; /* pass partial skb header to fw */ tx_info->buf[1].len = MT_CT_PARSE_LEN; @@ -1178,27 +997,40 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, } static void -mt7996_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi) +mt7996_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb) { struct mt7996_sta *msta; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP; u16 fc, tid; - u32 val; if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he)) return; - tid = le32_get_bits(txwi[1], MT_TXD1_TID); + tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; if (tid >= 6) /* skip VO queue */ return; - val = le32_to_cpu(txwi[2]); - fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 | - FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4; + if (is_8023) { + fc = IEEE80211_FTYPE_DATA | + (sta->wme ? IEEE80211_STYPE_QOS_DATA : IEEE80211_STYPE_DATA); + } else { + /* No need to get precise TID for Action/Management Frame, + * since it will not meet the following Frame Control + * condition anyway. 
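To make the frame-control condition mentioned in that comment concrete, here is a minimal standalone check (mask values copied from ieee80211.h, helper name invented):

	#include <stdbool.h>
	#include <stdint.h>

	#define FCTL_FTYPE	0x000c	/* IEEE80211_FCTL_FTYPE */
	#define FCTL_STYPE	0x00f0	/* IEEE80211_FCTL_STYPE */
	#define FTYPE_DATA	0x0008	/* IEEE80211_FTYPE_DATA */
	#define STYPE_QOS_DATA	0x0080	/* IEEE80211_STYPE_QOS_DATA */

	/* A BlockAck session only makes sense for QoS data frames, so
	 * reduce frame_control (host order) to type|subtype and compare
	 * against the one accepted combination.
	 */
	static bool is_qos_data(uint16_t frame_control)
	{
		uint16_t fc = frame_control & (FCTL_FTYPE | FCTL_STYPE);

		return fc == (FTYPE_DATA | STYPE_QOS_DATA);
	}

Management and action frames already fail at the type bits, which is why no precise TID needs to be derived for them.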
+ */ + + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + + fc = le16_to_cpu(hdr->frame_control) & + (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE); + } + if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA))) return; msta = (struct mt7996_sta *)sta->drv_priv; - if (!test_and_set_bit(tid, &msta->ampdu_state)) + if (!test_and_set_bit(tid, &msta->wcid.ampdu_state)) ieee80211_start_tx_ba_session(sta, tid, 0); } @@ -1221,9 +1053,9 @@ mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t, wcid_idx = wcid->idx; if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) - mt7996_tx_check_aggr(sta, txwi); + mt7996_tx_check_aggr(sta, t->skb); } else { - wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX); + wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX); } __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list); @@ -1242,6 +1074,7 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len) struct mt76_phy *phy3 = mdev->phys[MT_BAND2]; struct mt76_txwi_cache *txwi; struct ieee80211_sta *sta = NULL; + struct mt76_wcid *wcid; LIST_HEAD(free_list); struct sk_buff *skb, *tmp; void *end = data + len; @@ -1260,7 +1093,7 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len) mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false); } - if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 4)) + if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 5)) return; total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT); @@ -1276,7 +1109,6 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len) info = le32_to_cpu(*cur_info); if (info & MT_TXFREE_INFO_PAIR) { struct mt7996_sta *msta; - struct mt76_wcid *wcid; u16 idx; idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info); @@ -1286,15 +1118,27 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len) continue; msta = container_of(wcid, struct mt7996_sta, wcid); - spin_lock_bh(&dev->sta_poll_lock); - if (list_empty(&msta->poll_list)) - list_add_tail(&msta->poll_list, &dev->sta_poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_lock_bh(&mdev->sta_poll_lock); + if (list_empty(&msta->wcid.poll_list)) + list_add_tail(&msta->wcid.poll_list, + &mdev->sta_poll_list); + spin_unlock_bh(&mdev->sta_poll_lock); continue; - } + } else if (info & MT_TXFREE_INFO_HEADER) { + u32 tx_retries = 0, tx_failed = 0; - if (info & MT_TXFREE_INFO_HEADER) + if (!wcid) + continue; + + tx_retries = + FIELD_GET(MT_TXFREE_INFO_COUNT, info) - 1; + tx_failed = tx_retries + + !!FIELD_GET(MT_TXFREE_INFO_STAT, info); + + wcid->stats.tx_retries += tx_retries; + wcid->stats.tx_failed += tx_failed; continue; + } for (i = 0; i < 2; i++) { msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID; @@ -1324,9 +1168,10 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len) } static bool -mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid, int pid, - __le32 *txs_data, struct mt76_sta_stats *stats) +mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid, + int pid, __le32 *txs_data) { + struct mt76_sta_stats *stats = &wcid->stats; struct ieee80211_supported_band *sband; struct mt76_dev *mdev = &dev->mt76; struct mt76_phy *mphy; @@ -1337,22 +1182,31 @@ mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid, int pid, bool cck = false; u32 txrate, txs, mode, stbc; + txs = le32_to_cpu(txs_data[0]); + mt76_tx_status_lock(mdev, &list); skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list); - if (!skb) - goto out_no_skb; - txs = le32_to_cpu(txs_data[0]); + if (skb) { 
+ info = IEEE80211_SKB_CB(skb); + if (!(txs & MT_TXS0_ACK_ERROR_MASK)) + info->flags |= IEEE80211_TX_STAT_ACK; - info = IEEE80211_SKB_CB(skb); - if (!(txs & MT_TXS0_ACK_ERROR_MASK)) - info->flags |= IEEE80211_TX_STAT_ACK; + info->status.ampdu_len = 1; + info->status.ampdu_ack_len = + !!(info->flags & IEEE80211_TX_STAT_ACK); - info->status.ampdu_len = 1; - info->status.ampdu_ack_len = !!(info->flags & - IEEE80211_TX_STAT_ACK); + info->status.rates[0].idx = -1; + } - info->status.rates[0].idx = -1; + if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wcid->sta) { + struct ieee80211_sta *sta; + u8 tid; + + sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv); + tid = FIELD_GET(MT_TXS0_TID, txs); + ieee80211_refresh_tx_agg_session_timer(sta, tid); + } txrate = FIELD_GET(MT_TXS0_TX_RATE, txs); @@ -1452,9 +1306,8 @@ mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid, int pid, wcid->rate = rate; out: - mt76_tx_status_skb_done(mdev, skb, &list); - -out_no_skb: + if (skb) + mt76_tx_status_skb_done(mdev, skb, &list); mt76_tx_status_unlock(mdev, &list); return !!skb; @@ -1468,13 +1321,10 @@ static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data) u16 wcidx; u8 pid; - if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1) - return; - wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID); pid = le32_get_bits(txs_data[3], MT_TXS3_PID); - if (pid < MT_PACKET_ID_FIRST) + if (pid < MT_PACKET_ID_NO_SKB) return; if (wcidx >= mt7996_wtbl_size(dev)) @@ -1488,15 +1338,15 @@ static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data) msta = container_of(wcid, struct mt7996_sta, wcid); - mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data, &msta->stats); + mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data); if (!wcid->sta) goto out; - spin_lock_bh(&dev->sta_poll_lock); - if (list_empty(&msta->poll_list)) - list_add_tail(&msta->poll_list, &dev->sta_poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); + if (list_empty(&msta->wcid.poll_list)) + list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); out: rcu_read_unlock(); @@ -1609,20 +1459,19 @@ void mt7996_mac_reset_counters(struct mt7996_phy *phy) mt7996_mcu_get_chan_mib_info(phy, true); } -void mt7996_mac_set_timing(struct mt7996_phy *phy) +void mt7996_mac_set_coverage_class(struct mt7996_phy *phy) { s16 coverage_class = phy->coverage_class; struct mt7996_dev *dev = phy->dev; struct mt7996_phy *phy2 = mt7996_phy2(dev); struct mt7996_phy *phy3 = mt7996_phy3(dev); - u32 val, reg_offset; + u32 reg_offset; u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) | FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48); u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) | FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28); u8 band_idx = phy->mt76->band_idx; int offset; - bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ); if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state)) return; @@ -1635,34 +1484,12 @@ void mt7996_mac_set_timing(struct mt7996_phy *phy) coverage_class = max_t(s16, coverage_class, phy3->coverage_class); - mt76_set(dev, MT_ARB_SCR(band_idx), - MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE); - udelay(1); - offset = 3 * coverage_class; reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) | FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset); mt76_wr(dev, MT_TMAC_CDTR(band_idx), cck + reg_offset); mt76_wr(dev, MT_TMAC_ODTR(band_idx), ofdm + reg_offset); - mt76_wr(dev, MT_TMAC_ICR0(band_idx), - FIELD_PREP(MT_IFS_EIFS_OFDM, a_band ? 
84 : 78) | - FIELD_PREP(MT_IFS_RIFS, 2) | - FIELD_PREP(MT_IFS_SIFS, 10) | - FIELD_PREP(MT_IFS_SLOT, phy->slottime)); - - if (!a_band) - mt76_wr(dev, MT_TMAC_ICR1(band_idx), - FIELD_PREP(MT_IFS_EIFS_CCK, 314)); - - if (phy->slottime < 20 || a_band) - val = MT7996_CFEND_RATE_DEFAULT; - else - val = MT7996_CFEND_RATE_11B; - - mt76_rmw_field(dev, MT_RATE_HRCR0(band_idx), MT_RATE_HRCR0_CFEND_RATE, val); - mt76_clear(dev, MT_ARB_SCR(band_idx), - MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE); } void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band) @@ -2046,6 +1873,12 @@ void mt7996_mac_reset_work(struct work_struct *work) mt7996_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE); } + mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE); + mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE); + + /* enable DMA Tx/Rx and interrupt */ + mt7996_dma_start(dev, false); + clear_bit(MT76_MCU_RESET, &dev->mphy.state); clear_bit(MT76_RESET, &dev->mphy.state); if (phy2) @@ -2062,9 +1895,6 @@ void mt7996_mac_reset_work(struct work_struct *work) tasklet_schedule(&dev->mt76.irq_tasklet); - mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE); - mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE); - mt76_worker_enable(&dev->mt76.tx_worker); local_bh_disable(); @@ -2191,8 +2021,8 @@ void mt7996_reset(struct mt7996_dev *dev) void mt7996_mac_update_stats(struct mt7996_phy *phy) { + struct mt76_mib_stats *mib = &phy->mib; struct mt7996_dev *dev = phy->dev; - struct mib_stats *mib = &phy->mib; u8 band_idx = phy->mt76->band_idx; u32 cnt; int i; @@ -2339,7 +2169,7 @@ void mt7996_mac_sta_rc_work(struct work_struct *work) u32 changed; LIST_HEAD(list); - spin_lock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); list_splice_init(&dev->sta_rc_list, &list); while (!list_empty(&list)) { @@ -2347,7 +2177,7 @@ void mt7996_mac_sta_rc_work(struct work_struct *work) list_del_init(&msta->rc_list); changed = msta->changed; msta->changed = 0; - spin_unlock_bh(&dev->sta_poll_lock); + spin_unlock_bh(&dev->mt76.sta_poll_lock); sta = container_of((void *)msta, struct ieee80211_sta, drv_priv); vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); @@ -2359,10 +2189,10 @@ void mt7996_mac_sta_rc_work(struct work_struct *work) /* TODO: smps change */ - spin_lock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); } - spin_unlock_bh(&dev->sta_poll_lock); + spin_unlock_bh(&dev->mt76.sta_poll_lock); } void mt7996_mac_work(struct work_struct *work) @@ -2381,6 +2211,11 @@ void mt7996_mac_work(struct work_struct *work) mphy->mac_work_count = 0; mt7996_mac_update_stats(phy); + + if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) { + mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_ADM_STAT); + mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_MSDU_COUNT); + } } mutex_unlock(&mphy->dev->mutex); diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.h b/drivers/net/wireless/mediatek/mt76/mt7996/mac.h index bc4e6c55373e..e629324a5617 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.h @@ -6,320 +6,7 @@ #ifndef __MT7996_MAC_H #define __MT7996_MAC_H -#define MT_CT_PARSE_LEN 72 -#define MT_CT_DMA_BUF_NUM 2 - -#define MT_RXD0_LENGTH GENMASK(15, 0) -#define MT_RXD0_PKT_TYPE GENMASK(31, 27) - -#define MT_RXD0_MESH BIT(18) -#define MT_RXD0_MHCP BIT(19) -#define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16) -#define MT_RXD0_NORMAL_IP_SUM BIT(23) -#define MT_RXD0_NORMAL_UDP_TCP_SUM BIT(24) - -#define
MT_RXD0_SW_PKT_TYPE_MASK GENMASK(31, 16) -#define MT_RXD0_SW_PKT_TYPE_MAP 0x380F -#define MT_RXD0_SW_PKT_TYPE_FRAME 0x3801 - -/* RXD DW1 */ -#define MT_RXD1_NORMAL_WLAN_IDX GENMASK(11, 0) -#define MT_RXD1_NORMAL_GROUP_1 BIT(16) -#define MT_RXD1_NORMAL_GROUP_2 BIT(17) -#define MT_RXD1_NORMAL_GROUP_3 BIT(18) -#define MT_RXD1_NORMAL_GROUP_4 BIT(19) -#define MT_RXD1_NORMAL_GROUP_5 BIT(20) -#define MT_RXD1_NORMAL_KEY_ID GENMASK(22, 21) -#define MT_RXD1_NORMAL_CM BIT(23) -#define MT_RXD1_NORMAL_CLM BIT(24) -#define MT_RXD1_NORMAL_ICV_ERR BIT(25) -#define MT_RXD1_NORMAL_TKIP_MIC_ERR BIT(26) -#define MT_RXD1_NORMAL_BAND_IDX GENMASK(28, 27) -#define MT_RXD1_NORMAL_SPP_EN BIT(29) -#define MT_RXD1_NORMAL_ADD_OM BIT(30) -#define MT_RXD1_NORMAL_SEC_DONE BIT(31) - -/* RXD DW2 */ -#define MT_RXD2_NORMAL_BSSID GENMASK(5, 0) -#define MT_RXD2_NORMAL_MAC_HDR_LEN GENMASK(12, 8) -#define MT_RXD2_NORMAL_HDR_TRANS BIT(7) -#define MT_RXD2_NORMAL_HDR_OFFSET GENMASK(15, 13) -#define MT_RXD2_NORMAL_SEC_MODE GENMASK(20, 16) -#define MT_RXD2_NORMAL_MU_BAR BIT(21) -#define MT_RXD2_NORMAL_SW_BIT BIT(22) -#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23) -#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24) -#define MT_RXD2_NORMAL_HDR_TRANS_ERROR BIT(25) -#define MT_RXD2_NORMAL_INT_FRAME BIT(26) -#define MT_RXD2_NORMAL_FRAG BIT(27) -#define MT_RXD2_NORMAL_NULL_FRAME BIT(28) -#define MT_RXD2_NORMAL_NDATA BIT(29) -#define MT_RXD2_NORMAL_NON_AMPDU BIT(30) -#define MT_RXD2_NORMAL_BF_REPORT BIT(31) - -/* RXD DW3 */ -#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0) -#define MT_RXD3_NORMAL_CH_FREQ GENMASK(15, 8) -#define MT_RXD3_NORMAL_ADDR_TYPE GENMASK(17, 16) -#define MT_RXD3_NORMAL_U2M BIT(0) -#define MT_RXD3_NORMAL_HTC_VLD BIT(18) -#define MT_RXD3_NORMAL_BEACON_MC BIT(20) -#define MT_RXD3_NORMAL_BEACON_UC BIT(21) -#define MT_RXD3_NORMAL_CO_ANT BIT(22) -#define MT_RXD3_NORMAL_FCS_ERR BIT(24) -#define MT_RXD3_NORMAL_VLAN2ETH BIT(31) - -/* RXD DW4 */ -#define MT_RXD4_NORMAL_PAYLOAD_FORMAT GENMASK(1, 0) -#define MT_RXD4_FIRST_AMSDU_FRAME GENMASK(1, 0) -#define MT_RXD4_MID_AMSDU_FRAME BIT(1) -#define MT_RXD4_LAST_AMSDU_FRAME BIT(0) - -#define MT_RXV_HDR_BAND_IDX BIT(24) - -/* RXD GROUP4 */ -#define MT_RXD8_FRAME_CONTROL GENMASK(15, 0) - -#define MT_RXD10_SEQ_CTRL GENMASK(15, 0) -#define MT_RXD10_QOS_CTL GENMASK(31, 16) - -#define MT_RXD11_HT_CONTROL GENMASK(31, 0) - -/* P-RXV */ -#define MT_PRXV_TX_RATE GENMASK(6, 0) -#define MT_PRXV_TX_DCM BIT(4) -#define MT_PRXV_TX_ER_SU_106T BIT(5) -#define MT_PRXV_NSTS GENMASK(10, 7) -#define MT_PRXV_TXBF BIT(11) -#define MT_PRXV_HT_AD_CODE BIT(12) -#define MT_PRXV_HE_RU_ALLOC GENMASK(30, 22) -#define MT_PRXV_RCPI3 GENMASK(31, 24) -#define MT_PRXV_RCPI2 GENMASK(23, 16) -#define MT_PRXV_RCPI1 GENMASK(15, 8) -#define MT_PRXV_RCPI0 GENMASK(7, 0) -#define MT_PRXV_HT_SHORT_GI GENMASK(4, 3) -#define MT_PRXV_HT_STBC GENMASK(10, 9) -#define MT_PRXV_TX_MODE GENMASK(14, 11) -#define MT_PRXV_FRAME_MODE GENMASK(2, 0) -#define MT_PRXV_DCM BIT(5) - -/* C-RXV */ -#define MT_CRXV_HE_NUM_USER GENMASK(26, 20) -#define MT_CRXV_HE_LTF_SIZE GENMASK(28, 27) -#define MT_CRXV_HE_LDPC_EXT_SYM BIT(30) - -#define MT_CRXV_HE_PE_DISAMBIG BIT(1) -#define MT_CRXV_HE_UPLINK BIT(2) - -#define MT_CRXV_HE_MU_AID GENMASK(27, 17) -#define MT_CRXV_HE_BEAM_CHNG BIT(29) - -#define MT_CRXV_HE_DOPPLER BIT(0) -#define MT_CRXV_HE_BSS_COLOR GENMASK(15, 10) -#define MT_CRXV_HE_TXOP_DUR GENMASK(19, 17) - -#define MT_CRXV_HE_SR_MASK GENMASK(11, 8) -#define MT_CRXV_HE_SR1_MASK GENMASK(16, 12) -#define MT_CRXV_HE_SR2_MASK GENMASK(20, 17) -#define 
MT_CRXV_HE_SR3_MASK GENMASK(24, 21) - -#define MT_CRXV_HE_RU0 GENMASK(8, 0) -#define MT_CRXV_HE_RU1 GENMASK(17, 9) -#define MT_CRXV_HE_RU2 GENMASK(26, 18) -#define MT_CRXV_HE_RU3_L GENMASK(31, 27) -#define MT_CRXV_HE_RU3_H GENMASK(3, 0) - -enum tx_header_format { - MT_HDR_FORMAT_802_3, - MT_HDR_FORMAT_CMD, - MT_HDR_FORMAT_802_11, - MT_HDR_FORMAT_802_11_EXT, -}; - -enum tx_pkt_type { - MT_TX_TYPE_CT, - MT_TX_TYPE_SF, - MT_TX_TYPE_CMD, - MT_TX_TYPE_FW, -}; - -enum tx_port_idx { - MT_TX_PORT_IDX_LMAC, - MT_TX_PORT_IDX_MCU -}; - -enum tx_mcu_port_q_idx { - MT_TX_MCU_PORT_RX_Q0 = 0x20, - MT_TX_MCU_PORT_RX_Q1, - MT_TX_MCU_PORT_RX_Q2, - MT_TX_MCU_PORT_RX_Q3, - MT_TX_MCU_PORT_RX_FWDL = 0x3e -}; - -enum tx_mgnt_type { - MT_TX_NORMAL, - MT_TX_TIMING, - MT_TX_ADDBA, -}; - -#define MT_CT_INFO_APPLY_TXD BIT(0) -#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1) -#define MT_CT_INFO_MGMT_FRAME BIT(2) -#define MT_CT_INFO_NONE_CIPHER_FRAME BIT(3) -#define MT_CT_INFO_HSR2_TX BIT(4) -#define MT_CT_INFO_FROM_HOST BIT(7) - -#define MT_TXD_SIZE (8 * 4) - -#define MT_TXD0_Q_IDX GENMASK(31, 25) -#define MT_TXD0_PKT_FMT GENMASK(24, 23) -#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16) -#define MT_TXD0_TX_BYTES GENMASK(15, 0) - -#define MT_TXD1_FIXED_RATE BIT(31) -#define MT_TXD1_OWN_MAC GENMASK(30, 25) -#define MT_TXD1_TID GENMASK(24, 21) -#define MT_TXD1_BIP BIT(24) -#define MT_TXD1_ETH_802_3 BIT(20) -#define MT_TXD1_HDR_INFO GENMASK(20, 16) -#define MT_TXD1_HDR_FORMAT GENMASK(15, 14) -#define MT_TXD1_TGID GENMASK(13, 12) -#define MT_TXD1_WLAN_IDX GENMASK(11, 0) - -#define MT_TXD2_POWER_OFFSET GENMASK(31, 26) -#define MT_TXD2_MAX_TX_TIME GENMASK(25, 16) -#define MT_TXD2_FRAG GENMASK(15, 14) -#define MT_TXD2_HTC_VLD BIT(13) -#define MT_TXD2_DURATION BIT(12) -#define MT_TXD2_HDR_PAD GENMASK(11, 10) -#define MT_TXD2_RTS BIT(9) -#define MT_TXD2_OWN_MAC_MAP BIT(8) -#define MT_TXD2_BF_TYPE GENMASK(6, 7) -#define MT_TXD2_FRAME_TYPE GENMASK(5, 4) -#define MT_TXD2_SUB_TYPE GENMASK(3, 0) - -#define MT_TXD3_SN_VALID BIT(31) -#define MT_TXD3_PN_VALID BIT(30) -#define MT_TXD3_SW_POWER_MGMT BIT(29) -#define MT_TXD3_BA_DISABLE BIT(28) -#define MT_TXD3_SEQ GENMASK(27, 16) -#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11) -#define MT_TXD3_TX_COUNT GENMASK(10, 6) -#define MT_TXD3_HW_AMSDU BIT(5) -#define MT_TXD3_BCM BIT(4) -#define MT_TXD3_EEOSP BIT(3) -#define MT_TXD3_EMRD BIT(2) -#define MT_TXD3_PROTECT_FRAME BIT(1) -#define MT_TXD3_NO_ACK BIT(0) - -#define MT_TXD4_PN_LOW GENMASK(31, 0) - -#define MT_TXD5_PN_HIGH GENMASK(31, 16) -#define MT_TXD5_FL BIT(15) -#define MT_TXD5_BYPASS_TBB BIT(14) -#define MT_TXD5_BYPASS_RBB BIT(13) -#define MT_TXD5_BSS_COLOR_ZERO BIT(12) -#define MT_TXD5_TX_STATUS_HOST BIT(10) -#define MT_TXD5_TX_STATUS_MCU BIT(9) -#define MT_TXD5_TX_STATUS_FMT BIT(8) -#define MT_TXD5_PID GENMASK(7, 0) - -#define MT_TXD6_TX_SRC GENMASK(31, 30) -#define MT_TXD6_VTA BIT(28) -#define MT_TXD6_BW GENMASK(25, 22) -#define MT_TXD6_TX_RATE GENMASK(21, 16) -#define MT_TXD6_TIMESTAMP_OFS_EN BIT(15) -#define MT_TXD6_TIMESTAMP_OFS_IDX GENMASK(14, 10) -#define MT_TXD6_MSDU_CNT GENMASK(9, 4) -#define MT_TXD6_DIS_MAT BIT(3) -#define MT_TXD6_DAS BIT(2) -#define MT_TXD6_AMSDU_CAP BIT(1) - -#define MT_TXD7_TXD_LEN GENMASK(31, 30) -#define MT_TXD7_IP_SUM BIT(29) -#define MT_TXD7_DROP_BY_SDO BIT(28) -#define MT_TXD7_MAC_TXD BIT(27) -#define MT_TXD7_CTXD BIT(26) -#define MT_TXD7_CTXD_CNT GENMASK(25, 22) -#define MT_TXD7_UDP_TCP_SUM BIT(15) -#define MT_TXD7_TX_TIME GENMASK(9, 0) - -#define MT_TX_RATE_STBC BIT(14) -#define MT_TX_RATE_NSS GENMASK(13, 
10) -#define MT_TX_RATE_MODE GENMASK(9, 6) -#define MT_TX_RATE_SU_EXT_TONE BIT(5) -#define MT_TX_RATE_DCM BIT(4) -/* VHT/HE only use bits 0-3 */ -#define MT_TX_RATE_IDX GENMASK(5, 0) - -#define MT_TXFREE0_PKT_TYPE GENMASK(31, 27) -#define MT_TXFREE0_MSDU_CNT GENMASK(25, 16) -#define MT_TXFREE0_RX_BYTE GENMASK(15, 0) - -#define MT_TXFREE1_VER GENMASK(18, 16) - -#define MT_TXFREE_INFO_PAIR BIT(31) -#define MT_TXFREE_INFO_HEADER BIT(30) -#define MT_TXFREE_INFO_WLAN_ID GENMASK(23, 12) -#define MT_TXFREE_INFO_MSDU_ID GENMASK(14, 0) - -#define MT_TXS0_BW GENMASK(31, 29) -#define MT_TXS0_TID GENMASK(28, 26) -#define MT_TXS0_AMPDU BIT(25) -#define MT_TXS0_TXS_FORMAT GENMASK(24, 23) -#define MT_TXS0_BA_ERROR BIT(22) -#define MT_TXS0_PS_FLAG BIT(21) -#define MT_TXS0_TXOP_TIMEOUT BIT(20) -#define MT_TXS0_BIP_ERROR BIT(19) - -#define MT_TXS0_QUEUE_TIMEOUT BIT(18) -#define MT_TXS0_RTS_TIMEOUT BIT(17) -#define MT_TXS0_ACK_TIMEOUT BIT(16) -#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16) - -#define MT_TXS0_TX_STATUS_HOST BIT(15) -#define MT_TXS0_TX_STATUS_MCU BIT(14) -#define MT_TXS0_TX_RATE GENMASK(13, 0) - -#define MT_TXS1_SEQNO GENMASK(31, 20) -#define MT_TXS1_RESP_RATE GENMASK(19, 16) -#define MT_TXS1_RXV_SEQNO GENMASK(15, 8) -#define MT_TXS1_TX_POWER_DBM GENMASK(7, 0) - -#define MT_TXS2_BF_STATUS GENMASK(31, 30) -#define MT_TXS2_BAND GENMASK(29, 28) -#define MT_TXS2_WCID GENMASK(27, 16) -#define MT_TXS2_TX_DELAY GENMASK(15, 0) - -#define MT_TXS3_PID GENMASK(31, 24) -#define MT_TXS3_RATE_STBC BIT(7) -#define MT_TXS3_FIXED_RATE BIT(6) -#define MT_TXS3_SRC GENMASK(5, 4) -#define MT_TXS3_SHARED_ANTENNA BIT(3) -#define MT_TXS3_LAST_TX_RATE GENMASK(2, 0) - -#define MT_TXS4_TIMESTAMP GENMASK(31, 0) - -#define MT_TXS5_F0_FINAL_MPDU BIT(31) -#define MT_TXS5_F0_QOS BIT(30) -#define MT_TXS5_F0_TX_COUNT GENMASK(29, 25) -#define MT_TXS5_F0_FRONT_TIME GENMASK(24, 0) -#define MT_TXS5_F1_MPDU_TX_COUNT GENMASK(31, 24) -#define MT_TXS5_F1_MPDU_TX_BYTES GENMASK(23, 0) - -#define MT_TXS6_F0_NOISE_3 GENMASK(31, 24) -#define MT_TXS6_F0_NOISE_2 GENMASK(23, 16) -#define MT_TXS6_F0_NOISE_1 GENMASK(15, 8) -#define MT_TXS6_F0_NOISE_0 GENMASK(7, 0) -#define MT_TXS6_F1_MPDU_FAIL_COUNT GENMASK(31, 24) -#define MT_TXS6_F1_MPDU_FAIL_BYTES GENMASK(23, 0) - -#define MT_TXS7_F0_RCPI_3 GENMASK(31, 24) -#define MT_TXS7_F0_RCPI_2 GENMASK(23, 16) -#define MT_TXS7_F0_RCPI_1 GENMASK(15, 8) -#define MT_TXS7_F0_RCPI_0 GENMASK(7, 0) -#define MT_TXS7_F1_MPDU_RETRY_COUNT GENMASK(31, 24) -#define MT_TXS7_F1_MPDU_RETRY_BYTES GENMASK(23, 0) +#include "../mt76_connac3_mac.h" struct mt7996_dfs_pulse { u32 max_width; /* us */ diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c index f306e9c50ea3..09c7a28a3d51 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c @@ -43,6 +43,10 @@ int mt7996_run(struct ieee80211_hw *hw) if (ret) goto out; + ret = mt7996_mcu_set_radio_en(phy, true); + if (ret) + goto out; + ret = mt7996_mcu_set_chan_info(phy, UNI_CHANNEL_RX_PATH); if (ret) goto out; @@ -82,6 +86,8 @@ static void mt7996_stop(struct ieee80211_hw *hw) mutex_lock(&dev->mt76.mutex); + mt7996_mcu_set_radio_en(phy, false); + clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); mutex_unlock(&dev->mt76.mutex); @@ -184,28 +190,24 @@ static int mt7996_add_interface(struct ieee80211_hw *hw, mvif->mt76.omac_idx = idx; mvif->phy = phy; mvif->mt76.band_idx = band_idx; - mvif->mt76.wmm_idx = band_idx; + mvif->mt76.wmm_idx = vif->type != 
NL80211_IFTYPE_AP; ret = mt7996_mcu_add_dev_info(phy, vif, true); if (ret) goto out; - ret = mt7996_mcu_set_radio_en(phy, true); - if (ret) - goto out; - dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx); phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx); idx = MT7996_WTBL_RESERVED - mvif->mt76.idx; INIT_LIST_HEAD(&mvif->sta.rc_list); - INIT_LIST_HEAD(&mvif->sta.poll_list); + INIT_LIST_HEAD(&mvif->sta.wcid.poll_list); mvif->sta.wcid.idx = idx; mvif->sta.wcid.phy_idx = band_idx; mvif->sta.wcid.hw_key_idx = -1; mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET; - mt76_packet_id_init(&mvif->sta.wcid); + mt76_wcid_init(&mvif->sta.wcid); mt7996_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); @@ -221,9 +223,9 @@ static int mt7996_add_interface(struct ieee80211_hw *hw, vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR; if (phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ) - mvif->basic_rates_idx = MT7996_BASIC_RATES_TBL + 4; + mvif->mt76.basic_rates_idx = MT7996_BASIC_RATES_TBL + 4; else - mvif->basic_rates_idx = MT7996_BASIC_RATES_TBL; + mvif->mt76.basic_rates_idx = MT7996_BASIC_RATES_TBL; mt7996_init_bitrate_mask(vif); @@ -246,14 +248,13 @@ static void mt7996_remove_interface(struct ieee80211_hw *hw, struct mt7996_phy *phy = mt7996_hw_phy(hw); int idx = msta->wcid.idx; - mt7996_mcu_add_bss_info(phy, vif, false); mt7996_mcu_add_sta(dev, vif, NULL, false); + mt7996_mcu_add_bss_info(phy, vif, false); if (vif == phy->monitor_vif) phy->monitor_vif = NULL; mt7996_mcu_add_dev_info(phy, vif, false); - mt7996_mcu_set_radio_en(phy, false); rcu_assign_pointer(dev->mt76.wcid[idx], NULL); @@ -262,12 +263,12 @@ static void mt7996_remove_interface(struct ieee80211_hw *hw, phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx); mutex_unlock(&dev->mt76.mutex); - spin_lock_bh(&dev->sta_poll_lock); - if (!list_empty(&msta->poll_list)) - list_del_init(&msta->poll_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); + if (!list_empty(&msta->wcid.poll_list)) + list_del_init(&msta->wcid.poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); - mt76_packet_id_flush(&dev->mt76, &msta->wcid); + mt76_wcid_cleanup(&dev->mt76, &msta->wcid); } int mt7996_set_channel(struct mt7996_phy *phy) @@ -286,7 +287,6 @@ int mt7996_set_channel(struct mt7996_phy *phy) if (ret) goto out; - mt7996_mac_set_timing(phy); ret = mt7996_dfs_init_radar_detector(phy); mt7996_mac_cca_stats_reset(phy); @@ -414,10 +414,16 @@ mt7996_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct ieee80211_tx_queue_params *params) { struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; + const u8 mq_to_aci[] = { + [IEEE80211_AC_VO] = 3, + [IEEE80211_AC_VI] = 2, + [IEEE80211_AC_BE] = 0, + [IEEE80211_AC_BK] = 1, + }; + /* firmware uses access class index */ + mvif->queue_params[mq_to_aci[queue]] = *params; /* no need to update right away, we'll get BSS_CHANGED_QOS */ - queue = mt76_connac_lmac_mapping(queue); - mvif->queue_params[queue] = *params; return 0; } @@ -505,7 +511,7 @@ static u8 mt7996_get_rates_table(struct ieee80211_hw *hw, struct ieee80211_vif *vif, bool beacon, bool mcast) { - struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; struct mt76_phy *mphy = hw->priv; u16 rate; u8 i, idx, ht; @@ -517,7 +523,7 @@ mt7996_get_rates_table(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct mt7996_dev *dev = mt7996_hw_dev(hw); /* must odd index */ - idx = MT7996_BEACON_RATES_TBL + 2 * (mvif->mt76.idx % 20); + idx = 
MT7996_BEACON_RATES_TBL + 2 * (mvif->idx % 20); mt7996_mac_set_fixed_rate_table(dev, idx, rate); return idx; } @@ -530,12 +536,32 @@ mt7996_get_rates_table(struct ieee80211_hw *hw, struct ieee80211_vif *vif, return mvif->basic_rates_idx; } +static void +mt7996_update_mu_group(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info) +{ + struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; + struct mt7996_dev *dev = mt7996_hw_dev(hw); + u8 band = mvif->mt76.band_idx; + u32 *mu; + + mu = (u32 *)info->mu_group.membership; + mt76_wr(dev, MT_WF_PHYRX_BAND_GID_TAB_VLD0(band), mu[0]); + mt76_wr(dev, MT_WF_PHYRX_BAND_GID_TAB_VLD1(band), mu[1]); + + mu = (u32 *)info->mu_group.position; + mt76_wr(dev, MT_WF_PHYRX_BAND_GID_TAB_POS0(band), mu[0]); + mt76_wr(dev, MT_WF_PHYRX_BAND_GID_TAB_POS1(band), mu[1]); + mt76_wr(dev, MT_WF_PHYRX_BAND_GID_TAB_POS2(band), mu[2]); + mt76_wr(dev, MT_WF_PHYRX_BAND_GID_TAB_POS3(band), mu[3]); +} + static void mt7996_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u64 changed) { - struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; struct mt7996_phy *phy = mt7996_hw_phy(hw); struct mt7996_dev *dev = mt7996_hw_dev(hw); @@ -544,17 +570,13 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw, /* station mode uses BSSID to map the wlan entry to a peer, * and then peer references bss_info_rfch to set bandwidth cap. */ - if (changed & BSS_CHANGED_BSSID && - vif->type == NL80211_IFTYPE_STATION) { - bool join = !is_zero_ether_addr(info->bssid); - - mt7996_mcu_add_bss_info(phy, vif, join); - mt7996_mcu_add_sta(dev, vif, NULL, join); + if ((changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) || + (changed & BSS_CHANGED_ASSOC && vif->cfg.assoc) || + (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon)) { + mt7996_mcu_add_bss_info(phy, vif, true); + mt7996_mcu_add_sta(dev, vif, NULL, true); } - if (changed & BSS_CHANGED_ASSOC) - mt7996_mcu_add_bss_info(phy, vif, vif->cfg.assoc); - if (changed & BSS_CHANGED_ERP_CTS_PROT) mt7996_mac_enable_rtscts(dev, vif, info->use_cts_prot); @@ -563,7 +585,7 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw, if (slottime != phy->slottime) { phy->slottime = slottime; - mt7996_mac_set_timing(phy); + mt7996_mcu_set_timing(phy, vif); } } @@ -575,11 +597,6 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw, mvif->basic_rates_idx = mt7996_get_rates_table(hw, vif, false, false); - if (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon) { - mt7996_mcu_add_bss_info(phy, vif, true); - mt7996_mcu_add_sta(dev, vif, NULL, true); - } - /* ensure that enable txcmd_mode after bss_info */ if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED)) mt7996_mcu_set_tx(dev, vif); @@ -598,10 +615,13 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw, mt7996_mcu_add_beacon(hw, vif, info->enable_beacon); } - if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP || - changed & BSS_CHANGED_FILS_DISCOVERY) + if (changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP | + BSS_CHANGED_FILS_DISCOVERY)) mt7996_mcu_beacon_inband_discov(dev, vif, changed); + if (changed & BSS_CHANGED_MU_GROUPS) + mt7996_update_mu_group(hw, vif, info); + mutex_unlock(&dev->mt76.mutex); } @@ -631,13 +651,12 @@ int mt7996_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, return -ENOSPC; INIT_LIST_HEAD(&msta->rc_list); - INIT_LIST_HEAD(&msta->poll_list); + 
INIT_LIST_HEAD(&msta->wcid.poll_list); msta->vif = mvif; msta->wcid.sta = 1; msta->wcid.idx = idx; msta->wcid.phy_idx = band_idx; msta->wcid.tx_info |= MT_WCID_TX_INFO_SET; - msta->jiffies = jiffies; ewma_avg_signal_init(&msta->avg_ack_signal); @@ -666,12 +685,12 @@ void mt7996_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++) mt7996_mac_twt_teardown_flow(dev, msta, i); - spin_lock_bh(&dev->sta_poll_lock); - if (!list_empty(&msta->poll_list)) - list_del_init(&msta->poll_list); + spin_lock_bh(&mdev->sta_poll_lock); + if (!list_empty(&msta->wcid.poll_list)) + list_del_init(&msta->wcid.poll_list); if (!list_empty(&msta->rc_list)) list_del_init(&msta->rc_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_unlock_bh(&mdev->sta_poll_lock); } static void mt7996_tx(struct ieee80211_hw *hw, @@ -751,16 +770,16 @@ mt7996_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: mtxq->aggr = false; - clear_bit(tid, &msta->ampdu_state); + clear_bit(tid, &msta->wcid.ampdu_state); ret = mt7996_mcu_add_tx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_START: - set_bit(tid, &msta->ampdu_state); + set_bit(tid, &msta->wcid.ampdu_state); ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; break; case IEEE80211_AMPDU_TX_STOP_CONT: mtxq->aggr = false; - clear_bit(tid, &msta->ampdu_state); + clear_bit(tid, &msta->wcid.ampdu_state); ret = mt7996_mcu_add_tx_ba(dev, params, false); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; @@ -792,7 +811,7 @@ mt7996_get_stats(struct ieee80211_hw *hw, { struct mt7996_phy *phy = mt7996_hw_phy(hw); struct mt7996_dev *dev = mt7996_hw_dev(hw); - struct mib_stats *mib = &phy->mib; + struct mt76_mib_stats *mib = &phy->mib; mutex_lock(&dev->mt76.mutex); @@ -903,7 +922,7 @@ mt7996_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) mutex_lock(&dev->mt76.mutex); phy->coverage_class = max_t(s16, coverage_class, 0); - mt7996_mac_set_timing(phy); + mt7996_mac_set_coverage_class(phy); mutex_unlock(&dev->mt76.mutex); } @@ -949,30 +968,52 @@ static void mt7996_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct station_info *sinfo) { + struct mt7996_phy *phy = mt7996_hw_phy(hw); struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; struct rate_info *txrate = &msta->wcid.rate; - if (!txrate->legacy && !txrate->flags) - return; - - if (txrate->legacy) { - sinfo->txrate.legacy = txrate->legacy; - } else { - sinfo->txrate.mcs = txrate->mcs; - sinfo->txrate.nss = txrate->nss; - sinfo->txrate.bw = txrate->bw; - sinfo->txrate.he_gi = txrate->he_gi; - sinfo->txrate.he_dcm = txrate->he_dcm; - sinfo->txrate.he_ru_alloc = txrate->he_ru_alloc; + if (txrate->legacy || txrate->flags) { + if (txrate->legacy) { + sinfo->txrate.legacy = txrate->legacy; + } else { + sinfo->txrate.mcs = txrate->mcs; + sinfo->txrate.nss = txrate->nss; + sinfo->txrate.bw = txrate->bw; + sinfo->txrate.he_gi = txrate->he_gi; + sinfo->txrate.he_dcm = txrate->he_dcm; + sinfo->txrate.he_ru_alloc = txrate->he_ru_alloc; + } + sinfo->txrate.flags = txrate->flags; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); } sinfo->txrate.flags = txrate->flags; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); + sinfo->tx_failed = msta->wcid.stats.tx_failed; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); + + sinfo->tx_retries = msta->wcid.stats.tx_retries; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); + 
sinfo->ack_signal = (s8)msta->ack_signal; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL); sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->avg_ack_signal); sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG); + + if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) { + sinfo->tx_bytes = msta->wcid.stats.tx_bytes; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64); + + sinfo->rx_bytes = msta->wcid.stats.rx_bytes; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64); + + sinfo->tx_packets = msta->wcid.stats.tx_packets; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); + + sinfo->rx_packets = msta->wcid.stats.rx_packets; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS); + } } static void mt7996_sta_rc_work(void *data, struct ieee80211_sta *sta) @@ -981,11 +1022,11 @@ static void mt7996_sta_rc_work(void *data, struct ieee80211_sta *sta) struct mt7996_dev *dev = msta->vif->phy->dev; u32 *changed = data; - spin_lock_bh(&dev->sta_poll_lock); + spin_lock_bh(&dev->mt76.sta_poll_lock); msta->changed |= *changed; if (list_empty(&msta->rc_list)) list_add_tail(&msta->rc_list, &dev->sta_rc_list); - spin_unlock_bh(&dev->sta_poll_lock); + spin_unlock_bh(&dev->mt76.sta_poll_lock); } static void mt7996_sta_rc_update(struct ieee80211_hw *hw, @@ -1153,6 +1194,10 @@ static const char mt7996_gstrings_stats[][ETH_GSTRING_LEN] = { "v_tx_mcs_11", "v_tx_mcs_12", "v_tx_mcs_13", + "v_tx_nss_1", + "v_tx_nss_2", + "v_tx_nss_3", + "v_tx_nss_4", }; #define MT7996_SSTATS_LEN ARRAY_SIZE(mt7996_gstrings_stats) @@ -1164,7 +1209,7 @@ void mt7996_get_et_strings(struct ieee80211_hw *hw, u32 sset, u8 *data) { if (sset == ETH_SS_STATS) - memcpy(data, *mt7996_gstrings_stats, + memcpy(data, mt7996_gstrings_stats, sizeof(mt7996_gstrings_stats)); } @@ -1186,7 +1231,7 @@ static void mt7996_ethtool_worker(void *wi_data, struct ieee80211_sta *sta) if (msta->vif->mt76.idx != wi->idx) return; - mt76_ethtool_worker(wi, &msta->stats, true); + mt76_ethtool_worker(wi, &msta->wcid.stats, true); } static @@ -1197,11 +1242,11 @@ void mt7996_get_et_stats(struct ieee80211_hw *hw, struct mt7996_dev *dev = mt7996_hw_dev(hw); struct mt7996_phy *phy = mt7996_hw_phy(hw); struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; + struct mt76_mib_stats *mib = &phy->mib; struct mt76_ethtool_worker_info wi = { .data = data, .idx = mvif->mt76.idx, }; - struct mib_stats *mib = &phy->mib; /* See mt7996_ampdu_stat_read_phy, etc */ int i, ei = 0; diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c index 88e2f9d0e513..bf917beb9439 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c @@ -324,8 +324,10 @@ int mt7996_mcu_wa_cmd(struct mt7996_dev *dev, int cmd, u32 a1, u32 a2, u32 a3) static void mt7996_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) { - if (vif->bss_conf.csa_active) - ieee80211_csa_finish(vif); + if (!vif->bss_conf.csa_active || vif->type == NL80211_IFTYPE_STATION) + return; + + ieee80211_csa_finish(vif); } static void @@ -339,7 +341,11 @@ mt7996_mcu_rx_radar_detected(struct mt7996_dev *dev, struct sk_buff *skb) if (r->band_idx >= ARRAY_SIZE(dev->mt76.phys)) return; - mphy = dev->mt76.phys[r->band_idx]; + if (dev->rdd2_phy && r->band_idx == MT_RX_SEL2) + mphy = dev->rdd2_phy->mt76; + else + mphy = dev->mt76.phys[r->band_idx]; + if (!mphy) return; @@ -395,7 +401,7 @@ out: static void mt7996_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) { - if 
(!vif->bss_conf.color_change_active) + if (!vif->bss_conf.color_change_active || vif->type == NL80211_IFTYPE_STATION) return; ieee80211_color_change_finish(vif); @@ -444,6 +450,54 @@ mt7996_mcu_ie_countdown(struct mt7996_dev *dev, struct sk_buff *skb) } static void +mt7996_mcu_rx_all_sta_info_event(struct mt7996_dev *dev, struct sk_buff *skb) +{ + struct mt7996_mcu_all_sta_info_event *res; + u16 i; + + skb_pull(skb, sizeof(struct mt7996_mcu_rxd)); + + res = (struct mt7996_mcu_all_sta_info_event *)skb->data; + + for (i = 0; i < le16_to_cpu(res->sta_num); i++) { + u8 ac; + u16 wlan_idx; + struct mt76_wcid *wcid; + + switch (le16_to_cpu(res->tag)) { + case UNI_ALL_STA_TXRX_ADM_STAT: + wlan_idx = le16_to_cpu(res->adm_stat[i].wlan_idx); + wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]); + + if (!wcid) + break; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + wcid->stats.tx_bytes += + le32_to_cpu(res->adm_stat[i].tx_bytes[ac]); + wcid->stats.rx_bytes += + le32_to_cpu(res->adm_stat[i].rx_bytes[ac]); + } + break; + case UNI_ALL_STA_TXRX_MSDU_COUNT: + wlan_idx = le16_to_cpu(res->msdu_cnt[i].wlan_idx); + wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]); + + if (!wcid) + break; + + wcid->stats.tx_packets += + le32_to_cpu(res->msdu_cnt[i].tx_msdu_cnt); + wcid->stats.rx_packets += + le32_to_cpu(res->msdu_cnt[i].rx_msdu_cnt); + break; + default: + break; + } + } +} + +static void mt7996_mcu_rx_ext_event(struct mt7996_dev *dev, struct sk_buff *skb) { struct mt7996_mcu_rxd *rxd = (struct mt7996_mcu_rxd *)skb->data; @@ -487,6 +541,9 @@ mt7996_mcu_uni_rx_unsolicited_event(struct mt7996_dev *dev, struct sk_buff *skb) case MCU_UNI_EVENT_RDD_REPORT: mt7996_mcu_rx_radar_detected(dev, skb); break; + case MCU_UNI_EVENT_ALL_STA_INFO: + mt7996_mcu_rx_all_sta_info_event(dev, skb); + break; default: break; } @@ -597,10 +654,28 @@ mt7996_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, } static void +mt7996_mcu_bss_mbssid_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, + struct mt7996_phy *phy, int enable) +{ + struct bss_info_uni_mbssid *mbssid; + struct tlv *tlv; + + tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_11V_MBSSID, sizeof(*mbssid)); + + mbssid = (struct bss_info_uni_mbssid *)tlv; + + if (enable && vif->bss_conf.bssid_indicator) { + mbssid->max_indicator = vif->bss_conf.bssid_indicator; + mbssid->mbss_idx = vif->bss_conf.bssid_index; + mbssid->tx_bss_omac_idx = 0; + } +} + +static void mt7996_mcu_bss_bmc_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, struct mt7996_phy *phy) { - struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; struct bss_rate_tlv *bmc; struct cfg80211_chan_def *chandef = &phy->mt76->chandef; enum nl80211_band band = chandef->chan->band; @@ -701,6 +776,34 @@ mt7996_mcu_muar_config(struct mt7996_phy *phy, struct ieee80211_vif *vif, sizeof(req), true); } +static void +mt7996_mcu_bss_ifs_timing_tlv(struct sk_buff *skb, struct ieee80211_vif *vif) +{ + struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; + struct mt7996_phy *phy = mvif->phy; + struct bss_ifs_time_tlv *ifs_time; + struct tlv *tlv; + bool is_2ghz = phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ; + + tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_IFS_TIME, sizeof(*ifs_time)); + + ifs_time = (struct bss_ifs_time_tlv *)tlv; + ifs_time->slot_valid = true; + ifs_time->sifs_valid = true; + ifs_time->rifs_valid = true; + ifs_time->eifs_valid = true; + + ifs_time->slot_time = cpu_to_le16(phy->slottime); + ifs_time->sifs_time = 
cpu_to_le16(10); + ifs_time->rifs_time = cpu_to_le16(2); + ifs_time->eifs_time = cpu_to_le16(is_2ghz ? 78 : 84); + + if (is_2ghz) { + ifs_time->eifs_cck_valid = true; + ifs_time->eifs_cck_time = cpu_to_le16(314); + } +} + static int mt7996_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, @@ -712,6 +815,7 @@ mt7996_mcu_bss_basic_tlv(struct sk_buff *skb, struct cfg80211_chan_def *chandef = &phy->chandef; struct mt76_connac_bss_basic_tlv *bss; u32 type = CONNECTION_INFRA_AP; + u16 sta_wlan_idx = wlan_idx; struct tlv *tlv; int idx; @@ -731,7 +835,7 @@ mt7996_mcu_bss_basic_tlv(struct sk_buff *skb, struct mt76_wcid *wcid; wcid = (struct mt76_wcid *)sta->drv_priv; - wlan_idx = wcid->idx; + sta_wlan_idx = wcid->idx; } rcu_read_unlock(); } @@ -751,7 +855,7 @@ mt7996_mcu_bss_basic_tlv(struct sk_buff *skb, bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int); bss->dtim_period = vif->bss_conf.dtim_period; bss->bmc_tx_wlan_idx = cpu_to_le16(wlan_idx); - bss->sta_idx = cpu_to_le16(wlan_idx); + bss->sta_idx = cpu_to_le16(sta_wlan_idx); bss->conn_type = cpu_to_le32(type); bss->omac_idx = mvif->omac_idx; bss->band_idx = mvif->band_idx; @@ -825,6 +929,7 @@ int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, mt7996_mcu_bss_bmc_tlv(skb, vif, phy); mt7996_mcu_bss_ra_tlv(skb, vif, phy); mt7996_mcu_bss_txcmd_tlv(skb, true); + mt7996_mcu_bss_ifs_timing_tlv(skb, vif); if (vif->bss_conf.he_support) mt7996_mcu_bss_he_tlv(skb, vif, phy); @@ -832,11 +937,31 @@ int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, /* this tag is necessary no matter if the vif is MLD */ mt7996_mcu_bss_mld_tlv(skb, vif); } + + mt7996_mcu_bss_mbssid_tlv(skb, vif, phy, enable); + out: return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true); } +int mt7996_mcu_set_timing(struct mt7996_phy *phy, struct ieee80211_vif *vif) +{ + struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; + struct mt7996_dev *dev = phy->dev; + struct sk_buff *skb; + + skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76, + MT7996_BSS_UPDATE_MAX_SIZE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + mt7996_mcu_bss_ifs_timing_tlv(skb, vif); + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true); +} + static int mt7996_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif, struct ieee80211_ampdu_params *params, @@ -1050,6 +1175,61 @@ mt7996_mcu_sta_amsdu_tlv(struct mt7996_dev *dev, struct sk_buff *skb, } } +static void +mt7996_mcu_sta_muru_tlv(struct mt7996_dev *dev, struct sk_buff *skb, + struct ieee80211_vif *vif, struct ieee80211_sta *sta) +{ + struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem; + struct sta_rec_muru *muru; + struct tlv *tlv; + + if (vif->type != NL80211_IFTYPE_STATION && + vif->type != NL80211_IFTYPE_AP) + return; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_MURU, sizeof(*muru)); + + muru = (struct sta_rec_muru *)tlv; + muru->cfg.mimo_dl_en = vif->bss_conf.eht_mu_beamformer || + vif->bss_conf.he_mu_beamformer || + vif->bss_conf.vht_mu_beamformer || + vif->bss_conf.vht_mu_beamformee; + muru->cfg.ofdma_dl_en = true; + + if (sta->deflink.vht_cap.vht_supported) + muru->mimo_dl.vht_mu_bfee = + !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE); + + if (!sta->deflink.he_cap.has_he) + return; + + muru->mimo_dl.partial_bw_dl_mimo = + HE_PHY(CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO, elem->phy_cap_info[6]); + + muru->mimo_ul.full_ul_mimo = + HE_PHY(CAP2_UL_MU_FULL_MU_MIMO, elem->phy_cap_info[2]); + muru->mimo_ul.partial_ul_mimo = 
+ HE_PHY(CAP2_UL_MU_PARTIAL_MU_MIMO, elem->phy_cap_info[2]); + + muru->ofdma_dl.punc_pream_rx = + HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]); + muru->ofdma_dl.he_20m_in_40m_2g = + HE_PHY(CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G, elem->phy_cap_info[8]); + muru->ofdma_dl.he_20m_in_160m = + HE_PHY(CAP8_20MHZ_IN_160MHZ_HE_PPDU, elem->phy_cap_info[8]); + muru->ofdma_dl.he_80m_in_160m = + HE_PHY(CAP8_80MHZ_IN_160MHZ_HE_PPDU, elem->phy_cap_info[8]); + + muru->ofdma_ul.t_frame_dur = + HE_MAC(CAP1_TF_MAC_PAD_DUR_MASK, elem->mac_cap_info[1]); + muru->ofdma_ul.mu_cascading = + HE_MAC(CAP2_MU_CASCADING, elem->mac_cap_info[2]); + muru->ofdma_ul.uo_ra = + HE_MAC(CAP3_OFDMA_RA, elem->mac_cap_info[3]); + muru->ofdma_ul.rx_ctrl_frame_to_mbss = + HE_MAC(CAP3_RX_CTRL_FRAME_TO_MULTIBSS, elem->mac_cap_info[3]); +} + static inline bool mt7996_is_ebf_supported(struct mt7996_phy *phy, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool bfee) @@ -1520,6 +1700,132 @@ int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev, MCU_WM_UNI_CMD(RA), true); } +static int +mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, void *data, u32 field) +{ + struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; + struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; + struct sta_phy *phy = data; + struct sta_rec_ra_fixed *ra; + struct sk_buff *skb; + struct tlv *tlv; + + skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, + &msta->wcid, + MT7996_STA_UPDATE_MAX_SIZE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA_UPDATE, sizeof(*ra)); + ra = (struct sta_rec_ra_fixed *)tlv; + + switch (field) { + case RATE_PARAM_AUTO: + break; + case RATE_PARAM_FIXED: + case RATE_PARAM_FIXED_MCS: + case RATE_PARAM_FIXED_GI: + case RATE_PARAM_FIXED_HE_LTF: + if (phy) + ra->phy = *phy; + break; + default: + break; + } + ra->field = cpu_to_le32(field); + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true); +} + +static int +mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; + struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef; + struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask; + enum nl80211_band band = chandef->chan->band; + struct sta_phy phy = {}; + int ret, nrates = 0; + +#define __sta_phy_bitrate_mask_check(_mcs, _gi, _ht, _he) \ + do { \ + u8 i, gi = mask->control[band]._gi; \ + gi = (_he) ? 
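The new muru TLV pulls station capabilities out of packed HE capability bytes with HE_PHY()/HE_MAC(), which reduce to mask-and-shift field extraction. A minimal user-space equivalent; the mask value below is made up, the real ones live in ieee80211.h:

    #include <stdint.h>
    #include <stdio.h>

    /* mask-and-shift extraction, in the spirit of FIELD_GET()/HE_PHY() */
    #define FIELD_GET_U8(mask, val) (((val) & (mask)) / ((mask) & -(mask)))

    int main(void)
    {
        uint8_t phy_cap6 = 0x54;        /* example capability byte */

        /* 0x40 is an illustrative mask, not the real ieee80211.h value */
        printf("partial_bw_dl_mimo = %u\n", FIELD_GET_U8(0x40, phy_cap6));
        return 0;
    }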
gi : gi == NL80211_TXRATE_FORCE_SGI; \ + phy.sgi = gi; \ + phy.he_ltf = mask->control[band].he_ltf; \ + for (i = 0; i < ARRAY_SIZE(mask->control[band]._mcs); i++) { \ + if (!mask->control[band]._mcs[i]) \ + continue; \ + nrates += hweight16(mask->control[band]._mcs[i]); \ + phy.mcs = ffs(mask->control[band]._mcs[i]) - 1; \ + if (_ht) \ + phy.mcs += 8 * i; \ + } \ + } while (0) + + if (sta->deflink.he_cap.has_he) { + __sta_phy_bitrate_mask_check(he_mcs, he_gi, 0, 1); + } else if (sta->deflink.vht_cap.vht_supported) { + __sta_phy_bitrate_mask_check(vht_mcs, gi, 0, 0); + } else if (sta->deflink.ht_cap.ht_supported) { + __sta_phy_bitrate_mask_check(ht_mcs, gi, 1, 0); + } else { + nrates = hweight32(mask->control[band].legacy); + phy.mcs = ffs(mask->control[band].legacy) - 1; + } +#undef __sta_phy_bitrate_mask_check + + /* fall back to auto rate control */ + if (mask->control[band].gi == NL80211_TXRATE_DEFAULT_GI && + mask->control[band].he_gi == GENMASK(7, 0) && + mask->control[band].he_ltf == GENMASK(7, 0) && + nrates != 1) + return 0; + + /* fixed single rate */ + if (nrates == 1) { + ret = mt7996_mcu_set_fixed_field(dev, vif, sta, &phy, + RATE_PARAM_FIXED_MCS); + if (ret) + return ret; + } + + /* fixed GI */ + if (mask->control[band].gi != NL80211_TXRATE_DEFAULT_GI || + mask->control[band].he_gi != GENMASK(7, 0)) { + struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; + u32 addr; + + /* firmware updates only TXCMD but doesn't take WTBL into + * account, so driver should update here to reflect the + * actual txrate hardware sends out. + */ + addr = mt7996_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 7); + if (sta->deflink.he_cap.has_he) + mt76_rmw_field(dev, addr, GENMASK(31, 24), phy.sgi); + else + mt76_rmw_field(dev, addr, GENMASK(15, 12), phy.sgi); + + ret = mt7996_mcu_set_fixed_field(dev, vif, sta, &phy, + RATE_PARAM_FIXED_GI); + if (ret) + return ret; + } + + /* fixed HE_LTF */ + if (mask->control[band].he_ltf != GENMASK(7, 0)) { + ret = mt7996_mcu_set_fixed_field(dev, vif, sta, &phy, + RATE_PARAM_FIXED_HE_LTF); + if (ret) + return ret; + } + + return 0; +} + static void mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta) @@ -1629,6 +1935,7 @@ int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif, struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; struct sk_buff *skb; + int ret; skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, &msta->wcid, @@ -1648,8 +1955,12 @@ int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif, */ mt7996_mcu_sta_rate_ctrl_tlv(skb, dev, vif, sta); - return mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true); + ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true); + if (ret) + return ret; + + return mt7996_mcu_add_rate_ctrl_fixed(dev, vif, sta); } static int @@ -1727,7 +2038,8 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif, mt7996_mcu_sta_he_6g_tlv(skb, sta); /* starec eht */ mt7996_mcu_sta_eht_tlv(skb, sta); - /* TODO: starec muru */ + /* starec muru */ + mt7996_mcu_sta_muru_tlv(dev, skb, vif, sta); /* starec bfee */ mt7996_mcu_sta_bfee_tlv(dev, skb, vif, sta); /* starec hdr trans */ @@ -1891,6 +2203,59 @@ mt7996_mcu_beacon_cntdwn(struct ieee80211_vif *vif, struct sk_buff *rskb, } static void +mt7996_mcu_beacon_mbss(struct sk_buff *rskb, struct sk_buff *skb, + 
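mt7996_mcu_add_rate_ctrl_fixed() decides between automatic and fixed rate by counting how many rates survive the configured bitrate mask: exactly one surviving bit across the per-stream MCS masks yields a fixed MCS. The same scan, rendered as a self-contained program with plain arrays standing in for cfg80211_bitrate_mask:

    #include <stdint.h>
    #include <stdio.h>
    #include <strings.h> /* ffs() */

    int main(void)
    {
        /* per-stream HT MCS masks; only MCS 11 (stream 1, bit 3) is set */
        uint16_t ht_mcs[4] = { 0x0000, 0x0008, 0x0000, 0x0000 };
        int i, nrates = 0, mcs = -1;

        for (i = 0; i < 4; i++) {
            if (!ht_mcs[i])
                continue;
            nrates += __builtin_popcount(ht_mcs[i]); /* hweight16() */
            mcs = ffs(ht_mcs[i]) - 1 + 8 * i;        /* 8 MCS per HT stream */
        }

        if (nrates == 1)
            printf("fixed rate, MCS %d\n", mcs); /* MCS 11 */
        else
            printf("auto rate (%d rates)\n", nrates);
        return 0;
    }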
struct ieee80211_vif *vif, struct bss_bcn_content_tlv *bcn, + struct ieee80211_mutable_offsets *offs) +{ + struct bss_bcn_mbss_tlv *mbss; + const struct element *elem; + struct tlv *tlv; + + if (!vif->bss_conf.bssid_indicator) + return; + + tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_BCN_MBSSID, sizeof(*mbss)); + + mbss = (struct bss_bcn_mbss_tlv *)tlv; + mbss->offset[0] = cpu_to_le16(offs->tim_offset); + mbss->bitmap = cpu_to_le32(1); + + for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, + &skb->data[offs->mbssid_off], + skb->len - offs->mbssid_off) { + const struct element *sub_elem; + + if (elem->datalen < 2) + continue; + + for_each_element(sub_elem, elem->data + 1, elem->datalen - 1) { + const struct ieee80211_bssid_index *idx; + const u8 *idx_ie; + + /* not a valid BSS profile */ + if (sub_elem->id || sub_elem->datalen < 4) + continue; + + /* Find WLAN_EID_MULTI_BSSID_IDX + * in the merged nontransmitted profile + */ + idx_ie = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX, + sub_elem->data, sub_elem->datalen); + if (!idx_ie || idx_ie[1] < sizeof(*idx)) + continue; + + idx = (void *)(idx_ie + 2); + if (!idx->bssid_index || idx->bssid_index > 31) + continue; + + mbss->offset[idx->bssid_index] = cpu_to_le16(idx_ie - + skb->data); + mbss->bitmap |= cpu_to_le32(BIT(idx->bssid_index)); + } + } +} + +static void mt7996_mcu_beacon_cont(struct mt7996_dev *dev, struct ieee80211_vif *vif, struct sk_buff *rskb, struct sk_buff *skb, struct bss_bcn_content_tlv *bcn, @@ -1911,7 +2276,7 @@ mt7996_mcu_beacon_cont(struct mt7996_dev *dev, struct ieee80211_vif *vif, bcn->bcc_ie_pos = cpu_to_le16(offset - 3); } - buf = (u8 *)bcn + sizeof(*bcn) - MAX_BEACON_SIZE; + buf = (u8 *)bcn + sizeof(*bcn); mt7996_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, NULL, 0, 0, BSS_CHANGED_BEACON); @@ -1929,26 +2294,25 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, struct sk_buff *skb, *rskb; struct tlv *tlv; struct bss_bcn_content_tlv *bcn; + int len; + + if (vif->bss_conf.nontransmitted) + return 0; rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76, - MT7996_BEACON_UPDATE_SIZE); + MT7996_MAX_BSS_OFFLOAD_SIZE); if (IS_ERR(rskb)) return PTR_ERR(rskb); - tlv = mt7996_mcu_add_uni_tlv(rskb, - UNI_BSS_INFO_BCN_CONTENT, sizeof(*bcn)); - bcn = (struct bss_bcn_content_tlv *)tlv; - bcn->enable = en; - - if (!en) - goto out; - skb = ieee80211_beacon_get_template(hw, vif, &offs, 0); - if (!skb) + if (!skb) { + dev_kfree_skb(rskb); return -EINVAL; + } - if (skb->len > MAX_BEACON_SIZE - MT_TXD_SIZE) { + if (skb->len > MT7996_MAX_BEACON_SIZE) { dev_err(dev->mt76.dev, "Bcn size limit exceed\n"); + dev_kfree_skb(rskb); dev_kfree_skb(skb); return -EINVAL; } @@ -1956,11 +2320,18 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, info = IEEE80211_SKB_CB(skb); info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx); + len = sizeof(*bcn) + MT_TXD_SIZE + skb->len; + tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_BCN_CONTENT, len); + bcn = (struct bss_bcn_content_tlv *)tlv; + bcn->enable = en; + if (!en) + goto out; + mt7996_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs); - /* TODO: subtag - 11v MBSSID */ + mt7996_mcu_beacon_mbss(rskb, skb, vif, bcn, &offs); mt7996_mcu_beacon_cntdwn(vif, rskb, skb, &offs); - dev_kfree_skb(skb); out: + dev_kfree_skb(skb); return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb, MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true); } @@ -1981,9 +2352,13 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev, struct sk_buff *rskb, *skb = NULL; struct tlv *tlv; u8 *buf, interval; + int 
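mt7996_mcu_beacon_mbss() above walks the beacon's Multiple BSSID elements and records, per nontransmitted BSS, the offset of its BSSID Index sub-element. 802.11 information elements are (id, length, data) triples and sub-elements reuse the same encoding, so the walk is a short cursor loop; the buffer below is invented:

    #include <stdint.h>
    #include <stdio.h>

    static void walk_elements(const uint8_t *buf, int len)
    {
        int off = 0;

        /* each element: 1-byte id, 1-byte length, then data */
        while (off + 2 <= len && off + 2 + buf[off + 1] <= len) {
            printf("element id %u, %u byte(s) at offset %d\n",
                   buf[off], buf[off + 1], off);
            off += 2 + buf[off + 1];
        }
    }

    int main(void)
    {
        /* element 71 (Multiple BSSID) with a dummy body, then an SSID */
        const uint8_t ies[] = { 71, 3, 0x02, 0xaa, 0xbb, 0, 2, 'a', 'p' };

        walk_elements(ies, sizeof(ies));
        return 0;
    }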
len; + + if (vif->bss_conf.nontransmitted) + return 0; rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76, - MT7996_INBAND_FRAME_SIZE); + MT7996_MAX_BSS_OFFLOAD_SIZE); if (IS_ERR(rskb)) return PTR_ERR(rskb); @@ -1997,11 +2372,14 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev, skb = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif); } - if (!skb) + if (!skb) { + dev_kfree_skb(rskb); return -EINVAL; + } - if (skb->len > MAX_INBAND_FRAME_SIZE - MT_TXD_SIZE) { + if (skb->len > MT7996_MAX_BEACON_SIZE) { dev_err(dev->mt76.dev, "inband discovery size limit exceed\n"); + dev_kfree_skb(rskb); dev_kfree_skb(skb); return -EINVAL; } @@ -2011,7 +2389,9 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev, info->band = band; info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx); - tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_OFFLOAD, sizeof(*discov)); + len = sizeof(*discov) + MT_TXD_SIZE + skb->len; + + tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_OFFLOAD, len); discov = (struct bss_inband_discovery_tlv *)tlv; discov->tx_mode = OFFLOAD_TX_MODE_SU; @@ -2022,7 +2402,7 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev, discov->enable = true; discov->wcid = cpu_to_le16(MT7996_WTBL_RESERVED); - buf = (u8 *)tlv + sizeof(*discov) - MAX_INBAND_FRAME_SIZE; + buf = (u8 *)tlv + sizeof(*discov); mt7996_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, NULL, 0, 0, changed); @@ -2155,7 +2535,7 @@ out: static int mt7996_mcu_send_ram_firmware(struct mt7996_dev *dev, const struct mt7996_fw_trailer *hdr, - const u8 *data, bool is_wa) + const u8 *data, enum mt7996_ram_type type) { int i, offset = 0; u32 override = 0, option = 0; @@ -2167,8 +2547,10 @@ mt7996_mcu_send_ram_firmware(struct mt7996_dev *dev, region = (const struct mt7996_fw_region *)((const u8 *)hdr - (hdr->n_region - i) * sizeof(*region)); + /* DSP and WA use same mode */ mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76, - region->feature_set, is_wa); + region->feature_set, + type != MT7996_RAM_TYPE_WM); len = le32_to_cpu(region->len); addr = le32_to_cpu(region->addr); @@ -2195,42 +2577,22 @@ mt7996_mcu_send_ram_firmware(struct mt7996_dev *dev, if (override) option |= FW_START_OVERRIDE; - if (is_wa) + if (type == MT7996_RAM_TYPE_WA) option |= FW_START_WORKING_PDA_CR4; + else if (type == MT7996_RAM_TYPE_DSP) + option |= FW_START_WORKING_PDA_DSP; return mt76_connac_mcu_start_firmware(&dev->mt76, override, option); } -static int mt7996_load_ram(struct mt7996_dev *dev) +static int __mt7996_load_ram(struct mt7996_dev *dev, const char *fw_type, + const char *fw_file, enum mt7996_ram_type ram_type) { const struct mt7996_fw_trailer *hdr; const struct firmware *fw; int ret; - ret = request_firmware(&fw, MT7996_FIRMWARE_WM, dev->mt76.dev); - if (ret) - return ret; - - if (!fw || !fw->data || fw->size < sizeof(*hdr)) { - dev_err(dev->mt76.dev, "Invalid firmware\n"); - ret = -EINVAL; - goto out; - } - - hdr = (const struct mt7996_fw_trailer *)(fw->data + fw->size - sizeof(*hdr)); - - dev_info(dev->mt76.dev, "WM Firmware Version: %.10s, Build Time: %.15s\n", - hdr->fw_ver, hdr->build_date); - - ret = mt7996_mcu_send_ram_firmware(dev, hdr, fw->data, false); - if (ret) { - dev_err(dev->mt76.dev, "Failed to start WM firmware\n"); - goto out; - } - - release_firmware(fw); - - ret = request_firmware(&fw, MT7996_FIRMWARE_WA, dev->mt76.dev); + ret = request_firmware(&fw, fw_file, dev->mt76.dev); if (ret) return ret; @@ -2240,14 +2602,13 @@ static int mt7996_load_ram(struct mt7996_dev *dev) goto out; } - hdr = (const 
struct mt7996_fw_trailer *)(fw->data + fw->size - sizeof(*hdr)); - - dev_info(dev->mt76.dev, "WA Firmware Version: %.10s, Build Time: %.15s\n", - hdr->fw_ver, hdr->build_date); + hdr = (const void *)(fw->data + fw->size - sizeof(*hdr)); + dev_info(dev->mt76.dev, "%s Firmware Version: %.10s, Build Time: %.15s\n", + fw_type, hdr->fw_ver, hdr->build_date); - ret = mt7996_mcu_send_ram_firmware(dev, hdr, fw->data, true); + ret = mt7996_mcu_send_ram_firmware(dev, hdr, fw->data, ram_type); if (ret) { - dev_err(dev->mt76.dev, "Failed to start WA firmware\n"); + dev_err(dev->mt76.dev, "Failed to start %s firmware\n", fw_type); goto out; } @@ -2261,6 +2622,24 @@ out: return ret; } +static int mt7996_load_ram(struct mt7996_dev *dev) +{ + int ret; + + ret = __mt7996_load_ram(dev, "WM", MT7996_FIRMWARE_WM, + MT7996_RAM_TYPE_WM); + if (ret) + return ret; + + ret = __mt7996_load_ram(dev, "DSP", MT7996_FIRMWARE_DSP, + MT7996_RAM_TYPE_DSP); + if (ret) + return ret; + + return __mt7996_load_ram(dev, "WA", MT7996_FIRMWARE_WA, + MT7996_RAM_TYPE_WA); +} + static int mt7996_firmware_state(struct mt7996_dev *dev, bool wa) { @@ -2575,7 +2954,7 @@ int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif) e = (struct edca *)tlv; e->set = WMM_PARAM_SET; - e->queue = ac + mvif->mt76.wmm_idx * MT7996_MAX_WMM_SETS; + e->queue = ac; e->aifs = q->aifs; e->txop = cpu_to_le16(q->txop); @@ -2856,10 +3235,10 @@ int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag) .channel_band = ch_band[chandef->chan->band], }; - if (tag == UNI_CHANNEL_RX_PATH || - dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR) + if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR) req.switch_reason = CH_SWITCH_NORMAL; - else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) + else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL || + phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE) req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef, NL80211_IFTYPE_AP)) @@ -3203,8 +3582,8 @@ int mt7996_mcu_set_txbf(struct mt7996_dev *dev, u8 action) tlv = mt7996_mcu_add_uni_tlv(skb, action, sizeof(*req_mod_en)); req_mod_en = (struct bf_mod_en_ctrl *)tlv; - req_mod_en->bf_num = 2; - req_mod_en->bf_bitmap = GENMASK(0, 0); + req_mod_en->bf_num = 3; + req_mod_en->bf_bitmap = GENMASK(2, 0); break; } default: @@ -3444,7 +3823,9 @@ int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev, int cmd) { struct { - u8 _rsv[4]; + /* fixed field */ + u8 bss; + u8 _rsv[3]; __le16 tag; __le16 len; @@ -3462,7 +3843,7 @@ int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev, u8 exponent; u8 is_ap; u8 agrt_params; - u8 __rsv2[135]; + u8 __rsv2[23]; } __packed req = { .tag = cpu_to_le16(UNI_CMD_TWT_ARGT_UPDATE), .len = cpu_to_le16(sizeof(req) - 4), @@ -3472,6 +3853,7 @@ int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev, .flowid = flow->id, .peer_id = cpu_to_le16(flow->wcid), .duration = flow->duration, + .bss = mvif->mt76.idx, .bss_idx = mvif->mt76.idx, .start_tsf = cpu_to_le64(flow->tsf), .mantissa = flow->mantissa, @@ -3682,3 +4064,20 @@ int mt7996_mcu_set_rro(struct mt7996_dev *dev, u16 tag, u8 val) return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(RRO), &req, sizeof(req), true); } + +int mt7996_mcu_get_all_sta_info(struct mt7996_phy *phy, u16 tag) +{ + struct mt7996_dev *dev = phy->dev; + struct { + u8 _rsv[4]; + + __le16 tag; + __le16 len; + } __packed req = { + .tag = cpu_to_le16(tag), + .len = cpu_to_le16(sizeof(req) - 4), + }; + + return mt76_mcu_send_msg(&dev->mt76, 
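The firmware rework above folds the duplicated request/validate/start sequences into one __mt7996_load_ram() helper and adds the DSP image as a third call. A standalone sketch of that table-driven shape with the actual loading stubbed out; the paths are the MT7996_FIRMWARE_* strings, everything else is illustrative:

    #include <stdio.h>

    enum ram_type { RAM_WM, RAM_DSP, RAM_WA };

    struct fw_entry {
        const char *type;
        const char *path;
        enum ram_type ram;
    };

    /* stand-in for request_firmware() + mt7996_mcu_send_ram_firmware() */
    static int load_one(const struct fw_entry *e)
    {
        printf("loading %s firmware from %s\n", e->type, e->path);
        return 0;
    }

    int main(void)
    {
        /* same order as mt7996_load_ram(): WM, then DSP, then WA */
        static const struct fw_entry order[] = {
            { "WM",  "mediatek/mt7996/mt7996_wm.bin",  RAM_WM  },
            { "DSP", "mediatek/mt7996/mt7996_dsp.bin", RAM_DSP },
            { "WA",  "mediatek/mt7996/mt7996_wa.bin",  RAM_WA  },
        };
        unsigned int i;

        for (i = 0; i < sizeof(order) / sizeof(order[0]); i++)
            if (load_one(&order[i]))
                return 1;
        return 0;
    }

The WM, DSP, WA order simply mirrors mt7996_load_ram() above; the per-type download options (FW_START_WORKING_PDA_CR4/DSP) stay inside the shared helper.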
MCU_WM_UNI_CMD(ALL_STA_INFO), + &req, sizeof(req), false); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h index d7075a4d0667..a88f6af323da 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h @@ -153,6 +153,32 @@ struct mt7996_mcu_mib { __le64 data; } __packed; +struct mt7996_mcu_all_sta_info_event { + u8 rsv[4]; + __le16 tag; + __le16 len; + u8 more; + u8 rsv2; + __le16 sta_num; + u8 rsv3[2]; + + union { + struct { + __le16 wlan_idx; + u8 rsv[2]; + __le32 tx_bytes[IEEE80211_NUM_ACS]; + __le32 rx_bytes[IEEE80211_NUM_ACS]; + } adm_stat[0]; + + struct { + __le16 wlan_idx; + u8 rsv[2]; + __le32 tx_msdu_cnt; + __le32 rx_msdu_cnt; + } msdu_cnt[0]; + }; +} __packed; + enum mt7996_chan_mib_offs { UNI_MIB_OBSS_AIRTIME = 26, UNI_MIB_NON_WIFI_TIME = 27, @@ -270,8 +296,6 @@ struct bss_inband_discovery_tlv { u8 enable; __le16 wcid; __le16 prob_rsp_len; -#define MAX_INBAND_FRAME_SIZE 512 - u8 pkt[MAX_INBAND_FRAME_SIZE]; } __packed; struct bss_bcn_content_tlv { @@ -283,8 +307,6 @@ struct bss_bcn_content_tlv { u8 enable; u8 type; __le16 pkt_len; -#define MAX_BEACON_SIZE 512 - u8 pkt[MAX_BEACON_SIZE]; } __packed; struct bss_bcn_cntdwn_tlv { @@ -317,6 +339,22 @@ struct bss_sec_tlv { u8 __rsv2[1]; } __packed; +struct bss_ifs_time_tlv { + __le16 tag; + __le16 len; + u8 slot_valid; + u8 sifs_valid; + u8 rifs_valid; + u8 eifs_valid; + __le16 slot_time; + __le16 sifs_time; + __le16 rifs_time; + __le16 eifs_time; + u8 eifs_cck_valid; + u8 rsv; + __le16 eifs_cck_time; +} __packed; + struct bss_power_save { __le16 tag; __le16 len; @@ -552,6 +590,7 @@ enum { sizeof(struct bss_txcmd_tlv) + \ sizeof(struct bss_power_save) + \ sizeof(struct bss_sec_tlv) + \ + sizeof(struct bss_ifs_time_tlv) + \ sizeof(struct bss_mld_tlv)) #define MT7996_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \ @@ -574,13 +613,14 @@ enum { sizeof(struct sta_rec_hdr_trans) + \ sizeof(struct tlv)) +#define MT7996_MAX_BEACON_SIZE 1342 #define MT7996_BEACON_UPDATE_SIZE (sizeof(struct bss_req_hdr) + \ sizeof(struct bss_bcn_content_tlv) + \ + MT_TXD_SIZE + \ sizeof(struct bss_bcn_cntdwn_tlv) + \ sizeof(struct bss_bcn_mbss_tlv)) - -#define MT7996_INBAND_FRAME_SIZE (sizeof(struct bss_req_hdr) + \ - sizeof(struct bss_inband_discovery_tlv)) +#define MT7996_MAX_BSS_OFFLOAD_SIZE (MT7996_MAX_BEACON_SIZE + \ + MT7996_BEACON_UPDATE_SIZE) enum { UNI_BAND_CONFIG_RADIO_ENABLE, diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h index 4d7dcb95a620..e53cf6a3704c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h @@ -26,15 +26,17 @@ #define MT7996_RX_RING_SIZE 1536 #define MT7996_RX_MCU_RING_SIZE 512 +#define MT7996_RX_MCU_RING_SIZE_WA 1024 #define MT7996_FIRMWARE_WA "mediatek/mt7996/mt7996_wa.bin" #define MT7996_FIRMWARE_WM "mediatek/mt7996/mt7996_wm.bin" +#define MT7996_FIRMWARE_DSP "mediatek/mt7996/mt7996_dsp.bin" #define MT7996_ROM_PATCH "mediatek/mt7996/mt7996_rom_patch.bin" #define MT7996_EEPROM_DEFAULT "mediatek/mt7996/mt7996_eeprom.bin" #define MT7996_EEPROM_SIZE 7680 #define MT7996_EEPROM_BLOCK_SIZE 16 -#define MT7996_TOKEN_SIZE 8192 +#define MT7996_TOKEN_SIZE 16384 #define MT7996_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */ #define MT7996_CFEND_RATE_11B 0x03 /* 11B LP, 11M */ @@ -52,6 +54,12 @@ struct mt7996_sta; struct mt7996_dfs_pulse; struct mt7996_dfs_pattern; +enum mt7996_ram_type { + 
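struct mt7996_mcu_all_sta_info_event is a fixed little-endian header followed by sta_num records whose layout the tag selects (per-AC admission bytes or MSDU counters). A reader model over a fabricated byte stream; only the field offsets mirror the struct, the tag and values are invented:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t get_le16(const uint8_t *p)
    {
        return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
    }

    int main(void)
    {
        /* rsv[4], tag, len, more, rsv2, sta_num, rsv3[2], then one
         * msdu_cnt-style record (wlan_idx, rsv[2], tx/rx MSDU counts) */
        const uint8_t ev[] = {
            0, 0, 0, 0,  2, 0,  22, 0,  0, 0,  1, 0,  0, 0,
            5, 0,  0, 0,  10, 0, 0, 0,  7, 0, 0, 0,
        };
        const uint8_t *rec = &ev[14]; /* records start after the header */

        printf("tag %u, %u station(s), first wlan_idx %u\n",
               get_le16(&ev[4]), get_le16(&ev[10]), get_le16(rec));
        return 0;
    }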
MT7996_RAM_TYPE_WM, + MT7996_RAM_TYPE_WA, + MT7996_RAM_TYPE_DSP, +}; + enum mt7996_txq_id { MT7996_TXQ_FWDL = 16, MT7996_TXQ_MCU_WM, @@ -95,7 +103,6 @@ struct mt7996_sta { struct mt7996_vif *vif; - struct list_head poll_list; struct list_head rc_list; u32 airtime_ac[8]; @@ -103,10 +110,6 @@ struct mt7996_sta { struct ewma_avg_signal avg_ack_signal; unsigned long changed; - unsigned long jiffies; - unsigned long ampdu_state; - - struct mt76_sta_stats stats; struct mt76_connac_sta_key_conf bip; @@ -124,64 +127,6 @@ struct mt7996_vif { struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; struct cfg80211_bitrate_mask bitrate_mask; - - u8 basic_rates_idx; - u8 mcast_rates_idx; - u8 beacon_rates_idx; -}; - -/* per-phy stats. */ -struct mib_stats { - u32 ack_fail_cnt; - u32 fcs_err_cnt; - u32 rts_cnt; - u32 rts_retries_cnt; - u32 ba_miss_cnt; - u32 tx_mu_bf_cnt; - u32 tx_mu_mpdu_cnt; - u32 tx_mu_acked_mpdu_cnt; - u32 tx_su_acked_mpdu_cnt; - u32 tx_bf_ibf_ppdu_cnt; - u32 tx_bf_ebf_ppdu_cnt; - - u32 tx_bf_rx_fb_all_cnt; - u32 tx_bf_rx_fb_eht_cnt; - u32 tx_bf_rx_fb_he_cnt; - u32 tx_bf_rx_fb_vht_cnt; - u32 tx_bf_rx_fb_ht_cnt; - - u32 tx_bf_rx_fb_bw; /* value of last sample, not cumulative */ - u32 tx_bf_rx_fb_nc_cnt; - u32 tx_bf_rx_fb_nr_cnt; - u32 tx_bf_fb_cpl_cnt; - u32 tx_bf_fb_trig_cnt; - - u32 tx_ampdu_cnt; - u32 tx_stop_q_empty_cnt; - u32 tx_mpdu_attempts_cnt; - u32 tx_mpdu_success_cnt; - /* BF counter is PPDU-based, so remove MPDU-based BF counter */ - - u32 tx_rwp_fail_cnt; - u32 tx_rwp_need_cnt; - - /* rx stats */ - u32 rx_fifo_full_cnt; - u32 channel_idle_cnt; - u32 rx_vector_mismatch_cnt; - u32 rx_delimiter_fail_cnt; - u32 rx_len_mismatch_cnt; - u32 rx_mpdu_cnt; - u32 rx_ampdu_cnt; - u32 rx_ampdu_bytes_cnt; - u32 rx_ampdu_valid_subframe_cnt; - u32 rx_ampdu_valid_subframe_bytes_cnt; - u32 rx_pfdrop_cnt; - u32 rx_vec_queue_overflow_drop_cnt; - u32 rx_ba_cnt; - - u32 tx_amsdu[8]; - u32 tx_amsdu_cnt; }; /* crash-dump */ @@ -222,7 +167,7 @@ struct mt7996_phy { u32 rx_ampdu_ts; u32 ampdu_ref; - struct mib_stats mib; + struct mt76_mib_stats mib; struct mt76_channel_state state_ts; }; @@ -272,9 +217,7 @@ struct mt7996_dev { #endif struct list_head sta_rc_list; - struct list_head sta_poll_list; struct list_head twt_list; - spinlock_t sta_poll_lock; u32 hw_pattern; @@ -311,20 +254,6 @@ enum { }; enum { - MT_CTX0, - MT_HIF0 = 0x0, - - MT_LMAC_AC00 = 0x0, - MT_LMAC_AC01, - MT_LMAC_AC02, - MT_LMAC_AC03, - MT_LMAC_ALTX0 = 0x10, - MT_LMAC_BMC0, - MT_LMAC_BCN0, - MT_LMAC_PSMP0, -}; - -enum { MT_RX_SEL0, MT_RX_SEL1, MT_RX_SEL2, /* monitor chain */ @@ -405,6 +334,7 @@ int mt7996_dma_init(struct mt7996_dev *dev); void mt7996_dma_reset(struct mt7996_dev *dev, bool force); void mt7996_dma_prefetch(struct mt7996_dev *dev); void mt7996_dma_cleanup(struct mt7996_dev *dev); +void mt7996_dma_start(struct mt7996_dev *dev, bool reset); void mt7996_init_txpower(struct mt7996_dev *dev, struct ieee80211_supported_band *sband); int mt7996_txbf_init(struct mt7996_dev *dev); @@ -456,6 +386,7 @@ int mt7996_mcu_set_radar_th(struct mt7996_dev *dev, int index, const struct mt7996_dfs_pattern *pattern); int mt7996_mcu_set_radio_en(struct mt7996_phy *phy, bool enable); int mt7996_mcu_set_rts_thresh(struct mt7996_phy *phy, u32 val); +int mt7996_mcu_set_timing(struct mt7996_phy *phy, struct ieee80211_vif *vif); int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch); int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 index, u8 rx_sel, u8 val); @@ -470,6 +401,7 @@ int 
mt7996_mcu_fw_dbg_ctrl(struct mt7996_dev *dev, u32 module, u8 level); int mt7996_mcu_trigger_assert(struct mt7996_dev *dev); void mt7996_mcu_rx_event(struct mt7996_dev *dev, struct sk_buff *skb); void mt7996_mcu_exit(struct mt7996_dev *dev); +int mt7996_mcu_get_all_sta_info(struct mt7996_phy *phy, u16 tag); static inline u8 mt7996_max_interface_num(struct mt7996_dev *dev) { @@ -519,7 +451,7 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi, struct sk_buff *skb, struct mt76_wcid *wcid, struct ieee80211_key_conf *key, int pid, enum mt76_txq_id qid, u32 changed); -void mt7996_mac_set_timing(struct mt7996_phy *phy); +void mt7996_mac_set_coverage_class(struct mt7996_phy *phy); int mt7996_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void mt7996_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c index 64aee3fb5445..c5301050ff8b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c @@ -219,4 +219,5 @@ MODULE_DEVICE_TABLE(pci, mt7996_pci_device_table); MODULE_DEVICE_TABLE(pci, mt7996_hif_device_table); MODULE_FIRMWARE(MT7996_FIRMWARE_WA); MODULE_FIRMWARE(MT7996_FIRMWARE_WM); +MODULE_FIRMWARE(MT7996_FIRMWARE_DSP); MODULE_FIRMWARE(MT7996_ROM_PATCH); diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h index d1d3d154195d..0086a7866657 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h @@ -243,6 +243,13 @@ enum base_rev { FIELD_PREP(MT_WTBL_LMAC_ID, _id) | \ FIELD_PREP(MT_WTBL_LMAC_DW, _dw)) +/* AGG: band 0(0x820e2000), band 1(0x820f2000), band 2(0x830e2000) */ +#define MT_WF_AGG_BASE(_band) __BASE(WF_AGG_BASE, (_band)) +#define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs)) + +#define MT_AGG_ACR4(_band) MT_WF_AGG(_band, 0x3c) +#define MT_AGG_ACR_PPDU_TXS2H BIT(1) + /* ARB: band 0(0x820e3000), band 1(0x820f3000), band 2(0x830e3000) */ #define MT_WF_ARB_BASE(_band) __BASE(WF_ARB_BASE, (_band)) #define MT_WF_ARB(_band, ofs) (MT_WF_ARB_BASE(_band) + (ofs)) @@ -509,6 +516,7 @@ enum base_rev { #define MT_LED_CTRL(_n) MT_LED_PHYS(0x00 + ((_n) * 4)) #define MT_LED_CTRL_KICK BIT(7) +#define MT_LED_CTRL_BLINK_BAND_SEL BIT(4) #define MT_LED_CTRL_BLINK_MODE BIT(2) #define MT_LED_CTRL_POLARITY BIT(1) @@ -557,22 +565,29 @@ enum base_rev { #define MT_PCIE1_MAC_INT_ENABLE MT_PCIE1_MAC(0x188) +/* PHYRX CSD */ +#define MT_WF_PHYRX_CSD_BASE 0x83000000 +#define MT_WF_PHYRX_CSD(_band, _wf, ofs) (MT_WF_PHYRX_CSD_BASE + \ + ((_band) << 20) + \ + ((_wf) << 16) + (ofs)) +#define MT_WF_PHYRX_CSD_IRPI(_band, _wf) MT_WF_PHYRX_CSD(_band, _wf, 0x1000) + /* PHYRX CTRL */ #define MT_WF_PHYRX_BAND_BASE 0x83080000 #define MT_WF_PHYRX_BAND(_band, ofs) (MT_WF_PHYRX_BAND_BASE + \ ((_band) << 20) + (ofs)) +#define MT_WF_PHYRX_BAND_GID_TAB_VLD0(_band) MT_WF_PHYRX_BAND(_band, 0x1054) +#define MT_WF_PHYRX_BAND_GID_TAB_VLD1(_band) MT_WF_PHYRX_BAND(_band, 0x1058) +#define MT_WF_PHYRX_BAND_GID_TAB_POS0(_band) MT_WF_PHYRX_BAND(_band, 0x105c) +#define MT_WF_PHYRX_BAND_GID_TAB_POS1(_band) MT_WF_PHYRX_BAND(_band, 0x1060) +#define MT_WF_PHYRX_BAND_GID_TAB_POS2(_band) MT_WF_PHYRX_BAND(_band, 0x1064) +#define MT_WF_PHYRX_BAND_GID_TAB_POS3(_band) MT_WF_PHYRX_BAND(_band, 0x1068) + #define MT_WF_PHYRX_BAND_RX_CTRL1(_band) MT_WF_PHYRX_BAND(_band, 0x2004) #define 
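The register families in regs.h are built by macros that splice band and antenna (wf) indices into a fixed base address, as in the relocated MT_WF_PHYRX_CSD() above. A quick standalone check of that address arithmetic using the constants as shown:

    #include <stdint.h>
    #include <stdio.h>

    #define MT_WF_PHYRX_CSD_BASE 0x83000000u
    #define MT_WF_PHYRX_CSD(band, wf, ofs) \
        (MT_WF_PHYRX_CSD_BASE + ((uint32_t)(band) << 20) + \
         ((uint32_t)(wf) << 16) + (ofs))
    #define MT_WF_PHYRX_CSD_IRPI(band, wf) MT_WF_PHYRX_CSD(band, wf, 0x1000)

    int main(void)
    {
        /* 0x83000000 + (1 << 20) + (2 << 16) + 0x1000 = 0x83121000 */
        printf("IRPI(1, 2) = 0x%08x\n", MT_WF_PHYRX_CSD_IRPI(1, 2));
        return 0;
    }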
MT_WF_PHYRX_BAND_RX_CTRL1_IPI_EN GENMASK(2, 0) #define MT_WF_PHYRX_BAND_RX_CTRL1_STSCNT_EN GENMASK(11, 9) -/* PHYRX CSD */ -#define MT_WF_PHYRX_CSD_BASE 0x83000000 -#define MT_WF_PHYRX_CSD(_band, _wf, ofs) (MT_WF_PHYRX_CSD_BASE + \ - ((_band) << 20) + \ - ((_wf) << 16) + (ofs)) -#define MT_WF_PHYRX_CSD_IRPI(_band, _wf) MT_WF_PHYRX_CSD(_band, _wf, 0x1000) - /* PHYRX CSD BAND */ #define MT_WF_PHYRX_CSD_BAND_RXTD12(_band) MT_WF_PHYRX_BAND(_band, 0x8230) #define MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR_ONLY BIT(18) diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c index 0accc71a91c9..4644dace9bb3 100644 --- a/drivers/net/wireless/mediatek/mt76/testmode.c +++ b/drivers/net/wireless/mediatek/mt76/testmode.c @@ -8,6 +8,7 @@ const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = { [MT76_TM_ATTR_RESET] = { .type = NLA_FLAG }, [MT76_TM_ATTR_STATE] = { .type = NLA_U8 }, [MT76_TM_ATTR_TX_COUNT] = { .type = NLA_U32 }, + [MT76_TM_ATTR_TX_LENGTH] = { .type = NLA_U32 }, [MT76_TM_ATTR_TX_RATE_MODE] = { .type = NLA_U8 }, [MT76_TM_ATTR_TX_RATE_NSS] = { .type = NLA_U8 }, [MT76_TM_ATTR_TX_RATE_IDX] = { .type = NLA_U8 }, diff --git a/drivers/net/wireless/mediatek/mt76/trace.h b/drivers/net/wireless/mediatek/mt76/trace.h index c3d0ef8e2890..109a07f9733a 100644 --- a/drivers/net/wireless/mediatek/mt76/trace.h +++ b/drivers/net/wireless/mediatek/mt76/trace.h @@ -14,7 +14,7 @@ #define MAXNAME 32 #define DEV_ENTRY __array(char, wiphy_name, 32) -#define DEVICE_ASSIGN strlcpy(__entry->wiphy_name, \ +#define DEVICE_ASSIGN strscpy(__entry->wiphy_name, \ wiphy_name(dev->hw->wiphy), MAXNAME) #define DEV_PR_FMT "%s" #define DEV_PR_ARG __entry->wiphy_name diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 72b3ec715e47..1809b03292c3 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -121,6 +121,7 @@ int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid, struct sk_buff *skb) { + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb); int pid; @@ -134,8 +135,14 @@ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid, return MT_PACKET_ID_NO_ACK; if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS | - IEEE80211_TX_CTL_RATE_CTRL_PROBE))) + IEEE80211_TX_CTL_RATE_CTRL_PROBE))) { + if (mtk_wed_device_active(&dev->mmio.wed) && + ((info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) || + ieee80211_is_data(hdr->frame_control))) + return MT_PACKET_ID_WED; + return MT_PACKET_ID_NO_SKB; + } spin_lock_bh(&dev->status_lock); @@ -263,8 +270,15 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff * #endif if (cb->pktid < MT_PACKET_ID_FIRST) { + struct ieee80211_rate_status rs = {}; + hw = mt76_tx_status_get_hw(dev, skb); status.sta = wcid_to_sta(wcid); + if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) { + rs.rate_idx = wcid->rate; + status.rates = &rs; + status.n_rates = 1; + } spin_lock_bh(&dev->rx_lock); ieee80211_tx_status_ext(hw, &status); spin_unlock_bh(&dev->rx_lock); @@ -315,40 +329,32 @@ void mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta, struct mt76_wcid *wcid, struct sk_buff *skb) { - struct mt76_dev *dev = phy->dev; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct mt76_queue *q; - int qid = 
skb_get_queue_mapping(skb); if (mt76_testmode_enabled(phy)) { ieee80211_free_txskb(phy->hw, skb); return; } - if (WARN_ON(qid >= MT_TXQ_PSD)) { - qid = MT_TXQ_BE; - skb_set_queue_mapping(skb, qid); - } - - if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) && - !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && - !ieee80211_is_data(hdr->frame_control) && - !ieee80211_is_bufferable_mmpdu(skb)) { - qid = MT_TXQ_PSD; - } + if (WARN_ON(skb_get_queue_mapping(skb) >= MT_TXQ_PSD)) + skb_set_queue_mapping(skb, MT_TXQ_BE); if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET)) ieee80211_get_tx_rates(info->control.vif, sta, skb, info->control.rates, 1); info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx); - q = phy->q_tx[qid]; - spin_lock_bh(&q->lock); - __mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL); - dev->queue_ops->kick(dev, q); - spin_unlock_bh(&q->lock); + spin_lock_bh(&wcid->tx_pending.lock); + __skb_queue_tail(&wcid->tx_pending, skb); + spin_unlock_bh(&wcid->tx_pending.lock); + + spin_lock_bh(&phy->tx_lock); + if (list_empty(&wcid->tx_list)) + list_add_tail(&wcid->tx_list, &phy->tx_list); + spin_unlock_bh(&phy->tx_lock); + + mt76_worker_schedule(&phy->dev->tx_worker); } EXPORT_SYMBOL_GPL(mt76_tx); @@ -579,10 +585,86 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid) } EXPORT_SYMBOL_GPL(mt76_txq_schedule); +static int +mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid) +{ + struct mt76_dev *dev = phy->dev; + struct ieee80211_sta *sta; + struct mt76_queue *q; + struct sk_buff *skb; + int ret = 0; + + spin_lock(&wcid->tx_pending.lock); + while ((skb = skb_peek(&wcid->tx_pending)) != NULL) { + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int qid = skb_get_queue_mapping(skb); + + if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) && + !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && + !ieee80211_is_data(hdr->frame_control) && + !ieee80211_is_bufferable_mmpdu(skb)) + qid = MT_TXQ_PSD; + + q = phy->q_tx[qid]; + if (mt76_txq_stopped(q)) { + ret = -1; + break; + } + + __skb_unlink(skb, &wcid->tx_pending); + spin_unlock(&wcid->tx_pending.lock); + + sta = wcid_to_sta(wcid); + spin_lock(&q->lock); + __mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL); + dev->queue_ops->kick(dev, q); + spin_unlock(&q->lock); + + spin_lock(&wcid->tx_pending.lock); + } + spin_unlock(&wcid->tx_pending.lock); + + return ret; +} + +static void mt76_txq_schedule_pending(struct mt76_phy *phy) +{ + if (list_empty(&phy->tx_list)) + return; + + local_bh_disable(); + rcu_read_lock(); + + spin_lock(&phy->tx_lock); + while (!list_empty(&phy->tx_list)) { + struct mt76_wcid *wcid = NULL; + int ret; + + wcid = list_first_entry(&phy->tx_list, struct mt76_wcid, tx_list); + list_del_init(&wcid->tx_list); + + spin_unlock(&phy->tx_lock); + ret = mt76_txq_schedule_pending_wcid(phy, wcid); + spin_lock(&phy->tx_lock); + + if (ret) { + if (list_empty(&wcid->tx_list)) + list_add_tail(&wcid->tx_list, &phy->tx_list); + break; + } + } + spin_unlock(&phy->tx_lock); + + rcu_read_unlock(); + local_bh_enable(); +} + void mt76_txq_schedule_all(struct mt76_phy *phy) { int i; + mt76_txq_schedule_pending(phy); for (i = 0; i <= MT_TXQ_BK; i++) mt76_txq_schedule(phy, i); } diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index 5e5c7bf51174..1584665fe3cb 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -286,8 +286,7 @@ static 
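The tx.c rework above moves queue dispatch out of mt76_tx(): a frame is appended to its station's pending queue, the station is linked onto the phy's tx_list at most once, and the tx worker later drains stations in arrival order. A single-threaded model of that flow; it omits the driver's locking and uses fixed-size arrays in place of the kernel lists:

    #include <stdio.h>

    #define MAX_PKTS 8

    struct sta {
        const char *name;
        int pending[MAX_PKTS];
        unsigned int head, tail;
        int listed; /* already on the phy's active list? */
    };

    struct phy {
        struct sta *active[4];
        int n_active;
    };

    /* mt76_tx(): queue the frame, link the station in at most once */
    static void tx(struct phy *phy, struct sta *sta, int pkt)
    {
        sta->pending[sta->tail++ % MAX_PKTS] = pkt;
        if (!sta->listed) {
            phy->active[phy->n_active++] = sta;
            sta->listed = 1;
        }
    }

    /* tx worker: drain stations in the order they became active */
    static void worker(struct phy *phy)
    {
        int i;

        for (i = 0; i < phy->n_active; i++) {
            struct sta *s = phy->active[i];

            while (s->head != s->tail)
                printf("%s: send pkt %d\n", s->name,
                       s->pending[s->head++ % MAX_PKTS]);
            s->listed = 0;
        }
        phy->n_active = 0;
    }

    int main(void)
    {
        struct phy phy = { { 0 }, 0 };
        struct sta a = { "sta-a", { 0 }, 0, 0, 0 };
        struct sta b = { "sta-b", { 0 }, 0, 0, 0 };

        tx(&phy, &a, 1);
        tx(&phy, &b, 2);
        tx(&phy, &a, 3); /* no second list entry for sta-a */
        worker(&phy);    /* sends 1, 3, then 2 */
        return 0;
    }

Note the real mt76_txq_schedule_pending() re-links a station whose drain stops on a stopped hardware queue; the model skips that path.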
bool mt76u_check_sg(struct mt76_dev *dev) struct usb_device *udev = interface_to_usbdev(uintf); return (!disable_usb_sg && udev->bus->sg_tablesize > 0 && - (udev->bus->no_sg_constraint || - udev->speed == USB_SPEED_WIRELESS)); + udev->bus->no_sg_constraint); } static int diff --git a/drivers/net/wireless/mediatek/mt76/usb_trace.h b/drivers/net/wireless/mediatek/mt76/usb_trace.h index f5ab3215af80..7b261ddb2ac6 100644 --- a/drivers/net/wireless/mediatek/mt76/usb_trace.h +++ b/drivers/net/wireless/mediatek/mt76/usb_trace.h @@ -14,7 +14,7 @@ #define MAXNAME 32 #define DEV_ENTRY __array(char, wiphy_name, 32) -#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \ +#define DEV_ASSIGN strscpy(__entry->wiphy_name, \ wiphy_name(dev->hw->wiphy), MAXNAME) #define DEV_PR_FMT "%s " #define DEV_PR_ARG __entry->wiphy_name diff --git a/drivers/net/wireless/mediatek/mt7601u/Kconfig b/drivers/net/wireless/mediatek/mt7601u/Kconfig index 4a8b96280670..4880fc053d9d 100644 --- a/drivers/net/wireless/mediatek/mt7601u/Kconfig +++ b/drivers/net/wireless/mediatek/mt7601u/Kconfig @@ -4,4 +4,4 @@ config MT7601U depends on MAC80211 depends on USB help - This adds support for MT7601U-based wireless USB dongles. + This adds support for MT7601U-based USB wireless dongles. diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c index 51d977ffc52f..5aeeac0dd9fe 100644 --- a/drivers/net/wireless/mediatek/mt7601u/tx.c +++ b/drivers/net/wireless/mediatek/mt7601u/tx.c @@ -110,7 +110,7 @@ void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb) info->flags |= IEEE80211_TX_STAT_ACK; spin_lock_bh(&dev->mac_lock); - ieee80211_tx_status(dev->hw, skb); + ieee80211_tx_status_skb(dev->hw, skb); spin_unlock_bh(&dev->mac_lock); } diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c index cc772045d526..c41ae251cb95 100644 --- a/drivers/net/wireless/mediatek/mt7601u/usb.c +++ b/drivers/net/wireless/mediatek/mt7601u/usb.c @@ -365,6 +365,7 @@ static int mt7601u_resume(struct usb_interface *usb_intf) MODULE_DEVICE_TABLE(usb, mt7601u_device_table); MODULE_FIRMWARE(MT7601U_FIRMWARE); +MODULE_DESCRIPTION("MediaTek MT7601U USB Wireless LAN driver"); MODULE_LICENSE("GPL"); static struct usb_driver mt7601u_driver = { diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c index b545d93c6e37..da52f91693b5 100644 --- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c +++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c @@ -1441,11 +1441,11 @@ static int start_ap(struct wiphy *wiphy, struct net_device *dev, } static int change_beacon(struct wiphy *wiphy, struct net_device *dev, - struct cfg80211_beacon_data *beacon) + struct cfg80211_ap_update *params) { struct wilc_vif *vif = netdev_priv(dev); - return wilc_add_beacon(vif, 0, 0, beacon); + return wilc_add_beacon(vif, 0, 0, &params->beacon); } static int stop_ap(struct wiphy *wiphy, struct net_device *dev, diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.h b/drivers/net/wireless/microchip/wilc1000/cfg80211.h index 37b294cb3b37..8c65951cfaf9 100644 --- a/drivers/net/wireless/microchip/wilc1000/cfg80211.h +++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.h @@ -8,15 +8,12 @@ #define WILC_CFG80211_H #include "netdev.h" -struct wiphy *wilc_cfg_alloc(void); int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type, const struct wilc_hif_func *ops); struct wilc *wilc_create_wiphy(struct device *dev); void
wilc_deinit_host_int(struct net_device *net); int wilc_init_host_int(struct net_device *net); void wilc_wfi_monitor_rx(struct net_device *mon_dev, u8 *buff, u32 size); -struct wilc_vif *wilc_netdev_interface(struct wilc *wl, const char *name, - enum nl80211_iftype type); void wilc_wfi_deinit_mon_interface(struct wilc *wl, bool rtnl_locked); struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl, const char *name, @@ -24,7 +21,6 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl, void wilc_update_mgmt_frame_registrations(struct wiphy *wiphy, struct wireless_dev *wdev, struct mgmt_frame_regs *upd); -struct wilc_vif *wilc_get_interface(struct wilc *wl); struct wilc_vif *wilc_get_wl_to_vif(struct wilc *wl); void wlan_deinit_locks(struct wilc *wilc); #endif diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c index e9f59de31b0b..91d71e0f7ef2 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.c +++ b/drivers/net/wireless/microchip/wilc1000/netdev.c @@ -148,8 +148,8 @@ static int wilc_txq_task(void *vp) complete(&wl->txq_thread_started); while (1) { - wait_for_completion(&wl->txq_event); - + if (wait_for_completion_interruptible(&wl->txq_event)) + continue; if (wl->close) { complete(&wl->txq_thread_started); @@ -166,12 +166,24 @@ static int wilc_txq_task(void *vp) srcu_idx = srcu_read_lock(&wl->srcu); list_for_each_entry_rcu(ifc, &wl->vif_list, list) { - if (ifc->mac_opened && ifc->ndev) + if (ifc->mac_opened && + netif_queue_stopped(ifc->ndev)) netif_wake_queue(ifc->ndev); } srcu_read_unlock(&wl->srcu, srcu_idx); } - } while (ret == WILC_VMM_ENTRY_FULL_RETRY && !wl->close); + if (ret != WILC_VMM_ENTRY_FULL_RETRY) + break; + /* Back off TX task from sending packets for some time. + * msleep_interruptible will allow RX task to run and + * free buffers. TX task will be in TASK_INTERRUPTIBLE + * state which will put the thread back to CPU running + * queue when it's signaled even if the timeout isn't + * elapsed. This gives faster chance for reserved SK + * buffers to be free. 
+ */ + msleep_interruptible(TX_BACKOFF_WEIGHT_MS); + } while (!wl->close); } return 0; } diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h index bb1a315a7b7e..aafe3dc44ac6 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.h +++ b/drivers/net/wireless/microchip/wilc1000/netdev.h @@ -27,6 +27,8 @@ #define TCP_ACK_FILTER_LINK_SPEED_THRESH 54 #define DEFAULT_LINK_SPEED 72 +#define TX_BACKOFF_WEIGHT_MS 1 + struct wilc_wfi_stats { unsigned long rx_packets; unsigned long tx_packets; diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c index a05bda7b9a3b..87948ba69a22 100644 --- a/drivers/net/wireless/microchip/wilc1000/sdio.c +++ b/drivers/net/wireless/microchip/wilc1000/sdio.c @@ -28,7 +28,6 @@ struct wilc_sdio { bool irq_gpio; u32 block_size; bool isinit; - int has_thrpt_enh3; u8 *cmd53_buf; }; @@ -722,21 +721,12 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume) * make sure can read back chip id correctly **/ if (!resume) { - int rev; - ret = wilc_sdio_read_reg(wilc, WILC_CHIPID, &chipid); if (ret) { dev_err(&func->dev, "Fail cmd read chip id...\n"); return ret; } dev_err(&func->dev, "chipid (%08x)\n", chipid); - rev = FIELD_GET(WILC_CHIP_REV_FIELD, chipid); - if (rev > FIELD_GET(WILC_CHIP_REV_FIELD, WILC_1000_BASE_ID_2A)) - sdio_priv->has_thrpt_enh3 = 1; - else - sdio_priv->has_thrpt_enh3 = 0; - dev_info(&func->dev, "has_thrpt_enh3 = %d...\n", - sdio_priv->has_thrpt_enh3); } sdio_priv->isinit = true; @@ -809,102 +799,29 @@ static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val) struct sdio_func *func = dev_to_sdio_func(wilc->dev); struct wilc_sdio *sdio_priv = wilc->bus_data; int ret; - int vmm_ctl; - - if (sdio_priv->has_thrpt_enh3) { - u32 reg = 0; - - if (sdio_priv->irq_gpio) - reg = val & (BIT(MAX_NUM_INT) - 1); - - /* select VMM table 0 */ - if (val & SEL_VMM_TBL0) - reg |= BIT(5); - /* select VMM table 1 */ - if (val & SEL_VMM_TBL1) - reg |= BIT(6); - /* enable VMM */ - if (val & EN_VMM) - reg |= BIT(7); - if (reg) { - struct sdio_cmd52 cmd; - - cmd.read_write = 1; - cmd.function = 0; - cmd.raw = 0; - cmd.address = WILC_SDIO_IRQ_CLEAR_FLAG_REG; - cmd.data = reg; - - ret = wilc_sdio_cmd52(wilc, &cmd); - if (ret) { - dev_err(&func->dev, - "Failed cmd52, set (%02x) data (%d) ...\n", - cmd.address, __LINE__); - return ret; - } - } - return 0; - } - if (sdio_priv->irq_gpio) { - /* has_thrpt_enh2 uses register 0xf8 to clear interrupts. */ - /* - * Cannot clear multiple interrupts. - * Must clear each interrupt individually. 
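The reworked wilc1000 TX thread above retries while the hardware VMM table reports full, sleeping briefly between attempts so the RX path can return buffers, and still reacts promptly to close or a signal. A user-space analogue of that loop; the constants and the queue stub are illustrative:

    #include <stdio.h>
    #include <time.h>

    #define ENTRY_FULL_RETRY 1 /* stands in for WILC_VMM_ENTRY_FULL_RETRY */

    static int try_send(int attempt)
    {
        return attempt < 3 ? ENTRY_FULL_RETRY : 0; /* full twice, then ok */
    }

    int main(void)
    {
        /* ~1 ms between attempts, like TX_BACKOFF_WEIGHT_MS */
        const struct timespec backoff = { 0, 1000000 };
        int attempt = 0, ret;

        for (;;) {
            ret = try_send(++attempt);
            if (ret != ENTRY_FULL_RETRY)
                break;
            printf("queue full, backing off (attempt %d)\n", attempt);
            nanosleep(&backoff, NULL);
        }
        printf("sent after %d attempt(s)\n", attempt);
        return 0;
    }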
- */ - u32 flags; - int i; - - flags = val & (BIT(MAX_NUM_INT) - 1); - for (i = 0; i < NUM_INT_EXT && flags; i++) { - if (flags & BIT(i)) { - struct sdio_cmd52 cmd; - - cmd.read_write = 1; - cmd.function = 0; - cmd.raw = 0; - cmd.address = WILC_SDIO_IRQ_CLEAR_FLAG_REG; - cmd.data = BIT(i); - - ret = wilc_sdio_cmd52(wilc, &cmd); - if (ret) { - dev_err(&func->dev, - "Failed cmd52, set (%02x) data (%d) ...\n", - cmd.address, __LINE__); - return ret; - } - flags &= ~BIT(i); - } - } + u32 reg = 0; - for (i = NUM_INT_EXT; i < MAX_NUM_INT && flags; i++) { - if (flags & BIT(i)) { - dev_err(&func->dev, - "Unexpected interrupt cleared %d...\n", - i); - flags &= ~BIT(i); - } - } - } + if (sdio_priv->irq_gpio) + reg = val & (BIT(MAX_NUM_INT) - 1); - vmm_ctl = 0; /* select VMM table 0 */ if (val & SEL_VMM_TBL0) - vmm_ctl |= BIT(0); + reg |= BIT(5); /* select VMM table 1 */ if (val & SEL_VMM_TBL1) - vmm_ctl |= BIT(1); + reg |= BIT(6); /* enable VMM */ if (val & EN_VMM) - vmm_ctl |= BIT(2); - - if (vmm_ctl) { + reg |= BIT(7); + if (reg) { struct sdio_cmd52 cmd; cmd.read_write = 1; cmd.function = 0; cmd.raw = 0; - cmd.address = WILC_SDIO_VMM_TBL_CTRL_REG; - cmd.data = vmm_ctl; + cmd.address = WILC_SDIO_IRQ_CLEAR_FLAG_REG; + cmd.data = reg; + ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { dev_err(&func->dev, diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c index b0fc5e68feec..77b4cdff73c3 100644 --- a/drivers/net/wireless/microchip/wilc1000/spi.c +++ b/drivers/net/wireless/microchip/wilc1000/spi.c @@ -74,6 +74,7 @@ static int wilc_spi_reset(struct wilc *wilc); #define CMD_SINGLE_READ 0xca #define CMD_RESET 0xcf +#define SPI_RETRY_MAX_LIMIT 10 #define SPI_ENABLE_VMM_RETRY_LIMIT 2 /* SPI response fields (section 11.1.2 in ATWILC1000 User Guide): */ @@ -830,59 +831,91 @@ static int wilc_spi_special_cmd(struct wilc *wilc, u8 cmd) return 0; } +static void wilc_spi_reset_cmd_sequence(struct wilc *wl, u8 attempt, u32 addr) +{ + struct spi_device *spi = to_spi_device(wl->dev); + struct wilc_spi *spi_priv = wl->bus_data; + + if (!spi_priv->probing_crc) + dev_err(&spi->dev, "Reset and retry %d %x\n", attempt, addr); + + usleep_range(1000, 1100); + wilc_spi_reset(wl); + usleep_range(1000, 1100); +} + static int wilc_spi_read_reg(struct wilc *wilc, u32 addr, u32 *data) { struct spi_device *spi = to_spi_device(wilc->dev); int result; u8 cmd = CMD_SINGLE_READ; u8 clockless = 0; + u8 i; - if (addr < WILC_SPI_CLOCKLESS_ADDR_LIMIT) { + if (addr <= WILC_SPI_CLOCKLESS_ADDR_LIMIT) { /* Clockless register */ cmd = CMD_INTERNAL_READ; clockless = 1; } - result = wilc_spi_single_read(wilc, cmd, addr, data, clockless); - if (result) { + for (i = 0; i < SPI_RETRY_MAX_LIMIT; i++) { + result = wilc_spi_single_read(wilc, cmd, addr, data, clockless); + if (!result) { + le32_to_cpus(data); + return 0; + } + + /* retry is not applicable for clockless registers */ + if (clockless) + break; + dev_err(&spi->dev, "Failed cmd, read reg (%08x)...\n", addr); - return result; + wilc_spi_reset_cmd_sequence(wilc, i, addr); } - le32_to_cpus(data); - - return 0; + return result; } static int wilc_spi_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size) { struct spi_device *spi = to_spi_device(wilc->dev); int result; + u8 i; if (size <= 4) return -EINVAL; - result = wilc_spi_dma_rw(wilc, CMD_DMA_EXT_READ, addr, buf, size); - if (result) { + for (i = 0; i < SPI_RETRY_MAX_LIMIT; i++) { + result = wilc_spi_dma_rw(wilc, CMD_DMA_EXT_READ, addr, + buf, size); + if (!result) + return 0; + 
dev_err(&spi->dev, "Failed cmd, read block (%08x)...\n", addr); - return result; + + wilc_spi_reset_cmd_sequence(wilc, i, addr); } - return 0; + return result; } static int spi_internal_write(struct wilc *wilc, u32 adr, u32 dat) { struct spi_device *spi = to_spi_device(wilc->dev); int result; + u8 i; - result = wilc_spi_write_cmd(wilc, CMD_INTERNAL_WRITE, adr, dat, 0); - if (result) { + for (i = 0; i < SPI_RETRY_MAX_LIMIT; i++) { + result = wilc_spi_write_cmd(wilc, CMD_INTERNAL_WRITE, adr, + dat, 0); + if (!result) + return 0; dev_err(&spi->dev, "Failed internal write cmd...\n"); - return result; + + wilc_spi_reset_cmd_sequence(wilc, i, adr); } - return 0; + return result; } static int spi_internal_read(struct wilc *wilc, u32 adr, u32 *data) @@ -890,17 +923,22 @@ static int spi_internal_read(struct wilc *wilc, u32 adr, u32 *data) struct spi_device *spi = to_spi_device(wilc->dev); struct wilc_spi *spi_priv = wilc->bus_data; int result; + u8 i; - result = wilc_spi_single_read(wilc, CMD_INTERNAL_READ, adr, data, 0); - if (result) { + for (i = 0; i < SPI_RETRY_MAX_LIMIT; i++) { + result = wilc_spi_single_read(wilc, CMD_INTERNAL_READ, adr, + data, 0); + if (!result) { + le32_to_cpus(data); + return 0; + } if (!spi_priv->probing_crc) dev_err(&spi->dev, "Failed internal read cmd...\n"); - return result; - } - le32_to_cpus(data); + wilc_spi_reset_cmd_sequence(wilc, i, adr); + } - return 0; + return result; } /******************************************** @@ -915,20 +953,27 @@ static int wilc_spi_write_reg(struct wilc *wilc, u32 addr, u32 data) int result; u8 cmd = CMD_SINGLE_WRITE; u8 clockless = 0; + u8 i; - if (addr < WILC_SPI_CLOCKLESS_ADDR_LIMIT) { + if (addr <= WILC_SPI_CLOCKLESS_ADDR_LIMIT) { /* Clockless register */ cmd = CMD_INTERNAL_WRITE; clockless = 1; } - result = wilc_spi_write_cmd(wilc, cmd, addr, data, clockless); - if (result) { + for (i = 0; i < SPI_RETRY_MAX_LIMIT; i++) { + result = wilc_spi_write_cmd(wilc, cmd, addr, data, clockless); + if (!result) + return 0; + dev_err(&spi->dev, "Failed cmd, write reg (%08x)...\n", addr); - return result; - } - return 0; + if (clockless) + break; + + wilc_spi_reset_cmd_sequence(wilc, i, addr); + } + return result; } static int spi_data_rsp(struct wilc *wilc, u8 cmd) @@ -981,6 +1026,7 @@ static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size) { struct spi_device *spi = to_spi_device(wilc->dev); int result; + u8 i; /* * has to be greater than 4 @@ -988,26 +1034,38 @@ if (size <= 4) return -EINVAL; - result = wilc_spi_dma_rw(wilc, CMD_DMA_EXT_WRITE, addr, NULL, size); - if (result) { - dev_err(&spi->dev, - "Failed cmd, write block (%08x)...\n", addr); - return result; - } + for (i = 0; i < SPI_RETRY_MAX_LIMIT; i++) { + result = wilc_spi_dma_rw(wilc, CMD_DMA_EXT_WRITE, addr, + NULL, size); + if (result) { + dev_err(&spi->dev, + "Failed cmd, write block (%08x)...\n", addr); + wilc_spi_reset_cmd_sequence(wilc, i, addr); + continue; + } - /* - * Data - */ - result = spi_data_write(wilc, buf, size); - if (result) { - dev_err(&spi->dev, "Failed block data write...\n"); - return result; - } + /* + * Data + */ + result = spi_data_write(wilc, buf, size); + if (result) { + dev_err(&spi->dev, "Failed block data write...\n"); + wilc_spi_reset_cmd_sequence(wilc, i, addr); + continue; + } - /* - * Data response - */ - return spi_data_rsp(wilc, CMD_DMA_EXT_WRITE); + /* + * Data response + */ + result = spi_data_rsp(wilc, CMD_DMA_EXT_WRITE); + if (result) {
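The surrounding wilc1000 SPI hunks give every bus transaction the same bounded retry envelope: on failure, run a reset command sequence and try again up to SPI_RETRY_MAX_LIMIT times, except for clockless registers, where retrying is not applicable. A compact model of that control flow; the function bodies are placeholders:

    #include <stdio.h>

    #define SPI_RETRY_MAX_LIMIT 10

    static int spi_single_read(int attempt)
    {
        return attempt < 2 ? -1 : 0; /* fail once, then succeed */
    }

    static void reset_cmd_sequence(int attempt, unsigned int addr)
    {
        printf("reset and retry %d, addr %#x\n", attempt, addr);
    }

    static int read_reg(unsigned int addr, int clockless)
    {
        int i, result = -1;

        for (i = 0; i < SPI_RETRY_MAX_LIMIT; i++) {
            result = spi_single_read(i + 1);
            if (!result)
                return 0;
            if (clockless) /* no retry for clockless registers */
                break;
            reset_cmd_sequence(i, addr);
        }
        return result;
    }

    int main(void)
    {
        return read_reg(0x1080, 0) ? 1 : 0; /* succeeds on the second try */
    }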
dev_err(&spi->dev, "Failed block data rsp...\n"); + wilc_spi_reset_cmd_sequence(wilc, i, addr); + continue; + } + break; + } + return result; } /******************************************** diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c index 58bbf50081e4..9eb115c79c90 100644 --- a/drivers/net/wireless/microchip/wilc1000/wlan.c +++ b/drivers/net/wireless/microchip/wilc1000/wlan.c @@ -1492,7 +1492,7 @@ int wilc_wlan_init(struct net_device *dev) } if (!wilc->vmm_table) - wilc->vmm_table = kzalloc(WILC_VMM_TBL_SIZE, GFP_KERNEL); + wilc->vmm_table = kcalloc(WILC_VMM_TBL_SIZE, sizeof(u32), GFP_KERNEL); if (!wilc->vmm_table) { ret = -ENOBUFS; diff --git a/drivers/net/wireless/purelifi/plfxlc/Kconfig b/drivers/net/wireless/purelifi/plfxlc/Kconfig index 4e0be27a5e0e..dd5fca480d7e 100644 --- a/drivers/net/wireless/purelifi/plfxlc/Kconfig +++ b/drivers/net/wireless/purelifi/plfxlc/Kconfig @@ -3,7 +3,7 @@ config PLFXLC tristate "pureLiFi X, XL, XC device support" depends on CFG80211 && MAC80211 && USB help - This option adds support for pureLiFi LiFi wireless USB + This option adds support for pureLiFi LiFi USB wireless adapters. The pureLiFi X, XL, XC USB devices are based on 802.11 OFDM PHY but uses light as the transmission medium. The driver supports common 802.11 encryption/authentication diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c index 94ee831b5de3..506d2f31efb5 100644 --- a/drivers/net/wireless/purelifi/plfxlc/mac.c +++ b/drivers/net/wireless/purelifi/plfxlc/mac.c @@ -666,7 +666,7 @@ static void plfxlc_get_et_strings(struct ieee80211_hw *hw, u32 sset, u8 *data) { if (sset == ETH_SS_STATS) - memcpy(data, *et_strings, sizeof(et_strings)); + memcpy(data, et_strings, sizeof(et_strings)); } static void plfxlc_get_et_stats(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 73e6f9408b51..663d77770fce 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -331,11 +331,11 @@ out: } static int qtnf_change_beacon(struct wiphy *wiphy, struct net_device *dev, - struct cfg80211_beacon_data *info) + struct cfg80211_ap_update *info) { struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); - return qtnf_mgmt_set_appie(vif, info); + return qtnf_mgmt_set_appie(vif, &info->beacon); } static int qtnf_start_ap(struct wiphy *wiphy, struct net_device *dev, diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index 68ae9c7ea95a..9540ad6196d7 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -1335,7 +1335,7 @@ static int qtnf_cmd_band_fill_iftype(const u8 *data, return -EINVAL; } - kfree(band->iftype_data); + kfree((__force void *)band->iftype_data); band->iftype_data = NULL; band->n_iftype_data = tlv->n_iftype_data; if (band->n_iftype_data == 0) @@ -1347,7 +1347,8 @@ static int qtnf_cmd_band_fill_iftype(const u8 *data, band->n_iftype_data = 0; return -ENOMEM; } - band->iftype_data = iftype_data; + + _ieee80211_set_sband_iftype_data(band, iftype_data, tlv->n_iftype_data); for (i = 0; i < band->n_iftype_data; i++) qtnf_cmd_conv_iftype(iftype_data++, &tlv->iftype_data[i]); diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index 2a63ffdc4b2c..677bac835330 100644 
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -535,7 +535,7 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid) if (!wiphy->bands[band]) continue; - kfree(wiphy->bands[band]->iftype_data); + kfree((__force void *)wiphy->bands[band]->iftype_data); wiphy->bands[band]->n_iftype_data = 0; kfree(wiphy->bands[band]->channels); diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c index 31bc58e96ac0..3b283e93a13e 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c @@ -477,9 +477,9 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac, if (!vif->netdev) continue; - mutex_lock(&vif->wdev.mtx); + wiphy_lock(priv_to_wiphy(vif->mac)); cfg80211_ch_switch_notify(vif->netdev, &chandef, 0, 0); - mutex_unlock(&vif->wdev.mtx); + wiphy_unlock(priv_to_wiphy(vif->mac)); } return 0; diff --git a/drivers/net/wireless/ralink/rt2x00/Kconfig b/drivers/net/wireless/ralink/rt2x00/Kconfig index dcccc290a7f5..d1fd66d44a7e 100644 --- a/drivers/net/wireless/ralink/rt2x00/Kconfig +++ b/drivers/net/wireless/ralink/rt2x00/Kconfig @@ -170,7 +170,7 @@ config RT2800USB_RT35XX config RT2800USB_RT3573 bool "rt2800usb - Include support for rt3573 devices (EXPERIMENTAL)" help - This enables support for RT3573 chipset based wireless USB devices + This enables support for RT3573 chipset based USB wireless devices in the rt2800usb driver. config RT2800USB_RT53XX diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800.h b/drivers/net/wireless/ralink/rt2x00/rt2800.h index de2ee5ffc34e..48521e45577d 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800.h @@ -871,6 +871,18 @@ #define LED_CFG_LED_POLAR FIELD32(0x40000000) /* + * AMPDU_MAX_LEN_20M1S: Per MCS max A-MPDU length, 20 MHz, MCS 0-7 + * AMPDU_MAX_LEN_20M2S: Per MCS max A-MPDU length, 20 MHz, MCS 8-15 + * AMPDU_MAX_LEN_40M1S: Per MCS max A-MPDU length, 40 MHz, MCS 0-7 + * AMPDU_MAX_LEN_40M2S: Per MCS max A-MPDU length, 40 MHz, MCS 8-15 + * Maximum A-MPDU length = 2^(AMPDU_MAX - 5) kilobytes + */ +#define AMPDU_MAX_LEN_20M1S 0x1030 +#define AMPDU_MAX_LEN_20M2S 0x1034 +#define AMPDU_MAX_LEN_40M1S 0x1038 +#define AMPDU_MAX_LEN_40M2S 0x103C + +/* * AMPDU_BA_WINSIZE: Force BlockAck window size * FORCE_WINSIZE_ENABLE: * 0: Disable forcing of BlockAck window size @@ -1545,6 +1557,12 @@ */ #define EXP_ACK_TIME 0x1380 +/* + * HT_FBK_TO_LEGACY: Enable/Disable HT/RTS fallback to OFDM/CCK rate + * Not available for legacy SoCs + */ +#define HT_FBK_TO_LEGACY 0x1384 + /* TX_PWR_CFG_5 */ #define TX_PWR_CFG_5 0x1384 #define TX_PWR_CFG_5_MCS16_CH0 FIELD32(0x0000000f) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 1226a883cd67..ee880f749b3c 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -856,6 +856,7 @@ static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, u32 rxwi_w2) s8 rssi0 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI0); s8 rssi1 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI1); s8 rssi2 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI2); + s8 base_val = rt2x00_rt(rt2x00dev, RT6352) ? 
-2 : -12; u16 eeprom; u8 offset0; u8 offset1; @@ -880,9 +881,9 @@ static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, u32 rxwi_w2) * If the value in the descriptor is 0, it is considered invalid * and the default (extremely low) rssi value is assumed */ - rssi0 = (rssi0) ? (-12 - offset0 - rt2x00dev->lna_gain - rssi0) : -128; - rssi1 = (rssi1) ? (-12 - offset1 - rt2x00dev->lna_gain - rssi1) : -128; - rssi2 = (rssi2) ? (-12 - offset2 - rt2x00dev->lna_gain - rssi2) : -128; + rssi0 = (rssi0) ? (base_val - offset0 - rt2x00dev->lna_gain - rssi0) : -128; + rssi1 = (rssi1) ? (base_val - offset1 - rt2x00dev->lna_gain - rssi1) : -128; + rssi2 = (rssi2) ? (base_val - offset2 - rt2x00dev->lna_gain - rssi2) : -128; /* * mac80211 only accepts a single RSSI value. Calculating the @@ -1236,13 +1237,14 @@ void rt2800_txdone_nostatus(struct rt2x00_dev *rt2x00dev) } EXPORT_SYMBOL_GPL(rt2800_txdone_nostatus); -static int rt2800_check_hung(struct data_queue *queue) +static bool rt2800_check_hung(struct data_queue *queue) { unsigned int cur_idx = rt2800_drv_get_dma_done(queue); - if (queue->wd_idx != cur_idx) + if (queue->wd_idx != cur_idx) { + queue->wd_idx = cur_idx; queue->wd_count = 0; - else + } else queue->wd_count++; return queue->wd_count > 16; @@ -1279,7 +1281,7 @@ void rt2800_watchdog(struct rt2x00_dev *rt2x00dev) case QID_MGMT: if (rt2x00queue_empty(queue)) continue; - hung_tx = rt2800_check_hung(queue); + hung_tx = hung_tx || rt2800_check_hung(queue); break; case QID_RX: /* For station mode we should reactive at least @@ -1288,7 +1290,7 @@ void rt2800_watchdog(struct rt2x00_dev *rt2x00dev) */ if (rt2x00dev->intf_sta_count == 0) continue; - hung_rx = rt2800_check_hung(queue); + hung_rx = hung_rx || rt2800_check_hung(queue); break; default: break; @@ -1301,8 +1303,12 @@ void rt2800_watchdog(struct rt2x00_dev *rt2x00dev) if (hung_rx) rt2x00_warn(rt2x00dev, "Watchdog RX hung detected\n"); - if (hung_tx || hung_rx) + if (hung_tx || hung_rx) { + queue_for_each(rt2x00dev, queue) + queue->wd_count = 0; + ieee80211_restart_hw(rt2x00dev->hw); + } } EXPORT_SYMBOL_GPL(rt2800_watchdog); @@ -3855,38 +3861,53 @@ static void rt2800_config_channel_rf7620(struct rt2x00_dev *rt2x00dev, rfcsr |= tx_agc_fc; rt2800_rfcsr_write_bank(rt2x00dev, 7, 59, rfcsr); } - - if (conf_is_ht40(conf)) { - rt2800_bbp_glrt_write(rt2x00dev, 141, 0x10); - rt2800_bbp_glrt_write(rt2x00dev, 157, 0x2f); - } else { - rt2800_bbp_glrt_write(rt2x00dev, 141, 0x1a); - rt2800_bbp_glrt_write(rt2x00dev, 157, 0x40); - } } -static void rt2800_config_alc(struct rt2x00_dev *rt2x00dev, - struct ieee80211_channel *chan, - int power_level) { - u16 eeprom, target_power, max_power; +static void rt2800_config_alc_rt6352(struct rt2x00_dev *rt2x00dev, + struct ieee80211_channel *chan, + int power_level) +{ + int cur_channel = rt2x00dev->rf_channel; + u16 eeprom, chan_power, rate_power, target_power; + u16 tx_power[2]; + s8 *power_group[2]; u32 mac_sys_ctrl; - u32 reg; + u32 cnt, reg; u8 bbp; - /* hardware unit is 0.5dBm, limited to 23.5dBm */ - power_level *= 2; - if (power_level > 0x2f) - power_level = 0x2f; + if (WARN_ON(cur_channel < 1 || cur_channel > 14)) + return; - max_power = chan->max_power * 2; - if (max_power > 0x2f) - max_power = 0x2f; + /* get per chain power, 2 chains in total, unit is 0.5dBm */ + power_level = (power_level - 3) * 2; + + /* We can't get the accurate TX power. Based on some tests, the real + * TX power is approximately equal to channel_power + (max)rate_power. + * Usually max rate_power is the gain of the OFDM 6M rate. 
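The rt2800_check_hung() change above is a real fix: the watchdog compares the DMA completion index between polls, and without storing the index each time it moves, the stall counter was reset against a stale snapshot and hangs could go undetected. The corrected logic in isolation:

    #include <stdbool.h>
    #include <stdio.h>

    struct queue {
        unsigned int wd_idx, wd_count;
    };

    static bool check_hung(struct queue *q, unsigned int cur_idx)
    {
        if (q->wd_idx != cur_idx) {
            q->wd_idx = cur_idx; /* the assignment the fix adds */
            q->wd_count = 0;
        } else {
            q->wd_count++;
        }
        return q->wd_count > 16;
    }

    int main(void)
    {
        struct queue q = { 0, 0 };
        bool hung = false;
        int poll;

        for (poll = 0; poll < 20; poll++)
            hung = check_hung(&q, 5 /* index never advances */);

        printf("hung: %s\n", hung ? "yes" : "no");
        return 0;
    }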
The antenna + * gain and external PA gain are not included as we are unable to + * obtain these values. + */ + rate_power = rt2800_eeprom_read_from_array(rt2x00dev, + EEPROM_TXPOWER_BYRATE, 1); + rate_power &= 0x3f; + power_level -= rate_power; + if (power_level < 1) + power_level = 1; + + power_group[0] = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1); + power_group[1] = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2); + for (cnt = 0; cnt < 2; cnt++) { + chan_power = power_group[cnt][cur_channel - 1]; + if (chan_power >= 0x20 || chan_power == 0) + chan_power = 0x10; + tx_power[cnt] = power_level < chan_power ? power_level : chan_power; + } reg = rt2800_register_read(rt2x00dev, TX_ALC_CFG_0); - rt2x00_set_field32(&reg, TX_ALC_CFG_0_CH_INIT_0, power_level); - rt2x00_set_field32(&reg, TX_ALC_CFG_0_CH_INIT_1, power_level); - rt2x00_set_field32(&reg, TX_ALC_CFG_0_LIMIT_0, max_power); - rt2x00_set_field32(&reg, TX_ALC_CFG_0_LIMIT_1, max_power); + rt2x00_set_field32(&reg, TX_ALC_CFG_0_CH_INIT_0, tx_power[0]); + rt2x00_set_field32(&reg, TX_ALC_CFG_0_CH_INIT_1, tx_power[1]); + rt2x00_set_field32(&reg, TX_ALC_CFG_0_LIMIT_0, 0x2f); + rt2x00_set_field32(&reg, TX_ALC_CFG_0_LIMIT_1, 0x2f); eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_INTERNAL_TX_ALC)) { @@ -4408,66 +4429,45 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, usleep_range(1000, 1500); } - if (rt2x00_rt(rt2x00dev, RT5592) || rt2x00_rt(rt2x00dev, RT6352)) { - reg = 0x10; - if (!conf_is_ht40(conf)) { - if (rt2x00_rt(rt2x00dev, RT6352) && - rt2x00_has_cap_external_lna_bg(rt2x00dev)) { - reg |= 0x5; - } else { - reg |= 0xa; - } - } - rt2800_bbp_write(rt2x00dev, 195, 141); - rt2800_bbp_write(rt2x00dev, 196, reg); + if (rt2x00_rt(rt2x00dev, RT5592)) { + bbp = conf_is_ht40(conf) ? 0x10 : 0x1a; + rt2800_bbp_glrt_write(rt2x00dev, 141, bbp); - /* AGC init. - * Despite the vendor driver using different values here for - * RT6352 chip, we use 0x1c for now. This may have to be changed - * once TSSI got implemented. - */ - reg = (rf->channel <= 14 ? 0x1c : 0x24) + 2*rt2x00dev->lna_gain; - rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg); + bbp = (rf->channel <= 14 ?
0x1c : 0x24) + 2 * rt2x00dev->lna_gain; + rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, bbp); - if (rt2x00_rt(rt2x00dev, RT5592)) - rt2800_iq_calibrate(rt2x00dev, rf->channel); + rt2800_iq_calibrate(rt2x00dev, rf->channel); } if (rt2x00_rt(rt2x00dev, RT6352)) { - if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, - &rt2x00dev->cap_flags)) { - reg = rt2800_register_read(rt2x00dev, RF_CONTROL3); - reg |= 0x00000101; - rt2800_register_write(rt2x00dev, RF_CONTROL3, reg); - - reg = rt2800_register_read(rt2x00dev, RF_BYPASS3); - reg |= 0x00000101; - rt2800_register_write(rt2x00dev, RF_BYPASS3, reg); - - rt2800_rfcsr_write_chanreg(rt2x00dev, 43, 0x73); - rt2800_rfcsr_write_chanreg(rt2x00dev, 44, 0x73); - rt2800_rfcsr_write_chanreg(rt2x00dev, 45, 0x73); - rt2800_rfcsr_write_chanreg(rt2x00dev, 46, 0x27); - rt2800_rfcsr_write_chanreg(rt2x00dev, 47, 0xC8); - rt2800_rfcsr_write_chanreg(rt2x00dev, 48, 0xA4); - rt2800_rfcsr_write_chanreg(rt2x00dev, 49, 0x05); - rt2800_rfcsr_write_chanreg(rt2x00dev, 54, 0x27); - rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0xC8); - rt2800_rfcsr_write_chanreg(rt2x00dev, 56, 0xA4); - rt2800_rfcsr_write_chanreg(rt2x00dev, 57, 0x05); - rt2800_rfcsr_write_chanreg(rt2x00dev, 58, 0x27); - rt2800_rfcsr_write_chanreg(rt2x00dev, 59, 0xC8); - rt2800_rfcsr_write_chanreg(rt2x00dev, 60, 0xA4); - rt2800_rfcsr_write_chanreg(rt2x00dev, 61, 0x05); - rt2800_rfcsr_write_dccal(rt2x00dev, 05, 0x00); - - rt2800_register_write(rt2x00dev, TX0_RF_GAIN_CORRECT, - 0x36303636); - rt2800_register_write(rt2x00dev, TX0_RF_GAIN_ATTEN, - 0x6C6C6B6C); - rt2800_register_write(rt2x00dev, TX1_RF_GAIN_ATTEN, - 0x6C6C6B6C); + /* BBP for GLRT BW */ + bbp = conf_is_ht40(conf) ? + 0x10 : rt2x00_has_cap_external_lna_bg(rt2x00dev) ? + 0x15 : 0x1a; + rt2800_bbp_glrt_write(rt2x00dev, 141, bbp); + + bbp = conf_is_ht40(conf) ? 0x2f : 0x40; + rt2800_bbp_glrt_write(rt2x00dev, 157, bbp); + + if (rt2x00dev->default_ant.rx_chain_num == 1) { + rt2800_bbp_write(rt2x00dev, 91, 0x07); + rt2800_bbp_write(rt2x00dev, 95, 0x1a); + rt2800_bbp_glrt_write(rt2x00dev, 128, 0xa0); + rt2800_bbp_glrt_write(rt2x00dev, 170, 0x12); + rt2800_bbp_glrt_write(rt2x00dev, 171, 0x10); + } else { + rt2800_bbp_write(rt2x00dev, 91, 0x06); + rt2800_bbp_write(rt2x00dev, 95, 0x9a); + rt2800_bbp_glrt_write(rt2x00dev, 128, 0xe0); + rt2800_bbp_glrt_write(rt2x00dev, 170, 0x30); + rt2800_bbp_glrt_write(rt2x00dev, 171, 0x30); } + + /* AGC init */ + bbp = rf->channel <= 14 ? 0x04 + 2 * rt2x00dev->lna_gain : 0; + rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, bbp); + + usleep_range(1000, 1500); } bbp = rt2800_bbp_read(rt2x00dev, 4); @@ -5268,7 +5268,7 @@ static void rt2800_config_txpower_rt6352(struct rt2x00_dev *rt2x00dev, rt2x00_set_field32(&pwreg, TX_PWR_CFG_9B_STBC_MCS7, t); rt2800_register_write(rt2x00dev, TX_PWR_CFG_9, pwreg); - rt2800_config_alc(rt2x00dev, chan, power_level); + rt2800_config_alc_rt6352(rt2x00dev, chan, power_level); /* TODO: temperature compensation code! 
*/ } @@ -5577,43 +5577,6 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev) } } rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin); - - if (rt2x00_rt(rt2x00dev, RT6352)) { - if (rt2x00dev->default_ant.rx_chain_num == 1) { - rt2800_bbp_write(rt2x00dev, 91, 0x07); - rt2800_bbp_write(rt2x00dev, 95, 0x1A); - rt2800_bbp_write(rt2x00dev, 195, 128); - rt2800_bbp_write(rt2x00dev, 196, 0xA0); - rt2800_bbp_write(rt2x00dev, 195, 170); - rt2800_bbp_write(rt2x00dev, 196, 0x12); - rt2800_bbp_write(rt2x00dev, 195, 171); - rt2800_bbp_write(rt2x00dev, 196, 0x10); - } else { - rt2800_bbp_write(rt2x00dev, 91, 0x06); - rt2800_bbp_write(rt2x00dev, 95, 0x9A); - rt2800_bbp_write(rt2x00dev, 195, 128); - rt2800_bbp_write(rt2x00dev, 196, 0xE0); - rt2800_bbp_write(rt2x00dev, 195, 170); - rt2800_bbp_write(rt2x00dev, 196, 0x30); - rt2800_bbp_write(rt2x00dev, 195, 171); - rt2800_bbp_write(rt2x00dev, 196, 0x30); - } - - if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) { - rt2800_bbp_write(rt2x00dev, 75, 0x68); - rt2800_bbp_write(rt2x00dev, 76, 0x4C); - rt2800_bbp_write(rt2x00dev, 79, 0x1C); - rt2800_bbp_write(rt2x00dev, 80, 0x0C); - rt2800_bbp_write(rt2x00dev, 82, 0xB6); - } - - /* On 11A, We should delay and wait RF/BBP to be stable - * and the appropriate time should be 1000 micro seconds - * 2005/06/05 - On 11G, we also need this delay time. - * Otherwise it's difficult to pass the WHQL. - */ - usleep_range(1000, 1500); - } } EXPORT_SYMBOL_GPL(rt2800_vco_calibration); @@ -5822,6 +5785,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; u32 reg; u16 eeprom; + u8 bbp; unsigned int i; int ret; @@ -5831,6 +5795,19 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) if (ret) return ret; + if (rt2x00_rt(rt2x00dev, RT6352)) { + rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x01); + + bbp = rt2800_bbp_read(rt2x00dev, 21); + bbp |= 0x01; + rt2800_bbp_write(rt2x00dev, 21, bbp); + bbp = rt2800_bbp_read(rt2x00dev, 21); + bbp &= (~0x01); + rt2800_bbp_write(rt2x00dev, 21, bbp); + + rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00); + } + rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE, 0x0000013f); rt2800_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003); @@ -5984,6 +5961,14 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) reg = rt2800_register_read(rt2x00dev, TX_ALC_CFG_1); rt2x00_set_field32(&reg, TX_ALC_CFG_1_ROS_BUSY_EN, 0); rt2800_register_write(rt2x00dev, TX_ALC_CFG_1, reg); + + rt2800_register_write(rt2x00dev, AMPDU_MAX_LEN_20M1S, 0x77754433); + rt2800_register_write(rt2x00dev, AMPDU_MAX_LEN_20M2S, 0x77765543); + rt2800_register_write(rt2x00dev, AMPDU_MAX_LEN_40M1S, 0x77765544); + rt2800_register_write(rt2x00dev, AMPDU_MAX_LEN_40M2S, 0x77765544); + + rt2800_register_write(rt2x00dev, HT_FBK_TO_LEGACY, 0x1010); + } else { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); @@ -7202,6 +7187,8 @@ static void rt2800_init_bbp_6352(struct rt2x00_dev *rt2x00dev) rt2800_bbp_dcoc_write(rt2x00dev, 159, 0x64); rt2800_bbp4_mac_if_ctrl(rt2x00dev); + + rt2800_bbp_write(rt2x00dev, 84, 0x19); } static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) @@ -8561,7 +8548,7 @@ static void rt2800_r_calibration(struct rt2x00_dev *rt2x00dev) rt2x00_warn(rt2x00dev, "Wait MAC Tx Status to MAX !!!\n"); maccfg = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL); - maccfg &= (~0x04); + maccfg &= (~0x08); rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, maccfg); if
(unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_RX))) @@ -9677,9 +9664,6 @@ static void rt2800_loft_iq_calibration(struct rt2x00_dev *rt2x00dev) rt2x00_dbg(rt2x00dev, "Used VGA %d %x\n", vga_gain[ch_idx], rfvga_gain_table[vga_gain[ch_idx]]); - - if (vga_gain[ch_idx] < 0) - vga_gain[ch_idx] = 0; } rfvalue = rfvga_gain_table[vga_gain[ch_idx]]; @@ -10316,6 +10300,128 @@ do_cal: rt2800_register_write(rt2x00dev, RF_BYPASS0, MAC_RF_BYPASS0); } +static void rt2800_restore_rf_bbp_rt6352(struct rt2x00_dev *rt2x00dev) +{ + if (rt2x00_has_cap_external_pa(rt2x00dev)) { + rt2800_register_write(rt2x00dev, RF_CONTROL3, 0x0); + rt2800_register_write(rt2x00dev, RF_BYPASS3, 0x0); + } + + if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) { + rt2800_rfcsr_write_chanreg(rt2x00dev, 14, 0x16); + rt2800_rfcsr_write_chanreg(rt2x00dev, 17, 0x23); + rt2800_rfcsr_write_chanreg(rt2x00dev, 18, 0x02); + } + + if (rt2x00_has_cap_external_pa(rt2x00dev)) { + rt2800_rfcsr_write_chanreg(rt2x00dev, 43, 0xd3); + rt2800_rfcsr_write_chanreg(rt2x00dev, 44, 0xb3); + rt2800_rfcsr_write_chanreg(rt2x00dev, 45, 0xd5); + rt2800_rfcsr_write_chanreg(rt2x00dev, 46, 0x27); + rt2800_rfcsr_write_chanreg(rt2x00dev, 47, 0x6c); + rt2800_rfcsr_write_chanreg(rt2x00dev, 48, 0xfc); + rt2800_rfcsr_write_chanreg(rt2x00dev, 49, 0x1f); + rt2800_rfcsr_write_chanreg(rt2x00dev, 54, 0x27); + rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0x66); + rt2800_rfcsr_write_chanreg(rt2x00dev, 56, 0xff); + rt2800_rfcsr_write_chanreg(rt2x00dev, 57, 0x1c); + rt2800_rfcsr_write_chanreg(rt2x00dev, 58, 0x20); + rt2800_rfcsr_write_chanreg(rt2x00dev, 59, 0x6b); + rt2800_rfcsr_write_chanreg(rt2x00dev, 60, 0xf7); + rt2800_rfcsr_write_chanreg(rt2x00dev, 61, 0x09); + } + + if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) { + rt2800_bbp_write(rt2x00dev, 75, 0x60); + rt2800_bbp_write(rt2x00dev, 76, 0x44); + rt2800_bbp_write(rt2x00dev, 79, 0x1c); + rt2800_bbp_write(rt2x00dev, 80, 0x0c); + rt2800_bbp_write(rt2x00dev, 82, 0xB6); + } + + if (rt2x00_has_cap_external_pa(rt2x00dev)) { + rt2800_register_write(rt2x00dev, TX0_RF_GAIN_CORRECT, 0x3630363a); + rt2800_register_write(rt2x00dev, TX0_RF_GAIN_ATTEN, 0x6c6c666c); + rt2800_register_write(rt2x00dev, TX1_RF_GAIN_ATTEN, 0x6c6c666c); + } +} + +static void rt2800_calibration_rt6352(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + if (rt2x00_has_cap_external_pa(rt2x00dev) || + rt2x00_has_cap_external_lna_bg(rt2x00dev)) + rt2800_restore_rf_bbp_rt6352(rt2x00dev); + + rt2800_r_calibration(rt2x00dev); + rt2800_rf_self_txdc_cal(rt2x00dev); + rt2800_rxdcoc_calibration(rt2x00dev); + rt2800_bw_filter_calibration(rt2x00dev, true); + rt2800_bw_filter_calibration(rt2x00dev, false); + rt2800_loft_iq_calibration(rt2x00dev); + + /* missing DPD calibration for internal PA devices */ + + rt2800_rxdcoc_calibration(rt2x00dev); + rt2800_rxiq_calibration(rt2x00dev); + + if (!rt2x00_has_cap_external_pa(rt2x00dev) && + !rt2x00_has_cap_external_lna_bg(rt2x00dev)) + return; + + if (rt2x00_has_cap_external_pa(rt2x00dev)) { + reg = rt2800_register_read(rt2x00dev, RF_CONTROL3); + reg |= 0x00000101; + rt2800_register_write(rt2x00dev, RF_CONTROL3, reg); + + reg = rt2800_register_read(rt2x00dev, RF_BYPASS3); + reg |= 0x00000101; + rt2800_register_write(rt2x00dev, RF_BYPASS3, reg); + } + + if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) { + rt2800_rfcsr_write_chanreg(rt2x00dev, 14, 0x66); + rt2800_rfcsr_write_chanreg(rt2x00dev, 17, 0x20); + rt2800_rfcsr_write_chanreg(rt2x00dev, 18, 0x42); + } + + if (rt2x00_has_cap_external_pa(rt2x00dev)) { + 
rt2800_rfcsr_write_chanreg(rt2x00dev, 43, 0x73); + rt2800_rfcsr_write_chanreg(rt2x00dev, 44, 0x73); + rt2800_rfcsr_write_chanreg(rt2x00dev, 45, 0x73); + rt2800_rfcsr_write_chanreg(rt2x00dev, 46, 0x27); + rt2800_rfcsr_write_chanreg(rt2x00dev, 47, 0xc8); + rt2800_rfcsr_write_chanreg(rt2x00dev, 48, 0xa4); + rt2800_rfcsr_write_chanreg(rt2x00dev, 49, 0x05); + rt2800_rfcsr_write_chanreg(rt2x00dev, 54, 0x27); + rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0xc8); + rt2800_rfcsr_write_chanreg(rt2x00dev, 56, 0xa4); + rt2800_rfcsr_write_chanreg(rt2x00dev, 57, 0x05); + rt2800_rfcsr_write_chanreg(rt2x00dev, 58, 0x27); + rt2800_rfcsr_write_chanreg(rt2x00dev, 59, 0xc8); + rt2800_rfcsr_write_chanreg(rt2x00dev, 60, 0xa4); + rt2800_rfcsr_write_chanreg(rt2x00dev, 61, 0x05); + } + + if (rt2x00_has_cap_external_pa(rt2x00dev)) + rt2800_rfcsr_write_dccal(rt2x00dev, 05, 0x00); + + if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) { + rt2800_bbp_write(rt2x00dev, 75, 0x68); + rt2800_bbp_write(rt2x00dev, 76, 0x4c); + rt2800_bbp_write(rt2x00dev, 79, 0x1c); + rt2800_bbp_write(rt2x00dev, 80, 0x0c); + rt2800_bbp_write(rt2x00dev, 82, 0xb6); + } + + if (rt2x00_has_cap_external_pa(rt2x00dev)) { + rt2800_register_write(rt2x00dev, TX0_RF_GAIN_CORRECT, 0x36303636); + rt2800_register_write(rt2x00dev, TX0_RF_GAIN_ATTEN, 0x6c6c6b6c); + rt2800_register_write(rt2x00dev, TX1_RF_GAIN_ATTEN, 0x6c6c6b6c); + } +} + static void rt2800_init_rfcsr_6352(struct rt2x00_dev *rt2x00dev) { /* Initialize RF central register to default value */ @@ -10580,13 +10686,8 @@ static void rt2800_init_rfcsr_6352(struct rt2x00_dev *rt2x00dev) rt2800_rfcsr_write_dccal(rt2x00dev, 5, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 17, 0x7C); - rt2800_r_calibration(rt2x00dev); - rt2800_rf_self_txdc_cal(rt2x00dev); - rt2800_rxdcoc_calibration(rt2x00dev); - rt2800_bw_filter_calibration(rt2x00dev, true); - rt2800_bw_filter_calibration(rt2x00dev, false); - rt2800_loft_iq_calibration(rt2x00dev); - rt2800_rxiq_calibration(rt2x00dev); + /* Do calibration and init PA/LNA */ + rt2800_calibration_rt6352(rt2x00dev); } static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c index 862098f753d2..5323acff962a 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c @@ -760,6 +760,9 @@ int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev) rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003); + if (rt2x00_rt(rt2x00dev, RT6352)) + return 0; + reg = 0; rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1); rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 07a6a5a9ce13..aaaf99331967 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -1263,6 +1263,12 @@ rt2x00_has_cap_external_lna_bg(struct rt2x00_dev *rt2x00dev) } static inline bool +rt2x00_has_cap_external_pa(struct rt2x00_dev *rt2x00dev) +{ + return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_EXTERNAL_PA_TX0); +} + +static inline bool rt2x00_has_cap_double_antenna(struct rt2x00_dev *rt2x00dev) { return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_DOUBLE_ANTENNA); } diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index 9a9cfd0ce402..c88ce446e117 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +++
b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c @@ -533,7 +533,7 @@ void rt2x00lib_txdone(struct queue_entry *entry, */ if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) { if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TASKLET_CONTEXT)) - ieee80211_tx_status(rt2x00dev->hw, entry->skb); + ieee80211_tx_status_skb(rt2x00dev->hw, entry->skb); else ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb); } else { diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c index f673aa9ba15a..47bcaec6f2db 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c @@ -1767,6 +1767,8 @@ struct rtl8xxxu_fileops rtl8192eu_fops = { .has_s0s1 = 0, .gen2_thermal_meter = 1, .needs_full_init = 1, + .supports_ap = 1, + .max_macid_num = 128, .adda_1t_init = 0x0fc01616, .adda_1t_path_on = 0x0fc01616, .adda_2t_path_on_a = 0x0fc01616, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c index 18dc5221a9c0..28e93835e05a 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c @@ -2079,6 +2079,8 @@ struct rtl8xxxu_fileops rtl8192fu_fops = { .ampdu_max_time = 0x5e, .ustime_tsf_edca = 0x50, .max_aggr_num = 0x1f1f, + .supports_ap = 1, + .max_macid_num = 128, .trxff_boundary = 0x3f3f, .pbp_rx = PBP_PAGE_SIZE_256, .pbp_tx = PBP_PAGE_SIZE_256, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c index f0d17b75c5f1..871b8cca8a18 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c @@ -1875,6 +1875,8 @@ struct rtl8xxxu_fileops rtl8710bu_fops = { */ .ustime_tsf_edca = 0x28, .max_aggr_num = 0x0c14, + .supports_ap = 1, + .max_macid_num = 16, .adda_1t_init = 0x03c00016, .adda_1t_path_on = 0x03c00016, .trxff_boundary = 0x3f7f, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c index 13ad5d5b73f4..954369ed6226 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c @@ -1742,6 +1742,8 @@ struct rtl8xxxu_fileops rtl8723bu_fops = { .ampdu_max_time = 0x5e, .ustime_tsf_edca = 0x50, .max_aggr_num = 0x0c14, + .supports_ap = 1, + .max_macid_num = 128, .adda_1t_init = 0x01c00014, .adda_1t_path_on = 0x01c00014, .adda_2t_path_on_a = 0x01c00014, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 5d102a1246a3..43ee7592bc6e 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -7500,6 +7500,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface, case 0x8179: case 0xb711: case 0xf192: + case 0x2005: untested = 0; break; } @@ -7800,6 +7801,7 @@ static const struct usb_device_id dev_table[] = { /* Asus USB-N13 rev C1 */ {USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x18f1, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192fu_fops}, +/* EDIMAX EW-7722UTn V3 */ {USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xb722, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192fu_fops}, {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x318b, 0xff, 0xff, 0xff), diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c 
b/drivers/net/wireless/realtek/rtlwifi/base.c index 807a53a97325..7ce37fb4fdbf 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -1317,12 +1317,6 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb) struct rtl_priv *rtlpriv = rtl_priv(hw); __le16 fc = rtl_get_fc(skb); - if (rtlpriv->dm.supp_phymode_switch && - mac->link_state < MAC80211_LINKED && - (ieee80211_is_auth(fc) || ieee80211_is_probe_req(fc))) { - if (rtlpriv->cfg->ops->chk_switch_dmdp) - rtlpriv->cfg->ops->chk_switch_dmdp(hw); - } if (ieee80211_is_auth(fc)) { rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 4fb16f5f6f83..69e97647e3d6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -662,13 +662,6 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed) if (mac->act_scanning) mac->n_channels++; - if (rtlpriv->dm.supp_phymode_switch && - mac->link_state < MAC80211_LINKED && - !mac->act_scanning) { - if (rtlpriv->cfg->ops->chk_switch_dmdp) - rtlpriv->cfg->ops->chk_switch_dmdp(hw); - } - /* *because we should back channel to *current_network.chan in scanning, @@ -1197,10 +1190,6 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, mac->vendor = PEER_UNKNOWN; mac->mode = 0; - if (rtlpriv->dm.supp_phymode_switch) { - if (rtlpriv->cfg->ops->chk_switch_dmdp) - rtlpriv->cfg->ops->chk_switch_dmdp(hw); - } rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG, "BSS_CHANGED_UN_ASSOC\n"); } @@ -1464,11 +1453,6 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw, rtlpriv->btcoexist.btc_ops->btc_scan_notify_wifi_only(rtlpriv, 1); - if (rtlpriv->dm.supp_phymode_switch) { - if (rtlpriv->cfg->ops->chk_switch_dmdp) - rtlpriv->cfg->ops->chk_switch_dmdp(hw); - } - if (mac->link_state == MAC80211_LINKED) { rtl_lps_leave(hw, true); mac->link_state = MAC80211_LINKED_SCANNING; @@ -1656,7 +1640,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, memcpy(rtlpriv->sec.key_buf[key_idx], key->key, key->keylen); rtlpriv->sec.key_len[key_idx] = key->keylen; - memcpy(mac_addr, bcast_addr, ETH_ALEN); + eth_broadcast_addr(mac_addr); } else { /* pairwise key */ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "set pairwise key\n"); @@ -1897,7 +1881,7 @@ bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb) /*this is wrong, fill_tx_cmddesc needs update*/ pdesc = &ring->desc[0]; - rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb); + rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, skb); __skb_queue_tail(&ring->queue, skb); diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c index 629c03271bde..6241e4fed4f6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/ps.c +++ b/drivers/net/wireless/realtek/rtlwifi/ps.c @@ -681,25 +681,10 @@ void rtl_swlps_wq_callback(struct work_struct *work) ps_work.work); struct ieee80211_hw *hw = rtlworks->hw; struct rtl_priv *rtlpriv = rtl_priv(hw); - bool ps = false; - - ps = (hw->conf.flags & IEEE80211_CONF_PS); /* we can sleep after ps null send ok */ - if (rtlpriv->psc.state_inap) { + if (rtlpriv->psc.state_inap) rtl_swlps_rf_sleep(hw); - - if (rtlpriv->psc.state && !ps) { - rtlpriv->psc.sleep_ms = jiffies_to_msecs(jiffies - - rtlpriv->psc.last_action); - } - - if (ps) - rtlpriv->psc.last_slept = jiffies; - - rtlpriv->psc.last_action = jiffies; - rtlpriv->psc.state = ps; - 
} } static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c index 6f61d6a10627..5a34894a533b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c @@ -799,7 +799,7 @@ static void rtl88e_dm_check_edca_turbo(struct ieee80211_hw *hw) } if (rtlpriv->btcoexist.bt_edca_dl != 0) { - edca_be_ul = rtlpriv->btcoexist.bt_edca_dl; + edca_be_dl = rtlpriv->btcoexist.bt_edca_dl; bt_change_edca = true; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c index 58b1a46066b5..27f6c35ba0f9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c @@ -433,14 +433,9 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) break; case HW_VAR_AMPDU_MIN_SPACE:{ u8 min_spacing_to_set; - u8 sec_min_space; min_spacing_to_set = *val; if (min_spacing_to_set <= 7) { - sec_min_space = 0; - - if (min_spacing_to_set < sec_min_space) - min_spacing_to_set = sec_min_space; mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) | diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c index 65ebe52883d3..d094163a9a71 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c @@ -665,9 +665,8 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); } -void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw, - u8 *pdesc8, bool firstseg, - bool lastseg, struct sk_buff *skb) +void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8, + struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); @@ -687,8 +686,7 @@ void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw, } clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE); - if (firstseg) - set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); + set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); set_tx_desc_tx_rate(pdesc, DESC92C_RATE1M); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h index e17f70b4d199..aae654b0e3ba 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h @@ -797,6 +797,5 @@ bool rtl88ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index); void rtl88ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, - bool firstseg, bool lastseg, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c index 0b6a15c2e5cc..d92aad60edfe 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c @@ -640,7 +640,7 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw) } if (rtlpriv->btcoexist.bt_edca_dl != 0) { - edca_be_ul = rtlpriv->btcoexist.bt_edca_dl; + edca_be_dl = rtlpriv->btcoexist.bt_edca_dl; bt_change_edca = true; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c index 049c4fe9eeed..0bc915723b93 100644 --- 
a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c @@ -208,14 +208,9 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } case HW_VAR_AMPDU_MIN_SPACE:{ u8 min_spacing_to_set; - u8 sec_min_space; min_spacing_to_set = *val; if (min_spacing_to_set <= 7) { - sec_min_space = 0; - - if (min_spacing_to_set < sec_min_space) - min_spacing_to_set = sec_min_space; mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) | diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c index 5376bb34251f..50e139186a93 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c @@ -518,9 +518,8 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); } -void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, - u8 *pdesc8, bool firstseg, - bool lastseg, struct sk_buff *skb) +void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8, + struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); @@ -540,9 +539,7 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, } clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE); - if (firstseg) - set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); - + set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); set_tx_desc_tx_rate(pdesc, DESC_RATE1M); set_tx_desc_seq(pdesc, 0); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h index b45b05a6a523..f3ffe3d9883c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h @@ -527,6 +527,5 @@ bool rtl92ce_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index); void rtl92ce_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, - bool b_firstseg, bool b_lastseg, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index a040c07791d1..5ec0eb8773a5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -1539,7 +1539,7 @@ static bool usb_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb) * if its "here". 
* * This is maybe necessary: - * rtlpriv->cfg->ops->fill_tx_cmddesc(hw, buffer, 1, 1, skb); + * rtlpriv->cfg->ops->fill_tx_cmddesc(hw, buffer, skb); */ dev_kfree_skb(skb); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c index e6403d4c937c..20b4aac69642 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c @@ -102,7 +102,6 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = { .set_hw_reg = rtl92cu_set_hw_reg, .update_rate_tbl = rtl92cu_update_hal_rate_tbl, .fill_tx_desc = rtl92cu_tx_fill_desc, - .fill_fake_txdesc = rtl92cu_fill_fake_txdesc, .fill_tx_cmddesc = rtl92cu_tx_fill_cmddesc, .query_rx_desc = rtl92cu_rx_query_desc, .set_channel_access = rtl92cu_update_channel_access_setting, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c index b70767e72f3d..2f44c8aa6066 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c @@ -600,35 +600,8 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "==>\n"); } -void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 *pdesc8, - u32 buffer_len, bool is_pspoll) -{ - __le32 *pdesc = (__le32 *)pdesc8; - - /* Clear all status */ - memset(pdesc, 0, RTL_TX_HEADER_SIZE); - set_tx_desc_first_seg(pdesc, 1); /* bFirstSeg; */ - set_tx_desc_last_seg(pdesc, 1); /* bLastSeg; */ - set_tx_desc_offset(pdesc, RTL_TX_HEADER_SIZE); /* Offset = 32 */ - set_tx_desc_pkt_size(pdesc, buffer_len); /* Buffer size + command hdr */ - set_tx_desc_queue_sel(pdesc, QSLT_MGNT); /* Fixed queue of Mgnt queue */ - /* Set NAVUSEHDR to prevent Ps-poll AId filed to be changed to error - * vlaue by Hw. */ - if (is_pspoll) { - set_tx_desc_nav_use_hdr(pdesc, 1); - } else { - set_tx_desc_hwseq_en(pdesc, 1); /* Hw set sequence number */ - set_tx_desc_pkt_id(pdesc, BIT(3)); /* set bit3 to 1. 
*/ - } - set_tx_desc_use_rate(pdesc, 1); /* use data rate which is set by Sw */ - set_tx_desc_own(pdesc, 1); - set_tx_desc_tx_rate(pdesc, DESC_RATE1M); - _rtl_tx_desc_checksum(pdesc); -} - -void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw, - u8 *pdesc8, bool firstseg, - bool lastseg, struct sk_buff *skb) +void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8, + struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 fw_queue = QSLT_BEACON; @@ -637,8 +610,7 @@ void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw, __le32 *pdesc = (__le32 *)pdesc8; memset((void *)pdesc, 0, RTL_TX_HEADER_SIZE); - if (firstseg) - set_tx_desc_offset(pdesc, RTL_TX_HEADER_SIZE); + set_tx_desc_offset(pdesc, RTL_TX_HEADER_SIZE); set_tx_desc_tx_rate(pdesc, DESC_RATE1M); set_tx_desc_seq(pdesc, 0); set_tx_desc_linip(pdesc, 0); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h index 171fe39dfb0c..5f81cab205cc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h @@ -394,10 +394,7 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 queue_index, struct rtl_tcb_desc *tcb_desc); -void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 *pdesc, - u32 buffer_len, bool ispspoll); -void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw, - u8 *pdesc, bool b_firstseg, - bool b_lastseg, struct sk_buff *skb); +void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, + struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c index 6cc9c7649eda..cf4aca83bd05 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c @@ -592,32 +592,18 @@ static void rtl92d_dm_check_edca_turbo(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + const u32 edca_be_ul = 0x5ea42b; + const u32 edca_be_dl = 0x5ea42b; static u64 last_txok_cnt; static u64 last_rxok_cnt; u64 cur_txok_cnt; u64 cur_rxok_cnt; - u32 edca_be_ul = 0x5ea42b; - u32 edca_be_dl = 0x5ea42b; if (mac->link_state != MAC80211_LINKED) { rtlpriv->dm.current_turbo_edca = false; goto exit; } - /* Enable BEQ TxOP limit configuration in wireless G-mode. */ - /* To check whether we shall force turn on TXOP configuration. */ - if ((!rtlpriv->dm.disable_framebursting) && - (rtlpriv->sec.pairwise_enc_algorithm == WEP40_ENCRYPTION || - rtlpriv->sec.pairwise_enc_algorithm == WEP104_ENCRYPTION || - rtlpriv->sec.pairwise_enc_algorithm == TKIP_ENCRYPTION)) { - /* Force TxOP limit to 0x005e for UL. */ - if (!(edca_be_ul & 0xffff0000)) - edca_be_ul |= 0x005e0000; - /* Force TxOP limit to 0x005e for DL. 
*/ - if (!(edca_be_dl & 0xffff0000)) - edca_be_dl |= 0x005e0000; - } - if ((!rtlpriv->dm.is_any_nonbepkts) && (!rtlpriv->dm.disable_framebursting)) { cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c index 9ddb8478784b..e1fb29962801 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c @@ -469,7 +469,7 @@ static bool _rtl92d_cmd_send_packet(struct ieee80211_hw *hw, pdesc = &ring->desc[idx]; /* discard output from call below */ rtlpriv->cfg->ops->get_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN); - rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb); + rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, skb); __skb_queue_tail(&ring->queue, skb); spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c index 31a18bbface9..743ac6871bf4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c @@ -225,13 +225,9 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } case HW_VAR_AMPDU_MIN_SPACE: { u8 min_spacing_to_set; - u8 sec_min_space; min_spacing_to_set = *val; if (min_spacing_to_set <= 7) { - sec_min_space = 0; - if (min_spacing_to_set < sec_min_space) - min_spacing_to_set = sec_min_space; mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) | min_spacing_to_set); *val = min_spacing_to_set; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c index c09c0c312665..02ac69c08ed3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c @@ -655,9 +655,8 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw, rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); } -void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, - u8 *pdesc8, bool firstseg, - bool lastseg, struct sk_buff *skb) +void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8, + struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); @@ -678,8 +677,7 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, return; } clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE); - if (firstseg) - set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); + set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); /* 5G have no CCK rate * Caution: The macros below are multi-line expansions. 
* The braces are needed no matter what checkpatch says diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h index d01578875cd5..2992668c156c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h @@ -564,7 +564,6 @@ bool rtl92de_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index); void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, - bool b_firstseg, bool b_lastseg, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c index 997ff115b9ab..5a828a934fe9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c @@ -936,8 +936,7 @@ void rtl92ee_dm_init(struct ieee80211_hw *hw) static void rtl92ee_dm_common_info_self_update(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_sta_info *drv_priv; - u8 cnt = 0; + u8 cnt; rtlpriv->dm.one_entry_only = false; @@ -951,9 +950,7 @@ static void rtl92ee_dm_common_info_self_update(struct ieee80211_hw *hw) rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC || rtlpriv->mac80211.opmode == NL80211_IFTYPE_MESH_POINT) { spin_lock_bh(&rtlpriv->locks.entry_list_lock); - list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) { - cnt++; - } + cnt = list_count_nodes(&rtlpriv->entry_list); spin_unlock_bh(&rtlpriv->locks.entry_list_lock); if (cnt == 1) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c index 616a47d8d97a..011ce82efeff 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c @@ -199,7 +199,6 @@ static struct rtl_hal_ops rtl8192ee_hal_ops = { .get_hw_reg = rtl92ee_get_hw_reg, .set_hw_reg = rtl92ee_set_hw_reg, .update_rate_tbl = rtl92ee_update_hal_rate_tbl, - .pre_fill_tx_bd_desc = rtl92ee_pre_fill_tx_bd_desc, .rx_desc_buff_remained_cnt = rtl92ee_rx_desc_buff_remained_cnt, .rx_check_dma_ok = rtl92ee_rx_check_dma_ok, .fill_tx_desc = rtl92ee_tx_fill_desc, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c index a182cdeb58e2..16589e18494b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c @@ -550,9 +550,11 @@ u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx) return point_diff; } -void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw, - u8 *tx_bd_desc8, u8 *desc8, u8 queue_index, - struct sk_buff *skb, dma_addr_t addr) +static void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw, + u8 *tx_bd_desc8, u8 *desc8, + u8 queue_index, + struct sk_buff *skb, + dma_addr_t addr) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); @@ -827,9 +829,8 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw, rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); } -void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw, - u8 *pdesc8, bool firstseg, - bool lastseg, struct sk_buff *skb) +void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8, + struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); @@ -846,8 +847,7 @@ void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw, 
} clear_pci_tx_desc_content(pdesc, txdesc_len); - if (firstseg) - set_tx_desc_offset(pdesc, txdesc_len); + set_tx_desc_offset(pdesc, txdesc_len); set_tx_desc_tx_rate(pdesc, DESC_RATE1M); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h index 967cef3a9cbf..4c6cf4f16f95 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h @@ -720,9 +720,6 @@ void rtl92ee_rx_check_dma_ok(struct ieee80211_hw *hw, u8 *header_desc, u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index); u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 queue_index); -void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw, - u8 *tx_bd_desc, u8 *desc, u8 queue_index, - struct sk_buff *skb, dma_addr_t addr); void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, u8 *pdesc_tx, @@ -743,6 +740,5 @@ u64 rtl92ee_get_desc(struct ieee80211_hw *hw, bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index); void rtl92ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, - bool firstseg, bool lastseg, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c index f570495af044..579b1789d6d1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c @@ -122,7 +122,7 @@ static bool _rtl92s_cmd_send_packet(struct ieee80211_hw *hw, idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries; pdesc = &ring->desc[idx]; - rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb); + rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, skb); __skb_queue_tail(&ring->queue, skb); spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c index a5853a170b58..f104cdb649f8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c @@ -492,7 +492,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, } void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8, - bool firstseg, bool lastseg, struct sk_buff *skb) + struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h index 90aa12fc6a7f..40291a6a15d0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h @@ -10,8 +10,8 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct sk_buff *skb, u8 hw_queue, struct rtl_tcb_desc *ptcb_desc); -void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, - bool lastseg, struct sk_buff *skb); +void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, + struct sk_buff *skb); bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, struct ieee80211_rx_status *rx_status, u8 *pdesc, struct sk_buff *skb); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c index 8ada31380efa..0ff8e355c23a 100644 --- 
a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c @@ -466,7 +466,7 @@ static void rtl8723e_dm_check_edca_turbo(struct ieee80211_hw *hw) } if (rtlpriv->btcoexist.bt_edca_dl != 0) { - edca_be_ul = rtlpriv->btcoexist.bt_edca_dl; + edca_be_dl = rtlpriv->btcoexist.bt_edca_dl; bt_change_edca = true; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c index 53af0d209b11..b34dffc6a30c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c @@ -1122,7 +1122,7 @@ static void rtl8723e_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw) /* Always ignore WlanAct if bHid|bSCOBusy|bSCOeSCO */ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n", + "[BTCoex], BT btInqPageStartTime = 0x%lx, btTxRxCntLvl = %d\n", hal_coex_8723.bt_inq_page_start_time, bt_tx_rx_cnt_lvl); if ((hal_coex_8723.bt_inq_page_start_time) || (BT_TXRX_CNT_LEVEL_3 == bt_tx_rx_cnt_lvl)) { @@ -1335,7 +1335,7 @@ static void rtl8723e_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw) btdm8723.dec_bt_pwr = true; rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n", + "[BTCoex], BT btInqPageStartTime = 0x%lx, btTxRxCntLvl = %d\n", hal_coex_8723.bt_inq_page_start_time, bt_tx_rx_cnt_lvl); if ((hal_coex_8723.bt_inq_page_start_time) || @@ -1358,9 +1358,8 @@ static void rtl8723e_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw) static void rtl8723e_dm_bt_inq_page_monitor(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 cur_time; + unsigned long cur_time = jiffies; - cur_time = jiffies; if (hal_coex_8723.c2h_bt_inquiry_page) { /* bt inquiry or page is started. 
*/ if (hal_coex_8723.bt_inq_page_start_time == 0) { @@ -1368,18 +1367,17 @@ static void rtl8723e_dm_bt_inq_page_monitor(struct ieee80211_hw *hw) BT_COEX_STATE_BT_INQ_PAGE; hal_coex_8723.bt_inq_page_start_time = cur_time; rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], BT Inquiry/page is started at time : 0x%x\n", + "[BTCoex], BT Inquiry/page is started at time : 0x%lx\n", hal_coex_8723.bt_inq_page_start_time); } } rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], BT Inquiry/page started time : 0x%x, cur_time : 0x%x\n", + "[BTCoex], BT Inquiry/page started time : 0x%lx, cur_time : 0x%lx\n", hal_coex_8723.bt_inq_page_start_time, cur_time); if (hal_coex_8723.bt_inq_page_start_time) { - if ((((long)cur_time - - (long)hal_coex_8723.bt_inq_page_start_time) / HZ) - >= 10) { + if (jiffies_to_msecs(cur_time - + hal_coex_8723.bt_inq_page_start_time) >= 10000) { rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "[BTCoex], BT Inquiry/page >= 10sec!!!\n"); hal_coex_8723.bt_inq_page_start_time = 0; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c index d26d4c4314a3..6991713a66d0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c @@ -212,14 +212,9 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } case HW_VAR_AMPDU_MIN_SPACE:{ u8 min_spacing_to_set; - u8 sec_min_space; min_spacing_to_set = *((u8 *)val); if (min_spacing_to_set <= 7) { - sec_min_space = 0; - - if (min_spacing_to_set < sec_min_space) - min_spacing_to_set = sec_min_space; mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) | diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c index 7f294e698994..d9823ddab7be 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c @@ -519,9 +519,8 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw, rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); } -void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw, - u8 *pdesc8, bool firstseg, - bool lastseg, struct sk_buff *skb) +void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8, + struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); @@ -541,8 +540,7 @@ void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw, } clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE); - if (firstseg) - set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); + set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN); set_tx_desc_tx_rate(pdesc, DESC92C_RATE1M); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h index 2d25f62a4d52..f352fddfff32 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h @@ -530,6 +530,5 @@ bool rtl8723e_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index); void rtl8723e_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, - bool firstseg, bool lastseg, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c index c3c990cc032f..c53f95144812 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c @@ -1210,8 +1210,7 @@ static void 
rtl8723be_dm_dynamic_atc_switch(struct ieee80211_hw *hw) static void rtl8723be_dm_common_info_self_update(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 cnt = 0; - struct rtl_sta_info *drv_priv; + u8 cnt; rtlpriv->dm.one_entry_only = false; @@ -1225,9 +1224,7 @@ static void rtl8723be_dm_common_info_self_update(struct ieee80211_hw *hw) rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC || rtlpriv->mac80211.opmode == NL80211_IFTYPE_MESH_POINT) { spin_lock_bh(&rtlpriv->locks.entry_list_lock); - list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) { - cnt++; - } + cnt = list_count_nodes(&rtlpriv->entry_list); spin_unlock_bh(&rtlpriv->locks.entry_list_lock); if (cnt == 1) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index 15575644551f..0e77de1baaf8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -468,15 +468,9 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) break; case HW_VAR_AMPDU_MIN_SPACE:{ u8 min_spacing_to_set; - u8 sec_min_space; min_spacing_to_set = *((u8 *)val); if (min_spacing_to_set <= 7) { - sec_min_space = 0; - - if (min_spacing_to_set < sec_min_space) - min_spacing_to_set = sec_min_space; - mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) | min_spacing_to_set); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c index 24ef7cc52e99..8b6352f7f93b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c @@ -585,7 +585,6 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, } void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8, - bool firstseg, bool lastseg, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h index 174aca20c7e1..da027f915cf4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h @@ -642,6 +642,5 @@ bool rtl8723be_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index); void rtl8723be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, - bool firstseg, bool lastseg, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c index 36c00b89ccae..50b79cf8fb3c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c @@ -215,31 +215,3 @@ int rtl8723_download_fw(struct ieee80211_hw *hw, } EXPORT_SYMBOL_GPL(rtl8723_download_fw); -bool rtl8723_cmd_send_packet(struct ieee80211_hw *hw, - struct sk_buff *skb) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - struct rtl8192_tx_ring *ring; - struct rtl_tx_desc *pdesc; - struct sk_buff *pskb = NULL; - unsigned long flags; - - ring = &rtlpci->tx_ring[BEACON_QUEUE]; - - pskb = __skb_dequeue(&ring->queue); - kfree_skb(pskb); - spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); - - pdesc = &ring->desc[0]; - rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb); - - __skb_queue_tail(&ring->queue, skb); - - spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, 
flags); - - rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE); - - return true; -} -EXPORT_SYMBOL_GPL(rtl8723_cmd_send_packet); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h index b527fcbbdf08..c8e04f9722ae 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h @@ -66,7 +66,5 @@ void rtl8723_write_fw(struct ieee80211_hw *hw, u8 *buffer, u32 size, u8 max_page); int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be, int count); int rtl8723_download_fw(struct ieee80211_hw *hw, bool is_8723be, int count); -bool rtl8723_cmd_send_packet(struct ieee80211_hw *hw, - struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c index f3fe16798c59..76b5395539d0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c @@ -827,8 +827,7 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw) static void rtl8821ae_dm_common_info_self_update(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 cnt = 0; - struct rtl_sta_info *drv_priv; + u8 cnt; rtlpriv->dm.tx_rate = 0xff; @@ -844,8 +843,7 @@ static void rtl8821ae_dm_common_info_self_update(struct ieee80211_hw *hw) rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC || rtlpriv->mac80211.opmode == NL80211_IFTYPE_MESH_POINT) { spin_lock_bh(&rtlpriv->locks.entry_list_lock); - list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) - cnt++; + cnt = list_count_nodes(&rtlpriv->entry_list); spin_unlock_bh(&rtlpriv->locks.entry_list_lock); if (cnt == 1) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 3f8f6da33b12..1633328bc3d1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -546,14 +546,9 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) break; case HW_VAR_AMPDU_MIN_SPACE:{ u8 min_spacing_to_set; - u8 sec_min_space; min_spacing_to_set = *((u8 *)val); if (min_spacing_to_set <= 7) { - sec_min_space = 0; - - if (min_spacing_to_set < sec_min_space) - min_spacing_to_set = sec_min_space; mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) | diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c index d7cb3319d885..bd71592fe26a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c @@ -828,9 +828,8 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw, rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); } -void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw, - u8 *pdesc8, bool firstseg, - bool lastseg, struct sk_buff *skb) +void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8, + struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h index a9ed6fd41089..1155365348f3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h @@ -648,6 +648,5 @@ bool rtl8821ae_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index); void rtl8821ae_tx_polling(struct 
ieee80211_hw *hw, u8 hw_queue); void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, - bool firstseg, bool lastseg, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 2e7e04f91279..31a481f43a07 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -1597,7 +1597,7 @@ struct bt_coexist_8723 { u8 c2h_bt_info; bool c2h_bt_info_req_sent; bool c2h_bt_inquiry_page; - u32 bt_inq_page_start_time; + unsigned long bt_inq_page_start_time; u8 bt_retry_cnt; u8 c2h_bt_info_original; u8 bt_inquiry_page_cnt; @@ -2032,19 +2032,15 @@ struct rtl_ps_ctl { /* for SW LPS*/ bool sw_ps_enabled; - bool state; bool state_inap; bool multi_buffered; u16 nullfunc_seq; unsigned int dtim_counter; - unsigned int sleep_ms; unsigned long last_sleep_jiffies; unsigned long last_awake_jiffies; unsigned long last_delaylps_stamp_jiffies; unsigned long last_dtim; unsigned long last_beacon; - unsigned long last_action; - unsigned long last_slept; /*For P2P PS */ struct rtl_p2p_ps_info p2p_ps_info; @@ -2231,9 +2227,6 @@ struct rtl_hal_ops { void (*update_rate_tbl)(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u8 rssi_leve, bool update_bw); - void (*pre_fill_tx_bd_desc)(struct ieee80211_hw *hw, u8 *tx_bd_desc, - u8 *desc, u8 queue_index, - struct sk_buff *skb, dma_addr_t addr); void (*update_rate_mask)(struct ieee80211_hw *hw, u8 rssi_level); u16 (*rx_desc_buff_remained_cnt)(struct ieee80211_hw *hw, u8 queue_index); @@ -2246,10 +2239,7 @@ struct rtl_hal_ops { struct ieee80211_sta *sta, struct sk_buff *skb, u8 hw_queue, struct rtl_tcb_desc *ptcb_desc); - void (*fill_fake_txdesc)(struct ieee80211_hw *hw, u8 *pdesc, - u32 buffer_len, bool bsspspoll); void (*fill_tx_cmddesc)(struct ieee80211_hw *hw, u8 *pdesc, - bool firstseg, bool lastseg, struct sk_buff *skb); void (*fill_tx_special_desc)(struct ieee80211_hw *hw, u8 *pdesc, u8 *pbd_desc, @@ -2285,7 +2275,6 @@ struct rtl_hal_ops { void (*set_rfreg)(struct ieee80211_hw *hw, enum radio_path rfpath, u32 regaddr, u32 bitmask, u32 data); void (*linked_set_reg)(struct ieee80211_hw *hw); - void (*chk_switch_dmdp)(struct ieee80211_hw *hw); void (*dualmac_switch_to_dmdp)(struct ieee80211_hw *hw); bool (*phy_rf6052_config)(struct ieee80211_hw *hw); void (*phy_rf6052_set_cck_txpower)(struct ieee80211_hw *hw, @@ -2708,7 +2697,7 @@ struct rtl_c2hcmd { struct rtl_bssid_entry { struct list_head list; u8 bssid[ETH_ALEN]; - u32 age; + unsigned long age; }; struct rtl_scan_list { diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c index f8ba133baff0..35bc37a3c469 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.c +++ b/drivers/net/wireless/realtek/rtw88/debug.c @@ -1233,9 +1233,9 @@ static struct rtw_debugfs_priv rtw_debug_priv_dm_cap = { #define rtw_debugfs_add_core(name, mode, fopname, parent) \ do { \ rtw_debug_priv_ ##name.rtwdev = rtwdev; \ - if (!debugfs_create_file(#name, mode, \ + if (IS_ERR(debugfs_create_file(#name, mode, \ parent, &rtw_debug_priv_ ##name,\ - &file_ops_ ##fopname)) \ + &file_ops_ ##fopname))) \ pr_debug("Unable to initialize debugfs:%s\n", \ #name); \ } while (0) diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h index a9149c6c2b48..a03ced11bbe0 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.h +++ b/drivers/net/wireless/realtek/rtw88/debug.h @@ -48,11 +48,23 @@ void __rtw_dbg(struct rtw_dev *rtwdev, enum 
rtw_debug_mask mask, #define rtw_dbg(rtwdev, a...) __rtw_dbg(rtwdev, ##a) +static inline bool rtw_dbg_is_enabled(struct rtw_dev *rtwdev, + enum rtw_debug_mask mask) +{ + return !!(rtw_debug_mask & mask); +} + #else static inline void rtw_dbg(struct rtw_dev *rtwdev, enum rtw_debug_mask mask, const char *fmt, ...) {} +static inline bool rtw_dbg_is_enabled(struct rtw_dev *rtwdev, + enum rtw_debug_mask mask) +{ + return false; +} + #endif /* CONFIG_RTW88_DEBUG */ #define rtw_info(rtwdev, a...) dev_info(rtwdev->dev, ##a) diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index 567bbedd8ee0..acd78311c8c4 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -17,6 +17,79 @@ #include "phy.h" #include "mac.h" +static const struct rtw_hw_reg_desc fw_h2c_regs[] = { + {REG_FWIMR, MASKDWORD, "FWIMR"}, + {REG_FWIMR, BIT_FS_H2CCMD_INT_EN, "FWIMR enable"}, + {REG_FWISR, MASKDWORD, "FWISR"}, + {REG_FWISR, BIT_FS_H2CCMD_INT, "FWISR enable"}, + {REG_HMETFR, BIT_INT_BOX_ALL, "BoxBitMap"}, + {REG_HMEBOX0, MASKDWORD, "MSG 0"}, + {REG_HMEBOX0_EX, MASKDWORD, "MSG_EX 0"}, + {REG_HMEBOX1, MASKDWORD, "MSG 1"}, + {REG_HMEBOX1_EX, MASKDWORD, "MSG_EX 1"}, + {REG_HMEBOX2, MASKDWORD, "MSG 2"}, + {REG_HMEBOX2_EX, MASKDWORD, "MSG_EX 2"}, + {REG_HMEBOX3, MASKDWORD, "MSG 3"}, + {REG_HMEBOX3_EX, MASKDWORD, "MSG_EX 3"}, + {REG_FT1IMR, MASKDWORD, "FT1IMR"}, + {REG_FT1IMR, BIT_FS_H2C_CMD_OK_INT_EN, "FT1IMR enable"}, + {REG_FT1ISR, MASKDWORD, "FT1ISR"}, + {REG_FT1ISR, BIT_FS_H2C_CMD_OK_INT, "FT1ISR enable "}, +}; + +static const struct rtw_hw_reg_desc fw_c2h_regs[] = { + {REG_FWIMR, MASKDWORD, "FWIMR"}, + {REG_FWIMR, BIT_FS_H2CCMD_INT_EN, "CPWM"}, + {REG_FWIMR, BIT_FS_HRCV_INT_EN, "HRECV"}, + {REG_FWISR, MASKDWORD, "FWISR"}, + {REG_FWISR, BIT_FS_H2CCMD_INT, "CPWM"}, + {REG_FWISR, BIT_FS_HRCV_INT, "HRECV"}, + {REG_CPWM, MASKDWORD, "REG_CPWM"}, +}; + +static const struct rtw_hw_reg_desc fw_core_regs[] = { + {REG_ARFR2_V1, MASKDWORD, "EPC"}, + {REG_ARFRH2_V1, MASKDWORD, "BADADDR"}, + {REG_ARFR3_V1, MASKDWORD, "CAUSE"}, + {REG_ARFR3_V1, BIT_EXC_CODE, "ExcCode"}, + {REG_ARFRH3_V1, MASKDWORD, "Status"}, + {REG_ARFR4, MASKDWORD, "SP"}, + {REG_ARFRH4, MASKDWORD, "RA"}, + {REG_FW_DBG6, MASKDWORD, "DBG 6"}, + {REG_FW_DBG7, MASKDWORD, "DBG 7"}, +}; + +static void _rtw_fw_dump_dbg_info(struct rtw_dev *rtwdev, + const struct rtw_hw_reg_desc regs[], u32 size) +{ + const struct rtw_hw_reg_desc *reg; + u32 val; + int i; + + for (i = 0; i < size; i++) { + reg = &regs[i]; + val = rtw_read32_mask(rtwdev, reg->addr, reg->mask); + + rtw_dbg(rtwdev, RTW_DBG_FW, "[%s]addr:0x%x mask:0x%x value:0x%x\n", + reg->desc, reg->addr, reg->mask, val); + } +} + +void rtw_fw_dump_dbg_info(struct rtw_dev *rtwdev) +{ + int i; + + if (!rtw_dbg_is_enabled(rtwdev, RTW_DBG_FW)) + return; + + _rtw_fw_dump_dbg_info(rtwdev, fw_h2c_regs, ARRAY_SIZE(fw_h2c_regs)); + _rtw_fw_dump_dbg_info(rtwdev, fw_c2h_regs, ARRAY_SIZE(fw_c2h_regs)); + for (i = 0 ; i < RTW_DEBUG_DUMP_TIMES; i++) { + rtw_dbg(rtwdev, RTW_DBG_FW, "Firmware Coredump %dth\n", i + 1); + _rtw_fw_dump_dbg_info(rtwdev, fw_core_regs, ARRAY_SIZE(fw_core_regs)); + } +} + static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev, struct sk_buff *skb) { @@ -140,7 +213,7 @@ struct rtw_beacon_filter_iter_data { u8 *payload; }; -static void rtw_fw_bcn_filter_notify_vif_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) { struct rtw_beacon_filter_iter_data *iter_data = data;
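Note on the fw.c hunk above: rtw_fw_dump_dbg_info() checks the new rtw_dbg_is_enabled() helper before touching any register, so the H2C/C2H and firmware-core dumps generate no MMIO traffic unless the RTW_DBG_FW category bit is set in the debug mask. A minimal, self-contained sketch of that gate-then-dump pattern follows; debug_mask, DBG_FW, struct reg_desc, read_reg() and dump_dbg_info() are hypothetical stand-ins for the driver's rtw_debug_mask, RTW_DBG_FW, struct rtw_hw_reg_desc, rtw_read32_mask() and rtw_fw_dump_dbg_info(), not the driver's own symbols.

#include <stdio.h>

#define DBG_FW (1U << 0)	/* hypothetical category bit (stands in for RTW_DBG_FW) */

static unsigned int debug_mask;	/* stands in for the rtw_debug_mask module parameter */

struct reg_desc {		/* shape mirrors struct rtw_hw_reg_desc */
	unsigned int addr;
	unsigned int mask;
	const char *desc;
};

/* Two entries borrowed from the hunk above (REG_FWIMR/REG_FWISR). */
static const struct reg_desc fw_regs[] = {
	{ 0x0130, 0xffffffff, "FWIMR" },
	{ 0x0134, 0xffffffff, "FWISR" },
};

static int dbg_is_enabled(unsigned int mask)
{
	return !!(debug_mask & mask);	/* same test as rtw_dbg_is_enabled() */
}

static unsigned int read_reg(unsigned int addr, unsigned int mask)
{
	(void)addr;			/* stand-in for a masked MMIO read */
	return 0 & mask;
}

static void dump_dbg_info(void)
{
	size_t i;

	if (!dbg_is_enabled(DBG_FW))	/* gate first: no register traffic when off */
		return;

	for (i = 0; i < sizeof(fw_regs) / sizeof(fw_regs[0]); i++)
		printf("[%s]addr:0x%x mask:0x%x value:0x%x\n",
		       fw_regs[i].desc, fw_regs[i].addr, fw_regs[i].mask,
		       read_reg(fw_regs[i].addr, fw_regs[i].mask));
}

int main(void)
{
	debug_mask = DBG_FW;		/* enable the category, then dump */
	dump_dbg_info();
	return 0;
}

Bailing out before the loop matters because the series wires this dump into failure paths (the H2C send failure below, and the deep-power-save and LPS-exit failures in ps.c), where unconditional register reads would add latency for every user rather than only those debugging firmware.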
@@ -349,6 +422,7 @@ static void rtw_fw_send_h2c_command_register(struct rtw_dev *rtwdev, if (ret) { rtw_err(rtwdev, "failed to send h2c command\n"); + rtw_fw_dump_dbg_info(rtwdev); return; } diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h index 43ccdf9965ac..84e47c71ea12 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.h +++ b/drivers/net/wireless/realtek/rtw88/fw.h @@ -44,6 +44,8 @@ #define RTW_OLD_PROBE_PG_CNT 2 #define RTW_PROBE_PG_CNT 4 +#define RTW_DEBUG_DUMP_TIMES 10 + enum rtw_c2h_cmd_id { C2H_CCX_TX_RPT = 0x03, C2H_BT_INFO = 0x09, @@ -808,6 +810,7 @@ static inline bool rtw_fw_feature_ext_check(struct rtw_fw_state *fw, return !!(fw->feature_ext & feature); } +void rtw_fw_dump_dbg_info(struct rtw_dev *rtwdev); void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset, struct sk_buff *skb); void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb); diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index c853e2f2d448..4a33d2e47f33 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -185,8 +185,7 @@ static void rtw_dynamic_csi_rate(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif) bf_info->cur_csi_rpt_rate = new_csi_rate_idx; } -static void rtw_vif_watch_dog_iter(void *data, u8 *mac, - struct ieee80211_vif *vif) +static void rtw_vif_watch_dog_iter(void *data, struct ieee80211_vif *vif) { struct rtw_watch_dog_iter_data *iter_data = data; struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; @@ -1303,7 +1302,6 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si, si->stbc_en = stbc_en; si->ldpc_en = ldpc_en; si->rf_type = rf_type; - si->wireless_set = wireless_set; si->sgi_enable = is_support_sgi; si->vht_enable = is_vht_enable; si->ra_mask = ra_mask; @@ -2183,10 +2181,12 @@ void rtw_core_deinit(struct rtw_dev *rtwdev) release_firmware(wow_fw->firmware); destroy_workqueue(rtwdev->tx_wq); + timer_delete_sync(&rtwdev->tx_report.purge_timer); spin_lock_irqsave(&rtwdev->tx_report.q_lock, flags); skb_queue_purge(&rtwdev->tx_report.queue); - skb_queue_purge(&rtwdev->coex.queue); spin_unlock_irqrestore(&rtwdev->tx_report.q_lock, flags); + skb_queue_purge(&rtwdev->coex.queue); + skb_queue_purge(&rtwdev->c2h_queue); list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list, build_list) { @@ -2329,7 +2329,7 @@ struct rtw_iter_port_switch_data { struct rtw_vif *rtwvif_ap; }; -static void rtw_port_switch_iter(void *data, u8 *mac, struct ieee80211_vif *vif) +static void rtw_port_switch_iter(void *data, struct ieee80211_vif *vif) { struct rtw_iter_port_switch_data *iter_data = data; struct rtw_dev *rtwdev = iter_data->rtwdev; @@ -2381,8 +2381,7 @@ void rtw_core_port_switch(struct rtw_dev *rtwdev, struct ieee80211_vif *vif) rtw_iterate_vifs(rtwdev, rtw_port_switch_iter, &iter_data); } -static void rtw_check_sta_active_iter(void *data, u8 *mac, - struct ieee80211_vif *vif) +static void rtw_check_sta_active_iter(void *data, struct ieee80211_vif *vif) { struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; bool *active = data; diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index f9dd2ab941c8..b6bfd4c02e2d 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -342,8 +342,10 @@ enum rtw_regulatory_domains { RTW_REGD_UKRAINE = 7, RTW_REGD_MEXICO = 8, RTW_REGD_CN = 9, - RTW_REGD_WW, + RTW_REGD_QATAR 
= 10, + RTW_REGD_UK = 11, + RTW_REGD_WW, RTW_REGD_MAX }; @@ -511,12 +513,6 @@ struct rtw_txpwr_idx { struct rtw_5g_txpwr_idx pwr_idx_5g; }; -struct rtw_timer_list { - struct timer_list timer; - void (*function)(void *data); - void *args; -}; - struct rtw_channel_params { u8 center_chan; u8 primary_chan; @@ -528,6 +524,12 @@ struct rtw_hw_reg { u32 mask; }; +struct rtw_hw_reg_desc { + u32 addr; + u32 mask; + const char *desc; +}; + struct rtw_ltecoex_addr { u32 ctrl; u32 wdata; @@ -734,9 +736,7 @@ struct rtw_ra_report { struct rtw_txq { struct list_head list; - unsigned long flags; - unsigned long last_push; }; #define RTW_BC_MC_MACID 1 @@ -754,7 +754,6 @@ struct rtw_sta_info { u8 rate_id; enum rtw_bandwidth bw_mode; enum rtw_rf_type rf_type; - enum rtw_wireless_set wireless_set; u8 stbc_en:2; u8 ldpc_en:2; bool sgi_enable; diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index 44a8fff34cdd..2bfc0e822b8d 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -1828,5 +1828,5 @@ void rtw_pci_shutdown(struct pci_dev *pdev) EXPORT_SYMBOL(rtw_pci_shutdown); MODULE_AUTHOR("Realtek Corporation"); -MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver"); +MODULE_DESCRIPTION("Realtek PCI 802.11ac wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c index 43e80a3a8136..add5a20b8432 100644 --- a/drivers/net/wireless/realtek/rtw88/ps.c +++ b/drivers/net/wireless/realtek/rtw88/ps.c @@ -37,8 +37,7 @@ int rtw_enter_ips(struct rtw_dev *rtwdev) return 0; } -static void rtw_restore_port_cfg_iter(void *data, u8 *mac, - struct ieee80211_vif *vif) +static void rtw_restore_port_cfg_iter(void *data, struct ieee80211_vif *vif) { struct rtw_dev *rtwdev = data; struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; @@ -105,6 +104,7 @@ void rtw_power_mode_change(struct rtw_dev *rtwdev, bool enter) */ WARN(1, "firmware failed to ack driver for %s Deep Power mode\n", enter ? 
"entering" : "leaving"); + rtw_fw_dump_dbg_info(rtwdev); } } EXPORT_SYMBOL(rtw_power_mode_change); @@ -165,6 +165,7 @@ static void rtw_fw_leave_lps_check(struct rtw_dev *rtwdev) if (ret) { rtw_write32_clr(rtwdev, REG_TCR, BIT_PWRMGT_HWDATA_EN); rtw_warn(rtwdev, "firmware failed to leave lps state\n"); + rtw_fw_dump_dbg_info(rtwdev); } } @@ -320,8 +321,7 @@ static void __rtw_vif_recalc_lps(struct rtw_vif_recalc_lps_iter_data *data, data->found_vif = vif; } -static void rtw_vif_recalc_lps_iter(void *data, u8 *mac, - struct ieee80211_vif *vif) +static void rtw_vif_recalc_lps_iter(void *data, struct ieee80211_vif *vif) { __rtw_vif_recalc_lps(data, vif); } diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h index 7c6c11d50ff3..1634f03784f1 100644 --- a/drivers/net/wireless/realtek/rtw88/reg.h +++ b/drivers/net/wireless/realtek/rtw88/reg.h @@ -224,12 +224,25 @@ #define REG_RXFF_BNDY 0x011C #define REG_FE1IMR 0x0120 #define BIT_FS_RXDONE BIT(16) +#define REG_CPWM 0x012C +#define REG_FWIMR 0x0130 +#define BIT_FS_H2CCMD_INT_EN BIT(4) +#define BIT_FS_HRCV_INT_EN BIT(5) +#define REG_FWISR 0x0134 +#define BIT_FS_H2CCMD_INT BIT(4) +#define BIT_FS_HRCV_INT BIT(5) #define REG_PKTBUF_DBG_CTRL 0x0140 #define REG_C2HEVT 0x01A0 #define REG_MCUTST_1 0x01C0 #define REG_MCUTST_II 0x01C4 #define REG_WOWLAN_WAKE_REASON 0x01C7 #define REG_HMETFR 0x01CC +#define BIT_INT_BOX0 BIT(0) +#define BIT_INT_BOX1 BIT(1) +#define BIT_INT_BOX2 BIT(2) +#define BIT_INT_BOX3 BIT(3) +#define BIT_INT_BOX_ALL (BIT_INT_BOX0 | BIT_INT_BOX1 | BIT_INT_BOX2 | \ + BIT_INT_BOX3) #define REG_HMEBOX0 0x01D0 #define REG_HMEBOX1 0x01D4 #define REG_HMEBOX2 0x01D8 @@ -338,6 +351,11 @@ #define BIT_EN_GNT_BT_AWAKE BIT(3) #define BIT_EN_EOF_V1 BIT(2) #define REG_DATA_SC 0x0483 +#define REG_ARFR2_V1 0x048C +#define REG_ARFRH2_V1 0x0490 +#define REG_ARFR3_V1 0x0494 +#define BIT_EXC_CODE GENMASK(6, 2) +#define REG_ARFRH3_V1 0x0498 #define REG_ARFR4 0x049C #define BIT_WL_RFK BIT(0) #define REG_ARFRH4 0x04A0 @@ -548,11 +566,16 @@ #define REG_H2C_PKT_READADDR 0x10D0 #define REG_H2C_PKT_WRITEADDR 0x10D4 +#define REG_FW_DBG6 0x10F8 #define REG_FW_DBG7 0x10FC #define FW_KEY_MASK 0xffffff00 #define REG_CR_EXT 0x1100 +#define REG_FT1IMR 0x1138 +#define BIT_FS_H2C_CMD_OK_INT_EN BIT(25) +#define REG_FT1ISR 0x113c +#define BIT_FS_H2C_CMD_OK_INT BIT(25) #define REG_DDMA_CH0SA 0x1200 #define REG_DDMA_CH0DA 0x1204 #define REG_DDMA_CH0CTRL 0x1208 diff --git a/drivers/net/wireless/realtek/rtw88/regd.c b/drivers/net/wireless/realtek/rtw88/regd.c index 2f547cbcf6da..7f3b2ea3f2a5 100644 --- a/drivers/net/wireless/realtek/rtw88/regd.c +++ b/drivers/net/wireless/realtek/rtw88/regd.c @@ -70,16 +70,16 @@ static const struct rtw_regulatory rtw_reg_map[] = { COUNTRY_REGD_ENT("BY", RTW_REGD_ETSI, RTW_REGD_ETSI), COUNTRY_REGD_ENT("BZ", RTW_REGD_FCC, RTW_REGD_FCC), COUNTRY_REGD_ENT("CA", RTW_REGD_IC, RTW_REGD_IC), - COUNTRY_REGD_ENT("CC", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("CC", RTW_REGD_ACMA, RTW_REGD_ACMA), COUNTRY_REGD_ENT("CD", RTW_REGD_ETSI, RTW_REGD_ETSI), COUNTRY_REGD_ENT("CF", RTW_REGD_ETSI, RTW_REGD_ETSI), COUNTRY_REGD_ENT("CG", RTW_REGD_ETSI, RTW_REGD_ETSI), COUNTRY_REGD_ENT("CH", RTW_REGD_ETSI, RTW_REGD_ETSI), COUNTRY_REGD_ENT("CI", RTW_REGD_ETSI, RTW_REGD_ETSI), COUNTRY_REGD_ENT("CK", RTW_REGD_ETSI, RTW_REGD_ETSI), - COUNTRY_REGD_ENT("CL", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("CL", RTW_REGD_CHILE, RTW_REGD_CHILE), COUNTRY_REGD_ENT("CM", RTW_REGD_ETSI, RTW_REGD_ETSI), - COUNTRY_REGD_ENT("CN", 
RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("CN", RTW_REGD_CN, RTW_REGD_CN), COUNTRY_REGD_ENT("CO", RTW_REGD_FCC, RTW_REGD_FCC), COUNTRY_REGD_ENT("CR", RTW_REGD_FCC, RTW_REGD_FCC), COUNTRY_REGD_ENT("CV", RTW_REGD_ETSI, RTW_REGD_ETSI), @@ -106,7 +106,7 @@ static const struct rtw_regulatory rtw_reg_map[] = { COUNTRY_REGD_ENT("FO", RTW_REGD_ETSI, RTW_REGD_ETSI), COUNTRY_REGD_ENT("FR", RTW_REGD_ETSI, RTW_REGD_ETSI), COUNTRY_REGD_ENT("GA", RTW_REGD_ETSI, RTW_REGD_ETSI), - COUNTRY_REGD_ENT("GB", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("GB", RTW_REGD_UK, RTW_REGD_UK), COUNTRY_REGD_ENT("GD", RTW_REGD_FCC, RTW_REGD_FCC), COUNTRY_REGD_ENT("GE", RTW_REGD_ETSI, RTW_REGD_ETSI), COUNTRY_REGD_ENT("GF", RTW_REGD_ETSI, RTW_REGD_ETSI), @@ -214,7 +214,7 @@ static const struct rtw_regulatory rtw_reg_map[] = { COUNTRY_REGD_ENT("PT", RTW_REGD_ETSI, RTW_REGD_ETSI), COUNTRY_REGD_ENT("PW", RTW_REGD_FCC, RTW_REGD_FCC), COUNTRY_REGD_ENT("PY", RTW_REGD_FCC, RTW_REGD_FCC), - COUNTRY_REGD_ENT("QA", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("QA", RTW_REGD_QATAR, RTW_REGD_QATAR), COUNTRY_REGD_ENT("RE", RTW_REGD_ETSI, RTW_REGD_ETSI), COUNTRY_REGD_ENT("RO", RTW_REGD_ETSI, RTW_REGD_ETSI), COUNTRY_REGD_ENT("RS", RTW_REGD_ETSI, RTW_REGD_ETSI), @@ -234,7 +234,7 @@ static const struct rtw_regulatory rtw_reg_map[] = { COUNTRY_REGD_ENT("SN", RTW_REGD_ETSI, RTW_REGD_ETSI), COUNTRY_REGD_ENT("SO", RTW_REGD_ETSI, RTW_REGD_ETSI), COUNTRY_REGD_ENT("SR", RTW_REGD_FCC, RTW_REGD_FCC), - COUNTRY_REGD_ENT("ST", RTW_REGD_FCC, RTW_REGD_FCC), + COUNTRY_REGD_ENT("ST", RTW_REGD_ETSI, RTW_REGD_ETSI), COUNTRY_REGD_ENT("SV", RTW_REGD_FCC, RTW_REGD_FCC), COUNTRY_REGD_ENT("SX", RTW_REGD_FCC, RTW_REGD_FCC), COUNTRY_REGD_ENT("SZ", RTW_REGD_ETSI, RTW_REGD_ETSI), @@ -253,7 +253,7 @@ static const struct rtw_regulatory rtw_reg_map[] = { COUNTRY_REGD_ENT("TV", RTW_REGD_ETSI, RTW_REGD_WW), COUNTRY_REGD_ENT("TW", RTW_REGD_FCC, RTW_REGD_FCC), COUNTRY_REGD_ENT("TZ", RTW_REGD_ETSI, RTW_REGD_ETSI), - COUNTRY_REGD_ENT("UA", RTW_REGD_ETSI, RTW_REGD_ETSI), + COUNTRY_REGD_ENT("UA", RTW_REGD_UKRAINE, RTW_REGD_UKRAINE), COUNTRY_REGD_ENT("UG", RTW_REGD_ETSI, RTW_REGD_ETSI), COUNTRY_REGD_ENT("US", RTW_REGD_FCC, RTW_REGD_FCC), COUNTRY_REGD_ENT("UY", RTW_REGD_FCC, RTW_REGD_FCC), @@ -502,6 +502,14 @@ u8 rtw_regd_get(struct rtw_dev *rtwdev) } EXPORT_SYMBOL(rtw_regd_get); +bool rtw_regd_srrc(struct rtw_dev *rtwdev) +{ + struct rtw_regd *regd = &rtwdev->regd; + + return rtw_reg_match(regd->regulatory, "CN"); +} +EXPORT_SYMBOL(rtw_regd_srrc); + struct rtw_regd_alternative_t { bool set; u8 alt; @@ -519,6 +527,8 @@ rtw_regd_alt[RTW_REGD_MAX] = { DECL_REGD_ALT(RTW_REGD_UKRAINE, RTW_REGD_ETSI), DECL_REGD_ALT(RTW_REGD_MEXICO, RTW_REGD_FCC), DECL_REGD_ALT(RTW_REGD_CN, RTW_REGD_ETSI), + DECL_REGD_ALT(RTW_REGD_QATAR, RTW_REGD_ETSI), + DECL_REGD_ALT(RTW_REGD_UK, RTW_REGD_ETSI), }; bool rtw_regd_has_alt(u8 regd, u8 *regd_alt) diff --git a/drivers/net/wireless/realtek/rtw88/regd.h b/drivers/net/wireless/realtek/rtw88/regd.h index 34cb13d0cd9e..3c5a6fd8e6dd 100644 --- a/drivers/net/wireless/realtek/rtw88/regd.h +++ b/drivers/net/wireless/realtek/rtw88/regd.h @@ -68,4 +68,6 @@ int rtw_regd_init(struct rtw_dev *rtwdev); int rtw_regd_hint(struct rtw_dev *rtwdev); u8 rtw_regd_get(struct rtw_dev *rtwdev); bool rtw_regd_has_alt(u8 regd, u8 *regd_alt); +bool rtw_regd_srrc(struct rtw_dev *rtwdev); + #endif diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.h b/drivers/net/wireless/realtek/rtw88/rtw8723d.h index 3642a2c7f80c..2434e2480cbe 100644 --- 
a/drivers/net/wireless/realtek/rtw88/rtw8723d.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.h @@ -46,6 +46,7 @@ struct rtw8723du_efuse { u8 vender_id[2]; /* 0x100 */ u8 product_id[2]; /* 0x102 */ u8 usb_option; /* 0x104 */ + u8 res5[2]; /* 0x105 */ u8 mac_addr[ETH_ALEN]; /* 0x107 */ }; diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index adf224618a2a..429bb420b056 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -381,6 +381,65 @@ static void rtw8821c_set_channel_rxdfir(struct rtw_dev *rtwdev, u8 bw) } } +static void rtw8821c_cck_tx_filter_srrc(struct rtw_dev *rtwdev, u8 channel, u8 bw) +{ + struct rtw_hal *hal = &rtwdev->hal; + + if (channel == 14) { + rtw_write32_mask(rtwdev, REG_CCA_FLTR, MASKHWORD, 0xe82c); + rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD, 0x0000b81c); + rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD, 0x0000); + rtw_write32_mask(rtwdev, REG_TXFILTER, MASKDWORD, 0x00003667); + + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE2, RFREG_MASK, 0x00002); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0001e); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0001c); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0000e); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0000c); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE2, RFREG_MASK, 0x00000); + } else if (channel == 13 || + (channel == 11 && bw == RTW_CHANNEL_WIDTH_40)) { + rtw_write32_mask(rtwdev, REG_CCA_FLTR, MASKHWORD, 0xf8fe); + rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD, 0x64b80c1c); + rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD, 0x8810); + rtw_write32_mask(rtwdev, REG_TXFILTER, MASKDWORD, 0x01235667); + + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE2, RFREG_MASK, 0x00002); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0001e); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00027); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0001c); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00027); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0000e); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00029); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0000c); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00026); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE2, RFREG_MASK, 0x00000); + } else { + rtw_write32_mask(rtwdev, REG_CCA_FLTR, MASKHWORD, 0xe82c); + rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD, + hal->ch_param[0]); + rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD, + hal->ch_param[1] & MASKLWORD); + rtw_write32_mask(rtwdev, REG_TXFILTER, MASKDWORD, + hal->ch_param[2]); + + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE2, RFREG_MASK, 0x00002); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0001e); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0001c); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0000e); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x0000c); + rtw_write_rf(rtwdev, 
RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x00000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE2, RFREG_MASK, 0x00000); + } +} + static void rtw8821c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw, u8 primary_ch_idx) { @@ -395,6 +454,13 @@ static void rtw8821c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw, rtw_write32_mask(rtwdev, REG_TXSCALE_A, 0xf00, 0x0); rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x96a); + + if (rtw_regd_srrc(rtwdev)) { + rtw8821c_cck_tx_filter_srrc(rtwdev, channel, bw); + goto set_bw; + } + + /* CCK TX filter parameters for default case */ if (channel == 14) { rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD, 0x0000b81c); rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD, 0x0000); @@ -430,6 +496,7 @@ static void rtw8821c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw, rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x412); } +set_bw: switch (bw) { case RTW_CHANNEL_WIDTH_20: default: diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.h b/drivers/net/wireless/realtek/rtw88/rtw8821c.h index fcff31688c45..91ed921407bb 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.h @@ -238,6 +238,7 @@ extern const struct rtw_chip_info rtw8821c_hw_spec; #define REG_RXSB 0xa00 #define REG_ADCINI 0xa04 #define REG_PWRTH 0xa08 +#define REG_CCA_FLTR 0xa20 #define REG_TXSF2 0xa24 #define REG_TXSF6 0xa28 #define REG_FA_CCK 0xa5c diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c index 6c82c4383497..0393b9a0c1a3 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c @@ -6013,996 +6013,1492 @@ RTW_DECL_TABLE_RF_RADIO(rtw8821c_rf_a, A); static const struct rtw_txpwr_lmt_cfg_pair rtw8821c_txpwr_lmt_type0[] = { { 0, 0, 0, 0, 1, 30, }, { 2, 0, 0, 0, 1, 30, }, - { 0, 0, 0, 0, 2, 32, }, - { 2, 0, 0, 0, 2, 30, }, - { 0, 0, 0, 0, 3, 32, }, - { 2, 0, 0, 0, 3, 30, }, - { 0, 0, 0, 0, 4, 32, }, - { 2, 0, 0, 0, 4, 30, }, - { 0, 0, 0, 0, 5, 32, }, - { 2, 0, 0, 0, 5, 30, }, - { 0, 0, 0, 0, 6, 32, }, - { 2, 0, 0, 0, 6, 30, }, - { 0, 0, 0, 0, 7, 32, }, - { 2, 0, 0, 0, 7, 30, }, - { 0, 0, 0, 0, 8, 32, }, - { 2, 0, 0, 0, 8, 30, }, - { 0, 0, 0, 0, 9, 32, }, - { 2, 0, 0, 0, 9, 30, }, - { 0, 0, 0, 0, 10, 32, }, - { 2, 0, 0, 0, 10, 30, }, - { 0, 0, 0, 0, 11, 32, }, - { 2, 0, 0, 0, 11, 30, }, - { 0, 0, 0, 0, 12, 24, }, - { 2, 0, 0, 0, 12, 30, }, - { 0, 0, 0, 0, 13, 16, }, - { 2, 0, 0, 0, 13, 30, }, - { 0, 0, 0, 0, 14, 63, }, - { 2, 0, 0, 0, 14, 63, }, - { 0, 0, 0, 1, 1, 30, }, - { 2, 0, 0, 1, 1, 30, }, - { 0, 0, 0, 1, 2, 32, }, - { 2, 0, 0, 1, 2, 30, }, - { 0, 0, 0, 1, 3, 34, }, - { 2, 0, 0, 1, 3, 30, }, - { 0, 0, 0, 1, 4, 34, }, - { 2, 0, 0, 1, 4, 30, }, - { 0, 0, 0, 1, 5, 34, }, - { 2, 0, 0, 1, 5, 30, }, - { 0, 0, 0, 1, 6, 34, }, - { 2, 0, 0, 1, 6, 30, }, - { 0, 0, 0, 1, 7, 34, }, - { 2, 0, 0, 1, 7, 30, }, - { 0, 0, 0, 1, 8, 34, }, - { 2, 0, 0, 1, 8, 30, }, - { 0, 0, 0, 1, 9, 34, }, - { 2, 0, 0, 1, 9, 30, }, - { 0, 0, 0, 1, 10, 32, }, - { 2, 0, 0, 1, 10, 30, }, - { 0, 0, 0, 1, 11, 30, }, - { 2, 0, 0, 1, 11, 30, }, - { 0, 0, 0, 1, 12, 28, }, - { 2, 0, 0, 1, 12, 30, }, - { 0, 0, 0, 1, 13, 16, }, - { 2, 0, 0, 1, 13, 30, }, - { 0, 0, 0, 1, 14, 63, }, - { 2, 0, 0, 1, 14, 63, }, - { 0, 0, 0, 2, 1, 26, }, - { 2, 0, 0, 2, 1, 30, }, - { 0, 0, 0, 2, 2, 30, }, - { 2, 0, 0, 2, 2, 30, }, - { 0, 0, 0, 2, 3, 32, }, - { 2, 0, 0, 2, 3, 30, }, - { 0, 0, 0, 2, 4, 34, }, - { 2, 0, 0, 2, 4, 30, }, - { 0, 0, 0, 2, 5, 
34, }, - { 2, 0, 0, 2, 5, 30, }, - { 0, 0, 0, 2, 6, 34, }, - { 2, 0, 0, 2, 6, 30, }, - { 0, 0, 0, 2, 7, 34, }, - { 2, 0, 0, 2, 7, 30, }, - { 0, 0, 0, 2, 8, 34, }, - { 2, 0, 0, 2, 8, 30, }, - { 0, 0, 0, 2, 9, 32, }, - { 2, 0, 0, 2, 9, 30, }, - { 0, 0, 0, 2, 10, 30, }, - { 2, 0, 0, 2, 10, 30, }, - { 0, 0, 0, 2, 11, 28, }, - { 2, 0, 0, 2, 11, 30, }, - { 0, 0, 0, 2, 12, 26, }, - { 2, 0, 0, 2, 12, 30, }, - { 0, 0, 0, 2, 13, 12, }, - { 2, 0, 0, 2, 13, 30, }, - { 0, 0, 0, 2, 14, 63, }, - { 2, 0, 0, 2, 14, 63, }, - { 0, 0, 1, 2, 1, 63, }, - { 2, 0, 1, 2, 1, 63, }, - { 0, 0, 1, 2, 2, 63, }, - { 2, 0, 1, 2, 2, 63, }, - { 0, 0, 1, 2, 3, 26, }, - { 2, 0, 1, 2, 3, 30, }, - { 0, 0, 1, 2, 4, 26, }, - { 2, 0, 1, 2, 4, 30, }, - { 0, 0, 1, 2, 5, 30, }, - { 2, 0, 1, 2, 5, 30, }, - { 0, 0, 1, 2, 6, 30, }, - { 2, 0, 1, 2, 6, 30, }, - { 0, 0, 1, 2, 7, 30, }, - { 2, 0, 1, 2, 7, 30, }, - { 0, 0, 1, 2, 8, 26, }, - { 2, 0, 1, 2, 8, 30, }, - { 0, 0, 1, 2, 9, 26, }, - { 2, 0, 1, 2, 9, 30, }, - { 0, 0, 1, 2, 10, 28, }, - { 2, 0, 1, 2, 10, 30, }, - { 0, 0, 1, 2, 11, 20, }, - { 2, 0, 1, 2, 11, 30, }, - { 0, 0, 1, 2, 12, 63, }, - { 2, 0, 1, 2, 12, 63, }, - { 0, 0, 1, 2, 13, 63, }, - { 2, 0, 1, 2, 13, 63, }, - { 0, 0, 1, 2, 14, 63, }, - { 2, 0, 1, 2, 14, 63, }, - { 0, 1, 0, 1, 36, 31, }, - { 2, 1, 0, 1, 36, 32, }, - { 0, 1, 0, 1, 40, 33, }, - { 2, 1, 0, 1, 40, 32, }, - { 0, 1, 0, 1, 44, 33, }, - { 2, 1, 0, 1, 44, 32, }, - { 0, 1, 0, 1, 48, 31, }, - { 2, 1, 0, 1, 48, 32, }, - { 0, 1, 0, 1, 52, 33, }, - { 2, 1, 0, 1, 52, 32, }, - { 0, 1, 0, 1, 56, 33, }, - { 2, 1, 0, 1, 56, 32, }, - { 0, 1, 0, 1, 60, 33, }, - { 2, 1, 0, 1, 60, 32, }, - { 0, 1, 0, 1, 64, 30, }, - { 2, 1, 0, 1, 64, 32, }, - { 0, 1, 0, 1, 100, 30, }, - { 2, 1, 0, 1, 100, 32, }, - { 0, 1, 0, 1, 104, 33, }, - { 2, 1, 0, 1, 104, 32, }, - { 0, 1, 0, 1, 108, 33, }, - { 2, 1, 0, 1, 108, 32, }, - { 0, 1, 0, 1, 112, 33, }, - { 2, 1, 0, 1, 112, 32, }, - { 0, 1, 0, 1, 116, 33, }, - { 2, 1, 0, 1, 116, 32, }, - { 0, 1, 0, 1, 120, 33, }, - { 2, 1, 0, 1, 120, 32, }, - { 0, 1, 0, 1, 124, 33, }, - { 2, 1, 0, 1, 124, 32, }, - { 0, 1, 0, 1, 128, 33, }, - { 2, 1, 0, 1, 128, 32, }, - { 0, 1, 0, 1, 132, 33, }, - { 2, 1, 0, 1, 132, 32, }, - { 0, 1, 0, 1, 136, 33, }, - { 2, 1, 0, 1, 136, 32, }, - { 0, 1, 0, 1, 140, 31, }, - { 2, 1, 0, 1, 140, 32, }, - { 0, 1, 0, 1, 144, 30, }, - { 2, 1, 0, 1, 144, 63, }, - { 0, 1, 0, 1, 149, 33, }, - { 2, 1, 0, 1, 149, 63, }, - { 0, 1, 0, 1, 153, 33, }, - { 2, 1, 0, 1, 153, 63, }, - { 0, 1, 0, 1, 157, 33, }, - { 2, 1, 0, 1, 157, 63, }, - { 0, 1, 0, 1, 161, 33, }, - { 2, 1, 0, 1, 161, 63, }, - { 0, 1, 0, 1, 165, 33, }, - { 2, 1, 0, 1, 165, 63, }, - { 0, 1, 0, 2, 36, 30, }, - { 2, 1, 0, 2, 36, 32, }, - { 0, 1, 0, 2, 40, 33, }, - { 2, 1, 0, 2, 40, 32, }, - { 0, 1, 0, 2, 44, 33, }, - { 2, 1, 0, 2, 44, 32, }, - { 0, 1, 0, 2, 48, 33, }, - { 2, 1, 0, 2, 48, 32, }, - { 0, 1, 0, 2, 52, 33, }, - { 2, 1, 0, 2, 52, 32, }, - { 0, 1, 0, 2, 56, 33, }, - { 2, 1, 0, 2, 56, 32, }, - { 0, 1, 0, 2, 60, 33, }, - { 2, 1, 0, 2, 60, 32, }, - { 0, 1, 0, 2, 64, 30, }, - { 2, 1, 0, 2, 64, 32, }, - { 0, 1, 0, 2, 100, 30, }, - { 2, 1, 0, 2, 100, 32, }, - { 0, 1, 0, 2, 104, 33, }, - { 2, 1, 0, 2, 104, 32, }, - { 0, 1, 0, 2, 108, 33, }, - { 2, 1, 0, 2, 108, 32, }, - { 0, 1, 0, 2, 112, 33, }, - { 2, 1, 0, 2, 112, 32, }, - { 0, 1, 0, 2, 116, 33, }, - { 2, 1, 0, 2, 116, 32, }, - { 0, 1, 0, 2, 120, 33, }, - { 2, 1, 0, 2, 120, 32, }, - { 0, 1, 0, 2, 124, 33, }, - { 2, 1, 0, 2, 124, 32, }, - { 0, 1, 0, 2, 128, 33, }, - { 2, 1, 0, 2, 128, 32, }, - { 0, 1, 0, 2, 132, 33, }, - { 2, 1, 0, 
2, 132, 32, }, - { 0, 1, 0, 2, 136, 33, }, - { 2, 1, 0, 2, 136, 32, }, - { 0, 1, 0, 2, 140, 29, }, - { 2, 1, 0, 2, 140, 32, }, - { 0, 1, 0, 2, 144, 27, }, - { 2, 1, 0, 2, 144, 63, }, - { 0, 1, 0, 2, 149, 33, }, - { 2, 1, 0, 2, 149, 63, }, - { 0, 1, 0, 2, 153, 33, }, - { 2, 1, 0, 2, 153, 63, }, - { 0, 1, 0, 2, 157, 33, }, - { 2, 1, 0, 2, 157, 63, }, - { 0, 1, 0, 2, 161, 33, }, - { 2, 1, 0, 2, 161, 63, }, - { 0, 1, 0, 2, 165, 33, }, - { 2, 1, 0, 2, 165, 63, }, - { 0, 1, 1, 2, 38, 22, }, - { 2, 1, 1, 2, 38, 32, }, - { 0, 1, 1, 2, 46, 32, }, - { 2, 1, 1, 2, 46, 32, }, - { 0, 1, 1, 2, 54, 32, }, - { 2, 1, 1, 2, 54, 32, }, - { 0, 1, 1, 2, 62, 23, }, - { 2, 1, 1, 2, 62, 32, }, - { 0, 1, 1, 2, 102, 21, }, - { 2, 1, 1, 2, 102, 32, }, - { 0, 1, 1, 2, 110, 32, }, - { 2, 1, 1, 2, 110, 32, }, - { 0, 1, 1, 2, 118, 32, }, - { 2, 1, 1, 2, 118, 32, }, - { 0, 1, 1, 2, 126, 32, }, - { 2, 1, 1, 2, 126, 32, }, - { 0, 1, 1, 2, 134, 32, }, - { 2, 1, 1, 2, 134, 32, }, - { 0, 1, 1, 2, 142, 29, }, - { 2, 1, 1, 2, 142, 63, }, - { 0, 1, 1, 2, 151, 32, }, - { 2, 1, 1, 2, 151, 63, }, - { 0, 1, 1, 2, 159, 32, }, - { 2, 1, 1, 2, 159, 63, }, - { 0, 1, 2, 4, 42, 19, }, - { 2, 1, 2, 4, 42, 32, }, - { 0, 1, 2, 4, 58, 22, }, - { 2, 1, 2, 4, 58, 32, }, - { 0, 1, 2, 4, 106, 18, }, - { 2, 1, 2, 4, 106, 32, }, - { 0, 1, 2, 4, 122, 32, }, - { 2, 1, 2, 4, 122, 32, }, - { 0, 1, 2, 4, 138, 28, }, - { 2, 1, 2, 4, 138, 63, }, - { 0, 1, 2, 4, 155, 32, }, - { 2, 1, 2, 4, 155, 63, }, { 1, 0, 0, 0, 1, 34, }, { 3, 0, 0, 0, 1, 30, }, { 4, 0, 0, 0, 1, 34, }, { 5, 0, 0, 0, 1, 30, }, { 6, 0, 0, 0, 1, 30, }, { 7, 0, 0, 0, 1, 30, }, + { 8, 0, 0, 0, 1, 30, }, + { 9, 0, 0, 0, 1, 28, }, + { 10, 0, 0, 0, 1, 30, }, + { 11, 0, 0, 0, 1, 30, }, + { 0, 0, 0, 0, 2, 32, }, + { 2, 0, 0, 0, 2, 30, }, { 1, 0, 0, 0, 2, 34, }, { 3, 0, 0, 0, 2, 32, }, { 4, 0, 0, 0, 2, 34, }, { 5, 0, 0, 0, 2, 30, }, { 6, 0, 0, 0, 2, 32, }, { 7, 0, 0, 0, 2, 30, }, + { 8, 0, 0, 0, 2, 32, }, + { 9, 0, 0, 0, 2, 28, }, + { 10, 0, 0, 0, 2, 30, }, + { 11, 0, 0, 0, 2, 30, }, + { 0, 0, 0, 0, 3, 32, }, + { 2, 0, 0, 0, 3, 30, }, { 1, 0, 0, 0, 3, 34, }, { 3, 0, 0, 0, 3, 32, }, { 4, 0, 0, 0, 3, 34, }, { 5, 0, 0, 0, 3, 30, }, { 6, 0, 0, 0, 3, 32, }, { 7, 0, 0, 0, 3, 30, }, + { 8, 0, 0, 0, 3, 32, }, + { 9, 0, 0, 0, 3, 28, }, + { 10, 0, 0, 0, 3, 30, }, + { 11, 0, 0, 0, 3, 30, }, + { 0, 0, 0, 0, 4, 32, }, + { 2, 0, 0, 0, 4, 30, }, { 1, 0, 0, 0, 4, 34, }, { 3, 0, 0, 0, 4, 32, }, { 4, 0, 0, 0, 4, 34, }, { 5, 0, 0, 0, 4, 30, }, { 6, 0, 0, 0, 4, 32, }, { 7, 0, 0, 0, 4, 30, }, + { 8, 0, 0, 0, 4, 32, }, + { 9, 0, 0, 0, 4, 28, }, + { 10, 0, 0, 0, 4, 30, }, + { 11, 0, 0, 0, 4, 30, }, + { 0, 0, 0, 0, 5, 32, }, + { 2, 0, 0, 0, 5, 30, }, { 1, 0, 0, 0, 5, 34, }, { 3, 0, 0, 0, 5, 32, }, { 4, 0, 0, 0, 5, 34, }, { 5, 0, 0, 0, 5, 30, }, { 6, 0, 0, 0, 5, 32, }, { 7, 0, 0, 0, 5, 30, }, + { 8, 0, 0, 0, 5, 32, }, + { 9, 0, 0, 0, 5, 28, }, + { 10, 0, 0, 0, 5, 30, }, + { 11, 0, 0, 0, 5, 30, }, + { 0, 0, 0, 0, 6, 32, }, + { 2, 0, 0, 0, 6, 30, }, { 1, 0, 0, 0, 6, 34, }, { 3, 0, 0, 0, 6, 32, }, { 4, 0, 0, 0, 6, 34, }, { 5, 0, 0, 0, 6, 30, }, { 6, 0, 0, 0, 6, 32, }, { 7, 0, 0, 0, 6, 30, }, + { 8, 0, 0, 0, 6, 32, }, + { 9, 0, 0, 0, 6, 28, }, + { 10, 0, 0, 0, 6, 30, }, + { 11, 0, 0, 0, 6, 30, }, + { 0, 0, 0, 0, 7, 32, }, + { 2, 0, 0, 0, 7, 30, }, { 1, 0, 0, 0, 7, 34, }, { 3, 0, 0, 0, 7, 32, }, { 4, 0, 0, 0, 7, 34, }, { 5, 0, 0, 0, 7, 30, }, { 6, 0, 0, 0, 7, 32, }, { 7, 0, 0, 0, 7, 30, }, + { 8, 0, 0, 0, 7, 32, }, + { 9, 0, 0, 0, 7, 28, }, + { 10, 0, 0, 0, 7, 30, }, + { 11, 0, 0, 0, 7, 30, }, + { 0, 0, 0, 0, 8, 32, }, + { 2, 
0, 0, 0, 8, 30, }, { 1, 0, 0, 0, 8, 34, }, { 3, 0, 0, 0, 8, 32, }, { 4, 0, 0, 0, 8, 34, }, { 5, 0, 0, 0, 8, 30, }, { 6, 0, 0, 0, 8, 32, }, { 7, 0, 0, 0, 8, 30, }, + { 8, 0, 0, 0, 8, 32, }, + { 9, 0, 0, 0, 8, 28, }, + { 10, 0, 0, 0, 8, 30, }, + { 11, 0, 0, 0, 8, 30, }, + { 0, 0, 0, 0, 9, 32, }, + { 2, 0, 0, 0, 9, 30, }, { 1, 0, 0, 0, 9, 34, }, { 3, 0, 0, 0, 9, 32, }, { 4, 0, 0, 0, 9, 34, }, { 5, 0, 0, 0, 9, 30, }, { 6, 0, 0, 0, 9, 32, }, { 7, 0, 0, 0, 9, 30, }, + { 8, 0, 0, 0, 9, 32, }, + { 9, 0, 0, 0, 9, 28, }, + { 10, 0, 0, 0, 9, 30, }, + { 11, 0, 0, 0, 9, 30, }, + { 0, 0, 0, 0, 10, 32, }, + { 2, 0, 0, 0, 10, 30, }, { 1, 0, 0, 0, 10, 34, }, { 3, 0, 0, 0, 10, 32, }, { 4, 0, 0, 0, 10, 34, }, { 5, 0, 0, 0, 10, 30, }, { 6, 0, 0, 0, 10, 32, }, { 7, 0, 0, 0, 10, 30, }, + { 8, 0, 0, 0, 10, 32, }, + { 9, 0, 0, 0, 10, 28, }, + { 10, 0, 0, 0, 10, 30, }, + { 11, 0, 0, 0, 10, 30, }, + { 0, 0, 0, 0, 11, 32, }, + { 2, 0, 0, 0, 11, 30, }, { 1, 0, 0, 0, 11, 34, }, { 3, 0, 0, 0, 11, 32, }, { 4, 0, 0, 0, 11, 34, }, { 5, 0, 0, 0, 11, 30, }, { 6, 0, 0, 0, 11, 32, }, { 7, 0, 0, 0, 11, 30, }, + { 8, 0, 0, 0, 11, 32, }, + { 9, 0, 0, 0, 11, 28, }, + { 10, 0, 0, 0, 11, 30, }, + { 11, 0, 0, 0, 11, 30, }, + { 0, 0, 0, 0, 12, 24, }, + { 2, 0, 0, 0, 12, 30, }, { 1, 0, 0, 0, 12, 34, }, { 3, 0, 0, 0, 12, 24, }, { 4, 0, 0, 0, 12, 34, }, { 5, 0, 0, 0, 12, 30, }, { 6, 0, 0, 0, 12, 24, }, { 7, 0, 0, 0, 12, 30, }, + { 8, 0, 0, 0, 12, 24, }, + { 9, 0, 0, 0, 12, 24, }, + { 10, 0, 0, 0, 12, 30, }, + { 11, 0, 0, 0, 12, 30, }, + { 0, 0, 0, 0, 13, 16, }, + { 2, 0, 0, 0, 13, 30, }, { 1, 0, 0, 0, 13, 34, }, { 3, 0, 0, 0, 13, 16, }, { 4, 0, 0, 0, 13, 34, }, { 5, 0, 0, 0, 13, 30, }, { 6, 0, 0, 0, 13, 16, }, { 7, 0, 0, 0, 13, 30, }, + { 8, 0, 0, 0, 13, 16, }, + { 9, 0, 0, 0, 13, 18, }, + { 10, 0, 0, 0, 13, 30, }, + { 11, 0, 0, 0, 13, 30, }, + { 0, 0, 0, 0, 14, 63, }, + { 2, 0, 0, 0, 14, 63, }, { 1, 0, 0, 0, 14, 34, }, { 3, 0, 0, 0, 14, 63, }, { 4, 0, 0, 0, 14, 63, }, { 5, 0, 0, 0, 14, 63, }, { 6, 0, 0, 0, 14, 63, }, { 7, 0, 0, 0, 14, 63, }, + { 8, 0, 0, 0, 14, 63, }, + { 9, 0, 0, 0, 14, 63, }, + { 10, 0, 0, 0, 14, 63, }, + { 11, 0, 0, 0, 14, 63, }, + { 0, 0, 0, 1, 1, 30, }, + { 2, 0, 0, 1, 1, 30, }, { 1, 0, 0, 1, 1, 34, }, { 3, 0, 0, 1, 1, 30, }, { 4, 0, 0, 1, 1, 32, }, { 5, 0, 0, 1, 1, 30, }, { 6, 0, 0, 1, 1, 30, }, { 7, 0, 0, 1, 1, 30, }, + { 8, 0, 0, 1, 1, 30, }, + { 9, 0, 0, 1, 1, 30, }, + { 10, 0, 0, 1, 1, 30, }, + { 11, 0, 0, 1, 1, 30, }, + { 0, 0, 0, 1, 2, 32, }, + { 2, 0, 0, 1, 2, 30, }, { 1, 0, 0, 1, 2, 34, }, { 3, 0, 0, 1, 2, 32, }, { 4, 0, 0, 1, 2, 34, }, { 5, 0, 0, 1, 2, 30, }, { 6, 0, 0, 1, 2, 32, }, { 7, 0, 0, 1, 2, 30, }, + { 8, 0, 0, 1, 2, 32, }, + { 9, 0, 0, 1, 2, 30, }, + { 10, 0, 0, 1, 2, 30, }, + { 11, 0, 0, 1, 2, 30, }, + { 0, 0, 0, 1, 3, 34, }, + { 2, 0, 0, 1, 3, 30, }, { 1, 0, 0, 1, 3, 34, }, { 3, 0, 0, 1, 3, 34, }, { 4, 0, 0, 1, 3, 34, }, { 5, 0, 0, 1, 3, 30, }, { 6, 0, 0, 1, 3, 34, }, { 7, 0, 0, 1, 3, 30, }, + { 8, 0, 0, 1, 3, 34, }, + { 9, 0, 0, 1, 3, 30, }, + { 10, 0, 0, 1, 3, 30, }, + { 11, 0, 0, 1, 3, 30, }, + { 0, 0, 0, 1, 4, 34, }, + { 2, 0, 0, 1, 4, 30, }, { 1, 0, 0, 1, 4, 34, }, { 3, 0, 0, 1, 4, 34, }, { 4, 0, 0, 1, 4, 34, }, { 5, 0, 0, 1, 4, 30, }, { 6, 0, 0, 1, 4, 34, }, { 7, 0, 0, 1, 4, 30, }, + { 8, 0, 0, 1, 4, 34, }, + { 9, 0, 0, 1, 4, 30, }, + { 10, 0, 0, 1, 4, 30, }, + { 11, 0, 0, 1, 4, 30, }, + { 0, 0, 0, 1, 5, 34, }, + { 2, 0, 0, 1, 5, 30, }, { 1, 0, 0, 1, 5, 34, }, { 3, 0, 0, 1, 5, 34, }, { 4, 0, 0, 1, 5, 34, }, { 5, 0, 0, 1, 5, 30, }, { 6, 0, 0, 1, 5, 34, }, { 7, 0, 0, 1, 5, 30, }, + { 8, 0, 
0, 1, 5, 34, }, + { 9, 0, 0, 1, 5, 30, }, + { 10, 0, 0, 1, 5, 30, }, + { 11, 0, 0, 1, 5, 30, }, + { 0, 0, 0, 1, 6, 34, }, + { 2, 0, 0, 1, 6, 30, }, { 1, 0, 0, 1, 6, 34, }, { 3, 0, 0, 1, 6, 34, }, { 4, 0, 0, 1, 6, 34, }, { 5, 0, 0, 1, 6, 30, }, { 6, 0, 0, 1, 6, 34, }, { 7, 0, 0, 1, 6, 30, }, + { 8, 0, 0, 1, 6, 34, }, + { 9, 0, 0, 1, 6, 30, }, + { 10, 0, 0, 1, 6, 30, }, + { 11, 0, 0, 1, 6, 30, }, + { 0, 0, 0, 1, 7, 34, }, + { 2, 0, 0, 1, 7, 30, }, { 1, 0, 0, 1, 7, 34, }, { 3, 0, 0, 1, 7, 34, }, { 4, 0, 0, 1, 7, 34, }, { 5, 0, 0, 1, 7, 30, }, { 6, 0, 0, 1, 7, 34, }, { 7, 0, 0, 1, 7, 30, }, + { 8, 0, 0, 1, 7, 34, }, + { 9, 0, 0, 1, 7, 30, }, + { 10, 0, 0, 1, 7, 30, }, + { 11, 0, 0, 1, 7, 30, }, + { 0, 0, 0, 1, 8, 34, }, + { 2, 0, 0, 1, 8, 30, }, { 1, 0, 0, 1, 8, 34, }, { 3, 0, 0, 1, 8, 34, }, { 4, 0, 0, 1, 8, 34, }, { 5, 0, 0, 1, 8, 30, }, { 6, 0, 0, 1, 8, 34, }, { 7, 0, 0, 1, 8, 30, }, + { 8, 0, 0, 1, 8, 34, }, + { 9, 0, 0, 1, 8, 30, }, + { 10, 0, 0, 1, 8, 30, }, + { 11, 0, 0, 1, 8, 30, }, + { 0, 0, 0, 1, 9, 34, }, + { 2, 0, 0, 1, 9, 30, }, { 1, 0, 0, 1, 9, 34, }, { 3, 0, 0, 1, 9, 34, }, { 4, 0, 0, 1, 9, 34, }, { 5, 0, 0, 1, 9, 30, }, { 6, 0, 0, 1, 9, 34, }, { 7, 0, 0, 1, 9, 30, }, + { 8, 0, 0, 1, 9, 34, }, + { 9, 0, 0, 1, 9, 30, }, + { 10, 0, 0, 1, 9, 30, }, + { 11, 0, 0, 1, 9, 30, }, + { 0, 0, 0, 1, 10, 32, }, + { 2, 0, 0, 1, 10, 30, }, { 1, 0, 0, 1, 10, 34, }, { 3, 0, 0, 1, 10, 32, }, { 4, 0, 0, 1, 10, 34, }, { 5, 0, 0, 1, 10, 30, }, { 6, 0, 0, 1, 10, 32, }, { 7, 0, 0, 1, 10, 30, }, + { 8, 0, 0, 1, 10, 32, }, + { 9, 0, 0, 1, 10, 26, }, + { 10, 0, 0, 1, 10, 30, }, + { 11, 0, 0, 1, 10, 30, }, + { 0, 0, 0, 1, 11, 30, }, + { 2, 0, 0, 1, 11, 30, }, { 1, 0, 0, 1, 11, 34, }, { 3, 0, 0, 1, 11, 30, }, { 4, 0, 0, 1, 11, 34, }, { 5, 0, 0, 1, 11, 30, }, { 6, 0, 0, 1, 11, 30, }, { 7, 0, 0, 1, 11, 30, }, + { 8, 0, 0, 1, 11, 30, }, + { 9, 0, 0, 1, 11, 22, }, + { 10, 0, 0, 1, 11, 30, }, + { 11, 0, 0, 1, 11, 30, }, + { 0, 0, 0, 1, 12, 28, }, + { 2, 0, 0, 1, 12, 30, }, { 1, 0, 0, 1, 12, 34, }, { 3, 0, 0, 1, 12, 28, }, { 4, 0, 0, 1, 12, 34, }, { 5, 0, 0, 1, 12, 30, }, { 6, 0, 0, 1, 12, 28, }, { 7, 0, 0, 1, 12, 30, }, + { 8, 0, 0, 1, 12, 28, }, + { 9, 0, 0, 1, 12, 18, }, + { 10, 0, 0, 1, 12, 30, }, + { 11, 0, 0, 1, 12, 30, }, + { 0, 0, 0, 1, 13, 16, }, + { 2, 0, 0, 1, 13, 30, }, { 1, 0, 0, 1, 13, 34, }, { 3, 0, 0, 1, 13, 16, }, { 4, 0, 0, 1, 13, 32, }, { 5, 0, 0, 1, 13, 30, }, { 6, 0, 0, 1, 13, 16, }, { 7, 0, 0, 1, 13, 30, }, + { 8, 0, 0, 1, 13, 16, }, + { 9, 0, 0, 1, 13, 2, }, + { 10, 0, 0, 1, 13, 30, }, + { 11, 0, 0, 1, 13, 30, }, + { 0, 0, 0, 1, 14, 63, }, + { 2, 0, 0, 1, 14, 63, }, { 1, 0, 0, 1, 14, 63, }, { 3, 0, 0, 1, 14, 63, }, { 4, 0, 0, 1, 14, 63, }, { 5, 0, 0, 1, 14, 63, }, { 6, 0, 0, 1, 14, 63, }, { 7, 0, 0, 1, 14, 63, }, + { 8, 0, 0, 1, 14, 63, }, + { 9, 0, 0, 1, 14, 63, }, + { 10, 0, 0, 1, 14, 63, }, + { 11, 0, 0, 1, 14, 63, }, + { 0, 0, 0, 2, 1, 26, }, + { 2, 0, 0, 2, 1, 30, }, { 1, 0, 0, 2, 1, 34, }, { 3, 0, 0, 2, 1, 26, }, { 4, 0, 0, 2, 1, 32, }, { 5, 0, 0, 2, 1, 30, }, { 6, 0, 0, 2, 1, 26, }, { 7, 0, 0, 2, 1, 30, }, + { 8, 0, 0, 2, 1, 26, }, + { 9, 0, 0, 2, 1, 30, }, + { 10, 0, 0, 2, 1, 30, }, + { 11, 0, 0, 2, 1, 30, }, + { 0, 0, 0, 2, 2, 30, }, + { 2, 0, 0, 2, 2, 30, }, { 1, 0, 0, 2, 2, 34, }, { 3, 0, 0, 2, 2, 30, }, { 4, 0, 0, 2, 2, 34, }, { 5, 0, 0, 2, 2, 30, }, { 6, 0, 0, 2, 2, 30, }, { 7, 0, 0, 2, 2, 30, }, + { 8, 0, 0, 2, 2, 30, }, + { 9, 0, 0, 2, 2, 30, }, + { 10, 0, 0, 2, 2, 30, }, + { 11, 0, 0, 2, 2, 30, }, + { 0, 0, 0, 2, 3, 32, }, + { 2, 0, 0, 2, 3, 30, }, { 1, 0, 0, 2, 3, 34, }, { 
3, 0, 0, 2, 3, 32, }, { 4, 0, 0, 2, 3, 34, }, { 5, 0, 0, 2, 3, 30, }, { 6, 0, 0, 2, 3, 32, }, { 7, 0, 0, 2, 3, 30, }, + { 8, 0, 0, 2, 3, 32, }, + { 9, 0, 0, 2, 3, 30, }, + { 10, 0, 0, 2, 3, 30, }, + { 11, 0, 0, 2, 3, 30, }, + { 0, 0, 0, 2, 4, 34, }, + { 2, 0, 0, 2, 4, 30, }, { 1, 0, 0, 2, 4, 34, }, { 3, 0, 0, 2, 4, 34, }, { 4, 0, 0, 2, 4, 34, }, { 5, 0, 0, 2, 4, 30, }, { 6, 0, 0, 2, 4, 34, }, { 7, 0, 0, 2, 4, 30, }, + { 8, 0, 0, 2, 4, 34, }, + { 9, 0, 0, 2, 4, 30, }, + { 10, 0, 0, 2, 4, 30, }, + { 11, 0, 0, 2, 4, 30, }, + { 0, 0, 0, 2, 5, 34, }, + { 2, 0, 0, 2, 5, 30, }, { 1, 0, 0, 2, 5, 34, }, { 3, 0, 0, 2, 5, 34, }, { 4, 0, 0, 2, 5, 34, }, { 5, 0, 0, 2, 5, 30, }, { 6, 0, 0, 2, 5, 34, }, { 7, 0, 0, 2, 5, 30, }, + { 8, 0, 0, 2, 5, 34, }, + { 9, 0, 0, 2, 5, 30, }, + { 10, 0, 0, 2, 5, 30, }, + { 11, 0, 0, 2, 5, 30, }, + { 0, 0, 0, 2, 6, 34, }, + { 2, 0, 0, 2, 6, 30, }, { 1, 0, 0, 2, 6, 34, }, { 3, 0, 0, 2, 6, 34, }, { 4, 0, 0, 2, 6, 34, }, { 5, 0, 0, 2, 6, 30, }, { 6, 0, 0, 2, 6, 34, }, { 7, 0, 0, 2, 6, 30, }, + { 8, 0, 0, 2, 6, 34, }, + { 9, 0, 0, 2, 6, 30, }, + { 10, 0, 0, 2, 6, 30, }, + { 11, 0, 0, 2, 6, 30, }, + { 0, 0, 0, 2, 7, 34, }, + { 2, 0, 0, 2, 7, 30, }, { 1, 0, 0, 2, 7, 34, }, { 3, 0, 0, 2, 7, 34, }, { 4, 0, 0, 2, 7, 34, }, { 5, 0, 0, 2, 7, 30, }, { 6, 0, 0, 2, 7, 34, }, { 7, 0, 0, 2, 7, 30, }, + { 8, 0, 0, 2, 7, 34, }, + { 9, 0, 0, 2, 7, 30, }, + { 10, 0, 0, 2, 7, 30, }, + { 11, 0, 0, 2, 7, 30, }, + { 0, 0, 0, 2, 8, 34, }, + { 2, 0, 0, 2, 8, 30, }, { 1, 0, 0, 2, 8, 34, }, { 3, 0, 0, 2, 8, 34, }, { 4, 0, 0, 2, 8, 34, }, { 5, 0, 0, 2, 8, 30, }, { 6, 0, 0, 2, 8, 34, }, { 7, 0, 0, 2, 8, 30, }, + { 8, 0, 0, 2, 8, 34, }, + { 9, 0, 0, 2, 8, 30, }, + { 10, 0, 0, 2, 8, 30, }, + { 11, 0, 0, 2, 8, 30, }, + { 0, 0, 0, 2, 9, 32, }, + { 2, 0, 0, 2, 9, 30, }, { 1, 0, 0, 2, 9, 34, }, { 3, 0, 0, 2, 9, 32, }, { 4, 0, 0, 2, 9, 34, }, { 5, 0, 0, 2, 9, 30, }, { 6, 0, 0, 2, 9, 32, }, { 7, 0, 0, 2, 9, 30, }, + { 8, 0, 0, 2, 9, 32, }, + { 9, 0, 0, 2, 9, 30, }, + { 10, 0, 0, 2, 9, 30, }, + { 11, 0, 0, 2, 9, 30, }, + { 0, 0, 0, 2, 10, 30, }, + { 2, 0, 0, 2, 10, 30, }, { 1, 0, 0, 2, 10, 34, }, { 3, 0, 0, 2, 10, 30, }, { 4, 0, 0, 2, 10, 34, }, { 5, 0, 0, 2, 10, 30, }, { 6, 0, 0, 2, 10, 30, }, { 7, 0, 0, 2, 10, 30, }, + { 8, 0, 0, 2, 10, 30, }, + { 9, 0, 0, 2, 10, 24, }, + { 10, 0, 0, 2, 10, 30, }, + { 11, 0, 0, 2, 10, 30, }, + { 0, 0, 0, 2, 11, 28, }, + { 2, 0, 0, 2, 11, 30, }, { 1, 0, 0, 2, 11, 34, }, { 3, 0, 0, 2, 11, 28, }, { 4, 0, 0, 2, 11, 34, }, { 5, 0, 0, 2, 11, 30, }, { 6, 0, 0, 2, 11, 28, }, { 7, 0, 0, 2, 11, 30, }, + { 8, 0, 0, 2, 11, 28, }, + { 9, 0, 0, 2, 11, 20, }, + { 10, 0, 0, 2, 11, 30, }, + { 11, 0, 0, 2, 11, 30, }, + { 0, 0, 0, 2, 12, 26, }, + { 2, 0, 0, 2, 12, 30, }, { 1, 0, 0, 2, 12, 34, }, { 3, 0, 0, 2, 12, 26, }, { 4, 0, 0, 2, 12, 34, }, { 5, 0, 0, 2, 12, 30, }, { 6, 0, 0, 2, 12, 26, }, { 7, 0, 0, 2, 12, 30, }, + { 8, 0, 0, 2, 12, 26, }, + { 9, 0, 0, 2, 12, 16, }, + { 10, 0, 0, 2, 12, 30, }, + { 11, 0, 0, 2, 12, 30, }, + { 0, 0, 0, 2, 13, 12, }, + { 2, 0, 0, 2, 13, 30, }, { 1, 0, 0, 2, 13, 34, }, { 3, 0, 0, 2, 13, 12, }, { 4, 0, 0, 2, 13, 32, }, { 5, 0, 0, 2, 13, 30, }, { 6, 0, 0, 2, 13, 12, }, { 7, 0, 0, 2, 13, 30, }, + { 8, 0, 0, 2, 13, 12, }, + { 9, 0, 0, 2, 13, 0, }, + { 10, 0, 0, 2, 13, 30, }, + { 11, 0, 0, 2, 13, 30, }, + { 0, 0, 0, 2, 14, 63, }, + { 2, 0, 0, 2, 14, 63, }, { 1, 0, 0, 2, 14, 63, }, { 3, 0, 0, 2, 14, 63, }, { 4, 0, 0, 2, 14, 63, }, { 5, 0, 0, 2, 14, 63, }, { 6, 0, 0, 2, 14, 63, }, { 7, 0, 0, 2, 14, 63, }, + { 8, 0, 0, 2, 14, 63, }, + { 9, 0, 0, 2, 14, 63, }, + { 
10, 0, 0, 2, 14, 63, }, + { 11, 0, 0, 2, 14, 63, }, + { 0, 0, 1, 2, 1, 63, }, + { 2, 0, 1, 2, 1, 63, }, { 1, 0, 1, 2, 1, 63, }, { 3, 0, 1, 2, 1, 63, }, { 4, 0, 1, 2, 1, 63, }, { 5, 0, 1, 2, 1, 63, }, { 6, 0, 1, 2, 1, 63, }, { 7, 0, 1, 2, 1, 63, }, + { 8, 0, 1, 2, 1, 63, }, + { 9, 0, 1, 2, 1, 63, }, + { 10, 0, 1, 2, 1, 63, }, + { 11, 0, 1, 2, 1, 63, }, + { 0, 0, 1, 2, 2, 63, }, + { 2, 0, 1, 2, 2, 63, }, { 1, 0, 1, 2, 2, 63, }, { 3, 0, 1, 2, 2, 63, }, { 4, 0, 1, 2, 2, 63, }, { 5, 0, 1, 2, 2, 63, }, { 6, 0, 1, 2, 2, 63, }, { 7, 0, 1, 2, 2, 63, }, + { 8, 0, 1, 2, 2, 63, }, + { 9, 0, 1, 2, 2, 63, }, + { 10, 0, 1, 2, 2, 63, }, + { 11, 0, 1, 2, 2, 63, }, + { 0, 0, 1, 2, 3, 26, }, + { 2, 0, 1, 2, 3, 30, }, { 1, 0, 1, 2, 3, 30, }, { 3, 0, 1, 2, 3, 26, }, { 4, 0, 1, 2, 3, 30, }, { 5, 0, 1, 2, 3, 30, }, { 6, 0, 1, 2, 3, 26, }, { 7, 0, 1, 2, 3, 30, }, + { 8, 0, 1, 2, 3, 26, }, + { 9, 0, 1, 2, 3, 29, }, + { 10, 0, 1, 2, 3, 30, }, + { 11, 0, 1, 2, 3, 30, }, + { 0, 0, 1, 2, 4, 26, }, + { 2, 0, 1, 2, 4, 30, }, { 1, 0, 1, 2, 4, 30, }, { 3, 0, 1, 2, 4, 26, }, { 4, 0, 1, 2, 4, 30, }, { 5, 0, 1, 2, 4, 30, }, { 6, 0, 1, 2, 4, 26, }, { 7, 0, 1, 2, 4, 30, }, + { 8, 0, 1, 2, 4, 26, }, + { 9, 0, 1, 2, 4, 29, }, + { 10, 0, 1, 2, 4, 30, }, + { 11, 0, 1, 2, 4, 30, }, + { 0, 0, 1, 2, 5, 30, }, + { 2, 0, 1, 2, 5, 30, }, { 1, 0, 1, 2, 5, 30, }, { 3, 0, 1, 2, 5, 30, }, { 4, 0, 1, 2, 5, 30, }, { 5, 0, 1, 2, 5, 30, }, { 6, 0, 1, 2, 5, 30, }, { 7, 0, 1, 2, 5, 30, }, + { 8, 0, 1, 2, 5, 30, }, + { 9, 0, 1, 2, 5, 29, }, + { 10, 0, 1, 2, 5, 30, }, + { 11, 0, 1, 2, 5, 30, }, + { 0, 0, 1, 2, 6, 30, }, + { 2, 0, 1, 2, 6, 30, }, { 1, 0, 1, 2, 6, 30, }, { 3, 0, 1, 2, 6, 30, }, { 4, 0, 1, 2, 6, 30, }, { 5, 0, 1, 2, 6, 30, }, { 6, 0, 1, 2, 6, 30, }, { 7, 0, 1, 2, 6, 30, }, + { 8, 0, 1, 2, 6, 30, }, + { 9, 0, 1, 2, 6, 29, }, + { 10, 0, 1, 2, 6, 30, }, + { 11, 0, 1, 2, 6, 30, }, + { 0, 0, 1, 2, 7, 30, }, + { 2, 0, 1, 2, 7, 30, }, { 1, 0, 1, 2, 7, 30, }, { 3, 0, 1, 2, 7, 30, }, { 4, 0, 1, 2, 7, 30, }, { 5, 0, 1, 2, 7, 30, }, { 6, 0, 1, 2, 7, 30, }, { 7, 0, 1, 2, 7, 30, }, + { 8, 0, 1, 2, 7, 30, }, + { 9, 0, 1, 2, 7, 29, }, + { 10, 0, 1, 2, 7, 30, }, + { 11, 0, 1, 2, 7, 30, }, + { 0, 0, 1, 2, 8, 26, }, + { 2, 0, 1, 2, 8, 30, }, { 1, 0, 1, 2, 8, 30, }, { 3, 0, 1, 2, 8, 26, }, { 4, 0, 1, 2, 8, 30, }, { 5, 0, 1, 2, 8, 30, }, { 6, 0, 1, 2, 8, 26, }, { 7, 0, 1, 2, 8, 30, }, + { 8, 0, 1, 2, 8, 26, }, + { 9, 0, 1, 2, 8, 25, }, + { 10, 0, 1, 2, 8, 30, }, + { 11, 0, 1, 2, 8, 30, }, + { 0, 0, 1, 2, 9, 26, }, + { 2, 0, 1, 2, 9, 30, }, { 1, 0, 1, 2, 9, 30, }, { 3, 0, 1, 2, 9, 26, }, { 4, 0, 1, 2, 9, 30, }, { 5, 0, 1, 2, 9, 30, }, { 6, 0, 1, 2, 9, 26, }, { 7, 0, 1, 2, 9, 30, }, + { 8, 0, 1, 2, 9, 26, }, + { 9, 0, 1, 2, 9, 21, }, + { 10, 0, 1, 2, 9, 30, }, + { 11, 0, 1, 2, 9, 30, }, + { 0, 0, 1, 2, 10, 28, }, + { 2, 0, 1, 2, 10, 30, }, { 1, 0, 1, 2, 10, 30, }, { 3, 0, 1, 2, 10, 28, }, { 4, 0, 1, 2, 10, 30, }, { 5, 0, 1, 2, 10, 30, }, { 6, 0, 1, 2, 10, 28, }, { 7, 0, 1, 2, 10, 30, }, + { 8, 0, 1, 2, 10, 28, }, + { 9, 0, 1, 2, 10, 17, }, + { 10, 0, 1, 2, 10, 30, }, + { 11, 0, 1, 2, 10, 30, }, + { 0, 0, 1, 2, 11, 20, }, + { 2, 0, 1, 2, 11, 30, }, { 1, 0, 1, 2, 11, 30, }, { 3, 0, 1, 2, 11, 20, }, { 4, 0, 1, 2, 11, 30, }, { 5, 0, 1, 2, 11, 30, }, { 6, 0, 1, 2, 11, 20, }, { 7, 0, 1, 2, 11, 30, }, + { 8, 0, 1, 2, 11, 20, }, + { 9, 0, 1, 2, 11, 5, }, + { 10, 0, 1, 2, 11, 30, }, + { 11, 0, 1, 2, 11, 30, }, + { 0, 0, 1, 2, 12, 63, }, + { 2, 0, 1, 2, 12, 63, }, { 1, 0, 1, 2, 12, 63, }, { 3, 0, 1, 2, 12, 63, }, { 4, 0, 1, 2, 12, 63, }, { 5, 0, 1, 2, 12, 63, }, { 
6, 0, 1, 2, 12, 63, }, { 7, 0, 1, 2, 12, 63, }, + { 8, 0, 1, 2, 12, 63, }, + { 9, 0, 1, 2, 12, 63, }, + { 10, 0, 1, 2, 12, 63, }, + { 11, 0, 1, 2, 12, 63, }, + { 0, 0, 1, 2, 13, 63, }, + { 2, 0, 1, 2, 13, 63, }, { 1, 0, 1, 2, 13, 63, }, { 3, 0, 1, 2, 13, 63, }, { 4, 0, 1, 2, 13, 63, }, { 5, 0, 1, 2, 13, 63, }, { 6, 0, 1, 2, 13, 63, }, { 7, 0, 1, 2, 13, 63, }, + { 8, 0, 1, 2, 13, 63, }, + { 9, 0, 1, 2, 13, 63, }, + { 10, 0, 1, 2, 13, 63, }, + { 11, 0, 1, 2, 13, 63, }, + { 0, 0, 1, 2, 14, 63, }, + { 2, 0, 1, 2, 14, 63, }, { 1, 0, 1, 2, 14, 63, }, { 3, 0, 1, 2, 14, 63, }, { 4, 0, 1, 2, 14, 63, }, { 5, 0, 1, 2, 14, 63, }, { 6, 0, 1, 2, 14, 63, }, { 7, 0, 1, 2, 14, 63, }, + { 8, 0, 1, 2, 14, 63, }, + { 9, 0, 1, 2, 14, 63, }, + { 10, 0, 1, 2, 14, 63, }, + { 11, 0, 1, 2, 14, 63, }, + { 0, 1, 0, 1, 36, 31, }, + { 2, 1, 0, 1, 36, 32, }, { 1, 1, 0, 1, 36, 33, }, { 3, 1, 0, 1, 36, 31, }, { 4, 1, 0, 1, 36, 29, }, { 5, 1, 0, 1, 36, 32, }, - { 6, 1, 0, 1, 36, 29, }, + { 6, 1, 0, 1, 36, 31, }, { 7, 1, 0, 1, 36, 27, }, + { 8, 1, 0, 1, 36, 31, }, + { 9, 1, 0, 1, 36, 29, }, + { 10, 1, 0, 1, 36, 63, }, + { 11, 1, 0, 1, 36, 32, }, + { 0, 1, 0, 1, 40, 33, }, + { 2, 1, 0, 1, 40, 32, }, { 1, 1, 0, 1, 40, 33, }, { 3, 1, 0, 1, 40, 31, }, { 4, 1, 0, 1, 40, 28, }, { 5, 1, 0, 1, 40, 32, }, - { 6, 1, 0, 1, 40, 29, }, + { 6, 1, 0, 1, 40, 33, }, { 7, 1, 0, 1, 40, 27, }, + { 8, 1, 0, 1, 40, 31, }, + { 9, 1, 0, 1, 40, 29, }, + { 10, 1, 0, 1, 40, 63, }, + { 11, 1, 0, 1, 40, 32, }, + { 0, 1, 0, 1, 44, 33, }, + { 2, 1, 0, 1, 44, 32, }, { 1, 1, 0, 1, 44, 33, }, { 3, 1, 0, 1, 44, 31, }, { 4, 1, 0, 1, 44, 28, }, { 5, 1, 0, 1, 44, 32, }, - { 6, 1, 0, 1, 44, 30, }, + { 6, 1, 0, 1, 44, 33, }, { 7, 1, 0, 1, 44, 27, }, + { 8, 1, 0, 1, 44, 31, }, + { 9, 1, 0, 1, 44, 29, }, + { 10, 1, 0, 1, 44, 63, }, + { 11, 1, 0, 1, 44, 32, }, + { 0, 1, 0, 1, 48, 31, }, + { 2, 1, 0, 1, 48, 32, }, { 1, 1, 0, 1, 48, 33, }, { 3, 1, 0, 1, 48, 31, }, { 4, 1, 0, 1, 48, 27, }, { 5, 1, 0, 1, 48, 32, }, - { 6, 1, 0, 1, 48, 30, }, + { 6, 1, 0, 1, 48, 31, }, { 7, 1, 0, 1, 48, 27, }, + { 8, 1, 0, 1, 48, 31, }, + { 9, 1, 0, 1, 48, 29, }, + { 10, 1, 0, 1, 48, 63, }, + { 11, 1, 0, 1, 48, 32, }, + { 0, 1, 0, 1, 52, 33, }, + { 2, 1, 0, 1, 52, 32, }, { 1, 1, 0, 1, 52, 33, }, { 3, 1, 0, 1, 52, 32, }, { 4, 1, 0, 1, 52, 16, }, { 5, 1, 0, 1, 52, 32, }, - { 6, 1, 0, 1, 52, 30, }, + { 6, 1, 0, 1, 52, 33, }, { 7, 1, 0, 1, 52, 27, }, + { 8, 1, 0, 1, 52, 33, }, + { 9, 1, 0, 1, 52, 29, }, + { 10, 1, 0, 1, 52, 63, }, + { 11, 1, 0, 1, 52, 32, }, + { 0, 1, 0, 1, 56, 33, }, + { 2, 1, 0, 1, 56, 32, }, { 1, 1, 0, 1, 56, 33, }, { 3, 1, 0, 1, 56, 32, }, { 4, 1, 0, 1, 56, 33, }, { 5, 1, 0, 1, 56, 32, }, - { 6, 1, 0, 1, 56, 30, }, + { 6, 1, 0, 1, 56, 33, }, { 7, 1, 0, 1, 56, 27, }, + { 8, 1, 0, 1, 56, 33, }, + { 9, 1, 0, 1, 56, 29, }, + { 10, 1, 0, 1, 56, 63, }, + { 11, 1, 0, 1, 56, 32, }, + { 0, 1, 0, 1, 60, 33, }, + { 2, 1, 0, 1, 60, 32, }, { 1, 1, 0, 1, 60, 33, }, { 3, 1, 0, 1, 60, 32, }, { 4, 1, 0, 1, 60, 33, }, { 5, 1, 0, 1, 60, 32, }, - { 6, 1, 0, 1, 60, 30, }, + { 6, 1, 0, 1, 60, 33, }, { 7, 1, 0, 1, 60, 27, }, + { 8, 1, 0, 1, 60, 33, }, + { 9, 1, 0, 1, 60, 29, }, + { 10, 1, 0, 1, 60, 63, }, + { 11, 1, 0, 1, 60, 32, }, + { 0, 1, 0, 1, 64, 30, }, + { 2, 1, 0, 1, 64, 32, }, { 1, 1, 0, 1, 64, 33, }, { 3, 1, 0, 1, 64, 30, }, { 4, 1, 0, 1, 64, 33, }, { 5, 1, 0, 1, 64, 32, }, - { 6, 1, 0, 1, 64, 29, }, + { 6, 1, 0, 1, 64, 30, }, { 7, 1, 0, 1, 64, 27, }, + { 8, 1, 0, 1, 64, 30, }, + { 9, 1, 0, 1, 64, 29, }, + { 10, 1, 0, 1, 64, 63, }, + { 11, 1, 0, 1, 64, 32, }, + { 0, 1, 0, 1, 100, 30, 
}, + { 2, 1, 0, 1, 100, 32, }, { 1, 1, 0, 1, 100, 33, }, { 3, 1, 0, 1, 100, 30, }, { 4, 1, 0, 1, 100, 33, }, { 5, 1, 0, 1, 100, 32, }, - { 6, 1, 0, 1, 100, 30, }, + { 6, 1, 0, 1, 100, 33, }, { 7, 1, 0, 1, 100, 27, }, + { 8, 1, 0, 1, 100, 30, }, + { 9, 1, 0, 1, 100, 63, }, + { 10, 1, 0, 1, 100, 63, }, + { 11, 1, 0, 1, 100, 32, }, + { 0, 1, 0, 1, 104, 33, }, + { 2, 1, 0, 1, 104, 32, }, { 1, 1, 0, 1, 104, 33, }, { 3, 1, 0, 1, 104, 33, }, { 4, 1, 0, 1, 104, 33, }, { 5, 1, 0, 1, 104, 32, }, - { 6, 1, 0, 1, 104, 30, }, + { 6, 1, 0, 1, 104, 33, }, { 7, 1, 0, 1, 104, 27, }, + { 8, 1, 0, 1, 104, 33, }, + { 9, 1, 0, 1, 104, 63, }, + { 10, 1, 0, 1, 104, 63, }, + { 11, 1, 0, 1, 104, 32, }, + { 0, 1, 0, 1, 108, 33, }, + { 2, 1, 0, 1, 108, 32, }, { 1, 1, 0, 1, 108, 33, }, { 3, 1, 0, 1, 108, 33, }, { 4, 1, 0, 1, 108, 33, }, { 5, 1, 0, 1, 108, 32, }, - { 6, 1, 0, 1, 108, 30, }, + { 6, 1, 0, 1, 108, 33, }, { 7, 1, 0, 1, 108, 27, }, + { 8, 1, 0, 1, 108, 33, }, + { 9, 1, 0, 1, 108, 63, }, + { 10, 1, 0, 1, 108, 63, }, + { 11, 1, 0, 1, 108, 32, }, + { 0, 1, 0, 1, 112, 33, }, + { 2, 1, 0, 1, 112, 32, }, { 1, 1, 0, 1, 112, 33, }, { 3, 1, 0, 1, 112, 33, }, { 4, 1, 0, 1, 112, 33, }, { 5, 1, 0, 1, 112, 32, }, - { 6, 1, 0, 1, 112, 30, }, + { 6, 1, 0, 1, 112, 33, }, { 7, 1, 0, 1, 112, 27, }, + { 8, 1, 0, 1, 112, 33, }, + { 9, 1, 0, 1, 112, 63, }, + { 10, 1, 0, 1, 112, 63, }, + { 11, 1, 0, 1, 112, 32, }, + { 0, 1, 0, 1, 116, 33, }, + { 2, 1, 0, 1, 116, 32, }, { 1, 1, 0, 1, 116, 33, }, { 3, 1, 0, 1, 116, 33, }, { 4, 1, 0, 1, 116, 33, }, { 5, 1, 0, 1, 116, 32, }, - { 6, 1, 0, 1, 116, 30, }, + { 6, 1, 0, 1, 116, 33, }, { 7, 1, 0, 1, 116, 27, }, + { 8, 1, 0, 1, 116, 33, }, + { 9, 1, 0, 1, 116, 63, }, + { 10, 1, 0, 1, 116, 63, }, + { 11, 1, 0, 1, 116, 32, }, + { 0, 1, 0, 1, 120, 33, }, + { 2, 1, 0, 1, 120, 32, }, { 1, 1, 0, 1, 120, 33, }, { 3, 1, 0, 1, 120, 63, }, { 4, 1, 0, 1, 120, 33, }, { 5, 1, 0, 1, 120, 63, }, - { 6, 1, 0, 1, 120, 30, }, + { 6, 1, 0, 1, 120, 33, }, { 7, 1, 0, 1, 120, 27, }, + { 8, 1, 0, 1, 120, 33, }, + { 9, 1, 0, 1, 120, 63, }, + { 10, 1, 0, 1, 120, 63, }, + { 11, 1, 0, 1, 120, 32, }, + { 0, 1, 0, 1, 124, 33, }, + { 2, 1, 0, 1, 124, 32, }, { 1, 1, 0, 1, 124, 33, }, { 3, 1, 0, 1, 124, 63, }, { 4, 1, 0, 1, 124, 33, }, { 5, 1, 0, 1, 124, 63, }, - { 6, 1, 0, 1, 124, 30, }, + { 6, 1, 0, 1, 124, 33, }, { 7, 1, 0, 1, 124, 27, }, + { 8, 1, 0, 1, 124, 33, }, + { 9, 1, 0, 1, 124, 63, }, + { 10, 1, 0, 1, 124, 63, }, + { 11, 1, 0, 1, 124, 32, }, + { 0, 1, 0, 1, 128, 33, }, + { 2, 1, 0, 1, 128, 32, }, { 1, 1, 0, 1, 128, 33, }, { 3, 1, 0, 1, 128, 63, }, - { 4, 1, 0, 1, 128, 63, }, + { 4, 1, 0, 1, 128, 33, }, { 5, 1, 0, 1, 128, 63, }, - { 6, 1, 0, 1, 128, 30, }, + { 6, 1, 0, 1, 128, 33, }, { 7, 1, 0, 1, 128, 27, }, + { 8, 1, 0, 1, 128, 33, }, + { 9, 1, 0, 1, 128, 63, }, + { 10, 1, 0, 1, 128, 63, }, + { 11, 1, 0, 1, 128, 32, }, + { 0, 1, 0, 1, 132, 33, }, + { 2, 1, 0, 1, 132, 32, }, { 1, 1, 0, 1, 132, 33, }, { 3, 1, 0, 1, 132, 33, }, - { 4, 1, 0, 1, 132, 63, }, + { 4, 1, 0, 1, 132, 33, }, { 5, 1, 0, 1, 132, 32, }, - { 6, 1, 0, 1, 132, 30, }, + { 6, 1, 0, 1, 132, 33, }, { 7, 1, 0, 1, 132, 27, }, + { 8, 1, 0, 1, 132, 33, }, + { 9, 1, 0, 1, 132, 63, }, + { 10, 1, 0, 1, 132, 63, }, + { 11, 1, 0, 1, 132, 32, }, + { 0, 1, 0, 1, 136, 33, }, + { 2, 1, 0, 1, 136, 32, }, { 1, 1, 0, 1, 136, 33, }, { 3, 1, 0, 1, 136, 33, }, - { 4, 1, 0, 1, 136, 63, }, + { 4, 1, 0, 1, 136, 33, }, { 5, 1, 0, 1, 136, 32, }, - { 6, 1, 0, 1, 136, 30, }, - { 7, 1, 0, 1, 136, 63, }, + { 6, 1, 0, 1, 136, 33, }, + { 7, 1, 0, 1, 136, 27, }, + { 8, 1, 0, 
1, 136, 33, },
+	{ 9, 1, 0, 1, 136, 63, },
+	{ 10, 1, 0, 1, 136, 63, },
+	{ 11, 1, 0, 1, 136, 32, },
+	{ 0, 1, 0, 1, 140, 31, },
+	{ 2, 1, 0, 1, 140, 32, },
 	{ 1, 1, 0, 1, 140, 33, },
 	{ 3, 1, 0, 1, 140, 31, },
-	{ 4, 1, 0, 1, 140, 63, },
+	{ 4, 1, 0, 1, 140, 33, },
 	{ 5, 1, 0, 1, 140, 32, },
-	{ 6, 1, 0, 1, 140, 30, },
-	{ 7, 1, 0, 1, 140, 63, },
+	{ 6, 1, 0, 1, 140, 33, },
+	{ 7, 1, 0, 1, 140, 27, },
+	{ 8, 1, 0, 1, 140, 31, },
+	{ 9, 1, 0, 1, 140, 63, },
+	{ 10, 1, 0, 1, 140, 63, },
+	{ 11, 1, 0, 1, 140, 32, },
[... intervening rtw8821c_txpwr_lmt_type0 hunks omitted: every remaining channel group gains "{ 8, ... }" through "{ 11, ... }" regulatory-domain entries in the same pattern, with several existing limit values revised ...]
+	{ 0, 1, 2, 4, 155, 32, },
+	{ 2, 1, 2, 4, 155, 14, },
 	{ 1, 1, 2, 4, 155, 63, },
 	{ 3, 1, 2, 4, 155, 32, },
 	{ 4, 1, 2, 4, 155, 27, },
 	{ 5, 1, 2, 4, 155, 32, },
-	{ 6, 1, 2, 4, 155, 30, },
+	{ 6, 1, 2, 4, 155, 32, },
 	{ 7, 1, 2, 4, 155, 27, },
+	{ 8, 1, 2, 4, 155, 32, },
+	{ 9, 1, 2, 4, 155, 20, },
+	{ 10, 1, 2, 4, 155, 14, },
+	{ 11, 1, 2, 4, 155, 30, },
 };
 
 RTW_DECL_TABLE_TXPWR_LMT(rtw8821c_txpwr_lmt_type0);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
index f9e3d0779c59..5699846a399b 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
@@ -39832,6 +39832,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
 	{ 7, 0, 0, 0, 1, 60, },
 	{ 8, 0, 0, 0, 1, 72, },
 	{ 9, 0, 0, 0, 1, 60, },
+	{ 10, 0, 0, 0, 1, 60, },
+	{ 11, 0, 0, 0, 1, 60, },
 	{ 0, 0, 0, 0, 2, 72, },
 	{ 2, 0, 0, 0, 2, 60, },
 	{ 1, 0, 0, 0, 2, 68, },
[... hunks @@ -39842 through @@ -39922 omitted: channels 2-10 each gain the same "{ 10, ... }" and "{ 11, ... }" entries ...]
@@ -39932,7 +39952,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
 	{ 7, 0, 0, 0, 11, 60, },
 	{ 8, 0, 0, 0, 11, 72, },
 	{ 9, 0, 0, 0, 11, 60, },
-	{ 0, 0, 0, 0, 12, 44, },
+	{ 10, 0, 0, 0, 11, 60, },
+	{ 11, 0, 0, 0, 11, 60, },
+	{ 0, 0, 0, 0, 12, 52, },
 	{ 2, 0, 0, 0, 12, 60, },
 	{ 1, 0, 0, 0, 12, 68, },
 	{ 3, 0, 0, 0, 12, 52, },
[... remaining rtw8822c_txpwr_lmt_type0 hunks omitted: every channel group through @@ -41452 likewise gains "{ 10, ... }" and "{ 11, ... }" regulatory-domain entries after its "{ 9, ... }" entry, and a number of existing limit values are revised ...]
@@ -41452,6 +41776,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
 	{ 7, 1, 1, 2, 62, 54, },
 	{ 8, 1, 1, 2, 62, 64, },
 	{ 9,
1, 1, 2, 62, 64, }, + { 10, 1, 1, 2, 62, 64, }, + { 11, 1, 1, 2, 62, 64, }, { 0, 1, 1, 2, 102, 58, }, { 2, 1, 1, 2, 102, 64, }, { 1, 1, 1, 2, 102, 72, }, @@ -41462,6 +41788,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 2, 102, 54, }, { 8, 1, 1, 2, 102, 58, }, { 9, 1, 1, 2, 102, 127, }, + { 10, 1, 1, 2, 102, 54, }, + { 11, 1, 1, 2, 102, 64, }, { 0, 1, 1, 2, 110, 72, }, { 2, 1, 1, 2, 110, 64, }, { 1, 1, 1, 2, 110, 72, }, @@ -41472,6 +41800,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 2, 110, 54, }, { 8, 1, 1, 2, 110, 72, }, { 9, 1, 1, 2, 110, 127, }, + { 10, 1, 1, 2, 110, 54, }, + { 11, 1, 1, 2, 110, 64, }, { 0, 1, 1, 2, 118, 72, }, { 2, 1, 1, 2, 118, 64, }, { 1, 1, 1, 2, 118, 72, }, @@ -41482,6 +41812,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 2, 118, 54, }, { 8, 1, 1, 2, 118, 72, }, { 9, 1, 1, 2, 118, 127, }, + { 10, 1, 1, 2, 118, 54, }, + { 11, 1, 1, 2, 118, 64, }, { 0, 1, 1, 2, 126, 72, }, { 2, 1, 1, 2, 126, 64, }, { 1, 1, 1, 2, 126, 72, }, @@ -41492,6 +41824,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 2, 126, 54, }, { 8, 1, 1, 2, 126, 72, }, { 9, 1, 1, 2, 126, 127, }, + { 10, 1, 1, 2, 126, 54, }, + { 11, 1, 1, 2, 126, 64, }, { 0, 1, 1, 2, 134, 72, }, { 2, 1, 1, 2, 134, 64, }, { 1, 1, 1, 2, 134, 72, }, @@ -41502,6 +41836,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 2, 134, 54, }, { 8, 1, 1, 2, 134, 72, }, { 9, 1, 1, 2, 134, 127, }, + { 10, 1, 1, 2, 134, 54, }, + { 11, 1, 1, 2, 134, 64, }, { 0, 1, 1, 2, 142, 72, }, { 2, 1, 1, 2, 142, 127, }, { 1, 1, 1, 2, 142, 127, }, @@ -41512,8 +41848,10 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 2, 142, 127, }, { 8, 1, 1, 2, 142, 72, }, { 9, 1, 1, 2, 142, 127, }, + { 10, 1, 1, 2, 142, 127, }, + { 11, 1, 1, 2, 142, 72, }, { 0, 1, 1, 2, 151, 72, }, - { 2, 1, 1, 2, 151, 54, }, + { 2, 1, 1, 2, 151, 28, }, { 1, 1, 1, 2, 151, 127, }, { 3, 1, 1, 2, 151, 72, }, { 4, 1, 1, 2, 151, 72, }, @@ -41521,9 +41859,11 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 1, 2, 151, 72, }, { 7, 1, 1, 2, 151, 54, }, { 8, 1, 1, 2, 151, 72, }, - { 9, 1, 1, 2, 151, 54, }, + { 9, 1, 1, 2, 151, 28, }, + { 10, 1, 1, 2, 151, 28, }, + { 11, 1, 1, 2, 151, 64, }, { 0, 1, 1, 2, 159, 72, }, - { 2, 1, 1, 2, 159, 54, }, + { 2, 1, 1, 2, 159, 28, }, { 1, 1, 1, 2, 159, 127, }, { 3, 1, 1, 2, 159, 72, }, { 4, 1, 1, 2, 159, 72, }, @@ -41531,7 +41871,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 1, 2, 159, 72, }, { 7, 1, 1, 2, 159, 54, }, { 8, 1, 1, 2, 159, 72, }, - { 9, 1, 1, 2, 159, 54, }, + { 9, 1, 1, 2, 159, 28, }, + { 10, 1, 1, 2, 159, 28, }, + { 11, 1, 1, 2, 159, 64, }, { 0, 1, 1, 3, 38, 60, }, { 2, 1, 1, 3, 38, 40, }, { 1, 1, 1, 3, 38, 50, }, @@ -41542,6 +41884,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 3, 38, 30, }, { 8, 1, 1, 3, 38, 50, }, { 9, 1, 1, 3, 38, 40, }, + { 10, 1, 1, 3, 38, 40, }, + { 11, 1, 1, 3, 38, 40, }, { 0, 1, 1, 3, 46, 68, }, { 2, 1, 1, 3, 46, 40, }, { 1, 1, 1, 3, 46, 50, }, @@ -41552,6 +41896,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 3, 46, 30, }, { 8, 1, 1, 3, 46, 50, }, { 9, 1, 1, 3, 46, 40, }, + { 10, 1, 1, 3, 46, 40, }, + { 11, 1, 1, 3, 46, 40, }, { 0, 1, 1, 3, 54, 68, }, { 2, 1, 1, 3, 54, 40, }, { 1, 1, 1, 3, 54, 50, }, @@ 
-41562,6 +41908,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 3, 54, 30, }, { 8, 1, 1, 3, 54, 68, }, { 9, 1, 1, 3, 54, 40, }, + { 10, 1, 1, 3, 54, 40, }, + { 11, 1, 1, 3, 54, 40, }, { 0, 1, 1, 3, 62, 58, }, { 2, 1, 1, 3, 62, 40, }, { 1, 1, 1, 3, 62, 48, }, @@ -41572,6 +41920,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 3, 62, 30, }, { 8, 1, 1, 3, 62, 58, }, { 9, 1, 1, 3, 62, 40, }, + { 10, 1, 1, 3, 62, 40, }, + { 11, 1, 1, 3, 62, 40, }, { 0, 1, 1, 3, 102, 54, }, { 2, 1, 1, 3, 102, 40, }, { 1, 1, 1, 3, 102, 70, }, @@ -41582,6 +41932,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 3, 102, 30, }, { 8, 1, 1, 3, 102, 54, }, { 9, 1, 1, 3, 102, 127, }, + { 10, 1, 1, 3, 102, 30, }, + { 11, 1, 1, 3, 102, 40, }, { 0, 1, 1, 3, 110, 68, }, { 2, 1, 1, 3, 110, 40, }, { 1, 1, 1, 3, 110, 70, }, @@ -41592,6 +41944,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 3, 110, 30, }, { 8, 1, 1, 3, 110, 68, }, { 9, 1, 1, 3, 110, 127, }, + { 10, 1, 1, 3, 110, 30, }, + { 11, 1, 1, 3, 110, 40, }, { 0, 1, 1, 3, 118, 68, }, { 2, 1, 1, 3, 118, 40, }, { 1, 1, 1, 3, 118, 70, }, @@ -41602,6 +41956,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 3, 118, 30, }, { 8, 1, 1, 3, 118, 68, }, { 9, 1, 1, 3, 118, 127, }, + { 10, 1, 1, 3, 118, 30, }, + { 11, 1, 1, 3, 118, 40, }, { 0, 1, 1, 3, 126, 68, }, { 2, 1, 1, 3, 126, 40, }, { 1, 1, 1, 3, 126, 70, }, @@ -41612,6 +41968,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 3, 126, 30, }, { 8, 1, 1, 3, 126, 68, }, { 9, 1, 1, 3, 126, 127, }, + { 10, 1, 1, 3, 126, 30, }, + { 11, 1, 1, 3, 126, 40, }, { 0, 1, 1, 3, 134, 68, }, { 2, 1, 1, 3, 134, 40, }, { 1, 1, 1, 3, 134, 70, }, @@ -41622,6 +41980,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 3, 134, 30, }, { 8, 1, 1, 3, 134, 68, }, { 9, 1, 1, 3, 134, 127, }, + { 10, 1, 1, 3, 134, 30, }, + { 11, 1, 1, 3, 134, 40, }, { 0, 1, 1, 3, 142, 68, }, { 2, 1, 1, 3, 142, 127, }, { 1, 1, 1, 3, 142, 127, }, @@ -41632,8 +41992,10 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 1, 3, 142, 127, }, { 8, 1, 1, 3, 142, 68, }, { 9, 1, 1, 3, 142, 127, }, + { 10, 1, 1, 3, 142, 127, }, + { 11, 1, 1, 3, 142, 62, }, { 0, 1, 1, 3, 151, 72, }, - { 2, 1, 1, 3, 151, 30, }, + { 2, 1, 1, 3, 151, 4, }, { 1, 1, 1, 3, 151, 127, }, { 3, 1, 1, 3, 151, 72, }, { 4, 1, 1, 3, 151, 66, }, @@ -41641,9 +42003,11 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 1, 3, 151, 72, }, { 7, 1, 1, 3, 151, 30, }, { 8, 1, 1, 3, 151, 68, }, - { 9, 1, 1, 3, 151, 30, }, + { 9, 1, 1, 3, 151, 4, }, + { 10, 1, 1, 3, 151, 4, }, + { 11, 1, 1, 3, 151, 40, }, { 0, 1, 1, 3, 159, 72, }, - { 2, 1, 1, 3, 159, 30, }, + { 2, 1, 1, 3, 159, 4, }, { 1, 1, 1, 3, 159, 127, }, { 3, 1, 1, 3, 159, 72, }, { 4, 1, 1, 3, 159, 66, }, @@ -41651,7 +42015,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 1, 3, 159, 72, }, { 7, 1, 1, 3, 159, 30, }, { 8, 1, 1, 3, 159, 72, }, - { 9, 1, 1, 3, 159, 30, }, + { 9, 1, 1, 3, 159, 4, }, + { 10, 1, 1, 3, 159, 4, }, + { 11, 1, 1, 3, 159, 40, }, { 0, 1, 2, 4, 42, 64, }, { 2, 1, 2, 4, 42, 64, }, { 1, 1, 2, 4, 42, 64, }, @@ -41662,6 +42028,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 2, 4, 42, 54, }, { 8, 1, 2, 4, 42, 62, }, { 9, 1, 2, 4, 42, 64, 
}, + { 10, 1, 2, 4, 42, 64, }, + { 11, 1, 2, 4, 42, 64, }, { 0, 1, 2, 4, 58, 62, }, { 2, 1, 2, 4, 58, 64, }, { 1, 1, 2, 4, 58, 64, }, @@ -41672,6 +42040,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 2, 4, 58, 54, }, { 8, 1, 2, 4, 58, 62, }, { 9, 1, 2, 4, 58, 64, }, + { 10, 1, 2, 4, 58, 64, }, + { 11, 1, 2, 4, 58, 64, }, { 0, 1, 2, 4, 106, 58, }, { 2, 1, 2, 4, 106, 64, }, { 1, 1, 2, 4, 106, 72, }, @@ -41682,6 +42052,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 2, 4, 106, 54, }, { 8, 1, 2, 4, 106, 58, }, { 9, 1, 2, 4, 106, 127, }, + { 10, 1, 2, 4, 106, 54, }, + { 11, 1, 2, 4, 106, 64, }, { 0, 1, 2, 4, 122, 72, }, { 2, 1, 2, 4, 122, 64, }, { 1, 1, 2, 4, 122, 72, }, @@ -41692,6 +42064,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 2, 4, 122, 54, }, { 8, 1, 2, 4, 122, 72, }, { 9, 1, 2, 4, 122, 127, }, + { 10, 1, 2, 4, 122, 54, }, + { 11, 1, 2, 4, 122, 64, }, { 0, 1, 2, 4, 138, 72, }, { 2, 1, 2, 4, 138, 127, }, { 1, 1, 2, 4, 138, 127, }, @@ -41702,8 +42076,10 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 2, 4, 138, 127, }, { 8, 1, 2, 4, 138, 72, }, { 9, 1, 2, 4, 138, 127, }, + { 10, 1, 2, 4, 138, 127, }, + { 11, 1, 2, 4, 138, 72, }, { 0, 1, 2, 4, 155, 72, }, - { 2, 1, 2, 4, 155, 54, }, + { 2, 1, 2, 4, 155, 28, }, { 1, 1, 2, 4, 155, 127, }, { 3, 1, 2, 4, 155, 72, }, { 4, 1, 2, 4, 155, 68, }, @@ -41711,7 +42087,9 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 2, 4, 155, 72, }, { 7, 1, 2, 4, 155, 54, }, { 8, 1, 2, 4, 155, 68, }, - { 9, 1, 2, 4, 155, 54, }, + { 9, 1, 2, 4, 155, 28, }, + { 10, 1, 2, 4, 155, 28, }, + { 11, 1, 2, 4, 155, 64, }, { 0, 1, 2, 5, 42, 54, }, { 2, 1, 2, 5, 42, 40, }, { 1, 1, 2, 5, 42, 50, }, @@ -41722,6 +42100,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 2, 5, 42, 30, }, { 8, 1, 2, 5, 42, 50, }, { 9, 1, 2, 5, 42, 40, }, + { 10, 1, 2, 5, 42, 40, }, + { 11, 1, 2, 5, 42, 40, }, { 0, 1, 2, 5, 58, 52, }, { 2, 1, 2, 5, 58, 40, }, { 1, 1, 2, 5, 58, 50, }, @@ -41732,6 +42112,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 2, 5, 58, 30, }, { 8, 1, 2, 5, 58, 52, }, { 9, 1, 2, 5, 58, 40, }, + { 10, 1, 2, 5, 58, 40, }, + { 11, 1, 2, 5, 58, 40, }, { 0, 1, 2, 5, 106, 50, }, { 2, 1, 2, 5, 106, 40, }, { 1, 1, 2, 5, 106, 72, }, @@ -41742,6 +42124,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 2, 5, 106, 30, }, { 8, 1, 2, 5, 106, 50, }, { 9, 1, 2, 5, 106, 127, }, + { 10, 1, 2, 5, 106, 30, }, + { 11, 1, 2, 5, 106, 40, }, { 0, 1, 2, 5, 122, 66, }, { 2, 1, 2, 5, 122, 40, }, { 1, 1, 2, 5, 122, 72, }, @@ -41752,6 +42136,8 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 2, 5, 122, 30, }, { 8, 1, 2, 5, 122, 66, }, { 9, 1, 2, 5, 122, 127, }, + { 10, 1, 2, 5, 122, 30, }, + { 11, 1, 2, 5, 122, 40, }, { 0, 1, 2, 5, 138, 66, }, { 2, 1, 2, 5, 138, 127, }, { 1, 1, 2, 5, 138, 127, }, @@ -41762,8 +42148,10 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = { { 7, 1, 2, 5, 138, 127, }, { 8, 1, 2, 5, 138, 66, }, { 9, 1, 2, 5, 138, 127, }, + { 10, 1, 2, 5, 138, 127, }, + { 11, 1, 2, 5, 138, 60, }, { 0, 1, 2, 5, 155, 62, }, - { 2, 1, 2, 5, 155, 30, }, + { 2, 1, 2, 5, 155, 4, }, { 1, 1, 2, 5, 155, 127, }, { 3, 1, 2, 5, 155, 62, }, { 4, 1, 2, 5, 155, 58, }, @@ -41771,7 +42159,9 @@ static const struct rtw_txpwr_lmt_cfg_pair 
rtw8822c_txpwr_lmt_type0[] = { { 6, 1, 2, 5, 155, 62, }, { 7, 1, 2, 5, 155, 30, }, { 8, 1, 2, 5, 155, 62, }, - { 9, 1, 2, 5, 155, 30, }, + { 9, 1, 2, 5, 155, 4, }, + { 10, 1, 2, 5, 155, 4, }, + { 11, 1, 2, 5, 155, 40, }, }; RTW_DECL_TABLE_TXPWR_LMT(rtw8822c_txpwr_lmt_type0); @@ -41783,9 +42173,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 1, 72, }, { 4, 0, 0, 0, 1, 76, }, { 5, 0, 0, 0, 1, 56, }, - { 6, 0, 0, 0, 1, 72, }, - { 7, 0, 0, 0, 1, 60, }, - { 8, 0, 0, 0, 1, 72, }, { 9, 0, 0, 0, 1, 60, }, { 0, 0, 0, 0, 2, 72, }, { 2, 0, 0, 0, 2, 56, }, @@ -41793,9 +42180,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 2, 72, }, { 4, 0, 0, 0, 2, 76, }, { 5, 0, 0, 0, 2, 56, }, - { 6, 0, 0, 0, 2, 72, }, - { 7, 0, 0, 0, 2, 60, }, - { 8, 0, 0, 0, 2, 72, }, { 9, 0, 0, 0, 2, 60, }, { 0, 0, 0, 0, 3, 76, }, { 2, 0, 0, 0, 3, 56, }, @@ -41803,9 +42187,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 3, 76, }, { 4, 0, 0, 0, 3, 76, }, { 5, 0, 0, 0, 3, 56, }, - { 6, 0, 0, 0, 3, 76, }, - { 7, 0, 0, 0, 3, 60, }, - { 8, 0, 0, 0, 3, 76, }, { 9, 0, 0, 0, 3, 60, }, { 0, 0, 0, 0, 4, 76, }, { 2, 0, 0, 0, 4, 56, }, @@ -41813,9 +42194,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 4, 76, }, { 4, 0, 0, 0, 4, 76, }, { 5, 0, 0, 0, 4, 56, }, - { 6, 0, 0, 0, 4, 76, }, - { 7, 0, 0, 0, 4, 60, }, - { 8, 0, 0, 0, 4, 76, }, { 9, 0, 0, 0, 4, 60, }, { 0, 0, 0, 0, 5, 76, }, { 2, 0, 0, 0, 5, 56, }, @@ -41823,9 +42201,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 5, 76, }, { 4, 0, 0, 0, 5, 76, }, { 5, 0, 0, 0, 5, 56, }, - { 6, 0, 0, 0, 5, 76, }, - { 7, 0, 0, 0, 5, 60, }, - { 8, 0, 0, 0, 5, 76, }, { 9, 0, 0, 0, 5, 60, }, { 0, 0, 0, 0, 6, 76, }, { 2, 0, 0, 0, 6, 56, }, @@ -41833,9 +42208,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 6, 76, }, { 4, 0, 0, 0, 6, 76, }, { 5, 0, 0, 0, 6, 56, }, - { 6, 0, 0, 0, 6, 76, }, - { 7, 0, 0, 0, 6, 60, }, - { 8, 0, 0, 0, 6, 76, }, { 9, 0, 0, 0, 6, 60, }, { 0, 0, 0, 0, 7, 76, }, { 2, 0, 0, 0, 7, 56, }, @@ -41843,9 +42215,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 7, 76, }, { 4, 0, 0, 0, 7, 76, }, { 5, 0, 0, 0, 7, 56, }, - { 6, 0, 0, 0, 7, 76, }, - { 7, 0, 0, 0, 7, 60, }, - { 8, 0, 0, 0, 7, 76, }, { 9, 0, 0, 0, 7, 60, }, { 0, 0, 0, 0, 8, 76, }, { 2, 0, 0, 0, 8, 56, }, @@ -41853,9 +42222,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 8, 76, }, { 4, 0, 0, 0, 8, 76, }, { 5, 0, 0, 0, 8, 56, }, - { 6, 0, 0, 0, 8, 76, }, - { 7, 0, 0, 0, 8, 60, }, - { 8, 0, 0, 0, 8, 76, }, { 9, 0, 0, 0, 8, 60, }, { 0, 0, 0, 0, 9, 76, }, { 2, 0, 0, 0, 9, 56, }, @@ -41863,9 +42229,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 9, 76, }, { 4, 0, 0, 0, 9, 76, }, { 5, 0, 0, 0, 9, 56, }, - { 6, 0, 0, 0, 9, 76, }, - { 7, 0, 0, 0, 9, 60, }, - { 8, 0, 0, 0, 9, 76, }, { 9, 0, 0, 0, 9, 60, }, { 0, 0, 0, 0, 10, 72, }, { 2, 0, 0, 0, 10, 56, }, @@ -41873,9 +42236,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 10, 72, }, { 4, 0, 0, 0, 10, 76, }, { 5, 0, 0, 0, 10, 56, }, - { 6, 0, 0, 0, 10, 72, }, - { 7, 0, 0, 0, 10, 60, }, - { 8, 0, 0, 0, 10, 72, }, { 9, 0, 0, 0, 10, 60, }, { 0, 0, 0, 0, 11, 72, }, { 2, 0, 0, 0, 11, 56, }, @@ -41883,29 +42243,20 @@ static const struct 
rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 11, 72, }, { 4, 0, 0, 0, 11, 76, }, { 5, 0, 0, 0, 11, 56, }, - { 6, 0, 0, 0, 11, 72, }, - { 7, 0, 0, 0, 11, 60, }, - { 8, 0, 0, 0, 11, 72, }, { 9, 0, 0, 0, 11, 60, }, - { 0, 0, 0, 0, 12, 44, }, + { 0, 0, 0, 0, 12, 52, }, { 2, 0, 0, 0, 12, 56, }, { 1, 0, 0, 0, 12, 72, }, { 3, 0, 0, 0, 12, 52, }, { 4, 0, 0, 0, 12, 76, }, { 5, 0, 0, 0, 12, 56, }, - { 6, 0, 0, 0, 12, 52, }, - { 7, 0, 0, 0, 12, 60, }, - { 8, 0, 0, 0, 12, 52, }, { 9, 0, 0, 0, 12, 60, }, - { 0, 0, 0, 0, 13, 40, }, + { 0, 0, 0, 0, 13, 48, }, { 2, 0, 0, 0, 13, 56, }, { 1, 0, 0, 0, 13, 72, }, { 3, 0, 0, 0, 13, 48, }, { 4, 0, 0, 0, 13, 76, }, { 5, 0, 0, 0, 13, 56, }, - { 6, 0, 0, 0, 13, 48, }, - { 7, 0, 0, 0, 13, 60, }, - { 8, 0, 0, 0, 13, 48, }, { 9, 0, 0, 0, 13, 60, }, { 0, 0, 0, 0, 14, 127, }, { 2, 0, 0, 0, 14, 127, }, @@ -41913,9 +42264,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 0, 14, 127, }, { 4, 0, 0, 0, 14, 127, }, { 5, 0, 0, 0, 14, 127, }, - { 6, 0, 0, 0, 14, 127, }, - { 7, 0, 0, 0, 14, 127, }, - { 8, 0, 0, 0, 14, 127, }, { 9, 0, 0, 0, 14, 127, }, { 0, 0, 0, 1, 1, 52, }, { 2, 0, 0, 1, 1, 60, }, @@ -41923,9 +42271,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 1, 1, 52, }, { 4, 0, 0, 1, 1, 76, }, { 5, 0, 0, 1, 1, 60, }, - { 6, 0, 0, 1, 1, 52, }, - { 7, 0, 0, 1, 1, 60, }, - { 8, 0, 0, 1, 1, 52, }, { 9, 0, 0, 1, 1, 60, }, { 0, 0, 0, 1, 2, 60, }, { 2, 0, 0, 1, 2, 60, }, @@ -41933,9 +42278,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 1, 2, 60, }, { 4, 0, 0, 1, 2, 76, }, { 5, 0, 0, 1, 2, 60, }, - { 6, 0, 0, 1, 2, 60, }, - { 7, 0, 0, 1, 2, 60, }, - { 8, 0, 0, 1, 2, 60, }, { 9, 0, 0, 1, 2, 60, }, { 0, 0, 0, 1, 3, 64, }, { 2, 0, 0, 1, 3, 60, }, @@ -41943,9 +42285,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 1, 3, 64, }, { 4, 0, 0, 1, 3, 76, }, { 5, 0, 0, 1, 3, 60, }, - { 6, 0, 0, 1, 3, 64, }, - { 7, 0, 0, 1, 3, 60, }, - { 8, 0, 0, 1, 3, 64, }, { 9, 0, 0, 1, 3, 60, }, { 0, 0, 0, 1, 4, 68, }, { 2, 0, 0, 1, 4, 60, }, @@ -41953,9 +42292,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 1, 4, 68, }, { 4, 0, 0, 1, 4, 76, }, { 5, 0, 0, 1, 4, 60, }, - { 6, 0, 0, 1, 4, 68, }, - { 7, 0, 0, 1, 4, 60, }, - { 8, 0, 0, 1, 4, 68, }, { 9, 0, 0, 1, 4, 60, }, { 0, 0, 0, 1, 5, 76, }, { 2, 0, 0, 1, 5, 60, }, @@ -41963,9 +42299,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 1, 5, 76, }, { 4, 0, 0, 1, 5, 76, }, { 5, 0, 0, 1, 5, 60, }, - { 6, 0, 0, 1, 5, 76, }, - { 7, 0, 0, 1, 5, 60, }, - { 8, 0, 0, 1, 5, 76, }, { 9, 0, 0, 1, 5, 60, }, { 0, 0, 0, 1, 6, 76, }, { 2, 0, 0, 1, 6, 60, }, @@ -41973,9 +42306,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 1, 6, 76, }, { 4, 0, 0, 1, 6, 76, }, { 5, 0, 0, 1, 6, 60, }, - { 6, 0, 0, 1, 6, 76, }, - { 7, 0, 0, 1, 6, 60, }, - { 8, 0, 0, 1, 6, 76, }, { 9, 0, 0, 1, 6, 60, }, { 0, 0, 0, 1, 7, 76, }, { 2, 0, 0, 1, 7, 60, }, @@ -41983,9 +42313,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 1, 7, 76, }, { 4, 0, 0, 1, 7, 76, }, { 5, 0, 0, 1, 7, 60, }, - { 6, 0, 0, 1, 7, 76, }, - { 7, 0, 0, 1, 7, 60, }, - { 8, 0, 0, 1, 7, 76, }, { 9, 0, 0, 1, 7, 60, }, { 0, 0, 0, 1, 8, 68, }, { 2, 0, 0, 1, 8, 60, }, @@ -41993,9 +42320,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 1, 8, 68, }, { 
4, 0, 0, 1, 8, 76, }, { 5, 0, 0, 1, 8, 60, }, - { 6, 0, 0, 1, 8, 68, }, - { 7, 0, 0, 1, 8, 60, }, - { 8, 0, 0, 1, 8, 68, }, { 9, 0, 0, 1, 8, 60, }, { 0, 0, 0, 1, 9, 64, }, { 2, 0, 0, 1, 9, 60, }, @@ -42003,9 +42327,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 1, 9, 64, }, { 4, 0, 0, 1, 9, 76, }, { 5, 0, 0, 1, 9, 60, }, - { 6, 0, 0, 1, 9, 64, }, - { 7, 0, 0, 1, 9, 60, }, - { 8, 0, 0, 1, 9, 64, }, { 9, 0, 0, 1, 9, 60, }, { 0, 0, 0, 1, 10, 60, }, { 2, 0, 0, 1, 10, 60, }, @@ -42013,9 +42334,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 1, 10, 60, }, { 4, 0, 0, 1, 10, 76, }, { 5, 0, 0, 1, 10, 60, }, - { 6, 0, 0, 1, 10, 60, }, - { 7, 0, 0, 1, 10, 60, }, - { 8, 0, 0, 1, 10, 60, }, { 9, 0, 0, 1, 10, 60, }, { 0, 0, 0, 1, 11, 52, }, { 2, 0, 0, 1, 11, 60, }, @@ -42023,39 +42341,27 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 1, 11, 52, }, { 4, 0, 0, 1, 11, 76, }, { 5, 0, 0, 1, 11, 60, }, - { 6, 0, 0, 1, 11, 52, }, - { 7, 0, 0, 1, 11, 60, }, - { 8, 0, 0, 1, 11, 52, }, - { 9, 0, 0, 1, 11, 60, }, - { 0, 0, 0, 1, 12, 32, }, + { 9, 0, 0, 1, 11, 52, }, + { 0, 0, 0, 1, 12, 40, }, { 2, 0, 0, 1, 12, 60, }, { 1, 0, 0, 1, 12, 76, }, { 3, 0, 0, 1, 12, 40, }, { 4, 0, 0, 1, 12, 76, }, { 5, 0, 0, 1, 12, 60, }, - { 6, 0, 0, 1, 12, 40, }, - { 7, 0, 0, 1, 12, 60, }, - { 8, 0, 0, 1, 12, 40, }, - { 9, 0, 0, 1, 12, 60, }, - { 0, 0, 0, 1, 13, 20, }, + { 9, 0, 0, 1, 12, 48, }, + { 0, 0, 0, 1, 13, 28, }, { 2, 0, 0, 1, 13, 60, }, { 1, 0, 0, 1, 13, 76, }, { 3, 0, 0, 1, 13, 28, }, { 4, 0, 0, 1, 13, 74, }, { 5, 0, 0, 1, 13, 60, }, - { 6, 0, 0, 1, 13, 28, }, - { 7, 0, 0, 1, 13, 60, }, - { 8, 0, 0, 1, 13, 28, }, - { 9, 0, 0, 1, 13, 60, }, + { 9, 0, 0, 1, 13, 40, }, { 0, 0, 0, 1, 14, 127, }, { 2, 0, 0, 1, 14, 127, }, { 1, 0, 0, 1, 14, 127, }, { 3, 0, 0, 1, 14, 127, }, { 4, 0, 0, 1, 14, 127, }, { 5, 0, 0, 1, 14, 127, }, - { 6, 0, 0, 1, 14, 127, }, - { 7, 0, 0, 1, 14, 127, }, - { 8, 0, 0, 1, 14, 127, }, { 9, 0, 0, 1, 14, 127, }, { 0, 0, 0, 2, 1, 52, }, { 2, 0, 0, 2, 1, 60, }, @@ -42063,9 +42369,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 2, 1, 52, }, { 4, 0, 0, 2, 1, 76, }, { 5, 0, 0, 2, 1, 60, }, - { 6, 0, 0, 2, 1, 52, }, - { 7, 0, 0, 2, 1, 60, }, - { 8, 0, 0, 2, 1, 52, }, { 9, 0, 0, 2, 1, 60, }, { 0, 0, 0, 2, 2, 60, }, { 2, 0, 0, 2, 2, 60, }, @@ -42073,9 +42376,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 2, 2, 60, }, { 4, 0, 0, 2, 2, 76, }, { 5, 0, 0, 2, 2, 60, }, - { 6, 0, 0, 2, 2, 60, }, - { 7, 0, 0, 2, 2, 60, }, - { 8, 0, 0, 2, 2, 60, }, { 9, 0, 0, 2, 2, 60, }, { 0, 0, 0, 2, 3, 64, }, { 2, 0, 0, 2, 3, 60, }, @@ -42083,9 +42383,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 2, 3, 64, }, { 4, 0, 0, 2, 3, 76, }, { 5, 0, 0, 2, 3, 60, }, - { 6, 0, 0, 2, 3, 64, }, - { 7, 0, 0, 2, 3, 60, }, - { 8, 0, 0, 2, 3, 64, }, { 9, 0, 0, 2, 3, 60, }, { 0, 0, 0, 2, 4, 68, }, { 2, 0, 0, 2, 4, 60, }, @@ -42093,9 +42390,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 2, 4, 68, }, { 4, 0, 0, 2, 4, 76, }, { 5, 0, 0, 2, 4, 60, }, - { 6, 0, 0, 2, 4, 68, }, - { 7, 0, 0, 2, 4, 60, }, - { 8, 0, 0, 2, 4, 68, }, { 9, 0, 0, 2, 4, 60, }, { 0, 0, 0, 2, 5, 76, }, { 2, 0, 0, 2, 5, 60, }, @@ -42103,9 +42397,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 2, 5, 76, }, { 4, 0, 0, 2, 5, 76, }, { 5, 0, 0, 2, 5, 60, }, - { 
6, 0, 0, 2, 5, 76, }, - { 7, 0, 0, 2, 5, 60, }, - { 8, 0, 0, 2, 5, 76, }, { 9, 0, 0, 2, 5, 60, }, { 0, 0, 0, 2, 6, 76, }, { 2, 0, 0, 2, 6, 60, }, @@ -42113,9 +42404,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 2, 6, 76, }, { 4, 0, 0, 2, 6, 76, }, { 5, 0, 0, 2, 6, 60, }, - { 6, 0, 0, 2, 6, 76, }, - { 7, 0, 0, 2, 6, 60, }, - { 8, 0, 0, 2, 6, 76, }, { 9, 0, 0, 2, 6, 60, }, { 0, 0, 0, 2, 7, 76, }, { 2, 0, 0, 2, 7, 60, }, @@ -42123,9 +42411,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 2, 7, 76, }, { 4, 0, 0, 2, 7, 76, }, { 5, 0, 0, 2, 7, 60, }, - { 6, 0, 0, 2, 7, 76, }, - { 7, 0, 0, 2, 7, 60, }, - { 8, 0, 0, 2, 7, 76, }, { 9, 0, 0, 2, 7, 60, }, { 0, 0, 0, 2, 8, 68, }, { 2, 0, 0, 2, 8, 60, }, @@ -42133,9 +42418,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 2, 8, 68, }, { 4, 0, 0, 2, 8, 76, }, { 5, 0, 0, 2, 8, 60, }, - { 6, 0, 0, 2, 8, 68, }, - { 7, 0, 0, 2, 8, 60, }, - { 8, 0, 0, 2, 8, 68, }, { 9, 0, 0, 2, 8, 60, }, { 0, 0, 0, 2, 9, 64, }, { 2, 0, 0, 2, 9, 60, }, @@ -42143,9 +42425,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 2, 9, 64, }, { 4, 0, 0, 2, 9, 76, }, { 5, 0, 0, 2, 9, 60, }, - { 6, 0, 0, 2, 9, 64, }, - { 7, 0, 0, 2, 9, 60, }, - { 8, 0, 0, 2, 9, 64, }, { 9, 0, 0, 2, 9, 60, }, { 0, 0, 0, 2, 10, 60, }, { 2, 0, 0, 2, 10, 60, }, @@ -42153,9 +42432,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 2, 10, 60, }, { 4, 0, 0, 2, 10, 76, }, { 5, 0, 0, 2, 10, 60, }, - { 6, 0, 0, 2, 10, 60, }, - { 7, 0, 0, 2, 10, 60, }, - { 8, 0, 0, 2, 10, 60, }, { 9, 0, 0, 2, 10, 60, }, { 0, 0, 0, 2, 11, 52, }, { 2, 0, 0, 2, 11, 60, }, @@ -42163,39 +42439,27 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 2, 11, 52, }, { 4, 0, 0, 2, 11, 76, }, { 5, 0, 0, 2, 11, 60, }, - { 6, 0, 0, 2, 11, 52, }, - { 7, 0, 0, 2, 11, 60, }, - { 8, 0, 0, 2, 11, 52, }, - { 9, 0, 0, 2, 11, 60, }, - { 0, 0, 0, 2, 12, 32, }, + { 9, 0, 0, 2, 11, 52, }, + { 0, 0, 0, 2, 12, 40, }, { 2, 0, 0, 2, 12, 60, }, { 1, 0, 0, 2, 12, 76, }, { 3, 0, 0, 2, 12, 40, }, { 4, 0, 0, 2, 12, 76, }, { 5, 0, 0, 2, 12, 60, }, - { 6, 0, 0, 2, 12, 40, }, - { 7, 0, 0, 2, 12, 60, }, - { 8, 0, 0, 2, 12, 40, }, - { 9, 0, 0, 2, 12, 60, }, - { 0, 0, 0, 2, 13, 20, }, + { 9, 0, 0, 2, 12, 48, }, + { 0, 0, 0, 2, 13, 28, }, { 2, 0, 0, 2, 13, 60, }, { 1, 0, 0, 2, 13, 76, }, { 3, 0, 0, 2, 13, 28, }, { 4, 0, 0, 2, 13, 74, }, { 5, 0, 0, 2, 13, 60, }, - { 6, 0, 0, 2, 13, 28, }, - { 7, 0, 0, 2, 13, 60, }, - { 8, 0, 0, 2, 13, 28, }, - { 9, 0, 0, 2, 13, 60, }, + { 9, 0, 0, 2, 13, 40, }, { 0, 0, 0, 2, 14, 127, }, { 2, 0, 0, 2, 14, 127, }, { 1, 0, 0, 2, 14, 127, }, { 3, 0, 0, 2, 14, 127, }, { 4, 0, 0, 2, 14, 127, }, { 5, 0, 0, 2, 14, 127, }, - { 6, 0, 0, 2, 14, 127, }, - { 7, 0, 0, 2, 14, 127, }, - { 8, 0, 0, 2, 14, 127, }, { 9, 0, 0, 2, 14, 127, }, { 0, 0, 0, 3, 1, 52, }, { 2, 0, 0, 3, 1, 36, }, @@ -42203,9 +42467,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 3, 1, 52, }, { 4, 0, 0, 3, 1, 72, }, { 5, 0, 0, 3, 1, 36, }, - { 6, 0, 0, 3, 1, 52, }, - { 7, 0, 0, 3, 1, 36, }, - { 8, 0, 0, 3, 1, 52, }, { 9, 0, 0, 3, 1, 36, }, { 0, 0, 0, 3, 2, 60, }, { 2, 0, 0, 3, 2, 36, }, @@ -42213,9 +42474,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 3, 2, 60, }, { 4, 0, 0, 3, 2, 72, }, { 5, 0, 0, 3, 2, 36, }, - { 6, 0, 0, 3, 2, 60, }, - { 7, 0, 0, 3, 2, 36, }, - { 
8, 0, 0, 3, 2, 60, }, { 9, 0, 0, 3, 2, 36, }, { 0, 0, 0, 3, 3, 64, }, { 2, 0, 0, 3, 3, 36, }, @@ -42223,9 +42481,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 3, 3, 64, }, { 4, 0, 0, 3, 3, 72, }, { 5, 0, 0, 3, 3, 36, }, - { 6, 0, 0, 3, 3, 64, }, - { 7, 0, 0, 3, 3, 36, }, - { 8, 0, 0, 3, 3, 64, }, { 9, 0, 0, 3, 3, 36, }, { 0, 0, 0, 3, 4, 68, }, { 2, 0, 0, 3, 4, 36, }, @@ -42233,9 +42488,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 3, 4, 68, }, { 4, 0, 0, 3, 4, 72, }, { 5, 0, 0, 3, 4, 36, }, - { 6, 0, 0, 3, 4, 68, }, - { 7, 0, 0, 3, 4, 36, }, - { 8, 0, 0, 3, 4, 68, }, { 9, 0, 0, 3, 4, 36, }, { 0, 0, 0, 3, 5, 76, }, { 2, 0, 0, 3, 5, 36, }, @@ -42243,9 +42495,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 3, 5, 76, }, { 4, 0, 0, 3, 5, 72, }, { 5, 0, 0, 3, 5, 36, }, - { 6, 0, 0, 3, 5, 76, }, - { 7, 0, 0, 3, 5, 36, }, - { 8, 0, 0, 3, 5, 76, }, { 9, 0, 0, 3, 5, 36, }, { 0, 0, 0, 3, 6, 76, }, { 2, 0, 0, 3, 6, 36, }, @@ -42253,9 +42502,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 3, 6, 76, }, { 4, 0, 0, 3, 6, 72, }, { 5, 0, 0, 3, 6, 36, }, - { 6, 0, 0, 3, 6, 76, }, - { 7, 0, 0, 3, 6, 36, }, - { 8, 0, 0, 3, 6, 76, }, { 9, 0, 0, 3, 6, 36, }, { 0, 0, 0, 3, 7, 76, }, { 2, 0, 0, 3, 7, 36, }, @@ -42263,9 +42509,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 3, 7, 76, }, { 4, 0, 0, 3, 7, 72, }, { 5, 0, 0, 3, 7, 36, }, - { 6, 0, 0, 3, 7, 76, }, - { 7, 0, 0, 3, 7, 36, }, - { 8, 0, 0, 3, 7, 76, }, { 9, 0, 0, 3, 7, 36, }, { 0, 0, 0, 3, 8, 68, }, { 2, 0, 0, 3, 8, 36, }, @@ -42273,9 +42516,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 3, 8, 68, }, { 4, 0, 0, 3, 8, 72, }, { 5, 0, 0, 3, 8, 36, }, - { 6, 0, 0, 3, 8, 68, }, - { 7, 0, 0, 3, 8, 36, }, - { 8, 0, 0, 3, 8, 68, }, { 9, 0, 0, 3, 8, 36, }, { 0, 0, 0, 3, 9, 64, }, { 2, 0, 0, 3, 9, 36, }, @@ -42283,9 +42523,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 3, 9, 64, }, { 4, 0, 0, 3, 9, 72, }, { 5, 0, 0, 3, 9, 36, }, - { 6, 0, 0, 3, 9, 64, }, - { 7, 0, 0, 3, 9, 36, }, - { 8, 0, 0, 3, 9, 64, }, { 9, 0, 0, 3, 9, 36, }, { 0, 0, 0, 3, 10, 60, }, { 2, 0, 0, 3, 10, 36, }, @@ -42293,9 +42530,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 3, 10, 60, }, { 4, 0, 0, 3, 10, 72, }, { 5, 0, 0, 3, 10, 36, }, - { 6, 0, 0, 3, 10, 60, }, - { 7, 0, 0, 3, 10, 36, }, - { 8, 0, 0, 3, 10, 60, }, { 9, 0, 0, 3, 10, 36, }, { 0, 0, 0, 3, 11, 52, }, { 2, 0, 0, 3, 11, 36, }, @@ -42303,39 +42537,27 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 0, 3, 11, 52, }, { 4, 0, 0, 3, 11, 72, }, { 5, 0, 0, 3, 11, 36, }, - { 6, 0, 0, 3, 11, 52, }, - { 7, 0, 0, 3, 11, 36, }, - { 8, 0, 0, 3, 11, 52, }, - { 9, 0, 0, 3, 11, 36, }, - { 0, 0, 0, 3, 12, 32, }, + { 9, 0, 0, 3, 11, 40, }, + { 0, 0, 0, 3, 12, 40, }, { 2, 0, 0, 3, 12, 36, }, { 1, 0, 0, 3, 12, 66, }, { 3, 0, 0, 3, 12, 40, }, { 4, 0, 0, 3, 12, 72, }, { 5, 0, 0, 3, 12, 36, }, - { 6, 0, 0, 3, 12, 40, }, - { 7, 0, 0, 3, 12, 36, }, - { 8, 0, 0, 3, 12, 40, }, { 9, 0, 0, 3, 12, 36, }, - { 0, 0, 0, 3, 13, 20, }, + { 0, 0, 0, 3, 13, 28, }, { 2, 0, 0, 3, 13, 36, }, { 1, 0, 0, 3, 13, 66, }, { 3, 0, 0, 3, 13, 28, }, { 4, 0, 0, 3, 13, 68, }, { 5, 0, 0, 3, 13, 36, }, - { 6, 0, 0, 3, 13, 28, }, - { 7, 0, 0, 3, 13, 36, }, - { 8, 0, 0, 3, 13, 28, }, - { 9, 0, 0, 3, 13, 36, }, + { 9, 
0, 0, 3, 13, 28, }, { 0, 0, 0, 3, 14, 127, }, { 2, 0, 0, 3, 14, 127, }, { 1, 0, 0, 3, 14, 127, }, { 3, 0, 0, 3, 14, 127, }, { 4, 0, 0, 3, 14, 127, }, { 5, 0, 0, 3, 14, 127, }, - { 6, 0, 0, 3, 14, 127, }, - { 7, 0, 0, 3, 14, 127, }, - { 8, 0, 0, 3, 14, 127, }, { 9, 0, 0, 3, 14, 127, }, { 0, 0, 1, 2, 1, 127, }, { 2, 0, 1, 2, 1, 127, }, @@ -42343,29 +42565,20 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 2, 1, 127, }, { 4, 0, 1, 2, 1, 127, }, { 5, 0, 1, 2, 1, 127, }, - { 6, 0, 1, 2, 1, 127, }, - { 7, 0, 1, 2, 1, 127, }, - { 8, 0, 1, 2, 1, 127, }, - { 9, 0, 1, 2, 1, 127, }, + { 9, 0, 1, 2, 1, 60, }, { 0, 0, 1, 2, 2, 127, }, { 2, 0, 1, 2, 2, 127, }, { 1, 0, 1, 2, 2, 127, }, { 3, 0, 1, 2, 2, 127, }, { 4, 0, 1, 2, 2, 127, }, { 5, 0, 1, 2, 2, 127, }, - { 6, 0, 1, 2, 2, 127, }, - { 7, 0, 1, 2, 2, 127, }, - { 8, 0, 1, 2, 2, 127, }, - { 9, 0, 1, 2, 2, 127, }, + { 9, 0, 1, 2, 2, 60, }, { 0, 0, 1, 2, 3, 52, }, { 2, 0, 1, 2, 3, 60, }, { 1, 0, 1, 2, 3, 72, }, { 3, 0, 1, 2, 3, 52, }, { 4, 0, 1, 2, 3, 72, }, { 5, 0, 1, 2, 3, 60, }, - { 6, 0, 1, 2, 3, 52, }, - { 7, 0, 1, 2, 3, 60, }, - { 8, 0, 1, 2, 3, 52, }, { 9, 0, 1, 2, 3, 60, }, { 0, 0, 1, 2, 4, 52, }, { 2, 0, 1, 2, 4, 60, }, @@ -42373,9 +42586,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 2, 4, 52, }, { 4, 0, 1, 2, 4, 72, }, { 5, 0, 1, 2, 4, 60, }, - { 6, 0, 1, 2, 4, 52, }, - { 7, 0, 1, 2, 4, 60, }, - { 8, 0, 1, 2, 4, 52, }, { 9, 0, 1, 2, 4, 60, }, { 0, 0, 1, 2, 5, 60, }, { 2, 0, 1, 2, 5, 60, }, @@ -42383,9 +42593,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 2, 5, 60, }, { 4, 0, 1, 2, 5, 72, }, { 5, 0, 1, 2, 5, 60, }, - { 6, 0, 1, 2, 5, 60, }, - { 7, 0, 1, 2, 5, 60, }, - { 8, 0, 1, 2, 5, 60, }, { 9, 0, 1, 2, 5, 60, }, { 0, 0, 1, 2, 6, 64, }, { 2, 0, 1, 2, 6, 60, }, @@ -42393,9 +42600,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 2, 6, 64, }, { 4, 0, 1, 2, 6, 72, }, { 5, 0, 1, 2, 6, 60, }, - { 6, 0, 1, 2, 6, 64, }, - { 7, 0, 1, 2, 6, 60, }, - { 8, 0, 1, 2, 6, 64, }, { 9, 0, 1, 2, 6, 60, }, { 0, 0, 1, 2, 7, 60, }, { 2, 0, 1, 2, 7, 60, }, @@ -42403,9 +42607,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 2, 7, 60, }, { 4, 0, 1, 2, 7, 72, }, { 5, 0, 1, 2, 7, 60, }, - { 6, 0, 1, 2, 7, 60, }, - { 7, 0, 1, 2, 7, 60, }, - { 8, 0, 1, 2, 7, 60, }, { 9, 0, 1, 2, 7, 60, }, { 0, 0, 1, 2, 8, 52, }, { 2, 0, 1, 2, 8, 60, }, @@ -42413,9 +42614,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 2, 8, 52, }, { 4, 0, 1, 2, 8, 72, }, { 5, 0, 1, 2, 8, 60, }, - { 6, 0, 1, 2, 8, 52, }, - { 7, 0, 1, 2, 8, 60, }, - { 8, 0, 1, 2, 8, 52, }, { 9, 0, 1, 2, 8, 60, }, { 0, 0, 1, 2, 9, 52, }, { 2, 0, 1, 2, 9, 60, }, @@ -42423,9 +42621,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 2, 9, 52, }, { 4, 0, 1, 2, 9, 72, }, { 5, 0, 1, 2, 9, 60, }, - { 6, 0, 1, 2, 9, 52, }, - { 7, 0, 1, 2, 9, 60, }, - { 8, 0, 1, 2, 9, 52, }, { 9, 0, 1, 2, 9, 60, }, { 0, 0, 1, 2, 10, 40, }, { 2, 0, 1, 2, 10, 60, }, @@ -42433,9 +42628,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 2, 10, 40, }, { 4, 0, 1, 2, 10, 72, }, { 5, 0, 1, 2, 10, 60, }, - { 6, 0, 1, 2, 10, 40, }, - { 7, 0, 1, 2, 10, 60, }, - { 8, 0, 1, 2, 10, 40, }, { 9, 0, 1, 2, 10, 60, }, { 0, 0, 1, 2, 11, 28, }, { 2, 0, 1, 2, 11, 60, }, @@ -42443,39 +42635,27 @@ static const struct rtw_txpwr_lmt_cfg_pair 
rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 2, 11, 28, }, { 4, 0, 1, 2, 11, 70, }, { 5, 0, 1, 2, 11, 60, }, - { 6, 0, 1, 2, 11, 28, }, - { 7, 0, 1, 2, 11, 60, }, - { 8, 0, 1, 2, 11, 28, }, - { 9, 0, 1, 2, 11, 60, }, + { 9, 0, 1, 2, 11, 44, }, { 0, 0, 1, 2, 12, 127, }, { 2, 0, 1, 2, 12, 127, }, { 1, 0, 1, 2, 12, 127, }, { 3, 0, 1, 2, 12, 127, }, { 4, 0, 1, 2, 12, 127, }, { 5, 0, 1, 2, 12, 127, }, - { 6, 0, 1, 2, 12, 127, }, - { 7, 0, 1, 2, 12, 127, }, - { 8, 0, 1, 2, 12, 127, }, - { 9, 0, 1, 2, 12, 127, }, + { 9, 0, 1, 2, 12, 44, }, { 0, 0, 1, 2, 13, 127, }, { 2, 0, 1, 2, 13, 127, }, { 1, 0, 1, 2, 13, 127, }, { 3, 0, 1, 2, 13, 127, }, { 4, 0, 1, 2, 13, 127, }, { 5, 0, 1, 2, 13, 127, }, - { 6, 0, 1, 2, 13, 127, }, - { 7, 0, 1, 2, 13, 127, }, - { 8, 0, 1, 2, 13, 127, }, - { 9, 0, 1, 2, 13, 127, }, + { 9, 0, 1, 2, 13, 20, }, { 0, 0, 1, 2, 14, 127, }, { 2, 0, 1, 2, 14, 127, }, { 1, 0, 1, 2, 14, 127, }, { 3, 0, 1, 2, 14, 127, }, { 4, 0, 1, 2, 14, 127, }, { 5, 0, 1, 2, 14, 127, }, - { 6, 0, 1, 2, 14, 127, }, - { 7, 0, 1, 2, 14, 127, }, - { 8, 0, 1, 2, 14, 127, }, { 9, 0, 1, 2, 14, 127, }, { 0, 0, 1, 3, 1, 127, }, { 2, 0, 1, 3, 1, 127, }, @@ -42483,29 +42663,20 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 3, 1, 127, }, { 4, 0, 1, 3, 1, 127, }, { 5, 0, 1, 3, 1, 127, }, - { 6, 0, 1, 3, 1, 127, }, - { 7, 0, 1, 3, 1, 127, }, - { 8, 0, 1, 3, 1, 127, }, - { 9, 0, 1, 3, 1, 127, }, + { 9, 0, 1, 3, 1, 36, }, { 0, 0, 1, 3, 2, 127, }, { 2, 0, 1, 3, 2, 127, }, { 1, 0, 1, 3, 2, 127, }, { 3, 0, 1, 3, 2, 127, }, { 4, 0, 1, 3, 2, 127, }, { 5, 0, 1, 3, 2, 127, }, - { 6, 0, 1, 3, 2, 127, }, - { 7, 0, 1, 3, 2, 127, }, - { 8, 0, 1, 3, 2, 127, }, - { 9, 0, 1, 3, 2, 127, }, + { 9, 0, 1, 3, 2, 36, }, { 0, 0, 1, 3, 3, 48, }, { 2, 0, 1, 3, 3, 36, }, { 1, 0, 1, 3, 3, 66, }, { 3, 0, 1, 3, 3, 48, }, { 4, 0, 1, 3, 3, 68, }, { 5, 0, 1, 3, 3, 36, }, - { 6, 0, 1, 3, 3, 48, }, - { 7, 0, 1, 3, 3, 36, }, - { 8, 0, 1, 3, 3, 48, }, { 9, 0, 1, 3, 3, 36, }, { 0, 0, 1, 3, 4, 48, }, { 2, 0, 1, 3, 4, 36, }, @@ -42513,9 +42684,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 3, 4, 48, }, { 4, 0, 1, 3, 4, 70, }, { 5, 0, 1, 3, 4, 36, }, - { 6, 0, 1, 3, 4, 48, }, - { 7, 0, 1, 3, 4, 36, }, - { 8, 0, 1, 3, 4, 48, }, { 9, 0, 1, 3, 4, 36, }, { 0, 0, 1, 3, 5, 60, }, { 2, 0, 1, 3, 5, 36, }, @@ -42523,9 +42691,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 3, 5, 60, }, { 4, 0, 1, 3, 5, 70, }, { 5, 0, 1, 3, 5, 36, }, - { 6, 0, 1, 3, 5, 60, }, - { 7, 0, 1, 3, 5, 36, }, - { 8, 0, 1, 3, 5, 60, }, { 9, 0, 1, 3, 5, 36, }, { 0, 0, 1, 3, 6, 64, }, { 2, 0, 1, 3, 6, 36, }, @@ -42533,9 +42698,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 3, 6, 64, }, { 4, 0, 1, 3, 6, 70, }, { 5, 0, 1, 3, 6, 36, }, - { 6, 0, 1, 3, 6, 64, }, - { 7, 0, 1, 3, 6, 36, }, - { 8, 0, 1, 3, 6, 64, }, { 9, 0, 1, 3, 6, 36, }, { 0, 0, 1, 3, 7, 60, }, { 2, 0, 1, 3, 7, 36, }, @@ -42543,9 +42705,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 3, 7, 60, }, { 4, 0, 1, 3, 7, 70, }, { 5, 0, 1, 3, 7, 36, }, - { 6, 0, 1, 3, 7, 60, }, - { 7, 0, 1, 3, 7, 36, }, - { 8, 0, 1, 3, 7, 60, }, { 9, 0, 1, 3, 7, 36, }, { 0, 0, 1, 3, 8, 52, }, { 2, 0, 1, 3, 8, 36, }, @@ -42553,9 +42712,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 3, 8, 52, }, { 4, 0, 1, 3, 8, 70, }, { 5, 0, 1, 3, 8, 36, }, - { 6, 0, 1, 3, 8, 52, }, - { 7, 0, 1, 3, 8, 36, }, - { 8, 0, 1, 3, 8, 
52, }, { 9, 0, 1, 3, 8, 36, }, { 0, 0, 1, 3, 9, 52, }, { 2, 0, 1, 3, 9, 36, }, @@ -42563,9 +42719,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 3, 9, 52, }, { 4, 0, 1, 3, 9, 70, }, { 5, 0, 1, 3, 9, 36, }, - { 6, 0, 1, 3, 9, 52, }, - { 7, 0, 1, 3, 9, 36, }, - { 8, 0, 1, 3, 9, 52, }, { 9, 0, 1, 3, 9, 36, }, { 0, 0, 1, 3, 10, 40, }, { 2, 0, 1, 3, 10, 36, }, @@ -42573,9 +42726,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 3, 10, 40, }, { 4, 0, 1, 3, 10, 70, }, { 5, 0, 1, 3, 10, 36, }, - { 6, 0, 1, 3, 10, 40, }, - { 7, 0, 1, 3, 10, 36, }, - { 8, 0, 1, 3, 10, 40, }, { 9, 0, 1, 3, 10, 36, }, { 0, 0, 1, 3, 11, 26, }, { 2, 0, 1, 3, 11, 36, }, @@ -42583,39 +42733,27 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 0, 1, 3, 11, 26, }, { 4, 0, 1, 3, 11, 66, }, { 5, 0, 1, 3, 11, 36, }, - { 6, 0, 1, 3, 11, 26, }, - { 7, 0, 1, 3, 11, 36, }, - { 8, 0, 1, 3, 11, 26, }, - { 9, 0, 1, 3, 11, 36, }, + { 9, 0, 1, 3, 11, 32, }, { 0, 0, 1, 3, 12, 127, }, { 2, 0, 1, 3, 12, 127, }, { 1, 0, 1, 3, 12, 127, }, { 3, 0, 1, 3, 12, 127, }, { 4, 0, 1, 3, 12, 127, }, { 5, 0, 1, 3, 12, 127, }, - { 6, 0, 1, 3, 12, 127, }, - { 7, 0, 1, 3, 12, 127, }, - { 8, 0, 1, 3, 12, 127, }, - { 9, 0, 1, 3, 12, 127, }, + { 9, 0, 1, 3, 12, 32, }, { 0, 0, 1, 3, 13, 127, }, { 2, 0, 1, 3, 13, 127, }, { 1, 0, 1, 3, 13, 127, }, { 3, 0, 1, 3, 13, 127, }, { 4, 0, 1, 3, 13, 127, }, { 5, 0, 1, 3, 13, 127, }, - { 6, 0, 1, 3, 13, 127, }, - { 7, 0, 1, 3, 13, 127, }, - { 8, 0, 1, 3, 13, 127, }, - { 9, 0, 1, 3, 13, 127, }, + { 9, 0, 1, 3, 13, 8, }, { 0, 0, 1, 3, 14, 127, }, { 2, 0, 1, 3, 14, 127, }, { 1, 0, 1, 3, 14, 127, }, { 3, 0, 1, 3, 14, 127, }, { 4, 0, 1, 3, 14, 127, }, { 5, 0, 1, 3, 14, 127, }, - { 6, 0, 1, 3, 14, 127, }, - { 7, 0, 1, 3, 14, 127, }, - { 8, 0, 1, 3, 14, 127, }, { 9, 0, 1, 3, 14, 127, }, { 0, 1, 0, 1, 36, 74, }, { 2, 1, 0, 1, 36, 58, }, @@ -42623,89 +42761,62 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 36, 62, }, { 4, 1, 0, 1, 36, 74, }, { 5, 1, 0, 1, 36, 58, }, - { 6, 1, 0, 1, 36, 64, }, - { 7, 1, 0, 1, 36, 54, }, - { 8, 1, 0, 1, 36, 62, }, - { 9, 1, 0, 1, 36, 62, }, + { 9, 1, 0, 1, 36, 64, }, { 0, 1, 0, 1, 40, 76, }, { 2, 1, 0, 1, 40, 58, }, { 1, 1, 0, 1, 40, 62, }, { 3, 1, 0, 1, 40, 62, }, { 4, 1, 0, 1, 40, 76, }, { 5, 1, 0, 1, 40, 58, }, - { 6, 1, 0, 1, 40, 64, }, - { 7, 1, 0, 1, 40, 54, }, - { 8, 1, 0, 1, 40, 62, }, - { 9, 1, 0, 1, 40, 62, }, + { 9, 1, 0, 1, 40, 64, }, { 0, 1, 0, 1, 44, 76, }, { 2, 1, 0, 1, 44, 58, }, { 1, 1, 0, 1, 44, 62, }, { 3, 1, 0, 1, 44, 62, }, { 4, 1, 0, 1, 44, 76, }, { 5, 1, 0, 1, 44, 58, }, - { 6, 1, 0, 1, 44, 64, }, - { 7, 1, 0, 1, 44, 54, }, - { 8, 1, 0, 1, 44, 62, }, - { 9, 1, 0, 1, 44, 62, }, + { 9, 1, 0, 1, 44, 64, }, { 0, 1, 0, 1, 48, 76, }, { 2, 1, 0, 1, 48, 58, }, { 1, 1, 0, 1, 48, 62, }, { 3, 1, 0, 1, 48, 62, }, { 4, 1, 0, 1, 48, 58, }, { 5, 1, 0, 1, 48, 58, }, - { 6, 1, 0, 1, 48, 64, }, - { 7, 1, 0, 1, 48, 54, }, - { 8, 1, 0, 1, 48, 62, }, - { 9, 1, 0, 1, 48, 62, }, + { 9, 1, 0, 1, 48, 64, }, { 0, 1, 0, 1, 52, 76, }, { 2, 1, 0, 1, 52, 58, }, { 1, 1, 0, 1, 52, 62, }, { 3, 1, 0, 1, 52, 64, }, { 4, 1, 0, 1, 52, 76, }, { 5, 1, 0, 1, 52, 58, }, - { 6, 1, 0, 1, 52, 76, }, - { 7, 1, 0, 1, 52, 54, }, - { 8, 1, 0, 1, 52, 76, }, - { 9, 1, 0, 1, 52, 62, }, + { 9, 1, 0, 1, 52, 64, }, { 0, 1, 0, 1, 56, 76, }, { 2, 1, 0, 1, 56, 58, }, { 1, 1, 0, 1, 56, 62, }, { 3, 1, 0, 1, 56, 64, }, { 4, 1, 0, 1, 56, 76, }, { 5, 1, 0, 1, 56, 58, }, - { 6, 1, 
0, 1, 56, 76, }, - { 7, 1, 0, 1, 56, 54, }, - { 8, 1, 0, 1, 56, 76, }, - { 9, 1, 0, 1, 56, 62, }, + { 9, 1, 0, 1, 56, 64, }, { 0, 1, 0, 1, 60, 76, }, { 2, 1, 0, 1, 60, 58, }, { 1, 1, 0, 1, 60, 62, }, { 3, 1, 0, 1, 60, 64, }, { 4, 1, 0, 1, 60, 76, }, { 5, 1, 0, 1, 60, 58, }, - { 6, 1, 0, 1, 60, 76, }, - { 7, 1, 0, 1, 60, 54, }, - { 8, 1, 0, 1, 60, 76, }, - { 9, 1, 0, 1, 60, 62, }, + { 9, 1, 0, 1, 60, 64, }, { 0, 1, 0, 1, 64, 76, }, { 2, 1, 0, 1, 64, 58, }, { 1, 1, 0, 1, 64, 62, }, { 3, 1, 0, 1, 64, 64, }, { 4, 1, 0, 1, 64, 76, }, { 5, 1, 0, 1, 64, 58, }, - { 6, 1, 0, 1, 64, 74, }, - { 7, 1, 0, 1, 64, 54, }, - { 8, 1, 0, 1, 64, 74, }, - { 9, 1, 0, 1, 64, 62, }, + { 9, 1, 0, 1, 64, 64, }, { 0, 1, 0, 1, 100, 68, }, { 2, 1, 0, 1, 100, 58, }, { 1, 1, 0, 1, 100, 76, }, { 3, 1, 0, 1, 100, 68, }, { 4, 1, 0, 1, 100, 76, }, { 5, 1, 0, 1, 100, 58, }, - { 6, 1, 0, 1, 100, 72, }, - { 7, 1, 0, 1, 100, 54, }, - { 8, 1, 0, 1, 100, 72, }, { 9, 1, 0, 1, 100, 127, }, { 0, 1, 0, 1, 104, 76, }, { 2, 1, 0, 1, 104, 58, }, @@ -42713,9 +42824,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 104, 76, }, { 4, 1, 0, 1, 104, 76, }, { 5, 1, 0, 1, 104, 58, }, - { 6, 1, 0, 1, 104, 76, }, - { 7, 1, 0, 1, 104, 54, }, - { 8, 1, 0, 1, 104, 76, }, { 9, 1, 0, 1, 104, 127, }, { 0, 1, 0, 1, 108, 76, }, { 2, 1, 0, 1, 108, 58, }, @@ -42723,9 +42831,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 108, 76, }, { 4, 1, 0, 1, 108, 76, }, { 5, 1, 0, 1, 108, 58, }, - { 6, 1, 0, 1, 108, 76, }, - { 7, 1, 0, 1, 108, 54, }, - { 8, 1, 0, 1, 108, 76, }, { 9, 1, 0, 1, 108, 127, }, { 0, 1, 0, 1, 112, 76, }, { 2, 1, 0, 1, 112, 58, }, @@ -42733,9 +42838,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 112, 76, }, { 4, 1, 0, 1, 112, 76, }, { 5, 1, 0, 1, 112, 58, }, - { 6, 1, 0, 1, 112, 76, }, - { 7, 1, 0, 1, 112, 54, }, - { 8, 1, 0, 1, 112, 76, }, { 9, 1, 0, 1, 112, 127, }, { 0, 1, 0, 1, 116, 76, }, { 2, 1, 0, 1, 116, 58, }, @@ -42743,9 +42845,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 116, 76, }, { 4, 1, 0, 1, 116, 76, }, { 5, 1, 0, 1, 116, 58, }, - { 6, 1, 0, 1, 116, 76, }, - { 7, 1, 0, 1, 116, 54, }, - { 8, 1, 0, 1, 116, 76, }, { 9, 1, 0, 1, 116, 127, }, { 0, 1, 0, 1, 120, 76, }, { 2, 1, 0, 1, 120, 58, }, @@ -42753,9 +42852,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 120, 127, }, { 4, 1, 0, 1, 120, 76, }, { 5, 1, 0, 1, 120, 127, }, - { 6, 1, 0, 1, 120, 76, }, - { 7, 1, 0, 1, 120, 54, }, - { 8, 1, 0, 1, 120, 76, }, { 9, 1, 0, 1, 120, 127, }, { 0, 1, 0, 1, 124, 76, }, { 2, 1, 0, 1, 124, 58, }, @@ -42763,9 +42859,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 124, 127, }, { 4, 1, 0, 1, 124, 76, }, { 5, 1, 0, 1, 124, 127, }, - { 6, 1, 0, 1, 124, 76, }, - { 7, 1, 0, 1, 124, 54, }, - { 8, 1, 0, 1, 124, 76, }, { 9, 1, 0, 1, 124, 127, }, { 0, 1, 0, 1, 128, 76, }, { 2, 1, 0, 1, 128, 58, }, @@ -42773,9 +42866,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 128, 127, }, { 4, 1, 0, 1, 128, 76, }, { 5, 1, 0, 1, 128, 127, }, - { 6, 1, 0, 1, 128, 76, }, - { 7, 1, 0, 1, 128, 54, }, - { 8, 1, 0, 1, 128, 76, }, { 9, 1, 0, 1, 128, 127, }, { 0, 1, 0, 1, 132, 76, }, { 2, 1, 0, 1, 132, 58, }, @@ -42783,9 +42873,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 132, 76, }, { 4, 1, 0, 1, 132, 76, }, { 5, 1, 0, 
1, 132, 58, }, - { 6, 1, 0, 1, 132, 76, }, - { 7, 1, 0, 1, 132, 54, }, - { 8, 1, 0, 1, 132, 76, }, { 9, 1, 0, 1, 132, 127, }, { 0, 1, 0, 1, 136, 76, }, { 2, 1, 0, 1, 136, 58, }, @@ -42793,9 +42880,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 136, 76, }, { 4, 1, 0, 1, 136, 76, }, { 5, 1, 0, 1, 136, 58, }, - { 6, 1, 0, 1, 136, 76, }, - { 7, 1, 0, 1, 136, 54, }, - { 8, 1, 0, 1, 136, 76, }, { 9, 1, 0, 1, 136, 127, }, { 0, 1, 0, 1, 140, 74, }, { 2, 1, 0, 1, 140, 58, }, @@ -42803,9 +42887,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 140, 74, }, { 4, 1, 0, 1, 140, 76, }, { 5, 1, 0, 1, 140, 58, }, - { 6, 1, 0, 1, 140, 72, }, - { 7, 1, 0, 1, 140, 54, }, - { 8, 1, 0, 1, 140, 72, }, { 9, 1, 0, 1, 140, 127, }, { 0, 1, 0, 1, 144, 76, }, { 2, 1, 0, 1, 144, 127, }, @@ -42813,9 +42894,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 144, 76, }, { 4, 1, 0, 1, 144, 76, }, { 5, 1, 0, 1, 144, 127, }, - { 6, 1, 0, 1, 144, 76, }, - { 7, 1, 0, 1, 144, 127, }, - { 8, 1, 0, 1, 144, 76, }, { 9, 1, 0, 1, 144, 127, }, { 0, 1, 0, 1, 149, 76, }, { 2, 1, 0, 1, 149, 28, }, @@ -42823,139 +42901,97 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 1, 149, 76, }, { 4, 1, 0, 1, 149, 74, }, { 5, 1, 0, 1, 149, 76, }, - { 6, 1, 0, 1, 149, 76, }, - { 7, 1, 0, 1, 149, 54, }, - { 8, 1, 0, 1, 149, 76, }, - { 9, 1, 0, 1, 149, 28, }, + { 9, 1, 0, 1, 149, 76, }, { 0, 1, 0, 1, 153, 76, }, { 2, 1, 0, 1, 153, 28, }, { 1, 1, 0, 1, 153, 127, }, { 3, 1, 0, 1, 153, 76, }, { 4, 1, 0, 1, 153, 74, }, { 5, 1, 0, 1, 153, 76, }, - { 6, 1, 0, 1, 153, 76, }, - { 7, 1, 0, 1, 153, 54, }, - { 8, 1, 0, 1, 153, 76, }, - { 9, 1, 0, 1, 153, 28, }, + { 9, 1, 0, 1, 153, 76, }, { 0, 1, 0, 1, 157, 76, }, { 2, 1, 0, 1, 157, 28, }, { 1, 1, 0, 1, 157, 127, }, { 3, 1, 0, 1, 157, 76, }, { 4, 1, 0, 1, 157, 74, }, { 5, 1, 0, 1, 157, 76, }, - { 6, 1, 0, 1, 157, 76, }, - { 7, 1, 0, 1, 157, 54, }, - { 8, 1, 0, 1, 157, 76, }, - { 9, 1, 0, 1, 157, 28, }, + { 9, 1, 0, 1, 157, 76, }, { 0, 1, 0, 1, 161, 76, }, { 2, 1, 0, 1, 161, 28, }, { 1, 1, 0, 1, 161, 127, }, { 3, 1, 0, 1, 161, 76, }, { 4, 1, 0, 1, 161, 74, }, { 5, 1, 0, 1, 161, 76, }, - { 6, 1, 0, 1, 161, 76, }, - { 7, 1, 0, 1, 161, 54, }, - { 8, 1, 0, 1, 161, 76, }, - { 9, 1, 0, 1, 161, 28, }, + { 9, 1, 0, 1, 161, 76, }, { 0, 1, 0, 1, 165, 76, }, { 2, 1, 0, 1, 165, 28, }, { 1, 1, 0, 1, 165, 127, }, { 3, 1, 0, 1, 165, 76, }, { 4, 1, 0, 1, 165, 74, }, { 5, 1, 0, 1, 165, 76, }, - { 6, 1, 0, 1, 165, 76, }, - { 7, 1, 0, 1, 165, 54, }, - { 8, 1, 0, 1, 165, 76, }, - { 9, 1, 0, 1, 165, 28, }, + { 9, 1, 0, 1, 165, 76, }, { 0, 1, 0, 2, 36, 70, }, { 2, 1, 0, 2, 36, 58, }, { 1, 1, 0, 2, 36, 64, }, { 3, 1, 0, 2, 36, 62, }, { 4, 1, 0, 2, 36, 76, }, { 5, 1, 0, 2, 36, 58, }, - { 6, 1, 0, 2, 36, 64, }, - { 7, 1, 0, 2, 36, 54, }, - { 8, 1, 0, 2, 36, 62, }, - { 9, 1, 0, 2, 36, 62, }, + { 9, 1, 0, 2, 36, 60, }, { 0, 1, 0, 2, 40, 76, }, { 2, 1, 0, 2, 40, 58, }, { 1, 1, 0, 2, 40, 62, }, { 3, 1, 0, 2, 40, 62, }, { 4, 1, 0, 2, 40, 76, }, { 5, 1, 0, 2, 40, 58, }, - { 6, 1, 0, 2, 40, 64, }, - { 7, 1, 0, 2, 40, 54, }, - { 8, 1, 0, 2, 40, 62, }, - { 9, 1, 0, 2, 40, 62, }, + { 9, 1, 0, 2, 40, 60, }, { 0, 1, 0, 2, 44, 76, }, { 2, 1, 0, 2, 44, 58, }, { 1, 1, 0, 2, 44, 62, }, { 3, 1, 0, 2, 44, 62, }, { 4, 1, 0, 2, 44, 76, }, { 5, 1, 0, 2, 44, 58, }, - { 6, 1, 0, 2, 44, 64, }, - { 7, 1, 0, 2, 44, 54, }, - { 8, 1, 0, 2, 44, 62, }, - { 9, 1, 0, 2, 44, 62, }, + { 9, 1, 0, 2, 44, 
60, }, { 0, 1, 0, 2, 48, 76, }, { 2, 1, 0, 2, 48, 58, }, { 1, 1, 0, 2, 48, 62, }, { 3, 1, 0, 2, 48, 62, }, { 4, 1, 0, 2, 48, 58, }, { 5, 1, 0, 2, 48, 58, }, - { 6, 1, 0, 2, 48, 64, }, - { 7, 1, 0, 2, 48, 54, }, - { 8, 1, 0, 2, 48, 62, }, - { 9, 1, 0, 2, 48, 62, }, + { 9, 1, 0, 2, 48, 60, }, { 0, 1, 0, 2, 52, 76, }, { 2, 1, 0, 2, 52, 58, }, { 1, 1, 0, 2, 52, 62, }, { 3, 1, 0, 2, 52, 64, }, { 4, 1, 0, 2, 52, 76, }, { 5, 1, 0, 2, 52, 58, }, - { 6, 1, 0, 2, 52, 76, }, - { 7, 1, 0, 2, 52, 54, }, - { 8, 1, 0, 2, 52, 76, }, - { 9, 1, 0, 2, 52, 62, }, + { 9, 1, 0, 2, 52, 60, }, { 0, 1, 0, 2, 56, 76, }, { 2, 1, 0, 2, 56, 58, }, { 1, 1, 0, 2, 56, 62, }, { 3, 1, 0, 2, 56, 64, }, { 4, 1, 0, 2, 56, 76, }, { 5, 1, 0, 2, 56, 58, }, - { 6, 1, 0, 2, 56, 76, }, - { 7, 1, 0, 2, 56, 54, }, - { 8, 1, 0, 2, 56, 76, }, - { 9, 1, 0, 2, 56, 62, }, + { 9, 1, 0, 2, 56, 60, }, { 0, 1, 0, 2, 60, 76, }, { 2, 1, 0, 2, 60, 58, }, { 1, 1, 0, 2, 60, 62, }, { 3, 1, 0, 2, 60, 64, }, { 4, 1, 0, 2, 60, 76, }, { 5, 1, 0, 2, 60, 58, }, - { 6, 1, 0, 2, 60, 76, }, - { 7, 1, 0, 2, 60, 54, }, - { 8, 1, 0, 2, 60, 76, }, - { 9, 1, 0, 2, 60, 62, }, + { 9, 1, 0, 2, 60, 60, }, { 0, 1, 0, 2, 64, 70, }, { 2, 1, 0, 2, 64, 58, }, { 1, 1, 0, 2, 64, 62, }, { 3, 1, 0, 2, 64, 64, }, { 4, 1, 0, 2, 64, 74, }, { 5, 1, 0, 2, 64, 58, }, - { 6, 1, 0, 2, 64, 74, }, - { 7, 1, 0, 2, 64, 54, }, - { 8, 1, 0, 2, 64, 74, }, - { 9, 1, 0, 2, 64, 62, }, + { 9, 1, 0, 2, 64, 60, }, { 0, 1, 0, 2, 100, 66, }, { 2, 1, 0, 2, 100, 58, }, { 1, 1, 0, 2, 100, 76, }, { 3, 1, 0, 2, 100, 66, }, { 4, 1, 0, 2, 100, 76, }, { 5, 1, 0, 2, 100, 58, }, - { 6, 1, 0, 2, 100, 70, }, - { 7, 1, 0, 2, 100, 54, }, - { 8, 1, 0, 2, 100, 70, }, { 9, 1, 0, 2, 100, 127, }, { 0, 1, 0, 2, 104, 76, }, { 2, 1, 0, 2, 104, 58, }, @@ -42963,9 +42999,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 2, 104, 76, }, { 4, 1, 0, 2, 104, 76, }, { 5, 1, 0, 2, 104, 58, }, - { 6, 1, 0, 2, 104, 76, }, - { 7, 1, 0, 2, 104, 54, }, - { 8, 1, 0, 2, 104, 76, }, { 9, 1, 0, 2, 104, 127, }, { 0, 1, 0, 2, 108, 76, }, { 2, 1, 0, 2, 108, 58, }, @@ -42973,9 +43006,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 2, 108, 76, }, { 4, 1, 0, 2, 108, 76, }, { 5, 1, 0, 2, 108, 58, }, - { 6, 1, 0, 2, 108, 76, }, - { 7, 1, 0, 2, 108, 54, }, - { 8, 1, 0, 2, 108, 76, }, { 9, 1, 0, 2, 108, 127, }, { 0, 1, 0, 2, 112, 76, }, { 2, 1, 0, 2, 112, 58, }, @@ -42983,9 +43013,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 2, 112, 76, }, { 4, 1, 0, 2, 112, 76, }, { 5, 1, 0, 2, 112, 58, }, - { 6, 1, 0, 2, 112, 76, }, - { 7, 1, 0, 2, 112, 54, }, - { 8, 1, 0, 2, 112, 76, }, { 9, 1, 0, 2, 112, 127, }, { 0, 1, 0, 2, 116, 76, }, { 2, 1, 0, 2, 116, 58, }, @@ -42993,9 +43020,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 2, 116, 76, }, { 4, 1, 0, 2, 116, 76, }, { 5, 1, 0, 2, 116, 58, }, - { 6, 1, 0, 2, 116, 76, }, - { 7, 1, 0, 2, 116, 54, }, - { 8, 1, 0, 2, 116, 76, }, { 9, 1, 0, 2, 116, 127, }, { 0, 1, 0, 2, 120, 76, }, { 2, 1, 0, 2, 120, 58, }, @@ -43003,9 +43027,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 2, 120, 127, }, { 4, 1, 0, 2, 120, 76, }, { 5, 1, 0, 2, 120, 127, }, - { 6, 1, 0, 2, 120, 76, }, - { 7, 1, 0, 2, 120, 54, }, - { 8, 1, 0, 2, 120, 76, }, { 9, 1, 0, 2, 120, 127, }, { 0, 1, 0, 2, 124, 76, }, { 2, 1, 0, 2, 124, 58, }, @@ -43013,9 +43034,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 
0, 2, 124, 127, }, { 4, 1, 0, 2, 124, 76, }, { 5, 1, 0, 2, 124, 127, }, - { 6, 1, 0, 2, 124, 76, }, - { 7, 1, 0, 2, 124, 54, }, - { 8, 1, 0, 2, 124, 76, }, { 9, 1, 0, 2, 124, 127, }, { 0, 1, 0, 2, 128, 76, }, { 2, 1, 0, 2, 128, 58, }, @@ -43023,9 +43041,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 2, 128, 127, }, { 4, 1, 0, 2, 128, 76, }, { 5, 1, 0, 2, 128, 127, }, - { 6, 1, 0, 2, 128, 76, }, - { 7, 1, 0, 2, 128, 54, }, - { 8, 1, 0, 2, 128, 76, }, { 9, 1, 0, 2, 128, 127, }, { 0, 1, 0, 2, 132, 76, }, { 2, 1, 0, 2, 132, 58, }, @@ -43033,9 +43048,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 2, 132, 76, }, { 4, 1, 0, 2, 132, 76, }, { 5, 1, 0, 2, 132, 58, }, - { 6, 1, 0, 2, 132, 76, }, - { 7, 1, 0, 2, 132, 54, }, - { 8, 1, 0, 2, 132, 76, }, { 9, 1, 0, 2, 132, 127, }, { 0, 1, 0, 2, 136, 76, }, { 2, 1, 0, 2, 136, 58, }, @@ -43043,9 +43055,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 2, 136, 76, }, { 4, 1, 0, 2, 136, 76, }, { 5, 1, 0, 2, 136, 58, }, - { 6, 1, 0, 2, 136, 76, }, - { 7, 1, 0, 2, 136, 54, }, - { 8, 1, 0, 2, 136, 76, }, { 9, 1, 0, 2, 136, 127, }, { 0, 1, 0, 2, 140, 66, }, { 2, 1, 0, 2, 140, 58, }, @@ -43053,9 +43062,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 2, 140, 66, }, { 4, 1, 0, 2, 140, 76, }, { 5, 1, 0, 2, 140, 58, }, - { 6, 1, 0, 2, 140, 70, }, - { 7, 1, 0, 2, 140, 54, }, - { 8, 1, 0, 2, 140, 70, }, { 9, 1, 0, 2, 140, 127, }, { 0, 1, 0, 2, 144, 76, }, { 2, 1, 0, 2, 144, 127, }, @@ -43063,9 +43069,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 2, 144, 76, }, { 4, 1, 0, 2, 144, 76, }, { 5, 1, 0, 2, 144, 127, }, - { 6, 1, 0, 2, 144, 76, }, - { 7, 1, 0, 2, 144, 127, }, - { 8, 1, 0, 2, 144, 76, }, { 9, 1, 0, 2, 144, 127, }, { 0, 1, 0, 2, 149, 76, }, { 2, 1, 0, 2, 149, 28, }, @@ -43073,139 +43076,97 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 2, 149, 76, }, { 4, 1, 0, 2, 149, 74, }, { 5, 1, 0, 2, 149, 76, }, - { 6, 1, 0, 2, 149, 76, }, - { 7, 1, 0, 2, 149, 54, }, - { 8, 1, 0, 2, 149, 76, }, - { 9, 1, 0, 2, 149, 28, }, + { 9, 1, 0, 2, 149, 76, }, { 0, 1, 0, 2, 153, 76, }, { 2, 1, 0, 2, 153, 28, }, { 1, 1, 0, 2, 153, 127, }, { 3, 1, 0, 2, 153, 76, }, { 4, 1, 0, 2, 153, 74, }, { 5, 1, 0, 2, 153, 76, }, - { 6, 1, 0, 2, 153, 76, }, - { 7, 1, 0, 2, 153, 54, }, - { 8, 1, 0, 2, 153, 76, }, - { 9, 1, 0, 2, 153, 28, }, + { 9, 1, 0, 2, 153, 76, }, { 0, 1, 0, 2, 157, 76, }, { 2, 1, 0, 2, 157, 28, }, { 1, 1, 0, 2, 157, 127, }, { 3, 1, 0, 2, 157, 76, }, { 4, 1, 0, 2, 157, 74, }, { 5, 1, 0, 2, 157, 76, }, - { 6, 1, 0, 2, 157, 76, }, - { 7, 1, 0, 2, 157, 54, }, - { 8, 1, 0, 2, 157, 76, }, - { 9, 1, 0, 2, 157, 28, }, + { 9, 1, 0, 2, 157, 76, }, { 0, 1, 0, 2, 161, 76, }, { 2, 1, 0, 2, 161, 28, }, { 1, 1, 0, 2, 161, 127, }, { 3, 1, 0, 2, 161, 76, }, { 4, 1, 0, 2, 161, 74, }, { 5, 1, 0, 2, 161, 76, }, - { 6, 1, 0, 2, 161, 76, }, - { 7, 1, 0, 2, 161, 54, }, - { 8, 1, 0, 2, 161, 76, }, - { 9, 1, 0, 2, 161, 28, }, + { 9, 1, 0, 2, 161, 76, }, { 0, 1, 0, 2, 165, 76, }, { 2, 1, 0, 2, 165, 28, }, { 1, 1, 0, 2, 165, 127, }, { 3, 1, 0, 2, 165, 76, }, { 4, 1, 0, 2, 165, 74, }, { 5, 1, 0, 2, 165, 76, }, - { 6, 1, 0, 2, 165, 76, }, - { 7, 1, 0, 2, 165, 54, }, - { 8, 1, 0, 2, 165, 76, }, - { 9, 1, 0, 2, 165, 28, }, + { 9, 1, 0, 2, 165, 76, }, { 0, 1, 0, 3, 36, 64, }, { 2, 1, 0, 3, 36, 36, }, { 1, 1, 0, 3, 36, 50, }, { 3, 1, 0, 3, 36, 38, }, { 4, 1, 0, 
3, 36, 66, }, { 5, 1, 0, 3, 36, 36, }, - { 6, 1, 0, 3, 36, 52, }, - { 7, 1, 0, 3, 36, 30, }, - { 8, 1, 0, 3, 36, 50, }, - { 9, 1, 0, 3, 36, 38, }, + { 9, 1, 0, 3, 36, 36, }, { 0, 1, 0, 3, 40, 68, }, { 2, 1, 0, 3, 40, 36, }, { 1, 1, 0, 3, 40, 50, }, { 3, 1, 0, 3, 40, 38, }, { 4, 1, 0, 3, 40, 66, }, { 5, 1, 0, 3, 40, 36, }, - { 6, 1, 0, 3, 40, 52, }, - { 7, 1, 0, 3, 40, 30, }, - { 8, 1, 0, 3, 40, 50, }, - { 9, 1, 0, 3, 40, 38, }, + { 9, 1, 0, 3, 40, 36, }, { 0, 1, 0, 3, 44, 68, }, { 2, 1, 0, 3, 44, 36, }, { 1, 1, 0, 3, 44, 50, }, { 3, 1, 0, 3, 44, 38, }, { 4, 1, 0, 3, 44, 66, }, { 5, 1, 0, 3, 44, 36, }, - { 6, 1, 0, 3, 44, 52, }, - { 7, 1, 0, 3, 44, 30, }, - { 8, 1, 0, 3, 44, 50, }, - { 9, 1, 0, 3, 44, 38, }, + { 9, 1, 0, 3, 44, 36, }, { 0, 1, 0, 3, 48, 68, }, { 2, 1, 0, 3, 48, 36, }, { 1, 1, 0, 3, 48, 50, }, { 3, 1, 0, 3, 48, 38, }, { 4, 1, 0, 3, 48, 42, }, { 5, 1, 0, 3, 48, 36, }, - { 6, 1, 0, 3, 48, 52, }, - { 7, 1, 0, 3, 48, 30, }, - { 8, 1, 0, 3, 48, 50, }, - { 9, 1, 0, 3, 48, 38, }, + { 9, 1, 0, 3, 48, 36, }, { 0, 1, 0, 3, 52, 68, }, { 2, 1, 0, 3, 52, 36, }, { 1, 1, 0, 3, 52, 50, }, { 3, 1, 0, 3, 52, 40, }, { 4, 1, 0, 3, 52, 66, }, { 5, 1, 0, 3, 52, 36, }, - { 6, 1, 0, 3, 52, 68, }, - { 7, 1, 0, 3, 52, 30, }, - { 8, 1, 0, 3, 52, 68, }, - { 9, 1, 0, 3, 52, 38, }, + { 9, 1, 0, 3, 52, 36, }, { 0, 1, 0, 3, 56, 68, }, { 2, 1, 0, 3, 56, 36, }, { 1, 1, 0, 3, 56, 50, }, { 3, 1, 0, 3, 56, 40, }, { 4, 1, 0, 3, 56, 66, }, { 5, 1, 0, 3, 56, 36, }, - { 6, 1, 0, 3, 56, 68, }, - { 7, 1, 0, 3, 56, 30, }, - { 8, 1, 0, 3, 56, 68, }, - { 9, 1, 0, 3, 56, 38, }, + { 9, 1, 0, 3, 56, 36, }, { 0, 1, 0, 3, 60, 68, }, { 2, 1, 0, 3, 60, 36, }, { 1, 1, 0, 3, 60, 50, }, { 3, 1, 0, 3, 60, 40, }, { 4, 1, 0, 3, 60, 66, }, { 5, 1, 0, 3, 60, 36, }, - { 6, 1, 0, 3, 60, 66, }, - { 7, 1, 0, 3, 60, 30, }, - { 8, 1, 0, 3, 60, 66, }, - { 9, 1, 0, 3, 60, 38, }, + { 9, 1, 0, 3, 60, 36, }, { 0, 1, 0, 3, 64, 66, }, { 2, 1, 0, 3, 64, 36, }, { 1, 1, 0, 3, 64, 50, }, { 3, 1, 0, 3, 64, 40, }, { 4, 1, 0, 3, 64, 66, }, { 5, 1, 0, 3, 64, 36, }, - { 6, 1, 0, 3, 64, 68, }, - { 7, 1, 0, 3, 64, 30, }, - { 8, 1, 0, 3, 64, 68, }, - { 9, 1, 0, 3, 64, 38, }, + { 9, 1, 0, 3, 64, 36, }, { 0, 1, 0, 3, 100, 64, }, { 2, 1, 0, 3, 100, 36, }, { 1, 1, 0, 3, 100, 70, }, { 3, 1, 0, 3, 100, 64, }, { 4, 1, 0, 3, 100, 66, }, { 5, 1, 0, 3, 100, 36, }, - { 6, 1, 0, 3, 100, 60, }, - { 7, 1, 0, 3, 100, 30, }, - { 8, 1, 0, 3, 100, 60, }, { 9, 1, 0, 3, 100, 127, }, { 0, 1, 0, 3, 104, 68, }, { 2, 1, 0, 3, 104, 36, }, @@ -43213,9 +43174,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 104, 68, }, { 4, 1, 0, 3, 104, 66, }, { 5, 1, 0, 3, 104, 36, }, - { 6, 1, 0, 3, 104, 68, }, - { 7, 1, 0, 3, 104, 30, }, - { 8, 1, 0, 3, 104, 68, }, { 9, 1, 0, 3, 104, 127, }, { 0, 1, 0, 3, 108, 68, }, { 2, 1, 0, 3, 108, 36, }, @@ -43223,9 +43181,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 108, 68, }, { 4, 1, 0, 3, 108, 66, }, { 5, 1, 0, 3, 108, 36, }, - { 6, 1, 0, 3, 108, 68, }, - { 7, 1, 0, 3, 108, 30, }, - { 8, 1, 0, 3, 108, 68, }, { 9, 1, 0, 3, 108, 127, }, { 0, 1, 0, 3, 112, 68, }, { 2, 1, 0, 3, 112, 36, }, @@ -43233,9 +43188,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 112, 68, }, { 4, 1, 0, 3, 112, 66, }, { 5, 1, 0, 3, 112, 36, }, - { 6, 1, 0, 3, 112, 68, }, - { 7, 1, 0, 3, 112, 30, }, - { 8, 1, 0, 3, 112, 68, }, { 9, 1, 0, 3, 112, 127, }, { 0, 1, 0, 3, 116, 68, }, { 2, 1, 0, 3, 116, 36, }, @@ -43243,9 +43195,6 @@ static const struct 
rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 116, 68, }, { 4, 1, 0, 3, 116, 66, }, { 5, 1, 0, 3, 116, 36, }, - { 6, 1, 0, 3, 116, 68, }, - { 7, 1, 0, 3, 116, 30, }, - { 8, 1, 0, 3, 116, 68, }, { 9, 1, 0, 3, 116, 127, }, { 0, 1, 0, 3, 120, 68, }, { 2, 1, 0, 3, 120, 36, }, @@ -43253,9 +43202,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 120, 127, }, { 4, 1, 0, 3, 120, 66, }, { 5, 1, 0, 3, 120, 127, }, - { 6, 1, 0, 3, 120, 68, }, - { 7, 1, 0, 3, 120, 30, }, - { 8, 1, 0, 3, 120, 68, }, { 9, 1, 0, 3, 120, 127, }, { 0, 1, 0, 3, 124, 68, }, { 2, 1, 0, 3, 124, 36, }, @@ -43263,9 +43209,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 124, 127, }, { 4, 1, 0, 3, 124, 66, }, { 5, 1, 0, 3, 124, 127, }, - { 6, 1, 0, 3, 124, 68, }, - { 7, 1, 0, 3, 124, 30, }, - { 8, 1, 0, 3, 124, 68, }, { 9, 1, 0, 3, 124, 127, }, { 0, 1, 0, 3, 128, 68, }, { 2, 1, 0, 3, 128, 36, }, @@ -43273,9 +43216,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 128, 127, }, { 4, 1, 0, 3, 128, 66, }, { 5, 1, 0, 3, 128, 127, }, - { 6, 1, 0, 3, 128, 68, }, - { 7, 1, 0, 3, 128, 30, }, - { 8, 1, 0, 3, 128, 68, }, { 9, 1, 0, 3, 128, 127, }, { 0, 1, 0, 3, 132, 68, }, { 2, 1, 0, 3, 132, 36, }, @@ -43283,9 +43223,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 132, 68, }, { 4, 1, 0, 3, 132, 66, }, { 5, 1, 0, 3, 132, 36, }, - { 6, 1, 0, 3, 132, 68, }, - { 7, 1, 0, 3, 132, 30, }, - { 8, 1, 0, 3, 132, 68, }, { 9, 1, 0, 3, 132, 127, }, { 0, 1, 0, 3, 136, 68, }, { 2, 1, 0, 3, 136, 36, }, @@ -43293,9 +43230,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 136, 68, }, { 4, 1, 0, 3, 136, 66, }, { 5, 1, 0, 3, 136, 36, }, - { 6, 1, 0, 3, 136, 68, }, - { 7, 1, 0, 3, 136, 30, }, - { 8, 1, 0, 3, 136, 68, }, { 9, 1, 0, 3, 136, 127, }, { 0, 1, 0, 3, 140, 58, }, { 2, 1, 0, 3, 140, 36, }, @@ -43303,9 +43237,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 140, 58, }, { 4, 1, 0, 3, 140, 66, }, { 5, 1, 0, 3, 140, 36, }, - { 6, 1, 0, 3, 140, 60, }, - { 7, 1, 0, 3, 140, 30, }, - { 8, 1, 0, 3, 140, 60, }, { 9, 1, 0, 3, 140, 127, }, { 0, 1, 0, 3, 144, 68, }, { 2, 1, 0, 3, 144, 127, }, @@ -43313,9 +43244,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 144, 68, }, { 4, 1, 0, 3, 144, 66, }, { 5, 1, 0, 3, 144, 127, }, - { 6, 1, 0, 3, 144, 68, }, - { 7, 1, 0, 3, 144, 127, }, - { 8, 1, 0, 3, 144, 68, }, { 9, 1, 0, 3, 144, 127, }, { 0, 1, 0, 3, 149, 76, }, { 2, 1, 0, 3, 149, 4, }, @@ -43323,59 +43251,41 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 0, 3, 149, 76, }, { 4, 1, 0, 3, 149, 62, }, { 5, 1, 0, 3, 149, 76, }, - { 6, 1, 0, 3, 149, 76, }, - { 7, 1, 0, 3, 149, 30, }, - { 8, 1, 0, 3, 149, 72, }, - { 9, 1, 0, 3, 149, 4, }, + { 9, 1, 0, 3, 149, 68, }, { 0, 1, 0, 3, 153, 76, }, { 2, 1, 0, 3, 153, 4, }, { 1, 1, 0, 3, 153, 127, }, { 3, 1, 0, 3, 153, 76, }, { 4, 1, 0, 3, 153, 62, }, { 5, 1, 0, 3, 153, 76, }, - { 6, 1, 0, 3, 153, 76, }, - { 7, 1, 0, 3, 153, 30, }, - { 8, 1, 0, 3, 153, 76, }, - { 9, 1, 0, 3, 153, 4, }, + { 9, 1, 0, 3, 153, 68, }, { 0, 1, 0, 3, 157, 76, }, { 2, 1, 0, 3, 157, 4, }, { 1, 1, 0, 3, 157, 127, }, { 3, 1, 0, 3, 157, 76, }, { 4, 1, 0, 3, 157, 62, }, { 5, 1, 0, 3, 157, 76, }, - { 6, 1, 0, 3, 157, 76, }, - { 7, 1, 0, 3, 157, 30, }, - { 8, 1, 0, 3, 157, 76, }, - { 9, 1, 0, 3, 157, 4, }, 
+ { 9, 1, 0, 3, 157, 68, }, { 0, 1, 0, 3, 161, 76, }, { 2, 1, 0, 3, 161, 4, }, { 1, 1, 0, 3, 161, 127, }, { 3, 1, 0, 3, 161, 76, }, { 4, 1, 0, 3, 161, 62, }, { 5, 1, 0, 3, 161, 76, }, - { 6, 1, 0, 3, 161, 76, }, - { 7, 1, 0, 3, 161, 30, }, - { 8, 1, 0, 3, 161, 76, }, - { 9, 1, 0, 3, 161, 4, }, + { 9, 1, 0, 3, 161, 72, }, { 0, 1, 0, 3, 165, 76, }, { 2, 1, 0, 3, 165, 4, }, { 1, 1, 0, 3, 165, 127, }, { 3, 1, 0, 3, 165, 76, }, { 4, 1, 0, 3, 165, 62, }, { 5, 1, 0, 3, 165, 76, }, - { 6, 1, 0, 3, 165, 76, }, - { 7, 1, 0, 3, 165, 30, }, - { 8, 1, 0, 3, 165, 76, }, - { 9, 1, 0, 3, 165, 4, }, + { 9, 1, 0, 3, 165, 72, }, { 0, 1, 1, 2, 38, 66, }, { 2, 1, 1, 2, 38, 64, }, { 1, 1, 1, 2, 38, 64, }, { 3, 1, 1, 2, 38, 64, }, { 4, 1, 1, 2, 38, 64, }, { 5, 1, 1, 2, 38, 64, }, - { 6, 1, 1, 2, 38, 64, }, - { 7, 1, 1, 2, 38, 54, }, - { 8, 1, 1, 2, 38, 62, }, { 9, 1, 1, 2, 38, 64, }, { 0, 1, 1, 2, 46, 72, }, { 2, 1, 1, 2, 46, 64, }, @@ -43383,9 +43293,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 2, 46, 64, }, { 4, 1, 1, 2, 46, 70, }, { 5, 1, 1, 2, 46, 64, }, - { 6, 1, 1, 2, 46, 64, }, - { 7, 1, 1, 2, 46, 54, }, - { 8, 1, 1, 2, 46, 62, }, { 9, 1, 1, 2, 46, 64, }, { 0, 1, 1, 2, 54, 72, }, { 2, 1, 1, 2, 54, 64, }, @@ -43393,9 +43300,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 2, 54, 64, }, { 4, 1, 1, 2, 54, 72, }, { 5, 1, 1, 2, 54, 64, }, - { 6, 1, 1, 2, 54, 72, }, - { 7, 1, 1, 2, 54, 54, }, - { 8, 1, 1, 2, 54, 72, }, { 9, 1, 1, 2, 54, 64, }, { 0, 1, 1, 2, 62, 60, }, { 2, 1, 1, 2, 62, 64, }, @@ -43403,9 +43307,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 2, 62, 60, }, { 4, 1, 1, 2, 62, 60, }, { 5, 1, 1, 2, 62, 64, }, - { 6, 1, 1, 2, 62, 64, }, - { 7, 1, 1, 2, 62, 54, }, - { 8, 1, 1, 2, 62, 64, }, { 9, 1, 1, 2, 62, 64, }, { 0, 1, 1, 2, 102, 60, }, { 2, 1, 1, 2, 102, 64, }, @@ -43413,9 +43314,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 2, 102, 60, }, { 4, 1, 1, 2, 102, 64, }, { 5, 1, 1, 2, 102, 64, }, - { 6, 1, 1, 2, 102, 58, }, - { 7, 1, 1, 2, 102, 54, }, - { 8, 1, 1, 2, 102, 58, }, { 9, 1, 1, 2, 102, 127, }, { 0, 1, 1, 2, 110, 72, }, { 2, 1, 1, 2, 110, 64, }, @@ -43423,9 +43321,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 2, 110, 72, }, { 4, 1, 1, 2, 110, 72, }, { 5, 1, 1, 2, 110, 64, }, - { 6, 1, 1, 2, 110, 72, }, - { 7, 1, 1, 2, 110, 54, }, - { 8, 1, 1, 2, 110, 72, }, { 9, 1, 1, 2, 110, 127, }, { 0, 1, 1, 2, 118, 72, }, { 2, 1, 1, 2, 118, 64, }, @@ -43433,9 +43328,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 2, 118, 127, }, { 4, 1, 1, 2, 118, 72, }, { 5, 1, 1, 2, 118, 127, }, - { 6, 1, 1, 2, 118, 72, }, - { 7, 1, 1, 2, 118, 54, }, - { 8, 1, 1, 2, 118, 72, }, { 9, 1, 1, 2, 118, 127, }, { 0, 1, 1, 2, 126, 72, }, { 2, 1, 1, 2, 126, 64, }, @@ -43443,9 +43335,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 2, 126, 127, }, { 4, 1, 1, 2, 126, 72, }, { 5, 1, 1, 2, 126, 127, }, - { 6, 1, 1, 2, 126, 72, }, - { 7, 1, 1, 2, 126, 54, }, - { 8, 1, 1, 2, 126, 72, }, { 9, 1, 1, 2, 126, 127, }, { 0, 1, 1, 2, 134, 72, }, { 2, 1, 1, 2, 134, 64, }, @@ -43453,9 +43342,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 2, 134, 72, }, { 4, 1, 1, 2, 134, 72, }, { 5, 1, 1, 2, 134, 64, }, - { 6, 1, 1, 2, 134, 72, }, - { 7, 1, 1, 2, 134, 54, }, - { 8, 1, 1, 2, 134, 72, }, { 9, 1, 1, 2, 134, 
127, }, { 0, 1, 1, 2, 142, 72, }, { 2, 1, 1, 2, 142, 127, }, @@ -43463,9 +43349,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 2, 142, 72, }, { 4, 1, 1, 2, 142, 72, }, { 5, 1, 1, 2, 142, 127, }, - { 6, 1, 1, 2, 142, 72, }, - { 7, 1, 1, 2, 142, 127, }, - { 8, 1, 1, 2, 142, 72, }, { 9, 1, 1, 2, 142, 127, }, { 0, 1, 1, 2, 151, 72, }, { 2, 1, 1, 2, 151, 28, }, @@ -43473,29 +43356,20 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 2, 151, 72, }, { 4, 1, 1, 2, 151, 72, }, { 5, 1, 1, 2, 151, 72, }, - { 6, 1, 1, 2, 151, 72, }, - { 7, 1, 1, 2, 151, 54, }, - { 8, 1, 1, 2, 151, 72, }, - { 9, 1, 1, 2, 151, 28, }, + { 9, 1, 1, 2, 151, 72, }, { 0, 1, 1, 2, 159, 72, }, { 2, 1, 1, 2, 159, 28, }, { 1, 1, 1, 2, 159, 127, }, { 3, 1, 1, 2, 159, 72, }, { 4, 1, 1, 2, 159, 72, }, { 5, 1, 1, 2, 159, 72, }, - { 6, 1, 1, 2, 159, 72, }, - { 7, 1, 1, 2, 159, 54, }, - { 8, 1, 1, 2, 159, 72, }, - { 9, 1, 1, 2, 159, 28, }, + { 9, 1, 1, 2, 159, 72, }, { 0, 1, 1, 3, 38, 60, }, { 2, 1, 1, 3, 38, 40, }, { 1, 1, 1, 3, 38, 50, }, { 3, 1, 1, 3, 38, 40, }, { 4, 1, 1, 3, 38, 54, }, { 5, 1, 1, 3, 38, 40, }, - { 6, 1, 1, 3, 38, 52, }, - { 7, 1, 1, 3, 38, 30, }, - { 8, 1, 1, 3, 38, 50, }, { 9, 1, 1, 3, 38, 40, }, { 0, 1, 1, 3, 46, 68, }, { 2, 1, 1, 3, 46, 40, }, @@ -43503,9 +43377,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 3, 46, 40, }, { 4, 1, 1, 3, 46, 54, }, { 5, 1, 1, 3, 46, 40, }, - { 6, 1, 1, 3, 46, 52, }, - { 7, 1, 1, 3, 46, 30, }, - { 8, 1, 1, 3, 46, 50, }, { 9, 1, 1, 3, 46, 40, }, { 0, 1, 1, 3, 54, 68, }, { 2, 1, 1, 3, 54, 40, }, @@ -43513,9 +43384,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 3, 54, 40, }, { 4, 1, 1, 3, 54, 66, }, { 5, 1, 1, 3, 54, 40, }, - { 6, 1, 1, 3, 54, 68, }, - { 7, 1, 1, 3, 54, 30, }, - { 8, 1, 1, 3, 54, 68, }, { 9, 1, 1, 3, 54, 40, }, { 0, 1, 1, 3, 62, 58, }, { 2, 1, 1, 3, 62, 40, }, @@ -43523,9 +43391,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 3, 62, 40, }, { 4, 1, 1, 3, 62, 50, }, { 5, 1, 1, 3, 62, 40, }, - { 6, 1, 1, 3, 62, 58, }, - { 7, 1, 1, 3, 62, 30, }, - { 8, 1, 1, 3, 62, 58, }, { 9, 1, 1, 3, 62, 40, }, { 0, 1, 1, 3, 102, 56, }, { 2, 1, 1, 3, 102, 40, }, @@ -43533,9 +43398,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 3, 102, 56, }, { 4, 1, 1, 3, 102, 54, }, { 5, 1, 1, 3, 102, 40, }, - { 6, 1, 1, 3, 102, 54, }, - { 7, 1, 1, 3, 102, 30, }, - { 8, 1, 1, 3, 102, 54, }, { 9, 1, 1, 3, 102, 127, }, { 0, 1, 1, 3, 110, 68, }, { 2, 1, 1, 3, 110, 40, }, @@ -43543,9 +43405,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 3, 110, 68, }, { 4, 1, 1, 3, 110, 66, }, { 5, 1, 1, 3, 110, 40, }, - { 6, 1, 1, 3, 110, 68, }, - { 7, 1, 1, 3, 110, 30, }, - { 8, 1, 1, 3, 110, 68, }, { 9, 1, 1, 3, 110, 127, }, { 0, 1, 1, 3, 118, 68, }, { 2, 1, 1, 3, 118, 40, }, @@ -43553,9 +43412,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 3, 118, 127, }, { 4, 1, 1, 3, 118, 66, }, { 5, 1, 1, 3, 118, 127, }, - { 6, 1, 1, 3, 118, 68, }, - { 7, 1, 1, 3, 118, 30, }, - { 8, 1, 1, 3, 118, 68, }, { 9, 1, 1, 3, 118, 127, }, { 0, 1, 1, 3, 126, 68, }, { 2, 1, 1, 3, 126, 40, }, @@ -43563,9 +43419,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 3, 126, 127, }, { 4, 1, 1, 3, 126, 66, }, { 5, 1, 1, 3, 126, 127, }, - { 6, 1, 1, 3, 126, 68, }, - { 7, 1, 1, 3, 
126, 30, }, - { 8, 1, 1, 3, 126, 68, }, { 9, 1, 1, 3, 126, 127, }, { 0, 1, 1, 3, 134, 68, }, { 2, 1, 1, 3, 134, 40, }, @@ -43573,9 +43426,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 3, 134, 68, }, { 4, 1, 1, 3, 134, 66, }, { 5, 1, 1, 3, 134, 40, }, - { 6, 1, 1, 3, 134, 68, }, - { 7, 1, 1, 3, 134, 30, }, - { 8, 1, 1, 3, 134, 68, }, { 9, 1, 1, 3, 134, 127, }, { 0, 1, 1, 3, 142, 68, }, { 2, 1, 1, 3, 142, 127, }, @@ -43583,9 +43433,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 3, 142, 68, }, { 4, 1, 1, 3, 142, 66, }, { 5, 1, 1, 3, 142, 127, }, - { 6, 1, 1, 3, 142, 68, }, - { 7, 1, 1, 3, 142, 127, }, - { 8, 1, 1, 3, 142, 68, }, { 9, 1, 1, 3, 142, 127, }, { 0, 1, 1, 3, 151, 72, }, { 2, 1, 1, 3, 151, 4, }, @@ -43593,29 +43440,20 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 1, 3, 151, 72, }, { 4, 1, 1, 3, 151, 66, }, { 5, 1, 1, 3, 151, 72, }, - { 6, 1, 1, 3, 151, 72, }, - { 7, 1, 1, 3, 151, 30, }, - { 8, 1, 1, 3, 151, 68, }, - { 9, 1, 1, 3, 151, 4, }, + { 9, 1, 1, 3, 151, 64, }, { 0, 1, 1, 3, 159, 72, }, { 2, 1, 1, 3, 159, 4, }, { 1, 1, 1, 3, 159, 127, }, { 3, 1, 1, 3, 159, 72, }, { 4, 1, 1, 3, 159, 66, }, { 5, 1, 1, 3, 159, 72, }, - { 6, 1, 1, 3, 159, 72, }, - { 7, 1, 1, 3, 159, 30, }, - { 8, 1, 1, 3, 159, 72, }, - { 9, 1, 1, 3, 159, 4, }, + { 9, 1, 1, 3, 159, 72, }, { 0, 1, 2, 4, 42, 68, }, { 2, 1, 2, 4, 42, 64, }, { 1, 1, 2, 4, 42, 64, }, { 3, 1, 2, 4, 42, 64, }, { 4, 1, 2, 4, 42, 60, }, { 5, 1, 2, 4, 42, 64, }, - { 6, 1, 2, 4, 42, 64, }, - { 7, 1, 2, 4, 42, 54, }, - { 8, 1, 2, 4, 42, 62, }, { 9, 1, 2, 4, 42, 64, }, { 0, 1, 2, 4, 58, 60, }, { 2, 1, 2, 4, 58, 64, }, @@ -43623,9 +43461,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 2, 4, 58, 60, }, { 4, 1, 2, 4, 58, 56, }, { 5, 1, 2, 4, 58, 64, }, - { 6, 1, 2, 4, 58, 62, }, - { 7, 1, 2, 4, 58, 54, }, - { 8, 1, 2, 4, 58, 62, }, { 9, 1, 2, 4, 58, 64, }, { 0, 1, 2, 4, 106, 60, }, { 2, 1, 2, 4, 106, 64, }, @@ -43633,9 +43468,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 2, 4, 106, 60, }, { 4, 1, 2, 4, 106, 58, }, { 5, 1, 2, 4, 106, 64, }, - { 6, 1, 2, 4, 106, 58, }, - { 7, 1, 2, 4, 106, 54, }, - { 8, 1, 2, 4, 106, 58, }, { 9, 1, 2, 4, 106, 127, }, { 0, 1, 2, 4, 122, 72, }, { 2, 1, 2, 4, 122, 64, }, @@ -43643,9 +43475,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 2, 4, 122, 127, }, { 4, 1, 2, 4, 122, 68, }, { 5, 1, 2, 4, 122, 127, }, - { 6, 1, 2, 4, 122, 72, }, - { 7, 1, 2, 4, 122, 54, }, - { 8, 1, 2, 4, 122, 72, }, { 9, 1, 2, 4, 122, 127, }, { 0, 1, 2, 4, 138, 72, }, { 2, 1, 2, 4, 138, 127, }, @@ -43653,9 +43482,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 2, 4, 138, 72, }, { 4, 1, 2, 4, 138, 70, }, { 5, 1, 2, 4, 138, 127, }, - { 6, 1, 2, 4, 138, 72, }, - { 7, 1, 2, 4, 138, 127, }, - { 8, 1, 2, 4, 138, 72, }, { 9, 1, 2, 4, 138, 127, }, { 0, 1, 2, 4, 155, 72, }, { 2, 1, 2, 4, 155, 28, }, @@ -43663,19 +43489,13 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 2, 4, 155, 72, }, { 4, 1, 2, 4, 155, 62, }, { 5, 1, 2, 4, 155, 72, }, - { 6, 1, 2, 4, 155, 72, }, - { 7, 1, 2, 4, 155, 54, }, - { 8, 1, 2, 4, 155, 68, }, - { 9, 1, 2, 4, 155, 28, }, + { 9, 1, 2, 4, 155, 72, }, { 0, 1, 2, 5, 42, 56, }, { 2, 1, 2, 5, 42, 40, }, { 1, 1, 2, 5, 42, 50, }, { 3, 1, 2, 5, 42, 40, }, { 4, 1, 2, 5, 42, 50, }, { 5, 1, 2, 5, 42, 40, }, - { 6, 1, 2, 5, 
42, 52, }, - { 7, 1, 2, 5, 42, 30, }, - { 8, 1, 2, 5, 42, 50, }, { 9, 1, 2, 5, 42, 40, }, { 0, 1, 2, 5, 58, 54, }, { 2, 1, 2, 5, 58, 40, }, @@ -43683,9 +43503,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 2, 5, 58, 40, }, { 4, 1, 2, 5, 58, 46, }, { 5, 1, 2, 5, 58, 40, }, - { 6, 1, 2, 5, 58, 52, }, - { 7, 1, 2, 5, 58, 30, }, - { 8, 1, 2, 5, 58, 52, }, { 9, 1, 2, 5, 58, 40, }, { 0, 1, 2, 5, 106, 48, }, { 2, 1, 2, 5, 106, 40, }, @@ -43693,9 +43510,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 2, 5, 106, 48, }, { 4, 1, 2, 5, 106, 50, }, { 5, 1, 2, 5, 106, 40, }, - { 6, 1, 2, 5, 106, 50, }, - { 7, 1, 2, 5, 106, 30, }, - { 8, 1, 2, 5, 106, 50, }, { 9, 1, 2, 5, 106, 127, }, { 0, 1, 2, 5, 122, 70, }, { 2, 1, 2, 5, 122, 40, }, @@ -43703,9 +43517,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 2, 5, 122, 127, }, { 4, 1, 2, 5, 122, 62, }, { 5, 1, 2, 5, 122, 127, }, - { 6, 1, 2, 5, 122, 66, }, - { 7, 1, 2, 5, 122, 30, }, - { 8, 1, 2, 5, 122, 66, }, { 9, 1, 2, 5, 122, 127, }, { 0, 1, 2, 5, 138, 70, }, { 2, 1, 2, 5, 138, 127, }, @@ -43713,9 +43524,6 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 2, 5, 138, 70, }, { 4, 1, 2, 5, 138, 62, }, { 5, 1, 2, 5, 138, 127, }, - { 6, 1, 2, 5, 138, 66, }, - { 7, 1, 2, 5, 138, 127, }, - { 8, 1, 2, 5, 138, 66, }, { 9, 1, 2, 5, 138, 127, }, { 0, 1, 2, 5, 155, 72, }, { 2, 1, 2, 5, 155, 4, }, @@ -43723,10 +43531,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = { { 3, 1, 2, 5, 155, 72, }, { 4, 1, 2, 5, 155, 52, }, { 5, 1, 2, 5, 155, 72, }, - { 6, 1, 2, 5, 155, 62, }, - { 7, 1, 2, 5, 155, 30, }, - { 8, 1, 2, 5, 155, 62, }, - { 9, 1, 2, 5, 155, 4, }, + { 9, 1, 2, 5, 155, 66, }, }; RTW_DECL_TABLE_TXPWR_LMT(rtw8822c_txpwr_lmt_type5); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822cu.c b/drivers/net/wireless/realtek/rtw88/rtw8822cu.c index af28ca09d41f..157d5102a4b1 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822cu.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822cu.c @@ -25,7 +25,7 @@ static const struct usb_device_id rtw_8822cu_id_table[] = { }; MODULE_DEVICE_TABLE(usb, rtw_8822cu_id_table); -static int rtw8822bu_probe(struct usb_interface *intf, +static int rtw8822cu_probe(struct usb_interface *intf, const struct usb_device_id *id) { return rtw_usb_probe(intf, id); @@ -34,7 +34,7 @@ static int rtw8822bu_probe(struct usb_interface *intf, static struct usb_driver rtw_8822cu_driver = { .name = "rtw_8822cu", .id_table = rtw_8822cu_id_table, - .probe = rtw8822bu_probe, + .probe = rtw8822cu_probe, .disconnect = rtw_usb_disconnect, }; module_usb_driver(rtw_8822cu_driver); diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c index 2821119dc930..f63900b6621d 100644 --- a/drivers/net/wireless/realtek/rtw88/tx.c +++ b/drivers/net/wireless/realtek/rtw88/tx.c @@ -606,8 +606,6 @@ static int rtw_txq_push_skb(struct rtw_dev *rtwdev, rtw_err(rtwdev, "failed to write TX skb to HCI\n"); return ret; } - rtwtxq->last_push = jiffies; - return 0; } diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c index 4a57efdba97b..e6ab1ac6d709 100644 --- a/drivers/net/wireless/realtek/rtw88/usb.c +++ b/drivers/net/wireless/realtek/rtw88/usb.c @@ -142,7 +142,6 @@ static int rtw_usb_parse(struct rtw_dev *rtwdev, struct usb_host_interface *host_interface = &interface->altsetting[0]; struct usb_interface_descriptor 
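/*
 * Illustrative reading of the rtw_txpwr_lmt_cfg_pair rows above: each entry
 * looks like { regd, band, bw, rate-section, channel, power-limit }, and the
 * hunks drop every row for regulatory-domain indices 6, 7 and 8. The lookup
 * below is a userspace sketch under that assumption, not the driver's actual
 * lookup path; 127 is read here as the "no specific limit" sentinel that the
 * tables appear to use.
 */
#include <stdint.h>
#include <stdio.h>

struct lmt_pair { uint8_t regd, band, bw, rs, ch; int8_t lmt; };

static int8_t lookup_lmt(const struct lmt_pair *t, int n, uint8_t regd,
			 uint8_t band, uint8_t bw, uint8_t rs, uint8_t ch)
{
	for (int i = 0; i < n; i++)
		if (t[i].regd == regd && t[i].band == band && t[i].bw == bw &&
		    t[i].rs == rs && t[i].ch == ch)
			return t[i].lmt;
	return 127;	/* fall back to "unrestricted" */
}

int main(void)
{
	/* two rows taken from the table above */
	const struct lmt_pair tbl[] = {
		{ 0, 1, 0, 2, 149, 76 }, { 9, 1, 0, 2, 149, 76 },
	};
	printf("%d\n", lookup_lmt(tbl, 2, 9, 1, 0, 2, 149));	/* 76 */
	return 0;
}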
*interface_desc = &host_interface->desc; struct usb_endpoint_descriptor *endpoint; - struct usb_device *usbd = interface_to_usbdev(interface); int num_out_pipes = 0; int i; u8 num; @@ -184,22 +183,6 @@ static int rtw_usb_parse(struct rtw_dev *rtwdev, } } - switch (usbd->speed) { - case USB_SPEED_LOW: - case USB_SPEED_FULL: - rtwusb->bulkout_size = RTW_USB_FULL_SPEED_BULK_SIZE; - break; - case USB_SPEED_HIGH: - rtwusb->bulkout_size = RTW_USB_HIGH_SPEED_BULK_SIZE; - break; - case USB_SPEED_SUPER: - rtwusb->bulkout_size = RTW_USB_SUPER_SPEED_BULK_SIZE; - break; - default: - rtw_err(rtwdev, "failed to detect usb speed\n"); - return -EINVAL; - } - rtwdev->hci.bulkout_num = num_out_pipes; if (num_out_pipes < 1 || num_out_pipes > 4) { @@ -628,8 +611,7 @@ static void rtw_usb_cancel_rx_bufs(struct rtw_usb *rtwusb) for (i = 0; i < RTW_USB_RXCB_NUM; i++) { rxcb = &rtwusb->rx_cb[i]; - if (rxcb->rx_urb) - usb_kill_urb(rxcb->rx_urb); + usb_kill_urb(rxcb->rx_urb); } } @@ -640,10 +622,8 @@ static void rtw_usb_free_rx_bufs(struct rtw_usb *rtwusb) for (i = 0; i < RTW_USB_RXCB_NUM; i++) { rxcb = &rtwusb->rx_cb[i]; - if (rxcb->rx_urb) { - usb_kill_urb(rxcb->rx_urb); - usb_free_urb(rxcb->rx_urb); - } + usb_kill_urb(rxcb->rx_urb); + usb_free_urb(rxcb->rx_urb); } } @@ -654,7 +634,6 @@ static int rtw_usb_alloc_rx_bufs(struct rtw_usb *rtwusb) for (i = 0; i < RTW_USB_RXCB_NUM; i++) { struct rx_usb_ctrl_block *rxcb = &rtwusb->rx_cb[i]; - rxcb->n = i; rxcb->rtwdev = rtwusb->rtwdev; rxcb->rx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!rxcb->rx_urb) @@ -844,7 +823,7 @@ int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) ret = rtw_core_init(rtwdev); if (ret) - goto err_release_hw; + goto err_free_rx_bufs; ret = rtw_usb_intf_init(rtwdev, intf); if (ret) { @@ -890,6 +869,9 @@ err_destroy_usb: err_deinit_core: rtw_core_deinit(rtwdev); +err_free_rx_bufs: + rtw_usb_free_rx_bufs(rtwusb); + err_release_hw: ieee80211_free_hw(hw); @@ -927,5 +909,5 @@ void rtw_usb_disconnect(struct usb_interface *intf) EXPORT_SYMBOL(rtw_usb_disconnect); MODULE_AUTHOR("Realtek Corporation"); -MODULE_DESCRIPTION("Realtek 802.11ac wireless USB driver"); +MODULE_DESCRIPTION("Realtek USB 802.11ac wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw88/usb.h b/drivers/net/wireless/realtek/rtw88/usb.h index ad1d7955c6a5..86697a5c0103 100644 --- a/drivers/net/wireless/realtek/rtw88/usb.h +++ b/drivers/net/wireless/realtek/rtw88/usb.h @@ -18,10 +18,6 @@ #define RTW_USB_VENQT_CMD_IDX 0x00 -#define RTW_USB_SUPER_SPEED_BULK_SIZE 1024 -#define RTW_USB_HIGH_SPEED_BULK_SIZE 512 -#define RTW_USB_FULL_SPEED_BULK_SIZE 64 - #define RTW_USB_TX_SEL_HQ BIT(0) #define RTW_USB_TX_SEL_LQ BIT(1) #define RTW_USB_TX_SEL_NQ BIT(2) @@ -58,7 +54,6 @@ struct rx_usb_ctrl_block { struct rtw_dev *rtwdev; struct urb *rx_urb; struct sk_buff *rx_skb; - int n; }; struct rtw_usb_tx_data { @@ -74,12 +69,10 @@ struct rtw_usb { __le32 *usb_data; unsigned int usb_data_index; - u32 bulkout_size; u8 pipe_interrupt; u8 pipe_in; u8 out_ep[RTW_USB_EP_MAX]; int qsel_to_ep[TX_DESC_QSEL_MAX]; - u8 usb_txagg_num; struct workqueue_struct *txwq, *rxwq; diff --git a/drivers/net/wireless/realtek/rtw88/util.c b/drivers/net/wireless/realtek/rtw88/util.c index ff3c269fb1a7..e222d3c01a77 100644 --- a/drivers/net/wireless/realtek/rtw88/util.c +++ b/drivers/net/wireless/realtek/rtw88/util.c @@ -159,7 +159,6 @@ void rtw_iterate_stas(struct rtw_dev *rtwdev, struct rtw_vifs_entry { struct list_head list; struct ieee80211_vif *vif; - u8 
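/*
 * The probe error-path hunk above adds err_free_rx_bufs so the RX buffers
 * allocated before rtw_core_init() are released when it fails. A minimal
 * sketch of that goto-unwind shape; the helpers here are stand-ins, not the
 * real driver API:
 */
#include <stdio.h>

static int alloc_rx_bufs(void) { return 0; }	/* stand-in, succeeds */
static int core_init(void) { return -1; }	/* stand-in, fails to show unwind */
static void free_rx_bufs(void) { puts("free rx bufs"); }
static void release_hw(void) { puts("release hw"); }

static int probe_sketch(void)
{
	int ret;

	ret = alloc_rx_bufs();		/* acquired first ... */
	if (ret)
		goto err_release_hw;

	ret = core_init();
	if (ret)
		goto err_free_rx_bufs;	/* ... so it unwinds on later failure */

	return 0;

err_free_rx_bufs:
	free_rx_bufs();			/* labels run in reverse setup order */
err_release_hw:
	release_hw();
	return ret;
}

int main(void) { return probe_sketch() ? 1 : 0; }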
mac[ETH_ALEN]; }; struct rtw_iter_vifs_data { @@ -177,13 +176,11 @@ static void rtw_collect_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) return; vifs_entry->vif = vif; - ether_addr_copy(vifs_entry->mac, mac); list_add_tail(&vifs_entry->list, &iter_stas->list); } void rtw_iterate_vifs(struct rtw_dev *rtwdev, - void (*iterator)(void *data, u8 *mac, - struct ieee80211_vif *vif), + void (*iterator)(void *data, struct ieee80211_vif *vif), void *data) { struct rtw_iter_vifs_data iter_data; @@ -204,7 +201,7 @@ void rtw_iterate_vifs(struct rtw_dev *rtwdev, list_for_each_entry_safe(vif_entry, tmp, &iter_data.list, list) { list_del_init(&vif_entry->list); - iterator(data, vif_entry->mac, vif_entry->vif); + iterator(data, vif_entry->vif); kfree(vif_entry); } } diff --git a/drivers/net/wireless/realtek/rtw88/util.h b/drivers/net/wireless/realtek/rtw88/util.h index dc8965525400..f8399128a9a3 100644 --- a/drivers/net/wireless/realtek/rtw88/util.h +++ b/drivers/net/wireless/realtek/rtw88/util.h @@ -18,8 +18,7 @@ struct rtw_dev; ieee80211_iter_keys_rcu((rtwdev)->hw, vif, iterator, data) void rtw_iterate_vifs(struct rtw_dev *rtwdev, - void (*iterator)(void *data, u8 *mac, - struct ieee80211_vif *vif), + void (*iterator)(void *data, struct ieee80211_vif *vif), void *data); void rtw_iterate_stas(struct rtw_dev *rtwdev, void (*iterator)(void *data, diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c index 4663db4ce2f6..cbf6821af6b8 100644 --- a/drivers/net/wireless/realtek/rtw89/chan.c +++ b/drivers/net/wireless/realtek/rtw89/chan.c @@ -3,7 +3,11 @@ */ #include "chan.h" +#include "coex.h" #include "debug.h" +#include "fw.h" +#include "mac.h" +#include "ps.h" #include "util.h" static enum rtw89_subband rtw89_get_subband_type(enum rtw89_band band, @@ -83,6 +87,19 @@ static enum rtw89_sc_offset rtw89_get_primary_chan_idx(enum rtw89_bandwidth bw, return primary_chan_idx; } +static u8 rtw89_get_primary_sb_idx(u8 central_ch, u8 pri_ch, + enum rtw89_bandwidth bw) +{ + static const u8 prisb_cal_ofst[RTW89_CHANNEL_WIDTH_ORDINARY_NUM] = { + 0, 2, 6, 14, 30 + }; + + if (bw >= RTW89_CHANNEL_WIDTH_ORDINARY_NUM) + return 0; + + return (prisb_cal_ofst[bw] + pri_ch - central_ch) / 4; +} + void rtw89_chan_create(struct rtw89_chan *chan, u8 center_chan, u8 primary_chan, enum rtw89_band band, enum rtw89_bandwidth bandwidth) { @@ -102,6 +119,8 @@ void rtw89_chan_create(struct rtw89_chan *chan, u8 center_chan, u8 primary_chan, chan->subband_type = rtw89_get_subband_type(band, center_chan); chan->pri_ch_idx = rtw89_get_primary_chan_idx(bandwidth, center_freq, primary_freq); + chan->pri_sb_idx = rtw89_get_primary_sb_idx(center_chan, primary_chan, + bandwidth); } bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev, @@ -116,6 +135,7 @@ bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev, rcd->prev_primary_channel = chan->primary_channel; rcd->prev_band_type = chan->band_type; band_changed = new->band_type != chan->band_type; + rcd->band_changed = band_changed; *chan = *new; return band_changed; @@ -185,7 +205,9 @@ void rtw89_entity_init(struct rtw89_dev *rtwdev) { struct rtw89_hal *hal = &rtwdev->hal; + hal->entity_pause = false; bitmap_zero(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY); + bitmap_zero(hal->changes, NUM_OF_RTW89_CHANCTX_CHANGES); atomic_set(&hal->roc_entity_idx, RTW89_SUB_ENTITY_IDLE); rtw89_config_default_chandef(rtwdev); } @@ -193,8 +215,14 @@ void rtw89_entity_init(struct rtw89_dev *rtwdev) enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev 
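/*
 * Worked example of the primary-subband arithmetic in
 * rtw89_get_primary_sb_idx() above: the per-bandwidth offset table
 * { 0, 2, 6, 14, 30 } recenters the primary channel so that dividing by 4
 * (channels per 20 MHz step) yields the subband index. The mapping of bw
 * index 2 to 80 MHz is an assumption for illustration.
 */
#include <stdio.h>

static const unsigned char prisb_ofst[] = { 0, 2, 6, 14, 30 };

static unsigned int pri_sb_idx(unsigned char central, unsigned char pri,
			       unsigned int bw)
{
	return (prisb_ofst[bw] + pri - central) / 4;
}

int main(void)
{
	/* 80 MHz around channel 42 covers primaries 36/40/44/48 */
	printf("%u\n", pri_sb_idx(42, 36, 2));	/* (6 + 36 - 42) / 4 = 0, lowest */
	printf("%u\n", pri_sb_idx(42, 48, 2));	/* (6 + 48 - 42) / 4 = 3, highest */
	return 0;
}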
*rtwdev) { struct rtw89_hal *hal = &rtwdev->hal; + const struct cfg80211_chan_def *chandef; enum rtw89_entity_mode mode; + struct rtw89_chan chan; u8 weight; + u8 last; + u8 idx; + + lockdep_assert_held(&rtwdev->mutex); weight = bitmap_weight(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY); switch (weight) { @@ -206,14 +234,1672 @@ enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev) rtw89_config_default_chandef(rtwdev); fallthrough; case 1: + last = RTW89_SUB_ENTITY_0; mode = RTW89_ENTITY_MODE_SCC; break; + case 2: + last = RTW89_SUB_ENTITY_1; + mode = rtw89_get_entity_mode(rtwdev); + if (mode == RTW89_ENTITY_MODE_MCC) + break; + + mode = RTW89_ENTITY_MODE_MCC_PREPARE; + break; } + for (idx = 0; idx <= last; idx++) { + chandef = rtw89_chandef_get(rtwdev, idx); + rtw89_get_channel_params(chandef, &chan); + if (chan.channel == 0) { + WARN(1, "Invalid channel on chanctx %d\n", idx); + return RTW89_ENTITY_MODE_INVALID; + } + + rtw89_assign_entity_chan(rtwdev, idx, &chan); + } + + if (hal->entity_pause) + return rtw89_get_entity_mode(rtwdev); + rtw89_set_entity_mode(rtwdev, mode); return mode; } +static void rtw89_chanctx_notify(struct rtw89_dev *rtwdev, + enum rtw89_chanctx_state state) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + const struct rtw89_chanctx_listener *listener = chip->chanctx_listener; + int i; + + if (!listener) + return; + + for (i = 0; i < NUM_OF_RTW89_CHANCTX_CALLBACKS; i++) { + if (!listener->callbacks[i]) + continue; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "chanctx notify listener: cb %d, state %d\n", + i, state); + + listener->callbacks[i](rtwdev, state); + } +} + +/* This function centrally manages how MCC roles are sorted and iterated. + * And, it guarantees that ordered_idx is less than NUM_OF_RTW89_MCC_ROLES. + * So, if data needs to pass an array for ordered_idx, the array can declare + * with NUM_OF_RTW89_MCC_ROLES. Besides, the entire iteration will stop + * immediately as long as iterator returns a non-zero value. + */ +static +int rtw89_iterate_mcc_roles(struct rtw89_dev *rtwdev, + int (*iterator)(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *mcc_role, + unsigned int ordered_idx, + void *data), + void *data) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role * const roles[] = { + &mcc->role_ref, + &mcc->role_aux, + }; + unsigned int idx; + int ret; + + BUILD_BUG_ON(ARRAY_SIZE(roles) != NUM_OF_RTW89_MCC_ROLES); + + for (idx = 0; idx < NUM_OF_RTW89_MCC_ROLES; idx++) { + ret = iterator(rtwdev, roles[idx], idx, data); + if (ret) + return ret; + } + + return 0; +} + +/* For now, IEEE80211_HW_TIMING_BEACON_ONLY can make things simple to ensure + * correctness of MCC calculation logic below. We have noticed that once driver + * declares WIPHY_FLAG_SUPPORTS_MLO, the use of IEEE80211_HW_TIMING_BEACON_ONLY + * will be restricted. We will make an alternative in driver when it is ready + * for MLO. 
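/*
 * Userspace analogue of the MCC role iteration contract documented above:
 * roles are visited in a fixed order (reference first, then auxiliary) and
 * the walk stops at the first non-zero return. Types and names here are
 * illustrative only.
 */
#include <stdio.h>
#include <stddef.h>

struct role { int id; };

static int visit(struct role *r, unsigned int idx, void *data)
{
	(void)data;
	printf("role[%u] = %d\n", idx, r->id);
	return 0;			/* non-zero would abort the walk */
}

int main(void)
{
	struct role ref = { 1 }, aux = { 2 };
	struct role *roles[] = { &ref, &aux };	/* fixed order, as in the hunk */

	for (size_t i = 0; i < sizeof(roles) / sizeof(roles[0]); i++) {
		int ret = visit(roles[i], (unsigned int)i, NULL);
		if (ret)
			return ret;	/* first error stops iteration */
	}
	return 0;
}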
+ */ +static u32 rtw89_mcc_get_tbtt_ofst(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *role, u64 tsf) +{ + struct rtw89_vif *rtwvif = role->rtwvif; + struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); + u32 bcn_intvl_us = ieee80211_tu_to_usec(role->beacon_interval); + u64 sync_tsf = vif->bss_conf.sync_tsf; + u32 remainder; + + if (tsf < sync_tsf) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC get tbtt ofst: tsf might not update yet\n"); + sync_tsf = 0; + } + + div_u64_rem(tsf - sync_tsf, bcn_intvl_us, &remainder); + + return remainder; +} + +static u16 rtw89_mcc_get_bcn_ofst(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_mac_mcc_tsf_rpt rpt = {}; + struct rtw89_fw_mcc_tsf_req req = {}; + u32 bcn_intvl_ref_us = ieee80211_tu_to_usec(ref->beacon_interval); + u32 tbtt_ofst_ref, tbtt_ofst_aux; + u64 tsf_ref, tsf_aux; + int ret; + + req.group = mcc->group; + req.macid_x = ref->rtwvif->mac_id; + req.macid_y = aux->rtwvif->mac_id; + ret = rtw89_fw_h2c_mcc_req_tsf(rtwdev, &req, &rpt); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC h2c failed to request tsf: %d\n", ret); + return RTW89_MCC_DFLT_BCN_OFST_TIME; + } + + tsf_ref = (u64)rpt.tsf_x_high << 32 | rpt.tsf_x_low; + tsf_aux = (u64)rpt.tsf_y_high << 32 | rpt.tsf_y_low; + tbtt_ofst_ref = rtw89_mcc_get_tbtt_ofst(rtwdev, ref, tsf_ref); + tbtt_ofst_aux = rtw89_mcc_get_tbtt_ofst(rtwdev, aux, tsf_aux); + + while (tbtt_ofst_ref < tbtt_ofst_aux) + tbtt_ofst_ref += bcn_intvl_ref_us; + + return (tbtt_ofst_ref - tbtt_ofst_aux) / 1024; +} + +static +void rtw89_mcc_role_fw_macid_bitmap_set_bit(struct rtw89_mcc_role *mcc_role, + unsigned int bit) +{ + unsigned int idx = bit / 8; + unsigned int pos = bit % 8; + + if (idx >= ARRAY_SIZE(mcc_role->macid_bitmap)) + return; + + mcc_role->macid_bitmap[idx] |= BIT(pos); +} + +static void rtw89_mcc_role_macid_sta_iter(void *data, struct ieee80211_sta *sta) +{ + struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; + struct rtw89_vif *rtwvif = rtwsta->rtwvif; + struct rtw89_mcc_role *mcc_role = data; + struct rtw89_vif *target = mcc_role->rtwvif; + + if (rtwvif != target) + return; + + rtw89_mcc_role_fw_macid_bitmap_set_bit(mcc_role, rtwsta->mac_id); +} + +static void rtw89_mcc_fill_role_macid_bitmap(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *mcc_role) +{ + struct rtw89_vif *rtwvif = mcc_role->rtwvif; + + rtw89_mcc_role_fw_macid_bitmap_set_bit(mcc_role, rtwvif->mac_id); + ieee80211_iterate_stations_atomic(rtwdev->hw, + rtw89_mcc_role_macid_sta_iter, + mcc_role); +} + +static void rtw89_mcc_fill_role_policy(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *mcc_role) +{ + struct rtw89_mcc_policy *policy = &mcc_role->policy; + + policy->c2h_rpt = RTW89_FW_MCC_C2H_RPT_ALL; + policy->tx_null_early = RTW89_MCC_DFLT_TX_NULL_EARLY; + policy->in_curr_ch = false; + policy->dis_sw_retry = true; + policy->sw_retry_count = false; + + if (mcc_role->is_go) + policy->dis_tx_null = true; + else + policy->dis_tx_null = false; +} + +static void rtw89_mcc_fill_role_limit(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *mcc_role) +{ + struct ieee80211_vif *vif = rtwvif_to_vif(mcc_role->rtwvif); + struct ieee80211_p2p_noa_desc *noa_desc; + u32 bcn_intvl_us = ieee80211_tu_to_usec(mcc_role->beacon_interval); + u32 max_toa_us, max_tob_us, max_dur_us; + u32 start_time, interval, duration; + u64 tsf, tsf_lmt; + int ret; + int i; + + if (!mcc_role->is_go && !mcc_role->is_gc) + 
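/*
 * The TBTT offset above is plain modular arithmetic: the time elapsed since
 * the synchronized beacon, reduced modulo the beacon interval. The beacon
 * offset between two roles then raises the reference offset above the
 * auxiliary one before taking the difference. Illustrative values:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t intvl = 100 * 1024;		/* 100 TU in microseconds */
	uint64_t tbtt_ref = (350000 - 0) % intvl; /* (tsf - sync_tsf) % intvl = 42800 */
	uint64_t tbtt_aux = 20000;

	while (tbtt_ref < tbtt_aux)
		tbtt_ref += intvl;		/* already 42800 >= 20000 here */

	printf("bcn_ofst = %llu TU\n",
	       (unsigned long long)((tbtt_ref - tbtt_aux) / 1024));	/* 22 TU */
	return 0;
}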
return; + + /* find the first periodic NoA */ + for (i = 0; i < RTW89_P2P_MAX_NOA_NUM; i++) { + noa_desc = &vif->bss_conf.p2p_noa_attr.desc[i]; + if (noa_desc->count == 255) + goto fill; + } + + return; + +fill: + start_time = le32_to_cpu(noa_desc->start_time); + interval = le32_to_cpu(noa_desc->interval); + duration = le32_to_cpu(noa_desc->duration); + + if (interval != bcn_intvl_us) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC role limit: mismatch interval: %d vs. %d\n", + interval, bcn_intvl_us); + return; + } + + ret = rtw89_mac_port_get_tsf(rtwdev, mcc_role->rtwvif, &tsf); + if (ret) { + rtw89_warn(rtwdev, "MCC failed to get port tsf: %d\n", ret); + return; + } + + tsf_lmt = (tsf & GENMASK_ULL(63, 32)) | start_time; + max_toa_us = rtw89_mcc_get_tbtt_ofst(rtwdev, mcc_role, tsf_lmt); + max_dur_us = interval - duration; + max_tob_us = max_dur_us - max_toa_us; + + if (!max_toa_us || !max_tob_us) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC role limit: hit boundary\n"); + return; + } + + if (max_dur_us < max_toa_us) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC role limit: insufficient duration\n"); + return; + } + + mcc_role->limit.max_toa = max_toa_us / 1024; + mcc_role->limit.max_tob = max_tob_us / 1024; + mcc_role->limit.max_dur = max_dur_us / 1024; + mcc_role->limit.enable = true; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC role limit: max_toa %d, max_tob %d, max_dur %d\n", + mcc_role->limit.max_toa, mcc_role->limit.max_tob, + mcc_role->limit.max_dur); +} + +static int rtw89_mcc_fill_role(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, + struct rtw89_mcc_role *role) +{ + struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); + const struct rtw89_chan *chan; + + memset(role, 0, sizeof(*role)); + role->rtwvif = rtwvif; + role->beacon_interval = vif->bss_conf.beacon_int; + + if (!role->beacon_interval) { + rtw89_warn(rtwdev, + "cannot handle MCC role without beacon interval\n"); + return -EINVAL; + } + + role->duration = role->beacon_interval / 2; + + chan = rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx); + role->is_2ghz = chan->band_type == RTW89_BAND_2G; + role->is_go = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_GO; + role->is_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT; + + rtw89_mcc_fill_role_macid_bitmap(rtwdev, role); + rtw89_mcc_fill_role_policy(rtwdev, role); + rtw89_mcc_fill_role_limit(rtwdev, role); + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC role: bcn_intvl %d, is_2ghz %d, is_go %d, is_gc %d\n", + role->beacon_interval, role->is_2ghz, role->is_go, role->is_gc); + return 0; +} + +static void rtw89_mcc_fill_bt_role(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_bt_role *bt_role = &mcc->bt_role; + + memset(bt_role, 0, sizeof(*bt_role)); + bt_role->duration = rtw89_coex_query_bt_req_len(rtwdev, RTW89_PHY_0); + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC bt role: dur %d\n", + bt_role->duration); +} + +struct rtw89_mcc_fill_role_selector { + struct rtw89_vif *bind_vif[NUM_OF_RTW89_SUB_ENTITY]; +}; + +static_assert((u8)NUM_OF_RTW89_SUB_ENTITY >= NUM_OF_RTW89_MCC_ROLES); + +static int rtw89_mcc_fill_role_iterator(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *mcc_role, + unsigned int ordered_idx, + void *data) +{ + struct rtw89_mcc_fill_role_selector *sel = data; + struct rtw89_vif *role_vif = sel->bind_vif[ordered_idx]; + int ret; + + if (!role_vif) { + rtw89_warn(rtwdev, "cannot handle MCC without role[%d]\n", + ordered_idx); + return -EINVAL; + } + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC fill role[%d] with 
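/*
 * Rough numbers for the NoA-derived limits computed above, assuming one
 * periodic NoA (count == 255) whose interval equals the beacon interval:
 * max_dur is the on-channel time left once the absence period is taken out,
 * split into time-before/after-TBTT budgets. Values are illustrative.
 */
#include <stdio.h>

int main(void)
{
	unsigned int interval_us = 100 * 1024;	/* beacon interval, 100 TU */
	unsigned int noa_dur_us  = 30 * 1024;	/* NoA absence period, 30 TU */
	unsigned int max_toa_us  = 20 * 1024;	/* from NoA start vs. TBTT */

	unsigned int max_dur_us = interval_us - noa_dur_us;	/* 70 TU usable */
	unsigned int max_tob_us = max_dur_us - max_toa_us;	/* 50 TU before TBTT */

	printf("dur %u TU, toa %u TU, tob %u TU\n",
	       max_dur_us / 1024, max_toa_us / 1024, max_tob_us / 1024);
	return 0;
}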
vif <macid %d>\n", + ordered_idx, role_vif->mac_id); + + ret = rtw89_mcc_fill_role(rtwdev, role_vif, mcc_role); + if (ret) + return ret; + + return 0; +} + +static int rtw89_mcc_fill_all_roles(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_fill_role_selector sel = {}; + struct rtw89_vif *rtwvif; + int ret; + + rtw89_for_each_rtwvif(rtwdev, rtwvif) { + if (sel.bind_vif[rtwvif->sub_entity_idx]) { + rtw89_warn(rtwdev, + "MCC skip extra vif <macid %d> on chanctx[%d]\n", + rtwvif->mac_id, rtwvif->sub_entity_idx); + continue; + } + + sel.bind_vif[rtwvif->sub_entity_idx] = rtwvif; + } + + ret = rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_fill_role_iterator, &sel); + if (ret) + return ret; + + rtw89_mcc_fill_bt_role(rtwdev); + return 0; +} + +static void rtw89_mcc_assign_pattern(struct rtw89_dev *rtwdev, + const struct rtw89_mcc_pattern *new) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_mcc_config *config = &mcc->config; + struct rtw89_mcc_pattern *pattern = &config->pattern; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC assign pattern: ref {%d | %d}, aux {%d | %d}\n", + new->tob_ref, new->toa_ref, new->tob_aux, new->toa_aux); + + *pattern = *new; + memset(&pattern->courtesy, 0, sizeof(pattern->courtesy)); + + if (pattern->tob_aux <= 0 || pattern->toa_aux <= 0) { + pattern->courtesy.macid_tgt = aux->rtwvif->mac_id; + pattern->courtesy.macid_src = ref->rtwvif->mac_id; + pattern->courtesy.slot_num = RTW89_MCC_DFLT_COURTESY_SLOT; + pattern->courtesy.enable = true; + } else if (pattern->tob_ref <= 0 || pattern->toa_ref <= 0) { + pattern->courtesy.macid_tgt = ref->rtwvif->mac_id; + pattern->courtesy.macid_src = aux->rtwvif->mac_id; + pattern->courtesy.slot_num = RTW89_MCC_DFLT_COURTESY_SLOT; + pattern->courtesy.enable = true; + } + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC pattern flags: plan %d, courtesy_en %d\n", + pattern->plan, pattern->courtesy.enable); + + if (!pattern->courtesy.enable) + return; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC pattern courtesy: tgt %d, src %d, slot %d\n", + pattern->courtesy.macid_tgt, pattern->courtesy.macid_src, + pattern->courtesy.slot_num); +} + +/* The follow-up roughly shows the relationship between the parameters + * for pattern calculation. + * + * |< duration ref >| (if mid bt) |< duration aux >| + * |< tob ref >|< toa ref >| ... |< tob aux >|< toa aux >| + * V V + * tbtt ref tbtt aux + * |< beacon offset >| + * + * In loose pattern calculation, we only ensure at least tob_ref and + * toa_ref have positive results. If tob_aux or toa_aux is negative + * unfortunately, FW will be notified to handle it with courtesy + * mechanism. + */ +static void __rtw89_mcc_calc_pattern_loose(struct rtw89_dev *rtwdev, + struct rtw89_mcc_pattern *ptrn, + bool hdl_bt) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_mcc_config *config = &mcc->config; + u16 bcn_ofst = config->beacon_offset; + u16 bt_dur_in_mid = 0; + u16 max_bcn_ofst; + s16 upper, lower; + u16 res; + + *ptrn = (typeof(*ptrn)){ + .plan = hdl_bt ? 
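/*
 * Worked instance of the loose pattern split sketched in the timing diagram
 * above (no BT slot in the middle, both roles unlimited). All values are in
 * TU and purely illustrative; note the invariant from the diagram,
 * bcn_ofst == toa_ref + bt_mid + tob_aux, holds by construction.
 */
#include <stdio.h>

int main(void)
{
	int dur_ref = 50, dur_aux = 50, bcn_ofst = 40, bt_mid = 0;

	int res   = bcn_ofst - bt_mid;
	int upper = res < dur_ref ? res : dur_ref;	/* min(dur_ref, res) = 40 */
	int lower = 0;
	int toa_ref = lower < upper ? (upper + lower) / 2 : lower;	/* 20 */
	int tob_ref = dur_ref - toa_ref;				/* 30 */
	int tob_aux = res - toa_ref;					/* 20 */
	int toa_aux = dur_aux - tob_aux;				/* 30 */

	/* all four positive, so no courtesy mechanism is needed */
	printf("ref {%d | %d}, aux {%d | %d}\n", tob_ref, toa_ref, tob_aux, toa_aux);
	return 0;
}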
RTW89_MCC_PLAN_TAIL_BT : RTW89_MCC_PLAN_NO_BT, + }; + + if (!hdl_bt) + goto calc; + + max_bcn_ofst = ref->duration + aux->duration; + if (ref->limit.enable) + max_bcn_ofst = min_t(u16, max_bcn_ofst, + ref->limit.max_toa + aux->duration); + else if (aux->limit.enable) + max_bcn_ofst = min_t(u16, max_bcn_ofst, + ref->duration + aux->limit.max_tob); + + if (bcn_ofst > max_bcn_ofst && bcn_ofst >= mcc->bt_role.duration) { + bt_dur_in_mid = mcc->bt_role.duration; + ptrn->plan = RTW89_MCC_PLAN_MID_BT; + } + +calc: + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn_ls: plan %d, bcn_ofst %d\n", + ptrn->plan, bcn_ofst); + + res = bcn_ofst - bt_dur_in_mid; + upper = min_t(s16, ref->duration, res); + lower = 0; + + if (ref->limit.enable) { + upper = min_t(s16, upper, ref->limit.max_toa); + lower = max_t(s16, lower, ref->duration - ref->limit.max_tob); + } else if (aux->limit.enable) { + upper = min_t(s16, upper, + res - (aux->duration - aux->limit.max_toa)); + lower = max_t(s16, lower, res - aux->limit.max_tob); + } + + if (lower < upper) + ptrn->toa_ref = (upper + lower) / 2; + else + ptrn->toa_ref = lower; + + ptrn->tob_ref = ref->duration - ptrn->toa_ref; + ptrn->tob_aux = res - ptrn->toa_ref; + ptrn->toa_aux = aux->duration - ptrn->tob_aux; +} + +/* In strict pattern calculation, we consider timing that might need + * for HW stuffs, i.e. min_tob and min_toa. + */ +static int __rtw89_mcc_calc_pattern_strict(struct rtw89_dev *rtwdev, + struct rtw89_mcc_pattern *ptrn) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_mcc_config *config = &mcc->config; + u16 min_tob = RTW89_MCC_EARLY_RX_BCN_TIME; + u16 min_toa = RTW89_MCC_MIN_RX_BCN_TIME; + u16 bcn_ofst = config->beacon_offset; + s16 upper_toa_ref, lower_toa_ref; + s16 upper_tob_aux, lower_tob_aux; + u16 bt_dur_in_mid; + s16 res; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn_st: plan %d, bcn_ofst %d\n", + ptrn->plan, bcn_ofst); + + if (ptrn->plan == RTW89_MCC_PLAN_MID_BT) + bt_dur_in_mid = mcc->bt_role.duration; + else + bt_dur_in_mid = 0; + + if (ref->duration < min_tob + min_toa) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn_st: not meet ref dur cond\n"); + return -EINVAL; + } + + if (aux->duration < min_tob + min_toa) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn_st: not meet aux dur cond\n"); + return -EINVAL; + } + + res = bcn_ofst - min_toa - min_tob - bt_dur_in_mid; + if (res < 0) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn_st: not meet bcn_ofst cond\n"); + return -EINVAL; + } + + upper_toa_ref = min_t(s16, min_toa + res, ref->duration - min_tob); + lower_toa_ref = min_toa; + upper_tob_aux = min_t(s16, min_tob + res, aux->duration - min_toa); + lower_tob_aux = min_tob; + + if (ref->limit.enable) { + if (min_tob > ref->limit.max_tob || min_toa > ref->limit.max_toa) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn_st: conflict ref limit\n"); + return -EINVAL; + } + + upper_toa_ref = min_t(s16, upper_toa_ref, ref->limit.max_toa); + lower_toa_ref = max_t(s16, lower_toa_ref, + ref->duration - ref->limit.max_tob); + } else if (aux->limit.enable) { + if (min_tob > aux->limit.max_tob || min_toa > aux->limit.max_toa) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn_st: conflict aux limit\n"); + return -EINVAL; + } + + upper_tob_aux = min_t(s16, upper_tob_aux, aux->limit.max_tob); + lower_tob_aux = max_t(s16, lower_tob_aux, + aux->duration - aux->limit.max_toa); + } + + upper_toa_ref 
= min_t(s16, upper_toa_ref, + bcn_ofst - bt_dur_in_mid - lower_tob_aux); + lower_toa_ref = max_t(s16, lower_toa_ref, + bcn_ofst - bt_dur_in_mid - upper_tob_aux); + if (lower_toa_ref > upper_toa_ref) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn_st: conflict boundary\n"); + return -EINVAL; + } + + ptrn->toa_ref = (upper_toa_ref + lower_toa_ref) / 2; + ptrn->tob_ref = ref->duration - ptrn->toa_ref; + ptrn->tob_aux = bcn_ofst - ptrn->toa_ref - bt_dur_in_mid; + ptrn->toa_aux = aux->duration - ptrn->tob_aux; + return 0; +} + +static int rtw89_mcc_calc_pattern(struct rtw89_dev *rtwdev, bool hdl_bt) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + bool sel_plan[NUM_OF_RTW89_MCC_PLAN] = {}; + struct rtw89_mcc_pattern ptrn; + int ret; + int i; + + if (ref->limit.enable && aux->limit.enable) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn: not support dual limited roles\n"); + return -EINVAL; + } + + if (ref->limit.enable && + ref->duration > ref->limit.max_tob + ref->limit.max_toa) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn: not fit ref limit\n"); + return -EINVAL; + } + + if (aux->limit.enable && + aux->duration > aux->limit.max_tob + aux->limit.max_toa) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn: not fit aux limit\n"); + return -EINVAL; + } + + if (hdl_bt) { + sel_plan[RTW89_MCC_PLAN_TAIL_BT] = true; + sel_plan[RTW89_MCC_PLAN_MID_BT] = true; + } else { + sel_plan[RTW89_MCC_PLAN_NO_BT] = true; + } + + for (i = 0; i < NUM_OF_RTW89_MCC_PLAN; i++) { + if (!sel_plan[i]) + continue; + + ptrn = (typeof(ptrn)){ + .plan = i, + }; + + ret = __rtw89_mcc_calc_pattern_strict(rtwdev, &ptrn); + if (ret) + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn_st with plan %d: fail\n", i); + else + goto done; + } + + __rtw89_mcc_calc_pattern_loose(rtwdev, &ptrn, hdl_bt); + +done: + rtw89_mcc_assign_pattern(rtwdev, &ptrn); + return 0; +} + +static void rtw89_mcc_set_default_pattern(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_mcc_pattern tmp = {}; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC use default pattern unexpectedly\n"); + + tmp.plan = RTW89_MCC_PLAN_NO_BT; + tmp.tob_ref = ref->duration / 2; + tmp.toa_ref = ref->duration - tmp.tob_ref; + tmp.tob_aux = aux->duration / 2; + tmp.toa_aux = aux->duration - tmp.tob_aux; + + rtw89_mcc_assign_pattern(rtwdev, &tmp); +} + +static void rtw89_mcc_set_duration_go_sta(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *role_go, + struct rtw89_mcc_role *role_sta) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_config *config = &mcc->config; + u16 mcc_intvl = config->mcc_interval; + u16 dur_go, dur_sta; + + dur_go = clamp_t(u16, role_go->duration, RTW89_MCC_MIN_GO_DURATION, + mcc_intvl - RTW89_MCC_MIN_STA_DURATION); + if (role_go->limit.enable) + dur_go = min(dur_go, role_go->limit.max_dur); + dur_sta = mcc_intvl - dur_go; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC set dur: (go, sta) {%d, %d} -> {%d, %d}\n", + role_go->duration, role_sta->duration, dur_go, dur_sta); + + role_go->duration = dur_go; + role_sta->duration = dur_sta; +} + +static void rtw89_mcc_set_duration_gc_sta(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_mcc_config *config 
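/*
 * The GO/STA split above is a clamp: the GO keeps its requested slot but may
 * never starve the STA side of the interval. Sketch with hypothetical
 * minimum durations (the real RTW89_MCC_MIN_*_DURATION values are not shown
 * in this hunk):
 */
#include <stdio.h>

int main(void)
{
	unsigned int intvl = 100, min_go = 40, min_sta = 20;	/* TU, illustrative */
	unsigned int dur_go = 90;				/* requested GO slot */

	if (dur_go < min_go)
		dur_go = min_go;
	if (dur_go > intvl - min_sta)
		dur_go = intvl - min_sta;	/* the clamp_t(u16, ...) in the hunk */
	unsigned int dur_sta = intvl - dur_go;

	printf("go %u, sta %u\n", dur_go, dur_sta);	/* go 80, sta 20 */
	return 0;
}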
= &mcc->config; + u16 mcc_intvl = config->mcc_interval; + u16 dur_ref, dur_aux; + + if (ref->duration < RTW89_MCC_MIN_STA_DURATION) { + dur_ref = RTW89_MCC_MIN_STA_DURATION; + dur_aux = mcc_intvl - dur_ref; + } else if (aux->duration < RTW89_MCC_MIN_STA_DURATION) { + dur_aux = RTW89_MCC_MIN_STA_DURATION; + dur_ref = mcc_intvl - dur_aux; + } else { + dur_ref = ref->duration; + dur_aux = mcc_intvl - dur_ref; + } + + if (ref->limit.enable) { + dur_ref = min(dur_ref, ref->limit.max_dur); + dur_aux = mcc_intvl - dur_ref; + } else if (aux->limit.enable) { + dur_aux = min(dur_aux, aux->limit.max_dur); + dur_ref = mcc_intvl - dur_aux; + } + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC set dur: (ref, aux) {%d ~ %d} -> {%d ~ %d}\n", + ref->duration, aux->duration, dur_ref, dur_aux); + + ref->duration = dur_ref; + aux->duration = dur_aux; +} + +struct rtw89_mcc_mod_dur_data { + u16 available; + struct { + u16 dur; + u16 room; + } parm[NUM_OF_RTW89_MCC_ROLES]; +}; + +static int rtw89_mcc_mod_dur_get_iterator(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *mcc_role, + unsigned int ordered_idx, + void *data) +{ + struct rtw89_mcc_mod_dur_data *p = data; + u16 min; + + p->parm[ordered_idx].dur = mcc_role->duration; + + if (mcc_role->is_go) + min = RTW89_MCC_MIN_GO_DURATION; + else + min = RTW89_MCC_MIN_STA_DURATION; + + p->parm[ordered_idx].room = max_t(s32, p->parm[ordered_idx].dur - min, 0); + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC mod dur: chk role[%u]: dur %u, min %u, room %u\n", + ordered_idx, p->parm[ordered_idx].dur, min, + p->parm[ordered_idx].room); + + p->available += p->parm[ordered_idx].room; + return 0; +} + +static int rtw89_mcc_mod_dur_put_iterator(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *mcc_role, + unsigned int ordered_idx, + void *data) +{ + struct rtw89_mcc_mod_dur_data *p = data; + + mcc_role->duration = p->parm[ordered_idx].dur; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC mod dur: set role[%u]: dur %u\n", + ordered_idx, p->parm[ordered_idx].dur); + return 0; +} + +static void rtw89_mcc_mod_duration_dual_2ghz_with_bt(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_config *config = &mcc->config; + struct rtw89_mcc_mod_dur_data data = {}; + u16 mcc_intvl = config->mcc_interval; + u16 bt_dur = mcc->bt_role.duration; + u16 wifi_dur; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC mod dur (dual 2ghz): mcc_intvl %u, raw bt_dur %u\n", + mcc_intvl, bt_dur); + + rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_mod_dur_get_iterator, &data); + + bt_dur = clamp_t(u16, bt_dur, 1, data.available / 3); + wifi_dur = mcc_intvl - bt_dur; + + if (data.parm[0].room <= data.parm[1].room) { + data.parm[0].dur -= min_t(u16, bt_dur / 2, data.parm[0].room); + data.parm[1].dur = wifi_dur - data.parm[0].dur; + } else { + data.parm[1].dur -= min_t(u16, bt_dur / 2, data.parm[1].room); + data.parm[0].dur = wifi_dur - data.parm[1].dur; + } + + rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_mod_dur_put_iterator, &data); + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC mod dur: set bt: dur %u\n", bt_dur); + mcc->bt_role.duration = bt_dur; +} + +static +void rtw89_mcc_mod_duration_diff_band_with_bt(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *role_2ghz, + struct rtw89_mcc_role *role_non_2ghz) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_config *config = &mcc->config; + u16 dur_2ghz, dur_non_2ghz; + u16 bt_dur, mcc_intvl; + + dur_2ghz = role_2ghz->duration; + dur_non_2ghz = role_non_2ghz->duration; + mcc_intvl = config->mcc_interval; + bt_dur 
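/*
 * Numbers for the dual-2GHz BT budget above: BT is granted at most a third
 * of the total slack the two roles can give up, and the role with less room
 * yields half of the BT slot while the other absorbs the remainder.
 * Illustrative userspace math:
 */
#include <stdio.h>

int main(void)
{
	unsigned int intvl = 100;			/* TU */
	unsigned int dur[2] = { 50, 50 }, room[2] = { 40, 40 };
	unsigned int available = room[0] + room[1];	/* 80 */
	unsigned int bt = 30;

	if (bt > available / 3)
		bt = available / 3;			/* clamp: bt = 26 */
	unsigned int wifi = intvl - bt;			/* 74 */

	/* role 0 has no more room than role 1, so it yields first */
	unsigned int give = bt / 2 < room[0] ? bt / 2 : room[0];
	dur[0] -= give;					/* 50 - 13 = 37 */
	dur[1] = wifi - dur[0];				/* 74 - 37 = 37 */

	printf("bt %u, dur {%u, %u}\n", bt, dur[0], dur[1]);
	return 0;
}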
= mcc->bt_role.duration; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC mod dur (diff band): mcc_intvl %u, bt_dur %u\n", + mcc_intvl, bt_dur); + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC mod dur: check dur_2ghz %u, dur_non_2ghz %u\n", + dur_2ghz, dur_non_2ghz); + + if (dur_non_2ghz >= bt_dur) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC mod dur: dur_non_2ghz is enough for bt\n"); + return; + } + + dur_non_2ghz = bt_dur; + dur_2ghz = mcc_intvl - dur_non_2ghz; + + if (role_non_2ghz->limit.enable) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC mod dur: dur_non_2ghz is limited with max %u\n", + role_non_2ghz->limit.max_dur); + + dur_non_2ghz = min(dur_non_2ghz, role_non_2ghz->limit.max_dur); + dur_2ghz = mcc_intvl - dur_non_2ghz; + } + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC mod dur: set dur_2ghz %u, dur_non_2ghz %u\n", + dur_2ghz, dur_non_2ghz); + + role_2ghz->duration = dur_2ghz; + role_non_2ghz->duration = dur_non_2ghz; +} + +static bool rtw89_mcc_duration_decision_on_bt(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_mcc_bt_role *bt_role = &mcc->bt_role; + + if (!bt_role->duration) + return false; + + if (ref->is_2ghz && aux->is_2ghz) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC dual roles are on 2GHz; consider BT duration\n"); + + rtw89_mcc_mod_duration_dual_2ghz_with_bt(rtwdev); + return true; + } + + if (!ref->is_2ghz && !aux->is_2ghz) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC dual roles are not on 2GHz; ignore BT duration\n"); + return false; + } + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC one role is on 2GHz; modify another for BT duration\n"); + + if (ref->is_2ghz) + rtw89_mcc_mod_duration_diff_band_with_bt(rtwdev, ref, aux); + else + rtw89_mcc_mod_duration_diff_band_with_bt(rtwdev, aux, ref); + + return false; +} + +static void rtw89_mcc_sync_tbtt(struct rtw89_dev *rtwdev, + struct rtw89_mcc_role *tgt, + struct rtw89_mcc_role *src, + bool ref_is_src) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_config *config = &mcc->config; + u16 beacon_offset_us = ieee80211_tu_to_usec(config->beacon_offset); + u32 bcn_intvl_src_us = ieee80211_tu_to_usec(src->beacon_interval); + u32 cur_tbtt_ofst_src; + u32 tsf_ofst_tgt; + u32 remainder; + u64 tbtt_tgt; + u64 tsf_src; + int ret; + + ret = rtw89_mac_port_get_tsf(rtwdev, src->rtwvif, &tsf_src); + if (ret) { + rtw89_warn(rtwdev, "MCC failed to get port tsf: %d\n", ret); + return; + } + + cur_tbtt_ofst_src = rtw89_mcc_get_tbtt_ofst(rtwdev, src, tsf_src); + + if (ref_is_src) + tbtt_tgt = tsf_src - cur_tbtt_ofst_src + beacon_offset_us; + else + tbtt_tgt = tsf_src - cur_tbtt_ofst_src + + (bcn_intvl_src_us - beacon_offset_us); + + div_u64_rem(tbtt_tgt, bcn_intvl_src_us, &remainder); + tsf_ofst_tgt = bcn_intvl_src_us - remainder; + + config->sync.macid_tgt = tgt->rtwvif->mac_id; + config->sync.macid_src = src->rtwvif->mac_id; + config->sync.offset = tsf_ofst_tgt / 1024; + config->sync.enable = true; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC sync tbtt: tgt %d, src %d, offset %d\n", + config->sync.macid_tgt, config->sync.macid_src, + config->sync.offset); + + rtw89_mac_port_tsf_sync(rtwdev, tgt->rtwvif, src->rtwvif, + config->sync.offset); +} + +static int rtw89_mcc_fill_start_tsf(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_config *config = &mcc->config; + u32 bcn_intvl_ref_us = 
ieee80211_tu_to_usec(ref->beacon_interval); + u32 tob_ref_us = ieee80211_tu_to_usec(config->pattern.tob_ref); + struct rtw89_vif *rtwvif = ref->rtwvif; + u64 tsf, start_tsf; + u32 cur_tbtt_ofst; + u64 min_time; + int ret; + + ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif, &tsf); + if (ret) { + rtw89_warn(rtwdev, "MCC failed to get port tsf: %d\n", ret); + return ret; + } + + min_time = tsf; + if (ref->is_go) + min_time += ieee80211_tu_to_usec(RTW89_MCC_SHORT_TRIGGER_TIME); + else + min_time += ieee80211_tu_to_usec(RTW89_MCC_LONG_TRIGGER_TIME); + + cur_tbtt_ofst = rtw89_mcc_get_tbtt_ofst(rtwdev, ref, tsf); + start_tsf = tsf - cur_tbtt_ofst + bcn_intvl_ref_us - tob_ref_us; + while (start_tsf < min_time) + start_tsf += bcn_intvl_ref_us; + + config->start_tsf = start_tsf; + return 0; +} + +static int rtw89_mcc_fill_config(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_mcc_config *config = &mcc->config; + bool hdl_bt; + int ret; + + memset(config, 0, sizeof(*config)); + + switch (mcc->mode) { + case RTW89_MCC_MODE_GO_STA: + config->beacon_offset = RTW89_MCC_DFLT_BCN_OFST_TIME; + if (ref->is_go) { + rtw89_mcc_sync_tbtt(rtwdev, ref, aux, false); + config->mcc_interval = ref->beacon_interval; + rtw89_mcc_set_duration_go_sta(rtwdev, ref, aux); + } else { + rtw89_mcc_sync_tbtt(rtwdev, aux, ref, true); + config->mcc_interval = aux->beacon_interval; + rtw89_mcc_set_duration_go_sta(rtwdev, aux, ref); + } + break; + case RTW89_MCC_MODE_GC_STA: + config->beacon_offset = rtw89_mcc_get_bcn_ofst(rtwdev); + config->mcc_interval = ref->beacon_interval; + rtw89_mcc_set_duration_gc_sta(rtwdev); + break; + default: + rtw89_warn(rtwdev, "MCC unknown mode: %d\n", mcc->mode); + return -EFAULT; + } + + hdl_bt = rtw89_mcc_duration_decision_on_bt(rtwdev); + rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC handle bt: %d\n", hdl_bt); + + ret = rtw89_mcc_calc_pattern(rtwdev, hdl_bt); + if (!ret) + goto bottom; + + rtw89_mcc_set_default_pattern(rtwdev); + +bottom: + return rtw89_mcc_fill_start_tsf(rtwdev); +} + +static int __mcc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *role) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_config *config = &mcc->config; + struct rtw89_mcc_pattern *pattern = &config->pattern; + struct rtw89_mcc_courtesy *courtesy = &pattern->courtesy; + struct rtw89_mcc_policy *policy = &role->policy; + struct rtw89_fw_mcc_add_req req = {}; + const struct rtw89_chan *chan; + int ret; + + chan = rtw89_chan_get(rtwdev, role->rtwvif->sub_entity_idx); + req.central_ch_seg0 = chan->channel; + req.primary_ch = chan->primary_channel; + req.bandwidth = chan->band_width; + req.ch_band_type = chan->band_type; + + req.macid = role->rtwvif->mac_id; + req.group = mcc->group; + req.c2h_rpt = policy->c2h_rpt; + req.tx_null_early = policy->tx_null_early; + req.dis_tx_null = policy->dis_tx_null; + req.in_curr_ch = policy->in_curr_ch; + req.sw_retry_count = policy->sw_retry_count; + req.dis_sw_retry = policy->dis_sw_retry; + req.duration = role->duration; + req.btc_in_2g = false; + + if (courtesy->enable && courtesy->macid_src == req.macid) { + req.courtesy_target = courtesy->macid_tgt; + req.courtesy_num = courtesy->slot_num; + req.courtesy_en = true; + } + + ret = rtw89_fw_h2c_add_mcc(rtwdev, &req); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC h2c failed to add wifi role: %d\n", ret); + return ret; + } + + ret = 
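/*
 * Sketch of the start-TSF alignment in rtw89_mcc_fill_start_tsf() above:
 * anchor on the next TBTT, back off by tob_ref so the reference slot starts
 * before its beacon, then push forward whole beacon intervals until the
 * trigger lead time has passed. The lead value is illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t intvl = 100 * 1024;		/* beacon interval, us */
	uint64_t tsf = 1000000, tob_ref = 30 * 1024;
	uint64_t lead = 500 * 1024;		/* trigger lead time, illustrative */

	uint64_t tbtt_ofst = tsf % intvl;	/* time since the last TBTT */
	uint64_t min_time = tsf + lead;
	uint64_t start = tsf - tbtt_ofst + intvl - tob_ref;
	while (start < min_time)
		start += intvl;			/* first slot far enough in the future */

	printf("start_tsf %llu\n", (unsigned long long)start);
	return 0;
}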
rtw89_fw_h2c_mcc_macid_bitmap(rtwdev, mcc->group, + role->rtwvif->mac_id, + role->macid_bitmap); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC h2c failed to set macid bitmap: %d\n", ret); + return ret; + } + + return 0; +} + +static int __mcc_fw_add_bt_role(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_bt_role *bt_role = &mcc->bt_role; + struct rtw89_fw_mcc_add_req req = {}; + int ret; + + req.group = mcc->group; + req.duration = bt_role->duration; + req.btc_in_2g = true; + + ret = rtw89_fw_h2c_add_mcc(rtwdev, &req); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC h2c failed to add bt role: %d\n", ret); + return ret; + } + + return 0; +} + +static int __mcc_fw_start(struct rtw89_dev *rtwdev, bool replace) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_mcc_config *config = &mcc->config; + struct rtw89_mcc_pattern *pattern = &config->pattern; + struct rtw89_mcc_sync *sync = &config->sync; + struct rtw89_fw_mcc_start_req req = {}; + int ret; + + if (replace) { + req.old_group = mcc->group; + req.old_group_action = RTW89_FW_MCC_OLD_GROUP_ACT_REPLACE; + mcc->group = RTW89_MCC_NEXT_GROUP(mcc->group); + } + + req.group = mcc->group; + + switch (pattern->plan) { + case RTW89_MCC_PLAN_TAIL_BT: + ret = __mcc_fw_add_role(rtwdev, ref); + if (ret) + return ret; + ret = __mcc_fw_add_role(rtwdev, aux); + if (ret) + return ret; + ret = __mcc_fw_add_bt_role(rtwdev); + if (ret) + return ret; + + req.btc_in_group = true; + break; + case RTW89_MCC_PLAN_MID_BT: + ret = __mcc_fw_add_role(rtwdev, ref); + if (ret) + return ret; + ret = __mcc_fw_add_bt_role(rtwdev); + if (ret) + return ret; + ret = __mcc_fw_add_role(rtwdev, aux); + if (ret) + return ret; + + req.btc_in_group = true; + break; + case RTW89_MCC_PLAN_NO_BT: + ret = __mcc_fw_add_role(rtwdev, ref); + if (ret) + return ret; + ret = __mcc_fw_add_role(rtwdev, aux); + if (ret) + return ret; + + req.btc_in_group = false; + break; + default: + rtw89_warn(rtwdev, "MCC unknown plan: %d\n", pattern->plan); + return -EFAULT; + } + + if (sync->enable) { + ret = rtw89_fw_h2c_mcc_sync(rtwdev, req.group, sync->macid_src, + sync->macid_tgt, sync->offset); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC h2c failed to trigger sync: %d\n", ret); + return ret; + } + } + + req.macid = ref->rtwvif->mac_id; + req.tsf_high = config->start_tsf >> 32; + req.tsf_low = config->start_tsf; + + ret = rtw89_fw_h2c_start_mcc(rtwdev, &req); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC h2c failed to trigger start: %d\n", ret); + return ret; + } + + return 0; +} + +static int __mcc_fw_set_duration_no_bt(struct rtw89_dev *rtwdev, bool sync_changed) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_config *config = &mcc->config; + struct rtw89_mcc_sync *sync = &config->sync; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_fw_mcc_duration req = { + .group = mcc->group, + .btc_in_group = false, + .start_macid = ref->rtwvif->mac_id, + .macid_x = ref->rtwvif->mac_id, + .macid_y = aux->rtwvif->mac_id, + .duration_x = ref->duration, + .duration_y = aux->duration, + .start_tsf_high = config->start_tsf >> 32, + .start_tsf_low = config->start_tsf, + }; + int ret; + + ret = rtw89_fw_h2c_mcc_set_duration(rtwdev, &req); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC h2c failed to set duration: %d\n", ret); + return 
+	}
+
+	if (!sync->enable || !sync_changed)
+		return 0;
+
+	ret = rtw89_fw_h2c_mcc_sync(rtwdev, mcc->group, sync->macid_src,
+				    sync->macid_tgt, sync->offset);
+	if (ret) {
+		rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+			    "MCC h2c failed to trigger sync: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void rtw89_mcc_handle_beacon_noa(struct rtw89_dev *rtwdev, bool enable)
+{
+	struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+	struct rtw89_mcc_role *ref = &mcc->role_ref;
+	struct rtw89_mcc_role *aux = &mcc->role_aux;
+	struct rtw89_mcc_config *config = &mcc->config;
+	struct rtw89_mcc_pattern *pattern = &config->pattern;
+	struct rtw89_mcc_sync *sync = &config->sync;
+	struct ieee80211_p2p_noa_desc noa_desc = {};
+	u64 start_time = config->start_tsf;
+	u32 interval = config->mcc_interval;
+	struct rtw89_vif *rtwvif_go;
+	u32 duration;
+
+	if (mcc->mode != RTW89_MCC_MODE_GO_STA)
+		return;
+
+	if (ref->is_go) {
+		rtwvif_go = ref->rtwvif;
+		start_time += ieee80211_tu_to_usec(ref->duration);
+		duration = config->mcc_interval - ref->duration;
+	} else if (aux->is_go) {
+		rtwvif_go = aux->rtwvif;
+		start_time += ieee80211_tu_to_usec(pattern->tob_ref) +
+			      ieee80211_tu_to_usec(config->beacon_offset) +
+			      ieee80211_tu_to_usec(pattern->toa_aux);
+		duration = config->mcc_interval - aux->duration;
+
+		/* convert time domain from sta(ref) to GO(aux) */
+		start_time += ieee80211_tu_to_usec(sync->offset);
+	} else {
+		rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+			    "MCC find no GO: skip updating beacon NoA\n");
+		return;
+	}
+
+	rtw89_p2p_noa_renew(rtwvif_go);
+
+	if (enable) {
+		noa_desc.start_time = cpu_to_le32(start_time);
+		noa_desc.interval = cpu_to_le32(ieee80211_tu_to_usec(interval));
+		noa_desc.duration = cpu_to_le32(ieee80211_tu_to_usec(duration));
+		noa_desc.count = 255;
+		rtw89_p2p_noa_append(rtwvif_go, &noa_desc);
+	}
+
+	/* without chanctx, we cannot get beacon from mac80211 stack */
+	if (!rtwvif_go->chanctx_assigned)
+		return;
+
+	rtw89_fw_h2c_update_beacon(rtwdev, rtwvif_go);
+}
+
+static void rtw89_mcc_start_beacon_noa(struct rtw89_dev *rtwdev)
+{
+	struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+	struct rtw89_mcc_role *ref = &mcc->role_ref;
+	struct rtw89_mcc_role *aux = &mcc->role_aux;
+
+	if (mcc->mode != RTW89_MCC_MODE_GO_STA)
+		return;
+
+	if (ref->is_go)
+		rtw89_fw_h2c_tsf32_toggle(rtwdev, ref->rtwvif, true);
+	else if (aux->is_go)
+		rtw89_fw_h2c_tsf32_toggle(rtwdev, aux->rtwvif, true);
+
+	rtw89_mcc_handle_beacon_noa(rtwdev, true);
+}
+
+static void rtw89_mcc_stop_beacon_noa(struct rtw89_dev *rtwdev)
+{
+	struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+	struct rtw89_mcc_role *ref = &mcc->role_ref;
+	struct rtw89_mcc_role *aux = &mcc->role_aux;
+
+	if (mcc->mode != RTW89_MCC_MODE_GO_STA)
+		return;
+
+	if (ref->is_go)
+		rtw89_fw_h2c_tsf32_toggle(rtwdev, ref->rtwvif, false);
+	else if (aux->is_go)
+		rtw89_fw_h2c_tsf32_toggle(rtwdev, aux->rtwvif, false);
+
+	rtw89_mcc_handle_beacon_noa(rtwdev, false);
+}
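The NoA handler above converts MCC slot parameters, held in TU, into a P2P NoA descriptor in microseconds, and a count of 255 marks the schedule as repeating indefinitely. Below is a minimal standalone sketch of that arithmetic; tu_to_usec() and the example numbers (100 TU interval, 60 TU GO slot) are assumptions for illustration, not the driver's values.

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-in for ieee80211_tu_to_usec(): 1 TU = 1024 us. */
	static uint32_t tu_to_usec(uint32_t tu)
	{
		return tu * 1024;
	}

	int main(void)
	{
		/* Assumed numbers mirroring the "ref->is_go" branch above:
		 * absence starts after the GO's own slot and lasts for the
		 * remainder of the MCC interval.
		 */
		uint32_t mcc_interval_tu = 100;
		uint32_t go_duration_tu = 60;

		uint32_t noa_interval = tu_to_usec(mcc_interval_tu);
		uint32_t noa_duration = tu_to_usec(mcc_interval_tu - go_duration_tu);

		/* count = 255 marks a continuous (repeating) NoA schedule. */
		printf("NoA: interval=%u us, duration=%u us, count=255\n",
		       noa_interval, noa_duration);
		return 0;
	}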
+static int rtw89_mcc_start(struct rtw89_dev *rtwdev)
+{
+	struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+	struct rtw89_mcc_role *ref = &mcc->role_ref;
+	struct rtw89_mcc_role *aux = &mcc->role_aux;
+	int ret;
+
+	if (rtwdev->scanning)
+		rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
+
+	rtw89_leave_lps(rtwdev);
+
+	rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC start\n");
+
+	ret = rtw89_mcc_fill_all_roles(rtwdev);
+	if (ret)
+		return ret;
+
+	if (ref->is_go || aux->is_go)
+		mcc->mode = RTW89_MCC_MODE_GO_STA;
+	else
+		mcc->mode = RTW89_MCC_MODE_GC_STA;
+
+	rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC sel mode: %d\n", mcc->mode);
+
+	mcc->group = RTW89_MCC_DFLT_GROUP;
+
+	ret = rtw89_mcc_fill_config(rtwdev);
+	if (ret)
+		return ret;
+
+	ret = __mcc_fw_start(rtwdev, false);
+	if (ret)
+		return ret;
+
+	rtw89_chanctx_notify(rtwdev, RTW89_CHANCTX_STATE_MCC_START);
+
+	rtw89_mcc_start_beacon_noa(rtwdev);
+	return 0;
+}
+
+static void rtw89_mcc_stop(struct rtw89_dev *rtwdev)
+{
+	struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+	struct rtw89_mcc_role *ref = &mcc->role_ref;
+	int ret;
+
+	rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC stop\n");
+
+	ret = rtw89_fw_h2c_stop_mcc(rtwdev, mcc->group,
+				    ref->rtwvif->mac_id, true);
+	if (ret)
+		rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+			    "MCC h2c failed to trigger stop: %d\n", ret);
+
+	ret = rtw89_fw_h2c_del_mcc_group(rtwdev, mcc->group, true);
+	if (ret)
+		rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+			    "MCC h2c failed to delete group: %d\n", ret);
+
+	rtw89_chanctx_notify(rtwdev, RTW89_CHANCTX_STATE_MCC_STOP);
+
+	rtw89_mcc_stop_beacon_noa(rtwdev);
+}
+
+static int rtw89_mcc_update(struct rtw89_dev *rtwdev)
+{
+	struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+	struct rtw89_mcc_config *config = &mcc->config;
+	struct rtw89_mcc_config old_cfg = *config;
+	bool sync_changed;
+	int ret;
+
+	if (rtwdev->scanning)
+		rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
+
+	rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC update\n");
+
+	ret = rtw89_mcc_fill_config(rtwdev);
+	if (ret)
+		return ret;
+
+	if (old_cfg.pattern.plan != RTW89_MCC_PLAN_NO_BT ||
+	    config->pattern.plan != RTW89_MCC_PLAN_NO_BT) {
+		ret = __mcc_fw_start(rtwdev, true);
+		if (ret)
+			return ret;
+	} else {
+		if (memcmp(&old_cfg.sync, &config->sync, sizeof(old_cfg.sync)) == 0)
+			sync_changed = false;
+		else
+			sync_changed = true;
+
+		ret = __mcc_fw_set_duration_no_bt(rtwdev, sync_changed);
+		if (ret)
+			return ret;
+	}
+
+	rtw89_mcc_handle_beacon_noa(rtwdev, true);
+	return 0;
+}
+
+static void rtw89_mcc_track(struct rtw89_dev *rtwdev)
+{
+	struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+	struct rtw89_mcc_config *config = &mcc->config;
+	struct rtw89_mcc_pattern *pattern = &config->pattern;
+	s16 tolerance;
+	u16 bcn_ofst;
+	u16 diff;
+
+	if (mcc->mode != RTW89_MCC_MODE_GC_STA)
+		return;
+
+	bcn_ofst = rtw89_mcc_get_bcn_ofst(rtwdev);
+	if (bcn_ofst > config->beacon_offset) {
+		diff = bcn_ofst - config->beacon_offset;
+		if (pattern->tob_aux < 0)
+			tolerance = -pattern->tob_aux;
+		else
+			tolerance = pattern->toa_aux;
+	} else {
+		diff = config->beacon_offset - bcn_ofst;
+		if (pattern->toa_aux < 0)
+			tolerance = -pattern->toa_aux;
+		else
+			tolerance = pattern->tob_aux;
+	}
+
+	if (diff <= tolerance)
+		return;
+
+	rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_BCN_OFFSET_CHANGE);
+}
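rtw89_mcc_track() above compares the measured beacon offset with the configured one and queues a pattern recalculation only when the drift exceeds the slack (tob/toa) on the matching side. A minimal sketch of that drift test follows; the struct and function names here are illustrative, not the driver's:

	#include <stdbool.h>
	#include <stdint.h>

	/* tob/toa are the time-offset-before/after slack around the aux
	 * role's slot; they may be negative, mirroring the logic above.
	 */
	struct pattern {
		int16_t tob_aux;
		int16_t toa_aux;
	};

	static bool needs_update(uint16_t measured, uint16_t configured,
				 const struct pattern *p)
	{
		uint16_t diff;
		int16_t tolerance;

		if (measured > configured) {
			diff = measured - configured;
			tolerance = p->tob_aux < 0 ? -p->tob_aux : p->toa_aux;
		} else {
			diff = configured - measured;
			tolerance = p->toa_aux < 0 ? -p->toa_aux : p->tob_aux;
		}

		return diff > tolerance;
	}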
+static int rtw89_mcc_upd_map_iterator(struct rtw89_dev *rtwdev,
+				      struct rtw89_mcc_role *mcc_role,
+				      unsigned int ordered_idx,
+				      void *data)
+{
+	struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+	struct rtw89_mcc_role upd = {
+		.rtwvif = mcc_role->rtwvif,
+	};
+	int ret;
+
+	if (!mcc_role->is_go)
+		return 0;
+
+	rtw89_mcc_fill_role_macid_bitmap(rtwdev, &upd);
+	if (memcmp(mcc_role->macid_bitmap, upd.macid_bitmap,
+		   sizeof(mcc_role->macid_bitmap)) == 0)
+		return 0;
+
+	ret = rtw89_fw_h2c_mcc_macid_bitmap(rtwdev, mcc->group,
+					    upd.rtwvif->mac_id,
+					    upd.macid_bitmap);
+	if (ret) {
+		rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+			    "MCC h2c failed to update macid bitmap: %d\n", ret);
+		return ret;
+	}
+
+	memcpy(mcc_role->macid_bitmap, upd.macid_bitmap,
+	       sizeof(mcc_role->macid_bitmap));
+	return 0;
+}
+
+static void rtw89_mcc_update_macid_bitmap(struct rtw89_dev *rtwdev)
+{
+	struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+
+	if (mcc->mode != RTW89_MCC_MODE_GO_STA)
+		return;
+
+	rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_upd_map_iterator, NULL);
+}
+
+static int rtw89_mcc_upd_lmt_iterator(struct rtw89_dev *rtwdev,
+				      struct rtw89_mcc_role *mcc_role,
+				      unsigned int ordered_idx,
+				      void *data)
+{
+	memset(&mcc_role->limit, 0, sizeof(mcc_role->limit));
+	rtw89_mcc_fill_role_limit(rtwdev, mcc_role);
+	return 0;
+}
+
+static void rtw89_mcc_update_limit(struct rtw89_dev *rtwdev)
+{
+	struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+
+	if (mcc->mode != RTW89_MCC_MODE_GC_STA)
+		return;
+
+	rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_upd_lmt_iterator, NULL);
+}
+
+void rtw89_chanctx_work(struct work_struct *work)
+{
+	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
+						chanctx_work.work);
+	struct rtw89_hal *hal = &rtwdev->hal;
+	bool update_mcc_pattern = false;
+	enum rtw89_entity_mode mode;
+	u32 changed = 0;
+	int ret;
+	int i;
+
+	mutex_lock(&rtwdev->mutex);
+
+	if (hal->entity_pause) {
+		mutex_unlock(&rtwdev->mutex);
+		return;
+	}
+
+	for (i = 0; i < NUM_OF_RTW89_CHANCTX_CHANGES; i++) {
+		if (test_and_clear_bit(i, hal->changes))
+			changed |= BIT(i);
+	}
+
+	mode = rtw89_get_entity_mode(rtwdev);
+	switch (mode) {
+	case RTW89_ENTITY_MODE_MCC_PREPARE:
+		rtw89_set_entity_mode(rtwdev, RTW89_ENTITY_MODE_MCC);
+		rtw89_set_channel(rtwdev);
+
+		ret = rtw89_mcc_start(rtwdev);
+		if (ret)
+			rtw89_warn(rtwdev, "failed to start MCC: %d\n", ret);
+		break;
+	case RTW89_ENTITY_MODE_MCC:
+		if (changed & BIT(RTW89_CHANCTX_BCN_OFFSET_CHANGE) ||
+		    changed & BIT(RTW89_CHANCTX_P2P_PS_CHANGE) ||
+		    changed & BIT(RTW89_CHANCTX_BT_SLOT_CHANGE) ||
+		    changed & BIT(RTW89_CHANCTX_TSF32_TOGGLE_CHANGE))
+			update_mcc_pattern = true;
+		if (changed & BIT(RTW89_CHANCTX_REMOTE_STA_CHANGE))
+			rtw89_mcc_update_macid_bitmap(rtwdev);
+		if (changed & BIT(RTW89_CHANCTX_P2P_PS_CHANGE))
+			rtw89_mcc_update_limit(rtwdev);
+		if (changed & BIT(RTW89_CHANCTX_BT_SLOT_CHANGE))
+			rtw89_mcc_fill_bt_role(rtwdev);
+		if (update_mcc_pattern) {
+			ret = rtw89_mcc_update(rtwdev);
+			if (ret)
+				rtw89_warn(rtwdev, "failed to update MCC: %d\n",
+					   ret);
+		}
+		break;
+	default:
+		break;
+	}
+
+	mutex_unlock(&rtwdev->mutex);
+}
+
+void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev,
+				enum rtw89_chanctx_changes change)
+{
+	struct rtw89_hal *hal = &rtwdev->hal;
+	enum rtw89_entity_mode mode;
+	u32 delay;
+
+	mode = rtw89_get_entity_mode(rtwdev);
+	switch (mode) {
+	default:
+		return;
+	case RTW89_ENTITY_MODE_MCC_PREPARE:
+		delay = ieee80211_tu_to_usec(RTW89_CHANCTX_TIME_MCC_PREPARE);
+		break;
+	case RTW89_ENTITY_MODE_MCC:
+		delay = ieee80211_tu_to_usec(RTW89_CHANCTX_TIME_MCC);
+		break;
+	}
+
+	if (change != RTW89_CHANCTX_CHANGE_DFLT) {
+		rtw89_debug(rtwdev, RTW89_DBG_CHAN, "set chanctx change %d\n",
+			    change);
+		set_bit(change, hal->changes);
+	}
+
+	rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+		    "queue chanctx work for mode %d with delay %d us\n",
+		    mode, delay);
+	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->chanctx_work,
+				     usecs_to_jiffies(delay));
+}
+
+void rtw89_queue_chanctx_work(struct rtw89_dev *rtwdev)
+{
+	rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_CHANGE_DFLT);
+}
+
+void rtw89_chanctx_track(struct rtw89_dev *rtwdev)
+{
+	struct rtw89_hal *hal = &rtwdev->hal;
+	enum rtw89_entity_mode mode;
+
+	lockdep_assert_held(&rtwdev->mutex);
+
+	if (hal->entity_pause)
+		return;
+
+	mode = rtw89_get_entity_mode(rtwdev);
+	switch (mode) {
+	case RTW89_ENTITY_MODE_MCC:
+		rtw89_mcc_track(rtwdev);
+		break;
+	default:
+		break;
+	}
+}
+
+void rtw89_chanctx_pause(struct rtw89_dev *rtwdev,
+			 enum rtw89_chanctx_pause_reasons rsn)
+{
+	struct rtw89_hal *hal = &rtwdev->hal;
+	enum rtw89_entity_mode mode;
+
+	lockdep_assert_held(&rtwdev->mutex);
+
+	if (hal->entity_pause)
+		return;
+
+	rtw89_debug(rtwdev, RTW89_DBG_CHAN, "chanctx pause (rsn: %d)\n", rsn);
+
+	mode = rtw89_get_entity_mode(rtwdev);
+	switch (mode) {
+	case RTW89_ENTITY_MODE_MCC:
+		rtw89_mcc_stop(rtwdev);
+		break;
+	default:
+		break;
+	}
+
+	hal->entity_pause = true;
+}
+
+void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev)
+{
+	struct rtw89_hal *hal = &rtwdev->hal;
+	enum rtw89_entity_mode mode;
+	int ret;
+
+	lockdep_assert_held(&rtwdev->mutex);
+
+	if (!hal->entity_pause)
+		return;
+
+	rtw89_debug(rtwdev, RTW89_DBG_CHAN, "chanctx proceed\n");
+
+	hal->entity_pause = false;
+	rtw89_set_channel(rtwdev);
+
+	mode = rtw89_get_entity_mode(rtwdev);
+	switch (mode) {
+	case RTW89_ENTITY_MODE_MCC:
+		ret = rtw89_mcc_start(rtwdev);
+		if (ret)
+			rtw89_warn(rtwdev, "failed to start MCC: %d\n", ret);
+		break;
+	default:
+		break;
+	}
+
+	rtw89_queue_chanctx_work(rtwdev);
+}
+
 int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
			   struct ieee80211_chanctx_conf *ctx)
 {
@@ -238,6 +1924,7 @@ void rtw89_chanctx_ops_remove(struct rtw89_dev *rtwdev,
 {
 	struct rtw89_hal *hal = &rtwdev->hal;
 	struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
+	enum rtw89_entity_mode mode;
 	struct rtw89_vif *rtwvif;
 	u8 drop, roll;
@@ -267,6 +1954,15 @@ void rtw89_chanctx_ops_remove(struct rtw89_dev *rtwdev,
 		drop = roll;
 
 out:
+	mode = rtw89_get_entity_mode(rtwdev);
+	switch (mode) {
+	case RTW89_ENTITY_MODE_MCC:
+		rtw89_mcc_stop(rtwdev);
+		break;
+	default:
+		break;
+	}
+
 	clear_bit(drop, hal->entity_map);
 	rtw89_set_channel(rtwdev);
 }
@@ -291,6 +1987,7 @@ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
 	struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
 
 	rtwvif->sub_entity_idx = cfg->idx;
+	rtwvif->chanctx_assigned = true;
 	return 0;
 }
@@ -299,4 +1996,5 @@ void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
 				    struct ieee80211_chanctx_conf *ctx)
 {
 	rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0;
+	rtwvif->chanctx_assigned = false;
 }
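rtw89_chanctx_work() above drains pending change bits with test_and_clear_bit() into a local mask before acting on them, so producers can keep queueing changes concurrently without updates being lost. A userspace analogue of that drain loop, using C11 atomics in place of the kernel bitops (names here are illustrative):

	#include <stdatomic.h>
	#include <stdint.h>

	static uint32_t drain_changes(_Atomic uint32_t *pending, int nbits)
	{
		uint32_t changed = 0;

		for (int i = 0; i < nbits; i++) {
			/* atomic fetch-and with ~bit returns the old word;
			 * the bit was ours to handle if it was set before
			 * we cleared it.
			 */
			uint32_t old = atomic_fetch_and(pending,
							~(UINT32_C(1) << i));
			if (old & (UINT32_C(1) << i))
				changed |= UINT32_C(1) << i;
		}

		return changed;
	}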
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
index bdf369db5041..9b98d8f4ee9d 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.h
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -7,6 +7,37 @@
 
 #include "core.h"
 
+/* The dwell time in TU before doing rtw89_chanctx_work(). */
+#define RTW89_CHANCTX_TIME_MCC_PREPARE 100
+#define RTW89_CHANCTX_TIME_MCC 100
+
+/* various MCC setting time in TU */
+#define RTW89_MCC_LONG_TRIGGER_TIME 300
+#define RTW89_MCC_SHORT_TRIGGER_TIME 100
+#define RTW89_MCC_EARLY_TX_BCN_TIME 10
+#define RTW89_MCC_EARLY_RX_BCN_TIME 5
+#define RTW89_MCC_MIN_RX_BCN_TIME 10
+#define RTW89_MCC_DFLT_BCN_OFST_TIME 40
+
+#define RTW89_MCC_MIN_GO_DURATION \
+	(RTW89_MCC_EARLY_TX_BCN_TIME + RTW89_MCC_MIN_RX_BCN_TIME)
+
+#define RTW89_MCC_MIN_STA_DURATION \
+	(RTW89_MCC_EARLY_RX_BCN_TIME + RTW89_MCC_MIN_RX_BCN_TIME)
+
+#define RTW89_MCC_DFLT_GROUP 0
+#define RTW89_MCC_NEXT_GROUP(cur) (((cur) + 1) % 4)
+
+#define RTW89_MCC_DFLT_TX_NULL_EARLY 3
+#define RTW89_MCC_DFLT_COURTESY_SLOT 3
+
+#define NUM_OF_RTW89_MCC_ROLES 2
+
+enum rtw89_chanctx_pause_reasons {
+	RTW89_CHANCTX_PAUSE_REASON_HW_SCAN,
+	RTW89_CHANCTX_PAUSE_REASON_ROC,
+};
+
 static inline bool rtw89_get_entity_state(struct rtw89_dev *rtwdev)
 {
 	struct rtw89_hal *hal = &rtwdev->hal;
@@ -50,6 +81,14 @@ void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev,
			       const struct cfg80211_chan_def *chandef);
 void rtw89_entity_init(struct rtw89_dev *rtwdev);
 enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev);
+void rtw89_chanctx_work(struct work_struct *work);
+void rtw89_queue_chanctx_work(struct rtw89_dev *rtwdev);
+void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev,
+				enum rtw89_chanctx_changes change);
+void rtw89_chanctx_track(struct rtw89_dev *rtwdev);
+void rtw89_chanctx_pause(struct rtw89_dev *rtwdev,
+			 enum rtw89_chanctx_pause_reasons rsn);
+void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev);
 int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
			   struct ieee80211_chanctx_conf *ctx);
 void rtw89_chanctx_ops_remove(struct rtw89_dev *rtwdev,
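The chan.h macros above cap the firmware MCC groups at four, rotating via modulo when a running group is replaced, and derive the minimum per-role slot durations from the early/minimum beacon windows. A tiny standalone check of those relations (macro names shortened for the sketch):

	#include <assert.h>

	#define MCC_NEXT_GROUP(cur)	(((cur) + 1) % 4)

	#define EARLY_TX_BCN_TIME	10	/* TU */
	#define EARLY_RX_BCN_TIME	5	/* TU */
	#define MIN_RX_BCN_TIME		10	/* TU */

	#define MIN_GO_DURATION		(EARLY_TX_BCN_TIME + MIN_RX_BCN_TIME)
	#define MIN_STA_DURATION	(EARLY_RX_BCN_TIME + MIN_RX_BCN_TIME)

	int main(void)
	{
		/* Replacing group 3 wraps back to group 0. */
		assert(MCC_NEXT_GROUP(3) == 0);
		assert(MIN_GO_DURATION == 20 && MIN_STA_DURATION == 15);
		return 0;
	}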
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index bda0e1e99a8c..bdcc172639e4 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -237,13 +237,13 @@ struct rtw89_btc_btf_set_report {
 struct rtw89_btc_btf_set_slot_table {
 	u8 fver;
 	u8 tbl_num;
-	u8 buf[];
+	struct rtw89_btc_fbtc_slot tbls[] __counted_by(tbl_num);
 } __packed;
 
 struct rtw89_btc_btf_set_mon_reg {
 	u8 fver;
 	u8 reg_num;
-	u8 buf[];
+	struct rtw89_btc_fbtc_mreg regs[] __counted_by(reg_num);
 } __packed;
 
 enum btc_btf_set_cx_policy {
@@ -1821,19 +1821,17 @@ static void rtw89_btc_fw_en_rpt(struct rtw89_dev *rtwdev,
 static void rtw89_btc_fw_set_slots(struct rtw89_dev *rtwdev, u8 num,
				    struct rtw89_btc_fbtc_slot *s)
 {
-	struct rtw89_btc_btf_set_slot_table *tbl = NULL;
-	u8 *ptr = NULL;
-	u16 n = 0;
+	struct rtw89_btc_btf_set_slot_table *tbl;
+	u16 n;
 
-	n = sizeof(*s) * num + sizeof(*tbl);
+	n = struct_size(tbl, tbls, num);
 	tbl = kmalloc(n, GFP_KERNEL);
 	if (!tbl)
 		return;
 
 	tbl->fver = BTF_SET_SLOT_TABLE_VER;
 	tbl->tbl_num = num;
-	ptr = &tbl->buf[0];
-	memcpy(ptr, s, num * sizeof(*s));
+	memcpy(tbl->tbls, s, flex_array_size(tbl, tbls, num));
 
 	_send_fw_cmd(rtwdev, BTFC_SET, SET_SLOT_TABLE, tbl, n);
 
@@ -1845,7 +1843,7 @@ static void btc_fw_set_monreg(struct rtw89_dev *rtwdev)
 	const struct rtw89_chip_info *chip = rtwdev->chip;
 	const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
 	struct rtw89_btc_btf_set_mon_reg *monreg = NULL;
-	u8 n, *ptr = NULL, ulen, cxmreg_max;
+	u8 n, ulen, cxmreg_max;
 	u16 sz = 0;
 
 	n = chip->mon_reg_num;
@@ -1866,16 +1864,15 @@ static void btc_fw_set_monreg(struct rtw89_dev *rtwdev)
 		return;
 	}
 
-	ulen = sizeof(struct rtw89_btc_fbtc_mreg);
-	sz = (ulen * n) + sizeof(*monreg);
+	ulen = sizeof(monreg->regs[0]);
+	sz = struct_size(monreg, regs, n);
 	monreg = kmalloc(sz, GFP_KERNEL);
 	if (!monreg)
 		return;
 
 	monreg->fver = ver->fcxmreg;
 	monreg->reg_num = n;
-	ptr = &monreg->buf[0];
-	memcpy(ptr, chip->mon_reg, n * ulen);
+	memcpy(monreg->regs, chip->mon_reg, flex_array_size(monreg, regs, n));
 	rtw89_debug(rtwdev, RTW89_DBG_BTC,
		     "[BTC], %s(): sz=%d ulen=%d n=%d\n",
		     __func__, sz, ulen, n);
@@ -3840,7 +3837,7 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
 	if (mode == BTC_WLINK_25G_MCC)
 		return;
 
-	rtw89_ctrl_btg(rtwdev, is_btg);
+	rtw89_ctrl_btg_bt_rx(rtwdev, is_btg, RTW89_PHY_0);
 }
 
 struct rtw89_txtime_data {
@@ -5666,7 +5663,8 @@ enum btc_wl_mode {
 void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			       struct rtw89_sta *rtwsta, enum btc_role_state state)
 {
-	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+						       rtwvif->sub_entity_idx);
 	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
 	struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
 	struct rtw89_btc *btc = &rtwdev->btc;
diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h
index f16421cb30ef..e76153709793 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.h
+++ b/drivers/net/wireless/realtek/rtw89/coex.h
@@ -193,4 +193,13 @@ static inline u8 rtw89_btc_path_phymap(struct rtw89_dev *rtwdev,
 	return rtw89_btc_phymap(rtwdev, phy_idx, BIT(path));
 }
 
+/* return bt req len in TU */
+static inline u16 rtw89_coex_query_bt_req_len(struct rtw89_dev *rtwdev,
+					      enum rtw89_phy_idx phy_idx)
+{
+	struct rtw89_btc *btc = &rtwdev->btc;
+
+	return btc->bt_req_len;
+}
+
 #endif
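The coex.c hunks above replace open-coded "header plus n * element" size math with the kernel's struct_size()/flex_array_size() helpers, which pair with __counted_by() annotations and saturate on overflow. A userspace approximation of the pattern (without the kernel's overflow saturation; all names here are illustrative):

	#include <stdlib.h>
	#include <string.h>

	struct slot { int dur; int cxtbl; };

	struct slot_table {
		unsigned char fver;
		unsigned char tbl_num;
		struct slot tbls[];	/* __counted_by(tbl_num) in the kernel */
	};

	/* sizeof-only use of p, so an uninitialized pointer is fine here */
	#define STRUCT_SIZE(p, member, n) \
		(sizeof(*(p)) + sizeof((p)->member[0]) * (n))

	static struct slot_table *build_table(const struct slot *src,
					      unsigned char num)
	{
		struct slot_table *tbl;

		tbl = malloc(STRUCT_SIZE(tbl, tbls, num));
		if (!tbl)
			return NULL;

		tbl->fver = 1;
		tbl->tbl_num = num;
		memcpy(tbl->tbls, src, sizeof(src[0]) * num);
		return tbl;
	}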
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index 69b181fa2966..3d75165e48be 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -172,13 +172,31 @@ static const struct ieee80211_iface_limit rtw89_iface_limits[] = {
 	},
 };
 
+static const struct ieee80211_iface_limit rtw89_iface_limits_mcc[] = {
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
+			 BIT(NL80211_IFTYPE_P2P_GO),
+	},
+};
+
 static const struct ieee80211_iface_combination rtw89_iface_combs[] = {
 	{
 		.limits = rtw89_iface_limits,
 		.n_limits = ARRAY_SIZE(rtw89_iface_limits),
 		.max_interfaces = 2,
 		.num_different_channels = 1,
-	}
+	},
+	{
+		.limits = rtw89_iface_limits_mcc,
+		.n_limits = ARRAY_SIZE(rtw89_iface_limits_mcc),
+		.max_interfaces = 2,
+		.num_different_channels = 2,
+	},
 };
 
 bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate)
@@ -256,8 +274,8 @@ void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef)
			 NL80211_CHAN_NO_HT);
 }
 
-static void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef,
-				     struct rtw89_chan *chan)
+void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef,
+			      struct rtw89_chan *chan)
 {
 	struct ieee80211_channel *channel = chandef->chan;
 	enum nl80211_chan_width width = chandef->width;
@@ -318,9 +336,11 @@ static void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef,
 
 void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev)
 {
+	struct rtw89_hal *hal = &rtwdev->hal;
 	const struct rtw89_chip_info *chip = rtwdev->chip;
 	const struct rtw89_chan *chan;
 	enum rtw89_sub_entity_idx sub_entity_idx;
+	enum rtw89_sub_entity_idx roc_idx;
 	enum rtw89_phy_idx phy_idx;
 	enum rtw89_entity_mode mode;
 	bool entity_active;
@@ -330,10 +350,23 @@ void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev)
 		return;
 
 	mode = rtw89_get_entity_mode(rtwdev);
-	if (WARN(mode != RTW89_ENTITY_MODE_SCC, "Invalid ent mode: %d\n", mode))
+	switch (mode) {
+	case RTW89_ENTITY_MODE_SCC:
+	case RTW89_ENTITY_MODE_MCC:
+		sub_entity_idx = RTW89_SUB_ENTITY_0;
+		break;
+	case RTW89_ENTITY_MODE_MCC_PREPARE:
+		sub_entity_idx = RTW89_SUB_ENTITY_1;
+		break;
+	default:
+		WARN(1, "Invalid ent mode: %d\n", mode);
 		return;
+	}
+
+	roc_idx = atomic_read(&hal->roc_entity_idx);
+	if (roc_idx != RTW89_SUB_ENTITY_IDLE)
+		sub_entity_idx = roc_idx;
 
-	sub_entity_idx = RTW89_SUB_ENTITY_0;
 	phy_idx = RTW89_PHY_0;
 	chan = rtw89_chan_get(rtwdev, sub_entity_idx);
 	chip->ops->set_txpwr(rtwdev, chan, phy_idx);
@@ -341,43 +374,54 @@ void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev)
 
 void rtw89_set_channel(struct rtw89_dev *rtwdev)
 {
+	struct rtw89_hal *hal = &rtwdev->hal;
 	const struct rtw89_chip_info *chip = rtwdev->chip;
-	const struct cfg80211_chan_def *chandef;
+	const struct rtw89_chan_rcd *chan_rcd;
+	const struct rtw89_chan *chan;
 	enum rtw89_sub_entity_idx sub_entity_idx;
+	enum rtw89_sub_entity_idx roc_idx;
 	enum rtw89_mac_idx mac_idx;
 	enum rtw89_phy_idx phy_idx;
-	struct rtw89_chan chan;
 	struct rtw89_channel_help_params bak;
 	enum rtw89_entity_mode mode;
-	bool band_changed;
 	bool entity_active;
 
 	entity_active = rtw89_get_entity_state(rtwdev);
 
 	mode = rtw89_entity_recalc(rtwdev);
-	if (WARN(mode != RTW89_ENTITY_MODE_SCC, "Invalid ent mode: %d\n", mode))
+	switch (mode) {
+	case RTW89_ENTITY_MODE_SCC:
+	case RTW89_ENTITY_MODE_MCC:
+		sub_entity_idx = RTW89_SUB_ENTITY_0;
+		break;
+	case RTW89_ENTITY_MODE_MCC_PREPARE:
+		sub_entity_idx = RTW89_SUB_ENTITY_1;
+		break;
+	default:
+		WARN(1, "Invalid ent mode: %d\n", mode);
 		return;
+	}
+
+	roc_idx = atomic_read(&hal->roc_entity_idx);
+	if (roc_idx != RTW89_SUB_ENTITY_IDLE)
+		sub_entity_idx = roc_idx;
 
-	sub_entity_idx = RTW89_SUB_ENTITY_0;
 	mac_idx = RTW89_MAC_0;
 	phy_idx = RTW89_PHY_0;
 
-	chandef = rtw89_chandef_get(rtwdev, sub_entity_idx);
-	rtw89_get_channel_params(chandef, &chan);
-	if (WARN(chan.channel == 0, "Invalid channel\n"))
-		return;
-
-	band_changed = rtw89_assign_entity_chan(rtwdev, sub_entity_idx, &chan);
+	chan = rtw89_chan_get(rtwdev, sub_entity_idx);
+	chan_rcd = rtw89_chan_rcd_get(rtwdev, sub_entity_idx);
 
-	rtw89_chip_set_channel_prepare(rtwdev, &bak, &chan, mac_idx, phy_idx);
+	rtw89_chip_set_channel_prepare(rtwdev, &bak, chan, mac_idx, phy_idx);
 
-	chip->ops->set_channel(rtwdev, &chan, mac_idx, phy_idx);
+	chip->ops->set_channel(rtwdev, chan, mac_idx, phy_idx);
 
-	chip->ops->set_txpwr(rtwdev, &chan, phy_idx);
+	chip->ops->set_txpwr(rtwdev, chan, phy_idx);
 
-	rtw89_chip_set_channel_done(rtwdev, &bak, &chan, mac_idx, phy_idx);
+	rtw89_chip_set_channel_done(rtwdev, &bak, chan, mac_idx, phy_idx);
 
-	if (!entity_active || band_changed) {
-		rtw89_btc_ntfy_switch_band(rtwdev, phy_idx, chan.band_type);
+	if (!entity_active || chan_rcd->band_changed) {
+		rtw89_btc_ntfy_switch_band(rtwdev, phy_idx, chan->band_type);
 		rtw89_chip_rfk_band_changed(rtwdev, phy_idx);
 	}
@@ -523,12 +567,12 @@
 }
 
 static u16 rtw89_core_get_mgmt_rate(struct rtw89_dev *rtwdev,
-				    struct rtw89_core_tx_request *tx_req)
+				    struct rtw89_core_tx_request *tx_req,
+				    const struct rtw89_chan *chan)
 {
 	struct sk_buff *skb = tx_req->skb;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_vif *vif = tx_info->control.vif;
-	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
 	u16 lowest_rate;
 
 	if (tx_info->flags & IEEE80211_TX_CTL_NO_CCK_RATE ||
@@ -567,7 +611,8 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
 	struct ieee80211_vif *vif = tx_req->vif;
 	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
 	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
-	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+						       rtwvif->sub_entity_idx);
 	u8 qsel, ch_dma;
 
 	qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : RTW89_TX_QSEL_B0_MGMT;
@@ -584,7 +629,7 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
 	desc_info->en_wd_info = true;
 	desc_info->use_rate = true;
 	desc_info->dis_data_fb = true;
-	desc_info->data_rate = rtw89_core_get_mgmt_rate(rtwdev, tx_req);
+	desc_info->data_rate = rtw89_core_get_mgmt_rate(rtwdev, tx_req, chan);
 
 	rtw89_debug(rtwdev, RTW89_DBG_TXRX,
		     "tx mgmt frame with rate 0x%x on channel %d (band %d, bw %d)\n",
@@ -603,7 +648,8 @@ rtw89_core_tx_update_h2c_info(struct rtw89_dev *rtwdev,
 	desc_info->ch_dma = RTW89_DMA_H2C;
 }
 
-static void rtw89_core_get_no_ul_ofdma_htc(struct rtw89_dev *rtwdev, __le32 *htc)
+static void rtw89_core_get_no_ul_ofdma_htc(struct rtw89_dev *rtwdev, __le32 *htc,
+					   const struct rtw89_chan *chan)
 {
 	static const u8 rtw89_bandwidth_to_om[] = {
 		[RTW89_CHANNEL_WIDTH_20] = HTC_OM_CHANNEL_WIDTH_20,
@@ -614,7 +660,6 @@ static void rtw89_core_get_no_ul_ofdma_htc(struct rtw89_dev *rtwdev, __le32 *htc
 	};
 	const struct rtw89_chip_info *chip = rtwdev->chip;
 	struct rtw89_hal *hal = &rtwdev->hal;
-	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
 	u8 om_bandwidth;
 
 	if (!chip->dis_2g_40m_ul_ofdma ||
@@ -1188,6 +1233,136 @@ void rtw89_core_fill_txdesc_v1(struct rtw89_dev *rtwdev,
 }
 EXPORT_SYMBOL(rtw89_core_fill_txdesc_v1);
 
+static __le32 rtw89_build_txwd_body0_v2(struct rtw89_tx_desc_info *desc_info)
+{
+	u32 dword = FIELD_PREP(BE_TXD_BODY0_WP_OFFSET_V1, desc_info->wp_offset) |
+		    FIELD_PREP(BE_TXD_BODY0_WDINFO_EN, desc_info->en_wd_info) |
+		    FIELD_PREP(BE_TXD_BODY0_CH_DMA, desc_info->ch_dma) |
+		    FIELD_PREP(BE_TXD_BODY0_HDR_LLC_LEN, desc_info->hdr_llc_len) |
+		    FIELD_PREP(BE_TXD_BODY0_WD_PAGE, desc_info->wd_page);
+
+	return cpu_to_le32(dword);
+}
+
+static __le32 rtw89_build_txwd_body1_v2(struct rtw89_tx_desc_info *desc_info)
+{
+	u32 dword = FIELD_PREP(BE_TXD_BODY1_ADDR_INFO_NUM, desc_info->addr_info_nr) |
+		    FIELD_PREP(BE_TXD_BODY1_SEC_KEYID, desc_info->sec_keyid) |
+		    FIELD_PREP(BE_TXD_BODY1_SEC_TYPE, desc_info->sec_type);
+
+	return cpu_to_le32(dword);
+}
+
+static __le32 rtw89_build_txwd_body2_v2(struct rtw89_tx_desc_info *desc_info)
+{
+	u32 dword = FIELD_PREP(BE_TXD_BODY2_TID_IND, desc_info->tid_indicate) |
+		    FIELD_PREP(BE_TXD_BODY2_QSEL, desc_info->qsel) |
+		    FIELD_PREP(BE_TXD_BODY2_TXPKTSIZE, desc_info->pkt_size) |
+		    FIELD_PREP(BE_TXD_BODY2_AGG_EN, desc_info->agg_en) |
+		    FIELD_PREP(BE_TXD_BODY2_BK, desc_info->bk) |
+		    FIELD_PREP(BE_TXD_BODY2_MACID, desc_info->mac_id);
+
+	return cpu_to_le32(dword);
+}
+
+static __le32 rtw89_build_txwd_body3_v2(struct rtw89_tx_desc_info *desc_info)
+{
+	u32 dword = FIELD_PREP(BE_TXD_BODY3_WIFI_SEQ, desc_info->seq);
+
+	return cpu_to_le32(dword);
+}
+
+static __le32 rtw89_build_txwd_body4_v2(struct rtw89_tx_desc_info *desc_info)
+{
+	u32 dword = FIELD_PREP(BE_TXD_BODY4_SEC_IV_L0, desc_info->sec_seq[0]) |
+		    FIELD_PREP(BE_TXD_BODY4_SEC_IV_L1, desc_info->sec_seq[1]);
+
+	return cpu_to_le32(dword);
+}
+
+static __le32 rtw89_build_txwd_body5_v2(struct rtw89_tx_desc_info *desc_info)
+{
+	u32 dword = FIELD_PREP(BE_TXD_BODY5_SEC_IV_H2, desc_info->sec_seq[2]) |
+		    FIELD_PREP(BE_TXD_BODY5_SEC_IV_H3, desc_info->sec_seq[3]) |
+		    FIELD_PREP(BE_TXD_BODY5_SEC_IV_H4, desc_info->sec_seq[4]) |
+		    FIELD_PREP(BE_TXD_BODY5_SEC_IV_H5, desc_info->sec_seq[5]);
+
+	return cpu_to_le32(dword);
+}
+
+static __le32 rtw89_build_txwd_body7_v2(struct rtw89_tx_desc_info *desc_info)
+{
+	u32 dword = FIELD_PREP(BE_TXD_BODY7_USERATE_SEL, desc_info->use_rate) |
+		    FIELD_PREP(BE_TXD_BODY7_DATA_ER, desc_info->er_cap) |
+		    FIELD_PREP(BE_TXD_BODY7_DATA_BW_ER, 0) |
+		    FIELD_PREP(BE_TXD_BODY7_DATARATE, desc_info->data_rate);
+
+	return cpu_to_le32(dword);
+}
+
+static __le32 rtw89_build_txwd_info0_v2(struct rtw89_tx_desc_info *desc_info)
+{
+	u32 dword = FIELD_PREP(BE_TXD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
+		    FIELD_PREP(BE_TXD_INFO0_MULTIPORT_ID, desc_info->port);
+
+	return cpu_to_le32(dword);
+}
+
+static __le32 rtw89_build_txwd_info1_v2(struct rtw89_tx_desc_info *desc_info)
+{
+	u32 dword = FIELD_PREP(BE_TXD_INFO1_MAX_AGG_NUM, desc_info->ampdu_num) |
+		    FIELD_PREP(BE_TXD_INFO1_A_CTRL_BSR, desc_info->a_ctrl_bsr) |
+		    FIELD_PREP(BE_TXD_INFO1_DATA_RTY_LOWEST_RATE,
+			       desc_info->data_retry_lowest_rate);
+
+	return cpu_to_le32(dword);
+}
+
+static __le32 rtw89_build_txwd_info2_v2(struct rtw89_tx_desc_info *desc_info)
+{
+	u32 dword = FIELD_PREP(BE_TXD_INFO2_AMPDU_DENSITY, desc_info->ampdu_density) |
+		    FIELD_PREP(BE_TXD_INFO2_FORCE_KEY_EN, desc_info->sec_en) |
+		    FIELD_PREP(BE_TXD_INFO2_SEC_CAM_IDX, desc_info->sec_cam_idx);
+
+	return cpu_to_le32(dword);
+}
+
+static __le32 rtw89_build_txwd_info4_v2(struct rtw89_tx_desc_info *desc_info)
+{
+	u32 dword = FIELD_PREP(BE_TXD_INFO4_RTS_EN, 1) |
+		    FIELD_PREP(BE_TXD_INFO4_HW_RTS_EN, 1);
+
+	return cpu_to_le32(dword);
+}
+
+void rtw89_core_fill_txdesc_v2(struct rtw89_dev *rtwdev,
+			       struct rtw89_tx_desc_info *desc_info,
+			       void *txdesc)
+{
+	struct rtw89_txwd_body_v2 *txwd_body = txdesc;
+	struct rtw89_txwd_info_v2 *txwd_info;
+
+	txwd_body->dword0 = rtw89_build_txwd_body0_v2(desc_info);
+	txwd_body->dword1 = rtw89_build_txwd_body1_v2(desc_info);
+	txwd_body->dword2 = rtw89_build_txwd_body2_v2(desc_info);
+	txwd_body->dword3 = rtw89_build_txwd_body3_v2(desc_info);
+	if (desc_info->sec_en) {
+		txwd_body->dword4 = rtw89_build_txwd_body4_v2(desc_info);
+		txwd_body->dword5 = rtw89_build_txwd_body5_v2(desc_info);
+	}
+	txwd_body->dword7 = rtw89_build_txwd_body7_v2(desc_info);
+
+	if (!desc_info->en_wd_info)
+		return;
+
+	txwd_info = (struct rtw89_txwd_info_v2 *)(txwd_body + 1);
+	txwd_info->dword0 = rtw89_build_txwd_info0_v2(desc_info);
+	txwd_info->dword1 = rtw89_build_txwd_info1_v2(desc_info);
+	txwd_info->dword2 = rtw89_build_txwd_info2_v2(desc_info);
+	txwd_info->dword4 = rtw89_build_txwd_info4_v2(desc_info);
+}
+EXPORT_SYMBOL(rtw89_core_fill_txdesc_v2);
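Each dword builder above ORs FIELD_PREP() results into one 32-bit word and converts it to little-endian with cpu_to_le32(). A hand-rolled userspace equivalent of the packing step, with made-up masks (not the real BE_TXD_* layout):

	#include <stdint.h>

	#define GENMASK32(h, l) \
		(((~UINT32_C(0)) >> (31 - (h))) & (~UINT32_C(0) << (l)))

	/* shift the value up to the lowest set bit of the mask, then mask */
	static uint32_t field_prep(uint32_t mask, uint32_t val)
	{
		unsigned int shift = __builtin_ctz(mask);

		return (val << shift) & mask;
	}

	static uint32_t build_dword(uint32_t qsel, uint32_t pkt_size,
				    uint32_t mac_id)
	{
		return field_prep(GENMASK32(31, 24), qsel) |
		       field_prep(GENMASK32(23, 8), pkt_size) |
		       field_prep(GENMASK32(7, 0), mac_id);
	}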
+
 static __le32 rtw89_build_txwd_fwcmd0_v1(struct rtw89_tx_desc_info *desc_info)
 {
 	u32 dword = FIELD_PREP(AX_RXD_RPKT_LEN_MASK, desc_info->pkt_size) |
@@ -1208,6 +1383,26 @@ void rtw89_core_fill_txdesc_fwcmd_v1(struct rtw89_dev *rtwdev,
 }
 EXPORT_SYMBOL(rtw89_core_fill_txdesc_fwcmd_v1);
 
+static __le32 rtw89_build_txwd_fwcmd0_v2(struct rtw89_tx_desc_info *desc_info)
+{
+	u32 dword = FIELD_PREP(BE_RXD_RPKT_LEN_MASK, desc_info->pkt_size) |
+		    FIELD_PREP(BE_RXD_RPKT_TYPE_MASK, desc_info->fw_dl ?
+						      RTW89_CORE_RX_TYPE_FWDL :
+						      RTW89_CORE_RX_TYPE_H2C);
+
+	return cpu_to_le32(dword);
+}
+
+void rtw89_core_fill_txdesc_fwcmd_v2(struct rtw89_dev *rtwdev,
+				     struct rtw89_tx_desc_info *desc_info,
+				     void *txdesc)
+{
+	struct rtw89_rxdesc_short_v2 *txwd_v2 = (struct rtw89_rxdesc_short_v2 *)txdesc;
+
+	txwd_v2->dword0 = rtw89_build_txwd_fwcmd0_v2(desc_info);
+}
+EXPORT_SYMBOL(rtw89_core_fill_txdesc_fwcmd_v2);
+
 static int rtw89_core_rx_process_mac_ppdu(struct rtw89_dev *rtwdev,
					   struct sk_buff *skb,
					   struct rtw89_rx_phy_ppdu *phy_ppdu)
@@ -1426,55 +1621,73 @@ static void rtw89_core_rx_process_phy_sts(struct rtw89_dev *rtwdev,
			 phy_ppdu);
 }
 
-static u8 rtw89_rxdesc_to_nl_he_gi(struct rtw89_dev *rtwdev,
-				   const struct rtw89_rx_desc_info *desc_info,
-				   bool rx_status)
+static u8 rtw89_rxdesc_to_nl_he_eht_gi(struct rtw89_dev *rtwdev,
+				       u8 desc_info_gi,
+				       bool rx_status, bool eht)
 {
-	switch (desc_info->gi_ltf) {
+	switch (desc_info_gi) {
 	case RTW89_GILTF_SGI_4XHE08:
 	case RTW89_GILTF_2XHE08:
 	case RTW89_GILTF_1XHE08:
-		return NL80211_RATE_INFO_HE_GI_0_8;
+		return eht ? NL80211_RATE_INFO_EHT_GI_0_8 :
+			     NL80211_RATE_INFO_HE_GI_0_8;
 	case RTW89_GILTF_2XHE16:
 	case RTW89_GILTF_1XHE16:
-		return NL80211_RATE_INFO_HE_GI_1_6;
+		return eht ? NL80211_RATE_INFO_EHT_GI_1_6 :
+			     NL80211_RATE_INFO_HE_GI_1_6;
 	case RTW89_GILTF_LGI_4XHE32:
-		return NL80211_RATE_INFO_HE_GI_3_2;
+		return eht ? NL80211_RATE_INFO_EHT_GI_3_2 :
+			     NL80211_RATE_INFO_HE_GI_3_2;
 	default:
-		rtw89_warn(rtwdev, "invalid gi_ltf=%d", desc_info->gi_ltf);
-		return rx_status ? NL80211_RATE_INFO_HE_GI_3_2 : U8_MAX;
+		rtw89_warn(rtwdev, "invalid gi_ltf=%d", desc_info_gi);
+		if (rx_status)
+			return eht ? NL80211_RATE_INFO_EHT_GI_3_2 :
+				     NL80211_RATE_INFO_HE_GI_3_2;
+		return U8_MAX;
 	}
 }
 
+static
+bool rtw89_check_rx_statu_gi_match(struct ieee80211_rx_status *status, u8 gi_ltf,
+				   bool eht)
+{
+	if (eht)
+		return status->eht.gi == gi_ltf;
+
+	return status->he_gi == gi_ltf;
+}
+
 static bool rtw89_core_rx_ppdu_match(struct rtw89_dev *rtwdev,
				      struct rtw89_rx_desc_info *desc_info,
				      struct ieee80211_rx_status *status)
 {
 	u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0;
 	u8 data_rate_mode, bw, rate_idx = MASKBYTE0, gi_ltf;
+	bool eht = false;
 	u16 data_rate;
 	bool ret;
 
 	data_rate = desc_info->data_rate;
-	data_rate_mode = GET_DATA_RATE_MODE(data_rate);
+	data_rate_mode = rtw89_get_data_rate_mode(rtwdev, data_rate);
 	if (data_rate_mode == DATA_RATE_MODE_NON_HT) {
-		rate_idx = GET_DATA_RATE_NOT_HT_IDX(data_rate);
+		rate_idx = rtw89_get_data_not_ht_idx(rtwdev, data_rate);
 		/* rate_idx is still hardware value here */
 	} else if (data_rate_mode == DATA_RATE_MODE_HT) {
-		rate_idx = GET_DATA_RATE_HT_IDX(data_rate);
-	} else if (data_rate_mode == DATA_RATE_MODE_VHT) {
-		rate_idx = GET_DATA_RATE_VHT_HE_IDX(data_rate);
-	} else if (data_rate_mode == DATA_RATE_MODE_HE) {
-		rate_idx = GET_DATA_RATE_VHT_HE_IDX(data_rate);
+		rate_idx = rtw89_get_data_ht_mcs(rtwdev, data_rate);
+	} else if (data_rate_mode == DATA_RATE_MODE_VHT ||
+		   data_rate_mode == DATA_RATE_MODE_HE ||
+		   data_rate_mode == DATA_RATE_MODE_EHT) {
+		rate_idx = rtw89_get_data_mcs(rtwdev, data_rate);
 	} else {
 		rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode);
 	}
 
+	eht = data_rate_mode == DATA_RATE_MODE_EHT;
 	bw = rtw89_hw_to_rate_info_bw(desc_info->bw);
-	gi_ltf = rtw89_rxdesc_to_nl_he_gi(rtwdev, desc_info, false);
+	gi_ltf = rtw89_rxdesc_to_nl_he_eht_gi(rtwdev, desc_info->gi_ltf, false, eht);
 	ret = rtwdev->ppdu_sts.curr_rx_ppdu_cnt[band] == desc_info->ppdu_cnt &&
	       status->rate_idx == rate_idx &&
-	      status->he_gi == gi_ltf &&
+	      rtw89_check_rx_statu_gi_match(status, gi_ltf, eht) &&
	       status->bw == bw;
 
 	return ret;
@@ -1494,8 +1707,8 @@ static void rtw89_stats_trigger_frame(struct rtw89_dev *rtwdev,
 {
 	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
 	struct ieee80211_trigger *tf = (struct ieee80211_trigger *)skb->data;
-	u8 *pos, *end, type;
-	u16 aid;
+	u8 *pos, *end, type, tf_bw;
+	u16 aid, tf_rua;
 
 	if (!ether_addr_equal(vif->bss_conf.bssid, tf->ta) ||
	     rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION ||
@@ -1503,7 +1716,7 @@ static void rtw89_stats_trigger_frame(struct rtw89_dev *rtwdev,
 		return;
 
 	type = le64_get_bits(tf->common_info, IEEE80211_TRIGGER_TYPE_MASK);
-	if (type != IEEE80211_TRIGGER_TYPE_BASIC)
+	if (type != IEEE80211_TRIGGER_TYPE_BASIC && type != IEEE80211_TRIGGER_TYPE_MU_BAR)
 		return;
 
 	end = (u8 *)tf + skb->len;
@@ -1511,17 +1724,24 @@ static void rtw89_stats_trigger_frame(struct rtw89_dev *rtwdev,
 
 	while (end - pos >= RTW89_TF_BASIC_USER_INFO_SZ) {
 		aid = RTW89_GET_TF_USER_INFO_AID12(pos);
+		tf_rua = RTW89_GET_TF_USER_INFO_RUA(pos);
+		tf_bw = le64_get_bits(tf->common_info, IEEE80211_TRIGGER_ULBW_MASK);
 		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
-			    "[TF] aid: %d, ul_mcs: %d, rua: %d\n",
+			    "[TF] aid: %d, ul_mcs: %d, rua: %d, bw: %d\n",
			     aid, RTW89_GET_TF_USER_INFO_UL_MCS(pos),
-			    RTW89_GET_TF_USER_INFO_RUA(pos));
+			    tf_rua, tf_bw);
 
 		if (aid == RTW89_TF_PAD)
 			break;
 
 		if (aid == vif->cfg.aid) {
+			enum nl80211_he_ru_alloc rua = rtw89_he_rua_to_ru_alloc(tf_rua >> 1);
+
 			rtwvif->stats.rx_tf_acc++;
 			rtwdev->stats.rx_tf_acc++;
+			if (tf_bw == IEEE80211_TRIGGER_ULBW_160_80P80MHZ &&
+			    rua <= NL80211_RATE_INFO_HE_RU_ALLOC_106)
+				rtwvif->pwr_diff_en = true;
 			break;
 		}
@@ -1659,8 +1879,7 @@ static void rtw89_correct_cck_chan(struct rtw89_dev *rtwdev,
 	const struct rtw89_chan_rcd *rcd =
 		rtw89_chan_rcd_get(rtwdev, RTW89_SUB_ENTITY_0);
 	u16 chan = rcd->prev_primary_channel;
-	u8 band = rcd->prev_band_type == RTW89_BAND_2G ?
-		  NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+	u8 band = rtw89_hw_to_nl80211_band(rcd->prev_band_type);
 
 	if (status->band != NL80211_BAND_2GHZ &&
	     status->encoding == RX_ENC_LEGACY &&
@@ -1688,6 +1907,72 @@ static void rtw89_core_hw_to_sband_rate(struct ieee80211_rx_status *rx_status)
 	rx_status->rate_idx -= 4;
 }
 
+static const u8 rx_status_bw_to_radiotap_eht_usig[] = {
+	[RATE_INFO_BW_20] = IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_20MHZ,
+	[RATE_INFO_BW_5] = U8_MAX,
+	[RATE_INFO_BW_10] = U8_MAX,
+	[RATE_INFO_BW_40] = IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_40MHZ,
+	[RATE_INFO_BW_80] = IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_80MHZ,
+	[RATE_INFO_BW_160] = IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_160MHZ,
+	[RATE_INFO_BW_HE_RU] = U8_MAX,
+	[RATE_INFO_BW_320] = IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_320MHZ_1,
+	[RATE_INFO_BW_EHT_RU] = U8_MAX,
+};
+
+static void rtw89_core_update_radiotap_eht(struct rtw89_dev *rtwdev,
+					   struct sk_buff *skb,
+					   struct ieee80211_rx_status *rx_status)
+{
+	struct ieee80211_radiotap_eht_usig *usig;
+	struct ieee80211_radiotap_eht *eht;
+	struct ieee80211_radiotap_tlv *tlv;
+	int eht_len = struct_size(eht, user_info, 1);
+	int usig_len = sizeof(*usig);
+	int len;
+	u8 bw;
+
+	len = sizeof(*tlv) + ALIGN(eht_len, 4) +
+	      sizeof(*tlv) + ALIGN(usig_len, 4);
+
+	rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
+	skb_reset_mac_header(skb);
+
+	/* EHT */
+	tlv = skb_push(skb, len);
+	memset(tlv, 0, len);
+	tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT);
+	tlv->len = cpu_to_le16(eht_len);
+
+	eht = (struct ieee80211_radiotap_eht *)tlv->data;
+	eht->known = cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_GI);
+	eht->data[0] =
+		le32_encode_bits(rx_status->eht.gi, IEEE80211_RADIOTAP_EHT_DATA0_GI);
+
+	eht->user_info[0] =
+		cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN |
+			    IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O);
+	eht->user_info[0] |=
+		le32_encode_bits(rx_status->rate_idx, IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) |
+		le32_encode_bits(rx_status->nss, IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O);
+
+	/* U-SIG */
+	tlv = (void *)tlv + sizeof(*tlv) + ALIGN(eht_len, 4);
+	tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT_USIG);
+	tlv->len = cpu_to_le16(usig_len);
+
+	if (rx_status->bw >= ARRAY_SIZE(rx_status_bw_to_radiotap_eht_usig))
+		return;
+
+	bw = rx_status_bw_to_radiotap_eht_usig[rx_status->bw];
+	if (bw == U8_MAX)
+		return;
+
+	usig = (struct ieee80211_radiotap_eht_usig *)tlv->data;
+	usig->common =
+		le32_encode_bits(1, IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN) |
+		le32_encode_bits(bw, IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW);
+}
+
 static void rtw89_core_update_radiotap(struct rtw89_dev *rtwdev,
					struct sk_buff *skb,
					struct ieee80211_rx_status *rx_status)
@@ -1706,6 +1991,8 @@ static void rtw89_core_update_radiotap(struct rtw89_dev *rtwdev,
 		rx_status->flag |= RX_FLAG_RADIOTAP_HE;
 		he = skb_push(skb, sizeof(*he));
 		*he = known_he;
+	} else if (rx_status->encoding == RX_ENC_EHT) {
+		rtw89_core_update_radiotap_eht(rtwdev, skb, rx_status);
 	}
 }
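rtw89_core_update_radiotap_eht() above emits the EHT radiotap data as two TLVs (EHT and U-SIG), each a 4-byte header followed by a payload padded to a 4-byte boundary, and reserves the whole span with a single skb_push(). A standalone sketch of that length math; the payload sizes are assumed example values:

	#include <stdio.h>
	#include <stddef.h>

	#define ALIGN4(x)	(((x) + 3) & ~(size_t)3)

	struct tlv_hdr { unsigned short type, len; };	/* 4 bytes */

	int main(void)
	{
		size_t eht_len = 40;	/* e.g. struct_size(eht, user_info, 1) */
		size_t usig_len = 12;	/* e.g. sizeof(*usig) */

		size_t total = sizeof(struct tlv_hdr) + ALIGN4(eht_len) +
			       sizeof(struct tlv_hdr) + ALIGN4(usig_len);

		/* The second TLV starts at header + ALIGN4(first payload),
		 * matching the pointer arithmetic in the function above.
		 */
		printf("push %zu bytes\n", total);
		return 0;
	}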
@@ -1718,7 +2005,7 @@ static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
 	struct napi_struct *napi = &rtwdev->napi;
 
 	/* In low power mode, napi isn't scheduled. Receive it to netif. */
-	if (unlikely(!test_bit(NAPI_STATE_SCHED, &napi->state)))
+	if (unlikely(!napi_is_scheduled(napi)))
 		napi = NULL;
 
 	rtw89_core_hw_to_sband_rate(rx_status);
@@ -1849,6 +2136,71 @@ void rtw89_core_query_rxdesc(struct rtw89_dev *rtwdev,
 }
 EXPORT_SYMBOL(rtw89_core_query_rxdesc);
 
+void rtw89_core_query_rxdesc_v2(struct rtw89_dev *rtwdev,
+				struct rtw89_rx_desc_info *desc_info,
+				u8 *data, u32 data_offset)
+{
+	struct rtw89_rxdesc_short_v2 *rxd_s;
+	struct rtw89_rxdesc_long_v2 *rxd_l;
+	u16 shift_len, drv_info_len, phy_rtp_len, hdr_cnv_len;
+
+	rxd_s = (struct rtw89_rxdesc_short_v2 *)(data + data_offset);
+
+	desc_info->pkt_size = le32_get_bits(rxd_s->dword0, BE_RXD_RPKT_LEN_MASK);
+	desc_info->drv_info_size = le32_get_bits(rxd_s->dword0, BE_RXD_DRV_INFO_SZ_MASK);
+	desc_info->phy_rpt_size = le32_get_bits(rxd_s->dword0, BE_RXD_PHY_RPT_SZ_MASK);
+	desc_info->hdr_cnv_size = le32_get_bits(rxd_s->dword0, BE_RXD_HDR_CNV_SZ_MASK);
+	desc_info->shift = le32_get_bits(rxd_s->dword0, BE_RXD_SHIFT_MASK);
+	desc_info->long_rxdesc = le32_get_bits(rxd_s->dword0, BE_RXD_LONG_RXD);
+	desc_info->pkt_type = le32_get_bits(rxd_s->dword0, BE_RXD_RPKT_TYPE_MASK);
+	if (desc_info->pkt_type == RTW89_CORE_RX_TYPE_PPDU_STAT)
+		desc_info->mac_info_valid = true;
+
+	desc_info->frame_type = le32_get_bits(rxd_s->dword2, BE_RXD_TYPE_MASK);
+	desc_info->mac_id = le32_get_bits(rxd_s->dword2, BE_RXD_MAC_ID_MASK);
+	desc_info->addr_cam_valid = le32_get_bits(rxd_s->dword2, BE_RXD_ADDR_CAM_VLD);
+
+	desc_info->icv_err = le32_get_bits(rxd_s->dword3, BE_RXD_ICV_ERR);
+	desc_info->crc32_err = le32_get_bits(rxd_s->dword3, BE_RXD_CRC32_ERR);
+	desc_info->hw_dec = le32_get_bits(rxd_s->dword3, BE_RXD_HW_DEC);
+	desc_info->sw_dec = le32_get_bits(rxd_s->dword3, BE_RXD_SW_DEC);
+	desc_info->addr1_match = le32_get_bits(rxd_s->dword3, BE_RXD_A1_MATCH);
+
+	desc_info->bw = le32_get_bits(rxd_s->dword4, BE_RXD_BW_MASK);
+	desc_info->data_rate = le32_get_bits(rxd_s->dword4, BE_RXD_RX_DATARATE_MASK);
+	desc_info->gi_ltf = le32_get_bits(rxd_s->dword4, BE_RXD_RX_GI_LTF_MASK);
+	desc_info->ppdu_cnt = le32_get_bits(rxd_s->dword4, BE_RXD_PPDU_CNT_MASK);
+	desc_info->ppdu_type = le32_get_bits(rxd_s->dword4, BE_RXD_PPDU_TYPE_MASK);
+
+	desc_info->free_run_cnt = le32_to_cpu(rxd_s->dword5);
+
+	shift_len = desc_info->shift << 1; /* 2-byte unit */
+	drv_info_len = desc_info->drv_info_size << 3; /* 8-byte unit */
+	phy_rtp_len = desc_info->phy_rpt_size << 3; /* 8-byte unit */
+	hdr_cnv_len = desc_info->hdr_cnv_size << 4; /* 16-byte unit */
+	desc_info->offset = data_offset + shift_len + drv_info_len +
+			    phy_rtp_len + hdr_cnv_len;
+
+	if (desc_info->long_rxdesc)
+		desc_info->rxd_len = sizeof(struct rtw89_rxdesc_long_v2);
+	else
+		desc_info->rxd_len = sizeof(struct rtw89_rxdesc_short_v2);
+	desc_info->ready = true;
+
+	if (!desc_info->long_rxdesc)
+		return;
+
+	rxd_l = (struct rtw89_rxdesc_long_v2 *)(data + data_offset);
+
+	desc_info->sr_en = le32_get_bits(rxd_l->dword6, BE_RXD_SR_EN);
+	desc_info->user_id = le32_get_bits(rxd_l->dword6, BE_RXD_USER_ID_MASK);
+	desc_info->addr_cam_id = le32_get_bits(rxd_l->dword6, BE_RXD_ADDR_CAM_MASK);
+	desc_info->sec_cam_id = le32_get_bits(rxd_l->dword6, BE_RXD_SEC_CAM_IDX_MASK);
+
+	desc_info->rx_pl_id = le32_get_bits(rxd_l->dword7, BE_RXD_RX_PL_ID_MASK);
+}
+EXPORT_SYMBOL(rtw89_core_query_rxdesc_v2);
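Note that rtw89_core_query_rxdesc_v2() above stores the descriptor size fields in hardware units (2-, 8-, and 16-byte granules) and shifts them out when computing the payload offset. The conversion, isolated as a standalone helper:

	#include <stdint.h>

	static uint32_t rx_payload_offset(uint32_t data_offset, uint32_t shift,
					  uint32_t drv_info_size,
					  uint32_t phy_rpt_size,
					  uint32_t hdr_cnv_size)
	{
		uint32_t shift_len = shift << 1;		/* 2-byte units */
		uint32_t drv_info_len = drv_info_size << 3;	/* 8-byte units */
		uint32_t phy_rpt_len = phy_rpt_size << 3;	/* 8-byte units */
		uint32_t hdr_cnv_len = hdr_cnv_size << 4;	/* 16-byte units */

		return data_offset + shift_len + drv_info_len +
		       phy_rpt_len + hdr_cnv_len;
	}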
+
 struct rtw89_core_iter_rx_status {
 	struct rtw89_dev *rtwdev;
 	struct ieee80211_rx_status *rx_status;
@@ -1900,9 +2252,10 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
 {
 	const struct cfg80211_chan_def *chandef =
		 rtw89_chandef_get(rtwdev, RTW89_SUB_ENTITY_0);
-	const struct rtw89_chan *cur = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
 	u16 data_rate;
 	u8 data_rate_mode;
+	bool eht = false;
+	u8 gi;
 
 	/* currently using single PHY */
 	rx_status->freq = chandef->chan->center_freq;
@@ -1910,6 +2263,7 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
 
 	if (rtwdev->scanning &&
	     RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
+		const struct rtw89_chan *cur = rtw89_scan_chan_get(rtwdev);
 		u8 chan = cur->primary_channel;
 		u8 band = cur->band_type;
 		enum nl80211_band nl_band;
@@ -1929,32 +2283,41 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
 	rx_status->bw = rtw89_hw_to_rate_info_bw(desc_info->bw);
 
 	data_rate = desc_info->data_rate;
-	data_rate_mode = GET_DATA_RATE_MODE(data_rate);
+	data_rate_mode = rtw89_get_data_rate_mode(rtwdev, data_rate);
 	if (data_rate_mode == DATA_RATE_MODE_NON_HT) {
 		rx_status->encoding = RX_ENC_LEGACY;
-		rx_status->rate_idx = GET_DATA_RATE_NOT_HT_IDX(data_rate);
+		rx_status->rate_idx = rtw89_get_data_not_ht_idx(rtwdev, data_rate);
 		/* convert rate_idx after we get the correct band */
 	} else if (data_rate_mode == DATA_RATE_MODE_HT) {
 		rx_status->encoding = RX_ENC_HT;
-		rx_status->rate_idx = GET_DATA_RATE_HT_IDX(data_rate);
+		rx_status->rate_idx = rtw89_get_data_ht_mcs(rtwdev, data_rate);
 		if (desc_info->gi_ltf)
 			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
 	} else if (data_rate_mode == DATA_RATE_MODE_VHT) {
 		rx_status->encoding = RX_ENC_VHT;
-		rx_status->rate_idx = GET_DATA_RATE_VHT_HE_IDX(data_rate);
-		rx_status->nss = GET_DATA_RATE_NSS(data_rate) + 1;
+		rx_status->rate_idx = rtw89_get_data_mcs(rtwdev, data_rate);
+		rx_status->nss = rtw89_get_data_nss(rtwdev, data_rate) + 1;
 		if (desc_info->gi_ltf)
 			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
 	} else if (data_rate_mode == DATA_RATE_MODE_HE) {
 		rx_status->encoding = RX_ENC_HE;
-		rx_status->rate_idx = GET_DATA_RATE_VHT_HE_IDX(data_rate);
-		rx_status->nss = GET_DATA_RATE_NSS(data_rate) + 1;
+		rx_status->rate_idx = rtw89_get_data_mcs(rtwdev, data_rate);
+		rx_status->nss = rtw89_get_data_nss(rtwdev, data_rate) + 1;
+	} else if (data_rate_mode == DATA_RATE_MODE_EHT) {
+		rx_status->encoding = RX_ENC_EHT;
+		rx_status->rate_idx = rtw89_get_data_mcs(rtwdev, data_rate);
+		rx_status->nss = rtw89_get_data_nss(rtwdev, data_rate) + 1;
+		eht = true;
 	} else {
 		rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode);
 	}
 
 	/* he_gi is used to match ppdu, so we always fill it. */
-	rx_status->he_gi = rtw89_rxdesc_to_nl_he_gi(rtwdev, desc_info, true);
+	gi = rtw89_rxdesc_to_nl_he_eht_gi(rtwdev, desc_info->gi_ltf, true, eht);
+	if (eht)
+		rx_status->eht.gi = gi;
+	else
+		rx_status->he_gi = gi;
 	rx_status->flag |= RX_FLAG_MACTIME_START;
 	rx_status->mactime = desc_info->free_run_cnt;
@@ -2451,6 +2814,7 @@ out:
 
 void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
 {
+	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
 	struct ieee80211_hw *hw = rtwdev->hw;
 	struct rtw89_roc *roc = &rtwvif->roc;
 	struct cfg80211_chan_def roc_chan;
@@ -2464,6 +2828,7 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
 
 	rtw89_leave_ips_by_hwflags(rtwdev);
 	rtw89_leave_lps(rtwdev);
+	rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_ROC);
 
 	ret = rtw89_core_send_nullfunc(rtwdev, rtwvif, true, true);
 	if (ret)
@@ -2478,7 +2843,7 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
 	rtw89_config_roc_chandef(rtwdev, rtwvif->sub_entity_idx, &roc_chan);
 	rtw89_set_channel(rtwdev);
 	rtw89_write32_clr(rtwdev,
-			  rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
+			  rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0),
			   B_AX_A_UC_CAM_MATCH | B_AX_A_BC_CAM_MATCH);
 
 	ieee80211_ready_on_channel(hw);
@@ -2486,6 +2851,7 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
 
 void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
 {
+	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
 	struct ieee80211_hw *hw = rtwdev->hw;
 	struct rtw89_roc *roc = &rtwvif->roc;
 	struct rtw89_vif *tmp;
@@ -2499,13 +2865,13 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
 	rtw89_leave_lps(rtwdev);
 
 	rtw89_write32_mask(rtwdev,
-			   rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
+			   rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0),
			    B_AX_RX_FLTR_CFG_MASK,
			    rtwdev->hal.rx_fltr);
 
 	roc->state = RTW89_ROC_IDLE;
 	rtw89_config_roc_chandef(rtwdev, rtwvif->sub_entity_idx, NULL);
-	rtw89_set_channel(rtwdev);
+	rtw89_chanctx_proceed(rtwdev);
 	ret = rtw89_core_send_nullfunc(rtwdev, rtwvif, true, false);
 	if (ret)
 		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
@@ -2634,6 +3000,27 @@ static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev)
 		rtw89_vif_enter_lps(rtwdev, rtwvif);
 }
 
+static void rtw89_core_rfk_track(struct rtw89_dev *rtwdev)
+{
+	enum rtw89_entity_mode mode;
+
+	mode = rtw89_get_entity_mode(rtwdev);
+	if (mode == RTW89_ENTITY_MODE_MCC)
+		return;
+
+	rtw89_chip_rfk_track(rtwdev);
+}
+
+void rtw89_core_update_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
+{
+	enum rtw89_entity_mode mode = rtw89_get_entity_mode(rtwdev);
+
+	if (mode == RTW89_ENTITY_MODE_MCC)
+		rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_P2P_PS_CHANGE);
+	else
+		rtw89_process_p2p_ps(rtwdev, vif);
+}
+
 void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev,
			       struct rtw89_traffic_stats *stats)
 {
@@ -2676,12 +3063,14 @@ static void rtw89_track_work(struct work_struct *work)
 	rtw89_phy_stat_track(rtwdev);
 	rtw89_phy_env_monitor_track(rtwdev);
 	rtw89_phy_dig(rtwdev);
-	rtw89_chip_rfk_track(rtwdev);
+	rtw89_core_rfk_track(rtwdev);
 	rtw89_phy_ra_update(rtwdev);
 	rtw89_phy_cfo_track(rtwdev);
 	rtw89_phy_tx_path_div_track(rtwdev);
 	rtw89_phy_antdiv_track(rtwdev);
 	rtw89_phy_ul_tb_ctrl_track(rtwdev);
+	rtw89_tas_track(rtwdev);
+	rtw89_chanctx_track(rtwdev);
 
 	if (rtwdev->lps_enabled && !rtwdev->btc.lps)
 		rtw89_enter_lps_track(rtwdev);
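rtw89_roc_start()/rtw89_roc_end() above bracket the off-channel period with rtw89_chanctx_pause() and rtw89_chanctx_proceed(), tearing MCC down before the hop and restarting it (plus a queued re-check) afterwards, rather than leaving MCC fighting the ROC channel. A schematic userspace state machine of that bracket, with all names illustrative:

	#include <stdbool.h>
	#include <stdio.h>

	struct chanctx { bool paused; bool mcc_running; };

	static void chanctx_pause(struct chanctx *c)
	{
		if (c->paused)
			return;
		if (c->mcc_running) {
			puts("stop MCC");
			c->mcc_running = false;
		}
		c->paused = true;
	}

	static void chanctx_proceed(struct chanctx *c)
	{
		if (!c->paused)
			return;
		c->paused = false;
		puts("set channel, restart MCC, queue chanctx work");
		c->mcc_running = true;
	}

	int main(void)
	{
		struct chanctx c = { .paused = false, .mcc_running = true };

		chanctx_pause(&c);	/* before going off-channel (ROC start) */
		/* ... dwell on the ROC channel ... */
		chanctx_proceed(&c);	/* back on the operating channel (ROC end) */
		return 0;
	}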
@@ -2894,6 +3283,8 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
 			rtw89_warn(rtwdev, "failed to send h2c role info\n");
 			return ret;
 		}
+
+		rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_REMOTE_STA_CHANGE);
 	}
 
 	return 0;
@@ -2970,6 +3361,8 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
 	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
 	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
 	struct rtw89_bssid_cam_entry *bssid_cam = rtw89_get_bssid_cam_of(rtwvif, rtwsta);
+	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+						       rtwvif->sub_entity_idx);
 	int ret;
 
 	if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
@@ -3023,7 +3416,7 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
 		rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
					  BTC_ROLE_MSTS_STA_CONN_END);
-		rtw89_core_get_no_ul_ofdma_htc(rtwdev, &rtwsta->htc_template);
+		rtw89_core_get_no_ul_ofdma_htc(rtwdev, &rtwsta->htc_template, chan);
 		rtw89_phy_ul_tb_assoc(rtwdev, rtwvif);
 
 		ret = rtw89_fw_h2c_general_pkt(rtwdev, rtwvif, rtwsta->mac_id);
@@ -3057,6 +3450,8 @@ int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
 			rtw89_warn(rtwdev, "failed to send h2c role info\n");
 			return ret;
 		}
+
+		rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_REMOTE_STA_CHANGE);
 	}
 
 	return 0;
@@ -3328,8 +3723,7 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
 		idx++;
 	}
 
-	sband->iftype_data = iftype_data;
-	sband->n_iftype_data = idx;
+	_ieee80211_set_sband_iftype_data(sband, iftype_data, idx);
 }
 
 static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev)
@@ -3374,11 +3768,11 @@ err:
 	hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
 	hw->wiphy->bands[NL80211_BAND_6GHZ] = NULL;
 	if (sband_2ghz)
-		kfree(sband_2ghz->iftype_data);
+		kfree((__force void *)sband_2ghz->iftype_data);
 	if (sband_5ghz)
-		kfree(sband_5ghz->iftype_data);
+		kfree((__force void *)sband_5ghz->iftype_data);
 	if (sband_6ghz)
-		kfree(sband_6ghz->iftype_data);
+		kfree((__force void *)sband_6ghz->iftype_data);
 	kfree(sband_2ghz);
 	kfree(sband_5ghz);
 	kfree(sband_6ghz);
@@ -3390,11 +3784,11 @@ static void rtw89_core_clr_supported_band(struct rtw89_dev *rtwdev)
 	struct ieee80211_hw *hw = rtwdev->hw;
 
 	if (hw->wiphy->bands[NL80211_BAND_2GHZ])
-		kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]->iftype_data);
+		kfree((__force void *)hw->wiphy->bands[NL80211_BAND_2GHZ]->iftype_data);
 	if (hw->wiphy->bands[NL80211_BAND_5GHZ])
-		kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]->iftype_data);
+		kfree((__force void *)hw->wiphy->bands[NL80211_BAND_5GHZ]->iftype_data);
 	if (hw->wiphy->bands[NL80211_BAND_6GHZ])
-		kfree(hw->wiphy->bands[NL80211_BAND_6GHZ]->iftype_data);
+		kfree((__force void *)hw->wiphy->bands[NL80211_BAND_6GHZ]->iftype_data);
 	kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]);
 	kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]);
 	kfree(hw->wiphy->bands[NL80211_BAND_6GHZ]);
@@ -3463,6 +3857,28 @@ void rtw89_complete_cond(struct rtw89_wait_info *wait, unsigned int cond,
 	complete(&wait->completion);
 }
 
+void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg event)
+{
+	u16 bt_req_len;
+
+	switch (event) {
+	case RTW89_BTC_HMSG_SET_BT_REQ_SLOT:
+		bt_req_len = rtw89_coex_query_bt_req_len(rtwdev, RTW89_PHY_0);
+		rtw89_debug(rtwdev, RTW89_DBG_BTC,
+			    "coex updates BT req len to %d TU\n", bt_req_len);
+		rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_BT_SLOT_CHANGE);
+		break;
+	default:
+		if (event < NUM_OF_RTW89_BTC_HMSG)
+			rtw89_debug(rtwdev, RTW89_DBG_BTC,
+				    "unhandled BTC HMSG event: %d\n", event);
+		else
+			rtw89_warn(rtwdev,
+				   "unrecognized BTC HMSG event: %d\n", event);
+		break;
+	}
+}
+
 int rtw89_core_start(struct rtw89_dev *rtwdev)
 {
 	int ret;
@@ -3496,6 +3912,8 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
 	rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
 	rtw89_mac_update_rts_threshold(rtwdev, RTW89_MAC_0);
 
+	rtw89_tas_reset(rtwdev);
+
 	ret = rtw89_hci_start(rtwdev);
 	if (ret) {
 		rtw89_err(rtwdev, "failed to start hci\n");
@@ -3508,7 +3926,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
 	set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
 
 	rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
-	rtw89_fw_h2c_fw_log(rtwdev, rtwdev->fw.fw_log_enable);
+	rtw89_fw_h2c_fw_log(rtwdev, rtwdev->fw.log.enable);
 	rtw89_fw_h2c_init_ba_cam(rtwdev);
 
 	return 0;
@@ -3536,6 +3954,7 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
 	cancel_work_sync(&btc->icmp_notify_work);
 	cancel_delayed_work_sync(&rtwdev->txq_reinvoke_work);
 	cancel_delayed_work_sync(&rtwdev->track_work);
+	cancel_delayed_work_sync(&rtwdev->chanctx_work);
 	cancel_delayed_work_sync(&rtwdev->coex_act1_work);
 	cancel_delayed_work_sync(&rtwdev->coex_bt_devinfo_work);
 	cancel_delayed_work_sync(&rtwdev->coex_rfk_chk_work);
@@ -3572,6 +3991,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
 	INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work);
 	INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work);
 	INIT_DELAYED_WORK(&rtwdev->track_work, rtw89_track_work);
+	INIT_DELAYED_WORK(&rtwdev->chanctx_work, rtw89_chanctx_work);
 	INIT_DELAYED_WORK(&rtwdev->coex_act1_work, rtw89_coex_act1_work);
 	INIT_DELAYED_WORK(&rtwdev->coex_bt_devinfo_work, rtw89_coex_bt_devinfo_work);
 	INIT_DELAYED_WORK(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work);
@@ -3612,6 +4032,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
 
 	rtw89_ser_init(rtwdev);
 	rtw89_entity_init(rtwdev);
+	rtw89_tas_init(rtwdev);
 
 	return 0;
 }
@@ -3632,7 +4053,8 @@ EXPORT_SYMBOL(rtw89_core_deinit);
 void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			    const u8 *mac_addr, bool hw_scan)
 {
-	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+						       rtwvif->sub_entity_idx);
 
 	rtwdev->scanning = true;
 	rtw89_leave_lps(rtwdev);
@@ -3709,28 +4131,34 @@ static void rtw89_core_setup_rfe_parms(struct rtw89_dev *rtwdev)
 	const struct rtw89_chip_info *chip = rtwdev->chip;
 	const struct rtw89_rfe_parms_conf *conf = chip->rfe_parms_conf;
 	struct rtw89_efuse *efuse = &rtwdev->efuse;
+	const struct rtw89_rfe_parms *sel;
 	u8 rfe_type = efuse->rfe_type;
 
-	if (!conf)
+	if (!conf) {
+		sel = chip->dflt_parms;
 		goto out;
+	}
 
 	while (conf->rfe_parms) {
 		if (rfe_type == conf->rfe_type) {
-			rtwdev->rfe_parms = conf->rfe_parms;
-			return;
+			sel = conf->rfe_parms;
+			goto out;
 		}
 		conf++;
 	}
 
+	sel = chip->dflt_parms;
+
 out:
-	rtwdev->rfe_parms = chip->dflt_parms;
+	rtwdev->rfe_parms = rtw89_load_rfe_data_from_fw(rtwdev, sel);
+	rtw89_load_txpwr_table(rtwdev, rtwdev->rfe_parms->byr_tbl);
 }
 
 static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev)
 {
 	int ret;
 
-	ret = rtw89_mac_partial_init(rtwdev);
+	ret = rtw89_mac_partial_init(rtwdev, false);
 	if (ret)
 		return ret;
@@ -3747,7 +4175,6 @@ static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev)
 		return ret;
 
 	rtw89_core_setup_phycap(rtwdev);
-	rtw89_core_setup_rfe_parms(rtwdev);
 
 	rtw89_mac_pwr_off(rtwdev);
@@ -3783,10 +4210,17 @@ int rtw89_chip_info_setup(struct rtw89_dev *rtwdev)
 	if (ret)
 		return ret;
 
+	ret = rtw89_fw_recognize_elements(rtwdev);
+	if (ret) {
+		rtw89_err(rtwdev, "failed to recognize firmware elements\n");
+		return ret;
+	}
+
 	ret = rtw89_chip_board_info_setup(rtwdev);
 	if (ret)
 		return ret;
 
+	rtw89_core_setup_rfe_parms(rtwdev);
 	rtwdev->ps_mode = rtw89_update_ps_mode(rtwdev);
 
 	return 0;
@@ -3828,6 +4262,10 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
 	ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
 	ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
 	ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+
+	/* ref: description of rtw89_mcc_get_tbtt_ofst() in chan.c */
+	ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
+
 	if (RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
 		ieee80211_hw_set(hw, CONNECTION_MONITOR);
@@ -3969,7 +4407,11 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
 		goto err;
 
 	hw->wiphy->iface_combinations = rtw89_iface_combs;
-	hw->wiphy->n_iface_combinations = ARRAY_SIZE(rtw89_iface_combs);
+
+	if (no_chanctx || chip->support_chanctx_num == 1)
+		hw->wiphy->n_iface_combinations = 1;
+	else
+		hw->wiphy->n_iface_combinations = ARRAY_SIZE(rtw89_iface_combs);
 
 	rtwdev = hw->priv;
 	rtwdev->hw = hw;
@@ -3994,6 +4436,7 @@ EXPORT_SYMBOL(rtw89_alloc_ieee80211_hw);
 void rtw89_free_ieee80211_hw(struct rtw89_dev *rtwdev)
 {
 	kfree(rtwdev->ops);
+	kfree(rtwdev->rfe_data);
 	release_firmware(rtwdev->fw.req.firmware);
 	ieee80211_free_hw(rtwdev->hw);
 }
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index d2c67db97db1..91e4d4e79eea 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -14,6 +14,8 @@
 
 struct rtw89_dev;
 struct rtw89_pci_info;
+struct rtw89_mac_gen_def;
+struct rtw89_phy_gen_def;
 
 extern const struct ieee80211_ops rtw89_ops;
@@ -35,7 +37,14 @@ extern const struct ieee80211_ops rtw89_ops;
 #define RSSI_FACTOR 1
 #define RTW89_RSSI_RAW_TO_DBM(rssi) ((s8)((rssi) >> RSSI_FACTOR) - MAX_RSSI)
 #define RTW89_TX_DIV_RSSI_RAW_TH (2 << RSSI_FACTOR)
-#define RTW89_RADIOTAP_ROOM ALIGN(sizeof(struct ieee80211_radiotap_he), 64)
+#define RTW89_RADIOTAP_ROOM_HE sizeof(struct ieee80211_radiotap_he)
+#define RTW89_RADIOTAP_ROOM_EHT \
+	(sizeof(struct ieee80211_radiotap_tlv) + \
+	 ALIGN(struct_size((struct ieee80211_radiotap_eht *)0, user_info, 1), 4) + \
+	 sizeof(struct ieee80211_radiotap_tlv) + \
+	 ALIGN(sizeof(struct ieee80211_radiotap_eht_usig), 4))
+#define RTW89_RADIOTAP_ROOM \
+	ALIGN(max(RTW89_RADIOTAP_ROOM_HE, RTW89_RADIOTAP_ROOM_EHT), 64)
 
 #define RTW89_HTC_MASK_VARIANT GENMASK(1, 0)
 #define RTW89_HTC_VARIANT_HE 3
@@ -109,6 +118,14 @@ enum rtw89_core_chip_id {
 	RTL8852B,
 	RTL8852C,
 	RTL8851B,
+	RTL8922A,
+};
+
+enum rtw89_chip_gen {
+	RTW89_CHIP_AX,
+	RTW89_CHIP_BE,
+
+	RTW89_CHIP_GEN_NUM,
 };
 
 enum rtw89_cv {
@@ -387,10 +404,201 @@ enum rtw89_hw_rate {
 	RTW89_HW_RATE_HE_NSS4_MCS9 = 0x1B9,
 	RTW89_HW_RATE_HE_NSS4_MCS10 = 0x1BA,
 	RTW89_HW_RATE_HE_NSS4_MCS11 = 0x1BB,
+
+	RTW89_HW_RATE_V1_MCS0 = 0x100,
+	RTW89_HW_RATE_V1_MCS1 = 0x101,
+	RTW89_HW_RATE_V1_MCS2 = 0x102,
+	RTW89_HW_RATE_V1_MCS3 = 0x103,
+	RTW89_HW_RATE_V1_MCS4 = 0x104,
+	RTW89_HW_RATE_V1_MCS5 = 0x105,
+	RTW89_HW_RATE_V1_MCS6 = 0x106,
+	RTW89_HW_RATE_V1_MCS7 = 0x107,
+	RTW89_HW_RATE_V1_MCS8 = 0x108,
+	RTW89_HW_RATE_V1_MCS9 = 0x109,
+	RTW89_HW_RATE_V1_MCS10 = 0x10A,
+	RTW89_HW_RATE_V1_MCS11 = 0x10B,
+	RTW89_HW_RATE_V1_MCS12 = 0x10C,
+	RTW89_HW_RATE_V1_MCS13 = 0x10D,
+	RTW89_HW_RATE_V1_MCS14 = 0x10E,
+	RTW89_HW_RATE_V1_MCS15 = 0x10F,
+	RTW89_HW_RATE_V1_MCS16 = 0x110,
+	RTW89_HW_RATE_V1_MCS17 = 0x111,
+	RTW89_HW_RATE_V1_MCS18 = 0x112,
+	RTW89_HW_RATE_V1_MCS19 = 0x113,
+	RTW89_HW_RATE_V1_MCS20 = 0x114,
+	RTW89_HW_RATE_V1_MCS21 = 0x115,
+	RTW89_HW_RATE_V1_MCS22 = 0x116,
+	RTW89_HW_RATE_V1_MCS23 = 0x117,
+	RTW89_HW_RATE_V1_MCS24 = 0x118,
+	RTW89_HW_RATE_V1_MCS25 = 0x119,
+ RTW89_HW_RATE_V1_MCS26 = 0x11A, + RTW89_HW_RATE_V1_MCS27 = 0x11B, + RTW89_HW_RATE_V1_MCS28 = 0x11C, + RTW89_HW_RATE_V1_MCS29 = 0x11D, + RTW89_HW_RATE_V1_MCS30 = 0x11E, + RTW89_HW_RATE_V1_MCS31 = 0x11F, + RTW89_HW_RATE_V1_VHT_NSS1_MCS0 = 0x200, + RTW89_HW_RATE_V1_VHT_NSS1_MCS1 = 0x201, + RTW89_HW_RATE_V1_VHT_NSS1_MCS2 = 0x202, + RTW89_HW_RATE_V1_VHT_NSS1_MCS3 = 0x203, + RTW89_HW_RATE_V1_VHT_NSS1_MCS4 = 0x204, + RTW89_HW_RATE_V1_VHT_NSS1_MCS5 = 0x205, + RTW89_HW_RATE_V1_VHT_NSS1_MCS6 = 0x206, + RTW89_HW_RATE_V1_VHT_NSS1_MCS7 = 0x207, + RTW89_HW_RATE_V1_VHT_NSS1_MCS8 = 0x208, + RTW89_HW_RATE_V1_VHT_NSS1_MCS9 = 0x209, + RTW89_HW_RATE_V1_VHT_NSS1_MCS10 = 0x20A, + RTW89_HW_RATE_V1_VHT_NSS1_MCS11 = 0x20B, + RTW89_HW_RATE_V1_VHT_NSS2_MCS0 = 0x220, + RTW89_HW_RATE_V1_VHT_NSS2_MCS1 = 0x221, + RTW89_HW_RATE_V1_VHT_NSS2_MCS2 = 0x222, + RTW89_HW_RATE_V1_VHT_NSS2_MCS3 = 0x223, + RTW89_HW_RATE_V1_VHT_NSS2_MCS4 = 0x224, + RTW89_HW_RATE_V1_VHT_NSS2_MCS5 = 0x225, + RTW89_HW_RATE_V1_VHT_NSS2_MCS6 = 0x226, + RTW89_HW_RATE_V1_VHT_NSS2_MCS7 = 0x227, + RTW89_HW_RATE_V1_VHT_NSS2_MCS8 = 0x228, + RTW89_HW_RATE_V1_VHT_NSS2_MCS9 = 0x229, + RTW89_HW_RATE_V1_VHT_NSS2_MCS10 = 0x22A, + RTW89_HW_RATE_V1_VHT_NSS2_MCS11 = 0x22B, + RTW89_HW_RATE_V1_VHT_NSS3_MCS0 = 0x240, + RTW89_HW_RATE_V1_VHT_NSS3_MCS1 = 0x241, + RTW89_HW_RATE_V1_VHT_NSS3_MCS2 = 0x242, + RTW89_HW_RATE_V1_VHT_NSS3_MCS3 = 0x243, + RTW89_HW_RATE_V1_VHT_NSS3_MCS4 = 0x244, + RTW89_HW_RATE_V1_VHT_NSS3_MCS5 = 0x245, + RTW89_HW_RATE_V1_VHT_NSS3_MCS6 = 0x246, + RTW89_HW_RATE_V1_VHT_NSS3_MCS7 = 0x247, + RTW89_HW_RATE_V1_VHT_NSS3_MCS8 = 0x248, + RTW89_HW_RATE_V1_VHT_NSS3_MCS9 = 0x249, + RTW89_HW_RATE_V1_VHT_NSS3_MCS10 = 0x24A, + RTW89_HW_RATE_V1_VHT_NSS3_MCS11 = 0x24B, + RTW89_HW_RATE_V1_VHT_NSS4_MCS0 = 0x260, + RTW89_HW_RATE_V1_VHT_NSS4_MCS1 = 0x261, + RTW89_HW_RATE_V1_VHT_NSS4_MCS2 = 0x262, + RTW89_HW_RATE_V1_VHT_NSS4_MCS3 = 0x263, + RTW89_HW_RATE_V1_VHT_NSS4_MCS4 = 0x264, + RTW89_HW_RATE_V1_VHT_NSS4_MCS5 = 0x265, + RTW89_HW_RATE_V1_VHT_NSS4_MCS6 = 0x266, + RTW89_HW_RATE_V1_VHT_NSS4_MCS7 = 0x267, + RTW89_HW_RATE_V1_VHT_NSS4_MCS8 = 0x268, + RTW89_HW_RATE_V1_VHT_NSS4_MCS9 = 0x269, + RTW89_HW_RATE_V1_VHT_NSS4_MCS10 = 0x26A, + RTW89_HW_RATE_V1_VHT_NSS4_MCS11 = 0x26B, + RTW89_HW_RATE_V1_HE_NSS1_MCS0 = 0x300, + RTW89_HW_RATE_V1_HE_NSS1_MCS1 = 0x301, + RTW89_HW_RATE_V1_HE_NSS1_MCS2 = 0x302, + RTW89_HW_RATE_V1_HE_NSS1_MCS3 = 0x303, + RTW89_HW_RATE_V1_HE_NSS1_MCS4 = 0x304, + RTW89_HW_RATE_V1_HE_NSS1_MCS5 = 0x305, + RTW89_HW_RATE_V1_HE_NSS1_MCS6 = 0x306, + RTW89_HW_RATE_V1_HE_NSS1_MCS7 = 0x307, + RTW89_HW_RATE_V1_HE_NSS1_MCS8 = 0x308, + RTW89_HW_RATE_V1_HE_NSS1_MCS9 = 0x309, + RTW89_HW_RATE_V1_HE_NSS1_MCS10 = 0x30A, + RTW89_HW_RATE_V1_HE_NSS1_MCS11 = 0x30B, + RTW89_HW_RATE_V1_HE_NSS2_MCS0 = 0x320, + RTW89_HW_RATE_V1_HE_NSS2_MCS1 = 0x321, + RTW89_HW_RATE_V1_HE_NSS2_MCS2 = 0x322, + RTW89_HW_RATE_V1_HE_NSS2_MCS3 = 0x323, + RTW89_HW_RATE_V1_HE_NSS2_MCS4 = 0x324, + RTW89_HW_RATE_V1_HE_NSS2_MCS5 = 0x325, + RTW89_HW_RATE_V1_HE_NSS2_MCS6 = 0x326, + RTW89_HW_RATE_V1_HE_NSS2_MCS7 = 0x327, + RTW89_HW_RATE_V1_HE_NSS2_MCS8 = 0x328, + RTW89_HW_RATE_V1_HE_NSS2_MCS9 = 0x329, + RTW89_HW_RATE_V1_HE_NSS2_MCS10 = 0x32A, + RTW89_HW_RATE_V1_HE_NSS2_MCS11 = 0x32B, + RTW89_HW_RATE_V1_HE_NSS3_MCS0 = 0x340, + RTW89_HW_RATE_V1_HE_NSS3_MCS1 = 0x341, + RTW89_HW_RATE_V1_HE_NSS3_MCS2 = 0x342, + RTW89_HW_RATE_V1_HE_NSS3_MCS3 = 0x343, + RTW89_HW_RATE_V1_HE_NSS3_MCS4 = 0x344, + RTW89_HW_RATE_V1_HE_NSS3_MCS5 = 0x345, + RTW89_HW_RATE_V1_HE_NSS3_MCS6 = 0x346, + RTW89_HW_RATE_V1_HE_NSS3_MCS7 = 0x347, + 
RTW89_HW_RATE_V1_HE_NSS3_MCS8 = 0x348, + RTW89_HW_RATE_V1_HE_NSS3_MCS9 = 0x349, + RTW89_HW_RATE_V1_HE_NSS3_MCS10 = 0x34A, + RTW89_HW_RATE_V1_HE_NSS3_MCS11 = 0x34B, + RTW89_HW_RATE_V1_HE_NSS4_MCS0 = 0x360, + RTW89_HW_RATE_V1_HE_NSS4_MCS1 = 0x361, + RTW89_HW_RATE_V1_HE_NSS4_MCS2 = 0x362, + RTW89_HW_RATE_V1_HE_NSS4_MCS3 = 0x363, + RTW89_HW_RATE_V1_HE_NSS4_MCS4 = 0x364, + RTW89_HW_RATE_V1_HE_NSS4_MCS5 = 0x365, + RTW89_HW_RATE_V1_HE_NSS4_MCS6 = 0x366, + RTW89_HW_RATE_V1_HE_NSS4_MCS7 = 0x367, + RTW89_HW_RATE_V1_HE_NSS4_MCS8 = 0x368, + RTW89_HW_RATE_V1_HE_NSS4_MCS9 = 0x369, + RTW89_HW_RATE_V1_HE_NSS4_MCS10 = 0x36A, + RTW89_HW_RATE_V1_HE_NSS4_MCS11 = 0x36B, + RTW89_HW_RATE_V1_EHT_NSS1_MCS0 = 0x400, + RTW89_HW_RATE_V1_EHT_NSS1_MCS1 = 0x401, + RTW89_HW_RATE_V1_EHT_NSS1_MCS2 = 0x402, + RTW89_HW_RATE_V1_EHT_NSS1_MCS3 = 0x403, + RTW89_HW_RATE_V1_EHT_NSS1_MCS4 = 0x404, + RTW89_HW_RATE_V1_EHT_NSS1_MCS5 = 0x405, + RTW89_HW_RATE_V1_EHT_NSS1_MCS6 = 0x406, + RTW89_HW_RATE_V1_EHT_NSS1_MCS7 = 0x407, + RTW89_HW_RATE_V1_EHT_NSS1_MCS8 = 0x408, + RTW89_HW_RATE_V1_EHT_NSS1_MCS9 = 0x409, + RTW89_HW_RATE_V1_EHT_NSS1_MCS10 = 0x40A, + RTW89_HW_RATE_V1_EHT_NSS1_MCS11 = 0x40B, + RTW89_HW_RATE_V1_EHT_NSS1_MCS12 = 0x40C, + RTW89_HW_RATE_V1_EHT_NSS1_MCS13 = 0x40D, + RTW89_HW_RATE_V1_EHT_NSS1_MCS14 = 0x40E, + RTW89_HW_RATE_V1_EHT_NSS1_MCS15 = 0x40F, + RTW89_HW_RATE_V1_EHT_NSS2_MCS0 = 0x420, + RTW89_HW_RATE_V1_EHT_NSS2_MCS1 = 0x421, + RTW89_HW_RATE_V1_EHT_NSS2_MCS2 = 0x422, + RTW89_HW_RATE_V1_EHT_NSS2_MCS3 = 0x423, + RTW89_HW_RATE_V1_EHT_NSS2_MCS4 = 0x424, + RTW89_HW_RATE_V1_EHT_NSS2_MCS5 = 0x425, + RTW89_HW_RATE_V1_EHT_NSS2_MCS6 = 0x426, + RTW89_HW_RATE_V1_EHT_NSS2_MCS7 = 0x427, + RTW89_HW_RATE_V1_EHT_NSS2_MCS8 = 0x428, + RTW89_HW_RATE_V1_EHT_NSS2_MCS9 = 0x429, + RTW89_HW_RATE_V1_EHT_NSS2_MCS10 = 0x42A, + RTW89_HW_RATE_V1_EHT_NSS2_MCS11 = 0x42B, + RTW89_HW_RATE_V1_EHT_NSS2_MCS12 = 0x42C, + RTW89_HW_RATE_V1_EHT_NSS2_MCS13 = 0x42D, + RTW89_HW_RATE_V1_EHT_NSS3_MCS0 = 0x440, + RTW89_HW_RATE_V1_EHT_NSS3_MCS1 = 0x441, + RTW89_HW_RATE_V1_EHT_NSS3_MCS2 = 0x442, + RTW89_HW_RATE_V1_EHT_NSS3_MCS3 = 0x443, + RTW89_HW_RATE_V1_EHT_NSS3_MCS4 = 0x444, + RTW89_HW_RATE_V1_EHT_NSS3_MCS5 = 0x445, + RTW89_HW_RATE_V1_EHT_NSS3_MCS6 = 0x446, + RTW89_HW_RATE_V1_EHT_NSS3_MCS7 = 0x447, + RTW89_HW_RATE_V1_EHT_NSS3_MCS8 = 0x448, + RTW89_HW_RATE_V1_EHT_NSS3_MCS9 = 0x449, + RTW89_HW_RATE_V1_EHT_NSS3_MCS10 = 0x44A, + RTW89_HW_RATE_V1_EHT_NSS3_MCS11 = 0x44B, + RTW89_HW_RATE_V1_EHT_NSS3_MCS12 = 0x44C, + RTW89_HW_RATE_V1_EHT_NSS3_MCS13 = 0x44D, + RTW89_HW_RATE_V1_EHT_NSS4_MCS0 = 0x460, + RTW89_HW_RATE_V1_EHT_NSS4_MCS1 = 0x461, + RTW89_HW_RATE_V1_EHT_NSS4_MCS2 = 0x462, + RTW89_HW_RATE_V1_EHT_NSS4_MCS3 = 0x463, + RTW89_HW_RATE_V1_EHT_NSS4_MCS4 = 0x464, + RTW89_HW_RATE_V1_EHT_NSS4_MCS5 = 0x465, + RTW89_HW_RATE_V1_EHT_NSS4_MCS6 = 0x466, + RTW89_HW_RATE_V1_EHT_NSS4_MCS7 = 0x467, + RTW89_HW_RATE_V1_EHT_NSS4_MCS8 = 0x468, + RTW89_HW_RATE_V1_EHT_NSS4_MCS9 = 0x469, + RTW89_HW_RATE_V1_EHT_NSS4_MCS10 = 0x46A, + RTW89_HW_RATE_V1_EHT_NSS4_MCS11 = 0x46B, + RTW89_HW_RATE_V1_EHT_NSS4_MCS12 = 0x46C, + RTW89_HW_RATE_V1_EHT_NSS4_MCS13 = 0x46D, + RTW89_HW_RATE_NR, + RTW89_HW_RATE_INVAL, RTW89_HW_RATE_MASK_MOD = GENMASK(8, 7), RTW89_HW_RATE_MASK_VAL = GENMASK(6, 0), + RTW89_HW_RATE_V1_MASK_MOD = GENMASK(10, 8), + RTW89_HW_RATE_V1_MASK_VAL = GENMASK(7, 0), }; /* 2G channels, @@ -439,12 +647,29 @@ enum rtw89_rate_section { RTW89_RS_TX_SHAPE_NUM = RTW89_RS_OFDM + 1, }; +enum rtw89_rate_offset_indexes { + RTW89_RATE_OFFSET_HE, + RTW89_RATE_OFFSET_VHT, + RTW89_RATE_OFFSET_HT, + 
RTW89_RATE_OFFSET_OFDM, + RTW89_RATE_OFFSET_CCK, + RTW89_RATE_OFFSET_DLRU_EHT, + RTW89_RATE_OFFSET_DLRU_HE, + RTW89_RATE_OFFSET_EHT, + __RTW89_RATE_OFFSET_NUM, + + RTW89_RATE_OFFSET_NUM_AX = RTW89_RATE_OFFSET_CCK + 1, + RTW89_RATE_OFFSET_NUM_BE = RTW89_RATE_OFFSET_EHT + 1, +}; + enum rtw89_rate_num { RTW89_RATE_CCK_NUM = 4, RTW89_RATE_OFDM_NUM = 8, - RTW89_RATE_MCS_NUM = 12, RTW89_RATE_HEDCM_NUM = 4, /* for HEDCM MCS0/1/3/4 */ - RTW89_RATE_OFFSET_NUM = 5, /* for HE(HEDCM)/VHT/HT/OFDM/CCK offset */ + + RTW89_RATE_MCS_NUM_AX = 12, + RTW89_RATE_MCS_NUM_BE = 16, + __RTW89_RATE_MCS_NUM = 16, }; enum rtw89_nss { @@ -469,6 +694,12 @@ enum rtw89_beamforming_type { RTW89_BF_NUM, }; +enum rtw89_ofdma_type { + RTW89_NON_OFDMA = 0, + RTW89_OFDMA = 1, + RTW89_OFDMA_NUM, +}; + enum rtw89_regulation_type { RTW89_WW = 0, RTW89_ETSI = 1, @@ -485,6 +716,7 @@ enum rtw89_regulation_type { RTW89_CN = 12, RTW89_QATAR = 13, RTW89_UK = 14, + RTW89_THAILAND = 15, RTW89_REGD_NUM, }; @@ -514,44 +746,16 @@ enum rtw89_fw_pkt_ofld_type { struct rtw89_txpwr_byrate { s8 cck[RTW89_RATE_CCK_NUM]; s8 ofdm[RTW89_RATE_OFDM_NUM]; - s8 mcs[RTW89_NSS_NUM][RTW89_RATE_MCS_NUM]; - s8 hedcm[RTW89_NSS_HEDCM_NUM][RTW89_RATE_HEDCM_NUM]; - s8 offset[RTW89_RATE_OFFSET_NUM]; -}; - -enum rtw89_bandwidth_section_num { - RTW89_BW20_SEC_NUM = 8, - RTW89_BW40_SEC_NUM = 4, - RTW89_BW80_SEC_NUM = 2, -}; - -#define RTW89_TXPWR_LMT_PAGE_SIZE 40 - -struct rtw89_txpwr_limit { - s8 cck_20m[RTW89_BF_NUM]; - s8 cck_40m[RTW89_BF_NUM]; - s8 ofdm[RTW89_BF_NUM]; - s8 mcs_20m[RTW89_BW20_SEC_NUM][RTW89_BF_NUM]; - s8 mcs_40m[RTW89_BW40_SEC_NUM][RTW89_BF_NUM]; - s8 mcs_80m[RTW89_BW80_SEC_NUM][RTW89_BF_NUM]; - s8 mcs_160m[RTW89_BF_NUM]; - s8 mcs_40m_0p5[RTW89_BF_NUM]; - s8 mcs_40m_2p5[RTW89_BF_NUM]; -}; - -#define RTW89_RU_SEC_NUM 8 - -#define RTW89_TXPWR_LMT_RU_PAGE_SIZE 24 - -struct rtw89_txpwr_limit_ru { - s8 ru26[RTW89_RU_SEC_NUM]; - s8 ru52[RTW89_RU_SEC_NUM]; - s8 ru106[RTW89_RU_SEC_NUM]; + s8 mcs[RTW89_OFDMA_NUM][RTW89_NSS_NUM][__RTW89_RATE_MCS_NUM]; + s8 hedcm[RTW89_OFDMA_NUM][RTW89_NSS_HEDCM_NUM][RTW89_RATE_HEDCM_NUM]; + s8 offset[__RTW89_RATE_OFFSET_NUM]; + s8 trap; }; struct rtw89_rate_desc { enum rtw89_nss nss; enum rtw89_rate_section rs; + enum rtw89_ofdma_type ofdma; u8 idx; }; @@ -590,6 +794,7 @@ enum rtw89_phy_idx { enum rtw89_sub_entity_idx { RTW89_SUB_ENTITY_0 = 0, + RTW89_SUB_ENTITY_1 = 1, NUM_OF_RTW89_SUB_ENTITY, RTW89_SUB_ENTITY_IDLE = NUM_OF_RTW89_SUB_ENTITY, @@ -639,9 +844,14 @@ enum rtw89_bandwidth { RTW89_CHANNEL_WIDTH_40 = 1, RTW89_CHANNEL_WIDTH_80 = 2, RTW89_CHANNEL_WIDTH_160 = 3, - RTW89_CHANNEL_WIDTH_80_80 = 4, - RTW89_CHANNEL_WIDTH_5 = 5, - RTW89_CHANNEL_WIDTH_10 = 6, + RTW89_CHANNEL_WIDTH_320 = 4, + + /* keep index order above */ + RTW89_CHANNEL_WIDTH_ORDINARY_NUM = 5, + + RTW89_CHANNEL_WIDTH_80_80 = 5, + RTW89_CHANNEL_WIDTH_5 = 6, + RTW89_CHANNEL_WIDTH_10 = 7, }; enum rtw89_ps_mode { @@ -653,13 +863,16 @@ enum rtw89_ps_mode { #define RTW89_2G_BW_NUM (RTW89_CHANNEL_WIDTH_40 + 1) #define RTW89_5G_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1) -#define RTW89_6G_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1) +#define RTW89_6G_BW_NUM (RTW89_CHANNEL_WIDTH_320 + 1) +#define RTW89_BYR_BW_NUM (RTW89_CHANNEL_WIDTH_320 + 1) #define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1) enum rtw89_ru_bandwidth { RTW89_RU26 = 0, RTW89_RU52 = 1, RTW89_RU106 = 2, + RTW89_RU52_26 = 3, + RTW89_RU106_26 = 4, RTW89_RU_NUM, }; @@ -696,11 +909,13 @@ struct rtw89_chan { u32 freq; enum rtw89_subband subband_type; enum rtw89_sc_offset pri_ch_idx; + u8 pri_sb_idx; }; struct 
rtw89_chan_rcd { u8 prev_primary_channel; enum rtw89_band prev_band_type; + bool band_changed; }; struct rtw89_channel_help_params { @@ -723,6 +938,12 @@ struct rtw89_port_reg { u32 bcn_cnt_tmr; u32 tsftr_l; u32 tsftr_h; + u32 md_tsft; + u32 bss_color; + u32 mbssid; + u32 mbssid_drop; + u32 tsf_sync; + u32 hiq_win[RTW89_PORT_NUM]; }; struct rtw89_txwd_body { @@ -745,6 +966,17 @@ struct rtw89_txwd_body_v1 { __le32 dword7; } __packed; +struct rtw89_txwd_body_v2 { + __le32 dword0; + __le32 dword1; + __le32 dword2; + __le32 dword3; + __le32 dword4; + __le32 dword5; + __le32 dword6; + __le32 dword7; +} __packed; + struct rtw89_txwd_info { __le32 dword0; __le32 dword1; @@ -754,10 +986,23 @@ struct rtw89_txwd_info { __le32 dword5; } __packed; +struct rtw89_txwd_info_v2 { + __le32 dword0; + __le32 dword1; + __le32 dword2; + __le32 dword3; + __le32 dword4; + __le32 dword5; + __le32 dword6; + __le32 dword7; +} __packed; + struct rtw89_rx_desc_info { u16 pkt_size; u8 pkt_type; u8 drv_info_size; + u8 phy_rpt_size; + u8 hdr_cnv_size; u8 shift; u8 wl_hd_iv_len; bool long_rxdesc; @@ -796,6 +1041,15 @@ struct rtw89_rxdesc_short { __le32 dword3; } __packed; +struct rtw89_rxdesc_short_v2 { + __le32 dword0; + __le32 dword1; + __le32 dword2; + __le32 dword3; + __le32 dword4; + __le32 dword5; +} __packed; + struct rtw89_rxdesc_long { __le32 dword0; __le32 dword1; @@ -807,6 +1061,19 @@ struct rtw89_rxdesc_long { __le32 dword7; } __packed; +struct rtw89_rxdesc_long_v2 { + __le32 dword0; + __le32 dword1; + __le32 dword2; + __le32 dword3; + __le32 dword4; + __le32 dword5; + __le32 dword6; + __le32 dword7; + __le32 dword8; + __le32 dword9; +} __packed; + struct rtw89_tx_desc_info { u16 pkt_size; u8 wp_offset; @@ -2457,12 +2724,24 @@ struct rtw89_btc { bool lps; }; +enum rtw89_btc_hmsg { + RTW89_BTC_HMSG_TMR_EN = 0x0, + RTW89_BTC_HMSG_BT_REG_READBACK = 0x1, + RTW89_BTC_HMSG_SET_BT_REQ_SLOT = 0x2, + RTW89_BTC_HMSG_FW_EV = 0x3, + RTW89_BTC_HMSG_BT_LINK_CHG = 0x4, + RTW89_BTC_HMSG_SET_BT_REQ_STBC = 0x5, + + NUM_OF_RTW89_BTC_HMSG, +}; + enum rtw89_ra_mode { RTW89_RA_MODE_CCK = BIT(0), RTW89_RA_MODE_OFDM = BIT(1), RTW89_RA_MODE_HT = BIT(2), RTW89_RA_MODE_VHT = BIT(3), RTW89_RA_MODE_HE = BIT(4), + RTW89_RA_MODE_EHT = BIT(5), }; enum rtw89_ra_report_mode { @@ -2470,6 +2749,7 @@ enum rtw89_ra_report_mode { RTW89_RA_RPT_MODE_HT, RTW89_RA_RPT_MODE_VHT, RTW89_RA_RPT_MODE_HE, + RTW89_RA_RPT_MODE_EHT, }; enum rtw89_dig_noisy_level { @@ -2504,9 +2784,10 @@ struct rtw89_ra_info { * Bit2 : HT * Bit3 : VHT * Bit4 : HE + * Bit5 : EHT */ - u8 mode_ctrl:5; - u8 bw_cap:2; + u8 mode_ctrl:6; + u8 bw_cap:3; /* enum rtw89_bandwidth */ u8 macid; u8 dcm_cap:1; u8 er_cap:1; @@ -2685,10 +2966,37 @@ struct rtw89_roc { #define RTW89_P2P_MAX_NOA_NUM 2 +struct rtw89_p2p_ie_head { + u8 eid; + u8 ie_len; + u8 oui[3]; + u8 oui_type; +} __packed; + +struct rtw89_noa_attr_head { + u8 attr_type; + __le16 attr_len; + u8 index; + u8 oppps_ctwindow; +} __packed; + +struct rtw89_p2p_noa_ie { + struct rtw89_p2p_ie_head p2p_head; + struct rtw89_noa_attr_head noa_head; + struct ieee80211_p2p_noa_desc noa_desc[RTW89_P2P_MAX_NOA_NUM]; +} __packed; + +struct rtw89_p2p_noa_setter { + struct rtw89_p2p_noa_ie ie; + u8 noa_count; + u8 noa_index; +}; + struct rtw89_vif { struct list_head list; struct rtw89_dev *rtwdev; struct rtw89_roc roc; + bool chanctx_assigned; /* only valid when running with chanctx_ops */ enum rtw89_sub_entity_idx sub_entity_idx; enum rtw89_reg_6ghz_power reg_6ghz_power; @@ -2716,6 +3024,8 @@ struct rtw89_vif { bool is_hesta; bool last_a_ctrl; 
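Among the additions above, struct rtw89_p2p_noa_ie is a byte-exact (__packed) template for the P2P Notice-of-Absence IE: a vendor-IE header (EID 221 with the WFA OUI and P2P OUI type), the NoA attribute header, then up to RTW89_P2P_MAX_NOA_NUM descriptors, while rtw89_p2p_noa_setter tracks how many descriptors are currently filled. A sketch of how such a setter might be reset before rebuilding the IE; the helper name is hypothetical, and the base attribute length of 2 (index + OppPS/CTWindow, before any descriptors) is an assumption from the 802.11 P2P attribute layout:

	static void example_p2p_noa_reset(struct rtw89_p2p_noa_setter *setter)
	{
		struct rtw89_p2p_noa_ie *ie = &setter->ie;

		/* attr_len counts the attribute body after the length field:
		 * index + oppps_ctwindow now, plus 13 bytes per descriptor
		 * appended later. */
		setter->noa_count = 0;
		ie->noa_head.attr_len = cpu_to_le16(2);
	}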
bool dyn_tb_bedge_en; + bool pre_pwr_diff_en; + bool pwr_diff_en; u8 def_tri_idx; u32 tdls_peer; struct work_struct update_beacon_work; @@ -2727,6 +3037,7 @@ struct rtw89_vif { struct cfg80211_scan_request *scan_req; struct ieee80211_scan_ies *scan_ies; struct list_head general_pkt_list; + struct rtw89_p2p_noa_setter p2p_noa; }; enum rtw89_lv1_rcvy_step { @@ -2790,6 +3101,7 @@ struct rtw89_hci_info { struct rtw89_chip_ops { int (*enable_bb_rf)(struct rtw89_dev *rtwdev); int (*disable_bb_rf)(struct rtw89_dev *rtwdev); + void (*bb_preinit)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); void (*bb_reset)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); void (*bb_sethw)(struct rtw89_dev *rtwdev); @@ -2824,11 +3136,13 @@ struct rtw89_chip_ops { enum rtw89_phy_idx phy_idx); int (*init_txpwr_unit)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); u8 (*get_thermal)(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path); - void (*ctrl_btg)(struct rtw89_dev *rtwdev, bool btg); + void (*ctrl_btg_bt_rx)(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx); void (*query_ppdu)(struct rtw89_dev *rtwdev, struct rtw89_rx_phy_ppdu *phy_ppdu, struct ieee80211_rx_status *status); - void (*bb_ctrl_btc_preagc)(struct rtw89_dev *rtwdev, bool bt_en); + void (*ctrl_nbtg_bt_tx)(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx); void (*cfg_txrx_path)(struct rtw89_dev *rtwdev); void (*set_txpwr_ul_tb_offset)(struct rtw89_dev *rtwdev, s8 pw_ofst, enum rtw89_mac_idx mac_idx); @@ -3052,10 +3366,17 @@ struct rtw89_txpwr_rule_6ghz { [RTW89_6G_CH_NUM]; }; +struct rtw89_tx_shape { + const u8 (*lmt)[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM][RTW89_REGD_NUM]; + const u8 (*lmt_ru)[RTW89_BAND_NUM][RTW89_REGD_NUM]; +}; + struct rtw89_rfe_parms { + const struct rtw89_txpwr_table *byr_tbl; struct rtw89_txpwr_rule_2ghz rule_2ghz; struct rtw89_txpwr_rule_5ghz rule_5ghz; struct rtw89_txpwr_rule_6ghz rule_6ghz; + struct rtw89_tx_shape tx_shape; }; struct rtw89_rfe_parms_conf { @@ -3063,6 +3384,95 @@ struct rtw89_rfe_parms_conf { u8 rfe_type; }; +#define RTW89_TXPWR_CONF_DFLT_RFE_TYPE 0x0 + +struct rtw89_txpwr_conf { + u8 rfe_type; + u8 ent_sz; + u32 num_ents; + const void *data; +}; + +#define rtw89_txpwr_conf_valid(conf) (!!(conf)->data) + +#define rtw89_for_each_in_txpwr_conf(entry, cursor, conf) \ + for (typecheck(const void *, cursor), (cursor) = (conf)->data, \ + memcpy(&(entry), cursor, \ + min_t(u8, sizeof(entry), (conf)->ent_sz)); \ + (cursor) < (conf)->data + (conf)->num_ents * (conf)->ent_sz; \ + (cursor) += (conf)->ent_sz, \ + memcpy(&(entry), cursor, \ + min_t(u8, sizeof(entry), (conf)->ent_sz))) + +struct rtw89_txpwr_byrate_data { + struct rtw89_txpwr_conf conf; + struct rtw89_txpwr_table tbl; +}; + +struct rtw89_txpwr_lmt_2ghz_data { + struct rtw89_txpwr_conf conf; + s8 v[RTW89_2G_BW_NUM][RTW89_NTX_NUM] + [RTW89_RS_LMT_NUM][RTW89_BF_NUM] + [RTW89_REGD_NUM][RTW89_2G_CH_NUM]; +}; + +struct rtw89_txpwr_lmt_5ghz_data { + struct rtw89_txpwr_conf conf; + s8 v[RTW89_5G_BW_NUM][RTW89_NTX_NUM] + [RTW89_RS_LMT_NUM][RTW89_BF_NUM] + [RTW89_REGD_NUM][RTW89_5G_CH_NUM]; +}; + +struct rtw89_txpwr_lmt_6ghz_data { + struct rtw89_txpwr_conf conf; + s8 v[RTW89_6G_BW_NUM][RTW89_NTX_NUM] + [RTW89_RS_LMT_NUM][RTW89_BF_NUM] + [RTW89_REGD_NUM][NUM_OF_RTW89_REG_6GHZ_POWER] + [RTW89_6G_CH_NUM]; +}; + +struct rtw89_txpwr_lmt_ru_2ghz_data { + struct rtw89_txpwr_conf conf; + s8 v[RTW89_RU_NUM][RTW89_NTX_NUM] + [RTW89_REGD_NUM][RTW89_2G_CH_NUM]; +}; + +struct rtw89_txpwr_lmt_ru_5ghz_data { + struct 
rtw89_txpwr_conf conf; + s8 v[RTW89_RU_NUM][RTW89_NTX_NUM] + [RTW89_REGD_NUM][RTW89_5G_CH_NUM]; +}; + +struct rtw89_txpwr_lmt_ru_6ghz_data { + struct rtw89_txpwr_conf conf; + s8 v[RTW89_RU_NUM][RTW89_NTX_NUM] + [RTW89_REGD_NUM][NUM_OF_RTW89_REG_6GHZ_POWER] + [RTW89_6G_CH_NUM]; +}; + +struct rtw89_tx_shape_lmt_data { + struct rtw89_txpwr_conf conf; + u8 v[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM][RTW89_REGD_NUM]; +}; + +struct rtw89_tx_shape_lmt_ru_data { + struct rtw89_txpwr_conf conf; + u8 v[RTW89_BAND_NUM][RTW89_REGD_NUM]; +}; + +struct rtw89_rfe_data { + struct rtw89_txpwr_byrate_data byrate; + struct rtw89_txpwr_lmt_2ghz_data lmt_2ghz; + struct rtw89_txpwr_lmt_5ghz_data lmt_5ghz; + struct rtw89_txpwr_lmt_6ghz_data lmt_6ghz; + struct rtw89_txpwr_lmt_ru_2ghz_data lmt_ru_2ghz; + struct rtw89_txpwr_lmt_ru_5ghz_data lmt_ru_5ghz; + struct rtw89_txpwr_lmt_ru_6ghz_data lmt_ru_6ghz; + struct rtw89_tx_shape_lmt_data tx_shape_lmt; + struct rtw89_tx_shape_lmt_ru_data tx_shape_lmt_ru; + struct rtw89_rfe_parms rfe_parms; +}; + struct rtw89_page_regs { u32 hci_fc_ctrl; u32 ch_page_ctrl; @@ -3139,6 +3549,10 @@ struct rtw89_dig_regs { u32 seg0_pd_reg; u32 pd_lower_bound_mask; u32 pd_spatial_reuse_en; + u32 bmode_pd_reg; + u32 bmode_cca_rssi_limit_en; + u32 bmode_pd_lower_bound_reg; + u32 bmode_rssi_nocca_low_th_mask; struct rtw89_reg_def p0_lna_init; struct rtw89_reg_def p1_lna_init; struct rtw89_reg_def p0_tia_init; @@ -3175,12 +3589,34 @@ struct rtw89_antdiv_info { bool get_stats; }; +enum rtw89_chanctx_state { + RTW89_CHANCTX_STATE_MCC_START, + RTW89_CHANCTX_STATE_MCC_STOP, +}; + +enum rtw89_chanctx_callbacks { + RTW89_CHANCTX_CALLBACK_PLACEHOLDER, + RTW89_CHANCTX_CALLBACK_RFK, + + NUM_OF_RTW89_CHANCTX_CALLBACKS, +}; + +struct rtw89_chanctx_listener { + void (*callbacks[NUM_OF_RTW89_CHANCTX_CALLBACKS]) + (struct rtw89_dev *rtwdev, enum rtw89_chanctx_state state); +}; + struct rtw89_chip_info { enum rtw89_core_chip_id chip_id; + enum rtw89_chip_gen chip_gen; const struct rtw89_chip_ops *ops; + const struct rtw89_mac_gen_def *mac_def; + const struct rtw89_phy_gen_def *phy_def; const char *fw_basename; u8 fw_format_max; bool try_ce_fw; + u8 bbmcu_nr; + u32 needed_fw_elms; u32 fifo_size; bool small_fifo_size; u32 dle_scc_rsvd_size; @@ -3196,7 +3632,8 @@ struct rtw89_chip_info { u8 support_bands; bool support_bw160; bool support_unii4; - bool support_ul_tb_ctrl; + bool ul_tb_waveform_ctrl; + bool ul_tb_pwr_diff; bool hw_sec_hdr; u8 rf_path_num; u8 tx_nss; @@ -3224,7 +3661,6 @@ struct rtw89_chip_info { const struct rtw89_phy_table *rf_table[RF_PATH_MAX]; const struct rtw89_phy_table *nctl_table; const struct rtw89_rfk_tbl *nctl_post_table; - const struct rtw89_txpwr_table *byr_table; const struct rtw89_phy_dig_gain_table *dig_table; const struct rtw89_dig_regs *dig_regs; const struct rtw89_phy_tssi_dbw_table *tssi_dbw_table; @@ -3232,6 +3668,7 @@ struct rtw89_chip_info { /* NULL if no rfe-specific, or a null-terminated array by rfe_parms */ const struct rtw89_rfe_parms_conf *rfe_parms_conf; const struct rtw89_rfe_parms *dflt_parms; + const struct rtw89_chanctx_listener *chanctx_listener; u8 txpwr_factor_rf; u8 txpwr_factor_mac; @@ -3260,6 +3697,7 @@ struct rtw89_chip_info { u32 hci_func_en_addr; u32 h2c_desc_size; u32 txwd_body_size; + u32 txwd_info_size; u32 h2c_ctrl_reg; const u32 *h2c_regs; struct rtw89_reg_def h2c_counter_reg; @@ -3273,6 +3711,7 @@ struct rtw89_chip_info { u8 dcfo_comp_sft; const struct rtw89_imr_info *imr_info; const struct rtw89_rrsr_cfgs *rrsr_cfgs; + struct rtw89_reg_def bss_clr_vld; 
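The rtw89_for_each_in_txpwr_conf() iterator defined above is the interesting piece of the new loadable TX-power tables: each firmware-provided table records its own per-entry size (ent_sz), and the macro copies only min(sizeof(entry), ent_sz) bytes per step, so a driver built against an older, shorter entry layout can still walk a newer table. A minimal usage sketch with a hypothetical entry type (the real layouts live in the per-chip tables):

	/* Hypothetical 3-byte entry for illustration only. */
	struct example_txpwr_ent {
		u8 band;
		u8 bw;
		s8 pwr;
	} __packed;

	static void example_dump_conf(const struct rtw89_txpwr_conf *conf)
	{
		struct example_txpwr_ent ent;
		const void *cursor;

		if (!rtw89_txpwr_conf_valid(conf))
			return;

		/* ent_sz comes from the firmware file; extra trailing bytes
		 * in each entry are simply skipped by the cursor arithmetic. */
		rtw89_for_each_in_txpwr_conf(ent, cursor, conf)
			pr_debug("band=%u bw=%u pwr=%d\n",
				 ent.band, ent.bw, ent.pwr);
	}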
u32 bss_clr_map_reg; u32 dma_ch_mask; u32 edcca_lvl_reg; @@ -3343,10 +3782,21 @@ struct rtw89_mac_info { struct rtw89_wait_info fw_ofld_wait; }; +enum rtw89_fwdl_check_type { + RTW89_FWDL_CHECK_FREERTOS_DONE, + RTW89_FWDL_CHECK_WCPU_FWDL_DONE, + RTW89_FWDL_CHECK_DCPU_FWDL_DONE, + RTW89_FWDL_CHECK_BB0_FWDL_DONE, + RTW89_FWDL_CHECK_BB1_FWDL_DONE, +}; + enum rtw89_fw_type { RTW89_FW_NORMAL = 1, RTW89_FW_WOWLAN = 3, RTW89_FW_NORMAL_CE = 5, + RTW89_FW_BBMCU0 = 64, + RTW89_FW_BBMCU1 = 65, + RTW89_FW_LOGFMT = 255, }; enum rtw89_fw_feature { @@ -3361,6 +3811,7 @@ enum rtw89_fw_feature { }; struct rtw89_fw_suit { + enum rtw89_fw_type type; const u8 *data; u32 size; u8 major_ver; @@ -3373,6 +3824,8 @@ struct rtw89_fw_suit { u16 build_hour; u16 build_min; u8 cmd_ver; + u8 hdr_ver; + u32 commitid; }; #define RTW89_FW_VER_CODE(major, minor, sub, idx) \ @@ -3397,6 +3850,22 @@ struct rtw89_fw_req_info { struct completion completion; }; +struct rtw89_fw_log { + struct rtw89_fw_suit suit; + bool enable; + u32 last_fmt_id; + u32 fmt_count; + const __le32 *fmt_ids; + const char *(*fmts)[]; +}; + +struct rtw89_fw_elm_info { + struct rtw89_phy_table *bb_tbl; + struct rtw89_phy_table *bb_gain; + struct rtw89_phy_table *rf_radio[RF_PATH_MAX]; + struct rtw89_phy_table *rf_nctl; +}; + struct rtw89_fw_info { struct rtw89_fw_req_info req; int fw_format; @@ -3406,8 +3875,11 @@ struct rtw89_fw_info { u8 c2h_counter; struct rtw89_fw_suit normal; struct rtw89_fw_suit wowlan; - bool fw_log_enable; + struct rtw89_fw_suit bbmcu0; + struct rtw89_fw_suit bbmcu1; + struct rtw89_fw_log log; u32 feature_map; + struct rtw89_fw_elm_info elm_info; }; #define RTW89_CHK_FW_FEATURE(_feat, _fw) \ @@ -3463,12 +3935,45 @@ struct rtw89_sar_info { }; }; +enum rtw89_tas_state { + RTW89_TAS_STATE_DPR_OFF, + RTW89_TAS_STATE_DPR_ON, + RTW89_TAS_STATE_DPR_FORBID, +}; + +#define RTW89_TAS_MAX_WINDOW 50 +struct rtw89_tas_info { + s16 txpwr_history[RTW89_TAS_MAX_WINDOW]; + s32 total_txpwr; + u8 cur_idx; + s8 dpr_gap; + s8 delta; + enum rtw89_tas_state state; + bool enable; +}; + struct rtw89_chanctx_cfg { enum rtw89_sub_entity_idx idx; }; +enum rtw89_chanctx_changes { + RTW89_CHANCTX_REMOTE_STA_CHANGE, + RTW89_CHANCTX_BCN_OFFSET_CHANGE, + RTW89_CHANCTX_P2P_PS_CHANGE, + RTW89_CHANCTX_BT_SLOT_CHANGE, + RTW89_CHANCTX_TSF32_TOGGLE_CHANGE, + + NUM_OF_RTW89_CHANCTX_CHANGES, + RTW89_CHANCTX_CHANGE_DFLT = NUM_OF_RTW89_CHANCTX_CHANGES, +}; + enum rtw89_entity_mode { RTW89_ENTITY_MODE_SCC, + RTW89_ENTITY_MODE_MCC_PREPARE, + RTW89_ENTITY_MODE_MCC, + + NUM_OF_RTW89_ENTITY_MODE, + RTW89_ENTITY_MODE_INVALID = NUM_OF_RTW89_ENTITY_MODE, }; struct rtw89_sub_entity { @@ -3493,11 +3998,13 @@ struct rtw89_hal { bool support_igi; atomic_t roc_entity_idx; + DECLARE_BITMAP(changes, NUM_OF_RTW89_CHANCTX_CHANGES); DECLARE_BITMAP(entity_map, NUM_OF_RTW89_SUB_ENTITY); struct rtw89_sub_entity sub[NUM_OF_RTW89_SUB_ENTITY]; struct cfg80211_chan_def roc_chandef; bool entity_active; + bool entity_pause; enum rtw89_entity_mode entity_mode; u32 edcca_bak; @@ -4043,8 +4550,95 @@ struct rtw89_wow_param { u8 pattern_cnt; }; +struct rtw89_mcc_limit { + bool enable; + u16 max_tob; /* TU; max time offset behind */ + u16 max_toa; /* TU; max time offset ahead */ + u16 max_dur; /* TU */ +}; + +struct rtw89_mcc_policy { + u8 c2h_rpt; + u8 tx_null_early; + u8 dis_tx_null; + u8 in_curr_ch; + u8 dis_sw_retry; + u8 sw_retry_count; +}; + +struct rtw89_mcc_role { + struct rtw89_vif *rtwvif; + struct rtw89_mcc_policy policy; + struct rtw89_mcc_limit limit; + + /* byte-array in LE order for FW */ 
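On struct rtw89_tas_info above: its fields describe a sliding window of the last RTW89_TAS_MAX_WINDOW (50) TX-power samples, kept so a time-averaged power can be compared against the TAS (time-averaged SAR) limit and the DPR state switched accordingly. The update step this layout implies is O(1); a minimal sketch with a hypothetical helper name (the actual state machine lives elsewhere in the driver):

	static void example_tas_record(struct rtw89_tas_info *tas, s16 txpwr)
	{
		/* Replace the oldest sample in the ring and keep a running
		 * sum, so the average is total_txpwr / RTW89_TAS_MAX_WINDOW
		 * without rescanning the history. */
		tas->total_txpwr += txpwr - tas->txpwr_history[tas->cur_idx];
		tas->txpwr_history[tas->cur_idx] = txpwr;
		tas->cur_idx = (tas->cur_idx + 1) % RTW89_TAS_MAX_WINDOW;
	}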
+ u8 macid_bitmap[BITS_TO_BYTES(RTW89_MAX_MAC_ID_NUM)]; + + u16 duration; /* TU */ + u16 beacon_interval; /* TU */ + bool is_2ghz; + bool is_go; + bool is_gc; +}; + +struct rtw89_mcc_bt_role { + u16 duration; /* TU */ +}; + +struct rtw89_mcc_courtesy { + bool enable; + u8 slot_num; + u8 macid_src; + u8 macid_tgt; +}; + +enum rtw89_mcc_plan { + RTW89_MCC_PLAN_TAIL_BT, + RTW89_MCC_PLAN_MID_BT, + RTW89_MCC_PLAN_NO_BT, + + NUM_OF_RTW89_MCC_PLAN, +}; + +struct rtw89_mcc_pattern { + s16 tob_ref; /* TU; time offset behind of reference role */ + s16 toa_ref; /* TU; time offset ahead of reference role */ + s16 tob_aux; /* TU; time offset behind of auxiliary role */ + s16 toa_aux; /* TU; time offset ahead of auxiliary role */ + + enum rtw89_mcc_plan plan; + struct rtw89_mcc_courtesy courtesy; +}; + +struct rtw89_mcc_sync { + bool enable; + u16 offset; /* TU */ + u8 macid_src; + u8 macid_tgt; +}; + +struct rtw89_mcc_config { + struct rtw89_mcc_pattern pattern; + struct rtw89_mcc_sync sync; + u64 start_tsf; + u16 mcc_interval; /* TU */ + u16 beacon_offset; /* TU */ +}; + +enum rtw89_mcc_mode { + RTW89_MCC_MODE_GO_STA, + RTW89_MCC_MODE_GC_STA, +}; + struct rtw89_mcc_info { struct rtw89_wait_info wait; + + u8 group; + enum rtw89_mcc_mode mode; + struct rtw89_mcc_role role_ref; /* reference role */ + struct rtw89_mcc_role role_aux; /* auxiliary role */ + struct rtw89_mcc_bt_role bt_role; + struct rtw89_mcc_config config; }; struct rtw89_dev { @@ -4064,6 +4658,7 @@ struct rtw89_dev { struct rtw89_hci_info hci; struct rtw89_efuse efuse; struct rtw89_traffic_stats stats; + struct rtw89_rfe_data *rfe_data; /* ensures exclusive access from mac80211 callbacks */ struct mutex mutex; @@ -4111,7 +4706,7 @@ struct rtw89_dev { bool is_bt_iqk_timeout; struct rtw89_fem_info fem; - struct rtw89_txpwr_byrate byr[RTW89_BAND_NUM]; + struct rtw89_txpwr_byrate byr[RTW89_BAND_NUM][RTW89_BYR_BW_NUM]; struct rtw89_tssi_info tssi; struct rtw89_power_trim_info pwr_trim; @@ -4125,6 +4720,7 @@ struct rtw89_dev { struct rtw89_antdiv_info antdiv; struct delayed_work track_work; + struct delayed_work chanctx_work; struct delayed_work coex_act1_work; struct delayed_work coex_bt_devinfo_work; struct delayed_work coex_rfk_chk_work; @@ -4138,6 +4734,7 @@ struct rtw89_dev { struct rtw89_regulatory_info regulatory; struct rtw89_sar_info sar; + struct rtw89_tas_info tas; struct rtw89_btc btc; enum rtw89_ps_mode ps_mode; @@ -4595,6 +5192,30 @@ enum rtw89_bandwidth nl_to_rtw89_bandwidth(enum nl80211_chan_width width) } static inline +enum nl80211_he_ru_alloc rtw89_he_rua_to_ru_alloc(u16 rua) +{ + switch (rua) { + default: + WARN(1, "Invalid RU allocation: %d\n", rua); + fallthrough; + case 0 ... 36: + return NL80211_RATE_INFO_HE_RU_ALLOC_26; + case 37 ... 52: + return NL80211_RATE_INFO_HE_RU_ALLOC_52; + case 53 ... 60: + return NL80211_RATE_INFO_HE_RU_ALLOC_106; + case 61 ... 64: + return NL80211_RATE_INFO_HE_RU_ALLOC_242; + case 65 ... 
66: + return NL80211_RATE_INFO_HE_RU_ALLOC_484; + case 67: + return NL80211_RATE_INFO_HE_RU_ALLOC_996; + case 68: + return NL80211_RATE_INFO_HE_RU_ALLOC_2x996; + } +} + +static inline struct rtw89_addr_cam_entry *rtw89_get_addr_cam_of(struct rtw89_vif *rtwvif, struct rtw89_sta *rtwsta) { @@ -4673,6 +5294,18 @@ const struct rtw89_chan_rcd *rtw89_chan_rcd_get(struct rtw89_dev *rtwdev, return &hal->sub[idx].rcd; } +static inline +const struct rtw89_chan *rtw89_scan_chan_get(struct rtw89_dev *rtwdev) +{ + struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; + struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); + + if (rtwvif) + return rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx); + else + return rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); +} + static inline void rtw89_chip_fem_setup(struct rtw89_dev *rtwdev) { const struct rtw89_chip_info *chip = rtwdev->chip; @@ -4689,6 +5322,15 @@ static inline void rtw89_chip_rfe_gpio(struct rtw89_dev *rtwdev) chip->ops->rfe_gpio(rtwdev); } +static inline +void rtw89_chip_bb_preinit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (chip->ops->bb_preinit) + chip->ops->bb_preinit(rtwdev, phy_idx); +} + static inline void rtw89_chip_bb_sethw(struct rtw89_dev *rtwdev) { const struct rtw89_chip_info *chip = rtwdev->chip; @@ -4784,13 +5426,13 @@ static inline void rtw89_chip_query_ppdu(struct rtw89_dev *rtwdev, chip->ops->query_ppdu(rtwdev, phy_ppdu, status); } -static inline void rtw89_chip_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, - bool bt_en) +static inline void rtw89_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) { const struct rtw89_chip_info *chip = rtwdev->chip; - if (chip->ops->bb_ctrl_btc_preagc) - chip->ops->bb_ctrl_btc_preagc(rtwdev, bt_en); + if (chip->ops->ctrl_nbtg_bt_tx) + chip->ops->ctrl_nbtg_bt_tx(rtwdev, en, phy_idx); } static inline void rtw89_chip_cfg_txrx_path(struct rtw89_dev *rtwdev) @@ -4828,12 +5470,13 @@ static inline u8 rtw89_regd_get(struct rtw89_dev *rtwdev, u8 band) return regd->txpwr_regd[band]; } -static inline void rtw89_ctrl_btg(struct rtw89_dev *rtwdev, bool btg) +static inline void rtw89_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) { const struct rtw89_chip_info *chip = rtwdev->chip; - if (chip->ops->ctrl_btg) - chip->ops->ctrl_btg(rtwdev, btg); + if (chip->ops->ctrl_btg_bt_rx) + chip->ops->ctrl_btg_bt_rx(rtwdev, en, phy_idx); } static inline @@ -4940,8 +5583,19 @@ static inline struct rtw89_fw_suit *rtw89_fw_suit_get(struct rtw89_dev *rtwdev, { struct rtw89_fw_info *fw_info = &rtwdev->fw; - if (type == RTW89_FW_WOWLAN) + switch (type) { + case RTW89_FW_WOWLAN: return &fw_info->wowlan; + case RTW89_FW_LOGFMT: + return &fw_info->log.suit; + case RTW89_FW_BBMCU0: + return &fw_info->bbmcu0; + case RTW89_FW_BBMCU1: + return &fw_info->bbmcu1; + default: + break; + } + return &fw_info->normal; } @@ -4994,15 +5648,24 @@ void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev, void rtw89_core_fill_txdesc_v1(struct rtw89_dev *rtwdev, struct rtw89_tx_desc_info *desc_info, void *txdesc); +void rtw89_core_fill_txdesc_v2(struct rtw89_dev *rtwdev, + struct rtw89_tx_desc_info *desc_info, + void *txdesc); void rtw89_core_fill_txdesc_fwcmd_v1(struct rtw89_dev *rtwdev, struct rtw89_tx_desc_info *desc_info, void *txdesc); +void rtw89_core_fill_txdesc_fwcmd_v2(struct rtw89_dev *rtwdev, + struct rtw89_tx_desc_info *desc_info, + void *txdesc); void rtw89_core_rx(struct rtw89_dev *rtwdev, struct 
rtw89_rx_desc_info *desc_info, struct sk_buff *skb); void rtw89_core_query_rxdesc(struct rtw89_dev *rtwdev, struct rtw89_rx_desc_info *desc_info, u8 *data, u32 data_offset); +void rtw89_core_query_rxdesc_v2(struct rtw89_dev *rtwdev, + struct rtw89_rx_desc_info *desc_info, + u8 *data, u32 data_offset); void rtw89_core_napi_start(struct rtw89_dev *rtwdev); void rtw89_core_napi_stop(struct rtw89_dev *rtwdev); void rtw89_core_napi_init(struct rtw89_dev *rtwdev); @@ -5035,6 +5698,8 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device, void rtw89_free_ieee80211_hw(struct rtw89_dev *rtwdev); void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev); void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef); +void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef, + struct rtw89_chan *chan); void rtw89_set_channel(struct rtw89_dev *rtwdev); void rtw89_get_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, struct rtw89_chan *chan); @@ -5069,5 +5734,7 @@ void rtw89_core_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, bool hw_scan); void rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool active); +void rtw89_core_update_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif); +void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg event); #endif diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c index a4bbac916e22..a3f795d240ea 100644 --- a/drivers/net/wireless/realtek/rtw89/debug.c +++ b/drivers/net/wireless/realtek/rtw89/debug.c @@ -367,7 +367,11 @@ static int rtw89_debug_priv_rf_reg_dump_get(struct seq_file *m, void *v) } struct txpwr_ent { - const char *txt; + bool nested; + union { + const char *txt; + const struct txpwr_ent *ptr; + }; u8 len; }; @@ -379,6 +383,12 @@ struct txpwr_map { u32 addr_to_1ss; }; +#define __GEN_TXPWR_ENT_NESTED(_e) \ + { .nested = true, .ptr = __txpwr_ent_##_e, \ + .len = ARRAY_SIZE(__txpwr_ent_##_e) } + +#define __GEN_TXPWR_ENT0(_t) { .len = 0, .txt = _t } + #define __GEN_TXPWR_ENT2(_t, _e0, _e1) \ { .len = 2, .txt = _t "\t- " _e0 " " _e1 } @@ -390,7 +400,7 @@ struct txpwr_map { _e0 " " _e1 " " _e2 " " _e3 " " \ _e4 " " _e5 " " _e6 " " _e7 } -static const struct txpwr_ent __txpwr_ent_byr[] = { +static const struct txpwr_ent __txpwr_ent_byr_ax[] = { __GEN_TXPWR_ENT4("CCK ", "1M ", "2M ", "5.5M ", "11M "), __GEN_TXPWR_ENT4("LEGACY ", "6M ", "9M ", "12M ", "18M "), __GEN_TXPWR_ENT4("LEGACY ", "24M ", "36M ", "48M ", "54M "), @@ -406,18 +416,18 @@ static const struct txpwr_ent __txpwr_ent_byr[] = { __GEN_TXPWR_ENT4("HEDCM_2NSS", "MCS0 ", "MCS1 ", "MCS3 ", "MCS4 "), }; -static_assert((ARRAY_SIZE(__txpwr_ent_byr) * 4) == +static_assert((ARRAY_SIZE(__txpwr_ent_byr_ax) * 4) == (R_AX_PWR_BY_RATE_MAX - R_AX_PWR_BY_RATE + 4)); -static const struct txpwr_map __txpwr_map_byr = { - .ent = __txpwr_ent_byr, - .size = ARRAY_SIZE(__txpwr_ent_byr), +static const struct txpwr_map __txpwr_map_byr_ax = { + .ent = __txpwr_ent_byr_ax, + .size = ARRAY_SIZE(__txpwr_ent_byr_ax), .addr_from = R_AX_PWR_BY_RATE, .addr_to = R_AX_PWR_BY_RATE_MAX, .addr_to_1ss = R_AX_PWR_BY_RATE_1SS_MAX, }; -static const struct txpwr_ent __txpwr_ent_lmt[] = { +static const struct txpwr_ent __txpwr_ent_lmt_ax[] = { /* 1TX */ __GEN_TXPWR_ENT2("CCK_1TX_20M ", "NON_BF", "BF"), __GEN_TXPWR_ENT2("CCK_1TX_40M ", "NON_BF", "BF"), @@ -462,18 +472,18 @@ static const struct txpwr_ent __txpwr_ent_lmt[] = { __GEN_TXPWR_ENT2("MCS_2TX_40M_2p5", "NON_BF", "BF"), 
}; -static_assert((ARRAY_SIZE(__txpwr_ent_lmt) * 2) == +static_assert((ARRAY_SIZE(__txpwr_ent_lmt_ax) * 2) == (R_AX_PWR_LMT_MAX - R_AX_PWR_LMT + 4)); -static const struct txpwr_map __txpwr_map_lmt = { - .ent = __txpwr_ent_lmt, - .size = ARRAY_SIZE(__txpwr_ent_lmt), +static const struct txpwr_map __txpwr_map_lmt_ax = { + .ent = __txpwr_ent_lmt_ax, + .size = ARRAY_SIZE(__txpwr_ent_lmt_ax), .addr_from = R_AX_PWR_LMT, .addr_to = R_AX_PWR_LMT_MAX, .addr_to_1ss = R_AX_PWR_LMT_1SS_MAX, }; -static const struct txpwr_ent __txpwr_ent_lmt_ru[] = { +static const struct txpwr_ent __txpwr_ent_lmt_ru_ax[] = { /* 1TX */ __GEN_TXPWR_ENT8("1TX", "RU26__0", "RU26__1", "RU26__2", "RU26__3", "RU26__4", "RU26__5", "RU26__6", "RU26__7"), @@ -490,25 +500,207 @@ static const struct txpwr_ent __txpwr_ent_lmt_ru[] = { "RU106_4", "RU106_5", "RU106_6", "RU106_7"), }; -static_assert((ARRAY_SIZE(__txpwr_ent_lmt_ru) * 8) == +static_assert((ARRAY_SIZE(__txpwr_ent_lmt_ru_ax) * 8) == (R_AX_PWR_RU_LMT_MAX - R_AX_PWR_RU_LMT + 4)); -static const struct txpwr_map __txpwr_map_lmt_ru = { - .ent = __txpwr_ent_lmt_ru, - .size = ARRAY_SIZE(__txpwr_ent_lmt_ru), +static const struct txpwr_map __txpwr_map_lmt_ru_ax = { + .ent = __txpwr_ent_lmt_ru_ax, + .size = ARRAY_SIZE(__txpwr_ent_lmt_ru_ax), .addr_from = R_AX_PWR_RU_LMT, .addr_to = R_AX_PWR_RU_LMT_MAX, .addr_to_1ss = R_AX_PWR_RU_LMT_1SS_MAX, }; -static u8 __print_txpwr_ent(struct seq_file *m, const struct txpwr_ent *ent, - const s8 *buf, const u8 cur) +static const struct txpwr_ent __txpwr_ent_byr_mcs_be[] = { + __GEN_TXPWR_ENT4("MCS_1SS ", "MCS0 ", "MCS1 ", "MCS2 ", "MCS3 "), + __GEN_TXPWR_ENT4("MCS_1SS ", "MCS4 ", "MCS5 ", "MCS6 ", "MCS7 "), + __GEN_TXPWR_ENT4("MCS_1SS ", "MCS8 ", "MCS9 ", "MCS10", "MCS11"), + __GEN_TXPWR_ENT2("MCS_1SS ", "MCS12 ", "MCS13 \t"), + __GEN_TXPWR_ENT4("HEDCM_1SS ", "MCS0 ", "MCS1 ", "MCS3 ", "MCS4 "), + __GEN_TXPWR_ENT4("DLRU_MCS_1SS ", "MCS0 ", "MCS1 ", "MCS2 ", "MCS3 "), + __GEN_TXPWR_ENT4("DLRU_MCS_1SS ", "MCS4 ", "MCS5 ", "MCS6 ", "MCS7 "), + __GEN_TXPWR_ENT4("DLRU_MCS_1SS ", "MCS8 ", "MCS9 ", "MCS10", "MCS11"), + __GEN_TXPWR_ENT2("DLRU_MCS_1SS ", "MCS12 ", "MCS13 \t"), + __GEN_TXPWR_ENT4("DLRU_HEDCM_1SS", "MCS0 ", "MCS1 ", "MCS3 ", "MCS4 "), + __GEN_TXPWR_ENT4("MCS_2SS ", "MCS0 ", "MCS1 ", "MCS2 ", "MCS3 "), + __GEN_TXPWR_ENT4("MCS_2SS ", "MCS4 ", "MCS5 ", "MCS6 ", "MCS7 "), + __GEN_TXPWR_ENT4("MCS_2SS ", "MCS8 ", "MCS9 ", "MCS10", "MCS11"), + __GEN_TXPWR_ENT2("MCS_2SS ", "MCS12 ", "MCS13 \t"), + __GEN_TXPWR_ENT4("HEDCM_2SS ", "MCS0 ", "MCS1 ", "MCS3 ", "MCS4 "), + __GEN_TXPWR_ENT4("DLRU_MCS_2SS ", "MCS0 ", "MCS1 ", "MCS2 ", "MCS3 "), + __GEN_TXPWR_ENT4("DLRU_MCS_2SS ", "MCS4 ", "MCS5 ", "MCS6 ", "MCS7 "), + __GEN_TXPWR_ENT4("DLRU_MCS_2SS ", "MCS8 ", "MCS9 ", "MCS10", "MCS11"), + __GEN_TXPWR_ENT2("DLRU_MCS_2SS ", "MCS12 ", "MCS13 \t"), + __GEN_TXPWR_ENT4("DLRU_HEDCM_2SS", "MCS0 ", "MCS1 ", "MCS3 ", "MCS4 "), +}; + +static const struct txpwr_ent __txpwr_ent_byr_be[] = { + __GEN_TXPWR_ENT0("BW20"), + __GEN_TXPWR_ENT4("CCK ", "1M ", "2M ", "5.5M ", "11M "), + __GEN_TXPWR_ENT4("LEGACY ", "6M ", "9M ", "12M ", "18M "), + __GEN_TXPWR_ENT4("LEGACY ", "24M ", "36M ", "48M ", "54M "), + __GEN_TXPWR_ENT2("EHT ", "MCS14 ", "MCS15 \t"), + __GEN_TXPWR_ENT2("DLRU_EHT ", "MCS14 ", "MCS15 \t"), + __GEN_TXPWR_ENT_NESTED(byr_mcs_be), + + __GEN_TXPWR_ENT0("BW40"), + __GEN_TXPWR_ENT4("CCK ", "1M ", "2M ", "5.5M ", "11M "), + __GEN_TXPWR_ENT4("LEGACY ", "6M ", "9M ", "12M ", "18M "), + __GEN_TXPWR_ENT4("LEGACY ", "24M ", "36M ", "48M ", "54M "), + 
__GEN_TXPWR_ENT2("EHT ", "MCS14 ", "MCS15 \t"), + __GEN_TXPWR_ENT2("DLRU_EHT ", "MCS14 ", "MCS15 \t"), + __GEN_TXPWR_ENT_NESTED(byr_mcs_be), + + /* there is no CCK section after BW80 */ + __GEN_TXPWR_ENT0("BW80"), + __GEN_TXPWR_ENT4("LEGACY ", "6M ", "9M ", "12M ", "18M "), + __GEN_TXPWR_ENT4("LEGACY ", "24M ", "36M ", "48M ", "54M "), + __GEN_TXPWR_ENT2("EHT ", "MCS14 ", "MCS15 \t"), + __GEN_TXPWR_ENT2("DLRU_EHT ", "MCS14 ", "MCS15 \t"), + __GEN_TXPWR_ENT_NESTED(byr_mcs_be), + + __GEN_TXPWR_ENT0("BW160"), + __GEN_TXPWR_ENT4("LEGACY ", "6M ", "9M ", "12M ", "18M "), + __GEN_TXPWR_ENT4("LEGACY ", "24M ", "36M ", "48M ", "54M "), + __GEN_TXPWR_ENT2("EHT ", "MCS14 ", "MCS15 \t"), + __GEN_TXPWR_ENT2("DLRU_EHT ", "MCS14 ", "MCS15 \t"), + __GEN_TXPWR_ENT_NESTED(byr_mcs_be), + + __GEN_TXPWR_ENT0("BW320"), + __GEN_TXPWR_ENT4("LEGACY ", "6M ", "9M ", "12M ", "18M "), + __GEN_TXPWR_ENT4("LEGACY ", "24M ", "36M ", "48M ", "54M "), + __GEN_TXPWR_ENT2("EHT ", "MCS14 ", "MCS15 \t"), + __GEN_TXPWR_ENT2("DLRU_EHT ", "MCS14 ", "MCS15 \t"), + __GEN_TXPWR_ENT_NESTED(byr_mcs_be), +}; + +static const struct txpwr_map __txpwr_map_byr_be = { + .ent = __txpwr_ent_byr_be, + .size = ARRAY_SIZE(__txpwr_ent_byr_be), + .addr_from = R_BE_PWR_BY_RATE, + .addr_to = R_BE_PWR_BY_RATE_MAX, + .addr_to_1ss = 0, /* not support */ +}; + +static const struct txpwr_ent __txpwr_ent_lmt_mcs_be[] = { + __GEN_TXPWR_ENT2("MCS_20M_0 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_1 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_2 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_3 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_4 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_5 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_6 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_7 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_8 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_9 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_10 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_11 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_12 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_13 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_14 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_20M_15 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_0 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_1 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_2 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_3 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_4 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_5 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_6 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_7 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_80M_0 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_80M_1 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_80M_2 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_80M_3 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_160M_0 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_160M_1 ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_320M ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_0p5", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_2p5", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_4p5", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("MCS_40M_6p5", "NON_BF", "BF"), +}; + +static const struct txpwr_ent __txpwr_ent_lmt_be[] = { + __GEN_TXPWR_ENT0("1TX"), + __GEN_TXPWR_ENT2("CCK_20M ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("CCK_40M ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("OFDM ", "NON_BF", "BF"), + __GEN_TXPWR_ENT_NESTED(lmt_mcs_be), + + __GEN_TXPWR_ENT0("2TX"), + __GEN_TXPWR_ENT2("CCK_20M ", "NON_BF", "BF"), + __GEN_TXPWR_ENT2("CCK_40M ", "NON_BF", "BF"), + 
__GEN_TXPWR_ENT2("OFDM ", "NON_BF", "BF"), + __GEN_TXPWR_ENT_NESTED(lmt_mcs_be), +}; + +static const struct txpwr_map __txpwr_map_lmt_be = { + .ent = __txpwr_ent_lmt_be, + .size = ARRAY_SIZE(__txpwr_ent_lmt_be), + .addr_from = R_BE_PWR_LMT, + .addr_to = R_BE_PWR_LMT_MAX, + .addr_to_1ss = 0, /* not support */ +}; + +static const struct txpwr_ent __txpwr_ent_lmt_ru_indexes_be[] = { + __GEN_TXPWR_ENT8("RU26 ", "IDX_0 ", "IDX_1 ", "IDX_2 ", "IDX_3 ", + "IDX_4 ", "IDX_5 ", "IDX_6 ", "IDX_7 "), + __GEN_TXPWR_ENT8("RU26 ", "IDX_8 ", "IDX_9 ", "IDX_10", "IDX_11", + "IDX_12", "IDX_13", "IDX_14", "IDX_15"), + __GEN_TXPWR_ENT8("RU52 ", "IDX_0 ", "IDX_1 ", "IDX_2 ", "IDX_3 ", + "IDX_4 ", "IDX_5 ", "IDX_6 ", "IDX_7 "), + __GEN_TXPWR_ENT8("RU52 ", "IDX_8 ", "IDX_9 ", "IDX_10", "IDX_11", + "IDX_12", "IDX_13", "IDX_14", "IDX_15"), + __GEN_TXPWR_ENT8("RU106 ", "IDX_0 ", "IDX_1 ", "IDX_2 ", "IDX_3 ", + "IDX_4 ", "IDX_5 ", "IDX_6 ", "IDX_7 "), + __GEN_TXPWR_ENT8("RU106 ", "IDX_8 ", "IDX_9 ", "IDX_10", "IDX_11", + "IDX_12", "IDX_13", "IDX_14", "IDX_15"), + __GEN_TXPWR_ENT8("RU52_26 ", "IDX_0 ", "IDX_1 ", "IDX_2 ", "IDX_3 ", + "IDX_4 ", "IDX_5 ", "IDX_6 ", "IDX_7 "), + __GEN_TXPWR_ENT8("RU52_26 ", "IDX_8 ", "IDX_9 ", "IDX_10", "IDX_11", + "IDX_12", "IDX_13", "IDX_14", "IDX_15"), + __GEN_TXPWR_ENT8("RU106_26", "IDX_0 ", "IDX_1 ", "IDX_2 ", "IDX_3 ", + "IDX_4 ", "IDX_5 ", "IDX_6 ", "IDX_7 "), + __GEN_TXPWR_ENT8("RU106_26", "IDX_8 ", "IDX_9 ", "IDX_10", "IDX_11", + "IDX_12", "IDX_13", "IDX_14", "IDX_15"), +}; + +static const struct txpwr_ent __txpwr_ent_lmt_ru_be[] = { + __GEN_TXPWR_ENT0("1TX"), + __GEN_TXPWR_ENT_NESTED(lmt_ru_indexes_be), + + __GEN_TXPWR_ENT0("2TX"), + __GEN_TXPWR_ENT_NESTED(lmt_ru_indexes_be), +}; + +static const struct txpwr_map __txpwr_map_lmt_ru_be = { + .ent = __txpwr_ent_lmt_ru_be, + .size = ARRAY_SIZE(__txpwr_ent_lmt_ru_be), + .addr_from = R_BE_PWR_RU_LMT, + .addr_to = R_BE_PWR_RU_LMT_MAX, + .addr_to_1ss = 0, /* not support */ +}; + +static unsigned int +__print_txpwr_ent(struct seq_file *m, const struct txpwr_ent *ent, + const s8 *buf, const unsigned int cur) { + unsigned int cnt, i; char *fmt; + if (ent->nested) { + for (cnt = 0, i = 0; i < ent->len; i++) + cnt += __print_txpwr_ent(m, ent->ptr + i, buf, + cur + cnt); + return cnt; + } + switch (ent->len) { + case 0: + seq_printf(m, "\t<< %s >>\n", ent->txt); + return 0; case 2: - fmt = "%s\t| %3d, %3d,\tdBm\n"; + fmt = "%s\t| %3d, %3d,\t\tdBm\n"; seq_printf(m, fmt, ent->txt, buf[cur], buf[cur + 1]); return 2; case 4: @@ -532,10 +724,10 @@ static int __print_txpwr_map(struct seq_file *m, struct rtw89_dev *rtwdev, { u8 fct = rtwdev->chip->txpwr_factor_mac; u8 path_num = rtwdev->chip->rf_path_num; + unsigned int cur, i; u32 max_valid_addr; u32 val, addr; s8 *buf, tmp; - u8 cur, i; int ret; buf = vzalloc(map->addr_to - map->addr_from + 4); @@ -547,6 +739,9 @@ static int __print_txpwr_map(struct seq_file *m, struct rtw89_dev *rtwdev, else max_valid_addr = map->addr_to; + if (max_valid_addr == 0) + return -EOPNOTSUPP; + for (addr = map->addr_from; addr <= max_valid_addr; addr += 4) { ret = rtw89_mac_txpwr_read32(rtwdev, RTW89_PHY_0, addr, &val); if (ret) @@ -572,9 +767,9 @@ static int __print_txpwr_map(struct seq_file *m, struct rtw89_dev *rtwdev, seq_puts(m, #_regd "\n"); \ break -static void __print_regd(struct seq_file *m, struct rtw89_dev *rtwdev) +static void __print_regd(struct seq_file *m, struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); u8 band = 
chan->band_type; u8 regd = rtw89_regd_get(rtwdev, band); @@ -600,33 +795,69 @@ static void __print_regd(struct seq_file *m, struct rtw89_dev *rtwdev) #undef case_REGD +struct dbgfs_txpwr_table { + const struct txpwr_map *byr; + const struct txpwr_map *lmt; + const struct txpwr_map *lmt_ru; +}; + +static const struct dbgfs_txpwr_table dbgfs_txpwr_table_ax = { + .byr = &__txpwr_map_byr_ax, + .lmt = &__txpwr_map_lmt_ax, + .lmt_ru = &__txpwr_map_lmt_ru_ax, +}; + +static const struct dbgfs_txpwr_table dbgfs_txpwr_table_be = { + .byr = &__txpwr_map_byr_be, + .lmt = &__txpwr_map_lmt_be, + .lmt_ru = &__txpwr_map_lmt_ru_be, +}; + +static const struct dbgfs_txpwr_table *dbgfs_txpwr_tables[RTW89_CHIP_GEN_NUM] = { + [RTW89_CHIP_AX] = &dbgfs_txpwr_table_ax, + [RTW89_CHIP_BE] = &dbgfs_txpwr_table_be, +}; + static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v) { struct rtw89_debugfs_priv *debugfs_priv = m->private; struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; + const struct dbgfs_txpwr_table *tbl; + const struct rtw89_chan *chan; int ret = 0; mutex_lock(&rtwdev->mutex); rtw89_leave_ps_mode(rtwdev); + chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); seq_puts(m, "[Regulatory] "); - __print_regd(m, rtwdev); + __print_regd(m, rtwdev, chan); seq_puts(m, "[SAR]\n"); - rtw89_print_sar(m, rtwdev); + rtw89_print_sar(m, rtwdev, chan->freq); + + seq_puts(m, "[TAS]\n"); + rtw89_print_tas(m, rtwdev); + + tbl = dbgfs_txpwr_tables[chip_gen]; + if (!tbl) { + ret = -EOPNOTSUPP; + goto err; + } seq_puts(m, "\n[TX power byrate]\n"); - ret = __print_txpwr_map(m, rtwdev, &__txpwr_map_byr); + ret = __print_txpwr_map(m, rtwdev, tbl->byr); if (ret) goto err; seq_puts(m, "\n[TX power limit]\n"); - ret = __print_txpwr_map(m, rtwdev, &__txpwr_map_lmt); + ret = __print_txpwr_map(m, rtwdev, tbl->lmt); if (ret) goto err; seq_puts(m, "\n[TX power limit_ru]\n"); - ret = __print_txpwr_map(m, rtwdev, &__txpwr_map_lmt_ru); + ret = __print_txpwr_map(m, rtwdev, tbl->lmt_ru); if (ret) goto err; @@ -790,6 +1021,9 @@ static void rtw89_debug_dump_mac_mem(struct seq_file *m, struct rtw89_dev *rtwdev, u8 sel, u32 start_addr, u32 len) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + u32 filter_model_addr = mac->filter_model_addr; + u32 indir_access_addr = mac->indir_access_addr; u32 base_addr, start_page, residue; u32 i, j, p, pages; u32 dump_len, remain; @@ -799,17 +1033,17 @@ static void rtw89_debug_dump_mac_mem(struct seq_file *m, pages = len / MAC_MEM_DUMP_PAGE_SIZE + 1; start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE; residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE; - base_addr = rtw89_mac_mem_base_addrs[sel]; + base_addr = mac->mem_base_addrs[sel]; base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE; for (p = 0; p < pages; p++) { dump_len = min_t(u32, remain, MAC_MEM_DUMP_PAGE_SIZE); - rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, base_addr); - for (i = R_AX_INDIR_ACCESS_ENTRY + residue; - i < R_AX_INDIR_ACCESS_ENTRY + dump_len;) { + rtw89_write32(rtwdev, filter_model_addr, base_addr); + for (i = indir_access_addr + residue; + i < indir_access_addr + dump_len;) { seq_printf(m, "%08xh:", i); for (j = 0; - j < 4 && i < R_AX_INDIR_ACCESS_ENTRY + dump_len; + j < 4 && i < indir_access_addr + dump_len; j++, i += 4) { val = rtw89_read32(rtwdev, i); seq_printf(m, " %08x", val); @@ -3193,29 +3427,33 @@ static ssize_t rtw89_debug_priv_btc_manual_set(struct file *filp, struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; struct rtw89_btc *btc = &rtwdev->btc; 
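The debugfs rework above replaces the flat TX-power print tables with optionally nested ones: a txpwr_ent either prints its own len values or, when .nested is set, points at a sub-table via __GEN_TXPWR_ENT_NESTED() and __print_txpwr_ent() recurses, returning how many buffer slots were consumed so the caller's cursor stays in sync; the cursor type also grew from u8 to unsigned int because the BE-generation maps index past 255. A condensed model of that contract (types and names simplified, not the driver's own):

	struct example_ent {
		bool nested;
		union {
			const char *txt;
			const struct example_ent *sub;
		};
		u8 len;	/* leaf: values to print; nested: sub-entry count */
	};

	static unsigned int example_ent_slots(const struct example_ent *e)
	{
		unsigned int n, i;

		if (!e->nested)
			return e->len;	/* len == 0 entries are pure headers */

		/* Nested: sum over the sub-table, mirroring the recursion
		 * in __print_txpwr_ent(). */
		for (n = 0, i = 0; i < e->len; i++)
			n += example_ent_slots(e->sub + i);
		return n;
	}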
bool btc_manual; + int ret; - if (kstrtobool_from_user(user_buf, count, &btc_manual)) - goto out; + ret = kstrtobool_from_user(user_buf, count, &btc_manual); + if (ret) + return ret; btc->ctrl.manual = btc_manual; -out: + return count; } -static ssize_t rtw89_debug_fw_log_btc_manual_set(struct file *filp, - const char __user *user_buf, - size_t count, loff_t *loff) +static ssize_t rtw89_debug_fw_log_manual_set(struct file *filp, + const char __user *user_buf, + size_t count, loff_t *loff) { struct rtw89_debugfs_priv *debugfs_priv = filp->private_data; struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; - struct rtw89_fw_info *fw_info = &rtwdev->fw; + struct rtw89_fw_log *log = &rtwdev->fw.log; bool fw_log_manual; if (kstrtobool_from_user(user_buf, count, &fw_log_manual)) goto out; mutex_lock(&rtwdev->mutex); - fw_info->fw_log_enable = fw_log_manual; + log->enable = fw_log_manual; + if (log->enable) + rtw89_fw_log_prepare(rtwdev); rtw89_fw_h2c_fw_log(rtwdev, fw_log_manual); mutex_unlock(&rtwdev->mutex); out: @@ -3229,6 +3467,11 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta) [NL80211_RATE_INFO_HE_GI_1_6] = "1.6", [NL80211_RATE_INFO_HE_GI_3_2] = "3.2", }; + static const char * const eht_gi_str[] = { + [NL80211_RATE_INFO_EHT_GI_0_8] = "0.8", + [NL80211_RATE_INFO_EHT_GI_1_6] = "1.6", + [NL80211_RATE_INFO_EHT_GI_3_2] = "3.2", + }; struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; struct rate_info *rate = &rtwsta->ra_report.txrate; struct ieee80211_rx_status *status = &rtwsta->rx_status; @@ -3254,6 +3497,10 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta) seq_printf(m, "HE %dSS MCS-%d GI:%s", rate->nss, rate->mcs, rate->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ? he_gi_str[rate->he_gi] : "N/A"); + else if (rate->flags & RATE_INFO_FLAGS_EHT_MCS) + seq_printf(m, "EHT %dSS MCS-%d GI:%s", rate->nss, rate->mcs, + rate->eht_gi < ARRAY_SIZE(eht_gi_str) ? + eht_gi_str[rate->eht_gi] : "N/A"); else seq_printf(m, "Legacy %d", rate->legacy); seq_printf(m, "%s", rtwsta->ra_report.might_fallback_legacy ? " FB_G" : ""); @@ -3282,6 +3529,11 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta) status->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ? he_gi_str[rate->he_gi] : "N/A"); break; + case RX_ENC_EHT: + seq_printf(m, "EHT %dSS MCS-%d GI:%s", status->nss, status->rate_idx, + status->eht.gi < ARRAY_SIZE(eht_gi_str) ? 
+ eht_gi_str[status->eht.gi] : "N/A"); + break; } seq_printf(m, " BW:%u", rtw89_rate_info_bw_to_mhz(status->bw)); seq_printf(m, "\t(hw_rate=0x%x)\n", rtwsta->rx_hw_rate); @@ -3323,20 +3575,26 @@ rtw89_debug_append_rx_rate(struct seq_file *m, struct rtw89_pkt_stat *pkt_stat, pkt_stat->rx_rate_cnt[first_rate + i]); } +#define FIRST_RATE_SAME(rate) {RTW89_HW_RATE_ ## rate, RTW89_HW_RATE_ ## rate} +#define FIRST_RATE_ENUM(rate) {RTW89_HW_RATE_ ## rate, RTW89_HW_RATE_V1_ ## rate} +#define FIRST_RATE_GEV1(rate) {RTW89_HW_RATE_INVAL, RTW89_HW_RATE_V1_ ## rate} + static const struct rtw89_rx_rate_cnt_info { - enum rtw89_hw_rate first_rate; + enum rtw89_hw_rate first_rate[RTW89_CHIP_GEN_NUM]; int len; int ext; const char *rate_mode; } rtw89_rx_rate_cnt_infos[] = { - {RTW89_HW_RATE_CCK1, 4, 0, "Legacy:"}, - {RTW89_HW_RATE_OFDM6, 8, 0, "OFDM:"}, - {RTW89_HW_RATE_MCS0, 8, 0, "HT 0:"}, - {RTW89_HW_RATE_MCS8, 8, 0, "HT 1:"}, - {RTW89_HW_RATE_VHT_NSS1_MCS0, 10, 2, "VHT 1SS:"}, - {RTW89_HW_RATE_VHT_NSS2_MCS0, 10, 2, "VHT 2SS:"}, - {RTW89_HW_RATE_HE_NSS1_MCS0, 12, 0, "HE 1SS:"}, - {RTW89_HW_RATE_HE_NSS2_MCS0, 12, 0, "HE 2ss:"}, + {FIRST_RATE_SAME(CCK1), 4, 0, "Legacy:"}, + {FIRST_RATE_SAME(OFDM6), 8, 0, "OFDM:"}, + {FIRST_RATE_ENUM(MCS0), 8, 0, "HT 0:"}, + {FIRST_RATE_ENUM(MCS8), 8, 0, "HT 1:"}, + {FIRST_RATE_ENUM(VHT_NSS1_MCS0), 10, 2, "VHT 1SS:"}, + {FIRST_RATE_ENUM(VHT_NSS2_MCS0), 10, 2, "VHT 2SS:"}, + {FIRST_RATE_ENUM(HE_NSS1_MCS0), 12, 0, "HE 1SS:"}, + {FIRST_RATE_ENUM(HE_NSS2_MCS0), 12, 0, "HE 2SS:"}, + {FIRST_RATE_GEV1(EHT_NSS1_MCS0), 14, 2, "EHT 1SS:"}, + {FIRST_RATE_GEV1(EHT_NSS2_MCS0), 14, 0, "EHT 2SS:"}, }; static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v) @@ -3345,7 +3603,9 @@ static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v) struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; struct rtw89_traffic_stats *stats = &rtwdev->stats; struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.last_pkt_stat; + const struct rtw89_chip_info *chip = rtwdev->chip; const struct rtw89_rx_rate_cnt_info *info; + enum rtw89_hw_rate first_rate; int i; seq_printf(m, "TP TX: %u [%u] Mbps (lv: %d), RX: %u [%u] Mbps (lv: %d)\n", @@ -3357,15 +3617,20 @@ static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v) stats->rx_avg_len); seq_puts(m, "RX count:\n"); + for (i = 0; i < ARRAY_SIZE(rtw89_rx_rate_cnt_infos); i++) { info = &rtw89_rx_rate_cnt_infos[i]; + first_rate = info->first_rate[chip->chip_gen]; + if (first_rate >= RTW89_HW_RATE_NR) + continue; + seq_printf(m, "%10s [", info->rate_mode); rtw89_debug_append_rx_rate(m, pkt_stat, - info->first_rate, info->len); + first_rate, info->len); if (info->ext) { seq_puts(m, "]["); rtw89_debug_append_rx_rate(m, pkt_stat, - info->first_rate + info->len, info->ext); + first_rate + info->len, info->ext); } seq_puts(m, "]\n"); } @@ -3569,7 +3834,7 @@ static struct rtw89_debugfs_priv rtw89_debug_priv_btc_manual = { }; static struct rtw89_debugfs_priv rtw89_debug_priv_fw_log_manual = { - .cb_write = rtw89_debug_fw_log_btc_manual_set, + .cb_write = rtw89_debug_fw_log_manual_set, }; static struct rtw89_debugfs_priv rtw89_debug_priv_phy_info = { diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index 9637f5e48d84..a732c22a2d54 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -9,9 +9,24 @@ #include "fw.h" #include "mac.h" #include "phy.h" +#include "ps.h" #include "reg.h" #include "util.h" +union rtw89_fw_element_arg { + size_t offset; + enum 
rtw89_rf_path rf_path; + enum rtw89_fw_type fw_type; +}; + +struct rtw89_fw_element_handler { + int (*fn)(struct rtw89_dev *rtwdev, + const struct rtw89_fw_element_hdr *elm, + const union rtw89_fw_element_arg arg); + const union rtw89_fw_element_arg arg; + const char *name; +}; + static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb); static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, @@ -46,22 +61,15 @@ struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len) return rtw89_fw_h2c_alloc_skb(rtwdev, len, false); } -static u8 _fw_get_rdy(struct rtw89_dev *rtwdev) -{ - u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL); - - return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val); -} - -#define FWDL_WAIT_CNT 400000 -int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev) +int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; u8 val; int ret; - ret = read_poll_timeout_atomic(_fw_get_rdy, val, + ret = read_poll_timeout_atomic(mac->fwdl_get_status, val, val == RTW89_FWDL_WCPU_FW_INIT_RDY, - 1, FWDL_WAIT_CNT, false, rtwdev); + 1, FWDL_WAIT_CNT, false, rtwdev, type); if (ret) { switch (val) { case RTW89_FWDL_CHECKSUM_FAIL: @@ -77,6 +85,7 @@ int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev) return -EINVAL; default: + rtw89_err(rtwdev, "fw unexpected status %d\n", val); return -EBUSY; } } @@ -86,8 +95,8 @@ int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev) return 0; } -static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, - struct rtw89_fw_bin_info *info) +static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, + struct rtw89_fw_bin_info *info) { const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw; struct rtw89_fw_hdr_section_info *section_info; @@ -154,6 +163,94 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, return 0; } +static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, + struct rtw89_fw_bin_info *info) +{ + const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw; + struct rtw89_fw_hdr_section_info *section_info; + const struct rtw89_fw_dynhdr_hdr *fwdynhdr; + const struct rtw89_fw_hdr_section_v1 *section; + const u8 *fw_end = fw + len; + const u8 *bin; + u32 base_hdr_len; + u32 mssc_len = 0; + u32 i; + + info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM); + base_hdr_len = struct_size(fw_hdr, sections, info->section_num); + info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR); + + if (info->dynamic_hdr_en) { + info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE); + info->dynamic_hdr_len = info->hdr_len - base_hdr_len; + fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len); + if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) { + rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n"); + return -EINVAL; + } + } else { + info->hdr_len = base_hdr_len; + info->dynamic_hdr_len = 0; + } + + bin = fw + info->hdr_len; + + /* jump to section header */ + section_info = info->section_info; + for (i = 0; i < info->section_num; i++) { + section = &fw_hdr->sections[i]; + section_info->type = + le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE); + if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { + section_info->mssc = + le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC); + mssc_len += section_info->mssc * 
FWDL_SECURITY_SIGLEN; + } else { + section_info->mssc = 0; + } + + section_info->len = + le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE); + if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM)) + section_info->len += FWDL_SECTION_CHKSUM_LEN; + section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL); + section_info->dladdr = + le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR); + section_info->addr = bin; + bin += section_info->len; + section_info++; + } + + if (fw_end != bin + mssc_len) { + rtw89_err(rtwdev, "[ERR]fw bin size\n"); + return -EINVAL; + } + + return 0; +} + +static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, + const struct rtw89_fw_suit *fw_suit, + struct rtw89_fw_bin_info *info) +{ + const u8 *fw = fw_suit->data; + u32 len = fw_suit->size; + + if (!fw || !len) { + rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type); + return -ENOENT; + } + + switch (fw_suit->hdr_ver) { + case 0: + return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info); + case 1: + return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info); + default: + return -ENOENT; + } +} + static int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, struct rtw89_fw_suit *fw_suit, bool nowarn) @@ -178,42 +275,110 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, for (i = 0; i < mfw_hdr->fw_nr; i++) { mfw_info = &mfw_hdr->info[i]; - if (mfw_info->cv != rtwdev->hal.cv || - mfw_info->type != type || - mfw_info->mp) - continue; - - fw_suit->data = mfw + le32_to_cpu(mfw_info->shift); - fw_suit->size = le32_to_cpu(mfw_info->size); - return 0; + if (mfw_info->type == type) { + if (mfw_info->cv == rtwdev->hal.cv && !mfw_info->mp) + goto found; + if (type == RTW89_FW_LOGFMT) + goto found; + } } if (!nowarn) rtw89_err(rtwdev, "no suitable firmware found\n"); return -ENOENT; + +found: + fw_suit->data = mfw + le32_to_cpu(mfw_info->shift); + fw_suit->size = le32_to_cpu(mfw_info->size); + return 0; } -static void rtw89_fw_update_ver(struct rtw89_dev *rtwdev, - enum rtw89_fw_type type, - struct rtw89_fw_suit *fw_suit) +static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev) { - const struct rtw89_fw_hdr *hdr = (const struct rtw89_fw_hdr *)fw_suit->data; + struct rtw89_fw_info *fw_info = &rtwdev->fw; + const struct firmware *firmware = fw_info->req.firmware; + const struct rtw89_mfw_hdr *mfw_hdr = + (const struct rtw89_mfw_hdr *)firmware->data; + const struct rtw89_mfw_info *mfw_info; + u32 size; + + if (mfw_hdr->sig != RTW89_MFW_SIG) { + rtw89_warn(rtwdev, "not mfw format\n"); + return 0; + } + mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1]; + size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size); + + return size; +} + +static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev, + struct rtw89_fw_suit *fw_suit, + const struct rtw89_fw_hdr *hdr) +{ fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION); fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION); fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION); fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX); + fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID); fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR); fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH); fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE); fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR); fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN); fw_suit->cmd_ver = le32_get_bits(hdr->w7, 
FW_HDR_W7_CMD_VERSERION); +} + +static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev, + struct rtw89_fw_suit *fw_suit, + const struct rtw89_fw_hdr_v1 *hdr) +{ + fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION); + fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION); + fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION); + fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX); + fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID); + fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR); + fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH); + fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE); + fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR); + fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN); + fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION); +} + +static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev, + enum rtw89_fw_type type, + struct rtw89_fw_suit *fw_suit) +{ + const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data; + const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data; + + if (type == RTW89_FW_LOGFMT) + return 0; + + fw_suit->type = type; + fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER); + + switch (fw_suit->hdr_ver) { + case 0: + rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0); + break; + case 1: + rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1); + break; + default: + rtw89_err(rtwdev, "Unknown firmware header version %u\n", + fw_suit->hdr_ver); + return -ENOENT; + } rtw89_info(rtwdev, - "Firmware version %u.%u.%u.%u, cmd version %u, type %u\n", + "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n", fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver, - fw_suit->sub_idex, fw_suit->cmd_ver, type); + fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type); + + return 0; } static @@ -227,9 +392,22 @@ int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, if (ret) return ret; - rtw89_fw_update_ver(rtwdev, type, fw_suit); + return rtw89_fw_update_ver(rtwdev, type, fw_suit); +} - return 0; +static +int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev, + const struct rtw89_fw_element_hdr *elm, + const union rtw89_fw_element_arg arg) +{ + enum rtw89_fw_type type = arg.fw_type; + struct rtw89_fw_suit *fw_suit; + + fw_suit = rtw89_fw_suit_get(rtwdev, type); + fw_suit->data = elm->u.common.contents; + fw_suit->size = le32_to_cpu(elm->size); + + return rtw89_fw_update_ver(rtwdev, type, fw_suit); } #define __DEF_FW_FEAT_COND(__cond, __op) \ @@ -312,31 +490,17 @@ rtw89_early_fw_feature_recognize(struct device *device, struct rtw89_fw_info *early_fw, int *used_fw_format) { - union rtw89_compat_fw_hdr buf = {}; const struct firmware *firmware; - bool full_req = false; char fw_name[64]; int fw_format; u32 ver_code; int ret; - /* If SECURITY_LOADPIN_ENFORCE is enabled, reading partial files will - * be denied (-EPERM). Then, we don't get right firmware things as - * expected. So, in this case, we have to request full firmware here. 
- */ - if (IS_ENABLED(CONFIG_SECURITY_LOADPIN_ENFORCE)) - full_req = true; - for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) { rtw89_fw_get_filename(fw_name, sizeof(fw_name), chip->fw_basename, fw_format); - if (full_req) - ret = request_firmware(&firmware, fw_name, device); - else - ret = request_partial_firmware_into_buf(&firmware, fw_name, - device, &buf, sizeof(buf), - 0); + ret = request_firmware(&firmware, fw_name, device); if (!ret) { dev_info(device, "loaded firmware %s\n", fw_name); *used_fw_format = fw_format; @@ -349,10 +513,7 @@ rtw89_early_fw_feature_recognize(struct device *device, return NULL; } - if (full_req) - ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data); - else - ver_code = rtw89_compat_fw_hdr_ver_code(&buf); + ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data); if (!ver_code) goto out; @@ -360,11 +521,7 @@ rtw89_early_fw_feature_recognize(struct device *device, rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code); out: - if (full_req) - return firmware; - - release_firmware(firmware); - return NULL; + return firmware; } int rtw89_fw_recognize(struct rtw89_dev *rtwdev) @@ -386,6 +543,9 @@ normal_done: /* It still works if wowlan firmware isn't existing. */ __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false); + /* It still works if log format file isn't existing. */ + __rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true); + rtw89_fw_recognize_features(rtwdev); rtw89_coex_recognize_ver(rtwdev); @@ -393,6 +553,225 @@ normal_done: return 0; } +static +int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev, + const struct rtw89_fw_element_hdr *elm, + const union rtw89_fw_element_arg arg) +{ + struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; + struct rtw89_phy_table *tbl; + struct rtw89_reg2_def *regs; + enum rtw89_rf_path rf_path; + u32 n_regs, i; + u8 idx; + + tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); + if (!tbl) + return -ENOMEM; + + switch (le32_to_cpu(elm->id)) { + case RTW89_FW_ELEMENT_ID_BB_REG: + elm_info->bb_tbl = tbl; + break; + case RTW89_FW_ELEMENT_ID_BB_GAIN: + elm_info->bb_gain = tbl; + break; + case RTW89_FW_ELEMENT_ID_RADIO_A: + case RTW89_FW_ELEMENT_ID_RADIO_B: + case RTW89_FW_ELEMENT_ID_RADIO_C: + case RTW89_FW_ELEMENT_ID_RADIO_D: + rf_path = arg.rf_path; + idx = elm->u.reg2.idx; + + elm_info->rf_radio[idx] = tbl; + tbl->rf_path = rf_path; + tbl->config = rtw89_phy_config_rf_reg_v1; + break; + case RTW89_FW_ELEMENT_ID_RF_NCTL: + elm_info->rf_nctl = tbl; + break; + default: + kfree(tbl); + return -ENOENT; + } + + n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]); + regs = kcalloc(n_regs, sizeof(tbl->regs[0]), GFP_KERNEL); + if (!regs) + goto out; + + for (i = 0; i < n_regs; i++) { + regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr); + regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data); + } + + tbl->n_regs = n_regs; + tbl->regs = regs; + + return 0; + +out: + kfree(tbl); + return -ENOMEM; +} + +static +int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev, + const struct rtw89_fw_element_hdr *elm, + const union rtw89_fw_element_arg arg) +{ + const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr; + const unsigned long offset = arg.offset; + struct rtw89_efuse *efuse = &rtwdev->efuse; + struct rtw89_txpwr_conf *conf; + + if (!rtwdev->rfe_data) { + rtwdev->rfe_data = kzalloc(sizeof(*rtwdev->rfe_data), GFP_KERNEL); + if (!rtwdev->rfe_data) + return -ENOMEM; + } + + conf = (void *)rtwdev->rfe_data + offset; + + /* if multiple matched, take the last eventually */ + if (txpwr_elm->rfe_type 
== efuse->rfe_type) + goto setup; + + /* without one is matched, accept default */ + if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE && + (!rtw89_txpwr_conf_valid(conf) || + conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE)) + goto setup; + + rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n", + elm->id, txpwr_elm->rfe_type); + return 0; + +setup: + rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n", + elm->id, txpwr_elm->rfe_type); + + conf->rfe_type = txpwr_elm->rfe_type; + conf->ent_sz = txpwr_elm->ent_sz; + conf->num_ents = le32_to_cpu(txpwr_elm->num_ents); + conf->data = txpwr_elm->content; + return 0; +} + +static const struct rtw89_fw_element_handler __fw_element_handlers[] = { + [RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm, + { .fw_type = RTW89_FW_BBMCU0 }, NULL}, + [RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm, + { .fw_type = RTW89_FW_BBMCU1 }, NULL}, + [RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"}, + [RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL}, + [RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm, + { .rf_path = RF_PATH_A }, "radio A"}, + [RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm, + { .rf_path = RF_PATH_B }, NULL}, + [RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm, + { .rf_path = RF_PATH_C }, NULL}, + [RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm, + { .rf_path = RF_PATH_D }, NULL}, + [RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"}, + [RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = { + rtw89_fw_recognize_txpwr_from_elm, + { .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR", + }, + [RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = { + rtw89_fw_recognize_txpwr_from_elm, + { .offset = offsetof(struct rtw89_rfe_data, lmt_2ghz.conf) }, NULL, + }, + [RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = { + rtw89_fw_recognize_txpwr_from_elm, + { .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL, + }, + [RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = { + rtw89_fw_recognize_txpwr_from_elm, + { .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL, + }, + [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = { + rtw89_fw_recognize_txpwr_from_elm, + { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL, + }, + [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = { + rtw89_fw_recognize_txpwr_from_elm, + { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL, + }, + [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = { + rtw89_fw_recognize_txpwr_from_elm, + { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL, + }, + [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = { + rtw89_fw_recognize_txpwr_from_elm, + { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL, + }, + [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = { + rtw89_fw_recognize_txpwr_from_elm, + { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL, + }, +}; + +int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev) +{ + struct rtw89_fw_info *fw_info = &rtwdev->fw; + const struct firmware *firmware = fw_info->req.firmware; + const struct rtw89_chip_info *chip = rtwdev->chip; + u32 unrecognized_elements = chip->needed_fw_elms; + const struct rtw89_fw_element_handler *handler; + const struct rtw89_fw_element_hdr *hdr; + u32 elm_size; + u32 elem_id; + u32 offset; + int ret; + + BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM); + + offset = 
rtw89_mfw_get_size(rtwdev); + offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN); + if (offset == 0) + return -EINVAL; + + while (offset + sizeof(*hdr) < firmware->size) { + hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset); + + elm_size = le32_to_cpu(hdr->size); + if (offset + elm_size >= firmware->size) { + rtw89_warn(rtwdev, "firmware element size exceeds\n"); + break; + } + + elem_id = le32_to_cpu(hdr->id); + if (elem_id >= ARRAY_SIZE(__fw_element_handlers)) + goto next; + + handler = &__fw_element_handlers[elem_id]; + if (!handler->fn) + goto next; + + ret = handler->fn(rtwdev, hdr, handler->arg); + if (ret) + return ret; + + if (handler->name) + rtw89_info(rtwdev, "Firmware element %s version: %4ph\n", + handler->name, hdr->ver); + + unrecognized_elements &= ~BIT(elem_id); +next: + offset += sizeof(*hdr) + elm_size; + offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN); + } + + if (unrecognized_elements) { + rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n", + unrecognized_elements); + return -ENOENT; + } + + return 0; +} + void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb, u8 type, u8 cat, u8 class, u8 func, bool rack, bool dack, u32 len) @@ -469,7 +848,7 @@ fail: static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len) { - u8 val; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; int ret; ret = __rtw89_fw_download_hdr(rtwdev, fw, len); @@ -478,9 +857,7 @@ static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len return ret; } - ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_FWDL_PATH_RDY, - 1, FWDL_WAIT_CNT, false, - rtwdev, R_AX_WCPU_FW_CTRL); + ret = mac->fwdl_check_path_ready(rtwdev, false); if (ret) { rtw89_err(rtwdev, "[ERR]FWDL path ready\n"); return ret; @@ -532,10 +909,27 @@ fail: return ret; } -static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, const u8 *fw, +static enum rtw89_fwdl_check_type +rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev, + const struct rtw89_fw_suit *fw_suit) +{ + switch (fw_suit->type) { + case RTW89_FW_BBMCU0: + return RTW89_FWDL_CHECK_BB0_FWDL_DONE; + case RTW89_FW_BBMCU1: + return RTW89_FWDL_CHECK_BB1_FWDL_DONE; + default: + return RTW89_FWDL_CHECK_WCPU_FWDL_DONE; + } +} + +static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, + const struct rtw89_fw_suit *fw_suit, struct rtw89_fw_bin_info *info) { struct rtw89_fw_hdr_section_info *section_info = info->section_info; + const struct rtw89_chip_info *chip = rtwdev->chip; + enum rtw89_fwdl_check_type chk_type; u8 section_num = info->section_num; int ret; @@ -546,11 +940,14 @@ static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, const u8 *fw, section_info++; } - mdelay(5); + if (chip->chip_gen == RTW89_CHIP_AX) + return 0; - ret = rtw89_fw_check_rdy(rtwdev); + chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit); + ret = rtw89_fw_check_rdy(rtwdev, chk_type); if (ret) { - rtw89_warn(rtwdev, "download firmware fail\n"); + rtw89_warn(rtwdev, "failed to download firmware type %u\n", + fw_suit->type); return ret; } @@ -588,50 +985,66 @@ static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev) rtw89_fw_prog_cnt_dump(rtwdev); } -int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type) +static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev, + struct rtw89_fw_suit *fw_suit) { - struct rtw89_fw_info *fw_info = &rtwdev->fw; - struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); + const struct rtw89_mac_gen_def 
*mac = rtwdev->chip->mac_def; struct rtw89_fw_bin_info info; - const u8 *fw = fw_suit->data; - u32 len = fw_suit->size; - u8 val; int ret; - rtw89_mac_disable_cpu(rtwdev); - ret = rtw89_mac_enable_cpu(rtwdev, 0, true); - if (ret) - return ret; - - if (!fw || !len) { - rtw89_err(rtwdev, "fw type %d isn't recognized\n", type); - return -ENOENT; - } - - ret = rtw89_fw_hdr_parser(rtwdev, fw, len, &info); + ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info); if (ret) { rtw89_err(rtwdev, "parse fw header fail\n"); - goto fwdl_err; + return ret; } - ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_H2C_PATH_RDY, - 1, FWDL_WAIT_CNT, false, - rtwdev, R_AX_WCPU_FW_CTRL); + if (rtwdev->chip->chip_id == RTL8922A && + (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN)) + rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000); + + ret = mac->fwdl_check_path_ready(rtwdev, true); if (ret) { rtw89_err(rtwdev, "[ERR]H2C path ready\n"); - goto fwdl_err; + return ret; } - ret = rtw89_fw_download_hdr(rtwdev, fw, info.hdr_len - info.dynamic_hdr_len); - if (ret) { - ret = -EBUSY; - goto fwdl_err; - } + ret = rtw89_fw_download_hdr(rtwdev, fw_suit->data, info.hdr_len - + info.dynamic_hdr_len); + if (ret) + return ret; - ret = rtw89_fw_download_main(rtwdev, fw, &info); - if (ret) { - ret = -EBUSY; + ret = rtw89_fw_download_main(rtwdev, fw_suit, &info); + if (ret) + return ret; + + return 0; +} + +int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, + bool include_bb) +{ + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + struct rtw89_fw_info *fw_info = &rtwdev->fw; + struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); + u8 bbmcu_nr = rtwdev->chip->bbmcu_nr; + int ret; + int i; + + mac->disable_cpu(rtwdev); + ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb); + if (ret) + return ret; + + ret = rtw89_fw_download_suit(rtwdev, fw_suit); + if (ret) goto fwdl_err; + + for (i = 0; i < bbmcu_nr && include_bb; i++) { + fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i); + + ret = rtw89_fw_download_suit(rtwdev, fw_suit); + if (ret) + goto fwdl_err; } fw_info->h2c_seq = 0; @@ -641,6 +1054,14 @@ int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type) rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX; rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX; + mdelay(5); + + ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE); + if (ret) { + rtw89_warn(rtwdev, "download firmware fail\n"); + return ret; + } + return ret; fwdl_err: @@ -695,6 +1116,27 @@ void rtw89_load_firmware_work(struct work_struct *work) rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false); } +static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl) +{ + if (!tbl) + return; + + kfree(tbl->regs); + kfree(tbl); +} + +static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev) +{ + struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; + int i; + + rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl); + rtw89_free_phy_tbl_from_elm(elm_info->bb_gain); + for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++) + rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]); + rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl); +} + void rtw89_unload_firmware(struct rtw89_dev *rtwdev) { struct rtw89_fw_info *fw = &rtwdev->fw; @@ -709,6 +1151,151 @@ void rtw89_unload_firmware(struct rtw89_dev *rtwdev) */ fw->req.firmware = NULL; } + + kfree(fw->log.fmts); + rtw89_unload_firmware_elements(rtwdev); +} + +static u32 rtw89_fw_log_get_fmt_idx(struct 
rtw89_dev *rtwdev, u32 fmt_id) +{ + struct rtw89_fw_log *fw_log = &rtwdev->fw.log; + u32 i; + + if (fmt_id > fw_log->last_fmt_id) + return 0; + + for (i = 0; i < fw_log->fmt_count; i++) { + if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id) + return i; + } + return 0; +} + +static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev) +{ + struct rtw89_fw_log *log = &rtwdev->fw.log; + const struct rtw89_fw_logsuit_hdr *suit_hdr; + struct rtw89_fw_suit *suit = &log->suit; + const void *fmts_ptr, *fmts_end_ptr; + u32 fmt_count; + int i; + + suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data; + fmt_count = le32_to_cpu(suit_hdr->count); + log->fmt_ids = suit_hdr->ids; + fmts_ptr = &suit_hdr->ids[fmt_count]; + fmts_end_ptr = suit->data + suit->size; + log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL); + if (!log->fmts) + return -ENOMEM; + + for (i = 0; i < fmt_count; i++) { + fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr); + if (!fmts_ptr) + break; + + (*log->fmts)[i] = fmts_ptr; + log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]); + log->fmt_count++; + fmts_ptr += strlen(fmts_ptr); + } + + return 0; +} + +int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev) +{ + struct rtw89_fw_log *log = &rtwdev->fw.log; + struct rtw89_fw_suit *suit = &log->suit; + + if (!suit || !suit->data) { + rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n"); + return -EINVAL; + } + if (log->fmts) + return 0; + + return rtw89_fw_log_create_fmts_dict(rtwdev); +} + +static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev, + const struct rtw89_fw_c2h_log_fmt *log_fmt, + u32 fmt_idx, u8 para_int, bool raw_data) +{ + const char *(*fmts)[] = rtwdev->fw.log.fmts; + char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE]; + u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0}; + int i; + + if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) { + rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n", + log_fmt->argc); + return; + } + + if (para_int) + for (i = 0 ; i < log_fmt->argc; i++) + args[i] = le32_to_cpu(log_fmt->u.argv[i]); + + if (raw_data) { + if (para_int) + snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, + "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id), + para_int, log_fmt->argc, (int)sizeof(args), args); + else + snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, + "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id), + para_int, log_fmt->argc, log_fmt->u.raw); + } else { + snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx], + args[0x0], args[0x1], args[0x2], args[0x3], args[0x4], + args[0x5], args[0x6], args[0x7], args[0x8], args[0x9], + args[0xa], args[0xb], args[0xc], args[0xd], args[0xe], + args[0xf]); + } + + rtw89_info(rtwdev, "C2H log: %s", str_buf); +} + +void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len) +{ + const struct rtw89_fw_c2h_log_fmt *log_fmt; + u8 para_int; + u32 fmt_idx; + + if (len < RTW89_C2H_HEADER_LEN) { + rtw89_err(rtwdev, "c2h log length is wrong!\n"); + return; + } + + buf += RTW89_C2H_HEADER_LEN; + len -= RTW89_C2H_HEADER_LEN; + log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf; + + if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN) + goto plain_log; + + if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE)) + goto plain_log; + + if (!rtwdev->fw.log.fmts) + return; + + para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT); + fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id)); + + if (!para_int && log_fmt->argc != 0 && fmt_idx != 0) + rtw89_info(rtwdev, "C2H log: %s%s", + 
(*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw); + else if (fmt_idx != 0 && para_int) + rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false); + else + rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true); + return; + +plain_log: + rtw89_info(rtwdev, "C2H log: %.*s", len, buf); + } #define H2C_CAM_LEN 60 @@ -922,7 +1509,7 @@ int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) } skb_put(skb, H2C_LOG_CFG_LEN); - SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_SER); + SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD); SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H)); SET_LOG_CFG_COMP(skb->data, comp); SET_LOG_CFG_COMP_EXT(skb->data, 0); @@ -1300,7 +1887,8 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, const struct rtw89_chip_info *chip = rtwdev->chip; struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, + rtwvif->sub_entity_idx); struct sk_buff *skb; u8 pads[RTW89_PPE_BW_NUM]; u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; @@ -1457,12 +2045,15 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, + rtwvif->sub_entity_idx); struct sk_buff *skb; struct sk_buff *skb_beacon; u16 tim_offset; int bcn_total_len; u16 beacon_rate; + void *noa_data; + u8 noa_len; int ret; if (vif->p2p) @@ -1479,6 +2070,13 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, return -ENOMEM; } + noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data); + if (noa_len && + (noa_len <= skb_tailroom(skb_beacon) || + pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) { + skb_put_data(skb_beacon, noa_data, noa_len); + } + bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len; skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); if (!skb) { @@ -1903,61 +2501,76 @@ fail: return ret; } -#define H2C_RA_LEN 16 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) { + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_h2c_ra_v1 *h2c_v1; + struct rtw89_h2c_ra *h2c; + u32 len = sizeof(*h2c); + bool format_v1 = false; struct sk_buff *skb; - u8 *cmd; int ret; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RA_LEN); + if (chip->chip_gen == RTW89_CHIP_BE) { + len = sizeof(*h2c_v1); + format_v1 = true; + } + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); return -ENOMEM; } - skb_put(skb, H2C_RA_LEN); - cmd = skb->data; + skb_put(skb, len); + h2c = (struct rtw89_h2c_ra *)skb->data; rtw89_debug(rtwdev, RTW89_DBG_RA, "ra cmd msk: %llx ", ra->ra_mask); - RTW89_SET_FWCMD_RA_MODE(cmd, ra->mode_ctrl); - RTW89_SET_FWCMD_RA_BW_CAP(cmd, ra->bw_cap); - RTW89_SET_FWCMD_RA_MACID(cmd, ra->macid); - RTW89_SET_FWCMD_RA_DCM(cmd, ra->dcm_cap); - RTW89_SET_FWCMD_RA_ER(cmd, ra->er_cap); - RTW89_SET_FWCMD_RA_INIT_RATE_LV(cmd, ra->init_rate_lv); - RTW89_SET_FWCMD_RA_UPD_ALL(cmd, ra->upd_all); - RTW89_SET_FWCMD_RA_SGI(cmd, ra->en_sgi); - RTW89_SET_FWCMD_RA_LDPC(cmd, ra->ldpc_cap); - RTW89_SET_FWCMD_RA_STBC(cmd, ra->stbc_cap); - RTW89_SET_FWCMD_RA_SS_NUM(cmd, ra->ss_num); - RTW89_SET_FWCMD_RA_GILTF(cmd, ra->giltf); - 
RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(cmd, ra->upd_bw_nss_mask); - RTW89_SET_FWCMD_RA_UPD_MASK(cmd, ra->upd_mask); - RTW89_SET_FWCMD_RA_MASK_0(cmd, FIELD_GET(MASKBYTE0, ra->ra_mask)); - RTW89_SET_FWCMD_RA_MASK_1(cmd, FIELD_GET(MASKBYTE1, ra->ra_mask)); - RTW89_SET_FWCMD_RA_MASK_2(cmd, FIELD_GET(MASKBYTE2, ra->ra_mask)); - RTW89_SET_FWCMD_RA_MASK_3(cmd, FIELD_GET(MASKBYTE3, ra->ra_mask)); - RTW89_SET_FWCMD_RA_MASK_4(cmd, FIELD_GET(MASKBYTE4, ra->ra_mask)); - RTW89_SET_FWCMD_RA_FIX_GILTF_EN(cmd, ra->fix_giltf_en); - RTW89_SET_FWCMD_RA_FIX_GILTF(cmd, ra->fix_giltf); - - if (csi) { - RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, 1); - RTW89_SET_FWCMD_RA_BAND_NUM(cmd, ra->band_num); - RTW89_SET_FWCMD_RA_CR_TBL_SEL(cmd, ra->cr_tbl_sel); - RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(cmd, ra->fixed_csi_rate_en); - RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(cmd, ra->ra_csi_rate_en); - RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(cmd, ra->csi_mcs_ss_idx); - RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(cmd, ra->csi_mode); - RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(cmd, ra->csi_gi_ltf); - RTW89_SET_FWCMD_RA_FIXED_CSI_BW(cmd, ra->csi_bw); - } - + h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) | + le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) | + le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) | + le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) | + le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) | + le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) | + le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) | + le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) | + le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) | + le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) | + le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) | + le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) | + le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) | + le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK); + h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32); + h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32); + h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) | + le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF); + + if (!format_v1) + goto csi; + + h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c; + h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) | + le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT); + +csi: + if (!csi) + goto done; + + h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL); + h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) | + le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) | + le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) | + le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) | + le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) | + le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) | + le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) | + le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW); + +done: rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, - H2C_RA_LEN); + len); ret = rtw89_h2c_tx(rtwdev, skb, false); if (ret) { @@ -2693,11 +3306,11 @@ fail: int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; struct 
rtw89_fw_h2c_rf_get_mccch *mccch; struct sk_buff *skb; int ret; + u8 idx; skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch)); if (!skb) { @@ -2707,12 +3320,13 @@ int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) skb_put(skb, sizeof(*mccch)); mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; + idx = rfk_mcc->table_idx; mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]); mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]); mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]); mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]); - mccch->current_channel = cpu_to_le32(chan->channel); - mccch->current_band_type = cpu_to_le32(chan->band_type); + mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]); + mccch->current_band_type = cpu_to_le32(rfk_mcc->band[idx]); rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, @@ -2815,12 +3429,13 @@ void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) { + const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data; struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); - attr->category = RTW89_GET_C2H_CATEGORY(c2h->data); - attr->class = RTW89_GET_C2H_CLASS(c2h->data); - attr->func = RTW89_GET_C2H_FUNC(c2h->data); - attr->len = RTW89_GET_C2H_LEN(c2h->data); + attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY); + attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS); + attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC); + attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN); } static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev, @@ -3377,6 +3992,7 @@ void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, struct ieee80211_scan_request *scan_req) { struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; struct cfg80211_scan_request *req = &scan_req->req; u32 rx_fltr = rtwdev->hal.rx_fltr; u8 mac_addr[ETH_ALEN]; @@ -3399,14 +4015,17 @@ void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, rx_fltr &= ~B_AX_A_BC; rx_fltr &= ~B_AX_A_A1_MATCH; rtw89_write32_mask(rtwdev, - rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0), + rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), B_AX_RX_FLTR_CFG_MASK, rx_fltr); + + rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN); } void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, bool aborted) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; struct cfg80211_scan_info info = { .aborted = aborted, @@ -3417,7 +4036,7 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, return; rtw89_write32_mask(rtwdev, - rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0), + rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr); @@ -3432,7 +4051,7 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, scan_info->last_chan_idx = 0; scan_info->scanning_vif = NULL; - rtw89_set_channel(rtwdev); + rtw89_chanctx_proceed(rtwdev); } void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) @@ -4032,7 +4651,7 @@ int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, } #define H2C_MCC_MACID_BITMAP_DSC_LEN 4 -int rtw89_fw_h2c_mcc_macid_bitamp(struct rtw89_dev *rtwdev, u8 group, u8 macid, +int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 
group, u8 macid, u8 *bitmap) { struct rtw89_wait_info *wait = &rtwdev->mcc.wait; @@ -4135,3 +4754,454 @@ int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev, cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION); return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); } + +static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len) +{ + static const u8 zeros[U8_MAX] = {}; + + return memcmp(ext_ptr, zeros, ext_len) == 0; +} + +#define __fw_txpwr_entry_acceptable(e, cursor, ent_sz) \ +({ \ + u8 __var_sz = sizeof(*(e)); \ + bool __accept; \ + if (__var_sz >= (ent_sz)) \ + __accept = true; \ + else \ + __accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\ + (ent_sz) - __var_sz);\ + __accept; \ +}) + +static bool +fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e, + const void *cursor, + const struct rtw89_txpwr_conf *conf) +{ + if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) + return false; + + if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM) + return false; + + switch (e->rs) { + case RTW89_RS_CCK: + if (e->shf + e->len > RTW89_RATE_CCK_NUM) + return false; + break; + case RTW89_RS_OFDM: + if (e->shf + e->len > RTW89_RATE_OFDM_NUM) + return false; + break; + case RTW89_RS_MCS: + if (e->shf + e->len > __RTW89_RATE_MCS_NUM || + e->nss >= RTW89_NSS_NUM || + e->ofdma >= RTW89_OFDMA_NUM) + return false; + break; + case RTW89_RS_HEDCM: + if (e->shf + e->len > RTW89_RATE_HEDCM_NUM || + e->nss >= RTW89_NSS_HEDCM_NUM || + e->ofdma >= RTW89_OFDMA_NUM) + return false; + break; + case RTW89_RS_OFFSET: + if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM) + return false; + break; + default: + return false; + } + + return true; +} + +static +void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev, + const struct rtw89_txpwr_table *tbl) +{ + const struct rtw89_txpwr_conf *conf = tbl->data; + struct rtw89_fw_txpwr_byrate_entry entry = {}; + struct rtw89_txpwr_byrate *byr_head; + struct rtw89_rate_desc desc = {}; + const void *cursor; + u32 data; + s8 *byr; + int i; + + rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { + if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf)) + continue; + + byr_head = &rtwdev->byr[entry.band][entry.bw]; + data = le32_to_cpu(entry.data); + desc.ofdma = entry.ofdma; + desc.nss = entry.nss; + desc.rs = entry.rs; + + for (i = 0; i < entry.len; i++, data >>= 8) { + desc.idx = entry.shf + i; + byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc); + *byr = data & 0xff; + } + } +} + +static bool +fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e, + const void *cursor, + const struct rtw89_txpwr_conf *conf) +{ + if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) + return false; + + if (e->bw >= RTW89_2G_BW_NUM) + return false; + if (e->nt >= RTW89_NTX_NUM) + return false; + if (e->rs >= RTW89_RS_LMT_NUM) + return false; + if (e->bf >= RTW89_BF_NUM) + return false; + if (e->regd >= RTW89_REGD_NUM) + return false; + if (e->ch_idx >= RTW89_2G_CH_NUM) + return false; + + return true; +} + +static +void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data) +{ + const struct rtw89_txpwr_conf *conf = &data->conf; + struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {}; + const void *cursor; + + rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { + if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf)) + continue; + + data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] + [entry.ch_idx] = entry.v; + } +} + +static bool +fw_txpwr_lmt_5ghz_entry_valid(const struct 
rtw89_fw_txpwr_lmt_5ghz_entry *e, + const void *cursor, + const struct rtw89_txpwr_conf *conf) +{ + if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) + return false; + + if (e->bw >= RTW89_5G_BW_NUM) + return false; + if (e->nt >= RTW89_NTX_NUM) + return false; + if (e->rs >= RTW89_RS_LMT_NUM) + return false; + if (e->bf >= RTW89_BF_NUM) + return false; + if (e->regd >= RTW89_REGD_NUM) + return false; + if (e->ch_idx >= RTW89_5G_CH_NUM) + return false; + + return true; +} + +static +void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data) +{ + const struct rtw89_txpwr_conf *conf = &data->conf; + struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {}; + const void *cursor; + + rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { + if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf)) + continue; + + data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] + [entry.ch_idx] = entry.v; + } +} + +static bool +fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e, + const void *cursor, + const struct rtw89_txpwr_conf *conf) +{ + if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) + return false; + + if (e->bw >= RTW89_6G_BW_NUM) + return false; + if (e->nt >= RTW89_NTX_NUM) + return false; + if (e->rs >= RTW89_RS_LMT_NUM) + return false; + if (e->bf >= RTW89_BF_NUM) + return false; + if (e->regd >= RTW89_REGD_NUM) + return false; + if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) + return false; + if (e->ch_idx >= RTW89_6G_CH_NUM) + return false; + + return true; +} + +static +void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data) +{ + const struct rtw89_txpwr_conf *conf = &data->conf; + struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {}; + const void *cursor; + + rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { + if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf)) + continue; + + data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] + [entry.reg_6ghz_power][entry.ch_idx] = entry.v; + } +} + +static bool +fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e, + const void *cursor, + const struct rtw89_txpwr_conf *conf) +{ + if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) + return false; + + if (e->ru >= RTW89_RU_NUM) + return false; + if (e->nt >= RTW89_NTX_NUM) + return false; + if (e->regd >= RTW89_REGD_NUM) + return false; + if (e->ch_idx >= RTW89_2G_CH_NUM) + return false; + + return true; +} + +static +void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data) +{ + const struct rtw89_txpwr_conf *conf = &data->conf; + struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {}; + const void *cursor; + + rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { + if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, cursor, conf)) + continue; + + data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; + } +} + +static bool +fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e, + const void *cursor, + const struct rtw89_txpwr_conf *conf) +{ + if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) + return false; + + if (e->ru >= RTW89_RU_NUM) + return false; + if (e->nt >= RTW89_NTX_NUM) + return false; + if (e->regd >= RTW89_REGD_NUM) + return false; + if (e->ch_idx >= RTW89_5G_CH_NUM) + return false; + + return true; +} + +static +void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data) +{ + const struct rtw89_txpwr_conf *conf = &data->conf; + struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {}; + const 
void *cursor; + + rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { + if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf)) + continue; + + data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; + } +} + +static bool +fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e, + const void *cursor, + const struct rtw89_txpwr_conf *conf) +{ + if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) + return false; + + if (e->ru >= RTW89_RU_NUM) + return false; + if (e->nt >= RTW89_NTX_NUM) + return false; + if (e->regd >= RTW89_REGD_NUM) + return false; + if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) + return false; + if (e->ch_idx >= RTW89_6G_CH_NUM) + return false; + + return true; +} + +static +void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data) +{ + const struct rtw89_txpwr_conf *conf = &data->conf; + struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {}; + const void *cursor; + + rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { + if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf)) + continue; + + data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power] + [entry.ch_idx] = entry.v; + } +} + +static bool +fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry *e, + const void *cursor, + const struct rtw89_txpwr_conf *conf) +{ + if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) + return false; + + if (e->band >= RTW89_BAND_NUM) + return false; + if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM) + return false; + if (e->regd >= RTW89_REGD_NUM) + return false; + + return true; +} + +static +void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data) +{ + const struct rtw89_txpwr_conf *conf = &data->conf; + struct rtw89_fw_tx_shape_lmt_entry entry = {}; + const void *cursor; + + rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { + if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf)) + continue; + + data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v; + } +} + +static bool +fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e, + const void *cursor, + const struct rtw89_txpwr_conf *conf) +{ + if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) + return false; + + if (e->band >= RTW89_BAND_NUM) + return false; + if (e->regd >= RTW89_REGD_NUM) + return false; + + return true; +} + +static +void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data) +{ + const struct rtw89_txpwr_conf *conf = &data->conf; + struct rtw89_fw_tx_shape_lmt_ru_entry entry = {}; + const void *cursor; + + rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { + if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf)) + continue; + + data->v[entry.band][entry.regd] = entry.v; + } +} + +const struct rtw89_rfe_parms * +rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev, + const struct rtw89_rfe_parms *init) +{ + struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data; + struct rtw89_rfe_parms *parms; + + if (!rfe_data) + return init; + + parms = &rfe_data->rfe_parms; + if (init) + *parms = *init; + + if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) { + rfe_data->byrate.tbl.data = &rfe_data->byrate.conf; + rfe_data->byrate.tbl.size = 0; /* don't care here */ + rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate; + parms->byr_tbl = &rfe_data->byrate.tbl; + } + + if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) { + rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz); + parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v; + } + + if 
(rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) { + rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz); + parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v; + } + + if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) { + rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz); + parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v; + } + + if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) { + rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz); + parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v; + } + + if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) { + rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz); + parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v; + } + + if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) { + rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz); + parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v; + } + + if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) { + rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt); + parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v; + } + + if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) { + rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru); + parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v; + } + + return parms; +} diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h index 45f927dc212e..d4db9ab0b5e8 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.h +++ b/drivers/net/wireless/realtek/rtw89/fw.h @@ -291,160 +291,52 @@ struct rtw89_pktofld_info { bool cancel; }; -static inline void RTW89_SET_FWCMD_RA_IS_DIS(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(0)); -} - -static inline void RTW89_SET_FWCMD_RA_MODE(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(5, 1)); -} - -static inline void RTW89_SET_FWCMD_RA_BW_CAP(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(7, 6)); -} - -static inline void RTW89_SET_FWCMD_RA_MACID(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(15, 8)); -} - -static inline void RTW89_SET_FWCMD_RA_DCM(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(16)); -} - -static inline void RTW89_SET_FWCMD_RA_ER(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(17)); -} - -static inline void RTW89_SET_FWCMD_RA_INIT_RATE_LV(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(19, 18)); -} - -static inline void RTW89_SET_FWCMD_RA_UPD_ALL(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(20)); -} - -static inline void RTW89_SET_FWCMD_RA_SGI(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(21)); -} - -static inline void RTW89_SET_FWCMD_RA_LDPC(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(22)); -} - -static inline void RTW89_SET_FWCMD_RA_STBC(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(23)); -} - -static inline void RTW89_SET_FWCMD_RA_SS_NUM(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(26, 24)); -} - -static inline void RTW89_SET_FWCMD_RA_GILTF(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(29, 27)); -} - -static inline void RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(30)); -} - -static inline void RTW89_SET_FWCMD_RA_UPD_MASK(void *cmd, u32 val) -{ - 
le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(31)); -} - -static inline void RTW89_SET_FWCMD_RA_MASK_0(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(7, 0)); -} - -static inline void RTW89_SET_FWCMD_RA_MASK_1(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(15, 8)); -} - -static inline void RTW89_SET_FWCMD_RA_MASK_2(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(23, 16)); -} - -static inline void RTW89_SET_FWCMD_RA_MASK_3(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(31, 24)); -} - -static inline void RTW89_SET_FWCMD_RA_MASK_4(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x02, val, GENMASK(7, 0)); -} - -static inline void RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x02, val, BIT(31)); -} - -static inline void RTW89_SET_FWCMD_RA_BAND_NUM(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(7, 0)); -} - -static inline void RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(8)); -} - -static inline void RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(9)); -} - -static inline void RTW89_SET_FWCMD_RA_CR_TBL_SEL(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(10)); -} - -static inline void RTW89_SET_FWCMD_RA_FIX_GILTF_EN(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(11)); -} - -static inline void RTW89_SET_FWCMD_RA_FIX_GILTF(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(14, 12)); -} - -static inline void RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(23, 16)); -} - -static inline void RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(25, 24)); -} +struct rtw89_h2c_ra { + __le32 w0; + __le32 w1; + __le32 w2; + __le32 w3; +} __packed; -static inline void RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(28, 26)); -} +#define RTW89_H2C_RA_W0_IS_DIS BIT(0) +#define RTW89_H2C_RA_W0_MODE GENMASK(5, 1) +#define RTW89_H2C_RA_W0_BW_CAP GENMASK(7, 6) +#define RTW89_H2C_RA_W0_MACID GENMASK(15, 8) +#define RTW89_H2C_RA_W0_DCM BIT(16) +#define RTW89_H2C_RA_W0_ER BIT(17) +#define RTW89_H2C_RA_W0_INIT_RATE_LV GENMASK(19, 18) +#define RTW89_H2C_RA_W0_UPD_ALL BIT(20) +#define RTW89_H2C_RA_W0_SGI BIT(21) +#define RTW89_H2C_RA_W0_LDPC BIT(22) +#define RTW89_H2C_RA_W0_STBC BIT(23) +#define RTW89_H2C_RA_W0_SS_NUM GENMASK(26, 24) +#define RTW89_H2C_RA_W0_GILTF GENMASK(29, 27) +#define RTW89_H2C_RA_W0_UPD_BW_NSS_MASK BIT(30) +#define RTW89_H2C_RA_W0_UPD_MASK BIT(31) +#define RTW89_H2C_RA_W1_RAMASK_LO32 GENMASK(31, 0) +#define RTW89_H2C_RA_W2_RAMASK_HI32 GENMASK(30, 0) +#define RTW89_H2C_RA_W2_BFEE_CSI_CTL BIT(31) +#define RTW89_H2C_RA_W3_BAND_NUM GENMASK(7, 0) +#define RTW89_H2C_RA_W3_RA_CSI_RATE_EN BIT(8) +#define RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN BIT(9) +#define RTW89_H2C_RA_W3_CR_TBL_SEL BIT(10) +#define RTW89_H2C_RA_W3_FIX_GILTF_EN BIT(11) +#define RTW89_H2C_RA_W3_FIX_GILTF GENMASK(14, 12) +#define RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX GENMASK(23, 16) +#define RTW89_H2C_RA_W3_FIXED_CSI_MODE GENMASK(25, 24) +#define RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF GENMASK(28, 26) 
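
This hunk swaps the long run of RTW89_SET_FWCMD_RA_*() setter helpers for packed __le32 command words described by GENMASK() field defines, so each field is named once next to its bit range. Below is a minimal userspace sketch of that encode/decode idiom, not part of the patch: it uses a hypothetical two-field word and hand-rolled stand-ins for the field_prep()/field_get() helpers the kernel gets from <linux/bitfield.h>; every DEMO_* name is invented for illustration.

/* Illustrative only: compiles standalone with gcc/clang. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_GENMASK(h, l)  ((~0u << (l)) & (~0u >> (31 - (h))))

/* hypothetical fields of one 32-bit H2C command word */
#define DEMO_W0_MODE   DEMO_GENMASK(5, 1)
#define DEMO_W0_MACID  DEMO_GENMASK(15, 8)

static uint32_t demo_field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

static uint32_t demo_field_get(uint32_t mask, uint32_t word)
{
	return (word & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	/* build the word as one OR-chain, as rtw89_fw_h2c_ra() now does */
	uint32_t w0 = demo_field_prep(DEMO_W0_MODE, 0x3) |
		      demo_field_prep(DEMO_W0_MACID, 0x2a);

	printf("w0=0x%08x mode=0x%x macid=0x%x\n",
	       (unsigned)w0,
	       (unsigned)demo_field_get(DEMO_W0_MODE, w0),
	       (unsigned)demo_field_get(DEMO_W0_MACID, w0));
	return 0;
}

Building each word as a single OR-chain of encode calls keeps the bit layout in one place and lets the v1 struct below extend the v0 words instead of duplicating offsets.
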
+#define RTW89_H2C_RA_W3_FIXED_CSI_BW GENMASK(31, 29) + +struct rtw89_h2c_ra_v1 { + struct rtw89_h2c_ra v0; + __le32 w4; + __le32 w5; +} __packed; -static inline void RTW89_SET_FWCMD_RA_FIXED_CSI_BW(void *cmd, u32 val) -{ - le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(31, 29)); -} +#define RTW89_H2C_RA_V1_W4_MODE_EHT GENMASK(6, 0) +#define RTW89_H2C_RA_V1_W4_BW_EHT GENMASK(10, 8) +#define RTW89_H2C_RA_V1_W4_RAMASK_UHL16 GENMASK(31, 16) +#define RTW89_H2C_RA_V1_W5_RAMASK_UHH16 GENMASK(15, 0) static inline void RTW89_SET_FWCMD_SEC_IDX(void *cmd, u32 val) { @@ -571,7 +463,9 @@ struct rtw89_fw_hdr { #define FW_HDR_W1_MINOR_VERSION GENMASK(15, 8) #define FW_HDR_W1_SUBVERSION GENMASK(23, 16) #define FW_HDR_W1_SUBINDEX GENMASK(31, 24) +#define FW_HDR_W2_COMMITID GENMASK(31, 0) #define FW_HDR_W3_LEN GENMASK(23, 16) +#define FW_HDR_W3_HDR_VER GENMASK(31, 24) #define FW_HDR_W4_MONTH GENMASK(7, 0) #define FW_HDR_W4_DATE GENMASK(15, 8) #define FW_HDR_W4_HOUR GENMASK(23, 16) @@ -581,6 +475,54 @@ struct rtw89_fw_hdr { #define FW_HDR_W7_DYN_HDR BIT(16) #define FW_HDR_W7_CMD_VERSERION GENMASK(31, 24) +struct rtw89_fw_hdr_section_v1 { + __le32 w0; + __le32 w1; + __le32 w2; + __le32 w3; +} __packed; + +#define FWSECTION_HDR_V1_W0_DL_ADDR GENMASK(31, 0) +#define FWSECTION_HDR_V1_W1_METADATA GENMASK(31, 24) +#define FWSECTION_HDR_V1_W1_SECTIONTYPE GENMASK(27, 24) +#define FWSECTION_HDR_V1_W1_SEC_SIZE GENMASK(23, 0) +#define FWSECTION_HDR_V1_W1_CHECKSUM BIT(28) +#define FWSECTION_HDR_V1_W1_REDL BIT(29) +#define FWSECTION_HDR_V1_W2_MSSC GENMASK(7, 0) +#define FWSECTION_HDR_V1_W2_BBMCU_IDX GENMASK(27, 24) + +struct rtw89_fw_hdr_v1 { + __le32 w0; + __le32 w1; + __le32 w2; + __le32 w3; + __le32 w4; + __le32 w5; + __le32 w6; + __le32 w7; + __le32 w8; + __le32 w9; + __le32 w10; + __le32 w11; + struct rtw89_fw_hdr_section_v1 sections[]; +} __packed; + +#define FW_HDR_V1_W1_MAJOR_VERSION GENMASK(7, 0) +#define FW_HDR_V1_W1_MINOR_VERSION GENMASK(15, 8) +#define FW_HDR_V1_W1_SUBVERSION GENMASK(23, 16) +#define FW_HDR_V1_W1_SUBINDEX GENMASK(31, 24) +#define FW_HDR_V1_W2_COMMITID GENMASK(31, 0) +#define FW_HDR_V1_W3_CMD_VERSERION GENMASK(23, 16) +#define FW_HDR_V1_W3_HDR_VER GENMASK(31, 24) +#define FW_HDR_V1_W4_MONTH GENMASK(7, 0) +#define FW_HDR_V1_W4_DATE GENMASK(15, 8) +#define FW_HDR_V1_W4_HOUR GENMASK(23, 16) +#define FW_HDR_V1_W4_MIN GENMASK(31, 24) +#define FW_HDR_V1_W5_YEAR GENMASK(15, 0) +#define FW_HDR_V1_W5_HDR_SIZE GENMASK(31, 16) +#define FW_HDR_V1_W6_SEC_NUM GENMASK(15, 8) +#define FW_HDR_V1_W7_DYN_HDR BIT(16) + static inline void SET_FW_HDR_PART_SIZE(void *fwhdr, u32 val) { le32p_replace_bits((__le32 *)fwhdr + 7, val, GENMASK(15, 0)); @@ -2989,6 +2931,11 @@ static inline void RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(void *cmd, u32 val) le32p_replace_bits((__le32 *)cmd + 3, val, GENMASK(23, 16)); } +enum rtw89_fw_mcc_old_group_actions { + RTW89_FW_MCC_OLD_GROUP_ACT_NONE = 0, + RTW89_FW_MCC_OLD_GROUP_ACT_REPLACE = 1, +}; + struct rtw89_fw_mcc_start_req { u32 group: 2; u32 btc_in_group: 1; @@ -3209,14 +3156,15 @@ inline void RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(void *cmd, u32 val) #define RTW89_C2H_HEADER_LEN 8 -#define RTW89_GET_C2H_CATEGORY(c2h) \ - le32_get_bits(*((const __le32 *)c2h), GENMASK(1, 0)) -#define RTW89_GET_C2H_CLASS(c2h) \ - le32_get_bits(*((const __le32 *)c2h), GENMASK(7, 2)) -#define RTW89_GET_C2H_FUNC(c2h) \ - le32_get_bits(*((const __le32 *)c2h), GENMASK(15, 8)) -#define RTW89_GET_C2H_LEN(c2h) \ - le32_get_bits(*((const __le32 *)(c2h) + 1), GENMASK(13, 0)) +struct 
rtw89_c2h_hdr { + __le32 w0; + __le32 w1; +} __packed; + +#define RTW89_C2H_HDR_W0_CATEGORY GENMASK(1, 0) +#define RTW89_C2H_HDR_W0_CLASS GENMASK(7, 2) +#define RTW89_C2H_HDR_W0_FUNC GENMASK(15, 8) +#define RTW89_C2H_HDR_W1_LEN GENMASK(13, 0) struct rtw89_fw_c2h_attr { u8 category; @@ -3232,9 +3180,6 @@ static inline struct rtw89_fw_c2h_attr *RTW89_SKB_C2H_CB(struct sk_buff *skb) return (struct rtw89_fw_c2h_attr *)skb->cb; } -#define RTW89_GET_C2H_LOG_SRT_PRT(c2h) (char *)((__le32 *)(c2h) + 2) -#define RTW89_GET_C2H_LOG_LEN(len) ((len) - RTW89_C2H_HEADER_LEN) - struct rtw89_c2h_done_ack { __le32 w0; __le32 w1; @@ -3256,6 +3201,26 @@ struct rtw89_c2h_done_ack { #define RTW89_GET_MAC_C2H_REV_ACK_H2C_SEQ(c2h) \ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(23, 16)) +struct rtw89_fw_c2h_log_fmt { + __le16 signature; + u8 feature; + u8 syntax; + __le32 fmt_id; + u8 file_num; + __le16 line_num; + u8 argc; + union { + DECLARE_FLEX_ARRAY(u8, raw); + DECLARE_FLEX_ARRAY(__le32, argv); + } __packed u; +} __packed; + +#define RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN 11 +#define RTW89_C2H_FW_LOG_FEATURE_PARA_INT BIT(2) +#define RTW89_C2H_FW_LOG_MAX_PARA_NUM 16 +#define RTW89_C2H_FW_LOG_SIGNATURE 0xA5A5 +#define RTW89_C2H_FW_LOG_STR_BUF_SIZE 512 + struct rtw89_c2h_mac_bcnfltr_rpt { __le32 w0; __le32 w1; @@ -3267,24 +3232,32 @@ struct rtw89_c2h_mac_bcnfltr_rpt { #define RTW89_C2H_MAC_BCNFLTR_RPT_W2_EVENT GENMASK(11, 10) #define RTW89_C2H_MAC_BCNFLTR_RPT_W2_MA GENMASK(23, 16) -#define RTW89_GET_PHY_C2H_RA_RPT_MACID(c2h) \ - le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(15, 0)) -#define RTW89_GET_PHY_C2H_RA_RPT_RETRY_RATIO(c2h) \ - le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(23, 16)) -#define RTW89_GET_PHY_C2H_RA_RPT_MCSNSS(c2h) \ - le32_get_bits(*((const __le32 *)(c2h) + 3), GENMASK(6, 0)) -#define RTW89_GET_PHY_C2H_RA_RPT_MD_SEL(c2h) \ - le32_get_bits(*((const __le32 *)(c2h) + 3), GENMASK(9, 8)) -#define RTW89_GET_PHY_C2H_RA_RPT_GILTF(c2h) \ - le32_get_bits(*((const __le32 *)(c2h) + 3), GENMASK(12, 10)) -#define RTW89_GET_PHY_C2H_RA_RPT_BW(c2h) \ - le32_get_bits(*((const __le32 *)(c2h) + 3), GENMASK(14, 13)) - -/* VHT, HE, HT-old: [6:4]: NSS, [3:0]: MCS - * HT-new: [6:5]: NA, [4:0]: MCS +struct rtw89_c2h_ra_rpt { + struct rtw89_c2h_hdr hdr; + __le32 w2; + __le32 w3; +} __packed; + +#define RTW89_C2H_RA_RPT_W2_MACID GENMASK(15, 0) +#define RTW89_C2H_RA_RPT_W2_RETRY_RATIO GENMASK(23, 16) +#define RTW89_C2H_RA_RPT_W2_MCSNSS_B7 BIT(31) +#define RTW89_C2H_RA_RPT_W3_MCSNSS GENMASK(6, 0) +#define RTW89_C2H_RA_RPT_W3_MD_SEL GENMASK(9, 8) +#define RTW89_C2H_RA_RPT_W3_GILTF GENMASK(12, 10) +#define RTW89_C2H_RA_RPT_W3_BW GENMASK(14, 13) +#define RTW89_C2H_RA_RPT_W3_MD_SEL_B2 BIT(15) +#define RTW89_C2H_RA_RPT_W3_BW_B2 BIT(16) + +/* For WiFi 6 chips: + * VHT, HE, HT-old: [6:4]: NSS, [3:0]: MCS + * HT-new: [6:5]: NA, [4:0]: MCS + * For WiFi 7 chips (V1): + * HT, VHT, HE, EHT: [7:5]: NSS, [4:0]: MCS */ #define RTW89_RA_RATE_MASK_NSS GENMASK(6, 4) #define RTW89_RA_RATE_MASK_MCS GENMASK(3, 0) +#define RTW89_RA_RATE_MASK_NSS_V1 GENMASK(7, 5) +#define RTW89_RA_RATE_MASK_MCS_V1 GENMASK(4, 0) #define RTW89_RA_RATE_MASK_HT_MCS GENMASK(4, 0) #define RTW89_MK_HT_RATE(nss, mcs) (FIELD_PREP(GENMASK(4, 3), nss) | \ FIELD_PREP(GENMASK(2, 0), mcs)) @@ -3426,6 +3399,88 @@ struct rtw89_mfw_hdr { struct rtw89_mfw_info info[]; } __packed; +struct rtw89_fw_logsuit_hdr { + __le32 rsvd; + __le32 count; + __le32 ids[]; +} __packed; + +#define RTW89_FW_ELEMENT_ALIGN 16 + +enum rtw89_fw_element_id { + RTW89_FW_ELEMENT_ID_BBMCU0 
= 0, + RTW89_FW_ELEMENT_ID_BBMCU1 = 1, + RTW89_FW_ELEMENT_ID_BB_REG = 2, + RTW89_FW_ELEMENT_ID_BB_GAIN = 3, + RTW89_FW_ELEMENT_ID_RADIO_A = 4, + RTW89_FW_ELEMENT_ID_RADIO_B = 5, + RTW89_FW_ELEMENT_ID_RADIO_C = 6, + RTW89_FW_ELEMENT_ID_RADIO_D = 7, + RTW89_FW_ELEMENT_ID_RF_NCTL = 8, + RTW89_FW_ELEMENT_ID_TXPWR_BYRATE = 9, + RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ = 10, + RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ = 11, + RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ = 12, + RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ = 13, + RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ = 14, + RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ = 15, + RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT = 16, + RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU = 17, + + RTW89_FW_ELEMENT_ID_NUM, +}; + +#define BITS_OF_RTW89_TXPWR_FW_ELEMENTS \ + (BIT(RTW89_FW_ELEMENT_ID_TXPWR_BYRATE) | \ + BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ) | \ + BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ) | \ + BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ) | \ + BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ) | \ + BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ) | \ + BIT(RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ) | \ + BIT(RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT) | \ + BIT(RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU)) + +#define RTW89_BE_GEN_DEF_NEEDED_FW_ELEMENTS (BIT(RTW89_FW_ELEMENT_ID_BBMCU0) | \ + BIT(RTW89_FW_ELEMENT_ID_BB_REG) | \ + BIT(RTW89_FW_ELEMENT_ID_RADIO_A) | \ + BIT(RTW89_FW_ELEMENT_ID_RADIO_B) | \ + BIT(RTW89_FW_ELEMENT_ID_RF_NCTL) | \ + BITS_OF_RTW89_TXPWR_FW_ELEMENTS) + +struct __rtw89_fw_txpwr_element { + u8 rsvd0; + u8 rsvd1; + u8 rfe_type; + u8 ent_sz; + __le32 num_ents; + u8 content[]; +} __packed; + +struct rtw89_fw_element_hdr { + __le32 id; /* enum rtw89_fw_element_id */ + __le32 size; /* exclude header size */ + u8 ver[4]; + __le32 rsvd0; + __le32 rsvd1; + __le32 rsvd2; + union { + struct { + u8 priv[8]; + u8 contents[]; + } __packed common; + struct { + u8 idx; + u8 rsvd[7]; + struct { + __le32 addr; + __le32 data; + } __packed regs[]; + } __packed reg2; + struct __rtw89_fw_txpwr_element txpwr; + } __packed u; +} __packed; + struct fwcmd_hdr { __le32 hdr0; __le32 hdr1; @@ -3605,17 +3660,23 @@ struct rtw89_fw_h2c_rf_get_mccch { #define RTW89_FW_BACKTRACE_MAX_SIZE 512 /* 8 * 64 (entries) */ #define RTW89_FW_BACKTRACE_KEY 0xBACEBACE -int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev); +#define FWDL_WAIT_CNT 400000 + +int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type); int rtw89_fw_recognize(struct rtw89_dev *rtwdev); +int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev); const struct firmware * rtw89_early_fw_feature_recognize(struct device *device, const struct rtw89_chip_info *chip, struct rtw89_fw_info *early_fw, int *used_fw_format); -int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type); +int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, + bool include_bb); void rtw89_load_firmware_work(struct work_struct *work); void rtw89_unload_firmware(struct rtw89_dev *rtwdev); int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev); +int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev); +void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len); void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb, u8 type, u8 cat, u8 class, u8 func, bool rack, bool dack, u32 len); @@ -3739,7 +3800,7 @@ int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group); int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, const struct rtw89_fw_mcc_tsf_req *req, struct rtw89_mac_mcc_tsf_rpt *rpt); -int rtw89_fw_h2c_mcc_macid_bitamp(struct 
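Because RTW89_FW_ELEMENT_ID_NUM stays below 32, a u32 bitmap is enough to record which elements a multi-firmware file actually carries, and the BITS_OF_*/NEEDED masks above reduce validation to a single AND. A hedged sketch of such a scan, assuming elements sit back to back and are padded to RTW89_FW_ELEMENT_ALIGN; the function and variable names are made up:

static int fw_elements_check_example(const u8 *data, u32 size)
{
	u32 elm_map = 0;
	u32 offset = 0;

	while (offset + sizeof(struct rtw89_fw_element_hdr) <= size) {
		const struct rtw89_fw_element_hdr *hdr =
			(const struct rtw89_fw_element_hdr *)(data + offset);
		u32 id = le32_to_cpu(hdr->id);

		if (id < RTW89_FW_ELEMENT_ID_NUM)
			elm_map |= BIT(id);

		/* hdr->size excludes the header itself */
		offset += ALIGN(sizeof(*hdr) + le32_to_cpu(hdr->size),
				RTW89_FW_ELEMENT_ALIGN);
	}

	/* a WiFi 7 (BE) part needs BB MCU image, BB/RF tables and the
	 * full set of TX-power elements before download may proceed
	 */
	if ((elm_map & RTW89_BE_GEN_DEF_NEEDED_FW_ELEMENTS) !=
	    RTW89_BE_GEN_DEF_NEEDED_FW_ELEMENTS)
		return -ENOENT;

	return 0;
}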
rtw89_dev *rtwdev, u8 group, u8 macid, +int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid, u8 *bitmap); int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source, u8 target, u8 offset); @@ -3754,4 +3815,97 @@ static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev) rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(rtwdev); } +/* must consider compatibility; don't insert new in the mid */ +struct rtw89_fw_txpwr_byrate_entry { + u8 band; + u8 nss; + u8 rs; + u8 shf; + u8 len; + __le32 data; + u8 bw; + u8 ofdma; +} __packed; + +/* must consider compatibility; don't insert new in the mid */ +struct rtw89_fw_txpwr_lmt_2ghz_entry { + u8 bw; + u8 nt; + u8 rs; + u8 bf; + u8 regd; + u8 ch_idx; + s8 v; +} __packed; + +/* must consider compatibility; don't insert new in the mid */ +struct rtw89_fw_txpwr_lmt_5ghz_entry { + u8 bw; + u8 nt; + u8 rs; + u8 bf; + u8 regd; + u8 ch_idx; + s8 v; +} __packed; + +/* must consider compatibility; don't insert new in the mid */ +struct rtw89_fw_txpwr_lmt_6ghz_entry { + u8 bw; + u8 nt; + u8 rs; + u8 bf; + u8 regd; + u8 reg_6ghz_power; + u8 ch_idx; + s8 v; +} __packed; + +/* must consider compatibility; don't insert new in the mid */ +struct rtw89_fw_txpwr_lmt_ru_2ghz_entry { + u8 ru; + u8 nt; + u8 regd; + u8 ch_idx; + s8 v; +} __packed; + +/* must consider compatibility; don't insert new in the mid */ +struct rtw89_fw_txpwr_lmt_ru_5ghz_entry { + u8 ru; + u8 nt; + u8 regd; + u8 ch_idx; + s8 v; +} __packed; + +/* must consider compatibility; don't insert new in the mid */ +struct rtw89_fw_txpwr_lmt_ru_6ghz_entry { + u8 ru; + u8 nt; + u8 regd; + u8 reg_6ghz_power; + u8 ch_idx; + s8 v; +} __packed; + +/* must consider compatibility; don't insert new in the mid */ +struct rtw89_fw_tx_shape_lmt_entry { + u8 band; + u8 tx_shape_rs; + u8 regd; + u8 v; +} __packed; + +/* must consider compatibility; don't insert new in the mid */ +struct rtw89_fw_tx_shape_lmt_ru_entry { + u8 band; + u8 regd; + u8 v; +} __packed; + +const struct rtw89_rfe_parms * +rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev, + const struct rtw89_rfe_parms *init); + #endif diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index c93e6250cb8b..0c5768f41d55 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -12,7 +12,7 @@ #include "reg.h" #include "util.h" -const u32 rtw89_mac_mem_base_addrs[RTW89_MAC_MEM_NUM] = { +static const u32 rtw89_mac_mem_base_addrs_ax[RTW89_MAC_MEM_NUM] = { [RTW89_MAC_MEM_AXIDMA] = AXIDMA_BASE_ADDR, [RTW89_MAC_MEM_SHARED_BUF] = SHARED_BUF_BASE_ADDR, [RTW89_MAC_MEM_DMAC_TBL] = DMAC_TBL_BASE_ADDR, @@ -39,19 +39,21 @@ const u32 rtw89_mac_mem_base_addrs[RTW89_MAC_MEM_NUM] = { static void rtw89_mac_mem_write(struct rtw89_dev *rtwdev, u32 offset, u32 val, enum rtw89_mac_mem_sel sel) { - u32 addr = rtw89_mac_mem_base_addrs[sel] + offset; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + u32 addr = mac->mem_base_addrs[sel] + offset; - rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, addr); - rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, val); + rtw89_write32(rtwdev, mac->filter_model_addr, addr); + rtw89_write32(rtwdev, mac->indir_access_addr, val); } static u32 rtw89_mac_mem_read(struct rtw89_dev *rtwdev, u32 offset, enum rtw89_mac_mem_sel sel) { - u32 addr = rtw89_mac_mem_base_addrs[sel] + offset; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + u32 addr = mac->mem_base_addrs[sel] + offset; - 
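The repeated "must consider compatibility" rule is what keeps these tables forward-compatible: entries may only grow at the tail, and __rtw89_fw_txpwr_element records ent_sz so a parser strides by the firmware's entry size rather than the driver's sizeof. A sketch of that walk (illustrative name, not the in-tree parser):

static void txpwr_byrate_walk_example(const struct __rtw89_fw_txpwr_element *elm)
{
	const u8 *cursor = elm->content;
	u32 num = le32_to_cpu(elm->num_ents);
	u32 i;

	if (elm->ent_sz < sizeof(struct rtw89_fw_txpwr_byrate_entry))
		return;	/* firmware predates fields this driver needs */

	for (i = 0; i < num; i++, cursor += elm->ent_sz) {
		const struct rtw89_fw_txpwr_byrate_entry *ent =
			(const struct rtw89_fw_txpwr_byrate_entry *)cursor;

		/* a newer firmware may append fields past ent; the prefix
		 * read here stays valid either way
		 */
		pr_debug("byrate: band %u nss %u rs %u bw %u\n",
			 ent->band, ent->nss, ent->rs, ent->bw);
	}
}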
rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, addr); - return rtw89_read32(rtwdev, R_AX_INDIR_ACCESS_ENTRY); + rtw89_write32(rtwdev, mac->filter_model_addr, addr); + return rtw89_read32(rtwdev, mac->indir_access_addr); } int rtw89_mac_check_mac_en(struct rtw89_dev *rtwdev, u8 mac_idx, @@ -2082,7 +2084,7 @@ static int addr_cam_init(struct rtw89_dev *rtwdev, u8 mac_idx) if (ret) return ret; - reg = rtw89_mac_reg_by_idx(R_AX_ADDR_CAM_CTRL, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_ADDR_CAM_CTRL, mac_idx); val = rtw89_read32(rtwdev, reg); val |= u32_encode_bits(0x7f, B_AX_ADDR_CAM_RANGE_MASK) | @@ -2109,7 +2111,7 @@ static int scheduler_init(struct rtw89_dev *rtwdev, u8 mac_idx) if (ret) return ret; - reg = rtw89_mac_reg_by_idx(R_AX_PREBKF_CFG_1, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PREBKF_CFG_1, mac_idx); if (rtwdev->chip->chip_id == RTL8852C) rtw89_write32_mask(rtwdev, reg, B_AX_SIFS_MACTXEN_T1_MASK, SIFS_MACTXEN_T1_V1); @@ -2118,14 +2120,14 @@ static int scheduler_init(struct rtw89_dev *rtwdev, u8 mac_idx) SIFS_MACTXEN_T1); if (rtwdev->chip->chip_id == RTL8852B || rtwdev->chip->chip_id == RTL8851B) { - reg = rtw89_mac_reg_by_idx(R_AX_SCH_EXT_CTRL, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_SCH_EXT_CTRL, mac_idx); rtw89_write32_set(rtwdev, reg, B_AX_PORT_RST_TSF_ADV); } - reg = rtw89_mac_reg_by_idx(R_AX_CCA_CFG_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_CCA_CFG_0, mac_idx); rtw89_write32_clr(rtwdev, reg, B_AX_BTCCA_EN); - reg = rtw89_mac_reg_by_idx(R_AX_PREBKF_CFG_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PREBKF_CFG_0, mac_idx); if (rtwdev->chip->chip_id == RTL8852C) { val = rtw89_read32_mask(rtwdev, R_AX_SEC_ENG_CTRL, B_AX_TX_PARTIAL_MODE); @@ -2165,13 +2167,13 @@ int rtw89_mac_typ_fltr_opt(struct rtw89_dev *rtwdev, switch (type) { case RTW89_MGNT: - reg = rtw89_mac_reg_by_idx(R_AX_MGNT_FLTR, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_MGNT_FLTR, mac_idx); break; case RTW89_CTRL: - reg = rtw89_mac_reg_by_idx(R_AX_CTRL_FLTR, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_CTRL_FLTR, mac_idx); break; case RTW89_DATA: - reg = rtw89_mac_reg_by_idx(R_AX_DATA_FLTR, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_DATA_FLTR, mac_idx); break; default: rtw89_err(rtwdev, "[ERR]set rx filter type err\n"); @@ -2202,9 +2204,9 @@ static int rx_fltr_init(struct rtw89_dev *rtwdev, u8 mac_idx) B_AX_LSIG_PARITY_CHK_EN | B_AX_SIGA_CRC_CHK | B_AX_VHT_SU_SIGB_CRC_CHK | B_AX_VHT_MU_SIGB_CRC_CHK | B_AX_HE_SIGB_CRC_CHK; - rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, mac_idx), + rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_RX_FLTR_OPT, mac_idx), mac_ftlr); - rtw89_write16(rtwdev, rtw89_mac_reg_by_idx(R_AX_PLCP_HDR_FLTR, mac_idx), + rtw89_write16(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_PLCP_HDR_FLTR, mac_idx), plcp_ftlr); return 0; @@ -2224,20 +2226,20 @@ static void _patch_dis_resp_chk(struct rtw89_dev *rtwdev, u8 mac_idx) switch (rtwdev->chip->chip_id) { case RTL8852A: case RTL8852B: - reg = rtw89_mac_reg_by_idx(R_AX_RSP_CHK_SIG, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RSP_CHK_SIG, mac_idx); val32 = rtw89_read32(rtwdev, reg) & ~b_rsp_chk_nav; rtw89_write32(rtwdev, reg, val32); - reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_0, mac_idx); val32 = rtw89_read32(rtwdev, reg) & ~b_rsp_chk_cca; rtw89_write32(rtwdev, reg, val32); break; default: - reg = rtw89_mac_reg_by_idx(R_AX_RSP_CHK_SIG, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, 
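The rewritten rtw89_mac_mem_write()/_read() pair captures the indirect SRAM scheme: the absolute address from mem_base_addrs[] goes into a per-generation window register (filter_model_addr), and the data then moves through the matching access port (indir_access_addr). Callers stay generation-agnostic; for example (sketch only, rtw89_mac_mem_read() is file-static in mac.c):

static u32 sec_cam_peek_example(struct rtw89_dev *rtwdev)
{
	/* picks the right security-CAM base for the chip generation,
	 * without the caller knowing either constant
	 */
	return rtw89_mac_mem_read(rtwdev, 0, RTW89_MAC_MEM_SECURITY_CAM);
}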
R_AX_RSP_CHK_SIG, mac_idx); val32 = rtw89_read32(rtwdev, reg) | b_rsp_chk_nav; rtw89_write32(rtwdev, reg, val32); - reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_0, mac_idx); val32 = rtw89_read32(rtwdev, reg) | b_rsp_chk_cca; rtw89_write32(rtwdev, reg, val32); break; @@ -2253,7 +2255,7 @@ static int cca_ctrl_init(struct rtw89_dev *rtwdev, u8 mac_idx) if (ret) return ret; - reg = rtw89_mac_reg_by_idx(R_AX_CCA_CONTROL, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_CCA_CONTROL, mac_idx); val = rtw89_read32(rtwdev, reg); val |= (B_AX_TB_CHK_BASIC_NAV | B_AX_TB_CHK_BTCCA | B_AX_TB_CHK_EDCCA | B_AX_TB_CHK_CCA_P20 | @@ -2294,7 +2296,7 @@ static int spatial_reuse_init(struct rtw89_dev *rtwdev, u8 mac_idx) ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); if (ret) return ret; - reg = rtw89_mac_reg_by_idx(R_AX_RX_SR_CTRL, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RX_SR_CTRL, mac_idx); rtw89_write8_clr(rtwdev, reg, B_AX_SR_EN); return 0; @@ -2309,13 +2311,13 @@ static int tmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) if (ret) return ret; - reg = rtw89_mac_reg_by_idx(R_AX_MAC_LOOPBACK, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_MAC_LOOPBACK, mac_idx); rtw89_write32_clr(rtwdev, reg, B_AX_MACLBK_EN); - reg = rtw89_mac_reg_by_idx(R_AX_TCR0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TCR0, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_TCR_UDF_THSD_MASK, TCR_UDF_THSD); - reg = rtw89_mac_reg_by_idx(R_AX_TXD_FIFO_CTRL, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXD_FIFO_CTRL, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_TXDFIFO_HIGH_MCS_THRE_MASK, TXDFIFO_HIGH_MCS_THRE); rtw89_write32_mask(rtwdev, reg, B_AX_TXDFIFO_LOW_MCS_THRE_MASK, TXDFIFO_LOW_MCS_THRE); @@ -2333,7 +2335,7 @@ static int trxptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx) if (ret) return ret; - reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_0, mac_idx); val = rtw89_read32(rtwdev, reg); val &= ~B_AX_WMAC_SPEC_SIFS_CCK_MASK; val |= FIELD_PREP(B_AX_WMAC_SPEC_SIFS_CCK_MASK, WMAC_SPEC_SIFS_CCK); @@ -2353,12 +2355,12 @@ static int trxptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx) val |= FIELD_PREP(B_AX_WMAC_SPEC_SIFS_OFDM_MASK, sifs); rtw89_write32(rtwdev, reg, val); - reg = rtw89_mac_reg_by_idx(R_AX_RXTRIG_TEST_USER_2, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RXTRIG_TEST_USER_2, mac_idx); rtw89_write32_set(rtwdev, reg, B_AX_RXTRIG_FCSCHK_EN); - reg = rtw89_mac_reg_by_idx(rrsr->ref_rate.addr, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, rrsr->ref_rate.addr, mac_idx); rtw89_write32_mask(rtwdev, reg, rrsr->ref_rate.mask, rrsr->ref_rate.data); - reg = rtw89_mac_reg_by_idx(rrsr->rsc.addr, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, rrsr->rsc.addr, mac_idx); rtw89_write32_mask(rtwdev, reg, rrsr->rsc.mask, rrsr->rsc.data); return 0; @@ -2397,10 +2399,10 @@ static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) if (mac_idx == RTW89_MAC_0) rst_bacam(rtwdev); - reg = rtw89_mac_reg_by_idx(R_AX_RESPBA_CAM_CTRL, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RESPBA_CAM_CTRL, mac_idx); rtw89_write8_set(rtwdev, reg, B_AX_SSN_SEL); - reg = rtw89_mac_reg_by_idx(R_AX_DLK_PROTECT_CTL, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_DLK_PROTECT_CTL, mac_idx); val = rtw89_read16(rtwdev, reg); val = u16_replace_bits(val, TRXCFG_RMAC_DATA_TO, B_AX_RX_DLK_DATA_TIME_MASK); @@ -2408,10 +2410,10 @@ static int rmac_init(struct rtw89_dev *rtwdev, u8 
mac_idx) B_AX_RX_DLK_CCA_TIME_MASK); rtw89_write16(rtwdev, reg, val); - reg = rtw89_mac_reg_by_idx(R_AX_RCR, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RCR, mac_idx); rtw89_write8_mask(rtwdev, reg, B_AX_CH_EN_MASK, 0x1); - reg = rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RX_FLTR_OPT, mac_idx); if (mac_idx == RTW89_MAC_0) rx_qta = rtwdev->mac.dle_info.c0_rx_qta; else @@ -2425,13 +2427,13 @@ static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) if (rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV) { rtw89_write16_mask(rtwdev, - rtw89_mac_reg_by_idx(R_AX_DLK_PROTECT_CTL, mac_idx), + rtw89_mac_reg_by_idx(rtwdev, R_AX_DLK_PROTECT_CTL, mac_idx), B_AX_RX_DLK_CCA_TIME_MASK, 0); - rtw89_write16_set(rtwdev, rtw89_mac_reg_by_idx(R_AX_RCR, mac_idx), + rtw89_write16_set(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_RCR, mac_idx), BIT(12)); } - reg = rtw89_mac_reg_by_idx(R_AX_PLCP_HDR_FLTR, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PLCP_HDR_FLTR, mac_idx); rtw89_write8_clr(rtwdev, reg, B_AX_VHT_SU_SIGB_CRC_CHK); return ret; @@ -2447,7 +2449,7 @@ static int cmac_com_init(struct rtw89_dev *rtwdev, u8 mac_idx) if (ret) return ret; - reg = rtw89_mac_reg_by_idx(R_AX_TX_SUB_CARRIER_VALUE, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TX_SUB_CARRIER_VALUE, mac_idx); val = rtw89_read32(rtwdev, reg); val = u32_replace_bits(val, 0, B_AX_TXSC_20M_MASK); val = u32_replace_bits(val, 0, B_AX_TXSC_40M_MASK); @@ -2455,7 +2457,7 @@ static int cmac_com_init(struct rtw89_dev *rtwdev, u8 mac_idx) rtw89_write32(rtwdev, reg, val); if (chip_id == RTL8852A || chip_id == RTL8852B) { - reg = rtw89_mac_reg_by_idx(R_AX_PTCL_RRSR1, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_RRSR1, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_RRSR_RATE_EN_MASK, RRSR_OFDM_CCK_EN); } @@ -2485,7 +2487,7 @@ static int ptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx) return ret; if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE) { - reg = rtw89_mac_reg_by_idx(R_AX_SIFS_SETTING, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_SIFS_SETTING, mac_idx); val = rtw89_read32(rtwdev, reg); val = u32_replace_bits(val, S_AX_CTS2S_TH_1K, B_AX_HW_CTS2SELF_PKT_LEN_TH_MASK); @@ -2494,7 +2496,7 @@ static int ptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx) val |= B_AX_HW_CTS2SELF_EN; rtw89_write32(rtwdev, reg, val); - reg = rtw89_mac_reg_by_idx(R_AX_PTCL_FSM_MON, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_FSM_MON, mac_idx); val = rtw89_read32(rtwdev, reg); val = u32_replace_bits(val, S_AX_PTCL_TO_2MS, B_AX_PTCL_TX_ARB_TO_THR_MASK); val &= ~B_AX_PTCL_TX_ARB_TO_MODE; @@ -2531,7 +2533,7 @@ static int cmac_dma_init(struct rtw89_dev *rtwdev, u8 mac_idx) if (ret) return ret; - reg = rtw89_mac_reg_by_idx(R_AX_RXDMA_CTRL_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RXDMA_CTRL_0, mac_idx); rtw89_write8_clr(rtwdev, reg, RX_FULL_MODE); return 0; @@ -2725,7 +2727,7 @@ static int rtw89_hw_sch_tx_en_h2c(struct rtw89_dev *rtwdev, u8 band, static int rtw89_set_hw_sch_tx_en(struct rtw89_dev *rtwdev, u8 mac_idx, u16 tx_en, u16 tx_en_mask) { - u32 reg = rtw89_mac_reg_by_idx(R_AX_CTN_TXEN, mac_idx); + u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_CTN_TXEN, mac_idx); u16 val; int ret; @@ -2747,7 +2749,7 @@ static int rtw89_set_hw_sch_tx_en(struct rtw89_dev *rtwdev, u8 mac_idx, static int rtw89_set_hw_sch_tx_en_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en, u32 tx_en_mask) { - u32 reg = rtw89_mac_reg_by_idx(R_AX_CTN_DRV_TXEN, mac_idx); + u32 reg = 
rtw89_mac_reg_by_idx(rtwdev, R_AX_CTN_DRV_TXEN, mac_idx); u32 val; int ret; @@ -2768,7 +2770,7 @@ int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, int ret; *tx_en = rtw89_read16(rtwdev, - rtw89_mac_reg_by_idx(R_AX_CTN_TXEN, mac_idx)); + rtw89_mac_reg_by_idx(rtwdev, R_AX_CTN_TXEN, mac_idx)); switch (sel) { case RTW89_SCH_TX_SEL_ALL: @@ -2809,7 +2811,7 @@ int rtw89_mac_stop_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, int ret; *tx_en = rtw89_read32(rtwdev, - rtw89_mac_reg_by_idx(R_AX_CTN_DRV_TXEN, mac_idx)); + rtw89_mac_reg_by_idx(rtwdev, R_AX_CTN_DRV_TXEN, mac_idx)); switch (sel) { case RTW89_SCH_TX_SEL_ALL: @@ -3016,7 +3018,7 @@ static int band_idle_ck_b(struct rtw89_dev *rtwdev, u8 mac_idx) if (ret) return ret; - reg = rtw89_mac_reg_by_idx(R_AX_PTCL_TX_CTN_SEL, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_TX_CTN_SEL, mac_idx); ret = read_poll_timeout(rtw89_read8, val, (val & B_AX_PTCL_TX_ON_STAT) == 0, @@ -3224,7 +3226,7 @@ static void rtw89_scheduler_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) { u32 reg; - reg = rtw89_mac_reg_by_idx(R_AX_SCHEDULE_ERR_IMR, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_SCHEDULE_ERR_IMR, mac_idx); rtw89_write32_clr(rtwdev, reg, B_AX_SORT_NON_IDLE_ERR_INT_EN | B_AX_FSM_TIMEOUT_ERR_INT_EN); rtw89_write32_set(rtwdev, reg, B_AX_FSM_TIMEOUT_ERR_INT_EN); @@ -3235,7 +3237,7 @@ static void rtw89_ptcl_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; u32 reg; - reg = rtw89_mac_reg_by_idx(R_AX_PTCL_IMR0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_IMR0, mac_idx); rtw89_write32_clr(rtwdev, reg, imr->ptcl_imr_clr); rtw89_write32_set(rtwdev, reg, imr->ptcl_imr_set); } @@ -3246,12 +3248,12 @@ static void rtw89_cdma_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; u32 reg; - reg = rtw89_mac_reg_by_idx(imr->cdma_imr_0_reg, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, imr->cdma_imr_0_reg, mac_idx); rtw89_write32_clr(rtwdev, reg, imr->cdma_imr_0_clr); rtw89_write32_set(rtwdev, reg, imr->cdma_imr_0_set); if (chip_id == RTL8852C) { - reg = rtw89_mac_reg_by_idx(imr->cdma_imr_1_reg, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, imr->cdma_imr_1_reg, mac_idx); rtw89_write32_clr(rtwdev, reg, imr->cdma_imr_1_clr); rtw89_write32_set(rtwdev, reg, imr->cdma_imr_1_set); } @@ -3262,7 +3264,7 @@ static void rtw89_phy_intf_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; u32 reg; - reg = rtw89_mac_reg_by_idx(imr->phy_intf_imr_reg, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, imr->phy_intf_imr_reg, mac_idx); rtw89_write32_clr(rtwdev, reg, imr->phy_intf_imr_clr); rtw89_write32_set(rtwdev, reg, imr->phy_intf_imr_set); } @@ -3272,7 +3274,7 @@ static void rtw89_rmac_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; u32 reg; - reg = rtw89_mac_reg_by_idx(imr->rmac_imr_reg, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, imr->rmac_imr_reg, mac_idx); rtw89_write32_clr(rtwdev, reg, imr->rmac_imr_clr); rtw89_write32_set(rtwdev, reg, imr->rmac_imr_set); } @@ -3282,7 +3284,7 @@ static void rtw89_tmac_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; u32 reg; - reg = rtw89_mac_reg_by_idx(imr->tmac_imr_reg, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, imr->tmac_imr_reg, mac_idx); rtw89_write32_clr(rtwdev, reg, imr->tmac_imr_clr); rtw89_write32_set(rtwdev, reg, 
imr->tmac_imr_set); } @@ -3450,7 +3452,7 @@ static void rtw89_disable_fw_watchdog(struct rtw89_dev *rtwdev) rtw89_mac_mem_write(rtwdev, R_AX_WDT_STATUS, val32, RTW89_MAC_MEM_CPU_LOCAL); } -void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev) +static void rtw89_mac_disable_cpu_ax(struct rtw89_dev *rtwdev) { clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); @@ -3465,7 +3467,8 @@ void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev) rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN); } -int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason, bool dlfw) +static int rtw89_mac_enable_cpu_ax(struct rtw89_dev *rtwdev, u8 boot_reason, + bool dlfw, bool include_bb) { u32 val; int ret; @@ -3503,7 +3506,7 @@ int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason, bool dlfw) if (!dlfw) { mdelay(5); - ret = rtw89_fw_check_rdy(rtwdev); + ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE); if (ret) return ret; } @@ -3590,7 +3593,7 @@ int rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev) } EXPORT_SYMBOL(rtw89_mac_disable_bb_rf); -int rtw89_mac_partial_init(struct rtw89_dev *rtwdev) +int rtw89_mac_partial_init(struct rtw89_dev *rtwdev, bool include_bb) { int ret; @@ -3604,6 +3607,12 @@ int rtw89_mac_partial_init(struct rtw89_dev *rtwdev) rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); + if (include_bb) { + rtw89_chip_bb_preinit(rtwdev, RTW89_PHY_0); + if (rtwdev->dbcc_en) + rtw89_chip_bb_preinit(rtwdev, RTW89_PHY_1); + } + ret = rtw89_mac_dmac_pre_init(rtwdev); if (ret) return ret; @@ -3614,7 +3623,7 @@ int rtw89_mac_partial_init(struct rtw89_dev *rtwdev) return ret; } - ret = rtw89_fw_download(rtwdev, RTW89_FW_NORMAL); + ret = rtw89_fw_download(rtwdev, RTW89_FW_NORMAL, include_bb); if (ret) return ret; @@ -3623,9 +3632,11 @@ int rtw89_mac_partial_init(struct rtw89_dev *rtwdev) int rtw89_mac_init(struct rtw89_dev *rtwdev) { + const struct rtw89_chip_info *chip = rtwdev->chip; + bool include_bb = !!chip->bbmcu_nr; int ret; - ret = rtw89_mac_partial_init(rtwdev); + ret = rtw89_mac_partial_init(rtwdev, include_bb); if (ret) goto fail; @@ -3661,6 +3672,9 @@ static void rtw89_mac_dmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid) { u8 i; + if (rtwdev->chip->chip_gen != RTW89_CHIP_AX) + return; + for (i = 0; i < 4; i++) { rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, DMAC_TBL_BASE_ADDR + (macid << 4) + (i << 2)); @@ -3670,6 +3684,9 @@ static void rtw89_mac_dmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid) static void rtw89_mac_cmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid) { + if (rtwdev->chip->chip_gen != RTW89_CHIP_AX) + return; + rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, CMAC_TBL_BASE_ADDR + macid * CCTL_INFO_SIZE); rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, 0x4); @@ -3704,7 +3721,7 @@ int rtw89_mac_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause) return 0; } -static const struct rtw89_port_reg rtw_port_base = { +static const struct rtw89_port_reg rtw89_port_base_ax = { .port_cfg = R_AX_PORT_CFG_P0, .tbtt_prohib = R_AX_TBTT_PROHIB_P0, .bcn_area = R_AX_BCN_AREA_P0, @@ -3719,7 +3736,15 @@ static const struct rtw89_port_reg rtw_port_base = { .tbtt_shift = R_AX_TBTT_SHIFT_P0, .bcn_cnt_tmr = R_AX_BCN_CNT_TMR_P0, .tsftr_l = R_AX_TSFTR_LOW_P0, - .tsftr_h = R_AX_TSFTR_HIGH_P0 + .tsftr_h = R_AX_TSFTR_HIGH_P0, + .md_tsft = R_AX_MD_TSFT_STMP_CTL, + .bss_color = R_AX_PTCL_BSS_COLOR_0, + .mbssid = R_AX_MBSSID_CTRL, + .mbssid_drop = R_AX_MBSSID_DROP_0, + .tsf_sync = R_AX_PORT0_TSF_SYNC, + .hiq_win = {R_AX_P0MB_HGQ_WINDOW_CFG_0, R_AX_PORT_HGQ_WINDOW_CFG, + 
R_AX_PORT_HGQ_WINDOW_CFG + 1, R_AX_PORT_HGQ_WINDOW_CFG + 2, + R_AX_PORT_HGQ_WINDOW_CFG + 3}, }; #define BCN_INTERVAL 100 @@ -3734,8 +3759,9 @@ static const struct rtw89_port_reg rtw_port_base = { static void rtw89_mac_port_cfg_func_sw(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); - const struct rtw89_port_reg *p = &rtw_port_base; if (!rtw89_read32_port_mask(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN)) return; @@ -3756,7 +3782,8 @@ static void rtw89_mac_port_cfg_func_sw(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_tx_rpt(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool en) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; if (en) rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TXBCN_RPT_EN); @@ -3767,7 +3794,8 @@ static void rtw89_mac_port_cfg_tx_rpt(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_rx_rpt(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool en) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; if (en) rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_RXBCN_RPT_EN); @@ -3778,7 +3806,8 @@ static void rtw89_mac_port_cfg_rx_rpt(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_net_type(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; rtw89_write32_port_mask(rtwdev, rtwvif, p->port_cfg, B_AX_NET_TYPE_MASK, rtwvif->net_type); @@ -3787,7 +3816,8 @@ static void rtw89_mac_port_cfg_net_type(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_bcn_prct(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; bool en = rtwvif->net_type != RTW89_NET_TYPE_NO_LINK; u32 bits = B_AX_TBTT_PROHIB_EN | B_AX_BRK_SETUP; @@ -3800,7 +3830,8 @@ static void rtw89_mac_port_cfg_bcn_prct(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_rx_sw(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA || rtwvif->net_type == RTW89_NET_TYPE_AD_HOC; u32 bit = B_AX_RX_BSSID_FIT_EN; @@ -3814,7 +3845,8 @@ static void rtw89_mac_port_cfg_rx_sw(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA || rtwvif->net_type == RTW89_NET_TYPE_AD_HOC; @@ -3827,7 +3859,8 @@ static void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_tx_sw(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const 
struct rtw89_port_reg *p = mac->port_base; bool en = rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || rtwvif->net_type == RTW89_NET_TYPE_AD_HOC; @@ -3840,8 +3873,9 @@ static void rtw89_mac_port_cfg_tx_sw(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_bcn_intv(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); - const struct rtw89_port_reg *p = &rtw_port_base; u16 bcn_int = vif->bss_conf.beacon_int ? vif->bss_conf.beacon_int : BCN_INTERVAL; rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_space, B_AX_BCN_SPACE_MASK, @@ -3851,27 +3885,25 @@ static void rtw89_mac_port_cfg_bcn_intv(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_hiq_win(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - static const u32 hiq_win_addr[RTW89_PORT_NUM] = { - R_AX_P0MB_HGQ_WINDOW_CFG_0, R_AX_PORT_HGQ_WINDOW_CFG, - R_AX_PORT_HGQ_WINDOW_CFG + 1, R_AX_PORT_HGQ_WINDOW_CFG + 2, - R_AX_PORT_HGQ_WINDOW_CFG + 3, - }; u8 win = rtwvif->net_type == RTW89_NET_TYPE_AP_MODE ? 16 : 0; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; u8 port = rtwvif->port; u32 reg; - reg = rtw89_mac_reg_by_idx(hiq_win_addr[port], rtwvif->mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, p->hiq_win[port], rtwvif->mac_idx); rtw89_write8(rtwdev, reg, win); } static void rtw89_mac_port_cfg_hiq_dtim(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); - const struct rtw89_port_reg *p = &rtw_port_base; u32 addr; - addr = rtw89_mac_reg_by_idx(R_AX_MD_TSFT_STMP_CTL, rtwvif->mac_idx); + addr = rtw89_mac_reg_by_idx(rtwdev, p->md_tsft, rtwvif->mac_idx); rtw89_write8_set(rtwdev, addr, B_AX_UPD_HGQMD | B_AX_UPD_TIMIE); rtw89_write16_port_mask(rtwdev, rtwvif, p->dtim_ctrl, B_AX_DTIM_NUM_MASK, @@ -3881,7 +3913,8 @@ static void rtw89_mac_port_cfg_hiq_dtim(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_bcn_setup_time(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_SETUP_MASK, BCN_SETUP_DEF); @@ -3890,7 +3923,8 @@ static void rtw89_mac_port_cfg_bcn_setup_time(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_bcn_hold_time(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_HOLD_MASK, BCN_HOLD_DEF); @@ -3899,7 +3933,8 @@ static void rtw89_mac_port_cfg_bcn_hold_time(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_bcn_mask_area(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_area, B_AX_BCN_MSK_AREA_MASK, BCN_MASK_DEF); @@ -3908,7 +3943,8 @@ static void rtw89_mac_port_cfg_bcn_mask_area(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_tbtt_early(struct 
rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; rtw89_write16_port_mask(rtwdev, rtwvif, p->tbtt_early, B_AX_TBTTERLY_MASK, TBTT_ERLY_DEF); @@ -3917,6 +3953,8 @@ static void rtw89_mac_port_cfg_tbtt_early(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_bss_color(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); static const u32 masks[RTW89_PORT_NUM] = { B_AX_BSS_COLOB_AX_PORT_0_MASK, B_AX_BSS_COLOB_AX_PORT_1_MASK, @@ -3929,14 +3967,16 @@ static void rtw89_mac_port_cfg_bss_color(struct rtw89_dev *rtwdev, u8 bss_color; bss_color = vif->bss_conf.he_bss_color.color; - reg_base = port >= 4 ? R_AX_PTCL_BSS_COLOR_1 : R_AX_PTCL_BSS_COLOR_0; - reg = rtw89_mac_reg_by_idx(reg_base, rtwvif->mac_idx); + reg_base = port >= 4 ? p->bss_color + 4 : p->bss_color; + reg = rtw89_mac_reg_by_idx(rtwdev, reg_base, rtwvif->mac_idx); rtw89_write32_mask(rtwdev, reg, masks[port], bss_color); } static void rtw89_mac_port_cfg_mbssid(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; u8 port = rtwvif->port; u32 reg; @@ -3944,7 +3984,7 @@ static void rtw89_mac_port_cfg_mbssid(struct rtw89_dev *rtwdev, return; if (port == 0) { - reg = rtw89_mac_reg_by_idx(R_AX_MBSSID_CTRL, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, p->mbssid, rtwvif->mac_idx); rtw89_write32_clr(rtwdev, reg, B_AX_P0MB_ALL_MASK); } } @@ -3952,11 +3992,13 @@ static void rtw89_mac_port_cfg_mbssid(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_hiq_drop(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; u8 port = rtwvif->port; u32 reg; u32 val; - reg = rtw89_mac_reg_by_idx(R_AX_MBSSID_DROP_0, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, p->mbssid_drop, rtwvif->mac_idx); val = rtw89_read32(rtwdev, reg); val &= ~FIELD_PREP(B_AX_PORT_DROP_4_0_MASK, BIT(port)); if (port == 0) @@ -3967,7 +4009,8 @@ static void rtw89_mac_port_cfg_hiq_drop(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_func_en(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool enable) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; if (enable) rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, @@ -3980,7 +4023,8 @@ static void rtw89_mac_port_cfg_func_en(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_bcn_early(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_early, B_AX_BCNERLY_MASK, BCN_ERLY_DEF); @@ -3989,7 +4033,8 @@ static void rtw89_mac_port_cfg_bcn_early(struct rtw89_dev *rtwdev, static void rtw89_mac_port_cfg_tbtt_shift(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; u16 val; 
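Every rtw89_mac_port_cfg_*() helper above now fetches its registers through chip->mac_def->port_base rather than the file-static AX table, so the beacon/TBTT logic becomes reusable once rtw89_port_base_be lands. Resolving an absolute per-port address composes the same three pieces everywhere; a sketch (0x40 is the per-port stride already baked into rtw89_mac_reg_by_port()):

static u32 port_reg_example(struct rtw89_dev *rtwdev,
			    struct rtw89_vif *rtwvif)
{
	const struct rtw89_port_reg *p = rtwdev->chip->mac_def->port_base;

	/* generation-specific base, +0x40 per port, +band1_offset on MAC 1 */
	return rtw89_mac_reg_by_port(rtwdev, p->port_cfg, rtwvif->port,
				     rtwvif->mac_idx);
}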
if (rtwdev->chip->chip_id != RTL8852C) @@ -4011,10 +4056,12 @@ void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif_src, u16 offset_tu) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; u32 val, reg; val = RTW89_PORT_OFFSET_TU_TO_32US(offset_tu); - reg = rtw89_mac_reg_by_idx(R_AX_PORT0_TSF_SYNC + rtwvif->port * 4, + reg = rtw89_mac_reg_by_idx(rtwdev, p->tsf_sync + rtwvif->port * 4, rtwvif->mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_SYNC_PORT_SRC, rtwvif_src->port); @@ -4152,7 +4199,8 @@ int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) int rtw89_mac_port_get_tsf(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, u64 *tsf) { - const struct rtw89_port_reg *p = &rtw_port_base; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_port_reg *p = mac->port_base; u32 tsf_low, tsf_high; int ret; @@ -4204,7 +4252,7 @@ void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev, rtw89_mac_check_he_obss_narrow_bw_ru_iter, &tolerated); - reg = rtw89_mac_reg_by_idx(R_AX_RXTRIG_TEST_USER_2, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RXTRIG_TEST_USER_2, rtwvif->mac_idx); if (tolerated) rtw89_write32_clr(rtwdev, reg, B_AX_RXTRIG_RU26_DIS); else @@ -4437,8 +4485,7 @@ rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 le static void rtw89_mac_c2h_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) { - rtw89_info(rtwdev, "%*s", RTW89_GET_C2H_LOG_LEN(len), - RTW89_GET_C2H_LOG_SRT_PRT(c2h->data)); + rtw89_fw_log_dump(rtwdev, c2h->data, len); } static void @@ -4472,6 +4519,7 @@ static void rtw89_mac_c2h_tsf32_toggle_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) { + rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_TSF32_TOGGLE_CHANGE); } static void @@ -4726,21 +4774,22 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, handler(rtwdev, skb, len); } -bool rtw89_mac_get_txpwr_cr(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx, - u32 reg_base, u32 *cr) +static +bool rtw89_mac_get_txpwr_cr_ax(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, + u32 reg_base, u32 *cr) { const struct rtw89_dle_mem *dle_mem = rtwdev->chip->dle_mem; enum rtw89_qta_mode mode = dle_mem->mode; - u32 addr = rtw89_mac_reg_by_idx(reg_base, phy_idx); + u32 addr = rtw89_mac_reg_by_idx(rtwdev, reg_base, phy_idx); - if (addr < R_AX_PWR_RATE_CTRL || addr > CMAC1_END_ADDR) { + if (addr < R_AX_PWR_RATE_CTRL || addr > CMAC1_END_ADDR_AX) { rtw89_err(rtwdev, "[TXPWR] addr=0x%x exceed txpwr cr\n", addr); goto error; } - if (addr >= CMAC1_START_ADDR && addr <= CMAC1_END_ADDR) + if (addr >= CMAC1_START_ADDR_AX && addr <= CMAC1_END_ADDR_AX) if (mode == RTW89_QTA_SCC) { rtw89_err(rtwdev, "[TXPWR] addr=0x%x but hw not enable\n", @@ -4757,11 +4806,10 @@ error: return false; } -EXPORT_SYMBOL(rtw89_mac_get_txpwr_cr); int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable) { - u32 reg = rtw89_mac_reg_by_idx(R_AX_PPDU_STAT, mac_idx); + u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PPDU_STAT, mac_idx); int ret; ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); @@ -4792,6 +4840,7 @@ void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx) #define MAC_AX_LEN_TH_MAX 255 #define MAC_AX_TIME_TH_DEF 88 #define MAC_AX_LEN_TH_DEF 4080 + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; struct ieee80211_hw *hw = rtwdev->hw; u32 rts_threshold = 
hw->wiphy->rts_threshold; u32 time_th, len_th; @@ -4808,7 +4857,7 @@ void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx) time_th = min_t(u32, time_th >> MAC_AX_TIME_TH_SH, MAC_AX_TIME_TH_MAX); len_th = min_t(u32, len_th >> MAC_AX_LEN_TH_SH, MAC_AX_LEN_TH_MAX); - reg = rtw89_mac_reg_by_idx(R_AX_AGG_LEN_HT_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, mac->agg_len_ht, mac_idx); rtw89_write16_mask(rtwdev, reg, B_AX_RTS_TXTIME_TH_MASK, time_th); rtw89_write16_mask(rtwdev, reg, B_AX_RTS_LEN_TH_MASK, len_th); } @@ -5044,7 +5093,7 @@ int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt) if (ret) return ret; - reg = rtw89_mac_reg_by_idx(R_AX_BT_PLT, plt->band); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BT_PLT, plt->band); val = (plt->tx & RTW89_MAC_AX_PLT_LTE_RX ? B_AX_TX_PLT_GNT_LTE_RX : 0) | (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_AX_TX_PLT_GNT_BT_TX : 0) | (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_AX_TX_PLT_GNT_BT_RX : 0) | @@ -5134,7 +5183,7 @@ u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band) u32 reg; u16 cnt; - reg = rtw89_mac_reg_by_idx(R_AX_BT_PLT, band); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BT_PLT, band); cnt = rtw89_read32_mask(rtwdev, reg, B_AX_BT_PLT_PKT_CNT_MASK); rtw89_write16_set(rtwdev, reg, B_AX_BT_PLT_RST); @@ -5146,8 +5195,11 @@ static void rtw89_mac_bfee_standby_timer(struct rtw89_dev *rtwdev, u8 mac_idx, { u32 reg; + if (rtwdev->chip->chip_gen != RTW89_CHIP_AX) + return; + rtw89_debug(rtwdev, RTW89_DBG_BF, "set bfee standby_timer to %d\n", keep); - reg = rtw89_mac_reg_by_idx(R_AX_BFMEE_RESP_OPTION, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BFMEE_RESP_OPTION, mac_idx); if (keep) { set_bit(RTW89_FLAG_BFEE_TIMER_KEEP, rtwdev->flags); rtw89_write32_mask(rtwdev, reg, B_AX_BFMEE_BFRP_RX_STANDBY_TIMER_MASK, @@ -5159,14 +5211,14 @@ static void rtw89_mac_bfee_standby_timer(struct rtw89_dev *rtwdev, u8 mac_idx, } } -static void rtw89_mac_bfee_ctrl(struct rtw89_dev *rtwdev, u8 mac_idx, bool en) +void rtw89_mac_bfee_ctrl(struct rtw89_dev *rtwdev, u8 mac_idx, bool en) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; u32 reg; - u32 mask = B_AX_BFMEE_HT_NDPA_EN | B_AX_BFMEE_VHT_NDPA_EN | - B_AX_BFMEE_HE_NDPA_EN; + u32 mask = mac->bfee_ctrl.mask; rtw89_debug(rtwdev, RTW89_DBG_BF, "set bfee ndpa_en to %d\n", en); - reg = rtw89_mac_reg_by_idx(R_AX_BFMEE_RESP_OPTION, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, mac->bfee_ctrl.addr, mac_idx); if (en) { set_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags); rtw89_write32_set(rtwdev, reg, mask); @@ -5176,7 +5228,7 @@ static void rtw89_mac_bfee_ctrl(struct rtw89_dev *rtwdev, u8 mac_idx, bool en) } } -static int rtw89_mac_init_bfee(struct rtw89_dev *rtwdev, u8 mac_idx) +static int rtw89_mac_init_bfee_ax(struct rtw89_dev *rtwdev, u8 mac_idx) { u32 reg; u32 val32; @@ -5188,39 +5240,39 @@ static int rtw89_mac_init_bfee(struct rtw89_dev *rtwdev, u8 mac_idx) /* AP mode set tx gid to 63 */ /* STA mode set tx gid to 0(default) */ - reg = rtw89_mac_reg_by_idx(R_AX_BFMER_CTRL_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BFMER_CTRL_0, mac_idx); rtw89_write32_set(rtwdev, reg, B_AX_BFMER_NDP_BFEN); - reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx); rtw89_write32(rtwdev, reg, CSI_RRSC_BMAP); - reg = rtw89_mac_reg_by_idx(R_AX_BFMEE_RESP_OPTION, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BFMEE_RESP_OPTION, mac_idx); val32 = FIELD_PREP(B_AX_BFMEE_NDP_RX_STANDBY_TIMER_MASK, 
NDP_RX_STANDBY_TIMER); rtw89_write32(rtwdev, reg, val32); rtw89_mac_bfee_standby_timer(rtwdev, mac_idx, true); rtw89_mac_bfee_ctrl(rtwdev, mac_idx, true); - reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL | B_AX_BFMEE_USE_NSTS | B_AX_BFMEE_CSI_GID_SEL | B_AX_BFMEE_CSI_FORCE_RETE_EN); - reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RATE, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_RATE, mac_idx); rtw89_write32(rtwdev, reg, u32_encode_bits(CSI_INIT_RATE_HT, B_AX_BFMEE_HT_CSI_RATE_MASK) | u32_encode_bits(CSI_INIT_RATE_VHT, B_AX_BFMEE_VHT_CSI_RATE_MASK) | u32_encode_bits(CSI_INIT_RATE_HE, B_AX_BFMEE_HE_CSI_RATE_MASK)); - reg = rtw89_mac_reg_by_idx(R_AX_CSIRPT_OPTION, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_CSIRPT_OPTION, mac_idx); rtw89_write32_set(rtwdev, reg, B_AX_CSIPRT_VHTSU_AID_EN | B_AX_CSIPRT_HESU_AID_EN); return 0; } -static int rtw89_mac_set_csi_para_reg(struct rtw89_dev *rtwdev, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) +static int rtw89_mac_set_csi_para_reg_ax(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) { struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; u8 mac_idx = rtwvif->mac_idx; @@ -5255,7 +5307,7 @@ static int rtw89_mac_set_csi_para_reg(struct rtw89_dev *rtwdev, nc = min(nc, sound_dim); nr = min(nr, sound_dim); - reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL); val = FIELD_PREP(B_AX_BFMEE_CSIINFO0_NC_MASK, nc) | @@ -5267,18 +5319,18 @@ static int rtw89_mac_set_csi_para_reg(struct rtw89_dev *rtwdev, FIELD_PREP(B_AX_BFMEE_CSIINFO0_STBC_EN, stbc_en); if (port_sel == 0) - reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); else - reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_1, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_CTRL_1, mac_idx); rtw89_write16(rtwdev, reg, val); return 0; } -static int rtw89_mac_csi_rrsc(struct rtw89_dev *rtwdev, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) +static int rtw89_mac_csi_rrsc_ax(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) { struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; u32 rrsc = BIT(RTW89_MAC_BF_RRSC_6M) | BIT(RTW89_MAC_BF_RRSC_24M); @@ -5305,27 +5357,28 @@ static int rtw89_mac_csi_rrsc(struct rtw89_dev *rtwdev, BIT(RTW89_MAC_BF_RRSC_HT_MSC3) | BIT(RTW89_MAC_BF_RRSC_HT_MSC5)); } - reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL); rtw89_write32_clr(rtwdev, reg, B_AX_BFMEE_CSI_FORCE_RETE_EN); rtw89_write32(rtwdev, - rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx), + rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx), rrsc); return 0; } -void rtw89_mac_bf_assoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) +static void rtw89_mac_bf_assoc_ax(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) { struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; if (rtw89_sta_has_beamformer_cap(sta)) { rtw89_debug(rtwdev, 
RTW89_DBG_BF, "initialize bfee for new association\n"); - rtw89_mac_init_bfee(rtwdev, rtwvif->mac_idx); - rtw89_mac_set_csi_para_reg(rtwdev, vif, sta); - rtw89_mac_csi_rrsc(rtwdev, vif, sta); + rtw89_mac_init_bfee_ax(rtwdev, rtwvif->mac_idx); + rtw89_mac_set_csi_para_reg_ax(rtwdev, vif, sta); + rtw89_mac_csi_rrsc_ax(rtwdev, vif, sta); } } @@ -5347,19 +5400,21 @@ void rtw89_mac_bf_set_gid_table(struct rtw89_dev *rtwdev, struct ieee80211_vif * rtw89_debug(rtwdev, RTW89_DBG_BF, "update bf GID table\n"); p = (__le32 *)conf->mu_group.membership; - rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION_EN0, mac_idx), + rtw89_write32(rtwdev, + rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION_EN0, mac_idx), le32_to_cpu(p[0])); - rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION_EN1, mac_idx), + rtw89_write32(rtwdev, + rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION_EN1, mac_idx), le32_to_cpu(p[1])); p = (__le32 *)conf->mu_group.position; - rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION0, mac_idx), + rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION0, mac_idx), le32_to_cpu(p[0])); - rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION1, mac_idx), + rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION1, mac_idx), le32_to_cpu(p[1])); - rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION2, mac_idx), + rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION2, mac_idx), le32_to_cpu(p[2])); - rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION3, mac_idx), + rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION3, mac_idx), le32_to_cpu(p[3])); } @@ -5450,7 +5505,7 @@ __rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, return ret; } - reg = rtw89_mac_reg_by_idx(R_AX_AMPDU_AGG_LIMIT, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_AMPDU_AGG_LIMIT, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK, max_tx_time >> 5); } @@ -5490,7 +5545,7 @@ int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, return ret; } - reg = rtw89_mac_reg_by_idx(R_AX_AMPDU_AGG_LIMIT, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_AMPDU_AGG_LIMIT, mac_idx); *tx_time = rtw89_read32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK) << 5; } @@ -5532,7 +5587,7 @@ int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev, return ret; } - reg = rtw89_mac_reg_by_idx(R_AX_TXCNT, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXCNT, mac_idx); *tx_retry = rtw89_read32_mask(rtwdev, reg, B_AX_L_TXCNT_LMT_MASK); } @@ -5542,8 +5597,9 @@ int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev, int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool en) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; u8 mac_idx = rtwvif->mac_idx; - u16 set = B_AX_MUEDCA_EN_0 | B_AX_SET_MUEDCATIMER_TF_0; + u16 set = mac->muedca_ctrl.mask; u32 reg; u32 ret; @@ -5551,7 +5607,7 @@ int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev, if (ret) return ret; - reg = rtw89_mac_reg_by_idx(R_AX_MUEDCA_EN, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, mac->muedca_ctrl.addr, mac_idx); if (en) rtw89_write16_set(rtwdev, reg, set); else @@ -5674,3 +5730,52 @@ int rtw89_mac_ptk_drop_by_band_and_wait(struct rtw89_dev *rtwdev, } return ret; } + +static u8 rtw89_fw_get_rdy_ax(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type) +{ + u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL); + + return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val); +} + 
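rtw89_fwdl_check_path_ready_ax() below leans on read_poll_timeout_atomic(), sampling R_AX_WCPU_FW_CTRL once a microsecond for up to FWDL_WAIT_CNT microseconds. Hand-expanded, it behaves roughly like this (illustration only; the real macro also tracks elapsed time more precisely):

static int fwdl_poll_by_hand_example(struct rtw89_dev *rtwdev, u8 check)
{
	u32 waited_us;
	u8 val;

	for (waited_us = 0; waited_us < FWDL_WAIT_CNT; waited_us++) {
		val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL);
		if (val & check)
			return 0;
		udelay(1);	/* linux/delay.h */
	}

	return -ETIMEDOUT;	/* matches read_poll_timeout_atomic() */
}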
+static +int rtw89_fwdl_check_path_ready_ax(struct rtw89_dev *rtwdev, + bool h2c_or_fwdl) +{ + u8 check = h2c_or_fwdl ? B_AX_H2C_PATH_RDY : B_AX_FWDL_PATH_RDY; + u8 val; + + return read_poll_timeout_atomic(rtw89_read8, val, val & check, + 1, FWDL_WAIT_CNT, false, + rtwdev, R_AX_WCPU_FW_CTRL); +} + +const struct rtw89_mac_gen_def rtw89_mac_gen_ax = { + .band1_offset = RTW89_MAC_AX_BAND_REG_OFFSET, + .filter_model_addr = R_AX_FILTER_MODEL_ADDR, + .indir_access_addr = R_AX_INDIR_ACCESS_ENTRY, + .mem_base_addrs = rtw89_mac_mem_base_addrs_ax, + .rx_fltr = R_AX_RX_FLTR_OPT, + .port_base = &rtw89_port_base_ax, + .agg_len_ht = R_AX_AGG_LEN_HT_0, + + .muedca_ctrl = { + .addr = R_AX_MUEDCA_EN, + .mask = B_AX_MUEDCA_EN_0 | B_AX_SET_MUEDCATIMER_TF_0, + }, + .bfee_ctrl = { + .addr = R_AX_BFMEE_RESP_OPTION, + .mask = B_AX_BFMEE_HT_NDPA_EN | B_AX_BFMEE_VHT_NDPA_EN | + B_AX_BFMEE_HE_NDPA_EN, + }, + + .bf_assoc = rtw89_mac_bf_assoc_ax, + + .disable_cpu = rtw89_mac_disable_cpu_ax, + .fwdl_enable_wcpu = rtw89_mac_enable_cpu_ax, + .fwdl_get_status = rtw89_fw_get_rdy_ax, + .fwdl_check_path_ready = rtw89_fwdl_check_path_ready_ax, + + .get_txpwr_cr = rtw89_mac_get_txpwr_cr_ax, +}; +EXPORT_SYMBOL(rtw89_mac_gen_ax); diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h index 0e1570451c2c..c11c904f87fe 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.h +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ -275,6 +275,7 @@ enum rtw89_mac_dbg_port_sel { /* SRAM mem dump */ #define R_AX_INDIR_ACCESS_ENTRY 0x40000 +#define R_BE_INDIR_ACCESS_ENTRY 0x80000 #define AXIDMA_BASE_ADDR 0x18006000 #define STA_SCHED_BASE_ADDR 0x18808000 @@ -298,6 +299,31 @@ enum rtw89_mac_dbg_port_sel { #define TXDATA_FIFO_1_BASE_ADDR 0x188A1000 #define CPU_LOCAL_BASE_ADDR 0x18003000 +#define WD_PAGE_BASE_ADDR_BE 0x0 +#define CPU_LOCAL_BASE_ADDR_BE 0x18003000 +#define AXIDMA_BASE_ADDR_BE 0x18006000 +#define SHARED_BUF_BASE_ADDR_BE 0x18700000 +#define DMAC_TBL_BASE_ADDR_BE 0x18800000 +#define SHCUT_MACHDR_BASE_ADDR_BE 0x18800800 +#define STA_SCHED_BASE_ADDR_BE 0x18818000 +#define NAT25_CAM_BASE_ADDR_BE 0x18820000 +#define RXPLD_FLTR_CAM_BASE_ADDR_BE 0x18823000 +#define SEC_CAM_BASE_ADDR_BE 0x18824000 +#define WOW_CAM_BASE_ADDR_BE 0x18828000 +#define MLD_TBL_BASE_ADDR_BE 0x18829000 +#define RX_CLSF_CAM_BASE_ADDR_BE 0x1882A000 +#define CMAC_TBL_BASE_ADDR_BE 0x18840000 +#define ADDR_CAM_BASE_ADDR_BE 0x18850000 +#define BSSID_CAM_BASE_ADDR_BE 0x18858000 +#define BA_CAM_BASE_ADDR_BE 0x18859000 +#define BCN_IE_CAM0_BASE_ADDR_BE 0x18860000 +#define TXDATA_FIFO_0_BASE_ADDR_BE 0x18861000 +#define TXD_FIFO_0_BASE_ADDR_BE 0x18862000 +#define BCN_IE_CAM1_BASE_ADDR_BE 0x18880000 +#define TXDATA_FIFO_1_BASE_ADDR_BE 0x18881000 +#define TXD_FIFO_1_BASE_ADDR_BE 0x18881800 +#define DCPU_LOCAL_BASE_ADDR_BE 0x19C02000 + #define CCTL_INFO_SIZE 32 enum rtw89_mac_mem_sel { @@ -322,13 +348,12 @@ enum rtw89_mac_mem_sel { RTW89_MAC_MEM_BSSID_CAM, RTW89_MAC_MEM_TXD_FIFO_0_V1, RTW89_MAC_MEM_TXD_FIFO_1_V1, + RTW89_MAC_MEM_WD_PAGE, /* keep last */ RTW89_MAC_MEM_NUM, }; -extern const u32 rtw89_mac_mem_base_addrs[]; - enum rtw89_rpwm_req_pwr_state { RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE = 0, RTW89_MAC_RPWM_REQ_PWR_STATE_BAND0_RFON = 1, @@ -478,6 +503,7 @@ enum rtw89_mac_bf_rrsc_rate { ({typeof(_addr) __addr = (_addr); \ __addr >= R_AX_CMAC_REG_START && __addr <= R_AX_CMAC_REG_END; }) #define RTW89_MAC_AX_BAND_REG_OFFSET 0x2000 +#define RTW89_MAC_BE_BAND_REG_OFFSET 0x4000 #define PTCL_IDLE_POLL_CNT 10000 #define SW_CVR_DUR_US 8 @@ -826,14 
+852,47 @@ struct rtw89_mac_size_set { extern const struct rtw89_mac_size_set rtw89_mac_size; -static inline u32 rtw89_mac_reg_by_idx(u32 reg_base, u8 band) +struct rtw89_mac_gen_def { + u32 band1_offset; + u32 filter_model_addr; + u32 indir_access_addr; + const u32 *mem_base_addrs; + u32 rx_fltr; + const struct rtw89_port_reg *port_base; + u32 agg_len_ht; + + struct rtw89_reg_def muedca_ctrl; + struct rtw89_reg_def bfee_ctrl; + + void (*bf_assoc)(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + + void (*disable_cpu)(struct rtw89_dev *rtwdev); + int (*fwdl_enable_wcpu)(struct rtw89_dev *rtwdev, u8 boot_reason, + bool dlfw, bool include_bb); + u8 (*fwdl_get_status)(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type); + int (*fwdl_check_path_ready)(struct rtw89_dev *rtwdev, bool h2c_or_fwdl); + + bool (*get_txpwr_cr)(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, + u32 reg_base, u32 *cr); +}; + +extern const struct rtw89_mac_gen_def rtw89_mac_gen_ax; +extern const struct rtw89_mac_gen_def rtw89_mac_gen_be; + +static inline +u32 rtw89_mac_reg_by_idx(struct rtw89_dev *rtwdev, u32 reg_base, u8 band) { - return band == 0 ? reg_base : (reg_base + 0x2000); + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + + return band == 0 ? reg_base : (reg_base + mac->band1_offset); } -static inline u32 rtw89_mac_reg_by_port(u32 base, u8 port, u8 mac_idx) +static inline +u32 rtw89_mac_reg_by_port(struct rtw89_dev *rtwdev, u32 base, u8 port, u8 mac_idx) { - return rtw89_mac_reg_by_idx(base + port * 0x40, mac_idx); + return rtw89_mac_reg_by_idx(rtwdev, base + port * 0x40, mac_idx); } static inline u32 @@ -841,7 +900,7 @@ rtw89_read32_port(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, u32 base) { u32 reg; - reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx); return rtw89_read32(rtwdev, reg); } @@ -851,7 +910,7 @@ rtw89_read32_port_mask(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, { u32 reg; - reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx); return rtw89_read32_mask(rtwdev, reg, mask); } @@ -861,7 +920,7 @@ rtw89_write32_port(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, u32 base, { u32 reg; - reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx); rtw89_write32(rtwdev, reg, data); } @@ -871,7 +930,7 @@ rtw89_write32_port_mask(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, { u32 reg; - reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx); rtw89_write32_mask(rtwdev, reg, mask, data); } @@ -881,7 +940,7 @@ rtw89_write16_port_mask(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, { u32 reg; - reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx); rtw89_write16_mask(rtwdev, reg, mask, data); } @@ -891,7 +950,7 @@ rtw89_write32_port_clr(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, { u32 reg; - reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx); rtw89_write32_clr(rtwdev, reg, bit); } @@ -901,7 +960,7 @@ rtw89_write16_port_clr(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, { u32 reg; - reg = 
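With band1_offset carried in the ops table, the long-standing hard-coded "+ 0x2000" for band 1 becomes per-generation data: RTW89_MAC_AX_BAND_REG_OFFSET (0x2000) for AX and RTW89_MAC_BE_BAND_REG_OFFSET (0x4000) for BE. The invariant the new inline provides, in sketch form:

static void band_offset_example(struct rtw89_dev *rtwdev)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	/* MAC 0: the base address itself */
	u32 b0 = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0);
	/* MAC 1: +0x2000 on AX, +0x4000 on BE, from band1_offset */
	u32 b1 = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_1);

	WARN_ON(b1 - b0 != mac->band1_offset);
}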
rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx); rtw89_write16_clr(rtwdev, reg, bit); } @@ -911,12 +970,12 @@ rtw89_write32_port_set(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, { u32 reg; - reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx); + reg = rtw89_mac_reg_by_port(rtwdev, base, rtwvif->port, rtwvif->mac_idx); rtw89_write32_set(rtwdev, reg, bit); } void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev); -int rtw89_mac_partial_init(struct rtw89_dev *rtwdev); +int rtw89_mac_partial_init(struct rtw89_dev *rtwdev, bool include_bb); int rtw89_mac_init(struct rtw89_dev *rtwdev); int rtw89_mac_check_mac_en(struct rtw89_dev *rtwdev, u8 band, enum rtw89_mac_hwmod_sel sel); @@ -934,8 +993,6 @@ void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif); void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif); int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *vif); -void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev); -int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason, bool dlfw); int rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev); int rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev); @@ -982,13 +1039,19 @@ u32 rtw89_mac_get_sb(struct rtw89_dev *rtwdev); bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev); int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl); int rtw89_mac_cfg_ctrl_path_v1(struct rtw89_dev *rtwdev, bool wl); -bool rtw89_mac_get_txpwr_cr(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx, - u32 reg_base, u32 *cr); void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter); void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev); + +static inline void rtw89_mac_bf_assoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); + struct ieee80211_sta *sta) +{ + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + + if (mac->bf_assoc) + mac->bf_assoc(rtwdev, vif, sta); +} + void rtw89_mac_bf_disassoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void rtw89_mac_bf_set_gid_table(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, @@ -996,6 +1059,7 @@ void rtw89_mac_bf_set_gid_table(struct rtw89_dev *rtwdev, struct ieee80211_vif * void rtw89_mac_bf_monitor_calc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta, bool disconnect); void _rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev); +void rtw89_mac_bfee_ctrl(struct rtw89_dev *rtwdev, u8 mac_idx, bool en); int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif); int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif); int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev, @@ -1004,6 +1068,9 @@ int rtw89_mac_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause); static inline void rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev) { + if (rtwdev->chip->chip_gen != RTW89_CHIP_AX) + return; + if (!test_bit(RTW89_FLAG_BFEE_MON, rtwdev->flags)) return; @@ -1014,9 +1081,10 @@ static inline int rtw89_mac_txpwr_read32(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u32 reg_base, u32 *val) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; u32 cr; - if (!rtw89_mac_get_txpwr_cr(rtwdev, phy_idx, reg_base, &cr)) + if (!mac->get_txpwr_cr(rtwdev, phy_idx, reg_base, &cr)) return -EINVAL; *val = rtw89_read32(rtwdev, cr); @@ -1027,9 +1095,10 @@ static inline int 
rtw89_mac_txpwr_write32(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u32 reg_base, u32 val) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; u32 cr; - if (!rtw89_mac_get_txpwr_cr(rtwdev, phy_idx, reg_base, &cr)) + if (!mac->get_txpwr_cr(rtwdev, phy_idx, reg_base, &cr)) return -EINVAL; rtw89_write32(rtwdev, cr, val); @@ -1040,9 +1109,10 @@ static inline int rtw89_mac_txpwr_write32_mask(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u32 reg_base, u32 mask, u32 val) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; u32 cr; - if (!rtw89_mac_get_txpwr_cr(rtwdev, phy_idx, reg_base, &cr)) + if (!mac->get_txpwr_cr(rtwdev, phy_idx, reg_base, &cr)) return -EINVAL; rtw89_write32_mask(rtwdev, cr, mask, val); diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c index a66503eb35b8..31d1f7891675 100644 --- a/drivers/net/wireless/realtek/rtw89/mac80211.c +++ b/drivers/net/wireless/realtek/rtw89/mac80211.c @@ -145,6 +145,7 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw, rtwvif->mac_idx = RTW89_MAC_0; rtwvif->phy_idx = RTW89_PHY_0; rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0; + rtwvif->chanctx_assigned = false; rtwvif->hit_rule = 0; rtwvif->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT; ether_addr_copy(rtwvif->mac_addr, vif->addr); @@ -224,6 +225,7 @@ static void rtw89_ops_configure_filter(struct ieee80211_hw *hw, u64 multicast) { struct rtw89_dev *rtwdev = hw->priv; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; mutex_lock(&rtwdev->mutex); rtw89_leave_ps_mode(rtwdev); @@ -271,13 +273,13 @@ static void rtw89_ops_configure_filter(struct ieee80211_hw *hw, } rtw89_write32_mask(rtwdev, - rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0), + rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr); if (!rtwdev->dbcc_en) goto out; rtw89_write32_mask(rtwdev, - rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_1), + rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_1), B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr); @@ -296,7 +298,8 @@ static u8 rtw89_aifsn_to_aifs(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, u8 aifsn) { struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, + rtwvif->sub_entity_idx); u8 slot_time; u8 sifs; @@ -325,11 +328,14 @@ static void ____rtw89_conf_tx_edca(struct rtw89_dev *rtwdev, rtw89_fw_h2c_set_edca(rtwdev, rtwvif, ac_to_fw_idx[ac], val); } -static const u32 ac_to_mu_edca_param[IEEE80211_NUM_ACS] = { - [IEEE80211_AC_VO] = R_AX_MUEDCA_VO_PARAM_0, - [IEEE80211_AC_VI] = R_AX_MUEDCA_VI_PARAM_0, - [IEEE80211_AC_BE] = R_AX_MUEDCA_BE_PARAM_0, - [IEEE80211_AC_BK] = R_AX_MUEDCA_BK_PARAM_0, +#define R_MUEDCA_ACS_PARAM(acs) {R_AX_MUEDCA_ ## acs ## _PARAM_0, \ + R_BE_MUEDCA_ ## acs ## _PARAM_0} + +static const u32 ac_to_mu_edca_param[IEEE80211_NUM_ACS][RTW89_CHIP_GEN_NUM] = { + [IEEE80211_AC_VO] = R_MUEDCA_ACS_PARAM(VO), + [IEEE80211_AC_VI] = R_MUEDCA_ACS_PARAM(VI), + [IEEE80211_AC_BE] = R_MUEDCA_ACS_PARAM(BE), + [IEEE80211_AC_BK] = R_MUEDCA_ACS_PARAM(BK), }; static void ____rtw89_conf_tx_mu_edca(struct rtw89_dev *rtwdev, @@ -337,6 +343,7 @@ static void ____rtw89_conf_tx_mu_edca(struct rtw89_dev *rtwdev, { struct ieee80211_tx_queue_params *params = &rtwvif->tx_params[ac]; struct ieee80211_he_mu_edca_param_ac_rec *mu_edca; + int gen = rtwdev->chip->chip_gen; u8 aifs, aifsn; u16 timer_32us; u32 
reg; @@ -353,7 +360,7 @@ static void ____rtw89_conf_tx_mu_edca(struct rtw89_dev *rtwdev, val = FIELD_PREP(B_AX_MUEDCA_BE_PARAM_0_TIMER_MASK, timer_32us) | FIELD_PREP(B_AX_MUEDCA_BE_PARAM_0_CW_MASK, mu_edca->ecw_min_max) | FIELD_PREP(B_AX_MUEDCA_BE_PARAM_0_AIFS_MASK, aifs); - reg = rtw89_mac_reg_by_idx(ac_to_mu_edca_param[ac], rtwvif->mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, ac_to_mu_edca_param[ac][gen], rtwvif->mac_idx); rtw89_write32(rtwdev, reg, val); rtw89_mac_set_hw_muedca_ctrl(rtwdev, rtwvif, true); @@ -413,6 +420,8 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw, rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, vif); rtw89_mac_port_update(rtwdev, rtwvif); rtw89_mac_set_he_obss_narrow_bw_ru(rtwdev, vif); + + rtw89_queue_chanctx_work(rtwdev); } else { /* Abort ongoing scan if cancel_scan isn't issued * when disconnected by peer @@ -441,7 +450,7 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw, rtw89_mac_bf_set_gid_table(rtwdev, vif, conf); if (changed & BSS_CHANGED_P2P_PS) - rtw89_process_p2p_ps(rtwdev, vif); + rtw89_core_update_p2p_ps(rtwdev, vif); if (changed & BSS_CHANGED_CQM) rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true); @@ -476,6 +485,8 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw, rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true); rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL); rtw89_chip_rfk_channel(rtwdev); + + rtw89_queue_chanctx_work(rtwdev); mutex_unlock(&rtwdev->mutex); return 0; diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c new file mode 100644 index 000000000000..3278f241db6e --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/mac_be.c @@ -0,0 +1,435 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2019-2020 Realtek Corporation + */ + +#include "debug.h" +#include "fw.h" +#include "mac.h" +#include "reg.h" + +static const u32 rtw89_mac_mem_base_addrs_be[RTW89_MAC_MEM_NUM] = { + [RTW89_MAC_MEM_AXIDMA] = AXIDMA_BASE_ADDR_BE, + [RTW89_MAC_MEM_SHARED_BUF] = SHARED_BUF_BASE_ADDR_BE, + [RTW89_MAC_MEM_DMAC_TBL] = DMAC_TBL_BASE_ADDR_BE, + [RTW89_MAC_MEM_SHCUT_MACHDR] = SHCUT_MACHDR_BASE_ADDR_BE, + [RTW89_MAC_MEM_STA_SCHED] = STA_SCHED_BASE_ADDR_BE, + [RTW89_MAC_MEM_RXPLD_FLTR_CAM] = RXPLD_FLTR_CAM_BASE_ADDR_BE, + [RTW89_MAC_MEM_SECURITY_CAM] = SEC_CAM_BASE_ADDR_BE, + [RTW89_MAC_MEM_WOW_CAM] = WOW_CAM_BASE_ADDR_BE, + [RTW89_MAC_MEM_CMAC_TBL] = CMAC_TBL_BASE_ADDR_BE, + [RTW89_MAC_MEM_ADDR_CAM] = ADDR_CAM_BASE_ADDR_BE, + [RTW89_MAC_MEM_BA_CAM] = BA_CAM_BASE_ADDR_BE, + [RTW89_MAC_MEM_BCN_IE_CAM0] = BCN_IE_CAM0_BASE_ADDR_BE, + [RTW89_MAC_MEM_BCN_IE_CAM1] = BCN_IE_CAM1_BASE_ADDR_BE, + [RTW89_MAC_MEM_TXD_FIFO_0] = TXD_FIFO_0_BASE_ADDR_BE, + [RTW89_MAC_MEM_TXD_FIFO_1] = TXD_FIFO_1_BASE_ADDR_BE, + [RTW89_MAC_MEM_TXDATA_FIFO_0] = TXDATA_FIFO_0_BASE_ADDR_BE, + [RTW89_MAC_MEM_TXDATA_FIFO_1] = TXDATA_FIFO_1_BASE_ADDR_BE, + [RTW89_MAC_MEM_CPU_LOCAL] = CPU_LOCAL_BASE_ADDR_BE, + [RTW89_MAC_MEM_BSSID_CAM] = BSSID_CAM_BASE_ADDR_BE, + [RTW89_MAC_MEM_WD_PAGE] = WD_PAGE_BASE_ADDR_BE, +}; + +static const struct rtw89_port_reg rtw89_port_base_be = { + .port_cfg = R_BE_PORT_CFG_P0, + .tbtt_prohib = R_BE_TBTT_PROHIB_P0, + .bcn_area = R_BE_BCN_AREA_P0, + .bcn_early = R_BE_BCNERLYINT_CFG_P0, + .tbtt_early = R_BE_TBTTERLYINT_CFG_P0, + .tbtt_agg = R_BE_TBTT_AGG_P0, + .bcn_space = R_BE_BCN_SPACE_CFG_P0, + .bcn_forcetx = R_BE_BCN_FORCETX_P0, + .bcn_err_cnt = R_BE_BCN_ERR_CNT_P0, + .bcn_err_flag = R_BE_BCN_ERR_FLAG_P0, + .dtim_ctrl = R_BE_DTIM_CTRL_P0, + .tbtt_shift = 
R_BE_TBTT_SHIFT_P0, + .bcn_cnt_tmr = R_BE_BCN_CNT_TMR_P0, + .tsftr_l = R_BE_TSFTR_LOW_P0, + .tsftr_h = R_BE_TSFTR_HIGH_P0, + .md_tsft = R_BE_WMTX_MOREDATA_TSFT_STMP_CTL, + .bss_color = R_BE_PTCL_BSS_COLOR_0, + .mbssid = R_BE_MBSSID_CTRL, + .mbssid_drop = R_BE_MBSSID_DROP_0, + .tsf_sync = R_BE_PORT_0_TSF_SYNC, + .hiq_win = {R_BE_P0MB_HGQ_WINDOW_CFG_0, R_BE_PORT_HGQ_WINDOW_CFG, + R_BE_PORT_HGQ_WINDOW_CFG + 1, R_BE_PORT_HGQ_WINDOW_CFG + 2, + R_BE_PORT_HGQ_WINDOW_CFG + 3}, +}; + +static void rtw89_mac_disable_cpu_be(struct rtw89_dev *rtwdev) +{ + u32 val32; + + clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); + + rtw89_write32_clr(rtwdev, R_BE_PLATFORM_ENABLE, B_BE_WCPU_EN); + rtw89_write32_set(rtwdev, R_BE_PLATFORM_ENABLE, B_BE_HOLD_AFTER_RESET); + rtw89_write32_set(rtwdev, R_BE_PLATFORM_ENABLE, B_BE_WCPU_EN); + + val32 = rtw89_read32(rtwdev, R_BE_WCPU_FW_CTRL); + val32 &= B_BE_RUN_ENV_MASK; + rtw89_write32(rtwdev, R_BE_WCPU_FW_CTRL, val32); + + rtw89_write32_set(rtwdev, R_BE_DCPU_PLATFORM_ENABLE, B_BE_DCPU_PLATFORM_EN); + + rtw89_write32(rtwdev, R_BE_UDM0, 0); + rtw89_write32(rtwdev, R_BE_HALT_C2H, 0); + rtw89_write32(rtwdev, R_BE_UDM2, 0); +} + +static void set_cpu_en(struct rtw89_dev *rtwdev, bool include_bb) +{ + u32 set = B_BE_WLANCPU_FWDL_EN; + + if (include_bb) + set |= B_BE_BBMCU0_FWDL_EN; + + rtw89_write32_set(rtwdev, R_BE_WCPU_FW_CTRL, set); +} + +static int wcpu_on(struct rtw89_dev *rtwdev, u8 boot_reason, bool dlfw) +{ + u32 val32; + int ret; + + rtw89_write32_set(rtwdev, R_BE_UDM0, B_BE_UDM0_DBG_MODE_CTRL); + + val32 = rtw89_read32(rtwdev, R_BE_HALT_C2H); + if (val32) { + rtw89_warn(rtwdev, "[SER] AON L2 Debug register not empty before Boot.\n"); + rtw89_warn(rtwdev, "[SER] %s: R_BE_HALT_C2H = 0x%x\n", __func__, val32); + } + val32 = rtw89_read32(rtwdev, R_BE_UDM1); + if (val32) { + rtw89_warn(rtwdev, "[SER] AON L2 Debug register not empty before Boot.\n"); + rtw89_warn(rtwdev, "[SER] %s: R_BE_UDM1 = 0x%x\n", __func__, val32); + } + val32 = rtw89_read32(rtwdev, R_BE_UDM2); + if (val32) { + rtw89_warn(rtwdev, "[SER] AON L2 Debug register not empty before Boot.\n"); + rtw89_warn(rtwdev, "[SER] %s: R_BE_UDM2 = 0x%x\n", __func__, val32); + } + + rtw89_write32(rtwdev, R_BE_UDM1, 0); + rtw89_write32(rtwdev, R_BE_UDM2, 0); + rtw89_write32(rtwdev, R_BE_HALT_H2C, 0); + rtw89_write32(rtwdev, R_BE_HALT_C2H, 0); + rtw89_write32(rtwdev, R_BE_HALT_H2C_CTRL, 0); + rtw89_write32(rtwdev, R_BE_HALT_C2H_CTRL, 0); + + rtw89_write32_set(rtwdev, R_BE_SYS_CLK_CTRL, B_BE_CPU_CLK_EN); + rtw89_write32_clr(rtwdev, R_BE_SYS_CFG5, + B_BE_WDT_WAKE_PCIE_EN | B_BE_WDT_WAKE_USB_EN); + rtw89_write32_clr(rtwdev, R_BE_WCPU_FW_CTRL, + B_BE_WDT_PLT_RST_EN | B_BE_WCPU_ROM_CUT_GET); + + rtw89_write16_mask(rtwdev, R_BE_BOOT_REASON, B_BE_BOOT_REASON_MASK, boot_reason); + rtw89_write32_clr(rtwdev, R_BE_PLATFORM_ENABLE, B_BE_WCPU_EN); + rtw89_write32_clr(rtwdev, R_BE_PLATFORM_ENABLE, B_BE_HOLD_AFTER_RESET); + rtw89_write32_set(rtwdev, R_BE_PLATFORM_ENABLE, B_BE_WCPU_EN); + + if (!dlfw) { + ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE); + if (ret) + return ret; + } + + return 0; +} + +static int rtw89_mac_fwdl_enable_wcpu_be(struct rtw89_dev *rtwdev, + u8 boot_reason, bool dlfw, + bool include_bb) +{ + set_cpu_en(rtwdev, include_bb); + + return wcpu_on(rtwdev, boot_reason, dlfw); +} + +static const u8 fwdl_status_map[] = { + [0] = RTW89_FWDL_INITIAL_STATE, + [1] = RTW89_FWDL_FWDL_ONGOING, + [4] = RTW89_FWDL_CHECKSUM_FAIL, + [5] = RTW89_FWDL_SECURITY_FAIL, + [6] = RTW89_FWDL_SECURITY_FAIL, + [7] = 
RTW89_FWDL_CV_NOT_MATCH, + [8] = RTW89_FWDL_RSVD0, + [2] = RTW89_FWDL_WCPU_FWDL_RDY, + [3] = RTW89_FWDL_WCPU_FW_INIT_RDY, + [9] = RTW89_FWDL_RSVD0, +}; + +static u8 fwdl_get_status_be(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type) +{ + bool check_pass = false; + u32 val32; + u8 st; + + val32 = rtw89_read32(rtwdev, R_BE_WCPU_FW_CTRL); + + switch (type) { + case RTW89_FWDL_CHECK_WCPU_FWDL_DONE: + check_pass = !(val32 & B_BE_WLANCPU_FWDL_EN); + break; + case RTW89_FWDL_CHECK_DCPU_FWDL_DONE: + check_pass = !(val32 & B_BE_DATACPU_FWDL_EN); + break; + case RTW89_FWDL_CHECK_BB0_FWDL_DONE: + check_pass = !(val32 & B_BE_BBMCU0_FWDL_EN); + break; + case RTW89_FWDL_CHECK_BB1_FWDL_DONE: + check_pass = !(val32 & B_BE_BBMCU1_FWDL_EN); + break; + default: + break; + } + + if (check_pass) + return RTW89_FWDL_WCPU_FW_INIT_RDY; + + st = u32_get_bits(val32, B_BE_WCPU_FWDL_STATUS_MASK); + if (st < ARRAY_SIZE(fwdl_status_map)) + return fwdl_status_map[st]; + + return st; +} + +static int rtw89_fwdl_check_path_ready_be(struct rtw89_dev *rtwdev, + bool h2c_or_fwdl) +{ + u32 check = h2c_or_fwdl ? B_BE_H2C_PATH_RDY : B_BE_DLFW_PATH_RDY; + u32 val; + + return read_poll_timeout_atomic(rtw89_read32, val, val & check, + 1, 1000000, false, + rtwdev, R_BE_WCPU_FW_CTRL); +} + +static bool rtw89_mac_get_txpwr_cr_be(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, + u32 reg_base, u32 *cr) +{ + const struct rtw89_dle_mem *dle_mem = rtwdev->chip->dle_mem; + enum rtw89_qta_mode mode = dle_mem->mode; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, (enum rtw89_mac_idx)phy_idx, + RTW89_CMAC_SEL); + if (ret) { + if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) + return false; + + rtw89_err(rtwdev, "[TXPWR] check mac enable failed\n"); + return false; + } + + if (reg_base < R_BE_PWR_MODULE || reg_base > R_BE_CMAC_FUNC_EN_C1) { + rtw89_err(rtwdev, "[TXPWR] reg_base=0x%x exceed txpwr cr\n", + reg_base); + return false; + } + + *cr = rtw89_mac_reg_by_idx(rtwdev, reg_base, phy_idx); + + if (*cr >= CMAC1_START_ADDR_BE && *cr <= CMAC1_END_ADDR_BE) { + if (mode == RTW89_QTA_SCC) { + rtw89_err(rtwdev, + "[TXPWR] addr=0x%x but hw not enable\n", + *cr); + return false; + } + } + + return true; +} + +static int rtw89_mac_init_bfee_be(struct rtw89_dev *rtwdev, u8 mac_idx) +{ + u32 reg; + u32 val; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) + return ret; + + rtw89_mac_bfee_ctrl(rtwdev, mac_idx, true); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); + rtw89_write32_set(rtwdev, reg, B_BE_BFMEE_BFPARAM_SEL | + B_BE_BFMEE_USE_NSTS | + B_BE_BFMEE_CSI_GID_SEL | + B_BE_BFMEE_CSI_FORCE_RETE_EN); + rtw89_write32_mask(rtwdev, reg, B_BE_BFMEE_CSI_RSC_MASK, CSI_RX_BW_CFG); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CSIRPT_OPTION, mac_idx); + rtw89_write32_set(rtwdev, reg, B_BE_CSIPRT_VHTSU_AID_EN | + B_BE_CSIPRT_HESU_AID_EN | + B_BE_CSIPRT_EHTSU_AID_EN); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_CSI_RRSC, mac_idx); + rtw89_write32(rtwdev, reg, CSI_RRSC_BMAP_BE); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_CSI_CTRL_1, mac_idx); + rtw89_write32_mask(rtwdev, reg, B_BE_BFMEE_BE_CSI_RRSC_BITMAP_MASK, + CSI_RRSC_BITMAP_CFG); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_CSI_RATE, mac_idx); + val = u32_encode_bits(CSI_INIT_RATE_HT, B_BE_BFMEE_HT_CSI_RATE_MASK) | + u32_encode_bits(CSI_INIT_RATE_VHT, B_BE_BFMEE_VHT_CSI_RATE_MASK) | + u32_encode_bits(CSI_INIT_RATE_HE, B_BE_BFMEE_HE_CSI_RATE_MASK) | + 
u32_encode_bits(CSI_INIT_RATE_EHT, B_BE_BFMEE_EHT_CSI_RATE_MASK); + + rtw89_write32(rtwdev, reg, val); + + return 0; +} + +static int rtw89_mac_set_csi_para_reg_be(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + u8 nc = 1, nr = 3, ng = 0, cb = 1, cs = 1, ldpc_en = 1, stbc_en = 1; + u8 mac_idx = rtwvif->mac_idx; + u8 port_sel = rtwvif->port; + u8 sound_dim = 3, t; + u8 *phy_cap; + u32 reg; + u16 val; + int ret; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) + return ret; + + phy_cap = sta->deflink.he_cap.he_cap_elem.phy_cap_info; + + if ((phy_cap[3] & IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER) || + (phy_cap[4] & IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER)) { + ldpc_en &= !!(phy_cap[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD); + stbc_en &= !!(phy_cap[2] & IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ); + t = u8_get_bits(phy_cap[5], + IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK); + sound_dim = min(sound_dim, t); + } + + if ((sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) || + (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) { + ldpc_en &= !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC); + stbc_en &= !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK); + t = u32_get_bits(sta->deflink.vht_cap.cap, + IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK); + sound_dim = min(sound_dim, t); + } + + nc = min(nc, sound_dim); + nr = min(nr, sound_dim); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); + rtw89_write32_set(rtwdev, reg, B_BE_BFMEE_BFPARAM_SEL); + + val = u16_encode_bits(nc, B_BE_BFMEE_CSIINFO0_NC_MASK) | + u16_encode_bits(nr, B_BE_BFMEE_CSIINFO0_NR_MASK) | + u16_encode_bits(ng, B_BE_BFMEE_CSIINFO0_NG_MASK) | + u16_encode_bits(cb, B_BE_BFMEE_CSIINFO0_CB_MASK) | + u16_encode_bits(cs, B_BE_BFMEE_CSIINFO0_CS_MASK) | + u16_encode_bits(ldpc_en, B_BE_BFMEE_CSIINFO0_LDPC_EN) | + u16_encode_bits(stbc_en, B_BE_BFMEE_CSIINFO0_STBC_EN); + + if (port_sel == 0) + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_CSI_CTRL_0, + mac_idx); + else + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_CSI_CTRL_1, + mac_idx); + + rtw89_write16(rtwdev, reg, val); + + return 0; +} + +static int rtw89_mac_csi_rrsc_be(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + u32 rrsc = BIT(RTW89_MAC_BF_RRSC_6M) | BIT(RTW89_MAC_BF_RRSC_24M); + u8 mac_idx = rtwvif->mac_idx; + int ret; + u32 reg; + + ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); + if (ret) + return ret; + + if (sta->deflink.he_cap.has_he) { + rrsc |= (BIT(RTW89_MAC_BF_RRSC_HE_MSC0) | + BIT(RTW89_MAC_BF_RRSC_HE_MSC3) | + BIT(RTW89_MAC_BF_RRSC_HE_MSC5)); + } + if (sta->deflink.vht_cap.vht_supported) { + rrsc |= (BIT(RTW89_MAC_BF_RRSC_VHT_MSC0) | + BIT(RTW89_MAC_BF_RRSC_VHT_MSC3) | + BIT(RTW89_MAC_BF_RRSC_VHT_MSC5)); + } + if (sta->deflink.ht_cap.ht_supported) { + rrsc |= (BIT(RTW89_MAC_BF_RRSC_HT_MSC0) | + BIT(RTW89_MAC_BF_RRSC_HT_MSC3) | + BIT(RTW89_MAC_BF_RRSC_HT_MSC5)); + } + + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); + rtw89_write32_set(rtwdev, reg, B_BE_BFMEE_BFPARAM_SEL); + rtw89_write32_clr(rtwdev, reg, B_BE_BFMEE_CSI_FORCE_RETE_EN); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_CSI_RRSC, mac_idx); + rtw89_write32(rtwdev, reg, rrsc); + + return 0; +} + +static void 
rtw89_mac_bf_assoc_be(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + + if (rtw89_sta_has_beamformer_cap(sta)) { + rtw89_debug(rtwdev, RTW89_DBG_BF, + "initialize bfee for new association\n"); + rtw89_mac_init_bfee_be(rtwdev, rtwvif->mac_idx); + rtw89_mac_set_csi_para_reg_be(rtwdev, vif, sta); + rtw89_mac_csi_rrsc_be(rtwdev, vif, sta); + } +} + +const struct rtw89_mac_gen_def rtw89_mac_gen_be = { + .band1_offset = RTW89_MAC_BE_BAND_REG_OFFSET, + .filter_model_addr = R_BE_FILTER_MODEL_ADDR, + .indir_access_addr = R_BE_INDIR_ACCESS_ENTRY, + .mem_base_addrs = rtw89_mac_mem_base_addrs_be, + .rx_fltr = R_BE_RX_FLTR_OPT, + .port_base = &rtw89_port_base_be, + .agg_len_ht = R_BE_AGG_LEN_HT_0, + + .muedca_ctrl = { + .addr = R_BE_MUEDCA_EN, + .mask = B_BE_MUEDCA_EN_0 | B_BE_SET_MUEDCATIMER_TF_0, + }, + .bfee_ctrl = { + .addr = R_BE_BFMEE_RESP_OPTION, + .mask = B_BE_BFMEE_HT_NDPA_EN | B_BE_BFMEE_VHT_NDPA_EN | + B_BE_BFMEE_HE_NDPA_EN | B_BE_BFMEE_EHT_NDPA_EN, + }, + + .bf_assoc = rtw89_mac_bf_assoc_be, + + .disable_cpu = rtw89_mac_disable_cpu_be, + .fwdl_enable_wcpu = rtw89_mac_fwdl_enable_wcpu_be, + .fwdl_get_status = fwdl_get_status_be, + .fwdl_check_path_ready = rtw89_fwdl_check_path_ready_be, + + .get_txpwr_cr = rtw89_mac_get_txpwr_cr_be, +}; +EXPORT_SYMBOL(rtw89_mac_gen_be); diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index 9402f1a0caea..14ddb0d39e63 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -1196,7 +1196,6 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; const struct rtw89_chip_info *chip = rtwdev->chip; struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; - struct rtw89_txwd_info *txwd_info; struct rtw89_pci_tx_wp_info *txwp_info; void *txaddr_info_addr; struct pci_dev *pdev = rtwpci->pdev; @@ -1222,7 +1221,7 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, txwp_len = sizeof(*txwp_info); txwd_len = chip->txwd_body_size; - txwd_len += en_wd_info ? sizeof(*txwd_info) : 0; + txwd_len += en_wd_info ? 
chip->txwd_info_size : 0; txwp_info = txwd->vaddr + txwd_len; txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID); @@ -3939,5 +3938,5 @@ void rtw89_pci_remove(struct pci_dev *pdev) EXPORT_SYMBOL(rtw89_pci_remove); MODULE_AUTHOR("Realtek Corporation"); -MODULE_DESCRIPTION("Realtek 802.11ax wireless PCI driver"); +MODULE_DESCRIPTION("Realtek PCI 802.11ax wireless driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index fb15c852fdd4..17ccc9efed28 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -88,6 +88,55 @@ static u64 get_he_ra_mask(struct ieee80211_sta *sta) return get_mcs_ra_mask(mcs_map, 11, 2); } +static u64 get_eht_mcs_ra_mask(u8 *max_nss, u8 start_mcs, u8 n_nss) +{ + u64 nss_mcs_shift; + u64 nss_mcs_val; + u64 mask = 0; + int i, j; + u8 nss; + + for (i = 0; i < n_nss; i++) { + nss = u8_get_bits(max_nss[i], IEEE80211_EHT_MCS_NSS_RX); + if (!nss) + continue; + + nss_mcs_val = GENMASK_ULL(start_mcs + i * 2, 0); + + for (j = 0, nss_mcs_shift = 12; j < nss; j++, nss_mcs_shift += 16) + mask |= nss_mcs_val << nss_mcs_shift; + } + + return mask; +} + +static u64 get_eht_ra_mask(struct ieee80211_sta *sta) +{ + struct ieee80211_sta_eht_cap *eht_cap = &sta->deflink.eht_cap; + struct ieee80211_eht_mcs_nss_supp_20mhz_only *mcs_nss_20mhz; + struct ieee80211_eht_mcs_nss_supp_bw *mcs_nss; + + switch (sta->deflink.bandwidth) { + case IEEE80211_STA_RX_BW_320: + mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._320; + /* MCS 9, 11, 13 */ + return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3); + case IEEE80211_STA_RX_BW_160: + mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._160; + /* MCS 9, 11, 13 */ + return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3); + case IEEE80211_STA_RX_BW_80: + default: + mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._80; + /* MCS 9, 11, 13 */ + return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3); + case IEEE80211_STA_RX_BW_20: + mcs_nss_20mhz = &eht_cap->eht_mcs_nss_supp.only_20mhz; + /* MCS 7, 9, 11, 13 */ + return get_eht_mcs_ra_mask(mcs_nss_20mhz->rx_tx_max_nss, 7, 4); + } +} + #define RA_FLOOR_TABLE_SIZE 7 #define RA_FLOOR_UP_GAP 3 static u64 rtw89_phy_ra_mask_rssi(struct rtw89_dev *rtwdev, u8 rssi, @@ -133,10 +182,10 @@ static u64 rtw89_phy_ra_mask_recover(u64 ra_mask, u64 ra_mask_bak) return ra_mask; } -static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta) +static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, + const struct rtw89_chan *chan) { struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta); - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); struct cfg80211_bitrate_mask *mask = &rtwsta->mask; enum nl80211_band band; u64 cfg_mask; @@ -194,12 +243,15 @@ rtw89_ra_mask_vht_rates[4] = {RA_MASK_VHT_1SS_RATES, RA_MASK_VHT_2SS_RATES, static const u64 rtw89_ra_mask_he_rates[4] = {RA_MASK_HE_1SS_RATES, RA_MASK_HE_2SS_RATES, RA_MASK_HE_3SS_RATES, RA_MASK_HE_4SS_RATES}; +static const u64 +rtw89_ra_mask_eht_rates[4] = {RA_MASK_EHT_1SS_RATES, RA_MASK_EHT_2SS_RATES, + RA_MASK_EHT_3SS_RATES, RA_MASK_EHT_4SS_RATES}; static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, + const struct rtw89_chan *chan, bool *fix_giltf_en, u8 *fix_giltf) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); struct cfg80211_bitrate_mask *mask = &rtwsta->mask; u8 band = chan->band_type; enum nl80211_band nl_band = 
rtw89_hw_to_nl80211_band(band); @@ -236,7 +288,8 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif = rtwsta->rtwvif; struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern; struct rtw89_ra_info *ra = &rtwsta->ra; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, + rtwvif->sub_entity_idx); struct ieee80211_vif *vif = rtwvif_to_vif(rtwsta->rtwvif); const u64 *high_rate_masks = rtw89_ra_mask_ht_rates; u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi); @@ -254,7 +307,11 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev, memset(ra, 0, sizeof(*ra)); /* Set the ra mask from sta's capability */ - if (sta->deflink.he_cap.has_he) { + if (sta->deflink.eht_cap.has_eht) { + mode |= RTW89_RA_MODE_EHT; + ra_mask |= get_eht_ra_mask(sta); + high_rate_masks = rtw89_ra_mask_eht_rates; + } else if (sta->deflink.he_cap.has_he) { mode |= RTW89_RA_MODE_HE; csi_mode = RTW89_RA_RPT_MODE_HE; ra_mask |= get_he_ra_mask(sta); @@ -265,7 +322,7 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev, if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD) ldpc_en = 1; - rtw89_phy_ra_gi_ltf(rtwdev, rtwsta, &fix_giltf_en, &fix_giltf); + rtw89_phy_ra_gi_ltf(rtwdev, rtwsta, chan, &fix_giltf_en, &fix_giltf); } else if (sta->deflink.vht_cap.vht_supported) { u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map); @@ -332,7 +389,7 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev, ra_mask &= rtw89_phy_ra_mask_rssi(rtwdev, rssi, 0); ra_mask = rtw89_phy_ra_mask_recover(ra_mask, ra_mask_bak); - ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta); + ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta, chan); switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_160: @@ -362,7 +419,7 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev, ra->dcm_cap = 1; if (rate_pattern->enable && !vif->p2p) { - ra_mask = rtw89_phy_ra_mask_cfg(rtwdev, rtwsta); + ra_mask = rtw89_phy_ra_mask_cfg(rtwdev, rtwsta, chan); ra_mask &= rate_pattern->ra_mask; mode = rate_pattern->ra_mode; } @@ -444,6 +501,12 @@ static bool __check_rate_pattern(struct rtw89_phy_rate_pattern *next, return true; } +#define RTW89_HW_RATE_BY_CHIP_GEN(rate) \ + { \ + [RTW89_CHIP_AX] = RTW89_HW_RATE_ ## rate, \ + [RTW89_CHIP_BE] = RTW89_HW_RATE_V1_ ## rate, \ + } + void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, const struct cfg80211_bitrate_mask *mask) @@ -451,40 +514,48 @@ void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev, struct ieee80211_supported_band *sband; struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; struct rtw89_phy_rate_pattern next_pattern = {0}; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); - static const u16 hw_rate_he[] = {RTW89_HW_RATE_HE_NSS1_MCS0, - RTW89_HW_RATE_HE_NSS2_MCS0, - RTW89_HW_RATE_HE_NSS3_MCS0, - RTW89_HW_RATE_HE_NSS4_MCS0}; - static const u16 hw_rate_vht[] = {RTW89_HW_RATE_VHT_NSS1_MCS0, - RTW89_HW_RATE_VHT_NSS2_MCS0, - RTW89_HW_RATE_VHT_NSS3_MCS0, - RTW89_HW_RATE_VHT_NSS4_MCS0}; - static const u16 hw_rate_ht[] = {RTW89_HW_RATE_MCS0, - RTW89_HW_RATE_MCS8, - RTW89_HW_RATE_MCS16, - RTW89_HW_RATE_MCS24}; + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, + rtwvif->sub_entity_idx); + static const u16 hw_rate_he[][RTW89_CHIP_GEN_NUM] = { + RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS1_MCS0), + RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS2_MCS0), + 
RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS3_MCS0), + RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS4_MCS0), + }; + static const u16 hw_rate_vht[][RTW89_CHIP_GEN_NUM] = { + RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS1_MCS0), + RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS2_MCS0), + RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS3_MCS0), + RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS4_MCS0), + }; + static const u16 hw_rate_ht[][RTW89_CHIP_GEN_NUM] = { + RTW89_HW_RATE_BY_CHIP_GEN(MCS0), + RTW89_HW_RATE_BY_CHIP_GEN(MCS8), + RTW89_HW_RATE_BY_CHIP_GEN(MCS16), + RTW89_HW_RATE_BY_CHIP_GEN(MCS24), + }; u8 band = chan->band_type; enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band); + enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; u8 tx_nss = rtwdev->hal.tx_nss; u8 i; for (i = 0; i < tx_nss; i++) - if (!__check_rate_pattern(&next_pattern, hw_rate_he[i], + if (!__check_rate_pattern(&next_pattern, hw_rate_he[i][chip_gen], RA_MASK_HE_RATES, RTW89_RA_MODE_HE, mask->control[nl_band].he_mcs[i], 0, true)) goto out; for (i = 0; i < tx_nss; i++) - if (!__check_rate_pattern(&next_pattern, hw_rate_vht[i], + if (!__check_rate_pattern(&next_pattern, hw_rate_vht[i][chip_gen], RA_MASK_VHT_RATES, RTW89_RA_MODE_VHT, mask->control[nl_band].vht_mcs[i], 0, true)) goto out; for (i = 0; i < tx_nss; i++) - if (!__check_rate_pattern(&next_pattern, hw_rate_ht[i], + if (!__check_rate_pattern(&next_pattern, hw_rate_ht[i][chip_gen], RA_MASK_HT_RATES, RTW89_RA_MODE_HT, mask->control[nl_band].ht_mcs[i], 0, true)) @@ -1342,12 +1413,16 @@ static void rtw89_phy_init_reg(struct rtw89_dev *rtwdev, void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev) { + struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; const struct rtw89_chip_info *chip = rtwdev->chip; - const struct rtw89_phy_table *bb_table = chip->bb_table; - const struct rtw89_phy_table *bb_gain_table = chip->bb_gain_table; + const struct rtw89_phy_table *bb_table; + const struct rtw89_phy_table *bb_gain_table; + bb_table = elm_info->bb_tbl ? elm_info->bb_tbl : chip->bb_table; rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg, NULL); rtw89_chip_init_txpwr_unit(rtwdev, RTW89_PHY_0); + + bb_gain_table = elm_info->bb_gain ? elm_info->bb_gain : chip->bb_gain_table; if (bb_gain_table) rtw89_phy_init_reg(rtwdev, bb_gain_table, rtw89_phy_config_bb_gain, NULL); @@ -1365,6 +1440,7 @@ void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio) { void (*config)(struct rtw89_dev *rtwdev, const struct rtw89_reg2_def *reg, enum rtw89_rf_path rf_path, void *data); + struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; const struct rtw89_chip_info *chip = rtwdev->chip; const struct rtw89_phy_table *rf_table; struct rtw89_fw_h2c_rf_reg_info *rf_reg_info; @@ -1375,7 +1451,8 @@ void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio) return; for (path = RF_PATH_A; path < chip->rf_path_num; path++) { - rf_table = chip->rf_table[path]; + rf_table = elm_info->rf_radio[path] ? 
+ elm_info->rf_radio[path] : chip->rf_table[path]; rf_reg_info->rf_path = rf_table->rf_path; if (noio) config = rtw89_phy_config_rf_reg_noio; @@ -1392,6 +1469,7 @@ void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio) static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev) { + struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; const struct rtw89_chip_info *chip = rtwdev->chip; const struct rtw89_phy_table *nctl_table; u32 val; @@ -1414,7 +1492,7 @@ static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev) if (ret) rtw89_err(rtwdev, "failed to poll nctl block\n"); - nctl_table = chip->nctl_table; + nctl_table = elm_info->rf_nctl ? elm_info->rf_nctl : chip->nctl_table; rtw89_phy_init_reg(rtwdev, nctl_table, rtw89_phy_config_bb_reg, NULL); if (chip->nctl_post_table) @@ -1426,6 +1504,9 @@ static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr) u32 phy_page = addr >> 8; u32 ofst = 0; + if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) + return addr < 0x10000 ? 0x20000 : 0; + switch (phy_page) { case 0x6: case 0x7: @@ -1494,15 +1575,15 @@ void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev, } EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl); -static const u8 rtw89_rs_idx_num[] = { +static const u8 rtw89_rs_idx_num_ax[] = { [RTW89_RS_CCK] = RTW89_RATE_CCK_NUM, [RTW89_RS_OFDM] = RTW89_RATE_OFDM_NUM, - [RTW89_RS_MCS] = RTW89_RATE_MCS_NUM, + [RTW89_RS_MCS] = RTW89_RATE_MCS_NUM_AX, [RTW89_RS_HEDCM] = RTW89_RATE_HEDCM_NUM, - [RTW89_RS_OFFSET] = RTW89_RATE_OFFSET_NUM, + [RTW89_RS_OFFSET] = RTW89_RATE_OFFSET_NUM_AX, }; -static const u8 rtw89_rs_nss_num[] = { +static const u8 rtw89_rs_nss_num_ax[] = { [RTW89_RS_CCK] = 1, [RTW89_RS_OFDM] = 1, [RTW89_RS_MCS] = RTW89_NSS_NUM, @@ -1510,68 +1591,73 @@ static const u8 rtw89_rs_nss_num[] = { [RTW89_RS_OFFSET] = 1, }; -static const u8 _byr_of_rs[] = { - [RTW89_RS_CCK] = offsetof(struct rtw89_txpwr_byrate, cck), - [RTW89_RS_OFDM] = offsetof(struct rtw89_txpwr_byrate, ofdm), - [RTW89_RS_MCS] = offsetof(struct rtw89_txpwr_byrate, mcs), - [RTW89_RS_HEDCM] = offsetof(struct rtw89_txpwr_byrate, hedcm), - [RTW89_RS_OFFSET] = offsetof(struct rtw89_txpwr_byrate, offset), -}; - -#define _byr_seek(rs, raw) ((s8 *)(raw) + _byr_of_rs[rs]) -#define _byr_idx(rs, nss, idx) ((nss) * rtw89_rs_idx_num[rs] + (idx)) -#define _byr_chk(rs, nss, idx) \ - ((nss) < rtw89_rs_nss_num[rs] && (idx) < rtw89_rs_idx_num[rs]) +s8 *rtw89_phy_raw_byr_seek(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_byrate *head, + const struct rtw89_rate_desc *desc) +{ + switch (desc->rs) { + case RTW89_RS_CCK: + return &head->cck[desc->idx]; + case RTW89_RS_OFDM: + return &head->ofdm[desc->idx]; + case RTW89_RS_MCS: + return &head->mcs[desc->ofdma][desc->nss][desc->idx]; + case RTW89_RS_HEDCM: + return &head->hedcm[desc->ofdma][desc->nss][desc->idx]; + case RTW89_RS_OFFSET: + return &head->offset[desc->idx]; + default: + rtw89_warn(rtwdev, "unrecognized byr rs: %d\n", desc->rs); + return &head->trap; + } +} void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev, const struct rtw89_txpwr_table *tbl) { const struct rtw89_txpwr_byrate_cfg *cfg = tbl->data; const struct rtw89_txpwr_byrate_cfg *end = cfg + tbl->size; + struct rtw89_txpwr_byrate *byr_head; + struct rtw89_rate_desc desc = {}; s8 *byr; u32 data; - u8 i, idx; + u8 i; for (; cfg < end; cfg++) { - byr = _byr_seek(cfg->rs, &rtwdev->byr[cfg->band]); + byr_head = &rtwdev->byr[cfg->band][0]; + desc.rs = cfg->rs; + desc.nss = cfg->nss; data = cfg->data; for (i = 0; i < cfg->len; i++, data >>= 8) { - idx = _byr_idx(cfg->rs, 
cfg->nss, (cfg->shf + i)); - byr[idx] = (s8)(data & 0xff); + desc.idx = cfg->shf + i; + byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc); + *byr = data & 0xff; } } } EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate); -#define _phy_txpwr_rf_to_mac(rtwdev, txpwr_rf) \ -({ \ - const struct rtw89_chip_info *__c = (rtwdev)->chip; \ - (txpwr_rf) >> (__c->txpwr_factor_rf - __c->txpwr_factor_mac); \ -}) +static s8 rtw89_phy_txpwr_rf_to_mac(struct rtw89_dev *rtwdev, s8 txpwr_rf) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; -static -s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, + return txpwr_rf >> (chip->txpwr_factor_rf - chip->txpwr_factor_mac); +} + +s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw, const struct rtw89_rate_desc *rate_desc) { + struct rtw89_txpwr_byrate *byr_head; s8 *byr; - u8 idx; if (rate_desc->rs == RTW89_RS_CCK) band = RTW89_BAND_2G; - if (!_byr_chk(rate_desc->rs, rate_desc->nss, rate_desc->idx)) { - rtw89_debug(rtwdev, RTW89_DBG_TXPWR, - "[TXPWR] unknown byrate desc rs=%d nss=%d idx=%d\n", - rate_desc->rs, rate_desc->nss, rate_desc->idx); - - return 0; - } - - byr = _byr_seek(rate_desc->rs, &rtwdev->byr[band]); - idx = _byr_idx(rate_desc->rs, rate_desc->nss, rate_desc->idx); + byr_head = &rtwdev->byr[band][bw]; + byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, rate_desc); - return _phy_txpwr_rf_to_mac(rtwdev, byr[idx]); + return rtw89_phy_txpwr_rf_to_mac(rtwdev, *byr); } static u8 rtw89_channel_6g_to_idx(struct rtw89_dev *rtwdev, u8 channel_6g) @@ -1627,6 +1713,8 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band, const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz; const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz; struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; + enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band); + u32 freq = ieee80211_channel_to_frequency(ch, nl_band); u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch); u8 regd = rtw89_regd_get(rtwdev, band); u8 reg6 = regulatory->reg_6ghz_power; @@ -1661,8 +1749,8 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band, return 0; } - lmt = _phy_txpwr_rf_to_mac(rtwdev, lmt); - sar = rtw89_query_sar(rtwdev); + lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt); + sar = rtw89_query_sar(rtwdev, freq); return min(lmt, sar); } @@ -1679,9 +1767,9 @@ EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit); (ch)); \ } while (0) -static void rtw89_phy_fill_txpwr_limit_20m(struct rtw89_dev *rtwdev, - struct rtw89_txpwr_limit *lmt, - u8 band, u8 ntx, u8 ch) +static void rtw89_phy_fill_txpwr_limit_20m_ax(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ax *lmt, + u8 band, u8 ntx, u8 ch) { __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_CCK, ch); @@ -1694,9 +1782,9 @@ static void rtw89_phy_fill_txpwr_limit_20m(struct rtw89_dev *rtwdev, ntx, RTW89_RS_MCS, ch); } -static void rtw89_phy_fill_txpwr_limit_40m(struct rtw89_dev *rtwdev, - struct rtw89_txpwr_limit *lmt, - u8 band, u8 ntx, u8 ch, u8 pri_ch) +static void rtw89_phy_fill_txpwr_limit_40m_ax(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ax *lmt, + u8 band, u8 ntx, u8 ch, u8 pri_ch) { __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_CCK, ch - 2); @@ -1715,9 +1803,9 @@ static void rtw89_phy_fill_txpwr_limit_40m(struct rtw89_dev *rtwdev, ntx, RTW89_RS_MCS, ch); } -static void rtw89_phy_fill_txpwr_limit_80m(struct rtw89_dev *rtwdev, - struct rtw89_txpwr_limit *lmt, - u8 band, u8 
ntx, u8 ch, u8 pri_ch) +static void rtw89_phy_fill_txpwr_limit_80m_ax(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ax *lmt, + u8 band, u8 ntx, u8 ch, u8 pri_ch) { s8 val_0p5_n[RTW89_BF_NUM]; s8 val_0p5_p[RTW89_BF_NUM]; @@ -1756,9 +1844,9 @@ static void rtw89_phy_fill_txpwr_limit_80m(struct rtw89_dev *rtwdev, lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]); } -static void rtw89_phy_fill_txpwr_limit_160m(struct rtw89_dev *rtwdev, - struct rtw89_txpwr_limit *lmt, - u8 band, u8 ntx, u8 ch, u8 pri_ch) +static void rtw89_phy_fill_txpwr_limit_160m_ax(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ax *lmt, + u8 band, u8 ntx, u8 ch, u8 pri_ch) { s8 val_0p5_n[RTW89_BF_NUM]; s8 val_0p5_p[RTW89_BF_NUM]; @@ -1843,10 +1931,10 @@ static void rtw89_phy_fill_txpwr_limit_160m(struct rtw89_dev *rtwdev, } static -void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan, - struct rtw89_txpwr_limit *lmt, - u8 ntx) +void rtw89_phy_fill_txpwr_limit_ax(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + struct rtw89_txpwr_limit_ax *lmt, + u8 ntx) { u8 band = chan->band_type; u8 pri_ch = chan->primary_channel; @@ -1857,31 +1945,33 @@ void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev, switch (bw) { case RTW89_CHANNEL_WIDTH_20: - rtw89_phy_fill_txpwr_limit_20m(rtwdev, lmt, band, ntx, ch); + rtw89_phy_fill_txpwr_limit_20m_ax(rtwdev, lmt, band, ntx, ch); break; case RTW89_CHANNEL_WIDTH_40: - rtw89_phy_fill_txpwr_limit_40m(rtwdev, lmt, band, ntx, ch, - pri_ch); + rtw89_phy_fill_txpwr_limit_40m_ax(rtwdev, lmt, band, ntx, ch, + pri_ch); break; case RTW89_CHANNEL_WIDTH_80: - rtw89_phy_fill_txpwr_limit_80m(rtwdev, lmt, band, ntx, ch, - pri_ch); + rtw89_phy_fill_txpwr_limit_80m_ax(rtwdev, lmt, band, ntx, ch, + pri_ch); break; case RTW89_CHANNEL_WIDTH_160: - rtw89_phy_fill_txpwr_limit_160m(rtwdev, lmt, band, ntx, ch, - pri_ch); + rtw89_phy_fill_txpwr_limit_160m_ax(rtwdev, lmt, band, ntx, ch, + pri_ch); break; } } -static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band, - u8 ru, u8 ntx, u8 ch) +s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band, + u8 ru, u8 ntx, u8 ch) { const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms; const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz; const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz; const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz; struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; + enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band); + u32 freq = ieee80211_channel_to_frequency(ch, nl_band); u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch); u8 regd = rtw89_regd_get(rtwdev, band); u8 reg6 = regulatory->reg_6ghz_power; @@ -1916,16 +2006,16 @@ static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band, return 0; } - lmt_ru = _phy_txpwr_rf_to_mac(rtwdev, lmt_ru); - sar = rtw89_query_sar(rtwdev); + lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru); + sar = rtw89_query_sar(rtwdev, freq); return min(lmt_ru, sar); } static void -rtw89_phy_fill_txpwr_limit_ru_20m(struct rtw89_dev *rtwdev, - struct rtw89_txpwr_limit_ru *lmt_ru, - u8 band, u8 ntx, u8 ch) +rtw89_phy_fill_txpwr_limit_ru_20m_ax(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ru_ax *lmt_ru, + u8 band, u8 ntx, u8 ch) { lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU26, @@ -1939,9 +2029,9 @@ rtw89_phy_fill_txpwr_limit_ru_20m(struct rtw89_dev *rtwdev, } static void 
-rtw89_phy_fill_txpwr_limit_ru_40m(struct rtw89_dev *rtwdev, - struct rtw89_txpwr_limit_ru *lmt_ru, - u8 band, u8 ntx, u8 ch) +rtw89_phy_fill_txpwr_limit_ru_40m_ax(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ru_ax *lmt_ru, + u8 band, u8 ntx, u8 ch) { lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU26, @@ -1964,9 +2054,9 @@ rtw89_phy_fill_txpwr_limit_ru_40m(struct rtw89_dev *rtwdev, } static void -rtw89_phy_fill_txpwr_limit_ru_80m(struct rtw89_dev *rtwdev, - struct rtw89_txpwr_limit_ru *lmt_ru, - u8 band, u8 ntx, u8 ch) +rtw89_phy_fill_txpwr_limit_ru_80m_ax(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ru_ax *lmt_ru, + u8 band, u8 ntx, u8 ch) { lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU26, @@ -2007,15 +2097,15 @@ rtw89_phy_fill_txpwr_limit_ru_80m(struct rtw89_dev *rtwdev, } static void -rtw89_phy_fill_txpwr_limit_ru_160m(struct rtw89_dev *rtwdev, - struct rtw89_txpwr_limit_ru *lmt_ru, - u8 band, u8 ntx, u8 ch) +rtw89_phy_fill_txpwr_limit_ru_160m_ax(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ru_ax *lmt_ru, + u8 band, u8 ntx, u8 ch) { static const int ofst[] = { -14, -10, -6, -2, 2, 6, 10, 14 }; int i; - static_assert(ARRAY_SIZE(ofst) == RTW89_RU_SEC_NUM); - for (i = 0; i < RTW89_RU_SEC_NUM; i++) { + static_assert(ARRAY_SIZE(ofst) == RTW89_RU_SEC_NUM_AX); + for (i = 0; i < RTW89_RU_SEC_NUM_AX; i++) { lmt_ru->ru26[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU26, ntx, @@ -2032,10 +2122,10 @@ rtw89_phy_fill_txpwr_limit_ru_160m(struct rtw89_dev *rtwdev, } static -void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan, - struct rtw89_txpwr_limit_ru *lmt_ru, - u8 ntx) +void rtw89_phy_fill_txpwr_limit_ru_ax(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + struct rtw89_txpwr_limit_ru_ax *lmt_ru, + u8 ntx) { u8 band = chan->band_type; u8 ch = chan->channel; @@ -2045,27 +2135,27 @@ void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev, switch (bw) { case RTW89_CHANNEL_WIDTH_20: - rtw89_phy_fill_txpwr_limit_ru_20m(rtwdev, lmt_ru, band, ntx, - ch); + rtw89_phy_fill_txpwr_limit_ru_20m_ax(rtwdev, lmt_ru, band, ntx, + ch); break; case RTW89_CHANNEL_WIDTH_40: - rtw89_phy_fill_txpwr_limit_ru_40m(rtwdev, lmt_ru, band, ntx, - ch); + rtw89_phy_fill_txpwr_limit_ru_40m_ax(rtwdev, lmt_ru, band, ntx, + ch); break; case RTW89_CHANNEL_WIDTH_80: - rtw89_phy_fill_txpwr_limit_ru_80m(rtwdev, lmt_ru, band, ntx, - ch); + rtw89_phy_fill_txpwr_limit_ru_80m_ax(rtwdev, lmt_ru, band, ntx, + ch); break; case RTW89_CHANNEL_WIDTH_160: - rtw89_phy_fill_txpwr_limit_ru_160m(rtwdev, lmt_ru, band, ntx, - ch); + rtw89_phy_fill_txpwr_limit_ru_160m_ax(rtwdev, lmt_ru, band, ntx, + ch); break; } } -void rtw89_phy_set_txpwr_byrate(struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx) +static void rtw89_phy_set_txpwr_byrate_ax(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) { u8 max_nss_num = rtwdev->chip->rf_path_num; static const u8 rs[] = { @@ -2074,7 +2164,7 @@ void rtw89_phy_set_txpwr_byrate(struct rtw89_dev *rtwdev, RTW89_RS_MCS, RTW89_RS_HEDCM, }; - struct rtw89_rate_desc cur; + struct rtw89_rate_desc cur = {}; u8 band = chan->band_type; u8 ch = chan->channel; u32 addr, val; @@ -2084,23 +2174,23 @@ void rtw89_phy_set_txpwr_byrate(struct rtw89_dev *rtwdev, rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr byrate with ch=%d\n", ch); - BUILD_BUG_ON(rtw89_rs_idx_num[RTW89_RS_CCK] % 4); - 
BUILD_BUG_ON(rtw89_rs_idx_num[RTW89_RS_OFDM] % 4); - BUILD_BUG_ON(rtw89_rs_idx_num[RTW89_RS_MCS] % 4); - BUILD_BUG_ON(rtw89_rs_idx_num[RTW89_RS_HEDCM] % 4); + BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_CCK] % 4); + BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_OFDM] % 4); + BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_MCS] % 4); + BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_HEDCM] % 4); addr = R_AX_PWR_BY_RATE; for (cur.nss = 0; cur.nss < max_nss_num; cur.nss++) { for (i = 0; i < ARRAY_SIZE(rs); i++) { - if (cur.nss >= rtw89_rs_nss_num[rs[i]]) + if (cur.nss >= rtw89_rs_nss_num_ax[rs[i]]) continue; cur.rs = rs[i]; - for (cur.idx = 0; cur.idx < rtw89_rs_idx_num[rs[i]]; + for (cur.idx = 0; cur.idx < rtw89_rs_idx_num_ax[rs[i]]; cur.idx++) { v[cur.idx % 4] = rtw89_phy_read_txpwr_byrate(rtwdev, - band, + band, 0, &cur); if ((cur.idx + 1) % 4) @@ -2118,26 +2208,26 @@ void rtw89_phy_set_txpwr_byrate(struct rtw89_dev *rtwdev, } } } -EXPORT_SYMBOL(rtw89_phy_set_txpwr_byrate); -void rtw89_phy_set_txpwr_offset(struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx) +static +void rtw89_phy_set_txpwr_offset_ax(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) { struct rtw89_rate_desc desc = { .nss = RTW89_NSS_1, .rs = RTW89_RS_OFFSET, }; u8 band = chan->band_type; - s8 v[RTW89_RATE_OFFSET_NUM] = {}; + s8 v[RTW89_RATE_OFFSET_NUM_AX] = {}; u32 val; rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n"); - for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_NUM; desc.idx++) - v[desc.idx] = rtw89_phy_read_txpwr_byrate(rtwdev, band, &desc); + for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_NUM_AX; desc.idx++) + v[desc.idx] = rtw89_phy_read_txpwr_byrate(rtwdev, band, 0, &desc); - BUILD_BUG_ON(RTW89_RATE_OFFSET_NUM != 5); + BUILD_BUG_ON(RTW89_RATE_OFFSET_NUM_AX != 5); val = FIELD_PREP(GENMASK(3, 0), v[0]) | FIELD_PREP(GENMASK(7, 4), v[1]) | FIELD_PREP(GENMASK(11, 8), v[2]) | @@ -2147,14 +2237,13 @@ void rtw89_phy_set_txpwr_offset(struct rtw89_dev *rtwdev, rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_OFST_CTRL, GENMASK(19, 0), val); } -EXPORT_SYMBOL(rtw89_phy_set_txpwr_offset); -void rtw89_phy_set_txpwr_limit(struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx) +static void rtw89_phy_set_txpwr_limit_ax(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) { u8 max_ntx_num = rtwdev->chip->rf_path_num; - struct rtw89_txpwr_limit lmt; + struct rtw89_txpwr_limit_ax lmt; u8 ch = chan->channel; u8 bw = chan->band_width; const s8 *ptr; @@ -2164,15 +2253,15 @@ void rtw89_phy_set_txpwr_limit(struct rtw89_dev *rtwdev, rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw); - BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit) != - RTW89_TXPWR_LMT_PAGE_SIZE); + BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ax) != + RTW89_TXPWR_LMT_PAGE_SIZE_AX); addr = R_AX_PWR_LMT; for (i = 0; i < max_ntx_num; i++) { - rtw89_phy_fill_txpwr_limit(rtwdev, chan, &lmt, i); + rtw89_phy_fill_txpwr_limit_ax(rtwdev, chan, &lmt, i); ptr = (s8 *)&lmt; - for (j = 0; j < RTW89_TXPWR_LMT_PAGE_SIZE; + for (j = 0; j < RTW89_TXPWR_LMT_PAGE_SIZE_AX; j += 4, addr += 4, ptr += 4) { val = FIELD_PREP(GENMASK(7, 0), ptr[0]) | FIELD_PREP(GENMASK(15, 8), ptr[1]) | @@ -2183,14 +2272,13 @@ void rtw89_phy_set_txpwr_limit(struct rtw89_dev *rtwdev, } } } -EXPORT_SYMBOL(rtw89_phy_set_txpwr_limit); -void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev, - const struct rtw89_chan 
*chan, - enum rtw89_phy_idx phy_idx) +static void rtw89_phy_set_txpwr_limit_ru_ax(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) { u8 max_ntx_num = rtwdev->chip->rf_path_num; - struct rtw89_txpwr_limit_ru lmt_ru; + struct rtw89_txpwr_limit_ru_ax lmt_ru; u8 ch = chan->channel; u8 bw = chan->band_width; const s8 *ptr; @@ -2200,15 +2288,15 @@ void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev, rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw); - BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ru) != - RTW89_TXPWR_LMT_RU_PAGE_SIZE); + BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ru_ax) != + RTW89_TXPWR_LMT_RU_PAGE_SIZE_AX); addr = R_AX_PWR_RU_LMT; for (i = 0; i < max_ntx_num; i++) { - rtw89_phy_fill_txpwr_limit_ru(rtwdev, chan, &lmt_ru, i); + rtw89_phy_fill_txpwr_limit_ru_ax(rtwdev, chan, &lmt_ru, i); ptr = (s8 *)&lmt_ru; - for (j = 0; j < RTW89_TXPWR_LMT_RU_PAGE_SIZE; + for (j = 0; j < RTW89_TXPWR_LMT_RU_PAGE_SIZE_AX; j += 4, addr += 4, ptr += 4) { val = FIELD_PREP(GENMASK(7, 0), ptr[0]) | FIELD_PREP(GENMASK(15, 8), ptr[1]) | @@ -2219,7 +2307,6 @@ void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev, } } } -EXPORT_SYMBOL(rtw89_phy_set_txpwr_limit_ru); struct rtw89_phy_iter_ra_data { struct rtw89_dev *rtwdev; @@ -2231,21 +2318,34 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta) struct rtw89_phy_iter_ra_data *ra_data = (struct rtw89_phy_iter_ra_data *)data; struct rtw89_dev *rtwdev = ra_data->rtwdev; struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; + const struct rtw89_c2h_ra_rpt *c2h = + (const struct rtw89_c2h_ra_rpt *)ra_data->c2h->data; struct rtw89_ra_report *ra_report = &rtwsta->ra_report; - struct sk_buff *c2h = ra_data->c2h; + const struct rtw89_chip_info *chip = rtwdev->chip; + bool format_v1 = chip->chip_gen == RTW89_CHIP_BE; u8 mode, rate, bw, giltf, mac_id; u16 legacy_bitrate; bool valid; u8 mcs = 0; + u8 t; - mac_id = RTW89_GET_PHY_C2H_RA_RPT_MACID(c2h->data); + mac_id = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MACID); if (mac_id != rtwsta->mac_id) return; - rate = RTW89_GET_PHY_C2H_RA_RPT_MCSNSS(c2h->data); - bw = RTW89_GET_PHY_C2H_RA_RPT_BW(c2h->data); - giltf = RTW89_GET_PHY_C2H_RA_RPT_GILTF(c2h->data); - mode = RTW89_GET_PHY_C2H_RA_RPT_MD_SEL(c2h->data); + rate = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MCSNSS); + bw = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW); + giltf = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_GILTF); + mode = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL); + + if (format_v1) { + t = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MCSNSS_B7); + rate |= u8_encode_bits(t, BIT(7)); + t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW_B2); + bw |= u8_encode_bits(t, BIT(2)); + t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL_B2); + mode |= u8_encode_bits(t, BIT(2)); + } if (mode == RTW89_RA_RPT_MODE_LEGACY) { valid = rtw89_ra_report_to_bitrate(rtwdev, rate, &legacy_bitrate); @@ -2273,16 +2373,24 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta) break; case RTW89_RA_RPT_MODE_VHT: ra_report->txrate.flags |= RATE_INFO_FLAGS_VHT_MCS; - ra_report->txrate.mcs = FIELD_GET(RTW89_RA_RATE_MASK_MCS, rate); - ra_report->txrate.nss = FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate) + 1; + ra_report->txrate.mcs = format_v1 ? + u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) : + u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS); + ra_report->txrate.nss = format_v1 ? 
+ u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 : + u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1; if (giltf) ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; mcs = ra_report->txrate.mcs; break; case RTW89_RA_RPT_MODE_HE: ra_report->txrate.flags |= RATE_INFO_FLAGS_HE_MCS; - ra_report->txrate.mcs = FIELD_GET(RTW89_RA_RATE_MASK_MCS, rate); - ra_report->txrate.nss = FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate) + 1; + ra_report->txrate.mcs = format_v1 ? + u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) : + u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS); + ra_report->txrate.nss = format_v1 ? + u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 : + u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1; if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08) ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_0_8; else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16) @@ -2291,12 +2399,27 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta) ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_3_2; mcs = ra_report->txrate.mcs; break; + case RTW89_RA_RPT_MODE_EHT: + ra_report->txrate.flags |= RATE_INFO_FLAGS_EHT_MCS; + ra_report->txrate.mcs = u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1); + ra_report->txrate.nss = u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1; + if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08) + ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_0_8; + else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16) + ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_1_6; + else + ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_3_2; + mcs = ra_report->txrate.mcs; + break; } ra_report->txrate.bw = rtw89_hw_to_rate_info_bw(bw); ra_report->bit_rate = cfg80211_calculate_bitrate(&ra_report->txrate); - ra_report->hw_rate = FIELD_PREP(RTW89_HW_RATE_MASK_MOD, mode) | - FIELD_PREP(RTW89_HW_RATE_MASK_VAL, rate); + ra_report->hw_rate = format_v1 ? 
+ u16_encode_bits(mode, RTW89_HW_RATE_V1_MASK_MOD) | + u16_encode_bits(rate, RTW89_HW_RATE_V1_MASK_VAL) : + u16_encode_bits(mode, RTW89_HW_RATE_MASK_MOD) | + u16_encode_bits(rate, RTW89_HW_RATE_MASK_VAL); ra_report->might_fallback_legacy = mcs <= 2; sta->deflink.agg.max_rc_amsdu_len = get_max_amsdu_len(rtwdev, ra_report); rtwsta->max_agg_wait = sta->deflink.agg.max_rc_amsdu_len / 1500 - 1; @@ -2434,6 +2557,9 @@ static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo) s32 dcfo_comp_val; int sign; + if (rtwdev->chip->chip_id == RTL8922A) + return; + if (!is_linked) { rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: is_linked=%d\n", is_linked); @@ -2454,16 +2580,23 @@ static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo) static void rtw89_dcfo_comp_init(struct rtw89_dev *rtwdev) { + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; const struct rtw89_chip_info *chip = rtwdev->chip; + const struct rtw89_cfo_regs *cfo = phy->cfo; - rtw89_phy_set_phy_regs(rtwdev, R_DCFO_OPT, B_DCFO_OPT_EN, 1); - rtw89_phy_set_phy_regs(rtwdev, R_DCFO_WEIGHT, B_DCFO_WEIGHT_MSK, 8); + rtw89_phy_set_phy_regs(rtwdev, cfo->comp_seg0, cfo->valid_0_mask, 1); + rtw89_phy_set_phy_regs(rtwdev, cfo->comp, cfo->weighting_mask, 8); - if (chip->cfo_hw_comp) - rtw89_write32_mask(rtwdev, R_AX_PWR_UL_CTRL2, - B_AX_PWR_UL_CFO_MASK, 0x6); - else - rtw89_write32_clr(rtwdev, R_AX_PWR_UL_CTRL2, B_AX_PWR_UL_CFO_MASK); + if (chip->chip_gen == RTW89_CHIP_AX) { + if (chip->cfo_hw_comp) { + rtw89_write32_mask(rtwdev, R_AX_PWR_UL_CTRL2, + B_AX_PWR_UL_CFO_MASK, 0x6); + } else { + rtw89_phy_set_phy_regs(rtwdev, R_DCFO, B_DCFO, 1); + rtw89_write32_clr(rtwdev, R_AX_PWR_UL_CTRL2, + B_AX_PWR_UL_CFO_MASK); + } + } } static void rtw89_phy_cfo_init(struct rtw89_dev *rtwdev) @@ -2486,7 +2619,6 @@ static void rtw89_phy_cfo_init(struct rtw89_dev *rtwdev) rtw89_debug(rtwdev, RTW89_DBG_CFO, "Default xcap=%0x\n", cfo->crystal_cap_default); rtw89_phy_cfo_set_crystal_cap(rtwdev, cfo->crystal_cap_default, true); - rtw89_phy_set_phy_regs(rtwdev, R_DCFO, B_DCFO, 1); rtw89_dcfo_comp_init(rtwdev); cfo->cfo_timer_ms = 2000; cfo->cfo_trig_by_timer_en = false; @@ -2503,11 +2635,15 @@ static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev, s32 cfo_abs = abs(curr_cfo); int sign; + if (curr_cfo == 0) { + rtw89_debug(rtwdev, RTW89_DBG_CFO, "curr_cfo=0\n"); + return; + } if (!cfo->is_adjust) { if (cfo_abs > CFO_TRK_ENABLE_TH) cfo->is_adjust = true; } else { - if (cfo_abs < CFO_TRK_STOP_TH) + if (cfo_abs <= CFO_TRK_STOP_TH) cfo->is_adjust = false; } if (!cfo->is_adjust) { @@ -2699,10 +2835,6 @@ static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev) new_cfo = rtw89_phy_average_cfo_calc(rtwdev); else new_cfo = rtw89_phy_multi_sta_cfo_calc(rtwdev); - if (new_cfo == 0) { - rtw89_debug(rtwdev, RTW89_DBG_CFO, "curr_cfo=0\n"); - return; - } if (cfo->divergence_lock_en) { cfo->lock_cnt++; if (cfo->lock_cnt > CFO_PERIOD_CNT) { @@ -2841,10 +2973,11 @@ void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val, void rtw89_phy_ul_tb_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { const struct rtw89_chip_info *chip = rtwdev->chip; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, + rtwvif->sub_entity_idx); struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info; - if (!chip->support_ul_tb_ctrl) + if (!chip->ul_tb_waveform_ctrl) return; rtwvif->def_tri_idx = @@ -2874,6 +3007,61 @@ struct rtw89_phy_ul_tb_check_data { u8 def_tri_idx; }; 
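/* Illustrative sketch, not part of the commit: the hunks above replace the
 * single chip->support_ul_tb_ctrl flag with two independent capability
 * flags, ul_tb_waveform_ctrl and ul_tb_pwr_diff (the latter drives the
 * rtw89_phy_power_diff table added in the next hunk), so each UL TB
 * mechanism is gated per chip. All types and names below are stand-ins,
 * and the flow is simplified relative to rtw89_phy_ul_tb_ctrl_track().
 */
#include <stdbool.h>
#include <stdio.h>

struct chip_caps {                /* stand-in for the rtw89_chip_info flags */
	bool ul_tb_waveform_ctrl; /* gates bandedge/tx-triangle shaping */
	bool ul_tb_pwr_diff;      /* gates the OFDMA power-diff programming */
};

static void ul_tb_track(const struct chip_caps *chip)
{
	/* bail out only when the chip supports neither mechanism */
	if (!chip->ul_tb_waveform_ctrl && !chip->ul_tb_pwr_diff)
		return;

	if (chip->ul_tb_waveform_ctrl)
		puts("apply waveform (bandedge/triangle) control");
	if (chip->ul_tb_pwr_diff)
		puts("program the two-row power-diff parameter table");
}

int main(void)
{
	const struct chip_caps ax_like = { .ul_tb_waveform_ctrl = true };
	const struct chip_caps be_like = { .ul_tb_pwr_diff = true };

	ul_tb_track(&ax_like);
	ul_tb_track(&be_like);
	return 0;
}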
+struct rtw89_phy_power_diff { + u32 q_00; + u32 q_11; + u32 q_matrix_en; + u32 ultb_1t_norm_160; + u32 ultb_2t_norm_160; + u32 com1_norm_1sts; + u32 com2_resp_1sts_path; +}; + +static void rtw89_phy_ofdma_power_diff(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + static const struct rtw89_phy_power_diff table[2] = { + {0x0, 0x0, 0x0, 0x0, 0xf4, 0x3, 0x3}, + {0xb50, 0xb50, 0x1, 0xc, 0x0, 0x1, 0x1}, + }; + const struct rtw89_phy_power_diff *param; + u32 reg; + + if (!rtwdev->chip->ul_tb_pwr_diff) + return; + + if (rtwvif->pwr_diff_en == rtwvif->pre_pwr_diff_en) { + rtwvif->pwr_diff_en = false; + return; + } + + rtwvif->pre_pwr_diff_en = rtwvif->pwr_diff_en; + param = &table[rtwvif->pwr_diff_en]; + + rtw89_phy_write32_mask(rtwdev, R_Q_MATRIX_00, B_Q_MATRIX_00_REAL, + param->q_00); + rtw89_phy_write32_mask(rtwdev, R_Q_MATRIX_11, B_Q_MATRIX_11_REAL, + param->q_11); + rtw89_phy_write32_mask(rtwdev, R_CUSTOMIZE_Q_MATRIX, + B_CUSTOMIZE_Q_MATRIX_EN, param->q_matrix_en); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, rtwvif->mac_idx); + rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_NORM_BW160, + param->ultb_1t_norm_160); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, rtwvif->mac_idx); + rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_NORM_BW160, + param->ultb_2t_norm_160); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM1, rtwvif->mac_idx); + rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM1_NORM_1STS, + param->com1_norm_1sts); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM2, rtwvif->mac_idx); + rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM2_RESP_1STS_PATH, + param->com2_resp_1sts_path); +} + static void rtw89_phy_ul_tb_ctrl_check(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, @@ -2888,41 +3076,34 @@ void rtw89_phy_ul_tb_ctrl_check(struct rtw89_dev *rtwdev, if (!vif->cfg.assoc) return; - if (stats->rx_tf_periodic > UL_TB_TF_CNT_L2H_TH) - ul_tb_data->high_tf_client = true; - else if (stats->rx_tf_periodic < UL_TB_TF_CNT_H2L_TH) - ul_tb_data->low_tf_client = true; + if (rtwdev->chip->ul_tb_waveform_ctrl) { + if (stats->rx_tf_periodic > UL_TB_TF_CNT_L2H_TH) + ul_tb_data->high_tf_client = true; + else if (stats->rx_tf_periodic < UL_TB_TF_CNT_H2L_TH) + ul_tb_data->low_tf_client = true; + + ul_tb_data->valid = true; + ul_tb_data->def_tri_idx = rtwvif->def_tri_idx; + ul_tb_data->dyn_tb_bedge_en = rtwvif->dyn_tb_bedge_en; + } - ul_tb_data->valid = true; - ul_tb_data->def_tri_idx = rtwvif->def_tri_idx; - ul_tb_data->dyn_tb_bedge_en = rtwvif->dyn_tb_bedge_en; + rtw89_phy_ofdma_power_diff(rtwdev, rtwvif); } -void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev) +static void rtw89_phy_ul_tb_waveform_ctrl(struct rtw89_dev *rtwdev, + struct rtw89_phy_ul_tb_check_data *ul_tb_data) { - const struct rtw89_chip_info *chip = rtwdev->chip; struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info; - struct rtw89_phy_ul_tb_check_data ul_tb_data = {}; - struct rtw89_vif *rtwvif; - - if (!chip->support_ul_tb_ctrl) - return; - - if (rtwdev->total_sta_assoc != 1) - return; - rtw89_for_each_rtwvif(rtwdev, rtwvif) - rtw89_phy_ul_tb_ctrl_check(rtwdev, rtwvif, &ul_tb_data); - - if (!ul_tb_data.valid) + if (!rtwdev->chip->ul_tb_waveform_ctrl) return; - if (ul_tb_data.dyn_tb_bedge_en) { - if (ul_tb_data.high_tf_client) { + if (ul_tb_data->dyn_tb_bedge_en) { + if (ul_tb_data->high_tf_client) { rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN, 0); rtw89_debug(rtwdev, RTW89_DBG_UL_TB, "[ULTB] Turn off if_bandedge\n"); - } else if (ul_tb_data.low_tf_client) { + } 
else if (ul_tb_data->low_tf_client) { rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN, ul_tb_info->def_if_bandedge); rtw89_debug(rtwdev, RTW89_DBG_UL_TB, @@ -2932,28 +3113,49 @@ void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev) } if (ul_tb_info->dyn_tb_tri_en) { - if (ul_tb_data.high_tf_client) { + if (ul_tb_data->high_tf_client) { rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG, 0); rtw89_debug(rtwdev, RTW89_DBG_UL_TB, "[ULTB] Turn off Tx triangle\n"); - } else if (ul_tb_data.low_tf_client) { + } else if (ul_tb_data->low_tf_client) { rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG, - ul_tb_data.def_tri_idx); + ul_tb_data->def_tri_idx); rtw89_debug(rtwdev, RTW89_DBG_UL_TB, "[ULTB] Set to default tx_shap_idx = %d\n", - ul_tb_data.def_tri_idx); + ul_tb_data->def_tri_idx); } } } +void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_phy_ul_tb_check_data ul_tb_data = {}; + struct rtw89_vif *rtwvif; + + if (!chip->ul_tb_waveform_ctrl && !chip->ul_tb_pwr_diff) + return; + + if (rtwdev->total_sta_assoc != 1) + return; + + rtw89_for_each_rtwvif(rtwdev, rtwvif) + rtw89_phy_ul_tb_ctrl_check(rtwdev, rtwvif, &ul_tb_data); + + if (!ul_tb_data.valid) + return; + + rtw89_phy_ul_tb_waveform_ctrl(rtwdev, &ul_tb_data); +} + static void rtw89_phy_ul_tb_info_init(struct rtw89_dev *rtwdev) { const struct rtw89_chip_info *chip = rtwdev->chip; struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info; - if (!chip->support_ul_tb_ctrl) + if (!chip->ul_tb_waveform_ctrl) return; ul_tb_info->dyn_tb_tri_en = true; @@ -2977,7 +3179,7 @@ static void rtw89_phy_antdiv_sts_instance_add(struct rtw89_dev *rtwdev, struct rtw89_rx_phy_ppdu *phy_ppdu, struct rtw89_antdiv_stats *stats) { - if (GET_DATA_RATE_MODE(phy_ppdu->rate) == DATA_RATE_MODE_NON_HT) { + if (rtw89_get_data_rate_mode(rtwdev, phy_ppdu->rate) == DATA_RATE_MODE_NON_HT) { if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6) { ewma_rssi_add(&stats->cck_rssi_avg, phy_ppdu->rssi_avg); stats->pkt_cnt_cck++; @@ -3183,7 +3385,9 @@ static u32 rtw89_phy_ccx_idx_to_us(struct rtw89_dev *rtwdev, u16 idx) static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev) { + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + const struct rtw89_ccx_regs *ccx = phy->ccx; env->ccx_manual_ctrl = false; env->ccx_ongoing = false; @@ -3191,10 +3395,10 @@ static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev) env->ccx_period = 0; env->ccx_unit_idx = RTW89_CCX_32_US; - rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_CCX_EN_MSK, 1); - rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_CCX_TRIG_OPT_MSK, 1); - rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_MEASUREMENT_TRIG_MSK, 1); - rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_CCX_EDCCA_OPT_MSK, + rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->en_mask, 1); + rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->trig_opt_mask, 1); + rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1); + rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->edcca_opt_mask, RTW89_CCX_EDCCA_BW20_0); } @@ -3309,25 +3513,27 @@ ifs_update_finished: static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev) { + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + const struct rtw89_ccx_regs *ccx = phy->ccx; u8 i = 0; - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T1, 
B_IFS_T1_TH_LOW_MSK, + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_l_mask, env->ifs_clm_th_l[0]); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T2, B_IFS_T2_TH_LOW_MSK, + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_l_mask, env->ifs_clm_th_l[1]); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T3, B_IFS_T3_TH_LOW_MSK, + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_l_mask, env->ifs_clm_th_l[2]); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T4, B_IFS_T4_TH_LOW_MSK, + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_l_mask, env->ifs_clm_th_l[3]); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T1, B_IFS_T1_TH_HIGH_MSK, + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_h_mask, env->ifs_clm_th_h[0]); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T2, B_IFS_T2_TH_HIGH_MSK, + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_h_mask, env->ifs_clm_th_h[1]); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T3, B_IFS_T3_TH_HIGH_MSK, + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_h_mask, env->ifs_clm_th_h[2]); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T4, B_IFS_T4_TH_HIGH_MSK, + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_h_mask, env->ifs_clm_th_h[3]); for (i = 0; i < RTW89_IFS_CLM_NUM; i++) @@ -3338,7 +3544,9 @@ static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev) static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev) { + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + const struct rtw89_ccx_regs *ccx = phy->ccx; struct rtw89_ccx_para_info para = {0}; env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND; @@ -3348,12 +3556,11 @@ static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev) if (rtw89_phy_ifs_clm_th_update_check(rtwdev, ¶)) rtw89_phy_ifs_clm_set_th_reg(rtwdev); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER, B_IFS_COLLECT_EN, - true); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T1, B_IFS_T1_EN_MSK, true); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T2, B_IFS_T2_EN_MSK, true); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T3, B_IFS_T3_EN_MSK, true); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_T4, B_IFS_T4_EN_MSK, true); + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_collect_en_mask, true); + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_en_mask, true); + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_en_mask, true); + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_en_mask, true); + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_en_mask, true); } static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev, @@ -3390,12 +3597,14 @@ static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev, static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev) { + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + const struct rtw89_ccx_regs *ccx = phy->ccx; - rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER, B_IFS_COUNTER_CLR_MSK, 0); - rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_MEASUREMENT_TRIG_MSK, 0); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER, B_IFS_COUNTER_CLR_MSK, 1); - rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_MEASUREMENT_TRIG_MSK, 1); + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 0); + rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 0); + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 1); + 
rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1); env->ccx_ongoing = true; } @@ -3467,63 +3676,79 @@ static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev) static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev) { + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + const struct rtw89_ccx_regs *ccx = phy->ccx; u8 i = 0; - if (rtw89_phy_read32_mask(rtwdev, R_IFSCNT, B_IFSCNT_DONE_MSK) == 0) { + if (rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr, + ccx->ifs_cnt_done_mask) == 0) { rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Get IFS_CLM report Fail\n"); return false; } env->ifs_clm_tx = - rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_TX_CNT, - B_IFS_CLM_TX_CNT_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr, + ccx->ifs_clm_tx_cnt_msk); env->ifs_clm_edcca_excl_cca = - rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_TX_CNT, - B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr, + ccx->ifs_clm_edcca_excl_cca_fa_mask); env->ifs_clm_cckcca_excl_fa = - rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_CCA, - B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr, + ccx->ifs_clm_cckcca_excl_fa_mask); env->ifs_clm_ofdmcca_excl_fa = - rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_CCA, - B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr, + ccx->ifs_clm_ofdmcca_excl_fa_mask); env->ifs_clm_cckfa = - rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_FA, - B_IFS_CLM_CCK_FA_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr, + ccx->ifs_clm_cck_fa_mask); env->ifs_clm_ofdmfa = - rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_FA, - B_IFS_CLM_OFDM_FA_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr, + ccx->ifs_clm_ofdm_fa_mask); env->ifs_clm_his[0] = - rtw89_phy_read32_mask(rtwdev, R_IFS_HIS, B_IFS_T1_HIS_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr, + ccx->ifs_t1_his_mask); env->ifs_clm_his[1] = - rtw89_phy_read32_mask(rtwdev, R_IFS_HIS, B_IFS_T2_HIS_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr, + ccx->ifs_t2_his_mask); env->ifs_clm_his[2] = - rtw89_phy_read32_mask(rtwdev, R_IFS_HIS, B_IFS_T3_HIS_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr, + ccx->ifs_t3_his_mask); env->ifs_clm_his[3] = - rtw89_phy_read32_mask(rtwdev, R_IFS_HIS, B_IFS_T4_HIS_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr, + ccx->ifs_t4_his_mask); env->ifs_clm_avg[0] = - rtw89_phy_read32_mask(rtwdev, R_IFS_AVG_L, B_IFS_T1_AVG_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr, + ccx->ifs_t1_avg_mask); env->ifs_clm_avg[1] = - rtw89_phy_read32_mask(rtwdev, R_IFS_AVG_L, B_IFS_T2_AVG_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr, + ccx->ifs_t2_avg_mask); env->ifs_clm_avg[2] = - rtw89_phy_read32_mask(rtwdev, R_IFS_AVG_H, B_IFS_T3_AVG_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr, + ccx->ifs_t3_avg_mask); env->ifs_clm_avg[3] = - rtw89_phy_read32_mask(rtwdev, R_IFS_AVG_H, B_IFS_T4_AVG_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr, + ccx->ifs_t4_avg_mask); env->ifs_clm_cca[0] = - rtw89_phy_read32_mask(rtwdev, R_IFS_CCA_L, B_IFS_T1_CCA_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr, + ccx->ifs_t1_cca_mask); env->ifs_clm_cca[1] = - rtw89_phy_read32_mask(rtwdev, R_IFS_CCA_L, B_IFS_T2_CCA_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr, + ccx->ifs_t2_cca_mask); env->ifs_clm_cca[2] = - rtw89_phy_read32_mask(rtwdev, R_IFS_CCA_H, 
B_IFS_T3_CCA_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr, + ccx->ifs_t3_cca_mask); env->ifs_clm_cca[3] = - rtw89_phy_read32_mask(rtwdev, R_IFS_CCA_H, B_IFS_T4_CCA_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr, + ccx->ifs_t4_cca_mask); env->ifs_clm_total_ifs = - rtw89_phy_read32_mask(rtwdev, R_IFSCNT, B_IFSCNT_TOTAL_CNT_MSK); + rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr, + ccx->ifs_total_mask); rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "IFS-CLM total_ifs = %d\n", env->ifs_clm_total_ifs); @@ -3551,7 +3776,9 @@ static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev) static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev, struct rtw89_ccx_para_info *para) { + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + const struct rtw89_ccx_regs *ccx = phy->ccx; u32 period = 0; u32 unit_idx = 0; @@ -3567,10 +3794,11 @@ static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev, if (para->mntr_time != env->ifs_clm_mntr_time) { rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time, &period, &unit_idx); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER, - B_IFS_CLM_PERIOD_MSK, period); - rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER, - B_IFS_CLM_COUNTER_UNIT_MSK, unit_idx); + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, + ccx->ifs_clm_period_mask, period); + rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, + ccx->ifs_clm_cnt_unit_mask, + unit_idx); rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Update IFS-CLM time ((%d)) -> ((%d))\n", @@ -3688,16 +3916,19 @@ static void rtw89_physts_enable_fail_report(struct rtw89_dev *rtwdev, bool enable, enum rtw89_phy_idx phy_idx) { + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + const struct rtw89_physts_regs *physts = phy->physts; + if (enable) { - rtw89_phy_write32_clr(rtwdev, R_PLCP_HISTOGRAM, - B_STS_DIS_TRIG_BY_FAIL); - rtw89_phy_write32_clr(rtwdev, R_PLCP_HISTOGRAM, - B_STS_DIS_TRIG_BY_BRK); + rtw89_phy_write32_clr(rtwdev, physts->setting_addr, + physts->dis_trigger_fail_mask); + rtw89_phy_write32_clr(rtwdev, physts->setting_addr, + physts->dis_trigger_brk_mask); } else { - rtw89_phy_write32_set(rtwdev, R_PLCP_HISTOGRAM, - B_STS_DIS_TRIG_BY_FAIL); - rtw89_phy_write32_set(rtwdev, R_PLCP_HISTOGRAM, - B_STS_DIS_TRIG_BY_BRK); + rtw89_phy_write32_set(rtwdev, physts->setting_addr, + physts->dis_trigger_fail_mask); + rtw89_phy_write32_set(rtwdev, physts->setting_addr, + physts->dis_trigger_brk_mask); } } @@ -4125,10 +4356,10 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi, "igi=%d, cck_ccaTH=%d, backoff=%d, cck_PD_low=((%d))dB\n", final_rssi, cck_cca_th, under_region, pd_val); - rtw89_phy_write32_mask(rtwdev, R_BMODE_PDTH_EN_V1, - B_BMODE_PDTH_LIMIT_EN_MSK_V1, enable); - rtw89_phy_write32_mask(rtwdev, R_BMODE_PDTH_V1, - B_BMODE_PDTH_LOWER_BOUND_MSK_V1, pd_val); + rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_reg, + dig_regs->bmode_cca_rssi_limit_en, enable); + rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_lower_bound_reg, + dig_regs->bmode_rssi_nocca_low_th_mask, pd_val); } void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev) @@ -4391,8 +4622,6 @@ static void rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev) void rtw89_phy_dm_init(struct rtw89_dev *rtwdev) { - const struct rtw89_chip_info *chip = rtwdev->chip; - rtw89_phy_stat_init(rtwdev); rtw89_chip_bb_sethw(rtwdev); @@ -4408,7 +4637,6 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev) rtw89_phy_init_rf_nctl(rtwdev); rtw89_chip_rfk_init(rtwdev); - 
rtw89_load_txpwr_table(rtwdev, chip->byr_table); rtw89_chip_set_txpwr_ctrl(rtwdev); rtw89_chip_power_trim(rtwdev); rtw89_chip_cfg_txrx_path(rtwdev); @@ -4417,6 +4645,7 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev) void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) { const struct rtw89_chip_info *chip = rtwdev->chip; + const struct rtw89_reg_def *bss_clr_vld = &chip->bss_clr_vld; enum rtw89_phy_idx phy_idx = RTW89_PHY_0; u8 bss_color; @@ -4425,7 +4654,7 @@ void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif bss_color = vif->bss_conf.he_bss_color.color; - rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_VLD0, 0x1, + rtw89_phy_write32_idx(rtwdev, bss_clr_vld->addr, bss_clr_vld->mask, 0x1, phy_idx); rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_TGT, bss_color, phy_idx); @@ -4517,7 +4746,7 @@ void rtw89_phy_tssi_ctrl_set_fast_mode_cfg(struct rtw89_dev *rtwdev, regs = rtw89_tssi_fastmode_regs_level; for (i = 0; i < RTW89_TSSI_FAST_MODE_NUM; i++) { - reg = rtw89_mac_reg_by_idx(regs[i].addr, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx); rtw89_write32_mask(rtwdev, reg, regs[i].mask, val); } } @@ -4579,11 +4808,11 @@ void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev, data = chip->tssi_dbw_table->data[bandedge_cfg]; for (i = 0; i < RTW89_TSSI_SBW_NUM; i++) { - reg = rtw89_mac_reg_by_idx(regs[i].addr, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx); rtw89_write32_mask(rtwdev, reg, regs[i].mask, data[i]); } - reg = rtw89_mac_reg_by_idx(R_AX_BANDEDGE_CFG, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BANDEDGE_CFG, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_BANDEDGE_CFG_IDX_MASK, bandedge_cfg); rtw89_phy_tssi_ctrl_set_fast_mode_cfg(rtwdev, mac_idx, bandedge_cfg, @@ -4681,3 +4910,87 @@ void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, bool scan) rtw89_phy_write32(rtwdev, reg, hal->edcca_bak); } } + +static const struct rtw89_ccx_regs rtw89_ccx_regs_ax = { + .setting_addr = R_CCX, + .edcca_opt_mask = B_CCX_EDCCA_OPT_MSK, + .measurement_trig_mask = B_MEASUREMENT_TRIG_MSK, + .trig_opt_mask = B_CCX_TRIG_OPT_MSK, + .en_mask = B_CCX_EN_MSK, + .ifs_cnt_addr = R_IFS_COUNTER, + .ifs_clm_period_mask = B_IFS_CLM_PERIOD_MSK, + .ifs_clm_cnt_unit_mask = B_IFS_CLM_COUNTER_UNIT_MSK, + .ifs_clm_cnt_clear_mask = B_IFS_COUNTER_CLR_MSK, + .ifs_collect_en_mask = B_IFS_COLLECT_EN, + .ifs_t1_addr = R_IFS_T1, + .ifs_t1_th_h_mask = B_IFS_T1_TH_HIGH_MSK, + .ifs_t1_en_mask = B_IFS_T1_EN_MSK, + .ifs_t1_th_l_mask = B_IFS_T1_TH_LOW_MSK, + .ifs_t2_addr = R_IFS_T2, + .ifs_t2_th_h_mask = B_IFS_T2_TH_HIGH_MSK, + .ifs_t2_en_mask = B_IFS_T2_EN_MSK, + .ifs_t2_th_l_mask = B_IFS_T2_TH_LOW_MSK, + .ifs_t3_addr = R_IFS_T3, + .ifs_t3_th_h_mask = B_IFS_T3_TH_HIGH_MSK, + .ifs_t3_en_mask = B_IFS_T3_EN_MSK, + .ifs_t3_th_l_mask = B_IFS_T3_TH_LOW_MSK, + .ifs_t4_addr = R_IFS_T4, + .ifs_t4_th_h_mask = B_IFS_T4_TH_HIGH_MSK, + .ifs_t4_en_mask = B_IFS_T4_EN_MSK, + .ifs_t4_th_l_mask = B_IFS_T4_TH_LOW_MSK, + .ifs_clm_tx_cnt_addr = R_IFS_CLM_TX_CNT, + .ifs_clm_edcca_excl_cca_fa_mask = B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK, + .ifs_clm_tx_cnt_msk = B_IFS_CLM_TX_CNT_MSK, + .ifs_clm_cca_addr = R_IFS_CLM_CCA, + .ifs_clm_ofdmcca_excl_fa_mask = B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK, + .ifs_clm_cckcca_excl_fa_mask = B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK, + .ifs_clm_fa_addr = R_IFS_CLM_FA, + .ifs_clm_ofdm_fa_mask = B_IFS_CLM_OFDM_FA_MSK, + .ifs_clm_cck_fa_mask = B_IFS_CLM_CCK_FA_MSK, + .ifs_his_addr = 
R_IFS_HIS, + .ifs_t4_his_mask = B_IFS_T4_HIS_MSK, + .ifs_t3_his_mask = B_IFS_T3_HIS_MSK, + .ifs_t2_his_mask = B_IFS_T2_HIS_MSK, + .ifs_t1_his_mask = B_IFS_T1_HIS_MSK, + .ifs_avg_l_addr = R_IFS_AVG_L, + .ifs_t2_avg_mask = B_IFS_T2_AVG_MSK, + .ifs_t1_avg_mask = B_IFS_T1_AVG_MSK, + .ifs_avg_h_addr = R_IFS_AVG_H, + .ifs_t4_avg_mask = B_IFS_T4_AVG_MSK, + .ifs_t3_avg_mask = B_IFS_T3_AVG_MSK, + .ifs_cca_l_addr = R_IFS_CCA_L, + .ifs_t2_cca_mask = B_IFS_T2_CCA_MSK, + .ifs_t1_cca_mask = B_IFS_T1_CCA_MSK, + .ifs_cca_h_addr = R_IFS_CCA_H, + .ifs_t4_cca_mask = B_IFS_T4_CCA_MSK, + .ifs_t3_cca_mask = B_IFS_T3_CCA_MSK, + .ifs_total_addr = R_IFSCNT, + .ifs_cnt_done_mask = B_IFSCNT_DONE_MSK, + .ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK, +}; + +static const struct rtw89_physts_regs rtw89_physts_regs_ax = { + .setting_addr = R_PLCP_HISTOGRAM, + .dis_trigger_fail_mask = B_STS_DIS_TRIG_BY_FAIL, + .dis_trigger_brk_mask = B_STS_DIS_TRIG_BY_BRK, +}; + +static const struct rtw89_cfo_regs rtw89_cfo_regs_ax = { + .comp = R_DCFO_WEIGHT, + .weighting_mask = B_DCFO_WEIGHT_MSK, + .comp_seg0 = R_DCFO_OPT, + .valid_0_mask = B_DCFO_OPT_EN, +}; + +const struct rtw89_phy_gen_def rtw89_phy_gen_ax = { + .cr_base = 0x10000, + .ccx = &rtw89_ccx_regs_ax, + .physts = &rtw89_physts_regs_ax, + .cfo = &rtw89_cfo_regs_ax, + + .set_txpwr_byrate = rtw89_phy_set_txpwr_byrate_ax, + .set_txpwr_offset = rtw89_phy_set_txpwr_offset_ax, + .set_txpwr_limit = rtw89_phy_set_txpwr_limit_ax, + .set_txpwr_limit_ru = rtw89_phy_set_txpwr_limit_ru_ax, +}; +EXPORT_SYMBOL(rtw89_phy_gen_ax); diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h index ab174a0ba488..5c85122e7bb5 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.h +++ b/drivers/net/wireless/realtek/rtw89/phy.h @@ -7,7 +7,6 @@ #include "core.h" -#define RTW89_PHY_ADDR_OFFSET 0x10000 #define RTW89_RF_ADDR_ADSEL_MASK BIT(16) #define get_phy_headline(addr) FIELD_GET(GENMASK(31, 28), addr) @@ -47,6 +46,11 @@ #define RA_MASK_HE_3SS_RATES GENMASK_ULL(47, 36) #define RA_MASK_HE_4SS_RATES GENMASK_ULL(59, 48) #define RA_MASK_HE_RATES GENMASK_ULL(59, 12) +#define RA_MASK_EHT_1SS_RATES GENMASK_ULL(27, 12) +#define RA_MASK_EHT_2SS_RATES GENMASK_ULL(43, 28) +#define RA_MASK_EHT_3SS_RATES GENMASK_ULL(59, 44) +#define RA_MASK_EHT_4SS_RATES GENMASK_ULL(62, 60) +#define RA_MASK_EHT_RATES GENMASK_ULL(62, 12) #define CFO_TRK_ENABLE_TH (2 << 2) #define CFO_TRK_STOP_TH_4 (30 << 2) @@ -337,61 +341,241 @@ struct rtw89_nbi_reg_def { struct rtw89_reg_def notch2_en; }; +struct rtw89_ccx_regs { + u32 setting_addr; + u32 edcca_opt_mask; + u32 measurement_trig_mask; + u32 trig_opt_mask; + u32 en_mask; + u32 ifs_cnt_addr; + u32 ifs_clm_period_mask; + u32 ifs_clm_cnt_unit_mask; + u32 ifs_clm_cnt_clear_mask; + u32 ifs_collect_en_mask; + u32 ifs_t1_addr; + u32 ifs_t1_th_h_mask; + u32 ifs_t1_en_mask; + u32 ifs_t1_th_l_mask; + u32 ifs_t2_addr; + u32 ifs_t2_th_h_mask; + u32 ifs_t2_en_mask; + u32 ifs_t2_th_l_mask; + u32 ifs_t3_addr; + u32 ifs_t3_th_h_mask; + u32 ifs_t3_en_mask; + u32 ifs_t3_th_l_mask; + u32 ifs_t4_addr; + u32 ifs_t4_th_h_mask; + u32 ifs_t4_en_mask; + u32 ifs_t4_th_l_mask; + u32 ifs_clm_tx_cnt_addr; + u32 ifs_clm_edcca_excl_cca_fa_mask; + u32 ifs_clm_tx_cnt_msk; + u32 ifs_clm_cca_addr; + u32 ifs_clm_ofdmcca_excl_fa_mask; + u32 ifs_clm_cckcca_excl_fa_mask; + u32 ifs_clm_fa_addr; + u32 ifs_clm_ofdm_fa_mask; + u32 ifs_clm_cck_fa_mask; + u32 ifs_his_addr; + u32 ifs_t4_his_mask; + u32 ifs_t3_his_mask; + u32 ifs_t2_his_mask; + u32 ifs_t1_his_mask; + u32 ifs_avg_l_addr; + u32 
ifs_t2_avg_mask; + u32 ifs_t1_avg_mask; + u32 ifs_avg_h_addr; + u32 ifs_t4_avg_mask; + u32 ifs_t3_avg_mask; + u32 ifs_cca_l_addr; + u32 ifs_t2_cca_mask; + u32 ifs_t1_cca_mask; + u32 ifs_cca_h_addr; + u32 ifs_t4_cca_mask; + u32 ifs_t3_cca_mask; + u32 ifs_total_addr; + u32 ifs_cnt_done_mask; + u32 ifs_total_mask; +}; + +struct rtw89_physts_regs { + u32 setting_addr; + u32 dis_trigger_fail_mask; + u32 dis_trigger_brk_mask; +}; + +struct rtw89_cfo_regs { + u32 comp; + u32 weighting_mask; + u32 comp_seg0; + u32 valid_0_mask; +}; + +enum rtw89_bandwidth_section_num_ax { + RTW89_BW20_SEC_NUM_AX = 8, + RTW89_BW40_SEC_NUM_AX = 4, + RTW89_BW80_SEC_NUM_AX = 2, +}; + +enum rtw89_bandwidth_section_num_be { + RTW89_BW20_SEC_NUM_BE = 16, + RTW89_BW40_SEC_NUM_BE = 8, + RTW89_BW80_SEC_NUM_BE = 4, + RTW89_BW160_SEC_NUM_BE = 2, +}; + +#define RTW89_TXPWR_LMT_PAGE_SIZE_AX 40 + +struct rtw89_txpwr_limit_ax { + s8 cck_20m[RTW89_BF_NUM]; + s8 cck_40m[RTW89_BF_NUM]; + s8 ofdm[RTW89_BF_NUM]; + s8 mcs_20m[RTW89_BW20_SEC_NUM_AX][RTW89_BF_NUM]; + s8 mcs_40m[RTW89_BW40_SEC_NUM_AX][RTW89_BF_NUM]; + s8 mcs_80m[RTW89_BW80_SEC_NUM_AX][RTW89_BF_NUM]; + s8 mcs_160m[RTW89_BF_NUM]; + s8 mcs_40m_0p5[RTW89_BF_NUM]; + s8 mcs_40m_2p5[RTW89_BF_NUM]; +}; + +#define RTW89_TXPWR_LMT_PAGE_SIZE_BE 76 + +struct rtw89_txpwr_limit_be { + s8 cck_20m[RTW89_BF_NUM]; + s8 cck_40m[RTW89_BF_NUM]; + s8 ofdm[RTW89_BF_NUM]; + s8 mcs_20m[RTW89_BW20_SEC_NUM_BE][RTW89_BF_NUM]; + s8 mcs_40m[RTW89_BW40_SEC_NUM_BE][RTW89_BF_NUM]; + s8 mcs_80m[RTW89_BW80_SEC_NUM_BE][RTW89_BF_NUM]; + s8 mcs_160m[RTW89_BW160_SEC_NUM_BE][RTW89_BF_NUM]; + s8 mcs_320m[RTW89_BF_NUM]; + s8 mcs_40m_0p5[RTW89_BF_NUM]; + s8 mcs_40m_2p5[RTW89_BF_NUM]; + s8 mcs_40m_4p5[RTW89_BF_NUM]; + s8 mcs_40m_6p5[RTW89_BF_NUM]; +}; + +#define RTW89_RU_SEC_NUM_AX 8 + +#define RTW89_TXPWR_LMT_RU_PAGE_SIZE_AX 24 + +struct rtw89_txpwr_limit_ru_ax { + s8 ru26[RTW89_RU_SEC_NUM_AX]; + s8 ru52[RTW89_RU_SEC_NUM_AX]; + s8 ru106[RTW89_RU_SEC_NUM_AX]; +}; + +#define RTW89_RU_SEC_NUM_BE 16 + +#define RTW89_TXPWR_LMT_RU_PAGE_SIZE_BE 80 + +struct rtw89_txpwr_limit_ru_be { + s8 ru26[RTW89_RU_SEC_NUM_BE]; + s8 ru52[RTW89_RU_SEC_NUM_BE]; + s8 ru106[RTW89_RU_SEC_NUM_BE]; + s8 ru52_26[RTW89_RU_SEC_NUM_BE]; + s8 ru106_26[RTW89_RU_SEC_NUM_BE]; +}; + +struct rtw89_phy_gen_def { + u32 cr_base; + const struct rtw89_ccx_regs *ccx; + const struct rtw89_physts_regs *physts; + const struct rtw89_cfo_regs *cfo; + + void (*set_txpwr_byrate)(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx); + void (*set_txpwr_offset)(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx); + void (*set_txpwr_limit)(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx); + void (*set_txpwr_limit_ru)(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx); +}; + +extern const struct rtw89_phy_gen_def rtw89_phy_gen_ax; +extern const struct rtw89_phy_gen_def rtw89_phy_gen_be; + static inline void rtw89_phy_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data) { - rtw89_write8(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, data); + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + rtw89_write8(rtwdev, addr + phy->cr_base, data); } static inline void rtw89_phy_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data) { - rtw89_write16(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, data); + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + rtw89_write16(rtwdev, addr + phy->cr_base, data); } static inline 
void rtw89_phy_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data) { - rtw89_write32(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, data); + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + rtw89_write32(rtwdev, addr + phy->cr_base, data); } static inline void rtw89_phy_write32_set(struct rtw89_dev *rtwdev, u32 addr, u32 bits) { - rtw89_write32_set(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, bits); + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + rtw89_write32_set(rtwdev, addr + phy->cr_base, bits); } static inline void rtw89_phy_write32_clr(struct rtw89_dev *rtwdev, u32 addr, u32 bits) { - rtw89_write32_clr(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, bits); + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + rtw89_write32_clr(rtwdev, addr + phy->cr_base, bits); } static inline void rtw89_phy_write32_mask(struct rtw89_dev *rtwdev, u32 addr, u32 mask, u32 data) { - rtw89_write32_mask(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, mask, data); + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + rtw89_write32_mask(rtwdev, addr + phy->cr_base, mask, data); } static inline u8 rtw89_phy_read8(struct rtw89_dev *rtwdev, u32 addr) { - return rtw89_read8(rtwdev, addr | RTW89_PHY_ADDR_OFFSET); + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + return rtw89_read8(rtwdev, addr + phy->cr_base); } static inline u16 rtw89_phy_read16(struct rtw89_dev *rtwdev, u32 addr) { - return rtw89_read16(rtwdev, addr | RTW89_PHY_ADDR_OFFSET); + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + return rtw89_read16(rtwdev, addr + phy->cr_base); } static inline u32 rtw89_phy_read32(struct rtw89_dev *rtwdev, u32 addr) { - return rtw89_read32(rtwdev, addr | RTW89_PHY_ADDR_OFFSET); + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + return rtw89_read32(rtwdev, addr + phy->cr_base); } static inline u32 rtw89_phy_read32_mask(struct rtw89_dev *rtwdev, u32 addr, u32 mask) { - return rtw89_read32_mask(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, mask); + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + return rtw89_read32_mask(rtwdev, addr + phy->cr_base, mask); } static inline @@ -521,22 +705,58 @@ void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask, u32 data, enum rtw89_phy_idx phy_idx); u32 rtw89_phy_read32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask, enum rtw89_phy_idx phy_idx); +s8 *rtw89_phy_raw_byr_seek(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_byrate *head, + const struct rtw89_rate_desc *desc); +s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw, + const struct rtw89_rate_desc *rate_desc); void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev, const struct rtw89_txpwr_table *tbl); s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band, u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch); +s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band, + u8 ru, u8 ntx, u8 ch); + +static inline void rtw89_phy_set_txpwr_byrate(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx); + enum rtw89_phy_idx phy_idx) +{ + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + phy->set_txpwr_byrate(rtwdev, chan, phy_idx); +} + +static inline void rtw89_phy_set_txpwr_offset(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx); + enum rtw89_phy_idx phy_idx) +{ + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + phy->set_txpwr_offset(rtwdev, chan, phy_idx); +} + +static inline void 
rtw89_phy_set_txpwr_limit(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx); + enum rtw89_phy_idx phy_idx) +{ + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + phy->set_txpwr_limit(rtwdev, chan, phy_idx); +} + +static inline void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx); + enum rtw89_phy_idx phy_idx) +{ + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; + + phy->set_txpwr_limit_ru(rtwdev, chan, phy_idx); +} + void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta); void rtw89_phy_ra_update(struct rtw89_dev *rtwdev); void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta, diff --git a/drivers/net/wireless/realtek/rtw89/phy_be.c b/drivers/net/wireless/realtek/rtw89/phy_be.c new file mode 100644 index 000000000000..63eeeea72b68 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/phy_be.c @@ -0,0 +1,653 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2023 Realtek Corporation + */ + +#include "debug.h" +#include "mac.h" +#include "phy.h" +#include "reg.h" + +static const struct rtw89_ccx_regs rtw89_ccx_regs_be = { + .setting_addr = R_CCX, + .edcca_opt_mask = B_CCX_EDCCA_OPT_MSK_V1, + .measurement_trig_mask = B_MEASUREMENT_TRIG_MSK, + .trig_opt_mask = B_CCX_TRIG_OPT_MSK, + .en_mask = B_CCX_EN_MSK, + .ifs_cnt_addr = R_IFS_COUNTER, + .ifs_clm_period_mask = B_IFS_CLM_PERIOD_MSK, + .ifs_clm_cnt_unit_mask = B_IFS_CLM_COUNTER_UNIT_MSK, + .ifs_clm_cnt_clear_mask = B_IFS_COUNTER_CLR_MSK, + .ifs_collect_en_mask = B_IFS_COLLECT_EN, + .ifs_t1_addr = R_IFS_T1, + .ifs_t1_th_h_mask = B_IFS_T1_TH_HIGH_MSK, + .ifs_t1_en_mask = B_IFS_T1_EN_MSK, + .ifs_t1_th_l_mask = B_IFS_T1_TH_LOW_MSK, + .ifs_t2_addr = R_IFS_T2, + .ifs_t2_th_h_mask = B_IFS_T2_TH_HIGH_MSK, + .ifs_t2_en_mask = B_IFS_T2_EN_MSK, + .ifs_t2_th_l_mask = B_IFS_T2_TH_LOW_MSK, + .ifs_t3_addr = R_IFS_T3, + .ifs_t3_th_h_mask = B_IFS_T3_TH_HIGH_MSK, + .ifs_t3_en_mask = B_IFS_T3_EN_MSK, + .ifs_t3_th_l_mask = B_IFS_T3_TH_LOW_MSK, + .ifs_t4_addr = R_IFS_T4, + .ifs_t4_th_h_mask = B_IFS_T4_TH_HIGH_MSK, + .ifs_t4_en_mask = B_IFS_T4_EN_MSK, + .ifs_t4_th_l_mask = B_IFS_T4_TH_LOW_MSK, + .ifs_clm_tx_cnt_addr = R_IFS_CLM_TX_CNT_V1, + .ifs_clm_edcca_excl_cca_fa_mask = B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK, + .ifs_clm_tx_cnt_msk = B_IFS_CLM_TX_CNT_MSK, + .ifs_clm_cca_addr = R_IFS_CLM_CCA_V1, + .ifs_clm_ofdmcca_excl_fa_mask = B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK, + .ifs_clm_cckcca_excl_fa_mask = B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK, + .ifs_clm_fa_addr = R_IFS_CLM_FA_V1, + .ifs_clm_ofdm_fa_mask = B_IFS_CLM_OFDM_FA_MSK, + .ifs_clm_cck_fa_mask = B_IFS_CLM_CCK_FA_MSK, + .ifs_his_addr = R_IFS_HIS_V1, + .ifs_t4_his_mask = B_IFS_T4_HIS_MSK, + .ifs_t3_his_mask = B_IFS_T3_HIS_MSK, + .ifs_t2_his_mask = B_IFS_T2_HIS_MSK, + .ifs_t1_his_mask = B_IFS_T1_HIS_MSK, + .ifs_avg_l_addr = R_IFS_AVG_L_V1, + .ifs_t2_avg_mask = B_IFS_T2_AVG_MSK, + .ifs_t1_avg_mask = B_IFS_T1_AVG_MSK, + .ifs_avg_h_addr = R_IFS_AVG_H_V1, + .ifs_t4_avg_mask = B_IFS_T4_AVG_MSK, + .ifs_t3_avg_mask = B_IFS_T3_AVG_MSK, + .ifs_cca_l_addr = R_IFS_CCA_L_V1, + .ifs_t2_cca_mask = B_IFS_T2_CCA_MSK, + .ifs_t1_cca_mask = B_IFS_T1_CCA_MSK, + .ifs_cca_h_addr = R_IFS_CCA_H_V1, + .ifs_t4_cca_mask = B_IFS_T4_CCA_MSK, + .ifs_t3_cca_mask = B_IFS_T3_CCA_MSK, + .ifs_total_addr = R_IFSCNT_V1, + .ifs_cnt_done_mask = B_IFSCNT_DONE_MSK, + .ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK, +}; + +static const struct rtw89_physts_regs 
rtw89_physts_regs_be = { + .setting_addr = R_PLCP_HISTOGRAM, + .dis_trigger_fail_mask = B_STS_DIS_TRIG_BY_FAIL, + .dis_trigger_brk_mask = B_STS_DIS_TRIG_BY_BRK, +}; + +static const struct rtw89_cfo_regs rtw89_cfo_regs_be = { + .comp = R_DCFO_WEIGHT_V1, + .weighting_mask = B_DCFO_WEIGHT_MSK_V1, + .comp_seg0 = R_DCFO_OPT_V1, + .valid_0_mask = B_DCFO_OPT_EN_V1, +}; + +struct rtw89_byr_spec_ent_be { + struct rtw89_rate_desc init; + u8 num_of_idx; + bool no_over_bw40; + bool no_multi_nss; +}; + +static const struct rtw89_byr_spec_ent_be rtw89_byr_spec_be[] = { + { + .init = { .rs = RTW89_RS_CCK }, + .num_of_idx = RTW89_RATE_CCK_NUM, + .no_over_bw40 = true, + .no_multi_nss = true, + }, + { + .init = { .rs = RTW89_RS_OFDM }, + .num_of_idx = RTW89_RATE_OFDM_NUM, + .no_multi_nss = true, + }, + { + .init = { .rs = RTW89_RS_MCS, .idx = 14, .ofdma = RTW89_NON_OFDMA }, + .num_of_idx = 2, + .no_multi_nss = true, + }, + { + .init = { .rs = RTW89_RS_MCS, .idx = 14, .ofdma = RTW89_OFDMA }, + .num_of_idx = 2, + .no_multi_nss = true, + }, + { + .init = { .rs = RTW89_RS_MCS, .ofdma = RTW89_NON_OFDMA }, + .num_of_idx = 14, + }, + { + .init = { .rs = RTW89_RS_HEDCM, .ofdma = RTW89_NON_OFDMA }, + .num_of_idx = RTW89_RATE_HEDCM_NUM, + }, + { + .init = { .rs = RTW89_RS_MCS, .ofdma = RTW89_OFDMA }, + .num_of_idx = 14, + }, + { + .init = { .rs = RTW89_RS_HEDCM, .ofdma = RTW89_OFDMA }, + .num_of_idx = RTW89_RATE_HEDCM_NUM, + }, +}; + +static +void __phy_set_txpwr_byrate_be(struct rtw89_dev *rtwdev, u8 band, u8 bw, + u8 nss, u32 *addr, enum rtw89_phy_idx phy_idx) +{ + const struct rtw89_byr_spec_ent_be *ent; + struct rtw89_rate_desc desc; + int pos = 0; + int i, j; + u32 val; + s8 v[4]; + + for (i = 0; i < ARRAY_SIZE(rtw89_byr_spec_be); i++) { + ent = &rtw89_byr_spec_be[i]; + + if (bw > RTW89_CHANNEL_WIDTH_40 && ent->no_over_bw40) + continue; + if (nss > RTW89_NSS_1 && ent->no_multi_nss) + continue; + + desc = ent->init; + desc.nss = nss; + for (j = 0; j < ent->num_of_idx; j++, desc.idx++) { + v[pos] = rtw89_phy_read_txpwr_byrate(rtwdev, band, bw, + &desc); + pos = (pos + 1) % 4; + if (pos) + continue; + + val = u32_encode_bits(v[0], GENMASK(7, 0)) | + u32_encode_bits(v[1], GENMASK(15, 8)) | + u32_encode_bits(v[2], GENMASK(23, 16)) | + u32_encode_bits(v[3], GENMASK(31, 24)); + + rtw89_mac_txpwr_write32(rtwdev, phy_idx, *addr, val); + *addr += 4; + } + } +} + +static void rtw89_phy_set_txpwr_byrate_be(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + u32 addr = R_BE_PWR_BY_RATE; + u8 band = chan->band_type; + u8 bw, nss; + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, + "[TXPWR] set txpwr byrate on band %d\n", band); + + for (bw = 0; bw <= RTW89_CHANNEL_WIDTH_320; bw++) + for (nss = 0; nss <= RTW89_NSS_2; nss++) + __phy_set_txpwr_byrate_be(rtwdev, band, bw, nss, + &addr, phy_idx); +} + +static void rtw89_phy_set_txpwr_offset_be(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + struct rtw89_rate_desc desc = { + .nss = RTW89_NSS_1, + .rs = RTW89_RS_OFFSET, + }; + u8 band = chan->band_type; + s8 v[RTW89_RATE_OFFSET_NUM_BE] = {}; + u32 val; + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, + "[TXPWR] set txpwr offset on band %d\n", band); + + for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_NUM_BE; desc.idx++) + v[desc.idx] = rtw89_phy_read_txpwr_byrate(rtwdev, band, 0, &desc); + + val = u32_encode_bits(v[RTW89_RATE_OFFSET_CCK], GENMASK(3, 0)) | + u32_encode_bits(v[RTW89_RATE_OFFSET_OFDM], GENMASK(7, 4)) | + u32_encode_bits(v[RTW89_RATE_OFFSET_HT], 
GENMASK(11, 8)) | + u32_encode_bits(v[RTW89_RATE_OFFSET_VHT], GENMASK(15, 12)) | + u32_encode_bits(v[RTW89_RATE_OFFSET_HE], GENMASK(19, 16)) | + u32_encode_bits(v[RTW89_RATE_OFFSET_EHT], GENMASK(23, 20)) | + u32_encode_bits(v[RTW89_RATE_OFFSET_DLRU_HE], GENMASK(27, 24)) | + u32_encode_bits(v[RTW89_RATE_OFFSET_DLRU_EHT], GENMASK(31, 28)); + + rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_BE_PWR_RATE_OFST_CTRL, val); +} + +static void +fill_limit_nonbf_bf(struct rtw89_dev *rtwdev, s8 (*ptr)[RTW89_BF_NUM], + u8 band, u8 bw, u8 ntx, u8 rs, u8 ch) +{ + int bf; + + for (bf = 0; bf < RTW89_BF_NUM; bf++) + (*ptr)[bf] = rtw89_phy_read_txpwr_limit(rtwdev, band, bw, ntx, + rs, bf, ch); +} + +static void +fill_limit_nonbf_bf_min(struct rtw89_dev *rtwdev, s8 (*ptr)[RTW89_BF_NUM], + u8 band, u8 bw, u8 ntx, u8 rs, u8 ch1, u8 ch2) +{ + s8 v1[RTW89_BF_NUM]; + s8 v2[RTW89_BF_NUM]; + int bf; + + fill_limit_nonbf_bf(rtwdev, &v1, band, bw, ntx, rs, ch1); + fill_limit_nonbf_bf(rtwdev, &v2, band, bw, ntx, rs, ch2); + + for (bf = 0; bf < RTW89_BF_NUM; bf++) + (*ptr)[bf] = min(v1[bf], v2[bf]); +} + +static void phy_fill_limit_20m_be(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_be *lmt, + u8 band, u8 ntx, u8 ch) +{ + fill_limit_nonbf_bf(rtwdev, &lmt->cck_20m, band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_CCK, ch); + fill_limit_nonbf_bf(rtwdev, &lmt->cck_40m, band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_CCK, ch); + fill_limit_nonbf_bf(rtwdev, &lmt->ofdm, band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_OFDM, ch); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[0], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch); +} + +static void phy_fill_limit_40m_be(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_be *lmt, + u8 band, u8 ntx, u8 ch, u8 pri_ch) +{ + fill_limit_nonbf_bf(rtwdev, &lmt->cck_20m, band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_CCK, ch - 2); + fill_limit_nonbf_bf(rtwdev, &lmt->cck_40m, band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_CCK, ch); + + fill_limit_nonbf_bf(rtwdev, &lmt->ofdm, band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_OFDM, pri_ch); + + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[0], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 2); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[1], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 2); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[0], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch); +} + +static void phy_fill_limit_80m_be(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_be *lmt, + u8 band, u8 ntx, u8 ch, u8 pri_ch) +{ + fill_limit_nonbf_bf(rtwdev, &lmt->ofdm, band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_OFDM, pri_ch); + + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[0], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 6); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[1], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 2); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[2], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 2); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[3], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 6); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[0], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 4); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[1], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 4); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[0], band, + RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch); + + fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_0p5, band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, + ch - 4, ch + 4); +} + +static void 
phy_fill_limit_160m_be(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_be *lmt, + u8 band, u8 ntx, u8 ch, u8 pri_ch) +{ + fill_limit_nonbf_bf(rtwdev, &lmt->ofdm, band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_OFDM, pri_ch); + + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[0], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 14); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[1], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 10); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[2], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 6); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[3], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 2); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[4], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 2); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[5], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 6); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[6], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 10); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[7], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 14); + + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[0], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 12); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[1], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 4); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[2], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 4); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[3], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 12); + + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[0], band, + RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch - 8); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[1], band, + RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch + 8); + + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_160m[0], band, + RTW89_CHANNEL_WIDTH_160, ntx, RTW89_RS_MCS, ch); + + fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_0p5, band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, + ch - 12, ch - 4); + fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_2p5, band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, + ch + 4, ch + 12); +} + +static void phy_fill_limit_320m_be(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_be *lmt, + u8 band, u8 ntx, u8 ch, u8 pri_ch) +{ + fill_limit_nonbf_bf(rtwdev, &lmt->ofdm, band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_OFDM, pri_ch); + + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[0], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 30); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[1], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 26); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[2], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 22); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[3], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 18); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[4], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 14); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[5], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 10); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[6], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 6); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[7], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch - 2); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[8], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 2); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[9], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 6); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[10], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 10); 
+ fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[11], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 14); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[12], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 18); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[13], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 22); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[14], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 26); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_20m[15], band, + RTW89_CHANNEL_WIDTH_20, ntx, RTW89_RS_MCS, ch + 30); + + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[0], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 28); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[1], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 20); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[2], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 12); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[3], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch - 4); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[4], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 4); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[5], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 12); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[6], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 20); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_40m[7], band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, ch + 28); + + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[0], band, + RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch - 24); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[1], band, + RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch - 8); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[2], band, + RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch + 8); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_80m[3], band, + RTW89_CHANNEL_WIDTH_80, ntx, RTW89_RS_MCS, ch + 24); + + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_160m[0], band, + RTW89_CHANNEL_WIDTH_160, ntx, RTW89_RS_MCS, ch - 16); + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_160m[1], band, + RTW89_CHANNEL_WIDTH_160, ntx, RTW89_RS_MCS, ch + 16); + + fill_limit_nonbf_bf(rtwdev, &lmt->mcs_320m, band, + RTW89_CHANNEL_WIDTH_320, ntx, RTW89_RS_MCS, ch); + + fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_0p5, band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, + ch - 28, ch - 20); + fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_2p5, band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, + ch - 12, ch - 4); + fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_4p5, band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, + ch + 4, ch + 12); + fill_limit_nonbf_bf_min(rtwdev, &lmt->mcs_40m_6p5, band, + RTW89_CHANNEL_WIDTH_40, ntx, RTW89_RS_MCS, + ch + 20, ch + 28); +} + +static void rtw89_phy_fill_limit_be(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + struct rtw89_txpwr_limit_be *lmt, + u8 ntx) +{ + u8 band = chan->band_type; + u8 pri_ch = chan->primary_channel; + u8 ch = chan->channel; + u8 bw = chan->band_width; + + memset(lmt, 0, sizeof(*lmt)); + + switch (bw) { + case RTW89_CHANNEL_WIDTH_20: + phy_fill_limit_20m_be(rtwdev, lmt, band, ntx, ch); + break; + case RTW89_CHANNEL_WIDTH_40: + phy_fill_limit_40m_be(rtwdev, lmt, band, ntx, ch, pri_ch); + break; + case RTW89_CHANNEL_WIDTH_80: + phy_fill_limit_80m_be(rtwdev, lmt, band, ntx, ch, pri_ch); + break; + case RTW89_CHANNEL_WIDTH_160: + phy_fill_limit_160m_be(rtwdev, lmt, band, ntx, ch, pri_ch); + break; + case RTW89_CHANNEL_WIDTH_320: + phy_fill_limit_320m_be(rtwdev, lmt, band, ntx, ch, pri_ch); + break; + } +} + +static void 
rtw89_phy_set_txpwr_limit_be(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + struct rtw89_txpwr_limit_be lmt; + const s8 *ptr; + u32 addr, val; + u8 i, j; + + BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_be) != + RTW89_TXPWR_LMT_PAGE_SIZE_BE); + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, + "[TXPWR] set txpwr limit on band %d bw %d\n", + chan->band_type, chan->band_width); + + addr = R_BE_PWR_LMT; + for (i = 0; i <= RTW89_NSS_2; i++) { + rtw89_phy_fill_limit_be(rtwdev, chan, &lmt, i); + + ptr = (s8 *)&lmt; + for (j = 0; j < RTW89_TXPWR_LMT_PAGE_SIZE_BE; + j += 4, addr += 4, ptr += 4) { + val = u32_encode_bits(ptr[0], GENMASK(7, 0)) | + u32_encode_bits(ptr[1], GENMASK(15, 8)) | + u32_encode_bits(ptr[2], GENMASK(23, 16)) | + u32_encode_bits(ptr[3], GENMASK(31, 24)); + + rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val); + } + } +} + +static void fill_limit_ru_each(struct rtw89_dev *rtwdev, u8 index, + struct rtw89_txpwr_limit_ru_be *lmt_ru, + u8 band, u8 ntx, u8 ch) +{ + lmt_ru->ru26[index] = + rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU26, ntx, ch); + lmt_ru->ru52[index] = + rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU52, ntx, ch); + lmt_ru->ru106[index] = + rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU106, ntx, ch); + lmt_ru->ru52_26[index] = + rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU52_26, ntx, ch); + lmt_ru->ru106_26[index] = + rtw89_phy_read_txpwr_limit_ru(rtwdev, band, RTW89_RU106_26, ntx, ch); +} + +static void phy_fill_limit_ru_20m_be(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ru_be *lmt_ru, + u8 band, u8 ntx, u8 ch) +{ + fill_limit_ru_each(rtwdev, 0, lmt_ru, band, ntx, ch); +} + +static void phy_fill_limit_ru_40m_be(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ru_be *lmt_ru, + u8 band, u8 ntx, u8 ch) +{ + fill_limit_ru_each(rtwdev, 0, lmt_ru, band, ntx, ch - 2); + fill_limit_ru_each(rtwdev, 1, lmt_ru, band, ntx, ch + 2); +} + +static void phy_fill_limit_ru_80m_be(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ru_be *lmt_ru, + u8 band, u8 ntx, u8 ch) +{ + fill_limit_ru_each(rtwdev, 0, lmt_ru, band, ntx, ch - 6); + fill_limit_ru_each(rtwdev, 1, lmt_ru, band, ntx, ch - 2); + fill_limit_ru_each(rtwdev, 2, lmt_ru, band, ntx, ch + 2); + fill_limit_ru_each(rtwdev, 3, lmt_ru, band, ntx, ch + 6); +} + +static void phy_fill_limit_ru_160m_be(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ru_be *lmt_ru, + u8 band, u8 ntx, u8 ch) +{ + fill_limit_ru_each(rtwdev, 0, lmt_ru, band, ntx, ch - 14); + fill_limit_ru_each(rtwdev, 1, lmt_ru, band, ntx, ch - 10); + fill_limit_ru_each(rtwdev, 2, lmt_ru, band, ntx, ch - 6); + fill_limit_ru_each(rtwdev, 3, lmt_ru, band, ntx, ch - 2); + fill_limit_ru_each(rtwdev, 4, lmt_ru, band, ntx, ch + 2); + fill_limit_ru_each(rtwdev, 5, lmt_ru, band, ntx, ch + 6); + fill_limit_ru_each(rtwdev, 6, lmt_ru, band, ntx, ch + 10); + fill_limit_ru_each(rtwdev, 7, lmt_ru, band, ntx, ch + 14); +} + +static void phy_fill_limit_ru_320m_be(struct rtw89_dev *rtwdev, + struct rtw89_txpwr_limit_ru_be *lmt_ru, + u8 band, u8 ntx, u8 ch) +{ + fill_limit_ru_each(rtwdev, 0, lmt_ru, band, ntx, ch - 30); + fill_limit_ru_each(rtwdev, 1, lmt_ru, band, ntx, ch - 26); + fill_limit_ru_each(rtwdev, 2, lmt_ru, band, ntx, ch - 22); + fill_limit_ru_each(rtwdev, 3, lmt_ru, band, ntx, ch - 18); + fill_limit_ru_each(rtwdev, 4, lmt_ru, band, ntx, ch - 14); + fill_limit_ru_each(rtwdev, 5, lmt_ru, band, ntx, ch - 10); + fill_limit_ru_each(rtwdev, 6, lmt_ru, band, ntx, ch - 6); + 
fill_limit_ru_each(rtwdev, 7, lmt_ru, band, ntx, ch - 2); + fill_limit_ru_each(rtwdev, 8, lmt_ru, band, ntx, ch + 2); + fill_limit_ru_each(rtwdev, 9, lmt_ru, band, ntx, ch + 6); + fill_limit_ru_each(rtwdev, 10, lmt_ru, band, ntx, ch + 10); + fill_limit_ru_each(rtwdev, 11, lmt_ru, band, ntx, ch + 14); + fill_limit_ru_each(rtwdev, 12, lmt_ru, band, ntx, ch + 18); + fill_limit_ru_each(rtwdev, 13, lmt_ru, band, ntx, ch + 22); + fill_limit_ru_each(rtwdev, 14, lmt_ru, band, ntx, ch + 26); + fill_limit_ru_each(rtwdev, 15, lmt_ru, band, ntx, ch + 30); +} + +static void rtw89_phy_fill_limit_ru_be(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + struct rtw89_txpwr_limit_ru_be *lmt_ru, + u8 ntx) +{ + u8 band = chan->band_type; + u8 ch = chan->channel; + u8 bw = chan->band_width; + + memset(lmt_ru, 0, sizeof(*lmt_ru)); + + switch (bw) { + case RTW89_CHANNEL_WIDTH_20: + phy_fill_limit_ru_20m_be(rtwdev, lmt_ru, band, ntx, ch); + break; + case RTW89_CHANNEL_WIDTH_40: + phy_fill_limit_ru_40m_be(rtwdev, lmt_ru, band, ntx, ch); + break; + case RTW89_CHANNEL_WIDTH_80: + phy_fill_limit_ru_80m_be(rtwdev, lmt_ru, band, ntx, ch); + break; + case RTW89_CHANNEL_WIDTH_160: + phy_fill_limit_ru_160m_be(rtwdev, lmt_ru, band, ntx, ch); + break; + case RTW89_CHANNEL_WIDTH_320: + phy_fill_limit_ru_320m_be(rtwdev, lmt_ru, band, ntx, ch); + break; + } +} + +static void rtw89_phy_set_txpwr_limit_ru_be(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + struct rtw89_txpwr_limit_ru_be lmt_ru; + const s8 *ptr; + u32 addr, val; + u8 i, j; + + BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ru_be) != + RTW89_TXPWR_LMT_RU_PAGE_SIZE_BE); + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, + "[TXPWR] set txpwr limit ru on band %d bw %d\n", + chan->band_type, chan->band_width); + + addr = R_BE_PWR_RU_LMT; + for (i = 0; i <= RTW89_NSS_2; i++) { + rtw89_phy_fill_limit_ru_be(rtwdev, chan, &lmt_ru, i); + + ptr = (s8 *)&lmt_ru; + for (j = 0; j < RTW89_TXPWR_LMT_RU_PAGE_SIZE_BE; + j += 4, addr += 4, ptr += 4) { + val = u32_encode_bits(ptr[0], GENMASK(7, 0)) | + u32_encode_bits(ptr[1], GENMASK(15, 8)) | + u32_encode_bits(ptr[2], GENMASK(23, 16)) | + u32_encode_bits(ptr[3], GENMASK(31, 24)); + + rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val); + } + } +} + +const struct rtw89_phy_gen_def rtw89_phy_gen_be = { + .cr_base = 0x20000, + .ccx = &rtw89_ccx_regs_be, + .physts = &rtw89_physts_regs_be, + .cfo = &rtw89_cfo_regs_be, + + .set_txpwr_byrate = rtw89_phy_set_txpwr_byrate_be, + .set_txpwr_offset = rtw89_phy_set_txpwr_offset_be, + .set_txpwr_limit = rtw89_phy_set_txpwr_limit_be, + .set_txpwr_limit_ru = rtw89_phy_set_txpwr_limit_ru_be, +}; +EXPORT_SYMBOL(rtw89_phy_gen_be); diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c index 84201ef19c17..917c01e5e9ed 100644 --- a/drivers/net/wireless/realtek/rtw89/ps.c +++ b/drivers/net/wireless/realtek/rtw89/ps.c @@ -2,6 +2,7 @@ /* Copyright(c) 2019-2020 Realtek Corporation */ +#include "chan.h" #include "coex.h" #include "core.h" #include "debug.h" @@ -257,8 +258,13 @@ void rtw89_recalc_lps(struct rtw89_dev *rtwdev) { struct ieee80211_vif *vif, *found_vif = NULL; struct rtw89_vif *rtwvif; + enum rtw89_entity_mode mode; int count = 0; + mode = rtw89_get_entity_mode(rtwdev); + if (mode == RTW89_ENTITY_MODE_MCC) + goto disable_lps; + rtw89_for_each_rtwvif(rtwdev, rtwvif) { vif = rtwvif_to_vif(rtwvif); @@ -273,8 +279,71 @@ void rtw89_recalc_lps(struct rtw89_dev *rtwdev) if (count == 1 && found_vif->cfg.ps) { 
rtwdev->lps_enabled = true; - } else { - rtw89_leave_lps(rtwdev); - rtwdev->lps_enabled = false; + return; } + +disable_lps: + rtw89_leave_lps(rtwdev); + rtwdev->lps_enabled = false; +} + +void rtw89_p2p_noa_renew(struct rtw89_vif *rtwvif) +{ + struct rtw89_p2p_noa_setter *setter = &rtwvif->p2p_noa; + struct rtw89_p2p_noa_ie *ie = &setter->ie; + struct rtw89_p2p_ie_head *p2p_head = &ie->p2p_head; + struct rtw89_noa_attr_head *noa_head = &ie->noa_head; + + if (setter->noa_count) { + setter->noa_index++; + setter->noa_count = 0; + } + + memset(ie, 0, sizeof(*ie)); + + p2p_head->eid = WLAN_EID_VENDOR_SPECIFIC; + p2p_head->ie_len = 4 + sizeof(*noa_head); + p2p_head->oui[0] = (WLAN_OUI_WFA >> 16) & 0xff; + p2p_head->oui[1] = (WLAN_OUI_WFA >> 8) & 0xff; + p2p_head->oui[2] = (WLAN_OUI_WFA >> 0) & 0xff; + p2p_head->oui_type = WLAN_OUI_TYPE_WFA_P2P; + + noa_head->attr_type = IEEE80211_P2P_ATTR_ABSENCE_NOTICE; + noa_head->attr_len = cpu_to_le16(2); + noa_head->index = setter->noa_index; + noa_head->oppps_ctwindow = 0; +} + +void rtw89_p2p_noa_append(struct rtw89_vif *rtwvif, + const struct ieee80211_p2p_noa_desc *desc) +{ + struct rtw89_p2p_noa_setter *setter = &rtwvif->p2p_noa; + struct rtw89_p2p_noa_ie *ie = &setter->ie; + struct rtw89_p2p_ie_head *p2p_head = &ie->p2p_head; + struct rtw89_noa_attr_head *noa_head = &ie->noa_head; + + if (!desc->count || !desc->duration) + return; + + if (setter->noa_count >= RTW89_P2P_MAX_NOA_NUM) + return; + + p2p_head->ie_len += sizeof(*desc); + le16_add_cpu(&noa_head->attr_len, sizeof(*desc)); + + ie->noa_desc[setter->noa_count++] = *desc; +} + +u8 rtw89_p2p_noa_fetch(struct rtw89_vif *rtwvif, void **data) +{ + struct rtw89_p2p_noa_setter *setter = &rtwvif->p2p_noa; + struct rtw89_p2p_noa_ie *ie = &setter->ie; + void *tail; + + if (!setter->noa_count) + return 0; + + *data = ie; + tail = ie->noa_desc + setter->noa_count; + return tail - *data; } diff --git a/drivers/net/wireless/realtek/rtw89/ps.h b/drivers/net/wireless/realtek/rtw89/ps.h index 4c18f49204b2..aff0fba71cb0 100644 --- a/drivers/net/wireless/realtek/rtw89/ps.h +++ b/drivers/net/wireless/realtek/rtw89/ps.h @@ -16,6 +16,10 @@ void rtw89_leave_ips(struct rtw89_dev *rtwdev); void rtw89_set_coex_ctrl_lps(struct rtw89_dev *rtwdev, bool btc_ctrl); void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif); void rtw89_recalc_lps(struct rtw89_dev *rtwdev); +void rtw89_p2p_noa_renew(struct rtw89_vif *rtwvif); +void rtw89_p2p_noa_append(struct rtw89_vif *rtwvif, + const struct ieee80211_p2p_noa_desc *desc); +u8 rtw89_p2p_noa_fetch(struct rtw89_vif *rtwvif, void **data); static inline void rtw89_leave_ips_by_hwflags(struct rtw89_dev *rtwdev) { diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 55595fde7494..ccd5481e8a3d 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -3360,9 +3360,11 @@ #define R_AX_PWR_UL_TB_1T 0xD28C #define B_AX_PWR_UL_TB_1T_MASK GENMASK(4, 0) #define B_AX_PWR_UL_TB_1T_V1_MASK GENMASK(7, 0) +#define B_AX_PWR_UL_TB_1T_NORM_BW160 GENMASK(31, 24) #define R_AX_PWR_UL_TB_2T 0xD290 #define B_AX_PWR_UL_TB_2T_MASK GENMASK(4, 0) #define B_AX_PWR_UL_TB_2T_V1_MASK GENMASK(7, 0) +#define B_AX_PWR_UL_TB_2T_NORM_BW160 GENMASK(31, 24) #define R_AX_PWR_BY_RATE_TABLE0 0xD2C0 #define R_AX_PWR_BY_RATE_TABLE6 0xD2D8 #define R_AX_PWR_BY_RATE_TABLE10 0xD2E8 @@ -3390,11 +3392,13 @@ #define AX_PATH_COM0_PATHB 0x11111900 #define AX_PATH_COM0_PATHAB 0x19999980 #define R_AX_PATH_COM1 0xD804 
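/*
 * The new BE tx-power writers above stream s8 limit tables to hardware
 * four entries at a time, packing each group into one 32-bit word with
 * u32_encode_bits() over byte-lane GENMASK()s. A small userspace sketch
 * of that packing; the GENMASK macro and encode_bits() below
 * re-implement the kernel helpers for illustration only.
 */
#include <stdint.h>
#include <assert.h>

#define GENMASK(h, l) ((~0u >> (31 - (h))) & ~((1u << (l)) - 1u))

static uint32_t encode_bits(uint32_t v, uint32_t mask)
{
	/* shift the value into the field selected by mask, then clamp */
	return (v << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	int8_t v[4] = { -2, 5, 0, 7 };	/* e.g. one row of a BE limit page */
	uint32_t val = encode_bits((uint8_t)v[0], GENMASK(7, 0)) |
		       encode_bits((uint8_t)v[1], GENMASK(15, 8)) |
		       encode_bits((uint8_t)v[2], GENMASK(23, 16)) |
		       encode_bits((uint8_t)v[3], GENMASK(31, 24));

	assert(val == 0x070005feu);	/* bytes land in ascending lanes */
	return 0;
}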
+#define B_AX_PATH_COM1_NORM_1STS GENMASK(31, 28) #define AX_PATH_COM1_DFVAL 0x00000000 #define AX_PATH_COM1_PATHA 0x13111111 #define AX_PATH_COM1_PATHB 0x23222222 #define AX_PATH_COM1_PATHAB 0x33333333 #define R_AX_PATH_COM2 0xD808 +#define B_AX_PATH_COM2_RESP_1STS_PATH GENMASK(7, 4) #define AX_PATH_COM2_DFVAL 0x00000000 #define AX_PATH_COM2_PATHA 0x01209313 #define AX_PATH_COM2_PATHB 0x01209323 @@ -3581,8 +3585,8 @@ #define R_AX_MACID_ANT_TABLE 0xDC00 #define R_AX_MACID_ANT_TABLE_LAST 0xDDFC -#define CMAC1_START_ADDR 0xE000 -#define CMAC1_END_ADDR 0xFFFF +#define CMAC1_START_ADDR_AX 0xE000 +#define CMAC1_END_ADDR_AX 0xFFFF #define R_AX_CMAC_REG_END 0xFFFF #define R_AX_LTE_SW_CFG_1 0x0038 @@ -3625,6 +3629,408 @@ #define B_AX_GNT_BT_TX_SW_VAL BIT(1) #define B_AX_GNT_BT_TX_SW_CTRL BIT(0) +#define R_BE_SYS_CLK_CTRL 0x0008 +#define B_BE_CPU_CLK_EN BIT(14) +#define B_BE_SYMR_BE_CLK_EN BIT(13) +#define B_BE_MAC_CLK_EN BIT(11) +#define B_BE_EXT_32K_EN BIT(8) +#define B_BE_WL_CLK_TEST BIT(7) +#define B_BE_LOADER_CLK_EN BIT(5) +#define B_BE_ANA_CLK_DIVISION_2 BIT(1) +#define B_BE_CNTD16V_EN BIT(0) + +#define R_BE_PLATFORM_ENABLE 0x0088 +#define B_BE_HOLD_AFTER_RESET BIT(11) +#define B_BE_SYM_WLPLT_MEM_MUX_EN BIT(10) +#define B_BE_WCPU_WARM_EN BIT(9) +#define B_BE_SPIC_EN BIT(8) +#define B_BE_UART_EN BIT(7) +#define B_BE_IDDMA_EN BIT(6) +#define B_BE_IPSEC_EN BIT(5) +#define B_BE_HIOE_EN BIT(4) +#define B_BE_APB_WRAP_EN BIT(2) +#define B_BE_WCPU_EN BIT(1) +#define B_BE_PLATFORM_EN BIT(0) + +#define R_BE_HALT_H2C_CTRL 0x0160 +#define B_BE_HALT_H2C_TRIGGER BIT(0) + +#define R_BE_HALT_C2H_CTRL 0x0164 +#define B_BE_HALT_C2H_TRIGGER BIT(0) + +#define R_BE_HALT_H2C 0x0168 +#define B_BE_HALT_H2C_MASK GENMASK(31, 0) + +#define R_BE_HALT_C2H 0x016C +#define B_BE_HALT_C2H_ERROR_SENARIO_MASK GENMASK(31, 28) +#define B_BE_ERROR_CODE_MASK GENMASK(15, 0) + +#define R_BE_SYS_CFG5 0x0170 +#define B_BE_WDT_DATACPU_WAKE_PCIE_EN BIT(12) +#define B_BE_WDT_DATACPU_WAKE_USB_EN BIT(11) +#define B_BE_WDT_WAKE_PCIE_EN BIT(10) +#define B_BE_WDT_WAKE_USB_EN BIT(9) +#define B_BE_SYM_DIS_HC_ACCESS_MAC BIT(8) +#define B_BE_LPS_STATUS BIT(3) +#define B_BE_HCI_TXDMA_BUSY BIT(2) + +#define R_BE_SECURE_BOOT_MALLOC_INFO 0x0184 + +#define R_BE_WCPU_FW_CTRL 0x01E0 +#define B_BE_RUN_ENV_MASK GENMASK(31, 30) +#define B_BE_WCPU_FWDL_STATUS_MASK GENMASK(29, 26) +#define B_BE_WDT_PLT_RST_EN BIT(17) +#define B_BE_FW_SEC_AUTH_DONE BIT(14) +#define B_BE_FW_CPU_UTIL_STS_EN BIT(13) +#define B_BE_BBMCU1_FWDL_EN BIT(12) +#define B_BE_BBMCU0_FWDL_EN BIT(11) +#define B_BE_DATACPU_FWDL_EN BIT(10) +#define B_BE_WLANCPU_FWDL_EN BIT(9) +#define B_BE_WCPU_ROM_CUT_GET BIT(8) +#define B_BE_WCPU_ROM_CUT_VAL_MASK GENMASK(7, 4) +#define B_BE_FW_BOOT_MODE_MASK GENMASK(3, 2) +#define B_BE_H2C_PATH_RDY BIT(1) +#define B_BE_DLFW_PATH_RDY BIT(0) + +#define R_BE_BOOT_REASON 0x01E6 +#define B_BE_BOOT_REASON_MASK GENMASK(2, 0) + +#define R_BE_LDM 0x01E8 +#define B_BE_EN_32K BIT(31) +#define B_BE_LDM_MASK GENMASK(30, 0) + +#define R_BE_UDM0 0x01F0 +#define B_BE_UDM0_SEND2RA_CNT_MASK GENMASK(31, 28) +#define B_BE_UDM0_TX_RPT_CNT_MASK GENMASK(27, 24) +#define B_BE_UDM0_FS_CODE_MASK GENMASK(23, 8) +#define B_BE_NULL_POINTER_INDC BIT(7) +#define B_BE_ROM_ASSERT_INDC BIT(6) +#define B_BE_RAM_ASSERT_INDC BIT(5) +#define B_BE_FW_IMAGE_TYPE BIT(4) +#define B_BE_UDM0_TRAP_LOOP_CTRL BIT(2) +#define B_BE_UDM0_SEND_HALTC2H_CTRL BIT(1) +#define B_BE_UDM0_DBG_MODE_CTRL BIT(0) + +#define R_BE_UDM1 0x01F4 +#define B_BE_UDM1_ERROR_ADDR_MASK GENMASK(31, 16) +#define 
B_BE_UDM1_HALMAC_C2H_ENQ_CNT_MASK GENMASK(15, 12) +#define B_BE_UDM1_HALMAC_H2C_DEQ_CNT_MASK GENMASK(11, 8) +#define B_BE_UDM1_WCPU_C2H_ENQ_CNT_MASK GENMASK(7, 4) +#define B_BE_UDM1_WCPU_H2C_DEQ_CNT_MASK GENMASK(3, 0) + +#define R_BE_UDM2 0x01F8 +#define B_BE_UDM2_EPC_RA_MASK GENMASK(31, 0) + +#define R_BE_DCPU_PLATFORM_ENABLE 0x0888 +#define B_BE_DCPU_SYM_DPLT_MEM_MUX_EN BIT(10) +#define B_BE_DCPU_WARM_EN BIT(9) +#define B_BE_DCPU_UART_EN BIT(7) +#define B_BE_DCPU_IDDMA_EN BIT(6) +#define B_BE_DCPU_APB_WRAP_EN BIT(2) +#define B_BE_DCPU_EN BIT(1) +#define B_BE_DCPU_PLATFORM_EN BIT(0) + +#define R_BE_FILTER_MODEL_ADDR 0x0C04 + +#define R_BE_PLE_DBG_FUN_INTF_CTL 0x9110 +#define B_BE_PLE_DFI_ACTIVE BIT(31) +#define B_BE_PLE_DFI_TRGSEL_MASK GENMASK(19, 16) +#define B_BE_PLE_DFI_ADDR_MASK GENMASK(15, 0) + +#define R_BE_PLE_DBG_FUN_INTF_DATA 0x9114 +#define B_BE_PLE_DFI_DATA_MASK GENMASK(31, 0) + +#define R_BE_CMAC_FUNC_EN 0x10000 +#define R_BE_CMAC_FUNC_EN_C1 0x14000 +#define B_BE_CMAC_CRPRT BIT(31) +#define B_BE_CMAC_EN BIT(30) +#define B_BE_CMAC_TXEN BIT(29) +#define B_BE_CMAC_RXEN BIT(28) +#define B_BE_FORCE_RESP_PKTCTL_GCKEN BIT(26) +#define B_BE_FORCE_SIGB_REG_GCKEN BIT(25) +#define B_BE_FORCE_POWER_REG_GCKEN BIT(23) +#define B_BE_FORCE_RMAC_REG_GCKEN BIT(22) +#define B_BE_FORCE_TRXPTCL_REG_GCKEN BIT(21) +#define B_BE_FORCE_TMAC_REG_GCKEN BIT(20) +#define B_BE_FORCE_CMAC_DMA_REG_GCKEN BIT(19) +#define B_BE_FORCE_PTCL_REG_GCKEN BIT(18) +#define B_BE_FORCE_SCHEDULER_RREG_GCKEN BIT(17) +#define B_BE_FORCE_CMAC_COMMON_REG_GCKEN BIT(16) +#define B_BE_FORCE_CMACREG_GCKEN BIT(15) +#define B_BE_TXTIME_EN BIT(8) +#define B_BE_RESP_PKTCTL_EN BIT(7) +#define B_BE_SIGB_EN BIT(6) +#define B_BE_PHYINTF_EN BIT(5) +#define B_BE_CMAC_DMA_EN BIT(4) +#define B_BE_PTCLTOP_EN BIT(3) +#define B_BE_SCHEDULER_EN BIT(2) +#define B_BE_TMAC_EN BIT(1) +#define B_BE_RMAC_EN BIT(0) +#define B_BE_CMAC_FUNC_EN_SET (B_BE_CMAC_EN | B_BE_CMAC_TXEN | B_BE_CMAC_RXEN | \ + B_BE_PHYINTF_EN | B_BE_CMAC_DMA_EN | B_BE_PTCLTOP_EN | \ + B_BE_SCHEDULER_EN | B_BE_TMAC_EN | B_BE_RMAC_EN | \ + B_BE_CMAC_CRPRT | B_BE_TXTIME_EN | B_BE_RESP_PKTCTL_EN | \ + B_BE_SIGB_EN) + +#define R_BE_PORT_0_TSF_SYNC 0x102A0 +#define R_BE_PORT_0_TSF_SYNC_C1 0x142A0 +#define B_BE_P0_SYNC_NOW_P BIT(30) +#define B_BE_P0_SYNC_ONCE_P BIT(29) +#define B_BE_P0_AUTO_SYNC BIT(28) +#define B_BE_P0_SYNC_PORT_SRC_SEL_MASK GENMASK(26, 24) +#define B_BE_P0_TSFTR_SYNC_OFFSET_MASK GENMASK(18, 0) + +#define R_BE_MUEDCA_BE_PARAM_0 0x10350 +#define R_BE_MUEDCA_BK_PARAM_0 0x10354 +#define R_BE_MUEDCA_VI_PARAM_0 0x10358 +#define R_BE_MUEDCA_VO_PARAM_0 0x1035C + +#define R_BE_MUEDCA_EN 0x10370 +#define R_BE_MUEDCA_EN_C1 0x14370 +#define B_BE_MUEDCA_WMM_SEL BIT(8) +#define B_BE_SET_MUEDCATIMER_TF_1 BIT(5) +#define B_BE_SET_MUEDCATIMER_TF_0 BIT(4) +#define B_BE_MUEDCA_EN_0 BIT(0) + +#define R_BE_PORT_CFG_P0 0x10400 +#define R_BE_PORT_CFG_P0_C1 0x14400 +#define B_BE_BCN_ERLY_SORT_EN_P0 BIT(18) +#define B_BE_PROHIB_END_CAL_EN_P0 BIT(17) +#define B_BE_BRK_SETUP_P0 BIT(16) +#define B_BE_TBTT_UPD_SHIFT_SEL_P0 BIT(15) +#define B_BE_BCN_DROP_ALLOW_P0 BIT(14) +#define B_BE_TBTT_PROHIB_EN_P0 BIT(13) +#define B_BE_BCNTX_EN_P0 BIT(12) +#define B_BE_NET_TYPE_P0_MASK GENMASK(11, 10) +#define B_BE_BCN_FORCETX_EN_P0 BIT(9) +#define B_BE_TXBCN_BTCCA_EN_P0 BIT(8) +#define B_BE_BCNERR_CNT_EN_P0 BIT(7) +#define B_BE_BCN_AGRES_P0 BIT(6) +#define B_BE_TSFTR_RST_P0 BIT(5) +#define B_BE_RX_BSSID_FIT_EN_P0 BIT(4) +#define B_BE_TSF_UDT_EN_P0 BIT(3) +#define B_BE_PORT_FUNC_EN_P0 BIT(2) +#define 
B_BE_TXBCN_RPT_EN_P0 BIT(1) +#define B_BE_RXBCN_RPT_EN_P0 BIT(0) + +#define R_BE_TBTT_PROHIB_P0 0x10404 +#define R_BE_TBTT_PROHIB_P0_C1 0x14404 +#define B_BE_TBTT_HOLD_P0_MASK GENMASK(27, 16) +#define B_BE_TBTT_SETUP_P0_MASK GENMASK(7, 0) + +#define R_BE_BCN_AREA_P0 0x10408 +#define R_BE_BCN_AREA_P0_C1 0x14408 +#define B_BE_BCN_MSK_AREA_P0_MSK 0xfff +#define B_BE_BCN_CTN_AREA_P0_MASK GENMASK(11, 0) + +#define R_BE_BCNERLYINT_CFG_P0 0x1040C +#define R_BE_BCNERLYINT_CFG_P0_C1 0x1440C +#define B_BE_BCNERLY_P0_MASK GENMASK(11, 0) + +#define R_BE_TBTTERLYINT_CFG_P0 0x1040E +#define R_BE_TBTTERLYINT_CFG_P0_C1 0x1440E +#define B_BE_TBTTERLY_P0_MASK GENMASK(11, 0) + +#define R_BE_TBTT_AGG_P0 0x10412 +#define R_BE_TBTT_AGG_P0_C1 0x14412 +#define B_BE_TBTT_AGG_NUM_P0_MASK GENMASK(15, 8) + +#define R_BE_BCN_SPACE_CFG_P0 0x10414 +#define R_BE_BCN_SPACE_CFG_P0_C1 0x14414 +#define B_BE_SUB_BCN_SPACE_P0_MASK GENMASK(23, 16) +#define B_BE_BCN_SPACE_P0_MASK GENMASK(15, 0) + +#define R_BE_BCN_FORCETX_P0 0x10418 +#define R_BE_BCN_FORCETX_P0_C1 0x14418 +#define B_BE_FORCE_BCN_NUM_P0_MASK GENMASK(15, 8) +#define B_BE_BCN_MAX_ERR_P0_MASK GENMASK(7, 0) + +#define R_BE_BCN_ERR_CNT_P0 0x10420 +#define R_BE_BCN_ERR_CNT_P0_C1 0x14420 +#define B_BE_BCN_ERR_CNT_SUM_P0_MASK GENMASK(31, 24) +#define B_BE_BCN_ERR_CNT_NAV_P0_MASK GENMASK(23, 16) +#define B_BE_BCN_ERR_CNT_EDCCA_P0_MASK GENMASK(15, 8) +#define B_BE_BCN_ERR_CNT_CCA_P0_MASK GENMASK(7, 0) + +#define R_BE_BCN_ERR_FLAG_P0 0x10424 +#define R_BE_BCN_ERR_FLAG_P0_C1 0x14424 +#define B_BE_BCN_ERR_FLAG_SRCHEND_P0 BIT(3) +#define B_BE_BCN_ERR_FLAG_INVALID_P0 BIT(2) +#define B_BE_BCN_ERR_FLAG_CMP_P0 BIT(1) +#define B_BE_BCN_ERR_FLAG_LOCK_P0 BIT(0) + +#define R_BE_DTIM_CTRL_P0 0x10426 +#define R_BE_DTIM_CTRL_P0_C1 0x14426 +#define B_BE_DTIM_NUM_P0_MASK GENMASK(15, 8) +#define B_BE_DTIM_CURRCNT_P0_MASK GENMASK(7, 0) + +#define R_BE_TBTT_SHIFT_P0 0x10428 +#define R_BE_TBTT_SHIFT_P0_C1 0x14428 +#define B_BE_TBTT_SHIFT_OFST_P0_SH 0 +#define B_BE_TBTT_SHIFT_OFST_P0_MSK 0xfff + +#define R_BE_BCN_CNT_TMR_P0 0x10434 +#define R_BE_BCN_CNT_TMR_P0_C1 0x14434 +#define B_BE_BCN_CNT_TMR_P0_MASK GENMASK(31, 0) + +#define R_BE_TSFTR_LOW_P0 0x10438 +#define R_BE_TSFTR_LOW_P0_C1 0x14438 +#define B_BE_TSFTR_LOW_P0_MASK GENMASK(31, 0) + +#define R_BE_TSFTR_HIGH_P0 0x1043C +#define R_BE_TSFTR_HIGH_P0_C1 0x1443C +#define B_BE_TSFTR_HIGH_P0_MASK GENMASK(31, 0) + +#define R_BE_MBSSID_CTRL 0x10568 +#define R_BE_MBSSID_CTRL_C1 0x14568 +#define B_BE_MBSSID_MODE_SEL BIT(20) +#define B_BE_P0MB_NUM_MASK GENMASK(19, 16) +#define B_BE_P0MB15_EN BIT(15) +#define B_BE_P0MB14_EN BIT(14) +#define B_BE_P0MB13_EN BIT(13) +#define B_BE_P0MB12_EN BIT(12) +#define B_BE_P0MB11_EN BIT(11) +#define B_BE_P0MB10_EN BIT(10) +#define B_BE_P0MB9_EN BIT(9) +#define B_BE_P0MB8_EN BIT(8) +#define B_BE_P0MB7_EN BIT(7) +#define B_BE_P0MB6_EN BIT(6) +#define B_BE_P0MB5_EN BIT(5) +#define B_BE_P0MB4_EN BIT(4) +#define B_BE_P0MB3_EN BIT(3) +#define B_BE_P0MB2_EN BIT(2) +#define B_BE_P0MB1_EN BIT(1) + +#define R_BE_P0MB_HGQ_WINDOW_CFG_0 0x10590 +#define R_BE_P0MB_HGQ_WINDOW_CFG_0_C1 0x14590 +#define R_BE_PORT_HGQ_WINDOW_CFG 0x105A0 +#define R_BE_PORT_HGQ_WINDOW_CFG_C1 0x145A0 + +#define R_BE_AGG_LEN_HT_0 0x10814 +#define R_BE_AGG_LEN_HT_0_C1 0x14814 +#define B_BE_AMPDU_MAX_LEN_HT_MASK GENMASK(31, 16) +#define B_BE_RTS_TXTIME_TH_MASK GENMASK(15, 8) +#define B_BE_RTS_LEN_TH_MASK GENMASK(7, 0) + +#define R_BE_MBSSID_DROP_0 0x1083C +#define R_BE_MBSSID_DROP_0_C1 0x1483C +#define B_BE_GI_LTF_FB_SEL BIT(30) +#define 
B_BE_RATE_SEL_MASK GENMASK(29, 24) +#define B_BE_PORT_DROP_4_0_MASK GENMASK(20, 16) +#define B_BE_MBSSID_DROP_15_0_MASK GENMASK(15, 0) + +#define R_BE_PTCL_BSS_COLOR_0 0x108A0 +#define R_BE_PTCL_BSS_COLOR_0_C1 0x148A0 +#define B_BE_BSS_COLOB_BE_PORT_3_MASK GENMASK(29, 24) +#define B_BE_BSS_COLOB_BE_PORT_2_MASK GENMASK(21, 16) +#define B_BE_BSS_COLOB_BE_PORT_1_MASK GENMASK(13, 8) +#define B_BE_BSS_COLOB_BE_PORT_0_MASK GENMASK(5, 0) + +#define R_BE_PTCL_BSS_COLOR_1 0x108A4 +#define R_BE_PTCL_BSS_COLOR_1_C1 0x148A4 +#define B_BE_BSS_COLOB_BE_PORT_4_MASK GENMASK(5, 0) + +#define R_BE_WMTX_MOREDATA_TSFT_STMP_CTL 0x10E08 +#define R_BE_WMTX_MOREDATA_TSFT_STMP_CTL_C1 0x14E08 +#define B_BE_TSFT_OFS_MASK GENMASK(31, 16) +#define B_BE_STMP_THSD_MASK GENMASK(15, 8) +#define B_BE_UPD_HGQMD BIT(1) +#define B_BE_UPD_TIMIE BIT(0) + +#define R_BE_BFMEE_RESP_OPTION 0x11180 +#define R_BE_BFMEE_RESP_OPTION_C1 0x15180 +#define B_BE_BFMEE_CSI_SEC_TYPE_SH 20 +#define B_BE_BFMEE_CSI_SEC_TYPE_MSK 0xf +#define B_BE_BFMEE_BFRPT_SEG_SIZE_SH 16 +#define B_BE_BFMEE_BFRPT_SEG_SIZE_MSK 0x3 +#define B_BE_BFMEE_MIMO_EN_SEL BIT(8) +#define B_BE_BFMEE_MU_BFEE_DIS BIT(7) +#define B_BE_BFMEE_CHECK_RPTPOLL_MACID_DIS BIT(6) +#define B_BE_BFMEE_NOCHK_BFPOLL_BMP BIT(5) +#define B_BE_BFMEE_VHTBFRPT_CHK BIT(4) +#define B_BE_BFMEE_EHT_NDPA_EN BIT(3) +#define B_BE_BFMEE_HE_NDPA_EN BIT(2) +#define B_BE_BFMEE_VHT_NDPA_EN BIT(1) +#define B_BE_BFMEE_HT_NDPA_EN BIT(0) + +#define R_BE_TRXPTCL_RESP_CSI_CTRL_0 0x11188 +#define R_BE_TRXPTCL_RESP_CSI_CTRL_0_C1 0x15188 +#define B_BE_BFMEE_CSISEQ_SEL BIT(29) +#define B_BE_BFMEE_BFPARAM_SEL BIT(28) +#define B_BE_BFMEE_OFDM_LEN_TH_MASK GENMASK(27, 24) +#define B_BE_BFMEE_BF_PORT_SEL BIT(23) +#define B_BE_BFMEE_USE_NSTS BIT(22) +#define B_BE_BFMEE_CSI_RATE_FB_EN BIT(21) +#define B_BE_BFMEE_CSI_GID_SEL BIT(20) +#define B_BE_BFMEE_CSI_RSC_MASK GENMASK(19, 18) +#define B_BE_BFMEE_CSI_FORCE_RETE_EN BIT(17) +#define B_BE_BFMEE_CSI_USE_NDPARATE BIT(16) +#define B_BE_BFMEE_CSI_WITHHTC_EN BIT(15) +#define B_BE_BFMEE_CSIINFO0_BF_EN BIT(14) +#define B_BE_BFMEE_CSIINFO0_STBC_EN BIT(13) +#define B_BE_BFMEE_CSIINFO0_LDPC_EN BIT(12) +#define B_BE_BFMEE_CSIINFO0_CS_MASK GENMASK(11, 10) +#define B_BE_BFMEE_CSIINFO0_CB_MASK GENMASK(9, 8) +#define B_BE_BFMEE_CSIINFO0_NG_MASK GENMASK(7, 6) +#define B_BE_BFMEE_CSIINFO0_NR_MASK GENMASK(5, 3) +#define B_BE_BFMEE_CSIINFO0_NC_MASK GENMASK(2, 0) +#define CSI_RX_BW_CFG 0x1 +#define R_BE_TRXPTCL_RESP_CSI_CTRL_1 0x11194 +#define R_BE_TRXPTCL_RESP_CSI_CTRL_1_C1 0x15194 +#define B_BE_BFMEE_BE_CSI_RRSC_BITMAP_MASK GENMASK(31, 24) +#define CSI_RRSC_BITMAP_CFG 0x2A + +#define R_BE_TRXPTCL_RESP_CSI_RRSC 0x1118C +#define R_BE_TRXPTCL_RESP_CSI_RRSC_C1 0x1518C +#define CSI_RRSC_BMAP_BE 0x2A2AFF + +#define R_BE_TRXPTCL_RESP_CSI_RATE 0x11190 +#define R_BE_TRXPTCL_RESP_CSI_RATE_C1 0x15190 +#define B_BE_BFMEE_EHT_CSI_RATE_MASK GENMASK(31, 24) +#define B_BE_BFMEE_HE_CSI_RATE_MASK GENMASK(23, 16) +#define B_BE_BFMEE_VHT_CSI_RATE_MASK GENMASK(15, 8) +#define B_BE_BFMEE_HT_CSI_RATE_MASK GENMASK(7, 0) +#define CSI_INIT_RATE_EHT 0x3 + +#define R_BE_RX_FLTR_OPT 0x11420 +#define R_BE_RX_FLTR_OPT_C1 0x15420 +#define B_BE_UID_FILTER_MASK GENMASK(31, 24) +#define B_BE_UNSPT_TYPE BIT(22) +#define B_BE_RX_MPDU_MAX_LEN_MASK GENMASK(21, 16) +#define B_BE_A_FTM_REQ BIT(14) +#define B_BE_A_ERR_PKT BIT(13) +#define B_BE_A_UNSUP_PKT BIT(12) +#define B_BE_A_CRC32_ERR BIT(11) +#define B_BE_A_BCN_CHK_RULE_MASK GENMASK(9, 8) +#define B_BE_A_BCN_CHK_EN BIT(7) +#define B_BE_A_MC_LIST_CAM_MATCH BIT(6) +#define 
B_BE_A_BC_CAM_MATCH BIT(5) +#define B_BE_A_UC_CAM_MATCH BIT(4) +#define B_BE_A_MC BIT(3) +#define B_BE_A_BC BIT(2) +#define B_BE_A_A1_MATCH BIT(1) +#define B_BE_SNIFFER_MODE BIT(0) + +#define R_BE_CSIRPT_OPTION 0x11464 +#define R_BE_CSIRPT_OPTION_C1 0x15464 +#define B_BE_CSIPRT_EHTSU_AID_EN BIT(26) +#define B_BE_CSIPRT_HESU_AID_EN BIT(25) +#define B_BE_CSIPRT_VHTSU_AID_EN BIT(24) + +#define R_BE_PWR_MODULE 0x11900 +#define R_BE_PWR_MODULE_C1 0x15900 + +#define R_BE_PWR_RATE_OFST_CTRL 0x11A30 +#define R_BE_PWR_BY_RATE 0x11E00 +#define R_BE_PWR_BY_RATE_MAX 0x11FA8 +#define R_BE_PWR_LMT 0x11FAC +#define R_BE_PWR_LMT_MAX 0x12040 +#define R_BE_PWR_RU_LMT 0x12048 +#define R_BE_PWR_RU_LMT_MAX 0x120E4 + +#define CMAC1_START_ADDR_BE 0x14000 +#define CMAC1_END_ADDR_BE 0x17FFF + #define RR_MOD 0x00 #define RR_MOD_V1 0x10000 #define RR_MOD_IQK GENMASK(19, 4) @@ -3977,6 +4383,7 @@ #define B_DBCC_80P80_SEL_EVM_RPT_EN BIT(0) #define R_CCX 0x0C00 #define B_CCX_EDCCA_OPT_MSK GENMASK(6, 4) +#define B_CCX_EDCCA_OPT_MSK_V1 GENMASK(7, 4) #define B_MEASUREMENT_TRIG_MSK BIT(2) #define B_CCX_TRIG_OPT_MSK BIT(1) #define B_CCX_EN_MSK BIT(0) @@ -4068,32 +4475,41 @@ #define B_SWSI_R_DATA_DONE_V1 BIT(26) #define R_TX_COUNTER 0x1A40 #define R_IFS_CLM_TX_CNT 0x1ACC +#define R_IFS_CLM_TX_CNT_V1 0x0ECC #define B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK GENMASK(31, 16) #define B_IFS_CLM_TX_CNT_MSK GENMASK(15, 0) #define R_IFS_CLM_CCA 0x1AD0 +#define R_IFS_CLM_CCA_V1 0x0ED0 #define B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK GENMASK(31, 16) #define B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK GENMASK(15, 0) #define R_IFS_CLM_FA 0x1AD4 +#define R_IFS_CLM_FA_V1 0x0ED4 #define B_IFS_CLM_OFDM_FA_MSK GENMASK(31, 16) #define B_IFS_CLM_CCK_FA_MSK GENMASK(15, 0) #define R_IFS_HIS 0x1AD8 +#define R_IFS_HIS_V1 0x0ED8 #define B_IFS_T4_HIS_MSK GENMASK(31, 24) #define B_IFS_T3_HIS_MSK GENMASK(23, 16) #define B_IFS_T2_HIS_MSK GENMASK(15, 8) #define B_IFS_T1_HIS_MSK GENMASK(7, 0) #define R_IFS_AVG_L 0x1ADC +#define R_IFS_AVG_L_V1 0x0EDC #define B_IFS_T2_AVG_MSK GENMASK(31, 16) #define B_IFS_T1_AVG_MSK GENMASK(15, 0) #define R_IFS_AVG_H 0x1AE0 +#define R_IFS_AVG_H_V1 0x0EE0 #define B_IFS_T4_AVG_MSK GENMASK(31, 16) #define B_IFS_T3_AVG_MSK GENMASK(15, 0) #define R_IFS_CCA_L 0x1AE4 +#define R_IFS_CCA_L_V1 0x0EE4 #define B_IFS_T2_CCA_MSK GENMASK(31, 16) #define B_IFS_T1_CCA_MSK GENMASK(15, 0) #define R_IFS_CCA_H 0x1AE8 +#define R_IFS_CCA_H_V1 0x0EE8 #define B_IFS_T4_CCA_MSK GENMASK(31, 16) #define B_IFS_T3_CCA_MSK GENMASK(15, 0) #define R_IFSCNT 0x1AEC +#define R_IFSCNT_V1 0x0EEC #define B_IFSCNT_DONE_MSK BIT(16) #define B_IFSCNT_TOTAL_CNT_MSK GENMASK(15, 0) #define R_TXAGC_TP 0x1C04 @@ -4109,6 +4525,8 @@ #define B_TXAGC_BB_OFT GENMASK(31, 16) #define B_TXAGC_BB GENMASK(31, 24) #define B_TXAGC_RF GENMASK(5, 0) +#define R_PATH0_TXPWR 0x1C78 +#define B_PATH0_TXPWR GENMASK(8, 0) #define R_S0_ADDCK 0x1E00 #define B_S0_ADDCK_I GENMASK(9, 0) #define B_S0_ADDCK_Q GENMASK(19, 10) @@ -4184,6 +4602,8 @@ #define R_TXAGC_BB_S1 0x3C60 #define B_TXAGC_BB_S1_OFT GENMASK(31, 16) #define B_TXAGC_BB_S1 GENMASK(31, 24) +#define R_PATH1_TXPWR 0x3C78 +#define B_PATH1_TXPWR GENMASK(8, 0) #define R_S1_ADDCK 0x3E00 #define B_S1_ADDCK_I GENMASK(9, 0) #define B_S1_ADDCK_Q GENMASK(19, 10) @@ -4360,6 +4780,7 @@ #define B_PKT_POP_EN BIT(8) #define R_SEG0R_PD 0x481C #define R_SEG0R_PD_V1 0x4860 +#define R_SEG0R_PD_V2 0x6A74 #define R_SEG0R_EDCCA_LVL 0x4840 #define R_SEG0R_EDCCA_LVL_V1 0x4884 #define B_SEG0R_PPDU_LVL_MSK GENMASK(31, 24) @@ -4377,12 +4798,20 @@ #define B_ANT_RX_1RCCA_SEG1 GENMASK(21, 18) 
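/* Reading aid, not part of the patch: each row in this table is
 * COUNTRY_REGD(alpha2, 2 GHz, 5 GHz, 6 GHz). The COUNTRY_REGD hunks in
 * this file only retouch the 6 GHz column: opening it for AR/MX (FCC)
 * and for LB/ZA/BF/LA/MN (ETSI), mapping TH to the newly introduced
 * RTW89_THAILAND domain, adding a Kosovo ("XK") entry, and revoking
 * 6 GHz for DM, GD and GY while enabling it for HT.
 */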
#define B_ANT_RX_1RCCA_SEG0 GENMASK(17, 14) #define B_FC0_BW_INV GENMASK(6, 0) +#define R_Q_MATRIX_00 0x497C +#define B_Q_MATRIX_00_IMAGINARY GENMASK(15, 0) +#define B_Q_MATRIX_00_REAL GENMASK(31, 16) #define R_CHBW_MOD 0x4978 #define R_CHBW_MOD_V1 0x49C4 #define B_BT_SHARE BIT(14) #define B_CHBW_MOD_SBW GENMASK(13, 12) #define B_CHBW_MOD_PRICH GENMASK(11, 8) #define B_ANT_RX_SEG0 GENMASK(3, 0) +#define R_Q_MATRIX_11 0x4988 +#define B_Q_MATRIX_11_IMAGINARY GENMASK(15, 0) +#define B_Q_MATRIX_11_REAL GENMASK(31, 16) +#define R_CUSTOMIZE_Q_MATRIX 0x498C +#define B_CUSTOMIZE_Q_MATRIX_EN BIT(0) #define R_P0_RPL1 0x49B0 #define B_P0_RPL1_41_MASK GENMASK(31, 24) #define B_P0_RPL1_40_MASK GENMASK(23, 16) @@ -4478,8 +4907,10 @@ #define R_DCFO_COMP_S0_V1 0x4A40 #define B_DCFO_COMP_S0_V1_MSK GENMASK(13, 0) #define R_BMODE_PDTH_V1 0x4B64 +#define R_BMODE_PDTH_V2 0x6708 #define B_BMODE_PDTH_LOWER_BOUND_MSK_V1 GENMASK(31, 24) #define R_BMODE_PDTH_EN_V1 0x4B74 +#define R_BMODE_PDTH_EN_V2 0x6718 #define B_BMODE_PDTH_LIMIT_EN_MSK_V1 BIT(30) #define R_CFO_COMP_SEG1_L 0x5384 #define R_CFO_COMP_SEG1_H 0x5388 @@ -4501,6 +4932,8 @@ #define B_P0_TSSI_ALIM2 GENMASK(29, 0) #define R_P0_TSSI_ALIM4 0x5640 #define R_TSSI_PA_K8 0x5644 +#define R_P0_TSSI_ADC_CLK 0x566c +#define B_P0_TSSI_ADC_CLK GENMASK(17, 16) #define R_UPD_CLK 0x5670 #define B_DAC_VAL BIT(31) #define B_ACK_VAL GENMASK(30, 29) @@ -4581,6 +5014,8 @@ #define R_TXGAIN_SCALE 0x58F0 #define B_TXGAIN_SCALE_EN BIT(19) #define B_TXGAIN_SCALE_OFT GENMASK(31, 24) +#define R_P0_DAC_COMP_POST_DPD_EN 0x58F8 +#define B_P0_DAC_COMP_POST_DPD_EN BIT(31) #define R_P0_TSSI_BASE 0x5C00 #define R_S0_DACKI 0x5E00 #define B_S0_DACKI_AR GENMASK(31, 28) @@ -4600,6 +5035,10 @@ #define B_S0_DACKQ7_K GENMASK(15, 8) #define R_S0_DACKQ8 0x5E98 #define B_S0_DACKQ8_K GENMASK(15, 8) +#define R_DCFO_WEIGHT_V1 0x6244 +#define B_DCFO_WEIGHT_MSK_V1 GENMASK(31, 28) +#define R_DCFO_OPT_V1 0x6260 +#define B_DCFO_OPT_EN_V1 BIT(17) #define R_RPL_BIAS_COMP1 0x6DF0 #define B_RPL_BIAS_COMP1_MASK GENMASK(7, 0) #define R_P1_TSSI_ALIM1 0x7630 @@ -4611,6 +5050,8 @@ #define B_P1_TSSI_ALIM31 GENMASK(9, 0) #define R_P1_TSSI_ALIM2 0x763c #define B_P1_TSSI_ALIM2 GENMASK(29, 0) +#define R_P1_TSSI_ADC_CLK 0x766c +#define B_P1_TSSI_ADC_CLK GENMASK(17, 16) #define R_P1_TSSIC 0x7814 #define B_P1_TSSIC_BYPASS BIT(11) #define R_P1_TMETER 0x7810 @@ -4637,6 +5078,8 @@ #define B_P1_TSSI_MV_MIX GENMASK(19, 11) #define B_P1_TSSI_MV_AVG GENMASK(13, 11) #define B_P1_TSSI_MV_CLR BIT(14) +#define R_P1_DAC_COMP_POST_DPD_EN 0x78F8 +#define B_P1_DAC_COMP_POST_DPD_EN BIT(31) #define R_TSSI_THOF 0x7C00 #define R_S1_DACKI 0x7E00 #define B_S1_DACKI_AR GENMASK(31, 28) diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c index 34c4d40cfa02..ca99422e600f 100644 --- a/drivers/net/wireless/realtek/rtw89/regd.c +++ b/drivers/net/wireless/realtek/rtw89/regd.c @@ -13,10 +13,10 @@ } static const struct rtw89_regd rtw89_ww_regd = - COUNTRY_REGD("00", RTW89_WW, RTW89_WW); + COUNTRY_REGD("00", RTW89_WW, RTW89_WW, RTW89_WW); static const struct rtw89_regd rtw89_regd_map[] = { - COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO, RTW89_NA), + COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO, RTW89_FCC), COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC, RTW89_FCC), COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC, RTW89_FCC), COUNTRY_REGD("CL", RTW89_CHILE, RTW89_CHILE, RTW89_CHILE), @@ -26,7 +26,7 @@ static const struct rtw89_regd rtw89_regd_map[] = { COUNTRY_REGD("SV", RTW89_FCC, RTW89_FCC, RTW89_FCC), 
COUNTRY_REGD("GT", RTW89_FCC, RTW89_FCC, RTW89_FCC), COUNTRY_REGD("HN", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("MX", RTW89_MEXICO, RTW89_MEXICO, RTW89_NA), + COUNTRY_REGD("MX", RTW89_MEXICO, RTW89_MEXICO, RTW89_FCC), COUNTRY_REGD("NI", RTW89_FCC, RTW89_FCC, RTW89_NA), COUNTRY_REGD("PA", RTW89_FCC, RTW89_FCC, RTW89_NA), COUNTRY_REGD("PY", RTW89_FCC, RTW89_FCC, RTW89_NA), @@ -81,7 +81,7 @@ static const struct rtw89_regd rtw89_regd_map[] = { COUNTRY_REGD("KE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("KW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("KG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("MK", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("MA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), @@ -96,7 +96,7 @@ static const struct rtw89_regd rtw89_regd_map[] = { COUNTRY_REGD("SN", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("RS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("ME", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("ZA", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("ZA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("TR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("UA", RTW89_UKRAINE, RTW89_UKRAINE, RTW89_UKRAINE), COUNTRY_REGD("AE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), @@ -115,7 +115,7 @@ static const struct rtw89_regd rtw89_regd_map[] = { COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC, RTW89_NA), - COUNTRY_REGD("TH", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("TH", RTW89_ETSI, RTW89_ETSI, RTW89_THAILAND), COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA), COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA), @@ -148,7 +148,7 @@ static const struct rtw89_regd rtw89_regd_map[] = { COUNTRY_REGD("IO", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("VG", RTW89_FCC, RTW89_FCC, RTW89_FCC), COUNTRY_REGD("BN", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("BF", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("BF", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("MM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("BI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("CM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), @@ -164,7 +164,7 @@ static const struct rtw89_regd rtw89_regd_map[] = { COUNTRY_REGD("CK", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("DJ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("DM", RTW89_FCC, RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("DM", RTW89_FCC, RTW89_FCC, RTW89_NA), COUNTRY_REGD("GQ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("ER", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("ET", RTW89_ETSI, RTW89_ETSI, RTW89_NA), @@ -179,20 +179,21 @@ static const struct rtw89_regd rtw89_regd_map[] = { COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC, RTW89_NA), COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC, RTW89_NA), COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("GN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("GW", RTW89_ETSI, 
RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("GY", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("HT", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("GY", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("HT", RTW89_FCC, RTW89_FCC, RTW89_FCC), COUNTRY_REGD("HM", RTW89_ACMA, RTW89_ACMA, RTW89_NA), COUNTRY_REGD("VA", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("IM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("JE", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("KI", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("XK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("LY", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("MO", RTW89_ETSI, RTW89_ETSI, RTW89_NA), @@ -207,7 +208,7 @@ static const struct rtw89_regd rtw89_regd_map[] = { COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC, RTW89_NA), COUNTRY_REGD("MD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("MS", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("NR", RTW89_ETSI, RTW89_ETSI, RTW89_NA), COUNTRY_REGD("NP", RTW89_ETSI, RTW89_ETSI, RTW89_NA), @@ -376,7 +377,7 @@ bottom: return; wiphy->bands[NL80211_BAND_6GHZ] = NULL; - kfree(sband->iftype_data); + kfree((__force void *)sband->iftype_data); kfree(sband); } diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c index c3ffcb645ebf..50522ff85003 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c @@ -185,6 +185,10 @@ static const struct rtw89_dig_regs rtw8851b_dig_regs = { .seg0_pd_reg = R_SEG0R_PD_V1, .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK, .pd_spatial_reuse_en = B_SEG0R_PD_SPATIAL_REUSE_EN_MSK_V1, + .bmode_pd_reg = R_BMODE_PDTH_EN_V1, + .bmode_cca_rssi_limit_en = B_BMODE_PDTH_LIMIT_EN_MSK_V1, + .bmode_pd_lower_bound_reg = R_BMODE_PDTH_V1, + .bmode_rssi_nocca_low_th_mask = B_BMODE_PDTH_LOWER_BOUND_MSK_V1, .p0_lna_init = {R_PATH0_LNA_INIT_V1, B_PATH0_LNA_INIT_IDX_MSK}, .p1_lna_init = {R_PATH1_LNA_INIT_V1, B_PATH1_LNA_INIT_IDX_MSK}, .p0_tia_init = {R_PATH0_TIA_INIT_V1, B_PATH0_TIA_INIT_IDX_MSK_V1}, @@ -756,9 +760,9 @@ static void rtw8851b_set_channel_mac(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, u8 mac_idx) { - u32 sub_carr = rtw89_mac_reg_by_idx(R_AX_TX_SUB_CARRIER_VALUE, mac_idx); - u32 chk_rate = rtw89_mac_reg_by_idx(R_AX_TXRATE_CHK, mac_idx); - u32 rf_mod = rtw89_mac_reg_by_idx(R_AX_WMAC_RFMOD, mac_idx); + u32 sub_carr = rtw89_mac_reg_by_idx(rtwdev, R_AX_TX_SUB_CARRIER_VALUE, mac_idx); + u32 chk_rate = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXRATE_CHK, mac_idx); + u32 rf_mod = rtw89_mac_reg_by_idx(rtwdev, R_AX_WMAC_RFMOD, mac_idx); u8 txsc20 = 0, txsc40 = 0; switch (chan->band_width) { @@ -1700,10 +1704,11 @@ static void rtw8851b_set_tx_shape(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, enum rtw89_phy_idx phy_idx) { + const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms; u8 band = chan->band_type; u8 regd = rtw89_regd_get(rtwdev, band); - u8 tx_shape_cck = rtw89_8851b_tx_shape[band][RTW89_RS_CCK][regd]; - u8 tx_shape_ofdm = rtw89_8851b_tx_shape[band][RTW89_RS_OFDM][regd]; + u8 tx_shape_cck = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_CCK][regd]; + u8 tx_shape_ofdm = 
(*rfe_parms->tx_shape.lmt)[band][RTW89_RS_OFDM][regd]; if (band == RTW89_BAND_2G) rtw8851b_bb_set_tx_shape_dfir(rtwdev, chan, tx_shape_cck, phy_idx); @@ -1740,14 +1745,14 @@ void rtw8851b_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, return; } - reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_CTRL, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_CTRL, mac_idx); rtw89_write32_set(rtwdev, reg, B_AX_PWR_UL_TB_CTRL_EN); - reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_1T, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, pw_ofst); pw_ofst = max_t(s8, pw_ofst - 3, -16); - reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_2T, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_MASK, pw_ofst); } @@ -1774,14 +1779,15 @@ rtw8851b_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) return 0; } -static void rtw8851b_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, bool bt_en) +static void rtw8851b_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) { const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); - rtw89_phy_write_reg3_tbl(rtwdev, bt_en ? &rtw8851b_btc_preagc_en_defs_tbl : + rtw89_phy_write_reg3_tbl(rtwdev, en ? &rtw8851b_btc_preagc_en_defs_tbl : &rtw8851b_btc_preagc_dis_defs_tbl); - if (!bt_en) { + if (!en) { if (chan->band_type == RTW89_BAND_2G) { rtw89_phy_write32_mask(rtwdev, R_PATH0_G_LNA6_OP1DB_V1, B_PATH0_G_LNA6_OP1DB_V1, 0x20); @@ -1796,11 +1802,12 @@ static void rtw8851b_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, bool bt_en) } } -static void rtw8851b_ctrl_btg(struct rtw89_dev *rtwdev, bool btg) +static void rtw8851b_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) { const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); - if (btg) { + if (en) { rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1, B_PATH0_BT_SHARE_V1, 0x1); rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1, @@ -2276,6 +2283,7 @@ static int rtw8851b_mac_disable_bb_rf(struct rtw89_dev *rtwdev) static const struct rtw89_chip_ops rtw8851b_chip_ops = { .enable_bb_rf = rtw8851b_mac_enable_bb_rf, .disable_bb_rf = rtw8851b_mac_disable_bb_rf, + .bb_preinit = NULL, .bb_reset = rtw8851b_bb_reset, .bb_sethw = rtw8851b_bb_sethw, .read_rf = rtw89_phy_read_rf_v1, @@ -2296,9 +2304,9 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = { .set_txpwr_ctrl = rtw8851b_set_txpwr_ctrl, .init_txpwr_unit = rtw8851b_init_txpwr_unit, .get_thermal = rtw8851b_get_thermal, - .ctrl_btg = rtw8851b_ctrl_btg, + .ctrl_btg_bt_rx = rtw8851b_ctrl_btg_bt_rx, .query_ppdu = rtw8851b_query_ppdu, - .bb_ctrl_btc_preagc = rtw8851b_bb_ctrl_btc_preagc, + .ctrl_nbtg_bt_tx = rtw8851b_ctrl_nbtg_bt_tx, .cfg_txrx_path = rtw8851b_bb_cfg_txrx_path, .set_txpwr_ul_tb_offset = rtw8851b_set_txpwr_ul_tb_offset, .pwr_on_func = rtw8851b_pwr_on_func, @@ -2334,10 +2342,15 @@ static const struct wiphy_wowlan_support rtw_wowlan_stub_8851b = { const struct rtw89_chip_info rtw8851b_chip_info = { .chip_id = RTL8851B, + .chip_gen = RTW89_CHIP_AX, .ops = &rtw8851b_chip_ops, + .mac_def = &rtw89_mac_gen_ax, + .phy_def = &rtw89_phy_gen_ax, .fw_basename = RTW8851B_FW_BASENAME, .fw_format_max = RTW8851B_FW_FORMAT_MAX, .try_ce_fw = true, + .bbmcu_nr = 0, + .needed_fw_elms = 0, .fifo_size = 196608, .small_fifo_size = true, .dle_scc_rsvd_size = 98304, @@ -2356,7 +2369,6 @@ const struct rtw89_chip_info rtw8851b_chip_info = 
{ .rf_table = {&rtw89_8851b_phy_radioa_table,}, .nctl_table = &rtw89_8851b_phy_nctl_table, .nctl_post_table = &rtw8851b_nctl_post_defs_tbl, - .byr_table = &rtw89_8851b_byr_table, .dflt_parms = &rtw89_8851b_dflt_parms, .rfe_parms_conf = rtw89_8851b_rfe_parms_conf, .txpwr_factor_rf = 2, @@ -2369,7 +2381,8 @@ const struct rtw89_chip_info rtw8851b_chip_info = { BIT(NL80211_BAND_5GHZ), .support_bw160 = false, .support_unii4 = true, - .support_ul_tb_ctrl = true, + .ul_tb_waveform_ctrl = true, + .ul_tb_pwr_diff = false, .hw_sec_hdr = false, .rf_path_num = 1, .tx_nss = 1, @@ -2411,6 +2424,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = { .hci_func_en_addr = R_AX_HCI_FUNC_EN, .h2c_desc_size = sizeof(struct rtw89_txwd_body), .txwd_body_size = sizeof(struct rtw89_txwd_body), + .txwd_info_size = sizeof(struct rtw89_txwd_info), .h2c_ctrl_reg = R_AX_H2CREG_CTRL, .h2c_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_H2C_DEQ_CNT_MASK >> 8}, .h2c_regs = rtw8851b_h2c_regs, @@ -2424,6 +2438,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = { .dcfo_comp_sft = 12, .imr_info = &rtw8851b_imr_info, .rrsr_cfgs = &rtw8851b_rrsr_cfgs, + .bss_clr_vld = {R_BSS_CLR_MAP_V1, B_BSS_CLR_MAP_VLD0}, .bss_clr_map_reg = R_BSS_CLR_MAP_V1, .dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) | BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) | diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c b/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c index c447f91a4bd0..8cb5bde8f625 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c @@ -3247,12 +3247,50 @@ static const struct rtw89_reg2_def rtw89_8851b_phy_nctl_regs[] = { static const struct rtw89_txpwr_byrate_cfg rtw89_8851b_txpwr_byrate[] = { { 0, 0, 0, 0, 4, 0x50505050, }, + { 0, 0, 1, 0, 4, 0x58585858, }, + { 0, 0, 1, 4, 4, 0x484c5054, }, + { 0, 0, 2, 0, 4, 0x54585858, }, + { 0, 0, 2, 4, 4, 0x44484c50, }, + { 0, 0, 2, 8, 4, 0x34383c40, }, + { 0, 0, 3, 0, 4, 0x58585858, }, + { 0, 1, 2, 0, 4, 0x50545858, }, + { 0, 1, 2, 4, 4, 0x4044484c, }, + { 0, 1, 2, 8, 4, 0x3034383c, }, + { 0, 1, 3, 0, 4, 0x50505050, }, + { 0, 0, 4, 1, 4, 0x00000000, }, + { 0, 0, 4, 0, 1, 0x00000000, }, + { 1, 0, 1, 0, 4, 0x58585858, }, + { 1, 0, 1, 4, 4, 0x484c5054, }, + { 1, 0, 2, 0, 4, 0x54585858, }, + { 1, 0, 2, 4, 4, 0x44484c50, }, + { 1, 0, 2, 8, 4, 0x34383c40, }, + { 1, 0, 3, 0, 4, 0x54585858, }, + { 1, 1, 2, 0, 4, 0x54585858, }, + { 1, 1, 2, 4, 4, 0x44484c50, }, + { 1, 1, 2, 8, 4, 0x34383c40, }, + { 1, 1, 3, 0, 4, 0x48484848, }, + { 1, 0, 4, 0, 4, 0x00000000, }, + { 2, 0, 1, 0, 4, 0x40404040, }, + { 2, 0, 1, 4, 4, 0x383c4040, }, + { 2, 0, 2, 0, 4, 0x40404040, }, + { 2, 0, 2, 4, 4, 0x34383c40, }, + { 2, 0, 2, 8, 4, 0x24282c30, }, + { 2, 0, 3, 0, 4, 0x40404040, }, + { 2, 1, 2, 0, 4, 0x40404040, }, + { 2, 1, 2, 4, 4, 0x34383c40, }, + { 2, 1, 2, 8, 4, 0x24282c30, }, + { 2, 1, 3, 0, 4, 0x40404040, }, + { 2, 0, 4, 0, 4, 0x00000000, }, +}; + +static const struct rtw89_txpwr_byrate_cfg rtw89_8851b_txpwr_byrate_type2[] = { + { 0, 0, 0, 0, 4, 0x50505050, }, { 0, 0, 1, 0, 4, 0x54585858, }, { 0, 0, 1, 4, 4, 0x44484c50, }, { 0, 0, 2, 0, 4, 0x50545858, }, { 0, 0, 2, 4, 4, 0x4044484c, }, { 0, 0, 2, 8, 4, 0x3034383c, }, - { 0, 0, 3, 0, 4, 0x50505050, }, + { 0, 0, 3, 0, 4, 0x58585858, }, { 0, 1, 2, 0, 4, 0x50545858, }, { 0, 1, 2, 4, 4, 0x4044484c, }, { 0, 1, 2, 8, 4, 0x3034383c, }, @@ -3264,7 +3302,7 @@ static const struct rtw89_txpwr_byrate_cfg rtw89_8851b_txpwr_byrate[] = { { 1, 0, 2, 0, 4, 0x54585858, }, { 1, 0, 2, 4, 4, 
0x44484c50, }, { 1, 0, 2, 8, 4, 0x34383c40, }, - { 1, 0, 3, 0, 4, 0x40404040, }, + { 1, 0, 3, 0, 4, 0x54585858, }, { 1, 1, 2, 0, 4, 0x54585858, }, { 1, 1, 2, 4, 4, 0x44484c50, }, { 1, 1, 2, 8, 4, 0x34383c40, }, @@ -3321,8 +3359,9 @@ static const s8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = { 0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4 }; -const u8 rtw89_8851b_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] - [RTW89_REGD_NUM] = { +static +const u8 rtw89_8851b_tx_shape_lmt[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] + [RTW89_REGD_NUM] = { [0][0][RTW89_ACMA] = 0, [0][0][RTW89_CN] = 0, [0][0][RTW89_ETSI] = 0, @@ -3342,14 +3381,34 @@ const u8 rtw89_8851b_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] [1][1][RTW89_ACMA] = 0, [1][1][RTW89_CN] = 0, [1][1][RTW89_ETSI] = 0, - [1][1][RTW89_FCC] = 1, - [1][1][RTW89_IC] = 1, + [1][1][RTW89_FCC] = 3, + [1][1][RTW89_IC] = 3, [1][1][RTW89_KCC] = 0, [1][1][RTW89_MKK] = 0, [1][1][RTW89_UK] = 0, }; static +const u8 rtw89_8851b_tx_shape_lmt_ru[RTW89_BAND_NUM][RTW89_REGD_NUM] = { + [0][RTW89_ACMA] = 0, + [0][RTW89_CN] = 0, + [0][RTW89_ETSI] = 0, + [0][RTW89_FCC] = 3, + [0][RTW89_IC] = 3, + [0][RTW89_KCC] = 0, + [0][RTW89_MKK] = 0, + [0][RTW89_UK] = 0, + [1][RTW89_ACMA] = 0, + [1][RTW89_CN] = 0, + [1][RTW89_ETSI] = 0, + [1][RTW89_FCC] = 3, + [1][RTW89_IC] = 3, + [1][RTW89_KCC] = 0, + [1][RTW89_MKK] = 0, + [1][RTW89_UK] = 0, +}; + +static const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [RTW89_RS_LMT_NUM][RTW89_BF_NUM] [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = { @@ -3365,7 +3424,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_WW][9] = 58, [0][0][0][0][RTW89_WW][10] = 58, [0][0][0][0][RTW89_WW][11] = 58, - [0][0][0][0][RTW89_WW][12] = 52, + [0][0][0][0][RTW89_WW][12] = 50, [0][0][0][0][RTW89_WW][13] = 76, [0][1][0][0][RTW89_WW][0] = 0, [0][1][0][0][RTW89_WW][1] = 0, @@ -3391,7 +3450,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_WW][7] = 58, [1][0][0][0][RTW89_WW][8] = 58, [1][0][0][0][RTW89_WW][9] = 58, - [1][0][0][0][RTW89_WW][10] = 58, + [1][0][0][0][RTW89_WW][10] = 50, [1][0][0][0][RTW89_WW][11] = 0, [1][0][0][0][RTW89_WW][12] = 0, [1][0][0][0][RTW89_WW][13] = 0, @@ -3421,7 +3480,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_WW][9] = 60, [0][0][1][0][RTW89_WW][10] = 60, [0][0][1][0][RTW89_WW][11] = 60, - [0][0][1][0][RTW89_WW][12] = 58, + [0][0][1][0][RTW89_WW][12] = 40, [0][0][1][0][RTW89_WW][13] = 0, [0][1][1][0][RTW89_WW][0] = 0, [0][1][1][0][RTW89_WW][1] = 0, @@ -3449,7 +3508,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_WW][9] = 60, [0][0][2][0][RTW89_WW][10] = 60, [0][0][2][0][RTW89_WW][11] = 60, - [0][0][2][0][RTW89_WW][12] = 60, + [0][0][2][0][RTW89_WW][12] = 38, [0][0][2][0][RTW89_WW][13] = 0, [0][1][2][0][RTW89_WW][0] = 0, [0][1][2][0][RTW89_WW][1] = 0, @@ -3489,7 +3548,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_WW][7] = 58, [1][0][2][0][RTW89_WW][8] = 58, [1][0][2][0][RTW89_WW][9] = 58, - [1][0][2][0][RTW89_WW][10] = 58, + [1][0][2][0][RTW89_WW][10] = 46, [1][0][2][0][RTW89_WW][11] = 0, [1][0][2][0][RTW89_WW][12] = 0, [1][0][2][0][RTW89_WW][13] = 0, @@ -3527,7 +3586,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][0] = 84, [0][0][0][0][RTW89_KCC][0] = 68, [0][0][0][0][RTW89_ACMA][0] = 58, - [0][0][0][0][RTW89_CN][0] = 60, + [0][0][0][0][RTW89_CN][0] = 58, [0][0][0][0][RTW89_UK][0] = 
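/* Reading aid, not part of the patch: per the declaration above, the
 * indices are [bandwidth][ntx][rate section][beamforming][regulatory
 * domain][channel]; entries are s8 power ceilings (presumably in the
 * 0.25 dBm steps implied by .txpwr_factor_rf = 2 further down), with
 * 127 serving as a "not allowed" sentinel. The RTW89_CN rows in this
 * revision drop from 60 to 58 on most 2.4 GHz channels and further
 * (50/46/40/38) on the band edges.
 */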
58, [0][0][0][0][RTW89_FCC][1] = 84, [0][0][0][0][RTW89_ETSI][1] = 58, @@ -3535,7 +3594,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][1] = 84, [0][0][0][0][RTW89_KCC][1] = 68, [0][0][0][0][RTW89_ACMA][1] = 58, - [0][0][0][0][RTW89_CN][1] = 60, + [0][0][0][0][RTW89_CN][1] = 58, [0][0][0][0][RTW89_UK][1] = 58, [0][0][0][0][RTW89_FCC][2] = 84, [0][0][0][0][RTW89_ETSI][2] = 58, @@ -3543,7 +3602,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][2] = 84, [0][0][0][0][RTW89_KCC][2] = 68, [0][0][0][0][RTW89_ACMA][2] = 58, - [0][0][0][0][RTW89_CN][2] = 60, + [0][0][0][0][RTW89_CN][2] = 58, [0][0][0][0][RTW89_UK][2] = 58, [0][0][0][0][RTW89_FCC][3] = 84, [0][0][0][0][RTW89_ETSI][3] = 58, @@ -3551,7 +3610,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][3] = 84, [0][0][0][0][RTW89_KCC][3] = 68, [0][0][0][0][RTW89_ACMA][3] = 58, - [0][0][0][0][RTW89_CN][3] = 60, + [0][0][0][0][RTW89_CN][3] = 58, [0][0][0][0][RTW89_UK][3] = 58, [0][0][0][0][RTW89_FCC][4] = 84, [0][0][0][0][RTW89_ETSI][4] = 58, @@ -3559,7 +3618,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][4] = 84, [0][0][0][0][RTW89_KCC][4] = 68, [0][0][0][0][RTW89_ACMA][4] = 58, - [0][0][0][0][RTW89_CN][4] = 60, + [0][0][0][0][RTW89_CN][4] = 58, [0][0][0][0][RTW89_UK][4] = 58, [0][0][0][0][RTW89_FCC][5] = 84, [0][0][0][0][RTW89_ETSI][5] = 58, @@ -3567,7 +3626,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][5] = 84, [0][0][0][0][RTW89_KCC][5] = 68, [0][0][0][0][RTW89_ACMA][5] = 58, - [0][0][0][0][RTW89_CN][5] = 60, + [0][0][0][0][RTW89_CN][5] = 58, [0][0][0][0][RTW89_UK][5] = 58, [0][0][0][0][RTW89_FCC][6] = 84, [0][0][0][0][RTW89_ETSI][6] = 58, @@ -3575,7 +3634,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][6] = 84, [0][0][0][0][RTW89_KCC][6] = 68, [0][0][0][0][RTW89_ACMA][6] = 58, - [0][0][0][0][RTW89_CN][6] = 60, + [0][0][0][0][RTW89_CN][6] = 58, [0][0][0][0][RTW89_UK][6] = 58, [0][0][0][0][RTW89_FCC][7] = 84, [0][0][0][0][RTW89_ETSI][7] = 58, @@ -3583,7 +3642,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][7] = 84, [0][0][0][0][RTW89_KCC][7] = 68, [0][0][0][0][RTW89_ACMA][7] = 58, - [0][0][0][0][RTW89_CN][7] = 60, + [0][0][0][0][RTW89_CN][7] = 58, [0][0][0][0][RTW89_UK][7] = 58, [0][0][0][0][RTW89_FCC][8] = 84, [0][0][0][0][RTW89_ETSI][8] = 58, @@ -3591,7 +3650,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][8] = 84, [0][0][0][0][RTW89_KCC][8] = 68, [0][0][0][0][RTW89_ACMA][8] = 58, - [0][0][0][0][RTW89_CN][8] = 60, + [0][0][0][0][RTW89_CN][8] = 58, [0][0][0][0][RTW89_UK][8] = 58, [0][0][0][0][RTW89_FCC][9] = 84, [0][0][0][0][RTW89_ETSI][9] = 58, @@ -3599,7 +3658,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][9] = 84, [0][0][0][0][RTW89_KCC][9] = 68, [0][0][0][0][RTW89_ACMA][9] = 58, - [0][0][0][0][RTW89_CN][9] = 60, + [0][0][0][0][RTW89_CN][9] = 58, [0][0][0][0][RTW89_UK][9] = 58, [0][0][0][0][RTW89_FCC][10] = 82, [0][0][0][0][RTW89_ETSI][10] = 58, @@ -3607,7 +3666,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][10] = 82, [0][0][0][0][RTW89_KCC][10] = 68, [0][0][0][0][RTW89_ACMA][10] = 58, - [0][0][0][0][RTW89_CN][10] = 60, + [0][0][0][0][RTW89_CN][10] = 58, [0][0][0][0][RTW89_UK][10] = 58, 
[0][0][0][0][RTW89_FCC][11] = 62, [0][0][0][0][RTW89_ETSI][11] = 58, @@ -3615,7 +3674,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][11] = 62, [0][0][0][0][RTW89_KCC][11] = 68, [0][0][0][0][RTW89_ACMA][11] = 58, - [0][0][0][0][RTW89_CN][11] = 60, + [0][0][0][0][RTW89_CN][11] = 58, [0][0][0][0][RTW89_UK][11] = 58, [0][0][0][0][RTW89_FCC][12] = 52, [0][0][0][0][RTW89_ETSI][12] = 58, @@ -3623,7 +3682,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][12] = 52, [0][0][0][0][RTW89_KCC][12] = 68, [0][0][0][0][RTW89_ACMA][12] = 58, - [0][0][0][0][RTW89_CN][12] = 60, + [0][0][0][0][RTW89_CN][12] = 50, [0][0][0][0][RTW89_UK][12] = 58, [0][0][0][0][RTW89_FCC][13] = 127, [0][0][0][0][RTW89_ETSI][13] = 127, @@ -3767,7 +3826,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][2] = 127, [1][0][0][0][RTW89_KCC][2] = 68, [1][0][0][0][RTW89_ACMA][2] = 58, - [1][0][0][0][RTW89_CN][2] = 60, + [1][0][0][0][RTW89_CN][2] = 58, [1][0][0][0][RTW89_UK][2] = 58, [1][0][0][0][RTW89_FCC][3] = 127, [1][0][0][0][RTW89_ETSI][3] = 58, @@ -3775,7 +3834,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][3] = 127, [1][0][0][0][RTW89_KCC][3] = 68, [1][0][0][0][RTW89_ACMA][3] = 58, - [1][0][0][0][RTW89_CN][3] = 60, + [1][0][0][0][RTW89_CN][3] = 58, [1][0][0][0][RTW89_UK][3] = 58, [1][0][0][0][RTW89_FCC][4] = 127, [1][0][0][0][RTW89_ETSI][4] = 58, @@ -3783,7 +3842,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][4] = 127, [1][0][0][0][RTW89_KCC][4] = 68, [1][0][0][0][RTW89_ACMA][4] = 58, - [1][0][0][0][RTW89_CN][4] = 60, + [1][0][0][0][RTW89_CN][4] = 58, [1][0][0][0][RTW89_UK][4] = 58, [1][0][0][0][RTW89_FCC][5] = 127, [1][0][0][0][RTW89_ETSI][5] = 58, @@ -3791,7 +3850,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][5] = 127, [1][0][0][0][RTW89_KCC][5] = 68, [1][0][0][0][RTW89_ACMA][5] = 58, - [1][0][0][0][RTW89_CN][5] = 60, + [1][0][0][0][RTW89_CN][5] = 58, [1][0][0][0][RTW89_UK][5] = 58, [1][0][0][0][RTW89_FCC][6] = 127, [1][0][0][0][RTW89_ETSI][6] = 58, @@ -3799,7 +3858,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][6] = 127, [1][0][0][0][RTW89_KCC][6] = 68, [1][0][0][0][RTW89_ACMA][6] = 58, - [1][0][0][0][RTW89_CN][6] = 60, + [1][0][0][0][RTW89_CN][6] = 58, [1][0][0][0][RTW89_UK][6] = 58, [1][0][0][0][RTW89_FCC][7] = 127, [1][0][0][0][RTW89_ETSI][7] = 58, @@ -3807,7 +3866,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][7] = 127, [1][0][0][0][RTW89_KCC][7] = 68, [1][0][0][0][RTW89_ACMA][7] = 58, - [1][0][0][0][RTW89_CN][7] = 60, + [1][0][0][0][RTW89_CN][7] = 58, [1][0][0][0][RTW89_UK][7] = 58, [1][0][0][0][RTW89_FCC][8] = 127, [1][0][0][0][RTW89_ETSI][8] = 58, @@ -3815,7 +3874,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][8] = 127, [1][0][0][0][RTW89_KCC][8] = 68, [1][0][0][0][RTW89_ACMA][8] = 58, - [1][0][0][0][RTW89_CN][8] = 60, + [1][0][0][0][RTW89_CN][8] = 58, [1][0][0][0][RTW89_UK][8] = 58, [1][0][0][0][RTW89_FCC][9] = 127, [1][0][0][0][RTW89_ETSI][9] = 58, @@ -3823,7 +3882,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][9] = 127, [1][0][0][0][RTW89_KCC][9] = 68, [1][0][0][0][RTW89_ACMA][9] = 58, - [1][0][0][0][RTW89_CN][9] = 60, + [1][0][0][0][RTW89_CN][9] = 58, 
[1][0][0][0][RTW89_UK][9] = 58, [1][0][0][0][RTW89_FCC][10] = 127, [1][0][0][0][RTW89_ETSI][10] = 58, @@ -3831,7 +3890,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][10] = 127, [1][0][0][0][RTW89_KCC][10] = 68, [1][0][0][0][RTW89_ACMA][10] = 58, - [1][0][0][0][RTW89_CN][10] = 60, + [1][0][0][0][RTW89_CN][10] = 50, [1][0][0][0][RTW89_UK][10] = 58, [1][0][0][0][RTW89_FCC][11] = 127, [1][0][0][0][RTW89_ETSI][11] = 127, @@ -4071,7 +4130,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][12] = 64, [0][0][1][0][RTW89_KCC][12] = 74, [0][0][1][0][RTW89_ACMA][12] = 58, - [0][0][1][0][RTW89_CN][12] = 60, + [0][0][1][0][RTW89_CN][12] = 40, [0][0][1][0][RTW89_UK][12] = 58, [0][0][1][0][RTW89_FCC][13] = 127, [0][0][1][0][RTW89_ETSI][13] = 127, @@ -4295,7 +4354,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][12] = 70, [0][0][2][0][RTW89_KCC][12] = 78, [0][0][2][0][RTW89_ACMA][12] = 60, - [0][0][2][0][RTW89_CN][12] = 60, + [0][0][2][0][RTW89_CN][12] = 38, [0][0][2][0][RTW89_UK][12] = 60, [0][0][2][0][RTW89_FCC][13] = 127, [0][0][2][0][RTW89_ETSI][13] = 127, @@ -4551,7 +4610,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][2] = 72, [1][0][2][0][RTW89_KCC][2] = 80, [1][0][2][0][RTW89_ACMA][2] = 58, - [1][0][2][0][RTW89_CN][2] = 60, + [1][0][2][0][RTW89_CN][2] = 58, [1][0][2][0][RTW89_UK][2] = 58, [1][0][2][0][RTW89_FCC][3] = 72, [1][0][2][0][RTW89_ETSI][3] = 58, @@ -4559,7 +4618,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][3] = 72, [1][0][2][0][RTW89_KCC][3] = 80, [1][0][2][0][RTW89_ACMA][3] = 58, - [1][0][2][0][RTW89_CN][3] = 60, + [1][0][2][0][RTW89_CN][3] = 58, [1][0][2][0][RTW89_UK][3] = 58, [1][0][2][0][RTW89_FCC][4] = 76, [1][0][2][0][RTW89_ETSI][4] = 58, @@ -4567,7 +4626,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][4] = 76, [1][0][2][0][RTW89_KCC][4] = 80, [1][0][2][0][RTW89_ACMA][4] = 58, - [1][0][2][0][RTW89_CN][4] = 60, + [1][0][2][0][RTW89_CN][4] = 58, [1][0][2][0][RTW89_UK][4] = 58, [1][0][2][0][RTW89_FCC][5] = 78, [1][0][2][0][RTW89_ETSI][5] = 58, @@ -4575,7 +4634,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][5] = 78, [1][0][2][0][RTW89_KCC][5] = 80, [1][0][2][0][RTW89_ACMA][5] = 58, - [1][0][2][0][RTW89_CN][5] = 60, + [1][0][2][0][RTW89_CN][5] = 58, [1][0][2][0][RTW89_UK][5] = 58, [1][0][2][0][RTW89_FCC][6] = 78, [1][0][2][0][RTW89_ETSI][6] = 58, @@ -4583,7 +4642,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][6] = 78, [1][0][2][0][RTW89_KCC][6] = 80, [1][0][2][0][RTW89_ACMA][6] = 58, - [1][0][2][0][RTW89_CN][6] = 60, + [1][0][2][0][RTW89_CN][6] = 58, [1][0][2][0][RTW89_UK][6] = 58, [1][0][2][0][RTW89_FCC][7] = 78, [1][0][2][0][RTW89_ETSI][7] = 58, @@ -4591,7 +4650,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][7] = 78, [1][0][2][0][RTW89_KCC][7] = 80, [1][0][2][0][RTW89_ACMA][7] = 58, - [1][0][2][0][RTW89_CN][7] = 60, + [1][0][2][0][RTW89_CN][7] = 58, [1][0][2][0][RTW89_UK][7] = 58, [1][0][2][0][RTW89_FCC][8] = 78, [1][0][2][0][RTW89_ETSI][8] = 58, @@ -4599,7 +4658,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][8] = 78, [1][0][2][0][RTW89_KCC][8] = 78, [1][0][2][0][RTW89_ACMA][8] = 58, - [1][0][2][0][RTW89_CN][8] = 60, + 
[1][0][2][0][RTW89_CN][8] = 58, [1][0][2][0][RTW89_UK][8] = 58, [1][0][2][0][RTW89_FCC][9] = 76, [1][0][2][0][RTW89_ETSI][9] = 58, @@ -4607,7 +4666,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][9] = 76, [1][0][2][0][RTW89_KCC][9] = 78, [1][0][2][0][RTW89_ACMA][9] = 58, - [1][0][2][0][RTW89_CN][9] = 60, + [1][0][2][0][RTW89_CN][9] = 58, [1][0][2][0][RTW89_UK][9] = 58, [1][0][2][0][RTW89_FCC][10] = 70, [1][0][2][0][RTW89_ETSI][10] = 58, @@ -4615,7 +4674,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][10] = 70, [1][0][2][0][RTW89_KCC][10] = 78, [1][0][2][0][RTW89_ACMA][10] = 58, - [1][0][2][0][RTW89_CN][10] = 60, + [1][0][2][0][RTW89_CN][10] = 46, [1][0][2][0][RTW89_UK][10] = 58, [1][0][2][0][RTW89_FCC][11] = 127, [1][0][2][0][RTW89_ETSI][11] = 127, @@ -4896,9 +4955,9 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_WW][42] = 30, [0][0][1][0][RTW89_WW][44] = 30, [0][0][1][0][RTW89_WW][46] = 30, - [0][0][1][0][RTW89_WW][48] = 68, - [0][0][1][0][RTW89_WW][50] = 68, - [0][0][1][0][RTW89_WW][52] = 68, + [0][0][1][0][RTW89_WW][48] = 72, + [0][0][1][0][RTW89_WW][50] = 72, + [0][0][1][0][RTW89_WW][52] = 72, [0][1][1][0][RTW89_WW][0] = 0, [0][1][1][0][RTW89_WW][2] = 0, [0][1][1][0][RTW89_WW][4] = 0, @@ -4927,14 +4986,14 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_WW][48] = 0, [0][1][1][0][RTW89_WW][50] = 0, [0][1][1][0][RTW89_WW][52] = 0, - [0][0][2][0][RTW89_WW][0] = 62, - [0][0][2][0][RTW89_WW][2] = 62, - [0][0][2][0][RTW89_WW][4] = 62, + [0][0][2][0][RTW89_WW][0] = 60, + [0][0][2][0][RTW89_WW][2] = 60, + [0][0][2][0][RTW89_WW][4] = 60, [0][0][2][0][RTW89_WW][6] = 54, - [0][0][2][0][RTW89_WW][8] = 62, - [0][0][2][0][RTW89_WW][10] = 62, - [0][0][2][0][RTW89_WW][12] = 62, - [0][0][2][0][RTW89_WW][14] = 62, + [0][0][2][0][RTW89_WW][8] = 60, + [0][0][2][0][RTW89_WW][10] = 60, + [0][0][2][0][RTW89_WW][12] = 60, + [0][0][2][0][RTW89_WW][14] = 60, [0][0][2][0][RTW89_WW][15] = 60, [0][0][2][0][RTW89_WW][17] = 62, [0][0][2][0][RTW89_WW][19] = 62, @@ -4952,9 +5011,9 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_WW][42] = 30, [0][0][2][0][RTW89_WW][44] = 30, [0][0][2][0][RTW89_WW][46] = 30, - [0][0][2][0][RTW89_WW][48] = 70, - [0][0][2][0][RTW89_WW][50] = 72, - [0][0][2][0][RTW89_WW][52] = 72, + [0][0][2][0][RTW89_WW][48] = 74, + [0][0][2][0][RTW89_WW][50] = 76, + [0][0][2][0][RTW89_WW][52] = 76, [0][1][2][0][RTW89_WW][0] = 0, [0][1][2][0][RTW89_WW][2] = 0, [0][1][2][0][RTW89_WW][4] = 0, @@ -5011,11 +5070,11 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_WW][48] = 0, [0][1][2][1][RTW89_WW][50] = 0, [0][1][2][1][RTW89_WW][52] = 0, - [1][0][2][0][RTW89_WW][1] = 60, + [1][0][2][0][RTW89_WW][1] = 62, [1][0][2][0][RTW89_WW][5] = 62, - [1][0][2][0][RTW89_WW][9] = 64, - [1][0][2][0][RTW89_WW][13] = 60, - [1][0][2][0][RTW89_WW][16] = 62, + [1][0][2][0][RTW89_WW][9] = 62, + [1][0][2][0][RTW89_WW][13] = 62, + [1][0][2][0][RTW89_WW][16] = 66, [1][0][2][0][RTW89_WW][20] = 66, [1][0][2][0][RTW89_WW][24] = 66, [1][0][2][0][RTW89_WW][28] = 66, @@ -5023,8 +5082,8 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_WW][36] = 76, [1][0][2][0][RTW89_WW][39] = 30, [1][0][2][0][RTW89_WW][43] = 30, - [1][0][2][0][RTW89_WW][47] = 80, - [1][0][2][0][RTW89_WW][51] = 80, + [1][0][2][0][RTW89_WW][47] = 84, + [1][0][2][0][RTW89_WW][51] = 84, 
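/* Reading aid, not part of the patch: the RTW89_WW ("worldwide") rows
 * appear to be precomputed minima over the per-domain entries, which is
 * why they move in lockstep with the per-regd edits in this revision,
 * e.g. the 80 -> 84 change on channels 47/51 just above mirrors the
 * FCC ceiling raise for the same channels further down.
 */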
[1][1][2][0][RTW89_WW][1] = 0, [1][1][2][0][RTW89_WW][5] = 0, [1][1][2][0][RTW89_WW][9] = 0, @@ -5054,12 +5113,12 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_WW][47] = 0, [1][1][2][1][RTW89_WW][51] = 0, [2][0][2][0][RTW89_WW][3] = 60, - [2][0][2][0][RTW89_WW][11] = 58, - [2][0][2][0][RTW89_WW][18] = 62, + [2][0][2][0][RTW89_WW][11] = 56, + [2][0][2][0][RTW89_WW][18] = 64, [2][0][2][0][RTW89_WW][26] = 64, [2][0][2][0][RTW89_WW][34] = 72, [2][0][2][0][RTW89_WW][41] = 30, - [2][0][2][0][RTW89_WW][49] = 70, + [2][0][2][0][RTW89_WW][49] = 74, [2][1][2][0][RTW89_WW][3] = 0, [2][1][2][0][RTW89_WW][11] = 0, [2][1][2][0][RTW89_WW][18] = 0, @@ -5074,8 +5133,8 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_WW][34] = 0, [2][1][2][1][RTW89_WW][41] = 0, [2][1][2][1][RTW89_WW][49] = 0, - [3][0][2][0][RTW89_WW][7] = 58, - [3][0][2][0][RTW89_WW][22] = 58, + [3][0][2][0][RTW89_WW][7] = 0, + [3][0][2][0][RTW89_WW][22] = 0, [3][0][2][0][RTW89_WW][45] = 0, [3][1][2][0][RTW89_WW][7] = 0, [3][1][2][0][RTW89_WW][22] = 0, @@ -5083,7 +5142,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_WW][7] = 0, [3][1][2][1][RTW89_WW][22] = 0, [3][1][2][1][RTW89_WW][45] = 0, - [0][0][1][0][RTW89_FCC][0] = 76, + [0][0][1][0][RTW89_FCC][0] = 80, [0][0][1][0][RTW89_ETSI][0] = 58, [0][0][1][0][RTW89_MKK][0] = 60, [0][0][1][0][RTW89_IC][0] = 62, @@ -5139,7 +5198,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_ACMA][12] = 58, [0][0][1][0][RTW89_CN][12] = 60, [0][0][1][0][RTW89_UK][12] = 58, - [0][0][1][0][RTW89_FCC][14] = 74, + [0][0][1][0][RTW89_FCC][14] = 78, [0][0][1][0][RTW89_ETSI][14] = 58, [0][0][1][0][RTW89_MKK][14] = 60, [0][0][1][0][RTW89_IC][14] = 64, @@ -5147,10 +5206,10 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_ACMA][14] = 58, [0][0][1][0][RTW89_CN][14] = 60, [0][0][1][0][RTW89_UK][14] = 58, - [0][0][1][0][RTW89_FCC][15] = 74, + [0][0][1][0][RTW89_FCC][15] = 78, [0][0][1][0][RTW89_ETSI][15] = 58, [0][0][1][0][RTW89_MKK][15] = 78, - [0][0][1][0][RTW89_IC][15] = 74, + [0][0][1][0][RTW89_IC][15] = 78, [0][0][1][0][RTW89_KCC][15] = 78, [0][0][1][0][RTW89_ACMA][15] = 58, [0][0][1][0][RTW89_CN][15] = 127, @@ -5227,10 +5286,10 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_ACMA][33] = 60, [0][0][1][0][RTW89_CN][33] = 127, [0][0][1][0][RTW89_UK][33] = 60, - [0][0][1][0][RTW89_FCC][35] = 68, + [0][0][1][0][RTW89_FCC][35] = 72, [0][0][1][0][RTW89_ETSI][35] = 60, [0][0][1][0][RTW89_MKK][35] = 78, - [0][0][1][0][RTW89_IC][35] = 68, + [0][0][1][0][RTW89_IC][35] = 72, [0][0][1][0][RTW89_KCC][35] = 74, [0][0][1][0][RTW89_ACMA][35] = 60, [0][0][1][0][RTW89_CN][35] = 127, @@ -5249,7 +5308,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][38] = 82, [0][0][1][0][RTW89_KCC][38] = 70, [0][0][1][0][RTW89_ACMA][38] = 78, - [0][0][1][0][RTW89_CN][38] = 78, + [0][0][1][0][RTW89_CN][38] = 74, [0][0][1][0][RTW89_UK][38] = 58, [0][0][1][0][RTW89_FCC][40] = 82, [0][0][1][0][RTW89_ETSI][40] = 30, @@ -5257,7 +5316,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][40] = 82, [0][0][1][0][RTW89_KCC][40] = 76, [0][0][1][0][RTW89_ACMA][40] = 78, - [0][0][1][0][RTW89_CN][40] = 78, + [0][0][1][0][RTW89_CN][40] = 74, [0][0][1][0][RTW89_UK][40] = 58, [0][0][1][0][RTW89_FCC][42] = 82, [0][0][1][0][RTW89_ETSI][42] = 30, @@ 
-5265,7 +5324,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][42] = 82, [0][0][1][0][RTW89_KCC][42] = 76, [0][0][1][0][RTW89_ACMA][42] = 78, - [0][0][1][0][RTW89_CN][42] = 78, + [0][0][1][0][RTW89_CN][42] = 74, [0][0][1][0][RTW89_UK][42] = 58, [0][0][1][0][RTW89_FCC][44] = 82, [0][0][1][0][RTW89_ETSI][44] = 30, @@ -5273,7 +5332,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][44] = 82, [0][0][1][0][RTW89_KCC][44] = 76, [0][0][1][0][RTW89_ACMA][44] = 78, - [0][0][1][0][RTW89_CN][44] = 78, + [0][0][1][0][RTW89_CN][44] = 58, [0][0][1][0][RTW89_UK][44] = 58, [0][0][1][0][RTW89_FCC][46] = 82, [0][0][1][0][RTW89_ETSI][46] = 30, @@ -5281,9 +5340,9 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][46] = 82, [0][0][1][0][RTW89_KCC][46] = 76, [0][0][1][0][RTW89_ACMA][46] = 78, - [0][0][1][0][RTW89_CN][46] = 78, + [0][0][1][0][RTW89_CN][46] = 58, [0][0][1][0][RTW89_UK][46] = 58, - [0][0][1][0][RTW89_FCC][48] = 68, + [0][0][1][0][RTW89_FCC][48] = 72, [0][0][1][0][RTW89_ETSI][48] = 127, [0][0][1][0][RTW89_MKK][48] = 127, [0][0][1][0][RTW89_IC][48] = 127, @@ -5291,7 +5350,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_ACMA][48] = 127, [0][0][1][0][RTW89_CN][48] = 127, [0][0][1][0][RTW89_UK][48] = 127, - [0][0][1][0][RTW89_FCC][50] = 68, + [0][0][1][0][RTW89_FCC][50] = 72, [0][0][1][0][RTW89_ETSI][50] = 127, [0][0][1][0][RTW89_MKK][50] = 127, [0][0][1][0][RTW89_IC][50] = 127, @@ -5299,7 +5358,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_ACMA][50] = 127, [0][0][1][0][RTW89_CN][50] = 127, [0][0][1][0][RTW89_UK][50] = 127, - [0][0][1][0][RTW89_FCC][52] = 68, + [0][0][1][0][RTW89_FCC][52] = 72, [0][0][1][0][RTW89_ETSI][52] = 127, [0][0][1][0][RTW89_MKK][52] = 127, [0][0][1][0][RTW89_IC][52] = 127, @@ -5531,13 +5590,13 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_ACMA][52] = 127, [0][1][1][0][RTW89_CN][52] = 127, [0][1][1][0][RTW89_UK][52] = 127, - [0][0][2][0][RTW89_FCC][0] = 74, + [0][0][2][0][RTW89_FCC][0] = 78, [0][0][2][0][RTW89_ETSI][0] = 62, [0][0][2][0][RTW89_MKK][0] = 62, [0][0][2][0][RTW89_IC][0] = 64, [0][0][2][0][RTW89_KCC][0] = 76, [0][0][2][0][RTW89_ACMA][0] = 62, - [0][0][2][0][RTW89_CN][0] = 62, + [0][0][2][0][RTW89_CN][0] = 60, [0][0][2][0][RTW89_UK][0] = 62, [0][0][2][0][RTW89_FCC][2] = 82, [0][0][2][0][RTW89_ETSI][2] = 62, @@ -5545,7 +5604,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][2] = 64, [0][0][2][0][RTW89_KCC][2] = 76, [0][0][2][0][RTW89_ACMA][2] = 62, - [0][0][2][0][RTW89_CN][2] = 62, + [0][0][2][0][RTW89_CN][2] = 60, [0][0][2][0][RTW89_UK][2] = 62, [0][0][2][0][RTW89_FCC][4] = 82, [0][0][2][0][RTW89_ETSI][4] = 62, @@ -5553,7 +5612,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][4] = 64, [0][0][2][0][RTW89_KCC][4] = 76, [0][0][2][0][RTW89_ACMA][4] = 62, - [0][0][2][0][RTW89_CN][4] = 62, + [0][0][2][0][RTW89_CN][4] = 60, [0][0][2][0][RTW89_UK][4] = 62, [0][0][2][0][RTW89_FCC][6] = 82, [0][0][2][0][RTW89_ETSI][6] = 62, @@ -5561,7 +5620,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][6] = 64, [0][0][2][0][RTW89_KCC][6] = 54, [0][0][2][0][RTW89_ACMA][6] = 62, - [0][0][2][0][RTW89_CN][6] = 62, + [0][0][2][0][RTW89_CN][6] = 60, [0][0][2][0][RTW89_UK][6] = 62, [0][0][2][0][RTW89_FCC][8] = 82, 
[0][0][2][0][RTW89_ETSI][8] = 62, @@ -5569,7 +5628,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][8] = 64, [0][0][2][0][RTW89_KCC][8] = 76, [0][0][2][0][RTW89_ACMA][8] = 62, - [0][0][2][0][RTW89_CN][8] = 62, + [0][0][2][0][RTW89_CN][8] = 60, [0][0][2][0][RTW89_UK][8] = 62, [0][0][2][0][RTW89_FCC][10] = 82, [0][0][2][0][RTW89_ETSI][10] = 62, @@ -5577,7 +5636,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][10] = 64, [0][0][2][0][RTW89_KCC][10] = 76, [0][0][2][0][RTW89_ACMA][10] = 62, - [0][0][2][0][RTW89_CN][10] = 62, + [0][0][2][0][RTW89_CN][10] = 60, [0][0][2][0][RTW89_UK][10] = 62, [0][0][2][0][RTW89_FCC][12] = 82, [0][0][2][0][RTW89_ETSI][12] = 62, @@ -5585,20 +5644,20 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][12] = 64, [0][0][2][0][RTW89_KCC][12] = 78, [0][0][2][0][RTW89_ACMA][12] = 62, - [0][0][2][0][RTW89_CN][12] = 62, + [0][0][2][0][RTW89_CN][12] = 60, [0][0][2][0][RTW89_UK][12] = 62, - [0][0][2][0][RTW89_FCC][14] = 72, + [0][0][2][0][RTW89_FCC][14] = 76, [0][0][2][0][RTW89_ETSI][14] = 62, [0][0][2][0][RTW89_MKK][14] = 62, [0][0][2][0][RTW89_IC][14] = 64, [0][0][2][0][RTW89_KCC][14] = 78, [0][0][2][0][RTW89_ACMA][14] = 62, - [0][0][2][0][RTW89_CN][14] = 62, + [0][0][2][0][RTW89_CN][14] = 60, [0][0][2][0][RTW89_UK][14] = 62, - [0][0][2][0][RTW89_FCC][15] = 72, + [0][0][2][0][RTW89_FCC][15] = 76, [0][0][2][0][RTW89_ETSI][15] = 60, [0][0][2][0][RTW89_MKK][15] = 78, - [0][0][2][0][RTW89_IC][15] = 72, + [0][0][2][0][RTW89_IC][15] = 76, [0][0][2][0][RTW89_KCC][15] = 78, [0][0][2][0][RTW89_ACMA][15] = 60, [0][0][2][0][RTW89_CN][15] = 127, @@ -5675,10 +5734,10 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_ACMA][33] = 62, [0][0][2][0][RTW89_CN][33] = 127, [0][0][2][0][RTW89_UK][33] = 62, - [0][0][2][0][RTW89_FCC][35] = 68, + [0][0][2][0][RTW89_FCC][35] = 72, [0][0][2][0][RTW89_ETSI][35] = 62, [0][0][2][0][RTW89_MKK][35] = 78, - [0][0][2][0][RTW89_IC][35] = 68, + [0][0][2][0][RTW89_IC][35] = 72, [0][0][2][0][RTW89_KCC][35] = 74, [0][0][2][0][RTW89_ACMA][35] = 62, [0][0][2][0][RTW89_CN][35] = 127, @@ -5697,7 +5756,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][38] = 82, [0][0][2][0][RTW89_KCC][38] = 66, [0][0][2][0][RTW89_ACMA][38] = 78, - [0][0][2][0][RTW89_CN][38] = 78, + [0][0][2][0][RTW89_CN][38] = 70, [0][0][2][0][RTW89_UK][38] = 60, [0][0][2][0][RTW89_FCC][40] = 82, [0][0][2][0][RTW89_ETSI][40] = 30, @@ -5705,7 +5764,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][40] = 82, [0][0][2][0][RTW89_KCC][40] = 74, [0][0][2][0][RTW89_ACMA][40] = 78, - [0][0][2][0][RTW89_CN][40] = 78, + [0][0][2][0][RTW89_CN][40] = 70, [0][0][2][0][RTW89_UK][40] = 60, [0][0][2][0][RTW89_FCC][42] = 82, [0][0][2][0][RTW89_ETSI][42] = 30, @@ -5713,7 +5772,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][42] = 82, [0][0][2][0][RTW89_KCC][42] = 74, [0][0][2][0][RTW89_ACMA][42] = 78, - [0][0][2][0][RTW89_CN][42] = 78, + [0][0][2][0][RTW89_CN][42] = 70, [0][0][2][0][RTW89_UK][42] = 60, [0][0][2][0][RTW89_FCC][44] = 82, [0][0][2][0][RTW89_ETSI][44] = 30, @@ -5721,7 +5780,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][44] = 82, [0][0][2][0][RTW89_KCC][44] = 74, [0][0][2][0][RTW89_ACMA][44] = 78, - [0][0][2][0][RTW89_CN][44] = 78, + [0][0][2][0][RTW89_CN][44] = 
58, [0][0][2][0][RTW89_UK][44] = 60, [0][0][2][0][RTW89_FCC][46] = 82, [0][0][2][0][RTW89_ETSI][46] = 30, @@ -5729,9 +5788,9 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][46] = 82, [0][0][2][0][RTW89_KCC][46] = 74, [0][0][2][0][RTW89_ACMA][46] = 78, - [0][0][2][0][RTW89_CN][46] = 78, + [0][0][2][0][RTW89_CN][46] = 58, [0][0][2][0][RTW89_UK][46] = 60, - [0][0][2][0][RTW89_FCC][48] = 70, + [0][0][2][0][RTW89_FCC][48] = 74, [0][0][2][0][RTW89_ETSI][48] = 127, [0][0][2][0][RTW89_MKK][48] = 127, [0][0][2][0][RTW89_IC][48] = 127, @@ -5739,7 +5798,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_ACMA][48] = 127, [0][0][2][0][RTW89_CN][48] = 127, [0][0][2][0][RTW89_UK][48] = 127, - [0][0][2][0][RTW89_FCC][50] = 72, + [0][0][2][0][RTW89_FCC][50] = 76, [0][0][2][0][RTW89_ETSI][50] = 127, [0][0][2][0][RTW89_MKK][50] = 127, [0][0][2][0][RTW89_IC][50] = 127, @@ -5747,7 +5806,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_ACMA][50] = 127, [0][0][2][0][RTW89_CN][50] = 127, [0][0][2][0][RTW89_UK][50] = 127, - [0][0][2][0][RTW89_FCC][52] = 72, + [0][0][2][0][RTW89_FCC][52] = 76, [0][0][2][0][RTW89_ETSI][52] = 127, [0][0][2][0][RTW89_MKK][52] = 127, [0][0][2][0][RTW89_IC][52] = 127, @@ -6203,13 +6262,13 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_ACMA][52] = 127, [0][1][2][1][RTW89_CN][52] = 127, [0][1][2][1][RTW89_UK][52] = 127, - [1][0][2][0][RTW89_FCC][1] = 64, + [1][0][2][0][RTW89_FCC][1] = 68, [1][0][2][0][RTW89_ETSI][1] = 64, [1][0][2][0][RTW89_MKK][1] = 64, - [1][0][2][0][RTW89_IC][1] = 60, + [1][0][2][0][RTW89_IC][1] = 64, [1][0][2][0][RTW89_KCC][1] = 74, [1][0][2][0][RTW89_ACMA][1] = 64, - [1][0][2][0][RTW89_CN][1] = 64, + [1][0][2][0][RTW89_CN][1] = 62, [1][0][2][0][RTW89_UK][1] = 64, [1][0][2][0][RTW89_FCC][5] = 82, [1][0][2][0][RTW89_ETSI][5] = 64, @@ -6217,7 +6276,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][5] = 64, [1][0][2][0][RTW89_KCC][5] = 66, [1][0][2][0][RTW89_ACMA][5] = 64, - [1][0][2][0][RTW89_CN][5] = 64, + [1][0][2][0][RTW89_CN][5] = 62, [1][0][2][0][RTW89_UK][5] = 64, [1][0][2][0][RTW89_FCC][9] = 82, [1][0][2][0][RTW89_ETSI][9] = 64, @@ -6225,20 +6284,20 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][9] = 64, [1][0][2][0][RTW89_KCC][9] = 78, [1][0][2][0][RTW89_ACMA][9] = 64, - [1][0][2][0][RTW89_CN][9] = 64, + [1][0][2][0][RTW89_CN][9] = 62, [1][0][2][0][RTW89_UK][9] = 64, - [1][0][2][0][RTW89_FCC][13] = 62, + [1][0][2][0][RTW89_FCC][13] = 66, [1][0][2][0][RTW89_ETSI][13] = 64, [1][0][2][0][RTW89_MKK][13] = 64, - [1][0][2][0][RTW89_IC][13] = 60, + [1][0][2][0][RTW89_IC][13] = 64, [1][0][2][0][RTW89_KCC][13] = 72, [1][0][2][0][RTW89_ACMA][13] = 64, - [1][0][2][0][RTW89_CN][13] = 64, + [1][0][2][0][RTW89_CN][13] = 62, [1][0][2][0][RTW89_UK][13] = 64, - [1][0][2][0][RTW89_FCC][16] = 62, + [1][0][2][0][RTW89_FCC][16] = 66, [1][0][2][0][RTW89_ETSI][16] = 66, [1][0][2][0][RTW89_MKK][16] = 80, - [1][0][2][0][RTW89_IC][16] = 62, + [1][0][2][0][RTW89_IC][16] = 66, [1][0][2][0][RTW89_KCC][16] = 74, [1][0][2][0][RTW89_ACMA][16] = 66, [1][0][2][0][RTW89_CN][16] = 127, @@ -6246,7 +6305,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_FCC][20] = 80, [1][0][2][0][RTW89_ETSI][20] = 66, [1][0][2][0][RTW89_MKK][20] = 80, - [1][0][2][0][RTW89_IC][20] = 76, + [1][0][2][0][RTW89_IC][20] = 80, 
[1][0][2][0][RTW89_KCC][20] = 74, [1][0][2][0][RTW89_ACMA][20] = 66, [1][0][2][0][RTW89_CN][20] = 127, @@ -6267,10 +6326,10 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_ACMA][28] = 127, [1][0][2][0][RTW89_CN][28] = 127, [1][0][2][0][RTW89_UK][28] = 66, - [1][0][2][0][RTW89_FCC][32] = 72, + [1][0][2][0][RTW89_FCC][32] = 76, [1][0][2][0][RTW89_ETSI][32] = 66, [1][0][2][0][RTW89_MKK][32] = 80, - [1][0][2][0][RTW89_IC][32] = 72, + [1][0][2][0][RTW89_IC][32] = 76, [1][0][2][0][RTW89_KCC][32] = 78, [1][0][2][0][RTW89_ACMA][32] = 66, [1][0][2][0][RTW89_CN][32] = 127, @@ -6286,10 +6345,10 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_FCC][39] = 84, [1][0][2][0][RTW89_ETSI][39] = 30, [1][0][2][0][RTW89_MKK][39] = 127, - [1][0][2][0][RTW89_IC][39] = 80, + [1][0][2][0][RTW89_IC][39] = 84, [1][0][2][0][RTW89_KCC][39] = 68, [1][0][2][0][RTW89_ACMA][39] = 80, - [1][0][2][0][RTW89_CN][39] = 70, + [1][0][2][0][RTW89_CN][39] = 60, [1][0][2][0][RTW89_UK][39] = 64, [1][0][2][0][RTW89_FCC][43] = 84, [1][0][2][0][RTW89_ETSI][43] = 30, @@ -6297,9 +6356,9 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][43] = 84, [1][0][2][0][RTW89_KCC][43] = 78, [1][0][2][0][RTW89_ACMA][43] = 80, - [1][0][2][0][RTW89_CN][43] = 80, + [1][0][2][0][RTW89_CN][43] = 62, [1][0][2][0][RTW89_UK][43] = 64, - [1][0][2][0][RTW89_FCC][47] = 80, + [1][0][2][0][RTW89_FCC][47] = 84, [1][0][2][0][RTW89_ETSI][47] = 127, [1][0][2][0][RTW89_MKK][47] = 127, [1][0][2][0][RTW89_IC][47] = 127, @@ -6307,7 +6366,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_ACMA][47] = 127, [1][0][2][0][RTW89_CN][47] = 127, [1][0][2][0][RTW89_UK][47] = 127, - [1][0][2][0][RTW89_FCC][51] = 80, + [1][0][2][0][RTW89_FCC][51] = 84, [1][0][2][0][RTW89_ETSI][51] = 127, [1][0][2][0][RTW89_MKK][51] = 127, [1][0][2][0][RTW89_IC][51] = 127, @@ -6539,26 +6598,26 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_ACMA][51] = 127, [1][1][2][1][RTW89_CN][51] = 127, [1][1][2][1][RTW89_UK][51] = 127, - [2][0][2][0][RTW89_FCC][3] = 72, + [2][0][2][0][RTW89_FCC][3] = 76, [2][0][2][0][RTW89_ETSI][3] = 64, [2][0][2][0][RTW89_MKK][3] = 62, - [2][0][2][0][RTW89_IC][3] = 60, + [2][0][2][0][RTW89_IC][3] = 64, [2][0][2][0][RTW89_KCC][3] = 72, [2][0][2][0][RTW89_ACMA][3] = 64, - [2][0][2][0][RTW89_CN][3] = 64, + [2][0][2][0][RTW89_CN][3] = 60, [2][0][2][0][RTW89_UK][3] = 64, - [2][0][2][0][RTW89_FCC][11] = 60, + [2][0][2][0][RTW89_FCC][11] = 64, [2][0][2][0][RTW89_ETSI][11] = 64, [2][0][2][0][RTW89_MKK][11] = 64, - [2][0][2][0][RTW89_IC][11] = 58, + [2][0][2][0][RTW89_IC][11] = 62, [2][0][2][0][RTW89_KCC][11] = 72, [2][0][2][0][RTW89_ACMA][11] = 64, - [2][0][2][0][RTW89_CN][11] = 64, + [2][0][2][0][RTW89_CN][11] = 56, [2][0][2][0][RTW89_UK][11] = 64, - [2][0][2][0][RTW89_FCC][18] = 62, + [2][0][2][0][RTW89_FCC][18] = 66, [2][0][2][0][RTW89_ETSI][18] = 64, [2][0][2][0][RTW89_MKK][18] = 72, - [2][0][2][0][RTW89_IC][18] = 62, + [2][0][2][0][RTW89_IC][18] = 66, [2][0][2][0][RTW89_KCC][18] = 72, [2][0][2][0][RTW89_ACMA][18] = 64, [2][0][2][0][RTW89_CN][18] = 127, @@ -6574,7 +6633,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_FCC][34] = 76, [2][0][2][0][RTW89_ETSI][34] = 127, [2][0][2][0][RTW89_MKK][34] = 72, - [2][0][2][0][RTW89_IC][34] = 72, + [2][0][2][0][RTW89_IC][34] = 76, [2][0][2][0][RTW89_KCC][34] = 72, [2][0][2][0][RTW89_ACMA][34] = 72, 
[2][0][2][0][RTW89_CN][34] = 127, @@ -6582,12 +6641,12 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_FCC][41] = 76, [2][0][2][0][RTW89_ETSI][41] = 30, [2][0][2][0][RTW89_MKK][41] = 127, - [2][0][2][0][RTW89_IC][41] = 72, + [2][0][2][0][RTW89_IC][41] = 76, [2][0][2][0][RTW89_KCC][41] = 64, [2][0][2][0][RTW89_ACMA][41] = 72, - [2][0][2][0][RTW89_CN][41] = 72, + [2][0][2][0][RTW89_CN][41] = 40, [2][0][2][0][RTW89_UK][41] = 64, - [2][0][2][0][RTW89_FCC][49] = 70, + [2][0][2][0][RTW89_FCC][49] = 74, [2][0][2][0][RTW89_ETSI][49] = 127, [2][0][2][0][RTW89_MKK][49] = 127, [2][0][2][0][RTW89_IC][49] = 127, @@ -6713,7 +6772,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_IC][7] = 127, [3][0][2][0][RTW89_KCC][7] = 127, [3][0][2][0][RTW89_ACMA][7] = 127, - [3][0][2][0][RTW89_CN][7] = 58, + [3][0][2][0][RTW89_CN][7] = 127, [3][0][2][0][RTW89_UK][7] = 127, [3][0][2][0][RTW89_FCC][22] = 127, [3][0][2][0][RTW89_ETSI][22] = 127, @@ -6721,7 +6780,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_IC][22] = 127, [3][0][2][0][RTW89_KCC][22] = 127, [3][0][2][0][RTW89_ACMA][22] = 127, - [3][0][2][0][RTW89_CN][22] = 58, + [3][0][2][0][RTW89_CN][22] = 127, [3][0][2][0][RTW89_UK][22] = 127, [3][0][2][0][RTW89_FCC][45] = 127, [3][0][2][0][RTW89_ETSI][45] = 127, @@ -6798,19 +6857,19 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_WW][11] = 30, [0][0][RTW89_WW][12] = 30, [0][0][RTW89_WW][13] = 0, - [0][1][RTW89_WW][0] = 20, - [0][1][RTW89_WW][1] = 22, - [0][1][RTW89_WW][2] = 22, - [0][1][RTW89_WW][3] = 22, - [0][1][RTW89_WW][4] = 22, - [0][1][RTW89_WW][5] = 22, - [0][1][RTW89_WW][6] = 22, - [0][1][RTW89_WW][7] = 22, - [0][1][RTW89_WW][8] = 22, - [0][1][RTW89_WW][9] = 22, - [0][1][RTW89_WW][10] = 22, - [0][1][RTW89_WW][11] = 22, - [0][1][RTW89_WW][12] = 20, + [0][1][RTW89_WW][0] = 0, + [0][1][RTW89_WW][1] = 0, + [0][1][RTW89_WW][2] = 0, + [0][1][RTW89_WW][3] = 0, + [0][1][RTW89_WW][4] = 0, + [0][1][RTW89_WW][5] = 0, + [0][1][RTW89_WW][6] = 0, + [0][1][RTW89_WW][7] = 0, + [0][1][RTW89_WW][8] = 0, + [0][1][RTW89_WW][9] = 0, + [0][1][RTW89_WW][10] = 0, + [0][1][RTW89_WW][11] = 0, + [0][1][RTW89_WW][12] = 0, [0][1][RTW89_WW][13] = 0, [1][0][RTW89_WW][0] = 42, [1][0][RTW89_WW][1] = 42, @@ -6826,19 +6885,19 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_WW][11] = 42, [1][0][RTW89_WW][12] = 34, [1][0][RTW89_WW][13] = 0, - [1][1][RTW89_WW][0] = 32, - [1][1][RTW89_WW][1] = 32, - [1][1][RTW89_WW][2] = 32, - [1][1][RTW89_WW][3] = 32, - [1][1][RTW89_WW][4] = 32, - [1][1][RTW89_WW][5] = 32, - [1][1][RTW89_WW][6] = 32, - [1][1][RTW89_WW][7] = 32, - [1][1][RTW89_WW][8] = 32, - [1][1][RTW89_WW][9] = 32, - [1][1][RTW89_WW][10] = 32, - [1][1][RTW89_WW][11] = 32, - [1][1][RTW89_WW][12] = 32, + [1][1][RTW89_WW][0] = 0, + [1][1][RTW89_WW][1] = 0, + [1][1][RTW89_WW][2] = 0, + [1][1][RTW89_WW][3] = 0, + [1][1][RTW89_WW][4] = 0, + [1][1][RTW89_WW][5] = 0, + [1][1][RTW89_WW][6] = 0, + [1][1][RTW89_WW][7] = 0, + [1][1][RTW89_WW][8] = 0, + [1][1][RTW89_WW][9] = 0, + [1][1][RTW89_WW][10] = 0, + [1][1][RTW89_WW][11] = 0, + [1][1][RTW89_WW][12] = 0, [1][1][RTW89_WW][13] = 0, [2][0][RTW89_WW][0] = 54, [2][0][RTW89_WW][1] = 54, @@ -6854,19 +6913,19 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_WW][11] = 54, [2][0][RTW89_WW][12] = 34, [2][0][RTW89_WW][13] = 0, - [2][1][RTW89_WW][0] = 44, - [2][1][RTW89_WW][1] = 44, - 
[2][1][RTW89_WW][2] = 44, - [2][1][RTW89_WW][3] = 44, - [2][1][RTW89_WW][4] = 44, - [2][1][RTW89_WW][5] = 44, - [2][1][RTW89_WW][6] = 44, - [2][1][RTW89_WW][7] = 44, - [2][1][RTW89_WW][8] = 44, - [2][1][RTW89_WW][9] = 44, - [2][1][RTW89_WW][10] = 44, - [2][1][RTW89_WW][11] = 44, - [2][1][RTW89_WW][12] = 42, + [2][1][RTW89_WW][0] = 0, + [2][1][RTW89_WW][1] = 0, + [2][1][RTW89_WW][2] = 0, + [2][1][RTW89_WW][3] = 0, + [2][1][RTW89_WW][4] = 0, + [2][1][RTW89_WW][5] = 0, + [2][1][RTW89_WW][6] = 0, + [2][1][RTW89_WW][7] = 0, + [2][1][RTW89_WW][8] = 0, + [2][1][RTW89_WW][9] = 0, + [2][1][RTW89_WW][10] = 0, + [2][1][RTW89_WW][11] = 0, + [2][1][RTW89_WW][12] = 0, [2][1][RTW89_WW][13] = 0, [0][0][RTW89_FCC][0] = 62, [0][0][RTW89_ETSI][0] = 30, @@ -6986,7 +7045,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][0] = 127, [0][1][RTW89_KCC][0] = 127, [0][1][RTW89_ACMA][0] = 127, - [0][1][RTW89_CN][0] = 20, + [0][1][RTW89_CN][0] = 127, [0][1][RTW89_UK][0] = 127, [0][1][RTW89_FCC][1] = 127, [0][1][RTW89_ETSI][1] = 127, @@ -6994,7 +7053,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][1] = 127, [0][1][RTW89_KCC][1] = 127, [0][1][RTW89_ACMA][1] = 127, - [0][1][RTW89_CN][1] = 22, + [0][1][RTW89_CN][1] = 127, [0][1][RTW89_UK][1] = 127, [0][1][RTW89_FCC][2] = 127, [0][1][RTW89_ETSI][2] = 127, @@ -7002,7 +7061,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][2] = 127, [0][1][RTW89_KCC][2] = 127, [0][1][RTW89_ACMA][2] = 127, - [0][1][RTW89_CN][2] = 22, + [0][1][RTW89_CN][2] = 127, [0][1][RTW89_UK][2] = 127, [0][1][RTW89_FCC][3] = 127, [0][1][RTW89_ETSI][3] = 127, @@ -7010,7 +7069,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][3] = 127, [0][1][RTW89_KCC][3] = 127, [0][1][RTW89_ACMA][3] = 127, - [0][1][RTW89_CN][3] = 22, + [0][1][RTW89_CN][3] = 127, [0][1][RTW89_UK][3] = 127, [0][1][RTW89_FCC][4] = 127, [0][1][RTW89_ETSI][4] = 127, @@ -7018,7 +7077,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][4] = 127, [0][1][RTW89_KCC][4] = 127, [0][1][RTW89_ACMA][4] = 127, - [0][1][RTW89_CN][4] = 22, + [0][1][RTW89_CN][4] = 127, [0][1][RTW89_UK][4] = 127, [0][1][RTW89_FCC][5] = 127, [0][1][RTW89_ETSI][5] = 127, @@ -7026,7 +7085,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][5] = 127, [0][1][RTW89_KCC][5] = 127, [0][1][RTW89_ACMA][5] = 127, - [0][1][RTW89_CN][5] = 22, + [0][1][RTW89_CN][5] = 127, [0][1][RTW89_UK][5] = 127, [0][1][RTW89_FCC][6] = 127, [0][1][RTW89_ETSI][6] = 127, @@ -7034,7 +7093,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][6] = 127, [0][1][RTW89_KCC][6] = 127, [0][1][RTW89_ACMA][6] = 127, - [0][1][RTW89_CN][6] = 22, + [0][1][RTW89_CN][6] = 127, [0][1][RTW89_UK][6] = 127, [0][1][RTW89_FCC][7] = 127, [0][1][RTW89_ETSI][7] = 127, @@ -7042,7 +7101,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][7] = 127, [0][1][RTW89_KCC][7] = 127, [0][1][RTW89_ACMA][7] = 127, - [0][1][RTW89_CN][7] = 22, + [0][1][RTW89_CN][7] = 127, [0][1][RTW89_UK][7] = 127, [0][1][RTW89_FCC][8] = 127, [0][1][RTW89_ETSI][8] = 127, @@ -7050,7 +7109,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][8] = 127, [0][1][RTW89_KCC][8] = 127, [0][1][RTW89_ACMA][8] = 127, - [0][1][RTW89_CN][8] = 22, + [0][1][RTW89_CN][8] = 127, [0][1][RTW89_UK][8] = 127, [0][1][RTW89_FCC][9] = 127, [0][1][RTW89_ETSI][9] 
= 127, @@ -7058,7 +7117,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][9] = 127, [0][1][RTW89_KCC][9] = 127, [0][1][RTW89_ACMA][9] = 127, - [0][1][RTW89_CN][9] = 22, + [0][1][RTW89_CN][9] = 127, [0][1][RTW89_UK][9] = 127, [0][1][RTW89_FCC][10] = 127, [0][1][RTW89_ETSI][10] = 127, @@ -7066,7 +7125,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][10] = 127, [0][1][RTW89_KCC][10] = 127, [0][1][RTW89_ACMA][10] = 127, - [0][1][RTW89_CN][10] = 22, + [0][1][RTW89_CN][10] = 127, [0][1][RTW89_UK][10] = 127, [0][1][RTW89_FCC][11] = 127, [0][1][RTW89_ETSI][11] = 127, @@ -7074,7 +7133,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][11] = 127, [0][1][RTW89_KCC][11] = 127, [0][1][RTW89_ACMA][11] = 127, - [0][1][RTW89_CN][11] = 22, + [0][1][RTW89_CN][11] = 127, [0][1][RTW89_UK][11] = 127, [0][1][RTW89_FCC][12] = 127, [0][1][RTW89_ETSI][12] = 127, @@ -7082,7 +7141,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][12] = 127, [0][1][RTW89_KCC][12] = 127, [0][1][RTW89_ACMA][12] = 127, - [0][1][RTW89_CN][12] = 20, + [0][1][RTW89_CN][12] = 127, [0][1][RTW89_UK][12] = 127, [0][1][RTW89_FCC][13] = 127, [0][1][RTW89_ETSI][13] = 127, @@ -7210,7 +7269,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][0] = 127, [1][1][RTW89_KCC][0] = 127, [1][1][RTW89_ACMA][0] = 127, - [1][1][RTW89_CN][0] = 32, + [1][1][RTW89_CN][0] = 127, [1][1][RTW89_UK][0] = 127, [1][1][RTW89_FCC][1] = 127, [1][1][RTW89_ETSI][1] = 127, @@ -7218,7 +7277,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][1] = 127, [1][1][RTW89_KCC][1] = 127, [1][1][RTW89_ACMA][1] = 127, - [1][1][RTW89_CN][1] = 32, + [1][1][RTW89_CN][1] = 127, [1][1][RTW89_UK][1] = 127, [1][1][RTW89_FCC][2] = 127, [1][1][RTW89_ETSI][2] = 127, @@ -7226,7 +7285,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][2] = 127, [1][1][RTW89_KCC][2] = 127, [1][1][RTW89_ACMA][2] = 127, - [1][1][RTW89_CN][2] = 32, + [1][1][RTW89_CN][2] = 127, [1][1][RTW89_UK][2] = 127, [1][1][RTW89_FCC][3] = 127, [1][1][RTW89_ETSI][3] = 127, @@ -7234,7 +7293,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][3] = 127, [1][1][RTW89_KCC][3] = 127, [1][1][RTW89_ACMA][3] = 127, - [1][1][RTW89_CN][3] = 32, + [1][1][RTW89_CN][3] = 127, [1][1][RTW89_UK][3] = 127, [1][1][RTW89_FCC][4] = 127, [1][1][RTW89_ETSI][4] = 127, @@ -7242,7 +7301,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][4] = 127, [1][1][RTW89_KCC][4] = 127, [1][1][RTW89_ACMA][4] = 127, - [1][1][RTW89_CN][4] = 32, + [1][1][RTW89_CN][4] = 127, [1][1][RTW89_UK][4] = 127, [1][1][RTW89_FCC][5] = 127, [1][1][RTW89_ETSI][5] = 127, @@ -7250,7 +7309,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][5] = 127, [1][1][RTW89_KCC][5] = 127, [1][1][RTW89_ACMA][5] = 127, - [1][1][RTW89_CN][5] = 32, + [1][1][RTW89_CN][5] = 127, [1][1][RTW89_UK][5] = 127, [1][1][RTW89_FCC][6] = 127, [1][1][RTW89_ETSI][6] = 127, @@ -7258,7 +7317,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][6] = 127, [1][1][RTW89_KCC][6] = 127, [1][1][RTW89_ACMA][6] = 127, - [1][1][RTW89_CN][6] = 32, + [1][1][RTW89_CN][6] = 127, [1][1][RTW89_UK][6] = 127, [1][1][RTW89_FCC][7] = 127, [1][1][RTW89_ETSI][7] = 127, @@ -7266,7 +7325,7 @@ const s8 
rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][7] = 127, [1][1][RTW89_KCC][7] = 127, [1][1][RTW89_ACMA][7] = 127, - [1][1][RTW89_CN][7] = 32, + [1][1][RTW89_CN][7] = 127, [1][1][RTW89_UK][7] = 127, [1][1][RTW89_FCC][8] = 127, [1][1][RTW89_ETSI][8] = 127, @@ -7274,7 +7333,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][8] = 127, [1][1][RTW89_KCC][8] = 127, [1][1][RTW89_ACMA][8] = 127, - [1][1][RTW89_CN][8] = 32, + [1][1][RTW89_CN][8] = 127, [1][1][RTW89_UK][8] = 127, [1][1][RTW89_FCC][9] = 127, [1][1][RTW89_ETSI][9] = 127, @@ -7282,7 +7341,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][9] = 127, [1][1][RTW89_KCC][9] = 127, [1][1][RTW89_ACMA][9] = 127, - [1][1][RTW89_CN][9] = 32, + [1][1][RTW89_CN][9] = 127, [1][1][RTW89_UK][9] = 127, [1][1][RTW89_FCC][10] = 127, [1][1][RTW89_ETSI][10] = 127, @@ -7290,7 +7349,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][10] = 127, [1][1][RTW89_KCC][10] = 127, [1][1][RTW89_ACMA][10] = 127, - [1][1][RTW89_CN][10] = 32, + [1][1][RTW89_CN][10] = 127, [1][1][RTW89_UK][10] = 127, [1][1][RTW89_FCC][11] = 127, [1][1][RTW89_ETSI][11] = 127, @@ -7298,7 +7357,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][11] = 127, [1][1][RTW89_KCC][11] = 127, [1][1][RTW89_ACMA][11] = 127, - [1][1][RTW89_CN][11] = 32, + [1][1][RTW89_CN][11] = 127, [1][1][RTW89_UK][11] = 127, [1][1][RTW89_FCC][12] = 127, [1][1][RTW89_ETSI][12] = 127, @@ -7306,7 +7365,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][12] = 127, [1][1][RTW89_KCC][12] = 127, [1][1][RTW89_ACMA][12] = 127, - [1][1][RTW89_CN][12] = 32, + [1][1][RTW89_CN][12] = 127, [1][1][RTW89_UK][12] = 127, [1][1][RTW89_FCC][13] = 127, [1][1][RTW89_ETSI][13] = 127, @@ -7434,7 +7493,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][0] = 127, [2][1][RTW89_KCC][0] = 127, [2][1][RTW89_ACMA][0] = 127, - [2][1][RTW89_CN][0] = 44, + [2][1][RTW89_CN][0] = 127, [2][1][RTW89_UK][0] = 127, [2][1][RTW89_FCC][1] = 127, [2][1][RTW89_ETSI][1] = 127, @@ -7442,7 +7501,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][1] = 127, [2][1][RTW89_KCC][1] = 127, [2][1][RTW89_ACMA][1] = 127, - [2][1][RTW89_CN][1] = 44, + [2][1][RTW89_CN][1] = 127, [2][1][RTW89_UK][1] = 127, [2][1][RTW89_FCC][2] = 127, [2][1][RTW89_ETSI][2] = 127, @@ -7450,7 +7509,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][2] = 127, [2][1][RTW89_KCC][2] = 127, [2][1][RTW89_ACMA][2] = 127, - [2][1][RTW89_CN][2] = 44, + [2][1][RTW89_CN][2] = 127, [2][1][RTW89_UK][2] = 127, [2][1][RTW89_FCC][3] = 127, [2][1][RTW89_ETSI][3] = 127, @@ -7458,7 +7517,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][3] = 127, [2][1][RTW89_KCC][3] = 127, [2][1][RTW89_ACMA][3] = 127, - [2][1][RTW89_CN][3] = 44, + [2][1][RTW89_CN][3] = 127, [2][1][RTW89_UK][3] = 127, [2][1][RTW89_FCC][4] = 127, [2][1][RTW89_ETSI][4] = 127, @@ -7466,7 +7525,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][4] = 127, [2][1][RTW89_KCC][4] = 127, [2][1][RTW89_ACMA][4] = 127, - [2][1][RTW89_CN][4] = 44, + [2][1][RTW89_CN][4] = 127, [2][1][RTW89_UK][4] = 127, [2][1][RTW89_FCC][5] = 127, [2][1][RTW89_ETSI][5] = 127, @@ -7474,7 +7533,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][5] = 127, 
[2][1][RTW89_KCC][5] = 127, [2][1][RTW89_ACMA][5] = 127, - [2][1][RTW89_CN][5] = 44, + [2][1][RTW89_CN][5] = 127, [2][1][RTW89_UK][5] = 127, [2][1][RTW89_FCC][6] = 127, [2][1][RTW89_ETSI][6] = 127, @@ -7482,7 +7541,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][6] = 127, [2][1][RTW89_KCC][6] = 127, [2][1][RTW89_ACMA][6] = 127, - [2][1][RTW89_CN][6] = 44, + [2][1][RTW89_CN][6] = 127, [2][1][RTW89_UK][6] = 127, [2][1][RTW89_FCC][7] = 127, [2][1][RTW89_ETSI][7] = 127, @@ -7490,7 +7549,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][7] = 127, [2][1][RTW89_KCC][7] = 127, [2][1][RTW89_ACMA][7] = 127, - [2][1][RTW89_CN][7] = 44, + [2][1][RTW89_CN][7] = 127, [2][1][RTW89_UK][7] = 127, [2][1][RTW89_FCC][8] = 127, [2][1][RTW89_ETSI][8] = 127, @@ -7498,7 +7557,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][8] = 127, [2][1][RTW89_KCC][8] = 127, [2][1][RTW89_ACMA][8] = 127, - [2][1][RTW89_CN][8] = 44, + [2][1][RTW89_CN][8] = 127, [2][1][RTW89_UK][8] = 127, [2][1][RTW89_FCC][9] = 127, [2][1][RTW89_ETSI][9] = 127, @@ -7506,7 +7565,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][9] = 127, [2][1][RTW89_KCC][9] = 127, [2][1][RTW89_ACMA][9] = 127, - [2][1][RTW89_CN][9] = 44, + [2][1][RTW89_CN][9] = 127, [2][1][RTW89_UK][9] = 127, [2][1][RTW89_FCC][10] = 127, [2][1][RTW89_ETSI][10] = 127, @@ -7514,7 +7573,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][10] = 127, [2][1][RTW89_KCC][10] = 127, [2][1][RTW89_ACMA][10] = 127, - [2][1][RTW89_CN][10] = 44, + [2][1][RTW89_CN][10] = 127, [2][1][RTW89_UK][10] = 127, [2][1][RTW89_FCC][11] = 127, [2][1][RTW89_ETSI][11] = 127, @@ -7522,7 +7581,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][11] = 127, [2][1][RTW89_KCC][11] = 127, [2][1][RTW89_ACMA][11] = 127, - [2][1][RTW89_CN][11] = 44, + [2][1][RTW89_CN][11] = 127, [2][1][RTW89_UK][11] = 127, [2][1][RTW89_FCC][12] = 127, [2][1][RTW89_ETSI][12] = 127, @@ -7530,7 +7589,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][12] = 127, [2][1][RTW89_KCC][12] = 127, [2][1][RTW89_ACMA][12] = 127, - [2][1][RTW89_CN][12] = 42, + [2][1][RTW89_CN][12] = 127, [2][1][RTW89_UK][12] = 127, [2][1][RTW89_FCC][13] = 127, [2][1][RTW89_ETSI][13] = 127, @@ -7573,14 +7632,14 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_WW][48] = 42, [0][0][RTW89_WW][50] = 42, [0][0][RTW89_WW][52] = 40, - [0][1][RTW89_WW][0] = 4, - [0][1][RTW89_WW][2] = 4, - [0][1][RTW89_WW][4] = 4, - [0][1][RTW89_WW][6] = 4, - [0][1][RTW89_WW][8] = 4, - [0][1][RTW89_WW][10] = 4, - [0][1][RTW89_WW][12] = 4, - [0][1][RTW89_WW][14] = 4, + [0][1][RTW89_WW][0] = 0, + [0][1][RTW89_WW][2] = 0, + [0][1][RTW89_WW][4] = 0, + [0][1][RTW89_WW][6] = 0, + [0][1][RTW89_WW][8] = 0, + [0][1][RTW89_WW][10] = 0, + [0][1][RTW89_WW][12] = 0, + [0][1][RTW89_WW][14] = 0, [0][1][RTW89_WW][15] = 0, [0][1][RTW89_WW][17] = 0, [0][1][RTW89_WW][19] = 0, @@ -7593,11 +7652,11 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_WW][33] = 0, [0][1][RTW89_WW][35] = 0, [0][1][RTW89_WW][37] = 0, - [0][1][RTW89_WW][38] = 42, - [0][1][RTW89_WW][40] = 42, - [0][1][RTW89_WW][42] = 42, - [0][1][RTW89_WW][44] = 42, - [0][1][RTW89_WW][46] = 42, + [0][1][RTW89_WW][38] = 0, + [0][1][RTW89_WW][40] = 0, + [0][1][RTW89_WW][42] = 0, + [0][1][RTW89_WW][44] = 0, + [0][1][RTW89_WW][46] = 0, 
[0][1][RTW89_WW][48] = 0, [0][1][RTW89_WW][50] = 0, [0][1][RTW89_WW][52] = 0, @@ -7629,14 +7688,14 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_WW][48] = 52, [1][0][RTW89_WW][50] = 52, [1][0][RTW89_WW][52] = 52, - [1][1][RTW89_WW][0] = 14, - [1][1][RTW89_WW][2] = 14, - [1][1][RTW89_WW][4] = 14, - [1][1][RTW89_WW][6] = 14, - [1][1][RTW89_WW][8] = 14, - [1][1][RTW89_WW][10] = 14, - [1][1][RTW89_WW][12] = 14, - [1][1][RTW89_WW][14] = 14, + [1][1][RTW89_WW][0] = 0, + [1][1][RTW89_WW][2] = 0, + [1][1][RTW89_WW][4] = 0, + [1][1][RTW89_WW][6] = 0, + [1][1][RTW89_WW][8] = 0, + [1][1][RTW89_WW][10] = 0, + [1][1][RTW89_WW][12] = 0, + [1][1][RTW89_WW][14] = 0, [1][1][RTW89_WW][15] = 0, [1][1][RTW89_WW][17] = 0, [1][1][RTW89_WW][19] = 0, @@ -7649,11 +7708,11 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_WW][33] = 0, [1][1][RTW89_WW][35] = 0, [1][1][RTW89_WW][37] = 0, - [1][1][RTW89_WW][38] = 54, - [1][1][RTW89_WW][40] = 54, - [1][1][RTW89_WW][42] = 54, - [1][1][RTW89_WW][44] = 54, - [1][1][RTW89_WW][46] = 54, + [1][1][RTW89_WW][38] = 0, + [1][1][RTW89_WW][40] = 0, + [1][1][RTW89_WW][42] = 0, + [1][1][RTW89_WW][44] = 0, + [1][1][RTW89_WW][46] = 0, [1][1][RTW89_WW][48] = 0, [1][1][RTW89_WW][50] = 0, [1][1][RTW89_WW][52] = 0, @@ -7685,14 +7744,14 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_WW][48] = 64, [2][0][RTW89_WW][50] = 64, [2][0][RTW89_WW][52] = 60, - [2][1][RTW89_WW][0] = 28, - [2][1][RTW89_WW][2] = 28, - [2][1][RTW89_WW][4] = 28, - [2][1][RTW89_WW][6] = 28, - [2][1][RTW89_WW][8] = 28, - [2][1][RTW89_WW][10] = 28, - [2][1][RTW89_WW][12] = 28, - [2][1][RTW89_WW][14] = 28, + [2][1][RTW89_WW][0] = 0, + [2][1][RTW89_WW][2] = 0, + [2][1][RTW89_WW][4] = 0, + [2][1][RTW89_WW][6] = 0, + [2][1][RTW89_WW][8] = 0, + [2][1][RTW89_WW][10] = 0, + [2][1][RTW89_WW][12] = 0, + [2][1][RTW89_WW][14] = 0, [2][1][RTW89_WW][15] = 0, [2][1][RTW89_WW][17] = 0, [2][1][RTW89_WW][19] = 0, @@ -7705,11 +7764,11 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_WW][33] = 0, [2][1][RTW89_WW][35] = 0, [2][1][RTW89_WW][37] = 0, - [2][1][RTW89_WW][38] = 56, - [2][1][RTW89_WW][40] = 56, - [2][1][RTW89_WW][42] = 56, - [2][1][RTW89_WW][44] = 56, - [2][1][RTW89_WW][46] = 56, + [2][1][RTW89_WW][38] = 0, + [2][1][RTW89_WW][40] = 0, + [2][1][RTW89_WW][42] = 0, + [2][1][RTW89_WW][44] = 0, + [2][1][RTW89_WW][46] = 0, [2][1][RTW89_WW][48] = 0, [2][1][RTW89_WW][50] = 0, [2][1][RTW89_WW][52] = 0, @@ -7943,7 +8002,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][0] = 127, [0][1][RTW89_KCC][0] = 127, [0][1][RTW89_ACMA][0] = 127, - [0][1][RTW89_CN][0] = 4, + [0][1][RTW89_CN][0] = 127, [0][1][RTW89_UK][0] = 127, [0][1][RTW89_FCC][2] = 127, [0][1][RTW89_ETSI][2] = 127, @@ -7951,7 +8010,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][2] = 127, [0][1][RTW89_KCC][2] = 127, [0][1][RTW89_ACMA][2] = 127, - [0][1][RTW89_CN][2] = 4, + [0][1][RTW89_CN][2] = 127, [0][1][RTW89_UK][2] = 127, [0][1][RTW89_FCC][4] = 127, [0][1][RTW89_ETSI][4] = 127, @@ -7959,7 +8018,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][4] = 127, [0][1][RTW89_KCC][4] = 127, [0][1][RTW89_ACMA][4] = 127, - [0][1][RTW89_CN][4] = 4, + [0][1][RTW89_CN][4] = 127, [0][1][RTW89_UK][4] = 127, [0][1][RTW89_FCC][6] = 127, [0][1][RTW89_ETSI][6] = 127, @@ -7967,7 +8026,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] 
[0][1][RTW89_IC][6] = 127, [0][1][RTW89_KCC][6] = 127, [0][1][RTW89_ACMA][6] = 127, - [0][1][RTW89_CN][6] = 4, + [0][1][RTW89_CN][6] = 127, [0][1][RTW89_UK][6] = 127, [0][1][RTW89_FCC][8] = 127, [0][1][RTW89_ETSI][8] = 127, @@ -7975,7 +8034,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][8] = 127, [0][1][RTW89_KCC][8] = 127, [0][1][RTW89_ACMA][8] = 127, - [0][1][RTW89_CN][8] = 4, + [0][1][RTW89_CN][8] = 127, [0][1][RTW89_UK][8] = 127, [0][1][RTW89_FCC][10] = 127, [0][1][RTW89_ETSI][10] = 127, @@ -7983,7 +8042,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][10] = 127, [0][1][RTW89_KCC][10] = 127, [0][1][RTW89_ACMA][10] = 127, - [0][1][RTW89_CN][10] = 4, + [0][1][RTW89_CN][10] = 127, [0][1][RTW89_UK][10] = 127, [0][1][RTW89_FCC][12] = 127, [0][1][RTW89_ETSI][12] = 127, @@ -7991,7 +8050,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][12] = 127, [0][1][RTW89_KCC][12] = 127, [0][1][RTW89_ACMA][12] = 127, - [0][1][RTW89_CN][12] = 4, + [0][1][RTW89_CN][12] = 127, [0][1][RTW89_UK][12] = 127, [0][1][RTW89_FCC][14] = 127, [0][1][RTW89_ETSI][14] = 127, @@ -7999,7 +8058,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][14] = 127, [0][1][RTW89_KCC][14] = 127, [0][1][RTW89_ACMA][14] = 127, - [0][1][RTW89_CN][14] = 4, + [0][1][RTW89_CN][14] = 127, [0][1][RTW89_UK][14] = 127, [0][1][RTW89_FCC][15] = 127, [0][1][RTW89_ETSI][15] = 127, @@ -8103,7 +8162,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][38] = 127, [0][1][RTW89_KCC][38] = 127, [0][1][RTW89_ACMA][38] = 127, - [0][1][RTW89_CN][38] = 42, + [0][1][RTW89_CN][38] = 127, [0][1][RTW89_UK][38] = 127, [0][1][RTW89_FCC][40] = 127, [0][1][RTW89_ETSI][40] = 127, @@ -8111,7 +8170,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][40] = 127, [0][1][RTW89_KCC][40] = 127, [0][1][RTW89_ACMA][40] = 127, - [0][1][RTW89_CN][40] = 42, + [0][1][RTW89_CN][40] = 127, [0][1][RTW89_UK][40] = 127, [0][1][RTW89_FCC][42] = 127, [0][1][RTW89_ETSI][42] = 127, @@ -8119,7 +8178,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][42] = 127, [0][1][RTW89_KCC][42] = 127, [0][1][RTW89_ACMA][42] = 127, - [0][1][RTW89_CN][42] = 42, + [0][1][RTW89_CN][42] = 127, [0][1][RTW89_UK][42] = 127, [0][1][RTW89_FCC][44] = 127, [0][1][RTW89_ETSI][44] = 127, @@ -8127,7 +8186,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][44] = 127, [0][1][RTW89_KCC][44] = 127, [0][1][RTW89_ACMA][44] = 127, - [0][1][RTW89_CN][44] = 42, + [0][1][RTW89_CN][44] = 127, [0][1][RTW89_UK][44] = 127, [0][1][RTW89_FCC][46] = 127, [0][1][RTW89_ETSI][46] = 127, @@ -8135,7 +8194,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][46] = 127, [0][1][RTW89_KCC][46] = 127, [0][1][RTW89_ACMA][46] = 127, - [0][1][RTW89_CN][46] = 42, + [0][1][RTW89_CN][46] = 127, [0][1][RTW89_UK][46] = 127, [0][1][RTW89_FCC][48] = 127, [0][1][RTW89_ETSI][48] = 127, @@ -8391,7 +8450,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][0] = 127, [1][1][RTW89_KCC][0] = 127, [1][1][RTW89_ACMA][0] = 127, - [1][1][RTW89_CN][0] = 14, + [1][1][RTW89_CN][0] = 127, [1][1][RTW89_UK][0] = 127, [1][1][RTW89_FCC][2] = 127, [1][1][RTW89_ETSI][2] = 127, @@ -8399,7 +8458,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][2] = 127, [1][1][RTW89_KCC][2] = 
127, [1][1][RTW89_ACMA][2] = 127, - [1][1][RTW89_CN][2] = 14, + [1][1][RTW89_CN][2] = 127, [1][1][RTW89_UK][2] = 127, [1][1][RTW89_FCC][4] = 127, [1][1][RTW89_ETSI][4] = 127, @@ -8407,7 +8466,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][4] = 127, [1][1][RTW89_KCC][4] = 127, [1][1][RTW89_ACMA][4] = 127, - [1][1][RTW89_CN][4] = 14, + [1][1][RTW89_CN][4] = 127, [1][1][RTW89_UK][4] = 127, [1][1][RTW89_FCC][6] = 127, [1][1][RTW89_ETSI][6] = 127, @@ -8415,7 +8474,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][6] = 127, [1][1][RTW89_KCC][6] = 127, [1][1][RTW89_ACMA][6] = 127, - [1][1][RTW89_CN][6] = 14, + [1][1][RTW89_CN][6] = 127, [1][1][RTW89_UK][6] = 127, [1][1][RTW89_FCC][8] = 127, [1][1][RTW89_ETSI][8] = 127, @@ -8423,7 +8482,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][8] = 127, [1][1][RTW89_KCC][8] = 127, [1][1][RTW89_ACMA][8] = 127, - [1][1][RTW89_CN][8] = 14, + [1][1][RTW89_CN][8] = 127, [1][1][RTW89_UK][8] = 127, [1][1][RTW89_FCC][10] = 127, [1][1][RTW89_ETSI][10] = 127, @@ -8431,7 +8490,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][10] = 127, [1][1][RTW89_KCC][10] = 127, [1][1][RTW89_ACMA][10] = 127, - [1][1][RTW89_CN][10] = 14, + [1][1][RTW89_CN][10] = 127, [1][1][RTW89_UK][10] = 127, [1][1][RTW89_FCC][12] = 127, [1][1][RTW89_ETSI][12] = 127, @@ -8439,7 +8498,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][12] = 127, [1][1][RTW89_KCC][12] = 127, [1][1][RTW89_ACMA][12] = 127, - [1][1][RTW89_CN][12] = 14, + [1][1][RTW89_CN][12] = 127, [1][1][RTW89_UK][12] = 127, [1][1][RTW89_FCC][14] = 127, [1][1][RTW89_ETSI][14] = 127, @@ -8447,7 +8506,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][14] = 127, [1][1][RTW89_KCC][14] = 127, [1][1][RTW89_ACMA][14] = 127, - [1][1][RTW89_CN][14] = 14, + [1][1][RTW89_CN][14] = 127, [1][1][RTW89_UK][14] = 127, [1][1][RTW89_FCC][15] = 127, [1][1][RTW89_ETSI][15] = 127, @@ -8551,7 +8610,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][38] = 127, [1][1][RTW89_KCC][38] = 127, [1][1][RTW89_ACMA][38] = 127, - [1][1][RTW89_CN][38] = 54, + [1][1][RTW89_CN][38] = 127, [1][1][RTW89_UK][38] = 127, [1][1][RTW89_FCC][40] = 127, [1][1][RTW89_ETSI][40] = 127, @@ -8559,7 +8618,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][40] = 127, [1][1][RTW89_KCC][40] = 127, [1][1][RTW89_ACMA][40] = 127, - [1][1][RTW89_CN][40] = 54, + [1][1][RTW89_CN][40] = 127, [1][1][RTW89_UK][40] = 127, [1][1][RTW89_FCC][42] = 127, [1][1][RTW89_ETSI][42] = 127, @@ -8567,7 +8626,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][42] = 127, [1][1][RTW89_KCC][42] = 127, [1][1][RTW89_ACMA][42] = 127, - [1][1][RTW89_CN][42] = 54, + [1][1][RTW89_CN][42] = 127, [1][1][RTW89_UK][42] = 127, [1][1][RTW89_FCC][44] = 127, [1][1][RTW89_ETSI][44] = 127, @@ -8575,7 +8634,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][44] = 127, [1][1][RTW89_KCC][44] = 127, [1][1][RTW89_ACMA][44] = 127, - [1][1][RTW89_CN][44] = 54, + [1][1][RTW89_CN][44] = 127, [1][1][RTW89_UK][44] = 127, [1][1][RTW89_FCC][46] = 127, [1][1][RTW89_ETSI][46] = 127, @@ -8583,7 +8642,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][46] = 127, [1][1][RTW89_KCC][46] = 127, [1][1][RTW89_ACMA][46] = 127, - 
[1][1][RTW89_CN][46] = 54, + [1][1][RTW89_CN][46] = 127, [1][1][RTW89_UK][46] = 127, [1][1][RTW89_FCC][48] = 127, [1][1][RTW89_ETSI][48] = 127, @@ -8839,7 +8898,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][0] = 127, [2][1][RTW89_KCC][0] = 127, [2][1][RTW89_ACMA][0] = 127, - [2][1][RTW89_CN][0] = 28, + [2][1][RTW89_CN][0] = 127, [2][1][RTW89_UK][0] = 127, [2][1][RTW89_FCC][2] = 127, [2][1][RTW89_ETSI][2] = 127, @@ -8847,7 +8906,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][2] = 127, [2][1][RTW89_KCC][2] = 127, [2][1][RTW89_ACMA][2] = 127, - [2][1][RTW89_CN][2] = 28, + [2][1][RTW89_CN][2] = 127, [2][1][RTW89_UK][2] = 127, [2][1][RTW89_FCC][4] = 127, [2][1][RTW89_ETSI][4] = 127, @@ -8855,7 +8914,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][4] = 127, [2][1][RTW89_KCC][4] = 127, [2][1][RTW89_ACMA][4] = 127, - [2][1][RTW89_CN][4] = 28, + [2][1][RTW89_CN][4] = 127, [2][1][RTW89_UK][4] = 127, [2][1][RTW89_FCC][6] = 127, [2][1][RTW89_ETSI][6] = 127, @@ -8863,7 +8922,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][6] = 127, [2][1][RTW89_KCC][6] = 127, [2][1][RTW89_ACMA][6] = 127, - [2][1][RTW89_CN][6] = 28, + [2][1][RTW89_CN][6] = 127, [2][1][RTW89_UK][6] = 127, [2][1][RTW89_FCC][8] = 127, [2][1][RTW89_ETSI][8] = 127, @@ -8871,7 +8930,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][8] = 127, [2][1][RTW89_KCC][8] = 127, [2][1][RTW89_ACMA][8] = 127, - [2][1][RTW89_CN][8] = 28, + [2][1][RTW89_CN][8] = 127, [2][1][RTW89_UK][8] = 127, [2][1][RTW89_FCC][10] = 127, [2][1][RTW89_ETSI][10] = 127, @@ -8879,7 +8938,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][10] = 127, [2][1][RTW89_KCC][10] = 127, [2][1][RTW89_ACMA][10] = 127, - [2][1][RTW89_CN][10] = 28, + [2][1][RTW89_CN][10] = 127, [2][1][RTW89_UK][10] = 127, [2][1][RTW89_FCC][12] = 127, [2][1][RTW89_ETSI][12] = 127, @@ -8887,7 +8946,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][12] = 127, [2][1][RTW89_KCC][12] = 127, [2][1][RTW89_ACMA][12] = 127, - [2][1][RTW89_CN][12] = 28, + [2][1][RTW89_CN][12] = 127, [2][1][RTW89_UK][12] = 127, [2][1][RTW89_FCC][14] = 127, [2][1][RTW89_ETSI][14] = 127, @@ -8895,7 +8954,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][14] = 127, [2][1][RTW89_KCC][14] = 127, [2][1][RTW89_ACMA][14] = 127, - [2][1][RTW89_CN][14] = 28, + [2][1][RTW89_CN][14] = 127, [2][1][RTW89_UK][14] = 127, [2][1][RTW89_FCC][15] = 127, [2][1][RTW89_ETSI][15] = 127, @@ -8999,7 +9058,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][38] = 127, [2][1][RTW89_KCC][38] = 127, [2][1][RTW89_ACMA][38] = 127, - [2][1][RTW89_CN][38] = 56, + [2][1][RTW89_CN][38] = 127, [2][1][RTW89_UK][38] = 127, [2][1][RTW89_FCC][40] = 127, [2][1][RTW89_ETSI][40] = 127, @@ -9007,7 +9066,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][40] = 127, [2][1][RTW89_KCC][40] = 127, [2][1][RTW89_ACMA][40] = 127, - [2][1][RTW89_CN][40] = 56, + [2][1][RTW89_CN][40] = 127, [2][1][RTW89_UK][40] = 127, [2][1][RTW89_FCC][42] = 127, [2][1][RTW89_ETSI][42] = 127, @@ -9015,7 +9074,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][42] = 127, [2][1][RTW89_KCC][42] = 127, [2][1][RTW89_ACMA][42] = 127, - [2][1][RTW89_CN][42] = 56, + [2][1][RTW89_CN][42] = 127, 
[2][1][RTW89_UK][42] = 127, [2][1][RTW89_FCC][44] = 127, [2][1][RTW89_ETSI][44] = 127, @@ -9023,7 +9082,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][44] = 127, [2][1][RTW89_KCC][44] = 127, [2][1][RTW89_ACMA][44] = 127, - [2][1][RTW89_CN][44] = 56, + [2][1][RTW89_CN][44] = 127, [2][1][RTW89_UK][44] = 127, [2][1][RTW89_FCC][46] = 127, [2][1][RTW89_ETSI][46] = 127, @@ -9031,7 +9090,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][46] = 127, [2][1][RTW89_KCC][46] = 127, [2][1][RTW89_ACMA][46] = 127, - [2][1][RTW89_CN][46] = 56, + [2][1][RTW89_CN][46] = 127, [2][1][RTW89_UK][46] = 127, [2][1][RTW89_FCC][48] = 127, [2][1][RTW89_ETSI][48] = 127, @@ -9063,19 +9122,19 @@ static const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [RTW89_RS_LMT_NUM][RTW89_BF_NUM] [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = { - [0][0][0][0][RTW89_WW][0] = 58, - [0][0][0][0][RTW89_WW][1] = 58, - [0][0][0][0][RTW89_WW][2] = 58, - [0][0][0][0][RTW89_WW][3] = 58, - [0][0][0][0][RTW89_WW][4] = 58, - [0][0][0][0][RTW89_WW][5] = 58, - [0][0][0][0][RTW89_WW][6] = 58, - [0][0][0][0][RTW89_WW][7] = 58, - [0][0][0][0][RTW89_WW][8] = 58, - [0][0][0][0][RTW89_WW][9] = 58, - [0][0][0][0][RTW89_WW][10] = 58, - [0][0][0][0][RTW89_WW][11] = 58, - [0][0][0][0][RTW89_WW][12] = 52, + [0][0][0][0][RTW89_WW][0] = 56, + [0][0][0][0][RTW89_WW][1] = 56, + [0][0][0][0][RTW89_WW][2] = 56, + [0][0][0][0][RTW89_WW][3] = 56, + [0][0][0][0][RTW89_WW][4] = 56, + [0][0][0][0][RTW89_WW][5] = 56, + [0][0][0][0][RTW89_WW][6] = 56, + [0][0][0][0][RTW89_WW][7] = 56, + [0][0][0][0][RTW89_WW][8] = 56, + [0][0][0][0][RTW89_WW][9] = 56, + [0][0][0][0][RTW89_WW][10] = 56, + [0][0][0][0][RTW89_WW][11] = 56, + [0][0][0][0][RTW89_WW][12] = 42, [0][0][0][0][RTW89_WW][13] = 76, [0][1][0][0][RTW89_WW][0] = 0, [0][1][0][0][RTW89_WW][1] = 0, @@ -9093,15 +9152,15 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_WW][13] = 0, [1][0][0][0][RTW89_WW][0] = 0, [1][0][0][0][RTW89_WW][1] = 0, - [1][0][0][0][RTW89_WW][2] = 58, - [1][0][0][0][RTW89_WW][3] = 58, - [1][0][0][0][RTW89_WW][4] = 58, - [1][0][0][0][RTW89_WW][5] = 58, - [1][0][0][0][RTW89_WW][6] = 58, - [1][0][0][0][RTW89_WW][7] = 58, - [1][0][0][0][RTW89_WW][8] = 58, - [1][0][0][0][RTW89_WW][9] = 58, - [1][0][0][0][RTW89_WW][10] = 58, + [1][0][0][0][RTW89_WW][2] = 56, + [1][0][0][0][RTW89_WW][3] = 56, + [1][0][0][0][RTW89_WW][4] = 56, + [1][0][0][0][RTW89_WW][5] = 56, + [1][0][0][0][RTW89_WW][6] = 56, + [1][0][0][0][RTW89_WW][7] = 56, + [1][0][0][0][RTW89_WW][8] = 56, + [1][0][0][0][RTW89_WW][9] = 56, + [1][0][0][0][RTW89_WW][10] = 42, [1][0][0][0][RTW89_WW][11] = 0, [1][0][0][0][RTW89_WW][12] = 0, [1][0][0][0][RTW89_WW][13] = 0, @@ -9131,7 +9190,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_WW][9] = 60, [0][0][1][0][RTW89_WW][10] = 60, [0][0][1][0][RTW89_WW][11] = 60, - [0][0][1][0][RTW89_WW][12] = 58, + [0][0][1][0][RTW89_WW][12] = 40, [0][0][1][0][RTW89_WW][13] = 0, [0][1][1][0][RTW89_WW][0] = 0, [0][1][1][0][RTW89_WW][1] = 0, @@ -9147,19 +9206,19 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_WW][11] = 0, [0][1][1][0][RTW89_WW][12] = 0, [0][1][1][0][RTW89_WW][13] = 0, - [0][0][2][0][RTW89_WW][0] = 60, - [0][0][2][0][RTW89_WW][1] = 60, - [0][0][2][0][RTW89_WW][2] = 60, - [0][0][2][0][RTW89_WW][3] = 60, - [0][0][2][0][RTW89_WW][4] = 60, - [0][0][2][0][RTW89_WW][5] = 60, - 
[0][0][2][0][RTW89_WW][6] = 60, - [0][0][2][0][RTW89_WW][7] = 60, - [0][0][2][0][RTW89_WW][8] = 60, - [0][0][2][0][RTW89_WW][9] = 60, - [0][0][2][0][RTW89_WW][10] = 60, - [0][0][2][0][RTW89_WW][11] = 60, - [0][0][2][0][RTW89_WW][12] = 60, + [0][0][2][0][RTW89_WW][0] = 58, + [0][0][2][0][RTW89_WW][1] = 58, + [0][0][2][0][RTW89_WW][2] = 58, + [0][0][2][0][RTW89_WW][3] = 58, + [0][0][2][0][RTW89_WW][4] = 58, + [0][0][2][0][RTW89_WW][5] = 58, + [0][0][2][0][RTW89_WW][6] = 58, + [0][0][2][0][RTW89_WW][7] = 58, + [0][0][2][0][RTW89_WW][8] = 58, + [0][0][2][0][RTW89_WW][9] = 58, + [0][0][2][0][RTW89_WW][10] = 58, + [0][0][2][0][RTW89_WW][11] = 58, + [0][0][2][0][RTW89_WW][12] = 38, [0][0][2][0][RTW89_WW][13] = 0, [0][1][2][0][RTW89_WW][0] = 0, [0][1][2][0][RTW89_WW][1] = 0, @@ -9191,15 +9250,15 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_WW][13] = 0, [1][0][2][0][RTW89_WW][0] = 0, [1][0][2][0][RTW89_WW][1] = 0, - [1][0][2][0][RTW89_WW][2] = 58, - [1][0][2][0][RTW89_WW][3] = 58, - [1][0][2][0][RTW89_WW][4] = 58, - [1][0][2][0][RTW89_WW][5] = 58, - [1][0][2][0][RTW89_WW][6] = 58, - [1][0][2][0][RTW89_WW][7] = 58, - [1][0][2][0][RTW89_WW][8] = 58, - [1][0][2][0][RTW89_WW][9] = 58, - [1][0][2][0][RTW89_WW][10] = 58, + [1][0][2][0][RTW89_WW][2] = 56, + [1][0][2][0][RTW89_WW][3] = 56, + [1][0][2][0][RTW89_WW][4] = 56, + [1][0][2][0][RTW89_WW][5] = 56, + [1][0][2][0][RTW89_WW][6] = 56, + [1][0][2][0][RTW89_WW][7] = 56, + [1][0][2][0][RTW89_WW][8] = 56, + [1][0][2][0][RTW89_WW][9] = 56, + [1][0][2][0][RTW89_WW][10] = 48, [1][0][2][0][RTW89_WW][11] = 0, [1][0][2][0][RTW89_WW][12] = 0, [1][0][2][0][RTW89_WW][13] = 0, @@ -9237,7 +9296,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][0] = 82, [0][0][0][0][RTW89_KCC][0] = 68, [0][0][0][0][RTW89_ACMA][0] = 58, - [0][0][0][0][RTW89_CN][0] = 60, + [0][0][0][0][RTW89_CN][0] = 56, [0][0][0][0][RTW89_UK][0] = 58, [0][0][0][0][RTW89_FCC][1] = 82, [0][0][0][0][RTW89_ETSI][1] = 58, @@ -9245,7 +9304,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][1] = 82, [0][0][0][0][RTW89_KCC][1] = 68, [0][0][0][0][RTW89_ACMA][1] = 58, - [0][0][0][0][RTW89_CN][1] = 60, + [0][0][0][0][RTW89_CN][1] = 56, [0][0][0][0][RTW89_UK][1] = 58, [0][0][0][0][RTW89_FCC][2] = 82, [0][0][0][0][RTW89_ETSI][2] = 58, @@ -9253,7 +9312,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][2] = 82, [0][0][0][0][RTW89_KCC][2] = 68, [0][0][0][0][RTW89_ACMA][2] = 58, - [0][0][0][0][RTW89_CN][2] = 60, + [0][0][0][0][RTW89_CN][2] = 56, [0][0][0][0][RTW89_UK][2] = 58, [0][0][0][0][RTW89_FCC][3] = 82, [0][0][0][0][RTW89_ETSI][3] = 58, @@ -9261,7 +9320,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][3] = 82, [0][0][0][0][RTW89_KCC][3] = 68, [0][0][0][0][RTW89_ACMA][3] = 58, - [0][0][0][0][RTW89_CN][3] = 60, + [0][0][0][0][RTW89_CN][3] = 56, [0][0][0][0][RTW89_UK][3] = 58, [0][0][0][0][RTW89_FCC][4] = 82, [0][0][0][0][RTW89_ETSI][4] = 58, @@ -9269,7 +9328,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][4] = 82, [0][0][0][0][RTW89_KCC][4] = 68, [0][0][0][0][RTW89_ACMA][4] = 58, - [0][0][0][0][RTW89_CN][4] = 60, + [0][0][0][0][RTW89_CN][4] = 56, [0][0][0][0][RTW89_UK][4] = 58, [0][0][0][0][RTW89_FCC][5] = 82, [0][0][0][0][RTW89_ETSI][5] = 58, @@ -9277,7 +9336,7 @@ const s8 
rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][5] = 82, [0][0][0][0][RTW89_KCC][5] = 68, [0][0][0][0][RTW89_ACMA][5] = 58, - [0][0][0][0][RTW89_CN][5] = 60, + [0][0][0][0][RTW89_CN][5] = 56, [0][0][0][0][RTW89_UK][5] = 58, [0][0][0][0][RTW89_FCC][6] = 82, [0][0][0][0][RTW89_ETSI][6] = 58, @@ -9285,7 +9344,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][6] = 82, [0][0][0][0][RTW89_KCC][6] = 68, [0][0][0][0][RTW89_ACMA][6] = 58, - [0][0][0][0][RTW89_CN][6] = 60, + [0][0][0][0][RTW89_CN][6] = 56, [0][0][0][0][RTW89_UK][6] = 58, [0][0][0][0][RTW89_FCC][7] = 82, [0][0][0][0][RTW89_ETSI][7] = 58, @@ -9293,7 +9352,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][7] = 82, [0][0][0][0][RTW89_KCC][7] = 68, [0][0][0][0][RTW89_ACMA][7] = 58, - [0][0][0][0][RTW89_CN][7] = 60, + [0][0][0][0][RTW89_CN][7] = 56, [0][0][0][0][RTW89_UK][7] = 58, [0][0][0][0][RTW89_FCC][8] = 82, [0][0][0][0][RTW89_ETSI][8] = 58, @@ -9301,7 +9360,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][8] = 82, [0][0][0][0][RTW89_KCC][8] = 68, [0][0][0][0][RTW89_ACMA][8] = 58, - [0][0][0][0][RTW89_CN][8] = 60, + [0][0][0][0][RTW89_CN][8] = 56, [0][0][0][0][RTW89_UK][8] = 58, [0][0][0][0][RTW89_FCC][9] = 82, [0][0][0][0][RTW89_ETSI][9] = 58, @@ -9309,7 +9368,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][9] = 82, [0][0][0][0][RTW89_KCC][9] = 68, [0][0][0][0][RTW89_ACMA][9] = 58, - [0][0][0][0][RTW89_CN][9] = 60, + [0][0][0][0][RTW89_CN][9] = 56, [0][0][0][0][RTW89_UK][9] = 58, [0][0][0][0][RTW89_FCC][10] = 80, [0][0][0][0][RTW89_ETSI][10] = 58, @@ -9317,7 +9376,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][10] = 80, [0][0][0][0][RTW89_KCC][10] = 68, [0][0][0][0][RTW89_ACMA][10] = 58, - [0][0][0][0][RTW89_CN][10] = 60, + [0][0][0][0][RTW89_CN][10] = 56, [0][0][0][0][RTW89_UK][10] = 58, [0][0][0][0][RTW89_FCC][11] = 60, [0][0][0][0][RTW89_ETSI][11] = 58, @@ -9325,7 +9384,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][11] = 60, [0][0][0][0][RTW89_KCC][11] = 68, [0][0][0][0][RTW89_ACMA][11] = 58, - [0][0][0][0][RTW89_CN][11] = 60, + [0][0][0][0][RTW89_CN][11] = 56, [0][0][0][0][RTW89_UK][11] = 58, [0][0][0][0][RTW89_FCC][12] = 52, [0][0][0][0][RTW89_ETSI][12] = 58, @@ -9333,7 +9392,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][12] = 52, [0][0][0][0][RTW89_KCC][12] = 68, [0][0][0][0][RTW89_ACMA][12] = 58, - [0][0][0][0][RTW89_CN][12] = 60, + [0][0][0][0][RTW89_CN][12] = 42, [0][0][0][0][RTW89_UK][12] = 58, [0][0][0][0][RTW89_FCC][13] = 127, [0][0][0][0][RTW89_ETSI][13] = 127, @@ -9477,7 +9536,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][2] = 127, [1][0][0][0][RTW89_KCC][2] = 68, [1][0][0][0][RTW89_ACMA][2] = 58, - [1][0][0][0][RTW89_CN][2] = 60, + [1][0][0][0][RTW89_CN][2] = 56, [1][0][0][0][RTW89_UK][2] = 58, [1][0][0][0][RTW89_FCC][3] = 127, [1][0][0][0][RTW89_ETSI][3] = 58, @@ -9485,7 +9544,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][3] = 127, [1][0][0][0][RTW89_KCC][3] = 68, [1][0][0][0][RTW89_ACMA][3] = 58, - [1][0][0][0][RTW89_CN][3] = 60, + [1][0][0][0][RTW89_CN][3] = 56, [1][0][0][0][RTW89_UK][3] = 58, 
[1][0][0][0][RTW89_FCC][4] = 127, [1][0][0][0][RTW89_ETSI][4] = 58, @@ -9493,7 +9552,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][4] = 127, [1][0][0][0][RTW89_KCC][4] = 68, [1][0][0][0][RTW89_ACMA][4] = 58, - [1][0][0][0][RTW89_CN][4] = 60, + [1][0][0][0][RTW89_CN][4] = 56, [1][0][0][0][RTW89_UK][4] = 58, [1][0][0][0][RTW89_FCC][5] = 127, [1][0][0][0][RTW89_ETSI][5] = 58, @@ -9501,7 +9560,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][5] = 127, [1][0][0][0][RTW89_KCC][5] = 68, [1][0][0][0][RTW89_ACMA][5] = 58, - [1][0][0][0][RTW89_CN][5] = 60, + [1][0][0][0][RTW89_CN][5] = 56, [1][0][0][0][RTW89_UK][5] = 58, [1][0][0][0][RTW89_FCC][6] = 127, [1][0][0][0][RTW89_ETSI][6] = 58, @@ -9509,7 +9568,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][6] = 127, [1][0][0][0][RTW89_KCC][6] = 68, [1][0][0][0][RTW89_ACMA][6] = 58, - [1][0][0][0][RTW89_CN][6] = 60, + [1][0][0][0][RTW89_CN][6] = 56, [1][0][0][0][RTW89_UK][6] = 58, [1][0][0][0][RTW89_FCC][7] = 127, [1][0][0][0][RTW89_ETSI][7] = 58, @@ -9517,7 +9576,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][7] = 127, [1][0][0][0][RTW89_KCC][7] = 68, [1][0][0][0][RTW89_ACMA][7] = 58, - [1][0][0][0][RTW89_CN][7] = 60, + [1][0][0][0][RTW89_CN][7] = 56, [1][0][0][0][RTW89_UK][7] = 58, [1][0][0][0][RTW89_FCC][8] = 127, [1][0][0][0][RTW89_ETSI][8] = 58, @@ -9525,7 +9584,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][8] = 127, [1][0][0][0][RTW89_KCC][8] = 68, [1][0][0][0][RTW89_ACMA][8] = 58, - [1][0][0][0][RTW89_CN][8] = 60, + [1][0][0][0][RTW89_CN][8] = 56, [1][0][0][0][RTW89_UK][8] = 58, [1][0][0][0][RTW89_FCC][9] = 127, [1][0][0][0][RTW89_ETSI][9] = 58, @@ -9533,7 +9592,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][9] = 127, [1][0][0][0][RTW89_KCC][9] = 68, [1][0][0][0][RTW89_ACMA][9] = 58, - [1][0][0][0][RTW89_CN][9] = 60, + [1][0][0][0][RTW89_CN][9] = 56, [1][0][0][0][RTW89_UK][9] = 58, [1][0][0][0][RTW89_FCC][10] = 127, [1][0][0][0][RTW89_ETSI][10] = 58, @@ -9541,7 +9600,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_IC][10] = 127, [1][0][0][0][RTW89_KCC][10] = 68, [1][0][0][0][RTW89_ACMA][10] = 58, - [1][0][0][0][RTW89_CN][10] = 60, + [1][0][0][0][RTW89_CN][10] = 42, [1][0][0][0][RTW89_UK][10] = 58, [1][0][0][0][RTW89_FCC][11] = 127, [1][0][0][0][RTW89_ETSI][11] = 127, @@ -9781,7 +9840,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][12] = 64, [0][0][1][0][RTW89_KCC][12] = 74, [0][0][1][0][RTW89_ACMA][12] = 58, - [0][0][1][0][RTW89_CN][12] = 60, + [0][0][1][0][RTW89_CN][12] = 40, [0][0][1][0][RTW89_UK][12] = 58, [0][0][1][0][RTW89_FCC][13] = 127, [0][0][1][0][RTW89_ETSI][13] = 127, @@ -9909,7 +9968,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][0] = 78, [0][0][2][0][RTW89_KCC][0] = 76, [0][0][2][0][RTW89_ACMA][0] = 60, - [0][0][2][0][RTW89_CN][0] = 60, + [0][0][2][0][RTW89_CN][0] = 58, [0][0][2][0][RTW89_UK][0] = 60, [0][0][2][0][RTW89_FCC][1] = 78, [0][0][2][0][RTW89_ETSI][1] = 60, @@ -9917,7 +9976,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][1] = 78, [0][0][2][0][RTW89_KCC][1] = 76, [0][0][2][0][RTW89_ACMA][1] = 60, - 
[0][0][2][0][RTW89_CN][1] = 60, + [0][0][2][0][RTW89_CN][1] = 58, [0][0][2][0][RTW89_UK][1] = 60, [0][0][2][0][RTW89_FCC][2] = 80, [0][0][2][0][RTW89_ETSI][2] = 60, @@ -9925,7 +9984,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][2] = 80, [0][0][2][0][RTW89_KCC][2] = 76, [0][0][2][0][RTW89_ACMA][2] = 60, - [0][0][2][0][RTW89_CN][2] = 60, + [0][0][2][0][RTW89_CN][2] = 58, [0][0][2][0][RTW89_UK][2] = 60, [0][0][2][0][RTW89_FCC][3] = 80, [0][0][2][0][RTW89_ETSI][3] = 60, @@ -9933,7 +9992,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][3] = 80, [0][0][2][0][RTW89_KCC][3] = 76, [0][0][2][0][RTW89_ACMA][3] = 60, - [0][0][2][0][RTW89_CN][3] = 60, + [0][0][2][0][RTW89_CN][3] = 58, [0][0][2][0][RTW89_UK][3] = 60, [0][0][2][0][RTW89_FCC][4] = 80, [0][0][2][0][RTW89_ETSI][4] = 60, @@ -9941,7 +10000,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][4] = 80, [0][0][2][0][RTW89_KCC][4] = 76, [0][0][2][0][RTW89_ACMA][4] = 60, - [0][0][2][0][RTW89_CN][4] = 60, + [0][0][2][0][RTW89_CN][4] = 58, [0][0][2][0][RTW89_UK][4] = 60, [0][0][2][0][RTW89_FCC][5] = 80, [0][0][2][0][RTW89_ETSI][5] = 60, @@ -9949,7 +10008,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][5] = 80, [0][0][2][0][RTW89_KCC][5] = 76, [0][0][2][0][RTW89_ACMA][5] = 60, - [0][0][2][0][RTW89_CN][5] = 60, + [0][0][2][0][RTW89_CN][5] = 58, [0][0][2][0][RTW89_UK][5] = 60, [0][0][2][0][RTW89_FCC][6] = 80, [0][0][2][0][RTW89_ETSI][6] = 60, @@ -9957,7 +10016,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][6] = 80, [0][0][2][0][RTW89_KCC][6] = 76, [0][0][2][0][RTW89_ACMA][6] = 60, - [0][0][2][0][RTW89_CN][6] = 60, + [0][0][2][0][RTW89_CN][6] = 58, [0][0][2][0][RTW89_UK][6] = 60, [0][0][2][0][RTW89_FCC][7] = 80, [0][0][2][0][RTW89_ETSI][7] = 60, @@ -9965,7 +10024,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][7] = 80, [0][0][2][0][RTW89_KCC][7] = 76, [0][0][2][0][RTW89_ACMA][7] = 60, - [0][0][2][0][RTW89_CN][7] = 60, + [0][0][2][0][RTW89_CN][7] = 58, [0][0][2][0][RTW89_UK][7] = 60, [0][0][2][0][RTW89_FCC][8] = 78, [0][0][2][0][RTW89_ETSI][8] = 60, @@ -9973,7 +10032,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][8] = 78, [0][0][2][0][RTW89_KCC][8] = 76, [0][0][2][0][RTW89_ACMA][8] = 60, - [0][0][2][0][RTW89_CN][8] = 60, + [0][0][2][0][RTW89_CN][8] = 58, [0][0][2][0][RTW89_UK][8] = 60, [0][0][2][0][RTW89_FCC][9] = 74, [0][0][2][0][RTW89_ETSI][9] = 60, @@ -9981,7 +10040,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][9] = 74, [0][0][2][0][RTW89_KCC][9] = 76, [0][0][2][0][RTW89_ACMA][9] = 60, - [0][0][2][0][RTW89_CN][9] = 60, + [0][0][2][0][RTW89_CN][9] = 58, [0][0][2][0][RTW89_UK][9] = 60, [0][0][2][0][RTW89_FCC][10] = 74, [0][0][2][0][RTW89_ETSI][10] = 60, @@ -9989,7 +10048,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][10] = 74, [0][0][2][0][RTW89_KCC][10] = 76, [0][0][2][0][RTW89_ACMA][10] = 60, - [0][0][2][0][RTW89_CN][10] = 60, + [0][0][2][0][RTW89_CN][10] = 58, [0][0][2][0][RTW89_UK][10] = 60, [0][0][2][0][RTW89_FCC][11] = 68, [0][0][2][0][RTW89_ETSI][11] = 60, @@ -9997,7 +10056,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][11] = 68, 
[0][0][2][0][RTW89_KCC][11] = 76, [0][0][2][0][RTW89_ACMA][11] = 60, - [0][0][2][0][RTW89_CN][11] = 60, + [0][0][2][0][RTW89_CN][11] = 58, [0][0][2][0][RTW89_UK][11] = 60, [0][0][2][0][RTW89_FCC][12] = 68, [0][0][2][0][RTW89_ETSI][12] = 60, @@ -10005,7 +10064,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][12] = 68, [0][0][2][0][RTW89_KCC][12] = 76, [0][0][2][0][RTW89_ACMA][12] = 60, - [0][0][2][0][RTW89_CN][12] = 60, + [0][0][2][0][RTW89_CN][12] = 38, [0][0][2][0][RTW89_UK][12] = 60, [0][0][2][0][RTW89_FCC][13] = 127, [0][0][2][0][RTW89_ETSI][13] = 127, @@ -10261,7 +10320,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][2] = 70, [1][0][2][0][RTW89_KCC][2] = 76, [1][0][2][0][RTW89_ACMA][2] = 58, - [1][0][2][0][RTW89_CN][2] = 60, + [1][0][2][0][RTW89_CN][2] = 56, [1][0][2][0][RTW89_UK][2] = 58, [1][0][2][0][RTW89_FCC][3] = 70, [1][0][2][0][RTW89_ETSI][3] = 58, @@ -10269,7 +10328,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][3] = 70, [1][0][2][0][RTW89_KCC][3] = 76, [1][0][2][0][RTW89_ACMA][3] = 58, - [1][0][2][0][RTW89_CN][3] = 60, + [1][0][2][0][RTW89_CN][3] = 56, [1][0][2][0][RTW89_UK][3] = 58, [1][0][2][0][RTW89_FCC][4] = 74, [1][0][2][0][RTW89_ETSI][4] = 58, @@ -10277,7 +10336,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][4] = 74, [1][0][2][0][RTW89_KCC][4] = 76, [1][0][2][0][RTW89_ACMA][4] = 58, - [1][0][2][0][RTW89_CN][4] = 60, + [1][0][2][0][RTW89_CN][4] = 56, [1][0][2][0][RTW89_UK][4] = 58, [1][0][2][0][RTW89_FCC][5] = 76, [1][0][2][0][RTW89_ETSI][5] = 58, @@ -10285,7 +10344,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][5] = 76, [1][0][2][0][RTW89_KCC][5] = 76, [1][0][2][0][RTW89_ACMA][5] = 58, - [1][0][2][0][RTW89_CN][5] = 60, + [1][0][2][0][RTW89_CN][5] = 56, [1][0][2][0][RTW89_UK][5] = 58, [1][0][2][0][RTW89_FCC][6] = 76, [1][0][2][0][RTW89_ETSI][6] = 58, @@ -10293,7 +10352,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][6] = 76, [1][0][2][0][RTW89_KCC][6] = 76, [1][0][2][0][RTW89_ACMA][6] = 58, - [1][0][2][0][RTW89_CN][6] = 60, + [1][0][2][0][RTW89_CN][6] = 56, [1][0][2][0][RTW89_UK][6] = 58, [1][0][2][0][RTW89_FCC][7] = 76, [1][0][2][0][RTW89_ETSI][7] = 58, @@ -10301,7 +10360,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][7] = 76, [1][0][2][0][RTW89_KCC][7] = 76, [1][0][2][0][RTW89_ACMA][7] = 58, - [1][0][2][0][RTW89_CN][7] = 60, + [1][0][2][0][RTW89_CN][7] = 56, [1][0][2][0][RTW89_UK][7] = 58, [1][0][2][0][RTW89_FCC][8] = 78, [1][0][2][0][RTW89_ETSI][8] = 58, @@ -10309,7 +10368,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][8] = 78, [1][0][2][0][RTW89_KCC][8] = 76, [1][0][2][0][RTW89_ACMA][8] = 58, - [1][0][2][0][RTW89_CN][8] = 60, + [1][0][2][0][RTW89_CN][8] = 56, [1][0][2][0][RTW89_UK][8] = 58, [1][0][2][0][RTW89_FCC][9] = 74, [1][0][2][0][RTW89_ETSI][9] = 58, @@ -10317,7 +10376,7 @@ const s8 rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][9] = 74, [1][0][2][0][RTW89_KCC][9] = 76, [1][0][2][0][RTW89_ACMA][9] = 58, - [1][0][2][0][RTW89_CN][9] = 60, + [1][0][2][0][RTW89_CN][9] = 56, [1][0][2][0][RTW89_UK][9] = 58, [1][0][2][0][RTW89_FCC][10] = 68, [1][0][2][0][RTW89_ETSI][10] = 58, @@ -10325,7 +10384,7 @@ const s8 
rtw89_8851b_txpwr_lmt_2g_type2[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][10] = 68, [1][0][2][0][RTW89_KCC][10] = 76, [1][0][2][0][RTW89_ACMA][10] = 58, - [1][0][2][0][RTW89_CN][10] = 60, + [1][0][2][0][RTW89_CN][10] = 48, [1][0][2][0][RTW89_UK][10] = 58, [1][0][2][0][RTW89_FCC][11] = 127, [1][0][2][0][RTW89_ETSI][11] = 127, @@ -10606,9 +10665,9 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_WW][42] = 30, [0][0][1][0][RTW89_WW][44] = 30, [0][0][1][0][RTW89_WW][46] = 30, - [0][0][1][0][RTW89_WW][48] = 68, - [0][0][1][0][RTW89_WW][50] = 68, - [0][0][1][0][RTW89_WW][52] = 68, + [0][0][1][0][RTW89_WW][48] = 72, + [0][0][1][0][RTW89_WW][50] = 72, + [0][0][1][0][RTW89_WW][52] = 72, [0][1][1][0][RTW89_WW][0] = 0, [0][1][1][0][RTW89_WW][2] = 0, [0][1][1][0][RTW89_WW][4] = 0, @@ -10637,14 +10696,14 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_WW][48] = 0, [0][1][1][0][RTW89_WW][50] = 0, [0][1][1][0][RTW89_WW][52] = 0, - [0][0][2][0][RTW89_WW][0] = 62, - [0][0][2][0][RTW89_WW][2] = 62, - [0][0][2][0][RTW89_WW][4] = 62, + [0][0][2][0][RTW89_WW][0] = 60, + [0][0][2][0][RTW89_WW][2] = 60, + [0][0][2][0][RTW89_WW][4] = 60, [0][0][2][0][RTW89_WW][6] = 54, - [0][0][2][0][RTW89_WW][8] = 62, - [0][0][2][0][RTW89_WW][10] = 62, - [0][0][2][0][RTW89_WW][12] = 62, - [0][0][2][0][RTW89_WW][14] = 62, + [0][0][2][0][RTW89_WW][8] = 60, + [0][0][2][0][RTW89_WW][10] = 60, + [0][0][2][0][RTW89_WW][12] = 60, + [0][0][2][0][RTW89_WW][14] = 60, [0][0][2][0][RTW89_WW][15] = 60, [0][0][2][0][RTW89_WW][17] = 62, [0][0][2][0][RTW89_WW][19] = 62, @@ -10662,9 +10721,9 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_WW][42] = 30, [0][0][2][0][RTW89_WW][44] = 30, [0][0][2][0][RTW89_WW][46] = 30, - [0][0][2][0][RTW89_WW][48] = 70, - [0][0][2][0][RTW89_WW][50] = 70, - [0][0][2][0][RTW89_WW][52] = 70, + [0][0][2][0][RTW89_WW][48] = 74, + [0][0][2][0][RTW89_WW][50] = 74, + [0][0][2][0][RTW89_WW][52] = 74, [0][1][2][0][RTW89_WW][0] = 0, [0][1][2][0][RTW89_WW][2] = 0, [0][1][2][0][RTW89_WW][4] = 0, @@ -10721,11 +10780,11 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_WW][48] = 0, [0][1][2][1][RTW89_WW][50] = 0, [0][1][2][1][RTW89_WW][52] = 0, - [1][0][2][0][RTW89_WW][1] = 60, + [1][0][2][0][RTW89_WW][1] = 64, [1][0][2][0][RTW89_WW][5] = 62, - [1][0][2][0][RTW89_WW][9] = 64, - [1][0][2][0][RTW89_WW][13] = 60, - [1][0][2][0][RTW89_WW][16] = 62, + [1][0][2][0][RTW89_WW][9] = 58, + [1][0][2][0][RTW89_WW][13] = 58, + [1][0][2][0][RTW89_WW][16] = 66, [1][0][2][0][RTW89_WW][20] = 66, [1][0][2][0][RTW89_WW][24] = 66, [1][0][2][0][RTW89_WW][28] = 66, @@ -10733,8 +10792,8 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_WW][36] = 76, [1][0][2][0][RTW89_WW][39] = 30, [1][0][2][0][RTW89_WW][43] = 30, - [1][0][2][0][RTW89_WW][47] = 76, - [1][0][2][0][RTW89_WW][51] = 76, + [1][0][2][0][RTW89_WW][47] = 80, + [1][0][2][0][RTW89_WW][51] = 80, [1][1][2][0][RTW89_WW][1] = 0, [1][1][2][0][RTW89_WW][5] = 0, [1][1][2][0][RTW89_WW][9] = 0, @@ -10764,12 +10823,12 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_WW][47] = 0, [1][1][2][1][RTW89_WW][51] = 0, [2][0][2][0][RTW89_WW][3] = 60, - [2][0][2][0][RTW89_WW][11] = 58, - [2][0][2][0][RTW89_WW][18] = 62, + [2][0][2][0][RTW89_WW][11] = 54, + [2][0][2][0][RTW89_WW][18] = 64, [2][0][2][0][RTW89_WW][26] = 64, 
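(The RTW89_WW ("worldwide") rows track the concrete domains: in these hunks each WW entry equals the smallest non-127 limit across the FCC/ETSI/MKK/IC/KCC/ACMA/CN/UK columns for that slot — which is why the CN reductions pull several WW values down with them — and a slot whose domains are all 127 gets WW = 0. A minimal sketch of that derivation, assuming this is indeed how the tables are generated; the array bound is a placeholder:

/* Hypothetical derivation of one RTW89_WW entry: minimum over the
 * concrete regulatory domains, skipping the 127 sentinel; 0 when no
 * domain allows the slot at all.  REGD_NUM is a placeholder bound.
 */
#define REGD_NUM	9	/* placeholder: WW + the 8 domains listed */
#define TXPWR_NA	127

static signed char ww_entry(const signed char lmt[REGD_NUM])
{
	signed char min = TXPWR_NA;
	int regd;

	for (regd = 1; regd < REGD_NUM; regd++) {	/* index 0 is WW itself */
		if (lmt[regd] != TXPWR_NA && lmt[regd] < min)
			min = lmt[regd];
	}
	return min == TXPWR_NA ? 0 : min;
}

For example, the [0][0][2][0][RTW89_WW][0] move 62 -> 60 in the hunk above matches the [0][0][2][0][RTW89_CN][0] move 62 -> 60 further down.)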
[2][0][2][0][RTW89_WW][34] = 68, [2][0][2][0][RTW89_WW][41] = 30, - [2][0][2][0][RTW89_WW][49] = 68, + [2][0][2][0][RTW89_WW][49] = 72, [2][1][2][0][RTW89_WW][3] = 0, [2][1][2][0][RTW89_WW][11] = 0, [2][1][2][0][RTW89_WW][18] = 0, @@ -10784,8 +10843,8 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_WW][34] = 0, [2][1][2][1][RTW89_WW][41] = 0, [2][1][2][1][RTW89_WW][49] = 0, - [3][0][2][0][RTW89_WW][7] = 58, - [3][0][2][0][RTW89_WW][22] = 58, + [3][0][2][0][RTW89_WW][7] = 0, + [3][0][2][0][RTW89_WW][22] = 0, [3][0][2][0][RTW89_WW][45] = 0, [3][1][2][0][RTW89_WW][7] = 0, [3][1][2][0][RTW89_WW][22] = 0, @@ -10793,7 +10852,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_WW][7] = 0, [3][1][2][1][RTW89_WW][22] = 0, [3][1][2][1][RTW89_WW][45] = 0, - [0][0][1][0][RTW89_FCC][0] = 74, + [0][0][1][0][RTW89_FCC][0] = 78, [0][0][1][0][RTW89_ETSI][0] = 58, [0][0][1][0][RTW89_MKK][0] = 60, [0][0][1][0][RTW89_IC][0] = 62, @@ -10849,7 +10908,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_ACMA][12] = 58, [0][0][1][0][RTW89_CN][12] = 60, [0][0][1][0][RTW89_UK][12] = 58, - [0][0][1][0][RTW89_FCC][14] = 72, + [0][0][1][0][RTW89_FCC][14] = 76, [0][0][1][0][RTW89_ETSI][14] = 58, [0][0][1][0][RTW89_MKK][14] = 60, [0][0][1][0][RTW89_IC][14] = 62, @@ -10857,10 +10916,10 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_ACMA][14] = 58, [0][0][1][0][RTW89_CN][14] = 60, [0][0][1][0][RTW89_UK][14] = 58, - [0][0][1][0][RTW89_FCC][15] = 72, + [0][0][1][0][RTW89_FCC][15] = 76, [0][0][1][0][RTW89_ETSI][15] = 58, [0][0][1][0][RTW89_MKK][15] = 74, - [0][0][1][0][RTW89_IC][15] = 72, + [0][0][1][0][RTW89_IC][15] = 76, [0][0][1][0][RTW89_KCC][15] = 74, [0][0][1][0][RTW89_ACMA][15] = 58, [0][0][1][0][RTW89_CN][15] = 127, @@ -10937,10 +10996,10 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_ACMA][33] = 60, [0][0][1][0][RTW89_CN][33] = 127, [0][0][1][0][RTW89_UK][33] = 60, - [0][0][1][0][RTW89_FCC][35] = 66, + [0][0][1][0][RTW89_FCC][35] = 70, [0][0][1][0][RTW89_ETSI][35] = 60, [0][0][1][0][RTW89_MKK][35] = 74, - [0][0][1][0][RTW89_IC][35] = 66, + [0][0][1][0][RTW89_IC][35] = 70, [0][0][1][0][RTW89_KCC][35] = 74, [0][0][1][0][RTW89_ACMA][35] = 60, [0][0][1][0][RTW89_CN][35] = 127, @@ -10959,7 +11018,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][38] = 78, [0][0][1][0][RTW89_KCC][38] = 70, [0][0][1][0][RTW89_ACMA][38] = 74, - [0][0][1][0][RTW89_CN][38] = 74, + [0][0][1][0][RTW89_CN][38] = 64, [0][0][1][0][RTW89_UK][38] = 58, [0][0][1][0][RTW89_FCC][40] = 78, [0][0][1][0][RTW89_ETSI][40] = 30, @@ -10967,7 +11026,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][40] = 78, [0][0][1][0][RTW89_KCC][40] = 74, [0][0][1][0][RTW89_ACMA][40] = 74, - [0][0][1][0][RTW89_CN][40] = 74, + [0][0][1][0][RTW89_CN][40] = 64, [0][0][1][0][RTW89_UK][40] = 58, [0][0][1][0][RTW89_FCC][42] = 78, [0][0][1][0][RTW89_ETSI][42] = 30, @@ -10975,7 +11034,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][42] = 78, [0][0][1][0][RTW89_KCC][42] = 74, [0][0][1][0][RTW89_ACMA][42] = 74, - [0][0][1][0][RTW89_CN][42] = 74, + [0][0][1][0][RTW89_CN][42] = 64, [0][0][1][0][RTW89_UK][42] = 58, [0][0][1][0][RTW89_FCC][44] = 78, [0][0][1][0][RTW89_ETSI][44] = 30, @@ -10983,7 +11042,7 @@ const s8 
rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][44] = 78, [0][0][1][0][RTW89_KCC][44] = 74, [0][0][1][0][RTW89_ACMA][44] = 74, - [0][0][1][0][RTW89_CN][44] = 74, + [0][0][1][0][RTW89_CN][44] = 62, [0][0][1][0][RTW89_UK][44] = 58, [0][0][1][0][RTW89_FCC][46] = 78, [0][0][1][0][RTW89_ETSI][46] = 30, @@ -10991,9 +11050,9 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_IC][46] = 78, [0][0][1][0][RTW89_KCC][46] = 74, [0][0][1][0][RTW89_ACMA][46] = 74, - [0][0][1][0][RTW89_CN][46] = 74, + [0][0][1][0][RTW89_CN][46] = 62, [0][0][1][0][RTW89_UK][46] = 58, - [0][0][1][0][RTW89_FCC][48] = 68, + [0][0][1][0][RTW89_FCC][48] = 72, [0][0][1][0][RTW89_ETSI][48] = 127, [0][0][1][0][RTW89_MKK][48] = 127, [0][0][1][0][RTW89_IC][48] = 127, @@ -11001,7 +11060,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_ACMA][48] = 127, [0][0][1][0][RTW89_CN][48] = 127, [0][0][1][0][RTW89_UK][48] = 127, - [0][0][1][0][RTW89_FCC][50] = 68, + [0][0][1][0][RTW89_FCC][50] = 72, [0][0][1][0][RTW89_ETSI][50] = 127, [0][0][1][0][RTW89_MKK][50] = 127, [0][0][1][0][RTW89_IC][50] = 127, @@ -11009,7 +11068,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_ACMA][50] = 127, [0][0][1][0][RTW89_CN][50] = 127, [0][0][1][0][RTW89_UK][50] = 127, - [0][0][1][0][RTW89_FCC][52] = 68, + [0][0][1][0][RTW89_FCC][52] = 72, [0][0][1][0][RTW89_ETSI][52] = 127, [0][0][1][0][RTW89_MKK][52] = 127, [0][0][1][0][RTW89_IC][52] = 127, @@ -11241,13 +11300,13 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_ACMA][52] = 127, [0][1][1][0][RTW89_CN][52] = 127, [0][1][1][0][RTW89_UK][52] = 127, - [0][0][2][0][RTW89_FCC][0] = 72, + [0][0][2][0][RTW89_FCC][0] = 76, [0][0][2][0][RTW89_ETSI][0] = 62, [0][0][2][0][RTW89_MKK][0] = 62, [0][0][2][0][RTW89_IC][0] = 64, [0][0][2][0][RTW89_KCC][0] = 74, [0][0][2][0][RTW89_ACMA][0] = 62, - [0][0][2][0][RTW89_CN][0] = 62, + [0][0][2][0][RTW89_CN][0] = 60, [0][0][2][0][RTW89_UK][0] = 62, [0][0][2][0][RTW89_FCC][2] = 78, [0][0][2][0][RTW89_ETSI][2] = 62, @@ -11255,7 +11314,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][2] = 64, [0][0][2][0][RTW89_KCC][2] = 74, [0][0][2][0][RTW89_ACMA][2] = 62, - [0][0][2][0][RTW89_CN][2] = 62, + [0][0][2][0][RTW89_CN][2] = 60, [0][0][2][0][RTW89_UK][2] = 62, [0][0][2][0][RTW89_FCC][4] = 78, [0][0][2][0][RTW89_ETSI][4] = 62, @@ -11263,7 +11322,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][4] = 64, [0][0][2][0][RTW89_KCC][4] = 74, [0][0][2][0][RTW89_ACMA][4] = 62, - [0][0][2][0][RTW89_CN][4] = 62, + [0][0][2][0][RTW89_CN][4] = 60, [0][0][2][0][RTW89_UK][4] = 62, [0][0][2][0][RTW89_FCC][6] = 78, [0][0][2][0][RTW89_ETSI][6] = 62, @@ -11271,7 +11330,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][6] = 64, [0][0][2][0][RTW89_KCC][6] = 54, [0][0][2][0][RTW89_ACMA][6] = 62, - [0][0][2][0][RTW89_CN][6] = 62, + [0][0][2][0][RTW89_CN][6] = 60, [0][0][2][0][RTW89_UK][6] = 62, [0][0][2][0][RTW89_FCC][8] = 78, [0][0][2][0][RTW89_ETSI][8] = 62, @@ -11279,7 +11338,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][8] = 64, [0][0][2][0][RTW89_KCC][8] = 74, [0][0][2][0][RTW89_ACMA][8] = 62, - [0][0][2][0][RTW89_CN][8] = 62, + [0][0][2][0][RTW89_CN][8] = 60, [0][0][2][0][RTW89_UK][8] = 62, 
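(One more hedged sketch, on why a precomputed WW column exists at all: a consumer presumably resolves the running country to one of the table's domains and reads that column, with anything unmapped dropping to the worldwide minimum. This is an illustration only, not the in-tree selection logic, and the enum below is a placeholder:

/* Hypothetical selection step: a mapped country uses its own column,
 * an unmapped one falls back to the worldwide (WW) column.  Purely
 * illustrative; not the rtw89 lookup code.
 */
enum regd { WW, FCC, ETSI, MKK, IC, KCC, ACMA, CN, UK, REGD_NUM };

static signed char pick_limit(const signed char col[REGD_NUM], int regd)
{
	if (regd < 0 || regd >= REGD_NUM)
		return col[WW];	/* unmapped country: worldwide minimum */
	return col[regd];
}
)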
[0][0][2][0][RTW89_FCC][10] = 78, [0][0][2][0][RTW89_ETSI][10] = 62, @@ -11287,7 +11346,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][10] = 64, [0][0][2][0][RTW89_KCC][10] = 74, [0][0][2][0][RTW89_ACMA][10] = 62, - [0][0][2][0][RTW89_CN][10] = 62, + [0][0][2][0][RTW89_CN][10] = 60, [0][0][2][0][RTW89_UK][10] = 62, [0][0][2][0][RTW89_FCC][12] = 78, [0][0][2][0][RTW89_ETSI][12] = 62, @@ -11295,20 +11354,20 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][12] = 64, [0][0][2][0][RTW89_KCC][12] = 74, [0][0][2][0][RTW89_ACMA][12] = 62, - [0][0][2][0][RTW89_CN][12] = 62, + [0][0][2][0][RTW89_CN][12] = 60, [0][0][2][0][RTW89_UK][12] = 62, - [0][0][2][0][RTW89_FCC][14] = 70, + [0][0][2][0][RTW89_FCC][14] = 74, [0][0][2][0][RTW89_ETSI][14] = 62, [0][0][2][0][RTW89_MKK][14] = 62, [0][0][2][0][RTW89_IC][14] = 64, [0][0][2][0][RTW89_KCC][14] = 74, [0][0][2][0][RTW89_ACMA][14] = 62, - [0][0][2][0][RTW89_CN][14] = 62, + [0][0][2][0][RTW89_CN][14] = 60, [0][0][2][0][RTW89_UK][14] = 62, - [0][0][2][0][RTW89_FCC][15] = 70, + [0][0][2][0][RTW89_FCC][15] = 74, [0][0][2][0][RTW89_ETSI][15] = 60, [0][0][2][0][RTW89_MKK][15] = 74, - [0][0][2][0][RTW89_IC][15] = 70, + [0][0][2][0][RTW89_IC][15] = 74, [0][0][2][0][RTW89_KCC][15] = 74, [0][0][2][0][RTW89_ACMA][15] = 60, [0][0][2][0][RTW89_CN][15] = 127, @@ -11385,10 +11444,10 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_ACMA][33] = 62, [0][0][2][0][RTW89_CN][33] = 127, [0][0][2][0][RTW89_UK][33] = 62, - [0][0][2][0][RTW89_FCC][35] = 68, + [0][0][2][0][RTW89_FCC][35] = 72, [0][0][2][0][RTW89_ETSI][35] = 62, [0][0][2][0][RTW89_MKK][35] = 74, - [0][0][2][0][RTW89_IC][35] = 68, + [0][0][2][0][RTW89_IC][35] = 72, [0][0][2][0][RTW89_KCC][35] = 74, [0][0][2][0][RTW89_ACMA][35] = 62, [0][0][2][0][RTW89_CN][35] = 127, @@ -11407,7 +11466,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][38] = 78, [0][0][2][0][RTW89_KCC][38] = 66, [0][0][2][0][RTW89_ACMA][38] = 74, - [0][0][2][0][RTW89_CN][38] = 74, + [0][0][2][0][RTW89_CN][38] = 66, [0][0][2][0][RTW89_UK][38] = 60, [0][0][2][0][RTW89_FCC][40] = 78, [0][0][2][0][RTW89_ETSI][40] = 30, @@ -11415,7 +11474,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][40] = 78, [0][0][2][0][RTW89_KCC][40] = 74, [0][0][2][0][RTW89_ACMA][40] = 74, - [0][0][2][0][RTW89_CN][40] = 74, + [0][0][2][0][RTW89_CN][40] = 66, [0][0][2][0][RTW89_UK][40] = 60, [0][0][2][0][RTW89_FCC][42] = 78, [0][0][2][0][RTW89_ETSI][42] = 30, @@ -11423,7 +11482,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][42] = 78, [0][0][2][0][RTW89_KCC][42] = 74, [0][0][2][0][RTW89_ACMA][42] = 74, - [0][0][2][0][RTW89_CN][42] = 74, + [0][0][2][0][RTW89_CN][42] = 66, [0][0][2][0][RTW89_UK][42] = 60, [0][0][2][0][RTW89_FCC][44] = 78, [0][0][2][0][RTW89_ETSI][44] = 30, @@ -11431,7 +11490,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][44] = 78, [0][0][2][0][RTW89_KCC][44] = 74, [0][0][2][0][RTW89_ACMA][44] = 74, - [0][0][2][0][RTW89_CN][44] = 74, + [0][0][2][0][RTW89_CN][44] = 64, [0][0][2][0][RTW89_UK][44] = 60, [0][0][2][0][RTW89_FCC][46] = 78, [0][0][2][0][RTW89_ETSI][46] = 30, @@ -11439,9 +11498,9 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_IC][46] = 78, [0][0][2][0][RTW89_KCC][46] = 
74, [0][0][2][0][RTW89_ACMA][46] = 74, - [0][0][2][0][RTW89_CN][46] = 74, + [0][0][2][0][RTW89_CN][46] = 64, [0][0][2][0][RTW89_UK][46] = 60, - [0][0][2][0][RTW89_FCC][48] = 70, + [0][0][2][0][RTW89_FCC][48] = 74, [0][0][2][0][RTW89_ETSI][48] = 127, [0][0][2][0][RTW89_MKK][48] = 127, [0][0][2][0][RTW89_IC][48] = 127, @@ -11449,7 +11508,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_ACMA][48] = 127, [0][0][2][0][RTW89_CN][48] = 127, [0][0][2][0][RTW89_UK][48] = 127, - [0][0][2][0][RTW89_FCC][50] = 70, + [0][0][2][0][RTW89_FCC][50] = 74, [0][0][2][0][RTW89_ETSI][50] = 127, [0][0][2][0][RTW89_MKK][50] = 127, [0][0][2][0][RTW89_IC][50] = 127, @@ -11457,7 +11516,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_ACMA][50] = 127, [0][0][2][0][RTW89_CN][50] = 127, [0][0][2][0][RTW89_UK][50] = 127, - [0][0][2][0][RTW89_FCC][52] = 70, + [0][0][2][0][RTW89_FCC][52] = 74, [0][0][2][0][RTW89_ETSI][52] = 127, [0][0][2][0][RTW89_MKK][52] = 127, [0][0][2][0][RTW89_IC][52] = 127, @@ -11913,13 +11972,13 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_ACMA][52] = 127, [0][1][2][1][RTW89_CN][52] = 127, [0][1][2][1][RTW89_UK][52] = 127, - [1][0][2][0][RTW89_FCC][1] = 62, + [1][0][2][0][RTW89_FCC][1] = 66, [1][0][2][0][RTW89_ETSI][1] = 64, [1][0][2][0][RTW89_MKK][1] = 64, - [1][0][2][0][RTW89_IC][1] = 60, + [1][0][2][0][RTW89_IC][1] = 64, [1][0][2][0][RTW89_KCC][1] = 74, [1][0][2][0][RTW89_ACMA][1] = 64, - [1][0][2][0][RTW89_CN][1] = 64, + [1][0][2][0][RTW89_CN][1] = 66, [1][0][2][0][RTW89_UK][1] = 64, [1][0][2][0][RTW89_FCC][5] = 80, [1][0][2][0][RTW89_ETSI][5] = 64, @@ -11927,7 +11986,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][5] = 64, [1][0][2][0][RTW89_KCC][5] = 66, [1][0][2][0][RTW89_ACMA][5] = 64, - [1][0][2][0][RTW89_CN][5] = 64, + [1][0][2][0][RTW89_CN][5] = 66, [1][0][2][0][RTW89_UK][5] = 64, [1][0][2][0][RTW89_FCC][9] = 80, [1][0][2][0][RTW89_ETSI][9] = 64, @@ -11935,20 +11994,20 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][9] = 64, [1][0][2][0][RTW89_KCC][9] = 76, [1][0][2][0][RTW89_ACMA][9] = 64, - [1][0][2][0][RTW89_CN][9] = 64, + [1][0][2][0][RTW89_CN][9] = 58, [1][0][2][0][RTW89_UK][9] = 64, - [1][0][2][0][RTW89_FCC][13] = 60, + [1][0][2][0][RTW89_FCC][13] = 64, [1][0][2][0][RTW89_ETSI][13] = 64, [1][0][2][0][RTW89_MKK][13] = 64, - [1][0][2][0][RTW89_IC][13] = 60, + [1][0][2][0][RTW89_IC][13] = 64, [1][0][2][0][RTW89_KCC][13] = 72, [1][0][2][0][RTW89_ACMA][13] = 64, - [1][0][2][0][RTW89_CN][13] = 64, + [1][0][2][0][RTW89_CN][13] = 58, [1][0][2][0][RTW89_UK][13] = 64, - [1][0][2][0][RTW89_FCC][16] = 62, + [1][0][2][0][RTW89_FCC][16] = 66, [1][0][2][0][RTW89_ETSI][16] = 66, [1][0][2][0][RTW89_MKK][16] = 76, - [1][0][2][0][RTW89_IC][16] = 62, + [1][0][2][0][RTW89_IC][16] = 66, [1][0][2][0][RTW89_KCC][16] = 74, [1][0][2][0][RTW89_ACMA][16] = 66, [1][0][2][0][RTW89_CN][16] = 127, @@ -11956,7 +12015,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_FCC][20] = 80, [1][0][2][0][RTW89_ETSI][20] = 66, [1][0][2][0][RTW89_MKK][20] = 76, - [1][0][2][0][RTW89_IC][20] = 76, + [1][0][2][0][RTW89_IC][20] = 80, [1][0][2][0][RTW89_KCC][20] = 74, [1][0][2][0][RTW89_ACMA][20] = 66, [1][0][2][0][RTW89_CN][20] = 127, @@ -11977,10 +12036,10 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] 
[1][0][2][0][RTW89_ACMA][28] = 127, [1][0][2][0][RTW89_CN][28] = 127, [1][0][2][0][RTW89_UK][28] = 66, - [1][0][2][0][RTW89_FCC][32] = 70, + [1][0][2][0][RTW89_FCC][32] = 74, [1][0][2][0][RTW89_ETSI][32] = 66, [1][0][2][0][RTW89_MKK][32] = 76, - [1][0][2][0][RTW89_IC][32] = 70, + [1][0][2][0][RTW89_IC][32] = 74, [1][0][2][0][RTW89_KCC][32] = 76, [1][0][2][0][RTW89_ACMA][32] = 66, [1][0][2][0][RTW89_CN][32] = 127, @@ -11996,10 +12055,10 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_FCC][39] = 80, [1][0][2][0][RTW89_ETSI][39] = 30, [1][0][2][0][RTW89_MKK][39] = 127, - [1][0][2][0][RTW89_IC][39] = 76, + [1][0][2][0][RTW89_IC][39] = 80, [1][0][2][0][RTW89_KCC][39] = 68, [1][0][2][0][RTW89_ACMA][39] = 76, - [1][0][2][0][RTW89_CN][39] = 70, + [1][0][2][0][RTW89_CN][39] = 56, [1][0][2][0][RTW89_UK][39] = 64, [1][0][2][0][RTW89_FCC][43] = 80, [1][0][2][0][RTW89_ETSI][43] = 30, @@ -12007,9 +12066,9 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_IC][43] = 80, [1][0][2][0][RTW89_KCC][43] = 76, [1][0][2][0][RTW89_ACMA][43] = 76, - [1][0][2][0][RTW89_CN][43] = 76, + [1][0][2][0][RTW89_CN][43] = 64, [1][0][2][0][RTW89_UK][43] = 64, - [1][0][2][0][RTW89_FCC][47] = 76, + [1][0][2][0][RTW89_FCC][47] = 80, [1][0][2][0][RTW89_ETSI][47] = 127, [1][0][2][0][RTW89_MKK][47] = 127, [1][0][2][0][RTW89_IC][47] = 127, @@ -12017,7 +12076,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_ACMA][47] = 127, [1][0][2][0][RTW89_CN][47] = 127, [1][0][2][0][RTW89_UK][47] = 127, - [1][0][2][0][RTW89_FCC][51] = 76, + [1][0][2][0][RTW89_FCC][51] = 80, [1][0][2][0][RTW89_ETSI][51] = 127, [1][0][2][0][RTW89_MKK][51] = 127, [1][0][2][0][RTW89_IC][51] = 127, @@ -12249,26 +12308,26 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_ACMA][51] = 127, [1][1][2][1][RTW89_CN][51] = 127, [1][1][2][1][RTW89_UK][51] = 127, - [2][0][2][0][RTW89_FCC][3] = 68, + [2][0][2][0][RTW89_FCC][3] = 72, [2][0][2][0][RTW89_ETSI][3] = 64, [2][0][2][0][RTW89_MKK][3] = 62, - [2][0][2][0][RTW89_IC][3] = 60, + [2][0][2][0][RTW89_IC][3] = 64, [2][0][2][0][RTW89_KCC][3] = 68, [2][0][2][0][RTW89_ACMA][3] = 64, - [2][0][2][0][RTW89_CN][3] = 64, + [2][0][2][0][RTW89_CN][3] = 60, [2][0][2][0][RTW89_UK][3] = 64, - [2][0][2][0][RTW89_FCC][11] = 58, + [2][0][2][0][RTW89_FCC][11] = 62, [2][0][2][0][RTW89_ETSI][11] = 64, [2][0][2][0][RTW89_MKK][11] = 64, - [2][0][2][0][RTW89_IC][11] = 58, + [2][0][2][0][RTW89_IC][11] = 62, [2][0][2][0][RTW89_KCC][11] = 68, [2][0][2][0][RTW89_ACMA][11] = 64, - [2][0][2][0][RTW89_CN][11] = 64, + [2][0][2][0][RTW89_CN][11] = 54, [2][0][2][0][RTW89_UK][11] = 64, - [2][0][2][0][RTW89_FCC][18] = 62, + [2][0][2][0][RTW89_FCC][18] = 66, [2][0][2][0][RTW89_ETSI][18] = 64, [2][0][2][0][RTW89_MKK][18] = 68, - [2][0][2][0][RTW89_IC][18] = 62, + [2][0][2][0][RTW89_IC][18] = 66, [2][0][2][0][RTW89_KCC][18] = 68, [2][0][2][0][RTW89_ACMA][18] = 64, [2][0][2][0][RTW89_CN][18] = 127, @@ -12284,7 +12343,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_FCC][34] = 72, [2][0][2][0][RTW89_ETSI][34] = 127, [2][0][2][0][RTW89_MKK][34] = 68, - [2][0][2][0][RTW89_IC][34] = 68, + [2][0][2][0][RTW89_IC][34] = 72, [2][0][2][0][RTW89_KCC][34] = 68, [2][0][2][0][RTW89_ACMA][34] = 68, [2][0][2][0][RTW89_CN][34] = 127, @@ -12292,12 +12351,12 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_FCC][41] 
= 72, [2][0][2][0][RTW89_ETSI][41] = 30, [2][0][2][0][RTW89_MKK][41] = 127, - [2][0][2][0][RTW89_IC][41] = 68, + [2][0][2][0][RTW89_IC][41] = 72, [2][0][2][0][RTW89_KCC][41] = 64, [2][0][2][0][RTW89_ACMA][41] = 68, - [2][0][2][0][RTW89_CN][41] = 68, + [2][0][2][0][RTW89_CN][41] = 38, [2][0][2][0][RTW89_UK][41] = 64, - [2][0][2][0][RTW89_FCC][49] = 68, + [2][0][2][0][RTW89_FCC][49] = 72, [2][0][2][0][RTW89_ETSI][49] = 127, [2][0][2][0][RTW89_MKK][49] = 127, [2][0][2][0][RTW89_IC][49] = 127, @@ -12423,7 +12482,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_IC][7] = 127, [3][0][2][0][RTW89_KCC][7] = 127, [3][0][2][0][RTW89_ACMA][7] = 127, - [3][0][2][0][RTW89_CN][7] = 58, + [3][0][2][0][RTW89_CN][7] = 127, [3][0][2][0][RTW89_UK][7] = 127, [3][0][2][0][RTW89_FCC][22] = 127, [3][0][2][0][RTW89_ETSI][22] = 127, @@ -12431,7 +12490,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_IC][22] = 127, [3][0][2][0][RTW89_KCC][22] = 127, [3][0][2][0][RTW89_ACMA][22] = 127, - [3][0][2][0][RTW89_CN][22] = 58, + [3][0][2][0][RTW89_CN][22] = 127, [3][0][2][0][RTW89_UK][22] = 127, [3][0][2][0][RTW89_FCC][45] = 127, [3][0][2][0][RTW89_ETSI][45] = 127, @@ -12508,19 +12567,19 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_WW][11] = 30, [0][0][RTW89_WW][12] = 30, [0][0][RTW89_WW][13] = 0, - [0][1][RTW89_WW][0] = 20, - [0][1][RTW89_WW][1] = 22, - [0][1][RTW89_WW][2] = 22, - [0][1][RTW89_WW][3] = 22, - [0][1][RTW89_WW][4] = 22, - [0][1][RTW89_WW][5] = 22, - [0][1][RTW89_WW][6] = 22, - [0][1][RTW89_WW][7] = 22, - [0][1][RTW89_WW][8] = 22, - [0][1][RTW89_WW][9] = 22, - [0][1][RTW89_WW][10] = 22, - [0][1][RTW89_WW][11] = 22, - [0][1][RTW89_WW][12] = 20, + [0][1][RTW89_WW][0] = 0, + [0][1][RTW89_WW][1] = 0, + [0][1][RTW89_WW][2] = 0, + [0][1][RTW89_WW][3] = 0, + [0][1][RTW89_WW][4] = 0, + [0][1][RTW89_WW][5] = 0, + [0][1][RTW89_WW][6] = 0, + [0][1][RTW89_WW][7] = 0, + [0][1][RTW89_WW][8] = 0, + [0][1][RTW89_WW][9] = 0, + [0][1][RTW89_WW][10] = 0, + [0][1][RTW89_WW][11] = 0, + [0][1][RTW89_WW][12] = 0, [0][1][RTW89_WW][13] = 0, [1][0][RTW89_WW][0] = 42, [1][0][RTW89_WW][1] = 42, @@ -12536,19 +12595,19 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_WW][11] = 42, [1][0][RTW89_WW][12] = 34, [1][0][RTW89_WW][13] = 0, - [1][1][RTW89_WW][0] = 32, - [1][1][RTW89_WW][1] = 32, - [1][1][RTW89_WW][2] = 32, - [1][1][RTW89_WW][3] = 32, - [1][1][RTW89_WW][4] = 32, - [1][1][RTW89_WW][5] = 32, - [1][1][RTW89_WW][6] = 32, - [1][1][RTW89_WW][7] = 32, - [1][1][RTW89_WW][8] = 32, - [1][1][RTW89_WW][9] = 32, - [1][1][RTW89_WW][10] = 32, - [1][1][RTW89_WW][11] = 32, - [1][1][RTW89_WW][12] = 32, + [1][1][RTW89_WW][0] = 0, + [1][1][RTW89_WW][1] = 0, + [1][1][RTW89_WW][2] = 0, + [1][1][RTW89_WW][3] = 0, + [1][1][RTW89_WW][4] = 0, + [1][1][RTW89_WW][5] = 0, + [1][1][RTW89_WW][6] = 0, + [1][1][RTW89_WW][7] = 0, + [1][1][RTW89_WW][8] = 0, + [1][1][RTW89_WW][9] = 0, + [1][1][RTW89_WW][10] = 0, + [1][1][RTW89_WW][11] = 0, + [1][1][RTW89_WW][12] = 0, [1][1][RTW89_WW][13] = 0, [2][0][RTW89_WW][0] = 54, [2][0][RTW89_WW][1] = 54, @@ -12564,19 +12623,19 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_WW][11] = 54, [2][0][RTW89_WW][12] = 34, [2][0][RTW89_WW][13] = 0, - [2][1][RTW89_WW][0] = 44, - [2][1][RTW89_WW][1] = 44, - [2][1][RTW89_WW][2] = 44, - [2][1][RTW89_WW][3] = 44, - [2][1][RTW89_WW][4] = 44, - [2][1][RTW89_WW][5] = 44, - 
[2][1][RTW89_WW][6] = 44, - [2][1][RTW89_WW][7] = 44, - [2][1][RTW89_WW][8] = 44, - [2][1][RTW89_WW][9] = 44, - [2][1][RTW89_WW][10] = 44, - [2][1][RTW89_WW][11] = 44, - [2][1][RTW89_WW][12] = 42, + [2][1][RTW89_WW][0] = 0, + [2][1][RTW89_WW][1] = 0, + [2][1][RTW89_WW][2] = 0, + [2][1][RTW89_WW][3] = 0, + [2][1][RTW89_WW][4] = 0, + [2][1][RTW89_WW][5] = 0, + [2][1][RTW89_WW][6] = 0, + [2][1][RTW89_WW][7] = 0, + [2][1][RTW89_WW][8] = 0, + [2][1][RTW89_WW][9] = 0, + [2][1][RTW89_WW][10] = 0, + [2][1][RTW89_WW][11] = 0, + [2][1][RTW89_WW][12] = 0, [2][1][RTW89_WW][13] = 0, [0][0][RTW89_FCC][0] = 60, [0][0][RTW89_ETSI][0] = 30, @@ -12696,7 +12755,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][0] = 127, [0][1][RTW89_KCC][0] = 127, [0][1][RTW89_ACMA][0] = 127, - [0][1][RTW89_CN][0] = 20, + [0][1][RTW89_CN][0] = 127, [0][1][RTW89_UK][0] = 127, [0][1][RTW89_FCC][1] = 127, [0][1][RTW89_ETSI][1] = 127, @@ -12704,7 +12763,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][1] = 127, [0][1][RTW89_KCC][1] = 127, [0][1][RTW89_ACMA][1] = 127, - [0][1][RTW89_CN][1] = 22, + [0][1][RTW89_CN][1] = 127, [0][1][RTW89_UK][1] = 127, [0][1][RTW89_FCC][2] = 127, [0][1][RTW89_ETSI][2] = 127, @@ -12712,7 +12771,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][2] = 127, [0][1][RTW89_KCC][2] = 127, [0][1][RTW89_ACMA][2] = 127, - [0][1][RTW89_CN][2] = 22, + [0][1][RTW89_CN][2] = 127, [0][1][RTW89_UK][2] = 127, [0][1][RTW89_FCC][3] = 127, [0][1][RTW89_ETSI][3] = 127, @@ -12720,7 +12779,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][3] = 127, [0][1][RTW89_KCC][3] = 127, [0][1][RTW89_ACMA][3] = 127, - [0][1][RTW89_CN][3] = 22, + [0][1][RTW89_CN][3] = 127, [0][1][RTW89_UK][3] = 127, [0][1][RTW89_FCC][4] = 127, [0][1][RTW89_ETSI][4] = 127, @@ -12728,7 +12787,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][4] = 127, [0][1][RTW89_KCC][4] = 127, [0][1][RTW89_ACMA][4] = 127, - [0][1][RTW89_CN][4] = 22, + [0][1][RTW89_CN][4] = 127, [0][1][RTW89_UK][4] = 127, [0][1][RTW89_FCC][5] = 127, [0][1][RTW89_ETSI][5] = 127, @@ -12736,7 +12795,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][5] = 127, [0][1][RTW89_KCC][5] = 127, [0][1][RTW89_ACMA][5] = 127, - [0][1][RTW89_CN][5] = 22, + [0][1][RTW89_CN][5] = 127, [0][1][RTW89_UK][5] = 127, [0][1][RTW89_FCC][6] = 127, [0][1][RTW89_ETSI][6] = 127, @@ -12744,7 +12803,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][6] = 127, [0][1][RTW89_KCC][6] = 127, [0][1][RTW89_ACMA][6] = 127, - [0][1][RTW89_CN][6] = 22, + [0][1][RTW89_CN][6] = 127, [0][1][RTW89_UK][6] = 127, [0][1][RTW89_FCC][7] = 127, [0][1][RTW89_ETSI][7] = 127, @@ -12752,7 +12811,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][7] = 127, [0][1][RTW89_KCC][7] = 127, [0][1][RTW89_ACMA][7] = 127, - [0][1][RTW89_CN][7] = 22, + [0][1][RTW89_CN][7] = 127, [0][1][RTW89_UK][7] = 127, [0][1][RTW89_FCC][8] = 127, [0][1][RTW89_ETSI][8] = 127, @@ -12760,7 +12819,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][8] = 127, [0][1][RTW89_KCC][8] = 127, [0][1][RTW89_ACMA][8] = 127, - [0][1][RTW89_CN][8] = 22, + [0][1][RTW89_CN][8] = 127, [0][1][RTW89_UK][8] = 127, [0][1][RTW89_FCC][9] = 127, [0][1][RTW89_ETSI][9] = 127, @@ -12768,7 +12827,7 @@ const s8 
rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][9] = 127, [0][1][RTW89_KCC][9] = 127, [0][1][RTW89_ACMA][9] = 127, - [0][1][RTW89_CN][9] = 22, + [0][1][RTW89_CN][9] = 127, [0][1][RTW89_UK][9] = 127, [0][1][RTW89_FCC][10] = 127, [0][1][RTW89_ETSI][10] = 127, @@ -12776,7 +12835,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][10] = 127, [0][1][RTW89_KCC][10] = 127, [0][1][RTW89_ACMA][10] = 127, - [0][1][RTW89_CN][10] = 22, + [0][1][RTW89_CN][10] = 127, [0][1][RTW89_UK][10] = 127, [0][1][RTW89_FCC][11] = 127, [0][1][RTW89_ETSI][11] = 127, @@ -12784,7 +12843,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][11] = 127, [0][1][RTW89_KCC][11] = 127, [0][1][RTW89_ACMA][11] = 127, - [0][1][RTW89_CN][11] = 22, + [0][1][RTW89_CN][11] = 127, [0][1][RTW89_UK][11] = 127, [0][1][RTW89_FCC][12] = 127, [0][1][RTW89_ETSI][12] = 127, @@ -12792,7 +12851,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][12] = 127, [0][1][RTW89_KCC][12] = 127, [0][1][RTW89_ACMA][12] = 127, - [0][1][RTW89_CN][12] = 20, + [0][1][RTW89_CN][12] = 127, [0][1][RTW89_UK][12] = 127, [0][1][RTW89_FCC][13] = 127, [0][1][RTW89_ETSI][13] = 127, @@ -12920,7 +12979,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][0] = 127, [1][1][RTW89_KCC][0] = 127, [1][1][RTW89_ACMA][0] = 127, - [1][1][RTW89_CN][0] = 32, + [1][1][RTW89_CN][0] = 127, [1][1][RTW89_UK][0] = 127, [1][1][RTW89_FCC][1] = 127, [1][1][RTW89_ETSI][1] = 127, @@ -12928,7 +12987,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][1] = 127, [1][1][RTW89_KCC][1] = 127, [1][1][RTW89_ACMA][1] = 127, - [1][1][RTW89_CN][1] = 32, + [1][1][RTW89_CN][1] = 127, [1][1][RTW89_UK][1] = 127, [1][1][RTW89_FCC][2] = 127, [1][1][RTW89_ETSI][2] = 127, @@ -12936,7 +12995,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][2] = 127, [1][1][RTW89_KCC][2] = 127, [1][1][RTW89_ACMA][2] = 127, - [1][1][RTW89_CN][2] = 32, + [1][1][RTW89_CN][2] = 127, [1][1][RTW89_UK][2] = 127, [1][1][RTW89_FCC][3] = 127, [1][1][RTW89_ETSI][3] = 127, @@ -12944,7 +13003,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][3] = 127, [1][1][RTW89_KCC][3] = 127, [1][1][RTW89_ACMA][3] = 127, - [1][1][RTW89_CN][3] = 32, + [1][1][RTW89_CN][3] = 127, [1][1][RTW89_UK][3] = 127, [1][1][RTW89_FCC][4] = 127, [1][1][RTW89_ETSI][4] = 127, @@ -12952,7 +13011,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][4] = 127, [1][1][RTW89_KCC][4] = 127, [1][1][RTW89_ACMA][4] = 127, - [1][1][RTW89_CN][4] = 32, + [1][1][RTW89_CN][4] = 127, [1][1][RTW89_UK][4] = 127, [1][1][RTW89_FCC][5] = 127, [1][1][RTW89_ETSI][5] = 127, @@ -12960,7 +13019,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][5] = 127, [1][1][RTW89_KCC][5] = 127, [1][1][RTW89_ACMA][5] = 127, - [1][1][RTW89_CN][5] = 32, + [1][1][RTW89_CN][5] = 127, [1][1][RTW89_UK][5] = 127, [1][1][RTW89_FCC][6] = 127, [1][1][RTW89_ETSI][6] = 127, @@ -12968,7 +13027,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][6] = 127, [1][1][RTW89_KCC][6] = 127, [1][1][RTW89_ACMA][6] = 127, - [1][1][RTW89_CN][6] = 32, + [1][1][RTW89_CN][6] = 127, [1][1][RTW89_UK][6] = 127, [1][1][RTW89_FCC][7] = 127, [1][1][RTW89_ETSI][7] = 127, @@ -12976,7 +13035,7 @@ const 
s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][7] = 127, [1][1][RTW89_KCC][7] = 127, [1][1][RTW89_ACMA][7] = 127, - [1][1][RTW89_CN][7] = 32, + [1][1][RTW89_CN][7] = 127, [1][1][RTW89_UK][7] = 127, [1][1][RTW89_FCC][8] = 127, [1][1][RTW89_ETSI][8] = 127, @@ -12984,7 +13043,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][8] = 127, [1][1][RTW89_KCC][8] = 127, [1][1][RTW89_ACMA][8] = 127, - [1][1][RTW89_CN][8] = 32, + [1][1][RTW89_CN][8] = 127, [1][1][RTW89_UK][8] = 127, [1][1][RTW89_FCC][9] = 127, [1][1][RTW89_ETSI][9] = 127, @@ -12992,7 +13051,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][9] = 127, [1][1][RTW89_KCC][9] = 127, [1][1][RTW89_ACMA][9] = 127, - [1][1][RTW89_CN][9] = 32, + [1][1][RTW89_CN][9] = 127, [1][1][RTW89_UK][9] = 127, [1][1][RTW89_FCC][10] = 127, [1][1][RTW89_ETSI][10] = 127, @@ -13000,7 +13059,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][10] = 127, [1][1][RTW89_KCC][10] = 127, [1][1][RTW89_ACMA][10] = 127, - [1][1][RTW89_CN][10] = 32, + [1][1][RTW89_CN][10] = 127, [1][1][RTW89_UK][10] = 127, [1][1][RTW89_FCC][11] = 127, [1][1][RTW89_ETSI][11] = 127, @@ -13008,7 +13067,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][11] = 127, [1][1][RTW89_KCC][11] = 127, [1][1][RTW89_ACMA][11] = 127, - [1][1][RTW89_CN][11] = 32, + [1][1][RTW89_CN][11] = 127, [1][1][RTW89_UK][11] = 127, [1][1][RTW89_FCC][12] = 127, [1][1][RTW89_ETSI][12] = 127, @@ -13016,7 +13075,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][12] = 127, [1][1][RTW89_KCC][12] = 127, [1][1][RTW89_ACMA][12] = 127, - [1][1][RTW89_CN][12] = 32, + [1][1][RTW89_CN][12] = 127, [1][1][RTW89_UK][12] = 127, [1][1][RTW89_FCC][13] = 127, [1][1][RTW89_ETSI][13] = 127, @@ -13144,7 +13203,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][0] = 127, [2][1][RTW89_KCC][0] = 127, [2][1][RTW89_ACMA][0] = 127, - [2][1][RTW89_CN][0] = 44, + [2][1][RTW89_CN][0] = 127, [2][1][RTW89_UK][0] = 127, [2][1][RTW89_FCC][1] = 127, [2][1][RTW89_ETSI][1] = 127, @@ -13152,7 +13211,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][1] = 127, [2][1][RTW89_KCC][1] = 127, [2][1][RTW89_ACMA][1] = 127, - [2][1][RTW89_CN][1] = 44, + [2][1][RTW89_CN][1] = 127, [2][1][RTW89_UK][1] = 127, [2][1][RTW89_FCC][2] = 127, [2][1][RTW89_ETSI][2] = 127, @@ -13160,7 +13219,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][2] = 127, [2][1][RTW89_KCC][2] = 127, [2][1][RTW89_ACMA][2] = 127, - [2][1][RTW89_CN][2] = 44, + [2][1][RTW89_CN][2] = 127, [2][1][RTW89_UK][2] = 127, [2][1][RTW89_FCC][3] = 127, [2][1][RTW89_ETSI][3] = 127, @@ -13168,7 +13227,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][3] = 127, [2][1][RTW89_KCC][3] = 127, [2][1][RTW89_ACMA][3] = 127, - [2][1][RTW89_CN][3] = 44, + [2][1][RTW89_CN][3] = 127, [2][1][RTW89_UK][3] = 127, [2][1][RTW89_FCC][4] = 127, [2][1][RTW89_ETSI][4] = 127, @@ -13176,7 +13235,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][4] = 127, [2][1][RTW89_KCC][4] = 127, [2][1][RTW89_ACMA][4] = 127, - [2][1][RTW89_CN][4] = 44, + [2][1][RTW89_CN][4] = 127, [2][1][RTW89_UK][4] = 127, [2][1][RTW89_FCC][5] = 127, [2][1][RTW89_ETSI][5] = 127, @@ -13184,7 +13243,7 @@ 
const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][5] = 127, [2][1][RTW89_KCC][5] = 127, [2][1][RTW89_ACMA][5] = 127, - [2][1][RTW89_CN][5] = 44, + [2][1][RTW89_CN][5] = 127, [2][1][RTW89_UK][5] = 127, [2][1][RTW89_FCC][6] = 127, [2][1][RTW89_ETSI][6] = 127, @@ -13192,7 +13251,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][6] = 127, [2][1][RTW89_KCC][6] = 127, [2][1][RTW89_ACMA][6] = 127, - [2][1][RTW89_CN][6] = 44, + [2][1][RTW89_CN][6] = 127, [2][1][RTW89_UK][6] = 127, [2][1][RTW89_FCC][7] = 127, [2][1][RTW89_ETSI][7] = 127, @@ -13200,7 +13259,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][7] = 127, [2][1][RTW89_KCC][7] = 127, [2][1][RTW89_ACMA][7] = 127, - [2][1][RTW89_CN][7] = 44, + [2][1][RTW89_CN][7] = 127, [2][1][RTW89_UK][7] = 127, [2][1][RTW89_FCC][8] = 127, [2][1][RTW89_ETSI][8] = 127, @@ -13208,7 +13267,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][8] = 127, [2][1][RTW89_KCC][8] = 127, [2][1][RTW89_ACMA][8] = 127, - [2][1][RTW89_CN][8] = 44, + [2][1][RTW89_CN][8] = 127, [2][1][RTW89_UK][8] = 127, [2][1][RTW89_FCC][9] = 127, [2][1][RTW89_ETSI][9] = 127, @@ -13216,7 +13275,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][9] = 127, [2][1][RTW89_KCC][9] = 127, [2][1][RTW89_ACMA][9] = 127, - [2][1][RTW89_CN][9] = 44, + [2][1][RTW89_CN][9] = 127, [2][1][RTW89_UK][9] = 127, [2][1][RTW89_FCC][10] = 127, [2][1][RTW89_ETSI][10] = 127, @@ -13224,7 +13283,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][10] = 127, [2][1][RTW89_KCC][10] = 127, [2][1][RTW89_ACMA][10] = 127, - [2][1][RTW89_CN][10] = 44, + [2][1][RTW89_CN][10] = 127, [2][1][RTW89_UK][10] = 127, [2][1][RTW89_FCC][11] = 127, [2][1][RTW89_ETSI][11] = 127, @@ -13232,7 +13291,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][11] = 127, [2][1][RTW89_KCC][11] = 127, [2][1][RTW89_ACMA][11] = 127, - [2][1][RTW89_CN][11] = 44, + [2][1][RTW89_CN][11] = 127, [2][1][RTW89_UK][11] = 127, [2][1][RTW89_FCC][12] = 127, [2][1][RTW89_ETSI][12] = 127, @@ -13240,7 +13299,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_2g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][12] = 127, [2][1][RTW89_KCC][12] = 127, [2][1][RTW89_ACMA][12] = 127, - [2][1][RTW89_CN][12] = 42, + [2][1][RTW89_CN][12] = 127, [2][1][RTW89_UK][12] = 127, [2][1][RTW89_FCC][13] = 127, [2][1][RTW89_ETSI][13] = 127, @@ -13283,14 +13342,14 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_WW][48] = 40, [0][0][RTW89_WW][50] = 42, [0][0][RTW89_WW][52] = 38, - [0][1][RTW89_WW][0] = 4, - [0][1][RTW89_WW][2] = 4, - [0][1][RTW89_WW][4] = 4, - [0][1][RTW89_WW][6] = 4, - [0][1][RTW89_WW][8] = 4, - [0][1][RTW89_WW][10] = 4, - [0][1][RTW89_WW][12] = 4, - [0][1][RTW89_WW][14] = 4, + [0][1][RTW89_WW][0] = 0, + [0][1][RTW89_WW][2] = 0, + [0][1][RTW89_WW][4] = 0, + [0][1][RTW89_WW][6] = 0, + [0][1][RTW89_WW][8] = 0, + [0][1][RTW89_WW][10] = 0, + [0][1][RTW89_WW][12] = 0, + [0][1][RTW89_WW][14] = 0, [0][1][RTW89_WW][15] = 0, [0][1][RTW89_WW][17] = 0, [0][1][RTW89_WW][19] = 0, @@ -13303,11 +13362,11 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_WW][33] = 0, [0][1][RTW89_WW][35] = 0, [0][1][RTW89_WW][37] = 0, - [0][1][RTW89_WW][38] = 42, - [0][1][RTW89_WW][40] = 42, - [0][1][RTW89_WW][42] = 42, - [0][1][RTW89_WW][44] = 
42, - [0][1][RTW89_WW][46] = 42, + [0][1][RTW89_WW][38] = 0, + [0][1][RTW89_WW][40] = 0, + [0][1][RTW89_WW][42] = 0, + [0][1][RTW89_WW][44] = 0, + [0][1][RTW89_WW][46] = 0, [0][1][RTW89_WW][48] = 0, [0][1][RTW89_WW][50] = 0, [0][1][RTW89_WW][52] = 0, @@ -13339,14 +13398,14 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_WW][48] = 52, [1][0][RTW89_WW][50] = 52, [1][0][RTW89_WW][52] = 50, - [1][1][RTW89_WW][0] = 14, - [1][1][RTW89_WW][2] = 14, - [1][1][RTW89_WW][4] = 14, - [1][1][RTW89_WW][6] = 14, - [1][1][RTW89_WW][8] = 14, - [1][1][RTW89_WW][10] = 14, - [1][1][RTW89_WW][12] = 14, - [1][1][RTW89_WW][14] = 14, + [1][1][RTW89_WW][0] = 0, + [1][1][RTW89_WW][2] = 0, + [1][1][RTW89_WW][4] = 0, + [1][1][RTW89_WW][6] = 0, + [1][1][RTW89_WW][8] = 0, + [1][1][RTW89_WW][10] = 0, + [1][1][RTW89_WW][12] = 0, + [1][1][RTW89_WW][14] = 0, [1][1][RTW89_WW][15] = 0, [1][1][RTW89_WW][17] = 0, [1][1][RTW89_WW][19] = 0, @@ -13359,11 +13418,11 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_WW][33] = 0, [1][1][RTW89_WW][35] = 0, [1][1][RTW89_WW][37] = 0, - [1][1][RTW89_WW][38] = 54, - [1][1][RTW89_WW][40] = 54, - [1][1][RTW89_WW][42] = 54, - [1][1][RTW89_WW][44] = 54, - [1][1][RTW89_WW][46] = 54, + [1][1][RTW89_WW][38] = 0, + [1][1][RTW89_WW][40] = 0, + [1][1][RTW89_WW][42] = 0, + [1][1][RTW89_WW][44] = 0, + [1][1][RTW89_WW][46] = 0, [1][1][RTW89_WW][48] = 0, [1][1][RTW89_WW][50] = 0, [1][1][RTW89_WW][52] = 0, @@ -13395,14 +13454,14 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_WW][48] = 62, [2][0][RTW89_WW][50] = 62, [2][0][RTW89_WW][52] = 60, - [2][1][RTW89_WW][0] = 28, - [2][1][RTW89_WW][2] = 28, - [2][1][RTW89_WW][4] = 28, - [2][1][RTW89_WW][6] = 28, - [2][1][RTW89_WW][8] = 28, - [2][1][RTW89_WW][10] = 28, - [2][1][RTW89_WW][12] = 28, - [2][1][RTW89_WW][14] = 28, + [2][1][RTW89_WW][0] = 0, + [2][1][RTW89_WW][2] = 0, + [2][1][RTW89_WW][4] = 0, + [2][1][RTW89_WW][6] = 0, + [2][1][RTW89_WW][8] = 0, + [2][1][RTW89_WW][10] = 0, + [2][1][RTW89_WW][12] = 0, + [2][1][RTW89_WW][14] = 0, [2][1][RTW89_WW][15] = 0, [2][1][RTW89_WW][17] = 0, [2][1][RTW89_WW][19] = 0, @@ -13415,11 +13474,11 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_WW][33] = 0, [2][1][RTW89_WW][35] = 0, [2][1][RTW89_WW][37] = 0, - [2][1][RTW89_WW][38] = 56, - [2][1][RTW89_WW][40] = 56, - [2][1][RTW89_WW][42] = 56, - [2][1][RTW89_WW][44] = 56, - [2][1][RTW89_WW][46] = 56, + [2][1][RTW89_WW][38] = 0, + [2][1][RTW89_WW][40] = 0, + [2][1][RTW89_WW][42] = 0, + [2][1][RTW89_WW][44] = 0, + [2][1][RTW89_WW][46] = 0, [2][1][RTW89_WW][48] = 0, [2][1][RTW89_WW][50] = 0, [2][1][RTW89_WW][52] = 0, @@ -13653,7 +13712,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][0] = 127, [0][1][RTW89_KCC][0] = 127, [0][1][RTW89_ACMA][0] = 127, - [0][1][RTW89_CN][0] = 4, + [0][1][RTW89_CN][0] = 127, [0][1][RTW89_UK][0] = 127, [0][1][RTW89_FCC][2] = 127, [0][1][RTW89_ETSI][2] = 127, @@ -13661,7 +13720,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][2] = 127, [0][1][RTW89_KCC][2] = 127, [0][1][RTW89_ACMA][2] = 127, - [0][1][RTW89_CN][2] = 4, + [0][1][RTW89_CN][2] = 127, [0][1][RTW89_UK][2] = 127, [0][1][RTW89_FCC][4] = 127, [0][1][RTW89_ETSI][4] = 127, @@ -13669,7 +13728,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][4] = 127, [0][1][RTW89_KCC][4] = 127, [0][1][RTW89_ACMA][4] = 127, 
- [0][1][RTW89_CN][4] = 4, + [0][1][RTW89_CN][4] = 127, [0][1][RTW89_UK][4] = 127, [0][1][RTW89_FCC][6] = 127, [0][1][RTW89_ETSI][6] = 127, @@ -13677,7 +13736,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][6] = 127, [0][1][RTW89_KCC][6] = 127, [0][1][RTW89_ACMA][6] = 127, - [0][1][RTW89_CN][6] = 4, + [0][1][RTW89_CN][6] = 127, [0][1][RTW89_UK][6] = 127, [0][1][RTW89_FCC][8] = 127, [0][1][RTW89_ETSI][8] = 127, @@ -13685,7 +13744,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][8] = 127, [0][1][RTW89_KCC][8] = 127, [0][1][RTW89_ACMA][8] = 127, - [0][1][RTW89_CN][8] = 4, + [0][1][RTW89_CN][8] = 127, [0][1][RTW89_UK][8] = 127, [0][1][RTW89_FCC][10] = 127, [0][1][RTW89_ETSI][10] = 127, @@ -13693,7 +13752,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][10] = 127, [0][1][RTW89_KCC][10] = 127, [0][1][RTW89_ACMA][10] = 127, - [0][1][RTW89_CN][10] = 4, + [0][1][RTW89_CN][10] = 127, [0][1][RTW89_UK][10] = 127, [0][1][RTW89_FCC][12] = 127, [0][1][RTW89_ETSI][12] = 127, @@ -13701,7 +13760,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][12] = 127, [0][1][RTW89_KCC][12] = 127, [0][1][RTW89_ACMA][12] = 127, - [0][1][RTW89_CN][12] = 4, + [0][1][RTW89_CN][12] = 127, [0][1][RTW89_UK][12] = 127, [0][1][RTW89_FCC][14] = 127, [0][1][RTW89_ETSI][14] = 127, @@ -13709,7 +13768,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][14] = 127, [0][1][RTW89_KCC][14] = 127, [0][1][RTW89_ACMA][14] = 127, - [0][1][RTW89_CN][14] = 4, + [0][1][RTW89_CN][14] = 127, [0][1][RTW89_UK][14] = 127, [0][1][RTW89_FCC][15] = 127, [0][1][RTW89_ETSI][15] = 127, @@ -13813,7 +13872,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][38] = 127, [0][1][RTW89_KCC][38] = 127, [0][1][RTW89_ACMA][38] = 127, - [0][1][RTW89_CN][38] = 42, + [0][1][RTW89_CN][38] = 127, [0][1][RTW89_UK][38] = 127, [0][1][RTW89_FCC][40] = 127, [0][1][RTW89_ETSI][40] = 127, @@ -13821,7 +13880,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][40] = 127, [0][1][RTW89_KCC][40] = 127, [0][1][RTW89_ACMA][40] = 127, - [0][1][RTW89_CN][40] = 42, + [0][1][RTW89_CN][40] = 127, [0][1][RTW89_UK][40] = 127, [0][1][RTW89_FCC][42] = 127, [0][1][RTW89_ETSI][42] = 127, @@ -13829,7 +13888,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][42] = 127, [0][1][RTW89_KCC][42] = 127, [0][1][RTW89_ACMA][42] = 127, - [0][1][RTW89_CN][42] = 42, + [0][1][RTW89_CN][42] = 127, [0][1][RTW89_UK][42] = 127, [0][1][RTW89_FCC][44] = 127, [0][1][RTW89_ETSI][44] = 127, @@ -13837,7 +13896,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][44] = 127, [0][1][RTW89_KCC][44] = 127, [0][1][RTW89_ACMA][44] = 127, - [0][1][RTW89_CN][44] = 42, + [0][1][RTW89_CN][44] = 127, [0][1][RTW89_UK][44] = 127, [0][1][RTW89_FCC][46] = 127, [0][1][RTW89_ETSI][46] = 127, @@ -13845,7 +13904,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_IC][46] = 127, [0][1][RTW89_KCC][46] = 127, [0][1][RTW89_ACMA][46] = 127, - [0][1][RTW89_CN][46] = 42, + [0][1][RTW89_CN][46] = 127, [0][1][RTW89_UK][46] = 127, [0][1][RTW89_FCC][48] = 127, [0][1][RTW89_ETSI][48] = 127, @@ -14101,7 +14160,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][0] = 127, [1][1][RTW89_KCC][0] 
= 127, [1][1][RTW89_ACMA][0] = 127, - [1][1][RTW89_CN][0] = 14, + [1][1][RTW89_CN][0] = 127, [1][1][RTW89_UK][0] = 127, [1][1][RTW89_FCC][2] = 127, [1][1][RTW89_ETSI][2] = 127, @@ -14109,7 +14168,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][2] = 127, [1][1][RTW89_KCC][2] = 127, [1][1][RTW89_ACMA][2] = 127, - [1][1][RTW89_CN][2] = 14, + [1][1][RTW89_CN][2] = 127, [1][1][RTW89_UK][2] = 127, [1][1][RTW89_FCC][4] = 127, [1][1][RTW89_ETSI][4] = 127, @@ -14117,7 +14176,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][4] = 127, [1][1][RTW89_KCC][4] = 127, [1][1][RTW89_ACMA][4] = 127, - [1][1][RTW89_CN][4] = 14, + [1][1][RTW89_CN][4] = 127, [1][1][RTW89_UK][4] = 127, [1][1][RTW89_FCC][6] = 127, [1][1][RTW89_ETSI][6] = 127, @@ -14125,7 +14184,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][6] = 127, [1][1][RTW89_KCC][6] = 127, [1][1][RTW89_ACMA][6] = 127, - [1][1][RTW89_CN][6] = 14, + [1][1][RTW89_CN][6] = 127, [1][1][RTW89_UK][6] = 127, [1][1][RTW89_FCC][8] = 127, [1][1][RTW89_ETSI][8] = 127, @@ -14133,7 +14192,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][8] = 127, [1][1][RTW89_KCC][8] = 127, [1][1][RTW89_ACMA][8] = 127, - [1][1][RTW89_CN][8] = 14, + [1][1][RTW89_CN][8] = 127, [1][1][RTW89_UK][8] = 127, [1][1][RTW89_FCC][10] = 127, [1][1][RTW89_ETSI][10] = 127, @@ -14141,7 +14200,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][10] = 127, [1][1][RTW89_KCC][10] = 127, [1][1][RTW89_ACMA][10] = 127, - [1][1][RTW89_CN][10] = 14, + [1][1][RTW89_CN][10] = 127, [1][1][RTW89_UK][10] = 127, [1][1][RTW89_FCC][12] = 127, [1][1][RTW89_ETSI][12] = 127, @@ -14149,7 +14208,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][12] = 127, [1][1][RTW89_KCC][12] = 127, [1][1][RTW89_ACMA][12] = 127, - [1][1][RTW89_CN][12] = 14, + [1][1][RTW89_CN][12] = 127, [1][1][RTW89_UK][12] = 127, [1][1][RTW89_FCC][14] = 127, [1][1][RTW89_ETSI][14] = 127, @@ -14157,7 +14216,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][14] = 127, [1][1][RTW89_KCC][14] = 127, [1][1][RTW89_ACMA][14] = 127, - [1][1][RTW89_CN][14] = 14, + [1][1][RTW89_CN][14] = 127, [1][1][RTW89_UK][14] = 127, [1][1][RTW89_FCC][15] = 127, [1][1][RTW89_ETSI][15] = 127, @@ -14261,7 +14320,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][38] = 127, [1][1][RTW89_KCC][38] = 127, [1][1][RTW89_ACMA][38] = 127, - [1][1][RTW89_CN][38] = 54, + [1][1][RTW89_CN][38] = 127, [1][1][RTW89_UK][38] = 127, [1][1][RTW89_FCC][40] = 127, [1][1][RTW89_ETSI][40] = 127, @@ -14269,7 +14328,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][40] = 127, [1][1][RTW89_KCC][40] = 127, [1][1][RTW89_ACMA][40] = 127, - [1][1][RTW89_CN][40] = 54, + [1][1][RTW89_CN][40] = 127, [1][1][RTW89_UK][40] = 127, [1][1][RTW89_FCC][42] = 127, [1][1][RTW89_ETSI][42] = 127, @@ -14277,7 +14336,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][42] = 127, [1][1][RTW89_KCC][42] = 127, [1][1][RTW89_ACMA][42] = 127, - [1][1][RTW89_CN][42] = 54, + [1][1][RTW89_CN][42] = 127, [1][1][RTW89_UK][42] = 127, [1][1][RTW89_FCC][44] = 127, [1][1][RTW89_ETSI][44] = 127, @@ -14285,7 +14344,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][44] 
= 127, [1][1][RTW89_KCC][44] = 127, [1][1][RTW89_ACMA][44] = 127, - [1][1][RTW89_CN][44] = 54, + [1][1][RTW89_CN][44] = 127, [1][1][RTW89_UK][44] = 127, [1][1][RTW89_FCC][46] = 127, [1][1][RTW89_ETSI][46] = 127, @@ -14293,7 +14352,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_IC][46] = 127, [1][1][RTW89_KCC][46] = 127, [1][1][RTW89_ACMA][46] = 127, - [1][1][RTW89_CN][46] = 54, + [1][1][RTW89_CN][46] = 127, [1][1][RTW89_UK][46] = 127, [1][1][RTW89_FCC][48] = 127, [1][1][RTW89_ETSI][48] = 127, @@ -14549,7 +14608,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][0] = 127, [2][1][RTW89_KCC][0] = 127, [2][1][RTW89_ACMA][0] = 127, - [2][1][RTW89_CN][0] = 28, + [2][1][RTW89_CN][0] = 127, [2][1][RTW89_UK][0] = 127, [2][1][RTW89_FCC][2] = 127, [2][1][RTW89_ETSI][2] = 127, @@ -14557,7 +14616,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][2] = 127, [2][1][RTW89_KCC][2] = 127, [2][1][RTW89_ACMA][2] = 127, - [2][1][RTW89_CN][2] = 28, + [2][1][RTW89_CN][2] = 127, [2][1][RTW89_UK][2] = 127, [2][1][RTW89_FCC][4] = 127, [2][1][RTW89_ETSI][4] = 127, @@ -14565,7 +14624,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][4] = 127, [2][1][RTW89_KCC][4] = 127, [2][1][RTW89_ACMA][4] = 127, - [2][1][RTW89_CN][4] = 28, + [2][1][RTW89_CN][4] = 127, [2][1][RTW89_UK][4] = 127, [2][1][RTW89_FCC][6] = 127, [2][1][RTW89_ETSI][6] = 127, @@ -14573,7 +14632,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][6] = 127, [2][1][RTW89_KCC][6] = 127, [2][1][RTW89_ACMA][6] = 127, - [2][1][RTW89_CN][6] = 28, + [2][1][RTW89_CN][6] = 127, [2][1][RTW89_UK][6] = 127, [2][1][RTW89_FCC][8] = 127, [2][1][RTW89_ETSI][8] = 127, @@ -14581,7 +14640,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][8] = 127, [2][1][RTW89_KCC][8] = 127, [2][1][RTW89_ACMA][8] = 127, - [2][1][RTW89_CN][8] = 28, + [2][1][RTW89_CN][8] = 127, [2][1][RTW89_UK][8] = 127, [2][1][RTW89_FCC][10] = 127, [2][1][RTW89_ETSI][10] = 127, @@ -14589,7 +14648,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][10] = 127, [2][1][RTW89_KCC][10] = 127, [2][1][RTW89_ACMA][10] = 127, - [2][1][RTW89_CN][10] = 28, + [2][1][RTW89_CN][10] = 127, [2][1][RTW89_UK][10] = 127, [2][1][RTW89_FCC][12] = 127, [2][1][RTW89_ETSI][12] = 127, @@ -14597,7 +14656,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][12] = 127, [2][1][RTW89_KCC][12] = 127, [2][1][RTW89_ACMA][12] = 127, - [2][1][RTW89_CN][12] = 28, + [2][1][RTW89_CN][12] = 127, [2][1][RTW89_UK][12] = 127, [2][1][RTW89_FCC][14] = 127, [2][1][RTW89_ETSI][14] = 127, @@ -14605,7 +14664,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][14] = 127, [2][1][RTW89_KCC][14] = 127, [2][1][RTW89_ACMA][14] = 127, - [2][1][RTW89_CN][14] = 28, + [2][1][RTW89_CN][14] = 127, [2][1][RTW89_UK][14] = 127, [2][1][RTW89_FCC][15] = 127, [2][1][RTW89_ETSI][15] = 127, @@ -14709,7 +14768,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][38] = 127, [2][1][RTW89_KCC][38] = 127, [2][1][RTW89_ACMA][38] = 127, - [2][1][RTW89_CN][38] = 56, + [2][1][RTW89_CN][38] = 127, [2][1][RTW89_UK][38] = 127, [2][1][RTW89_FCC][40] = 127, [2][1][RTW89_ETSI][40] = 127, @@ -14717,7 +14776,7 @@ const s8 
rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][40] = 127, [2][1][RTW89_KCC][40] = 127, [2][1][RTW89_ACMA][40] = 127, - [2][1][RTW89_CN][40] = 56, + [2][1][RTW89_CN][40] = 127, [2][1][RTW89_UK][40] = 127, [2][1][RTW89_FCC][42] = 127, [2][1][RTW89_ETSI][42] = 127, @@ -14725,7 +14784,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][42] = 127, [2][1][RTW89_KCC][42] = 127, [2][1][RTW89_ACMA][42] = 127, - [2][1][RTW89_CN][42] = 56, + [2][1][RTW89_CN][42] = 127, [2][1][RTW89_UK][42] = 127, [2][1][RTW89_FCC][44] = 127, [2][1][RTW89_ETSI][44] = 127, @@ -14733,7 +14792,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][44] = 127, [2][1][RTW89_KCC][44] = 127, [2][1][RTW89_ACMA][44] = 127, - [2][1][RTW89_CN][44] = 56, + [2][1][RTW89_CN][44] = 127, [2][1][RTW89_UK][44] = 127, [2][1][RTW89_FCC][46] = 127, [2][1][RTW89_ETSI][46] = 127, @@ -14741,7 +14800,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_IC][46] = 127, [2][1][RTW89_KCC][46] = 127, [2][1][RTW89_ACMA][46] = 127, - [2][1][RTW89_CN][46] = 56, + [2][1][RTW89_CN][46] = 127, [2][1][RTW89_UK][46] = 127, [2][1][RTW89_FCC][48] = 127, [2][1][RTW89_ETSI][48] = 127, @@ -14794,12 +14853,20 @@ const struct rtw89_phy_table rtw89_8851b_phy_nctl_table = { .rf_path = 0, /* don't care */ }; +static const struct rtw89_txpwr_table rtw89_8851b_byr_table = { .data = rtw89_8851b_txpwr_byrate, .size = ARRAY_SIZE(rtw89_8851b_txpwr_byrate), .load = rtw89_phy_load_txpwr_byrate, }; +static +const struct rtw89_txpwr_table rtw89_8851b_byr_table_type2 = { + .data = rtw89_8851b_txpwr_byrate_type2, + .size = ARRAY_SIZE(rtw89_8851b_txpwr_byrate_type2), + .load = rtw89_phy_load_txpwr_byrate, +}; + const struct rtw89_txpwr_track_cfg rtw89_8851b_trk_cfg = { .delta_swingidx_5ga_n = _txpwr_track_delta_swingidx_5ga_n, .delta_swingidx_5ga_p = _txpwr_track_delta_swingidx_5ga_p, @@ -14810,6 +14877,7 @@ const struct rtw89_txpwr_track_cfg rtw89_8851b_trk_cfg = { }; const struct rtw89_rfe_parms rtw89_8851b_dflt_parms = { + .byr_tbl = &rtw89_8851b_byr_table, .rule_2ghz = { .lmt = &rtw89_8851b_txpwr_lmt_2g, .lmt_ru = &rtw89_8851b_txpwr_lmt_ru_2g, @@ -14818,9 +14886,14 @@ const struct rtw89_rfe_parms rtw89_8851b_dflt_parms = { .lmt = &rtw89_8851b_txpwr_lmt_5g, .lmt_ru = &rtw89_8851b_txpwr_lmt_ru_5g, }, + .tx_shape = { + .lmt = &rtw89_8851b_tx_shape_lmt, + .lmt_ru = &rtw89_8851b_tx_shape_lmt_ru, + }, }; static const struct rtw89_rfe_parms rtw89_8851b_rfe_parms_type2 = { + .byr_tbl = &rtw89_8851b_byr_table_type2, .rule_2ghz = { .lmt = &rtw89_8851b_txpwr_lmt_2g_type2, .lmt_ru = &rtw89_8851b_txpwr_lmt_ru_2g_type2, @@ -14829,6 +14902,10 @@ static const struct rtw89_rfe_parms rtw89_8851b_rfe_parms_type2 = { .lmt = &rtw89_8851b_txpwr_lmt_5g_type2, .lmt_ru = &rtw89_8851b_txpwr_lmt_ru_5g_type2, }, + .tx_shape = { + .lmt = &rtw89_8851b_tx_shape_lmt, + .lmt_ru = &rtw89_8851b_tx_shape_lmt_ru, + }, }; const struct rtw89_rfe_parms_conf rtw89_8851b_rfe_parms_conf[] = { diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_table.h b/drivers/net/wireless/realtek/rtw89/rtw8851b_table.h index a8737de02f66..d8cf545d40a0 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8851b_table.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_table.h @@ -11,10 +11,7 @@ extern const struct rtw89_phy_table rtw89_8851b_phy_bb_table; extern const struct rtw89_phy_table rtw89_8851b_phy_bb_gain_table; extern const struct rtw89_phy_table 
rtw89_8851b_phy_radioa_table; extern const struct rtw89_phy_table rtw89_8851b_phy_nctl_table; -extern const struct rtw89_txpwr_table rtw89_8851b_byr_table; extern const struct rtw89_txpwr_track_cfg rtw89_8851b_trk_cfg; -extern const u8 rtw89_8851b_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] - [RTW89_REGD_NUM]; extern const struct rtw89_rfe_parms rtw89_8851b_dflt_parms; extern const struct rtw89_rfe_parms_conf rtw89_8851b_rfe_parms_conf[]; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index 6257414a3b4b..0c36e6180e25 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -478,6 +478,10 @@ static const struct rtw89_dig_regs rtw8852a_dig_regs = { .seg0_pd_reg = R_SEG0R_PD, .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK, .pd_spatial_reuse_en = B_SEG0R_PD_SPATIAL_REUSE_EN_MSK, + .bmode_pd_reg = R_BMODE_PDTH_EN_V1, + .bmode_cca_rssi_limit_en = B_BMODE_PDTH_LIMIT_EN_MSK_V1, + .bmode_pd_lower_bound_reg = R_BMODE_PDTH_V1, + .bmode_rssi_nocca_low_th_mask = B_BMODE_PDTH_LOWER_BOUND_MSK_V1, .p0_lna_init = {R_PATH0_LNA_INIT, B_PATH0_LNA_INIT_IDX_MSK}, .p1_lna_init = {R_PATH1_LNA_INIT, B_PATH1_LNA_INIT_IDX_MSK}, .p0_tia_init = {R_PATH0_TIA_INIT, B_PATH0_TIA_INIT_IDX_MSK}, @@ -704,10 +708,9 @@ static void rtw8852a_set_channel_mac(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, u8 mac_idx) { - u32 rf_mod = rtw89_mac_reg_by_idx(R_AX_WMAC_RFMOD, mac_idx); - u32 sub_carr = rtw89_mac_reg_by_idx(R_AX_TX_SUB_CARRIER_VALUE, - mac_idx); - u32 chk_rate = rtw89_mac_reg_by_idx(R_AX_TXRATE_CHK, mac_idx); + u32 rf_mod = rtw89_mac_reg_by_idx(rtwdev, R_AX_WMAC_RFMOD, mac_idx); + u32 sub_carr = rtw89_mac_reg_by_idx(rtwdev, R_AX_TX_SUB_CARRIER_VALUE, mac_idx); + u32 chk_rate = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXRATE_CHK, mac_idx); u8 txsc20 = 0, txsc40 = 0; switch (chan->band_width) { @@ -1380,13 +1383,13 @@ void rtw8852a_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, pw_ofst); return; } - reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_CTRL, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_CTRL, mac_idx); rtw89_write32_set(rtwdev, reg, B_AX_PWR_UL_TB_CTRL_EN); val_1t = pw_ofst; - reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_1T, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, val_1t); val_2t = max(val_1t - 3, -16); - reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_2T, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_MASK, val_2t); rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[ULTB] Set TB pwr_offset=(%d, %d)\n", val_1t, val_2t); @@ -1621,9 +1624,10 @@ void rtw8852a_bb_tx_mode_switch(struct rtw89_dev *rtwdev, rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 0, idx); } -static void rtw8852a_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, bool bt_en) +static void rtw8852a_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) { - rtw89_phy_write_reg3_tbl(rtwdev, bt_en ? &rtw8852a_btc_preagc_en_defs_tbl : + rtw89_phy_write_reg3_tbl(rtwdev, en ? 
&rtw8852a_btc_preagc_en_defs_tbl : &rtw8852a_btc_preagc_dis_defs_tbl); } @@ -1680,9 +1684,10 @@ void rtw8852a_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val) rtw89_write_rf(rtwdev, path, RR_LUTWE, 0xfffff, 0x0); } -static void rtw8852a_ctrl_btg(struct rtw89_dev *rtwdev, bool btg) +static void rtw8852a_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) { - if (btg) { + if (en) { rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG, B_PATH0_BTG_SHEN, 0x1); rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG, B_PATH1_BTG_SHEN, 0x3); rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0x0); @@ -1963,15 +1968,15 @@ static void rtw8852a_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level) switch (level) { case 0: /* original */ default: - rtw8852a_bb_ctrl_btc_preagc(rtwdev, false); + rtw8852a_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0); btc->dm.wl_lna2 = 0; break; case 1: /* for FDD free-run */ - rtw8852a_bb_ctrl_btc_preagc(rtwdev, true); + rtw8852a_ctrl_nbtg_bt_tx(rtwdev, true, RTW89_PHY_0); btc->dm.wl_lna2 = 0; break; case 2: /* for BTG Co-Rx*/ - rtw8852a_bb_ctrl_btc_preagc(rtwdev, false); + rtw8852a_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0); btc->dm.wl_lna2 = 1; break; } @@ -2022,6 +2027,7 @@ static const struct wiphy_wowlan_support rtw_wowlan_stub_8852a = { static const struct rtw89_chip_ops rtw8852a_chip_ops = { .enable_bb_rf = rtw89_mac_enable_bb_rf, .disable_bb_rf = rtw89_mac_disable_bb_rf, + .bb_preinit = NULL, .bb_reset = rtw8852a_bb_reset, .bb_sethw = rtw8852a_bb_sethw, .read_rf = rtw89_phy_read_rf, @@ -2042,9 +2048,9 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = { .set_txpwr_ctrl = rtw8852a_set_txpwr_ctrl, .init_txpwr_unit = rtw8852a_init_txpwr_unit, .get_thermal = rtw8852a_get_thermal, - .ctrl_btg = rtw8852a_ctrl_btg, + .ctrl_btg_bt_rx = rtw8852a_ctrl_btg_bt_rx, .query_ppdu = rtw8852a_query_ppdu, - .bb_ctrl_btc_preagc = rtw8852a_bb_ctrl_btc_preagc, + .ctrl_nbtg_bt_tx = rtw8852a_ctrl_nbtg_bt_tx, .cfg_txrx_path = NULL, .set_txpwr_ul_tb_offset = rtw8852a_set_txpwr_ul_tb_offset, .pwr_on_func = NULL, @@ -2071,10 +2077,15 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = { const struct rtw89_chip_info rtw8852a_chip_info = { .chip_id = RTL8852A, + .chip_gen = RTW89_CHIP_AX, .ops = &rtw8852a_chip_ops, + .mac_def = &rtw89_mac_gen_ax, + .phy_def = &rtw89_phy_gen_ax, .fw_basename = RTW8852A_FW_BASENAME, .fw_format_max = RTW8852A_FW_FORMAT_MAX, .try_ce_fw = false, + .bbmcu_nr = 0, + .needed_fw_elms = 0, .fifo_size = 458752, .small_fifo_size = false, .dle_scc_rsvd_size = 0, @@ -2094,7 +2105,6 @@ const struct rtw89_chip_info rtw8852a_chip_info = { &rtw89_8852a_phy_radiob_table,}, .nctl_table = &rtw89_8852a_phy_nctl_table, .nctl_post_table = NULL, - .byr_table = &rtw89_8852a_byr_table, .dflt_parms = &rtw89_8852a_dflt_parms, .rfe_parms_conf = NULL, .txpwr_factor_rf = 2, @@ -2107,7 +2117,8 @@ const struct rtw89_chip_info rtw8852a_chip_info = { BIT(NL80211_BAND_5GHZ), .support_bw160 = false, .support_unii4 = false, - .support_ul_tb_ctrl = false, + .ul_tb_waveform_ctrl = false, + .ul_tb_pwr_diff = false, .hw_sec_hdr = false, .rf_path_num = 2, .tx_nss = 2, @@ -2150,6 +2161,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .hci_func_en_addr = R_AX_HCI_FUNC_EN, .h2c_desc_size = sizeof(struct rtw89_txwd_body), .txwd_body_size = sizeof(struct rtw89_txwd_body), + .txwd_info_size = sizeof(struct rtw89_txwd_info), .h2c_ctrl_reg = R_AX_H2CREG_CTRL, .h2c_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_H2C_DEQ_CNT_MASK >> 8}, .h2c_regs = 
rtw8852a_h2c_regs, @@ -2163,6 +2175,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .dcfo_comp_sft = 10, .imr_info = &rtw8852a_imr_info, .rrsr_cfgs = &rtw8852a_rrsr_cfgs, + .bss_clr_vld = {R_BSS_CLR_MAP, B_BSS_CLR_MAP_VLD0}, .bss_clr_map_reg = R_BSS_CLR_MAP, .dma_ch_mask = 0, .edcca_lvl_reg = R_SEG0R_EDCCA_LVL, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c index be54194558ff..495890c180ef 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c @@ -51020,6 +51020,7 @@ const struct rtw89_phy_table rtw89_8852a_phy_nctl_table = { .rf_path = 0, /* don't care */ }; +static const struct rtw89_txpwr_table rtw89_8852a_byr_table = { .data = rtw89_8852a_txpwr_byrate, .size = ARRAY_SIZE(rtw89_8852a_txpwr_byrate), @@ -51049,6 +51050,7 @@ const struct rtw89_phy_dig_gain_table rtw89_8852a_phy_dig_table = { }; const struct rtw89_rfe_parms rtw89_8852a_dflt_parms = { + .byr_tbl = &rtw89_8852a_byr_table, .rule_2ghz = { .lmt = &rtw89_8852a_txpwr_lmt_2g, .lmt_ru = &rtw89_8852a_txpwr_lmt_ru_2g, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.h index 41c379b1044d..7463ae6ee3f9 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.h @@ -11,7 +11,6 @@ extern const struct rtw89_phy_table rtw89_8852a_phy_bb_table; extern const struct rtw89_phy_table rtw89_8852a_phy_radioa_table; extern const struct rtw89_phy_table rtw89_8852a_phy_radiob_table; extern const struct rtw89_phy_table rtw89_8852a_phy_nctl_table; -extern const struct rtw89_txpwr_table rtw89_8852a_byr_table; extern const struct rtw89_phy_dig_gain_table rtw89_8852a_phy_dig_table; extern const struct rtw89_txpwr_track_cfg rtw89_8852a_trk_cfg; extern const struct rtw89_rfe_parms rtw89_8852a_dflt_parms; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c index 718f993da62a..9d4e6f08218d 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c @@ -310,6 +310,10 @@ static const struct rtw89_dig_regs rtw8852b_dig_regs = { .seg0_pd_reg = R_SEG0R_PD_V1, .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK, .pd_spatial_reuse_en = B_SEG0R_PD_SPATIAL_REUSE_EN_MSK_V1, + .bmode_pd_reg = R_BMODE_PDTH_EN_V1, + .bmode_cca_rssi_limit_en = B_BMODE_PDTH_LIMIT_EN_MSK_V1, + .bmode_pd_lower_bound_reg = R_BMODE_PDTH_V1, + .bmode_rssi_nocca_low_th_mask = B_BMODE_PDTH_LOWER_BOUND_MSK_V1, .p0_lna_init = {R_PATH0_LNA_INIT_V1, B_PATH0_LNA_INIT_IDX_MSK}, .p1_lna_init = {R_PATH1_LNA_INIT_V1, B_PATH1_LNA_INIT_IDX_MSK}, .p0_tia_init = {R_PATH0_TIA_INIT_V1, B_PATH0_TIA_INIT_IDX_MSK_V1}, @@ -843,9 +847,9 @@ static void rtw8852b_set_channel_mac(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, u8 mac_idx) { - u32 rf_mod = rtw89_mac_reg_by_idx(R_AX_WMAC_RFMOD, mac_idx); - u32 sub_carr = rtw89_mac_reg_by_idx(R_AX_TX_SUB_CARRIER_VALUE, mac_idx); - u32 chk_rate = rtw89_mac_reg_by_idx(R_AX_TXRATE_CHK, mac_idx); + u32 rf_mod = rtw89_mac_reg_by_idx(rtwdev, R_AX_WMAC_RFMOD, mac_idx); + u32 sub_carr = rtw89_mac_reg_by_idx(rtwdev, R_AX_TX_SUB_CARRIER_VALUE, mac_idx); + u32 chk_rate = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXRATE_CHK, mac_idx); u8 txsc20 = 0, txsc40 = 0; switch (chan->band_width) { @@ -1685,10 +1689,11 @@ static void rtw8852b_set_tx_shape(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, enum 
rtw89_phy_idx phy_idx) { + const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms; u8 band = chan->band_type; u8 regd = rtw89_regd_get(rtwdev, band); - u8 tx_shape_cck = rtw89_8852b_tx_shape[band][RTW89_RS_CCK][regd]; - u8 tx_shape_ofdm = rtw89_8852b_tx_shape[band][RTW89_RS_OFDM][regd]; + u8 tx_shape_cck = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_CCK][regd]; + u8 tx_shape_ofdm = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_OFDM][regd]; if (band == RTW89_BAND_2G) rtw8852b_bb_set_tx_shape_dfir(rtwdev, chan, tx_shape_cck, phy_idx); @@ -1725,14 +1730,14 @@ void rtw8852b_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, return; } - reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_CTRL, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_CTRL, mac_idx); rtw89_write32_set(rtwdev, reg, B_AX_PWR_UL_TB_CTRL_EN); - reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_1T, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, pw_ofst); pw_ofst = max_t(s8, pw_ofst - 3, -16); - reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_2T, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_MASK, pw_ofst); } @@ -1924,15 +1929,17 @@ void rtw8852b_bb_restore_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, rtw89_phy_write32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, bak->tx_pwr, idx); } -static void rtw8852b_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, bool bt_en) +static void rtw8852b_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) { - rtw89_phy_write_reg3_tbl(rtwdev, bt_en ? &rtw8852b_btc_preagc_en_defs_tbl : + rtw89_phy_write_reg3_tbl(rtwdev, en ? &rtw8852b_btc_preagc_en_defs_tbl : &rtw8852b_btc_preagc_dis_defs_tbl); } -static void rtw8852b_ctrl_btg(struct rtw89_dev *rtwdev, bool btg) +static void rtw8852b_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) { - if (btg) { + if (en) { rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1, B_PATH0_BT_SHARE_V1, 0x1); rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1, @@ -2013,9 +2020,9 @@ void rtw8852b_bb_ctrl_rx_path(struct rtw89_dev *rtwdev, if (chan->band_type == RTW89_BAND_2G && (rx_path == RF_B || rx_path == RF_AB)) - rtw8852b_ctrl_btg(rtwdev, true); + rtw8852b_ctrl_btg_bt_rx(rtwdev, true, RTW89_PHY_0); else - rtw8852b_ctrl_btg(rtwdev, false); + rtw8852b_ctrl_btg_bt_rx(rtwdev, false, RTW89_PHY_0); rst_mask0 = B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI; rst_mask1 = B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI; @@ -2341,15 +2348,15 @@ static void rtw8852b_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level) switch (level) { case 0: /* original */ default: - rtw8852b_bb_ctrl_btc_preagc(rtwdev, false); + rtw8852b_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0); btc->dm.wl_lna2 = 0; break; case 1: /* for FDD free-run */ - rtw8852b_bb_ctrl_btc_preagc(rtwdev, true); + rtw8852b_ctrl_nbtg_bt_tx(rtwdev, true, RTW89_PHY_0); btc->dm.wl_lna2 = 0; break; case 2: /* for BTG Co-Rx*/ - rtw8852b_bb_ctrl_btc_preagc(rtwdev, false); + rtw8852b_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0); btc->dm.wl_lna2 = 1; break; } @@ -2445,6 +2452,7 @@ static int rtw8852b_mac_disable_bb_rf(struct rtw89_dev *rtwdev) static const struct rtw89_chip_ops rtw8852b_chip_ops = { .enable_bb_rf = rtw8852b_mac_enable_bb_rf, .disable_bb_rf = rtw8852b_mac_disable_bb_rf, + .bb_preinit = NULL, .bb_reset = rtw8852b_bb_reset, .bb_sethw = rtw8852b_bb_sethw, .read_rf = rtw89_phy_read_rf_v1, @@ -2465,9 +2473,9 @@ static const 
struct rtw89_chip_ops rtw8852b_chip_ops = { .set_txpwr_ctrl = rtw8852b_set_txpwr_ctrl, .init_txpwr_unit = rtw8852b_init_txpwr_unit, .get_thermal = rtw8852b_get_thermal, - .ctrl_btg = rtw8852b_ctrl_btg, + .ctrl_btg_bt_rx = rtw8852b_ctrl_btg_bt_rx, .query_ppdu = rtw8852b_query_ppdu, - .bb_ctrl_btc_preagc = rtw8852b_bb_ctrl_btc_preagc, + .ctrl_nbtg_bt_tx = rtw8852b_ctrl_nbtg_bt_tx, .cfg_txrx_path = rtw8852b_bb_cfg_txrx_path, .set_txpwr_ul_tb_offset = rtw8852b_set_txpwr_ul_tb_offset, .pwr_on_func = rtw8852b_pwr_on_func, @@ -2503,10 +2511,15 @@ static const struct wiphy_wowlan_support rtw_wowlan_stub_8852b = { const struct rtw89_chip_info rtw8852b_chip_info = { .chip_id = RTL8852B, + .chip_gen = RTW89_CHIP_AX, .ops = &rtw8852b_chip_ops, + .mac_def = &rtw89_mac_gen_ax, + .phy_def = &rtw89_phy_gen_ax, .fw_basename = RTW8852B_FW_BASENAME, .fw_format_max = RTW8852B_FW_FORMAT_MAX, .try_ce_fw = true, + .bbmcu_nr = 0, + .needed_fw_elms = 0, .fifo_size = 196608, .small_fifo_size = true, .dle_scc_rsvd_size = 98304, @@ -2526,7 +2539,6 @@ const struct rtw89_chip_info rtw8852b_chip_info = { &rtw89_8852b_phy_radiob_table,}, .nctl_table = &rtw89_8852b_phy_nctl_table, .nctl_post_table = NULL, - .byr_table = &rtw89_8852b_byr_table, .dflt_parms = &rtw89_8852b_dflt_parms, .rfe_parms_conf = NULL, .txpwr_factor_rf = 2, @@ -2539,7 +2551,8 @@ const struct rtw89_chip_info rtw8852b_chip_info = { BIT(NL80211_BAND_5GHZ), .support_bw160 = false, .support_unii4 = true, - .support_ul_tb_ctrl = true, + .ul_tb_waveform_ctrl = true, + .ul_tb_pwr_diff = false, .hw_sec_hdr = false, .rf_path_num = 2, .tx_nss = 2, @@ -2582,6 +2595,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = { .hci_func_en_addr = R_AX_HCI_FUNC_EN, .h2c_desc_size = sizeof(struct rtw89_txwd_body), .txwd_body_size = sizeof(struct rtw89_txwd_body), + .txwd_info_size = sizeof(struct rtw89_txwd_info), .h2c_ctrl_reg = R_AX_H2CREG_CTRL, .h2c_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_H2C_DEQ_CNT_MASK >> 8}, .h2c_regs = rtw8852b_h2c_regs, @@ -2595,6 +2609,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = { .dcfo_comp_sft = 10, .imr_info = &rtw8852b_imr_info, .rrsr_cfgs = &rtw8852b_rrsr_cfgs, + .bss_clr_vld = {R_BSS_CLR_MAP_V1, B_BSS_CLR_MAP_VLD0}, .bss_clr_map_reg = R_BSS_CLR_MAP_V1, .dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) | BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) | diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c index fa018e1f499b..259df67836a0 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c @@ -846,7 +846,7 @@ static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, case ID_NBTXK: rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x011); - iqk_cmd = 0x308 | (1 << (4 + path)); + iqk_cmd = 0x408 | (1 << (4 + path)); break; case ID_NBRXK: rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); @@ -1078,7 +1078,7 @@ static bool _iqk_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 { struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; bool kfail; - u8 gp = 0x3; + u8 gp = 0x2; switch (iqk_info->iqk_band[path]) { case RTW89_BAND_2G: diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c index 17124d851a22..d2ce16e98bac 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c +++ 
b/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c @@ -14666,8 +14666,9 @@ static const s8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = { 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; -const u8 rtw89_8852b_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] - [RTW89_REGD_NUM] = { +static +const u8 rtw89_8852b_tx_shape_lmt[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] + [RTW89_REGD_NUM] = { [0][0][RTW89_ACMA] = 0, [0][0][RTW89_CHILE] = 0, [0][0][RTW89_CN] = 0, @@ -14707,35 +14708,63 @@ const u8 rtw89_8852b_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] }; static +const u8 rtw89_8852b_tx_shape_lmt_ru[RTW89_BAND_NUM][RTW89_REGD_NUM] = { + [0][RTW89_ACMA] = 0, + [0][RTW89_CHILE] = 0, + [0][RTW89_CN] = 0, + [0][RTW89_ETSI] = 0, + [0][RTW89_FCC] = 3, + [0][RTW89_IC] = 3, + [0][RTW89_KCC] = 0, + [0][RTW89_MEXICO] = 3, + [0][RTW89_MKK] = 0, + [0][RTW89_QATAR] = 0, + [0][RTW89_UK] = 0, + [0][RTW89_UKRAINE] = 0, + [1][RTW89_ACMA] = 0, + [1][RTW89_CHILE] = 0, + [1][RTW89_CN] = 0, + [1][RTW89_ETSI] = 0, + [1][RTW89_FCC] = 3, + [1][RTW89_IC] = 3, + [1][RTW89_KCC] = 0, + [1][RTW89_MEXICO] = 3, + [1][RTW89_MKK] = 0, + [1][RTW89_QATAR] = 0, + [1][RTW89_UK] = 0, + [1][RTW89_UKRAINE] = 0, +}; + +static const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [RTW89_RS_LMT_NUM][RTW89_BF_NUM] [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = { - [0][0][0][0][RTW89_WW][0] = 58, - [0][0][0][0][RTW89_WW][1] = 58, - [0][0][0][0][RTW89_WW][2] = 58, - [0][0][0][0][RTW89_WW][3] = 58, - [0][0][0][0][RTW89_WW][4] = 58, - [0][0][0][0][RTW89_WW][5] = 58, - [0][0][0][0][RTW89_WW][6] = 58, - [0][0][0][0][RTW89_WW][7] = 58, - [0][0][0][0][RTW89_WW][8] = 58, - [0][0][0][0][RTW89_WW][9] = 58, - [0][0][0][0][RTW89_WW][10] = 58, - [0][0][0][0][RTW89_WW][11] = 58, + [0][0][0][0][RTW89_WW][0] = 56, + [0][0][0][0][RTW89_WW][1] = 56, + [0][0][0][0][RTW89_WW][2] = 56, + [0][0][0][0][RTW89_WW][3] = 56, + [0][0][0][0][RTW89_WW][4] = 56, + [0][0][0][0][RTW89_WW][5] = 56, + [0][0][0][0][RTW89_WW][6] = 56, + [0][0][0][0][RTW89_WW][7] = 56, + [0][0][0][0][RTW89_WW][8] = 56, + [0][0][0][0][RTW89_WW][9] = 56, + [0][0][0][0][RTW89_WW][10] = 56, + [0][0][0][0][RTW89_WW][11] = 56, [0][0][0][0][RTW89_WW][12] = 56, [0][0][0][0][RTW89_WW][13] = 76, - [0][1][0][0][RTW89_WW][0] = 46, - [0][1][0][0][RTW89_WW][1] = 46, - [0][1][0][0][RTW89_WW][2] = 46, - [0][1][0][0][RTW89_WW][3] = 46, - [0][1][0][0][RTW89_WW][4] = 46, - [0][1][0][0][RTW89_WW][5] = 46, - [0][1][0][0][RTW89_WW][6] = 46, - [0][1][0][0][RTW89_WW][7] = 46, - [0][1][0][0][RTW89_WW][8] = 46, - [0][1][0][0][RTW89_WW][9] = 46, - [0][1][0][0][RTW89_WW][10] = 46, - [0][1][0][0][RTW89_WW][11] = 46, + [0][1][0][0][RTW89_WW][0] = 44, + [0][1][0][0][RTW89_WW][1] = 44, + [0][1][0][0][RTW89_WW][2] = 44, + [0][1][0][0][RTW89_WW][3] = 44, + [0][1][0][0][RTW89_WW][4] = 44, + [0][1][0][0][RTW89_WW][5] = 44, + [0][1][0][0][RTW89_WW][6] = 44, + [0][1][0][0][RTW89_WW][7] = 44, + [0][1][0][0][RTW89_WW][8] = 44, + [0][1][0][0][RTW89_WW][9] = 44, + [0][1][0][0][RTW89_WW][10] = 44, + [0][1][0][0][RTW89_WW][11] = 44, [0][1][0][0][RTW89_WW][12] = 42, [0][1][0][0][RTW89_WW][13] = 64, [1][0][0][0][RTW89_WW][0] = 0, @@ -14743,7 +14772,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_WW][2] = 50, [1][0][0][0][RTW89_WW][3] = 50, [1][0][0][0][RTW89_WW][4] = 50, - [1][0][0][0][RTW89_WW][5] = 58, + [1][0][0][0][RTW89_WW][5] = 56, [1][0][0][0][RTW89_WW][6] = 50, [1][0][0][0][RTW89_WW][7] = 50, [1][0][0][0][RTW89_WW][8] = 50, @@ -14754,10 +14783,10 
@@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_WW][13] = 0, [1][1][0][0][RTW89_WW][0] = 0, [1][1][0][0][RTW89_WW][1] = 0, - [1][1][0][0][RTW89_WW][2] = 46, - [1][1][0][0][RTW89_WW][3] = 46, - [1][1][0][0][RTW89_WW][4] = 46, - [1][1][0][0][RTW89_WW][5] = 46, + [1][1][0][0][RTW89_WW][2] = 44, + [1][1][0][0][RTW89_WW][3] = 44, + [1][1][0][0][RTW89_WW][4] = 44, + [1][1][0][0][RTW89_WW][5] = 44, [1][1][0][0][RTW89_WW][6] = 34, [1][1][0][0][RTW89_WW][7] = 34, [1][1][0][0][RTW89_WW][8] = 34, @@ -14846,7 +14875,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_WW][7] = 58, [1][0][2][0][RTW89_WW][8] = 58, [1][0][2][0][RTW89_WW][9] = 58, - [1][0][2][0][RTW89_WW][10] = 58, + [1][0][2][0][RTW89_WW][10] = 40, [1][0][2][0][RTW89_WW][11] = 0, [1][0][2][0][RTW89_WW][12] = 0, [1][0][2][0][RTW89_WW][13] = 0, @@ -14887,7 +14916,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][0] = 64, [0][0][0][0][RTW89_UKRAINE][0] = 58, [0][0][0][0][RTW89_MEXICO][0] = 78, - [0][0][0][0][RTW89_CN][0] = 58, + [0][0][0][0][RTW89_CN][0] = 56, [0][0][0][0][RTW89_QATAR][0] = 58, [0][0][0][0][RTW89_UK][0] = 58, [0][0][0][0][RTW89_FCC][1] = 78, @@ -14899,7 +14928,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][1] = 64, [0][0][0][0][RTW89_UKRAINE][1] = 58, [0][0][0][0][RTW89_MEXICO][1] = 78, - [0][0][0][0][RTW89_CN][1] = 58, + [0][0][0][0][RTW89_CN][1] = 56, [0][0][0][0][RTW89_QATAR][1] = 58, [0][0][0][0][RTW89_UK][1] = 58, [0][0][0][0][RTW89_FCC][2] = 78, @@ -14911,7 +14940,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][2] = 64, [0][0][0][0][RTW89_UKRAINE][2] = 58, [0][0][0][0][RTW89_MEXICO][2] = 78, - [0][0][0][0][RTW89_CN][2] = 58, + [0][0][0][0][RTW89_CN][2] = 56, [0][0][0][0][RTW89_QATAR][2] = 58, [0][0][0][0][RTW89_UK][2] = 58, [0][0][0][0][RTW89_FCC][3] = 78, @@ -14923,7 +14952,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][3] = 64, [0][0][0][0][RTW89_UKRAINE][3] = 58, [0][0][0][0][RTW89_MEXICO][3] = 78, - [0][0][0][0][RTW89_CN][3] = 58, + [0][0][0][0][RTW89_CN][3] = 56, [0][0][0][0][RTW89_QATAR][3] = 58, [0][0][0][0][RTW89_UK][3] = 58, [0][0][0][0][RTW89_FCC][4] = 78, @@ -14935,7 +14964,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][4] = 64, [0][0][0][0][RTW89_UKRAINE][4] = 58, [0][0][0][0][RTW89_MEXICO][4] = 78, - [0][0][0][0][RTW89_CN][4] = 58, + [0][0][0][0][RTW89_CN][4] = 56, [0][0][0][0][RTW89_QATAR][4] = 58, [0][0][0][0][RTW89_UK][4] = 58, [0][0][0][0][RTW89_FCC][5] = 78, @@ -14947,7 +14976,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][5] = 64, [0][0][0][0][RTW89_UKRAINE][5] = 58, [0][0][0][0][RTW89_MEXICO][5] = 78, - [0][0][0][0][RTW89_CN][5] = 58, + [0][0][0][0][RTW89_CN][5] = 56, [0][0][0][0][RTW89_QATAR][5] = 58, [0][0][0][0][RTW89_UK][5] = 58, [0][0][0][0][RTW89_FCC][6] = 78, @@ -14959,7 +14988,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][6] = 64, [0][0][0][0][RTW89_UKRAINE][6] = 58, [0][0][0][0][RTW89_MEXICO][6] = 78, - [0][0][0][0][RTW89_CN][6] = 58, + [0][0][0][0][RTW89_CN][6] = 56, [0][0][0][0][RTW89_QATAR][6] = 58, [0][0][0][0][RTW89_UK][6] = 58, [0][0][0][0][RTW89_FCC][7] = 78, @@ -14971,7 +15000,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] 
[0][0][0][0][RTW89_CHILE][7] = 64, [0][0][0][0][RTW89_UKRAINE][7] = 58, [0][0][0][0][RTW89_MEXICO][7] = 78, - [0][0][0][0][RTW89_CN][7] = 58, + [0][0][0][0][RTW89_CN][7] = 56, [0][0][0][0][RTW89_QATAR][7] = 58, [0][0][0][0][RTW89_UK][7] = 58, [0][0][0][0][RTW89_FCC][8] = 78, @@ -14983,7 +15012,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][8] = 64, [0][0][0][0][RTW89_UKRAINE][8] = 58, [0][0][0][0][RTW89_MEXICO][8] = 78, - [0][0][0][0][RTW89_CN][8] = 58, + [0][0][0][0][RTW89_CN][8] = 56, [0][0][0][0][RTW89_QATAR][8] = 58, [0][0][0][0][RTW89_UK][8] = 58, [0][0][0][0][RTW89_FCC][9] = 78, @@ -14995,7 +15024,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][9] = 64, [0][0][0][0][RTW89_UKRAINE][9] = 58, [0][0][0][0][RTW89_MEXICO][9] = 78, - [0][0][0][0][RTW89_CN][9] = 58, + [0][0][0][0][RTW89_CN][9] = 56, [0][0][0][0][RTW89_QATAR][9] = 58, [0][0][0][0][RTW89_UK][9] = 58, [0][0][0][0][RTW89_FCC][10] = 78, @@ -15007,7 +15036,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][10] = 66, [0][0][0][0][RTW89_UKRAINE][10] = 58, [0][0][0][0][RTW89_MEXICO][10] = 78, - [0][0][0][0][RTW89_CN][10] = 58, + [0][0][0][0][RTW89_CN][10] = 56, [0][0][0][0][RTW89_QATAR][10] = 58, [0][0][0][0][RTW89_UK][10] = 58, [0][0][0][0][RTW89_FCC][11] = 70, @@ -15019,7 +15048,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][11] = 64, [0][0][0][0][RTW89_UKRAINE][11] = 58, [0][0][0][0][RTW89_MEXICO][11] = 70, - [0][0][0][0][RTW89_CN][11] = 58, + [0][0][0][0][RTW89_CN][11] = 56, [0][0][0][0][RTW89_QATAR][11] = 58, [0][0][0][0][RTW89_UK][11] = 58, [0][0][0][0][RTW89_FCC][12] = 56, @@ -15031,7 +15060,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_CHILE][12] = 56, [0][0][0][0][RTW89_UKRAINE][12] = 58, [0][0][0][0][RTW89_MEXICO][12] = 56, - [0][0][0][0][RTW89_CN][12] = 58, + [0][0][0][0][RTW89_CN][12] = 56, [0][0][0][0][RTW89_QATAR][12] = 58, [0][0][0][0][RTW89_UK][12] = 58, [0][0][0][0][RTW89_FCC][13] = 127, @@ -15055,7 +15084,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][0] = 50, [0][1][0][0][RTW89_UKRAINE][0] = 46, [0][1][0][0][RTW89_MEXICO][0] = 74, - [0][1][0][0][RTW89_CN][0] = 46, + [0][1][0][0][RTW89_CN][0] = 44, [0][1][0][0][RTW89_QATAR][0] = 46, [0][1][0][0][RTW89_UK][0] = 46, [0][1][0][0][RTW89_FCC][1] = 74, @@ -15067,7 +15096,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][1] = 50, [0][1][0][0][RTW89_UKRAINE][1] = 46, [0][1][0][0][RTW89_MEXICO][1] = 74, - [0][1][0][0][RTW89_CN][1] = 46, + [0][1][0][0][RTW89_CN][1] = 44, [0][1][0][0][RTW89_QATAR][1] = 46, [0][1][0][0][RTW89_UK][1] = 46, [0][1][0][0][RTW89_FCC][2] = 74, @@ -15079,7 +15108,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][2] = 50, [0][1][0][0][RTW89_UKRAINE][2] = 46, [0][1][0][0][RTW89_MEXICO][2] = 74, - [0][1][0][0][RTW89_CN][2] = 46, + [0][1][0][0][RTW89_CN][2] = 44, [0][1][0][0][RTW89_QATAR][2] = 46, [0][1][0][0][RTW89_UK][2] = 46, [0][1][0][0][RTW89_FCC][3] = 74, @@ -15091,7 +15120,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][3] = 50, [0][1][0][0][RTW89_UKRAINE][3] = 46, [0][1][0][0][RTW89_MEXICO][3] = 74, - [0][1][0][0][RTW89_CN][3] = 46, + [0][1][0][0][RTW89_CN][3] = 44, [0][1][0][0][RTW89_QATAR][3] = 46, 
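A reading note on the table churn above, since the raw diff carries no units: the chip_info hunks in this same patch show .txpwr_factor_rf = 2, meaning the s8 entries are in 0.25 dBm steps, so the RTW89_CN reductions from 58 to 56 are 14.5 dBm down to 14 dBm, and 127 (the s8 maximum) is the conventional placeholder for band/domain combinations without a usable limit. A minimal decoding sketch; the helper name is invented for illustration:

#include <stdio.h>

/* decode one limit entry; 'factor' is the chip's txpwr_factor_rf */
static double txpwr_entry_to_dbm(signed char raw, unsigned int factor)
{
	return (double)raw / (1 << factor);
}

int main(void)
{
	printf("%.2f dBm\n", txpwr_entry_to_dbm(58, 2));	/* 14.50 dBm */
	printf("%.2f dBm\n", txpwr_entry_to_dbm(56, 2));	/* 14.00 dBm */
	return 0;
}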
[0][1][0][0][RTW89_UK][3] = 46, [0][1][0][0][RTW89_FCC][4] = 74, @@ -15103,7 +15132,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][4] = 50, [0][1][0][0][RTW89_UKRAINE][4] = 46, [0][1][0][0][RTW89_MEXICO][4] = 74, - [0][1][0][0][RTW89_CN][4] = 46, + [0][1][0][0][RTW89_CN][4] = 44, [0][1][0][0][RTW89_QATAR][4] = 46, [0][1][0][0][RTW89_UK][4] = 46, [0][1][0][0][RTW89_FCC][5] = 74, @@ -15115,7 +15144,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][5] = 50, [0][1][0][0][RTW89_UKRAINE][5] = 46, [0][1][0][0][RTW89_MEXICO][5] = 74, - [0][1][0][0][RTW89_CN][5] = 46, + [0][1][0][0][RTW89_CN][5] = 44, [0][1][0][0][RTW89_QATAR][5] = 46, [0][1][0][0][RTW89_UK][5] = 46, [0][1][0][0][RTW89_FCC][6] = 74, @@ -15127,7 +15156,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][6] = 52, [0][1][0][0][RTW89_UKRAINE][6] = 46, [0][1][0][0][RTW89_MEXICO][6] = 74, - [0][1][0][0][RTW89_CN][6] = 46, + [0][1][0][0][RTW89_CN][6] = 44, [0][1][0][0][RTW89_QATAR][6] = 46, [0][1][0][0][RTW89_UK][6] = 46, [0][1][0][0][RTW89_FCC][7] = 74, @@ -15139,7 +15168,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][7] = 50, [0][1][0][0][RTW89_UKRAINE][7] = 46, [0][1][0][0][RTW89_MEXICO][7] = 74, - [0][1][0][0][RTW89_CN][7] = 46, + [0][1][0][0][RTW89_CN][7] = 44, [0][1][0][0][RTW89_QATAR][7] = 46, [0][1][0][0][RTW89_UK][7] = 46, [0][1][0][0][RTW89_FCC][8] = 74, @@ -15151,7 +15180,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][8] = 50, [0][1][0][0][RTW89_UKRAINE][8] = 46, [0][1][0][0][RTW89_MEXICO][8] = 74, - [0][1][0][0][RTW89_CN][8] = 46, + [0][1][0][0][RTW89_CN][8] = 44, [0][1][0][0][RTW89_QATAR][8] = 46, [0][1][0][0][RTW89_UK][8] = 46, [0][1][0][0][RTW89_FCC][9] = 74, @@ -15163,7 +15192,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][9] = 50, [0][1][0][0][RTW89_UKRAINE][9] = 46, [0][1][0][0][RTW89_MEXICO][9] = 74, - [0][1][0][0][RTW89_CN][9] = 46, + [0][1][0][0][RTW89_CN][9] = 44, [0][1][0][0][RTW89_QATAR][9] = 46, [0][1][0][0][RTW89_UK][9] = 46, [0][1][0][0][RTW89_FCC][10] = 74, @@ -15175,7 +15204,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][10] = 52, [0][1][0][0][RTW89_UKRAINE][10] = 46, [0][1][0][0][RTW89_MEXICO][10] = 74, - [0][1][0][0][RTW89_CN][10] = 46, + [0][1][0][0][RTW89_CN][10] = 44, [0][1][0][0][RTW89_QATAR][10] = 46, [0][1][0][0][RTW89_UK][10] = 46, [0][1][0][0][RTW89_FCC][11] = 54, @@ -15187,7 +15216,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][11] = 50, [0][1][0][0][RTW89_UKRAINE][11] = 46, [0][1][0][0][RTW89_MEXICO][11] = 54, - [0][1][0][0][RTW89_CN][11] = 46, + [0][1][0][0][RTW89_CN][11] = 44, [0][1][0][0][RTW89_QATAR][11] = 46, [0][1][0][0][RTW89_UK][11] = 46, [0][1][0][0][RTW89_FCC][12] = 42, @@ -15199,7 +15228,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_CHILE][12] = 42, [0][1][0][0][RTW89_UKRAINE][12] = 46, [0][1][0][0][RTW89_MEXICO][12] = 42, - [0][1][0][0][RTW89_CN][12] = 46, + [0][1][0][0][RTW89_CN][12] = 44, [0][1][0][0][RTW89_QATAR][12] = 46, [0][1][0][0][RTW89_UK][12] = 46, [0][1][0][0][RTW89_FCC][13] = 127, @@ -15247,7 +15276,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_CHILE][2] = 62, [1][0][0][0][RTW89_UKRAINE][2] = 58, 
[1][0][0][0][RTW89_MEXICO][2] = 50, - [1][0][0][0][RTW89_CN][2] = 58, + [1][0][0][0][RTW89_CN][2] = 56, [1][0][0][0][RTW89_QATAR][2] = 58, [1][0][0][0][RTW89_UK][2] = 58, [1][0][0][0][RTW89_FCC][3] = 50, @@ -15259,7 +15288,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_CHILE][3] = 62, [1][0][0][0][RTW89_UKRAINE][3] = 58, [1][0][0][0][RTW89_MEXICO][3] = 50, - [1][0][0][0][RTW89_CN][3] = 58, + [1][0][0][0][RTW89_CN][3] = 56, [1][0][0][0][RTW89_QATAR][3] = 58, [1][0][0][0][RTW89_UK][3] = 58, [1][0][0][0][RTW89_FCC][4] = 50, @@ -15271,7 +15300,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_CHILE][4] = 62, [1][0][0][0][RTW89_UKRAINE][4] = 58, [1][0][0][0][RTW89_MEXICO][4] = 50, - [1][0][0][0][RTW89_CN][4] = 58, + [1][0][0][0][RTW89_CN][4] = 56, [1][0][0][0][RTW89_QATAR][4] = 58, [1][0][0][0][RTW89_UK][4] = 58, [1][0][0][0][RTW89_FCC][5] = 66, @@ -15283,7 +15312,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_CHILE][5] = 62, [1][0][0][0][RTW89_UKRAINE][5] = 58, [1][0][0][0][RTW89_MEXICO][5] = 66, - [1][0][0][0][RTW89_CN][5] = 58, + [1][0][0][0][RTW89_CN][5] = 56, [1][0][0][0][RTW89_QATAR][5] = 58, [1][0][0][0][RTW89_UK][5] = 58, [1][0][0][0][RTW89_FCC][6] = 50, @@ -15295,7 +15324,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_CHILE][6] = 62, [1][0][0][0][RTW89_UKRAINE][6] = 58, [1][0][0][0][RTW89_MEXICO][6] = 50, - [1][0][0][0][RTW89_CN][6] = 58, + [1][0][0][0][RTW89_CN][6] = 56, [1][0][0][0][RTW89_QATAR][6] = 58, [1][0][0][0][RTW89_UK][6] = 58, [1][0][0][0][RTW89_FCC][7] = 50, @@ -15307,7 +15336,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_CHILE][7] = 62, [1][0][0][0][RTW89_UKRAINE][7] = 58, [1][0][0][0][RTW89_MEXICO][7] = 50, - [1][0][0][0][RTW89_CN][7] = 58, + [1][0][0][0][RTW89_CN][7] = 56, [1][0][0][0][RTW89_QATAR][7] = 58, [1][0][0][0][RTW89_UK][7] = 58, [1][0][0][0][RTW89_FCC][8] = 50, @@ -15319,7 +15348,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_CHILE][8] = 62, [1][0][0][0][RTW89_UKRAINE][8] = 58, [1][0][0][0][RTW89_MEXICO][8] = 50, - [1][0][0][0][RTW89_CN][8] = 58, + [1][0][0][0][RTW89_CN][8] = 56, [1][0][0][0][RTW89_QATAR][8] = 58, [1][0][0][0][RTW89_UK][8] = 58, [1][0][0][0][RTW89_FCC][9] = 42, @@ -15331,7 +15360,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_CHILE][9] = 42, [1][0][0][0][RTW89_UKRAINE][9] = 58, [1][0][0][0][RTW89_MEXICO][9] = 42, - [1][0][0][0][RTW89_CN][9] = 58, + [1][0][0][0][RTW89_CN][9] = 56, [1][0][0][0][RTW89_QATAR][9] = 58, [1][0][0][0][RTW89_UK][9] = 58, [1][0][0][0][RTW89_FCC][10] = 30, @@ -15343,7 +15372,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_CHILE][10] = 30, [1][0][0][0][RTW89_UKRAINE][10] = 58, [1][0][0][0][RTW89_MEXICO][10] = 30, - [1][0][0][0][RTW89_CN][10] = 58, + [1][0][0][0][RTW89_CN][10] = 56, [1][0][0][0][RTW89_QATAR][10] = 58, [1][0][0][0][RTW89_UK][10] = 58, [1][0][0][0][RTW89_FCC][11] = 127, @@ -15415,7 +15444,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_CHILE][2] = 50, [1][1][0][0][RTW89_UKRAINE][2] = 46, [1][1][0][0][RTW89_MEXICO][2] = 46, - [1][1][0][0][RTW89_CN][2] = 46, + [1][1][0][0][RTW89_CN][2] = 44, [1][1][0][0][RTW89_QATAR][2] = 46, [1][1][0][0][RTW89_UK][2] = 46, [1][1][0][0][RTW89_FCC][3] = 46, @@ -15427,7 +15456,7 @@ const s8 
rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_CHILE][3] = 50, [1][1][0][0][RTW89_UKRAINE][3] = 46, [1][1][0][0][RTW89_MEXICO][3] = 46, - [1][1][0][0][RTW89_CN][3] = 46, + [1][1][0][0][RTW89_CN][3] = 44, [1][1][0][0][RTW89_QATAR][3] = 46, [1][1][0][0][RTW89_UK][3] = 46, [1][1][0][0][RTW89_FCC][4] = 46, @@ -15439,7 +15468,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_CHILE][4] = 50, [1][1][0][0][RTW89_UKRAINE][4] = 46, [1][1][0][0][RTW89_MEXICO][4] = 46, - [1][1][0][0][RTW89_CN][4] = 46, + [1][1][0][0][RTW89_CN][4] = 44, [1][1][0][0][RTW89_QATAR][4] = 46, [1][1][0][0][RTW89_UK][4] = 46, [1][1][0][0][RTW89_FCC][5] = 62, @@ -15451,7 +15480,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_CHILE][5] = 50, [1][1][0][0][RTW89_UKRAINE][5] = 46, [1][1][0][0][RTW89_MEXICO][5] = 62, - [1][1][0][0][RTW89_CN][5] = 46, + [1][1][0][0][RTW89_CN][5] = 44, [1][1][0][0][RTW89_QATAR][5] = 46, [1][1][0][0][RTW89_UK][5] = 46, [1][1][0][0][RTW89_FCC][6] = 34, @@ -15463,7 +15492,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_CHILE][6] = 50, [1][1][0][0][RTW89_UKRAINE][6] = 46, [1][1][0][0][RTW89_MEXICO][6] = 34, - [1][1][0][0][RTW89_CN][6] = 46, + [1][1][0][0][RTW89_CN][6] = 44, [1][1][0][0][RTW89_QATAR][6] = 46, [1][1][0][0][RTW89_UK][6] = 46, [1][1][0][0][RTW89_FCC][7] = 34, @@ -15475,7 +15504,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_CHILE][7] = 50, [1][1][0][0][RTW89_UKRAINE][7] = 46, [1][1][0][0][RTW89_MEXICO][7] = 34, - [1][1][0][0][RTW89_CN][7] = 46, + [1][1][0][0][RTW89_CN][7] = 44, [1][1][0][0][RTW89_QATAR][7] = 46, [1][1][0][0][RTW89_UK][7] = 46, [1][1][0][0][RTW89_FCC][8] = 34, @@ -15487,7 +15516,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_CHILE][8] = 50, [1][1][0][0][RTW89_UKRAINE][8] = 46, [1][1][0][0][RTW89_MEXICO][8] = 34, - [1][1][0][0][RTW89_CN][8] = 46, + [1][1][0][0][RTW89_CN][8] = 44, [1][1][0][0][RTW89_QATAR][8] = 46, [1][1][0][0][RTW89_UK][8] = 46, [1][1][0][0][RTW89_FCC][9] = 30, @@ -15499,7 +15528,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_CHILE][9] = 30, [1][1][0][0][RTW89_UKRAINE][9] = 46, [1][1][0][0][RTW89_MEXICO][9] = 30, - [1][1][0][0][RTW89_CN][9] = 46, + [1][1][0][0][RTW89_CN][9] = 44, [1][1][0][0][RTW89_QATAR][9] = 46, [1][1][0][0][RTW89_UK][9] = 46, [1][1][0][0][RTW89_FCC][10] = 30, @@ -15511,7 +15540,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_CHILE][10] = 30, [1][1][0][0][RTW89_UKRAINE][10] = 46, [1][1][0][0][RTW89_MEXICO][10] = 30, - [1][1][0][0][RTW89_CN][10] = 46, + [1][1][0][0][RTW89_CN][10] = 44, [1][1][0][0][RTW89_QATAR][10] = 46, [1][1][0][0][RTW89_UK][10] = 46, [1][1][0][0][RTW89_FCC][11] = 127, @@ -16519,7 +16548,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_CHILE][10] = 66, [1][0][2][0][RTW89_UKRAINE][10] = 58, [1][0][2][0][RTW89_MEXICO][10] = 66, - [1][0][2][0][RTW89_CN][10] = 58, + [1][0][2][0][RTW89_CN][10] = 40, [1][0][2][0][RTW89_QATAR][10] = 58, [1][0][2][0][RTW89_UK][10] = 58, [1][0][2][0][RTW89_FCC][11] = 127, @@ -16687,7 +16716,7 @@ const s8 rtw89_8852b_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_CHILE][10] = 38, [1][1][2][0][RTW89_UKRAINE][10] = 46, [1][1][2][0][RTW89_MEXICO][10] = 38, - [1][1][2][0][RTW89_CN][10] = 46, + [1][1][2][0][RTW89_CN][10] = 40, 
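Note how the RTW89_CN changes above are mirrored in the RTW89_WW rows earlier in this table (for example both the [1][0][2][0][...][10] WW and CN entries land on 40): the worldwide row appears to be maintained as the minimum across domains because it is what the lookup falls back to. A sketch of that fallback, assuming kernel s8/u8 types and flattening the real rtw89_phy_read_txpwr_limit() indexing down to two dimensions:

/* hypothetical, flattened version of the limit lookup */
static s8 lookup_lmt(const s8 lmt[RTW89_REGD_NUM][RTW89_2G_CH_NUM],
		     u8 regd, u8 ch_idx)
{
	s8 val = lmt[regd][ch_idx];

	/* domains left out of the designated initializer read back as 0,
	 * so fall back to the worldwide row in that case
	 */
	return val ? val : lmt[RTW89_WW][ch_idx];
}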
[1][1][2][0][RTW89_QATAR][10] = 46, [1][1][2][0][RTW89_UK][10] = 46, [1][1][2][0][RTW89_FCC][11] = 127, @@ -16907,7 +16936,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_WW][8] = 52, [0][0][1][0][RTW89_WW][10] = 52, [0][0][1][0][RTW89_WW][12] = 52, - [0][0][1][0][RTW89_WW][14] = 52, + [0][0][1][0][RTW89_WW][14] = 1, [0][0][1][0][RTW89_WW][15] = 52, [0][0][1][0][RTW89_WW][17] = 52, [0][0][1][0][RTW89_WW][19] = 52, @@ -16928,7 +16957,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_WW][48] = 78, [0][0][1][0][RTW89_WW][50] = 78, [0][0][1][0][RTW89_WW][52] = 78, - [0][1][1][0][RTW89_WW][0] = 30, + [0][1][1][0][RTW89_WW][0] = 1, [0][1][1][0][RTW89_WW][2] = 32, [0][1][1][0][RTW89_WW][4] = 30, [0][1][1][0][RTW89_WW][6] = 30, @@ -17198,7 +17227,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][14] = 78, [0][0][1][0][RTW89_CN][14] = 58, [0][0][1][0][RTW89_QATAR][14] = 58, - [0][0][1][0][RTW89_UK][14] = 58, + [0][0][1][0][RTW89_UK][14] = 1, [0][0][1][0][RTW89_FCC][15] = 76, [0][0][1][0][RTW89_ETSI][15] = 58, [0][0][1][0][RTW89_MKK][15] = 76, @@ -17352,7 +17381,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_CHILE][38] = 68, [0][0][1][0][RTW89_UKRAINE][38] = 28, [0][0][1][0][RTW89_MEXICO][38] = 78, - [0][0][1][0][RTW89_CN][38] = 76, + [0][0][1][0][RTW89_CN][38] = 62, [0][0][1][0][RTW89_QATAR][38] = 28, [0][0][1][0][RTW89_UK][38] = 58, [0][0][1][0][RTW89_FCC][40] = 78, @@ -17364,7 +17393,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_CHILE][40] = 68, [0][0][1][0][RTW89_UKRAINE][40] = 28, [0][0][1][0][RTW89_MEXICO][40] = 78, - [0][0][1][0][RTW89_CN][40] = 76, + [0][0][1][0][RTW89_CN][40] = 62, [0][0][1][0][RTW89_QATAR][40] = 28, [0][0][1][0][RTW89_UK][40] = 58, [0][0][1][0][RTW89_FCC][42] = 78, @@ -17376,7 +17405,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_CHILE][42] = 66, [0][0][1][0][RTW89_UKRAINE][42] = 28, [0][0][1][0][RTW89_MEXICO][42] = 78, - [0][0][1][0][RTW89_CN][42] = 76, + [0][0][1][0][RTW89_CN][42] = 62, [0][0][1][0][RTW89_QATAR][42] = 28, [0][0][1][0][RTW89_UK][42] = 58, [0][0][1][0][RTW89_FCC][44] = 78, @@ -17388,7 +17417,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_CHILE][44] = 68, [0][0][1][0][RTW89_UKRAINE][44] = 28, [0][0][1][0][RTW89_MEXICO][44] = 78, - [0][0][1][0][RTW89_CN][44] = 76, + [0][0][1][0][RTW89_CN][44] = 62, [0][0][1][0][RTW89_QATAR][44] = 28, [0][0][1][0][RTW89_UK][44] = 58, [0][0][1][0][RTW89_FCC][46] = 78, @@ -17400,7 +17429,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_CHILE][46] = 68, [0][0][1][0][RTW89_UKRAINE][46] = 28, [0][0][1][0][RTW89_MEXICO][46] = 78, - [0][0][1][0][RTW89_CN][46] = 76, + [0][0][1][0][RTW89_CN][46] = 62, [0][0][1][0][RTW89_QATAR][46] = 28, [0][0][1][0][RTW89_UK][46] = 58, [0][0][1][0][RTW89_FCC][48] = 78, @@ -17450,7 +17479,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][0] = 50, [0][1][1][0][RTW89_CN][0] = 46, [0][1][1][0][RTW89_QATAR][0] = 46, - [0][1][1][0][RTW89_UK][0] = 46, + [0][1][1][0][RTW89_UK][0] = 1, [0][1][1][0][RTW89_FCC][2] = 68, [0][1][1][0][RTW89_ETSI][2] = 46, [0][1][1][0][RTW89_MKK][2] = 48, @@ -17688,7 +17717,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_CHILE][38] = 48, 
[0][1][1][0][RTW89_UKRAINE][38] = 16, [0][1][1][0][RTW89_MEXICO][38] = 78, - [0][1][1][0][RTW89_CN][38] = 76, + [0][1][1][0][RTW89_CN][38] = 62, [0][1][1][0][RTW89_QATAR][38] = 16, [0][1][1][0][RTW89_UK][38] = 46, [0][1][1][0][RTW89_FCC][40] = 78, @@ -17700,7 +17729,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_CHILE][40] = 48, [0][1][1][0][RTW89_UKRAINE][40] = 16, [0][1][1][0][RTW89_MEXICO][40] = 78, - [0][1][1][0][RTW89_CN][40] = 76, + [0][1][1][0][RTW89_CN][40] = 62, [0][1][1][0][RTW89_QATAR][40] = 16, [0][1][1][0][RTW89_UK][40] = 46, [0][1][1][0][RTW89_FCC][42] = 78, @@ -17712,7 +17741,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_CHILE][42] = 48, [0][1][1][0][RTW89_UKRAINE][42] = 16, [0][1][1][0][RTW89_MEXICO][42] = 78, - [0][1][1][0][RTW89_CN][42] = 76, + [0][1][1][0][RTW89_CN][42] = 62, [0][1][1][0][RTW89_QATAR][42] = 16, [0][1][1][0][RTW89_UK][42] = 46, [0][1][1][0][RTW89_FCC][44] = 78, @@ -17724,7 +17753,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_CHILE][44] = 48, [0][1][1][0][RTW89_UKRAINE][44] = 16, [0][1][1][0][RTW89_MEXICO][44] = 78, - [0][1][1][0][RTW89_CN][44] = 76, + [0][1][1][0][RTW89_CN][44] = 62, [0][1][1][0][RTW89_QATAR][44] = 16, [0][1][1][0][RTW89_UK][44] = 46, [0][1][1][0][RTW89_FCC][46] = 78, @@ -17736,7 +17765,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_CHILE][46] = 48, [0][1][1][0][RTW89_UKRAINE][46] = 16, [0][1][1][0][RTW89_MEXICO][46] = 78, - [0][1][1][0][RTW89_CN][46] = 76, + [0][1][1][0][RTW89_CN][46] = 62, [0][1][1][0][RTW89_QATAR][46] = 16, [0][1][1][0][RTW89_UK][46] = 46, [0][1][1][0][RTW89_FCC][48] = 56, @@ -17784,7 +17813,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_CHILE][0] = 42, [0][0][2][0][RTW89_UKRAINE][0] = 52, [0][0][2][0][RTW89_MEXICO][0] = 62, - [0][0][2][0][RTW89_CN][0] = 60, + [0][0][2][0][RTW89_CN][0] = 58, [0][0][2][0][RTW89_QATAR][0] = 60, [0][0][2][0][RTW89_UK][0] = 60, [0][0][2][0][RTW89_FCC][2] = 78, @@ -17796,7 +17825,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_CHILE][2] = 42, [0][0][2][0][RTW89_UKRAINE][2] = 52, [0][0][2][0][RTW89_MEXICO][2] = 62, - [0][0][2][0][RTW89_CN][2] = 60, + [0][0][2][0][RTW89_CN][2] = 58, [0][0][2][0][RTW89_QATAR][2] = 60, [0][0][2][0][RTW89_UK][2] = 60, [0][0][2][0][RTW89_FCC][4] = 78, @@ -17808,7 +17837,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_CHILE][4] = 42, [0][0][2][0][RTW89_UKRAINE][4] = 52, [0][0][2][0][RTW89_MEXICO][4] = 62, - [0][0][2][0][RTW89_CN][4] = 60, + [0][0][2][0][RTW89_CN][4] = 58, [0][0][2][0][RTW89_QATAR][4] = 60, [0][0][2][0][RTW89_UK][4] = 60, [0][0][2][0][RTW89_FCC][6] = 78, @@ -17820,7 +17849,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_CHILE][6] = 42, [0][0][2][0][RTW89_UKRAINE][6] = 52, [0][0][2][0][RTW89_MEXICO][6] = 62, - [0][0][2][0][RTW89_CN][6] = 60, + [0][0][2][0][RTW89_CN][6] = 58, [0][0][2][0][RTW89_QATAR][6] = 60, [0][0][2][0][RTW89_UK][6] = 60, [0][0][2][0][RTW89_FCC][8] = 78, @@ -18024,7 +18053,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_CHILE][38] = 64, [0][0][2][0][RTW89_UKRAINE][38] = 28, [0][0][2][0][RTW89_MEXICO][38] = 78, - [0][0][2][0][RTW89_CN][38] = 76, + [0][0][2][0][RTW89_CN][38] = 62, [0][0][2][0][RTW89_QATAR][38] = 28, [0][0][2][0][RTW89_UK][38] = 60, 
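The set_tx_shape() hunks earlier in this patch replace direct reads of the exported rtw89_8852b_tx_shape array with an indirection through rtwdev->rfe_parms, and the table.c/table.h hunks make the per-chip tables static and hang them off the parms instead; the by-rate table gets the same treatment via .byr_tbl, so type-2 RFE boards can substitute their own copies. Judging from the array dimensions visible in the diff, the fields added to the parms struct look roughly like the sketch below (a guess at the layout, not the verbatim declaration; the final lookup line is copied from the rewritten rtw8852b_set_tx_shape()):

struct rtw89_rfe_parms {
	const struct rtw89_txpwr_table *byr_tbl;
	/* ... existing rule_2ghz / rule_5ghz members ... */
	struct {
		const u8 (*lmt)[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM]
			       [RTW89_REGD_NUM];
		const u8 (*lmt_ru)[RTW89_BAND_NUM][RTW89_REGD_NUM];
	} tx_shape;
};

u8 tx_shape_cck = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_CCK][regd];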
[0][0][2][0][RTW89_FCC][40] = 78, @@ -18036,7 +18065,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_CHILE][40] = 64, [0][0][2][0][RTW89_UKRAINE][40] = 28, [0][0][2][0][RTW89_MEXICO][40] = 78, - [0][0][2][0][RTW89_CN][40] = 76, + [0][0][2][0][RTW89_CN][40] = 62, [0][0][2][0][RTW89_QATAR][40] = 28, [0][0][2][0][RTW89_UK][40] = 60, [0][0][2][0][RTW89_FCC][42] = 78, @@ -18048,7 +18077,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_CHILE][42] = 64, [0][0][2][0][RTW89_UKRAINE][42] = 28, [0][0][2][0][RTW89_MEXICO][42] = 78, - [0][0][2][0][RTW89_CN][42] = 76, + [0][0][2][0][RTW89_CN][42] = 62, [0][0][2][0][RTW89_QATAR][42] = 28, [0][0][2][0][RTW89_UK][42] = 60, [0][0][2][0][RTW89_FCC][44] = 78, @@ -18060,7 +18089,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_CHILE][44] = 66, [0][0][2][0][RTW89_UKRAINE][44] = 28, [0][0][2][0][RTW89_MEXICO][44] = 78, - [0][0][2][0][RTW89_CN][44] = 76, + [0][0][2][0][RTW89_CN][44] = 62, [0][0][2][0][RTW89_QATAR][44] = 28, [0][0][2][0][RTW89_UK][44] = 60, [0][0][2][0][RTW89_FCC][46] = 78, @@ -18072,7 +18101,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_CHILE][46] = 66, [0][0][2][0][RTW89_UKRAINE][46] = 28, [0][0][2][0][RTW89_MEXICO][46] = 78, - [0][0][2][0][RTW89_CN][46] = 76, + [0][0][2][0][RTW89_CN][46] = 62, [0][0][2][0][RTW89_QATAR][46] = 28, [0][0][2][0][RTW89_UK][46] = 60, [0][0][2][0][RTW89_FCC][48] = 78, @@ -18120,7 +18149,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_CHILE][0] = 30, [0][1][2][0][RTW89_UKRAINE][0] = 40, [0][1][2][0][RTW89_MEXICO][0] = 50, - [0][1][2][0][RTW89_CN][0] = 48, + [0][1][2][0][RTW89_CN][0] = 46, [0][1][2][0][RTW89_QATAR][0] = 48, [0][1][2][0][RTW89_UK][0] = 48, [0][1][2][0][RTW89_FCC][2] = 70, @@ -18132,7 +18161,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_CHILE][2] = 30, [0][1][2][0][RTW89_UKRAINE][2] = 40, [0][1][2][0][RTW89_MEXICO][2] = 50, - [0][1][2][0][RTW89_CN][2] = 48, + [0][1][2][0][RTW89_CN][2] = 46, [0][1][2][0][RTW89_QATAR][2] = 48, [0][1][2][0][RTW89_UK][2] = 48, [0][1][2][0][RTW89_FCC][4] = 70, @@ -18144,7 +18173,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_CHILE][4] = 30, [0][1][2][0][RTW89_UKRAINE][4] = 40, [0][1][2][0][RTW89_MEXICO][4] = 50, - [0][1][2][0][RTW89_CN][4] = 48, + [0][1][2][0][RTW89_CN][4] = 46, [0][1][2][0][RTW89_QATAR][4] = 48, [0][1][2][0][RTW89_UK][4] = 48, [0][1][2][0][RTW89_FCC][6] = 70, @@ -18156,7 +18185,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_CHILE][6] = 30, [0][1][2][0][RTW89_UKRAINE][6] = 40, [0][1][2][0][RTW89_MEXICO][6] = 50, - [0][1][2][0][RTW89_CN][6] = 48, + [0][1][2][0][RTW89_CN][6] = 46, [0][1][2][0][RTW89_QATAR][6] = 48, [0][1][2][0][RTW89_UK][6] = 48, [0][1][2][0][RTW89_FCC][8] = 70, @@ -18360,7 +18389,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_CHILE][38] = 50, [0][1][2][0][RTW89_UKRAINE][38] = 16, [0][1][2][0][RTW89_MEXICO][38] = 78, - [0][1][2][0][RTW89_CN][38] = 76, + [0][1][2][0][RTW89_CN][38] = 62, [0][1][2][0][RTW89_QATAR][38] = 16, [0][1][2][0][RTW89_UK][38] = 48, [0][1][2][0][RTW89_FCC][40] = 78, @@ -18372,7 +18401,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_CHILE][40] = 50, [0][1][2][0][RTW89_UKRAINE][40] = 16, 
[0][1][2][0][RTW89_MEXICO][40] = 78, - [0][1][2][0][RTW89_CN][40] = 76, + [0][1][2][0][RTW89_CN][40] = 62, [0][1][2][0][RTW89_QATAR][40] = 16, [0][1][2][0][RTW89_UK][40] = 48, [0][1][2][0][RTW89_FCC][42] = 78, @@ -18384,7 +18413,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_CHILE][42] = 52, [0][1][2][0][RTW89_UKRAINE][42] = 16, [0][1][2][0][RTW89_MEXICO][42] = 78, - [0][1][2][0][RTW89_CN][42] = 76, + [0][1][2][0][RTW89_CN][42] = 62, [0][1][2][0][RTW89_QATAR][42] = 16, [0][1][2][0][RTW89_UK][42] = 48, [0][1][2][0][RTW89_FCC][44] = 78, @@ -18396,7 +18425,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_CHILE][44] = 52, [0][1][2][0][RTW89_UKRAINE][44] = 16, [0][1][2][0][RTW89_MEXICO][44] = 78, - [0][1][2][0][RTW89_CN][44] = 76, + [0][1][2][0][RTW89_CN][44] = 62, [0][1][2][0][RTW89_QATAR][44] = 16, [0][1][2][0][RTW89_UK][44] = 48, [0][1][2][0][RTW89_FCC][46] = 78, @@ -18408,7 +18437,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_CHILE][46] = 52, [0][1][2][0][RTW89_UKRAINE][46] = 16, [0][1][2][0][RTW89_MEXICO][46] = 78, - [0][1][2][0][RTW89_CN][46] = 76, + [0][1][2][0][RTW89_CN][46] = 62, [0][1][2][0][RTW89_QATAR][46] = 16, [0][1][2][0][RTW89_UK][46] = 48, [0][1][2][0][RTW89_FCC][48] = 58, @@ -18456,7 +18485,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_CHILE][0] = 14, [0][1][2][1][RTW89_UKRAINE][0] = 28, [0][1][2][1][RTW89_MEXICO][0] = 50, - [0][1][2][1][RTW89_CN][0] = 36, + [0][1][2][1][RTW89_CN][0] = 34, [0][1][2][1][RTW89_QATAR][0] = 36, [0][1][2][1][RTW89_UK][0] = 36, [0][1][2][1][RTW89_FCC][2] = 68, @@ -18468,7 +18497,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_CHILE][2] = 14, [0][1][2][1][RTW89_UKRAINE][2] = 28, [0][1][2][1][RTW89_MEXICO][2] = 50, - [0][1][2][1][RTW89_CN][2] = 36, + [0][1][2][1][RTW89_CN][2] = 34, [0][1][2][1][RTW89_QATAR][2] = 36, [0][1][2][1][RTW89_UK][2] = 36, [0][1][2][1][RTW89_FCC][4] = 68, @@ -18480,7 +18509,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_CHILE][4] = 14, [0][1][2][1][RTW89_UKRAINE][4] = 28, [0][1][2][1][RTW89_MEXICO][4] = 50, - [0][1][2][1][RTW89_CN][4] = 36, + [0][1][2][1][RTW89_CN][4] = 34, [0][1][2][1][RTW89_QATAR][4] = 36, [0][1][2][1][RTW89_UK][4] = 36, [0][1][2][1][RTW89_FCC][6] = 68, @@ -18492,7 +18521,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_CHILE][6] = 14, [0][1][2][1][RTW89_UKRAINE][6] = 28, [0][1][2][1][RTW89_MEXICO][6] = 50, - [0][1][2][1][RTW89_CN][6] = 36, + [0][1][2][1][RTW89_CN][6] = 34, [0][1][2][1][RTW89_QATAR][6] = 36, [0][1][2][1][RTW89_UK][6] = 36, [0][1][2][1][RTW89_FCC][8] = 68, @@ -18696,7 +18725,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_CHILE][38] = 36, [0][1][2][1][RTW89_UKRAINE][38] = 4, [0][1][2][1][RTW89_MEXICO][38] = 78, - [0][1][2][1][RTW89_CN][38] = 72, + [0][1][2][1][RTW89_CN][38] = 62, [0][1][2][1][RTW89_QATAR][38] = 4, [0][1][2][1][RTW89_UK][38] = 36, [0][1][2][1][RTW89_FCC][40] = 78, @@ -18708,7 +18737,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_CHILE][40] = 36, [0][1][2][1][RTW89_UKRAINE][40] = 4, [0][1][2][1][RTW89_MEXICO][40] = 78, - [0][1][2][1][RTW89_CN][40] = 72, + [0][1][2][1][RTW89_CN][40] = 62, [0][1][2][1][RTW89_QATAR][40] = 4, [0][1][2][1][RTW89_UK][40] = 36, [0][1][2][1][RTW89_FCC][42] = 78, @@ -18720,7 
+18749,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_CHILE][42] = 36, [0][1][2][1][RTW89_UKRAINE][42] = 4, [0][1][2][1][RTW89_MEXICO][42] = 78, - [0][1][2][1][RTW89_CN][42] = 72, + [0][1][2][1][RTW89_CN][42] = 62, [0][1][2][1][RTW89_QATAR][42] = 4, [0][1][2][1][RTW89_UK][42] = 36, [0][1][2][1][RTW89_FCC][44] = 78, @@ -18732,7 +18761,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_CHILE][44] = 36, [0][1][2][1][RTW89_UKRAINE][44] = 4, [0][1][2][1][RTW89_MEXICO][44] = 78, - [0][1][2][1][RTW89_CN][44] = 76, + [0][1][2][1][RTW89_CN][44] = 62, [0][1][2][1][RTW89_QATAR][44] = 4, [0][1][2][1][RTW89_UK][44] = 36, [0][1][2][1][RTW89_FCC][46] = 78, @@ -18744,7 +18773,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_CHILE][46] = 36, [0][1][2][1][RTW89_UKRAINE][46] = 4, [0][1][2][1][RTW89_MEXICO][46] = 78, - [0][1][2][1][RTW89_CN][46] = 76, + [0][1][2][1][RTW89_CN][46] = 62, [0][1][2][1][RTW89_QATAR][46] = 4, [0][1][2][1][RTW89_UK][46] = 36, [0][1][2][1][RTW89_FCC][48] = 58, @@ -18912,7 +18941,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_CHILE][39] = 64, [1][0][2][0][RTW89_UKRAINE][39] = 28, [1][0][2][0][RTW89_MEXICO][39] = 78, - [1][0][2][0][RTW89_CN][39] = 70, + [1][0][2][0][RTW89_CN][39] = 56, [1][0][2][0][RTW89_QATAR][39] = 28, [1][0][2][0][RTW89_UK][39] = 64, [1][0][2][0][RTW89_FCC][43] = 78, @@ -18924,7 +18953,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_CHILE][43] = 64, [1][0][2][0][RTW89_UKRAINE][43] = 28, [1][0][2][0][RTW89_MEXICO][43] = 78, - [1][0][2][0][RTW89_CN][43] = 74, + [1][0][2][0][RTW89_CN][43] = 62, [1][0][2][0][RTW89_QATAR][43] = 28, [1][0][2][0][RTW89_UK][43] = 62, [1][0][2][0][RTW89_FCC][47] = 78, @@ -19080,7 +19109,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_CHILE][39] = 52, [1][1][2][0][RTW89_UKRAINE][39] = 16, [1][1][2][0][RTW89_MEXICO][39] = 78, - [1][1][2][0][RTW89_CN][39] = 70, + [1][1][2][0][RTW89_CN][39] = 56, [1][1][2][0][RTW89_QATAR][39] = 16, [1][1][2][0][RTW89_UK][39] = 52, [1][1][2][0][RTW89_FCC][43] = 78, @@ -19092,7 +19121,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_CHILE][43] = 52, [1][1][2][0][RTW89_UKRAINE][43] = 16, [1][1][2][0][RTW89_MEXICO][43] = 78, - [1][1][2][0][RTW89_CN][43] = 74, + [1][1][2][0][RTW89_CN][43] = 62, [1][1][2][0][RTW89_QATAR][43] = 16, [1][1][2][0][RTW89_UK][43] = 52, [1][1][2][0][RTW89_FCC][47] = 68, @@ -19248,7 +19277,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_CHILE][39] = 36, [1][1][2][1][RTW89_UKRAINE][39] = 4, [1][1][2][1][RTW89_MEXICO][39] = 78, - [1][1][2][1][RTW89_CN][39] = 70, + [1][1][2][1][RTW89_CN][39] = 58, [1][1][2][1][RTW89_QATAR][39] = 4, [1][1][2][1][RTW89_UK][39] = 40, [1][1][2][1][RTW89_FCC][43] = 78, @@ -19260,7 +19289,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_CHILE][43] = 36, [1][1][2][1][RTW89_UKRAINE][43] = 4, [1][1][2][1][RTW89_MEXICO][43] = 78, - [1][1][2][1][RTW89_CN][43] = 74, + [1][1][2][1][RTW89_CN][43] = 62, [1][1][2][1][RTW89_QATAR][43] = 4, [1][1][2][1][RTW89_UK][43] = 40, [1][1][2][1][RTW89_FCC][47] = 68, @@ -19356,7 +19385,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_CHILE][41] = 64, [2][0][2][0][RTW89_UKRAINE][41] = 28, [2][0][2][0][RTW89_MEXICO][41] = 74, - 
[2][0][2][0][RTW89_CN][41] = 70, + [2][0][2][0][RTW89_CN][41] = 48, [2][0][2][0][RTW89_QATAR][41] = 28, [2][0][2][0][RTW89_UK][41] = 64, [2][0][2][0][RTW89_FCC][49] = 64, @@ -19440,7 +19469,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_CHILE][41] = 50, [2][1][2][0][RTW89_UKRAINE][41] = 16, [2][1][2][0][RTW89_MEXICO][41] = 74, - [2][1][2][0][RTW89_CN][41] = 70, + [2][1][2][0][RTW89_CN][41] = 48, [2][1][2][0][RTW89_QATAR][41] = 16, [2][1][2][0][RTW89_UK][41] = 52, [2][1][2][0][RTW89_FCC][49] = 58, @@ -19524,7 +19553,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_CHILE][41] = 36, [2][1][2][1][RTW89_UKRAINE][41] = 4, [2][1][2][1][RTW89_MEXICO][41] = 74, - [2][1][2][1][RTW89_CN][41] = 70, + [2][1][2][1][RTW89_CN][41] = 46, [2][1][2][1][RTW89_QATAR][41] = 4, [2][1][2][1][RTW89_UK][41] = 38, [2][1][2][1][RTW89_FCC][49] = 58, @@ -20669,10 +20698,10 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_WW][48] = 32, [0][0][RTW89_WW][50] = 32, [0][0][RTW89_WW][52] = 32, - [0][1][RTW89_WW][0] = 0, + [0][1][RTW89_WW][0] = 1, [0][1][RTW89_WW][2] = 4, - [0][1][RTW89_WW][4] = 0, - [0][1][RTW89_WW][6] = 0, + [0][1][RTW89_WW][4] = 1, + [0][1][RTW89_WW][6] = 1, [0][1][RTW89_WW][8] = 12, [0][1][RTW89_WW][10] = 12, [0][1][RTW89_WW][12] = 12, @@ -21148,7 +21177,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_FCC][0] = 34, [0][1][RTW89_ETSI][0] = 12, [0][1][RTW89_MKK][0] = 12, - [0][1][RTW89_IC][0] = 0, + [0][1][RTW89_IC][0] = 1, [0][1][RTW89_KCC][0] = 28, [0][1][RTW89_ACMA][0] = 12, [0][1][RTW89_CHILE][0] = 14, @@ -21172,7 +21201,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_FCC][4] = 34, [0][1][RTW89_ETSI][4] = 12, [0][1][RTW89_MKK][4] = 14, - [0][1][RTW89_IC][4] = 0, + [0][1][RTW89_IC][4] = 1, [0][1][RTW89_KCC][4] = 28, [0][1][RTW89_ACMA][4] = 12, [0][1][RTW89_CHILE][4] = 12, @@ -21184,7 +21213,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_FCC][6] = 34, [0][1][RTW89_ETSI][6] = 12, [0][1][RTW89_MKK][6] = 14, - [0][1][RTW89_IC][6] = 0, + [0][1][RTW89_IC][6] = 1, [0][1][RTW89_KCC][6] = 2, [0][1][RTW89_ACMA][6] = 12, [0][1][RTW89_CHILE][6] = 12, @@ -21730,7 +21759,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_CHILE][38] = 64, [1][0][RTW89_UKRAINE][38] = 28, [1][0][RTW89_MEXICO][38] = 84, - [1][0][RTW89_CN][38] = 74, + [1][0][RTW89_CN][38] = 62, [1][0][RTW89_QATAR][38] = 28, [1][0][RTW89_UK][38] = 38, [1][0][RTW89_FCC][40] = 84, @@ -21742,7 +21771,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_CHILE][40] = 64, [1][0][RTW89_UKRAINE][40] = 28, [1][0][RTW89_MEXICO][40] = 84, - [1][0][RTW89_CN][40] = 74, + [1][0][RTW89_CN][40] = 62, [1][0][RTW89_QATAR][40] = 28, [1][0][RTW89_UK][40] = 38, [1][0][RTW89_FCC][42] = 84, @@ -21754,7 +21783,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_CHILE][42] = 64, [1][0][RTW89_UKRAINE][42] = 28, [1][0][RTW89_MEXICO][42] = 84, - [1][0][RTW89_CN][42] = 74, + [1][0][RTW89_CN][42] = 62, [1][0][RTW89_QATAR][42] = 28, [1][0][RTW89_UK][42] = 38, [1][0][RTW89_FCC][44] = 84, @@ -21766,7 +21795,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_CHILE][44] = 64, [1][0][RTW89_UKRAINE][44] = 28, [1][0][RTW89_MEXICO][44] = 84, - [1][0][RTW89_CN][44] = 74, + [1][0][RTW89_CN][44] = 62, [1][0][RTW89_QATAR][44] = 28, 
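Back in the .c hunks, the coexistence callbacks are renamed to say which side of Bluetooth they touch and gain an explicit PHY index: ctrl_btg becomes ctrl_btg_bt_rx and bb_ctrl_btc_preagc becomes ctrl_nbtg_bt_tx. The new rtw89_chip_ops slots, with signatures copied from the rtw8852a/rtw8852b definitions in this diff (the surrounding struct is of course much larger):

struct rtw89_chip_ops {
	/* ... */
	void (*ctrl_btg_bt_rx)(struct rtw89_dev *rtwdev, bool en,
			       enum rtw89_phy_idx phy_idx);
	void (*ctrl_nbtg_bt_tx)(struct rtw89_dev *rtwdev, bool en,
				enum rtw89_phy_idx phy_idx);
	/* ... */
};

All call sites touched here still pass RTW89_PHY_0, so the extra argument is plumbing for dual-PHY parts rather than a behavior change in this patch.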
[1][0][RTW89_UK][44] = 38, [1][0][RTW89_FCC][46] = 84, @@ -21778,7 +21807,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_CHILE][46] = 64, [1][0][RTW89_UKRAINE][46] = 28, [1][0][RTW89_MEXICO][46] = 84, - [1][0][RTW89_CN][46] = 74, + [1][0][RTW89_CN][46] = 62, [1][0][RTW89_QATAR][46] = 28, [1][0][RTW89_UK][46] = 38, [1][0][RTW89_FCC][48] = 44, @@ -22402,7 +22431,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_CHILE][38] = 64, [2][0][RTW89_UKRAINE][38] = 28, [2][0][RTW89_MEXICO][38] = 84, - [2][0][RTW89_CN][38] = 76, + [2][0][RTW89_CN][38] = 62, [2][0][RTW89_QATAR][38] = 28, [2][0][RTW89_UK][38] = 50, [2][0][RTW89_FCC][40] = 84, @@ -22414,7 +22443,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_CHILE][40] = 64, [2][0][RTW89_UKRAINE][40] = 28, [2][0][RTW89_MEXICO][40] = 84, - [2][0][RTW89_CN][40] = 76, + [2][0][RTW89_CN][40] = 62, [2][0][RTW89_QATAR][40] = 28, [2][0][RTW89_UK][40] = 50, [2][0][RTW89_FCC][42] = 84, @@ -22426,7 +22455,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_CHILE][42] = 66, [2][0][RTW89_UKRAINE][42] = 28, [2][0][RTW89_MEXICO][42] = 84, - [2][0][RTW89_CN][42] = 76, + [2][0][RTW89_CN][42] = 62, [2][0][RTW89_QATAR][42] = 28, [2][0][RTW89_UK][42] = 50, [2][0][RTW89_FCC][44] = 84, @@ -22438,7 +22467,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_CHILE][44] = 64, [2][0][RTW89_UKRAINE][44] = 28, [2][0][RTW89_MEXICO][44] = 84, - [2][0][RTW89_CN][44] = 76, + [2][0][RTW89_CN][44] = 62, [2][0][RTW89_QATAR][44] = 28, [2][0][RTW89_UK][44] = 50, [2][0][RTW89_FCC][46] = 84, @@ -22450,7 +22479,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_CHILE][46] = 64, [2][0][RTW89_UKRAINE][46] = 28, [2][0][RTW89_MEXICO][46] = 84, - [2][0][RTW89_CN][46] = 76, + [2][0][RTW89_CN][46] = 62, [2][0][RTW89_QATAR][46] = 28, [2][0][RTW89_UK][46] = 50, [2][0][RTW89_FCC][48] = 56, @@ -22738,7 +22767,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_CHILE][38] = 58, [2][1][RTW89_UKRAINE][38] = 16, [2][1][RTW89_MEXICO][38] = 84, - [2][1][RTW89_CN][38] = 64, + [2][1][RTW89_CN][38] = 62, [2][1][RTW89_QATAR][38] = 16, [2][1][RTW89_UK][38] = 38, [2][1][RTW89_FCC][40] = 84, @@ -22750,7 +22779,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_CHILE][40] = 58, [2][1][RTW89_UKRAINE][40] = 16, [2][1][RTW89_MEXICO][40] = 84, - [2][1][RTW89_CN][40] = 64, + [2][1][RTW89_CN][40] = 62, [2][1][RTW89_QATAR][40] = 16, [2][1][RTW89_UK][40] = 38, [2][1][RTW89_FCC][42] = 84, @@ -22762,7 +22791,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_CHILE][42] = 58, [2][1][RTW89_UKRAINE][42] = 16, [2][1][RTW89_MEXICO][42] = 84, - [2][1][RTW89_CN][42] = 64, + [2][1][RTW89_CN][42] = 62, [2][1][RTW89_QATAR][42] = 16, [2][1][RTW89_UK][42] = 38, [2][1][RTW89_FCC][44] = 84, @@ -22774,7 +22803,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_CHILE][44] = 58, [2][1][RTW89_UKRAINE][44] = 16, [2][1][RTW89_MEXICO][44] = 84, - [2][1][RTW89_CN][44] = 64, + [2][1][RTW89_CN][44] = 62, [2][1][RTW89_QATAR][44] = 16, [2][1][RTW89_UK][44] = 38, [2][1][RTW89_FCC][46] = 84, @@ -22786,7 +22815,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_CHILE][46] = 58, [2][1][RTW89_UKRAINE][46] = 16, [2][1][RTW89_MEXICO][46] = 84, - [2][1][RTW89_CN][46] = 64, + [2][1][RTW89_CN][46] = 
62, [2][1][RTW89_QATAR][46] = 16, [2][1][RTW89_UK][46] = 38, [2][1][RTW89_FCC][48] = 44, @@ -22859,6 +22888,7 @@ const struct rtw89_phy_table rtw89_8852b_phy_nctl_table = { .rf_path = 0, /* don't care */ }; +static const struct rtw89_txpwr_table rtw89_8852b_byr_table = { .data = rtw89_8852b_txpwr_byrate, .size = ARRAY_SIZE(rtw89_8852b_txpwr_byrate), @@ -22881,6 +22911,7 @@ const struct rtw89_txpwr_track_cfg rtw89_8852b_trk_cfg = { }; const struct rtw89_rfe_parms rtw89_8852b_dflt_parms = { + .byr_tbl = &rtw89_8852b_byr_table, .rule_2ghz = { .lmt = &rtw89_8852b_txpwr_lmt_2g, .lmt_ru = &rtw89_8852b_txpwr_lmt_ru_2g, @@ -22889,4 +22920,8 @@ const struct rtw89_rfe_parms rtw89_8852b_dflt_parms = { .lmt = &rtw89_8852b_txpwr_lmt_5g, .lmt_ru = &rtw89_8852b_txpwr_lmt_ru_5g, }, + .tx_shape = { + .lmt = &rtw89_8852b_tx_shape_lmt, + .lmt_ru = &rtw89_8852b_tx_shape_lmt_ru, + }, }; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852b_table.h index 7ef217629f46..da6c90e2ba93 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b_table.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_table.h @@ -12,10 +12,7 @@ extern const struct rtw89_phy_table rtw89_8852b_phy_bb_gain_table; extern const struct rtw89_phy_table rtw89_8852b_phy_radioa_table; extern const struct rtw89_phy_table rtw89_8852b_phy_radiob_table; extern const struct rtw89_phy_table rtw89_8852b_phy_nctl_table; -extern const struct rtw89_txpwr_table rtw89_8852b_byr_table; extern const struct rtw89_txpwr_track_cfg rtw89_8852b_trk_cfg; -extern const u8 rtw89_8852b_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] - [RTW89_REGD_NUM]; extern const struct rtw89_rfe_parms rtw89_8852b_dflt_parms; #endif diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 9c7c9812d4f4..3b7d8ab39bab 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -2,6 +2,7 @@ /* Copyright(c) 2019-2022 Realtek Corporation */ +#include "chan.h" #include "coex.h" #include "debug.h" #include "fw.h" @@ -146,6 +147,10 @@ static const struct rtw89_dig_regs rtw8852c_dig_regs = { .seg0_pd_reg = R_SEG0R_PD, .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK, .pd_spatial_reuse_en = B_SEG0R_PD_SPATIAL_REUSE_EN_MSK, + .bmode_pd_reg = R_BMODE_PDTH_EN_V1, + .bmode_cca_rssi_limit_en = B_BMODE_PDTH_LIMIT_EN_MSK_V1, + .bmode_pd_lower_bound_reg = R_BMODE_PDTH_V1, + .bmode_rssi_nocca_low_th_mask = B_BMODE_PDTH_LOWER_BOUND_MSK_V1, .p0_lna_init = {R_PATH0_LNA_INIT_V1, B_PATH0_LNA_INIT_IDX_MSK}, .p1_lna_init = {R_PATH1_LNA_INIT_V1, B_PATH1_LNA_INIT_IDX_MSK}, .p0_tia_init = {R_PATH0_TIA_INIT_V1, B_PATH0_TIA_INIT_IDX_MSK_V1}, @@ -162,7 +167,9 @@ static const struct rtw89_dig_regs rtw8852c_dig_regs = { B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK}, }; -static void rtw8852c_ctrl_btg(struct rtw89_dev *rtwdev, bool btg); +static void rtw8852c_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx); + static void rtw8852c_ctrl_tx_path_tmac(struct rtw89_dev *rtwdev, u8 tx_path, enum rtw89_mac_idx mac_idx); @@ -606,10 +613,9 @@ static void rtw8852c_set_channel_mac(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, u8 mac_idx) { - u32 rf_mod = rtw89_mac_reg_by_idx(R_AX_WMAC_RFMOD, mac_idx); - u32 sub_carr = rtw89_mac_reg_by_idx(R_AX_TX_SUB_CARRIER_VALUE, - mac_idx); - u32 chk_rate = rtw89_mac_reg_by_idx(R_AX_TXRATE_CHK, mac_idx); + u32 rf_mod = rtw89_mac_reg_by_idx(rtwdev, R_AX_WMAC_RFMOD, mac_idx); + u32 sub_carr = 
rtw89_mac_reg_by_idx(rtwdev, R_AX_TX_SUB_CARRIER_VALUE, mac_idx); + u32 chk_rate = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXRATE_CHK, mac_idx); u8 txsc20 = 0, txsc40 = 0, txsc80 = 0; u8 rf_mod_val = 0, chk_rate_mask = 0; u32 txsc; @@ -1647,15 +1653,15 @@ static void rtw8852c_set_channel_bb(struct rtw89_dev *rtwdev, } rtw8852c_spur_elimination(rtwdev, chan, pri_ch_idx, phy_idx); - rtw8852c_ctrl_btg(rtwdev, chan->band_type == RTW89_BAND_2G); + rtw8852c_ctrl_btg_bt_rx(rtwdev, chan->band_type == RTW89_BAND_2G, + RTW89_PHY_0); rtw8852c_5m_mask(rtwdev, chan, phy_idx); if (chan->band_width == RTW89_CHANNEL_WIDTH_160 && rtwdev->hal.cv != CHIP_CAV) { rtw89_phy_write32_idx(rtwdev, R_P80_AT_HIGH_FREQ, B_P80_AT_HIGH_FREQ, 0x0, phy_idx); - reg = rtw89_mac_reg_by_idx(R_P80_AT_HIGH_FREQ_BB_WRP, - phy_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_P80_AT_HIGH_FREQ_BB_WRP, phy_idx); if (chan->primary_channel > chan->channel) { rtw89_phy_write32_mask(rtwdev, R_P80_AT_HIGH_FREQ_RU_ALLOC, @@ -1774,6 +1780,7 @@ static void rtw8852c_rfk_init(struct rtw89_dev *rtwdev) rtwdev->is_tssi_mode[RF_PATH_B] = false; memset(rfk_mcc, 0, sizeof(*rfk_mcc)); rtw8852c_lck_init(rtwdev); + rtw8852c_dpk_init(rtwdev); rtw8852c_rck(rtwdev); rtw8852c_dack(rtwdev); @@ -1859,12 +1866,12 @@ void rtw8852c_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, for (i = 0; i < 4; i++) { /* 1TX */ - reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_1T, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_V1_MASK << (8 * i), val_1t); /* 2TX */ - reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_2T, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_V1_MASK << (8 * i), val_2t); @@ -1962,10 +1969,11 @@ static void rtw8852c_set_tx_shape(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, enum rtw89_phy_idx phy_idx) { + const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms; u8 band = chan->band_type; u8 regd = rtw89_regd_get(rtwdev, band); - u8 tx_shape_cck = rtw89_8852c_tx_shape[band][RTW89_RS_CCK][regd]; - u8 tx_shape_ofdm = rtw89_8852c_tx_shape[band][RTW89_RS_OFDM][regd]; + u8 tx_shape_cck = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_CCK][regd]; + u8 tx_shape_ofdm = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_OFDM][regd]; if (band == RTW89_BAND_2G) rtw8852c_bb_set_tx_shape_dfir(rtwdev, chan, tx_shape_cck, phy_idx); @@ -1973,6 +1981,11 @@ static void rtw8852c_set_tx_shape(struct rtw89_dev *rtwdev, rtw89_phy_tssi_ctrl_set_bandedge_cfg(rtwdev, (enum rtw89_mac_idx)phy_idx, tx_shape_ofdm); + + rtw89_phy_write32_set(rtwdev, R_P0_DAC_COMP_POST_DPD_EN, + B_P0_DAC_COMP_POST_DPD_EN); + rtw89_phy_write32_set(rtwdev, R_P1_DAC_COMP_POST_DPD_EN, + B_P1_DAC_COMP_POST_DPD_EN); } static void rtw8852c_set_txpwr(struct rtw89_dev *rtwdev, @@ -2140,7 +2153,8 @@ static void rtw8852c_bb_cfg_rx_path(struct rtw89_dev *rtwdev, u8 rx_path) 1); rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1); - rtw8852c_ctrl_btg(rtwdev, band == RTW89_BAND_2G); + rtw8852c_ctrl_btg_bt_rx(rtwdev, band == RTW89_BAND_2G, + RTW89_PHY_0); rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 1); rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, @@ -2181,7 +2195,7 @@ static void rtw8852c_ctrl_tx_path_tmac(struct rtw89_dev *rtwdev, u8 tx_path, for (addr = R_AX_MACID_ANT_TABLE; addr <= R_AX_MACID_ANT_TABLE_LAST; addr += 4) { - reg = rtw89_mac_reg_by_idx(addr, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, addr, mac_idx); rtw89_write32(rtwdev, reg, 0); } @@ 
-2211,14 +2225,15 @@ static void rtw8852c_ctrl_tx_path_tmac(struct rtw89_dev *rtwdev, u8 tx_path, for (i = 0; i < cr_size; i++) { rtw89_debug(rtwdev, RTW89_DBG_TSSI, "0x%x = 0x%x\n", path_com[i].addr, path_com[i].data); - reg = rtw89_mac_reg_by_idx(path_com[i].addr, mac_idx); + reg = rtw89_mac_reg_by_idx(rtwdev, path_com[i].addr, mac_idx); rtw89_write32(rtwdev, reg, path_com[i].data); } } -static void rtw8852c_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, bool bt_en) +static void rtw8852c_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) { - if (bt_en) { + if (en) { rtw89_phy_write32_mask(rtwdev, R_PATH0_FRC_FIR_TYPE_V1, B_PATH0_FRC_FIR_TYPE_MSK_V1, 0x3); rtw89_phy_write32_mask(rtwdev, R_PATH1_FRC_FIR_TYPE_V1, @@ -2336,9 +2351,10 @@ static void rtw8852c_btc_set_rfe(struct rtw89_dev *rtwdev) } } -static void rtw8852c_ctrl_btg(struct rtw89_dev *rtwdev, bool btg) +static void rtw8852c_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) { - if (btg) { + if (en) { rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1, B_PATH0_BT_SHARE_V1, 0x1); rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1, @@ -2648,15 +2664,15 @@ static void rtw8852c_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level) switch (level) { case 0: /* original */ default: - rtw8852c_bb_ctrl_btc_preagc(rtwdev, false); + rtw8852c_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0); btc->dm.wl_lna2 = 0; break; case 1: /* for FDD free-run */ - rtw8852c_bb_ctrl_btc_preagc(rtwdev, true); + rtw8852c_ctrl_nbtg_bt_tx(rtwdev, true, RTW89_PHY_0); btc->dm.wl_lna2 = 0; break; case 2: /* for BTG Co-Rx*/ - rtw8852c_bb_ctrl_btc_preagc(rtwdev, false); + rtw8852c_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0); btc->dm.wl_lna2 = 1; break; } @@ -2741,6 +2757,10 @@ static int rtw8852c_mac_disable_bb_rf(struct rtw89_dev *rtwdev) return 0; } +static const struct rtw89_chanctx_listener rtw8852c_chanctx_listener = { + .callbacks[RTW89_CHANCTX_CALLBACK_RFK] = rtw8852c_rfk_chanctx_cb, +}; + #ifdef CONFIG_PM static const struct wiphy_wowlan_support rtw_wowlan_stub_8852c = { .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT, @@ -2753,6 +2773,7 @@ static const struct wiphy_wowlan_support rtw_wowlan_stub_8852c = { static const struct rtw89_chip_ops rtw8852c_chip_ops = { .enable_bb_rf = rtw8852c_mac_enable_bb_rf, .disable_bb_rf = rtw8852c_mac_disable_bb_rf, + .bb_preinit = NULL, .bb_reset = rtw8852c_bb_reset, .bb_sethw = rtw8852c_bb_sethw, .read_rf = rtw89_phy_read_rf_v1, @@ -2773,9 +2794,9 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = { .set_txpwr_ctrl = rtw8852c_set_txpwr_ctrl, .init_txpwr_unit = rtw8852c_init_txpwr_unit, .get_thermal = rtw8852c_get_thermal, - .ctrl_btg = rtw8852c_ctrl_btg, + .ctrl_btg_bt_rx = rtw8852c_ctrl_btg_bt_rx, .query_ppdu = rtw8852c_query_ppdu, - .bb_ctrl_btc_preagc = rtw8852c_bb_ctrl_btc_preagc, + .ctrl_nbtg_bt_tx = rtw8852c_ctrl_nbtg_bt_tx, .cfg_txrx_path = rtw8852c_bb_cfg_txrx_path, .set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset, .pwr_on_func = rtw8852c_pwr_on_func, @@ -2802,10 +2823,15 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = { const struct rtw89_chip_info rtw8852c_chip_info = { .chip_id = RTL8852C, + .chip_gen = RTW89_CHIP_AX, .ops = &rtw8852c_chip_ops, + .mac_def = &rtw89_mac_gen_ax, + .phy_def = &rtw89_phy_gen_ax, .fw_basename = RTW8852C_FW_BASENAME, .fw_format_max = RTW8852C_FW_FORMAT_MAX, .try_ce_fw = false, + .bbmcu_nr = 0, + .needed_fw_elms = 0, .fifo_size = 458752, .small_fifo_size = false, .dle_scc_rsvd_size = 0, @@ -2825,21 
+2851,22 @@ const struct rtw89_chip_info rtw8852c_chip_info = { &rtw89_8852c_phy_radioa_table,}, .nctl_table = &rtw89_8852c_phy_nctl_table, .nctl_post_table = NULL, - .byr_table = &rtw89_8852c_byr_table, .dflt_parms = &rtw89_8852c_dflt_parms, .rfe_parms_conf = NULL, + .chanctx_listener = &rtw8852c_chanctx_listener, .txpwr_factor_rf = 2, .txpwr_factor_mac = 1, .dig_table = NULL, .dig_regs = &rtw8852c_dig_regs, .tssi_dbw_table = &rtw89_8852c_tssi_dbw_table, - .support_chanctx_num = 1, + .support_chanctx_num = 2, .support_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ) | BIT(NL80211_BAND_6GHZ), .support_bw160 = true, .support_unii4 = true, - .support_ul_tb_ctrl = false, + .ul_tb_waveform_ctrl = false, + .ul_tb_pwr_diff = true, .hw_sec_hdr = true, .rf_path_num = 2, .tx_nss = 2, @@ -2883,6 +2910,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .hci_func_en_addr = R_AX_HCI_FUNC_EN_V1, .h2c_desc_size = sizeof(struct rtw89_rxdesc_short), .txwd_body_size = sizeof(struct rtw89_txwd_body_v1), + .txwd_info_size = sizeof(struct rtw89_txwd_info), .h2c_ctrl_reg = R_AX_H2CREG_CTRL_V1, .h2c_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_H2C_DEQ_CNT_MASK >> 8}, .h2c_regs = rtw8852c_h2c_regs, @@ -2896,6 +2924,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .dcfo_comp_sft = 12, .imr_info = &rtw8852c_imr_info, .rrsr_cfgs = &rtw8852c_rrsr_cfgs, + .bss_clr_vld = {R_BSS_CLR_MAP, B_BSS_CLR_MAP_VLD0}, .bss_clr_map_reg = R_BSS_CLR_MAP, .dma_ch_mask = 0, .edcca_lvl_reg = R_SEG0R_EDCCA_LVL, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c index de7714f871d5..654e3e5507cb 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c @@ -2,6 +2,7 @@ /* Copyright(c) 2019-2022 Realtek Corporation */ +#include "chan.h" #include "coex.h" #include "debug.h" #include "phy.h" @@ -2893,18 +2894,37 @@ static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path) { const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + enum rtw89_bandwidth bw = chan->band_width; enum rtw89_band band = chan->band_type; + u32 clk = 0x0; rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_sys_defs_tbl); - if (path == RF_PATH_A) + switch (bw) { + case RTW89_CHANNEL_WIDTH_80: + clk = 0x1; + break; + case RTW89_CHANNEL_WIDTH_80_80: + case RTW89_CHANNEL_WIDTH_160: + clk = 0x2; + break; + default: + break; + } + + if (path == RF_PATH_A) { + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ADC_CLK, + B_P0_TSSI_ADC_CLK, clk); rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, &rtw8852c_tssi_sys_defs_2g_a_tbl, &rtw8852c_tssi_sys_defs_5g_a_tbl); - else + } else { + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ADC_CLK, + B_P1_TSSI_ADC_CLK, clk); rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, &rtw8852c_tssi_sys_defs_2g_b_tbl, &rtw8852c_tssi_sys_defs_5g_b_tbl); + } } static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, @@ -4049,21 +4069,53 @@ void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev, void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; - u8 idx = rfk_mcc->table_idx; - int i; + DECLARE_BITMAP(map, RTW89_IQK_CHS_NR) = {}; + const struct rtw89_chan *chan; + enum rtw89_entity_mode mode; + u8 chan_idx; + u8 idx; + u8 i; - for (i = 0; i < RTW89_IQK_CHS_NR; i++) { 
- if (rfk_mcc->ch[idx] == 0) - break; - if (++idx >= RTW89_IQK_CHS_NR) - idx = 0; + mode = rtw89_get_entity_mode(rtwdev); + switch (mode) { + case RTW89_ENTITY_MODE_MCC_PREPARE: + chan_idx = RTW89_SUB_ENTITY_1; + break; + default: + chan_idx = RTW89_SUB_ENTITY_0; + break; + } + + for (i = 0; i <= chan_idx; i++) { + chan = rtw89_chan_get(rtwdev, i); + + for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) { + if (rfk_mcc->ch[idx] == chan->channel && + rfk_mcc->band[idx] == chan->band_type) { + if (i != chan_idx) { + set_bit(idx, map); + break; + } + + goto bottom; + } + } + } + + idx = find_first_zero_bit(map, RTW89_IQK_CHS_NR); + if (idx == RTW89_IQK_CHS_NR) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "%s: no empty rfk table; force replace the first\n", + __func__); + idx = 0; } - rfk_mcc->table_idx = idx; rfk_mcc->ch[idx] = chan->channel; rfk_mcc->band[idx] = chan->band_type; + +bottom: + rfk_mcc->table_idx = idx; } void rtw8852c_rck(struct rtw89_dev *rtwdev) @@ -4213,6 +4265,14 @@ trigger_rx_dck: rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP); } +void rtw8852c_dpk_init(struct rtw89_dev *rtwdev) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + dpk->is_dpk_enable = true; + dpk->is_dpk_reload_en = false; +} + void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) { u32 tx_en; @@ -4222,8 +4282,6 @@ void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); - rtwdev->dpk.is_dpk_enable = true; - rtwdev->dpk.is_dpk_reload_en = false; _dpk(rtwdev, phy_idx, false); rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); @@ -4361,3 +4419,26 @@ void rtw8852c_wifi_scan_notify(struct rtw89_dev *rtwdev, else rtw8852c_tssi_default_txagc(rtwdev, phy_idx, false); } + +void rtw8852c_rfk_chanctx_cb(struct rtw89_dev *rtwdev, + enum rtw89_chanctx_state state) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 path; + + switch (state) { + case RTW89_CHANCTX_STATE_MCC_START: + dpk->is_dpk_enable = false; + for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) + _dpk_onoff(rtwdev, path, false); + break; + case RTW89_CHANCTX_STATE_MCC_STOP: + dpk->is_dpk_enable = true; + for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) + _dpk_onoff(rtwdev, path, false); + rtw8852c_dpk(rtwdev, RTW89_PHY_0); + break; + default: + break; + } +} diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h index 928a587cdd05..6605137e61aa 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h @@ -13,6 +13,7 @@ void rtw8852c_dack(struct rtw89_dev *rtwdev); void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool is_afe); void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev); +void rtw8852c_dpk_init(struct rtw89_dev *rtwdev); void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); void rtw8852c_dpk_track(struct rtw89_dev *rtwdev); void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); @@ -25,5 +26,7 @@ void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); void rtw8852c_lck_init(struct rtw89_dev *rtwdev); void rtw8852c_lck_track(struct rtw89_dev *rtwdev); +void rtw8852c_rfk_chanctx_cb(struct rtw89_dev *rtwdev, + enum rtw89_chanctx_state state); #endif diff --git 
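(The rewritten rtw8852c_mcc_get_ch_info() above replaces the old rotating table_idx with an explicit slot search. A distilled sketch of that selection policy as a standalone, hypothetical helper, assuming the rtw89 core header for the driver types:)

#include <linux/bitmap.h>

/* "busy" marks slots pinned by other live channels, i.e. the bits the hunk
 * sets for sub-entities other than the target one. */
static u8 rfk_mcc_pick_slot(const struct rtw89_rfk_mcc_info *rfk_mcc,
                            const struct rtw89_chan *tgt,
                            const unsigned long *busy)
{
        u8 idx;

        /* reuse a slot already calibrated for this channel/band */
        for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++)
                if (rfk_mcc->ch[idx] == tgt->channel &&
                    rfk_mcc->band[idx] == tgt->band_type)
                        return idx;

        /* otherwise take the first slot no other live channel is using... */
        idx = find_first_zero_bit(busy, RTW89_IQK_CHS_NR);

        /* ...and force-replace slot 0 when the table is full */
        return idx < RTW89_IQK_CHS_NR ? idx : 0;
}

(The companion rtw8852c_rfk_chanctx_cb() above then turns DPK off on every RF path when MCC starts and re-enables and re-runs it when MCC stops, with the enable flags now seeded once in rtw8852c_dpk_init() instead of on every rtw8852c_dpk() call.)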
a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c index d727d528b365..e5b0c2a686f0 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c @@ -165,11 +165,11 @@ static const struct rtw89_reg5_def rtw8852c_tssi_sys_defs[] = { RTW89_DECL_RFK_WM(0x12bc, 0x000ffff0, 0xb5b5), RTW89_DECL_RFK_WM(0x32bc, 0x000ffff0, 0xb5b5), RTW89_DECL_RFK_WM(0x0300, 0xff000000, 0x16), - RTW89_DECL_RFK_WM(0x0304, 0x0000ffff, 0x1f19), - RTW89_DECL_RFK_WM(0x0308, 0xff000000, 0x1c), + RTW89_DECL_RFK_WM(0x0304, 0x0000ffff, 0x1313), + RTW89_DECL_RFK_WM(0x0308, 0xff000000, 0x13), RTW89_DECL_RFK_WM(0x0314, 0xffff0000, 0x2041), - RTW89_DECL_RFK_WM(0x0318, 0xffffffff, 0x20012041), - RTW89_DECL_RFK_WM(0x0324, 0xffff0000, 0x2001), + RTW89_DECL_RFK_WM(0x0318, 0xffffffff, 0x00410041), + RTW89_DECL_RFK_WM(0x0324, 0xffff0000, 0x0041), RTW89_DECL_RFK_WM(0x0020, 0x00006000, 0x3), RTW89_DECL_RFK_WM(0x0024, 0x00006000, 0x3), RTW89_DECL_RFK_WM(0x0704, 0xffff0000, 0x601e), @@ -222,7 +222,7 @@ static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_defs_a[] = { RTW89_DECL_RFK_WM(0x5810, 0xffffffff, 0x59010000), RTW89_DECL_RFK_WM(0x5814, 0x01ffffff, 0x026d000), RTW89_DECL_RFK_WM(0x5814, 0xf8000000, 0x00), - RTW89_DECL_RFK_WM(0x5818, 0xffffffff, 0x002c1800), + RTW89_DECL_RFK_WM(0x5818, 0xffffffff, 0x002c18e8), RTW89_DECL_RFK_WM(0x581c, 0x3fffffff, 0x3dc80280), RTW89_DECL_RFK_WM(0x5820, 0xffffffff, 0x00000080), RTW89_DECL_RFK_WM(0x58e8, 0x0000003f, 0x03), @@ -251,7 +251,7 @@ static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_defs_a[] = { RTW89_DECL_RFK_WM(0x58d4, 0x07fc0000, 0x100), RTW89_DECL_RFK_WM(0x58d8, 0xffffffff, 0x8008016c), RTW89_DECL_RFK_WM(0x58dc, 0x0001ffff, 0x0807f), - RTW89_DECL_RFK_WM(0x58dc, 0xfff00000, 0x800), + RTW89_DECL_RFK_WM(0x58dc, 0xfff00000, 0xc00), RTW89_DECL_RFK_WM(0x58f0, 0x0003ffff, 0x001ff), RTW89_DECL_RFK_WM(0x58f4, 0x000fffff, 0x000), RTW89_DECL_RFK_WM(0x58f8, 0x000fffff, 0x000), @@ -260,14 +260,14 @@ static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_defs_a[] = { RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txpwr_ctrl_bb_defs_a); static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_defs_b[] = { - RTW89_DECL_RFK_WM(0x566c, 0x00001000, 0x0), + RTW89_DECL_RFK_WM(0x766c, 0x00001000, 0x0), RTW89_DECL_RFK_WM(0x7800, 0xffffffff, 0x003f807f), RTW89_DECL_RFK_WM(0x780c, 0x0000007f, 0x40), RTW89_DECL_RFK_WM(0x780c, 0x0fffff00, 0x00040), RTW89_DECL_RFK_WM(0x7810, 0xffffffff, 0x59010000), RTW89_DECL_RFK_WM(0x7814, 0x01ffffff, 0x026d000), RTW89_DECL_RFK_WM(0x7814, 0xf8000000, 0x00), - RTW89_DECL_RFK_WM(0x7818, 0xffffffff, 0x002c1800), + RTW89_DECL_RFK_WM(0x7818, 0xffffffff, 0x002c18e8), RTW89_DECL_RFK_WM(0x781c, 0x3fffffff, 0x3dc80280), RTW89_DECL_RFK_WM(0x7820, 0xffffffff, 0x00000080), RTW89_DECL_RFK_WM(0x78e8, 0x0000003f, 0x03), @@ -296,7 +296,7 @@ static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_defs_b[] = { RTW89_DECL_RFK_WM(0x78d4, 0x07fc0000, 0x100), RTW89_DECL_RFK_WM(0x78d8, 0xffffffff, 0x8008016c), RTW89_DECL_RFK_WM(0x78dc, 0x0001ffff, 0x0807f), - RTW89_DECL_RFK_WM(0x78dc, 0xfff00000, 0x800), + RTW89_DECL_RFK_WM(0x78dc, 0xfff00000, 0xc00), RTW89_DECL_RFK_WM(0x78f0, 0x0003ffff, 0x001ff), RTW89_DECL_RFK_WM(0x78f4, 0x000fffff, 0x000), RTW89_DECL_RFK_WM(0x78f8, 0x000fffff, 0x000), @@ -511,9 +511,9 @@ static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_5g_a[] = RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000), 
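(Each RTW89_DECL_RFK_WM(addr, mask, data) entry in these tables describes one masked PHY register write. A sketch of applying such a list, using rtw89_phy_write32_mask() as seen elsewhere in this diff; the local wm_entry struct is a stand-in for the driver's own rtw89_reg5_def:)

/* wm_entry mirrors one RTW89_DECL_RFK_WM() line: the value is shifted into
 * the field selected by the mask and read-modify-written into the register
 * by rtw89_phy_write32_mask(). */
struct wm_entry { u32 addr; u32 mask; u32 data; };

static void rfk_apply_wm_list(struct rtw89_dev *rtwdev,
                              const struct wm_entry *e, u32 n)
{
        u32 i;

        for (i = 0; i < n; i++)
                rtw89_phy_write32_mask(rtwdev, e[i].addr, e[i].mask,
                                       e[i].data);
}

(So the first changed entry in this hunk is equivalent in effect to rtw89_phy_write32_mask(rtwdev, 0x0304, 0x0000ffff, 0x1313).)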
RTW89_DECL_RFK_WM(0x5640, 0x000003ff, 0x000), RTW89_DECL_RFK_WM(0x5640, 0x000ffc00, 0x000), - RTW89_DECL_RFK_WM(0x5640, 0x3ff00000, 0x000), - RTW89_DECL_RFK_WM(0x5644, 0x000003ff, 0x000), - RTW89_DECL_RFK_WM(0x5644, 0x000ffc00, 0x000), + RTW89_DECL_RFK_WM(0x5640, 0x3ff00000, 0x3e9), + RTW89_DECL_RFK_WM(0x5644, 0x000003ff, 0x039), + RTW89_DECL_RFK_WM(0x5644, 0x000ffc00, 0x07d), }; RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_5g_a); @@ -531,9 +531,9 @@ static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_5g_b[] = RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000), RTW89_DECL_RFK_WM(0x7640, 0x000003ff, 0x000), RTW89_DECL_RFK_WM(0x7640, 0x000ffc00, 0x000), - RTW89_DECL_RFK_WM(0x7640, 0x3ff00000, 0x000), - RTW89_DECL_RFK_WM(0x7644, 0x000003ff, 0x000), - RTW89_DECL_RFK_WM(0x7644, 0x000ffc00, 0x000), + RTW89_DECL_RFK_WM(0x7640, 0x3ff00000, 0x3e9), + RTW89_DECL_RFK_WM(0x7644, 0x000003ff, 0x039), + RTW89_DECL_RFK_WM(0x7644, 0x000ffc00, 0x07d), }; RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_5g_b); @@ -551,9 +551,9 @@ static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_6g_a[] = RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000), RTW89_DECL_RFK_WM(0x5640, 0x000003ff, 0x000), RTW89_DECL_RFK_WM(0x5640, 0x000ffc00, 0x000), - RTW89_DECL_RFK_WM(0x5640, 0x3ff00000, 0x000), - RTW89_DECL_RFK_WM(0x5644, 0x000003ff, 0x000), - RTW89_DECL_RFK_WM(0x5644, 0x000ffc00, 0x000), + RTW89_DECL_RFK_WM(0x5640, 0x3ff00000, 0x3e9), + RTW89_DECL_RFK_WM(0x5644, 0x000003ff, 0x039), + RTW89_DECL_RFK_WM(0x5644, 0x000ffc00, 0x080), }; RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_6g_a); @@ -571,9 +571,9 @@ static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_6g_b[] = RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000), RTW89_DECL_RFK_WM(0x7640, 0x000003ff, 0x000), RTW89_DECL_RFK_WM(0x7640, 0x000ffc00, 0x000), - RTW89_DECL_RFK_WM(0x7640, 0x3ff00000, 0x000), - RTW89_DECL_RFK_WM(0x7644, 0x000003ff, 0x000), - RTW89_DECL_RFK_WM(0x7644, 0x000ffc00, 0x000), + RTW89_DECL_RFK_WM(0x7640, 0x3ff00000, 0x3e9), + RTW89_DECL_RFK_WM(0x7644, 0x000003ff, 0x039), + RTW89_DECL_RFK_WM(0x7644, 0x000ffc00, 0x080), }; RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_6g_b); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c index 4b272fdf1fd7..ab1a0aadc869 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c @@ -31525,8 +31525,9 @@ static const s8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = { 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5 }; -const u8 rtw89_8852c_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] - [RTW89_REGD_NUM] = { +static +const u8 rtw89_8852c_tx_shape_lmt[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] + [RTW89_REGD_NUM] = { [0][0][RTW89_ACMA] = 0, [0][0][RTW89_CHILE] = 0, [0][0][RTW89_CN] = 0, @@ -31537,6 +31538,7 @@ const u8 rtw89_8852c_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] [0][0][RTW89_MEXICO] = 1, [0][0][RTW89_MKK] = 0, [0][0][RTW89_QATAR] = 0, + [0][0][RTW89_THAILAND] = 0, [0][0][RTW89_UK] = 0, [0][0][RTW89_UKRAINE] = 0, [0][1][RTW89_ACMA] = 0, @@ -31549,6 +31551,7 @@ const u8 rtw89_8852c_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] [0][1][RTW89_MEXICO] = 3, [0][1][RTW89_MKK] = 0, [0][1][RTW89_QATAR] = 0, + [0][1][RTW89_THAILAND] = 0, [0][1][RTW89_UK] = 0, [0][1][RTW89_UKRAINE] = 0, [1][1][RTW89_ACMA] = 0, @@ -31561,6 +31564,7 @@ const u8 
rtw89_8852c_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] [1][1][RTW89_MEXICO] = 3, [1][1][RTW89_MKK] = 0, [1][1][RTW89_QATAR] = 0, + [1][1][RTW89_THAILAND] = 0, [1][1][RTW89_UK] = 0, [1][1][RTW89_UKRAINE] = 0, [2][1][RTW89_ACMA] = 0, @@ -31571,25 +31575,66 @@ const u8 rtw89_8852c_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] [2][1][RTW89_KCC] = 0, [2][1][RTW89_MKK] = 0, [2][1][RTW89_QATAR] = 0, + [2][1][RTW89_THAILAND] = 0, [2][1][RTW89_UK] = 0, }; static +const u8 rtw89_8852c_tx_shape_lmt_ru[RTW89_BAND_NUM][RTW89_REGD_NUM] = { + [0][RTW89_ACMA] = 0, + [0][RTW89_CHILE] = 0, + [0][RTW89_CN] = 0, + [0][RTW89_ETSI] = 0, + [0][RTW89_FCC] = 3, + [0][RTW89_IC] = 3, + [0][RTW89_KCC] = 0, + [0][RTW89_MEXICO] = 3, + [0][RTW89_MKK] = 0, + [0][RTW89_QATAR] = 0, + [0][RTW89_THAILAND] = 0, + [0][RTW89_UK] = 0, + [0][RTW89_UKRAINE] = 0, + [1][RTW89_ACMA] = 0, + [1][RTW89_CHILE] = 0, + [1][RTW89_CN] = 0, + [1][RTW89_ETSI] = 0, + [1][RTW89_FCC] = 3, + [1][RTW89_IC] = 3, + [1][RTW89_KCC] = 0, + [1][RTW89_MEXICO] = 3, + [1][RTW89_MKK] = 0, + [1][RTW89_QATAR] = 0, + [1][RTW89_THAILAND] = 0, + [1][RTW89_UK] = 0, + [1][RTW89_UKRAINE] = 0, + [2][RTW89_ACMA] = 0, + [2][RTW89_CHILE] = 0, + [2][RTW89_ETSI] = 0, + [2][RTW89_FCC] = 0, + [2][RTW89_IC] = 0, + [2][RTW89_KCC] = 0, + [2][RTW89_MKK] = 0, + [2][RTW89_QATAR] = 0, + [2][RTW89_THAILAND] = 0, + [2][RTW89_UK] = 0, +}; + +static const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [RTW89_RS_LMT_NUM][RTW89_BF_NUM] [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = { - [0][0][0][0][RTW89_WW][0] = 58, - [0][0][0][0][RTW89_WW][1] = 58, - [0][0][0][0][RTW89_WW][2] = 58, - [0][0][0][0][RTW89_WW][3] = 58, - [0][0][0][0][RTW89_WW][4] = 58, - [0][0][0][0][RTW89_WW][5] = 58, - [0][0][0][0][RTW89_WW][6] = 58, - [0][0][0][0][RTW89_WW][7] = 58, - [0][0][0][0][RTW89_WW][8] = 58, - [0][0][0][0][RTW89_WW][9] = 58, - [0][0][0][0][RTW89_WW][10] = 58, - [0][0][0][0][RTW89_WW][11] = 58, + [0][0][0][0][RTW89_WW][0] = 56, + [0][0][0][0][RTW89_WW][1] = 56, + [0][0][0][0][RTW89_WW][2] = 56, + [0][0][0][0][RTW89_WW][3] = 56, + [0][0][0][0][RTW89_WW][4] = 56, + [0][0][0][0][RTW89_WW][5] = 56, + [0][0][0][0][RTW89_WW][6] = 56, + [0][0][0][0][RTW89_WW][7] = 56, + [0][0][0][0][RTW89_WW][8] = 56, + [0][0][0][0][RTW89_WW][9] = 56, + [0][0][0][0][RTW89_WW][10] = 56, + [0][0][0][0][RTW89_WW][11] = 56, [0][0][0][0][RTW89_WW][12] = 46, [0][0][0][0][RTW89_WW][13] = 72, [0][1][0][0][RTW89_WW][0] = 42, @@ -31609,9 +31654,9 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_WW][0] = 0, [1][0][0][0][RTW89_WW][1] = 0, [1][0][0][0][RTW89_WW][2] = 44, - [1][0][0][0][RTW89_WW][3] = 58, - [1][0][0][0][RTW89_WW][4] = 58, - [1][0][0][0][RTW89_WW][5] = 58, + [1][0][0][0][RTW89_WW][3] = 56, + [1][0][0][0][RTW89_WW][4] = 56, + [1][0][0][0][RTW89_WW][5] = 56, [1][0][0][0][RTW89_WW][6] = 46, [1][0][0][0][RTW89_WW][7] = 46, [1][0][0][0][RTW89_WW][8] = 28, @@ -31622,10 +31667,10 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_WW][13] = 0, [1][1][0][0][RTW89_WW][0] = 0, [1][1][0][0][RTW89_WW][1] = 0, - [1][1][0][0][RTW89_WW][2] = 46, - [1][1][0][0][RTW89_WW][3] = 46, - [1][1][0][0][RTW89_WW][4] = 46, - [1][1][0][0][RTW89_WW][5] = 46, + [1][1][0][0][RTW89_WW][2] = 44, + [1][1][0][0][RTW89_WW][3] = 44, + [1][1][0][0][RTW89_WW][4] = 44, + [1][1][0][0][RTW89_WW][5] = 44, [1][1][0][0][RTW89_WW][6] = 40, [1][1][0][0][RTW89_WW][7] = 40, [1][1][0][0][RTW89_WW][8] = 14, @@ -31646,7 +31691,7 @@ const s8 
rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_WW][9] = 58, [0][0][1][0][RTW89_WW][10] = 58, [0][0][1][0][RTW89_WW][11] = 58, - [0][0][1][0][RTW89_WW][12] = 58, + [0][0][1][0][RTW89_WW][12] = 40, [0][0][1][0][RTW89_WW][13] = 0, [0][1][1][0][RTW89_WW][0] = 46, [0][1][1][0][RTW89_WW][1] = 46, @@ -31690,7 +31735,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_WW][11] = 46, [0][1][2][0][RTW89_WW][12] = 16, [0][1][2][0][RTW89_WW][13] = 0, - [0][1][2][1][RTW89_WW][0] = 36, + [0][1][2][1][RTW89_WW][0] = 34, [0][1][2][1][RTW89_WW][1] = 34, [0][1][2][1][RTW89_WW][2] = 34, [0][1][2][1][RTW89_WW][3] = 34, @@ -31742,7 +31787,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_WW][7] = 34, [1][1][2][1][RTW89_WW][8] = 34, [1][1][2][1][RTW89_WW][9] = 34, - [1][1][2][1][RTW89_WW][10] = 36, + [1][1][2][1][RTW89_WW][10] = 34, [1][1][2][1][RTW89_WW][11] = 0, [1][1][2][1][RTW89_WW][12] = 0, [1][1][2][1][RTW89_WW][13] = 0, @@ -31752,156 +31797,169 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_IC][0] = 76, [0][0][0][0][RTW89_KCC][0] = 68, [0][0][0][0][RTW89_ACMA][0] = 60, - [0][0][0][0][RTW89_CN][0] = 58, + [0][0][0][0][RTW89_CN][0] = 56, [0][0][0][0][RTW89_UK][0] = 60, [0][0][0][0][RTW89_MEXICO][0] = 76, [0][0][0][0][RTW89_UKRAINE][0] = 60, [0][0][0][0][RTW89_CHILE][0] = 76, [0][0][0][0][RTW89_QATAR][0] = 60, + [0][0][0][0][RTW89_THAILAND][0] = 60, [0][0][0][0][RTW89_FCC][1] = 76, [0][0][0][0][RTW89_ETSI][1] = 60, [0][0][0][0][RTW89_MKK][1] = 68, [0][0][0][0][RTW89_IC][1] = 76, [0][0][0][0][RTW89_KCC][1] = 68, [0][0][0][0][RTW89_ACMA][1] = 60, - [0][0][0][0][RTW89_CN][1] = 58, + [0][0][0][0][RTW89_CN][1] = 56, [0][0][0][0][RTW89_UK][1] = 60, [0][0][0][0][RTW89_MEXICO][1] = 76, [0][0][0][0][RTW89_UKRAINE][1] = 60, [0][0][0][0][RTW89_CHILE][1] = 68, [0][0][0][0][RTW89_QATAR][1] = 60, + [0][0][0][0][RTW89_THAILAND][1] = 60, [0][0][0][0][RTW89_FCC][2] = 76, [0][0][0][0][RTW89_ETSI][2] = 60, [0][0][0][0][RTW89_MKK][2] = 68, [0][0][0][0][RTW89_IC][2] = 76, [0][0][0][0][RTW89_KCC][2] = 68, [0][0][0][0][RTW89_ACMA][2] = 60, - [0][0][0][0][RTW89_CN][2] = 58, + [0][0][0][0][RTW89_CN][2] = 56, [0][0][0][0][RTW89_UK][2] = 60, [0][0][0][0][RTW89_MEXICO][2] = 76, [0][0][0][0][RTW89_UKRAINE][2] = 60, [0][0][0][0][RTW89_CHILE][2] = 68, [0][0][0][0][RTW89_QATAR][2] = 60, + [0][0][0][0][RTW89_THAILAND][2] = 60, [0][0][0][0][RTW89_FCC][3] = 76, [0][0][0][0][RTW89_ETSI][3] = 60, [0][0][0][0][RTW89_MKK][3] = 68, [0][0][0][0][RTW89_IC][3] = 76, [0][0][0][0][RTW89_KCC][3] = 68, [0][0][0][0][RTW89_ACMA][3] = 60, - [0][0][0][0][RTW89_CN][3] = 58, + [0][0][0][0][RTW89_CN][3] = 56, [0][0][0][0][RTW89_UK][3] = 60, [0][0][0][0][RTW89_MEXICO][3] = 76, [0][0][0][0][RTW89_UKRAINE][3] = 60, [0][0][0][0][RTW89_CHILE][3] = 68, [0][0][0][0][RTW89_QATAR][3] = 60, + [0][0][0][0][RTW89_THAILAND][3] = 60, [0][0][0][0][RTW89_FCC][4] = 76, [0][0][0][0][RTW89_ETSI][4] = 60, [0][0][0][0][RTW89_MKK][4] = 68, [0][0][0][0][RTW89_IC][4] = 76, [0][0][0][0][RTW89_KCC][4] = 68, [0][0][0][0][RTW89_ACMA][4] = 60, - [0][0][0][0][RTW89_CN][4] = 58, + [0][0][0][0][RTW89_CN][4] = 56, [0][0][0][0][RTW89_UK][4] = 60, [0][0][0][0][RTW89_MEXICO][4] = 76, [0][0][0][0][RTW89_UKRAINE][4] = 60, [0][0][0][0][RTW89_CHILE][4] = 68, [0][0][0][0][RTW89_QATAR][4] = 60, + [0][0][0][0][RTW89_THAILAND][4] = 60, [0][0][0][0][RTW89_FCC][5] = 76, [0][0][0][0][RTW89_ETSI][5] = 60, [0][0][0][0][RTW89_MKK][5] = 68, [0][0][0][0][RTW89_IC][5] 
= 76, [0][0][0][0][RTW89_KCC][5] = 68, [0][0][0][0][RTW89_ACMA][5] = 60, - [0][0][0][0][RTW89_CN][5] = 58, + [0][0][0][0][RTW89_CN][5] = 56, [0][0][0][0][RTW89_UK][5] = 60, [0][0][0][0][RTW89_MEXICO][5] = 76, [0][0][0][0][RTW89_UKRAINE][5] = 60, [0][0][0][0][RTW89_CHILE][5] = 76, [0][0][0][0][RTW89_QATAR][5] = 60, + [0][0][0][0][RTW89_THAILAND][5] = 60, [0][0][0][0][RTW89_FCC][6] = 76, [0][0][0][0][RTW89_ETSI][6] = 60, [0][0][0][0][RTW89_MKK][6] = 68, [0][0][0][0][RTW89_IC][6] = 76, [0][0][0][0][RTW89_KCC][6] = 68, [0][0][0][0][RTW89_ACMA][6] = 60, - [0][0][0][0][RTW89_CN][6] = 58, + [0][0][0][0][RTW89_CN][6] = 56, [0][0][0][0][RTW89_UK][6] = 60, [0][0][0][0][RTW89_MEXICO][6] = 76, [0][0][0][0][RTW89_UKRAINE][6] = 60, [0][0][0][0][RTW89_CHILE][6] = 76, [0][0][0][0][RTW89_QATAR][6] = 60, + [0][0][0][0][RTW89_THAILAND][6] = 60, [0][0][0][0][RTW89_FCC][7] = 76, [0][0][0][0][RTW89_ETSI][7] = 60, [0][0][0][0][RTW89_MKK][7] = 68, [0][0][0][0][RTW89_IC][7] = 76, [0][0][0][0][RTW89_KCC][7] = 68, [0][0][0][0][RTW89_ACMA][7] = 60, - [0][0][0][0][RTW89_CN][7] = 58, + [0][0][0][0][RTW89_CN][7] = 56, [0][0][0][0][RTW89_UK][7] = 60, [0][0][0][0][RTW89_MEXICO][7] = 76, [0][0][0][0][RTW89_UKRAINE][7] = 60, [0][0][0][0][RTW89_CHILE][7] = 76, [0][0][0][0][RTW89_QATAR][7] = 60, + [0][0][0][0][RTW89_THAILAND][7] = 60, [0][0][0][0][RTW89_FCC][8] = 76, [0][0][0][0][RTW89_ETSI][8] = 60, [0][0][0][0][RTW89_MKK][8] = 68, [0][0][0][0][RTW89_IC][8] = 76, [0][0][0][0][RTW89_KCC][8] = 68, [0][0][0][0][RTW89_ACMA][8] = 60, - [0][0][0][0][RTW89_CN][8] = 58, + [0][0][0][0][RTW89_CN][8] = 56, [0][0][0][0][RTW89_UK][8] = 60, [0][0][0][0][RTW89_MEXICO][8] = 76, [0][0][0][0][RTW89_UKRAINE][8] = 60, [0][0][0][0][RTW89_CHILE][8] = 76, [0][0][0][0][RTW89_QATAR][8] = 60, + [0][0][0][0][RTW89_THAILAND][8] = 60, [0][0][0][0][RTW89_FCC][9] = 76, [0][0][0][0][RTW89_ETSI][9] = 60, [0][0][0][0][RTW89_MKK][9] = 68, [0][0][0][0][RTW89_IC][9] = 76, [0][0][0][0][RTW89_KCC][9] = 70, [0][0][0][0][RTW89_ACMA][9] = 60, - [0][0][0][0][RTW89_CN][9] = 58, + [0][0][0][0][RTW89_CN][9] = 56, [0][0][0][0][RTW89_UK][9] = 60, [0][0][0][0][RTW89_MEXICO][9] = 76, [0][0][0][0][RTW89_UKRAINE][9] = 60, [0][0][0][0][RTW89_CHILE][9] = 76, [0][0][0][0][RTW89_QATAR][9] = 60, + [0][0][0][0][RTW89_THAILAND][9] = 60, [0][0][0][0][RTW89_FCC][10] = 76, [0][0][0][0][RTW89_ETSI][10] = 60, [0][0][0][0][RTW89_MKK][10] = 68, [0][0][0][0][RTW89_IC][10] = 76, [0][0][0][0][RTW89_KCC][10] = 70, [0][0][0][0][RTW89_ACMA][10] = 60, - [0][0][0][0][RTW89_CN][10] = 58, + [0][0][0][0][RTW89_CN][10] = 56, [0][0][0][0][RTW89_UK][10] = 60, [0][0][0][0][RTW89_MEXICO][10] = 76, [0][0][0][0][RTW89_UKRAINE][10] = 60, [0][0][0][0][RTW89_CHILE][10] = 76, [0][0][0][0][RTW89_QATAR][10] = 60, + [0][0][0][0][RTW89_THAILAND][10] = 60, [0][0][0][0][RTW89_FCC][11] = 58, [0][0][0][0][RTW89_ETSI][11] = 60, [0][0][0][0][RTW89_MKK][11] = 68, [0][0][0][0][RTW89_IC][11] = 58, [0][0][0][0][RTW89_KCC][11] = 70, [0][0][0][0][RTW89_ACMA][11] = 60, - [0][0][0][0][RTW89_CN][11] = 58, + [0][0][0][0][RTW89_CN][11] = 56, [0][0][0][0][RTW89_UK][11] = 60, [0][0][0][0][RTW89_MEXICO][11] = 58, [0][0][0][0][RTW89_UKRAINE][11] = 60, [0][0][0][0][RTW89_CHILE][11] = 58, [0][0][0][0][RTW89_QATAR][11] = 60, + [0][0][0][0][RTW89_THAILAND][11] = 60, [0][0][0][0][RTW89_FCC][12] = 46, [0][0][0][0][RTW89_ETSI][12] = 60, [0][0][0][0][RTW89_MKK][12] = 68, [0][0][0][0][RTW89_IC][12] = 46, [0][0][0][0][RTW89_KCC][12] = 70, [0][0][0][0][RTW89_ACMA][12] = 60, - [0][0][0][0][RTW89_CN][12] = 58, + [0][0][0][0][RTW89_CN][12] = 56, 
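(The [RTW89_WW] "worldwide" rows at the head of this table appear to track the most restrictive concrete domain per channel: the CN reductions from 58 to 56 in this hunk are mirrored one-for-one by the WW rows above. An illustrative derivation under that assumption only; this is not code from the driver:)

#include <linux/limits.h>  /* S8_MAX */

/* Worldwide entry as the minimum over all concrete regulatory domains;
 * channels forbidden in every domain stay at the 127 sentinel. */
static s8 ww_entry(const s8 lmt_per_regd[RTW89_REGD_NUM])
{
        s8 lo = S8_MAX;
        int regd;

        for (regd = 0; regd < RTW89_REGD_NUM; regd++) {
                if (regd == RTW89_WW)
                        continue; /* the WW slot is the output, not an input */
                if (lmt_per_regd[regd] < lo)
                        lo = lmt_per_regd[regd];
        }
        return lo;
}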
[0][0][0][0][RTW89_UK][12] = 60, [0][0][0][0][RTW89_MEXICO][12] = 46, [0][0][0][0][RTW89_UKRAINE][12] = 60, [0][0][0][0][RTW89_CHILE][12] = 46, [0][0][0][0][RTW89_QATAR][12] = 60, + [0][0][0][0][RTW89_THAILAND][12] = 60, [0][0][0][0][RTW89_FCC][13] = 127, [0][0][0][0][RTW89_ETSI][13] = 127, [0][0][0][0][RTW89_MKK][13] = 72, @@ -31914,6 +31972,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_UKRAINE][13] = 127, [0][0][0][0][RTW89_CHILE][13] = 127, [0][0][0][0][RTW89_QATAR][13] = 127, + [0][0][0][0][RTW89_THAILAND][13] = 127, [0][1][0][0][RTW89_FCC][0] = 76, [0][1][0][0][RTW89_ETSI][0] = 48, [0][1][0][0][RTW89_MKK][0] = 58, @@ -31926,6 +31985,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][0] = 48, [0][1][0][0][RTW89_CHILE][0] = 76, [0][1][0][0][RTW89_QATAR][0] = 48, + [0][1][0][0][RTW89_THAILAND][0] = 48, [0][1][0][0][RTW89_FCC][1] = 76, [0][1][0][0][RTW89_ETSI][1] = 48, [0][1][0][0][RTW89_MKK][1] = 58, @@ -31938,6 +31998,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][1] = 48, [0][1][0][0][RTW89_CHILE][1] = 54, [0][1][0][0][RTW89_QATAR][1] = 48, + [0][1][0][0][RTW89_THAILAND][1] = 48, [0][1][0][0][RTW89_FCC][2] = 76, [0][1][0][0][RTW89_ETSI][2] = 48, [0][1][0][0][RTW89_MKK][2] = 58, @@ -31950,6 +32011,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][2] = 48, [0][1][0][0][RTW89_CHILE][2] = 54, [0][1][0][0][RTW89_QATAR][2] = 48, + [0][1][0][0][RTW89_THAILAND][2] = 48, [0][1][0][0][RTW89_FCC][3] = 76, [0][1][0][0][RTW89_ETSI][3] = 48, [0][1][0][0][RTW89_MKK][3] = 58, @@ -31962,6 +32024,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][3] = 48, [0][1][0][0][RTW89_CHILE][3] = 54, [0][1][0][0][RTW89_QATAR][3] = 48, + [0][1][0][0][RTW89_THAILAND][3] = 48, [0][1][0][0][RTW89_FCC][4] = 76, [0][1][0][0][RTW89_ETSI][4] = 48, [0][1][0][0][RTW89_MKK][4] = 58, @@ -31974,6 +32037,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][4] = 48, [0][1][0][0][RTW89_CHILE][4] = 54, [0][1][0][0][RTW89_QATAR][4] = 48, + [0][1][0][0][RTW89_THAILAND][4] = 48, [0][1][0][0][RTW89_FCC][5] = 76, [0][1][0][0][RTW89_ETSI][5] = 48, [0][1][0][0][RTW89_MKK][5] = 58, @@ -31986,6 +32050,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][5] = 48, [0][1][0][0][RTW89_CHILE][5] = 76, [0][1][0][0][RTW89_QATAR][5] = 48, + [0][1][0][0][RTW89_THAILAND][5] = 48, [0][1][0][0][RTW89_FCC][6] = 76, [0][1][0][0][RTW89_ETSI][6] = 48, [0][1][0][0][RTW89_MKK][6] = 58, @@ -31998,6 +32063,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][6] = 48, [0][1][0][0][RTW89_CHILE][6] = 76, [0][1][0][0][RTW89_QATAR][6] = 48, + [0][1][0][0][RTW89_THAILAND][6] = 48, [0][1][0][0][RTW89_FCC][7] = 76, [0][1][0][0][RTW89_ETSI][7] = 48, [0][1][0][0][RTW89_MKK][7] = 58, @@ -32010,6 +32076,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][7] = 48, [0][1][0][0][RTW89_CHILE][7] = 76, [0][1][0][0][RTW89_QATAR][7] = 48, + [0][1][0][0][RTW89_THAILAND][7] = 48, [0][1][0][0][RTW89_FCC][8] = 76, [0][1][0][0][RTW89_ETSI][8] = 48, [0][1][0][0][RTW89_MKK][8] = 58, @@ -32022,6 +32089,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][8] = 48, [0][1][0][0][RTW89_CHILE][8] = 76, 
[0][1][0][0][RTW89_QATAR][8] = 48, + [0][1][0][0][RTW89_THAILAND][8] = 48, [0][1][0][0][RTW89_FCC][9] = 70, [0][1][0][0][RTW89_ETSI][9] = 48, [0][1][0][0][RTW89_MKK][9] = 58, @@ -32034,6 +32102,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][9] = 48, [0][1][0][0][RTW89_CHILE][9] = 70, [0][1][0][0][RTW89_QATAR][9] = 48, + [0][1][0][0][RTW89_THAILAND][9] = 48, [0][1][0][0][RTW89_FCC][10] = 72, [0][1][0][0][RTW89_ETSI][10] = 48, [0][1][0][0][RTW89_MKK][10] = 58, @@ -32046,6 +32115,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][10] = 48, [0][1][0][0][RTW89_CHILE][10] = 72, [0][1][0][0][RTW89_QATAR][10] = 48, + [0][1][0][0][RTW89_THAILAND][10] = 48, [0][1][0][0][RTW89_FCC][11] = 44, [0][1][0][0][RTW89_ETSI][11] = 48, [0][1][0][0][RTW89_MKK][11] = 58, @@ -32058,6 +32128,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][11] = 48, [0][1][0][0][RTW89_CHILE][11] = 44, [0][1][0][0][RTW89_QATAR][11] = 48, + [0][1][0][0][RTW89_THAILAND][11] = 48, [0][1][0][0][RTW89_FCC][12] = 18, [0][1][0][0][RTW89_ETSI][12] = 48, [0][1][0][0][RTW89_MKK][12] = 58, @@ -32070,6 +32141,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][12] = 48, [0][1][0][0][RTW89_CHILE][12] = 18, [0][1][0][0][RTW89_QATAR][12] = 48, + [0][1][0][0][RTW89_THAILAND][12] = 48, [0][1][0][0][RTW89_FCC][13] = 127, [0][1][0][0][RTW89_ETSI][13] = 127, [0][1][0][0][RTW89_MKK][13] = 60, @@ -32082,6 +32154,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_UKRAINE][13] = 127, [0][1][0][0][RTW89_CHILE][13] = 127, [0][1][0][0][RTW89_QATAR][13] = 127, + [0][1][0][0][RTW89_THAILAND][13] = 127, [1][0][0][0][RTW89_FCC][0] = 127, [1][0][0][0][RTW89_ETSI][0] = 127, [1][0][0][0][RTW89_MKK][0] = 127, @@ -32094,6 +32167,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_UKRAINE][0] = 127, [1][0][0][0][RTW89_CHILE][0] = 127, [1][0][0][0][RTW89_QATAR][0] = 127, + [1][0][0][0][RTW89_THAILAND][0] = 127, [1][0][0][0][RTW89_FCC][1] = 127, [1][0][0][0][RTW89_ETSI][1] = 127, [1][0][0][0][RTW89_MKK][1] = 127, @@ -32106,114 +32180,124 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_UKRAINE][1] = 127, [1][0][0][0][RTW89_CHILE][1] = 127, [1][0][0][0][RTW89_QATAR][1] = 127, + [1][0][0][0][RTW89_THAILAND][1] = 127, [1][0][0][0][RTW89_FCC][2] = 44, [1][0][0][0][RTW89_ETSI][2] = 60, [1][0][0][0][RTW89_MKK][2] = 66, [1][0][0][0][RTW89_IC][2] = 44, [1][0][0][0][RTW89_KCC][2] = 68, [1][0][0][0][RTW89_ACMA][2] = 60, - [1][0][0][0][RTW89_CN][2] = 58, + [1][0][0][0][RTW89_CN][2] = 56, [1][0][0][0][RTW89_UK][2] = 60, [1][0][0][0][RTW89_MEXICO][2] = 44, [1][0][0][0][RTW89_UKRAINE][2] = 60, [1][0][0][0][RTW89_CHILE][2] = 44, [1][0][0][0][RTW89_QATAR][2] = 60, + [1][0][0][0][RTW89_THAILAND][2] = 60, [1][0][0][0][RTW89_FCC][3] = 60, [1][0][0][0][RTW89_ETSI][3] = 60, [1][0][0][0][RTW89_MKK][3] = 66, [1][0][0][0][RTW89_IC][3] = 60, [1][0][0][0][RTW89_KCC][3] = 68, [1][0][0][0][RTW89_ACMA][3] = 60, - [1][0][0][0][RTW89_CN][3] = 58, + [1][0][0][0][RTW89_CN][3] = 56, [1][0][0][0][RTW89_UK][3] = 60, [1][0][0][0][RTW89_MEXICO][3] = 60, [1][0][0][0][RTW89_UKRAINE][3] = 60, [1][0][0][0][RTW89_CHILE][3] = 60, [1][0][0][0][RTW89_QATAR][3] = 60, + [1][0][0][0][RTW89_THAILAND][3] = 60, [1][0][0][0][RTW89_FCC][4] = 60, [1][0][0][0][RTW89_ETSI][4] = 60, [1][0][0][0][RTW89_MKK][4] = 66, 
[1][0][0][0][RTW89_IC][4] = 60, [1][0][0][0][RTW89_KCC][4] = 68, [1][0][0][0][RTW89_ACMA][4] = 60, - [1][0][0][0][RTW89_CN][4] = 58, + [1][0][0][0][RTW89_CN][4] = 56, [1][0][0][0][RTW89_UK][4] = 60, [1][0][0][0][RTW89_MEXICO][4] = 60, [1][0][0][0][RTW89_UKRAINE][4] = 60, [1][0][0][0][RTW89_CHILE][4] = 60, [1][0][0][0][RTW89_QATAR][4] = 60, + [1][0][0][0][RTW89_THAILAND][4] = 60, [1][0][0][0][RTW89_FCC][5] = 62, [1][0][0][0][RTW89_ETSI][5] = 60, [1][0][0][0][RTW89_MKK][5] = 66, [1][0][0][0][RTW89_IC][5] = 62, [1][0][0][0][RTW89_KCC][5] = 68, [1][0][0][0][RTW89_ACMA][5] = 60, - [1][0][0][0][RTW89_CN][5] = 58, + [1][0][0][0][RTW89_CN][5] = 56, [1][0][0][0][RTW89_UK][5] = 60, [1][0][0][0][RTW89_MEXICO][5] = 62, [1][0][0][0][RTW89_UKRAINE][5] = 60, [1][0][0][0][RTW89_CHILE][5] = 62, [1][0][0][0][RTW89_QATAR][5] = 60, + [1][0][0][0][RTW89_THAILAND][5] = 60, [1][0][0][0][RTW89_FCC][6] = 46, [1][0][0][0][RTW89_ETSI][6] = 60, [1][0][0][0][RTW89_MKK][6] = 66, [1][0][0][0][RTW89_IC][6] = 46, [1][0][0][0][RTW89_KCC][6] = 68, [1][0][0][0][RTW89_ACMA][6] = 60, - [1][0][0][0][RTW89_CN][6] = 58, + [1][0][0][0][RTW89_CN][6] = 56, [1][0][0][0][RTW89_UK][6] = 60, [1][0][0][0][RTW89_MEXICO][6] = 46, [1][0][0][0][RTW89_UKRAINE][6] = 60, [1][0][0][0][RTW89_CHILE][6] = 46, [1][0][0][0][RTW89_QATAR][6] = 60, + [1][0][0][0][RTW89_THAILAND][6] = 60, [1][0][0][0][RTW89_FCC][7] = 46, [1][0][0][0][RTW89_ETSI][7] = 60, [1][0][0][0][RTW89_MKK][7] = 66, [1][0][0][0][RTW89_IC][7] = 46, [1][0][0][0][RTW89_KCC][7] = 68, [1][0][0][0][RTW89_ACMA][7] = 60, - [1][0][0][0][RTW89_CN][7] = 58, + [1][0][0][0][RTW89_CN][7] = 56, [1][0][0][0][RTW89_UK][7] = 60, [1][0][0][0][RTW89_MEXICO][7] = 46, [1][0][0][0][RTW89_UKRAINE][7] = 60, [1][0][0][0][RTW89_CHILE][7] = 46, [1][0][0][0][RTW89_QATAR][7] = 60, + [1][0][0][0][RTW89_THAILAND][7] = 60, [1][0][0][0][RTW89_FCC][8] = 28, [1][0][0][0][RTW89_ETSI][8] = 60, [1][0][0][0][RTW89_MKK][8] = 66, [1][0][0][0][RTW89_IC][8] = 28, [1][0][0][0][RTW89_KCC][8] = 70, [1][0][0][0][RTW89_ACMA][8] = 60, - [1][0][0][0][RTW89_CN][8] = 58, + [1][0][0][0][RTW89_CN][8] = 56, [1][0][0][0][RTW89_UK][8] = 60, [1][0][0][0][RTW89_MEXICO][8] = 28, [1][0][0][0][RTW89_UKRAINE][8] = 60, [1][0][0][0][RTW89_CHILE][8] = 28, [1][0][0][0][RTW89_QATAR][8] = 60, + [1][0][0][0][RTW89_THAILAND][8] = 60, [1][0][0][0][RTW89_FCC][9] = 26, [1][0][0][0][RTW89_ETSI][9] = 60, [1][0][0][0][RTW89_MKK][9] = 66, [1][0][0][0][RTW89_IC][9] = 26, [1][0][0][0][RTW89_KCC][9] = 70, [1][0][0][0][RTW89_ACMA][9] = 60, - [1][0][0][0][RTW89_CN][9] = 58, + [1][0][0][0][RTW89_CN][9] = 56, [1][0][0][0][RTW89_UK][9] = 60, [1][0][0][0][RTW89_MEXICO][9] = 26, [1][0][0][0][RTW89_UKRAINE][9] = 60, [1][0][0][0][RTW89_CHILE][9] = 26, [1][0][0][0][RTW89_QATAR][9] = 60, + [1][0][0][0][RTW89_THAILAND][9] = 60, [1][0][0][0][RTW89_FCC][10] = 26, [1][0][0][0][RTW89_ETSI][10] = 60, [1][0][0][0][RTW89_MKK][10] = 66, [1][0][0][0][RTW89_IC][10] = 26, [1][0][0][0][RTW89_KCC][10] = 70, [1][0][0][0][RTW89_ACMA][10] = 60, - [1][0][0][0][RTW89_CN][10] = 58, + [1][0][0][0][RTW89_CN][10] = 56, [1][0][0][0][RTW89_UK][10] = 60, [1][0][0][0][RTW89_MEXICO][10] = 26, [1][0][0][0][RTW89_UKRAINE][10] = 60, [1][0][0][0][RTW89_CHILE][10] = 26, [1][0][0][0][RTW89_QATAR][10] = 60, + [1][0][0][0][RTW89_THAILAND][10] = 60, [1][0][0][0][RTW89_FCC][11] = 127, [1][0][0][0][RTW89_ETSI][11] = 127, [1][0][0][0][RTW89_MKK][11] = 127, @@ -32226,6 +32310,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_UKRAINE][11] = 127, [1][0][0][0][RTW89_CHILE][11] = 
127, [1][0][0][0][RTW89_QATAR][11] = 127, + [1][0][0][0][RTW89_THAILAND][11] = 127, [1][0][0][0][RTW89_FCC][12] = 127, [1][0][0][0][RTW89_ETSI][12] = 127, [1][0][0][0][RTW89_MKK][12] = 127, @@ -32238,6 +32323,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_UKRAINE][12] = 127, [1][0][0][0][RTW89_CHILE][12] = 127, [1][0][0][0][RTW89_QATAR][12] = 127, + [1][0][0][0][RTW89_THAILAND][12] = 127, [1][0][0][0][RTW89_FCC][13] = 127, [1][0][0][0][RTW89_ETSI][13] = 127, [1][0][0][0][RTW89_MKK][13] = 127, @@ -32250,6 +32336,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_UKRAINE][13] = 127, [1][0][0][0][RTW89_CHILE][13] = 127, [1][0][0][0][RTW89_QATAR][13] = 127, + [1][0][0][0][RTW89_THAILAND][13] = 127, [1][1][0][0][RTW89_FCC][0] = 127, [1][1][0][0][RTW89_ETSI][0] = 127, [1][1][0][0][RTW89_MKK][0] = 127, @@ -32262,6 +32349,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_UKRAINE][0] = 127, [1][1][0][0][RTW89_CHILE][0] = 127, [1][1][0][0][RTW89_QATAR][0] = 127, + [1][1][0][0][RTW89_THAILAND][0] = 127, [1][1][0][0][RTW89_FCC][1] = 127, [1][1][0][0][RTW89_ETSI][1] = 127, [1][1][0][0][RTW89_MKK][1] = 127, @@ -32274,114 +32362,124 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_UKRAINE][1] = 127, [1][1][0][0][RTW89_CHILE][1] = 127, [1][1][0][0][RTW89_QATAR][1] = 127, + [1][1][0][0][RTW89_THAILAND][1] = 127, [1][1][0][0][RTW89_FCC][2] = 46, [1][1][0][0][RTW89_ETSI][2] = 48, [1][1][0][0][RTW89_MKK][2] = 58, [1][1][0][0][RTW89_IC][2] = 46, [1][1][0][0][RTW89_KCC][2] = 56, [1][1][0][0][RTW89_ACMA][2] = 48, - [1][1][0][0][RTW89_CN][2] = 46, + [1][1][0][0][RTW89_CN][2] = 44, [1][1][0][0][RTW89_UK][2] = 48, [1][1][0][0][RTW89_MEXICO][2] = 46, [1][1][0][0][RTW89_UKRAINE][2] = 48, [1][1][0][0][RTW89_CHILE][2] = 46, [1][1][0][0][RTW89_QATAR][2] = 48, + [1][1][0][0][RTW89_THAILAND][2] = 48, [1][1][0][0][RTW89_FCC][3] = 46, [1][1][0][0][RTW89_ETSI][3] = 48, [1][1][0][0][RTW89_MKK][3] = 58, [1][1][0][0][RTW89_IC][3] = 46, [1][1][0][0][RTW89_KCC][3] = 56, [1][1][0][0][RTW89_ACMA][3] = 48, - [1][1][0][0][RTW89_CN][3] = 46, + [1][1][0][0][RTW89_CN][3] = 44, [1][1][0][0][RTW89_UK][3] = 48, [1][1][0][0][RTW89_MEXICO][3] = 46, [1][1][0][0][RTW89_UKRAINE][3] = 48, [1][1][0][0][RTW89_CHILE][3] = 46, [1][1][0][0][RTW89_QATAR][3] = 48, + [1][1][0][0][RTW89_THAILAND][3] = 48, [1][1][0][0][RTW89_FCC][4] = 46, [1][1][0][0][RTW89_ETSI][4] = 48, [1][1][0][0][RTW89_MKK][4] = 58, [1][1][0][0][RTW89_IC][4] = 46, [1][1][0][0][RTW89_KCC][4] = 56, [1][1][0][0][RTW89_ACMA][4] = 48, - [1][1][0][0][RTW89_CN][4] = 46, + [1][1][0][0][RTW89_CN][4] = 44, [1][1][0][0][RTW89_UK][4] = 48, [1][1][0][0][RTW89_MEXICO][4] = 46, [1][1][0][0][RTW89_UKRAINE][4] = 48, [1][1][0][0][RTW89_CHILE][4] = 46, [1][1][0][0][RTW89_QATAR][4] = 48, + [1][1][0][0][RTW89_THAILAND][4] = 48, [1][1][0][0][RTW89_FCC][5] = 48, [1][1][0][0][RTW89_ETSI][5] = 48, [1][1][0][0][RTW89_MKK][5] = 58, [1][1][0][0][RTW89_IC][5] = 48, [1][1][0][0][RTW89_KCC][5] = 56, [1][1][0][0][RTW89_ACMA][5] = 48, - [1][1][0][0][RTW89_CN][5] = 46, + [1][1][0][0][RTW89_CN][5] = 44, [1][1][0][0][RTW89_UK][5] = 48, [1][1][0][0][RTW89_MEXICO][5] = 48, [1][1][0][0][RTW89_UKRAINE][5] = 48, [1][1][0][0][RTW89_CHILE][5] = 48, [1][1][0][0][RTW89_QATAR][5] = 48, + [1][1][0][0][RTW89_THAILAND][5] = 48, [1][1][0][0][RTW89_FCC][6] = 40, [1][1][0][0][RTW89_ETSI][6] = 48, [1][1][0][0][RTW89_MKK][6] = 58, [1][1][0][0][RTW89_IC][6] = 40, 
[1][1][0][0][RTW89_KCC][6] = 56, [1][1][0][0][RTW89_ACMA][6] = 48, - [1][1][0][0][RTW89_CN][6] = 46, + [1][1][0][0][RTW89_CN][6] = 44, [1][1][0][0][RTW89_UK][6] = 48, [1][1][0][0][RTW89_MEXICO][6] = 40, [1][1][0][0][RTW89_UKRAINE][6] = 48, [1][1][0][0][RTW89_CHILE][6] = 40, [1][1][0][0][RTW89_QATAR][6] = 48, + [1][1][0][0][RTW89_THAILAND][6] = 48, [1][1][0][0][RTW89_FCC][7] = 40, [1][1][0][0][RTW89_ETSI][7] = 48, [1][1][0][0][RTW89_MKK][7] = 58, [1][1][0][0][RTW89_IC][7] = 40, [1][1][0][0][RTW89_KCC][7] = 56, [1][1][0][0][RTW89_ACMA][7] = 48, - [1][1][0][0][RTW89_CN][7] = 46, + [1][1][0][0][RTW89_CN][7] = 44, [1][1][0][0][RTW89_UK][7] = 48, [1][1][0][0][RTW89_MEXICO][7] = 40, [1][1][0][0][RTW89_UKRAINE][7] = 48, [1][1][0][0][RTW89_CHILE][7] = 40, [1][1][0][0][RTW89_QATAR][7] = 48, + [1][1][0][0][RTW89_THAILAND][7] = 48, [1][1][0][0][RTW89_FCC][8] = 14, [1][1][0][0][RTW89_ETSI][8] = 48, [1][1][0][0][RTW89_MKK][8] = 58, [1][1][0][0][RTW89_IC][8] = 14, [1][1][0][0][RTW89_KCC][8] = 58, [1][1][0][0][RTW89_ACMA][8] = 48, - [1][1][0][0][RTW89_CN][8] = 46, + [1][1][0][0][RTW89_CN][8] = 44, [1][1][0][0][RTW89_UK][8] = 48, [1][1][0][0][RTW89_MEXICO][8] = 14, [1][1][0][0][RTW89_UKRAINE][8] = 48, [1][1][0][0][RTW89_CHILE][8] = 14, [1][1][0][0][RTW89_QATAR][8] = 48, + [1][1][0][0][RTW89_THAILAND][8] = 48, [1][1][0][0][RTW89_FCC][9] = 14, [1][1][0][0][RTW89_ETSI][9] = 48, [1][1][0][0][RTW89_MKK][9] = 58, [1][1][0][0][RTW89_IC][9] = 14, [1][1][0][0][RTW89_KCC][9] = 58, [1][1][0][0][RTW89_ACMA][9] = 48, - [1][1][0][0][RTW89_CN][9] = 46, + [1][1][0][0][RTW89_CN][9] = 44, [1][1][0][0][RTW89_UK][9] = 48, [1][1][0][0][RTW89_MEXICO][9] = 14, [1][1][0][0][RTW89_UKRAINE][9] = 48, [1][1][0][0][RTW89_CHILE][9] = 14, [1][1][0][0][RTW89_QATAR][9] = 48, + [1][1][0][0][RTW89_THAILAND][9] = 48, [1][1][0][0][RTW89_FCC][10] = 12, [1][1][0][0][RTW89_ETSI][10] = 48, [1][1][0][0][RTW89_MKK][10] = 56, [1][1][0][0][RTW89_IC][10] = 12, [1][1][0][0][RTW89_KCC][10] = 58, [1][1][0][0][RTW89_ACMA][10] = 48, - [1][1][0][0][RTW89_CN][10] = 46, + [1][1][0][0][RTW89_CN][10] = 44, [1][1][0][0][RTW89_UK][10] = 48, [1][1][0][0][RTW89_MEXICO][10] = 12, [1][1][0][0][RTW89_UKRAINE][10] = 48, [1][1][0][0][RTW89_CHILE][10] = 12, [1][1][0][0][RTW89_QATAR][10] = 48, + [1][1][0][0][RTW89_THAILAND][10] = 48, [1][1][0][0][RTW89_FCC][11] = 127, [1][1][0][0][RTW89_ETSI][11] = 127, [1][1][0][0][RTW89_MKK][11] = 127, @@ -32394,6 +32492,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_UKRAINE][11] = 127, [1][1][0][0][RTW89_CHILE][11] = 127, [1][1][0][0][RTW89_QATAR][11] = 127, + [1][1][0][0][RTW89_THAILAND][11] = 127, [1][1][0][0][RTW89_FCC][12] = 127, [1][1][0][0][RTW89_ETSI][12] = 127, [1][1][0][0][RTW89_MKK][12] = 127, @@ -32406,6 +32505,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_UKRAINE][12] = 127, [1][1][0][0][RTW89_CHILE][12] = 127, [1][1][0][0][RTW89_QATAR][12] = 127, + [1][1][0][0][RTW89_THAILAND][12] = 127, [1][1][0][0][RTW89_FCC][13] = 127, [1][1][0][0][RTW89_ETSI][13] = 127, [1][1][0][0][RTW89_MKK][13] = 127, @@ -32418,6 +32518,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_UKRAINE][13] = 127, [1][1][0][0][RTW89_CHILE][13] = 127, [1][1][0][0][RTW89_QATAR][13] = 127, + [1][1][0][0][RTW89_THAILAND][13] = 127, [0][0][1][0][RTW89_FCC][0] = 66, [0][0][1][0][RTW89_ETSI][0] = 60, [0][0][1][0][RTW89_MKK][0] = 76, @@ -32430,6 +32531,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] 
[0][0][1][0][RTW89_UKRAINE][0] = 60, [0][0][1][0][RTW89_CHILE][0] = 66, [0][0][1][0][RTW89_QATAR][0] = 60, + [0][0][1][0][RTW89_THAILAND][0] = 60, [0][0][1][0][RTW89_FCC][1] = 68, [0][0][1][0][RTW89_ETSI][1] = 60, [0][0][1][0][RTW89_MKK][1] = 78, @@ -32442,6 +32544,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][1] = 60, [0][0][1][0][RTW89_CHILE][1] = 68, [0][0][1][0][RTW89_QATAR][1] = 60, + [0][0][1][0][RTW89_THAILAND][1] = 60, [0][0][1][0][RTW89_FCC][2] = 72, [0][0][1][0][RTW89_ETSI][2] = 60, [0][0][1][0][RTW89_MKK][2] = 78, @@ -32454,6 +32557,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][2] = 60, [0][0][1][0][RTW89_CHILE][2] = 62, [0][0][1][0][RTW89_QATAR][2] = 60, + [0][0][1][0][RTW89_THAILAND][2] = 60, [0][0][1][0][RTW89_FCC][3] = 76, [0][0][1][0][RTW89_ETSI][3] = 60, [0][0][1][0][RTW89_MKK][3] = 78, @@ -32466,6 +32570,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][3] = 60, [0][0][1][0][RTW89_CHILE][3] = 62, [0][0][1][0][RTW89_QATAR][3] = 60, + [0][0][1][0][RTW89_THAILAND][3] = 60, [0][0][1][0][RTW89_FCC][4] = 80, [0][0][1][0][RTW89_ETSI][4] = 60, [0][0][1][0][RTW89_MKK][4] = 78, @@ -32478,6 +32583,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][4] = 60, [0][0][1][0][RTW89_CHILE][4] = 62, [0][0][1][0][RTW89_QATAR][4] = 60, + [0][0][1][0][RTW89_THAILAND][4] = 60, [0][0][1][0][RTW89_FCC][5] = 80, [0][0][1][0][RTW89_ETSI][5] = 60, [0][0][1][0][RTW89_MKK][5] = 78, @@ -32490,6 +32596,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][5] = 60, [0][0][1][0][RTW89_CHILE][5] = 80, [0][0][1][0][RTW89_QATAR][5] = 60, + [0][0][1][0][RTW89_THAILAND][5] = 60, [0][0][1][0][RTW89_FCC][6] = 80, [0][0][1][0][RTW89_ETSI][6] = 60, [0][0][1][0][RTW89_MKK][6] = 76, @@ -32502,6 +32609,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][6] = 60, [0][0][1][0][RTW89_CHILE][6] = 70, [0][0][1][0][RTW89_QATAR][6] = 60, + [0][0][1][0][RTW89_THAILAND][6] = 60, [0][0][1][0][RTW89_FCC][7] = 80, [0][0][1][0][RTW89_ETSI][7] = 60, [0][0][1][0][RTW89_MKK][7] = 78, @@ -32514,6 +32622,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][7] = 60, [0][0][1][0][RTW89_CHILE][7] = 70, [0][0][1][0][RTW89_QATAR][7] = 60, + [0][0][1][0][RTW89_THAILAND][7] = 60, [0][0][1][0][RTW89_FCC][8] = 80, [0][0][1][0][RTW89_ETSI][8] = 60, [0][0][1][0][RTW89_MKK][8] = 78, @@ -32526,6 +32635,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][8] = 60, [0][0][1][0][RTW89_CHILE][8] = 70, [0][0][1][0][RTW89_QATAR][8] = 60, + [0][0][1][0][RTW89_THAILAND][8] = 60, [0][0][1][0][RTW89_FCC][9] = 76, [0][0][1][0][RTW89_ETSI][9] = 60, [0][0][1][0][RTW89_MKK][9] = 78, @@ -32538,6 +32648,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][9] = 60, [0][0][1][0][RTW89_CHILE][9] = 76, [0][0][1][0][RTW89_QATAR][9] = 60, + [0][0][1][0][RTW89_THAILAND][9] = 60, [0][0][1][0][RTW89_FCC][10] = 66, [0][0][1][0][RTW89_ETSI][10] = 60, [0][0][1][0][RTW89_MKK][10] = 78, @@ -32550,6 +32661,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][10] = 60, [0][0][1][0][RTW89_CHILE][10] = 66, [0][0][1][0][RTW89_QATAR][10] = 60, + [0][0][1][0][RTW89_THAILAND][10] = 60, [0][0][1][0][RTW89_FCC][11] = 
62, [0][0][1][0][RTW89_ETSI][11] = 60, [0][0][1][0][RTW89_MKK][11] = 78, @@ -32562,18 +32674,20 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][11] = 60, [0][0][1][0][RTW89_CHILE][11] = 62, [0][0][1][0][RTW89_QATAR][11] = 60, + [0][0][1][0][RTW89_THAILAND][11] = 60, [0][0][1][0][RTW89_FCC][12] = 60, [0][0][1][0][RTW89_ETSI][12] = 60, [0][0][1][0][RTW89_MKK][12] = 78, [0][0][1][0][RTW89_IC][12] = 60, [0][0][1][0][RTW89_KCC][12] = 70, [0][0][1][0][RTW89_ACMA][12] = 60, - [0][0][1][0][RTW89_CN][12] = 58, + [0][0][1][0][RTW89_CN][12] = 40, [0][0][1][0][RTW89_UK][12] = 60, [0][0][1][0][RTW89_MEXICO][12] = 60, [0][0][1][0][RTW89_UKRAINE][12] = 60, [0][0][1][0][RTW89_CHILE][12] = 60, [0][0][1][0][RTW89_QATAR][12] = 60, + [0][0][1][0][RTW89_THAILAND][12] = 60, [0][0][1][0][RTW89_FCC][13] = 127, [0][0][1][0][RTW89_ETSI][13] = 127, [0][0][1][0][RTW89_MKK][13] = 127, @@ -32586,6 +32700,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][13] = 127, [0][0][1][0][RTW89_CHILE][13] = 127, [0][0][1][0][RTW89_QATAR][13] = 127, + [0][0][1][0][RTW89_THAILAND][13] = 127, [0][1][1][0][RTW89_FCC][0] = 66, [0][1][1][0][RTW89_ETSI][0] = 48, [0][1][1][0][RTW89_MKK][0] = 66, @@ -32598,6 +32713,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][0] = 48, [0][1][1][0][RTW89_CHILE][0] = 66, [0][1][1][0][RTW89_QATAR][0] = 48, + [0][1][1][0][RTW89_THAILAND][0] = 48, [0][1][1][0][RTW89_FCC][1] = 68, [0][1][1][0][RTW89_ETSI][1] = 48, [0][1][1][0][RTW89_MKK][1] = 66, @@ -32610,6 +32726,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][1] = 48, [0][1][1][0][RTW89_CHILE][1] = 68, [0][1][1][0][RTW89_QATAR][1] = 48, + [0][1][1][0][RTW89_THAILAND][1] = 48, [0][1][1][0][RTW89_FCC][2] = 72, [0][1][1][0][RTW89_ETSI][2] = 48, [0][1][1][0][RTW89_MKK][2] = 66, @@ -32622,6 +32739,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][2] = 48, [0][1][1][0][RTW89_CHILE][2] = 54, [0][1][1][0][RTW89_QATAR][2] = 48, + [0][1][1][0][RTW89_THAILAND][2] = 48, [0][1][1][0][RTW89_FCC][3] = 76, [0][1][1][0][RTW89_ETSI][3] = 48, [0][1][1][0][RTW89_MKK][3] = 66, @@ -32634,6 +32752,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][3] = 48, [0][1][1][0][RTW89_CHILE][3] = 54, [0][1][1][0][RTW89_QATAR][3] = 48, + [0][1][1][0][RTW89_THAILAND][3] = 48, [0][1][1][0][RTW89_FCC][4] = 80, [0][1][1][0][RTW89_ETSI][4] = 48, [0][1][1][0][RTW89_MKK][4] = 66, @@ -32646,6 +32765,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][4] = 48, [0][1][1][0][RTW89_CHILE][4] = 54, [0][1][1][0][RTW89_QATAR][4] = 48, + [0][1][1][0][RTW89_THAILAND][4] = 48, [0][1][1][0][RTW89_FCC][5] = 80, [0][1][1][0][RTW89_ETSI][5] = 48, [0][1][1][0][RTW89_MKK][5] = 66, @@ -32658,6 +32778,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][5] = 48, [0][1][1][0][RTW89_CHILE][5] = 80, [0][1][1][0][RTW89_QATAR][5] = 48, + [0][1][1][0][RTW89_THAILAND][5] = 48, [0][1][1][0][RTW89_FCC][6] = 80, [0][1][1][0][RTW89_ETSI][6] = 48, [0][1][1][0][RTW89_MKK][6] = 66, @@ -32670,6 +32791,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][6] = 48, [0][1][1][0][RTW89_CHILE][6] = 56, [0][1][1][0][RTW89_QATAR][6] = 48, + [0][1][1][0][RTW89_THAILAND][6] = 48, [0][1][1][0][RTW89_FCC][7] = 78, 
[0][1][1][0][RTW89_ETSI][7] = 48, [0][1][1][0][RTW89_MKK][7] = 66, @@ -32682,6 +32804,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][7] = 48, [0][1][1][0][RTW89_CHILE][7] = 56, [0][1][1][0][RTW89_QATAR][7] = 48, + [0][1][1][0][RTW89_THAILAND][7] = 48, [0][1][1][0][RTW89_FCC][8] = 74, [0][1][1][0][RTW89_ETSI][8] = 48, [0][1][1][0][RTW89_MKK][8] = 66, @@ -32694,6 +32817,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][8] = 48, [0][1][1][0][RTW89_CHILE][8] = 56, [0][1][1][0][RTW89_QATAR][8] = 48, + [0][1][1][0][RTW89_THAILAND][8] = 48, [0][1][1][0][RTW89_FCC][9] = 70, [0][1][1][0][RTW89_ETSI][9] = 48, [0][1][1][0][RTW89_MKK][9] = 66, @@ -32706,6 +32830,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][9] = 48, [0][1][1][0][RTW89_CHILE][9] = 70, [0][1][1][0][RTW89_QATAR][9] = 48, + [0][1][1][0][RTW89_THAILAND][9] = 48, [0][1][1][0][RTW89_FCC][10] = 62, [0][1][1][0][RTW89_ETSI][10] = 48, [0][1][1][0][RTW89_MKK][10] = 66, @@ -32718,6 +32843,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][10] = 48, [0][1][1][0][RTW89_CHILE][10] = 62, [0][1][1][0][RTW89_QATAR][10] = 48, + [0][1][1][0][RTW89_THAILAND][10] = 48, [0][1][1][0][RTW89_FCC][11] = 60, [0][1][1][0][RTW89_ETSI][11] = 48, [0][1][1][0][RTW89_MKK][11] = 66, @@ -32730,18 +32856,20 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][11] = 48, [0][1][1][0][RTW89_CHILE][11] = 60, [0][1][1][0][RTW89_QATAR][11] = 48, + [0][1][1][0][RTW89_THAILAND][11] = 48, [0][1][1][0][RTW89_FCC][12] = 36, [0][1][1][0][RTW89_ETSI][12] = 48, [0][1][1][0][RTW89_MKK][12] = 66, [0][1][1][0][RTW89_IC][12] = 36, [0][1][1][0][RTW89_KCC][12] = 64, [0][1][1][0][RTW89_ACMA][12] = 48, - [0][1][1][0][RTW89_CN][12] = 46, + [0][1][1][0][RTW89_CN][12] = 40, [0][1][1][0][RTW89_UK][12] = 48, [0][1][1][0][RTW89_MEXICO][12] = 36, [0][1][1][0][RTW89_UKRAINE][12] = 48, [0][1][1][0][RTW89_CHILE][12] = 36, [0][1][1][0][RTW89_QATAR][12] = 48, + [0][1][1][0][RTW89_THAILAND][12] = 48, [0][1][1][0][RTW89_FCC][13] = 127, [0][1][1][0][RTW89_ETSI][13] = 127, [0][1][1][0][RTW89_MKK][13] = 127, @@ -32754,6 +32882,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][13] = 127, [0][1][1][0][RTW89_CHILE][13] = 127, [0][1][1][0][RTW89_QATAR][13] = 127, + [0][1][1][0][RTW89_THAILAND][13] = 127, [0][0][2][0][RTW89_FCC][0] = 66, [0][0][2][0][RTW89_ETSI][0] = 60, [0][0][2][0][RTW89_MKK][0] = 78, @@ -32766,6 +32895,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][0] = 60, [0][0][2][0][RTW89_CHILE][0] = 66, [0][0][2][0][RTW89_QATAR][0] = 60, + [0][0][2][0][RTW89_THAILAND][0] = 60, [0][0][2][0][RTW89_FCC][1] = 70, [0][0][2][0][RTW89_ETSI][1] = 60, [0][0][2][0][RTW89_MKK][1] = 78, @@ -32778,6 +32908,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][1] = 60, [0][0][2][0][RTW89_CHILE][1] = 70, [0][0][2][0][RTW89_QATAR][1] = 60, + [0][0][2][0][RTW89_THAILAND][1] = 60, [0][0][2][0][RTW89_FCC][2] = 74, [0][0][2][0][RTW89_ETSI][2] = 60, [0][0][2][0][RTW89_MKK][2] = 78, @@ -32790,6 +32921,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][2] = 60, [0][0][2][0][RTW89_CHILE][2] = 64, [0][0][2][0][RTW89_QATAR][2] = 60, + [0][0][2][0][RTW89_THAILAND][2] = 60, [0][0][2][0][RTW89_FCC][3] = 
78, [0][0][2][0][RTW89_ETSI][3] = 60, [0][0][2][0][RTW89_MKK][3] = 78, @@ -32802,6 +32934,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][3] = 60, [0][0][2][0][RTW89_CHILE][3] = 64, [0][0][2][0][RTW89_QATAR][3] = 60, + [0][0][2][0][RTW89_THAILAND][3] = 60, [0][0][2][0][RTW89_FCC][4] = 80, [0][0][2][0][RTW89_ETSI][4] = 60, [0][0][2][0][RTW89_MKK][4] = 78, @@ -32814,6 +32947,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][4] = 60, [0][0][2][0][RTW89_CHILE][4] = 64, [0][0][2][0][RTW89_QATAR][4] = 60, + [0][0][2][0][RTW89_THAILAND][4] = 60, [0][0][2][0][RTW89_FCC][5] = 80, [0][0][2][0][RTW89_ETSI][5] = 60, [0][0][2][0][RTW89_MKK][5] = 78, @@ -32826,6 +32960,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][5] = 60, [0][0][2][0][RTW89_CHILE][5] = 80, [0][0][2][0][RTW89_QATAR][5] = 60, + [0][0][2][0][RTW89_THAILAND][5] = 60, [0][0][2][0][RTW89_FCC][6] = 80, [0][0][2][0][RTW89_ETSI][6] = 60, [0][0][2][0][RTW89_MKK][6] = 78, @@ -32838,6 +32973,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][6] = 60, [0][0][2][0][RTW89_CHILE][6] = 68, [0][0][2][0][RTW89_QATAR][6] = 60, + [0][0][2][0][RTW89_THAILAND][6] = 60, [0][0][2][0][RTW89_FCC][7] = 80, [0][0][2][0][RTW89_ETSI][7] = 60, [0][0][2][0][RTW89_MKK][7] = 78, @@ -32850,6 +32986,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][7] = 60, [0][0][2][0][RTW89_CHILE][7] = 68, [0][0][2][0][RTW89_QATAR][7] = 60, + [0][0][2][0][RTW89_THAILAND][7] = 60, [0][0][2][0][RTW89_FCC][8] = 78, [0][0][2][0][RTW89_ETSI][8] = 60, [0][0][2][0][RTW89_MKK][8] = 78, @@ -32862,6 +32999,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][8] = 60, [0][0][2][0][RTW89_CHILE][8] = 68, [0][0][2][0][RTW89_QATAR][8] = 60, + [0][0][2][0][RTW89_THAILAND][8] = 60, [0][0][2][0][RTW89_FCC][9] = 74, [0][0][2][0][RTW89_ETSI][9] = 60, [0][0][2][0][RTW89_MKK][9] = 78, @@ -32874,6 +33012,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][9] = 60, [0][0][2][0][RTW89_CHILE][9] = 74, [0][0][2][0][RTW89_QATAR][9] = 60, + [0][0][2][0][RTW89_THAILAND][9] = 60, [0][0][2][0][RTW89_FCC][10] = 62, [0][0][2][0][RTW89_ETSI][10] = 60, [0][0][2][0][RTW89_MKK][10] = 78, @@ -32886,6 +33025,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][10] = 60, [0][0][2][0][RTW89_CHILE][10] = 62, [0][0][2][0][RTW89_QATAR][10] = 60, + [0][0][2][0][RTW89_THAILAND][10] = 60, [0][0][2][0][RTW89_FCC][11] = 60, [0][0][2][0][RTW89_ETSI][11] = 60, [0][0][2][0][RTW89_MKK][11] = 78, @@ -32898,18 +33038,20 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][11] = 60, [0][0][2][0][RTW89_CHILE][11] = 60, [0][0][2][0][RTW89_QATAR][11] = 60, + [0][0][2][0][RTW89_THAILAND][11] = 60, [0][0][2][0][RTW89_FCC][12] = 38, [0][0][2][0][RTW89_ETSI][12] = 60, [0][0][2][0][RTW89_MKK][12] = 78, [0][0][2][0][RTW89_IC][12] = 38, [0][0][2][0][RTW89_KCC][12] = 66, [0][0][2][0][RTW89_ACMA][12] = 60, - [0][0][2][0][RTW89_CN][12] = 58, + [0][0][2][0][RTW89_CN][12] = 38, [0][0][2][0][RTW89_UK][12] = 60, [0][0][2][0][RTW89_MEXICO][12] = 38, [0][0][2][0][RTW89_UKRAINE][12] = 60, [0][0][2][0][RTW89_CHILE][12] = 38, [0][0][2][0][RTW89_QATAR][12] = 60, + [0][0][2][0][RTW89_THAILAND][12] = 60, [0][0][2][0][RTW89_FCC][13] = 127, 
[0][0][2][0][RTW89_ETSI][13] = 127, [0][0][2][0][RTW89_MKK][13] = 127, @@ -32922,6 +33064,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][13] = 127, [0][0][2][0][RTW89_CHILE][13] = 127, [0][0][2][0][RTW89_QATAR][13] = 127, + [0][0][2][0][RTW89_THAILAND][13] = 127, [0][1][2][0][RTW89_FCC][0] = 64, [0][1][2][0][RTW89_ETSI][0] = 48, [0][1][2][0][RTW89_MKK][0] = 68, @@ -32934,6 +33077,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][0] = 48, [0][1][2][0][RTW89_CHILE][0] = 64, [0][1][2][0][RTW89_QATAR][0] = 48, + [0][1][2][0][RTW89_THAILAND][0] = 48, [0][1][2][0][RTW89_FCC][1] = 70, [0][1][2][0][RTW89_ETSI][1] = 48, [0][1][2][0][RTW89_MKK][1] = 68, @@ -32946,6 +33090,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][1] = 48, [0][1][2][0][RTW89_CHILE][1] = 70, [0][1][2][0][RTW89_QATAR][1] = 48, + [0][1][2][0][RTW89_THAILAND][1] = 48, [0][1][2][0][RTW89_FCC][2] = 74, [0][1][2][0][RTW89_ETSI][2] = 48, [0][1][2][0][RTW89_MKK][2] = 68, @@ -32958,6 +33103,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][2] = 48, [0][1][2][0][RTW89_CHILE][2] = 56, [0][1][2][0][RTW89_QATAR][2] = 48, + [0][1][2][0][RTW89_THAILAND][2] = 48, [0][1][2][0][RTW89_FCC][3] = 78, [0][1][2][0][RTW89_ETSI][3] = 48, [0][1][2][0][RTW89_MKK][3] = 68, @@ -32970,6 +33116,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][3] = 48, [0][1][2][0][RTW89_CHILE][3] = 56, [0][1][2][0][RTW89_QATAR][3] = 48, + [0][1][2][0][RTW89_THAILAND][3] = 48, [0][1][2][0][RTW89_FCC][4] = 80, [0][1][2][0][RTW89_ETSI][4] = 48, [0][1][2][0][RTW89_MKK][4] = 68, @@ -32982,6 +33129,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][4] = 48, [0][1][2][0][RTW89_CHILE][4] = 56, [0][1][2][0][RTW89_QATAR][4] = 48, + [0][1][2][0][RTW89_THAILAND][4] = 48, [0][1][2][0][RTW89_FCC][5] = 80, [0][1][2][0][RTW89_ETSI][5] = 48, [0][1][2][0][RTW89_MKK][5] = 68, @@ -32994,6 +33142,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][5] = 48, [0][1][2][0][RTW89_CHILE][5] = 78, [0][1][2][0][RTW89_QATAR][5] = 48, + [0][1][2][0][RTW89_THAILAND][5] = 48, [0][1][2][0][RTW89_FCC][6] = 80, [0][1][2][0][RTW89_ETSI][6] = 48, [0][1][2][0][RTW89_MKK][6] = 68, @@ -33006,6 +33155,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][6] = 48, [0][1][2][0][RTW89_CHILE][6] = 54, [0][1][2][0][RTW89_QATAR][6] = 48, + [0][1][2][0][RTW89_THAILAND][6] = 48, [0][1][2][0][RTW89_FCC][7] = 74, [0][1][2][0][RTW89_ETSI][7] = 48, [0][1][2][0][RTW89_MKK][7] = 68, @@ -33018,6 +33168,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][7] = 48, [0][1][2][0][RTW89_CHILE][7] = 54, [0][1][2][0][RTW89_QATAR][7] = 48, + [0][1][2][0][RTW89_THAILAND][7] = 48, [0][1][2][0][RTW89_FCC][8] = 70, [0][1][2][0][RTW89_ETSI][8] = 48, [0][1][2][0][RTW89_MKK][8] = 68, @@ -33030,6 +33181,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][8] = 48, [0][1][2][0][RTW89_CHILE][8] = 54, [0][1][2][0][RTW89_QATAR][8] = 48, + [0][1][2][0][RTW89_THAILAND][8] = 48, [0][1][2][0][RTW89_FCC][9] = 66, [0][1][2][0][RTW89_ETSI][9] = 48, [0][1][2][0][RTW89_MKK][9] = 68, @@ -33042,6 +33194,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] 
[0][1][2][0][RTW89_UKRAINE][9] = 48, [0][1][2][0][RTW89_CHILE][9] = 66, [0][1][2][0][RTW89_QATAR][9] = 48, + [0][1][2][0][RTW89_THAILAND][9] = 48, [0][1][2][0][RTW89_FCC][10] = 58, [0][1][2][0][RTW89_ETSI][10] = 48, [0][1][2][0][RTW89_MKK][10] = 68, @@ -33054,6 +33207,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][10] = 48, [0][1][2][0][RTW89_CHILE][10] = 58, [0][1][2][0][RTW89_QATAR][10] = 48, + [0][1][2][0][RTW89_THAILAND][10] = 48, [0][1][2][0][RTW89_FCC][11] = 58, [0][1][2][0][RTW89_ETSI][11] = 48, [0][1][2][0][RTW89_MKK][11] = 68, @@ -33066,18 +33220,20 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][11] = 48, [0][1][2][0][RTW89_CHILE][11] = 58, [0][1][2][0][RTW89_QATAR][11] = 48, + [0][1][2][0][RTW89_THAILAND][11] = 48, [0][1][2][0][RTW89_FCC][12] = 16, [0][1][2][0][RTW89_ETSI][12] = 48, [0][1][2][0][RTW89_MKK][12] = 68, [0][1][2][0][RTW89_IC][12] = 16, [0][1][2][0][RTW89_KCC][12] = 64, [0][1][2][0][RTW89_ACMA][12] = 48, - [0][1][2][0][RTW89_CN][12] = 46, + [0][1][2][0][RTW89_CN][12] = 38, [0][1][2][0][RTW89_UK][12] = 48, [0][1][2][0][RTW89_MEXICO][12] = 16, [0][1][2][0][RTW89_UKRAINE][12] = 48, [0][1][2][0][RTW89_CHILE][12] = 16, [0][1][2][0][RTW89_QATAR][12] = 48, + [0][1][2][0][RTW89_THAILAND][12] = 48, [0][1][2][0][RTW89_FCC][13] = 127, [0][1][2][0][RTW89_ETSI][13] = 127, [0][1][2][0][RTW89_MKK][13] = 127, @@ -33090,18 +33246,20 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][13] = 127, [0][1][2][0][RTW89_CHILE][13] = 127, [0][1][2][0][RTW89_QATAR][13] = 127, + [0][1][2][0][RTW89_THAILAND][13] = 127, [0][1][2][1][RTW89_FCC][0] = 64, [0][1][2][1][RTW89_ETSI][0] = 36, [0][1][2][1][RTW89_MKK][0] = 68, [0][1][2][1][RTW89_IC][0] = 64, [0][1][2][1][RTW89_KCC][0] = 66, [0][1][2][1][RTW89_ACMA][0] = 36, - [0][1][2][1][RTW89_CN][0] = 36, + [0][1][2][1][RTW89_CN][0] = 34, [0][1][2][1][RTW89_UK][0] = 36, [0][1][2][1][RTW89_MEXICO][0] = 64, [0][1][2][1][RTW89_UKRAINE][0] = 36, [0][1][2][1][RTW89_CHILE][0] = 64, [0][1][2][1][RTW89_QATAR][0] = 36, + [0][1][2][1][RTW89_THAILAND][0] = 36, [0][1][2][1][RTW89_FCC][1] = 70, [0][1][2][1][RTW89_ETSI][1] = 36, [0][1][2][1][RTW89_MKK][1] = 68, @@ -33114,6 +33272,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][1] = 36, [0][1][2][1][RTW89_CHILE][1] = 70, [0][1][2][1][RTW89_QATAR][1] = 36, + [0][1][2][1][RTW89_THAILAND][1] = 36, [0][1][2][1][RTW89_FCC][2] = 74, [0][1][2][1][RTW89_ETSI][2] = 36, [0][1][2][1][RTW89_MKK][2] = 68, @@ -33126,6 +33285,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][2] = 36, [0][1][2][1][RTW89_CHILE][2] = 44, [0][1][2][1][RTW89_QATAR][2] = 36, + [0][1][2][1][RTW89_THAILAND][2] = 36, [0][1][2][1][RTW89_FCC][3] = 78, [0][1][2][1][RTW89_ETSI][3] = 36, [0][1][2][1][RTW89_MKK][3] = 68, @@ -33138,6 +33298,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][3] = 36, [0][1][2][1][RTW89_CHILE][3] = 44, [0][1][2][1][RTW89_QATAR][3] = 36, + [0][1][2][1][RTW89_THAILAND][3] = 36, [0][1][2][1][RTW89_FCC][4] = 80, [0][1][2][1][RTW89_ETSI][4] = 36, [0][1][2][1][RTW89_MKK][4] = 68, @@ -33150,6 +33311,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][4] = 36, [0][1][2][1][RTW89_CHILE][4] = 44, [0][1][2][1][RTW89_QATAR][4] = 36, + [0][1][2][1][RTW89_THAILAND][4] = 36, [0][1][2][1][RTW89_FCC][5] = 80, 
[0][1][2][1][RTW89_ETSI][5] = 36, [0][1][2][1][RTW89_MKK][5] = 68, @@ -33162,6 +33324,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][5] = 36, [0][1][2][1][RTW89_CHILE][5] = 74, [0][1][2][1][RTW89_QATAR][5] = 36, + [0][1][2][1][RTW89_THAILAND][5] = 36, [0][1][2][1][RTW89_FCC][6] = 80, [0][1][2][1][RTW89_ETSI][6] = 36, [0][1][2][1][RTW89_MKK][6] = 68, @@ -33174,6 +33337,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][6] = 36, [0][1][2][1][RTW89_CHILE][6] = 42, [0][1][2][1][RTW89_QATAR][6] = 36, + [0][1][2][1][RTW89_THAILAND][6] = 36, [0][1][2][1][RTW89_FCC][7] = 74, [0][1][2][1][RTW89_ETSI][7] = 36, [0][1][2][1][RTW89_MKK][7] = 68, @@ -33186,6 +33350,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][7] = 36, [0][1][2][1][RTW89_CHILE][7] = 42, [0][1][2][1][RTW89_QATAR][7] = 36, + [0][1][2][1][RTW89_THAILAND][7] = 36, [0][1][2][1][RTW89_FCC][8] = 70, [0][1][2][1][RTW89_ETSI][8] = 36, [0][1][2][1][RTW89_MKK][8] = 68, @@ -33198,6 +33363,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][8] = 36, [0][1][2][1][RTW89_CHILE][8] = 42, [0][1][2][1][RTW89_QATAR][8] = 36, + [0][1][2][1][RTW89_THAILAND][8] = 36, [0][1][2][1][RTW89_FCC][9] = 66, [0][1][2][1][RTW89_ETSI][9] = 36, [0][1][2][1][RTW89_MKK][9] = 68, @@ -33210,6 +33376,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][9] = 36, [0][1][2][1][RTW89_CHILE][9] = 66, [0][1][2][1][RTW89_QATAR][9] = 36, + [0][1][2][1][RTW89_THAILAND][9] = 36, [0][1][2][1][RTW89_FCC][10] = 58, [0][1][2][1][RTW89_ETSI][10] = 36, [0][1][2][1][RTW89_MKK][10] = 68, @@ -33222,6 +33389,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][10] = 36, [0][1][2][1][RTW89_CHILE][10] = 58, [0][1][2][1][RTW89_QATAR][10] = 36, + [0][1][2][1][RTW89_THAILAND][10] = 36, [0][1][2][1][RTW89_FCC][11] = 58, [0][1][2][1][RTW89_ETSI][11] = 36, [0][1][2][1][RTW89_MKK][11] = 68, @@ -33234,18 +33402,20 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][11] = 36, [0][1][2][1][RTW89_CHILE][11] = 58, [0][1][2][1][RTW89_QATAR][11] = 36, + [0][1][2][1][RTW89_THAILAND][11] = 36, [0][1][2][1][RTW89_FCC][12] = 16, [0][1][2][1][RTW89_ETSI][12] = 36, [0][1][2][1][RTW89_MKK][12] = 68, [0][1][2][1][RTW89_IC][12] = 16, [0][1][2][1][RTW89_KCC][12] = 64, [0][1][2][1][RTW89_ACMA][12] = 36, - [0][1][2][1][RTW89_CN][12] = 34, + [0][1][2][1][RTW89_CN][12] = 26, [0][1][2][1][RTW89_UK][12] = 36, [0][1][2][1][RTW89_MEXICO][12] = 16, [0][1][2][1][RTW89_UKRAINE][12] = 36, [0][1][2][1][RTW89_CHILE][12] = 16, [0][1][2][1][RTW89_QATAR][12] = 36, + [0][1][2][1][RTW89_THAILAND][12] = 36, [0][1][2][1][RTW89_FCC][13] = 127, [0][1][2][1][RTW89_ETSI][13] = 127, [0][1][2][1][RTW89_MKK][13] = 127, @@ -33258,6 +33428,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][13] = 127, [0][1][2][1][RTW89_CHILE][13] = 127, [0][1][2][1][RTW89_QATAR][13] = 127, + [0][1][2][1][RTW89_THAILAND][13] = 127, [1][0][2][0][RTW89_FCC][0] = 127, [1][0][2][0][RTW89_ETSI][0] = 127, [1][0][2][0][RTW89_MKK][0] = 127, @@ -33270,6 +33441,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][0] = 127, [1][0][2][0][RTW89_CHILE][0] = 127, [1][0][2][0][RTW89_QATAR][0] = 127, + [1][0][2][0][RTW89_THAILAND][0] = 127, 
[1][0][2][0][RTW89_FCC][1] = 127, [1][0][2][0][RTW89_ETSI][1] = 127, [1][0][2][0][RTW89_MKK][1] = 127, @@ -33282,6 +33454,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][1] = 127, [1][0][2][0][RTW89_CHILE][1] = 127, [1][0][2][0][RTW89_QATAR][1] = 127, + [1][0][2][0][RTW89_THAILAND][1] = 127, [1][0][2][0][RTW89_FCC][2] = 64, [1][0][2][0][RTW89_ETSI][2] = 60, [1][0][2][0][RTW89_MKK][2] = 74, @@ -33294,6 +33467,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][2] = 60, [1][0][2][0][RTW89_CHILE][2] = 64, [1][0][2][0][RTW89_QATAR][2] = 60, + [1][0][2][0][RTW89_THAILAND][2] = 60, [1][0][2][0][RTW89_FCC][3] = 64, [1][0][2][0][RTW89_ETSI][3] = 60, [1][0][2][0][RTW89_MKK][3] = 74, @@ -33306,6 +33480,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][3] = 60, [1][0][2][0][RTW89_CHILE][3] = 64, [1][0][2][0][RTW89_QATAR][3] = 60, + [1][0][2][0][RTW89_THAILAND][3] = 60, [1][0][2][0][RTW89_FCC][4] = 68, [1][0][2][0][RTW89_ETSI][4] = 60, [1][0][2][0][RTW89_MKK][4] = 74, @@ -33318,6 +33493,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][4] = 60, [1][0][2][0][RTW89_CHILE][4] = 68, [1][0][2][0][RTW89_QATAR][4] = 60, + [1][0][2][0][RTW89_THAILAND][4] = 60, [1][0][2][0][RTW89_FCC][5] = 68, [1][0][2][0][RTW89_ETSI][5] = 60, [1][0][2][0][RTW89_MKK][5] = 74, @@ -33330,6 +33506,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][5] = 60, [1][0][2][0][RTW89_CHILE][5] = 68, [1][0][2][0][RTW89_QATAR][5] = 60, + [1][0][2][0][RTW89_THAILAND][5] = 60, [1][0][2][0][RTW89_FCC][6] = 66, [1][0][2][0][RTW89_ETSI][6] = 60, [1][0][2][0][RTW89_MKK][6] = 74, @@ -33342,6 +33519,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][6] = 60, [1][0][2][0][RTW89_CHILE][6] = 66, [1][0][2][0][RTW89_QATAR][6] = 60, + [1][0][2][0][RTW89_THAILAND][6] = 60, [1][0][2][0][RTW89_FCC][7] = 62, [1][0][2][0][RTW89_ETSI][7] = 60, [1][0][2][0][RTW89_MKK][7] = 74, @@ -33354,6 +33532,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][7] = 60, [1][0][2][0][RTW89_CHILE][7] = 62, [1][0][2][0][RTW89_QATAR][7] = 60, + [1][0][2][0][RTW89_THAILAND][7] = 60, [1][0][2][0][RTW89_FCC][8] = 62, [1][0][2][0][RTW89_ETSI][8] = 60, [1][0][2][0][RTW89_MKK][8] = 74, @@ -33366,6 +33545,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][8] = 60, [1][0][2][0][RTW89_CHILE][8] = 62, [1][0][2][0][RTW89_QATAR][8] = 60, + [1][0][2][0][RTW89_THAILAND][8] = 60, [1][0][2][0][RTW89_FCC][9] = 60, [1][0][2][0][RTW89_ETSI][9] = 60, [1][0][2][0][RTW89_MKK][9] = 74, @@ -33378,6 +33558,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][9] = 60, [1][0][2][0][RTW89_CHILE][9] = 60, [1][0][2][0][RTW89_QATAR][9] = 60, + [1][0][2][0][RTW89_THAILAND][9] = 60, [1][0][2][0][RTW89_FCC][10] = 56, [1][0][2][0][RTW89_ETSI][10] = 60, [1][0][2][0][RTW89_MKK][10] = 74, @@ -33390,6 +33571,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][10] = 60, [1][0][2][0][RTW89_CHILE][10] = 56, [1][0][2][0][RTW89_QATAR][10] = 60, + [1][0][2][0][RTW89_THAILAND][10] = 60, [1][0][2][0][RTW89_FCC][11] = 127, [1][0][2][0][RTW89_ETSI][11] = 127, [1][0][2][0][RTW89_MKK][11] = 127, @@ -33402,6 +33584,7 @@ const s8 
rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][11] = 127, [1][0][2][0][RTW89_CHILE][11] = 127, [1][0][2][0][RTW89_QATAR][11] = 127, + [1][0][2][0][RTW89_THAILAND][11] = 127, [1][0][2][0][RTW89_FCC][12] = 127, [1][0][2][0][RTW89_ETSI][12] = 127, [1][0][2][0][RTW89_MKK][12] = 127, @@ -33414,6 +33597,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][12] = 127, [1][0][2][0][RTW89_CHILE][12] = 127, [1][0][2][0][RTW89_QATAR][12] = 127, + [1][0][2][0][RTW89_THAILAND][12] = 127, [1][0][2][0][RTW89_FCC][13] = 127, [1][0][2][0][RTW89_ETSI][13] = 127, [1][0][2][0][RTW89_MKK][13] = 127, @@ -33426,6 +33610,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][13] = 127, [1][0][2][0][RTW89_CHILE][13] = 127, [1][0][2][0][RTW89_QATAR][13] = 127, + [1][0][2][0][RTW89_THAILAND][13] = 127, [1][1][2][0][RTW89_FCC][0] = 127, [1][1][2][0][RTW89_ETSI][0] = 127, [1][1][2][0][RTW89_MKK][0] = 127, @@ -33438,6 +33623,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][0] = 127, [1][1][2][0][RTW89_CHILE][0] = 127, [1][1][2][0][RTW89_QATAR][0] = 127, + [1][1][2][0][RTW89_THAILAND][0] = 127, [1][1][2][0][RTW89_FCC][1] = 127, [1][1][2][0][RTW89_ETSI][1] = 127, [1][1][2][0][RTW89_MKK][1] = 127, @@ -33450,6 +33636,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][1] = 127, [1][1][2][0][RTW89_CHILE][1] = 127, [1][1][2][0][RTW89_QATAR][1] = 127, + [1][1][2][0][RTW89_THAILAND][1] = 127, [1][1][2][0][RTW89_FCC][2] = 60, [1][1][2][0][RTW89_ETSI][2] = 48, [1][1][2][0][RTW89_MKK][2] = 68, @@ -33462,6 +33649,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][2] = 48, [1][1][2][0][RTW89_CHILE][2] = 60, [1][1][2][0][RTW89_QATAR][2] = 48, + [1][1][2][0][RTW89_THAILAND][2] = 48, [1][1][2][0][RTW89_FCC][3] = 60, [1][1][2][0][RTW89_ETSI][3] = 48, [1][1][2][0][RTW89_MKK][3] = 68, @@ -33474,6 +33662,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][3] = 48, [1][1][2][0][RTW89_CHILE][3] = 56, [1][1][2][0][RTW89_QATAR][3] = 48, + [1][1][2][0][RTW89_THAILAND][3] = 48, [1][1][2][0][RTW89_FCC][4] = 60, [1][1][2][0][RTW89_ETSI][4] = 48, [1][1][2][0][RTW89_MKK][4] = 68, @@ -33486,6 +33675,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][4] = 48, [1][1][2][0][RTW89_CHILE][4] = 56, [1][1][2][0][RTW89_QATAR][4] = 48, + [1][1][2][0][RTW89_THAILAND][4] = 48, [1][1][2][0][RTW89_FCC][5] = 60, [1][1][2][0][RTW89_ETSI][5] = 48, [1][1][2][0][RTW89_MKK][5] = 68, @@ -33498,6 +33688,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][5] = 48, [1][1][2][0][RTW89_CHILE][5] = 60, [1][1][2][0][RTW89_QATAR][5] = 48, + [1][1][2][0][RTW89_THAILAND][5] = 48, [1][1][2][0][RTW89_FCC][6] = 58, [1][1][2][0][RTW89_ETSI][6] = 48, [1][1][2][0][RTW89_MKK][6] = 68, @@ -33510,6 +33701,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][6] = 48, [1][1][2][0][RTW89_CHILE][6] = 52, [1][1][2][0][RTW89_QATAR][6] = 48, + [1][1][2][0][RTW89_THAILAND][6] = 48, [1][1][2][0][RTW89_FCC][7] = 54, [1][1][2][0][RTW89_ETSI][7] = 48, [1][1][2][0][RTW89_MKK][7] = 68, @@ -33522,6 +33714,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][7] = 48, [1][1][2][0][RTW89_CHILE][7] = 52, 
[1][1][2][0][RTW89_QATAR][7] = 48, + [1][1][2][0][RTW89_THAILAND][7] = 48, [1][1][2][0][RTW89_FCC][8] = 54, [1][1][2][0][RTW89_ETSI][8] = 48, [1][1][2][0][RTW89_MKK][8] = 68, @@ -33534,6 +33727,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][8] = 48, [1][1][2][0][RTW89_CHILE][8] = 54, [1][1][2][0][RTW89_QATAR][8] = 48, + [1][1][2][0][RTW89_THAILAND][8] = 48, [1][1][2][0][RTW89_FCC][9] = 54, [1][1][2][0][RTW89_ETSI][9] = 48, [1][1][2][0][RTW89_MKK][9] = 68, @@ -33546,6 +33740,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][9] = 48, [1][1][2][0][RTW89_CHILE][9] = 54, [1][1][2][0][RTW89_QATAR][9] = 48, + [1][1][2][0][RTW89_THAILAND][9] = 48, [1][1][2][0][RTW89_FCC][10] = 46, [1][1][2][0][RTW89_ETSI][10] = 48, [1][1][2][0][RTW89_MKK][10] = 68, @@ -33558,6 +33753,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][10] = 48, [1][1][2][0][RTW89_CHILE][10] = 46, [1][1][2][0][RTW89_QATAR][10] = 48, + [1][1][2][0][RTW89_THAILAND][10] = 48, [1][1][2][0][RTW89_FCC][11] = 127, [1][1][2][0][RTW89_ETSI][11] = 127, [1][1][2][0][RTW89_MKK][11] = 127, @@ -33570,6 +33766,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][11] = 127, [1][1][2][0][RTW89_CHILE][11] = 127, [1][1][2][0][RTW89_QATAR][11] = 127, + [1][1][2][0][RTW89_THAILAND][11] = 127, [1][1][2][0][RTW89_FCC][12] = 127, [1][1][2][0][RTW89_ETSI][12] = 127, [1][1][2][0][RTW89_MKK][12] = 127, @@ -33582,6 +33779,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][12] = 127, [1][1][2][0][RTW89_CHILE][12] = 127, [1][1][2][0][RTW89_QATAR][12] = 127, + [1][1][2][0][RTW89_THAILAND][12] = 127, [1][1][2][0][RTW89_FCC][13] = 127, [1][1][2][0][RTW89_ETSI][13] = 127, [1][1][2][0][RTW89_MKK][13] = 127, @@ -33594,6 +33792,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][13] = 127, [1][1][2][0][RTW89_CHILE][13] = 127, [1][1][2][0][RTW89_QATAR][13] = 127, + [1][1][2][0][RTW89_THAILAND][13] = 127, [1][1][2][1][RTW89_FCC][0] = 127, [1][1][2][1][RTW89_ETSI][0] = 127, [1][1][2][1][RTW89_MKK][0] = 127, @@ -33606,6 +33805,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][0] = 127, [1][1][2][1][RTW89_CHILE][0] = 127, [1][1][2][1][RTW89_QATAR][0] = 127, + [1][1][2][1][RTW89_THAILAND][0] = 127, [1][1][2][1][RTW89_FCC][1] = 127, [1][1][2][1][RTW89_ETSI][1] = 127, [1][1][2][1][RTW89_MKK][1] = 127, @@ -33618,6 +33818,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][1] = 127, [1][1][2][1][RTW89_CHILE][1] = 127, [1][1][2][1][RTW89_QATAR][1] = 127, + [1][1][2][1][RTW89_THAILAND][1] = 127, [1][1][2][1][RTW89_FCC][2] = 60, [1][1][2][1][RTW89_ETSI][2] = 36, [1][1][2][1][RTW89_MKK][2] = 68, @@ -33630,6 +33831,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][2] = 36, [1][1][2][1][RTW89_CHILE][2] = 60, [1][1][2][1][RTW89_QATAR][2] = 36, + [1][1][2][1][RTW89_THAILAND][2] = 36, [1][1][2][1][RTW89_FCC][3] = 60, [1][1][2][1][RTW89_ETSI][3] = 36, [1][1][2][1][RTW89_MKK][3] = 68, @@ -33642,6 +33844,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][3] = 36, [1][1][2][1][RTW89_CHILE][3] = 44, [1][1][2][1][RTW89_QATAR][3] = 36, + [1][1][2][1][RTW89_THAILAND][3] = 36, [1][1][2][1][RTW89_FCC][4] = 60, 
[1][1][2][1][RTW89_ETSI][4] = 36, [1][1][2][1][RTW89_MKK][4] = 68, @@ -33654,6 +33857,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][4] = 36, [1][1][2][1][RTW89_CHILE][4] = 44, [1][1][2][1][RTW89_QATAR][4] = 36, + [1][1][2][1][RTW89_THAILAND][4] = 36, [1][1][2][1][RTW89_FCC][5] = 60, [1][1][2][1][RTW89_ETSI][5] = 36, [1][1][2][1][RTW89_MKK][5] = 68, @@ -33666,6 +33870,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][5] = 36, [1][1][2][1][RTW89_CHILE][5] = 60, [1][1][2][1][RTW89_QATAR][5] = 36, + [1][1][2][1][RTW89_THAILAND][5] = 36, [1][1][2][1][RTW89_FCC][6] = 58, [1][1][2][1][RTW89_ETSI][6] = 36, [1][1][2][1][RTW89_MKK][6] = 68, @@ -33678,6 +33883,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][6] = 36, [1][1][2][1][RTW89_CHILE][6] = 40, [1][1][2][1][RTW89_QATAR][6] = 36, + [1][1][2][1][RTW89_THAILAND][6] = 36, [1][1][2][1][RTW89_FCC][7] = 54, [1][1][2][1][RTW89_ETSI][7] = 36, [1][1][2][1][RTW89_MKK][7] = 68, @@ -33690,6 +33896,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][7] = 36, [1][1][2][1][RTW89_CHILE][7] = 40, [1][1][2][1][RTW89_QATAR][7] = 36, + [1][1][2][1][RTW89_THAILAND][7] = 36, [1][1][2][1][RTW89_FCC][8] = 54, [1][1][2][1][RTW89_ETSI][8] = 36, [1][1][2][1][RTW89_MKK][8] = 68, @@ -33702,6 +33909,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][8] = 36, [1][1][2][1][RTW89_CHILE][8] = 54, [1][1][2][1][RTW89_QATAR][8] = 36, + [1][1][2][1][RTW89_THAILAND][8] = 36, [1][1][2][1][RTW89_FCC][9] = 54, [1][1][2][1][RTW89_ETSI][9] = 36, [1][1][2][1][RTW89_MKK][9] = 68, @@ -33714,18 +33922,20 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][9] = 36, [1][1][2][1][RTW89_CHILE][9] = 54, [1][1][2][1][RTW89_QATAR][9] = 36, + [1][1][2][1][RTW89_THAILAND][9] = 36, [1][1][2][1][RTW89_FCC][10] = 46, [1][1][2][1][RTW89_ETSI][10] = 36, [1][1][2][1][RTW89_MKK][10] = 68, [1][1][2][1][RTW89_IC][10] = 46, [1][1][2][1][RTW89_KCC][10] = 64, [1][1][2][1][RTW89_ACMA][10] = 36, - [1][1][2][1][RTW89_CN][10] = 36, + [1][1][2][1][RTW89_CN][10] = 34, [1][1][2][1][RTW89_UK][10] = 36, [1][1][2][1][RTW89_MEXICO][10] = 46, [1][1][2][1][RTW89_UKRAINE][10] = 36, [1][1][2][1][RTW89_CHILE][10] = 46, [1][1][2][1][RTW89_QATAR][10] = 36, + [1][1][2][1][RTW89_THAILAND][10] = 36, [1][1][2][1][RTW89_FCC][11] = 127, [1][1][2][1][RTW89_ETSI][11] = 127, [1][1][2][1][RTW89_MKK][11] = 127, @@ -33738,6 +33948,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][11] = 127, [1][1][2][1][RTW89_CHILE][11] = 127, [1][1][2][1][RTW89_QATAR][11] = 127, + [1][1][2][1][RTW89_THAILAND][11] = 127, [1][1][2][1][RTW89_FCC][12] = 127, [1][1][2][1][RTW89_ETSI][12] = 127, [1][1][2][1][RTW89_MKK][12] = 127, @@ -33750,6 +33961,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][12] = 127, [1][1][2][1][RTW89_CHILE][12] = 127, [1][1][2][1][RTW89_QATAR][12] = 127, + [1][1][2][1][RTW89_THAILAND][12] = 127, [1][1][2][1][RTW89_FCC][13] = 127, [1][1][2][1][RTW89_ETSI][13] = 127, [1][1][2][1][RTW89_MKK][13] = 127, @@ -33762,6 +33974,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][13] = 127, [1][1][2][1][RTW89_CHILE][13] = 127, [1][1][2][1][RTW89_QATAR][13] = 127, + [1][1][2][1][RTW89_THAILAND][13] = 127, }; static @@ 
-33992,6 +34205,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][0] = 54, [0][0][1][0][RTW89_CHILE][0] = 70, [0][0][1][0][RTW89_QATAR][0] = 66, + [0][0][1][0][RTW89_THAILAND][0] = 66, [0][0][1][0][RTW89_FCC][2] = 72, [0][0][1][0][RTW89_ETSI][2] = 66, [0][0][1][0][RTW89_MKK][2] = 66, @@ -34004,6 +34218,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][2] = 54, [0][0][1][0][RTW89_CHILE][2] = 70, [0][0][1][0][RTW89_QATAR][2] = 66, + [0][0][1][0][RTW89_THAILAND][2] = 66, [0][0][1][0][RTW89_FCC][4] = 72, [0][0][1][0][RTW89_ETSI][4] = 66, [0][0][1][0][RTW89_MKK][4] = 66, @@ -34016,6 +34231,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][4] = 54, [0][0][1][0][RTW89_CHILE][4] = 70, [0][0][1][0][RTW89_QATAR][4] = 66, + [0][0][1][0][RTW89_THAILAND][4] = 66, [0][0][1][0][RTW89_FCC][6] = 72, [0][0][1][0][RTW89_ETSI][6] = 66, [0][0][1][0][RTW89_MKK][6] = 66, @@ -34028,6 +34244,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][6] = 54, [0][0][1][0][RTW89_CHILE][6] = 70, [0][0][1][0][RTW89_QATAR][6] = 66, + [0][0][1][0][RTW89_THAILAND][6] = 66, [0][0][1][0][RTW89_FCC][8] = 72, [0][0][1][0][RTW89_ETSI][8] = 66, [0][0][1][0][RTW89_MKK][8] = 66, @@ -34040,6 +34257,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][8] = 54, [0][0][1][0][RTW89_CHILE][8] = 70, [0][0][1][0][RTW89_QATAR][8] = 66, + [0][0][1][0][RTW89_THAILAND][8] = 66, [0][0][1][0][RTW89_FCC][10] = 72, [0][0][1][0][RTW89_ETSI][10] = 66, [0][0][1][0][RTW89_MKK][10] = 66, @@ -34052,6 +34270,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][10] = 54, [0][0][1][0][RTW89_CHILE][10] = 70, [0][0][1][0][RTW89_QATAR][10] = 66, + [0][0][1][0][RTW89_THAILAND][10] = 66, [0][0][1][0][RTW89_FCC][12] = 72, [0][0][1][0][RTW89_ETSI][12] = 66, [0][0][1][0][RTW89_MKK][12] = 66, @@ -34064,6 +34283,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][12] = 54, [0][0][1][0][RTW89_CHILE][12] = 70, [0][0][1][0][RTW89_QATAR][12] = 66, + [0][0][1][0][RTW89_THAILAND][12] = 66, [0][0][1][0][RTW89_FCC][14] = 70, [0][0][1][0][RTW89_ETSI][14] = 66, [0][0][1][0][RTW89_MKK][14] = 66, @@ -34076,6 +34296,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][14] = 54, [0][0][1][0][RTW89_CHILE][14] = 68, [0][0][1][0][RTW89_QATAR][14] = 66, + [0][0][1][0][RTW89_THAILAND][14] = 66, [0][0][1][0][RTW89_FCC][15] = 72, [0][0][1][0][RTW89_ETSI][15] = 66, [0][0][1][0][RTW89_MKK][15] = 70, @@ -34088,6 +34309,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][15] = 54, [0][0][1][0][RTW89_CHILE][15] = 70, [0][0][1][0][RTW89_QATAR][15] = 66, + [0][0][1][0][RTW89_THAILAND][15] = 66, [0][0][1][0][RTW89_FCC][17] = 72, [0][0][1][0][RTW89_ETSI][17] = 66, [0][0][1][0][RTW89_MKK][17] = 70, @@ -34100,6 +34322,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][17] = 54, [0][0][1][0][RTW89_CHILE][17] = 70, [0][0][1][0][RTW89_QATAR][17] = 66, + [0][0][1][0][RTW89_THAILAND][17] = 66, [0][0][1][0][RTW89_FCC][19] = 72, [0][0][1][0][RTW89_ETSI][19] = 66, [0][0][1][0][RTW89_MKK][19] = 70, @@ -34112,6 +34335,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][19] = 54, 
[0][0][1][0][RTW89_CHILE][19] = 70, [0][0][1][0][RTW89_QATAR][19] = 66, + [0][0][1][0][RTW89_THAILAND][19] = 66, [0][0][1][0][RTW89_FCC][21] = 72, [0][0][1][0][RTW89_ETSI][21] = 66, [0][0][1][0][RTW89_MKK][21] = 70, @@ -34124,6 +34348,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][21] = 54, [0][0][1][0][RTW89_CHILE][21] = 70, [0][0][1][0][RTW89_QATAR][21] = 66, + [0][0][1][0][RTW89_THAILAND][21] = 66, [0][0][1][0][RTW89_FCC][23] = 72, [0][0][1][0][RTW89_ETSI][23] = 66, [0][0][1][0][RTW89_MKK][23] = 70, @@ -34136,6 +34361,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][23] = 54, [0][0][1][0][RTW89_CHILE][23] = 70, [0][0][1][0][RTW89_QATAR][23] = 66, + [0][0][1][0][RTW89_THAILAND][23] = 66, [0][0][1][0][RTW89_FCC][25] = 72, [0][0][1][0][RTW89_ETSI][25] = 66, [0][0][1][0][RTW89_MKK][25] = 70, @@ -34148,6 +34374,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][25] = 54, [0][0][1][0][RTW89_CHILE][25] = 70, [0][0][1][0][RTW89_QATAR][25] = 66, + [0][0][1][0][RTW89_THAILAND][25] = 66, [0][0][1][0][RTW89_FCC][27] = 72, [0][0][1][0][RTW89_ETSI][27] = 66, [0][0][1][0][RTW89_MKK][27] = 70, @@ -34160,6 +34387,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][27] = 54, [0][0][1][0][RTW89_CHILE][27] = 58, [0][0][1][0][RTW89_QATAR][27] = 66, + [0][0][1][0][RTW89_THAILAND][27] = 66, [0][0][1][0][RTW89_FCC][29] = 72, [0][0][1][0][RTW89_ETSI][29] = 66, [0][0][1][0][RTW89_MKK][29] = 70, @@ -34172,6 +34400,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][29] = 54, [0][0][1][0][RTW89_CHILE][29] = 58, [0][0][1][0][RTW89_QATAR][29] = 66, + [0][0][1][0][RTW89_THAILAND][29] = 66, [0][0][1][0][RTW89_FCC][31] = 72, [0][0][1][0][RTW89_ETSI][31] = 66, [0][0][1][0][RTW89_MKK][31] = 70, @@ -34184,6 +34413,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][31] = 54, [0][0][1][0][RTW89_CHILE][31] = 58, [0][0][1][0][RTW89_QATAR][31] = 66, + [0][0][1][0][RTW89_THAILAND][31] = 66, [0][0][1][0][RTW89_FCC][33] = 72, [0][0][1][0][RTW89_ETSI][33] = 66, [0][0][1][0][RTW89_MKK][33] = 70, @@ -34196,6 +34426,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][33] = 54, [0][0][1][0][RTW89_CHILE][33] = 58, [0][0][1][0][RTW89_QATAR][33] = 66, + [0][0][1][0][RTW89_THAILAND][33] = 66, [0][0][1][0][RTW89_FCC][35] = 60, [0][0][1][0][RTW89_ETSI][35] = 66, [0][0][1][0][RTW89_MKK][35] = 70, @@ -34208,6 +34439,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][35] = 54, [0][0][1][0][RTW89_CHILE][35] = 58, [0][0][1][0][RTW89_QATAR][35] = 66, + [0][0][1][0][RTW89_THAILAND][35] = 66, [0][0][1][0][RTW89_FCC][37] = 72, [0][0][1][0][RTW89_ETSI][37] = 127, [0][0][1][0][RTW89_MKK][37] = 70, @@ -34220,66 +34452,72 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][37] = 127, [0][0][1][0][RTW89_CHILE][37] = 70, [0][0][1][0][RTW89_QATAR][37] = 127, + [0][0][1][0][RTW89_THAILAND][37] = 127, [0][0][1][0][RTW89_FCC][38] = 72, [0][0][1][0][RTW89_ETSI][38] = 30, [0][0][1][0][RTW89_MKK][38] = 127, [0][0][1][0][RTW89_IC][38] = 72, [0][0][1][0][RTW89_KCC][38] = 62, [0][0][1][0][RTW89_ACMA][38] = 70, - [0][0][1][0][RTW89_CN][38] = 68, + [0][0][1][0][RTW89_CN][38] = 54, [0][0][1][0][RTW89_UK][38] = 64, [0][0][1][0][RTW89_MEXICO][38] 
= 72, [0][0][1][0][RTW89_UKRAINE][38] = 30, [0][0][1][0][RTW89_CHILE][38] = 70, [0][0][1][0][RTW89_QATAR][38] = 30, + [0][0][1][0][RTW89_THAILAND][38] = 30, [0][0][1][0][RTW89_FCC][40] = 72, [0][0][1][0][RTW89_ETSI][40] = 30, [0][0][1][0][RTW89_MKK][40] = 127, [0][0][1][0][RTW89_IC][40] = 72, [0][0][1][0][RTW89_KCC][40] = 62, [0][0][1][0][RTW89_ACMA][40] = 70, - [0][0][1][0][RTW89_CN][40] = 68, + [0][0][1][0][RTW89_CN][40] = 54, [0][0][1][0][RTW89_UK][40] = 64, [0][0][1][0][RTW89_MEXICO][40] = 72, [0][0][1][0][RTW89_UKRAINE][40] = 30, [0][0][1][0][RTW89_CHILE][40] = 70, [0][0][1][0][RTW89_QATAR][40] = 30, + [0][0][1][0][RTW89_THAILAND][40] = 30, [0][0][1][0][RTW89_FCC][42] = 72, [0][0][1][0][RTW89_ETSI][42] = 30, [0][0][1][0][RTW89_MKK][42] = 127, [0][0][1][0][RTW89_IC][42] = 72, [0][0][1][0][RTW89_KCC][42] = 62, [0][0][1][0][RTW89_ACMA][42] = 70, - [0][0][1][0][RTW89_CN][42] = 68, + [0][0][1][0][RTW89_CN][42] = 54, [0][0][1][0][RTW89_UK][42] = 64, [0][0][1][0][RTW89_MEXICO][42] = 72, [0][0][1][0][RTW89_UKRAINE][42] = 30, [0][0][1][0][RTW89_CHILE][42] = 70, [0][0][1][0][RTW89_QATAR][42] = 30, + [0][0][1][0][RTW89_THAILAND][42] = 30, [0][0][1][0][RTW89_FCC][44] = 72, [0][0][1][0][RTW89_ETSI][44] = 30, [0][0][1][0][RTW89_MKK][44] = 127, [0][0][1][0][RTW89_IC][44] = 72, [0][0][1][0][RTW89_KCC][44] = 62, [0][0][1][0][RTW89_ACMA][44] = 70, - [0][0][1][0][RTW89_CN][44] = 68, + [0][0][1][0][RTW89_CN][44] = 54, [0][0][1][0][RTW89_UK][44] = 64, [0][0][1][0][RTW89_MEXICO][44] = 72, [0][0][1][0][RTW89_UKRAINE][44] = 30, [0][0][1][0][RTW89_CHILE][44] = 70, [0][0][1][0][RTW89_QATAR][44] = 30, + [0][0][1][0][RTW89_THAILAND][44] = 30, [0][0][1][0][RTW89_FCC][46] = 72, [0][0][1][0][RTW89_ETSI][46] = 30, [0][0][1][0][RTW89_MKK][46] = 127, [0][0][1][0][RTW89_IC][46] = 72, [0][0][1][0][RTW89_KCC][46] = 62, [0][0][1][0][RTW89_ACMA][46] = 70, - [0][0][1][0][RTW89_CN][46] = 68, + [0][0][1][0][RTW89_CN][46] = 54, [0][0][1][0][RTW89_UK][46] = 64, [0][0][1][0][RTW89_MEXICO][46] = 72, [0][0][1][0][RTW89_UKRAINE][46] = 30, [0][0][1][0][RTW89_CHILE][46] = 70, [0][0][1][0][RTW89_QATAR][46] = 30, + [0][0][1][0][RTW89_THAILAND][46] = 30, [0][0][1][0][RTW89_FCC][48] = 72, [0][0][1][0][RTW89_ETSI][48] = 127, [0][0][1][0][RTW89_MKK][48] = 127, @@ -34292,6 +34530,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][48] = 127, [0][0][1][0][RTW89_CHILE][48] = 127, [0][0][1][0][RTW89_QATAR][48] = 127, + [0][0][1][0][RTW89_THAILAND][48] = 127, [0][0][1][0][RTW89_FCC][50] = 72, [0][0][1][0][RTW89_ETSI][50] = 127, [0][0][1][0][RTW89_MKK][50] = 127, @@ -34304,6 +34543,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][50] = 127, [0][0][1][0][RTW89_CHILE][50] = 127, [0][0][1][0][RTW89_QATAR][50] = 127, + [0][0][1][0][RTW89_THAILAND][50] = 127, [0][0][1][0][RTW89_FCC][52] = 72, [0][0][1][0][RTW89_ETSI][52] = 127, [0][0][1][0][RTW89_MKK][52] = 127, @@ -34316,6 +34556,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_UKRAINE][52] = 127, [0][0][1][0][RTW89_CHILE][52] = 127, [0][0][1][0][RTW89_QATAR][52] = 127, + [0][0][1][0][RTW89_THAILAND][52] = 127, [0][1][1][0][RTW89_FCC][0] = 60, [0][1][1][0][RTW89_ETSI][0] = 54, [0][1][1][0][RTW89_MKK][0] = 54, @@ -34328,6 +34569,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][0] = 42, [0][1][1][0][RTW89_CHILE][0] = 60, [0][1][1][0][RTW89_QATAR][0] = 54, + [0][1][1][0][RTW89_THAILAND][0] = 54, 
[0][1][1][0][RTW89_FCC][2] = 60, [0][1][1][0][RTW89_ETSI][2] = 54, [0][1][1][0][RTW89_MKK][2] = 54, @@ -34340,6 +34582,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][2] = 42, [0][1][1][0][RTW89_CHILE][2] = 60, [0][1][1][0][RTW89_QATAR][2] = 54, + [0][1][1][0][RTW89_THAILAND][2] = 54, [0][1][1][0][RTW89_FCC][4] = 60, [0][1][1][0][RTW89_ETSI][4] = 54, [0][1][1][0][RTW89_MKK][4] = 54, @@ -34352,6 +34595,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][4] = 42, [0][1][1][0][RTW89_CHILE][4] = 60, [0][1][1][0][RTW89_QATAR][4] = 54, + [0][1][1][0][RTW89_THAILAND][4] = 54, [0][1][1][0][RTW89_FCC][6] = 60, [0][1][1][0][RTW89_ETSI][6] = 54, [0][1][1][0][RTW89_MKK][6] = 54, @@ -34364,6 +34608,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][6] = 42, [0][1][1][0][RTW89_CHILE][6] = 60, [0][1][1][0][RTW89_QATAR][6] = 54, + [0][1][1][0][RTW89_THAILAND][6] = 54, [0][1][1][0][RTW89_FCC][8] = 62, [0][1][1][0][RTW89_ETSI][8] = 54, [0][1][1][0][RTW89_MKK][8] = 52, @@ -34376,6 +34621,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][8] = 42, [0][1][1][0][RTW89_CHILE][8] = 62, [0][1][1][0][RTW89_QATAR][8] = 54, + [0][1][1][0][RTW89_THAILAND][8] = 54, [0][1][1][0][RTW89_FCC][10] = 62, [0][1][1][0][RTW89_ETSI][10] = 54, [0][1][1][0][RTW89_MKK][10] = 54, @@ -34388,6 +34634,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][10] = 42, [0][1][1][0][RTW89_CHILE][10] = 62, [0][1][1][0][RTW89_QATAR][10] = 54, + [0][1][1][0][RTW89_THAILAND][10] = 54, [0][1][1][0][RTW89_FCC][12] = 62, [0][1][1][0][RTW89_ETSI][12] = 54, [0][1][1][0][RTW89_MKK][12] = 54, @@ -34400,6 +34647,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][12] = 42, [0][1][1][0][RTW89_CHILE][12] = 62, [0][1][1][0][RTW89_QATAR][12] = 54, + [0][1][1][0][RTW89_THAILAND][12] = 54, [0][1][1][0][RTW89_FCC][14] = 60, [0][1][1][0][RTW89_ETSI][14] = 54, [0][1][1][0][RTW89_MKK][14] = 54, @@ -34412,6 +34660,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][14] = 42, [0][1][1][0][RTW89_CHILE][14] = 60, [0][1][1][0][RTW89_QATAR][14] = 54, + [0][1][1][0][RTW89_THAILAND][14] = 54, [0][1][1][0][RTW89_FCC][15] = 60, [0][1][1][0][RTW89_ETSI][15] = 54, [0][1][1][0][RTW89_MKK][15] = 70, @@ -34424,6 +34673,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][15] = 42, [0][1][1][0][RTW89_CHILE][15] = 60, [0][1][1][0][RTW89_QATAR][15] = 54, + [0][1][1][0][RTW89_THAILAND][15] = 54, [0][1][1][0][RTW89_FCC][17] = 60, [0][1][1][0][RTW89_ETSI][17] = 54, [0][1][1][0][RTW89_MKK][17] = 70, @@ -34436,6 +34686,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][17] = 42, [0][1][1][0][RTW89_CHILE][17] = 60, [0][1][1][0][RTW89_QATAR][17] = 54, + [0][1][1][0][RTW89_THAILAND][17] = 54, [0][1][1][0][RTW89_FCC][19] = 60, [0][1][1][0][RTW89_ETSI][19] = 54, [0][1][1][0][RTW89_MKK][19] = 70, @@ -34448,6 +34699,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][19] = 42, [0][1][1][0][RTW89_CHILE][19] = 60, [0][1][1][0][RTW89_QATAR][19] = 54, + [0][1][1][0][RTW89_THAILAND][19] = 54, [0][1][1][0][RTW89_FCC][21] = 60, [0][1][1][0][RTW89_ETSI][21] = 54, [0][1][1][0][RTW89_MKK][21] = 70, @@ -34460,6 +34712,7 @@ const s8 
rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][21] = 42, [0][1][1][0][RTW89_CHILE][21] = 60, [0][1][1][0][RTW89_QATAR][21] = 54, + [0][1][1][0][RTW89_THAILAND][21] = 54, [0][1][1][0][RTW89_FCC][23] = 60, [0][1][1][0][RTW89_ETSI][23] = 54, [0][1][1][0][RTW89_MKK][23] = 70, @@ -34472,6 +34725,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][23] = 42, [0][1][1][0][RTW89_CHILE][23] = 60, [0][1][1][0][RTW89_QATAR][23] = 54, + [0][1][1][0][RTW89_THAILAND][23] = 54, [0][1][1][0][RTW89_FCC][25] = 60, [0][1][1][0][RTW89_ETSI][25] = 54, [0][1][1][0][RTW89_MKK][25] = 70, @@ -34484,6 +34738,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][25] = 42, [0][1][1][0][RTW89_CHILE][25] = 60, [0][1][1][0][RTW89_QATAR][25] = 54, + [0][1][1][0][RTW89_THAILAND][25] = 54, [0][1][1][0][RTW89_FCC][27] = 60, [0][1][1][0][RTW89_ETSI][27] = 54, [0][1][1][0][RTW89_MKK][27] = 70, @@ -34496,6 +34751,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][27] = 42, [0][1][1][0][RTW89_CHILE][27] = 52, [0][1][1][0][RTW89_QATAR][27] = 54, + [0][1][1][0][RTW89_THAILAND][27] = 54, [0][1][1][0][RTW89_FCC][29] = 60, [0][1][1][0][RTW89_ETSI][29] = 54, [0][1][1][0][RTW89_MKK][29] = 70, @@ -34508,6 +34764,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][29] = 42, [0][1][1][0][RTW89_CHILE][29] = 52, [0][1][1][0][RTW89_QATAR][29] = 54, + [0][1][1][0][RTW89_THAILAND][29] = 54, [0][1][1][0][RTW89_FCC][31] = 60, [0][1][1][0][RTW89_ETSI][31] = 54, [0][1][1][0][RTW89_MKK][31] = 70, @@ -34520,6 +34777,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][31] = 42, [0][1][1][0][RTW89_CHILE][31] = 52, [0][1][1][0][RTW89_QATAR][31] = 54, + [0][1][1][0][RTW89_THAILAND][31] = 54, [0][1][1][0][RTW89_FCC][33] = 60, [0][1][1][0][RTW89_ETSI][33] = 54, [0][1][1][0][RTW89_MKK][33] = 70, @@ -34532,6 +34790,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][33] = 42, [0][1][1][0][RTW89_CHILE][33] = 52, [0][1][1][0][RTW89_QATAR][33] = 54, + [0][1][1][0][RTW89_THAILAND][33] = 54, [0][1][1][0][RTW89_FCC][35] = 52, [0][1][1][0][RTW89_ETSI][35] = 54, [0][1][1][0][RTW89_MKK][35] = 70, @@ -34544,6 +34803,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][35] = 42, [0][1][1][0][RTW89_CHILE][35] = 52, [0][1][1][0][RTW89_QATAR][35] = 54, + [0][1][1][0][RTW89_THAILAND][35] = 54, [0][1][1][0][RTW89_FCC][37] = 62, [0][1][1][0][RTW89_ETSI][37] = 127, [0][1][1][0][RTW89_MKK][37] = 70, @@ -34556,66 +34816,72 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][37] = 127, [0][1][1][0][RTW89_CHILE][37] = 62, [0][1][1][0][RTW89_QATAR][37] = 127, + [0][1][1][0][RTW89_THAILAND][37] = 127, [0][1][1][0][RTW89_FCC][38] = 72, [0][1][1][0][RTW89_ETSI][38] = 18, [0][1][1][0][RTW89_MKK][38] = 127, [0][1][1][0][RTW89_IC][38] = 72, [0][1][1][0][RTW89_KCC][38] = 60, [0][1][1][0][RTW89_ACMA][38] = 70, - [0][1][1][0][RTW89_CN][38] = 64, + [0][1][1][0][RTW89_CN][38] = 54, [0][1][1][0][RTW89_UK][38] = 52, [0][1][1][0][RTW89_MEXICO][38] = 72, [0][1][1][0][RTW89_UKRAINE][38] = 18, [0][1][1][0][RTW89_CHILE][38] = 70, [0][1][1][0][RTW89_QATAR][38] = 18, + [0][1][1][0][RTW89_THAILAND][38] = 18, [0][1][1][0][RTW89_FCC][40] = 72, [0][1][1][0][RTW89_ETSI][40] = 18, 
[0][1][1][0][RTW89_MKK][40] = 127, [0][1][1][0][RTW89_IC][40] = 72, [0][1][1][0][RTW89_KCC][40] = 60, [0][1][1][0][RTW89_ACMA][40] = 70, - [0][1][1][0][RTW89_CN][40] = 64, + [0][1][1][0][RTW89_CN][40] = 54, [0][1][1][0][RTW89_UK][40] = 52, [0][1][1][0][RTW89_MEXICO][40] = 72, [0][1][1][0][RTW89_UKRAINE][40] = 18, [0][1][1][0][RTW89_CHILE][40] = 70, [0][1][1][0][RTW89_QATAR][40] = 18, + [0][1][1][0][RTW89_THAILAND][40] = 18, [0][1][1][0][RTW89_FCC][42] = 72, [0][1][1][0][RTW89_ETSI][42] = 18, [0][1][1][0][RTW89_MKK][42] = 127, [0][1][1][0][RTW89_IC][42] = 72, [0][1][1][0][RTW89_KCC][42] = 60, [0][1][1][0][RTW89_ACMA][42] = 70, - [0][1][1][0][RTW89_CN][42] = 64, + [0][1][1][0][RTW89_CN][42] = 54, [0][1][1][0][RTW89_UK][42] = 52, [0][1][1][0][RTW89_MEXICO][42] = 72, [0][1][1][0][RTW89_UKRAINE][42] = 18, [0][1][1][0][RTW89_CHILE][42] = 70, [0][1][1][0][RTW89_QATAR][42] = 18, + [0][1][1][0][RTW89_THAILAND][42] = 18, [0][1][1][0][RTW89_FCC][44] = 72, [0][1][1][0][RTW89_ETSI][44] = 18, [0][1][1][0][RTW89_MKK][44] = 127, [0][1][1][0][RTW89_IC][44] = 72, [0][1][1][0][RTW89_KCC][44] = 60, [0][1][1][0][RTW89_ACMA][44] = 70, - [0][1][1][0][RTW89_CN][44] = 60, + [0][1][1][0][RTW89_CN][44] = 54, [0][1][1][0][RTW89_UK][44] = 52, [0][1][1][0][RTW89_MEXICO][44] = 72, [0][1][1][0][RTW89_UKRAINE][44] = 18, [0][1][1][0][RTW89_CHILE][44] = 70, [0][1][1][0][RTW89_QATAR][44] = 18, + [0][1][1][0][RTW89_THAILAND][44] = 18, [0][1][1][0][RTW89_FCC][46] = 72, [0][1][1][0][RTW89_ETSI][46] = 18, [0][1][1][0][RTW89_MKK][46] = 127, [0][1][1][0][RTW89_IC][46] = 72, [0][1][1][0][RTW89_KCC][46] = 60, [0][1][1][0][RTW89_ACMA][46] = 70, - [0][1][1][0][RTW89_CN][46] = 60, + [0][1][1][0][RTW89_CN][46] = 54, [0][1][1][0][RTW89_UK][46] = 52, [0][1][1][0][RTW89_MEXICO][46] = 72, [0][1][1][0][RTW89_UKRAINE][46] = 18, [0][1][1][0][RTW89_CHILE][46] = 70, [0][1][1][0][RTW89_QATAR][46] = 18, + [0][1][1][0][RTW89_THAILAND][46] = 18, [0][1][1][0][RTW89_FCC][48] = 48, [0][1][1][0][RTW89_ETSI][48] = 127, [0][1][1][0][RTW89_MKK][48] = 127, @@ -34628,6 +34894,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][48] = 127, [0][1][1][0][RTW89_CHILE][48] = 127, [0][1][1][0][RTW89_QATAR][48] = 127, + [0][1][1][0][RTW89_THAILAND][48] = 127, [0][1][1][0][RTW89_FCC][50] = 48, [0][1][1][0][RTW89_ETSI][50] = 127, [0][1][1][0][RTW89_MKK][50] = 127, @@ -34640,6 +34907,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][50] = 127, [0][1][1][0][RTW89_CHILE][50] = 127, [0][1][1][0][RTW89_QATAR][50] = 127, + [0][1][1][0][RTW89_THAILAND][50] = 127, [0][1][1][0][RTW89_FCC][52] = 48, [0][1][1][0][RTW89_ETSI][52] = 127, [0][1][1][0][RTW89_MKK][52] = 127, @@ -34652,6 +34920,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_UKRAINE][52] = 127, [0][1][1][0][RTW89_CHILE][52] = 127, [0][1][1][0][RTW89_QATAR][52] = 127, + [0][1][1][0][RTW89_THAILAND][52] = 127, [0][0][2][0][RTW89_FCC][0] = 70, [0][0][2][0][RTW89_ETSI][0] = 66, [0][0][2][0][RTW89_MKK][0] = 68, @@ -34664,6 +34933,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][0] = 54, [0][0][2][0][RTW89_CHILE][0] = 68, [0][0][2][0][RTW89_QATAR][0] = 66, + [0][0][2][0][RTW89_THAILAND][0] = 66, [0][0][2][0][RTW89_FCC][2] = 72, [0][0][2][0][RTW89_ETSI][2] = 66, [0][0][2][0][RTW89_MKK][2] = 68, @@ -34676,6 +34946,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][2] = 54, 
[0][0][2][0][RTW89_CHILE][2] = 70, [0][0][2][0][RTW89_QATAR][2] = 66, + [0][0][2][0][RTW89_THAILAND][2] = 66, [0][0][2][0][RTW89_FCC][4] = 72, [0][0][2][0][RTW89_ETSI][4] = 66, [0][0][2][0][RTW89_MKK][4] = 68, @@ -34688,6 +34959,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][4] = 54, [0][0][2][0][RTW89_CHILE][4] = 70, [0][0][2][0][RTW89_QATAR][4] = 66, + [0][0][2][0][RTW89_THAILAND][4] = 66, [0][0][2][0][RTW89_FCC][6] = 72, [0][0][2][0][RTW89_ETSI][6] = 66, [0][0][2][0][RTW89_MKK][6] = 60, @@ -34700,6 +34972,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][6] = 54, [0][0][2][0][RTW89_CHILE][6] = 70, [0][0][2][0][RTW89_QATAR][6] = 66, + [0][0][2][0][RTW89_THAILAND][6] = 66, [0][0][2][0][RTW89_FCC][8] = 72, [0][0][2][0][RTW89_ETSI][8] = 66, [0][0][2][0][RTW89_MKK][8] = 58, @@ -34712,6 +34985,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][8] = 54, [0][0][2][0][RTW89_CHILE][8] = 70, [0][0][2][0][RTW89_QATAR][8] = 66, + [0][0][2][0][RTW89_THAILAND][8] = 66, [0][0][2][0][RTW89_FCC][10] = 72, [0][0][2][0][RTW89_ETSI][10] = 66, [0][0][2][0][RTW89_MKK][10] = 70, @@ -34724,6 +34998,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][10] = 54, [0][0][2][0][RTW89_CHILE][10] = 70, [0][0][2][0][RTW89_QATAR][10] = 66, + [0][0][2][0][RTW89_THAILAND][10] = 66, [0][0][2][0][RTW89_FCC][12] = 72, [0][0][2][0][RTW89_ETSI][12] = 66, [0][0][2][0][RTW89_MKK][12] = 70, @@ -34736,6 +35011,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][12] = 54, [0][0][2][0][RTW89_CHILE][12] = 70, [0][0][2][0][RTW89_QATAR][12] = 66, + [0][0][2][0][RTW89_THAILAND][12] = 66, [0][0][2][0][RTW89_FCC][14] = 68, [0][0][2][0][RTW89_ETSI][14] = 66, [0][0][2][0][RTW89_MKK][14] = 70, @@ -34748,6 +35024,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][14] = 54, [0][0][2][0][RTW89_CHILE][14] = 66, [0][0][2][0][RTW89_QATAR][14] = 66, + [0][0][2][0][RTW89_THAILAND][14] = 66, [0][0][2][0][RTW89_FCC][15] = 70, [0][0][2][0][RTW89_ETSI][15] = 66, [0][0][2][0][RTW89_MKK][15] = 70, @@ -34760,6 +35037,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][15] = 54, [0][0][2][0][RTW89_CHILE][15] = 68, [0][0][2][0][RTW89_QATAR][15] = 66, + [0][0][2][0][RTW89_THAILAND][15] = 66, [0][0][2][0][RTW89_FCC][17] = 72, [0][0][2][0][RTW89_ETSI][17] = 66, [0][0][2][0][RTW89_MKK][17] = 70, @@ -34772,6 +35050,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][17] = 54, [0][0][2][0][RTW89_CHILE][17] = 68, [0][0][2][0][RTW89_QATAR][17] = 66, + [0][0][2][0][RTW89_THAILAND][17] = 66, [0][0][2][0][RTW89_FCC][19] = 72, [0][0][2][0][RTW89_ETSI][19] = 66, [0][0][2][0][RTW89_MKK][19] = 70, @@ -34784,6 +35063,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][19] = 54, [0][0][2][0][RTW89_CHILE][19] = 68, [0][0][2][0][RTW89_QATAR][19] = 66, + [0][0][2][0][RTW89_THAILAND][19] = 66, [0][0][2][0][RTW89_FCC][21] = 72, [0][0][2][0][RTW89_ETSI][21] = 66, [0][0][2][0][RTW89_MKK][21] = 70, @@ -34796,6 +35076,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][21] = 54, [0][0][2][0][RTW89_CHILE][21] = 70, [0][0][2][0][RTW89_QATAR][21] = 66, + [0][0][2][0][RTW89_THAILAND][21] = 66, 
[0][0][2][0][RTW89_FCC][23] = 72, [0][0][2][0][RTW89_ETSI][23] = 66, [0][0][2][0][RTW89_MKK][23] = 70, @@ -34808,6 +35089,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][23] = 54, [0][0][2][0][RTW89_CHILE][23] = 70, [0][0][2][0][RTW89_QATAR][23] = 66, + [0][0][2][0][RTW89_THAILAND][23] = 66, [0][0][2][0][RTW89_FCC][25] = 72, [0][0][2][0][RTW89_ETSI][25] = 66, [0][0][2][0][RTW89_MKK][25] = 70, @@ -34820,6 +35102,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][25] = 54, [0][0][2][0][RTW89_CHILE][25] = 70, [0][0][2][0][RTW89_QATAR][25] = 66, + [0][0][2][0][RTW89_THAILAND][25] = 66, [0][0][2][0][RTW89_FCC][27] = 72, [0][0][2][0][RTW89_ETSI][27] = 66, [0][0][2][0][RTW89_MKK][27] = 70, @@ -34832,6 +35115,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][27] = 54, [0][0][2][0][RTW89_CHILE][27] = 56, [0][0][2][0][RTW89_QATAR][27] = 66, + [0][0][2][0][RTW89_THAILAND][27] = 66, [0][0][2][0][RTW89_FCC][29] = 72, [0][0][2][0][RTW89_ETSI][29] = 66, [0][0][2][0][RTW89_MKK][29] = 70, @@ -34844,6 +35128,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][29] = 54, [0][0][2][0][RTW89_CHILE][29] = 56, [0][0][2][0][RTW89_QATAR][29] = 66, + [0][0][2][0][RTW89_THAILAND][29] = 66, [0][0][2][0][RTW89_FCC][31] = 72, [0][0][2][0][RTW89_ETSI][31] = 66, [0][0][2][0][RTW89_MKK][31] = 70, @@ -34856,6 +35141,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][31] = 54, [0][0][2][0][RTW89_CHILE][31] = 56, [0][0][2][0][RTW89_QATAR][31] = 66, + [0][0][2][0][RTW89_THAILAND][31] = 66, [0][0][2][0][RTW89_FCC][33] = 72, [0][0][2][0][RTW89_ETSI][33] = 66, [0][0][2][0][RTW89_MKK][33] = 70, @@ -34868,6 +35154,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][33] = 54, [0][0][2][0][RTW89_CHILE][33] = 56, [0][0][2][0][RTW89_QATAR][33] = 66, + [0][0][2][0][RTW89_THAILAND][33] = 66, [0][0][2][0][RTW89_FCC][35] = 56, [0][0][2][0][RTW89_ETSI][35] = 66, [0][0][2][0][RTW89_MKK][35] = 70, @@ -34880,6 +35167,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][35] = 54, [0][0][2][0][RTW89_CHILE][35] = 56, [0][0][2][0][RTW89_QATAR][35] = 66, + [0][0][2][0][RTW89_THAILAND][35] = 66, [0][0][2][0][RTW89_FCC][37] = 72, [0][0][2][0][RTW89_ETSI][37] = 127, [0][0][2][0][RTW89_MKK][37] = 70, @@ -34892,66 +35180,72 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][37] = 127, [0][0][2][0][RTW89_CHILE][37] = 70, [0][0][2][0][RTW89_QATAR][37] = 127, + [0][0][2][0][RTW89_THAILAND][37] = 127, [0][0][2][0][RTW89_FCC][38] = 72, [0][0][2][0][RTW89_ETSI][38] = 30, [0][0][2][0][RTW89_MKK][38] = 127, [0][0][2][0][RTW89_IC][38] = 72, [0][0][2][0][RTW89_KCC][38] = 58, [0][0][2][0][RTW89_ACMA][38] = 70, - [0][0][2][0][RTW89_CN][38] = 68, + [0][0][2][0][RTW89_CN][38] = 56, [0][0][2][0][RTW89_UK][38] = 64, [0][0][2][0][RTW89_MEXICO][38] = 72, [0][0][2][0][RTW89_UKRAINE][38] = 30, [0][0][2][0][RTW89_CHILE][38] = 70, [0][0][2][0][RTW89_QATAR][38] = 30, + [0][0][2][0][RTW89_THAILAND][38] = 30, [0][0][2][0][RTW89_FCC][40] = 72, [0][0][2][0][RTW89_ETSI][40] = 30, [0][0][2][0][RTW89_MKK][40] = 127, [0][0][2][0][RTW89_IC][40] = 72, [0][0][2][0][RTW89_KCC][40] = 58, [0][0][2][0][RTW89_ACMA][40] = 70, - [0][0][2][0][RTW89_CN][40] = 68, + [0][0][2][0][RTW89_CN][40] = 56, 
[0][0][2][0][RTW89_UK][40] = 64, [0][0][2][0][RTW89_MEXICO][40] = 72, [0][0][2][0][RTW89_UKRAINE][40] = 30, [0][0][2][0][RTW89_CHILE][40] = 70, [0][0][2][0][RTW89_QATAR][40] = 30, + [0][0][2][0][RTW89_THAILAND][40] = 30, [0][0][2][0][RTW89_FCC][42] = 72, [0][0][2][0][RTW89_ETSI][42] = 30, [0][0][2][0][RTW89_MKK][42] = 127, [0][0][2][0][RTW89_IC][42] = 72, [0][0][2][0][RTW89_KCC][42] = 58, [0][0][2][0][RTW89_ACMA][42] = 70, - [0][0][2][0][RTW89_CN][42] = 68, + [0][0][2][0][RTW89_CN][42] = 56, [0][0][2][0][RTW89_UK][42] = 64, [0][0][2][0][RTW89_MEXICO][42] = 72, [0][0][2][0][RTW89_UKRAINE][42] = 30, [0][0][2][0][RTW89_CHILE][42] = 70, [0][0][2][0][RTW89_QATAR][42] = 30, + [0][0][2][0][RTW89_THAILAND][42] = 30, [0][0][2][0][RTW89_FCC][44] = 72, [0][0][2][0][RTW89_ETSI][44] = 30, [0][0][2][0][RTW89_MKK][44] = 127, [0][0][2][0][RTW89_IC][44] = 72, [0][0][2][0][RTW89_KCC][44] = 58, [0][0][2][0][RTW89_ACMA][44] = 70, - [0][0][2][0][RTW89_CN][44] = 68, + [0][0][2][0][RTW89_CN][44] = 56, [0][0][2][0][RTW89_UK][44] = 64, [0][0][2][0][RTW89_MEXICO][44] = 72, [0][0][2][0][RTW89_UKRAINE][44] = 30, [0][0][2][0][RTW89_CHILE][44] = 70, [0][0][2][0][RTW89_QATAR][44] = 30, + [0][0][2][0][RTW89_THAILAND][44] = 30, [0][0][2][0][RTW89_FCC][46] = 72, [0][0][2][0][RTW89_ETSI][46] = 30, [0][0][2][0][RTW89_MKK][46] = 127, [0][0][2][0][RTW89_IC][46] = 72, [0][0][2][0][RTW89_KCC][46] = 58, [0][0][2][0][RTW89_ACMA][46] = 70, - [0][0][2][0][RTW89_CN][46] = 68, + [0][0][2][0][RTW89_CN][46] = 56, [0][0][2][0][RTW89_UK][46] = 64, [0][0][2][0][RTW89_MEXICO][46] = 72, [0][0][2][0][RTW89_UKRAINE][46] = 30, [0][0][2][0][RTW89_CHILE][46] = 70, [0][0][2][0][RTW89_QATAR][46] = 30, + [0][0][2][0][RTW89_THAILAND][46] = 30, [0][0][2][0][RTW89_FCC][48] = 72, [0][0][2][0][RTW89_ETSI][48] = 127, [0][0][2][0][RTW89_MKK][48] = 127, @@ -34964,6 +35258,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][48] = 127, [0][0][2][0][RTW89_CHILE][48] = 127, [0][0][2][0][RTW89_QATAR][48] = 127, + [0][0][2][0][RTW89_THAILAND][48] = 127, [0][0][2][0][RTW89_FCC][50] = 72, [0][0][2][0][RTW89_ETSI][50] = 127, [0][0][2][0][RTW89_MKK][50] = 127, @@ -34976,6 +35271,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][50] = 127, [0][0][2][0][RTW89_CHILE][50] = 127, [0][0][2][0][RTW89_QATAR][50] = 127, + [0][0][2][0][RTW89_THAILAND][50] = 127, [0][0][2][0][RTW89_FCC][52] = 72, [0][0][2][0][RTW89_ETSI][52] = 127, [0][0][2][0][RTW89_MKK][52] = 127, @@ -34988,6 +35284,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_UKRAINE][52] = 127, [0][0][2][0][RTW89_CHILE][52] = 127, [0][0][2][0][RTW89_QATAR][52] = 127, + [0][0][2][0][RTW89_THAILAND][52] = 127, [0][1][2][0][RTW89_FCC][0] = 60, [0][1][2][0][RTW89_ETSI][0] = 54, [0][1][2][0][RTW89_MKK][0] = 54, @@ -35000,6 +35297,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][0] = 42, [0][1][2][0][RTW89_CHILE][0] = 60, [0][1][2][0][RTW89_QATAR][0] = 54, + [0][1][2][0][RTW89_THAILAND][0] = 54, [0][1][2][0][RTW89_FCC][2] = 62, [0][1][2][0][RTW89_ETSI][2] = 54, [0][1][2][0][RTW89_MKK][2] = 54, @@ -35012,6 +35310,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][2] = 42, [0][1][2][0][RTW89_CHILE][2] = 62, [0][1][2][0][RTW89_QATAR][2] = 54, + [0][1][2][0][RTW89_THAILAND][2] = 54, [0][1][2][0][RTW89_FCC][4] = 62, [0][1][2][0][RTW89_ETSI][4] = 54, [0][1][2][0][RTW89_MKK][4] = 54, @@ -35024,6 
+35323,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][4] = 42, [0][1][2][0][RTW89_CHILE][4] = 62, [0][1][2][0][RTW89_QATAR][4] = 54, + [0][1][2][0][RTW89_THAILAND][4] = 54, [0][1][2][0][RTW89_FCC][6] = 62, [0][1][2][0][RTW89_ETSI][6] = 54, [0][1][2][0][RTW89_MKK][6] = 50, @@ -35036,6 +35336,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][6] = 42, [0][1][2][0][RTW89_CHILE][6] = 62, [0][1][2][0][RTW89_QATAR][6] = 54, + [0][1][2][0][RTW89_THAILAND][6] = 54, [0][1][2][0][RTW89_FCC][8] = 62, [0][1][2][0][RTW89_ETSI][8] = 54, [0][1][2][0][RTW89_MKK][8] = 42, @@ -35048,6 +35349,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][8] = 42, [0][1][2][0][RTW89_CHILE][8] = 62, [0][1][2][0][RTW89_QATAR][8] = 54, + [0][1][2][0][RTW89_THAILAND][8] = 54, [0][1][2][0][RTW89_FCC][10] = 62, [0][1][2][0][RTW89_ETSI][10] = 54, [0][1][2][0][RTW89_MKK][10] = 54, @@ -35060,6 +35362,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][10] = 42, [0][1][2][0][RTW89_CHILE][10] = 62, [0][1][2][0][RTW89_QATAR][10] = 54, + [0][1][2][0][RTW89_THAILAND][10] = 54, [0][1][2][0][RTW89_FCC][12] = 62, [0][1][2][0][RTW89_ETSI][12] = 54, [0][1][2][0][RTW89_MKK][12] = 54, @@ -35072,6 +35375,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][12] = 42, [0][1][2][0][RTW89_CHILE][12] = 62, [0][1][2][0][RTW89_QATAR][12] = 54, + [0][1][2][0][RTW89_THAILAND][12] = 54, [0][1][2][0][RTW89_FCC][14] = 62, [0][1][2][0][RTW89_ETSI][14] = 54, [0][1][2][0][RTW89_MKK][14] = 54, @@ -35084,6 +35388,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][14] = 42, [0][1][2][0][RTW89_CHILE][14] = 62, [0][1][2][0][RTW89_QATAR][14] = 54, + [0][1][2][0][RTW89_THAILAND][14] = 54, [0][1][2][0][RTW89_FCC][15] = 60, [0][1][2][0][RTW89_ETSI][15] = 54, [0][1][2][0][RTW89_MKK][15] = 68, @@ -35096,6 +35401,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][15] = 42, [0][1][2][0][RTW89_CHILE][15] = 60, [0][1][2][0][RTW89_QATAR][15] = 54, + [0][1][2][0][RTW89_THAILAND][15] = 54, [0][1][2][0][RTW89_FCC][17] = 62, [0][1][2][0][RTW89_ETSI][17] = 54, [0][1][2][0][RTW89_MKK][17] = 68, @@ -35108,6 +35414,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][17] = 42, [0][1][2][0][RTW89_CHILE][17] = 60, [0][1][2][0][RTW89_QATAR][17] = 54, + [0][1][2][0][RTW89_THAILAND][17] = 54, [0][1][2][0][RTW89_FCC][19] = 62, [0][1][2][0][RTW89_ETSI][19] = 54, [0][1][2][0][RTW89_MKK][19] = 68, @@ -35120,6 +35427,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][19] = 42, [0][1][2][0][RTW89_CHILE][19] = 62, [0][1][2][0][RTW89_QATAR][19] = 54, + [0][1][2][0][RTW89_THAILAND][19] = 54, [0][1][2][0][RTW89_FCC][21] = 62, [0][1][2][0][RTW89_ETSI][21] = 54, [0][1][2][0][RTW89_MKK][21] = 68, @@ -35132,6 +35440,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][21] = 42, [0][1][2][0][RTW89_CHILE][21] = 62, [0][1][2][0][RTW89_QATAR][21] = 54, + [0][1][2][0][RTW89_THAILAND][21] = 54, [0][1][2][0][RTW89_FCC][23] = 62, [0][1][2][0][RTW89_ETSI][23] = 54, [0][1][2][0][RTW89_MKK][23] = 68, @@ -35144,6 +35453,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][23] = 42, 
[0][1][2][0][RTW89_CHILE][23] = 62, [0][1][2][0][RTW89_QATAR][23] = 54, + [0][1][2][0][RTW89_THAILAND][23] = 54, [0][1][2][0][RTW89_FCC][25] = 62, [0][1][2][0][RTW89_ETSI][25] = 54, [0][1][2][0][RTW89_MKK][25] = 68, @@ -35156,6 +35466,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][25] = 42, [0][1][2][0][RTW89_CHILE][25] = 62, [0][1][2][0][RTW89_QATAR][25] = 54, + [0][1][2][0][RTW89_THAILAND][25] = 54, [0][1][2][0][RTW89_FCC][27] = 62, [0][1][2][0][RTW89_ETSI][27] = 54, [0][1][2][0][RTW89_MKK][27] = 68, @@ -35168,6 +35479,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][27] = 42, [0][1][2][0][RTW89_CHILE][27] = 46, [0][1][2][0][RTW89_QATAR][27] = 54, + [0][1][2][0][RTW89_THAILAND][27] = 54, [0][1][2][0][RTW89_FCC][29] = 62, [0][1][2][0][RTW89_ETSI][29] = 54, [0][1][2][0][RTW89_MKK][29] = 68, @@ -35180,6 +35492,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][29] = 42, [0][1][2][0][RTW89_CHILE][29] = 46, [0][1][2][0][RTW89_QATAR][29] = 54, + [0][1][2][0][RTW89_THAILAND][29] = 54, [0][1][2][0][RTW89_FCC][31] = 62, [0][1][2][0][RTW89_ETSI][31] = 54, [0][1][2][0][RTW89_MKK][31] = 68, @@ -35192,6 +35505,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][31] = 42, [0][1][2][0][RTW89_CHILE][31] = 46, [0][1][2][0][RTW89_QATAR][31] = 54, + [0][1][2][0][RTW89_THAILAND][31] = 54, [0][1][2][0][RTW89_FCC][33] = 62, [0][1][2][0][RTW89_ETSI][33] = 54, [0][1][2][0][RTW89_MKK][33] = 68, @@ -35204,6 +35518,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][33] = 42, [0][1][2][0][RTW89_CHILE][33] = 46, [0][1][2][0][RTW89_QATAR][33] = 54, + [0][1][2][0][RTW89_THAILAND][33] = 54, [0][1][2][0][RTW89_FCC][35] = 46, [0][1][2][0][RTW89_ETSI][35] = 54, [0][1][2][0][RTW89_MKK][35] = 68, @@ -35216,6 +35531,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][35] = 42, [0][1][2][0][RTW89_CHILE][35] = 46, [0][1][2][0][RTW89_QATAR][35] = 54, + [0][1][2][0][RTW89_THAILAND][35] = 54, [0][1][2][0][RTW89_FCC][37] = 64, [0][1][2][0][RTW89_ETSI][37] = 127, [0][1][2][0][RTW89_MKK][37] = 68, @@ -35228,66 +35544,72 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][37] = 127, [0][1][2][0][RTW89_CHILE][37] = 64, [0][1][2][0][RTW89_QATAR][37] = 127, + [0][1][2][0][RTW89_THAILAND][37] = 127, [0][1][2][0][RTW89_FCC][38] = 72, [0][1][2][0][RTW89_ETSI][38] = 18, [0][1][2][0][RTW89_MKK][38] = 127, [0][1][2][0][RTW89_IC][38] = 72, [0][1][2][0][RTW89_KCC][38] = 56, [0][1][2][0][RTW89_ACMA][38] = 70, - [0][1][2][0][RTW89_CN][38] = 68, + [0][1][2][0][RTW89_CN][38] = 56, [0][1][2][0][RTW89_UK][38] = 52, [0][1][2][0][RTW89_MEXICO][38] = 72, [0][1][2][0][RTW89_UKRAINE][38] = 18, [0][1][2][0][RTW89_CHILE][38] = 70, [0][1][2][0][RTW89_QATAR][38] = 18, + [0][1][2][0][RTW89_THAILAND][38] = 18, [0][1][2][0][RTW89_FCC][40] = 72, [0][1][2][0][RTW89_ETSI][40] = 18, [0][1][2][0][RTW89_MKK][40] = 127, [0][1][2][0][RTW89_IC][40] = 72, [0][1][2][0][RTW89_KCC][40] = 56, [0][1][2][0][RTW89_ACMA][40] = 70, - [0][1][2][0][RTW89_CN][40] = 68, + [0][1][2][0][RTW89_CN][40] = 56, [0][1][2][0][RTW89_UK][40] = 52, [0][1][2][0][RTW89_MEXICO][40] = 72, [0][1][2][0][RTW89_UKRAINE][40] = 18, [0][1][2][0][RTW89_CHILE][40] = 70, [0][1][2][0][RTW89_QATAR][40] = 18, + [0][1][2][0][RTW89_THAILAND][40] = 18, [0][1][2][0][RTW89_FCC][42] = 72, 
[0][1][2][0][RTW89_ETSI][42] = 18, [0][1][2][0][RTW89_MKK][42] = 127, [0][1][2][0][RTW89_IC][42] = 72, [0][1][2][0][RTW89_KCC][42] = 56, [0][1][2][0][RTW89_ACMA][42] = 70, - [0][1][2][0][RTW89_CN][42] = 68, + [0][1][2][0][RTW89_CN][42] = 56, [0][1][2][0][RTW89_UK][42] = 52, [0][1][2][0][RTW89_MEXICO][42] = 72, [0][1][2][0][RTW89_UKRAINE][42] = 18, [0][1][2][0][RTW89_CHILE][42] = 70, [0][1][2][0][RTW89_QATAR][42] = 18, + [0][1][2][0][RTW89_THAILAND][42] = 18, [0][1][2][0][RTW89_FCC][44] = 72, [0][1][2][0][RTW89_ETSI][44] = 18, [0][1][2][0][RTW89_MKK][44] = 127, [0][1][2][0][RTW89_IC][44] = 72, [0][1][2][0][RTW89_KCC][44] = 56, [0][1][2][0][RTW89_ACMA][44] = 70, - [0][1][2][0][RTW89_CN][44] = 68, + [0][1][2][0][RTW89_CN][44] = 56, [0][1][2][0][RTW89_UK][44] = 52, [0][1][2][0][RTW89_MEXICO][44] = 72, [0][1][2][0][RTW89_UKRAINE][44] = 18, [0][1][2][0][RTW89_CHILE][44] = 70, [0][1][2][0][RTW89_QATAR][44] = 18, + [0][1][2][0][RTW89_THAILAND][44] = 18, [0][1][2][0][RTW89_FCC][46] = 72, [0][1][2][0][RTW89_ETSI][46] = 18, [0][1][2][0][RTW89_MKK][46] = 127, [0][1][2][0][RTW89_IC][46] = 72, [0][1][2][0][RTW89_KCC][46] = 56, [0][1][2][0][RTW89_ACMA][46] = 70, - [0][1][2][0][RTW89_CN][46] = 68, + [0][1][2][0][RTW89_CN][46] = 56, [0][1][2][0][RTW89_UK][46] = 52, [0][1][2][0][RTW89_MEXICO][46] = 72, [0][1][2][0][RTW89_UKRAINE][46] = 18, [0][1][2][0][RTW89_CHILE][46] = 70, [0][1][2][0][RTW89_QATAR][46] = 18, + [0][1][2][0][RTW89_THAILAND][46] = 18, [0][1][2][0][RTW89_FCC][48] = 48, [0][1][2][0][RTW89_ETSI][48] = 127, [0][1][2][0][RTW89_MKK][48] = 127, @@ -35300,6 +35622,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][48] = 127, [0][1][2][0][RTW89_CHILE][48] = 127, [0][1][2][0][RTW89_QATAR][48] = 127, + [0][1][2][0][RTW89_THAILAND][48] = 127, [0][1][2][0][RTW89_FCC][50] = 50, [0][1][2][0][RTW89_ETSI][50] = 127, [0][1][2][0][RTW89_MKK][50] = 127, @@ -35312,6 +35635,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][50] = 127, [0][1][2][0][RTW89_CHILE][50] = 127, [0][1][2][0][RTW89_QATAR][50] = 127, + [0][1][2][0][RTW89_THAILAND][50] = 127, [0][1][2][0][RTW89_FCC][52] = 48, [0][1][2][0][RTW89_ETSI][52] = 127, [0][1][2][0][RTW89_MKK][52] = 127, @@ -35324,6 +35648,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_UKRAINE][52] = 127, [0][1][2][0][RTW89_CHILE][52] = 127, [0][1][2][0][RTW89_QATAR][52] = 127, + [0][1][2][0][RTW89_THAILAND][52] = 127, [0][1][2][1][RTW89_FCC][0] = 60, [0][1][2][1][RTW89_ETSI][0] = 40, [0][1][2][1][RTW89_MKK][0] = 54, @@ -35336,6 +35661,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][0] = 30, [0][1][2][1][RTW89_CHILE][0] = 60, [0][1][2][1][RTW89_QATAR][0] = 40, + [0][1][2][1][RTW89_THAILAND][0] = 40, [0][1][2][1][RTW89_FCC][2] = 62, [0][1][2][1][RTW89_ETSI][2] = 40, [0][1][2][1][RTW89_MKK][2] = 54, @@ -35348,6 +35674,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][2] = 30, [0][1][2][1][RTW89_CHILE][2] = 60, [0][1][2][1][RTW89_QATAR][2] = 40, + [0][1][2][1][RTW89_THAILAND][2] = 40, [0][1][2][1][RTW89_FCC][4] = 62, [0][1][2][1][RTW89_ETSI][4] = 40, [0][1][2][1][RTW89_MKK][4] = 54, @@ -35360,6 +35687,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][4] = 30, [0][1][2][1][RTW89_CHILE][4] = 60, [0][1][2][1][RTW89_QATAR][4] = 40, + [0][1][2][1][RTW89_THAILAND][4] = 40, [0][1][2][1][RTW89_FCC][6] = 62, 
[0][1][2][1][RTW89_ETSI][6] = 40, [0][1][2][1][RTW89_MKK][6] = 50, @@ -35372,6 +35700,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][6] = 30, [0][1][2][1][RTW89_CHILE][6] = 60, [0][1][2][1][RTW89_QATAR][6] = 40, + [0][1][2][1][RTW89_THAILAND][6] = 40, [0][1][2][1][RTW89_FCC][8] = 62, [0][1][2][1][RTW89_ETSI][8] = 40, [0][1][2][1][RTW89_MKK][8] = 42, @@ -35384,6 +35713,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][8] = 30, [0][1][2][1][RTW89_CHILE][8] = 60, [0][1][2][1][RTW89_QATAR][8] = 40, + [0][1][2][1][RTW89_THAILAND][8] = 40, [0][1][2][1][RTW89_FCC][10] = 62, [0][1][2][1][RTW89_ETSI][10] = 40, [0][1][2][1][RTW89_MKK][10] = 54, @@ -35396,6 +35726,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][10] = 30, [0][1][2][1][RTW89_CHILE][10] = 60, [0][1][2][1][RTW89_QATAR][10] = 40, + [0][1][2][1][RTW89_THAILAND][10] = 40, [0][1][2][1][RTW89_FCC][12] = 62, [0][1][2][1][RTW89_ETSI][12] = 40, [0][1][2][1][RTW89_MKK][12] = 54, @@ -35408,6 +35739,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][12] = 30, [0][1][2][1][RTW89_CHILE][12] = 60, [0][1][2][1][RTW89_QATAR][12] = 40, + [0][1][2][1][RTW89_THAILAND][12] = 40, [0][1][2][1][RTW89_FCC][14] = 62, [0][1][2][1][RTW89_ETSI][14] = 40, [0][1][2][1][RTW89_MKK][14] = 54, @@ -35420,6 +35752,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][14] = 30, [0][1][2][1][RTW89_CHILE][14] = 60, [0][1][2][1][RTW89_QATAR][14] = 40, + [0][1][2][1][RTW89_THAILAND][14] = 40, [0][1][2][1][RTW89_FCC][15] = 60, [0][1][2][1][RTW89_ETSI][15] = 40, [0][1][2][1][RTW89_MKK][15] = 68, @@ -35432,6 +35765,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][15] = 30, [0][1][2][1][RTW89_CHILE][15] = 60, [0][1][2][1][RTW89_QATAR][15] = 40, + [0][1][2][1][RTW89_THAILAND][15] = 40, [0][1][2][1][RTW89_FCC][17] = 62, [0][1][2][1][RTW89_ETSI][17] = 40, [0][1][2][1][RTW89_MKK][17] = 68, @@ -35444,6 +35778,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][17] = 30, [0][1][2][1][RTW89_CHILE][17] = 60, [0][1][2][1][RTW89_QATAR][17] = 40, + [0][1][2][1][RTW89_THAILAND][17] = 40, [0][1][2][1][RTW89_FCC][19] = 62, [0][1][2][1][RTW89_ETSI][19] = 40, [0][1][2][1][RTW89_MKK][19] = 68, @@ -35456,6 +35791,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][19] = 30, [0][1][2][1][RTW89_CHILE][19] = 60, [0][1][2][1][RTW89_QATAR][19] = 40, + [0][1][2][1][RTW89_THAILAND][19] = 40, [0][1][2][1][RTW89_FCC][21] = 62, [0][1][2][1][RTW89_ETSI][21] = 40, [0][1][2][1][RTW89_MKK][21] = 68, @@ -35468,6 +35804,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][21] = 30, [0][1][2][1][RTW89_CHILE][21] = 60, [0][1][2][1][RTW89_QATAR][21] = 40, + [0][1][2][1][RTW89_THAILAND][21] = 40, [0][1][2][1][RTW89_FCC][23] = 62, [0][1][2][1][RTW89_ETSI][23] = 40, [0][1][2][1][RTW89_MKK][23] = 68, @@ -35480,6 +35817,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][23] = 30, [0][1][2][1][RTW89_CHILE][23] = 60, [0][1][2][1][RTW89_QATAR][23] = 40, + [0][1][2][1][RTW89_THAILAND][23] = 40, [0][1][2][1][RTW89_FCC][25] = 46, [0][1][2][1][RTW89_ETSI][25] = 40, [0][1][2][1][RTW89_MKK][25] = 68, @@ -35492,6 +35830,7 @@ const s8 
rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][25] = 30, [0][1][2][1][RTW89_CHILE][25] = 60, [0][1][2][1][RTW89_QATAR][25] = 40, + [0][1][2][1][RTW89_THAILAND][25] = 40, [0][1][2][1][RTW89_FCC][27] = 46, [0][1][2][1][RTW89_ETSI][27] = 40, [0][1][2][1][RTW89_MKK][27] = 68, @@ -35504,6 +35843,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][27] = 30, [0][1][2][1][RTW89_CHILE][27] = 46, [0][1][2][1][RTW89_QATAR][27] = 40, + [0][1][2][1][RTW89_THAILAND][27] = 40, [0][1][2][1][RTW89_FCC][29] = 46, [0][1][2][1][RTW89_ETSI][29] = 40, [0][1][2][1][RTW89_MKK][29] = 68, @@ -35516,6 +35856,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][29] = 30, [0][1][2][1][RTW89_CHILE][29] = 46, [0][1][2][1][RTW89_QATAR][29] = 40, + [0][1][2][1][RTW89_THAILAND][29] = 40, [0][1][2][1][RTW89_FCC][31] = 46, [0][1][2][1][RTW89_ETSI][31] = 40, [0][1][2][1][RTW89_MKK][31] = 68, @@ -35528,6 +35869,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][31] = 30, [0][1][2][1][RTW89_CHILE][31] = 46, [0][1][2][1][RTW89_QATAR][31] = 40, + [0][1][2][1][RTW89_THAILAND][31] = 40, [0][1][2][1][RTW89_FCC][33] = 46, [0][1][2][1][RTW89_ETSI][33] = 40, [0][1][2][1][RTW89_MKK][33] = 68, @@ -35540,6 +35882,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][33] = 30, [0][1][2][1][RTW89_CHILE][33] = 46, [0][1][2][1][RTW89_QATAR][33] = 40, + [0][1][2][1][RTW89_THAILAND][33] = 40, [0][1][2][1][RTW89_FCC][35] = 46, [0][1][2][1][RTW89_ETSI][35] = 40, [0][1][2][1][RTW89_MKK][35] = 68, @@ -35552,6 +35895,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][35] = 30, [0][1][2][1][RTW89_CHILE][35] = 46, [0][1][2][1][RTW89_QATAR][35] = 40, + [0][1][2][1][RTW89_THAILAND][35] = 40, [0][1][2][1][RTW89_FCC][37] = 64, [0][1][2][1][RTW89_ETSI][37] = 127, [0][1][2][1][RTW89_MKK][37] = 68, @@ -35564,66 +35908,72 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][37] = 127, [0][1][2][1][RTW89_CHILE][37] = 64, [0][1][2][1][RTW89_QATAR][37] = 127, + [0][1][2][1][RTW89_THAILAND][37] = 127, [0][1][2][1][RTW89_FCC][38] = 72, [0][1][2][1][RTW89_ETSI][38] = 6, [0][1][2][1][RTW89_MKK][38] = 127, [0][1][2][1][RTW89_IC][38] = 72, [0][1][2][1][RTW89_KCC][38] = 56, [0][1][2][1][RTW89_ACMA][38] = 70, - [0][1][2][1][RTW89_CN][38] = 60, + [0][1][2][1][RTW89_CN][38] = 50, [0][1][2][1][RTW89_UK][38] = 40, [0][1][2][1][RTW89_MEXICO][38] = 72, [0][1][2][1][RTW89_UKRAINE][38] = 6, [0][1][2][1][RTW89_CHILE][38] = 60, [0][1][2][1][RTW89_QATAR][38] = 6, + [0][1][2][1][RTW89_THAILAND][38] = 6, [0][1][2][1][RTW89_FCC][40] = 72, [0][1][2][1][RTW89_ETSI][40] = 6, [0][1][2][1][RTW89_MKK][40] = 127, [0][1][2][1][RTW89_IC][40] = 72, [0][1][2][1][RTW89_KCC][40] = 56, [0][1][2][1][RTW89_ACMA][40] = 70, - [0][1][2][1][RTW89_CN][40] = 60, + [0][1][2][1][RTW89_CN][40] = 50, [0][1][2][1][RTW89_UK][40] = 40, [0][1][2][1][RTW89_MEXICO][40] = 72, [0][1][2][1][RTW89_UKRAINE][40] = 6, [0][1][2][1][RTW89_CHILE][40] = 60, [0][1][2][1][RTW89_QATAR][40] = 6, + [0][1][2][1][RTW89_THAILAND][40] = 6, [0][1][2][1][RTW89_FCC][42] = 72, [0][1][2][1][RTW89_ETSI][42] = 6, [0][1][2][1][RTW89_MKK][42] = 127, [0][1][2][1][RTW89_IC][42] = 72, [0][1][2][1][RTW89_KCC][42] = 56, [0][1][2][1][RTW89_ACMA][42] = 70, - [0][1][2][1][RTW89_CN][42] = 60, + [0][1][2][1][RTW89_CN][42] = 50, 
[0][1][2][1][RTW89_UK][42] = 40, [0][1][2][1][RTW89_MEXICO][42] = 72, [0][1][2][1][RTW89_UKRAINE][42] = 6, [0][1][2][1][RTW89_CHILE][42] = 60, [0][1][2][1][RTW89_QATAR][42] = 6, + [0][1][2][1][RTW89_THAILAND][42] = 6, [0][1][2][1][RTW89_FCC][44] = 72, [0][1][2][1][RTW89_ETSI][44] = 6, [0][1][2][1][RTW89_MKK][44] = 127, [0][1][2][1][RTW89_IC][44] = 72, [0][1][2][1][RTW89_KCC][44] = 56, [0][1][2][1][RTW89_ACMA][44] = 70, - [0][1][2][1][RTW89_CN][44] = 54, + [0][1][2][1][RTW89_CN][44] = 50, [0][1][2][1][RTW89_UK][44] = 40, [0][1][2][1][RTW89_MEXICO][44] = 72, [0][1][2][1][RTW89_UKRAINE][44] = 6, [0][1][2][1][RTW89_CHILE][44] = 60, [0][1][2][1][RTW89_QATAR][44] = 6, + [0][1][2][1][RTW89_THAILAND][44] = 6, [0][1][2][1][RTW89_FCC][46] = 72, [0][1][2][1][RTW89_ETSI][46] = 6, [0][1][2][1][RTW89_MKK][46] = 127, [0][1][2][1][RTW89_IC][46] = 72, [0][1][2][1][RTW89_KCC][46] = 56, [0][1][2][1][RTW89_ACMA][46] = 70, - [0][1][2][1][RTW89_CN][46] = 54, + [0][1][2][1][RTW89_CN][46] = 50, [0][1][2][1][RTW89_UK][46] = 40, [0][1][2][1][RTW89_MEXICO][46] = 72, [0][1][2][1][RTW89_UKRAINE][46] = 6, [0][1][2][1][RTW89_CHILE][46] = 60, [0][1][2][1][RTW89_QATAR][46] = 6, + [0][1][2][1][RTW89_THAILAND][46] = 6, [0][1][2][1][RTW89_FCC][48] = 48, [0][1][2][1][RTW89_ETSI][48] = 127, [0][1][2][1][RTW89_MKK][48] = 127, @@ -35636,6 +35986,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][48] = 127, [0][1][2][1][RTW89_CHILE][48] = 127, [0][1][2][1][RTW89_QATAR][48] = 127, + [0][1][2][1][RTW89_THAILAND][48] = 127, [0][1][2][1][RTW89_FCC][50] = 50, [0][1][2][1][RTW89_ETSI][50] = 127, [0][1][2][1][RTW89_MKK][50] = 127, @@ -35648,6 +35999,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][50] = 127, [0][1][2][1][RTW89_CHILE][50] = 127, [0][1][2][1][RTW89_QATAR][50] = 127, + [0][1][2][1][RTW89_THAILAND][50] = 127, [0][1][2][1][RTW89_FCC][52] = 48, [0][1][2][1][RTW89_ETSI][52] = 127, [0][1][2][1][RTW89_MKK][52] = 127, @@ -35660,6 +36012,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_UKRAINE][52] = 127, [0][1][2][1][RTW89_CHILE][52] = 127, [0][1][2][1][RTW89_QATAR][52] = 127, + [0][1][2][1][RTW89_THAILAND][52] = 127, [1][0][2][0][RTW89_FCC][1] = 64, [1][0][2][0][RTW89_ETSI][1] = 66, [1][0][2][0][RTW89_MKK][1] = 66, @@ -35672,6 +36025,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][1] = 54, [1][0][2][0][RTW89_CHILE][1] = 62, [1][0][2][0][RTW89_QATAR][1] = 66, + [1][0][2][0][RTW89_THAILAND][1] = 66, [1][0][2][0][RTW89_FCC][5] = 68, [1][0][2][0][RTW89_ETSI][5] = 66, [1][0][2][0][RTW89_MKK][5] = 66, @@ -35684,6 +36038,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][5] = 54, [1][0][2][0][RTW89_CHILE][5] = 66, [1][0][2][0][RTW89_QATAR][5] = 66, + [1][0][2][0][RTW89_THAILAND][5] = 66, [1][0][2][0][RTW89_FCC][9] = 68, [1][0][2][0][RTW89_ETSI][9] = 66, [1][0][2][0][RTW89_MKK][9] = 66, @@ -35696,6 +36051,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][9] = 54, [1][0][2][0][RTW89_CHILE][9] = 66, [1][0][2][0][RTW89_QATAR][9] = 66, + [1][0][2][0][RTW89_THAILAND][9] = 66, [1][0][2][0][RTW89_FCC][13] = 60, [1][0][2][0][RTW89_ETSI][13] = 66, [1][0][2][0][RTW89_MKK][13] = 66, @@ -35708,6 +36064,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][13] = 54, [1][0][2][0][RTW89_CHILE][13] = 60, 
[1][0][2][0][RTW89_QATAR][13] = 66, + [1][0][2][0][RTW89_THAILAND][13] = 66, [1][0][2][0][RTW89_FCC][16] = 64, [1][0][2][0][RTW89_ETSI][16] = 66, [1][0][2][0][RTW89_MKK][16] = 66, @@ -35720,6 +36077,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][16] = 54, [1][0][2][0][RTW89_CHILE][16] = 64, [1][0][2][0][RTW89_QATAR][16] = 66, + [1][0][2][0][RTW89_THAILAND][16] = 66, [1][0][2][0][RTW89_FCC][20] = 68, [1][0][2][0][RTW89_ETSI][20] = 66, [1][0][2][0][RTW89_MKK][20] = 66, @@ -35732,6 +36090,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][20] = 54, [1][0][2][0][RTW89_CHILE][20] = 66, [1][0][2][0][RTW89_QATAR][20] = 66, + [1][0][2][0][RTW89_THAILAND][20] = 66, [1][0][2][0][RTW89_FCC][24] = 68, [1][0][2][0][RTW89_ETSI][24] = 66, [1][0][2][0][RTW89_MKK][24] = 66, @@ -35744,6 +36103,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][24] = 54, [1][0][2][0][RTW89_CHILE][24] = 66, [1][0][2][0][RTW89_QATAR][24] = 66, + [1][0][2][0][RTW89_THAILAND][24] = 66, [1][0][2][0][RTW89_FCC][28] = 68, [1][0][2][0][RTW89_ETSI][28] = 66, [1][0][2][0][RTW89_MKK][28] = 66, @@ -35756,6 +36116,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][28] = 54, [1][0][2][0][RTW89_CHILE][28] = 62, [1][0][2][0][RTW89_QATAR][28] = 66, + [1][0][2][0][RTW89_THAILAND][28] = 66, [1][0][2][0][RTW89_FCC][32] = 62, [1][0][2][0][RTW89_ETSI][32] = 66, [1][0][2][0][RTW89_MKK][32] = 66, @@ -35768,6 +36129,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][32] = 54, [1][0][2][0][RTW89_CHILE][32] = 62, [1][0][2][0][RTW89_QATAR][32] = 66, + [1][0][2][0][RTW89_THAILAND][32] = 66, [1][0][2][0][RTW89_FCC][36] = 68, [1][0][2][0][RTW89_ETSI][36] = 127, [1][0][2][0][RTW89_MKK][36] = 66, @@ -35780,30 +36142,33 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][36] = 127, [1][0][2][0][RTW89_CHILE][36] = 66, [1][0][2][0][RTW89_QATAR][36] = 127, + [1][0][2][0][RTW89_THAILAND][36] = 127, [1][0][2][0][RTW89_FCC][39] = 68, [1][0][2][0][RTW89_ETSI][39] = 30, [1][0][2][0][RTW89_MKK][39] = 127, [1][0][2][0][RTW89_IC][39] = 68, [1][0][2][0][RTW89_KCC][39] = 66, [1][0][2][0][RTW89_ACMA][39] = 66, - [1][0][2][0][RTW89_CN][39] = 62, + [1][0][2][0][RTW89_CN][39] = 52, [1][0][2][0][RTW89_UK][39] = 64, [1][0][2][0][RTW89_MEXICO][39] = 68, [1][0][2][0][RTW89_UKRAINE][39] = 30, [1][0][2][0][RTW89_CHILE][39] = 66, [1][0][2][0][RTW89_QATAR][39] = 30, + [1][0][2][0][RTW89_THAILAND][39] = 30, [1][0][2][0][RTW89_FCC][43] = 68, [1][0][2][0][RTW89_ETSI][43] = 30, [1][0][2][0][RTW89_MKK][43] = 127, [1][0][2][0][RTW89_IC][43] = 68, [1][0][2][0][RTW89_KCC][43] = 66, [1][0][2][0][RTW89_ACMA][43] = 66, - [1][0][2][0][RTW89_CN][43] = 66, + [1][0][2][0][RTW89_CN][43] = 52, [1][0][2][0][RTW89_UK][43] = 64, [1][0][2][0][RTW89_MEXICO][43] = 68, [1][0][2][0][RTW89_UKRAINE][43] = 30, [1][0][2][0][RTW89_CHILE][43] = 66, [1][0][2][0][RTW89_QATAR][43] = 30, + [1][0][2][0][RTW89_THAILAND][43] = 30, [1][0][2][0][RTW89_FCC][47] = 68, [1][0][2][0][RTW89_ETSI][47] = 127, [1][0][2][0][RTW89_MKK][47] = 127, @@ -35816,6 +36181,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][47] = 127, [1][0][2][0][RTW89_CHILE][47] = 127, [1][0][2][0][RTW89_QATAR][47] = 127, + [1][0][2][0][RTW89_THAILAND][47] = 127, [1][0][2][0][RTW89_FCC][51] = 68, [1][0][2][0][RTW89_ETSI][51] 
= 127, [1][0][2][0][RTW89_MKK][51] = 127, @@ -35828,6 +36194,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_UKRAINE][51] = 127, [1][0][2][0][RTW89_CHILE][51] = 127, [1][0][2][0][RTW89_QATAR][51] = 127, + [1][0][2][0][RTW89_THAILAND][51] = 127, [1][1][2][0][RTW89_FCC][1] = 54, [1][1][2][0][RTW89_ETSI][1] = 54, [1][1][2][0][RTW89_MKK][1] = 48, @@ -35840,6 +36207,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][1] = 42, [1][1][2][0][RTW89_CHILE][1] = 54, [1][1][2][0][RTW89_QATAR][1] = 54, + [1][1][2][0][RTW89_THAILAND][1] = 54, [1][1][2][0][RTW89_FCC][5] = 68, [1][1][2][0][RTW89_ETSI][5] = 54, [1][1][2][0][RTW89_MKK][5] = 52, @@ -35852,6 +36220,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][5] = 42, [1][1][2][0][RTW89_CHILE][5] = 66, [1][1][2][0][RTW89_QATAR][5] = 54, + [1][1][2][0][RTW89_THAILAND][5] = 54, [1][1][2][0][RTW89_FCC][9] = 68, [1][1][2][0][RTW89_ETSI][9] = 54, [1][1][2][0][RTW89_MKK][9] = 52, @@ -35864,6 +36233,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][9] = 42, [1][1][2][0][RTW89_CHILE][9] = 66, [1][1][2][0][RTW89_QATAR][9] = 54, + [1][1][2][0][RTW89_THAILAND][9] = 54, [1][1][2][0][RTW89_FCC][13] = 54, [1][1][2][0][RTW89_ETSI][13] = 54, [1][1][2][0][RTW89_MKK][13] = 52, @@ -35876,6 +36246,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][13] = 42, [1][1][2][0][RTW89_CHILE][13] = 54, [1][1][2][0][RTW89_QATAR][13] = 54, + [1][1][2][0][RTW89_THAILAND][13] = 54, [1][1][2][0][RTW89_FCC][16] = 56, [1][1][2][0][RTW89_ETSI][16] = 54, [1][1][2][0][RTW89_MKK][16] = 66, @@ -35888,6 +36259,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][16] = 42, [1][1][2][0][RTW89_CHILE][16] = 54, [1][1][2][0][RTW89_QATAR][16] = 54, + [1][1][2][0][RTW89_THAILAND][16] = 54, [1][1][2][0][RTW89_FCC][20] = 68, [1][1][2][0][RTW89_ETSI][20] = 54, [1][1][2][0][RTW89_MKK][20] = 66, @@ -35900,6 +36272,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][20] = 42, [1][1][2][0][RTW89_CHILE][20] = 66, [1][1][2][0][RTW89_QATAR][20] = 54, + [1][1][2][0][RTW89_THAILAND][20] = 54, [1][1][2][0][RTW89_FCC][24] = 68, [1][1][2][0][RTW89_ETSI][24] = 54, [1][1][2][0][RTW89_MKK][24] = 66, @@ -35912,6 +36285,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][24] = 42, [1][1][2][0][RTW89_CHILE][24] = 66, [1][1][2][0][RTW89_QATAR][24] = 54, + [1][1][2][0][RTW89_THAILAND][24] = 54, [1][1][2][0][RTW89_FCC][28] = 68, [1][1][2][0][RTW89_ETSI][28] = 54, [1][1][2][0][RTW89_MKK][28] = 66, @@ -35924,6 +36298,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][28] = 42, [1][1][2][0][RTW89_CHILE][28] = 54, [1][1][2][0][RTW89_QATAR][28] = 54, + [1][1][2][0][RTW89_THAILAND][28] = 54, [1][1][2][0][RTW89_FCC][32] = 56, [1][1][2][0][RTW89_ETSI][32] = 54, [1][1][2][0][RTW89_MKK][32] = 66, @@ -35936,6 +36311,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][32] = 42, [1][1][2][0][RTW89_CHILE][32] = 54, [1][1][2][0][RTW89_QATAR][32] = 54, + [1][1][2][0][RTW89_THAILAND][32] = 54, [1][1][2][0][RTW89_FCC][36] = 68, [1][1][2][0][RTW89_ETSI][36] = 127, [1][1][2][0][RTW89_MKK][36] = 66, @@ -35948,30 +36324,33 @@ const s8 
rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][36] = 127, [1][1][2][0][RTW89_CHILE][36] = 66, [1][1][2][0][RTW89_QATAR][36] = 127, + [1][1][2][0][RTW89_THAILAND][36] = 127, [1][1][2][0][RTW89_FCC][39] = 68, [1][1][2][0][RTW89_ETSI][39] = 18, [1][1][2][0][RTW89_MKK][39] = 127, [1][1][2][0][RTW89_IC][39] = 68, [1][1][2][0][RTW89_KCC][39] = 56, [1][1][2][0][RTW89_ACMA][39] = 66, - [1][1][2][0][RTW89_CN][39] = 62, + [1][1][2][0][RTW89_CN][39] = 52, [1][1][2][0][RTW89_UK][39] = 52, [1][1][2][0][RTW89_MEXICO][39] = 68, [1][1][2][0][RTW89_UKRAINE][39] = 18, [1][1][2][0][RTW89_CHILE][39] = 66, [1][1][2][0][RTW89_QATAR][39] = 18, + [1][1][2][0][RTW89_THAILAND][39] = 18, [1][1][2][0][RTW89_FCC][43] = 68, [1][1][2][0][RTW89_ETSI][43] = 18, [1][1][2][0][RTW89_MKK][43] = 127, [1][1][2][0][RTW89_IC][43] = 68, [1][1][2][0][RTW89_KCC][43] = 56, [1][1][2][0][RTW89_ACMA][43] = 66, - [1][1][2][0][RTW89_CN][43] = 66, + [1][1][2][0][RTW89_CN][43] = 52, [1][1][2][0][RTW89_UK][43] = 52, [1][1][2][0][RTW89_MEXICO][43] = 68, [1][1][2][0][RTW89_UKRAINE][43] = 18, [1][1][2][0][RTW89_CHILE][43] = 66, [1][1][2][0][RTW89_QATAR][43] = 18, + [1][1][2][0][RTW89_THAILAND][43] = 18, [1][1][2][0][RTW89_FCC][47] = 62, [1][1][2][0][RTW89_ETSI][47] = 127, [1][1][2][0][RTW89_MKK][47] = 127, @@ -35984,6 +36363,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][47] = 127, [1][1][2][0][RTW89_CHILE][47] = 127, [1][1][2][0][RTW89_QATAR][47] = 127, + [1][1][2][0][RTW89_THAILAND][47] = 127, [1][1][2][0][RTW89_FCC][51] = 60, [1][1][2][0][RTW89_ETSI][51] = 127, [1][1][2][0][RTW89_MKK][51] = 127, @@ -35996,6 +36376,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_UKRAINE][51] = 127, [1][1][2][0][RTW89_CHILE][51] = 127, [1][1][2][0][RTW89_QATAR][51] = 127, + [1][1][2][0][RTW89_THAILAND][51] = 127, [1][1][2][1][RTW89_FCC][1] = 54, [1][1][2][1][RTW89_ETSI][1] = 40, [1][1][2][1][RTW89_MKK][1] = 48, @@ -36008,6 +36389,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][1] = 30, [1][1][2][1][RTW89_CHILE][1] = 54, [1][1][2][1][RTW89_QATAR][1] = 40, + [1][1][2][1][RTW89_THAILAND][1] = 40, [1][1][2][1][RTW89_FCC][5] = 68, [1][1][2][1][RTW89_ETSI][5] = 40, [1][1][2][1][RTW89_MKK][5] = 52, @@ -36020,6 +36402,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][5] = 30, [1][1][2][1][RTW89_CHILE][5] = 60, [1][1][2][1][RTW89_QATAR][5] = 40, + [1][1][2][1][RTW89_THAILAND][5] = 40, [1][1][2][1][RTW89_FCC][9] = 68, [1][1][2][1][RTW89_ETSI][9] = 40, [1][1][2][1][RTW89_MKK][9] = 52, @@ -36032,6 +36415,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][9] = 30, [1][1][2][1][RTW89_CHILE][9] = 60, [1][1][2][1][RTW89_QATAR][9] = 40, + [1][1][2][1][RTW89_THAILAND][9] = 40, [1][1][2][1][RTW89_FCC][13] = 54, [1][1][2][1][RTW89_ETSI][13] = 40, [1][1][2][1][RTW89_MKK][13] = 52, @@ -36044,6 +36428,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][13] = 30, [1][1][2][1][RTW89_CHILE][13] = 54, [1][1][2][1][RTW89_QATAR][13] = 40, + [1][1][2][1][RTW89_THAILAND][13] = 40, [1][1][2][1][RTW89_FCC][16] = 56, [1][1][2][1][RTW89_ETSI][16] = 40, [1][1][2][1][RTW89_MKK][16] = 66, @@ -36056,6 +36441,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][16] = 30, [1][1][2][1][RTW89_CHILE][16] = 54, 
[1][1][2][1][RTW89_QATAR][16] = 40, + [1][1][2][1][RTW89_THAILAND][16] = 40, [1][1][2][1][RTW89_FCC][20] = 68, [1][1][2][1][RTW89_ETSI][20] = 40, [1][1][2][1][RTW89_MKK][20] = 66, @@ -36068,6 +36454,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][20] = 30, [1][1][2][1][RTW89_CHILE][20] = 60, [1][1][2][1][RTW89_QATAR][20] = 40, + [1][1][2][1][RTW89_THAILAND][20] = 40, [1][1][2][1][RTW89_FCC][24] = 68, [1][1][2][1][RTW89_ETSI][24] = 40, [1][1][2][1][RTW89_MKK][24] = 66, @@ -36080,6 +36467,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][24] = 30, [1][1][2][1][RTW89_CHILE][24] = 60, [1][1][2][1][RTW89_QATAR][24] = 40, + [1][1][2][1][RTW89_THAILAND][24] = 40, [1][1][2][1][RTW89_FCC][28] = 68, [1][1][2][1][RTW89_ETSI][28] = 40, [1][1][2][1][RTW89_MKK][28] = 66, @@ -36092,6 +36480,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][28] = 30, [1][1][2][1][RTW89_CHILE][28] = 54, [1][1][2][1][RTW89_QATAR][28] = 40, + [1][1][2][1][RTW89_THAILAND][28] = 40, [1][1][2][1][RTW89_FCC][32] = 56, [1][1][2][1][RTW89_ETSI][32] = 40, [1][1][2][1][RTW89_MKK][32] = 66, @@ -36104,6 +36493,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][32] = 30, [1][1][2][1][RTW89_CHILE][32] = 54, [1][1][2][1][RTW89_QATAR][32] = 40, + [1][1][2][1][RTW89_THAILAND][32] = 40, [1][1][2][1][RTW89_FCC][36] = 68, [1][1][2][1][RTW89_ETSI][36] = 127, [1][1][2][1][RTW89_MKK][36] = 66, @@ -36116,18 +36506,20 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][36] = 127, [1][1][2][1][RTW89_CHILE][36] = 66, [1][1][2][1][RTW89_QATAR][36] = 127, + [1][1][2][1][RTW89_THAILAND][36] = 127, [1][1][2][1][RTW89_FCC][39] = 68, [1][1][2][1][RTW89_ETSI][39] = 6, [1][1][2][1][RTW89_MKK][39] = 127, [1][1][2][1][RTW89_IC][39] = 68, [1][1][2][1][RTW89_KCC][39] = 56, [1][1][2][1][RTW89_ACMA][39] = 66, - [1][1][2][1][RTW89_CN][39] = 60, + [1][1][2][1][RTW89_CN][39] = 52, [1][1][2][1][RTW89_UK][39] = 40, [1][1][2][1][RTW89_MEXICO][39] = 68, [1][1][2][1][RTW89_UKRAINE][39] = 6, [1][1][2][1][RTW89_CHILE][39] = 60, [1][1][2][1][RTW89_QATAR][39] = 6, + [1][1][2][1][RTW89_THAILAND][39] = 6, [1][1][2][1][RTW89_FCC][43] = 68, [1][1][2][1][RTW89_ETSI][43] = 6, [1][1][2][1][RTW89_MKK][43] = 127, @@ -36140,6 +36532,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][43] = 6, [1][1][2][1][RTW89_CHILE][43] = 60, [1][1][2][1][RTW89_QATAR][43] = 6, + [1][1][2][1][RTW89_THAILAND][43] = 6, [1][1][2][1][RTW89_FCC][47] = 62, [1][1][2][1][RTW89_ETSI][47] = 127, [1][1][2][1][RTW89_MKK][47] = 127, @@ -36152,6 +36545,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][47] = 127, [1][1][2][1][RTW89_CHILE][47] = 127, [1][1][2][1][RTW89_QATAR][47] = 127, + [1][1][2][1][RTW89_THAILAND][47] = 127, [1][1][2][1][RTW89_FCC][51] = 60, [1][1][2][1][RTW89_ETSI][51] = 127, [1][1][2][1][RTW89_MKK][51] = 127, @@ -36164,6 +36558,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_UKRAINE][51] = 127, [1][1][2][1][RTW89_CHILE][51] = 127, [1][1][2][1][RTW89_QATAR][51] = 127, + [1][1][2][1][RTW89_THAILAND][51] = 127, [2][0][2][0][RTW89_FCC][3] = 58, [2][0][2][0][RTW89_ETSI][3] = 60, [2][0][2][0][RTW89_MKK][3] = 60, @@ -36176,6 +36571,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] 
[2][0][2][0][RTW89_UKRAINE][3] = 54, [2][0][2][0][RTW89_CHILE][3] = 58, [2][0][2][0][RTW89_QATAR][3] = 60, + [2][0][2][0][RTW89_THAILAND][3] = 60, [2][0][2][0][RTW89_FCC][11] = 50, [2][0][2][0][RTW89_ETSI][11] = 60, [2][0][2][0][RTW89_MKK][11] = 60, @@ -36188,6 +36584,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_UKRAINE][11] = 54, [2][0][2][0][RTW89_CHILE][11] = 50, [2][0][2][0][RTW89_QATAR][11] = 60, + [2][0][2][0][RTW89_THAILAND][11] = 60, [2][0][2][0][RTW89_FCC][18] = 60, [2][0][2][0][RTW89_ETSI][18] = 60, [2][0][2][0][RTW89_MKK][18] = 60, @@ -36200,6 +36597,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_UKRAINE][18] = 54, [2][0][2][0][RTW89_CHILE][18] = 60, [2][0][2][0][RTW89_QATAR][18] = 60, + [2][0][2][0][RTW89_THAILAND][18] = 60, [2][0][2][0][RTW89_FCC][26] = 62, [2][0][2][0][RTW89_ETSI][26] = 60, [2][0][2][0][RTW89_MKK][26] = 60, @@ -36212,6 +36610,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_UKRAINE][26] = 54, [2][0][2][0][RTW89_CHILE][26] = 60, [2][0][2][0][RTW89_QATAR][26] = 60, + [2][0][2][0][RTW89_THAILAND][26] = 60, [2][0][2][0][RTW89_FCC][34] = 62, [2][0][2][0][RTW89_ETSI][34] = 127, [2][0][2][0][RTW89_MKK][34] = 60, @@ -36224,18 +36623,20 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_UKRAINE][34] = 127, [2][0][2][0][RTW89_CHILE][34] = 60, [2][0][2][0][RTW89_QATAR][34] = 127, + [2][0][2][0][RTW89_THAILAND][34] = 127, [2][0][2][0][RTW89_FCC][41] = 62, [2][0][2][0][RTW89_ETSI][41] = 30, [2][0][2][0][RTW89_MKK][41] = 127, [2][0][2][0][RTW89_IC][41] = 62, [2][0][2][0][RTW89_KCC][41] = 58, [2][0][2][0][RTW89_ACMA][41] = 60, - [2][0][2][0][RTW89_CN][41] = 62, + [2][0][2][0][RTW89_CN][41] = 42, [2][0][2][0][RTW89_UK][41] = 60, [2][0][2][0][RTW89_MEXICO][41] = 62, [2][0][2][0][RTW89_UKRAINE][41] = 30, [2][0][2][0][RTW89_CHILE][41] = 60, [2][0][2][0][RTW89_QATAR][41] = 30, + [2][0][2][0][RTW89_THAILAND][41] = 30, [2][0][2][0][RTW89_FCC][49] = 62, [2][0][2][0][RTW89_ETSI][49] = 127, [2][0][2][0][RTW89_MKK][49] = 127, @@ -36248,6 +36649,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_UKRAINE][49] = 127, [2][0][2][0][RTW89_CHILE][49] = 127, [2][0][2][0][RTW89_QATAR][49] = 127, + [2][0][2][0][RTW89_THAILAND][49] = 127, [2][1][2][0][RTW89_FCC][3] = 48, [2][1][2][0][RTW89_ETSI][3] = 54, [2][1][2][0][RTW89_MKK][3] = 56, @@ -36260,18 +36662,20 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_UKRAINE][3] = 42, [2][1][2][0][RTW89_CHILE][3] = 46, [2][1][2][0][RTW89_QATAR][3] = 54, + [2][1][2][0][RTW89_THAILAND][3] = 54, [2][1][2][0][RTW89_FCC][11] = 38, [2][1][2][0][RTW89_ETSI][11] = 54, [2][1][2][0][RTW89_MKK][11] = 54, [2][1][2][0][RTW89_IC][11] = 38, [2][1][2][0][RTW89_KCC][11] = 52, [2][1][2][0][RTW89_ACMA][11] = 54, - [2][1][2][0][RTW89_CN][11] = 52, + [2][1][2][0][RTW89_CN][11] = 50, [2][1][2][0][RTW89_UK][11] = 54, [2][1][2][0][RTW89_MEXICO][11] = 38, [2][1][2][0][RTW89_UKRAINE][11] = 42, [2][1][2][0][RTW89_CHILE][11] = 38, [2][1][2][0][RTW89_QATAR][11] = 54, + [2][1][2][0][RTW89_THAILAND][11] = 54, [2][1][2][0][RTW89_FCC][18] = 50, [2][1][2][0][RTW89_ETSI][18] = 54, [2][1][2][0][RTW89_MKK][18] = 60, @@ -36284,6 +36688,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_UKRAINE][18] = 42, [2][1][2][0][RTW89_CHILE][18] = 50, [2][1][2][0][RTW89_QATAR][18] = 54, + [2][1][2][0][RTW89_THAILAND][18] = 54, 
[2][1][2][0][RTW89_FCC][26] = 52, [2][1][2][0][RTW89_ETSI][26] = 54, [2][1][2][0][RTW89_MKK][26] = 56, @@ -36296,6 +36701,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_UKRAINE][26] = 42, [2][1][2][0][RTW89_CHILE][26] = 52, [2][1][2][0][RTW89_QATAR][26] = 54, + [2][1][2][0][RTW89_THAILAND][26] = 54, [2][1][2][0][RTW89_FCC][34] = 62, [2][1][2][0][RTW89_ETSI][34] = 127, [2][1][2][0][RTW89_MKK][34] = 60, @@ -36308,18 +36714,20 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_UKRAINE][34] = 127, [2][1][2][0][RTW89_CHILE][34] = 60, [2][1][2][0][RTW89_QATAR][34] = 127, + [2][1][2][0][RTW89_THAILAND][34] = 127, [2][1][2][0][RTW89_FCC][41] = 60, [2][1][2][0][RTW89_ETSI][41] = 18, [2][1][2][0][RTW89_MKK][41] = 127, [2][1][2][0][RTW89_IC][41] = 60, [2][1][2][0][RTW89_KCC][41] = 50, [2][1][2][0][RTW89_ACMA][41] = 58, - [2][1][2][0][RTW89_CN][41] = 62, + [2][1][2][0][RTW89_CN][41] = 42, [2][1][2][0][RTW89_UK][41] = 52, [2][1][2][0][RTW89_MEXICO][41] = 60, [2][1][2][0][RTW89_UKRAINE][41] = 18, [2][1][2][0][RTW89_CHILE][41] = 58, [2][1][2][0][RTW89_QATAR][41] = 18, + [2][1][2][0][RTW89_THAILAND][41] = 18, [2][1][2][0][RTW89_FCC][49] = 62, [2][1][2][0][RTW89_ETSI][49] = 127, [2][1][2][0][RTW89_MKK][49] = 127, @@ -36332,6 +36740,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_UKRAINE][49] = 127, [2][1][2][0][RTW89_CHILE][49] = 127, [2][1][2][0][RTW89_QATAR][49] = 127, + [2][1][2][0][RTW89_THAILAND][49] = 127, [2][1][2][1][RTW89_FCC][3] = 48, [2][1][2][1][RTW89_ETSI][3] = 40, [2][1][2][1][RTW89_MKK][3] = 56, @@ -36344,6 +36753,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_UKRAINE][3] = 30, [2][1][2][1][RTW89_CHILE][3] = 46, [2][1][2][1][RTW89_QATAR][3] = 40, + [2][1][2][1][RTW89_THAILAND][3] = 40, [2][1][2][1][RTW89_FCC][11] = 38, [2][1][2][1][RTW89_ETSI][11] = 40, [2][1][2][1][RTW89_MKK][11] = 54, @@ -36356,6 +36766,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_UKRAINE][11] = 30, [2][1][2][1][RTW89_CHILE][11] = 38, [2][1][2][1][RTW89_QATAR][11] = 40, + [2][1][2][1][RTW89_THAILAND][11] = 40, [2][1][2][1][RTW89_FCC][18] = 50, [2][1][2][1][RTW89_ETSI][18] = 40, [2][1][2][1][RTW89_MKK][18] = 60, @@ -36368,6 +36779,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_UKRAINE][18] = 30, [2][1][2][1][RTW89_CHILE][18] = 50, [2][1][2][1][RTW89_QATAR][18] = 40, + [2][1][2][1][RTW89_THAILAND][18] = 40, [2][1][2][1][RTW89_FCC][26] = 52, [2][1][2][1][RTW89_ETSI][26] = 42, [2][1][2][1][RTW89_MKK][26] = 56, @@ -36380,6 +36792,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_UKRAINE][26] = 30, [2][1][2][1][RTW89_CHILE][26] = 52, [2][1][2][1][RTW89_QATAR][26] = 42, + [2][1][2][1][RTW89_THAILAND][26] = 42, [2][1][2][1][RTW89_FCC][34] = 62, [2][1][2][1][RTW89_ETSI][34] = 127, [2][1][2][1][RTW89_MKK][34] = 60, @@ -36392,18 +36805,20 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_UKRAINE][34] = 127, [2][1][2][1][RTW89_CHILE][34] = 60, [2][1][2][1][RTW89_QATAR][34] = 127, + [2][1][2][1][RTW89_THAILAND][34] = 127, [2][1][2][1][RTW89_FCC][41] = 60, [2][1][2][1][RTW89_ETSI][41] = 6, [2][1][2][1][RTW89_MKK][41] = 127, [2][1][2][1][RTW89_IC][41] = 60, [2][1][2][1][RTW89_KCC][41] = 50, [2][1][2][1][RTW89_ACMA][41] = 58, - [2][1][2][1][RTW89_CN][41] = 40, + [2][1][2][1][RTW89_CN][41] = 36, 
[2][1][2][1][RTW89_UK][41] = 40, [2][1][2][1][RTW89_MEXICO][41] = 60, [2][1][2][1][RTW89_UKRAINE][41] = 6, [2][1][2][1][RTW89_CHILE][41] = 58, [2][1][2][1][RTW89_QATAR][41] = 6, + [2][1][2][1][RTW89_THAILAND][41] = 6, [2][1][2][1][RTW89_FCC][49] = 62, [2][1][2][1][RTW89_ETSI][49] = 127, [2][1][2][1][RTW89_MKK][49] = 127, @@ -36416,6 +36831,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_UKRAINE][49] = 127, [2][1][2][1][RTW89_CHILE][49] = 127, [2][1][2][1][RTW89_QATAR][49] = 127, + [2][1][2][1][RTW89_THAILAND][49] = 127, [3][0][2][0][RTW89_FCC][7] = 40, [3][0][2][0][RTW89_ETSI][7] = 50, [3][0][2][0][RTW89_MKK][7] = 50, @@ -36428,18 +36844,20 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_UKRAINE][7] = 50, [3][0][2][0][RTW89_CHILE][7] = 40, [3][0][2][0][RTW89_QATAR][7] = 50, + [3][0][2][0][RTW89_THAILAND][7] = 50, [3][0][2][0][RTW89_FCC][22] = 42, [3][0][2][0][RTW89_ETSI][22] = 50, [3][0][2][0][RTW89_MKK][22] = 50, [3][0][2][0][RTW89_IC][22] = 127, [3][0][2][0][RTW89_KCC][22] = 50, [3][0][2][0][RTW89_ACMA][22] = 127, - [3][0][2][0][RTW89_CN][22] = 66, + [3][0][2][0][RTW89_CN][22] = 127, [3][0][2][0][RTW89_UK][22] = 127, [3][0][2][0][RTW89_MEXICO][22] = 127, [3][0][2][0][RTW89_UKRAINE][22] = 50, [3][0][2][0][RTW89_CHILE][22] = 42, [3][0][2][0][RTW89_QATAR][22] = 50, + [3][0][2][0][RTW89_THAILAND][22] = 50, [3][0][2][0][RTW89_FCC][45] = 52, [3][0][2][0][RTW89_ETSI][45] = 127, [3][0][2][0][RTW89_MKK][45] = 127, @@ -36452,6 +36870,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_UKRAINE][45] = 127, [3][0][2][0][RTW89_CHILE][45] = 127, [3][0][2][0][RTW89_QATAR][45] = 127, + [3][0][2][0][RTW89_THAILAND][45] = 127, [3][1][2][0][RTW89_FCC][7] = 32, [3][1][2][0][RTW89_ETSI][7] = 50, [3][1][2][0][RTW89_MKK][7] = 36, @@ -36464,18 +36883,20 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_UKRAINE][7] = 50, [3][1][2][0][RTW89_CHILE][7] = 32, [3][1][2][0][RTW89_QATAR][7] = 50, + [3][1][2][0][RTW89_THAILAND][7] = 50, [3][1][2][0][RTW89_FCC][22] = 36, [3][1][2][0][RTW89_ETSI][22] = 50, [3][1][2][0][RTW89_MKK][22] = 48, [3][1][2][0][RTW89_IC][22] = 127, [3][1][2][0][RTW89_KCC][22] = 50, [3][1][2][0][RTW89_ACMA][22] = 127, - [3][1][2][0][RTW89_CN][22] = 54, + [3][1][2][0][RTW89_CN][22] = 127, [3][1][2][0][RTW89_UK][22] = 127, [3][1][2][0][RTW89_MEXICO][22] = 127, [3][1][2][0][RTW89_UKRAINE][22] = 50, [3][1][2][0][RTW89_CHILE][22] = 36, [3][1][2][0][RTW89_QATAR][22] = 50, + [3][1][2][0][RTW89_THAILAND][22] = 50, [3][1][2][0][RTW89_FCC][45] = 46, [3][1][2][0][RTW89_ETSI][45] = 127, [3][1][2][0][RTW89_MKK][45] = 127, @@ -36488,6 +36909,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_UKRAINE][45] = 127, [3][1][2][0][RTW89_CHILE][45] = 127, [3][1][2][0][RTW89_QATAR][45] = 127, + [3][1][2][0][RTW89_THAILAND][45] = 127, [3][1][2][1][RTW89_FCC][7] = 32, [3][1][2][1][RTW89_ETSI][7] = 42, [3][1][2][1][RTW89_MKK][7] = 36, @@ -36500,18 +36922,20 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_UKRAINE][7] = 42, [3][1][2][1][RTW89_CHILE][7] = 32, [3][1][2][1][RTW89_QATAR][7] = 42, + [3][1][2][1][RTW89_THAILAND][7] = 42, [3][1][2][1][RTW89_FCC][22] = 36, [3][1][2][1][RTW89_ETSI][22] = 42, [3][1][2][1][RTW89_MKK][22] = 48, [3][1][2][1][RTW89_IC][22] = 127, [3][1][2][1][RTW89_KCC][22] = 50, [3][1][2][1][RTW89_ACMA][22] = 127, - [3][1][2][1][RTW89_CN][22] = 42, + 
[3][1][2][1][RTW89_CN][22] = 127, [3][1][2][1][RTW89_UK][22] = 127, [3][1][2][1][RTW89_MEXICO][22] = 127, [3][1][2][1][RTW89_UKRAINE][22] = 42, [3][1][2][1][RTW89_CHILE][22] = 36, [3][1][2][1][RTW89_QATAR][22] = 42, + [3][1][2][1][RTW89_THAILAND][22] = 42, [3][1][2][1][RTW89_FCC][45] = 46, [3][1][2][1][RTW89_ETSI][45] = 127, [3][1][2][1][RTW89_MKK][45] = 127, @@ -36524,6 +36948,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_UKRAINE][45] = 127, [3][1][2][1][RTW89_CHILE][45] = 127, [3][1][2][1][RTW89_QATAR][45] = 127, + [3][1][2][1][RTW89_THAILAND][45] = 127, }; static @@ -36605,19 +37030,19 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_WW][2][44] = 70, [0][0][1][0][RTW89_WW][0][45] = 22, [0][0][1][0][RTW89_WW][1][45] = 22, - [0][0][1][0][RTW89_WW][2][45] = 0, + [0][0][1][0][RTW89_WW][2][45] = 70, [0][0][1][0][RTW89_WW][0][47] = 22, [0][0][1][0][RTW89_WW][1][47] = 22, - [0][0][1][0][RTW89_WW][2][47] = 0, + [0][0][1][0][RTW89_WW][2][47] = 70, [0][0][1][0][RTW89_WW][0][49] = 24, [0][0][1][0][RTW89_WW][1][49] = 24, - [0][0][1][0][RTW89_WW][2][49] = 0, + [0][0][1][0][RTW89_WW][2][49] = 70, [0][0][1][0][RTW89_WW][0][51] = 22, [0][0][1][0][RTW89_WW][1][51] = 22, - [0][0][1][0][RTW89_WW][2][51] = 0, + [0][0][1][0][RTW89_WW][2][51] = 70, [0][0][1][0][RTW89_WW][0][53] = 22, [0][0][1][0][RTW89_WW][1][53] = 22, - [0][0][1][0][RTW89_WW][2][53] = 0, + [0][0][1][0][RTW89_WW][2][53] = 70, [0][0][1][0][RTW89_WW][0][55] = 22, [0][0][1][0][RTW89_WW][1][55] = 22, [0][0][1][0][RTW89_WW][2][55] = 68, @@ -36797,19 +37222,19 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_WW][2][44] = 68, [0][1][1][0][RTW89_WW][0][45] = -2, [0][1][1][0][RTW89_WW][1][45] = -2, - [0][1][1][0][RTW89_WW][2][45] = 0, + [0][1][1][0][RTW89_WW][2][45] = 70, [0][1][1][0][RTW89_WW][0][47] = -2, [0][1][1][0][RTW89_WW][1][47] = -2, - [0][1][1][0][RTW89_WW][2][47] = 0, + [0][1][1][0][RTW89_WW][2][47] = 68, [0][1][1][0][RTW89_WW][0][49] = -2, [0][1][1][0][RTW89_WW][1][49] = -2, - [0][1][1][0][RTW89_WW][2][49] = 0, + [0][1][1][0][RTW89_WW][2][49] = 68, [0][1][1][0][RTW89_WW][0][51] = -2, [0][1][1][0][RTW89_WW][1][51] = -2, - [0][1][1][0][RTW89_WW][2][51] = 0, + [0][1][1][0][RTW89_WW][2][51] = 68, [0][1][1][0][RTW89_WW][0][53] = -2, [0][1][1][0][RTW89_WW][1][53] = -2, - [0][1][1][0][RTW89_WW][2][53] = 0, + [0][1][1][0][RTW89_WW][2][53] = 68, [0][1][1][0][RTW89_WW][0][55] = -2, [0][1][1][0][RTW89_WW][1][55] = -2, [0][1][1][0][RTW89_WW][2][55] = 68, @@ -36989,19 +37414,19 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_WW][2][44] = 70, [0][0][2][0][RTW89_WW][0][45] = 22, [0][0][2][0][RTW89_WW][1][45] = 22, - [0][0][2][0][RTW89_WW][2][45] = 0, + [0][0][2][0][RTW89_WW][2][45] = 70, [0][0][2][0][RTW89_WW][0][47] = 22, [0][0][2][0][RTW89_WW][1][47] = 22, - [0][0][2][0][RTW89_WW][2][47] = 0, + [0][0][2][0][RTW89_WW][2][47] = 70, [0][0][2][0][RTW89_WW][0][49] = 24, [0][0][2][0][RTW89_WW][1][49] = 24, - [0][0][2][0][RTW89_WW][2][49] = 0, + [0][0][2][0][RTW89_WW][2][49] = 70, [0][0][2][0][RTW89_WW][0][51] = 22, [0][0][2][0][RTW89_WW][1][51] = 22, - [0][0][2][0][RTW89_WW][2][51] = 0, + [0][0][2][0][RTW89_WW][2][51] = 70, [0][0][2][0][RTW89_WW][0][53] = 22, [0][0][2][0][RTW89_WW][1][53] = 22, - [0][0][2][0][RTW89_WW][2][53] = 0, + [0][0][2][0][RTW89_WW][2][53] = 70, [0][0][2][0][RTW89_WW][0][55] = 22, [0][0][2][0][RTW89_WW][1][55] = 22, [0][0][2][0][RTW89_WW][2][55] = 68, @@ -37181,19 +37606,19 
@@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_WW][2][44] = 68, [0][1][2][0][RTW89_WW][0][45] = -2, [0][1][2][0][RTW89_WW][1][45] = -2, - [0][1][2][0][RTW89_WW][2][45] = 0, + [0][1][2][0][RTW89_WW][2][45] = 70, [0][1][2][0][RTW89_WW][0][47] = -2, [0][1][2][0][RTW89_WW][1][47] = -2, - [0][1][2][0][RTW89_WW][2][47] = 0, + [0][1][2][0][RTW89_WW][2][47] = 68, [0][1][2][0][RTW89_WW][0][49] = -2, [0][1][2][0][RTW89_WW][1][49] = -2, - [0][1][2][0][RTW89_WW][2][49] = 0, + [0][1][2][0][RTW89_WW][2][49] = 68, [0][1][2][0][RTW89_WW][0][51] = -2, [0][1][2][0][RTW89_WW][1][51] = -2, - [0][1][2][0][RTW89_WW][2][51] = 0, + [0][1][2][0][RTW89_WW][2][51] = 68, [0][1][2][0][RTW89_WW][0][53] = -2, [0][1][2][0][RTW89_WW][1][53] = -2, - [0][1][2][0][RTW89_WW][2][53] = 0, + [0][1][2][0][RTW89_WW][2][53] = 68, [0][1][2][0][RTW89_WW][0][55] = -2, [0][1][2][0][RTW89_WW][1][55] = -2, [0][1][2][0][RTW89_WW][2][55] = 68, @@ -37373,19 +37798,19 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_WW][2][44] = 68, [0][1][2][1][RTW89_WW][0][45] = -2, [0][1][2][1][RTW89_WW][1][45] = -2, - [0][1][2][1][RTW89_WW][2][45] = 0, + [0][1][2][1][RTW89_WW][2][45] = 70, [0][1][2][1][RTW89_WW][0][47] = -2, [0][1][2][1][RTW89_WW][1][47] = -2, - [0][1][2][1][RTW89_WW][2][47] = 0, + [0][1][2][1][RTW89_WW][2][47] = 68, [0][1][2][1][RTW89_WW][0][49] = -2, [0][1][2][1][RTW89_WW][1][49] = -2, - [0][1][2][1][RTW89_WW][2][49] = 0, + [0][1][2][1][RTW89_WW][2][49] = 68, [0][1][2][1][RTW89_WW][0][51] = -2, [0][1][2][1][RTW89_WW][1][51] = -2, - [0][1][2][1][RTW89_WW][2][51] = 0, + [0][1][2][1][RTW89_WW][2][51] = 68, [0][1][2][1][RTW89_WW][0][53] = -2, [0][1][2][1][RTW89_WW][1][53] = -2, - [0][1][2][1][RTW89_WW][2][53] = 0, + [0][1][2][1][RTW89_WW][2][53] = 68, [0][1][2][1][RTW89_WW][0][55] = -2, [0][1][2][1][RTW89_WW][1][55] = -2, [0][1][2][1][RTW89_WW][2][55] = 68, @@ -37529,10 +37954,10 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_WW][2][43] = 70, [1][0][2][0][RTW89_WW][0][46] = 34, [1][0][2][0][RTW89_WW][1][46] = 34, - [1][0][2][0][RTW89_WW][2][46] = 0, + [1][0][2][0][RTW89_WW][2][46] = 68, [1][0][2][0][RTW89_WW][0][50] = 34, [1][0][2][0][RTW89_WW][1][50] = 34, - [1][0][2][0][RTW89_WW][2][50] = 0, + [1][0][2][0][RTW89_WW][2][50] = 68, [1][0][2][0][RTW89_WW][0][54] = 36, [1][0][2][0][RTW89_WW][1][54] = 36, [1][0][2][0][RTW89_WW][2][54] = 0, @@ -37625,10 +38050,10 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_WW][2][43] = 70, [1][1][2][0][RTW89_WW][0][46] = 12, [1][1][2][0][RTW89_WW][1][46] = 12, - [1][1][2][0][RTW89_WW][2][46] = 0, + [1][1][2][0][RTW89_WW][2][46] = 68, [1][1][2][0][RTW89_WW][0][50] = 12, [1][1][2][0][RTW89_WW][1][50] = 12, - [1][1][2][0][RTW89_WW][2][50] = 0, + [1][1][2][0][RTW89_WW][2][50] = 68, [1][1][2][0][RTW89_WW][0][54] = 10, [1][1][2][0][RTW89_WW][1][54] = 10, [1][1][2][0][RTW89_WW][2][54] = 0, @@ -37721,10 +38146,10 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_WW][2][43] = 70, [1][1][2][1][RTW89_WW][0][46] = 12, [1][1][2][1][RTW89_WW][1][46] = 12, - [1][1][2][1][RTW89_WW][2][46] = 0, + [1][1][2][1][RTW89_WW][2][46] = 68, [1][1][2][1][RTW89_WW][0][50] = 12, [1][1][2][1][RTW89_WW][1][50] = 12, - [1][1][2][1][RTW89_WW][2][50] = 0, + [1][1][2][1][RTW89_WW][2][50] = 68, [1][1][2][1][RTW89_WW][0][54] = 10, [1][1][2][1][RTW89_WW][1][54] = 10, [1][1][2][1][RTW89_WW][2][54] = 0, @@ -37799,10 +38224,10 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
 	[2][0][2][0][RTW89_WW][2][41] = 60,
 	[2][0][2][0][RTW89_WW][0][48] = 46,
 	[2][0][2][0][RTW89_WW][1][48] = 46,
-	[2][0][2][0][RTW89_WW][2][48] = 0,
+	[2][0][2][0][RTW89_WW][2][48] = 60,
 	[2][0][2][0][RTW89_WW][0][56] = 46,
 	[2][0][2][0][RTW89_WW][1][56] = 46,
-	[2][0][2][0][RTW89_WW][2][56] = 0,
+	[2][0][2][0][RTW89_WW][2][56] = 58,
 	[2][0][2][0][RTW89_WW][0][63] = 46,
 	[2][0][2][0][RTW89_WW][1][63] = 46,
 	[2][0][2][0][RTW89_WW][2][63] = 58,
@@ -37847,10 +38272,10 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
 	[2][1][2][0][RTW89_WW][2][41] = 60,
 	[2][1][2][0][RTW89_WW][0][48] = 22,
 	[2][1][2][0][RTW89_WW][1][48] = 22,
-	[2][1][2][0][RTW89_WW][2][48] = 0,
+	[2][1][2][0][RTW89_WW][2][48] = 60,
 	[2][1][2][0][RTW89_WW][0][56] = 20,
 	[2][1][2][0][RTW89_WW][1][56] = 20,
-	[2][1][2][0][RTW89_WW][2][56] = 0,
+	[2][1][2][0][RTW89_WW][2][56] = 56,
 	[2][1][2][0][RTW89_WW][0][63] = 22,
 	[2][1][2][0][RTW89_WW][1][63] = 22,
 	[2][1][2][0][RTW89_WW][2][63] = 58,
@@ -37895,10 +38320,10 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
 	[2][1][2][1][RTW89_WW][2][41] = 60,
 	[2][1][2][1][RTW89_WW][0][48] = 22,
 	[2][1][2][1][RTW89_WW][1][48] = 22,
-	[2][1][2][1][RTW89_WW][2][48] = 0,
+	[2][1][2][1][RTW89_WW][2][48] = 60,
 	[2][1][2][1][RTW89_WW][0][56] = 20,
 	[2][1][2][1][RTW89_WW][1][56] = 20,
-	[2][1][2][1][RTW89_WW][2][56] = 0,
+	[2][1][2][1][RTW89_WW][2][56] = 56,
 	[2][1][2][1][RTW89_WW][0][63] = 22,
 	[2][1][2][1][RTW89_WW][1][63] = 22,
 	[2][1][2][1][RTW89_WW][2][63] = 58,
@@ -37934,7 +38359,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
 	[3][0][2][0][RTW89_WW][2][37] = 52,
 	[3][0][2][0][RTW89_WW][0][52] = 54,
 	[3][0][2][0][RTW89_WW][1][52] = 54,
-	[3][0][2][0][RTW89_WW][2][52] = 0,
+	[3][0][2][0][RTW89_WW][2][52] = 56,
 	[3][0][2][0][RTW89_WW][0][67] = 54,
 	[3][0][2][0][RTW89_WW][1][67] = 54,
 	[3][0][2][0][RTW89_WW][2][67] = 54,
@@ -37958,7 +38383,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
 	[3][1][2][0][RTW89_WW][2][37] = 52,
 	[3][1][2][0][RTW89_WW][0][52] = 30,
 	[3][1][2][0][RTW89_WW][1][52] = 30,
-	[3][1][2][0][RTW89_WW][2][52] = 0,
+	[3][1][2][0][RTW89_WW][2][52] = 56,
 	[3][1][2][0][RTW89_WW][0][67] = 32,
 	[3][1][2][0][RTW89_WW][1][67] = 32,
 	[3][1][2][0][RTW89_WW][2][67] = 54,
@@ -37982,7 +38407,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
 	[3][1][2][1][RTW89_WW][2][37] = 52,
 	[3][1][2][1][RTW89_WW][0][52] = 30,
 	[3][1][2][1][RTW89_WW][1][52] = 30,
-	[3][1][2][1][RTW89_WW][2][52] = 0,
+	[3][1][2][1][RTW89_WW][2][52] = 56,
 	[3][1][2][1][RTW89_WW][0][67] = 32,
 	[3][1][2][1][RTW89_WW][1][67] = 32,
 	[3][1][2][1][RTW89_WW][2][67] = 54,
@@ -38002,6 +38427,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
 	[0][0][1][0][RTW89_MKK][1][0] = 66,
 	[0][0][1][0][RTW89_MKK][0][0] = 26,
 	[0][0][1][0][RTW89_IC][1][0] = 24,
+	[0][0][1][0][RTW89_IC][2][0] = 56,
 	[0][0][1][0][RTW89_KCC][1][0] = 24,
 	[0][0][1][0][RTW89_KCC][0][0] = 24,
 	[0][0][1][0][RTW89_ACMA][1][0] = 66,
@@ -38011,6 +38437,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
 	[0][0][1][0][RTW89_QATAR][0][0] = 28,
 	[0][0][1][0][RTW89_UK][1][0] = 66,
 	[0][0][1][0][RTW89_UK][0][0] = 28,
+	[0][0][1][0][RTW89_THAILAND][1][0] = 56,
+	[0][0][1][0][RTW89_THAILAND][0][0] = 24,
 	[0][0][1][0][RTW89_FCC][1][2] = 22,
 	[0][0][1][0][RTW89_FCC][2][2] = 56,
 	[0][0][1][0][RTW89_ETSI][1][2] = 66,
[The remaining hunks of this generated-table diff (@@ -38018,6 +38446,8 through @@ -39362,6 +40042,7) repeat the two additions above for every bandwidth/NTX/rate-section/beamforming row and every channel index, with domain-specific values: a new [RTW89_IC][2][ch] entry is inserted after each existing [RTW89_IC][1][ch] entry, and a [RTW89_THAILAND][1][ch]/[RTW89_THAILAND][0][ch] pair is appended after the RTW89_UK pair, using 127 wherever the channel is not permitted in that domain.]
@@ -39371,6 +40052,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][40] = 18, [0][1][1][0][RTW89_UK][1][40] = 54, [0][1][1][0][RTW89_UK][0][40] = 18, + [0][1][1][0][RTW89_THAILAND][1][40] = 42, + [0][1][1][0][RTW89_THAILAND][0][40] = -4, [0][1][1][0][RTW89_FCC][1][42] = -4, [0][1][1][0][RTW89_FCC][2][42] = 68, [0][1][1][0][RTW89_ETSI][1][42] = 54, @@ -39378,6 +40061,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][42] = 54, [0][1][1][0][RTW89_MKK][0][42] = 16, [0][1][1][0][RTW89_IC][1][42] = -4, + [0][1][1][0][RTW89_IC][2][42] = 68, [0][1][1][0][RTW89_KCC][1][42] = 12, [0][1][1][0][RTW89_KCC][0][42] = 14, [0][1][1][0][RTW89_ACMA][1][42] = 54, @@ -39387,6 +40071,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][42] = 18, [0][1][1][0][RTW89_UK][1][42] = 54, [0][1][1][0][RTW89_UK][0][42] = 18, + [0][1][1][0][RTW89_THAILAND][1][42] = 42, + [0][1][1][0][RTW89_THAILAND][0][42] = -4, [0][1][1][0][RTW89_FCC][1][44] = -2, [0][1][1][0][RTW89_FCC][2][44] = 68, [0][1][1][0][RTW89_ETSI][1][44] = 54, @@ -39394,6 +40080,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][44] = 34, [0][1][1][0][RTW89_MKK][0][44] = 16, [0][1][1][0][RTW89_IC][1][44] = -2, + [0][1][1][0][RTW89_IC][2][44] = 68, [0][1][1][0][RTW89_KCC][1][44] = 12, [0][1][1][0][RTW89_KCC][0][44] = 12, [0][1][1][0][RTW89_ACMA][1][44] = 54, @@ -39403,6 +40090,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][44] = 18, [0][1][1][0][RTW89_UK][1][44] = 54, [0][1][1][0][RTW89_UK][0][44] = 18, + [0][1][1][0][RTW89_THAILAND][1][44] = 42, + [0][1][1][0][RTW89_THAILAND][0][44] = -2, [0][1][1][0][RTW89_FCC][1][45] = -2, [0][1][1][0][RTW89_FCC][2][45] = 127, [0][1][1][0][RTW89_ETSI][1][45] = 127, @@ -39410,6 +40099,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][45] = 127, [0][1][1][0][RTW89_MKK][0][45] = 127, [0][1][1][0][RTW89_IC][1][45] = -2, + [0][1][1][0][RTW89_IC][2][45] = 70, [0][1][1][0][RTW89_KCC][1][45] = 12, [0][1][1][0][RTW89_KCC][0][45] = 127, [0][1][1][0][RTW89_ACMA][1][45] = 127, @@ -39419,6 +40109,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][45] = 127, [0][1][1][0][RTW89_UK][1][45] = 127, [0][1][1][0][RTW89_UK][0][45] = 127, + [0][1][1][0][RTW89_THAILAND][1][45] = 127, + [0][1][1][0][RTW89_THAILAND][0][45] = 127, [0][1][1][0][RTW89_FCC][1][47] = -2, [0][1][1][0][RTW89_FCC][2][47] = 127, [0][1][1][0][RTW89_ETSI][1][47] = 127, @@ -39426,6 +40118,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][47] = 127, [0][1][1][0][RTW89_MKK][0][47] = 127, [0][1][1][0][RTW89_IC][1][47] = -2, + [0][1][1][0][RTW89_IC][2][47] = 68, [0][1][1][0][RTW89_KCC][1][47] = 12, [0][1][1][0][RTW89_KCC][0][47] = 127, [0][1][1][0][RTW89_ACMA][1][47] = 127, @@ -39435,6 +40128,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][47] = 127, [0][1][1][0][RTW89_UK][1][47] = 127, [0][1][1][0][RTW89_UK][0][47] = 127, + [0][1][1][0][RTW89_THAILAND][1][47] = 127, + [0][1][1][0][RTW89_THAILAND][0][47] = 127, [0][1][1][0][RTW89_FCC][1][49] = -2, [0][1][1][0][RTW89_FCC][2][49] = 127, [0][1][1][0][RTW89_ETSI][1][49] = 127, @@ -39442,6 +40137,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][49] = 
127, [0][1][1][0][RTW89_MKK][0][49] = 127, [0][1][1][0][RTW89_IC][1][49] = -2, + [0][1][1][0][RTW89_IC][2][49] = 68, [0][1][1][0][RTW89_KCC][1][49] = 12, [0][1][1][0][RTW89_KCC][0][49] = 127, [0][1][1][0][RTW89_ACMA][1][49] = 127, @@ -39451,6 +40147,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][49] = 127, [0][1][1][0][RTW89_UK][1][49] = 127, [0][1][1][0][RTW89_UK][0][49] = 127, + [0][1][1][0][RTW89_THAILAND][1][49] = 127, + [0][1][1][0][RTW89_THAILAND][0][49] = 127, [0][1][1][0][RTW89_FCC][1][51] = -2, [0][1][1][0][RTW89_FCC][2][51] = 127, [0][1][1][0][RTW89_ETSI][1][51] = 127, @@ -39458,6 +40156,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][51] = 127, [0][1][1][0][RTW89_MKK][0][51] = 127, [0][1][1][0][RTW89_IC][1][51] = -2, + [0][1][1][0][RTW89_IC][2][51] = 68, [0][1][1][0][RTW89_KCC][1][51] = 12, [0][1][1][0][RTW89_KCC][0][51] = 127, [0][1][1][0][RTW89_ACMA][1][51] = 127, @@ -39467,6 +40166,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][51] = 127, [0][1][1][0][RTW89_UK][1][51] = 127, [0][1][1][0][RTW89_UK][0][51] = 127, + [0][1][1][0][RTW89_THAILAND][1][51] = 127, + [0][1][1][0][RTW89_THAILAND][0][51] = 127, [0][1][1][0][RTW89_FCC][1][53] = -2, [0][1][1][0][RTW89_FCC][2][53] = 127, [0][1][1][0][RTW89_ETSI][1][53] = 127, @@ -39474,6 +40175,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][53] = 127, [0][1][1][0][RTW89_MKK][0][53] = 127, [0][1][1][0][RTW89_IC][1][53] = -2, + [0][1][1][0][RTW89_IC][2][53] = 68, [0][1][1][0][RTW89_KCC][1][53] = 12, [0][1][1][0][RTW89_KCC][0][53] = 127, [0][1][1][0][RTW89_ACMA][1][53] = 127, @@ -39483,6 +40185,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][53] = 127, [0][1][1][0][RTW89_UK][1][53] = 127, [0][1][1][0][RTW89_UK][0][53] = 127, + [0][1][1][0][RTW89_THAILAND][1][53] = 127, + [0][1][1][0][RTW89_THAILAND][0][53] = 127, [0][1][1][0][RTW89_FCC][1][55] = -2, [0][1][1][0][RTW89_FCC][2][55] = 68, [0][1][1][0][RTW89_ETSI][1][55] = 127, @@ -39490,6 +40194,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][55] = 127, [0][1][1][0][RTW89_MKK][0][55] = 127, [0][1][1][0][RTW89_IC][1][55] = -2, + [0][1][1][0][RTW89_IC][2][55] = 68, [0][1][1][0][RTW89_KCC][1][55] = 12, [0][1][1][0][RTW89_KCC][0][55] = 127, [0][1][1][0][RTW89_ACMA][1][55] = 127, @@ -39499,6 +40204,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][55] = 127, [0][1][1][0][RTW89_UK][1][55] = 127, [0][1][1][0][RTW89_UK][0][55] = 127, + [0][1][1][0][RTW89_THAILAND][1][55] = 127, + [0][1][1][0][RTW89_THAILAND][0][55] = 127, [0][1][1][0][RTW89_FCC][1][57] = -2, [0][1][1][0][RTW89_FCC][2][57] = 68, [0][1][1][0][RTW89_ETSI][1][57] = 127, @@ -39506,6 +40213,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][57] = 127, [0][1][1][0][RTW89_MKK][0][57] = 127, [0][1][1][0][RTW89_IC][1][57] = -2, + [0][1][1][0][RTW89_IC][2][57] = 68, [0][1][1][0][RTW89_KCC][1][57] = 12, [0][1][1][0][RTW89_KCC][0][57] = 127, [0][1][1][0][RTW89_ACMA][1][57] = 127, @@ -39515,6 +40223,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][57] = 127, [0][1][1][0][RTW89_UK][1][57] = 127, [0][1][1][0][RTW89_UK][0][57] = 127, + [0][1][1][0][RTW89_THAILAND][1][57] = 127, + 
[0][1][1][0][RTW89_THAILAND][0][57] = 127, [0][1][1][0][RTW89_FCC][1][59] = -2, [0][1][1][0][RTW89_FCC][2][59] = 68, [0][1][1][0][RTW89_ETSI][1][59] = 127, @@ -39522,6 +40232,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][59] = 127, [0][1][1][0][RTW89_MKK][0][59] = 127, [0][1][1][0][RTW89_IC][1][59] = -2, + [0][1][1][0][RTW89_IC][2][59] = 68, [0][1][1][0][RTW89_KCC][1][59] = 12, [0][1][1][0][RTW89_KCC][0][59] = 127, [0][1][1][0][RTW89_ACMA][1][59] = 127, @@ -39531,6 +40242,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][59] = 127, [0][1][1][0][RTW89_UK][1][59] = 127, [0][1][1][0][RTW89_UK][0][59] = 127, + [0][1][1][0][RTW89_THAILAND][1][59] = 127, + [0][1][1][0][RTW89_THAILAND][0][59] = 127, [0][1][1][0][RTW89_FCC][1][60] = -2, [0][1][1][0][RTW89_FCC][2][60] = 68, [0][1][1][0][RTW89_ETSI][1][60] = 127, @@ -39538,6 +40251,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][60] = 127, [0][1][1][0][RTW89_MKK][0][60] = 127, [0][1][1][0][RTW89_IC][1][60] = -2, + [0][1][1][0][RTW89_IC][2][60] = 68, [0][1][1][0][RTW89_KCC][1][60] = 12, [0][1][1][0][RTW89_KCC][0][60] = 127, [0][1][1][0][RTW89_ACMA][1][60] = 127, @@ -39547,6 +40261,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][60] = 127, [0][1][1][0][RTW89_UK][1][60] = 127, [0][1][1][0][RTW89_UK][0][60] = 127, + [0][1][1][0][RTW89_THAILAND][1][60] = 127, + [0][1][1][0][RTW89_THAILAND][0][60] = 127, [0][1][1][0][RTW89_FCC][1][62] = -2, [0][1][1][0][RTW89_FCC][2][62] = 68, [0][1][1][0][RTW89_ETSI][1][62] = 127, @@ -39554,6 +40270,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][62] = 127, [0][1][1][0][RTW89_MKK][0][62] = 127, [0][1][1][0][RTW89_IC][1][62] = -2, + [0][1][1][0][RTW89_IC][2][62] = 68, [0][1][1][0][RTW89_KCC][1][62] = 12, [0][1][1][0][RTW89_KCC][0][62] = 127, [0][1][1][0][RTW89_ACMA][1][62] = 127, @@ -39563,6 +40280,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][62] = 127, [0][1][1][0][RTW89_UK][1][62] = 127, [0][1][1][0][RTW89_UK][0][62] = 127, + [0][1][1][0][RTW89_THAILAND][1][62] = 127, + [0][1][1][0][RTW89_THAILAND][0][62] = 127, [0][1][1][0][RTW89_FCC][1][64] = -2, [0][1][1][0][RTW89_FCC][2][64] = 68, [0][1][1][0][RTW89_ETSI][1][64] = 127, @@ -39570,6 +40289,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][64] = 127, [0][1][1][0][RTW89_MKK][0][64] = 127, [0][1][1][0][RTW89_IC][1][64] = -2, + [0][1][1][0][RTW89_IC][2][64] = 68, [0][1][1][0][RTW89_KCC][1][64] = 12, [0][1][1][0][RTW89_KCC][0][64] = 127, [0][1][1][0][RTW89_ACMA][1][64] = 127, @@ -39579,6 +40299,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][64] = 127, [0][1][1][0][RTW89_UK][1][64] = 127, [0][1][1][0][RTW89_UK][0][64] = 127, + [0][1][1][0][RTW89_THAILAND][1][64] = 127, + [0][1][1][0][RTW89_THAILAND][0][64] = 127, [0][1][1][0][RTW89_FCC][1][66] = -2, [0][1][1][0][RTW89_FCC][2][66] = 68, [0][1][1][0][RTW89_ETSI][1][66] = 127, @@ -39586,6 +40308,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][66] = 127, [0][1][1][0][RTW89_MKK][0][66] = 127, [0][1][1][0][RTW89_IC][1][66] = -2, + [0][1][1][0][RTW89_IC][2][66] = 68, [0][1][1][0][RTW89_KCC][1][66] = 12, [0][1][1][0][RTW89_KCC][0][66] = 127, [0][1][1][0][RTW89_ACMA][1][66] = 127, 
@@ -39595,6 +40318,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][66] = 127, [0][1][1][0][RTW89_UK][1][66] = 127, [0][1][1][0][RTW89_UK][0][66] = 127, + [0][1][1][0][RTW89_THAILAND][1][66] = 127, + [0][1][1][0][RTW89_THAILAND][0][66] = 127, [0][1][1][0][RTW89_FCC][1][68] = -2, [0][1][1][0][RTW89_FCC][2][68] = 68, [0][1][1][0][RTW89_ETSI][1][68] = 127, @@ -39602,6 +40327,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][68] = 127, [0][1][1][0][RTW89_MKK][0][68] = 127, [0][1][1][0][RTW89_IC][1][68] = -2, + [0][1][1][0][RTW89_IC][2][68] = 68, [0][1][1][0][RTW89_KCC][1][68] = 12, [0][1][1][0][RTW89_KCC][0][68] = 127, [0][1][1][0][RTW89_ACMA][1][68] = 127, @@ -39611,6 +40337,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][68] = 127, [0][1][1][0][RTW89_UK][1][68] = 127, [0][1][1][0][RTW89_UK][0][68] = 127, + [0][1][1][0][RTW89_THAILAND][1][68] = 127, + [0][1][1][0][RTW89_THAILAND][0][68] = 127, [0][1][1][0][RTW89_FCC][1][70] = -2, [0][1][1][0][RTW89_FCC][2][70] = 68, [0][1][1][0][RTW89_ETSI][1][70] = 127, @@ -39618,6 +40346,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][70] = 127, [0][1][1][0][RTW89_MKK][0][70] = 127, [0][1][1][0][RTW89_IC][1][70] = -2, + [0][1][1][0][RTW89_IC][2][70] = 68, [0][1][1][0][RTW89_KCC][1][70] = 12, [0][1][1][0][RTW89_KCC][0][70] = 127, [0][1][1][0][RTW89_ACMA][1][70] = 127, @@ -39627,6 +40356,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][70] = 127, [0][1][1][0][RTW89_UK][1][70] = 127, [0][1][1][0][RTW89_UK][0][70] = 127, + [0][1][1][0][RTW89_THAILAND][1][70] = 127, + [0][1][1][0][RTW89_THAILAND][0][70] = 127, [0][1][1][0][RTW89_FCC][1][72] = -2, [0][1][1][0][RTW89_FCC][2][72] = 68, [0][1][1][0][RTW89_ETSI][1][72] = 127, @@ -39634,6 +40365,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][72] = 127, [0][1][1][0][RTW89_MKK][0][72] = 127, [0][1][1][0][RTW89_IC][1][72] = -2, + [0][1][1][0][RTW89_IC][2][72] = 68, [0][1][1][0][RTW89_KCC][1][72] = 12, [0][1][1][0][RTW89_KCC][0][72] = 127, [0][1][1][0][RTW89_ACMA][1][72] = 127, @@ -39643,6 +40375,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][72] = 127, [0][1][1][0][RTW89_UK][1][72] = 127, [0][1][1][0][RTW89_UK][0][72] = 127, + [0][1][1][0][RTW89_THAILAND][1][72] = 127, + [0][1][1][0][RTW89_THAILAND][0][72] = 127, [0][1][1][0][RTW89_FCC][1][74] = -2, [0][1][1][0][RTW89_FCC][2][74] = 68, [0][1][1][0][RTW89_ETSI][1][74] = 127, @@ -39650,6 +40384,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][74] = 127, [0][1][1][0][RTW89_MKK][0][74] = 127, [0][1][1][0][RTW89_IC][1][74] = -2, + [0][1][1][0][RTW89_IC][2][74] = 68, [0][1][1][0][RTW89_KCC][1][74] = 12, [0][1][1][0][RTW89_KCC][0][74] = 127, [0][1][1][0][RTW89_ACMA][1][74] = 127, @@ -39659,6 +40394,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][74] = 127, [0][1][1][0][RTW89_UK][1][74] = 127, [0][1][1][0][RTW89_UK][0][74] = 127, + [0][1][1][0][RTW89_THAILAND][1][74] = 127, + [0][1][1][0][RTW89_THAILAND][0][74] = 127, [0][1][1][0][RTW89_FCC][1][75] = -2, [0][1][1][0][RTW89_FCC][2][75] = 68, [0][1][1][0][RTW89_ETSI][1][75] = 127, @@ -39666,6 +40403,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] 
[0][1][1][0][RTW89_MKK][1][75] = 127, [0][1][1][0][RTW89_MKK][0][75] = 127, [0][1][1][0][RTW89_IC][1][75] = -2, + [0][1][1][0][RTW89_IC][2][75] = 68, [0][1][1][0][RTW89_KCC][1][75] = 12, [0][1][1][0][RTW89_KCC][0][75] = 127, [0][1][1][0][RTW89_ACMA][1][75] = 127, @@ -39675,6 +40413,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][75] = 127, [0][1][1][0][RTW89_UK][1][75] = 127, [0][1][1][0][RTW89_UK][0][75] = 127, + [0][1][1][0][RTW89_THAILAND][1][75] = 127, + [0][1][1][0][RTW89_THAILAND][0][75] = 127, [0][1][1][0][RTW89_FCC][1][77] = -2, [0][1][1][0][RTW89_FCC][2][77] = 68, [0][1][1][0][RTW89_ETSI][1][77] = 127, @@ -39682,6 +40422,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][77] = 127, [0][1][1][0][RTW89_MKK][0][77] = 127, [0][1][1][0][RTW89_IC][1][77] = -2, + [0][1][1][0][RTW89_IC][2][77] = 68, [0][1][1][0][RTW89_KCC][1][77] = 12, [0][1][1][0][RTW89_KCC][0][77] = 127, [0][1][1][0][RTW89_ACMA][1][77] = 127, @@ -39691,6 +40432,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][77] = 127, [0][1][1][0][RTW89_UK][1][77] = 127, [0][1][1][0][RTW89_UK][0][77] = 127, + [0][1][1][0][RTW89_THAILAND][1][77] = 127, + [0][1][1][0][RTW89_THAILAND][0][77] = 127, [0][1][1][0][RTW89_FCC][1][79] = -2, [0][1][1][0][RTW89_FCC][2][79] = 68, [0][1][1][0][RTW89_ETSI][1][79] = 127, @@ -39698,6 +40441,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][79] = 127, [0][1][1][0][RTW89_MKK][0][79] = 127, [0][1][1][0][RTW89_IC][1][79] = -2, + [0][1][1][0][RTW89_IC][2][79] = 68, [0][1][1][0][RTW89_KCC][1][79] = 12, [0][1][1][0][RTW89_KCC][0][79] = 127, [0][1][1][0][RTW89_ACMA][1][79] = 127, @@ -39707,6 +40451,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][79] = 127, [0][1][1][0][RTW89_UK][1][79] = 127, [0][1][1][0][RTW89_UK][0][79] = 127, + [0][1][1][0][RTW89_THAILAND][1][79] = 127, + [0][1][1][0][RTW89_THAILAND][0][79] = 127, [0][1][1][0][RTW89_FCC][1][81] = -2, [0][1][1][0][RTW89_FCC][2][81] = 68, [0][1][1][0][RTW89_ETSI][1][81] = 127, @@ -39714,6 +40460,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][81] = 127, [0][1][1][0][RTW89_MKK][0][81] = 127, [0][1][1][0][RTW89_IC][1][81] = -2, + [0][1][1][0][RTW89_IC][2][81] = 68, [0][1][1][0][RTW89_KCC][1][81] = 12, [0][1][1][0][RTW89_KCC][0][81] = 127, [0][1][1][0][RTW89_ACMA][1][81] = 127, @@ -39723,6 +40470,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][81] = 127, [0][1][1][0][RTW89_UK][1][81] = 127, [0][1][1][0][RTW89_UK][0][81] = 127, + [0][1][1][0][RTW89_THAILAND][1][81] = 127, + [0][1][1][0][RTW89_THAILAND][0][81] = 127, [0][1][1][0][RTW89_FCC][1][83] = -2, [0][1][1][0][RTW89_FCC][2][83] = 68, [0][1][1][0][RTW89_ETSI][1][83] = 127, @@ -39730,6 +40479,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][83] = 127, [0][1][1][0][RTW89_MKK][0][83] = 127, [0][1][1][0][RTW89_IC][1][83] = -2, + [0][1][1][0][RTW89_IC][2][83] = 68, [0][1][1][0][RTW89_KCC][1][83] = 20, [0][1][1][0][RTW89_KCC][0][83] = 127, [0][1][1][0][RTW89_ACMA][1][83] = 127, @@ -39739,6 +40489,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][83] = 127, [0][1][1][0][RTW89_UK][1][83] = 127, [0][1][1][0][RTW89_UK][0][83] = 127, + [0][1][1][0][RTW89_THAILAND][1][83] = 
127, + [0][1][1][0][RTW89_THAILAND][0][83] = 127, [0][1][1][0][RTW89_FCC][1][85] = -2, [0][1][1][0][RTW89_FCC][2][85] = 68, [0][1][1][0][RTW89_ETSI][1][85] = 127, @@ -39746,6 +40498,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][85] = 127, [0][1][1][0][RTW89_MKK][0][85] = 127, [0][1][1][0][RTW89_IC][1][85] = -2, + [0][1][1][0][RTW89_IC][2][85] = 68, [0][1][1][0][RTW89_KCC][1][85] = 20, [0][1][1][0][RTW89_KCC][0][85] = 127, [0][1][1][0][RTW89_ACMA][1][85] = 127, @@ -39755,6 +40508,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][85] = 127, [0][1][1][0][RTW89_UK][1][85] = 127, [0][1][1][0][RTW89_UK][0][85] = 127, + [0][1][1][0][RTW89_THAILAND][1][85] = 127, + [0][1][1][0][RTW89_THAILAND][0][85] = 127, [0][1][1][0][RTW89_FCC][1][87] = -2, [0][1][1][0][RTW89_FCC][2][87] = 127, [0][1][1][0][RTW89_ETSI][1][87] = 127, @@ -39762,6 +40517,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][87] = 127, [0][1][1][0][RTW89_MKK][0][87] = 127, [0][1][1][0][RTW89_IC][1][87] = -2, + [0][1][1][0][RTW89_IC][2][87] = 127, [0][1][1][0][RTW89_KCC][1][87] = 20, [0][1][1][0][RTW89_KCC][0][87] = 127, [0][1][1][0][RTW89_ACMA][1][87] = 127, @@ -39771,6 +40527,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][87] = 127, [0][1][1][0][RTW89_UK][1][87] = 127, [0][1][1][0][RTW89_UK][0][87] = 127, + [0][1][1][0][RTW89_THAILAND][1][87] = 127, + [0][1][1][0][RTW89_THAILAND][0][87] = 127, [0][1][1][0][RTW89_FCC][1][89] = -2, [0][1][1][0][RTW89_FCC][2][89] = 127, [0][1][1][0][RTW89_ETSI][1][89] = 127, @@ -39778,6 +40536,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][89] = 127, [0][1][1][0][RTW89_MKK][0][89] = 127, [0][1][1][0][RTW89_IC][1][89] = -2, + [0][1][1][0][RTW89_IC][2][89] = 127, [0][1][1][0][RTW89_KCC][1][89] = 20, [0][1][1][0][RTW89_KCC][0][89] = 127, [0][1][1][0][RTW89_ACMA][1][89] = 127, @@ -39787,6 +40546,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][89] = 127, [0][1][1][0][RTW89_UK][1][89] = 127, [0][1][1][0][RTW89_UK][0][89] = 127, + [0][1][1][0][RTW89_THAILAND][1][89] = 127, + [0][1][1][0][RTW89_THAILAND][0][89] = 127, [0][1][1][0][RTW89_FCC][1][90] = -2, [0][1][1][0][RTW89_FCC][2][90] = 127, [0][1][1][0][RTW89_ETSI][1][90] = 127, @@ -39794,6 +40555,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][90] = 127, [0][1][1][0][RTW89_MKK][0][90] = 127, [0][1][1][0][RTW89_IC][1][90] = -2, + [0][1][1][0][RTW89_IC][2][90] = 127, [0][1][1][0][RTW89_KCC][1][90] = 20, [0][1][1][0][RTW89_KCC][0][90] = 127, [0][1][1][0][RTW89_ACMA][1][90] = 127, @@ -39803,6 +40565,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][90] = 127, [0][1][1][0][RTW89_UK][1][90] = 127, [0][1][1][0][RTW89_UK][0][90] = 127, + [0][1][1][0][RTW89_THAILAND][1][90] = 127, + [0][1][1][0][RTW89_THAILAND][0][90] = 127, [0][1][1][0][RTW89_FCC][1][92] = -2, [0][1][1][0][RTW89_FCC][2][92] = 127, [0][1][1][0][RTW89_ETSI][1][92] = 127, @@ -39810,6 +40574,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][92] = 127, [0][1][1][0][RTW89_MKK][0][92] = 127, [0][1][1][0][RTW89_IC][1][92] = -2, + [0][1][1][0][RTW89_IC][2][92] = 127, [0][1][1][0][RTW89_KCC][1][92] = 20, [0][1][1][0][RTW89_KCC][0][92] = 127, 
[0][1][1][0][RTW89_ACMA][1][92] = 127, @@ -39819,6 +40584,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][92] = 127, [0][1][1][0][RTW89_UK][1][92] = 127, [0][1][1][0][RTW89_UK][0][92] = 127, + [0][1][1][0][RTW89_THAILAND][1][92] = 127, + [0][1][1][0][RTW89_THAILAND][0][92] = 127, [0][1][1][0][RTW89_FCC][1][94] = -2, [0][1][1][0][RTW89_FCC][2][94] = 127, [0][1][1][0][RTW89_ETSI][1][94] = 127, @@ -39826,6 +40593,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][94] = 127, [0][1][1][0][RTW89_MKK][0][94] = 127, [0][1][1][0][RTW89_IC][1][94] = -2, + [0][1][1][0][RTW89_IC][2][94] = 127, [0][1][1][0][RTW89_KCC][1][94] = 20, [0][1][1][0][RTW89_KCC][0][94] = 127, [0][1][1][0][RTW89_ACMA][1][94] = 127, @@ -39835,6 +40603,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][94] = 127, [0][1][1][0][RTW89_UK][1][94] = 127, [0][1][1][0][RTW89_UK][0][94] = 127, + [0][1][1][0][RTW89_THAILAND][1][94] = 127, + [0][1][1][0][RTW89_THAILAND][0][94] = 127, [0][1][1][0][RTW89_FCC][1][96] = -2, [0][1][1][0][RTW89_FCC][2][96] = 127, [0][1][1][0][RTW89_ETSI][1][96] = 127, @@ -39842,6 +40612,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][96] = 127, [0][1][1][0][RTW89_MKK][0][96] = 127, [0][1][1][0][RTW89_IC][1][96] = -2, + [0][1][1][0][RTW89_IC][2][96] = 127, [0][1][1][0][RTW89_KCC][1][96] = 20, [0][1][1][0][RTW89_KCC][0][96] = 127, [0][1][1][0][RTW89_ACMA][1][96] = 127, @@ -39851,6 +40622,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][96] = 127, [0][1][1][0][RTW89_UK][1][96] = 127, [0][1][1][0][RTW89_UK][0][96] = 127, + [0][1][1][0][RTW89_THAILAND][1][96] = 127, + [0][1][1][0][RTW89_THAILAND][0][96] = 127, [0][1][1][0][RTW89_FCC][1][98] = -2, [0][1][1][0][RTW89_FCC][2][98] = 127, [0][1][1][0][RTW89_ETSI][1][98] = 127, @@ -39858,6 +40631,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][98] = 127, [0][1][1][0][RTW89_MKK][0][98] = 127, [0][1][1][0][RTW89_IC][1][98] = -2, + [0][1][1][0][RTW89_IC][2][98] = 127, [0][1][1][0][RTW89_KCC][1][98] = 20, [0][1][1][0][RTW89_KCC][0][98] = 127, [0][1][1][0][RTW89_ACMA][1][98] = 127, @@ -39867,6 +40641,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][98] = 127, [0][1][1][0][RTW89_UK][1][98] = 127, [0][1][1][0][RTW89_UK][0][98] = 127, + [0][1][1][0][RTW89_THAILAND][1][98] = 127, + [0][1][1][0][RTW89_THAILAND][0][98] = 127, [0][1][1][0][RTW89_FCC][1][100] = -2, [0][1][1][0][RTW89_FCC][2][100] = 127, [0][1][1][0][RTW89_ETSI][1][100] = 127, @@ -39874,6 +40650,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][100] = 127, [0][1][1][0][RTW89_MKK][0][100] = 127, [0][1][1][0][RTW89_IC][1][100] = -2, + [0][1][1][0][RTW89_IC][2][100] = 127, [0][1][1][0][RTW89_KCC][1][100] = 20, [0][1][1][0][RTW89_KCC][0][100] = 127, [0][1][1][0][RTW89_ACMA][1][100] = 127, @@ -39883,6 +40660,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][100] = 127, [0][1][1][0][RTW89_UK][1][100] = 127, [0][1][1][0][RTW89_UK][0][100] = 127, + [0][1][1][0][RTW89_THAILAND][1][100] = 127, + [0][1][1][0][RTW89_THAILAND][0][100] = 127, [0][1][1][0][RTW89_FCC][1][102] = -2, [0][1][1][0][RTW89_FCC][2][102] = 127, [0][1][1][0][RTW89_ETSI][1][102] = 127, @@ -39890,6 +40669,7 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][102] = 127, [0][1][1][0][RTW89_MKK][0][102] = 127, [0][1][1][0][RTW89_IC][1][102] = -2, + [0][1][1][0][RTW89_IC][2][102] = 127, [0][1][1][0][RTW89_KCC][1][102] = 20, [0][1][1][0][RTW89_KCC][0][102] = 127, [0][1][1][0][RTW89_ACMA][1][102] = 127, @@ -39899,6 +40679,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][102] = 127, [0][1][1][0][RTW89_UK][1][102] = 127, [0][1][1][0][RTW89_UK][0][102] = 127, + [0][1][1][0][RTW89_THAILAND][1][102] = 127, + [0][1][1][0][RTW89_THAILAND][0][102] = 127, [0][1][1][0][RTW89_FCC][1][104] = -2, [0][1][1][0][RTW89_FCC][2][104] = 127, [0][1][1][0][RTW89_ETSI][1][104] = 127, @@ -39906,6 +40688,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][104] = 127, [0][1][1][0][RTW89_MKK][0][104] = 127, [0][1][1][0][RTW89_IC][1][104] = -2, + [0][1][1][0][RTW89_IC][2][104] = 127, [0][1][1][0][RTW89_KCC][1][104] = 20, [0][1][1][0][RTW89_KCC][0][104] = 127, [0][1][1][0][RTW89_ACMA][1][104] = 127, @@ -39915,6 +40698,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][104] = 127, [0][1][1][0][RTW89_UK][1][104] = 127, [0][1][1][0][RTW89_UK][0][104] = 127, + [0][1][1][0][RTW89_THAILAND][1][104] = 127, + [0][1][1][0][RTW89_THAILAND][0][104] = 127, [0][1][1][0][RTW89_FCC][1][105] = -2, [0][1][1][0][RTW89_FCC][2][105] = 127, [0][1][1][0][RTW89_ETSI][1][105] = 127, @@ -39922,6 +40707,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][105] = 127, [0][1][1][0][RTW89_MKK][0][105] = 127, [0][1][1][0][RTW89_IC][1][105] = -2, + [0][1][1][0][RTW89_IC][2][105] = 127, [0][1][1][0][RTW89_KCC][1][105] = 20, [0][1][1][0][RTW89_KCC][0][105] = 127, [0][1][1][0][RTW89_ACMA][1][105] = 127, @@ -39931,6 +40717,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][105] = 127, [0][1][1][0][RTW89_UK][1][105] = 127, [0][1][1][0][RTW89_UK][0][105] = 127, + [0][1][1][0][RTW89_THAILAND][1][105] = 127, + [0][1][1][0][RTW89_THAILAND][0][105] = 127, [0][1][1][0][RTW89_FCC][1][107] = 1, [0][1][1][0][RTW89_FCC][2][107] = 127, [0][1][1][0][RTW89_ETSI][1][107] = 127, @@ -39938,6 +40726,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][107] = 127, [0][1][1][0][RTW89_MKK][0][107] = 127, [0][1][1][0][RTW89_IC][1][107] = 1, + [0][1][1][0][RTW89_IC][2][107] = 127, [0][1][1][0][RTW89_KCC][1][107] = 20, [0][1][1][0][RTW89_KCC][0][107] = 127, [0][1][1][0][RTW89_ACMA][1][107] = 127, @@ -39947,6 +40736,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][107] = 127, [0][1][1][0][RTW89_UK][1][107] = 127, [0][1][1][0][RTW89_UK][0][107] = 127, + [0][1][1][0][RTW89_THAILAND][1][107] = 127, + [0][1][1][0][RTW89_THAILAND][0][107] = 127, [0][1][1][0][RTW89_FCC][1][109] = 1, [0][1][1][0][RTW89_FCC][2][109] = 127, [0][1][1][0][RTW89_ETSI][1][109] = 127, @@ -39954,6 +40745,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][109] = 127, [0][1][1][0][RTW89_MKK][0][109] = 127, [0][1][1][0][RTW89_IC][1][109] = 1, + [0][1][1][0][RTW89_IC][2][109] = 127, [0][1][1][0][RTW89_KCC][1][109] = 20, [0][1][1][0][RTW89_KCC][0][109] = 127, [0][1][1][0][RTW89_ACMA][1][109] = 127, @@ -39963,6 +40755,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] 
[0][1][1][0][RTW89_QATAR][0][109] = 127, [0][1][1][0][RTW89_UK][1][109] = 127, [0][1][1][0][RTW89_UK][0][109] = 127, + [0][1][1][0][RTW89_THAILAND][1][109] = 127, + [0][1][1][0][RTW89_THAILAND][0][109] = 127, [0][1][1][0][RTW89_FCC][1][111] = 127, [0][1][1][0][RTW89_FCC][2][111] = 127, [0][1][1][0][RTW89_ETSI][1][111] = 127, @@ -39970,6 +40764,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][111] = 127, [0][1][1][0][RTW89_MKK][0][111] = 127, [0][1][1][0][RTW89_IC][1][111] = 127, + [0][1][1][0][RTW89_IC][2][111] = 127, [0][1][1][0][RTW89_KCC][1][111] = 127, [0][1][1][0][RTW89_KCC][0][111] = 127, [0][1][1][0][RTW89_ACMA][1][111] = 127, @@ -39979,6 +40774,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][111] = 127, [0][1][1][0][RTW89_UK][1][111] = 127, [0][1][1][0][RTW89_UK][0][111] = 127, + [0][1][1][0][RTW89_THAILAND][1][111] = 127, + [0][1][1][0][RTW89_THAILAND][0][111] = 127, [0][1][1][0][RTW89_FCC][1][113] = 127, [0][1][1][0][RTW89_FCC][2][113] = 127, [0][1][1][0][RTW89_ETSI][1][113] = 127, @@ -39986,6 +40783,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][113] = 127, [0][1][1][0][RTW89_MKK][0][113] = 127, [0][1][1][0][RTW89_IC][1][113] = 127, + [0][1][1][0][RTW89_IC][2][113] = 127, [0][1][1][0][RTW89_KCC][1][113] = 127, [0][1][1][0][RTW89_KCC][0][113] = 127, [0][1][1][0][RTW89_ACMA][1][113] = 127, @@ -39995,6 +40793,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][113] = 127, [0][1][1][0][RTW89_UK][1][113] = 127, [0][1][1][0][RTW89_UK][0][113] = 127, + [0][1][1][0][RTW89_THAILAND][1][113] = 127, + [0][1][1][0][RTW89_THAILAND][0][113] = 127, [0][1][1][0][RTW89_FCC][1][115] = 127, [0][1][1][0][RTW89_FCC][2][115] = 127, [0][1][1][0][RTW89_ETSI][1][115] = 127, @@ -40002,6 +40802,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][115] = 127, [0][1][1][0][RTW89_MKK][0][115] = 127, [0][1][1][0][RTW89_IC][1][115] = 127, + [0][1][1][0][RTW89_IC][2][115] = 127, [0][1][1][0][RTW89_KCC][1][115] = 127, [0][1][1][0][RTW89_KCC][0][115] = 127, [0][1][1][0][RTW89_ACMA][1][115] = 127, @@ -40011,6 +40812,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][115] = 127, [0][1][1][0][RTW89_UK][1][115] = 127, [0][1][1][0][RTW89_UK][0][115] = 127, + [0][1][1][0][RTW89_THAILAND][1][115] = 127, + [0][1][1][0][RTW89_THAILAND][0][115] = 127, [0][1][1][0][RTW89_FCC][1][117] = 127, [0][1][1][0][RTW89_FCC][2][117] = 127, [0][1][1][0][RTW89_ETSI][1][117] = 127, @@ -40018,6 +40821,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][1][117] = 127, [0][1][1][0][RTW89_MKK][0][117] = 127, [0][1][1][0][RTW89_IC][1][117] = 127, + [0][1][1][0][RTW89_IC][2][117] = 127, [0][1][1][0][RTW89_KCC][1][117] = 127, [0][1][1][0][RTW89_KCC][0][117] = 127, [0][1][1][0][RTW89_ACMA][1][117] = 127, @@ -40027,6 +40831,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][117] = 127, [0][1][1][0][RTW89_UK][1][117] = 127, [0][1][1][0][RTW89_UK][0][117] = 127, + [0][1][1][0][RTW89_THAILAND][1][117] = 127, + [0][1][1][0][RTW89_THAILAND][0][117] = 127, [0][1][1][0][RTW89_FCC][1][119] = 127, [0][1][1][0][RTW89_FCC][2][119] = 127, [0][1][1][0][RTW89_ETSI][1][119] = 127, @@ -40034,6 +40840,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] 
[0][1][1][0][RTW89_MKK][1][119] = 127, [0][1][1][0][RTW89_MKK][0][119] = 127, [0][1][1][0][RTW89_IC][1][119] = 127, + [0][1][1][0][RTW89_IC][2][119] = 127, [0][1][1][0][RTW89_KCC][1][119] = 127, [0][1][1][0][RTW89_KCC][0][119] = 127, [0][1][1][0][RTW89_ACMA][1][119] = 127, @@ -40043,6 +40850,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_QATAR][0][119] = 127, [0][1][1][0][RTW89_UK][1][119] = 127, [0][1][1][0][RTW89_UK][0][119] = 127, + [0][1][1][0][RTW89_THAILAND][1][119] = 127, + [0][1][1][0][RTW89_THAILAND][0][119] = 127, [0][0][2][0][RTW89_FCC][1][0] = 24, [0][0][2][0][RTW89_FCC][2][0] = 56, [0][0][2][0][RTW89_ETSI][1][0] = 66, @@ -40050,6 +40859,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][0] = 66, [0][0][2][0][RTW89_MKK][0][0] = 26, [0][0][2][0][RTW89_IC][1][0] = 24, + [0][0][2][0][RTW89_IC][2][0] = 56, [0][0][2][0][RTW89_KCC][1][0] = 24, [0][0][2][0][RTW89_KCC][0][0] = 24, [0][0][2][0][RTW89_ACMA][1][0] = 66, @@ -40059,6 +40869,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][0] = 28, [0][0][2][0][RTW89_UK][1][0] = 66, [0][0][2][0][RTW89_UK][0][0] = 28, + [0][0][2][0][RTW89_THAILAND][1][0] = 56, + [0][0][2][0][RTW89_THAILAND][0][0] = 24, [0][0][2][0][RTW89_FCC][1][2] = 22, [0][0][2][0][RTW89_FCC][2][2] = 56, [0][0][2][0][RTW89_ETSI][1][2] = 66, @@ -40066,6 +40878,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][2] = 66, [0][0][2][0][RTW89_MKK][0][2] = 26, [0][0][2][0][RTW89_IC][1][2] = 22, + [0][0][2][0][RTW89_IC][2][2] = 56, [0][0][2][0][RTW89_KCC][1][2] = 24, [0][0][2][0][RTW89_KCC][0][2] = 24, [0][0][2][0][RTW89_ACMA][1][2] = 66, @@ -40075,6 +40888,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][2] = 28, [0][0][2][0][RTW89_UK][1][2] = 66, [0][0][2][0][RTW89_UK][0][2] = 28, + [0][0][2][0][RTW89_THAILAND][1][2] = 56, + [0][0][2][0][RTW89_THAILAND][0][2] = 22, [0][0][2][0][RTW89_FCC][1][4] = 22, [0][0][2][0][RTW89_FCC][2][4] = 56, [0][0][2][0][RTW89_ETSI][1][4] = 66, @@ -40082,6 +40897,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][4] = 66, [0][0][2][0][RTW89_MKK][0][4] = 26, [0][0][2][0][RTW89_IC][1][4] = 22, + [0][0][2][0][RTW89_IC][2][4] = 56, [0][0][2][0][RTW89_KCC][1][4] = 24, [0][0][2][0][RTW89_KCC][0][4] = 24, [0][0][2][0][RTW89_ACMA][1][4] = 66, @@ -40091,6 +40907,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][4] = 28, [0][0][2][0][RTW89_UK][1][4] = 66, [0][0][2][0][RTW89_UK][0][4] = 28, + [0][0][2][0][RTW89_THAILAND][1][4] = 56, + [0][0][2][0][RTW89_THAILAND][0][4] = 22, [0][0][2][0][RTW89_FCC][1][6] = 22, [0][0][2][0][RTW89_FCC][2][6] = 56, [0][0][2][0][RTW89_ETSI][1][6] = 66, @@ -40098,6 +40916,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][6] = 66, [0][0][2][0][RTW89_MKK][0][6] = 26, [0][0][2][0][RTW89_IC][1][6] = 22, + [0][0][2][0][RTW89_IC][2][6] = 56, [0][0][2][0][RTW89_KCC][1][6] = 24, [0][0][2][0][RTW89_KCC][0][6] = 24, [0][0][2][0][RTW89_ACMA][1][6] = 66, @@ -40107,6 +40926,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][6] = 28, [0][0][2][0][RTW89_UK][1][6] = 66, [0][0][2][0][RTW89_UK][0][6] = 28, + [0][0][2][0][RTW89_THAILAND][1][6] = 56, + [0][0][2][0][RTW89_THAILAND][0][6] = 22, [0][0][2][0][RTW89_FCC][1][8] = 22, 
[0][0][2][0][RTW89_FCC][2][8] = 56, [0][0][2][0][RTW89_ETSI][1][8] = 66, @@ -40114,6 +40935,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][8] = 66, [0][0][2][0][RTW89_MKK][0][8] = 26, [0][0][2][0][RTW89_IC][1][8] = 22, + [0][0][2][0][RTW89_IC][2][8] = 56, [0][0][2][0][RTW89_KCC][1][8] = 24, [0][0][2][0][RTW89_KCC][0][8] = 24, [0][0][2][0][RTW89_ACMA][1][8] = 66, @@ -40123,6 +40945,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][8] = 28, [0][0][2][0][RTW89_UK][1][8] = 66, [0][0][2][0][RTW89_UK][0][8] = 28, + [0][0][2][0][RTW89_THAILAND][1][8] = 56, + [0][0][2][0][RTW89_THAILAND][0][8] = 22, [0][0][2][0][RTW89_FCC][1][10] = 22, [0][0][2][0][RTW89_FCC][2][10] = 56, [0][0][2][0][RTW89_ETSI][1][10] = 66, @@ -40130,6 +40954,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][10] = 66, [0][0][2][0][RTW89_MKK][0][10] = 26, [0][0][2][0][RTW89_IC][1][10] = 22, + [0][0][2][0][RTW89_IC][2][10] = 56, [0][0][2][0][RTW89_KCC][1][10] = 24, [0][0][2][0][RTW89_KCC][0][10] = 24, [0][0][2][0][RTW89_ACMA][1][10] = 66, @@ -40139,6 +40964,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][10] = 28, [0][0][2][0][RTW89_UK][1][10] = 66, [0][0][2][0][RTW89_UK][0][10] = 28, + [0][0][2][0][RTW89_THAILAND][1][10] = 56, + [0][0][2][0][RTW89_THAILAND][0][10] = 22, [0][0][2][0][RTW89_FCC][1][12] = 22, [0][0][2][0][RTW89_FCC][2][12] = 56, [0][0][2][0][RTW89_ETSI][1][12] = 66, @@ -40146,6 +40973,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][12] = 66, [0][0][2][0][RTW89_MKK][0][12] = 26, [0][0][2][0][RTW89_IC][1][12] = 22, + [0][0][2][0][RTW89_IC][2][12] = 56, [0][0][2][0][RTW89_KCC][1][12] = 24, [0][0][2][0][RTW89_KCC][0][12] = 24, [0][0][2][0][RTW89_ACMA][1][12] = 66, @@ -40155,6 +40983,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][12] = 28, [0][0][2][0][RTW89_UK][1][12] = 66, [0][0][2][0][RTW89_UK][0][12] = 28, + [0][0][2][0][RTW89_THAILAND][1][12] = 56, + [0][0][2][0][RTW89_THAILAND][0][12] = 22, [0][0][2][0][RTW89_FCC][1][14] = 22, [0][0][2][0][RTW89_FCC][2][14] = 56, [0][0][2][0][RTW89_ETSI][1][14] = 66, @@ -40162,6 +40992,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][14] = 66, [0][0][2][0][RTW89_MKK][0][14] = 26, [0][0][2][0][RTW89_IC][1][14] = 22, + [0][0][2][0][RTW89_IC][2][14] = 56, [0][0][2][0][RTW89_KCC][1][14] = 24, [0][0][2][0][RTW89_KCC][0][14] = 24, [0][0][2][0][RTW89_ACMA][1][14] = 66, @@ -40171,6 +41002,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][14] = 28, [0][0][2][0][RTW89_UK][1][14] = 66, [0][0][2][0][RTW89_UK][0][14] = 28, + [0][0][2][0][RTW89_THAILAND][1][14] = 56, + [0][0][2][0][RTW89_THAILAND][0][14] = 22, [0][0][2][0][RTW89_FCC][1][15] = 22, [0][0][2][0][RTW89_FCC][2][15] = 56, [0][0][2][0][RTW89_ETSI][1][15] = 66, @@ -40178,6 +41011,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][15] = 66, [0][0][2][0][RTW89_MKK][0][15] = 26, [0][0][2][0][RTW89_IC][1][15] = 22, + [0][0][2][0][RTW89_IC][2][15] = 56, [0][0][2][0][RTW89_KCC][1][15] = 24, [0][0][2][0][RTW89_KCC][0][15] = 24, [0][0][2][0][RTW89_ACMA][1][15] = 66, @@ -40187,6 +41021,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][15] = 28, 
[0][0][2][0][RTW89_UK][1][15] = 66, [0][0][2][0][RTW89_UK][0][15] = 28, + [0][0][2][0][RTW89_THAILAND][1][15] = 56, + [0][0][2][0][RTW89_THAILAND][0][15] = 22, [0][0][2][0][RTW89_FCC][1][17] = 22, [0][0][2][0][RTW89_FCC][2][17] = 56, [0][0][2][0][RTW89_ETSI][1][17] = 66, @@ -40194,6 +41030,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][17] = 66, [0][0][2][0][RTW89_MKK][0][17] = 26, [0][0][2][0][RTW89_IC][1][17] = 22, + [0][0][2][0][RTW89_IC][2][17] = 56, [0][0][2][0][RTW89_KCC][1][17] = 24, [0][0][2][0][RTW89_KCC][0][17] = 24, [0][0][2][0][RTW89_ACMA][1][17] = 66, @@ -40203,6 +41040,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][17] = 28, [0][0][2][0][RTW89_UK][1][17] = 66, [0][0][2][0][RTW89_UK][0][17] = 28, + [0][0][2][0][RTW89_THAILAND][1][17] = 56, + [0][0][2][0][RTW89_THAILAND][0][17] = 22, [0][0][2][0][RTW89_FCC][1][19] = 22, [0][0][2][0][RTW89_FCC][2][19] = 56, [0][0][2][0][RTW89_ETSI][1][19] = 66, @@ -40210,6 +41049,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][19] = 66, [0][0][2][0][RTW89_MKK][0][19] = 26, [0][0][2][0][RTW89_IC][1][19] = 22, + [0][0][2][0][RTW89_IC][2][19] = 56, [0][0][2][0][RTW89_KCC][1][19] = 24, [0][0][2][0][RTW89_KCC][0][19] = 24, [0][0][2][0][RTW89_ACMA][1][19] = 66, @@ -40219,6 +41059,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][19] = 28, [0][0][2][0][RTW89_UK][1][19] = 66, [0][0][2][0][RTW89_UK][0][19] = 28, + [0][0][2][0][RTW89_THAILAND][1][19] = 56, + [0][0][2][0][RTW89_THAILAND][0][19] = 22, [0][0][2][0][RTW89_FCC][1][21] = 22, [0][0][2][0][RTW89_FCC][2][21] = 56, [0][0][2][0][RTW89_ETSI][1][21] = 66, @@ -40226,6 +41068,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][21] = 66, [0][0][2][0][RTW89_MKK][0][21] = 26, [0][0][2][0][RTW89_IC][1][21] = 22, + [0][0][2][0][RTW89_IC][2][21] = 56, [0][0][2][0][RTW89_KCC][1][21] = 24, [0][0][2][0][RTW89_KCC][0][21] = 24, [0][0][2][0][RTW89_ACMA][1][21] = 66, @@ -40235,6 +41078,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][21] = 28, [0][0][2][0][RTW89_UK][1][21] = 66, [0][0][2][0][RTW89_UK][0][21] = 28, + [0][0][2][0][RTW89_THAILAND][1][21] = 56, + [0][0][2][0][RTW89_THAILAND][0][21] = 22, [0][0][2][0][RTW89_FCC][1][23] = 22, [0][0][2][0][RTW89_FCC][2][23] = 70, [0][0][2][0][RTW89_ETSI][1][23] = 66, @@ -40242,6 +41087,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][23] = 66, [0][0][2][0][RTW89_MKK][0][23] = 26, [0][0][2][0][RTW89_IC][1][23] = 22, + [0][0][2][0][RTW89_IC][2][23] = 70, [0][0][2][0][RTW89_KCC][1][23] = 24, [0][0][2][0][RTW89_KCC][0][23] = 26, [0][0][2][0][RTW89_ACMA][1][23] = 66, @@ -40251,6 +41097,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][23] = 28, [0][0][2][0][RTW89_UK][1][23] = 66, [0][0][2][0][RTW89_UK][0][23] = 28, + [0][0][2][0][RTW89_THAILAND][1][23] = 66, + [0][0][2][0][RTW89_THAILAND][0][23] = 22, [0][0][2][0][RTW89_FCC][1][25] = 22, [0][0][2][0][RTW89_FCC][2][25] = 70, [0][0][2][0][RTW89_ETSI][1][25] = 66, @@ -40258,6 +41106,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][25] = 66, [0][0][2][0][RTW89_MKK][0][25] = 26, [0][0][2][0][RTW89_IC][1][25] = 22, + [0][0][2][0][RTW89_IC][2][25] = 70, [0][0][2][0][RTW89_KCC][1][25] = 24, 
[0][0][2][0][RTW89_KCC][0][25] = 26, [0][0][2][0][RTW89_ACMA][1][25] = 66, @@ -40267,6 +41116,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][25] = 28, [0][0][2][0][RTW89_UK][1][25] = 66, [0][0][2][0][RTW89_UK][0][25] = 28, + [0][0][2][0][RTW89_THAILAND][1][25] = 66, + [0][0][2][0][RTW89_THAILAND][0][25] = 22, [0][0][2][0][RTW89_FCC][1][27] = 22, [0][0][2][0][RTW89_FCC][2][27] = 70, [0][0][2][0][RTW89_ETSI][1][27] = 66, @@ -40274,6 +41125,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][27] = 66, [0][0][2][0][RTW89_MKK][0][27] = 26, [0][0][2][0][RTW89_IC][1][27] = 22, + [0][0][2][0][RTW89_IC][2][27] = 70, [0][0][2][0][RTW89_KCC][1][27] = 24, [0][0][2][0][RTW89_KCC][0][27] = 26, [0][0][2][0][RTW89_ACMA][1][27] = 66, @@ -40283,6 +41135,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][27] = 28, [0][0][2][0][RTW89_UK][1][27] = 66, [0][0][2][0][RTW89_UK][0][27] = 28, + [0][0][2][0][RTW89_THAILAND][1][27] = 66, + [0][0][2][0][RTW89_THAILAND][0][27] = 22, [0][0][2][0][RTW89_FCC][1][29] = 22, [0][0][2][0][RTW89_FCC][2][29] = 70, [0][0][2][0][RTW89_ETSI][1][29] = 66, @@ -40290,6 +41144,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][29] = 66, [0][0][2][0][RTW89_MKK][0][29] = 26, [0][0][2][0][RTW89_IC][1][29] = 22, + [0][0][2][0][RTW89_IC][2][29] = 70, [0][0][2][0][RTW89_KCC][1][29] = 24, [0][0][2][0][RTW89_KCC][0][29] = 26, [0][0][2][0][RTW89_ACMA][1][29] = 66, @@ -40299,6 +41154,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][29] = 28, [0][0][2][0][RTW89_UK][1][29] = 66, [0][0][2][0][RTW89_UK][0][29] = 28, + [0][0][2][0][RTW89_THAILAND][1][29] = 66, + [0][0][2][0][RTW89_THAILAND][0][29] = 22, [0][0][2][0][RTW89_FCC][1][30] = 22, [0][0][2][0][RTW89_FCC][2][30] = 70, [0][0][2][0][RTW89_ETSI][1][30] = 66, @@ -40306,6 +41163,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][30] = 66, [0][0][2][0][RTW89_MKK][0][30] = 26, [0][0][2][0][RTW89_IC][1][30] = 22, + [0][0][2][0][RTW89_IC][2][30] = 70, [0][0][2][0][RTW89_KCC][1][30] = 24, [0][0][2][0][RTW89_KCC][0][30] = 26, [0][0][2][0][RTW89_ACMA][1][30] = 66, @@ -40315,6 +41173,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][30] = 28, [0][0][2][0][RTW89_UK][1][30] = 66, [0][0][2][0][RTW89_UK][0][30] = 28, + [0][0][2][0][RTW89_THAILAND][1][30] = 66, + [0][0][2][0][RTW89_THAILAND][0][30] = 22, [0][0][2][0][RTW89_FCC][1][32] = 22, [0][0][2][0][RTW89_FCC][2][32] = 70, [0][0][2][0][RTW89_ETSI][1][32] = 66, @@ -40322,6 +41182,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][32] = 66, [0][0][2][0][RTW89_MKK][0][32] = 26, [0][0][2][0][RTW89_IC][1][32] = 22, + [0][0][2][0][RTW89_IC][2][32] = 70, [0][0][2][0][RTW89_KCC][1][32] = 24, [0][0][2][0][RTW89_KCC][0][32] = 26, [0][0][2][0][RTW89_ACMA][1][32] = 66, @@ -40331,6 +41192,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][32] = 28, [0][0][2][0][RTW89_UK][1][32] = 66, [0][0][2][0][RTW89_UK][0][32] = 28, + [0][0][2][0][RTW89_THAILAND][1][32] = 66, + [0][0][2][0][RTW89_THAILAND][0][32] = 22, [0][0][2][0][RTW89_FCC][1][34] = 22, [0][0][2][0][RTW89_FCC][2][34] = 70, [0][0][2][0][RTW89_ETSI][1][34] = 66, @@ -40338,6 +41201,7 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][34] = 66, [0][0][2][0][RTW89_MKK][0][34] = 26, [0][0][2][0][RTW89_IC][1][34] = 22, + [0][0][2][0][RTW89_IC][2][34] = 70, [0][0][2][0][RTW89_KCC][1][34] = 24, [0][0][2][0][RTW89_KCC][0][34] = 26, [0][0][2][0][RTW89_ACMA][1][34] = 66, @@ -40347,6 +41211,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][34] = 28, [0][0][2][0][RTW89_UK][1][34] = 66, [0][0][2][0][RTW89_UK][0][34] = 28, + [0][0][2][0][RTW89_THAILAND][1][34] = 66, + [0][0][2][0][RTW89_THAILAND][0][34] = 22, [0][0][2][0][RTW89_FCC][1][36] = 22, [0][0][2][0][RTW89_FCC][2][36] = 70, [0][0][2][0][RTW89_ETSI][1][36] = 66, @@ -40354,6 +41220,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][36] = 66, [0][0][2][0][RTW89_MKK][0][36] = 26, [0][0][2][0][RTW89_IC][1][36] = 22, + [0][0][2][0][RTW89_IC][2][36] = 70, [0][0][2][0][RTW89_KCC][1][36] = 24, [0][0][2][0][RTW89_KCC][0][36] = 26, [0][0][2][0][RTW89_ACMA][1][36] = 66, @@ -40363,6 +41230,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][36] = 28, [0][0][2][0][RTW89_UK][1][36] = 66, [0][0][2][0][RTW89_UK][0][36] = 28, + [0][0][2][0][RTW89_THAILAND][1][36] = 66, + [0][0][2][0][RTW89_THAILAND][0][36] = 22, [0][0][2][0][RTW89_FCC][1][38] = 22, [0][0][2][0][RTW89_FCC][2][38] = 70, [0][0][2][0][RTW89_ETSI][1][38] = 66, @@ -40370,6 +41239,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][38] = 66, [0][0][2][0][RTW89_MKK][0][38] = 26, [0][0][2][0][RTW89_IC][1][38] = 22, + [0][0][2][0][RTW89_IC][2][38] = 70, [0][0][2][0][RTW89_KCC][1][38] = 24, [0][0][2][0][RTW89_KCC][0][38] = 26, [0][0][2][0][RTW89_ACMA][1][38] = 66, @@ -40379,6 +41249,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][38] = 28, [0][0][2][0][RTW89_UK][1][38] = 66, [0][0][2][0][RTW89_UK][0][38] = 28, + [0][0][2][0][RTW89_THAILAND][1][38] = 66, + [0][0][2][0][RTW89_THAILAND][0][38] = 22, [0][0][2][0][RTW89_FCC][1][40] = 22, [0][0][2][0][RTW89_FCC][2][40] = 70, [0][0][2][0][RTW89_ETSI][1][40] = 66, @@ -40386,6 +41258,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][40] = 66, [0][0][2][0][RTW89_MKK][0][40] = 26, [0][0][2][0][RTW89_IC][1][40] = 22, + [0][0][2][0][RTW89_IC][2][40] = 70, [0][0][2][0][RTW89_KCC][1][40] = 24, [0][0][2][0][RTW89_KCC][0][40] = 26, [0][0][2][0][RTW89_ACMA][1][40] = 66, @@ -40395,6 +41268,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][40] = 28, [0][0][2][0][RTW89_UK][1][40] = 66, [0][0][2][0][RTW89_UK][0][40] = 28, + [0][0][2][0][RTW89_THAILAND][1][40] = 66, + [0][0][2][0][RTW89_THAILAND][0][40] = 22, [0][0][2][0][RTW89_FCC][1][42] = 22, [0][0][2][0][RTW89_FCC][2][42] = 70, [0][0][2][0][RTW89_ETSI][1][42] = 66, @@ -40402,6 +41277,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][42] = 66, [0][0][2][0][RTW89_MKK][0][42] = 26, [0][0][2][0][RTW89_IC][1][42] = 22, + [0][0][2][0][RTW89_IC][2][42] = 70, [0][0][2][0][RTW89_KCC][1][42] = 24, [0][0][2][0][RTW89_KCC][0][42] = 26, [0][0][2][0][RTW89_ACMA][1][42] = 66, @@ -40411,6 +41287,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][42] = 28, [0][0][2][0][RTW89_UK][1][42] = 66, [0][0][2][0][RTW89_UK][0][42] = 28, + 
[0][0][2][0][RTW89_THAILAND][1][42] = 66, + [0][0][2][0][RTW89_THAILAND][0][42] = 22, [0][0][2][0][RTW89_FCC][1][44] = 22, [0][0][2][0][RTW89_FCC][2][44] = 70, [0][0][2][0][RTW89_ETSI][1][44] = 66, @@ -40418,6 +41296,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][44] = 44, [0][0][2][0][RTW89_MKK][0][44] = 28, [0][0][2][0][RTW89_IC][1][44] = 22, + [0][0][2][0][RTW89_IC][2][44] = 70, [0][0][2][0][RTW89_KCC][1][44] = 24, [0][0][2][0][RTW89_KCC][0][44] = 26, [0][0][2][0][RTW89_ACMA][1][44] = 66, @@ -40427,6 +41306,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][44] = 30, [0][0][2][0][RTW89_UK][1][44] = 66, [0][0][2][0][RTW89_UK][0][44] = 30, + [0][0][2][0][RTW89_THAILAND][1][44] = 68, + [0][0][2][0][RTW89_THAILAND][0][44] = 22, [0][0][2][0][RTW89_FCC][1][45] = 22, [0][0][2][0][RTW89_FCC][2][45] = 127, [0][0][2][0][RTW89_ETSI][1][45] = 127, @@ -40434,6 +41315,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][45] = 127, [0][0][2][0][RTW89_MKK][0][45] = 127, [0][0][2][0][RTW89_IC][1][45] = 22, + [0][0][2][0][RTW89_IC][2][45] = 70, [0][0][2][0][RTW89_KCC][1][45] = 24, [0][0][2][0][RTW89_KCC][0][45] = 127, [0][0][2][0][RTW89_ACMA][1][45] = 127, @@ -40443,6 +41325,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][45] = 127, [0][0][2][0][RTW89_UK][1][45] = 127, [0][0][2][0][RTW89_UK][0][45] = 127, + [0][0][2][0][RTW89_THAILAND][1][45] = 127, + [0][0][2][0][RTW89_THAILAND][0][45] = 127, [0][0][2][0][RTW89_FCC][1][47] = 22, [0][0][2][0][RTW89_FCC][2][47] = 127, [0][0][2][0][RTW89_ETSI][1][47] = 127, @@ -40450,6 +41334,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][47] = 127, [0][0][2][0][RTW89_MKK][0][47] = 127, [0][0][2][0][RTW89_IC][1][47] = 22, + [0][0][2][0][RTW89_IC][2][47] = 70, [0][0][2][0][RTW89_KCC][1][47] = 24, [0][0][2][0][RTW89_KCC][0][47] = 127, [0][0][2][0][RTW89_ACMA][1][47] = 127, @@ -40459,6 +41344,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][47] = 127, [0][0][2][0][RTW89_UK][1][47] = 127, [0][0][2][0][RTW89_UK][0][47] = 127, + [0][0][2][0][RTW89_THAILAND][1][47] = 127, + [0][0][2][0][RTW89_THAILAND][0][47] = 127, [0][0][2][0][RTW89_FCC][1][49] = 24, [0][0][2][0][RTW89_FCC][2][49] = 127, [0][0][2][0][RTW89_ETSI][1][49] = 127, @@ -40466,6 +41353,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][49] = 127, [0][0][2][0][RTW89_MKK][0][49] = 127, [0][0][2][0][RTW89_IC][1][49] = 24, + [0][0][2][0][RTW89_IC][2][49] = 70, [0][0][2][0][RTW89_KCC][1][49] = 24, [0][0][2][0][RTW89_KCC][0][49] = 127, [0][0][2][0][RTW89_ACMA][1][49] = 127, @@ -40475,6 +41363,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][49] = 127, [0][0][2][0][RTW89_UK][1][49] = 127, [0][0][2][0][RTW89_UK][0][49] = 127, + [0][0][2][0][RTW89_THAILAND][1][49] = 127, + [0][0][2][0][RTW89_THAILAND][0][49] = 127, [0][0][2][0][RTW89_FCC][1][51] = 22, [0][0][2][0][RTW89_FCC][2][51] = 127, [0][0][2][0][RTW89_ETSI][1][51] = 127, @@ -40482,6 +41372,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][51] = 127, [0][0][2][0][RTW89_MKK][0][51] = 127, [0][0][2][0][RTW89_IC][1][51] = 22, + [0][0][2][0][RTW89_IC][2][51] = 70, [0][0][2][0][RTW89_KCC][1][51] = 24, [0][0][2][0][RTW89_KCC][0][51] = 127, 
[0][0][2][0][RTW89_ACMA][1][51] = 127, @@ -40491,6 +41382,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][51] = 127, [0][0][2][0][RTW89_UK][1][51] = 127, [0][0][2][0][RTW89_UK][0][51] = 127, + [0][0][2][0][RTW89_THAILAND][1][51] = 127, + [0][0][2][0][RTW89_THAILAND][0][51] = 127, [0][0][2][0][RTW89_FCC][1][53] = 22, [0][0][2][0][RTW89_FCC][2][53] = 127, [0][0][2][0][RTW89_ETSI][1][53] = 127, @@ -40498,6 +41391,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][53] = 127, [0][0][2][0][RTW89_MKK][0][53] = 127, [0][0][2][0][RTW89_IC][1][53] = 22, + [0][0][2][0][RTW89_IC][2][53] = 70, [0][0][2][0][RTW89_KCC][1][53] = 24, [0][0][2][0][RTW89_KCC][0][53] = 127, [0][0][2][0][RTW89_ACMA][1][53] = 127, @@ -40507,6 +41401,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][53] = 127, [0][0][2][0][RTW89_UK][1][53] = 127, [0][0][2][0][RTW89_UK][0][53] = 127, + [0][0][2][0][RTW89_THAILAND][1][53] = 127, + [0][0][2][0][RTW89_THAILAND][0][53] = 127, [0][0][2][0][RTW89_FCC][1][55] = 22, [0][0][2][0][RTW89_FCC][2][55] = 68, [0][0][2][0][RTW89_ETSI][1][55] = 127, @@ -40514,6 +41410,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][55] = 127, [0][0][2][0][RTW89_MKK][0][55] = 127, [0][0][2][0][RTW89_IC][1][55] = 22, + [0][0][2][0][RTW89_IC][2][55] = 68, [0][0][2][0][RTW89_KCC][1][55] = 26, [0][0][2][0][RTW89_KCC][0][55] = 127, [0][0][2][0][RTW89_ACMA][1][55] = 127, @@ -40523,6 +41420,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][55] = 127, [0][0][2][0][RTW89_UK][1][55] = 127, [0][0][2][0][RTW89_UK][0][55] = 127, + [0][0][2][0][RTW89_THAILAND][1][55] = 127, + [0][0][2][0][RTW89_THAILAND][0][55] = 127, [0][0][2][0][RTW89_FCC][1][57] = 22, [0][0][2][0][RTW89_FCC][2][57] = 68, [0][0][2][0][RTW89_ETSI][1][57] = 127, @@ -40530,6 +41429,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][57] = 127, [0][0][2][0][RTW89_MKK][0][57] = 127, [0][0][2][0][RTW89_IC][1][57] = 22, + [0][0][2][0][RTW89_IC][2][57] = 68, [0][0][2][0][RTW89_KCC][1][57] = 26, [0][0][2][0][RTW89_KCC][0][57] = 127, [0][0][2][0][RTW89_ACMA][1][57] = 127, @@ -40539,6 +41439,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][57] = 127, [0][0][2][0][RTW89_UK][1][57] = 127, [0][0][2][0][RTW89_UK][0][57] = 127, + [0][0][2][0][RTW89_THAILAND][1][57] = 127, + [0][0][2][0][RTW89_THAILAND][0][57] = 127, [0][0][2][0][RTW89_FCC][1][59] = 22, [0][0][2][0][RTW89_FCC][2][59] = 68, [0][0][2][0][RTW89_ETSI][1][59] = 127, @@ -40546,6 +41448,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][59] = 127, [0][0][2][0][RTW89_MKK][0][59] = 127, [0][0][2][0][RTW89_IC][1][59] = 22, + [0][0][2][0][RTW89_IC][2][59] = 68, [0][0][2][0][RTW89_KCC][1][59] = 26, [0][0][2][0][RTW89_KCC][0][59] = 127, [0][0][2][0][RTW89_ACMA][1][59] = 127, @@ -40555,6 +41458,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][59] = 127, [0][0][2][0][RTW89_UK][1][59] = 127, [0][0][2][0][RTW89_UK][0][59] = 127, + [0][0][2][0][RTW89_THAILAND][1][59] = 127, + [0][0][2][0][RTW89_THAILAND][0][59] = 127, [0][0][2][0][RTW89_FCC][1][60] = 22, [0][0][2][0][RTW89_FCC][2][60] = 68, [0][0][2][0][RTW89_ETSI][1][60] = 127, @@ -40562,6 +41467,7 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][60] = 127, [0][0][2][0][RTW89_MKK][0][60] = 127, [0][0][2][0][RTW89_IC][1][60] = 22, + [0][0][2][0][RTW89_IC][2][60] = 68, [0][0][2][0][RTW89_KCC][1][60] = 26, [0][0][2][0][RTW89_KCC][0][60] = 127, [0][0][2][0][RTW89_ACMA][1][60] = 127, @@ -40571,6 +41477,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][60] = 127, [0][0][2][0][RTW89_UK][1][60] = 127, [0][0][2][0][RTW89_UK][0][60] = 127, + [0][0][2][0][RTW89_THAILAND][1][60] = 127, + [0][0][2][0][RTW89_THAILAND][0][60] = 127, [0][0][2][0][RTW89_FCC][1][62] = 22, [0][0][2][0][RTW89_FCC][2][62] = 68, [0][0][2][0][RTW89_ETSI][1][62] = 127, @@ -40578,6 +41486,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][62] = 127, [0][0][2][0][RTW89_MKK][0][62] = 127, [0][0][2][0][RTW89_IC][1][62] = 22, + [0][0][2][0][RTW89_IC][2][62] = 68, [0][0][2][0][RTW89_KCC][1][62] = 26, [0][0][2][0][RTW89_KCC][0][62] = 127, [0][0][2][0][RTW89_ACMA][1][62] = 127, @@ -40587,6 +41496,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][62] = 127, [0][0][2][0][RTW89_UK][1][62] = 127, [0][0][2][0][RTW89_UK][0][62] = 127, + [0][0][2][0][RTW89_THAILAND][1][62] = 127, + [0][0][2][0][RTW89_THAILAND][0][62] = 127, [0][0][2][0][RTW89_FCC][1][64] = 22, [0][0][2][0][RTW89_FCC][2][64] = 68, [0][0][2][0][RTW89_ETSI][1][64] = 127, @@ -40594,6 +41505,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][64] = 127, [0][0][2][0][RTW89_MKK][0][64] = 127, [0][0][2][0][RTW89_IC][1][64] = 22, + [0][0][2][0][RTW89_IC][2][64] = 68, [0][0][2][0][RTW89_KCC][1][64] = 26, [0][0][2][0][RTW89_KCC][0][64] = 127, [0][0][2][0][RTW89_ACMA][1][64] = 127, @@ -40603,6 +41515,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][64] = 127, [0][0][2][0][RTW89_UK][1][64] = 127, [0][0][2][0][RTW89_UK][0][64] = 127, + [0][0][2][0][RTW89_THAILAND][1][64] = 127, + [0][0][2][0][RTW89_THAILAND][0][64] = 127, [0][0][2][0][RTW89_FCC][1][66] = 22, [0][0][2][0][RTW89_FCC][2][66] = 68, [0][0][2][0][RTW89_ETSI][1][66] = 127, @@ -40610,6 +41524,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][66] = 127, [0][0][2][0][RTW89_MKK][0][66] = 127, [0][0][2][0][RTW89_IC][1][66] = 22, + [0][0][2][0][RTW89_IC][2][66] = 68, [0][0][2][0][RTW89_KCC][1][66] = 26, [0][0][2][0][RTW89_KCC][0][66] = 127, [0][0][2][0][RTW89_ACMA][1][66] = 127, @@ -40619,6 +41534,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][66] = 127, [0][0][2][0][RTW89_UK][1][66] = 127, [0][0][2][0][RTW89_UK][0][66] = 127, + [0][0][2][0][RTW89_THAILAND][1][66] = 127, + [0][0][2][0][RTW89_THAILAND][0][66] = 127, [0][0][2][0][RTW89_FCC][1][68] = 22, [0][0][2][0][RTW89_FCC][2][68] = 68, [0][0][2][0][RTW89_ETSI][1][68] = 127, @@ -40626,6 +41543,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][68] = 127, [0][0][2][0][RTW89_MKK][0][68] = 127, [0][0][2][0][RTW89_IC][1][68] = 22, + [0][0][2][0][RTW89_IC][2][68] = 68, [0][0][2][0][RTW89_KCC][1][68] = 26, [0][0][2][0][RTW89_KCC][0][68] = 127, [0][0][2][0][RTW89_ACMA][1][68] = 127, @@ -40635,6 +41553,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][68] = 127, [0][0][2][0][RTW89_UK][1][68] = 127, 
[0][0][2][0][RTW89_UK][0][68] = 127, + [0][0][2][0][RTW89_THAILAND][1][68] = 127, + [0][0][2][0][RTW89_THAILAND][0][68] = 127, [0][0][2][0][RTW89_FCC][1][70] = 24, [0][0][2][0][RTW89_FCC][2][70] = 68, [0][0][2][0][RTW89_ETSI][1][70] = 127, @@ -40642,6 +41562,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][70] = 127, [0][0][2][0][RTW89_MKK][0][70] = 127, [0][0][2][0][RTW89_IC][1][70] = 24, + [0][0][2][0][RTW89_IC][2][70] = 68, [0][0][2][0][RTW89_KCC][1][70] = 26, [0][0][2][0][RTW89_KCC][0][70] = 127, [0][0][2][0][RTW89_ACMA][1][70] = 127, @@ -40651,6 +41572,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][70] = 127, [0][0][2][0][RTW89_UK][1][70] = 127, [0][0][2][0][RTW89_UK][0][70] = 127, + [0][0][2][0][RTW89_THAILAND][1][70] = 127, + [0][0][2][0][RTW89_THAILAND][0][70] = 127, [0][0][2][0][RTW89_FCC][1][72] = 22, [0][0][2][0][RTW89_FCC][2][72] = 68, [0][0][2][0][RTW89_ETSI][1][72] = 127, @@ -40658,6 +41581,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][72] = 127, [0][0][2][0][RTW89_MKK][0][72] = 127, [0][0][2][0][RTW89_IC][1][72] = 22, + [0][0][2][0][RTW89_IC][2][72] = 68, [0][0][2][0][RTW89_KCC][1][72] = 26, [0][0][2][0][RTW89_KCC][0][72] = 127, [0][0][2][0][RTW89_ACMA][1][72] = 127, @@ -40667,6 +41591,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][72] = 127, [0][0][2][0][RTW89_UK][1][72] = 127, [0][0][2][0][RTW89_UK][0][72] = 127, + [0][0][2][0][RTW89_THAILAND][1][72] = 127, + [0][0][2][0][RTW89_THAILAND][0][72] = 127, [0][0][2][0][RTW89_FCC][1][74] = 22, [0][0][2][0][RTW89_FCC][2][74] = 68, [0][0][2][0][RTW89_ETSI][1][74] = 127, @@ -40674,6 +41600,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][74] = 127, [0][0][2][0][RTW89_MKK][0][74] = 127, [0][0][2][0][RTW89_IC][1][74] = 22, + [0][0][2][0][RTW89_IC][2][74] = 68, [0][0][2][0][RTW89_KCC][1][74] = 26, [0][0][2][0][RTW89_KCC][0][74] = 127, [0][0][2][0][RTW89_ACMA][1][74] = 127, @@ -40683,6 +41610,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][74] = 127, [0][0][2][0][RTW89_UK][1][74] = 127, [0][0][2][0][RTW89_UK][0][74] = 127, + [0][0][2][0][RTW89_THAILAND][1][74] = 127, + [0][0][2][0][RTW89_THAILAND][0][74] = 127, [0][0][2][0][RTW89_FCC][1][75] = 22, [0][0][2][0][RTW89_FCC][2][75] = 68, [0][0][2][0][RTW89_ETSI][1][75] = 127, @@ -40690,6 +41619,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][75] = 127, [0][0][2][0][RTW89_MKK][0][75] = 127, [0][0][2][0][RTW89_IC][1][75] = 22, + [0][0][2][0][RTW89_IC][2][75] = 68, [0][0][2][0][RTW89_KCC][1][75] = 26, [0][0][2][0][RTW89_KCC][0][75] = 127, [0][0][2][0][RTW89_ACMA][1][75] = 127, @@ -40699,6 +41629,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][75] = 127, [0][0][2][0][RTW89_UK][1][75] = 127, [0][0][2][0][RTW89_UK][0][75] = 127, + [0][0][2][0][RTW89_THAILAND][1][75] = 127, + [0][0][2][0][RTW89_THAILAND][0][75] = 127, [0][0][2][0][RTW89_FCC][1][77] = 22, [0][0][2][0][RTW89_FCC][2][77] = 68, [0][0][2][0][RTW89_ETSI][1][77] = 127, @@ -40706,6 +41638,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][77] = 127, [0][0][2][0][RTW89_MKK][0][77] = 127, [0][0][2][0][RTW89_IC][1][77] = 22, + [0][0][2][0][RTW89_IC][2][77] = 68, [0][0][2][0][RTW89_KCC][1][77] 
= 26, [0][0][2][0][RTW89_KCC][0][77] = 127, [0][0][2][0][RTW89_ACMA][1][77] = 127, @@ -40715,6 +41648,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][77] = 127, [0][0][2][0][RTW89_UK][1][77] = 127, [0][0][2][0][RTW89_UK][0][77] = 127, + [0][0][2][0][RTW89_THAILAND][1][77] = 127, + [0][0][2][0][RTW89_THAILAND][0][77] = 127, [0][0][2][0][RTW89_FCC][1][79] = 22, [0][0][2][0][RTW89_FCC][2][79] = 68, [0][0][2][0][RTW89_ETSI][1][79] = 127, @@ -40722,6 +41657,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][79] = 127, [0][0][2][0][RTW89_MKK][0][79] = 127, [0][0][2][0][RTW89_IC][1][79] = 22, + [0][0][2][0][RTW89_IC][2][79] = 68, [0][0][2][0][RTW89_KCC][1][79] = 26, [0][0][2][0][RTW89_KCC][0][79] = 127, [0][0][2][0][RTW89_ACMA][1][79] = 127, @@ -40731,6 +41667,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][79] = 127, [0][0][2][0][RTW89_UK][1][79] = 127, [0][0][2][0][RTW89_UK][0][79] = 127, + [0][0][2][0][RTW89_THAILAND][1][79] = 127, + [0][0][2][0][RTW89_THAILAND][0][79] = 127, [0][0][2][0][RTW89_FCC][1][81] = 22, [0][0][2][0][RTW89_FCC][2][81] = 68, [0][0][2][0][RTW89_ETSI][1][81] = 127, @@ -40738,6 +41676,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][81] = 127, [0][0][2][0][RTW89_MKK][0][81] = 127, [0][0][2][0][RTW89_IC][1][81] = 22, + [0][0][2][0][RTW89_IC][2][81] = 68, [0][0][2][0][RTW89_KCC][1][81] = 26, [0][0][2][0][RTW89_KCC][0][81] = 127, [0][0][2][0][RTW89_ACMA][1][81] = 127, @@ -40747,6 +41686,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][81] = 127, [0][0][2][0][RTW89_UK][1][81] = 127, [0][0][2][0][RTW89_UK][0][81] = 127, + [0][0][2][0][RTW89_THAILAND][1][81] = 127, + [0][0][2][0][RTW89_THAILAND][0][81] = 127, [0][0][2][0][RTW89_FCC][1][83] = 22, [0][0][2][0][RTW89_FCC][2][83] = 68, [0][0][2][0][RTW89_ETSI][1][83] = 127, @@ -40754,6 +41695,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][83] = 127, [0][0][2][0][RTW89_MKK][0][83] = 127, [0][0][2][0][RTW89_IC][1][83] = 22, + [0][0][2][0][RTW89_IC][2][83] = 68, [0][0][2][0][RTW89_KCC][1][83] = 32, [0][0][2][0][RTW89_KCC][0][83] = 127, [0][0][2][0][RTW89_ACMA][1][83] = 127, @@ -40763,6 +41705,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][83] = 127, [0][0][2][0][RTW89_UK][1][83] = 127, [0][0][2][0][RTW89_UK][0][83] = 127, + [0][0][2][0][RTW89_THAILAND][1][83] = 127, + [0][0][2][0][RTW89_THAILAND][0][83] = 127, [0][0][2][0][RTW89_FCC][1][85] = 22, [0][0][2][0][RTW89_FCC][2][85] = 68, [0][0][2][0][RTW89_ETSI][1][85] = 127, @@ -40770,6 +41714,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][85] = 127, [0][0][2][0][RTW89_MKK][0][85] = 127, [0][0][2][0][RTW89_IC][1][85] = 22, + [0][0][2][0][RTW89_IC][2][85] = 68, [0][0][2][0][RTW89_KCC][1][85] = 32, [0][0][2][0][RTW89_KCC][0][85] = 127, [0][0][2][0][RTW89_ACMA][1][85] = 127, @@ -40779,6 +41724,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][85] = 127, [0][0][2][0][RTW89_UK][1][85] = 127, [0][0][2][0][RTW89_UK][0][85] = 127, + [0][0][2][0][RTW89_THAILAND][1][85] = 127, + [0][0][2][0][RTW89_THAILAND][0][85] = 127, [0][0][2][0][RTW89_FCC][1][87] = 22, [0][0][2][0][RTW89_FCC][2][87] = 127, [0][0][2][0][RTW89_ETSI][1][87] = 127, @@ -40786,6 
+41733,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][87] = 127, [0][0][2][0][RTW89_MKK][0][87] = 127, [0][0][2][0][RTW89_IC][1][87] = 22, + [0][0][2][0][RTW89_IC][2][87] = 127, [0][0][2][0][RTW89_KCC][1][87] = 32, [0][0][2][0][RTW89_KCC][0][87] = 127, [0][0][2][0][RTW89_ACMA][1][87] = 127, @@ -40795,6 +41743,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][87] = 127, [0][0][2][0][RTW89_UK][1][87] = 127, [0][0][2][0][RTW89_UK][0][87] = 127, + [0][0][2][0][RTW89_THAILAND][1][87] = 127, + [0][0][2][0][RTW89_THAILAND][0][87] = 127, [0][0][2][0][RTW89_FCC][1][89] = 22, [0][0][2][0][RTW89_FCC][2][89] = 127, [0][0][2][0][RTW89_ETSI][1][89] = 127, @@ -40802,6 +41752,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][89] = 127, [0][0][2][0][RTW89_MKK][0][89] = 127, [0][0][2][0][RTW89_IC][1][89] = 22, + [0][0][2][0][RTW89_IC][2][89] = 127, [0][0][2][0][RTW89_KCC][1][89] = 32, [0][0][2][0][RTW89_KCC][0][89] = 127, [0][0][2][0][RTW89_ACMA][1][89] = 127, @@ -40811,6 +41762,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][89] = 127, [0][0][2][0][RTW89_UK][1][89] = 127, [0][0][2][0][RTW89_UK][0][89] = 127, + [0][0][2][0][RTW89_THAILAND][1][89] = 127, + [0][0][2][0][RTW89_THAILAND][0][89] = 127, [0][0][2][0][RTW89_FCC][1][90] = 22, [0][0][2][0][RTW89_FCC][2][90] = 127, [0][0][2][0][RTW89_ETSI][1][90] = 127, @@ -40818,6 +41771,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][90] = 127, [0][0][2][0][RTW89_MKK][0][90] = 127, [0][0][2][0][RTW89_IC][1][90] = 22, + [0][0][2][0][RTW89_IC][2][90] = 127, [0][0][2][0][RTW89_KCC][1][90] = 32, [0][0][2][0][RTW89_KCC][0][90] = 127, [0][0][2][0][RTW89_ACMA][1][90] = 127, @@ -40827,6 +41781,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][90] = 127, [0][0][2][0][RTW89_UK][1][90] = 127, [0][0][2][0][RTW89_UK][0][90] = 127, + [0][0][2][0][RTW89_THAILAND][1][90] = 127, + [0][0][2][0][RTW89_THAILAND][0][90] = 127, [0][0][2][0][RTW89_FCC][1][92] = 22, [0][0][2][0][RTW89_FCC][2][92] = 127, [0][0][2][0][RTW89_ETSI][1][92] = 127, @@ -40834,6 +41790,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][92] = 127, [0][0][2][0][RTW89_MKK][0][92] = 127, [0][0][2][0][RTW89_IC][1][92] = 22, + [0][0][2][0][RTW89_IC][2][92] = 127, [0][0][2][0][RTW89_KCC][1][92] = 32, [0][0][2][0][RTW89_KCC][0][92] = 127, [0][0][2][0][RTW89_ACMA][1][92] = 127, @@ -40843,6 +41800,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][92] = 127, [0][0][2][0][RTW89_UK][1][92] = 127, [0][0][2][0][RTW89_UK][0][92] = 127, + [0][0][2][0][RTW89_THAILAND][1][92] = 127, + [0][0][2][0][RTW89_THAILAND][0][92] = 127, [0][0][2][0][RTW89_FCC][1][94] = 22, [0][0][2][0][RTW89_FCC][2][94] = 127, [0][0][2][0][RTW89_ETSI][1][94] = 127, @@ -40850,6 +41809,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][94] = 127, [0][0][2][0][RTW89_MKK][0][94] = 127, [0][0][2][0][RTW89_IC][1][94] = 22, + [0][0][2][0][RTW89_IC][2][94] = 127, [0][0][2][0][RTW89_KCC][1][94] = 32, [0][0][2][0][RTW89_KCC][0][94] = 127, [0][0][2][0][RTW89_ACMA][1][94] = 127, @@ -40859,6 +41819,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][94] = 127, [0][0][2][0][RTW89_UK][1][94] 
= 127, [0][0][2][0][RTW89_UK][0][94] = 127, + [0][0][2][0][RTW89_THAILAND][1][94] = 127, + [0][0][2][0][RTW89_THAILAND][0][94] = 127, [0][0][2][0][RTW89_FCC][1][96] = 22, [0][0][2][0][RTW89_FCC][2][96] = 127, [0][0][2][0][RTW89_ETSI][1][96] = 127, @@ -40866,6 +41828,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][96] = 127, [0][0][2][0][RTW89_MKK][0][96] = 127, [0][0][2][0][RTW89_IC][1][96] = 22, + [0][0][2][0][RTW89_IC][2][96] = 127, [0][0][2][0][RTW89_KCC][1][96] = 32, [0][0][2][0][RTW89_KCC][0][96] = 127, [0][0][2][0][RTW89_ACMA][1][96] = 127, @@ -40875,6 +41838,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][96] = 127, [0][0][2][0][RTW89_UK][1][96] = 127, [0][0][2][0][RTW89_UK][0][96] = 127, + [0][0][2][0][RTW89_THAILAND][1][96] = 127, + [0][0][2][0][RTW89_THAILAND][0][96] = 127, [0][0][2][0][RTW89_FCC][1][98] = 22, [0][0][2][0][RTW89_FCC][2][98] = 127, [0][0][2][0][RTW89_ETSI][1][98] = 127, @@ -40882,6 +41847,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][98] = 127, [0][0][2][0][RTW89_MKK][0][98] = 127, [0][0][2][0][RTW89_IC][1][98] = 22, + [0][0][2][0][RTW89_IC][2][98] = 127, [0][0][2][0][RTW89_KCC][1][98] = 32, [0][0][2][0][RTW89_KCC][0][98] = 127, [0][0][2][0][RTW89_ACMA][1][98] = 127, @@ -40891,6 +41857,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][98] = 127, [0][0][2][0][RTW89_UK][1][98] = 127, [0][0][2][0][RTW89_UK][0][98] = 127, + [0][0][2][0][RTW89_THAILAND][1][98] = 127, + [0][0][2][0][RTW89_THAILAND][0][98] = 127, [0][0][2][0][RTW89_FCC][1][100] = 22, [0][0][2][0][RTW89_FCC][2][100] = 127, [0][0][2][0][RTW89_ETSI][1][100] = 127, @@ -40898,6 +41866,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][100] = 127, [0][0][2][0][RTW89_MKK][0][100] = 127, [0][0][2][0][RTW89_IC][1][100] = 22, + [0][0][2][0][RTW89_IC][2][100] = 127, [0][0][2][0][RTW89_KCC][1][100] = 32, [0][0][2][0][RTW89_KCC][0][100] = 127, [0][0][2][0][RTW89_ACMA][1][100] = 127, @@ -40907,6 +41876,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][100] = 127, [0][0][2][0][RTW89_UK][1][100] = 127, [0][0][2][0][RTW89_UK][0][100] = 127, + [0][0][2][0][RTW89_THAILAND][1][100] = 127, + [0][0][2][0][RTW89_THAILAND][0][100] = 127, [0][0][2][0][RTW89_FCC][1][102] = 22, [0][0][2][0][RTW89_FCC][2][102] = 127, [0][0][2][0][RTW89_ETSI][1][102] = 127, @@ -40914,6 +41885,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][102] = 127, [0][0][2][0][RTW89_MKK][0][102] = 127, [0][0][2][0][RTW89_IC][1][102] = 22, + [0][0][2][0][RTW89_IC][2][102] = 127, [0][0][2][0][RTW89_KCC][1][102] = 32, [0][0][2][0][RTW89_KCC][0][102] = 127, [0][0][2][0][RTW89_ACMA][1][102] = 127, @@ -40923,6 +41895,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][102] = 127, [0][0][2][0][RTW89_UK][1][102] = 127, [0][0][2][0][RTW89_UK][0][102] = 127, + [0][0][2][0][RTW89_THAILAND][1][102] = 127, + [0][0][2][0][RTW89_THAILAND][0][102] = 127, [0][0][2][0][RTW89_FCC][1][104] = 22, [0][0][2][0][RTW89_FCC][2][104] = 127, [0][0][2][0][RTW89_ETSI][1][104] = 127, @@ -40930,6 +41904,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][104] = 127, [0][0][2][0][RTW89_MKK][0][104] = 127, [0][0][2][0][RTW89_IC][1][104] = 22, + 
[0][0][2][0][RTW89_IC][2][104] = 127, [0][0][2][0][RTW89_KCC][1][104] = 32, [0][0][2][0][RTW89_KCC][0][104] = 127, [0][0][2][0][RTW89_ACMA][1][104] = 127, @@ -40939,6 +41914,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][104] = 127, [0][0][2][0][RTW89_UK][1][104] = 127, [0][0][2][0][RTW89_UK][0][104] = 127, + [0][0][2][0][RTW89_THAILAND][1][104] = 127, + [0][0][2][0][RTW89_THAILAND][0][104] = 127, [0][0][2][0][RTW89_FCC][1][105] = 22, [0][0][2][0][RTW89_FCC][2][105] = 127, [0][0][2][0][RTW89_ETSI][1][105] = 127, @@ -40946,6 +41923,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][105] = 127, [0][0][2][0][RTW89_MKK][0][105] = 127, [0][0][2][0][RTW89_IC][1][105] = 22, + [0][0][2][0][RTW89_IC][2][105] = 127, [0][0][2][0][RTW89_KCC][1][105] = 32, [0][0][2][0][RTW89_KCC][0][105] = 127, [0][0][2][0][RTW89_ACMA][1][105] = 127, @@ -40955,6 +41933,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][105] = 127, [0][0][2][0][RTW89_UK][1][105] = 127, [0][0][2][0][RTW89_UK][0][105] = 127, + [0][0][2][0][RTW89_THAILAND][1][105] = 127, + [0][0][2][0][RTW89_THAILAND][0][105] = 127, [0][0][2][0][RTW89_FCC][1][107] = 24, [0][0][2][0][RTW89_FCC][2][107] = 127, [0][0][2][0][RTW89_ETSI][1][107] = 127, @@ -40962,6 +41942,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][107] = 127, [0][0][2][0][RTW89_MKK][0][107] = 127, [0][0][2][0][RTW89_IC][1][107] = 24, + [0][0][2][0][RTW89_IC][2][107] = 127, [0][0][2][0][RTW89_KCC][1][107] = 32, [0][0][2][0][RTW89_KCC][0][107] = 127, [0][0][2][0][RTW89_ACMA][1][107] = 127, @@ -40971,6 +41952,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][107] = 127, [0][0][2][0][RTW89_UK][1][107] = 127, [0][0][2][0][RTW89_UK][0][107] = 127, + [0][0][2][0][RTW89_THAILAND][1][107] = 127, + [0][0][2][0][RTW89_THAILAND][0][107] = 127, [0][0][2][0][RTW89_FCC][1][109] = 24, [0][0][2][0][RTW89_FCC][2][109] = 127, [0][0][2][0][RTW89_ETSI][1][109] = 127, @@ -40978,6 +41961,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][109] = 127, [0][0][2][0][RTW89_MKK][0][109] = 127, [0][0][2][0][RTW89_IC][1][109] = 24, + [0][0][2][0][RTW89_IC][2][109] = 127, [0][0][2][0][RTW89_KCC][1][109] = 32, [0][0][2][0][RTW89_KCC][0][109] = 127, [0][0][2][0][RTW89_ACMA][1][109] = 127, @@ -40987,6 +41971,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][109] = 127, [0][0][2][0][RTW89_UK][1][109] = 127, [0][0][2][0][RTW89_UK][0][109] = 127, + [0][0][2][0][RTW89_THAILAND][1][109] = 127, + [0][0][2][0][RTW89_THAILAND][0][109] = 127, [0][0][2][0][RTW89_FCC][1][111] = 127, [0][0][2][0][RTW89_FCC][2][111] = 127, [0][0][2][0][RTW89_ETSI][1][111] = 127, @@ -40994,6 +41980,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][111] = 127, [0][0][2][0][RTW89_MKK][0][111] = 127, [0][0][2][0][RTW89_IC][1][111] = 127, + [0][0][2][0][RTW89_IC][2][111] = 127, [0][0][2][0][RTW89_KCC][1][111] = 127, [0][0][2][0][RTW89_KCC][0][111] = 127, [0][0][2][0][RTW89_ACMA][1][111] = 127, @@ -41003,6 +41990,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][111] = 127, [0][0][2][0][RTW89_UK][1][111] = 127, [0][0][2][0][RTW89_UK][0][111] = 127, + [0][0][2][0][RTW89_THAILAND][1][111] = 127, + 
[0][0][2][0][RTW89_THAILAND][0][111] = 127, [0][0][2][0][RTW89_FCC][1][113] = 127, [0][0][2][0][RTW89_FCC][2][113] = 127, [0][0][2][0][RTW89_ETSI][1][113] = 127, @@ -41010,6 +41999,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][113] = 127, [0][0][2][0][RTW89_MKK][0][113] = 127, [0][0][2][0][RTW89_IC][1][113] = 127, + [0][0][2][0][RTW89_IC][2][113] = 127, [0][0][2][0][RTW89_KCC][1][113] = 127, [0][0][2][0][RTW89_KCC][0][113] = 127, [0][0][2][0][RTW89_ACMA][1][113] = 127, @@ -41019,6 +42009,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][113] = 127, [0][0][2][0][RTW89_UK][1][113] = 127, [0][0][2][0][RTW89_UK][0][113] = 127, + [0][0][2][0][RTW89_THAILAND][1][113] = 127, + [0][0][2][0][RTW89_THAILAND][0][113] = 127, [0][0][2][0][RTW89_FCC][1][115] = 127, [0][0][2][0][RTW89_FCC][2][115] = 127, [0][0][2][0][RTW89_ETSI][1][115] = 127, @@ -41026,6 +42018,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][115] = 127, [0][0][2][0][RTW89_MKK][0][115] = 127, [0][0][2][0][RTW89_IC][1][115] = 127, + [0][0][2][0][RTW89_IC][2][115] = 127, [0][0][2][0][RTW89_KCC][1][115] = 127, [0][0][2][0][RTW89_KCC][0][115] = 127, [0][0][2][0][RTW89_ACMA][1][115] = 127, @@ -41035,6 +42028,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][115] = 127, [0][0][2][0][RTW89_UK][1][115] = 127, [0][0][2][0][RTW89_UK][0][115] = 127, + [0][0][2][0][RTW89_THAILAND][1][115] = 127, + [0][0][2][0][RTW89_THAILAND][0][115] = 127, [0][0][2][0][RTW89_FCC][1][117] = 127, [0][0][2][0][RTW89_FCC][2][117] = 127, [0][0][2][0][RTW89_ETSI][1][117] = 127, @@ -41042,6 +42037,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][117] = 127, [0][0][2][0][RTW89_MKK][0][117] = 127, [0][0][2][0][RTW89_IC][1][117] = 127, + [0][0][2][0][RTW89_IC][2][117] = 127, [0][0][2][0][RTW89_KCC][1][117] = 127, [0][0][2][0][RTW89_KCC][0][117] = 127, [0][0][2][0][RTW89_ACMA][1][117] = 127, @@ -41051,6 +42047,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][117] = 127, [0][0][2][0][RTW89_UK][1][117] = 127, [0][0][2][0][RTW89_UK][0][117] = 127, + [0][0][2][0][RTW89_THAILAND][1][117] = 127, + [0][0][2][0][RTW89_THAILAND][0][117] = 127, [0][0][2][0][RTW89_FCC][1][119] = 127, [0][0][2][0][RTW89_FCC][2][119] = 127, [0][0][2][0][RTW89_ETSI][1][119] = 127, @@ -41058,6 +42056,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][1][119] = 127, [0][0][2][0][RTW89_MKK][0][119] = 127, [0][0][2][0][RTW89_IC][1][119] = 127, + [0][0][2][0][RTW89_IC][2][119] = 127, [0][0][2][0][RTW89_KCC][1][119] = 127, [0][0][2][0][RTW89_KCC][0][119] = 127, [0][0][2][0][RTW89_ACMA][1][119] = 127, @@ -41067,6 +42066,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_QATAR][0][119] = 127, [0][0][2][0][RTW89_UK][1][119] = 127, [0][0][2][0][RTW89_UK][0][119] = 127, + [0][0][2][0][RTW89_THAILAND][1][119] = 127, + [0][0][2][0][RTW89_THAILAND][0][119] = 127, [0][1][2][0][RTW89_FCC][1][0] = -2, [0][1][2][0][RTW89_FCC][2][0] = 54, [0][1][2][0][RTW89_ETSI][1][0] = 54, @@ -41074,6 +42075,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][0] = 56, [0][1][2][0][RTW89_MKK][0][0] = 16, [0][1][2][0][RTW89_IC][1][0] = -2, + [0][1][2][0][RTW89_IC][2][0] = 54, [0][1][2][0][RTW89_KCC][1][0] = 12, 
[0][1][2][0][RTW89_KCC][0][0] = 10, [0][1][2][0][RTW89_ACMA][1][0] = 54, @@ -41083,6 +42085,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][0] = 18, [0][1][2][0][RTW89_UK][1][0] = 54, [0][1][2][0][RTW89_UK][0][0] = 18, + [0][1][2][0][RTW89_THAILAND][1][0] = 44, + [0][1][2][0][RTW89_THAILAND][0][0] = -2, [0][1][2][0][RTW89_FCC][1][2] = -4, [0][1][2][0][RTW89_FCC][2][2] = 54, [0][1][2][0][RTW89_ETSI][1][2] = 54, @@ -41090,6 +42094,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][2] = 54, [0][1][2][0][RTW89_MKK][0][2] = 16, [0][1][2][0][RTW89_IC][1][2] = -4, + [0][1][2][0][RTW89_IC][2][2] = 54, [0][1][2][0][RTW89_KCC][1][2] = 12, [0][1][2][0][RTW89_KCC][0][2] = 12, [0][1][2][0][RTW89_ACMA][1][2] = 54, @@ -41099,6 +42104,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][2] = 18, [0][1][2][0][RTW89_UK][1][2] = 54, [0][1][2][0][RTW89_UK][0][2] = 18, + [0][1][2][0][RTW89_THAILAND][1][2] = 44, + [0][1][2][0][RTW89_THAILAND][0][2] = -4, [0][1][2][0][RTW89_FCC][1][4] = -4, [0][1][2][0][RTW89_FCC][2][4] = 54, [0][1][2][0][RTW89_ETSI][1][4] = 54, @@ -41106,6 +42113,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][4] = 54, [0][1][2][0][RTW89_MKK][0][4] = 16, [0][1][2][0][RTW89_IC][1][4] = -4, + [0][1][2][0][RTW89_IC][2][4] = 54, [0][1][2][0][RTW89_KCC][1][4] = 12, [0][1][2][0][RTW89_KCC][0][4] = 12, [0][1][2][0][RTW89_ACMA][1][4] = 54, @@ -41115,6 +42123,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][4] = 18, [0][1][2][0][RTW89_UK][1][4] = 54, [0][1][2][0][RTW89_UK][0][4] = 18, + [0][1][2][0][RTW89_THAILAND][1][4] = 44, + [0][1][2][0][RTW89_THAILAND][0][4] = -4, [0][1][2][0][RTW89_FCC][1][6] = -4, [0][1][2][0][RTW89_FCC][2][6] = 54, [0][1][2][0][RTW89_ETSI][1][6] = 54, @@ -41122,6 +42132,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][6] = 54, [0][1][2][0][RTW89_MKK][0][6] = 16, [0][1][2][0][RTW89_IC][1][6] = -4, + [0][1][2][0][RTW89_IC][2][6] = 54, [0][1][2][0][RTW89_KCC][1][6] = 12, [0][1][2][0][RTW89_KCC][0][6] = 12, [0][1][2][0][RTW89_ACMA][1][6] = 54, @@ -41131,6 +42142,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][6] = 18, [0][1][2][0][RTW89_UK][1][6] = 54, [0][1][2][0][RTW89_UK][0][6] = 18, + [0][1][2][0][RTW89_THAILAND][1][6] = 44, + [0][1][2][0][RTW89_THAILAND][0][6] = -4, [0][1][2][0][RTW89_FCC][1][8] = -4, [0][1][2][0][RTW89_FCC][2][8] = 54, [0][1][2][0][RTW89_ETSI][1][8] = 54, @@ -41138,6 +42151,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][8] = 54, [0][1][2][0][RTW89_MKK][0][8] = 16, [0][1][2][0][RTW89_IC][1][8] = -4, + [0][1][2][0][RTW89_IC][2][8] = 54, [0][1][2][0][RTW89_KCC][1][8] = 12, [0][1][2][0][RTW89_KCC][0][8] = 12, [0][1][2][0][RTW89_ACMA][1][8] = 54, @@ -41147,6 +42161,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][8] = 18, [0][1][2][0][RTW89_UK][1][8] = 54, [0][1][2][0][RTW89_UK][0][8] = 18, + [0][1][2][0][RTW89_THAILAND][1][8] = 44, + [0][1][2][0][RTW89_THAILAND][0][8] = -4, [0][1][2][0][RTW89_FCC][1][10] = -4, [0][1][2][0][RTW89_FCC][2][10] = 54, [0][1][2][0][RTW89_ETSI][1][10] = 54, @@ -41154,6 +42170,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][10] = 54, 
[0][1][2][0][RTW89_MKK][0][10] = 16, [0][1][2][0][RTW89_IC][1][10] = -4, + [0][1][2][0][RTW89_IC][2][10] = 54, [0][1][2][0][RTW89_KCC][1][10] = 12, [0][1][2][0][RTW89_KCC][0][10] = 12, [0][1][2][0][RTW89_ACMA][1][10] = 54, @@ -41163,6 +42180,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][10] = 18, [0][1][2][0][RTW89_UK][1][10] = 54, [0][1][2][0][RTW89_UK][0][10] = 18, + [0][1][2][0][RTW89_THAILAND][1][10] = 44, + [0][1][2][0][RTW89_THAILAND][0][10] = -4, [0][1][2][0][RTW89_FCC][1][12] = -4, [0][1][2][0][RTW89_FCC][2][12] = 54, [0][1][2][0][RTW89_ETSI][1][12] = 54, @@ -41170,6 +42189,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][12] = 54, [0][1][2][0][RTW89_MKK][0][12] = 16, [0][1][2][0][RTW89_IC][1][12] = -4, + [0][1][2][0][RTW89_IC][2][12] = 54, [0][1][2][0][RTW89_KCC][1][12] = 12, [0][1][2][0][RTW89_KCC][0][12] = 12, [0][1][2][0][RTW89_ACMA][1][12] = 54, @@ -41179,6 +42199,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][12] = 18, [0][1][2][0][RTW89_UK][1][12] = 54, [0][1][2][0][RTW89_UK][0][12] = 18, + [0][1][2][0][RTW89_THAILAND][1][12] = 44, + [0][1][2][0][RTW89_THAILAND][0][12] = -4, [0][1][2][0][RTW89_FCC][1][14] = -4, [0][1][2][0][RTW89_FCC][2][14] = 54, [0][1][2][0][RTW89_ETSI][1][14] = 54, @@ -41186,6 +42208,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][14] = 54, [0][1][2][0][RTW89_MKK][0][14] = 16, [0][1][2][0][RTW89_IC][1][14] = -4, + [0][1][2][0][RTW89_IC][2][14] = 54, [0][1][2][0][RTW89_KCC][1][14] = 12, [0][1][2][0][RTW89_KCC][0][14] = 12, [0][1][2][0][RTW89_ACMA][1][14] = 54, @@ -41195,6 +42218,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][14] = 18, [0][1][2][0][RTW89_UK][1][14] = 54, [0][1][2][0][RTW89_UK][0][14] = 18, + [0][1][2][0][RTW89_THAILAND][1][14] = 44, + [0][1][2][0][RTW89_THAILAND][0][14] = -4, [0][1][2][0][RTW89_FCC][1][15] = -4, [0][1][2][0][RTW89_FCC][2][15] = 54, [0][1][2][0][RTW89_ETSI][1][15] = 54, @@ -41202,6 +42227,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][15] = 54, [0][1][2][0][RTW89_MKK][0][15] = 16, [0][1][2][0][RTW89_IC][1][15] = -4, + [0][1][2][0][RTW89_IC][2][15] = 54, [0][1][2][0][RTW89_KCC][1][15] = 12, [0][1][2][0][RTW89_KCC][0][15] = 12, [0][1][2][0][RTW89_ACMA][1][15] = 54, @@ -41211,6 +42237,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][15] = 18, [0][1][2][0][RTW89_UK][1][15] = 54, [0][1][2][0][RTW89_UK][0][15] = 18, + [0][1][2][0][RTW89_THAILAND][1][15] = 44, + [0][1][2][0][RTW89_THAILAND][0][15] = -4, [0][1][2][0][RTW89_FCC][1][17] = -4, [0][1][2][0][RTW89_FCC][2][17] = 54, [0][1][2][0][RTW89_ETSI][1][17] = 54, @@ -41218,6 +42246,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][17] = 54, [0][1][2][0][RTW89_MKK][0][17] = 16, [0][1][2][0][RTW89_IC][1][17] = -4, + [0][1][2][0][RTW89_IC][2][17] = 54, [0][1][2][0][RTW89_KCC][1][17] = 12, [0][1][2][0][RTW89_KCC][0][17] = 12, [0][1][2][0][RTW89_ACMA][1][17] = 54, @@ -41227,6 +42256,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][17] = 18, [0][1][2][0][RTW89_UK][1][17] = 54, [0][1][2][0][RTW89_UK][0][17] = 18, + [0][1][2][0][RTW89_THAILAND][1][17] = 44, + [0][1][2][0][RTW89_THAILAND][0][17] = -4, [0][1][2][0][RTW89_FCC][1][19] = -4, 
[0][1][2][0][RTW89_FCC][2][19] = 54, [0][1][2][0][RTW89_ETSI][1][19] = 54, @@ -41234,6 +42265,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][19] = 54, [0][1][2][0][RTW89_MKK][0][19] = 16, [0][1][2][0][RTW89_IC][1][19] = -4, + [0][1][2][0][RTW89_IC][2][19] = 54, [0][1][2][0][RTW89_KCC][1][19] = 12, [0][1][2][0][RTW89_KCC][0][19] = 12, [0][1][2][0][RTW89_ACMA][1][19] = 54, @@ -41243,6 +42275,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][19] = 18, [0][1][2][0][RTW89_UK][1][19] = 54, [0][1][2][0][RTW89_UK][0][19] = 18, + [0][1][2][0][RTW89_THAILAND][1][19] = 44, + [0][1][2][0][RTW89_THAILAND][0][19] = -4, [0][1][2][0][RTW89_FCC][1][21] = -4, [0][1][2][0][RTW89_FCC][2][21] = 54, [0][1][2][0][RTW89_ETSI][1][21] = 54, @@ -41250,6 +42284,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][21] = 54, [0][1][2][0][RTW89_MKK][0][21] = 16, [0][1][2][0][RTW89_IC][1][21] = -4, + [0][1][2][0][RTW89_IC][2][21] = 54, [0][1][2][0][RTW89_KCC][1][21] = 12, [0][1][2][0][RTW89_KCC][0][21] = 12, [0][1][2][0][RTW89_ACMA][1][21] = 54, @@ -41259,6 +42294,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][21] = 18, [0][1][2][0][RTW89_UK][1][21] = 54, [0][1][2][0][RTW89_UK][0][21] = 18, + [0][1][2][0][RTW89_THAILAND][1][21] = 44, + [0][1][2][0][RTW89_THAILAND][0][21] = -4, [0][1][2][0][RTW89_FCC][1][23] = -4, [0][1][2][0][RTW89_FCC][2][23] = 68, [0][1][2][0][RTW89_ETSI][1][23] = 54, @@ -41266,6 +42303,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][23] = 54, [0][1][2][0][RTW89_MKK][0][23] = 16, [0][1][2][0][RTW89_IC][1][23] = -4, + [0][1][2][0][RTW89_IC][2][23] = 68, [0][1][2][0][RTW89_KCC][1][23] = 12, [0][1][2][0][RTW89_KCC][0][23] = 10, [0][1][2][0][RTW89_ACMA][1][23] = 54, @@ -41275,6 +42313,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][23] = 18, [0][1][2][0][RTW89_UK][1][23] = 54, [0][1][2][0][RTW89_UK][0][23] = 18, + [0][1][2][0][RTW89_THAILAND][1][23] = 44, + [0][1][2][0][RTW89_THAILAND][0][23] = -4, [0][1][2][0][RTW89_FCC][1][25] = -4, [0][1][2][0][RTW89_FCC][2][25] = 68, [0][1][2][0][RTW89_ETSI][1][25] = 54, @@ -41282,6 +42322,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][25] = 54, [0][1][2][0][RTW89_MKK][0][25] = 16, [0][1][2][0][RTW89_IC][1][25] = -4, + [0][1][2][0][RTW89_IC][2][25] = 68, [0][1][2][0][RTW89_KCC][1][25] = 12, [0][1][2][0][RTW89_KCC][0][25] = 14, [0][1][2][0][RTW89_ACMA][1][25] = 54, @@ -41291,6 +42332,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][25] = 18, [0][1][2][0][RTW89_UK][1][25] = 54, [0][1][2][0][RTW89_UK][0][25] = 18, + [0][1][2][0][RTW89_THAILAND][1][25] = 42, + [0][1][2][0][RTW89_THAILAND][0][25] = -4, [0][1][2][0][RTW89_FCC][1][27] = -4, [0][1][2][0][RTW89_FCC][2][27] = 68, [0][1][2][0][RTW89_ETSI][1][27] = 54, @@ -41298,6 +42341,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][27] = 54, [0][1][2][0][RTW89_MKK][0][27] = 16, [0][1][2][0][RTW89_IC][1][27] = -4, + [0][1][2][0][RTW89_IC][2][27] = 68, [0][1][2][0][RTW89_KCC][1][27] = 12, [0][1][2][0][RTW89_KCC][0][27] = 14, [0][1][2][0][RTW89_ACMA][1][27] = 54, @@ -41307,6 +42351,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][27] = 
18, [0][1][2][0][RTW89_UK][1][27] = 54, [0][1][2][0][RTW89_UK][0][27] = 18, + [0][1][2][0][RTW89_THAILAND][1][27] = 42, + [0][1][2][0][RTW89_THAILAND][0][27] = -4, [0][1][2][0][RTW89_FCC][1][29] = -4, [0][1][2][0][RTW89_FCC][2][29] = 68, [0][1][2][0][RTW89_ETSI][1][29] = 54, @@ -41314,6 +42360,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][29] = 54, [0][1][2][0][RTW89_MKK][0][29] = 16, [0][1][2][0][RTW89_IC][1][29] = -4, + [0][1][2][0][RTW89_IC][2][29] = 68, [0][1][2][0][RTW89_KCC][1][29] = 12, [0][1][2][0][RTW89_KCC][0][29] = 14, [0][1][2][0][RTW89_ACMA][1][29] = 54, @@ -41323,6 +42370,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][29] = 18, [0][1][2][0][RTW89_UK][1][29] = 54, [0][1][2][0][RTW89_UK][0][29] = 18, + [0][1][2][0][RTW89_THAILAND][1][29] = 42, + [0][1][2][0][RTW89_THAILAND][0][29] = -4, [0][1][2][0][RTW89_FCC][1][30] = -4, [0][1][2][0][RTW89_FCC][2][30] = 68, [0][1][2][0][RTW89_ETSI][1][30] = 54, @@ -41330,6 +42379,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][30] = 54, [0][1][2][0][RTW89_MKK][0][30] = 16, [0][1][2][0][RTW89_IC][1][30] = -4, + [0][1][2][0][RTW89_IC][2][30] = 68, [0][1][2][0][RTW89_KCC][1][30] = 12, [0][1][2][0][RTW89_KCC][0][30] = 14, [0][1][2][0][RTW89_ACMA][1][30] = 54, @@ -41339,6 +42389,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][30] = 18, [0][1][2][0][RTW89_UK][1][30] = 54, [0][1][2][0][RTW89_UK][0][30] = 18, + [0][1][2][0][RTW89_THAILAND][1][30] = 42, + [0][1][2][0][RTW89_THAILAND][0][30] = -4, [0][1][2][0][RTW89_FCC][1][32] = -4, [0][1][2][0][RTW89_FCC][2][32] = 68, [0][1][2][0][RTW89_ETSI][1][32] = 54, @@ -41346,6 +42398,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][32] = 54, [0][1][2][0][RTW89_MKK][0][32] = 16, [0][1][2][0][RTW89_IC][1][32] = -4, + [0][1][2][0][RTW89_IC][2][32] = 68, [0][1][2][0][RTW89_KCC][1][32] = 12, [0][1][2][0][RTW89_KCC][0][32] = 14, [0][1][2][0][RTW89_ACMA][1][32] = 54, @@ -41355,6 +42408,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][32] = 18, [0][1][2][0][RTW89_UK][1][32] = 54, [0][1][2][0][RTW89_UK][0][32] = 18, + [0][1][2][0][RTW89_THAILAND][1][32] = 42, + [0][1][2][0][RTW89_THAILAND][0][32] = -4, [0][1][2][0][RTW89_FCC][1][34] = -4, [0][1][2][0][RTW89_FCC][2][34] = 68, [0][1][2][0][RTW89_ETSI][1][34] = 54, @@ -41362,6 +42417,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][34] = 54, [0][1][2][0][RTW89_MKK][0][34] = 16, [0][1][2][0][RTW89_IC][1][34] = -4, + [0][1][2][0][RTW89_IC][2][34] = 68, [0][1][2][0][RTW89_KCC][1][34] = 12, [0][1][2][0][RTW89_KCC][0][34] = 14, [0][1][2][0][RTW89_ACMA][1][34] = 54, @@ -41371,6 +42427,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][34] = 18, [0][1][2][0][RTW89_UK][1][34] = 54, [0][1][2][0][RTW89_UK][0][34] = 18, + [0][1][2][0][RTW89_THAILAND][1][34] = 42, + [0][1][2][0][RTW89_THAILAND][0][34] = -4, [0][1][2][0][RTW89_FCC][1][36] = -4, [0][1][2][0][RTW89_FCC][2][36] = 68, [0][1][2][0][RTW89_ETSI][1][36] = 54, @@ -41378,6 +42436,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][36] = 54, [0][1][2][0][RTW89_MKK][0][36] = 16, [0][1][2][0][RTW89_IC][1][36] = -4, + [0][1][2][0][RTW89_IC][2][36] = 68, [0][1][2][0][RTW89_KCC][1][36] = 12, 
[0][1][2][0][RTW89_KCC][0][36] = 14, [0][1][2][0][RTW89_ACMA][1][36] = 54, @@ -41387,6 +42446,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][36] = 18, [0][1][2][0][RTW89_UK][1][36] = 54, [0][1][2][0][RTW89_UK][0][36] = 18, + [0][1][2][0][RTW89_THAILAND][1][36] = 42, + [0][1][2][0][RTW89_THAILAND][0][36] = -4, [0][1][2][0][RTW89_FCC][1][38] = -4, [0][1][2][0][RTW89_FCC][2][38] = 68, [0][1][2][0][RTW89_ETSI][1][38] = 54, @@ -41394,6 +42455,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][38] = 54, [0][1][2][0][RTW89_MKK][0][38] = 16, [0][1][2][0][RTW89_IC][1][38] = -4, + [0][1][2][0][RTW89_IC][2][38] = 68, [0][1][2][0][RTW89_KCC][1][38] = 12, [0][1][2][0][RTW89_KCC][0][38] = 14, [0][1][2][0][RTW89_ACMA][1][38] = 54, @@ -41403,6 +42465,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][38] = 18, [0][1][2][0][RTW89_UK][1][38] = 54, [0][1][2][0][RTW89_UK][0][38] = 18, + [0][1][2][0][RTW89_THAILAND][1][38] = 42, + [0][1][2][0][RTW89_THAILAND][0][38] = -4, [0][1][2][0][RTW89_FCC][1][40] = -4, [0][1][2][0][RTW89_FCC][2][40] = 68, [0][1][2][0][RTW89_ETSI][1][40] = 54, @@ -41410,6 +42474,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][40] = 54, [0][1][2][0][RTW89_MKK][0][40] = 16, [0][1][2][0][RTW89_IC][1][40] = -4, + [0][1][2][0][RTW89_IC][2][40] = 68, [0][1][2][0][RTW89_KCC][1][40] = 12, [0][1][2][0][RTW89_KCC][0][40] = 14, [0][1][2][0][RTW89_ACMA][1][40] = 54, @@ -41419,6 +42484,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][40] = 18, [0][1][2][0][RTW89_UK][1][40] = 54, [0][1][2][0][RTW89_UK][0][40] = 18, + [0][1][2][0][RTW89_THAILAND][1][40] = 42, + [0][1][2][0][RTW89_THAILAND][0][40] = -4, [0][1][2][0][RTW89_FCC][1][42] = -4, [0][1][2][0][RTW89_FCC][2][42] = 68, [0][1][2][0][RTW89_ETSI][1][42] = 54, @@ -41426,6 +42493,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][42] = 54, [0][1][2][0][RTW89_MKK][0][42] = 16, [0][1][2][0][RTW89_IC][1][42] = -4, + [0][1][2][0][RTW89_IC][2][42] = 68, [0][1][2][0][RTW89_KCC][1][42] = 12, [0][1][2][0][RTW89_KCC][0][42] = 14, [0][1][2][0][RTW89_ACMA][1][42] = 54, @@ -41435,6 +42503,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][42] = 18, [0][1][2][0][RTW89_UK][1][42] = 54, [0][1][2][0][RTW89_UK][0][42] = 18, + [0][1][2][0][RTW89_THAILAND][1][42] = 42, + [0][1][2][0][RTW89_THAILAND][0][42] = -4, [0][1][2][0][RTW89_FCC][1][44] = -2, [0][1][2][0][RTW89_FCC][2][44] = 68, [0][1][2][0][RTW89_ETSI][1][44] = 54, @@ -41442,6 +42512,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][44] = 34, [0][1][2][0][RTW89_MKK][0][44] = 16, [0][1][2][0][RTW89_IC][1][44] = -2, + [0][1][2][0][RTW89_IC][2][44] = 68, [0][1][2][0][RTW89_KCC][1][44] = 12, [0][1][2][0][RTW89_KCC][0][44] = 12, [0][1][2][0][RTW89_ACMA][1][44] = 54, @@ -41451,6 +42522,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][44] = 18, [0][1][2][0][RTW89_UK][1][44] = 54, [0][1][2][0][RTW89_UK][0][44] = 18, + [0][1][2][0][RTW89_THAILAND][1][44] = 42, + [0][1][2][0][RTW89_THAILAND][0][44] = -2, [0][1][2][0][RTW89_FCC][1][45] = -2, [0][1][2][0][RTW89_FCC][2][45] = 127, [0][1][2][0][RTW89_ETSI][1][45] = 127, @@ -41458,6 +42531,7 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][45] = 127, [0][1][2][0][RTW89_MKK][0][45] = 127, [0][1][2][0][RTW89_IC][1][45] = -2, + [0][1][2][0][RTW89_IC][2][45] = 70, [0][1][2][0][RTW89_KCC][1][45] = 12, [0][1][2][0][RTW89_KCC][0][45] = 127, [0][1][2][0][RTW89_ACMA][1][45] = 127, @@ -41467,6 +42541,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][45] = 127, [0][1][2][0][RTW89_UK][1][45] = 127, [0][1][2][0][RTW89_UK][0][45] = 127, + [0][1][2][0][RTW89_THAILAND][1][45] = 127, + [0][1][2][0][RTW89_THAILAND][0][45] = 127, [0][1][2][0][RTW89_FCC][1][47] = -2, [0][1][2][0][RTW89_FCC][2][47] = 127, [0][1][2][0][RTW89_ETSI][1][47] = 127, @@ -41474,6 +42550,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][47] = 127, [0][1][2][0][RTW89_MKK][0][47] = 127, [0][1][2][0][RTW89_IC][1][47] = -2, + [0][1][2][0][RTW89_IC][2][47] = 68, [0][1][2][0][RTW89_KCC][1][47] = 12, [0][1][2][0][RTW89_KCC][0][47] = 127, [0][1][2][0][RTW89_ACMA][1][47] = 127, @@ -41483,6 +42560,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][47] = 127, [0][1][2][0][RTW89_UK][1][47] = 127, [0][1][2][0][RTW89_UK][0][47] = 127, + [0][1][2][0][RTW89_THAILAND][1][47] = 127, + [0][1][2][0][RTW89_THAILAND][0][47] = 127, [0][1][2][0][RTW89_FCC][1][49] = -2, [0][1][2][0][RTW89_FCC][2][49] = 127, [0][1][2][0][RTW89_ETSI][1][49] = 127, @@ -41490,6 +42569,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][49] = 127, [0][1][2][0][RTW89_MKK][0][49] = 127, [0][1][2][0][RTW89_IC][1][49] = -2, + [0][1][2][0][RTW89_IC][2][49] = 68, [0][1][2][0][RTW89_KCC][1][49] = 12, [0][1][2][0][RTW89_KCC][0][49] = 127, [0][1][2][0][RTW89_ACMA][1][49] = 127, @@ -41499,6 +42579,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][49] = 127, [0][1][2][0][RTW89_UK][1][49] = 127, [0][1][2][0][RTW89_UK][0][49] = 127, + [0][1][2][0][RTW89_THAILAND][1][49] = 127, + [0][1][2][0][RTW89_THAILAND][0][49] = 127, [0][1][2][0][RTW89_FCC][1][51] = -2, [0][1][2][0][RTW89_FCC][2][51] = 127, [0][1][2][0][RTW89_ETSI][1][51] = 127, @@ -41506,6 +42588,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][51] = 127, [0][1][2][0][RTW89_MKK][0][51] = 127, [0][1][2][0][RTW89_IC][1][51] = -2, + [0][1][2][0][RTW89_IC][2][51] = 68, [0][1][2][0][RTW89_KCC][1][51] = 12, [0][1][2][0][RTW89_KCC][0][51] = 127, [0][1][2][0][RTW89_ACMA][1][51] = 127, @@ -41515,6 +42598,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][51] = 127, [0][1][2][0][RTW89_UK][1][51] = 127, [0][1][2][0][RTW89_UK][0][51] = 127, + [0][1][2][0][RTW89_THAILAND][1][51] = 127, + [0][1][2][0][RTW89_THAILAND][0][51] = 127, [0][1][2][0][RTW89_FCC][1][53] = -2, [0][1][2][0][RTW89_FCC][2][53] = 127, [0][1][2][0][RTW89_ETSI][1][53] = 127, @@ -41522,6 +42607,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][53] = 127, [0][1][2][0][RTW89_MKK][0][53] = 127, [0][1][2][0][RTW89_IC][1][53] = -2, + [0][1][2][0][RTW89_IC][2][53] = 68, [0][1][2][0][RTW89_KCC][1][53] = 12, [0][1][2][0][RTW89_KCC][0][53] = 127, [0][1][2][0][RTW89_ACMA][1][53] = 127, @@ -41531,6 +42617,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][53] = 127, [0][1][2][0][RTW89_UK][1][53] = 127, 
[0][1][2][0][RTW89_UK][0][53] = 127, + [0][1][2][0][RTW89_THAILAND][1][53] = 127, + [0][1][2][0][RTW89_THAILAND][0][53] = 127, [0][1][2][0][RTW89_FCC][1][55] = -2, [0][1][2][0][RTW89_FCC][2][55] = 68, [0][1][2][0][RTW89_ETSI][1][55] = 127, @@ -41538,6 +42626,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][55] = 127, [0][1][2][0][RTW89_MKK][0][55] = 127, [0][1][2][0][RTW89_IC][1][55] = -2, + [0][1][2][0][RTW89_IC][2][55] = 68, [0][1][2][0][RTW89_KCC][1][55] = 12, [0][1][2][0][RTW89_KCC][0][55] = 127, [0][1][2][0][RTW89_ACMA][1][55] = 127, @@ -41547,6 +42636,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][55] = 127, [0][1][2][0][RTW89_UK][1][55] = 127, [0][1][2][0][RTW89_UK][0][55] = 127, + [0][1][2][0][RTW89_THAILAND][1][55] = 127, + [0][1][2][0][RTW89_THAILAND][0][55] = 127, [0][1][2][0][RTW89_FCC][1][57] = -2, [0][1][2][0][RTW89_FCC][2][57] = 68, [0][1][2][0][RTW89_ETSI][1][57] = 127, @@ -41554,6 +42645,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][57] = 127, [0][1][2][0][RTW89_MKK][0][57] = 127, [0][1][2][0][RTW89_IC][1][57] = -2, + [0][1][2][0][RTW89_IC][2][57] = 68, [0][1][2][0][RTW89_KCC][1][57] = 12, [0][1][2][0][RTW89_KCC][0][57] = 127, [0][1][2][0][RTW89_ACMA][1][57] = 127, @@ -41563,6 +42655,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][57] = 127, [0][1][2][0][RTW89_UK][1][57] = 127, [0][1][2][0][RTW89_UK][0][57] = 127, + [0][1][2][0][RTW89_THAILAND][1][57] = 127, + [0][1][2][0][RTW89_THAILAND][0][57] = 127, [0][1][2][0][RTW89_FCC][1][59] = -2, [0][1][2][0][RTW89_FCC][2][59] = 68, [0][1][2][0][RTW89_ETSI][1][59] = 127, @@ -41570,6 +42664,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][59] = 127, [0][1][2][0][RTW89_MKK][0][59] = 127, [0][1][2][0][RTW89_IC][1][59] = -2, + [0][1][2][0][RTW89_IC][2][59] = 68, [0][1][2][0][RTW89_KCC][1][59] = 12, [0][1][2][0][RTW89_KCC][0][59] = 127, [0][1][2][0][RTW89_ACMA][1][59] = 127, @@ -41579,6 +42674,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][59] = 127, [0][1][2][0][RTW89_UK][1][59] = 127, [0][1][2][0][RTW89_UK][0][59] = 127, + [0][1][2][0][RTW89_THAILAND][1][59] = 127, + [0][1][2][0][RTW89_THAILAND][0][59] = 127, [0][1][2][0][RTW89_FCC][1][60] = -2, [0][1][2][0][RTW89_FCC][2][60] = 68, [0][1][2][0][RTW89_ETSI][1][60] = 127, @@ -41586,6 +42683,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][60] = 127, [0][1][2][0][RTW89_MKK][0][60] = 127, [0][1][2][0][RTW89_IC][1][60] = -2, + [0][1][2][0][RTW89_IC][2][60] = 68, [0][1][2][0][RTW89_KCC][1][60] = 12, [0][1][2][0][RTW89_KCC][0][60] = 127, [0][1][2][0][RTW89_ACMA][1][60] = 127, @@ -41595,6 +42693,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][60] = 127, [0][1][2][0][RTW89_UK][1][60] = 127, [0][1][2][0][RTW89_UK][0][60] = 127, + [0][1][2][0][RTW89_THAILAND][1][60] = 127, + [0][1][2][0][RTW89_THAILAND][0][60] = 127, [0][1][2][0][RTW89_FCC][1][62] = -2, [0][1][2][0][RTW89_FCC][2][62] = 68, [0][1][2][0][RTW89_ETSI][1][62] = 127, @@ -41602,6 +42702,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][62] = 127, [0][1][2][0][RTW89_MKK][0][62] = 127, [0][1][2][0][RTW89_IC][1][62] = -2, + [0][1][2][0][RTW89_IC][2][62] = 68, [0][1][2][0][RTW89_KCC][1][62] 
= 12, [0][1][2][0][RTW89_KCC][0][62] = 127, [0][1][2][0][RTW89_ACMA][1][62] = 127, @@ -41611,6 +42712,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][62] = 127, [0][1][2][0][RTW89_UK][1][62] = 127, [0][1][2][0][RTW89_UK][0][62] = 127, + [0][1][2][0][RTW89_THAILAND][1][62] = 127, + [0][1][2][0][RTW89_THAILAND][0][62] = 127, [0][1][2][0][RTW89_FCC][1][64] = -2, [0][1][2][0][RTW89_FCC][2][64] = 68, [0][1][2][0][RTW89_ETSI][1][64] = 127, @@ -41618,6 +42721,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][64] = 127, [0][1][2][0][RTW89_MKK][0][64] = 127, [0][1][2][0][RTW89_IC][1][64] = -2, + [0][1][2][0][RTW89_IC][2][64] = 68, [0][1][2][0][RTW89_KCC][1][64] = 12, [0][1][2][0][RTW89_KCC][0][64] = 127, [0][1][2][0][RTW89_ACMA][1][64] = 127, @@ -41627,6 +42731,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][64] = 127, [0][1][2][0][RTW89_UK][1][64] = 127, [0][1][2][0][RTW89_UK][0][64] = 127, + [0][1][2][0][RTW89_THAILAND][1][64] = 127, + [0][1][2][0][RTW89_THAILAND][0][64] = 127, [0][1][2][0][RTW89_FCC][1][66] = -2, [0][1][2][0][RTW89_FCC][2][66] = 68, [0][1][2][0][RTW89_ETSI][1][66] = 127, @@ -41634,6 +42740,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][66] = 127, [0][1][2][0][RTW89_MKK][0][66] = 127, [0][1][2][0][RTW89_IC][1][66] = -2, + [0][1][2][0][RTW89_IC][2][66] = 68, [0][1][2][0][RTW89_KCC][1][66] = 12, [0][1][2][0][RTW89_KCC][0][66] = 127, [0][1][2][0][RTW89_ACMA][1][66] = 127, @@ -41643,6 +42750,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][66] = 127, [0][1][2][0][RTW89_UK][1][66] = 127, [0][1][2][0][RTW89_UK][0][66] = 127, + [0][1][2][0][RTW89_THAILAND][1][66] = 127, + [0][1][2][0][RTW89_THAILAND][0][66] = 127, [0][1][2][0][RTW89_FCC][1][68] = -2, [0][1][2][0][RTW89_FCC][2][68] = 68, [0][1][2][0][RTW89_ETSI][1][68] = 127, @@ -41650,6 +42759,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][68] = 127, [0][1][2][0][RTW89_MKK][0][68] = 127, [0][1][2][0][RTW89_IC][1][68] = -2, + [0][1][2][0][RTW89_IC][2][68] = 68, [0][1][2][0][RTW89_KCC][1][68] = 12, [0][1][2][0][RTW89_KCC][0][68] = 127, [0][1][2][0][RTW89_ACMA][1][68] = 127, @@ -41659,6 +42769,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][68] = 127, [0][1][2][0][RTW89_UK][1][68] = 127, [0][1][2][0][RTW89_UK][0][68] = 127, + [0][1][2][0][RTW89_THAILAND][1][68] = 127, + [0][1][2][0][RTW89_THAILAND][0][68] = 127, [0][1][2][0][RTW89_FCC][1][70] = -2, [0][1][2][0][RTW89_FCC][2][70] = 68, [0][1][2][0][RTW89_ETSI][1][70] = 127, @@ -41666,6 +42778,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][70] = 127, [0][1][2][0][RTW89_MKK][0][70] = 127, [0][1][2][0][RTW89_IC][1][70] = -2, + [0][1][2][0][RTW89_IC][2][70] = 68, [0][1][2][0][RTW89_KCC][1][70] = 12, [0][1][2][0][RTW89_KCC][0][70] = 127, [0][1][2][0][RTW89_ACMA][1][70] = 127, @@ -41675,6 +42788,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][70] = 127, [0][1][2][0][RTW89_UK][1][70] = 127, [0][1][2][0][RTW89_UK][0][70] = 127, + [0][1][2][0][RTW89_THAILAND][1][70] = 127, + [0][1][2][0][RTW89_THAILAND][0][70] = 127, [0][1][2][0][RTW89_FCC][1][72] = -2, [0][1][2][0][RTW89_FCC][2][72] = 68, [0][1][2][0][RTW89_ETSI][1][72] = 127, @@ -41682,6 +42797,7 
@@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][72] = 127, [0][1][2][0][RTW89_MKK][0][72] = 127, [0][1][2][0][RTW89_IC][1][72] = -2, + [0][1][2][0][RTW89_IC][2][72] = 68, [0][1][2][0][RTW89_KCC][1][72] = 12, [0][1][2][0][RTW89_KCC][0][72] = 127, [0][1][2][0][RTW89_ACMA][1][72] = 127, @@ -41691,6 +42807,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][72] = 127, [0][1][2][0][RTW89_UK][1][72] = 127, [0][1][2][0][RTW89_UK][0][72] = 127, + [0][1][2][0][RTW89_THAILAND][1][72] = 127, + [0][1][2][0][RTW89_THAILAND][0][72] = 127, [0][1][2][0][RTW89_FCC][1][74] = -2, [0][1][2][0][RTW89_FCC][2][74] = 68, [0][1][2][0][RTW89_ETSI][1][74] = 127, @@ -41698,6 +42816,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][74] = 127, [0][1][2][0][RTW89_MKK][0][74] = 127, [0][1][2][0][RTW89_IC][1][74] = -2, + [0][1][2][0][RTW89_IC][2][74] = 68, [0][1][2][0][RTW89_KCC][1][74] = 12, [0][1][2][0][RTW89_KCC][0][74] = 127, [0][1][2][0][RTW89_ACMA][1][74] = 127, @@ -41707,6 +42826,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][74] = 127, [0][1][2][0][RTW89_UK][1][74] = 127, [0][1][2][0][RTW89_UK][0][74] = 127, + [0][1][2][0][RTW89_THAILAND][1][74] = 127, + [0][1][2][0][RTW89_THAILAND][0][74] = 127, [0][1][2][0][RTW89_FCC][1][75] = -2, [0][1][2][0][RTW89_FCC][2][75] = 68, [0][1][2][0][RTW89_ETSI][1][75] = 127, @@ -41714,6 +42835,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][75] = 127, [0][1][2][0][RTW89_MKK][0][75] = 127, [0][1][2][0][RTW89_IC][1][75] = -2, + [0][1][2][0][RTW89_IC][2][75] = 68, [0][1][2][0][RTW89_KCC][1][75] = 12, [0][1][2][0][RTW89_KCC][0][75] = 127, [0][1][2][0][RTW89_ACMA][1][75] = 127, @@ -41723,6 +42845,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][75] = 127, [0][1][2][0][RTW89_UK][1][75] = 127, [0][1][2][0][RTW89_UK][0][75] = 127, + [0][1][2][0][RTW89_THAILAND][1][75] = 127, + [0][1][2][0][RTW89_THAILAND][0][75] = 127, [0][1][2][0][RTW89_FCC][1][77] = -2, [0][1][2][0][RTW89_FCC][2][77] = 68, [0][1][2][0][RTW89_ETSI][1][77] = 127, @@ -41730,6 +42854,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][77] = 127, [0][1][2][0][RTW89_MKK][0][77] = 127, [0][1][2][0][RTW89_IC][1][77] = -2, + [0][1][2][0][RTW89_IC][2][77] = 68, [0][1][2][0][RTW89_KCC][1][77] = 12, [0][1][2][0][RTW89_KCC][0][77] = 127, [0][1][2][0][RTW89_ACMA][1][77] = 127, @@ -41739,6 +42864,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][77] = 127, [0][1][2][0][RTW89_UK][1][77] = 127, [0][1][2][0][RTW89_UK][0][77] = 127, + [0][1][2][0][RTW89_THAILAND][1][77] = 127, + [0][1][2][0][RTW89_THAILAND][0][77] = 127, [0][1][2][0][RTW89_FCC][1][79] = -2, [0][1][2][0][RTW89_FCC][2][79] = 68, [0][1][2][0][RTW89_ETSI][1][79] = 127, @@ -41746,6 +42873,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][79] = 127, [0][1][2][0][RTW89_MKK][0][79] = 127, [0][1][2][0][RTW89_IC][1][79] = -2, + [0][1][2][0][RTW89_IC][2][79] = 68, [0][1][2][0][RTW89_KCC][1][79] = 12, [0][1][2][0][RTW89_KCC][0][79] = 127, [0][1][2][0][RTW89_ACMA][1][79] = 127, @@ -41755,6 +42883,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][79] = 127, [0][1][2][0][RTW89_UK][1][79] = 127, 
[0][1][2][0][RTW89_UK][0][79] = 127, + [0][1][2][0][RTW89_THAILAND][1][79] = 127, + [0][1][2][0][RTW89_THAILAND][0][79] = 127, [0][1][2][0][RTW89_FCC][1][81] = -2, [0][1][2][0][RTW89_FCC][2][81] = 68, [0][1][2][0][RTW89_ETSI][1][81] = 127, @@ -41762,6 +42892,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][81] = 127, [0][1][2][0][RTW89_MKK][0][81] = 127, [0][1][2][0][RTW89_IC][1][81] = -2, + [0][1][2][0][RTW89_IC][2][81] = 68, [0][1][2][0][RTW89_KCC][1][81] = 12, [0][1][2][0][RTW89_KCC][0][81] = 127, [0][1][2][0][RTW89_ACMA][1][81] = 127, @@ -41771,6 +42902,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][81] = 127, [0][1][2][0][RTW89_UK][1][81] = 127, [0][1][2][0][RTW89_UK][0][81] = 127, + [0][1][2][0][RTW89_THAILAND][1][81] = 127, + [0][1][2][0][RTW89_THAILAND][0][81] = 127, [0][1][2][0][RTW89_FCC][1][83] = -2, [0][1][2][0][RTW89_FCC][2][83] = 68, [0][1][2][0][RTW89_ETSI][1][83] = 127, @@ -41778,6 +42911,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][83] = 127, [0][1][2][0][RTW89_MKK][0][83] = 127, [0][1][2][0][RTW89_IC][1][83] = -2, + [0][1][2][0][RTW89_IC][2][83] = 68, [0][1][2][0][RTW89_KCC][1][83] = 20, [0][1][2][0][RTW89_KCC][0][83] = 127, [0][1][2][0][RTW89_ACMA][1][83] = 127, @@ -41787,6 +42921,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][83] = 127, [0][1][2][0][RTW89_UK][1][83] = 127, [0][1][2][0][RTW89_UK][0][83] = 127, + [0][1][2][0][RTW89_THAILAND][1][83] = 127, + [0][1][2][0][RTW89_THAILAND][0][83] = 127, [0][1][2][0][RTW89_FCC][1][85] = -2, [0][1][2][0][RTW89_FCC][2][85] = 68, [0][1][2][0][RTW89_ETSI][1][85] = 127, @@ -41794,6 +42930,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][85] = 127, [0][1][2][0][RTW89_MKK][0][85] = 127, [0][1][2][0][RTW89_IC][1][85] = -2, + [0][1][2][0][RTW89_IC][2][85] = 68, [0][1][2][0][RTW89_KCC][1][85] = 20, [0][1][2][0][RTW89_KCC][0][85] = 127, [0][1][2][0][RTW89_ACMA][1][85] = 127, @@ -41803,6 +42940,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][85] = 127, [0][1][2][0][RTW89_UK][1][85] = 127, [0][1][2][0][RTW89_UK][0][85] = 127, + [0][1][2][0][RTW89_THAILAND][1][85] = 127, + [0][1][2][0][RTW89_THAILAND][0][85] = 127, [0][1][2][0][RTW89_FCC][1][87] = -2, [0][1][2][0][RTW89_FCC][2][87] = 127, [0][1][2][0][RTW89_ETSI][1][87] = 127, @@ -41810,6 +42949,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][87] = 127, [0][1][2][0][RTW89_MKK][0][87] = 127, [0][1][2][0][RTW89_IC][1][87] = -2, + [0][1][2][0][RTW89_IC][2][87] = 127, [0][1][2][0][RTW89_KCC][1][87] = 20, [0][1][2][0][RTW89_KCC][0][87] = 127, [0][1][2][0][RTW89_ACMA][1][87] = 127, @@ -41819,6 +42959,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][87] = 127, [0][1][2][0][RTW89_UK][1][87] = 127, [0][1][2][0][RTW89_UK][0][87] = 127, + [0][1][2][0][RTW89_THAILAND][1][87] = 127, + [0][1][2][0][RTW89_THAILAND][0][87] = 127, [0][1][2][0][RTW89_FCC][1][89] = -2, [0][1][2][0][RTW89_FCC][2][89] = 127, [0][1][2][0][RTW89_ETSI][1][89] = 127, @@ -41826,6 +42968,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][89] = 127, [0][1][2][0][RTW89_MKK][0][89] = 127, [0][1][2][0][RTW89_IC][1][89] = -2, + [0][1][2][0][RTW89_IC][2][89] = 127, 
[0][1][2][0][RTW89_KCC][1][89] = 20, [0][1][2][0][RTW89_KCC][0][89] = 127, [0][1][2][0][RTW89_ACMA][1][89] = 127, @@ -41835,6 +42978,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][89] = 127, [0][1][2][0][RTW89_UK][1][89] = 127, [0][1][2][0][RTW89_UK][0][89] = 127, + [0][1][2][0][RTW89_THAILAND][1][89] = 127, + [0][1][2][0][RTW89_THAILAND][0][89] = 127, [0][1][2][0][RTW89_FCC][1][90] = -2, [0][1][2][0][RTW89_FCC][2][90] = 127, [0][1][2][0][RTW89_ETSI][1][90] = 127, @@ -41842,6 +42987,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][90] = 127, [0][1][2][0][RTW89_MKK][0][90] = 127, [0][1][2][0][RTW89_IC][1][90] = -2, + [0][1][2][0][RTW89_IC][2][90] = 127, [0][1][2][0][RTW89_KCC][1][90] = 20, [0][1][2][0][RTW89_KCC][0][90] = 127, [0][1][2][0][RTW89_ACMA][1][90] = 127, @@ -41851,6 +42997,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][90] = 127, [0][1][2][0][RTW89_UK][1][90] = 127, [0][1][2][0][RTW89_UK][0][90] = 127, + [0][1][2][0][RTW89_THAILAND][1][90] = 127, + [0][1][2][0][RTW89_THAILAND][0][90] = 127, [0][1][2][0][RTW89_FCC][1][92] = -2, [0][1][2][0][RTW89_FCC][2][92] = 127, [0][1][2][0][RTW89_ETSI][1][92] = 127, @@ -41858,6 +43006,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][92] = 127, [0][1][2][0][RTW89_MKK][0][92] = 127, [0][1][2][0][RTW89_IC][1][92] = -2, + [0][1][2][0][RTW89_IC][2][92] = 127, [0][1][2][0][RTW89_KCC][1][92] = 20, [0][1][2][0][RTW89_KCC][0][92] = 127, [0][1][2][0][RTW89_ACMA][1][92] = 127, @@ -41867,6 +43016,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][92] = 127, [0][1][2][0][RTW89_UK][1][92] = 127, [0][1][2][0][RTW89_UK][0][92] = 127, + [0][1][2][0][RTW89_THAILAND][1][92] = 127, + [0][1][2][0][RTW89_THAILAND][0][92] = 127, [0][1][2][0][RTW89_FCC][1][94] = -2, [0][1][2][0][RTW89_FCC][2][94] = 127, [0][1][2][0][RTW89_ETSI][1][94] = 127, @@ -41874,6 +43025,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][94] = 127, [0][1][2][0][RTW89_MKK][0][94] = 127, [0][1][2][0][RTW89_IC][1][94] = -2, + [0][1][2][0][RTW89_IC][2][94] = 127, [0][1][2][0][RTW89_KCC][1][94] = 20, [0][1][2][0][RTW89_KCC][0][94] = 127, [0][1][2][0][RTW89_ACMA][1][94] = 127, @@ -41883,6 +43035,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][94] = 127, [0][1][2][0][RTW89_UK][1][94] = 127, [0][1][2][0][RTW89_UK][0][94] = 127, + [0][1][2][0][RTW89_THAILAND][1][94] = 127, + [0][1][2][0][RTW89_THAILAND][0][94] = 127, [0][1][2][0][RTW89_FCC][1][96] = -2, [0][1][2][0][RTW89_FCC][2][96] = 127, [0][1][2][0][RTW89_ETSI][1][96] = 127, @@ -41890,6 +43044,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][96] = 127, [0][1][2][0][RTW89_MKK][0][96] = 127, [0][1][2][0][RTW89_IC][1][96] = -2, + [0][1][2][0][RTW89_IC][2][96] = 127, [0][1][2][0][RTW89_KCC][1][96] = 20, [0][1][2][0][RTW89_KCC][0][96] = 127, [0][1][2][0][RTW89_ACMA][1][96] = 127, @@ -41899,6 +43054,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][96] = 127, [0][1][2][0][RTW89_UK][1][96] = 127, [0][1][2][0][RTW89_UK][0][96] = 127, + [0][1][2][0][RTW89_THAILAND][1][96] = 127, + [0][1][2][0][RTW89_THAILAND][0][96] = 127, [0][1][2][0][RTW89_FCC][1][98] = -2, [0][1][2][0][RTW89_FCC][2][98] = 127, 
[0][1][2][0][RTW89_ETSI][1][98] = 127, @@ -41906,6 +43063,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][98] = 127, [0][1][2][0][RTW89_MKK][0][98] = 127, [0][1][2][0][RTW89_IC][1][98] = -2, + [0][1][2][0][RTW89_IC][2][98] = 127, [0][1][2][0][RTW89_KCC][1][98] = 20, [0][1][2][0][RTW89_KCC][0][98] = 127, [0][1][2][0][RTW89_ACMA][1][98] = 127, @@ -41915,6 +43073,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][98] = 127, [0][1][2][0][RTW89_UK][1][98] = 127, [0][1][2][0][RTW89_UK][0][98] = 127, + [0][1][2][0][RTW89_THAILAND][1][98] = 127, + [0][1][2][0][RTW89_THAILAND][0][98] = 127, [0][1][2][0][RTW89_FCC][1][100] = -2, [0][1][2][0][RTW89_FCC][2][100] = 127, [0][1][2][0][RTW89_ETSI][1][100] = 127, @@ -41922,6 +43082,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][100] = 127, [0][1][2][0][RTW89_MKK][0][100] = 127, [0][1][2][0][RTW89_IC][1][100] = -2, + [0][1][2][0][RTW89_IC][2][100] = 127, [0][1][2][0][RTW89_KCC][1][100] = 20, [0][1][2][0][RTW89_KCC][0][100] = 127, [0][1][2][0][RTW89_ACMA][1][100] = 127, @@ -41931,6 +43092,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][100] = 127, [0][1][2][0][RTW89_UK][1][100] = 127, [0][1][2][0][RTW89_UK][0][100] = 127, + [0][1][2][0][RTW89_THAILAND][1][100] = 127, + [0][1][2][0][RTW89_THAILAND][0][100] = 127, [0][1][2][0][RTW89_FCC][1][102] = -2, [0][1][2][0][RTW89_FCC][2][102] = 127, [0][1][2][0][RTW89_ETSI][1][102] = 127, @@ -41938,6 +43101,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][102] = 127, [0][1][2][0][RTW89_MKK][0][102] = 127, [0][1][2][0][RTW89_IC][1][102] = -2, + [0][1][2][0][RTW89_IC][2][102] = 127, [0][1][2][0][RTW89_KCC][1][102] = 20, [0][1][2][0][RTW89_KCC][0][102] = 127, [0][1][2][0][RTW89_ACMA][1][102] = 127, @@ -41947,6 +43111,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][102] = 127, [0][1][2][0][RTW89_UK][1][102] = 127, [0][1][2][0][RTW89_UK][0][102] = 127, + [0][1][2][0][RTW89_THAILAND][1][102] = 127, + [0][1][2][0][RTW89_THAILAND][0][102] = 127, [0][1][2][0][RTW89_FCC][1][104] = -2, [0][1][2][0][RTW89_FCC][2][104] = 127, [0][1][2][0][RTW89_ETSI][1][104] = 127, @@ -41954,6 +43120,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][104] = 127, [0][1][2][0][RTW89_MKK][0][104] = 127, [0][1][2][0][RTW89_IC][1][104] = -2, + [0][1][2][0][RTW89_IC][2][104] = 127, [0][1][2][0][RTW89_KCC][1][104] = 20, [0][1][2][0][RTW89_KCC][0][104] = 127, [0][1][2][0][RTW89_ACMA][1][104] = 127, @@ -41963,6 +43130,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][104] = 127, [0][1][2][0][RTW89_UK][1][104] = 127, [0][1][2][0][RTW89_UK][0][104] = 127, + [0][1][2][0][RTW89_THAILAND][1][104] = 127, + [0][1][2][0][RTW89_THAILAND][0][104] = 127, [0][1][2][0][RTW89_FCC][1][105] = -2, [0][1][2][0][RTW89_FCC][2][105] = 127, [0][1][2][0][RTW89_ETSI][1][105] = 127, @@ -41970,6 +43139,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][105] = 127, [0][1][2][0][RTW89_MKK][0][105] = 127, [0][1][2][0][RTW89_IC][1][105] = -2, + [0][1][2][0][RTW89_IC][2][105] = 127, [0][1][2][0][RTW89_KCC][1][105] = 20, [0][1][2][0][RTW89_KCC][0][105] = 127, [0][1][2][0][RTW89_ACMA][1][105] = 127, @@ -41979,6 +43149,8 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][105] = 127, [0][1][2][0][RTW89_UK][1][105] = 127, [0][1][2][0][RTW89_UK][0][105] = 127, + [0][1][2][0][RTW89_THAILAND][1][105] = 127, + [0][1][2][0][RTW89_THAILAND][0][105] = 127, [0][1][2][0][RTW89_FCC][1][107] = 1, [0][1][2][0][RTW89_FCC][2][107] = 127, [0][1][2][0][RTW89_ETSI][1][107] = 127, @@ -41986,6 +43158,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][107] = 127, [0][1][2][0][RTW89_MKK][0][107] = 127, [0][1][2][0][RTW89_IC][1][107] = 1, + [0][1][2][0][RTW89_IC][2][107] = 127, [0][1][2][0][RTW89_KCC][1][107] = 20, [0][1][2][0][RTW89_KCC][0][107] = 127, [0][1][2][0][RTW89_ACMA][1][107] = 127, @@ -41995,6 +43168,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][107] = 127, [0][1][2][0][RTW89_UK][1][107] = 127, [0][1][2][0][RTW89_UK][0][107] = 127, + [0][1][2][0][RTW89_THAILAND][1][107] = 127, + [0][1][2][0][RTW89_THAILAND][0][107] = 127, [0][1][2][0][RTW89_FCC][1][109] = 1, [0][1][2][0][RTW89_FCC][2][109] = 127, [0][1][2][0][RTW89_ETSI][1][109] = 127, @@ -42002,6 +43177,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][109] = 127, [0][1][2][0][RTW89_MKK][0][109] = 127, [0][1][2][0][RTW89_IC][1][109] = 1, + [0][1][2][0][RTW89_IC][2][109] = 127, [0][1][2][0][RTW89_KCC][1][109] = 20, [0][1][2][0][RTW89_KCC][0][109] = 127, [0][1][2][0][RTW89_ACMA][1][109] = 127, @@ -42011,6 +43187,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][109] = 127, [0][1][2][0][RTW89_UK][1][109] = 127, [0][1][2][0][RTW89_UK][0][109] = 127, + [0][1][2][0][RTW89_THAILAND][1][109] = 127, + [0][1][2][0][RTW89_THAILAND][0][109] = 127, [0][1][2][0][RTW89_FCC][1][111] = 127, [0][1][2][0][RTW89_FCC][2][111] = 127, [0][1][2][0][RTW89_ETSI][1][111] = 127, @@ -42018,6 +43196,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][111] = 127, [0][1][2][0][RTW89_MKK][0][111] = 127, [0][1][2][0][RTW89_IC][1][111] = 127, + [0][1][2][0][RTW89_IC][2][111] = 127, [0][1][2][0][RTW89_KCC][1][111] = 127, [0][1][2][0][RTW89_KCC][0][111] = 127, [0][1][2][0][RTW89_ACMA][1][111] = 127, @@ -42027,6 +43206,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][111] = 127, [0][1][2][0][RTW89_UK][1][111] = 127, [0][1][2][0][RTW89_UK][0][111] = 127, + [0][1][2][0][RTW89_THAILAND][1][111] = 127, + [0][1][2][0][RTW89_THAILAND][0][111] = 127, [0][1][2][0][RTW89_FCC][1][113] = 127, [0][1][2][0][RTW89_FCC][2][113] = 127, [0][1][2][0][RTW89_ETSI][1][113] = 127, @@ -42034,6 +43215,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][113] = 127, [0][1][2][0][RTW89_MKK][0][113] = 127, [0][1][2][0][RTW89_IC][1][113] = 127, + [0][1][2][0][RTW89_IC][2][113] = 127, [0][1][2][0][RTW89_KCC][1][113] = 127, [0][1][2][0][RTW89_KCC][0][113] = 127, [0][1][2][0][RTW89_ACMA][1][113] = 127, @@ -42043,6 +43225,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][113] = 127, [0][1][2][0][RTW89_UK][1][113] = 127, [0][1][2][0][RTW89_UK][0][113] = 127, + [0][1][2][0][RTW89_THAILAND][1][113] = 127, + [0][1][2][0][RTW89_THAILAND][0][113] = 127, [0][1][2][0][RTW89_FCC][1][115] = 127, [0][1][2][0][RTW89_FCC][2][115] = 127, [0][1][2][0][RTW89_ETSI][1][115] = 127, @@ -42050,6 +43234,7 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][115] = 127, [0][1][2][0][RTW89_MKK][0][115] = 127, [0][1][2][0][RTW89_IC][1][115] = 127, + [0][1][2][0][RTW89_IC][2][115] = 127, [0][1][2][0][RTW89_KCC][1][115] = 127, [0][1][2][0][RTW89_KCC][0][115] = 127, [0][1][2][0][RTW89_ACMA][1][115] = 127, @@ -42059,6 +43244,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][115] = 127, [0][1][2][0][RTW89_UK][1][115] = 127, [0][1][2][0][RTW89_UK][0][115] = 127, + [0][1][2][0][RTW89_THAILAND][1][115] = 127, + [0][1][2][0][RTW89_THAILAND][0][115] = 127, [0][1][2][0][RTW89_FCC][1][117] = 127, [0][1][2][0][RTW89_FCC][2][117] = 127, [0][1][2][0][RTW89_ETSI][1][117] = 127, @@ -42066,6 +43253,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][117] = 127, [0][1][2][0][RTW89_MKK][0][117] = 127, [0][1][2][0][RTW89_IC][1][117] = 127, + [0][1][2][0][RTW89_IC][2][117] = 127, [0][1][2][0][RTW89_KCC][1][117] = 127, [0][1][2][0][RTW89_KCC][0][117] = 127, [0][1][2][0][RTW89_ACMA][1][117] = 127, @@ -42075,6 +43263,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][117] = 127, [0][1][2][0][RTW89_UK][1][117] = 127, [0][1][2][0][RTW89_UK][0][117] = 127, + [0][1][2][0][RTW89_THAILAND][1][117] = 127, + [0][1][2][0][RTW89_THAILAND][0][117] = 127, [0][1][2][0][RTW89_FCC][1][119] = 127, [0][1][2][0][RTW89_FCC][2][119] = 127, [0][1][2][0][RTW89_ETSI][1][119] = 127, @@ -42082,6 +43272,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][1][119] = 127, [0][1][2][0][RTW89_MKK][0][119] = 127, [0][1][2][0][RTW89_IC][1][119] = 127, + [0][1][2][0][RTW89_IC][2][119] = 127, [0][1][2][0][RTW89_KCC][1][119] = 127, [0][1][2][0][RTW89_KCC][0][119] = 127, [0][1][2][0][RTW89_ACMA][1][119] = 127, @@ -42091,6 +43282,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_QATAR][0][119] = 127, [0][1][2][0][RTW89_UK][1][119] = 127, [0][1][2][0][RTW89_UK][0][119] = 127, + [0][1][2][0][RTW89_THAILAND][1][119] = 127, + [0][1][2][0][RTW89_THAILAND][0][119] = 127, [0][1][2][1][RTW89_FCC][1][0] = -2, [0][1][2][1][RTW89_FCC][2][0] = 54, [0][1][2][1][RTW89_ETSI][1][0] = 42, @@ -42098,6 +43291,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][0] = 56, [0][1][2][1][RTW89_MKK][0][0] = 16, [0][1][2][1][RTW89_IC][1][0] = -2, + [0][1][2][1][RTW89_IC][2][0] = 54, [0][1][2][1][RTW89_KCC][1][0] = 12, [0][1][2][1][RTW89_KCC][0][0] = 10, [0][1][2][1][RTW89_ACMA][1][0] = 42, @@ -42107,6 +43301,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][0] = 6, [0][1][2][1][RTW89_UK][1][0] = 42, [0][1][2][1][RTW89_UK][0][0] = 6, + [0][1][2][1][RTW89_THAILAND][1][0] = 44, + [0][1][2][1][RTW89_THAILAND][0][0] = -2, [0][1][2][1][RTW89_FCC][1][2] = -4, [0][1][2][1][RTW89_FCC][2][2] = 54, [0][1][2][1][RTW89_ETSI][1][2] = 42, @@ -42114,6 +43310,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][2] = 54, [0][1][2][1][RTW89_MKK][0][2] = 16, [0][1][2][1][RTW89_IC][1][2] = -4, + [0][1][2][1][RTW89_IC][2][2] = 54, [0][1][2][1][RTW89_KCC][1][2] = 12, [0][1][2][1][RTW89_KCC][0][2] = 12, [0][1][2][1][RTW89_ACMA][1][2] = 42, @@ -42123,6 +43320,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][2] = 6, [0][1][2][1][RTW89_UK][1][2] = 42, 
[0][1][2][1][RTW89_UK][0][2] = 6, + [0][1][2][1][RTW89_THAILAND][1][2] = 44, + [0][1][2][1][RTW89_THAILAND][0][2] = -4, [0][1][2][1][RTW89_FCC][1][4] = -4, [0][1][2][1][RTW89_FCC][2][4] = 54, [0][1][2][1][RTW89_ETSI][1][4] = 42, @@ -42130,6 +43329,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][4] = 54, [0][1][2][1][RTW89_MKK][0][4] = 16, [0][1][2][1][RTW89_IC][1][4] = -4, + [0][1][2][1][RTW89_IC][2][4] = 54, [0][1][2][1][RTW89_KCC][1][4] = 12, [0][1][2][1][RTW89_KCC][0][4] = 12, [0][1][2][1][RTW89_ACMA][1][4] = 42, @@ -42139,6 +43339,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][4] = 6, [0][1][2][1][RTW89_UK][1][4] = 42, [0][1][2][1][RTW89_UK][0][4] = 6, + [0][1][2][1][RTW89_THAILAND][1][4] = 44, + [0][1][2][1][RTW89_THAILAND][0][4] = -4, [0][1][2][1][RTW89_FCC][1][6] = -4, [0][1][2][1][RTW89_FCC][2][6] = 54, [0][1][2][1][RTW89_ETSI][1][6] = 42, @@ -42146,6 +43348,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][6] = 54, [0][1][2][1][RTW89_MKK][0][6] = 16, [0][1][2][1][RTW89_IC][1][6] = -4, + [0][1][2][1][RTW89_IC][2][6] = 54, [0][1][2][1][RTW89_KCC][1][6] = 12, [0][1][2][1][RTW89_KCC][0][6] = 12, [0][1][2][1][RTW89_ACMA][1][6] = 42, @@ -42155,6 +43358,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][6] = 6, [0][1][2][1][RTW89_UK][1][6] = 42, [0][1][2][1][RTW89_UK][0][6] = 6, + [0][1][2][1][RTW89_THAILAND][1][6] = 44, + [0][1][2][1][RTW89_THAILAND][0][6] = -4, [0][1][2][1][RTW89_FCC][1][8] = -4, [0][1][2][1][RTW89_FCC][2][8] = 54, [0][1][2][1][RTW89_ETSI][1][8] = 42, @@ -42162,6 +43367,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][8] = 54, [0][1][2][1][RTW89_MKK][0][8] = 16, [0][1][2][1][RTW89_IC][1][8] = -4, + [0][1][2][1][RTW89_IC][2][8] = 54, [0][1][2][1][RTW89_KCC][1][8] = 12, [0][1][2][1][RTW89_KCC][0][8] = 12, [0][1][2][1][RTW89_ACMA][1][8] = 42, @@ -42171,6 +43377,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][8] = 6, [0][1][2][1][RTW89_UK][1][8] = 42, [0][1][2][1][RTW89_UK][0][8] = 6, + [0][1][2][1][RTW89_THAILAND][1][8] = 44, + [0][1][2][1][RTW89_THAILAND][0][8] = -4, [0][1][2][1][RTW89_FCC][1][10] = -4, [0][1][2][1][RTW89_FCC][2][10] = 54, [0][1][2][1][RTW89_ETSI][1][10] = 42, @@ -42178,6 +43386,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][10] = 54, [0][1][2][1][RTW89_MKK][0][10] = 16, [0][1][2][1][RTW89_IC][1][10] = -4, + [0][1][2][1][RTW89_IC][2][10] = 54, [0][1][2][1][RTW89_KCC][1][10] = 12, [0][1][2][1][RTW89_KCC][0][10] = 12, [0][1][2][1][RTW89_ACMA][1][10] = 42, @@ -42187,6 +43396,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][10] = 6, [0][1][2][1][RTW89_UK][1][10] = 42, [0][1][2][1][RTW89_UK][0][10] = 6, + [0][1][2][1][RTW89_THAILAND][1][10] = 44, + [0][1][2][1][RTW89_THAILAND][0][10] = -4, [0][1][2][1][RTW89_FCC][1][12] = -4, [0][1][2][1][RTW89_FCC][2][12] = 54, [0][1][2][1][RTW89_ETSI][1][12] = 42, @@ -42194,6 +43405,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][12] = 54, [0][1][2][1][RTW89_MKK][0][12] = 16, [0][1][2][1][RTW89_IC][1][12] = -4, + [0][1][2][1][RTW89_IC][2][12] = 54, [0][1][2][1][RTW89_KCC][1][12] = 12, [0][1][2][1][RTW89_KCC][0][12] = 12, [0][1][2][1][RTW89_ACMA][1][12] = 42, @@ -42203,6 +43415,8 
@@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][12] = 6, [0][1][2][1][RTW89_UK][1][12] = 42, [0][1][2][1][RTW89_UK][0][12] = 6, + [0][1][2][1][RTW89_THAILAND][1][12] = 44, + [0][1][2][1][RTW89_THAILAND][0][12] = -4, [0][1][2][1][RTW89_FCC][1][14] = -4, [0][1][2][1][RTW89_FCC][2][14] = 54, [0][1][2][1][RTW89_ETSI][1][14] = 42, @@ -42210,6 +43424,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][14] = 54, [0][1][2][1][RTW89_MKK][0][14] = 16, [0][1][2][1][RTW89_IC][1][14] = -4, + [0][1][2][1][RTW89_IC][2][14] = 54, [0][1][2][1][RTW89_KCC][1][14] = 12, [0][1][2][1][RTW89_KCC][0][14] = 12, [0][1][2][1][RTW89_ACMA][1][14] = 42, @@ -42219,6 +43434,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][14] = 6, [0][1][2][1][RTW89_UK][1][14] = 42, [0][1][2][1][RTW89_UK][0][14] = 6, + [0][1][2][1][RTW89_THAILAND][1][14] = 44, + [0][1][2][1][RTW89_THAILAND][0][14] = -4, [0][1][2][1][RTW89_FCC][1][15] = -4, [0][1][2][1][RTW89_FCC][2][15] = 54, [0][1][2][1][RTW89_ETSI][1][15] = 42, @@ -42226,6 +43443,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][15] = 54, [0][1][2][1][RTW89_MKK][0][15] = 16, [0][1][2][1][RTW89_IC][1][15] = -4, + [0][1][2][1][RTW89_IC][2][15] = 54, [0][1][2][1][RTW89_KCC][1][15] = 12, [0][1][2][1][RTW89_KCC][0][15] = 12, [0][1][2][1][RTW89_ACMA][1][15] = 42, @@ -42235,6 +43453,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][15] = 6, [0][1][2][1][RTW89_UK][1][15] = 42, [0][1][2][1][RTW89_UK][0][15] = 6, + [0][1][2][1][RTW89_THAILAND][1][15] = 44, + [0][1][2][1][RTW89_THAILAND][0][15] = -4, [0][1][2][1][RTW89_FCC][1][17] = -4, [0][1][2][1][RTW89_FCC][2][17] = 54, [0][1][2][1][RTW89_ETSI][1][17] = 42, @@ -42242,6 +43462,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][17] = 54, [0][1][2][1][RTW89_MKK][0][17] = 16, [0][1][2][1][RTW89_IC][1][17] = -4, + [0][1][2][1][RTW89_IC][2][17] = 54, [0][1][2][1][RTW89_KCC][1][17] = 12, [0][1][2][1][RTW89_KCC][0][17] = 12, [0][1][2][1][RTW89_ACMA][1][17] = 42, @@ -42251,6 +43472,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][17] = 6, [0][1][2][1][RTW89_UK][1][17] = 42, [0][1][2][1][RTW89_UK][0][17] = 6, + [0][1][2][1][RTW89_THAILAND][1][17] = 44, + [0][1][2][1][RTW89_THAILAND][0][17] = -4, [0][1][2][1][RTW89_FCC][1][19] = -4, [0][1][2][1][RTW89_FCC][2][19] = 54, [0][1][2][1][RTW89_ETSI][1][19] = 42, @@ -42258,6 +43481,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][19] = 54, [0][1][2][1][RTW89_MKK][0][19] = 16, [0][1][2][1][RTW89_IC][1][19] = -4, + [0][1][2][1][RTW89_IC][2][19] = 54, [0][1][2][1][RTW89_KCC][1][19] = 12, [0][1][2][1][RTW89_KCC][0][19] = 12, [0][1][2][1][RTW89_ACMA][1][19] = 42, @@ -42267,6 +43491,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][19] = 6, [0][1][2][1][RTW89_UK][1][19] = 42, [0][1][2][1][RTW89_UK][0][19] = 6, + [0][1][2][1][RTW89_THAILAND][1][19] = 44, + [0][1][2][1][RTW89_THAILAND][0][19] = -4, [0][1][2][1][RTW89_FCC][1][21] = -4, [0][1][2][1][RTW89_FCC][2][21] = 54, [0][1][2][1][RTW89_ETSI][1][21] = 42, @@ -42274,6 +43500,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][21] = 54, [0][1][2][1][RTW89_MKK][0][21] = 16, 
[0][1][2][1][RTW89_IC][1][21] = -4, + [0][1][2][1][RTW89_IC][2][21] = 54, [0][1][2][1][RTW89_KCC][1][21] = 12, [0][1][2][1][RTW89_KCC][0][21] = 12, [0][1][2][1][RTW89_ACMA][1][21] = 42, @@ -42283,6 +43510,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][21] = 6, [0][1][2][1][RTW89_UK][1][21] = 42, [0][1][2][1][RTW89_UK][0][21] = 6, + [0][1][2][1][RTW89_THAILAND][1][21] = 44, + [0][1][2][1][RTW89_THAILAND][0][21] = -4, [0][1][2][1][RTW89_FCC][1][23] = -4, [0][1][2][1][RTW89_FCC][2][23] = 68, [0][1][2][1][RTW89_ETSI][1][23] = 42, @@ -42290,6 +43519,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][23] = 54, [0][1][2][1][RTW89_MKK][0][23] = 16, [0][1][2][1][RTW89_IC][1][23] = -4, + [0][1][2][1][RTW89_IC][2][23] = 68, [0][1][2][1][RTW89_KCC][1][23] = 12, [0][1][2][1][RTW89_KCC][0][23] = 10, [0][1][2][1][RTW89_ACMA][1][23] = 42, @@ -42299,6 +43529,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][23] = 6, [0][1][2][1][RTW89_UK][1][23] = 42, [0][1][2][1][RTW89_UK][0][23] = 6, + [0][1][2][1][RTW89_THAILAND][1][23] = 44, + [0][1][2][1][RTW89_THAILAND][0][23] = -4, [0][1][2][1][RTW89_FCC][1][25] = -4, [0][1][2][1][RTW89_FCC][2][25] = 68, [0][1][2][1][RTW89_ETSI][1][25] = 42, @@ -42306,6 +43538,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][25] = 54, [0][1][2][1][RTW89_MKK][0][25] = 16, [0][1][2][1][RTW89_IC][1][25] = -4, + [0][1][2][1][RTW89_IC][2][25] = 68, [0][1][2][1][RTW89_KCC][1][25] = 12, [0][1][2][1][RTW89_KCC][0][25] = 14, [0][1][2][1][RTW89_ACMA][1][25] = 42, @@ -42315,6 +43548,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][25] = 6, [0][1][2][1][RTW89_UK][1][25] = 42, [0][1][2][1][RTW89_UK][0][25] = 6, + [0][1][2][1][RTW89_THAILAND][1][25] = 42, + [0][1][2][1][RTW89_THAILAND][0][25] = -4, [0][1][2][1][RTW89_FCC][1][27] = -4, [0][1][2][1][RTW89_FCC][2][27] = 68, [0][1][2][1][RTW89_ETSI][1][27] = 42, @@ -42322,6 +43557,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][27] = 54, [0][1][2][1][RTW89_MKK][0][27] = 16, [0][1][2][1][RTW89_IC][1][27] = -4, + [0][1][2][1][RTW89_IC][2][27] = 68, [0][1][2][1][RTW89_KCC][1][27] = 12, [0][1][2][1][RTW89_KCC][0][27] = 14, [0][1][2][1][RTW89_ACMA][1][27] = 42, @@ -42331,6 +43567,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][27] = 6, [0][1][2][1][RTW89_UK][1][27] = 42, [0][1][2][1][RTW89_UK][0][27] = 6, + [0][1][2][1][RTW89_THAILAND][1][27] = 42, + [0][1][2][1][RTW89_THAILAND][0][27] = -4, [0][1][2][1][RTW89_FCC][1][29] = -4, [0][1][2][1][RTW89_FCC][2][29] = 68, [0][1][2][1][RTW89_ETSI][1][29] = 42, @@ -42338,6 +43576,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][29] = 54, [0][1][2][1][RTW89_MKK][0][29] = 16, [0][1][2][1][RTW89_IC][1][29] = -4, + [0][1][2][1][RTW89_IC][2][29] = 68, [0][1][2][1][RTW89_KCC][1][29] = 12, [0][1][2][1][RTW89_KCC][0][29] = 14, [0][1][2][1][RTW89_ACMA][1][29] = 42, @@ -42347,6 +43586,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][29] = 6, [0][1][2][1][RTW89_UK][1][29] = 42, [0][1][2][1][RTW89_UK][0][29] = 6, + [0][1][2][1][RTW89_THAILAND][1][29] = 42, + [0][1][2][1][RTW89_THAILAND][0][29] = -4, [0][1][2][1][RTW89_FCC][1][30] = -4, [0][1][2][1][RTW89_FCC][2][30] = 68, 
[0][1][2][1][RTW89_ETSI][1][30] = 42, @@ -42354,6 +43595,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][30] = 54, [0][1][2][1][RTW89_MKK][0][30] = 16, [0][1][2][1][RTW89_IC][1][30] = -4, + [0][1][2][1][RTW89_IC][2][30] = 68, [0][1][2][1][RTW89_KCC][1][30] = 12, [0][1][2][1][RTW89_KCC][0][30] = 14, [0][1][2][1][RTW89_ACMA][1][30] = 42, @@ -42363,6 +43605,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][30] = 6, [0][1][2][1][RTW89_UK][1][30] = 42, [0][1][2][1][RTW89_UK][0][30] = 6, + [0][1][2][1][RTW89_THAILAND][1][30] = 42, + [0][1][2][1][RTW89_THAILAND][0][30] = -4, [0][1][2][1][RTW89_FCC][1][32] = -4, [0][1][2][1][RTW89_FCC][2][32] = 68, [0][1][2][1][RTW89_ETSI][1][32] = 42, @@ -42370,6 +43614,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][32] = 54, [0][1][2][1][RTW89_MKK][0][32] = 16, [0][1][2][1][RTW89_IC][1][32] = -4, + [0][1][2][1][RTW89_IC][2][32] = 68, [0][1][2][1][RTW89_KCC][1][32] = 12, [0][1][2][1][RTW89_KCC][0][32] = 14, [0][1][2][1][RTW89_ACMA][1][32] = 42, @@ -42379,6 +43624,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][32] = 6, [0][1][2][1][RTW89_UK][1][32] = 42, [0][1][2][1][RTW89_UK][0][32] = 6, + [0][1][2][1][RTW89_THAILAND][1][32] = 42, + [0][1][2][1][RTW89_THAILAND][0][32] = -4, [0][1][2][1][RTW89_FCC][1][34] = -4, [0][1][2][1][RTW89_FCC][2][34] = 68, [0][1][2][1][RTW89_ETSI][1][34] = 42, @@ -42386,6 +43633,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][34] = 54, [0][1][2][1][RTW89_MKK][0][34] = 16, [0][1][2][1][RTW89_IC][1][34] = -4, + [0][1][2][1][RTW89_IC][2][34] = 68, [0][1][2][1][RTW89_KCC][1][34] = 12, [0][1][2][1][RTW89_KCC][0][34] = 14, [0][1][2][1][RTW89_ACMA][1][34] = 42, @@ -42395,6 +43643,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][34] = 6, [0][1][2][1][RTW89_UK][1][34] = 42, [0][1][2][1][RTW89_UK][0][34] = 6, + [0][1][2][1][RTW89_THAILAND][1][34] = 42, + [0][1][2][1][RTW89_THAILAND][0][34] = -4, [0][1][2][1][RTW89_FCC][1][36] = -4, [0][1][2][1][RTW89_FCC][2][36] = 68, [0][1][2][1][RTW89_ETSI][1][36] = 42, @@ -42402,6 +43652,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][36] = 54, [0][1][2][1][RTW89_MKK][0][36] = 16, [0][1][2][1][RTW89_IC][1][36] = -4, + [0][1][2][1][RTW89_IC][2][36] = 68, [0][1][2][1][RTW89_KCC][1][36] = 12, [0][1][2][1][RTW89_KCC][0][36] = 14, [0][1][2][1][RTW89_ACMA][1][36] = 42, @@ -42411,6 +43662,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][36] = 6, [0][1][2][1][RTW89_UK][1][36] = 42, [0][1][2][1][RTW89_UK][0][36] = 6, + [0][1][2][1][RTW89_THAILAND][1][36] = 42, + [0][1][2][1][RTW89_THAILAND][0][36] = -4, [0][1][2][1][RTW89_FCC][1][38] = -4, [0][1][2][1][RTW89_FCC][2][38] = 68, [0][1][2][1][RTW89_ETSI][1][38] = 42, @@ -42418,6 +43671,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][38] = 54, [0][1][2][1][RTW89_MKK][0][38] = 16, [0][1][2][1][RTW89_IC][1][38] = -4, + [0][1][2][1][RTW89_IC][2][38] = 68, [0][1][2][1][RTW89_KCC][1][38] = 12, [0][1][2][1][RTW89_KCC][0][38] = 14, [0][1][2][1][RTW89_ACMA][1][38] = 42, @@ -42427,6 +43681,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][38] = 6, [0][1][2][1][RTW89_UK][1][38] = 42, 
[0][1][2][1][RTW89_UK][0][38] = 6, + [0][1][2][1][RTW89_THAILAND][1][38] = 42, + [0][1][2][1][RTW89_THAILAND][0][38] = -4, [0][1][2][1][RTW89_FCC][1][40] = -4, [0][1][2][1][RTW89_FCC][2][40] = 68, [0][1][2][1][RTW89_ETSI][1][40] = 42, @@ -42434,6 +43690,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][40] = 54, [0][1][2][1][RTW89_MKK][0][40] = 16, [0][1][2][1][RTW89_IC][1][40] = -4, + [0][1][2][1][RTW89_IC][2][40] = 68, [0][1][2][1][RTW89_KCC][1][40] = 12, [0][1][2][1][RTW89_KCC][0][40] = 14, [0][1][2][1][RTW89_ACMA][1][40] = 42, @@ -42443,6 +43700,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][40] = 6, [0][1][2][1][RTW89_UK][1][40] = 42, [0][1][2][1][RTW89_UK][0][40] = 6, + [0][1][2][1][RTW89_THAILAND][1][40] = 42, + [0][1][2][1][RTW89_THAILAND][0][40] = -4, [0][1][2][1][RTW89_FCC][1][42] = -4, [0][1][2][1][RTW89_FCC][2][42] = 68, [0][1][2][1][RTW89_ETSI][1][42] = 42, @@ -42450,6 +43709,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][42] = 54, [0][1][2][1][RTW89_MKK][0][42] = 16, [0][1][2][1][RTW89_IC][1][42] = -4, + [0][1][2][1][RTW89_IC][2][42] = 68, [0][1][2][1][RTW89_KCC][1][42] = 12, [0][1][2][1][RTW89_KCC][0][42] = 14, [0][1][2][1][RTW89_ACMA][1][42] = 42, @@ -42459,6 +43719,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][42] = 6, [0][1][2][1][RTW89_UK][1][42] = 42, [0][1][2][1][RTW89_UK][0][42] = 6, + [0][1][2][1][RTW89_THAILAND][1][42] = 42, + [0][1][2][1][RTW89_THAILAND][0][42] = -4, [0][1][2][1][RTW89_FCC][1][44] = -2, [0][1][2][1][RTW89_FCC][2][44] = 68, [0][1][2][1][RTW89_ETSI][1][44] = 42, @@ -42466,6 +43728,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][44] = 34, [0][1][2][1][RTW89_MKK][0][44] = 16, [0][1][2][1][RTW89_IC][1][44] = -2, + [0][1][2][1][RTW89_IC][2][44] = 68, [0][1][2][1][RTW89_KCC][1][44] = 12, [0][1][2][1][RTW89_KCC][0][44] = 12, [0][1][2][1][RTW89_ACMA][1][44] = 42, @@ -42475,6 +43738,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][44] = 6, [0][1][2][1][RTW89_UK][1][44] = 42, [0][1][2][1][RTW89_UK][0][44] = 6, + [0][1][2][1][RTW89_THAILAND][1][44] = 42, + [0][1][2][1][RTW89_THAILAND][0][44] = -2, [0][1][2][1][RTW89_FCC][1][45] = -2, [0][1][2][1][RTW89_FCC][2][45] = 127, [0][1][2][1][RTW89_ETSI][1][45] = 127, @@ -42482,6 +43747,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][45] = 127, [0][1][2][1][RTW89_MKK][0][45] = 127, [0][1][2][1][RTW89_IC][1][45] = -2, + [0][1][2][1][RTW89_IC][2][45] = 70, [0][1][2][1][RTW89_KCC][1][45] = 12, [0][1][2][1][RTW89_KCC][0][45] = 127, [0][1][2][1][RTW89_ACMA][1][45] = 127, @@ -42491,6 +43757,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][45] = 127, [0][1][2][1][RTW89_UK][1][45] = 127, [0][1][2][1][RTW89_UK][0][45] = 127, + [0][1][2][1][RTW89_THAILAND][1][45] = 127, + [0][1][2][1][RTW89_THAILAND][0][45] = 127, [0][1][2][1][RTW89_FCC][1][47] = -2, [0][1][2][1][RTW89_FCC][2][47] = 127, [0][1][2][1][RTW89_ETSI][1][47] = 127, @@ -42498,6 +43766,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][47] = 127, [0][1][2][1][RTW89_MKK][0][47] = 127, [0][1][2][1][RTW89_IC][1][47] = -2, + [0][1][2][1][RTW89_IC][2][47] = 68, [0][1][2][1][RTW89_KCC][1][47] = 12, [0][1][2][1][RTW89_KCC][0][47] 
= 127, [0][1][2][1][RTW89_ACMA][1][47] = 127, @@ -42507,6 +43776,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][47] = 127, [0][1][2][1][RTW89_UK][1][47] = 127, [0][1][2][1][RTW89_UK][0][47] = 127, + [0][1][2][1][RTW89_THAILAND][1][47] = 127, + [0][1][2][1][RTW89_THAILAND][0][47] = 127, [0][1][2][1][RTW89_FCC][1][49] = -2, [0][1][2][1][RTW89_FCC][2][49] = 127, [0][1][2][1][RTW89_ETSI][1][49] = 127, @@ -42514,6 +43785,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][49] = 127, [0][1][2][1][RTW89_MKK][0][49] = 127, [0][1][2][1][RTW89_IC][1][49] = -2, + [0][1][2][1][RTW89_IC][2][49] = 68, [0][1][2][1][RTW89_KCC][1][49] = 12, [0][1][2][1][RTW89_KCC][0][49] = 127, [0][1][2][1][RTW89_ACMA][1][49] = 127, @@ -42523,6 +43795,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][49] = 127, [0][1][2][1][RTW89_UK][1][49] = 127, [0][1][2][1][RTW89_UK][0][49] = 127, + [0][1][2][1][RTW89_THAILAND][1][49] = 127, + [0][1][2][1][RTW89_THAILAND][0][49] = 127, [0][1][2][1][RTW89_FCC][1][51] = -2, [0][1][2][1][RTW89_FCC][2][51] = 127, [0][1][2][1][RTW89_ETSI][1][51] = 127, @@ -42530,6 +43804,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][51] = 127, [0][1][2][1][RTW89_MKK][0][51] = 127, [0][1][2][1][RTW89_IC][1][51] = -2, + [0][1][2][1][RTW89_IC][2][51] = 68, [0][1][2][1][RTW89_KCC][1][51] = 12, [0][1][2][1][RTW89_KCC][0][51] = 127, [0][1][2][1][RTW89_ACMA][1][51] = 127, @@ -42539,6 +43814,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][51] = 127, [0][1][2][1][RTW89_UK][1][51] = 127, [0][1][2][1][RTW89_UK][0][51] = 127, + [0][1][2][1][RTW89_THAILAND][1][51] = 127, + [0][1][2][1][RTW89_THAILAND][0][51] = 127, [0][1][2][1][RTW89_FCC][1][53] = -2, [0][1][2][1][RTW89_FCC][2][53] = 127, [0][1][2][1][RTW89_ETSI][1][53] = 127, @@ -42546,6 +43823,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][53] = 127, [0][1][2][1][RTW89_MKK][0][53] = 127, [0][1][2][1][RTW89_IC][1][53] = -2, + [0][1][2][1][RTW89_IC][2][53] = 68, [0][1][2][1][RTW89_KCC][1][53] = 12, [0][1][2][1][RTW89_KCC][0][53] = 127, [0][1][2][1][RTW89_ACMA][1][53] = 127, @@ -42555,6 +43833,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][53] = 127, [0][1][2][1][RTW89_UK][1][53] = 127, [0][1][2][1][RTW89_UK][0][53] = 127, + [0][1][2][1][RTW89_THAILAND][1][53] = 127, + [0][1][2][1][RTW89_THAILAND][0][53] = 127, [0][1][2][1][RTW89_FCC][1][55] = -2, [0][1][2][1][RTW89_FCC][2][55] = 68, [0][1][2][1][RTW89_ETSI][1][55] = 127, @@ -42562,6 +43842,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][55] = 127, [0][1][2][1][RTW89_MKK][0][55] = 127, [0][1][2][1][RTW89_IC][1][55] = -2, + [0][1][2][1][RTW89_IC][2][55] = 68, [0][1][2][1][RTW89_KCC][1][55] = 12, [0][1][2][1][RTW89_KCC][0][55] = 127, [0][1][2][1][RTW89_ACMA][1][55] = 127, @@ -42571,6 +43852,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][55] = 127, [0][1][2][1][RTW89_UK][1][55] = 127, [0][1][2][1][RTW89_UK][0][55] = 127, + [0][1][2][1][RTW89_THAILAND][1][55] = 127, + [0][1][2][1][RTW89_THAILAND][0][55] = 127, [0][1][2][1][RTW89_FCC][1][57] = -2, [0][1][2][1][RTW89_FCC][2][57] = 68, [0][1][2][1][RTW89_ETSI][1][57] = 127, @@ -42578,6 +43861,7 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][57] = 127, [0][1][2][1][RTW89_MKK][0][57] = 127, [0][1][2][1][RTW89_IC][1][57] = -2, + [0][1][2][1][RTW89_IC][2][57] = 68, [0][1][2][1][RTW89_KCC][1][57] = 12, [0][1][2][1][RTW89_KCC][0][57] = 127, [0][1][2][1][RTW89_ACMA][1][57] = 127, @@ -42587,6 +43871,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][57] = 127, [0][1][2][1][RTW89_UK][1][57] = 127, [0][1][2][1][RTW89_UK][0][57] = 127, + [0][1][2][1][RTW89_THAILAND][1][57] = 127, + [0][1][2][1][RTW89_THAILAND][0][57] = 127, [0][1][2][1][RTW89_FCC][1][59] = -2, [0][1][2][1][RTW89_FCC][2][59] = 68, [0][1][2][1][RTW89_ETSI][1][59] = 127, @@ -42594,6 +43880,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][59] = 127, [0][1][2][1][RTW89_MKK][0][59] = 127, [0][1][2][1][RTW89_IC][1][59] = -2, + [0][1][2][1][RTW89_IC][2][59] = 68, [0][1][2][1][RTW89_KCC][1][59] = 12, [0][1][2][1][RTW89_KCC][0][59] = 127, [0][1][2][1][RTW89_ACMA][1][59] = 127, @@ -42603,6 +43890,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][59] = 127, [0][1][2][1][RTW89_UK][1][59] = 127, [0][1][2][1][RTW89_UK][0][59] = 127, + [0][1][2][1][RTW89_THAILAND][1][59] = 127, + [0][1][2][1][RTW89_THAILAND][0][59] = 127, [0][1][2][1][RTW89_FCC][1][60] = -2, [0][1][2][1][RTW89_FCC][2][60] = 68, [0][1][2][1][RTW89_ETSI][1][60] = 127, @@ -42610,6 +43899,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][60] = 127, [0][1][2][1][RTW89_MKK][0][60] = 127, [0][1][2][1][RTW89_IC][1][60] = -2, + [0][1][2][1][RTW89_IC][2][60] = 68, [0][1][2][1][RTW89_KCC][1][60] = 12, [0][1][2][1][RTW89_KCC][0][60] = 127, [0][1][2][1][RTW89_ACMA][1][60] = 127, @@ -42619,6 +43909,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][60] = 127, [0][1][2][1][RTW89_UK][1][60] = 127, [0][1][2][1][RTW89_UK][0][60] = 127, + [0][1][2][1][RTW89_THAILAND][1][60] = 127, + [0][1][2][1][RTW89_THAILAND][0][60] = 127, [0][1][2][1][RTW89_FCC][1][62] = -2, [0][1][2][1][RTW89_FCC][2][62] = 68, [0][1][2][1][RTW89_ETSI][1][62] = 127, @@ -42626,6 +43918,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][62] = 127, [0][1][2][1][RTW89_MKK][0][62] = 127, [0][1][2][1][RTW89_IC][1][62] = -2, + [0][1][2][1][RTW89_IC][2][62] = 68, [0][1][2][1][RTW89_KCC][1][62] = 12, [0][1][2][1][RTW89_KCC][0][62] = 127, [0][1][2][1][RTW89_ACMA][1][62] = 127, @@ -42635,6 +43928,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][62] = 127, [0][1][2][1][RTW89_UK][1][62] = 127, [0][1][2][1][RTW89_UK][0][62] = 127, + [0][1][2][1][RTW89_THAILAND][1][62] = 127, + [0][1][2][1][RTW89_THAILAND][0][62] = 127, [0][1][2][1][RTW89_FCC][1][64] = -2, [0][1][2][1][RTW89_FCC][2][64] = 68, [0][1][2][1][RTW89_ETSI][1][64] = 127, @@ -42642,6 +43937,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][64] = 127, [0][1][2][1][RTW89_MKK][0][64] = 127, [0][1][2][1][RTW89_IC][1][64] = -2, + [0][1][2][1][RTW89_IC][2][64] = 68, [0][1][2][1][RTW89_KCC][1][64] = 12, [0][1][2][1][RTW89_KCC][0][64] = 127, [0][1][2][1][RTW89_ACMA][1][64] = 127, @@ -42651,6 +43947,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][64] = 127, [0][1][2][1][RTW89_UK][1][64] = 127, 
[0][1][2][1][RTW89_UK][0][64] = 127, + [0][1][2][1][RTW89_THAILAND][1][64] = 127, + [0][1][2][1][RTW89_THAILAND][0][64] = 127, [0][1][2][1][RTW89_FCC][1][66] = -2, [0][1][2][1][RTW89_FCC][2][66] = 68, [0][1][2][1][RTW89_ETSI][1][66] = 127, @@ -42658,6 +43956,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][66] = 127, [0][1][2][1][RTW89_MKK][0][66] = 127, [0][1][2][1][RTW89_IC][1][66] = -2, + [0][1][2][1][RTW89_IC][2][66] = 68, [0][1][2][1][RTW89_KCC][1][66] = 12, [0][1][2][1][RTW89_KCC][0][66] = 127, [0][1][2][1][RTW89_ACMA][1][66] = 127, @@ -42667,6 +43966,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][66] = 127, [0][1][2][1][RTW89_UK][1][66] = 127, [0][1][2][1][RTW89_UK][0][66] = 127, + [0][1][2][1][RTW89_THAILAND][1][66] = 127, + [0][1][2][1][RTW89_THAILAND][0][66] = 127, [0][1][2][1][RTW89_FCC][1][68] = -2, [0][1][2][1][RTW89_FCC][2][68] = 68, [0][1][2][1][RTW89_ETSI][1][68] = 127, @@ -42674,6 +43975,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][68] = 127, [0][1][2][1][RTW89_MKK][0][68] = 127, [0][1][2][1][RTW89_IC][1][68] = -2, + [0][1][2][1][RTW89_IC][2][68] = 68, [0][1][2][1][RTW89_KCC][1][68] = 12, [0][1][2][1][RTW89_KCC][0][68] = 127, [0][1][2][1][RTW89_ACMA][1][68] = 127, @@ -42683,6 +43985,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][68] = 127, [0][1][2][1][RTW89_UK][1][68] = 127, [0][1][2][1][RTW89_UK][0][68] = 127, + [0][1][2][1][RTW89_THAILAND][1][68] = 127, + [0][1][2][1][RTW89_THAILAND][0][68] = 127, [0][1][2][1][RTW89_FCC][1][70] = -2, [0][1][2][1][RTW89_FCC][2][70] = 68, [0][1][2][1][RTW89_ETSI][1][70] = 127, @@ -42690,6 +43994,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][70] = 127, [0][1][2][1][RTW89_MKK][0][70] = 127, [0][1][2][1][RTW89_IC][1][70] = -2, + [0][1][2][1][RTW89_IC][2][70] = 68, [0][1][2][1][RTW89_KCC][1][70] = 12, [0][1][2][1][RTW89_KCC][0][70] = 127, [0][1][2][1][RTW89_ACMA][1][70] = 127, @@ -42699,6 +44004,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][70] = 127, [0][1][2][1][RTW89_UK][1][70] = 127, [0][1][2][1][RTW89_UK][0][70] = 127, + [0][1][2][1][RTW89_THAILAND][1][70] = 127, + [0][1][2][1][RTW89_THAILAND][0][70] = 127, [0][1][2][1][RTW89_FCC][1][72] = -2, [0][1][2][1][RTW89_FCC][2][72] = 68, [0][1][2][1][RTW89_ETSI][1][72] = 127, @@ -42706,6 +44013,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][72] = 127, [0][1][2][1][RTW89_MKK][0][72] = 127, [0][1][2][1][RTW89_IC][1][72] = -2, + [0][1][2][1][RTW89_IC][2][72] = 68, [0][1][2][1][RTW89_KCC][1][72] = 12, [0][1][2][1][RTW89_KCC][0][72] = 127, [0][1][2][1][RTW89_ACMA][1][72] = 127, @@ -42715,6 +44023,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][72] = 127, [0][1][2][1][RTW89_UK][1][72] = 127, [0][1][2][1][RTW89_UK][0][72] = 127, + [0][1][2][1][RTW89_THAILAND][1][72] = 127, + [0][1][2][1][RTW89_THAILAND][0][72] = 127, [0][1][2][1][RTW89_FCC][1][74] = -2, [0][1][2][1][RTW89_FCC][2][74] = 68, [0][1][2][1][RTW89_ETSI][1][74] = 127, @@ -42722,6 +44032,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][74] = 127, [0][1][2][1][RTW89_MKK][0][74] = 127, [0][1][2][1][RTW89_IC][1][74] = -2, + [0][1][2][1][RTW89_IC][2][74] = 68, [0][1][2][1][RTW89_KCC][1][74] 
= 12, [0][1][2][1][RTW89_KCC][0][74] = 127, [0][1][2][1][RTW89_ACMA][1][74] = 127, @@ -42731,6 +44042,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][74] = 127, [0][1][2][1][RTW89_UK][1][74] = 127, [0][1][2][1][RTW89_UK][0][74] = 127, + [0][1][2][1][RTW89_THAILAND][1][74] = 127, + [0][1][2][1][RTW89_THAILAND][0][74] = 127, [0][1][2][1][RTW89_FCC][1][75] = -2, [0][1][2][1][RTW89_FCC][2][75] = 68, [0][1][2][1][RTW89_ETSI][1][75] = 127, @@ -42738,6 +44051,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][75] = 127, [0][1][2][1][RTW89_MKK][0][75] = 127, [0][1][2][1][RTW89_IC][1][75] = -2, + [0][1][2][1][RTW89_IC][2][75] = 68, [0][1][2][1][RTW89_KCC][1][75] = 12, [0][1][2][1][RTW89_KCC][0][75] = 127, [0][1][2][1][RTW89_ACMA][1][75] = 127, @@ -42747,6 +44061,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][75] = 127, [0][1][2][1][RTW89_UK][1][75] = 127, [0][1][2][1][RTW89_UK][0][75] = 127, + [0][1][2][1][RTW89_THAILAND][1][75] = 127, + [0][1][2][1][RTW89_THAILAND][0][75] = 127, [0][1][2][1][RTW89_FCC][1][77] = -2, [0][1][2][1][RTW89_FCC][2][77] = 68, [0][1][2][1][RTW89_ETSI][1][77] = 127, @@ -42754,6 +44070,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][77] = 127, [0][1][2][1][RTW89_MKK][0][77] = 127, [0][1][2][1][RTW89_IC][1][77] = -2, + [0][1][2][1][RTW89_IC][2][77] = 68, [0][1][2][1][RTW89_KCC][1][77] = 12, [0][1][2][1][RTW89_KCC][0][77] = 127, [0][1][2][1][RTW89_ACMA][1][77] = 127, @@ -42763,6 +44080,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][77] = 127, [0][1][2][1][RTW89_UK][1][77] = 127, [0][1][2][1][RTW89_UK][0][77] = 127, + [0][1][2][1][RTW89_THAILAND][1][77] = 127, + [0][1][2][1][RTW89_THAILAND][0][77] = 127, [0][1][2][1][RTW89_FCC][1][79] = -2, [0][1][2][1][RTW89_FCC][2][79] = 68, [0][1][2][1][RTW89_ETSI][1][79] = 127, @@ -42770,6 +44089,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][79] = 127, [0][1][2][1][RTW89_MKK][0][79] = 127, [0][1][2][1][RTW89_IC][1][79] = -2, + [0][1][2][1][RTW89_IC][2][79] = 68, [0][1][2][1][RTW89_KCC][1][79] = 12, [0][1][2][1][RTW89_KCC][0][79] = 127, [0][1][2][1][RTW89_ACMA][1][79] = 127, @@ -42779,6 +44099,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][79] = 127, [0][1][2][1][RTW89_UK][1][79] = 127, [0][1][2][1][RTW89_UK][0][79] = 127, + [0][1][2][1][RTW89_THAILAND][1][79] = 127, + [0][1][2][1][RTW89_THAILAND][0][79] = 127, [0][1][2][1][RTW89_FCC][1][81] = -2, [0][1][2][1][RTW89_FCC][2][81] = 68, [0][1][2][1][RTW89_ETSI][1][81] = 127, @@ -42786,6 +44108,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][81] = 127, [0][1][2][1][RTW89_MKK][0][81] = 127, [0][1][2][1][RTW89_IC][1][81] = -2, + [0][1][2][1][RTW89_IC][2][81] = 68, [0][1][2][1][RTW89_KCC][1][81] = 12, [0][1][2][1][RTW89_KCC][0][81] = 127, [0][1][2][1][RTW89_ACMA][1][81] = 127, @@ -42795,6 +44118,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][81] = 127, [0][1][2][1][RTW89_UK][1][81] = 127, [0][1][2][1][RTW89_UK][0][81] = 127, + [0][1][2][1][RTW89_THAILAND][1][81] = 127, + [0][1][2][1][RTW89_THAILAND][0][81] = 127, [0][1][2][1][RTW89_FCC][1][83] = -2, [0][1][2][1][RTW89_FCC][2][83] = 68, [0][1][2][1][RTW89_ETSI][1][83] = 127, @@ -42802,6 +44127,7 
@@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][83] = 127, [0][1][2][1][RTW89_MKK][0][83] = 127, [0][1][2][1][RTW89_IC][1][83] = -2, + [0][1][2][1][RTW89_IC][2][83] = 68, [0][1][2][1][RTW89_KCC][1][83] = 20, [0][1][2][1][RTW89_KCC][0][83] = 127, [0][1][2][1][RTW89_ACMA][1][83] = 127, @@ -42811,6 +44137,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][83] = 127, [0][1][2][1][RTW89_UK][1][83] = 127, [0][1][2][1][RTW89_UK][0][83] = 127, + [0][1][2][1][RTW89_THAILAND][1][83] = 127, + [0][1][2][1][RTW89_THAILAND][0][83] = 127, [0][1][2][1][RTW89_FCC][1][85] = -2, [0][1][2][1][RTW89_FCC][2][85] = 68, [0][1][2][1][RTW89_ETSI][1][85] = 127, @@ -42818,6 +44146,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][85] = 127, [0][1][2][1][RTW89_MKK][0][85] = 127, [0][1][2][1][RTW89_IC][1][85] = -2, + [0][1][2][1][RTW89_IC][2][85] = 68, [0][1][2][1][RTW89_KCC][1][85] = 20, [0][1][2][1][RTW89_KCC][0][85] = 127, [0][1][2][1][RTW89_ACMA][1][85] = 127, @@ -42827,6 +44156,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][85] = 127, [0][1][2][1][RTW89_UK][1][85] = 127, [0][1][2][1][RTW89_UK][0][85] = 127, + [0][1][2][1][RTW89_THAILAND][1][85] = 127, + [0][1][2][1][RTW89_THAILAND][0][85] = 127, [0][1][2][1][RTW89_FCC][1][87] = -2, [0][1][2][1][RTW89_FCC][2][87] = 127, [0][1][2][1][RTW89_ETSI][1][87] = 127, @@ -42834,6 +44165,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][87] = 127, [0][1][2][1][RTW89_MKK][0][87] = 127, [0][1][2][1][RTW89_IC][1][87] = -2, + [0][1][2][1][RTW89_IC][2][87] = 127, [0][1][2][1][RTW89_KCC][1][87] = 20, [0][1][2][1][RTW89_KCC][0][87] = 127, [0][1][2][1][RTW89_ACMA][1][87] = 127, @@ -42843,6 +44175,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][87] = 127, [0][1][2][1][RTW89_UK][1][87] = 127, [0][1][2][1][RTW89_UK][0][87] = 127, + [0][1][2][1][RTW89_THAILAND][1][87] = 127, + [0][1][2][1][RTW89_THAILAND][0][87] = 127, [0][1][2][1][RTW89_FCC][1][89] = -2, [0][1][2][1][RTW89_FCC][2][89] = 127, [0][1][2][1][RTW89_ETSI][1][89] = 127, @@ -42850,6 +44184,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][89] = 127, [0][1][2][1][RTW89_MKK][0][89] = 127, [0][1][2][1][RTW89_IC][1][89] = -2, + [0][1][2][1][RTW89_IC][2][89] = 127, [0][1][2][1][RTW89_KCC][1][89] = 20, [0][1][2][1][RTW89_KCC][0][89] = 127, [0][1][2][1][RTW89_ACMA][1][89] = 127, @@ -42859,6 +44194,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][89] = 127, [0][1][2][1][RTW89_UK][1][89] = 127, [0][1][2][1][RTW89_UK][0][89] = 127, + [0][1][2][1][RTW89_THAILAND][1][89] = 127, + [0][1][2][1][RTW89_THAILAND][0][89] = 127, [0][1][2][1][RTW89_FCC][1][90] = -2, [0][1][2][1][RTW89_FCC][2][90] = 127, [0][1][2][1][RTW89_ETSI][1][90] = 127, @@ -42866,6 +44203,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][90] = 127, [0][1][2][1][RTW89_MKK][0][90] = 127, [0][1][2][1][RTW89_IC][1][90] = -2, + [0][1][2][1][RTW89_IC][2][90] = 127, [0][1][2][1][RTW89_KCC][1][90] = 20, [0][1][2][1][RTW89_KCC][0][90] = 127, [0][1][2][1][RTW89_ACMA][1][90] = 127, @@ -42875,6 +44213,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][90] = 127, [0][1][2][1][RTW89_UK][1][90] = 127, 
[0][1][2][1][RTW89_UK][0][90] = 127, + [0][1][2][1][RTW89_THAILAND][1][90] = 127, + [0][1][2][1][RTW89_THAILAND][0][90] = 127, [0][1][2][1][RTW89_FCC][1][92] = -2, [0][1][2][1][RTW89_FCC][2][92] = 127, [0][1][2][1][RTW89_ETSI][1][92] = 127, @@ -42882,6 +44222,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][92] = 127, [0][1][2][1][RTW89_MKK][0][92] = 127, [0][1][2][1][RTW89_IC][1][92] = -2, + [0][1][2][1][RTW89_IC][2][92] = 127, [0][1][2][1][RTW89_KCC][1][92] = 20, [0][1][2][1][RTW89_KCC][0][92] = 127, [0][1][2][1][RTW89_ACMA][1][92] = 127, @@ -42891,6 +44232,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][92] = 127, [0][1][2][1][RTW89_UK][1][92] = 127, [0][1][2][1][RTW89_UK][0][92] = 127, + [0][1][2][1][RTW89_THAILAND][1][92] = 127, + [0][1][2][1][RTW89_THAILAND][0][92] = 127, [0][1][2][1][RTW89_FCC][1][94] = -2, [0][1][2][1][RTW89_FCC][2][94] = 127, [0][1][2][1][RTW89_ETSI][1][94] = 127, @@ -42898,6 +44241,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][94] = 127, [0][1][2][1][RTW89_MKK][0][94] = 127, [0][1][2][1][RTW89_IC][1][94] = -2, + [0][1][2][1][RTW89_IC][2][94] = 127, [0][1][2][1][RTW89_KCC][1][94] = 20, [0][1][2][1][RTW89_KCC][0][94] = 127, [0][1][2][1][RTW89_ACMA][1][94] = 127, @@ -42907,6 +44251,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][94] = 127, [0][1][2][1][RTW89_UK][1][94] = 127, [0][1][2][1][RTW89_UK][0][94] = 127, + [0][1][2][1][RTW89_THAILAND][1][94] = 127, + [0][1][2][1][RTW89_THAILAND][0][94] = 127, [0][1][2][1][RTW89_FCC][1][96] = -2, [0][1][2][1][RTW89_FCC][2][96] = 127, [0][1][2][1][RTW89_ETSI][1][96] = 127, @@ -42914,6 +44260,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][96] = 127, [0][1][2][1][RTW89_MKK][0][96] = 127, [0][1][2][1][RTW89_IC][1][96] = -2, + [0][1][2][1][RTW89_IC][2][96] = 127, [0][1][2][1][RTW89_KCC][1][96] = 20, [0][1][2][1][RTW89_KCC][0][96] = 127, [0][1][2][1][RTW89_ACMA][1][96] = 127, @@ -42923,6 +44270,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][96] = 127, [0][1][2][1][RTW89_UK][1][96] = 127, [0][1][2][1][RTW89_UK][0][96] = 127, + [0][1][2][1][RTW89_THAILAND][1][96] = 127, + [0][1][2][1][RTW89_THAILAND][0][96] = 127, [0][1][2][1][RTW89_FCC][1][98] = -2, [0][1][2][1][RTW89_FCC][2][98] = 127, [0][1][2][1][RTW89_ETSI][1][98] = 127, @@ -42930,6 +44279,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][98] = 127, [0][1][2][1][RTW89_MKK][0][98] = 127, [0][1][2][1][RTW89_IC][1][98] = -2, + [0][1][2][1][RTW89_IC][2][98] = 127, [0][1][2][1][RTW89_KCC][1][98] = 20, [0][1][2][1][RTW89_KCC][0][98] = 127, [0][1][2][1][RTW89_ACMA][1][98] = 127, @@ -42939,6 +44289,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][98] = 127, [0][1][2][1][RTW89_UK][1][98] = 127, [0][1][2][1][RTW89_UK][0][98] = 127, + [0][1][2][1][RTW89_THAILAND][1][98] = 127, + [0][1][2][1][RTW89_THAILAND][0][98] = 127, [0][1][2][1][RTW89_FCC][1][100] = -2, [0][1][2][1][RTW89_FCC][2][100] = 127, [0][1][2][1][RTW89_ETSI][1][100] = 127, @@ -42946,6 +44298,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][100] = 127, [0][1][2][1][RTW89_MKK][0][100] = 127, [0][1][2][1][RTW89_IC][1][100] = -2, + [0][1][2][1][RTW89_IC][2][100] = 127, 
[0][1][2][1][RTW89_KCC][1][100] = 20, [0][1][2][1][RTW89_KCC][0][100] = 127, [0][1][2][1][RTW89_ACMA][1][100] = 127, @@ -42955,6 +44308,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][100] = 127, [0][1][2][1][RTW89_UK][1][100] = 127, [0][1][2][1][RTW89_UK][0][100] = 127, + [0][1][2][1][RTW89_THAILAND][1][100] = 127, + [0][1][2][1][RTW89_THAILAND][0][100] = 127, [0][1][2][1][RTW89_FCC][1][102] = -2, [0][1][2][1][RTW89_FCC][2][102] = 127, [0][1][2][1][RTW89_ETSI][1][102] = 127, @@ -42962,6 +44317,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][102] = 127, [0][1][2][1][RTW89_MKK][0][102] = 127, [0][1][2][1][RTW89_IC][1][102] = -2, + [0][1][2][1][RTW89_IC][2][102] = 127, [0][1][2][1][RTW89_KCC][1][102] = 20, [0][1][2][1][RTW89_KCC][0][102] = 127, [0][1][2][1][RTW89_ACMA][1][102] = 127, @@ -42971,6 +44327,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][102] = 127, [0][1][2][1][RTW89_UK][1][102] = 127, [0][1][2][1][RTW89_UK][0][102] = 127, + [0][1][2][1][RTW89_THAILAND][1][102] = 127, + [0][1][2][1][RTW89_THAILAND][0][102] = 127, [0][1][2][1][RTW89_FCC][1][104] = -2, [0][1][2][1][RTW89_FCC][2][104] = 127, [0][1][2][1][RTW89_ETSI][1][104] = 127, @@ -42978,6 +44336,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][104] = 127, [0][1][2][1][RTW89_MKK][0][104] = 127, [0][1][2][1][RTW89_IC][1][104] = -2, + [0][1][2][1][RTW89_IC][2][104] = 127, [0][1][2][1][RTW89_KCC][1][104] = 20, [0][1][2][1][RTW89_KCC][0][104] = 127, [0][1][2][1][RTW89_ACMA][1][104] = 127, @@ -42987,6 +44346,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][104] = 127, [0][1][2][1][RTW89_UK][1][104] = 127, [0][1][2][1][RTW89_UK][0][104] = 127, + [0][1][2][1][RTW89_THAILAND][1][104] = 127, + [0][1][2][1][RTW89_THAILAND][0][104] = 127, [0][1][2][1][RTW89_FCC][1][105] = -2, [0][1][2][1][RTW89_FCC][2][105] = 127, [0][1][2][1][RTW89_ETSI][1][105] = 127, @@ -42994,6 +44355,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][105] = 127, [0][1][2][1][RTW89_MKK][0][105] = 127, [0][1][2][1][RTW89_IC][1][105] = -2, + [0][1][2][1][RTW89_IC][2][105] = 127, [0][1][2][1][RTW89_KCC][1][105] = 20, [0][1][2][1][RTW89_KCC][0][105] = 127, [0][1][2][1][RTW89_ACMA][1][105] = 127, @@ -43003,6 +44365,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][105] = 127, [0][1][2][1][RTW89_UK][1][105] = 127, [0][1][2][1][RTW89_UK][0][105] = 127, + [0][1][2][1][RTW89_THAILAND][1][105] = 127, + [0][1][2][1][RTW89_THAILAND][0][105] = 127, [0][1][2][1][RTW89_FCC][1][107] = 1, [0][1][2][1][RTW89_FCC][2][107] = 127, [0][1][2][1][RTW89_ETSI][1][107] = 127, @@ -43010,6 +44374,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][107] = 127, [0][1][2][1][RTW89_MKK][0][107] = 127, [0][1][2][1][RTW89_IC][1][107] = 1, + [0][1][2][1][RTW89_IC][2][107] = 127, [0][1][2][1][RTW89_KCC][1][107] = 20, [0][1][2][1][RTW89_KCC][0][107] = 127, [0][1][2][1][RTW89_ACMA][1][107] = 127, @@ -43019,6 +44384,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][107] = 127, [0][1][2][1][RTW89_UK][1][107] = 127, [0][1][2][1][RTW89_UK][0][107] = 127, + [0][1][2][1][RTW89_THAILAND][1][107] = 127, + [0][1][2][1][RTW89_THAILAND][0][107] = 127, 
[0][1][2][1][RTW89_FCC][1][109] = 1, [0][1][2][1][RTW89_FCC][2][109] = 127, [0][1][2][1][RTW89_ETSI][1][109] = 127, @@ -43026,6 +44393,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][109] = 127, [0][1][2][1][RTW89_MKK][0][109] = 127, [0][1][2][1][RTW89_IC][1][109] = 1, + [0][1][2][1][RTW89_IC][2][109] = 127, [0][1][2][1][RTW89_KCC][1][109] = 20, [0][1][2][1][RTW89_KCC][0][109] = 127, [0][1][2][1][RTW89_ACMA][1][109] = 127, @@ -43035,6 +44403,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][109] = 127, [0][1][2][1][RTW89_UK][1][109] = 127, [0][1][2][1][RTW89_UK][0][109] = 127, + [0][1][2][1][RTW89_THAILAND][1][109] = 127, + [0][1][2][1][RTW89_THAILAND][0][109] = 127, [0][1][2][1][RTW89_FCC][1][111] = 127, [0][1][2][1][RTW89_FCC][2][111] = 127, [0][1][2][1][RTW89_ETSI][1][111] = 127, @@ -43042,6 +44412,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][111] = 127, [0][1][2][1][RTW89_MKK][0][111] = 127, [0][1][2][1][RTW89_IC][1][111] = 127, + [0][1][2][1][RTW89_IC][2][111] = 127, [0][1][2][1][RTW89_KCC][1][111] = 127, [0][1][2][1][RTW89_KCC][0][111] = 127, [0][1][2][1][RTW89_ACMA][1][111] = 127, @@ -43051,6 +44422,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][111] = 127, [0][1][2][1][RTW89_UK][1][111] = 127, [0][1][2][1][RTW89_UK][0][111] = 127, + [0][1][2][1][RTW89_THAILAND][1][111] = 127, + [0][1][2][1][RTW89_THAILAND][0][111] = 127, [0][1][2][1][RTW89_FCC][1][113] = 127, [0][1][2][1][RTW89_FCC][2][113] = 127, [0][1][2][1][RTW89_ETSI][1][113] = 127, @@ -43058,6 +44431,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][113] = 127, [0][1][2][1][RTW89_MKK][0][113] = 127, [0][1][2][1][RTW89_IC][1][113] = 127, + [0][1][2][1][RTW89_IC][2][113] = 127, [0][1][2][1][RTW89_KCC][1][113] = 127, [0][1][2][1][RTW89_KCC][0][113] = 127, [0][1][2][1][RTW89_ACMA][1][113] = 127, @@ -43067,6 +44441,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][113] = 127, [0][1][2][1][RTW89_UK][1][113] = 127, [0][1][2][1][RTW89_UK][0][113] = 127, + [0][1][2][1][RTW89_THAILAND][1][113] = 127, + [0][1][2][1][RTW89_THAILAND][0][113] = 127, [0][1][2][1][RTW89_FCC][1][115] = 127, [0][1][2][1][RTW89_FCC][2][115] = 127, [0][1][2][1][RTW89_ETSI][1][115] = 127, @@ -43074,6 +44450,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][115] = 127, [0][1][2][1][RTW89_MKK][0][115] = 127, [0][1][2][1][RTW89_IC][1][115] = 127, + [0][1][2][1][RTW89_IC][2][115] = 127, [0][1][2][1][RTW89_KCC][1][115] = 127, [0][1][2][1][RTW89_KCC][0][115] = 127, [0][1][2][1][RTW89_ACMA][1][115] = 127, @@ -43083,6 +44460,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][115] = 127, [0][1][2][1][RTW89_UK][1][115] = 127, [0][1][2][1][RTW89_UK][0][115] = 127, + [0][1][2][1][RTW89_THAILAND][1][115] = 127, + [0][1][2][1][RTW89_THAILAND][0][115] = 127, [0][1][2][1][RTW89_FCC][1][117] = 127, [0][1][2][1][RTW89_FCC][2][117] = 127, [0][1][2][1][RTW89_ETSI][1][117] = 127, @@ -43090,6 +44469,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][117] = 127, [0][1][2][1][RTW89_MKK][0][117] = 127, [0][1][2][1][RTW89_IC][1][117] = 127, + [0][1][2][1][RTW89_IC][2][117] = 127, [0][1][2][1][RTW89_KCC][1][117] = 127, [0][1][2][1][RTW89_KCC][0][117] 
= 127, [0][1][2][1][RTW89_ACMA][1][117] = 127, @@ -43099,6 +44479,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][117] = 127, [0][1][2][1][RTW89_UK][1][117] = 127, [0][1][2][1][RTW89_UK][0][117] = 127, + [0][1][2][1][RTW89_THAILAND][1][117] = 127, + [0][1][2][1][RTW89_THAILAND][0][117] = 127, [0][1][2][1][RTW89_FCC][1][119] = 127, [0][1][2][1][RTW89_FCC][2][119] = 127, [0][1][2][1][RTW89_ETSI][1][119] = 127, @@ -43106,6 +44488,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MKK][1][119] = 127, [0][1][2][1][RTW89_MKK][0][119] = 127, [0][1][2][1][RTW89_IC][1][119] = 127, + [0][1][2][1][RTW89_IC][2][119] = 127, [0][1][2][1][RTW89_KCC][1][119] = 127, [0][1][2][1][RTW89_KCC][0][119] = 127, [0][1][2][1][RTW89_ACMA][1][119] = 127, @@ -43115,6 +44498,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_QATAR][0][119] = 127, [0][1][2][1][RTW89_UK][1][119] = 127, [0][1][2][1][RTW89_UK][0][119] = 127, + [0][1][2][1][RTW89_THAILAND][1][119] = 127, + [0][1][2][1][RTW89_THAILAND][0][119] = 127, [1][0][2][0][RTW89_FCC][1][1] = 34, [1][0][2][0][RTW89_FCC][2][1] = 70, [1][0][2][0][RTW89_ETSI][1][1] = 66, @@ -43122,6 +44507,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][1] = 62, [1][0][2][0][RTW89_MKK][0][1] = 26, [1][0][2][0][RTW89_IC][1][1] = 34, + [1][0][2][0][RTW89_IC][2][1] = 70, [1][0][2][0][RTW89_KCC][1][1] = 40, [1][0][2][0][RTW89_KCC][0][1] = 24, [1][0][2][0][RTW89_ACMA][1][1] = 66, @@ -43131,6 +44517,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][1] = 30, [1][0][2][0][RTW89_UK][1][1] = 66, [1][0][2][0][RTW89_UK][0][1] = 30, + [1][0][2][0][RTW89_THAILAND][1][1] = 68, + [1][0][2][0][RTW89_THAILAND][0][1] = 30, [1][0][2][0][RTW89_FCC][1][5] = 34, [1][0][2][0][RTW89_FCC][2][5] = 70, [1][0][2][0][RTW89_ETSI][1][5] = 66, @@ -43138,6 +44526,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][5] = 62, [1][0][2][0][RTW89_MKK][0][5] = 26, [1][0][2][0][RTW89_IC][1][5] = 34, + [1][0][2][0][RTW89_IC][2][5] = 70, [1][0][2][0][RTW89_KCC][1][5] = 40, [1][0][2][0][RTW89_KCC][0][5] = 24, [1][0][2][0][RTW89_ACMA][1][5] = 66, @@ -43147,6 +44536,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][5] = 30, [1][0][2][0][RTW89_UK][1][5] = 66, [1][0][2][0][RTW89_UK][0][5] = 30, + [1][0][2][0][RTW89_THAILAND][1][5] = 68, + [1][0][2][0][RTW89_THAILAND][0][5] = 30, [1][0][2][0][RTW89_FCC][1][9] = 34, [1][0][2][0][RTW89_FCC][2][9] = 70, [1][0][2][0][RTW89_ETSI][1][9] = 66, @@ -43154,6 +44545,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][9] = 62, [1][0][2][0][RTW89_MKK][0][9] = 26, [1][0][2][0][RTW89_IC][1][9] = 34, + [1][0][2][0][RTW89_IC][2][9] = 70, [1][0][2][0][RTW89_KCC][1][9] = 40, [1][0][2][0][RTW89_KCC][0][9] = 24, [1][0][2][0][RTW89_ACMA][1][9] = 66, @@ -43163,6 +44555,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][9] = 30, [1][0][2][0][RTW89_UK][1][9] = 66, [1][0][2][0][RTW89_UK][0][9] = 30, + [1][0][2][0][RTW89_THAILAND][1][9] = 68, + [1][0][2][0][RTW89_THAILAND][0][9] = 30, [1][0][2][0][RTW89_FCC][1][13] = 34, [1][0][2][0][RTW89_FCC][2][13] = 70, [1][0][2][0][RTW89_ETSI][1][13] = 66, @@ -43170,6 +44564,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] 
[1][0][2][0][RTW89_MKK][1][13] = 62, [1][0][2][0][RTW89_MKK][0][13] = 26, [1][0][2][0][RTW89_IC][1][13] = 34, + [1][0][2][0][RTW89_IC][2][13] = 70, [1][0][2][0][RTW89_KCC][1][13] = 40, [1][0][2][0][RTW89_KCC][0][13] = 24, [1][0][2][0][RTW89_ACMA][1][13] = 66, @@ -43179,6 +44574,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][13] = 30, [1][0][2][0][RTW89_UK][1][13] = 66, [1][0][2][0][RTW89_UK][0][13] = 30, + [1][0][2][0][RTW89_THAILAND][1][13] = 68, + [1][0][2][0][RTW89_THAILAND][0][13] = 30, [1][0][2][0][RTW89_FCC][1][16] = 34, [1][0][2][0][RTW89_FCC][2][16] = 70, [1][0][2][0][RTW89_ETSI][1][16] = 66, @@ -43186,6 +44583,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][16] = 62, [1][0][2][0][RTW89_MKK][0][16] = 26, [1][0][2][0][RTW89_IC][1][16] = 34, + [1][0][2][0][RTW89_IC][2][16] = 70, [1][0][2][0][RTW89_KCC][1][16] = 40, [1][0][2][0][RTW89_KCC][0][16] = 24, [1][0][2][0][RTW89_ACMA][1][16] = 66, @@ -43195,6 +44593,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][16] = 30, [1][0][2][0][RTW89_UK][1][16] = 66, [1][0][2][0][RTW89_UK][0][16] = 30, + [1][0][2][0][RTW89_THAILAND][1][16] = 68, + [1][0][2][0][RTW89_THAILAND][0][16] = 30, [1][0][2][0][RTW89_FCC][1][20] = 34, [1][0][2][0][RTW89_FCC][2][20] = 70, [1][0][2][0][RTW89_ETSI][1][20] = 66, @@ -43202,6 +44602,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][20] = 62, [1][0][2][0][RTW89_MKK][0][20] = 26, [1][0][2][0][RTW89_IC][1][20] = 34, + [1][0][2][0][RTW89_IC][2][20] = 70, [1][0][2][0][RTW89_KCC][1][20] = 40, [1][0][2][0][RTW89_KCC][0][20] = 24, [1][0][2][0][RTW89_ACMA][1][20] = 66, @@ -43211,6 +44612,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][20] = 30, [1][0][2][0][RTW89_UK][1][20] = 66, [1][0][2][0][RTW89_UK][0][20] = 30, + [1][0][2][0][RTW89_THAILAND][1][20] = 68, + [1][0][2][0][RTW89_THAILAND][0][20] = 30, [1][0][2][0][RTW89_FCC][1][24] = 36, [1][0][2][0][RTW89_FCC][2][24] = 70, [1][0][2][0][RTW89_ETSI][1][24] = 66, @@ -43218,6 +44621,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][24] = 64, [1][0][2][0][RTW89_MKK][0][24] = 28, [1][0][2][0][RTW89_IC][1][24] = 36, + [1][0][2][0][RTW89_IC][2][24] = 70, [1][0][2][0][RTW89_KCC][1][24] = 40, [1][0][2][0][RTW89_KCC][0][24] = 26, [1][0][2][0][RTW89_ACMA][1][24] = 66, @@ -43227,6 +44631,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][24] = 30, [1][0][2][0][RTW89_UK][1][24] = 66, [1][0][2][0][RTW89_UK][0][24] = 30, + [1][0][2][0][RTW89_THAILAND][1][24] = 68, + [1][0][2][0][RTW89_THAILAND][0][24] = 30, [1][0][2][0][RTW89_FCC][1][28] = 34, [1][0][2][0][RTW89_FCC][2][28] = 70, [1][0][2][0][RTW89_ETSI][1][28] = 66, @@ -43234,6 +44640,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][28] = 64, [1][0][2][0][RTW89_MKK][0][28] = 26, [1][0][2][0][RTW89_IC][1][28] = 34, + [1][0][2][0][RTW89_IC][2][28] = 70, [1][0][2][0][RTW89_KCC][1][28] = 40, [1][0][2][0][RTW89_KCC][0][28] = 26, [1][0][2][0][RTW89_ACMA][1][28] = 66, @@ -43243,6 +44650,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][28] = 30, [1][0][2][0][RTW89_UK][1][28] = 66, [1][0][2][0][RTW89_UK][0][28] = 30, + [1][0][2][0][RTW89_THAILAND][1][28] = 68, + [1][0][2][0][RTW89_THAILAND][0][28] = 30, 
[1][0][2][0][RTW89_FCC][1][31] = 34, [1][0][2][0][RTW89_FCC][2][31] = 70, [1][0][2][0][RTW89_ETSI][1][31] = 66, @@ -43250,6 +44659,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][31] = 64, [1][0][2][0][RTW89_MKK][0][31] = 26, [1][0][2][0][RTW89_IC][1][31] = 34, + [1][0][2][0][RTW89_IC][2][31] = 70, [1][0][2][0][RTW89_KCC][1][31] = 40, [1][0][2][0][RTW89_KCC][0][31] = 26, [1][0][2][0][RTW89_ACMA][1][31] = 66, @@ -43259,6 +44669,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][31] = 30, [1][0][2][0][RTW89_UK][1][31] = 66, [1][0][2][0][RTW89_UK][0][31] = 30, + [1][0][2][0][RTW89_THAILAND][1][31] = 68, + [1][0][2][0][RTW89_THAILAND][0][31] = 30, [1][0][2][0][RTW89_FCC][1][35] = 34, [1][0][2][0][RTW89_FCC][2][35] = 70, [1][0][2][0][RTW89_ETSI][1][35] = 66, @@ -43266,6 +44678,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][35] = 64, [1][0][2][0][RTW89_MKK][0][35] = 26, [1][0][2][0][RTW89_IC][1][35] = 34, + [1][0][2][0][RTW89_IC][2][35] = 70, [1][0][2][0][RTW89_KCC][1][35] = 40, [1][0][2][0][RTW89_KCC][0][35] = 26, [1][0][2][0][RTW89_ACMA][1][35] = 66, @@ -43275,6 +44688,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][35] = 30, [1][0][2][0][RTW89_UK][1][35] = 66, [1][0][2][0][RTW89_UK][0][35] = 30, + [1][0][2][0][RTW89_THAILAND][1][35] = 68, + [1][0][2][0][RTW89_THAILAND][0][35] = 30, [1][0][2][0][RTW89_FCC][1][39] = 34, [1][0][2][0][RTW89_FCC][2][39] = 70, [1][0][2][0][RTW89_ETSI][1][39] = 66, @@ -43282,6 +44697,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][39] = 64, [1][0][2][0][RTW89_MKK][0][39] = 26, [1][0][2][0][RTW89_IC][1][39] = 34, + [1][0][2][0][RTW89_IC][2][39] = 70, [1][0][2][0][RTW89_KCC][1][39] = 40, [1][0][2][0][RTW89_KCC][0][39] = 26, [1][0][2][0][RTW89_ACMA][1][39] = 66, @@ -43291,6 +44707,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][39] = 30, [1][0][2][0][RTW89_UK][1][39] = 66, [1][0][2][0][RTW89_UK][0][39] = 30, + [1][0][2][0][RTW89_THAILAND][1][39] = 68, + [1][0][2][0][RTW89_THAILAND][0][39] = 30, [1][0][2][0][RTW89_FCC][1][43] = 34, [1][0][2][0][RTW89_FCC][2][43] = 70, [1][0][2][0][RTW89_ETSI][1][43] = 66, @@ -43298,6 +44716,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][43] = 64, [1][0][2][0][RTW89_MKK][0][43] = 26, [1][0][2][0][RTW89_IC][1][43] = 34, + [1][0][2][0][RTW89_IC][2][43] = 70, [1][0][2][0][RTW89_KCC][1][43] = 40, [1][0][2][0][RTW89_KCC][0][43] = 26, [1][0][2][0][RTW89_ACMA][1][43] = 66, @@ -43307,6 +44726,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][43] = 30, [1][0][2][0][RTW89_UK][1][43] = 66, [1][0][2][0][RTW89_UK][0][43] = 30, + [1][0][2][0][RTW89_THAILAND][1][43] = 68, + [1][0][2][0][RTW89_THAILAND][0][43] = 30, [1][0][2][0][RTW89_FCC][1][46] = 34, [1][0][2][0][RTW89_FCC][2][46] = 127, [1][0][2][0][RTW89_ETSI][1][46] = 127, @@ -43314,6 +44735,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][46] = 127, [1][0][2][0][RTW89_MKK][0][46] = 127, [1][0][2][0][RTW89_IC][1][46] = 34, + [1][0][2][0][RTW89_IC][2][46] = 68, [1][0][2][0][RTW89_KCC][1][46] = 40, [1][0][2][0][RTW89_KCC][0][46] = 127, [1][0][2][0][RTW89_ACMA][1][46] = 127, @@ -43323,6 +44745,8 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][46] = 127, [1][0][2][0][RTW89_UK][1][46] = 127, [1][0][2][0][RTW89_UK][0][46] = 127, + [1][0][2][0][RTW89_THAILAND][1][46] = 127, + [1][0][2][0][RTW89_THAILAND][0][46] = 127, [1][0][2][0][RTW89_FCC][1][50] = 34, [1][0][2][0][RTW89_FCC][2][50] = 127, [1][0][2][0][RTW89_ETSI][1][50] = 127, @@ -43330,6 +44754,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][50] = 127, [1][0][2][0][RTW89_MKK][0][50] = 127, [1][0][2][0][RTW89_IC][1][50] = 34, + [1][0][2][0][RTW89_IC][2][50] = 68, [1][0][2][0][RTW89_KCC][1][50] = 40, [1][0][2][0][RTW89_KCC][0][50] = 127, [1][0][2][0][RTW89_ACMA][1][50] = 127, @@ -43339,6 +44764,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][50] = 127, [1][0][2][0][RTW89_UK][1][50] = 127, [1][0][2][0][RTW89_UK][0][50] = 127, + [1][0][2][0][RTW89_THAILAND][1][50] = 127, + [1][0][2][0][RTW89_THAILAND][0][50] = 127, [1][0][2][0][RTW89_FCC][1][54] = 36, [1][0][2][0][RTW89_FCC][2][54] = 127, [1][0][2][0][RTW89_ETSI][1][54] = 127, @@ -43346,6 +44773,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][54] = 127, [1][0][2][0][RTW89_MKK][0][54] = 127, [1][0][2][0][RTW89_IC][1][54] = 36, + [1][0][2][0][RTW89_IC][2][54] = 127, [1][0][2][0][RTW89_KCC][1][54] = 40, [1][0][2][0][RTW89_KCC][0][54] = 127, [1][0][2][0][RTW89_ACMA][1][54] = 127, @@ -43355,6 +44783,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][54] = 127, [1][0][2][0][RTW89_UK][1][54] = 127, [1][0][2][0][RTW89_UK][0][54] = 127, + [1][0][2][0][RTW89_THAILAND][1][54] = 127, + [1][0][2][0][RTW89_THAILAND][0][54] = 127, [1][0][2][0][RTW89_FCC][1][58] = 36, [1][0][2][0][RTW89_FCC][2][58] = 66, [1][0][2][0][RTW89_ETSI][1][58] = 127, @@ -43362,6 +44792,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][58] = 127, [1][0][2][0][RTW89_MKK][0][58] = 127, [1][0][2][0][RTW89_IC][1][58] = 36, + [1][0][2][0][RTW89_IC][2][58] = 66, [1][0][2][0][RTW89_KCC][1][58] = 40, [1][0][2][0][RTW89_KCC][0][58] = 127, [1][0][2][0][RTW89_ACMA][1][58] = 127, @@ -43371,6 +44802,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][58] = 127, [1][0][2][0][RTW89_UK][1][58] = 127, [1][0][2][0][RTW89_UK][0][58] = 127, + [1][0][2][0][RTW89_THAILAND][1][58] = 127, + [1][0][2][0][RTW89_THAILAND][0][58] = 127, [1][0][2][0][RTW89_FCC][1][61] = 34, [1][0][2][0][RTW89_FCC][2][61] = 66, [1][0][2][0][RTW89_ETSI][1][61] = 127, @@ -43378,6 +44811,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][61] = 127, [1][0][2][0][RTW89_MKK][0][61] = 127, [1][0][2][0][RTW89_IC][1][61] = 34, + [1][0][2][0][RTW89_IC][2][61] = 66, [1][0][2][0][RTW89_KCC][1][61] = 40, [1][0][2][0][RTW89_KCC][0][61] = 127, [1][0][2][0][RTW89_ACMA][1][61] = 127, @@ -43387,6 +44821,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][61] = 127, [1][0][2][0][RTW89_UK][1][61] = 127, [1][0][2][0][RTW89_UK][0][61] = 127, + [1][0][2][0][RTW89_THAILAND][1][61] = 127, + [1][0][2][0][RTW89_THAILAND][0][61] = 127, [1][0][2][0][RTW89_FCC][1][65] = 34, [1][0][2][0][RTW89_FCC][2][65] = 66, [1][0][2][0][RTW89_ETSI][1][65] = 127, @@ -43394,6 +44830,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][65] = 127, 
[1][0][2][0][RTW89_MKK][0][65] = 127, [1][0][2][0][RTW89_IC][1][65] = 34, + [1][0][2][0][RTW89_IC][2][65] = 66, [1][0][2][0][RTW89_KCC][1][65] = 40, [1][0][2][0][RTW89_KCC][0][65] = 127, [1][0][2][0][RTW89_ACMA][1][65] = 127, @@ -43403,6 +44840,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][65] = 127, [1][0][2][0][RTW89_UK][1][65] = 127, [1][0][2][0][RTW89_UK][0][65] = 127, + [1][0][2][0][RTW89_THAILAND][1][65] = 127, + [1][0][2][0][RTW89_THAILAND][0][65] = 127, [1][0][2][0][RTW89_FCC][1][69] = 34, [1][0][2][0][RTW89_FCC][2][69] = 66, [1][0][2][0][RTW89_ETSI][1][69] = 127, @@ -43410,6 +44849,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][69] = 127, [1][0][2][0][RTW89_MKK][0][69] = 127, [1][0][2][0][RTW89_IC][1][69] = 34, + [1][0][2][0][RTW89_IC][2][69] = 66, [1][0][2][0][RTW89_KCC][1][69] = 40, [1][0][2][0][RTW89_KCC][0][69] = 127, [1][0][2][0][RTW89_ACMA][1][69] = 127, @@ -43419,6 +44859,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][69] = 127, [1][0][2][0][RTW89_UK][1][69] = 127, [1][0][2][0][RTW89_UK][0][69] = 127, + [1][0][2][0][RTW89_THAILAND][1][69] = 127, + [1][0][2][0][RTW89_THAILAND][0][69] = 127, [1][0][2][0][RTW89_FCC][1][73] = 34, [1][0][2][0][RTW89_FCC][2][73] = 66, [1][0][2][0][RTW89_ETSI][1][73] = 127, @@ -43426,6 +44868,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][73] = 127, [1][0][2][0][RTW89_MKK][0][73] = 127, [1][0][2][0][RTW89_IC][1][73] = 34, + [1][0][2][0][RTW89_IC][2][73] = 66, [1][0][2][0][RTW89_KCC][1][73] = 40, [1][0][2][0][RTW89_KCC][0][73] = 127, [1][0][2][0][RTW89_ACMA][1][73] = 127, @@ -43435,6 +44878,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][73] = 127, [1][0][2][0][RTW89_UK][1][73] = 127, [1][0][2][0][RTW89_UK][0][73] = 127, + [1][0][2][0][RTW89_THAILAND][1][73] = 127, + [1][0][2][0][RTW89_THAILAND][0][73] = 127, [1][0][2][0][RTW89_FCC][1][76] = 34, [1][0][2][0][RTW89_FCC][2][76] = 66, [1][0][2][0][RTW89_ETSI][1][76] = 127, @@ -43442,6 +44887,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][76] = 127, [1][0][2][0][RTW89_MKK][0][76] = 127, [1][0][2][0][RTW89_IC][1][76] = 34, + [1][0][2][0][RTW89_IC][2][76] = 66, [1][0][2][0][RTW89_KCC][1][76] = 40, [1][0][2][0][RTW89_KCC][0][76] = 127, [1][0][2][0][RTW89_ACMA][1][76] = 127, @@ -43451,6 +44897,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][76] = 127, [1][0][2][0][RTW89_UK][1][76] = 127, [1][0][2][0][RTW89_UK][0][76] = 127, + [1][0][2][0][RTW89_THAILAND][1][76] = 127, + [1][0][2][0][RTW89_THAILAND][0][76] = 127, [1][0][2][0][RTW89_FCC][1][80] = 34, [1][0][2][0][RTW89_FCC][2][80] = 66, [1][0][2][0][RTW89_ETSI][1][80] = 127, @@ -43458,6 +44906,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][80] = 127, [1][0][2][0][RTW89_MKK][0][80] = 127, [1][0][2][0][RTW89_IC][1][80] = 34, + [1][0][2][0][RTW89_IC][2][80] = 66, [1][0][2][0][RTW89_KCC][1][80] = 42, [1][0][2][0][RTW89_KCC][0][80] = 127, [1][0][2][0][RTW89_ACMA][1][80] = 127, @@ -43467,6 +44916,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][80] = 127, [1][0][2][0][RTW89_UK][1][80] = 127, [1][0][2][0][RTW89_UK][0][80] = 127, + [1][0][2][0][RTW89_THAILAND][1][80] = 127, + 
[1][0][2][0][RTW89_THAILAND][0][80] = 127, [1][0][2][0][RTW89_FCC][1][84] = 34, [1][0][2][0][RTW89_FCC][2][84] = 66, [1][0][2][0][RTW89_ETSI][1][84] = 127, @@ -43474,6 +44925,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][84] = 127, [1][0][2][0][RTW89_MKK][0][84] = 127, [1][0][2][0][RTW89_IC][1][84] = 34, + [1][0][2][0][RTW89_IC][2][84] = 66, [1][0][2][0][RTW89_KCC][1][84] = 42, [1][0][2][0][RTW89_KCC][0][84] = 127, [1][0][2][0][RTW89_ACMA][1][84] = 127, @@ -43483,6 +44935,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][84] = 127, [1][0][2][0][RTW89_UK][1][84] = 127, [1][0][2][0][RTW89_UK][0][84] = 127, + [1][0][2][0][RTW89_THAILAND][1][84] = 127, + [1][0][2][0][RTW89_THAILAND][0][84] = 127, [1][0][2][0][RTW89_FCC][1][88] = 34, [1][0][2][0][RTW89_FCC][2][88] = 127, [1][0][2][0][RTW89_ETSI][1][88] = 127, @@ -43490,6 +44944,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][88] = 127, [1][0][2][0][RTW89_MKK][0][88] = 127, [1][0][2][0][RTW89_IC][1][88] = 34, + [1][0][2][0][RTW89_IC][2][88] = 127, [1][0][2][0][RTW89_KCC][1][88] = 42, [1][0][2][0][RTW89_KCC][0][88] = 127, [1][0][2][0][RTW89_ACMA][1][88] = 127, @@ -43499,6 +44954,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][88] = 127, [1][0][2][0][RTW89_UK][1][88] = 127, [1][0][2][0][RTW89_UK][0][88] = 127, + [1][0][2][0][RTW89_THAILAND][1][88] = 127, + [1][0][2][0][RTW89_THAILAND][0][88] = 127, [1][0][2][0][RTW89_FCC][1][91] = 36, [1][0][2][0][RTW89_FCC][2][91] = 127, [1][0][2][0][RTW89_ETSI][1][91] = 127, @@ -43506,6 +44963,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][91] = 127, [1][0][2][0][RTW89_MKK][0][91] = 127, [1][0][2][0][RTW89_IC][1][91] = 36, + [1][0][2][0][RTW89_IC][2][91] = 127, [1][0][2][0][RTW89_KCC][1][91] = 42, [1][0][2][0][RTW89_KCC][0][91] = 127, [1][0][2][0][RTW89_ACMA][1][91] = 127, @@ -43515,6 +44973,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][91] = 127, [1][0][2][0][RTW89_UK][1][91] = 127, [1][0][2][0][RTW89_UK][0][91] = 127, + [1][0][2][0][RTW89_THAILAND][1][91] = 127, + [1][0][2][0][RTW89_THAILAND][0][91] = 127, [1][0][2][0][RTW89_FCC][1][95] = 34, [1][0][2][0][RTW89_FCC][2][95] = 127, [1][0][2][0][RTW89_ETSI][1][95] = 127, @@ -43522,6 +44982,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][95] = 127, [1][0][2][0][RTW89_MKK][0][95] = 127, [1][0][2][0][RTW89_IC][1][95] = 34, + [1][0][2][0][RTW89_IC][2][95] = 127, [1][0][2][0][RTW89_KCC][1][95] = 42, [1][0][2][0][RTW89_KCC][0][95] = 127, [1][0][2][0][RTW89_ACMA][1][95] = 127, @@ -43531,6 +44992,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][95] = 127, [1][0][2][0][RTW89_UK][1][95] = 127, [1][0][2][0][RTW89_UK][0][95] = 127, + [1][0][2][0][RTW89_THAILAND][1][95] = 127, + [1][0][2][0][RTW89_THAILAND][0][95] = 127, [1][0][2][0][RTW89_FCC][1][99] = 34, [1][0][2][0][RTW89_FCC][2][99] = 127, [1][0][2][0][RTW89_ETSI][1][99] = 127, @@ -43538,6 +45001,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][99] = 127, [1][0][2][0][RTW89_MKK][0][99] = 127, [1][0][2][0][RTW89_IC][1][99] = 34, + [1][0][2][0][RTW89_IC][2][99] = 127, [1][0][2][0][RTW89_KCC][1][99] = 42, [1][0][2][0][RTW89_KCC][0][99] = 127, [1][0][2][0][RTW89_ACMA][1][99] 
= 127, @@ -43547,6 +45011,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][99] = 127, [1][0][2][0][RTW89_UK][1][99] = 127, [1][0][2][0][RTW89_UK][0][99] = 127, + [1][0][2][0][RTW89_THAILAND][1][99] = 127, + [1][0][2][0][RTW89_THAILAND][0][99] = 127, [1][0][2][0][RTW89_FCC][1][103] = 34, [1][0][2][0][RTW89_FCC][2][103] = 127, [1][0][2][0][RTW89_ETSI][1][103] = 127, @@ -43554,6 +45020,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][103] = 127, [1][0][2][0][RTW89_MKK][0][103] = 127, [1][0][2][0][RTW89_IC][1][103] = 34, + [1][0][2][0][RTW89_IC][2][103] = 127, [1][0][2][0][RTW89_KCC][1][103] = 42, [1][0][2][0][RTW89_KCC][0][103] = 127, [1][0][2][0][RTW89_ACMA][1][103] = 127, @@ -43563,6 +45030,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][103] = 127, [1][0][2][0][RTW89_UK][1][103] = 127, [1][0][2][0][RTW89_UK][0][103] = 127, + [1][0][2][0][RTW89_THAILAND][1][103] = 127, + [1][0][2][0][RTW89_THAILAND][0][103] = 127, [1][0][2][0][RTW89_FCC][1][106] = 36, [1][0][2][0][RTW89_FCC][2][106] = 127, [1][0][2][0][RTW89_ETSI][1][106] = 127, @@ -43570,6 +45039,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][106] = 127, [1][0][2][0][RTW89_MKK][0][106] = 127, [1][0][2][0][RTW89_IC][1][106] = 36, + [1][0][2][0][RTW89_IC][2][106] = 127, [1][0][2][0][RTW89_KCC][1][106] = 42, [1][0][2][0][RTW89_KCC][0][106] = 127, [1][0][2][0][RTW89_ACMA][1][106] = 127, @@ -43579,6 +45049,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][106] = 127, [1][0][2][0][RTW89_UK][1][106] = 127, [1][0][2][0][RTW89_UK][0][106] = 127, + [1][0][2][0][RTW89_THAILAND][1][106] = 127, + [1][0][2][0][RTW89_THAILAND][0][106] = 127, [1][0][2][0][RTW89_FCC][1][110] = 127, [1][0][2][0][RTW89_FCC][2][110] = 127, [1][0][2][0][RTW89_ETSI][1][110] = 127, @@ -43586,6 +45058,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][110] = 127, [1][0][2][0][RTW89_MKK][0][110] = 127, [1][0][2][0][RTW89_IC][1][110] = 127, + [1][0][2][0][RTW89_IC][2][110] = 127, [1][0][2][0][RTW89_KCC][1][110] = 127, [1][0][2][0][RTW89_KCC][0][110] = 127, [1][0][2][0][RTW89_ACMA][1][110] = 127, @@ -43595,6 +45068,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][110] = 127, [1][0][2][0][RTW89_UK][1][110] = 127, [1][0][2][0][RTW89_UK][0][110] = 127, + [1][0][2][0][RTW89_THAILAND][1][110] = 127, + [1][0][2][0][RTW89_THAILAND][0][110] = 127, [1][0][2][0][RTW89_FCC][1][114] = 127, [1][0][2][0][RTW89_FCC][2][114] = 127, [1][0][2][0][RTW89_ETSI][1][114] = 127, @@ -43602,6 +45077,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][114] = 127, [1][0][2][0][RTW89_MKK][0][114] = 127, [1][0][2][0][RTW89_IC][1][114] = 127, + [1][0][2][0][RTW89_IC][2][114] = 127, [1][0][2][0][RTW89_KCC][1][114] = 127, [1][0][2][0][RTW89_KCC][0][114] = 127, [1][0][2][0][RTW89_ACMA][1][114] = 127, @@ -43611,6 +45087,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][114] = 127, [1][0][2][0][RTW89_UK][1][114] = 127, [1][0][2][0][RTW89_UK][0][114] = 127, + [1][0][2][0][RTW89_THAILAND][1][114] = 127, + [1][0][2][0][RTW89_THAILAND][0][114] = 127, [1][0][2][0][RTW89_FCC][1][118] = 127, [1][0][2][0][RTW89_FCC][2][118] = 127, [1][0][2][0][RTW89_ETSI][1][118] = 127, @@ -43618,6 
+45096,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1][118] = 127, [1][0][2][0][RTW89_MKK][0][118] = 127, [1][0][2][0][RTW89_IC][1][118] = 127, + [1][0][2][0][RTW89_IC][2][118] = 127, [1][0][2][0][RTW89_KCC][1][118] = 127, [1][0][2][0][RTW89_KCC][0][118] = 127, [1][0][2][0][RTW89_ACMA][1][118] = 127, @@ -43627,6 +45106,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_QATAR][0][118] = 127, [1][0][2][0][RTW89_UK][1][118] = 127, [1][0][2][0][RTW89_UK][0][118] = 127, + [1][0][2][0][RTW89_THAILAND][1][118] = 127, + [1][0][2][0][RTW89_THAILAND][0][118] = 127, [1][1][2][0][RTW89_FCC][1][1] = 10, [1][1][2][0][RTW89_FCC][2][1] = 58, [1][1][2][0][RTW89_ETSI][1][1] = 54, @@ -43634,6 +45115,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][1] = 52, [1][1][2][0][RTW89_MKK][0][1] = 12, [1][1][2][0][RTW89_IC][1][1] = 10, + [1][1][2][0][RTW89_IC][2][1] = 58, [1][1][2][0][RTW89_KCC][1][1] = 28, [1][1][2][0][RTW89_KCC][0][1] = 12, [1][1][2][0][RTW89_ACMA][1][1] = 54, @@ -43643,6 +45125,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][1] = 18, [1][1][2][0][RTW89_UK][1][1] = 54, [1][1][2][0][RTW89_UK][0][1] = 18, + [1][1][2][0][RTW89_THAILAND][1][1] = 46, + [1][1][2][0][RTW89_THAILAND][0][1] = 10, [1][1][2][0][RTW89_FCC][1][5] = 10, [1][1][2][0][RTW89_FCC][2][5] = 58, [1][1][2][0][RTW89_ETSI][1][5] = 54, @@ -43650,6 +45134,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][5] = 52, [1][1][2][0][RTW89_MKK][0][5] = 12, [1][1][2][0][RTW89_IC][1][5] = 10, + [1][1][2][0][RTW89_IC][2][5] = 58, [1][1][2][0][RTW89_KCC][1][5] = 28, [1][1][2][0][RTW89_KCC][0][5] = 12, [1][1][2][0][RTW89_ACMA][1][5] = 54, @@ -43659,6 +45144,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][5] = 16, [1][1][2][0][RTW89_UK][1][5] = 54, [1][1][2][0][RTW89_UK][0][5] = 16, + [1][1][2][0][RTW89_THAILAND][1][5] = 46, + [1][1][2][0][RTW89_THAILAND][0][5] = 10, [1][1][2][0][RTW89_FCC][1][9] = 10, [1][1][2][0][RTW89_FCC][2][9] = 58, [1][1][2][0][RTW89_ETSI][1][9] = 54, @@ -43666,6 +45153,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][9] = 52, [1][1][2][0][RTW89_MKK][0][9] = 12, [1][1][2][0][RTW89_IC][1][9] = 10, + [1][1][2][0][RTW89_IC][2][9] = 58, [1][1][2][0][RTW89_KCC][1][9] = 28, [1][1][2][0][RTW89_KCC][0][9] = 12, [1][1][2][0][RTW89_ACMA][1][9] = 54, @@ -43675,6 +45163,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][9] = 16, [1][1][2][0][RTW89_UK][1][9] = 54, [1][1][2][0][RTW89_UK][0][9] = 16, + [1][1][2][0][RTW89_THAILAND][1][9] = 46, + [1][1][2][0][RTW89_THAILAND][0][9] = 10, [1][1][2][0][RTW89_FCC][1][13] = 10, [1][1][2][0][RTW89_FCC][2][13] = 58, [1][1][2][0][RTW89_ETSI][1][13] = 54, @@ -43682,6 +45172,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][13] = 52, [1][1][2][0][RTW89_MKK][0][13] = 12, [1][1][2][0][RTW89_IC][1][13] = 10, + [1][1][2][0][RTW89_IC][2][13] = 58, [1][1][2][0][RTW89_KCC][1][13] = 28, [1][1][2][0][RTW89_KCC][0][13] = 12, [1][1][2][0][RTW89_ACMA][1][13] = 54, @@ -43691,6 +45182,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][13] = 16, [1][1][2][0][RTW89_UK][1][13] = 54, [1][1][2][0][RTW89_UK][0][13] = 16, + 
[1][1][2][0][RTW89_THAILAND][1][13] = 46, + [1][1][2][0][RTW89_THAILAND][0][13] = 10, [1][1][2][0][RTW89_FCC][1][16] = 10, [1][1][2][0][RTW89_FCC][2][16] = 58, [1][1][2][0][RTW89_ETSI][1][16] = 54, @@ -43698,6 +45191,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][16] = 52, [1][1][2][0][RTW89_MKK][0][16] = 12, [1][1][2][0][RTW89_IC][1][16] = 10, + [1][1][2][0][RTW89_IC][2][16] = 58, [1][1][2][0][RTW89_KCC][1][16] = 28, [1][1][2][0][RTW89_KCC][0][16] = 12, [1][1][2][0][RTW89_ACMA][1][16] = 54, @@ -43707,6 +45201,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][16] = 16, [1][1][2][0][RTW89_UK][1][16] = 54, [1][1][2][0][RTW89_UK][0][16] = 16, + [1][1][2][0][RTW89_THAILAND][1][16] = 46, + [1][1][2][0][RTW89_THAILAND][0][16] = 10, [1][1][2][0][RTW89_FCC][1][20] = 10, [1][1][2][0][RTW89_FCC][2][20] = 58, [1][1][2][0][RTW89_ETSI][1][20] = 54, @@ -43714,6 +45210,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][20] = 52, [1][1][2][0][RTW89_MKK][0][20] = 12, [1][1][2][0][RTW89_IC][1][20] = 10, + [1][1][2][0][RTW89_IC][2][20] = 58, [1][1][2][0][RTW89_KCC][1][20] = 28, [1][1][2][0][RTW89_KCC][0][20] = 12, [1][1][2][0][RTW89_ACMA][1][20] = 54, @@ -43723,6 +45220,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][20] = 16, [1][1][2][0][RTW89_UK][1][20] = 54, [1][1][2][0][RTW89_UK][0][20] = 16, + [1][1][2][0][RTW89_THAILAND][1][20] = 46, + [1][1][2][0][RTW89_THAILAND][0][20] = 10, [1][1][2][0][RTW89_FCC][1][24] = 10, [1][1][2][0][RTW89_FCC][2][24] = 70, [1][1][2][0][RTW89_ETSI][1][24] = 54, @@ -43730,6 +45229,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][24] = 54, [1][1][2][0][RTW89_MKK][0][24] = 14, [1][1][2][0][RTW89_IC][1][24] = 10, + [1][1][2][0][RTW89_IC][2][24] = 70, [1][1][2][0][RTW89_KCC][1][24] = 28, [1][1][2][0][RTW89_KCC][0][24] = 12, [1][1][2][0][RTW89_ACMA][1][24] = 54, @@ -43739,6 +45239,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][24] = 16, [1][1][2][0][RTW89_UK][1][24] = 54, [1][1][2][0][RTW89_UK][0][24] = 16, + [1][1][2][0][RTW89_THAILAND][1][24] = 46, + [1][1][2][0][RTW89_THAILAND][0][24] = 10, [1][1][2][0][RTW89_FCC][1][28] = 10, [1][1][2][0][RTW89_FCC][2][28] = 70, [1][1][2][0][RTW89_ETSI][1][28] = 54, @@ -43746,6 +45248,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][28] = 52, [1][1][2][0][RTW89_MKK][0][28] = 14, [1][1][2][0][RTW89_IC][1][28] = 10, + [1][1][2][0][RTW89_IC][2][28] = 70, [1][1][2][0][RTW89_KCC][1][28] = 28, [1][1][2][0][RTW89_KCC][0][28] = 14, [1][1][2][0][RTW89_ACMA][1][28] = 54, @@ -43755,6 +45258,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][28] = 16, [1][1][2][0][RTW89_UK][1][28] = 54, [1][1][2][0][RTW89_UK][0][28] = 16, + [1][1][2][0][RTW89_THAILAND][1][28] = 46, + [1][1][2][0][RTW89_THAILAND][0][28] = 10, [1][1][2][0][RTW89_FCC][1][31] = 10, [1][1][2][0][RTW89_FCC][2][31] = 70, [1][1][2][0][RTW89_ETSI][1][31] = 54, @@ -43762,6 +45267,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][31] = 52, [1][1][2][0][RTW89_MKK][0][31] = 14, [1][1][2][0][RTW89_IC][1][31] = 10, + [1][1][2][0][RTW89_IC][2][31] = 70, [1][1][2][0][RTW89_KCC][1][31] = 28, [1][1][2][0][RTW89_KCC][0][31] = 14, [1][1][2][0][RTW89_ACMA][1][31] = 54, @@ 
-43771,6 +45277,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][31] = 16, [1][1][2][0][RTW89_UK][1][31] = 54, [1][1][2][0][RTW89_UK][0][31] = 16, + [1][1][2][0][RTW89_THAILAND][1][31] = 46, + [1][1][2][0][RTW89_THAILAND][0][31] = 10, [1][1][2][0][RTW89_FCC][1][35] = 10, [1][1][2][0][RTW89_FCC][2][35] = 70, [1][1][2][0][RTW89_ETSI][1][35] = 54, @@ -43778,6 +45286,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][35] = 52, [1][1][2][0][RTW89_MKK][0][35] = 14, [1][1][2][0][RTW89_IC][1][35] = 10, + [1][1][2][0][RTW89_IC][2][35] = 70, [1][1][2][0][RTW89_KCC][1][35] = 28, [1][1][2][0][RTW89_KCC][0][35] = 14, [1][1][2][0][RTW89_ACMA][1][35] = 54, @@ -43787,6 +45296,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][35] = 16, [1][1][2][0][RTW89_UK][1][35] = 54, [1][1][2][0][RTW89_UK][0][35] = 16, + [1][1][2][0][RTW89_THAILAND][1][35] = 46, + [1][1][2][0][RTW89_THAILAND][0][35] = 10, [1][1][2][0][RTW89_FCC][1][39] = 10, [1][1][2][0][RTW89_FCC][2][39] = 70, [1][1][2][0][RTW89_ETSI][1][39] = 54, @@ -43794,6 +45305,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][39] = 52, [1][1][2][0][RTW89_MKK][0][39] = 14, [1][1][2][0][RTW89_IC][1][39] = 10, + [1][1][2][0][RTW89_IC][2][39] = 70, [1][1][2][0][RTW89_KCC][1][39] = 28, [1][1][2][0][RTW89_KCC][0][39] = 14, [1][1][2][0][RTW89_ACMA][1][39] = 54, @@ -43803,6 +45315,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][39] = 16, [1][1][2][0][RTW89_UK][1][39] = 54, [1][1][2][0][RTW89_UK][0][39] = 16, + [1][1][2][0][RTW89_THAILAND][1][39] = 46, + [1][1][2][0][RTW89_THAILAND][0][39] = 10, [1][1][2][0][RTW89_FCC][1][43] = 10, [1][1][2][0][RTW89_FCC][2][43] = 70, [1][1][2][0][RTW89_ETSI][1][43] = 54, @@ -43810,6 +45324,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][43] = 52, [1][1][2][0][RTW89_MKK][0][43] = 14, [1][1][2][0][RTW89_IC][1][43] = 10, + [1][1][2][0][RTW89_IC][2][43] = 70, [1][1][2][0][RTW89_KCC][1][43] = 28, [1][1][2][0][RTW89_KCC][0][43] = 14, [1][1][2][0][RTW89_ACMA][1][43] = 54, @@ -43819,6 +45334,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][43] = 16, [1][1][2][0][RTW89_UK][1][43] = 54, [1][1][2][0][RTW89_UK][0][43] = 16, + [1][1][2][0][RTW89_THAILAND][1][43] = 46, + [1][1][2][0][RTW89_THAILAND][0][43] = 10, [1][1][2][0][RTW89_FCC][1][46] = 12, [1][1][2][0][RTW89_FCC][2][46] = 127, [1][1][2][0][RTW89_ETSI][1][46] = 127, @@ -43826,6 +45343,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][46] = 127, [1][1][2][0][RTW89_MKK][0][46] = 127, [1][1][2][0][RTW89_IC][1][46] = 12, + [1][1][2][0][RTW89_IC][2][46] = 68, [1][1][2][0][RTW89_KCC][1][46] = 28, [1][1][2][0][RTW89_KCC][0][46] = 127, [1][1][2][0][RTW89_ACMA][1][46] = 127, @@ -43835,6 +45353,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][46] = 127, [1][1][2][0][RTW89_UK][1][46] = 127, [1][1][2][0][RTW89_UK][0][46] = 127, + [1][1][2][0][RTW89_THAILAND][1][46] = 127, + [1][1][2][0][RTW89_THAILAND][0][46] = 127, [1][1][2][0][RTW89_FCC][1][50] = 12, [1][1][2][0][RTW89_FCC][2][50] = 127, [1][1][2][0][RTW89_ETSI][1][50] = 127, @@ -43842,6 +45362,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][50] = 127, 
[1][1][2][0][RTW89_MKK][0][50] = 127, [1][1][2][0][RTW89_IC][1][50] = 12, + [1][1][2][0][RTW89_IC][2][50] = 68, [1][1][2][0][RTW89_KCC][1][50] = 28, [1][1][2][0][RTW89_KCC][0][50] = 127, [1][1][2][0][RTW89_ACMA][1][50] = 127, @@ -43851,6 +45372,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][50] = 127, [1][1][2][0][RTW89_UK][1][50] = 127, [1][1][2][0][RTW89_UK][0][50] = 127, + [1][1][2][0][RTW89_THAILAND][1][50] = 127, + [1][1][2][0][RTW89_THAILAND][0][50] = 127, [1][1][2][0][RTW89_FCC][1][54] = 10, [1][1][2][0][RTW89_FCC][2][54] = 127, [1][1][2][0][RTW89_ETSI][1][54] = 127, @@ -43858,6 +45381,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][54] = 127, [1][1][2][0][RTW89_MKK][0][54] = 127, [1][1][2][0][RTW89_IC][1][54] = 10, + [1][1][2][0][RTW89_IC][2][54] = 127, [1][1][2][0][RTW89_KCC][1][54] = 28, [1][1][2][0][RTW89_KCC][0][54] = 127, [1][1][2][0][RTW89_ACMA][1][54] = 127, @@ -43867,6 +45391,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][54] = 127, [1][1][2][0][RTW89_UK][1][54] = 127, [1][1][2][0][RTW89_UK][0][54] = 127, + [1][1][2][0][RTW89_THAILAND][1][54] = 127, + [1][1][2][0][RTW89_THAILAND][0][54] = 127, [1][1][2][0][RTW89_FCC][1][58] = 10, [1][1][2][0][RTW89_FCC][2][58] = 66, [1][1][2][0][RTW89_ETSI][1][58] = 127, @@ -43874,6 +45400,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][58] = 127, [1][1][2][0][RTW89_MKK][0][58] = 127, [1][1][2][0][RTW89_IC][1][58] = 10, + [1][1][2][0][RTW89_IC][2][58] = 66, [1][1][2][0][RTW89_KCC][1][58] = 28, [1][1][2][0][RTW89_KCC][0][58] = 127, [1][1][2][0][RTW89_ACMA][1][58] = 127, @@ -43883,6 +45410,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][58] = 127, [1][1][2][0][RTW89_UK][1][58] = 127, [1][1][2][0][RTW89_UK][0][58] = 127, + [1][1][2][0][RTW89_THAILAND][1][58] = 127, + [1][1][2][0][RTW89_THAILAND][0][58] = 127, [1][1][2][0][RTW89_FCC][1][61] = 10, [1][1][2][0][RTW89_FCC][2][61] = 66, [1][1][2][0][RTW89_ETSI][1][61] = 127, @@ -43890,6 +45419,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][61] = 127, [1][1][2][0][RTW89_MKK][0][61] = 127, [1][1][2][0][RTW89_IC][1][61] = 10, + [1][1][2][0][RTW89_IC][2][61] = 66, [1][1][2][0][RTW89_KCC][1][61] = 28, [1][1][2][0][RTW89_KCC][0][61] = 127, [1][1][2][0][RTW89_ACMA][1][61] = 127, @@ -43899,6 +45429,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][61] = 127, [1][1][2][0][RTW89_UK][1][61] = 127, [1][1][2][0][RTW89_UK][0][61] = 127, + [1][1][2][0][RTW89_THAILAND][1][61] = 127, + [1][1][2][0][RTW89_THAILAND][0][61] = 127, [1][1][2][0][RTW89_FCC][1][65] = 10, [1][1][2][0][RTW89_FCC][2][65] = 66, [1][1][2][0][RTW89_ETSI][1][65] = 127, @@ -43906,6 +45438,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][65] = 127, [1][1][2][0][RTW89_MKK][0][65] = 127, [1][1][2][0][RTW89_IC][1][65] = 10, + [1][1][2][0][RTW89_IC][2][65] = 66, [1][1][2][0][RTW89_KCC][1][65] = 28, [1][1][2][0][RTW89_KCC][0][65] = 127, [1][1][2][0][RTW89_ACMA][1][65] = 127, @@ -43915,6 +45448,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][65] = 127, [1][1][2][0][RTW89_UK][1][65] = 127, [1][1][2][0][RTW89_UK][0][65] = 127, + [1][1][2][0][RTW89_THAILAND][1][65] = 127, + 
[1][1][2][0][RTW89_THAILAND][0][65] = 127, [1][1][2][0][RTW89_FCC][1][69] = 10, [1][1][2][0][RTW89_FCC][2][69] = 66, [1][1][2][0][RTW89_ETSI][1][69] = 127, @@ -43922,6 +45457,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][69] = 127, [1][1][2][0][RTW89_MKK][0][69] = 127, [1][1][2][0][RTW89_IC][1][69] = 10, + [1][1][2][0][RTW89_IC][2][69] = 66, [1][1][2][0][RTW89_KCC][1][69] = 28, [1][1][2][0][RTW89_KCC][0][69] = 127, [1][1][2][0][RTW89_ACMA][1][69] = 127, @@ -43931,6 +45467,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][69] = 127, [1][1][2][0][RTW89_UK][1][69] = 127, [1][1][2][0][RTW89_UK][0][69] = 127, + [1][1][2][0][RTW89_THAILAND][1][69] = 127, + [1][1][2][0][RTW89_THAILAND][0][69] = 127, [1][1][2][0][RTW89_FCC][1][73] = 10, [1][1][2][0][RTW89_FCC][2][73] = 66, [1][1][2][0][RTW89_ETSI][1][73] = 127, @@ -43938,6 +45476,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][73] = 127, [1][1][2][0][RTW89_MKK][0][73] = 127, [1][1][2][0][RTW89_IC][1][73] = 10, + [1][1][2][0][RTW89_IC][2][73] = 66, [1][1][2][0][RTW89_KCC][1][73] = 28, [1][1][2][0][RTW89_KCC][0][73] = 127, [1][1][2][0][RTW89_ACMA][1][73] = 127, @@ -43947,6 +45486,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][73] = 127, [1][1][2][0][RTW89_UK][1][73] = 127, [1][1][2][0][RTW89_UK][0][73] = 127, + [1][1][2][0][RTW89_THAILAND][1][73] = 127, + [1][1][2][0][RTW89_THAILAND][0][73] = 127, [1][1][2][0][RTW89_FCC][1][76] = 10, [1][1][2][0][RTW89_FCC][2][76] = 66, [1][1][2][0][RTW89_ETSI][1][76] = 127, @@ -43954,6 +45495,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][76] = 127, [1][1][2][0][RTW89_MKK][0][76] = 127, [1][1][2][0][RTW89_IC][1][76] = 10, + [1][1][2][0][RTW89_IC][2][76] = 66, [1][1][2][0][RTW89_KCC][1][76] = 28, [1][1][2][0][RTW89_KCC][0][76] = 127, [1][1][2][0][RTW89_ACMA][1][76] = 127, @@ -43963,6 +45505,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][76] = 127, [1][1][2][0][RTW89_UK][1][76] = 127, [1][1][2][0][RTW89_UK][0][76] = 127, + [1][1][2][0][RTW89_THAILAND][1][76] = 127, + [1][1][2][0][RTW89_THAILAND][0][76] = 127, [1][1][2][0][RTW89_FCC][1][80] = 10, [1][1][2][0][RTW89_FCC][2][80] = 66, [1][1][2][0][RTW89_ETSI][1][80] = 127, @@ -43970,6 +45514,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][80] = 127, [1][1][2][0][RTW89_MKK][0][80] = 127, [1][1][2][0][RTW89_IC][1][80] = 10, + [1][1][2][0][RTW89_IC][2][80] = 66, [1][1][2][0][RTW89_KCC][1][80] = 32, [1][1][2][0][RTW89_KCC][0][80] = 127, [1][1][2][0][RTW89_ACMA][1][80] = 127, @@ -43979,6 +45524,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][80] = 127, [1][1][2][0][RTW89_UK][1][80] = 127, [1][1][2][0][RTW89_UK][0][80] = 127, + [1][1][2][0][RTW89_THAILAND][1][80] = 127, + [1][1][2][0][RTW89_THAILAND][0][80] = 127, [1][1][2][0][RTW89_FCC][1][84] = 10, [1][1][2][0][RTW89_FCC][2][84] = 66, [1][1][2][0][RTW89_ETSI][1][84] = 127, @@ -43986,6 +45533,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][84] = 127, [1][1][2][0][RTW89_MKK][0][84] = 127, [1][1][2][0][RTW89_IC][1][84] = 10, + [1][1][2][0][RTW89_IC][2][84] = 66, [1][1][2][0][RTW89_KCC][1][84] = 32, [1][1][2][0][RTW89_KCC][0][84] = 127, [1][1][2][0][RTW89_ACMA][1][84] = 127, 
@@ -43995,6 +45543,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][84] = 127, [1][1][2][0][RTW89_UK][1][84] = 127, [1][1][2][0][RTW89_UK][0][84] = 127, + [1][1][2][0][RTW89_THAILAND][1][84] = 127, + [1][1][2][0][RTW89_THAILAND][0][84] = 127, [1][1][2][0][RTW89_FCC][1][88] = 10, [1][1][2][0][RTW89_FCC][2][88] = 127, [1][1][2][0][RTW89_ETSI][1][88] = 127, @@ -44002,6 +45552,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][88] = 127, [1][1][2][0][RTW89_MKK][0][88] = 127, [1][1][2][0][RTW89_IC][1][88] = 10, + [1][1][2][0][RTW89_IC][2][88] = 127, [1][1][2][0][RTW89_KCC][1][88] = 32, [1][1][2][0][RTW89_KCC][0][88] = 127, [1][1][2][0][RTW89_ACMA][1][88] = 127, @@ -44011,6 +45562,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][88] = 127, [1][1][2][0][RTW89_UK][1][88] = 127, [1][1][2][0][RTW89_UK][0][88] = 127, + [1][1][2][0][RTW89_THAILAND][1][88] = 127, + [1][1][2][0][RTW89_THAILAND][0][88] = 127, [1][1][2][0][RTW89_FCC][1][91] = 12, [1][1][2][0][RTW89_FCC][2][91] = 127, [1][1][2][0][RTW89_ETSI][1][91] = 127, @@ -44018,6 +45571,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][91] = 127, [1][1][2][0][RTW89_MKK][0][91] = 127, [1][1][2][0][RTW89_IC][1][91] = 12, + [1][1][2][0][RTW89_IC][2][91] = 127, [1][1][2][0][RTW89_KCC][1][91] = 32, [1][1][2][0][RTW89_KCC][0][91] = 127, [1][1][2][0][RTW89_ACMA][1][91] = 127, @@ -44027,6 +45581,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][91] = 127, [1][1][2][0][RTW89_UK][1][91] = 127, [1][1][2][0][RTW89_UK][0][91] = 127, + [1][1][2][0][RTW89_THAILAND][1][91] = 127, + [1][1][2][0][RTW89_THAILAND][0][91] = 127, [1][1][2][0][RTW89_FCC][1][95] = 10, [1][1][2][0][RTW89_FCC][2][95] = 127, [1][1][2][0][RTW89_ETSI][1][95] = 127, @@ -44034,6 +45590,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][95] = 127, [1][1][2][0][RTW89_MKK][0][95] = 127, [1][1][2][0][RTW89_IC][1][95] = 10, + [1][1][2][0][RTW89_IC][2][95] = 127, [1][1][2][0][RTW89_KCC][1][95] = 32, [1][1][2][0][RTW89_KCC][0][95] = 127, [1][1][2][0][RTW89_ACMA][1][95] = 127, @@ -44043,6 +45600,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][95] = 127, [1][1][2][0][RTW89_UK][1][95] = 127, [1][1][2][0][RTW89_UK][0][95] = 127, + [1][1][2][0][RTW89_THAILAND][1][95] = 127, + [1][1][2][0][RTW89_THAILAND][0][95] = 127, [1][1][2][0][RTW89_FCC][1][99] = 10, [1][1][2][0][RTW89_FCC][2][99] = 127, [1][1][2][0][RTW89_ETSI][1][99] = 127, @@ -44050,6 +45609,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][99] = 127, [1][1][2][0][RTW89_MKK][0][99] = 127, [1][1][2][0][RTW89_IC][1][99] = 10, + [1][1][2][0][RTW89_IC][2][99] = 127, [1][1][2][0][RTW89_KCC][1][99] = 32, [1][1][2][0][RTW89_KCC][0][99] = 127, [1][1][2][0][RTW89_ACMA][1][99] = 127, @@ -44059,6 +45619,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][99] = 127, [1][1][2][0][RTW89_UK][1][99] = 127, [1][1][2][0][RTW89_UK][0][99] = 127, + [1][1][2][0][RTW89_THAILAND][1][99] = 127, + [1][1][2][0][RTW89_THAILAND][0][99] = 127, [1][1][2][0][RTW89_FCC][1][103] = 10, [1][1][2][0][RTW89_FCC][2][103] = 127, [1][1][2][0][RTW89_ETSI][1][103] = 127, @@ -44066,6 +45628,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] 
[1][1][2][0][RTW89_MKK][1][103] = 127, [1][1][2][0][RTW89_MKK][0][103] = 127, [1][1][2][0][RTW89_IC][1][103] = 10, + [1][1][2][0][RTW89_IC][2][103] = 127, [1][1][2][0][RTW89_KCC][1][103] = 32, [1][1][2][0][RTW89_KCC][0][103] = 127, [1][1][2][0][RTW89_ACMA][1][103] = 127, @@ -44075,6 +45638,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][103] = 127, [1][1][2][0][RTW89_UK][1][103] = 127, [1][1][2][0][RTW89_UK][0][103] = 127, + [1][1][2][0][RTW89_THAILAND][1][103] = 127, + [1][1][2][0][RTW89_THAILAND][0][103] = 127, [1][1][2][0][RTW89_FCC][1][106] = 12, [1][1][2][0][RTW89_FCC][2][106] = 127, [1][1][2][0][RTW89_ETSI][1][106] = 127, @@ -44082,6 +45647,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][106] = 127, [1][1][2][0][RTW89_MKK][0][106] = 127, [1][1][2][0][RTW89_IC][1][106] = 12, + [1][1][2][0][RTW89_IC][2][106] = 127, [1][1][2][0][RTW89_KCC][1][106] = 32, [1][1][2][0][RTW89_KCC][0][106] = 127, [1][1][2][0][RTW89_ACMA][1][106] = 127, @@ -44091,6 +45657,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][106] = 127, [1][1][2][0][RTW89_UK][1][106] = 127, [1][1][2][0][RTW89_UK][0][106] = 127, + [1][1][2][0][RTW89_THAILAND][1][106] = 127, + [1][1][2][0][RTW89_THAILAND][0][106] = 127, [1][1][2][0][RTW89_FCC][1][110] = 127, [1][1][2][0][RTW89_FCC][2][110] = 127, [1][1][2][0][RTW89_ETSI][1][110] = 127, @@ -44098,6 +45666,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][110] = 127, [1][1][2][0][RTW89_MKK][0][110] = 127, [1][1][2][0][RTW89_IC][1][110] = 127, + [1][1][2][0][RTW89_IC][2][110] = 127, [1][1][2][0][RTW89_KCC][1][110] = 127, [1][1][2][0][RTW89_KCC][0][110] = 127, [1][1][2][0][RTW89_ACMA][1][110] = 127, @@ -44107,6 +45676,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][110] = 127, [1][1][2][0][RTW89_UK][1][110] = 127, [1][1][2][0][RTW89_UK][0][110] = 127, + [1][1][2][0][RTW89_THAILAND][1][110] = 127, + [1][1][2][0][RTW89_THAILAND][0][110] = 127, [1][1][2][0][RTW89_FCC][1][114] = 127, [1][1][2][0][RTW89_FCC][2][114] = 127, [1][1][2][0][RTW89_ETSI][1][114] = 127, @@ -44114,6 +45685,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][114] = 127, [1][1][2][0][RTW89_MKK][0][114] = 127, [1][1][2][0][RTW89_IC][1][114] = 127, + [1][1][2][0][RTW89_IC][2][114] = 127, [1][1][2][0][RTW89_KCC][1][114] = 127, [1][1][2][0][RTW89_KCC][0][114] = 127, [1][1][2][0][RTW89_ACMA][1][114] = 127, @@ -44123,6 +45695,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][114] = 127, [1][1][2][0][RTW89_UK][1][114] = 127, [1][1][2][0][RTW89_UK][0][114] = 127, + [1][1][2][0][RTW89_THAILAND][1][114] = 127, + [1][1][2][0][RTW89_THAILAND][0][114] = 127, [1][1][2][0][RTW89_FCC][1][118] = 127, [1][1][2][0][RTW89_FCC][2][118] = 127, [1][1][2][0][RTW89_ETSI][1][118] = 127, @@ -44130,6 +45704,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1][118] = 127, [1][1][2][0][RTW89_MKK][0][118] = 127, [1][1][2][0][RTW89_IC][1][118] = 127, + [1][1][2][0][RTW89_IC][2][118] = 127, [1][1][2][0][RTW89_KCC][1][118] = 127, [1][1][2][0][RTW89_KCC][0][118] = 127, [1][1][2][0][RTW89_ACMA][1][118] = 127, @@ -44139,6 +45714,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_QATAR][0][118] = 127, [1][1][2][0][RTW89_UK][1][118] 
= 127, [1][1][2][0][RTW89_UK][0][118] = 127, + [1][1][2][0][RTW89_THAILAND][1][118] = 127, + [1][1][2][0][RTW89_THAILAND][0][118] = 127, [1][1][2][1][RTW89_FCC][1][1] = 10, [1][1][2][1][RTW89_FCC][2][1] = 58, [1][1][2][1][RTW89_ETSI][1][1] = 42, @@ -44146,6 +45723,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][1] = 52, [1][1][2][1][RTW89_MKK][0][1] = 12, [1][1][2][1][RTW89_IC][1][1] = 10, + [1][1][2][1][RTW89_IC][2][1] = 58, [1][1][2][1][RTW89_KCC][1][1] = 28, [1][1][2][1][RTW89_KCC][0][1] = 12, [1][1][2][1][RTW89_ACMA][1][1] = 42, @@ -44155,6 +45733,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][1] = 6, [1][1][2][1][RTW89_UK][1][1] = 42, [1][1][2][1][RTW89_UK][0][1] = 6, + [1][1][2][1][RTW89_THAILAND][1][1] = 46, + [1][1][2][1][RTW89_THAILAND][0][1] = 6, [1][1][2][1][RTW89_FCC][1][5] = 10, [1][1][2][1][RTW89_FCC][2][5] = 58, [1][1][2][1][RTW89_ETSI][1][5] = 42, @@ -44162,6 +45742,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][5] = 52, [1][1][2][1][RTW89_MKK][0][5] = 12, [1][1][2][1][RTW89_IC][1][5] = 10, + [1][1][2][1][RTW89_IC][2][5] = 58, [1][1][2][1][RTW89_KCC][1][5] = 28, [1][1][2][1][RTW89_KCC][0][5] = 12, [1][1][2][1][RTW89_ACMA][1][5] = 42, @@ -44171,6 +45752,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][5] = 6, [1][1][2][1][RTW89_UK][1][5] = 42, [1][1][2][1][RTW89_UK][0][5] = 6, + [1][1][2][1][RTW89_THAILAND][1][5] = 46, + [1][1][2][1][RTW89_THAILAND][0][5] = 6, [1][1][2][1][RTW89_FCC][1][9] = 10, [1][1][2][1][RTW89_FCC][2][9] = 58, [1][1][2][1][RTW89_ETSI][1][9] = 42, @@ -44178,6 +45761,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][9] = 52, [1][1][2][1][RTW89_MKK][0][9] = 12, [1][1][2][1][RTW89_IC][1][9] = 10, + [1][1][2][1][RTW89_IC][2][9] = 58, [1][1][2][1][RTW89_KCC][1][9] = 28, [1][1][2][1][RTW89_KCC][0][9] = 12, [1][1][2][1][RTW89_ACMA][1][9] = 42, @@ -44187,6 +45771,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][9] = 6, [1][1][2][1][RTW89_UK][1][9] = 42, [1][1][2][1][RTW89_UK][0][9] = 6, + [1][1][2][1][RTW89_THAILAND][1][9] = 46, + [1][1][2][1][RTW89_THAILAND][0][9] = 6, [1][1][2][1][RTW89_FCC][1][13] = 10, [1][1][2][1][RTW89_FCC][2][13] = 58, [1][1][2][1][RTW89_ETSI][1][13] = 42, @@ -44194,6 +45780,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][13] = 52, [1][1][2][1][RTW89_MKK][0][13] = 12, [1][1][2][1][RTW89_IC][1][13] = 10, + [1][1][2][1][RTW89_IC][2][13] = 58, [1][1][2][1][RTW89_KCC][1][13] = 28, [1][1][2][1][RTW89_KCC][0][13] = 12, [1][1][2][1][RTW89_ACMA][1][13] = 42, @@ -44203,6 +45790,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][13] = 6, [1][1][2][1][RTW89_UK][1][13] = 42, [1][1][2][1][RTW89_UK][0][13] = 6, + [1][1][2][1][RTW89_THAILAND][1][13] = 46, + [1][1][2][1][RTW89_THAILAND][0][13] = 6, [1][1][2][1][RTW89_FCC][1][16] = 10, [1][1][2][1][RTW89_FCC][2][16] = 58, [1][1][2][1][RTW89_ETSI][1][16] = 42, @@ -44210,6 +45799,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][16] = 52, [1][1][2][1][RTW89_MKK][0][16] = 12, [1][1][2][1][RTW89_IC][1][16] = 10, + [1][1][2][1][RTW89_IC][2][16] = 58, [1][1][2][1][RTW89_KCC][1][16] = 28, [1][1][2][1][RTW89_KCC][0][16] = 12, [1][1][2][1][RTW89_ACMA][1][16] = 42, @@ 
-44219,6 +45809,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][16] = 6, [1][1][2][1][RTW89_UK][1][16] = 42, [1][1][2][1][RTW89_UK][0][16] = 6, + [1][1][2][1][RTW89_THAILAND][1][16] = 46, + [1][1][2][1][RTW89_THAILAND][0][16] = 6, [1][1][2][1][RTW89_FCC][1][20] = 10, [1][1][2][1][RTW89_FCC][2][20] = 58, [1][1][2][1][RTW89_ETSI][1][20] = 42, @@ -44226,6 +45818,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][20] = 52, [1][1][2][1][RTW89_MKK][0][20] = 12, [1][1][2][1][RTW89_IC][1][20] = 10, + [1][1][2][1][RTW89_IC][2][20] = 58, [1][1][2][1][RTW89_KCC][1][20] = 28, [1][1][2][1][RTW89_KCC][0][20] = 12, [1][1][2][1][RTW89_ACMA][1][20] = 42, @@ -44235,6 +45828,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][20] = 6, [1][1][2][1][RTW89_UK][1][20] = 42, [1][1][2][1][RTW89_UK][0][20] = 6, + [1][1][2][1][RTW89_THAILAND][1][20] = 46, + [1][1][2][1][RTW89_THAILAND][0][20] = 6, [1][1][2][1][RTW89_FCC][1][24] = 10, [1][1][2][1][RTW89_FCC][2][24] = 70, [1][1][2][1][RTW89_ETSI][1][24] = 42, @@ -44242,6 +45837,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][24] = 54, [1][1][2][1][RTW89_MKK][0][24] = 14, [1][1][2][1][RTW89_IC][1][24] = 10, + [1][1][2][1][RTW89_IC][2][24] = 70, [1][1][2][1][RTW89_KCC][1][24] = 28, [1][1][2][1][RTW89_KCC][0][24] = 12, [1][1][2][1][RTW89_ACMA][1][24] = 42, @@ -44251,6 +45847,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][24] = 6, [1][1][2][1][RTW89_UK][1][24] = 42, [1][1][2][1][RTW89_UK][0][24] = 6, + [1][1][2][1][RTW89_THAILAND][1][24] = 46, + [1][1][2][1][RTW89_THAILAND][0][24] = 6, [1][1][2][1][RTW89_FCC][1][28] = 10, [1][1][2][1][RTW89_FCC][2][28] = 70, [1][1][2][1][RTW89_ETSI][1][28] = 42, @@ -44258,6 +45856,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][28] = 52, [1][1][2][1][RTW89_MKK][0][28] = 14, [1][1][2][1][RTW89_IC][1][28] = 10, + [1][1][2][1][RTW89_IC][2][28] = 70, [1][1][2][1][RTW89_KCC][1][28] = 28, [1][1][2][1][RTW89_KCC][0][28] = 14, [1][1][2][1][RTW89_ACMA][1][28] = 42, @@ -44267,6 +45866,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][28] = 6, [1][1][2][1][RTW89_UK][1][28] = 42, [1][1][2][1][RTW89_UK][0][28] = 6, + [1][1][2][1][RTW89_THAILAND][1][28] = 46, + [1][1][2][1][RTW89_THAILAND][0][28] = 6, [1][1][2][1][RTW89_FCC][1][31] = 10, [1][1][2][1][RTW89_FCC][2][31] = 70, [1][1][2][1][RTW89_ETSI][1][31] = 42, @@ -44274,6 +45875,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][31] = 52, [1][1][2][1][RTW89_MKK][0][31] = 14, [1][1][2][1][RTW89_IC][1][31] = 10, + [1][1][2][1][RTW89_IC][2][31] = 70, [1][1][2][1][RTW89_KCC][1][31] = 28, [1][1][2][1][RTW89_KCC][0][31] = 14, [1][1][2][1][RTW89_ACMA][1][31] = 42, @@ -44283,6 +45885,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][31] = 6, [1][1][2][1][RTW89_UK][1][31] = 42, [1][1][2][1][RTW89_UK][0][31] = 6, + [1][1][2][1][RTW89_THAILAND][1][31] = 46, + [1][1][2][1][RTW89_THAILAND][0][31] = 6, [1][1][2][1][RTW89_FCC][1][35] = 10, [1][1][2][1][RTW89_FCC][2][35] = 70, [1][1][2][1][RTW89_ETSI][1][35] = 42, @@ -44290,6 +45894,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][35] = 52, [1][1][2][1][RTW89_MKK][0][35] = 14, 
[1][1][2][1][RTW89_IC][1][35] = 10, + [1][1][2][1][RTW89_IC][2][35] = 70, [1][1][2][1][RTW89_KCC][1][35] = 28, [1][1][2][1][RTW89_KCC][0][35] = 14, [1][1][2][1][RTW89_ACMA][1][35] = 42, @@ -44299,6 +45904,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][35] = 6, [1][1][2][1][RTW89_UK][1][35] = 42, [1][1][2][1][RTW89_UK][0][35] = 6, + [1][1][2][1][RTW89_THAILAND][1][35] = 46, + [1][1][2][1][RTW89_THAILAND][0][35] = 6, [1][1][2][1][RTW89_FCC][1][39] = 10, [1][1][2][1][RTW89_FCC][2][39] = 70, [1][1][2][1][RTW89_ETSI][1][39] = 42, @@ -44306,6 +45913,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][39] = 52, [1][1][2][1][RTW89_MKK][0][39] = 14, [1][1][2][1][RTW89_IC][1][39] = 10, + [1][1][2][1][RTW89_IC][2][39] = 70, [1][1][2][1][RTW89_KCC][1][39] = 28, [1][1][2][1][RTW89_KCC][0][39] = 14, [1][1][2][1][RTW89_ACMA][1][39] = 42, @@ -44315,6 +45923,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][39] = 6, [1][1][2][1][RTW89_UK][1][39] = 42, [1][1][2][1][RTW89_UK][0][39] = 6, + [1][1][2][1][RTW89_THAILAND][1][39] = 46, + [1][1][2][1][RTW89_THAILAND][0][39] = 6, [1][1][2][1][RTW89_FCC][1][43] = 10, [1][1][2][1][RTW89_FCC][2][43] = 70, [1][1][2][1][RTW89_ETSI][1][43] = 42, @@ -44322,6 +45932,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][43] = 52, [1][1][2][1][RTW89_MKK][0][43] = 14, [1][1][2][1][RTW89_IC][1][43] = 10, + [1][1][2][1][RTW89_IC][2][43] = 70, [1][1][2][1][RTW89_KCC][1][43] = 28, [1][1][2][1][RTW89_KCC][0][43] = 14, [1][1][2][1][RTW89_ACMA][1][43] = 42, @@ -44331,6 +45942,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][43] = 6, [1][1][2][1][RTW89_UK][1][43] = 42, [1][1][2][1][RTW89_UK][0][43] = 6, + [1][1][2][1][RTW89_THAILAND][1][43] = 46, + [1][1][2][1][RTW89_THAILAND][0][43] = 6, [1][1][2][1][RTW89_FCC][1][46] = 12, [1][1][2][1][RTW89_FCC][2][46] = 127, [1][1][2][1][RTW89_ETSI][1][46] = 127, @@ -44338,6 +45951,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][46] = 127, [1][1][2][1][RTW89_MKK][0][46] = 127, [1][1][2][1][RTW89_IC][1][46] = 12, + [1][1][2][1][RTW89_IC][2][46] = 68, [1][1][2][1][RTW89_KCC][1][46] = 28, [1][1][2][1][RTW89_KCC][0][46] = 127, [1][1][2][1][RTW89_ACMA][1][46] = 127, @@ -44347,6 +45961,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][46] = 127, [1][1][2][1][RTW89_UK][1][46] = 127, [1][1][2][1][RTW89_UK][0][46] = 127, + [1][1][2][1][RTW89_THAILAND][1][46] = 127, + [1][1][2][1][RTW89_THAILAND][0][46] = 127, [1][1][2][1][RTW89_FCC][1][50] = 12, [1][1][2][1][RTW89_FCC][2][50] = 127, [1][1][2][1][RTW89_ETSI][1][50] = 127, @@ -44354,6 +45970,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][50] = 127, [1][1][2][1][RTW89_MKK][0][50] = 127, [1][1][2][1][RTW89_IC][1][50] = 12, + [1][1][2][1][RTW89_IC][2][50] = 68, [1][1][2][1][RTW89_KCC][1][50] = 28, [1][1][2][1][RTW89_KCC][0][50] = 127, [1][1][2][1][RTW89_ACMA][1][50] = 127, @@ -44363,6 +45980,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][50] = 127, [1][1][2][1][RTW89_UK][1][50] = 127, [1][1][2][1][RTW89_UK][0][50] = 127, + [1][1][2][1][RTW89_THAILAND][1][50] = 127, + [1][1][2][1][RTW89_THAILAND][0][50] = 127, [1][1][2][1][RTW89_FCC][1][54] = 10, 
[1][1][2][1][RTW89_FCC][2][54] = 127, [1][1][2][1][RTW89_ETSI][1][54] = 127, @@ -44370,6 +45989,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][54] = 127, [1][1][2][1][RTW89_MKK][0][54] = 127, [1][1][2][1][RTW89_IC][1][54] = 10, + [1][1][2][1][RTW89_IC][2][54] = 127, [1][1][2][1][RTW89_KCC][1][54] = 28, [1][1][2][1][RTW89_KCC][0][54] = 127, [1][1][2][1][RTW89_ACMA][1][54] = 127, @@ -44379,6 +45999,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][54] = 127, [1][1][2][1][RTW89_UK][1][54] = 127, [1][1][2][1][RTW89_UK][0][54] = 127, + [1][1][2][1][RTW89_THAILAND][1][54] = 127, + [1][1][2][1][RTW89_THAILAND][0][54] = 127, [1][1][2][1][RTW89_FCC][1][58] = 10, [1][1][2][1][RTW89_FCC][2][58] = 66, [1][1][2][1][RTW89_ETSI][1][58] = 127, @@ -44386,6 +46008,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][58] = 127, [1][1][2][1][RTW89_MKK][0][58] = 127, [1][1][2][1][RTW89_IC][1][58] = 10, + [1][1][2][1][RTW89_IC][2][58] = 66, [1][1][2][1][RTW89_KCC][1][58] = 28, [1][1][2][1][RTW89_KCC][0][58] = 127, [1][1][2][1][RTW89_ACMA][1][58] = 127, @@ -44395,6 +46018,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][58] = 127, [1][1][2][1][RTW89_UK][1][58] = 127, [1][1][2][1][RTW89_UK][0][58] = 127, + [1][1][2][1][RTW89_THAILAND][1][58] = 127, + [1][1][2][1][RTW89_THAILAND][0][58] = 127, [1][1][2][1][RTW89_FCC][1][61] = 10, [1][1][2][1][RTW89_FCC][2][61] = 66, [1][1][2][1][RTW89_ETSI][1][61] = 127, @@ -44402,6 +46027,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][61] = 127, [1][1][2][1][RTW89_MKK][0][61] = 127, [1][1][2][1][RTW89_IC][1][61] = 10, + [1][1][2][1][RTW89_IC][2][61] = 66, [1][1][2][1][RTW89_KCC][1][61] = 28, [1][1][2][1][RTW89_KCC][0][61] = 127, [1][1][2][1][RTW89_ACMA][1][61] = 127, @@ -44411,6 +46037,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][61] = 127, [1][1][2][1][RTW89_UK][1][61] = 127, [1][1][2][1][RTW89_UK][0][61] = 127, + [1][1][2][1][RTW89_THAILAND][1][61] = 127, + [1][1][2][1][RTW89_THAILAND][0][61] = 127, [1][1][2][1][RTW89_FCC][1][65] = 10, [1][1][2][1][RTW89_FCC][2][65] = 66, [1][1][2][1][RTW89_ETSI][1][65] = 127, @@ -44418,6 +46046,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][65] = 127, [1][1][2][1][RTW89_MKK][0][65] = 127, [1][1][2][1][RTW89_IC][1][65] = 10, + [1][1][2][1][RTW89_IC][2][65] = 66, [1][1][2][1][RTW89_KCC][1][65] = 28, [1][1][2][1][RTW89_KCC][0][65] = 127, [1][1][2][1][RTW89_ACMA][1][65] = 127, @@ -44427,6 +46056,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][65] = 127, [1][1][2][1][RTW89_UK][1][65] = 127, [1][1][2][1][RTW89_UK][0][65] = 127, + [1][1][2][1][RTW89_THAILAND][1][65] = 127, + [1][1][2][1][RTW89_THAILAND][0][65] = 127, [1][1][2][1][RTW89_FCC][1][69] = 10, [1][1][2][1][RTW89_FCC][2][69] = 66, [1][1][2][1][RTW89_ETSI][1][69] = 127, @@ -44434,6 +46065,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][69] = 127, [1][1][2][1][RTW89_MKK][0][69] = 127, [1][1][2][1][RTW89_IC][1][69] = 10, + [1][1][2][1][RTW89_IC][2][69] = 66, [1][1][2][1][RTW89_KCC][1][69] = 28, [1][1][2][1][RTW89_KCC][0][69] = 127, [1][1][2][1][RTW89_ACMA][1][69] = 127, @@ -44443,6 +46075,8 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][69] = 127, [1][1][2][1][RTW89_UK][1][69] = 127, [1][1][2][1][RTW89_UK][0][69] = 127, + [1][1][2][1][RTW89_THAILAND][1][69] = 127, + [1][1][2][1][RTW89_THAILAND][0][69] = 127, [1][1][2][1][RTW89_FCC][1][73] = 10, [1][1][2][1][RTW89_FCC][2][73] = 66, [1][1][2][1][RTW89_ETSI][1][73] = 127, @@ -44450,6 +46084,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][73] = 127, [1][1][2][1][RTW89_MKK][0][73] = 127, [1][1][2][1][RTW89_IC][1][73] = 10, + [1][1][2][1][RTW89_IC][2][73] = 66, [1][1][2][1][RTW89_KCC][1][73] = 28, [1][1][2][1][RTW89_KCC][0][73] = 127, [1][1][2][1][RTW89_ACMA][1][73] = 127, @@ -44459,6 +46094,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][73] = 127, [1][1][2][1][RTW89_UK][1][73] = 127, [1][1][2][1][RTW89_UK][0][73] = 127, + [1][1][2][1][RTW89_THAILAND][1][73] = 127, + [1][1][2][1][RTW89_THAILAND][0][73] = 127, [1][1][2][1][RTW89_FCC][1][76] = 10, [1][1][2][1][RTW89_FCC][2][76] = 66, [1][1][2][1][RTW89_ETSI][1][76] = 127, @@ -44466,6 +46103,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][76] = 127, [1][1][2][1][RTW89_MKK][0][76] = 127, [1][1][2][1][RTW89_IC][1][76] = 10, + [1][1][2][1][RTW89_IC][2][76] = 66, [1][1][2][1][RTW89_KCC][1][76] = 28, [1][1][2][1][RTW89_KCC][0][76] = 127, [1][1][2][1][RTW89_ACMA][1][76] = 127, @@ -44475,6 +46113,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][76] = 127, [1][1][2][1][RTW89_UK][1][76] = 127, [1][1][2][1][RTW89_UK][0][76] = 127, + [1][1][2][1][RTW89_THAILAND][1][76] = 127, + [1][1][2][1][RTW89_THAILAND][0][76] = 127, [1][1][2][1][RTW89_FCC][1][80] = 10, [1][1][2][1][RTW89_FCC][2][80] = 66, [1][1][2][1][RTW89_ETSI][1][80] = 127, @@ -44482,6 +46122,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][80] = 127, [1][1][2][1][RTW89_MKK][0][80] = 127, [1][1][2][1][RTW89_IC][1][80] = 10, + [1][1][2][1][RTW89_IC][2][80] = 66, [1][1][2][1][RTW89_KCC][1][80] = 32, [1][1][2][1][RTW89_KCC][0][80] = 127, [1][1][2][1][RTW89_ACMA][1][80] = 127, @@ -44491,6 +46132,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][80] = 127, [1][1][2][1][RTW89_UK][1][80] = 127, [1][1][2][1][RTW89_UK][0][80] = 127, + [1][1][2][1][RTW89_THAILAND][1][80] = 127, + [1][1][2][1][RTW89_THAILAND][0][80] = 127, [1][1][2][1][RTW89_FCC][1][84] = 10, [1][1][2][1][RTW89_FCC][2][84] = 66, [1][1][2][1][RTW89_ETSI][1][84] = 127, @@ -44498,6 +46141,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][84] = 127, [1][1][2][1][RTW89_MKK][0][84] = 127, [1][1][2][1][RTW89_IC][1][84] = 10, + [1][1][2][1][RTW89_IC][2][84] = 66, [1][1][2][1][RTW89_KCC][1][84] = 32, [1][1][2][1][RTW89_KCC][0][84] = 127, [1][1][2][1][RTW89_ACMA][1][84] = 127, @@ -44507,6 +46151,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][84] = 127, [1][1][2][1][RTW89_UK][1][84] = 127, [1][1][2][1][RTW89_UK][0][84] = 127, + [1][1][2][1][RTW89_THAILAND][1][84] = 127, + [1][1][2][1][RTW89_THAILAND][0][84] = 127, [1][1][2][1][RTW89_FCC][1][88] = 10, [1][1][2][1][RTW89_FCC][2][88] = 127, [1][1][2][1][RTW89_ETSI][1][88] = 127, @@ -44514,6 +46160,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][88] = 127, 
[1][1][2][1][RTW89_MKK][0][88] = 127, [1][1][2][1][RTW89_IC][1][88] = 10, + [1][1][2][1][RTW89_IC][2][88] = 127, [1][1][2][1][RTW89_KCC][1][88] = 32, [1][1][2][1][RTW89_KCC][0][88] = 127, [1][1][2][1][RTW89_ACMA][1][88] = 127, @@ -44523,6 +46170,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][88] = 127, [1][1][2][1][RTW89_UK][1][88] = 127, [1][1][2][1][RTW89_UK][0][88] = 127, + [1][1][2][1][RTW89_THAILAND][1][88] = 127, + [1][1][2][1][RTW89_THAILAND][0][88] = 127, [1][1][2][1][RTW89_FCC][1][91] = 12, [1][1][2][1][RTW89_FCC][2][91] = 127, [1][1][2][1][RTW89_ETSI][1][91] = 127, @@ -44530,6 +46179,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][91] = 127, [1][1][2][1][RTW89_MKK][0][91] = 127, [1][1][2][1][RTW89_IC][1][91] = 12, + [1][1][2][1][RTW89_IC][2][91] = 127, [1][1][2][1][RTW89_KCC][1][91] = 32, [1][1][2][1][RTW89_KCC][0][91] = 127, [1][1][2][1][RTW89_ACMA][1][91] = 127, @@ -44539,6 +46189,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][91] = 127, [1][1][2][1][RTW89_UK][1][91] = 127, [1][1][2][1][RTW89_UK][0][91] = 127, + [1][1][2][1][RTW89_THAILAND][1][91] = 127, + [1][1][2][1][RTW89_THAILAND][0][91] = 127, [1][1][2][1][RTW89_FCC][1][95] = 10, [1][1][2][1][RTW89_FCC][2][95] = 127, [1][1][2][1][RTW89_ETSI][1][95] = 127, @@ -44546,6 +46198,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][95] = 127, [1][1][2][1][RTW89_MKK][0][95] = 127, [1][1][2][1][RTW89_IC][1][95] = 10, + [1][1][2][1][RTW89_IC][2][95] = 127, [1][1][2][1][RTW89_KCC][1][95] = 32, [1][1][2][1][RTW89_KCC][0][95] = 127, [1][1][2][1][RTW89_ACMA][1][95] = 127, @@ -44555,6 +46208,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][95] = 127, [1][1][2][1][RTW89_UK][1][95] = 127, [1][1][2][1][RTW89_UK][0][95] = 127, + [1][1][2][1][RTW89_THAILAND][1][95] = 127, + [1][1][2][1][RTW89_THAILAND][0][95] = 127, [1][1][2][1][RTW89_FCC][1][99] = 10, [1][1][2][1][RTW89_FCC][2][99] = 127, [1][1][2][1][RTW89_ETSI][1][99] = 127, @@ -44562,6 +46217,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][99] = 127, [1][1][2][1][RTW89_MKK][0][99] = 127, [1][1][2][1][RTW89_IC][1][99] = 10, + [1][1][2][1][RTW89_IC][2][99] = 127, [1][1][2][1][RTW89_KCC][1][99] = 32, [1][1][2][1][RTW89_KCC][0][99] = 127, [1][1][2][1][RTW89_ACMA][1][99] = 127, @@ -44571,6 +46227,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][99] = 127, [1][1][2][1][RTW89_UK][1][99] = 127, [1][1][2][1][RTW89_UK][0][99] = 127, + [1][1][2][1][RTW89_THAILAND][1][99] = 127, + [1][1][2][1][RTW89_THAILAND][0][99] = 127, [1][1][2][1][RTW89_FCC][1][103] = 10, [1][1][2][1][RTW89_FCC][2][103] = 127, [1][1][2][1][RTW89_ETSI][1][103] = 127, @@ -44578,6 +46236,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][103] = 127, [1][1][2][1][RTW89_MKK][0][103] = 127, [1][1][2][1][RTW89_IC][1][103] = 10, + [1][1][2][1][RTW89_IC][2][103] = 127, [1][1][2][1][RTW89_KCC][1][103] = 32, [1][1][2][1][RTW89_KCC][0][103] = 127, [1][1][2][1][RTW89_ACMA][1][103] = 127, @@ -44587,6 +46246,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][103] = 127, [1][1][2][1][RTW89_UK][1][103] = 127, [1][1][2][1][RTW89_UK][0][103] = 127, + [1][1][2][1][RTW89_THAILAND][1][103] = 127, + 
[1][1][2][1][RTW89_THAILAND][0][103] = 127, [1][1][2][1][RTW89_FCC][1][106] = 12, [1][1][2][1][RTW89_FCC][2][106] = 127, [1][1][2][1][RTW89_ETSI][1][106] = 127, @@ -44594,6 +46255,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][106] = 127, [1][1][2][1][RTW89_MKK][0][106] = 127, [1][1][2][1][RTW89_IC][1][106] = 12, + [1][1][2][1][RTW89_IC][2][106] = 127, [1][1][2][1][RTW89_KCC][1][106] = 32, [1][1][2][1][RTW89_KCC][0][106] = 127, [1][1][2][1][RTW89_ACMA][1][106] = 127, @@ -44603,6 +46265,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][106] = 127, [1][1][2][1][RTW89_UK][1][106] = 127, [1][1][2][1][RTW89_UK][0][106] = 127, + [1][1][2][1][RTW89_THAILAND][1][106] = 127, + [1][1][2][1][RTW89_THAILAND][0][106] = 127, [1][1][2][1][RTW89_FCC][1][110] = 127, [1][1][2][1][RTW89_FCC][2][110] = 127, [1][1][2][1][RTW89_ETSI][1][110] = 127, @@ -44610,6 +46274,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][110] = 127, [1][1][2][1][RTW89_MKK][0][110] = 127, [1][1][2][1][RTW89_IC][1][110] = 127, + [1][1][2][1][RTW89_IC][2][110] = 127, [1][1][2][1][RTW89_KCC][1][110] = 127, [1][1][2][1][RTW89_KCC][0][110] = 127, [1][1][2][1][RTW89_ACMA][1][110] = 127, @@ -44619,6 +46284,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][110] = 127, [1][1][2][1][RTW89_UK][1][110] = 127, [1][1][2][1][RTW89_UK][0][110] = 127, + [1][1][2][1][RTW89_THAILAND][1][110] = 127, + [1][1][2][1][RTW89_THAILAND][0][110] = 127, [1][1][2][1][RTW89_FCC][1][114] = 127, [1][1][2][1][RTW89_FCC][2][114] = 127, [1][1][2][1][RTW89_ETSI][1][114] = 127, @@ -44626,6 +46293,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][114] = 127, [1][1][2][1][RTW89_MKK][0][114] = 127, [1][1][2][1][RTW89_IC][1][114] = 127, + [1][1][2][1][RTW89_IC][2][114] = 127, [1][1][2][1][RTW89_KCC][1][114] = 127, [1][1][2][1][RTW89_KCC][0][114] = 127, [1][1][2][1][RTW89_ACMA][1][114] = 127, @@ -44635,6 +46303,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][114] = 127, [1][1][2][1][RTW89_UK][1][114] = 127, [1][1][2][1][RTW89_UK][0][114] = 127, + [1][1][2][1][RTW89_THAILAND][1][114] = 127, + [1][1][2][1][RTW89_THAILAND][0][114] = 127, [1][1][2][1][RTW89_FCC][1][118] = 127, [1][1][2][1][RTW89_FCC][2][118] = 127, [1][1][2][1][RTW89_ETSI][1][118] = 127, @@ -44642,6 +46312,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1][118] = 127, [1][1][2][1][RTW89_MKK][0][118] = 127, [1][1][2][1][RTW89_IC][1][118] = 127, + [1][1][2][1][RTW89_IC][2][118] = 127, [1][1][2][1][RTW89_KCC][1][118] = 127, [1][1][2][1][RTW89_KCC][0][118] = 127, [1][1][2][1][RTW89_ACMA][1][118] = 127, @@ -44651,6 +46322,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_QATAR][0][118] = 127, [1][1][2][1][RTW89_UK][1][118] = 127, [1][1][2][1][RTW89_UK][0][118] = 127, + [1][1][2][1][RTW89_THAILAND][1][118] = 127, + [1][1][2][1][RTW89_THAILAND][0][118] = 127, [2][0][2][0][RTW89_FCC][1][3] = 46, [2][0][2][0][RTW89_FCC][2][3] = 60, [2][0][2][0][RTW89_ETSI][1][3] = 58, @@ -44658,6 +46331,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][3] = 58, [2][0][2][0][RTW89_MKK][0][3] = 26, [2][0][2][0][RTW89_IC][1][3] = 46, + [2][0][2][0][RTW89_IC][2][3] = 60, [2][0][2][0][RTW89_KCC][1][3] = 50, 
[2][0][2][0][RTW89_KCC][0][3] = 24, [2][0][2][0][RTW89_ACMA][1][3] = 58, @@ -44667,6 +46341,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][3] = 30, [2][0][2][0][RTW89_UK][1][3] = 58, [2][0][2][0][RTW89_UK][0][3] = 30, + [2][0][2][0][RTW89_THAILAND][1][3] = 58, + [2][0][2][0][RTW89_THAILAND][0][3] = 30, [2][0][2][0][RTW89_FCC][1][11] = 46, [2][0][2][0][RTW89_FCC][2][11] = 60, [2][0][2][0][RTW89_ETSI][1][11] = 58, @@ -44674,6 +46350,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][11] = 58, [2][0][2][0][RTW89_MKK][0][11] = 24, [2][0][2][0][RTW89_IC][1][11] = 46, + [2][0][2][0][RTW89_IC][2][11] = 60, [2][0][2][0][RTW89_KCC][1][11] = 50, [2][0][2][0][RTW89_KCC][0][11] = 24, [2][0][2][0][RTW89_ACMA][1][11] = 58, @@ -44683,6 +46360,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][11] = 30, [2][0][2][0][RTW89_UK][1][11] = 58, [2][0][2][0][RTW89_UK][0][11] = 30, + [2][0][2][0][RTW89_THAILAND][1][11] = 58, + [2][0][2][0][RTW89_THAILAND][0][11] = 30, [2][0][2][0][RTW89_FCC][1][18] = 46, [2][0][2][0][RTW89_FCC][2][18] = 60, [2][0][2][0][RTW89_ETSI][1][18] = 58, @@ -44690,6 +46369,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][18] = 58, [2][0][2][0][RTW89_MKK][0][18] = 24, [2][0][2][0][RTW89_IC][1][18] = 46, + [2][0][2][0][RTW89_IC][2][18] = 60, [2][0][2][0][RTW89_KCC][1][18] = 50, [2][0][2][0][RTW89_KCC][0][18] = 24, [2][0][2][0][RTW89_ACMA][1][18] = 58, @@ -44699,6 +46379,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][18] = 30, [2][0][2][0][RTW89_UK][1][18] = 58, [2][0][2][0][RTW89_UK][0][18] = 30, + [2][0][2][0][RTW89_THAILAND][1][18] = 58, + [2][0][2][0][RTW89_THAILAND][0][18] = 30, [2][0][2][0][RTW89_FCC][1][26] = 46, [2][0][2][0][RTW89_FCC][2][26] = 60, [2][0][2][0][RTW89_ETSI][1][26] = 58, @@ -44706,6 +46388,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][26] = 58, [2][0][2][0][RTW89_MKK][0][26] = 24, [2][0][2][0][RTW89_IC][1][26] = 46, + [2][0][2][0][RTW89_IC][2][26] = 60, [2][0][2][0][RTW89_KCC][1][26] = 50, [2][0][2][0][RTW89_KCC][0][26] = 26, [2][0][2][0][RTW89_ACMA][1][26] = 58, @@ -44715,6 +46398,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][26] = 30, [2][0][2][0][RTW89_UK][1][26] = 58, [2][0][2][0][RTW89_UK][0][26] = 30, + [2][0][2][0][RTW89_THAILAND][1][26] = 58, + [2][0][2][0][RTW89_THAILAND][0][26] = 30, [2][0][2][0][RTW89_FCC][1][33] = 46, [2][0][2][0][RTW89_FCC][2][33] = 60, [2][0][2][0][RTW89_ETSI][1][33] = 58, @@ -44722,6 +46407,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][33] = 58, [2][0][2][0][RTW89_MKK][0][33] = 24, [2][0][2][0][RTW89_IC][1][33] = 46, + [2][0][2][0][RTW89_IC][2][33] = 60, [2][0][2][0][RTW89_KCC][1][33] = 50, [2][0][2][0][RTW89_KCC][0][33] = 24, [2][0][2][0][RTW89_ACMA][1][33] = 58, @@ -44731,6 +46417,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][33] = 30, [2][0][2][0][RTW89_UK][1][33] = 58, [2][0][2][0][RTW89_UK][0][33] = 30, + [2][0][2][0][RTW89_THAILAND][1][33] = 58, + [2][0][2][0][RTW89_THAILAND][0][33] = 30, [2][0][2][0][RTW89_FCC][1][41] = 46, [2][0][2][0][RTW89_FCC][2][41] = 60, [2][0][2][0][RTW89_ETSI][1][41] = 58, @@ -44738,6 +46426,7 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][41] = 58, [2][0][2][0][RTW89_MKK][0][41] = 24, [2][0][2][0][RTW89_IC][1][41] = 46, + [2][0][2][0][RTW89_IC][2][41] = 60, [2][0][2][0][RTW89_KCC][1][41] = 50, [2][0][2][0][RTW89_KCC][0][41] = 24, [2][0][2][0][RTW89_ACMA][1][41] = 58, @@ -44747,6 +46436,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][41] = 30, [2][0][2][0][RTW89_UK][1][41] = 58, [2][0][2][0][RTW89_UK][0][41] = 30, + [2][0][2][0][RTW89_THAILAND][1][41] = 58, + [2][0][2][0][RTW89_THAILAND][0][41] = 30, [2][0][2][0][RTW89_FCC][1][48] = 46, [2][0][2][0][RTW89_FCC][2][48] = 127, [2][0][2][0][RTW89_ETSI][1][48] = 127, @@ -44754,6 +46445,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][48] = 127, [2][0][2][0][RTW89_MKK][0][48] = 127, [2][0][2][0][RTW89_IC][1][48] = 46, + [2][0][2][0][RTW89_IC][2][48] = 60, [2][0][2][0][RTW89_KCC][1][48] = 48, [2][0][2][0][RTW89_KCC][0][48] = 127, [2][0][2][0][RTW89_ACMA][1][48] = 127, @@ -44763,6 +46455,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][48] = 127, [2][0][2][0][RTW89_UK][1][48] = 127, [2][0][2][0][RTW89_UK][0][48] = 127, + [2][0][2][0][RTW89_THAILAND][1][48] = 127, + [2][0][2][0][RTW89_THAILAND][0][48] = 127, [2][0][2][0][RTW89_FCC][1][56] = 46, [2][0][2][0][RTW89_FCC][2][56] = 127, [2][0][2][0][RTW89_ETSI][1][56] = 127, @@ -44770,6 +46464,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][56] = 127, [2][0][2][0][RTW89_MKK][0][56] = 127, [2][0][2][0][RTW89_IC][1][56] = 46, + [2][0][2][0][RTW89_IC][2][56] = 58, [2][0][2][0][RTW89_KCC][1][56] = 48, [2][0][2][0][RTW89_KCC][0][56] = 127, [2][0][2][0][RTW89_ACMA][1][56] = 127, @@ -44779,6 +46474,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][56] = 127, [2][0][2][0][RTW89_UK][1][56] = 127, [2][0][2][0][RTW89_UK][0][56] = 127, + [2][0][2][0][RTW89_THAILAND][1][56] = 127, + [2][0][2][0][RTW89_THAILAND][0][56] = 127, [2][0][2][0][RTW89_FCC][1][63] = 46, [2][0][2][0][RTW89_FCC][2][63] = 58, [2][0][2][0][RTW89_ETSI][1][63] = 127, @@ -44786,6 +46483,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][63] = 127, [2][0][2][0][RTW89_MKK][0][63] = 127, [2][0][2][0][RTW89_IC][1][63] = 46, + [2][0][2][0][RTW89_IC][2][63] = 58, [2][0][2][0][RTW89_KCC][1][63] = 48, [2][0][2][0][RTW89_KCC][0][63] = 127, [2][0][2][0][RTW89_ACMA][1][63] = 127, @@ -44795,6 +46493,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][63] = 127, [2][0][2][0][RTW89_UK][1][63] = 127, [2][0][2][0][RTW89_UK][0][63] = 127, + [2][0][2][0][RTW89_THAILAND][1][63] = 127, + [2][0][2][0][RTW89_THAILAND][0][63] = 127, [2][0][2][0][RTW89_FCC][1][71] = 46, [2][0][2][0][RTW89_FCC][2][71] = 58, [2][0][2][0][RTW89_ETSI][1][71] = 127, @@ -44802,6 +46502,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][71] = 127, [2][0][2][0][RTW89_MKK][0][71] = 127, [2][0][2][0][RTW89_IC][1][71] = 46, + [2][0][2][0][RTW89_IC][2][71] = 58, [2][0][2][0][RTW89_KCC][1][71] = 48, [2][0][2][0][RTW89_KCC][0][71] = 127, [2][0][2][0][RTW89_ACMA][1][71] = 127, @@ -44811,6 +46512,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][71] = 127, [2][0][2][0][RTW89_UK][1][71] = 127, [2][0][2][0][RTW89_UK][0][71] 
= 127, + [2][0][2][0][RTW89_THAILAND][1][71] = 127, + [2][0][2][0][RTW89_THAILAND][0][71] = 127, [2][0][2][0][RTW89_FCC][1][78] = 46, [2][0][2][0][RTW89_FCC][2][78] = 58, [2][0][2][0][RTW89_ETSI][1][78] = 127, @@ -44818,6 +46521,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][78] = 127, [2][0][2][0][RTW89_MKK][0][78] = 127, [2][0][2][0][RTW89_IC][1][78] = 46, + [2][0][2][0][RTW89_IC][2][78] = 58, [2][0][2][0][RTW89_KCC][1][78] = 52, [2][0][2][0][RTW89_KCC][0][78] = 127, [2][0][2][0][RTW89_ACMA][1][78] = 127, @@ -44827,6 +46531,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][78] = 127, [2][0][2][0][RTW89_UK][1][78] = 127, [2][0][2][0][RTW89_UK][0][78] = 127, + [2][0][2][0][RTW89_THAILAND][1][78] = 127, + [2][0][2][0][RTW89_THAILAND][0][78] = 127, [2][0][2][0][RTW89_FCC][1][86] = 46, [2][0][2][0][RTW89_FCC][2][86] = 127, [2][0][2][0][RTW89_ETSI][1][86] = 127, @@ -44834,6 +46540,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][86] = 127, [2][0][2][0][RTW89_MKK][0][86] = 127, [2][0][2][0][RTW89_IC][1][86] = 46, + [2][0][2][0][RTW89_IC][2][86] = 127, [2][0][2][0][RTW89_KCC][1][86] = 52, [2][0][2][0][RTW89_KCC][0][86] = 127, [2][0][2][0][RTW89_ACMA][1][86] = 127, @@ -44843,6 +46550,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][86] = 127, [2][0][2][0][RTW89_UK][1][86] = 127, [2][0][2][0][RTW89_UK][0][86] = 127, + [2][0][2][0][RTW89_THAILAND][1][86] = 127, + [2][0][2][0][RTW89_THAILAND][0][86] = 127, [2][0][2][0][RTW89_FCC][1][93] = 46, [2][0][2][0][RTW89_FCC][2][93] = 127, [2][0][2][0][RTW89_ETSI][1][93] = 127, @@ -44850,6 +46559,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][93] = 127, [2][0][2][0][RTW89_MKK][0][93] = 127, [2][0][2][0][RTW89_IC][1][93] = 46, + [2][0][2][0][RTW89_IC][2][93] = 127, [2][0][2][0][RTW89_KCC][1][93] = 50, [2][0][2][0][RTW89_KCC][0][93] = 127, [2][0][2][0][RTW89_ACMA][1][93] = 127, @@ -44859,6 +46569,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][93] = 127, [2][0][2][0][RTW89_UK][1][93] = 127, [2][0][2][0][RTW89_UK][0][93] = 127, + [2][0][2][0][RTW89_THAILAND][1][93] = 127, + [2][0][2][0][RTW89_THAILAND][0][93] = 127, [2][0][2][0][RTW89_FCC][1][101] = 44, [2][0][2][0][RTW89_FCC][2][101] = 127, [2][0][2][0][RTW89_ETSI][1][101] = 127, @@ -44866,6 +46578,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][101] = 127, [2][0][2][0][RTW89_MKK][0][101] = 127, [2][0][2][0][RTW89_IC][1][101] = 44, + [2][0][2][0][RTW89_IC][2][101] = 127, [2][0][2][0][RTW89_KCC][1][101] = 50, [2][0][2][0][RTW89_KCC][0][101] = 127, [2][0][2][0][RTW89_ACMA][1][101] = 127, @@ -44875,6 +46588,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][101] = 127, [2][0][2][0][RTW89_UK][1][101] = 127, [2][0][2][0][RTW89_UK][0][101] = 127, + [2][0][2][0][RTW89_THAILAND][1][101] = 127, + [2][0][2][0][RTW89_THAILAND][0][101] = 127, [2][0][2][0][RTW89_FCC][1][108] = 127, [2][0][2][0][RTW89_FCC][2][108] = 127, [2][0][2][0][RTW89_ETSI][1][108] = 127, @@ -44882,6 +46597,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][108] = 127, [2][0][2][0][RTW89_MKK][0][108] = 127, [2][0][2][0][RTW89_IC][1][108] = 127, + [2][0][2][0][RTW89_IC][2][108] = 127, 
[2][0][2][0][RTW89_KCC][1][108] = 127, [2][0][2][0][RTW89_KCC][0][108] = 127, [2][0][2][0][RTW89_ACMA][1][108] = 127, @@ -44891,6 +46607,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][108] = 127, [2][0][2][0][RTW89_UK][1][108] = 127, [2][0][2][0][RTW89_UK][0][108] = 127, + [2][0][2][0][RTW89_THAILAND][1][108] = 127, + [2][0][2][0][RTW89_THAILAND][0][108] = 127, [2][0][2][0][RTW89_FCC][1][116] = 127, [2][0][2][0][RTW89_FCC][2][116] = 127, [2][0][2][0][RTW89_ETSI][1][116] = 127, @@ -44898,6 +46616,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MKK][1][116] = 127, [2][0][2][0][RTW89_MKK][0][116] = 127, [2][0][2][0][RTW89_IC][1][116] = 127, + [2][0][2][0][RTW89_IC][2][116] = 127, [2][0][2][0][RTW89_KCC][1][116] = 127, [2][0][2][0][RTW89_KCC][0][116] = 127, [2][0][2][0][RTW89_ACMA][1][116] = 127, @@ -44907,6 +46626,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_QATAR][0][116] = 127, [2][0][2][0][RTW89_UK][1][116] = 127, [2][0][2][0][RTW89_UK][0][116] = 127, + [2][0][2][0][RTW89_THAILAND][1][116] = 127, + [2][0][2][0][RTW89_THAILAND][0][116] = 127, [2][1][2][0][RTW89_FCC][1][3] = 22, [2][1][2][0][RTW89_FCC][2][3] = 50, [2][1][2][0][RTW89_ETSI][1][3] = 54, @@ -44914,6 +46635,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][3] = 52, [2][1][2][0][RTW89_MKK][0][3] = 14, [2][1][2][0][RTW89_IC][1][3] = 22, + [2][1][2][0][RTW89_IC][2][3] = 50, [2][1][2][0][RTW89_KCC][1][3] = 38, [2][1][2][0][RTW89_KCC][0][3] = 12, [2][1][2][0][RTW89_ACMA][1][3] = 54, @@ -44923,6 +46645,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][3] = 16, [2][1][2][0][RTW89_UK][1][3] = 54, [2][1][2][0][RTW89_UK][0][3] = 16, + [2][1][2][0][RTW89_THAILAND][1][3] = 46, + [2][1][2][0][RTW89_THAILAND][0][3] = 18, [2][1][2][0][RTW89_FCC][1][11] = 20, [2][1][2][0][RTW89_FCC][2][11] = 50, [2][1][2][0][RTW89_ETSI][1][11] = 54, @@ -44930,6 +46654,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][11] = 52, [2][1][2][0][RTW89_MKK][0][11] = 12, [2][1][2][0][RTW89_IC][1][11] = 20, + [2][1][2][0][RTW89_IC][2][11] = 50, [2][1][2][0][RTW89_KCC][1][11] = 38, [2][1][2][0][RTW89_KCC][0][11] = 12, [2][1][2][0][RTW89_ACMA][1][11] = 54, @@ -44939,6 +46664,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][11] = 16, [2][1][2][0][RTW89_UK][1][11] = 54, [2][1][2][0][RTW89_UK][0][11] = 16, + [2][1][2][0][RTW89_THAILAND][1][11] = 46, + [2][1][2][0][RTW89_THAILAND][0][11] = 18, [2][1][2][0][RTW89_FCC][1][18] = 20, [2][1][2][0][RTW89_FCC][2][18] = 50, [2][1][2][0][RTW89_ETSI][1][18] = 54, @@ -44946,6 +46673,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][18] = 52, [2][1][2][0][RTW89_MKK][0][18] = 12, [2][1][2][0][RTW89_IC][1][18] = 20, + [2][1][2][0][RTW89_IC][2][18] = 50, [2][1][2][0][RTW89_KCC][1][18] = 38, [2][1][2][0][RTW89_KCC][0][18] = 12, [2][1][2][0][RTW89_ACMA][1][18] = 54, @@ -44955,6 +46683,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][18] = 16, [2][1][2][0][RTW89_UK][1][18] = 54, [2][1][2][0][RTW89_UK][0][18] = 16, + [2][1][2][0][RTW89_THAILAND][1][18] = 46, + [2][1][2][0][RTW89_THAILAND][0][18] = 18, [2][1][2][0][RTW89_FCC][1][26] = 20, [2][1][2][0][RTW89_FCC][2][26] = 60, [2][1][2][0][RTW89_ETSI][1][26] = 54, @@ 
-44962,6 +46692,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][26] = 52, [2][1][2][0][RTW89_MKK][0][26] = 12, [2][1][2][0][RTW89_IC][1][26] = 20, + [2][1][2][0][RTW89_IC][2][26] = 60, [2][1][2][0][RTW89_KCC][1][26] = 38, [2][1][2][0][RTW89_KCC][0][26] = 12, [2][1][2][0][RTW89_ACMA][1][26] = 54, @@ -44971,6 +46702,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][26] = 16, [2][1][2][0][RTW89_UK][1][26] = 54, [2][1][2][0][RTW89_UK][0][26] = 16, + [2][1][2][0][RTW89_THAILAND][1][26] = 46, + [2][1][2][0][RTW89_THAILAND][0][26] = 18, [2][1][2][0][RTW89_FCC][1][33] = 20, [2][1][2][0][RTW89_FCC][2][33] = 60, [2][1][2][0][RTW89_ETSI][1][33] = 54, @@ -44978,6 +46711,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][33] = 48, [2][1][2][0][RTW89_MKK][0][33] = 12, [2][1][2][0][RTW89_IC][1][33] = 20, + [2][1][2][0][RTW89_IC][2][33] = 60, [2][1][2][0][RTW89_KCC][1][33] = 38, [2][1][2][0][RTW89_KCC][0][33] = 12, [2][1][2][0][RTW89_ACMA][1][33] = 54, @@ -44987,6 +46721,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][33] = 16, [2][1][2][0][RTW89_UK][1][33] = 54, [2][1][2][0][RTW89_UK][0][33] = 16, + [2][1][2][0][RTW89_THAILAND][1][33] = 46, + [2][1][2][0][RTW89_THAILAND][0][33] = 18, [2][1][2][0][RTW89_FCC][1][41] = 22, [2][1][2][0][RTW89_FCC][2][41] = 60, [2][1][2][0][RTW89_ETSI][1][41] = 54, @@ -44994,6 +46730,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][41] = 48, [2][1][2][0][RTW89_MKK][0][41] = 12, [2][1][2][0][RTW89_IC][1][41] = 22, + [2][1][2][0][RTW89_IC][2][41] = 60, [2][1][2][0][RTW89_KCC][1][41] = 38, [2][1][2][0][RTW89_KCC][0][41] = 12, [2][1][2][0][RTW89_ACMA][1][41] = 54, @@ -45003,6 +46740,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][41] = 18, [2][1][2][0][RTW89_UK][1][41] = 54, [2][1][2][0][RTW89_UK][0][41] = 18, + [2][1][2][0][RTW89_THAILAND][1][41] = 46, + [2][1][2][0][RTW89_THAILAND][0][41] = 18, [2][1][2][0][RTW89_FCC][1][48] = 22, [2][1][2][0][RTW89_FCC][2][48] = 127, [2][1][2][0][RTW89_ETSI][1][48] = 127, @@ -45010,6 +46749,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][48] = 127, [2][1][2][0][RTW89_MKK][0][48] = 127, [2][1][2][0][RTW89_IC][1][48] = 22, + [2][1][2][0][RTW89_IC][2][48] = 60, [2][1][2][0][RTW89_KCC][1][48] = 38, [2][1][2][0][RTW89_KCC][0][48] = 127, [2][1][2][0][RTW89_ACMA][1][48] = 127, @@ -45019,6 +46759,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][48] = 127, [2][1][2][0][RTW89_UK][1][48] = 127, [2][1][2][0][RTW89_UK][0][48] = 127, + [2][1][2][0][RTW89_THAILAND][1][48] = 127, + [2][1][2][0][RTW89_THAILAND][0][48] = 127, [2][1][2][0][RTW89_FCC][1][56] = 20, [2][1][2][0][RTW89_FCC][2][56] = 127, [2][1][2][0][RTW89_ETSI][1][56] = 127, @@ -45026,6 +46768,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][56] = 127, [2][1][2][0][RTW89_MKK][0][56] = 127, [2][1][2][0][RTW89_IC][1][56] = 20, + [2][1][2][0][RTW89_IC][2][56] = 56, [2][1][2][0][RTW89_KCC][1][56] = 38, [2][1][2][0][RTW89_KCC][0][56] = 127, [2][1][2][0][RTW89_ACMA][1][56] = 127, @@ -45035,6 +46778,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][56] = 127, [2][1][2][0][RTW89_UK][1][56] = 127, 
[2][1][2][0][RTW89_UK][0][56] = 127, + [2][1][2][0][RTW89_THAILAND][1][56] = 127, + [2][1][2][0][RTW89_THAILAND][0][56] = 127, [2][1][2][0][RTW89_FCC][1][63] = 22, [2][1][2][0][RTW89_FCC][2][63] = 58, [2][1][2][0][RTW89_ETSI][1][63] = 127, @@ -45042,6 +46787,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][63] = 127, [2][1][2][0][RTW89_MKK][0][63] = 127, [2][1][2][0][RTW89_IC][1][63] = 22, + [2][1][2][0][RTW89_IC][2][63] = 58, [2][1][2][0][RTW89_KCC][1][63] = 38, [2][1][2][0][RTW89_KCC][0][63] = 127, [2][1][2][0][RTW89_ACMA][1][63] = 127, @@ -45051,6 +46797,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][63] = 127, [2][1][2][0][RTW89_UK][1][63] = 127, [2][1][2][0][RTW89_UK][0][63] = 127, + [2][1][2][0][RTW89_THAILAND][1][63] = 127, + [2][1][2][0][RTW89_THAILAND][0][63] = 127, [2][1][2][0][RTW89_FCC][1][71] = 20, [2][1][2][0][RTW89_FCC][2][71] = 58, [2][1][2][0][RTW89_ETSI][1][71] = 127, @@ -45058,6 +46806,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][71] = 127, [2][1][2][0][RTW89_MKK][0][71] = 127, [2][1][2][0][RTW89_IC][1][71] = 20, + [2][1][2][0][RTW89_IC][2][71] = 58, [2][1][2][0][RTW89_KCC][1][71] = 38, [2][1][2][0][RTW89_KCC][0][71] = 127, [2][1][2][0][RTW89_ACMA][1][71] = 127, @@ -45067,6 +46816,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][71] = 127, [2][1][2][0][RTW89_UK][1][71] = 127, [2][1][2][0][RTW89_UK][0][71] = 127, + [2][1][2][0][RTW89_THAILAND][1][71] = 127, + [2][1][2][0][RTW89_THAILAND][0][71] = 127, [2][1][2][0][RTW89_FCC][1][78] = 20, [2][1][2][0][RTW89_FCC][2][78] = 58, [2][1][2][0][RTW89_ETSI][1][78] = 127, @@ -45074,6 +46825,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][78] = 127, [2][1][2][0][RTW89_MKK][0][78] = 127, [2][1][2][0][RTW89_IC][1][78] = 20, + [2][1][2][0][RTW89_IC][2][78] = 58, [2][1][2][0][RTW89_KCC][1][78] = 38, [2][1][2][0][RTW89_KCC][0][78] = 127, [2][1][2][0][RTW89_ACMA][1][78] = 127, @@ -45083,6 +46835,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][78] = 127, [2][1][2][0][RTW89_UK][1][78] = 127, [2][1][2][0][RTW89_UK][0][78] = 127, + [2][1][2][0][RTW89_THAILAND][1][78] = 127, + [2][1][2][0][RTW89_THAILAND][0][78] = 127, [2][1][2][0][RTW89_FCC][1][86] = 20, [2][1][2][0][RTW89_FCC][2][86] = 127, [2][1][2][0][RTW89_ETSI][1][86] = 127, @@ -45090,6 +46844,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][86] = 127, [2][1][2][0][RTW89_MKK][0][86] = 127, [2][1][2][0][RTW89_IC][1][86] = 20, + [2][1][2][0][RTW89_IC][2][86] = 127, [2][1][2][0][RTW89_KCC][1][86] = 38, [2][1][2][0][RTW89_KCC][0][86] = 127, [2][1][2][0][RTW89_ACMA][1][86] = 127, @@ -45099,6 +46854,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][86] = 127, [2][1][2][0][RTW89_UK][1][86] = 127, [2][1][2][0][RTW89_UK][0][86] = 127, + [2][1][2][0][RTW89_THAILAND][1][86] = 127, + [2][1][2][0][RTW89_THAILAND][0][86] = 127, [2][1][2][0][RTW89_FCC][1][93] = 22, [2][1][2][0][RTW89_FCC][2][93] = 127, [2][1][2][0][RTW89_ETSI][1][93] = 127, @@ -45106,6 +46863,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][93] = 127, [2][1][2][0][RTW89_MKK][0][93] = 127, [2][1][2][0][RTW89_IC][1][93] = 22, + [2][1][2][0][RTW89_IC][2][93] = 127, 
[2][1][2][0][RTW89_KCC][1][93] = 38, [2][1][2][0][RTW89_KCC][0][93] = 127, [2][1][2][0][RTW89_ACMA][1][93] = 127, @@ -45115,6 +46873,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][93] = 127, [2][1][2][0][RTW89_UK][1][93] = 127, [2][1][2][0][RTW89_UK][0][93] = 127, + [2][1][2][0][RTW89_THAILAND][1][93] = 127, + [2][1][2][0][RTW89_THAILAND][0][93] = 127, [2][1][2][0][RTW89_FCC][1][101] = 22, [2][1][2][0][RTW89_FCC][2][101] = 127, [2][1][2][0][RTW89_ETSI][1][101] = 127, @@ -45122,6 +46882,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][101] = 127, [2][1][2][0][RTW89_MKK][0][101] = 127, [2][1][2][0][RTW89_IC][1][101] = 22, + [2][1][2][0][RTW89_IC][2][101] = 127, [2][1][2][0][RTW89_KCC][1][101] = 38, [2][1][2][0][RTW89_KCC][0][101] = 127, [2][1][2][0][RTW89_ACMA][1][101] = 127, @@ -45131,6 +46892,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][101] = 127, [2][1][2][0][RTW89_UK][1][101] = 127, [2][1][2][0][RTW89_UK][0][101] = 127, + [2][1][2][0][RTW89_THAILAND][1][101] = 127, + [2][1][2][0][RTW89_THAILAND][0][101] = 127, [2][1][2][0][RTW89_FCC][1][108] = 127, [2][1][2][0][RTW89_FCC][2][108] = 127, [2][1][2][0][RTW89_ETSI][1][108] = 127, @@ -45138,6 +46901,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][108] = 127, [2][1][2][0][RTW89_MKK][0][108] = 127, [2][1][2][0][RTW89_IC][1][108] = 127, + [2][1][2][0][RTW89_IC][2][108] = 127, [2][1][2][0][RTW89_KCC][1][108] = 127, [2][1][2][0][RTW89_KCC][0][108] = 127, [2][1][2][0][RTW89_ACMA][1][108] = 127, @@ -45147,6 +46911,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][108] = 127, [2][1][2][0][RTW89_UK][1][108] = 127, [2][1][2][0][RTW89_UK][0][108] = 127, + [2][1][2][0][RTW89_THAILAND][1][108] = 127, + [2][1][2][0][RTW89_THAILAND][0][108] = 127, [2][1][2][0][RTW89_FCC][1][116] = 127, [2][1][2][0][RTW89_FCC][2][116] = 127, [2][1][2][0][RTW89_ETSI][1][116] = 127, @@ -45154,6 +46920,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MKK][1][116] = 127, [2][1][2][0][RTW89_MKK][0][116] = 127, [2][1][2][0][RTW89_IC][1][116] = 127, + [2][1][2][0][RTW89_IC][2][116] = 127, [2][1][2][0][RTW89_KCC][1][116] = 127, [2][1][2][0][RTW89_KCC][0][116] = 127, [2][1][2][0][RTW89_ACMA][1][116] = 127, @@ -45163,6 +46930,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_QATAR][0][116] = 127, [2][1][2][0][RTW89_UK][1][116] = 127, [2][1][2][0][RTW89_UK][0][116] = 127, + [2][1][2][0][RTW89_THAILAND][1][116] = 127, + [2][1][2][0][RTW89_THAILAND][0][116] = 127, [2][1][2][1][RTW89_FCC][1][3] = 22, [2][1][2][1][RTW89_FCC][2][3] = 50, [2][1][2][1][RTW89_ETSI][1][3] = 42, @@ -45170,6 +46939,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][3] = 52, [2][1][2][1][RTW89_MKK][0][3] = 14, [2][1][2][1][RTW89_IC][1][3] = 22, + [2][1][2][1][RTW89_IC][2][3] = 50, [2][1][2][1][RTW89_KCC][1][3] = 38, [2][1][2][1][RTW89_KCC][0][3] = 12, [2][1][2][1][RTW89_ACMA][1][3] = 42, @@ -45179,6 +46949,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][3] = 6, [2][1][2][1][RTW89_UK][1][3] = 42, [2][1][2][1][RTW89_UK][0][3] = 6, + [2][1][2][1][RTW89_THAILAND][1][3] = 46, + [2][1][2][1][RTW89_THAILAND][0][3] = 6, [2][1][2][1][RTW89_FCC][1][11] = 20, [2][1][2][1][RTW89_FCC][2][11] = 50, 
[2][1][2][1][RTW89_ETSI][1][11] = 42, @@ -45186,6 +46958,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][11] = 52, [2][1][2][1][RTW89_MKK][0][11] = 12, [2][1][2][1][RTW89_IC][1][11] = 20, + [2][1][2][1][RTW89_IC][2][11] = 50, [2][1][2][1][RTW89_KCC][1][11] = 38, [2][1][2][1][RTW89_KCC][0][11] = 12, [2][1][2][1][RTW89_ACMA][1][11] = 42, @@ -45195,6 +46968,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][11] = 6, [2][1][2][1][RTW89_UK][1][11] = 42, [2][1][2][1][RTW89_UK][0][11] = 6, + [2][1][2][1][RTW89_THAILAND][1][11] = 46, + [2][1][2][1][RTW89_THAILAND][0][11] = 6, [2][1][2][1][RTW89_FCC][1][18] = 20, [2][1][2][1][RTW89_FCC][2][18] = 50, [2][1][2][1][RTW89_ETSI][1][18] = 42, @@ -45202,6 +46977,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][18] = 52, [2][1][2][1][RTW89_MKK][0][18] = 12, [2][1][2][1][RTW89_IC][1][18] = 20, + [2][1][2][1][RTW89_IC][2][18] = 50, [2][1][2][1][RTW89_KCC][1][18] = 38, [2][1][2][1][RTW89_KCC][0][18] = 12, [2][1][2][1][RTW89_ACMA][1][18] = 42, @@ -45211,6 +46987,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][18] = 6, [2][1][2][1][RTW89_UK][1][18] = 42, [2][1][2][1][RTW89_UK][0][18] = 6, + [2][1][2][1][RTW89_THAILAND][1][18] = 46, + [2][1][2][1][RTW89_THAILAND][0][18] = 6, [2][1][2][1][RTW89_FCC][1][26] = 20, [2][1][2][1][RTW89_FCC][2][26] = 60, [2][1][2][1][RTW89_ETSI][1][26] = 42, @@ -45218,6 +46996,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][26] = 52, [2][1][2][1][RTW89_MKK][0][26] = 12, [2][1][2][1][RTW89_IC][1][26] = 20, + [2][1][2][1][RTW89_IC][2][26] = 60, [2][1][2][1][RTW89_KCC][1][26] = 38, [2][1][2][1][RTW89_KCC][0][26] = 12, [2][1][2][1][RTW89_ACMA][1][26] = 42, @@ -45227,6 +47006,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][26] = 6, [2][1][2][1][RTW89_UK][1][26] = 42, [2][1][2][1][RTW89_UK][0][26] = 6, + [2][1][2][1][RTW89_THAILAND][1][26] = 46, + [2][1][2][1][RTW89_THAILAND][0][26] = 6, [2][1][2][1][RTW89_FCC][1][33] = 20, [2][1][2][1][RTW89_FCC][2][33] = 60, [2][1][2][1][RTW89_ETSI][1][33] = 42, @@ -45234,6 +47015,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][33] = 48, [2][1][2][1][RTW89_MKK][0][33] = 12, [2][1][2][1][RTW89_IC][1][33] = 20, + [2][1][2][1][RTW89_IC][2][33] = 60, [2][1][2][1][RTW89_KCC][1][33] = 38, [2][1][2][1][RTW89_KCC][0][33] = 12, [2][1][2][1][RTW89_ACMA][1][33] = 42, @@ -45243,6 +47025,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][33] = 6, [2][1][2][1][RTW89_UK][1][33] = 42, [2][1][2][1][RTW89_UK][0][33] = 6, + [2][1][2][1][RTW89_THAILAND][1][33] = 46, + [2][1][2][1][RTW89_THAILAND][0][33] = 6, [2][1][2][1][RTW89_FCC][1][41] = 22, [2][1][2][1][RTW89_FCC][2][41] = 60, [2][1][2][1][RTW89_ETSI][1][41] = 42, @@ -45250,6 +47034,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][41] = 48, [2][1][2][1][RTW89_MKK][0][41] = 12, [2][1][2][1][RTW89_IC][1][41] = 22, + [2][1][2][1][RTW89_IC][2][41] = 60, [2][1][2][1][RTW89_KCC][1][41] = 38, [2][1][2][1][RTW89_KCC][0][41] = 12, [2][1][2][1][RTW89_ACMA][1][41] = 42, @@ -45259,6 +47044,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][41] = 6, [2][1][2][1][RTW89_UK][1][41] = 42, 
[2][1][2][1][RTW89_UK][0][41] = 6, + [2][1][2][1][RTW89_THAILAND][1][41] = 46, + [2][1][2][1][RTW89_THAILAND][0][41] = 6, [2][1][2][1][RTW89_FCC][1][48] = 22, [2][1][2][1][RTW89_FCC][2][48] = 127, [2][1][2][1][RTW89_ETSI][1][48] = 127, @@ -45266,6 +47053,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][48] = 127, [2][1][2][1][RTW89_MKK][0][48] = 127, [2][1][2][1][RTW89_IC][1][48] = 22, + [2][1][2][1][RTW89_IC][2][48] = 60, [2][1][2][1][RTW89_KCC][1][48] = 38, [2][1][2][1][RTW89_KCC][0][48] = 127, [2][1][2][1][RTW89_ACMA][1][48] = 127, @@ -45275,6 +47063,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][48] = 127, [2][1][2][1][RTW89_UK][1][48] = 127, [2][1][2][1][RTW89_UK][0][48] = 127, + [2][1][2][1][RTW89_THAILAND][1][48] = 127, + [2][1][2][1][RTW89_THAILAND][0][48] = 127, [2][1][2][1][RTW89_FCC][1][56] = 20, [2][1][2][1][RTW89_FCC][2][56] = 127, [2][1][2][1][RTW89_ETSI][1][56] = 127, @@ -45282,6 +47072,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][56] = 127, [2][1][2][1][RTW89_MKK][0][56] = 127, [2][1][2][1][RTW89_IC][1][56] = 20, + [2][1][2][1][RTW89_IC][2][56] = 56, [2][1][2][1][RTW89_KCC][1][56] = 38, [2][1][2][1][RTW89_KCC][0][56] = 127, [2][1][2][1][RTW89_ACMA][1][56] = 127, @@ -45291,6 +47082,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][56] = 127, [2][1][2][1][RTW89_UK][1][56] = 127, [2][1][2][1][RTW89_UK][0][56] = 127, + [2][1][2][1][RTW89_THAILAND][1][56] = 127, + [2][1][2][1][RTW89_THAILAND][0][56] = 127, [2][1][2][1][RTW89_FCC][1][63] = 22, [2][1][2][1][RTW89_FCC][2][63] = 58, [2][1][2][1][RTW89_ETSI][1][63] = 127, @@ -45298,6 +47091,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][63] = 127, [2][1][2][1][RTW89_MKK][0][63] = 127, [2][1][2][1][RTW89_IC][1][63] = 22, + [2][1][2][1][RTW89_IC][2][63] = 58, [2][1][2][1][RTW89_KCC][1][63] = 38, [2][1][2][1][RTW89_KCC][0][63] = 127, [2][1][2][1][RTW89_ACMA][1][63] = 127, @@ -45307,6 +47101,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][63] = 127, [2][1][2][1][RTW89_UK][1][63] = 127, [2][1][2][1][RTW89_UK][0][63] = 127, + [2][1][2][1][RTW89_THAILAND][1][63] = 127, + [2][1][2][1][RTW89_THAILAND][0][63] = 127, [2][1][2][1][RTW89_FCC][1][71] = 20, [2][1][2][1][RTW89_FCC][2][71] = 58, [2][1][2][1][RTW89_ETSI][1][71] = 127, @@ -45314,6 +47110,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][71] = 127, [2][1][2][1][RTW89_MKK][0][71] = 127, [2][1][2][1][RTW89_IC][1][71] = 20, + [2][1][2][1][RTW89_IC][2][71] = 58, [2][1][2][1][RTW89_KCC][1][71] = 38, [2][1][2][1][RTW89_KCC][0][71] = 127, [2][1][2][1][RTW89_ACMA][1][71] = 127, @@ -45323,6 +47120,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][71] = 127, [2][1][2][1][RTW89_UK][1][71] = 127, [2][1][2][1][RTW89_UK][0][71] = 127, + [2][1][2][1][RTW89_THAILAND][1][71] = 127, + [2][1][2][1][RTW89_THAILAND][0][71] = 127, [2][1][2][1][RTW89_FCC][1][78] = 20, [2][1][2][1][RTW89_FCC][2][78] = 58, [2][1][2][1][RTW89_ETSI][1][78] = 127, @@ -45330,6 +47129,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][78] = 127, [2][1][2][1][RTW89_MKK][0][78] = 127, [2][1][2][1][RTW89_IC][1][78] = 20, + [2][1][2][1][RTW89_IC][2][78] = 58, [2][1][2][1][RTW89_KCC][1][78] = 
38, [2][1][2][1][RTW89_KCC][0][78] = 127, [2][1][2][1][RTW89_ACMA][1][78] = 127, @@ -45339,6 +47139,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][78] = 127, [2][1][2][1][RTW89_UK][1][78] = 127, [2][1][2][1][RTW89_UK][0][78] = 127, + [2][1][2][1][RTW89_THAILAND][1][78] = 127, + [2][1][2][1][RTW89_THAILAND][0][78] = 127, [2][1][2][1][RTW89_FCC][1][86] = 20, [2][1][2][1][RTW89_FCC][2][86] = 127, [2][1][2][1][RTW89_ETSI][1][86] = 127, @@ -45346,6 +47148,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][86] = 127, [2][1][2][1][RTW89_MKK][0][86] = 127, [2][1][2][1][RTW89_IC][1][86] = 20, + [2][1][2][1][RTW89_IC][2][86] = 127, [2][1][2][1][RTW89_KCC][1][86] = 38, [2][1][2][1][RTW89_KCC][0][86] = 127, [2][1][2][1][RTW89_ACMA][1][86] = 127, @@ -45355,6 +47158,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][86] = 127, [2][1][2][1][RTW89_UK][1][86] = 127, [2][1][2][1][RTW89_UK][0][86] = 127, + [2][1][2][1][RTW89_THAILAND][1][86] = 127, + [2][1][2][1][RTW89_THAILAND][0][86] = 127, [2][1][2][1][RTW89_FCC][1][93] = 22, [2][1][2][1][RTW89_FCC][2][93] = 127, [2][1][2][1][RTW89_ETSI][1][93] = 127, @@ -45362,6 +47167,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][93] = 127, [2][1][2][1][RTW89_MKK][0][93] = 127, [2][1][2][1][RTW89_IC][1][93] = 22, + [2][1][2][1][RTW89_IC][2][93] = 127, [2][1][2][1][RTW89_KCC][1][93] = 38, [2][1][2][1][RTW89_KCC][0][93] = 127, [2][1][2][1][RTW89_ACMA][1][93] = 127, @@ -45371,6 +47177,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][93] = 127, [2][1][2][1][RTW89_UK][1][93] = 127, [2][1][2][1][RTW89_UK][0][93] = 127, + [2][1][2][1][RTW89_THAILAND][1][93] = 127, + [2][1][2][1][RTW89_THAILAND][0][93] = 127, [2][1][2][1][RTW89_FCC][1][101] = 22, [2][1][2][1][RTW89_FCC][2][101] = 127, [2][1][2][1][RTW89_ETSI][1][101] = 127, @@ -45378,6 +47186,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][101] = 127, [2][1][2][1][RTW89_MKK][0][101] = 127, [2][1][2][1][RTW89_IC][1][101] = 22, + [2][1][2][1][RTW89_IC][2][101] = 127, [2][1][2][1][RTW89_KCC][1][101] = 38, [2][1][2][1][RTW89_KCC][0][101] = 127, [2][1][2][1][RTW89_ACMA][1][101] = 127, @@ -45387,6 +47196,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][101] = 127, [2][1][2][1][RTW89_UK][1][101] = 127, [2][1][2][1][RTW89_UK][0][101] = 127, + [2][1][2][1][RTW89_THAILAND][1][101] = 127, + [2][1][2][1][RTW89_THAILAND][0][101] = 127, [2][1][2][1][RTW89_FCC][1][108] = 127, [2][1][2][1][RTW89_FCC][2][108] = 127, [2][1][2][1][RTW89_ETSI][1][108] = 127, @@ -45394,6 +47205,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][108] = 127, [2][1][2][1][RTW89_MKK][0][108] = 127, [2][1][2][1][RTW89_IC][1][108] = 127, + [2][1][2][1][RTW89_IC][2][108] = 127, [2][1][2][1][RTW89_KCC][1][108] = 127, [2][1][2][1][RTW89_KCC][0][108] = 127, [2][1][2][1][RTW89_ACMA][1][108] = 127, @@ -45403,6 +47215,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][108] = 127, [2][1][2][1][RTW89_UK][1][108] = 127, [2][1][2][1][RTW89_UK][0][108] = 127, + [2][1][2][1][RTW89_THAILAND][1][108] = 127, + [2][1][2][1][RTW89_THAILAND][0][108] = 127, [2][1][2][1][RTW89_FCC][1][116] = 127, [2][1][2][1][RTW89_FCC][2][116] = 127, 
[2][1][2][1][RTW89_ETSI][1][116] = 127, @@ -45410,6 +47224,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MKK][1][116] = 127, [2][1][2][1][RTW89_MKK][0][116] = 127, [2][1][2][1][RTW89_IC][1][116] = 127, + [2][1][2][1][RTW89_IC][2][116] = 127, [2][1][2][1][RTW89_KCC][1][116] = 127, [2][1][2][1][RTW89_KCC][0][116] = 127, [2][1][2][1][RTW89_ACMA][1][116] = 127, @@ -45419,6 +47234,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_QATAR][0][116] = 127, [2][1][2][1][RTW89_UK][1][116] = 127, [2][1][2][1][RTW89_UK][0][116] = 127, + [2][1][2][1][RTW89_THAILAND][1][116] = 127, + [2][1][2][1][RTW89_THAILAND][0][116] = 127, [3][0][2][0][RTW89_FCC][1][7] = 52, [3][0][2][0][RTW89_FCC][2][7] = 52, [3][0][2][0][RTW89_ETSI][1][7] = 50, @@ -45426,6 +47243,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_MKK][1][7] = 50, [3][0][2][0][RTW89_MKK][0][7] = 22, [3][0][2][0][RTW89_IC][1][7] = 52, + [3][0][2][0][RTW89_IC][2][7] = 52, [3][0][2][0][RTW89_KCC][1][7] = 42, [3][0][2][0][RTW89_KCC][0][7] = 24, [3][0][2][0][RTW89_ACMA][1][7] = 50, @@ -45435,6 +47253,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_QATAR][0][7] = 30, [3][0][2][0][RTW89_UK][1][7] = 50, [3][0][2][0][RTW89_UK][0][7] = 30, + [3][0][2][0][RTW89_THAILAND][1][7] = 50, + [3][0][2][0][RTW89_THAILAND][0][7] = 30, [3][0][2][0][RTW89_FCC][1][22] = 52, [3][0][2][0][RTW89_FCC][2][22] = 52, [3][0][2][0][RTW89_ETSI][1][22] = 50, @@ -45442,6 +47262,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_MKK][1][22] = 50, [3][0][2][0][RTW89_MKK][0][22] = 20, [3][0][2][0][RTW89_IC][1][22] = 52, + [3][0][2][0][RTW89_IC][2][22] = 52, [3][0][2][0][RTW89_KCC][1][22] = 42, [3][0][2][0][RTW89_KCC][0][22] = 24, [3][0][2][0][RTW89_ACMA][1][22] = 50, @@ -45451,6 +47272,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_QATAR][0][22] = 30, [3][0][2][0][RTW89_UK][1][22] = 50, [3][0][2][0][RTW89_UK][0][22] = 30, + [3][0][2][0][RTW89_THAILAND][1][22] = 50, + [3][0][2][0][RTW89_THAILAND][0][22] = 30, [3][0][2][0][RTW89_FCC][1][37] = 52, [3][0][2][0][RTW89_FCC][2][37] = 52, [3][0][2][0][RTW89_ETSI][1][37] = 50, @@ -45458,6 +47281,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_MKK][1][37] = 50, [3][0][2][0][RTW89_MKK][0][37] = 20, [3][0][2][0][RTW89_IC][1][37] = 52, + [3][0][2][0][RTW89_IC][2][37] = 52, [3][0][2][0][RTW89_KCC][1][37] = 42, [3][0][2][0][RTW89_KCC][0][37] = 24, [3][0][2][0][RTW89_ACMA][1][37] = 50, @@ -45467,6 +47291,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_QATAR][0][37] = 30, [3][0][2][0][RTW89_UK][1][37] = 50, [3][0][2][0][RTW89_UK][0][37] = 30, + [3][0][2][0][RTW89_THAILAND][1][37] = 50, + [3][0][2][0][RTW89_THAILAND][0][37] = 30, [3][0][2][0][RTW89_FCC][1][52] = 54, [3][0][2][0][RTW89_FCC][2][52] = 127, [3][0][2][0][RTW89_ETSI][1][52] = 127, @@ -45474,6 +47300,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_MKK][1][52] = 127, [3][0][2][0][RTW89_MKK][0][52] = 127, [3][0][2][0][RTW89_IC][1][52] = 54, + [3][0][2][0][RTW89_IC][2][52] = 56, [3][0][2][0][RTW89_KCC][1][52] = 56, [3][0][2][0][RTW89_KCC][0][52] = 127, [3][0][2][0][RTW89_ACMA][1][52] = 127, @@ -45483,6 +47310,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_QATAR][0][52] = 127, 
[3][0][2][0][RTW89_UK][1][52] = 127, [3][0][2][0][RTW89_UK][0][52] = 127, + [3][0][2][0][RTW89_THAILAND][1][52] = 127, + [3][0][2][0][RTW89_THAILAND][0][52] = 127, [3][0][2][0][RTW89_FCC][1][67] = 54, [3][0][2][0][RTW89_FCC][2][67] = 54, [3][0][2][0][RTW89_ETSI][1][67] = 127, @@ -45490,6 +47319,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_MKK][1][67] = 127, [3][0][2][0][RTW89_MKK][0][67] = 127, [3][0][2][0][RTW89_IC][1][67] = 54, + [3][0][2][0][RTW89_IC][2][67] = 54, [3][0][2][0][RTW89_KCC][1][67] = 54, [3][0][2][0][RTW89_KCC][0][67] = 127, [3][0][2][0][RTW89_ACMA][1][67] = 127, @@ -45499,6 +47329,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_QATAR][0][67] = 127, [3][0][2][0][RTW89_UK][1][67] = 127, [3][0][2][0][RTW89_UK][0][67] = 127, + [3][0][2][0][RTW89_THAILAND][1][67] = 127, + [3][0][2][0][RTW89_THAILAND][0][67] = 127, [3][0][2][0][RTW89_FCC][1][82] = 46, [3][0][2][0][RTW89_FCC][2][82] = 127, [3][0][2][0][RTW89_ETSI][1][82] = 127, @@ -45506,6 +47338,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_MKK][1][82] = 127, [3][0][2][0][RTW89_MKK][0][82] = 127, [3][0][2][0][RTW89_IC][1][82] = 46, + [3][0][2][0][RTW89_IC][2][82] = 127, [3][0][2][0][RTW89_KCC][1][82] = 26, [3][0][2][0][RTW89_KCC][0][82] = 127, [3][0][2][0][RTW89_ACMA][1][82] = 127, @@ -45515,6 +47348,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_QATAR][0][82] = 127, [3][0][2][0][RTW89_UK][1][82] = 127, [3][0][2][0][RTW89_UK][0][82] = 127, + [3][0][2][0][RTW89_THAILAND][1][82] = 127, + [3][0][2][0][RTW89_THAILAND][0][82] = 127, [3][0][2][0][RTW89_FCC][1][97] = 40, [3][0][2][0][RTW89_FCC][2][97] = 127, [3][0][2][0][RTW89_ETSI][1][97] = 127, @@ -45522,6 +47357,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_MKK][1][97] = 127, [3][0][2][0][RTW89_MKK][0][97] = 127, [3][0][2][0][RTW89_IC][1][97] = 40, + [3][0][2][0][RTW89_IC][2][97] = 127, [3][0][2][0][RTW89_KCC][1][97] = 26, [3][0][2][0][RTW89_KCC][0][97] = 127, [3][0][2][0][RTW89_ACMA][1][97] = 127, @@ -45531,6 +47367,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_QATAR][0][97] = 127, [3][0][2][0][RTW89_UK][1][97] = 127, [3][0][2][0][RTW89_UK][0][97] = 127, + [3][0][2][0][RTW89_THAILAND][1][97] = 127, + [3][0][2][0][RTW89_THAILAND][0][97] = 127, [3][0][2][0][RTW89_FCC][1][112] = 127, [3][0][2][0][RTW89_FCC][2][112] = 127, [3][0][2][0][RTW89_ETSI][1][112] = 127, @@ -45538,6 +47376,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_MKK][1][112] = 127, [3][0][2][0][RTW89_MKK][0][112] = 127, [3][0][2][0][RTW89_IC][1][112] = 127, + [3][0][2][0][RTW89_IC][2][112] = 127, [3][0][2][0][RTW89_KCC][1][112] = 127, [3][0][2][0][RTW89_KCC][0][112] = 127, [3][0][2][0][RTW89_ACMA][1][112] = 127, @@ -45547,6 +47386,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_QATAR][0][112] = 127, [3][0][2][0][RTW89_UK][1][112] = 127, [3][0][2][0][RTW89_UK][0][112] = 127, + [3][0][2][0][RTW89_THAILAND][1][112] = 127, + [3][0][2][0][RTW89_THAILAND][0][112] = 127, [3][1][2][0][RTW89_FCC][1][7] = 32, [3][1][2][0][RTW89_FCC][2][7] = 46, [3][1][2][0][RTW89_ETSI][1][7] = 50, @@ -45554,6 +47395,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_MKK][1][7] = 38, [3][1][2][0][RTW89_MKK][0][7] = 10, [3][1][2][0][RTW89_IC][1][7] = 32, + 
[3][1][2][0][RTW89_IC][2][7] = 46, [3][1][2][0][RTW89_KCC][1][7] = 40, [3][1][2][0][RTW89_KCC][0][7] = 12, [3][1][2][0][RTW89_ACMA][1][7] = 50, @@ -45563,6 +47405,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_QATAR][0][7] = 18, [3][1][2][0][RTW89_UK][1][7] = 50, [3][1][2][0][RTW89_UK][0][7] = 18, + [3][1][2][0][RTW89_THAILAND][1][7] = 46, + [3][1][2][0][RTW89_THAILAND][0][7] = 18, [3][1][2][0][RTW89_FCC][1][22] = 30, [3][1][2][0][RTW89_FCC][2][22] = 52, [3][1][2][0][RTW89_ETSI][1][22] = 46, @@ -45570,6 +47414,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_MKK][1][22] = 48, [3][1][2][0][RTW89_MKK][0][22] = 8, [3][1][2][0][RTW89_IC][1][22] = 30, + [3][1][2][0][RTW89_IC][2][22] = 52, [3][1][2][0][RTW89_KCC][1][22] = 40, [3][1][2][0][RTW89_KCC][0][22] = 12, [3][1][2][0][RTW89_ACMA][1][22] = 46, @@ -45579,6 +47424,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_QATAR][0][22] = 16, [3][1][2][0][RTW89_UK][1][22] = 46, [3][1][2][0][RTW89_UK][0][22] = 16, + [3][1][2][0][RTW89_THAILAND][1][22] = 46, + [3][1][2][0][RTW89_THAILAND][0][22] = 18, [3][1][2][0][RTW89_FCC][1][37] = 30, [3][1][2][0][RTW89_FCC][2][37] = 52, [3][1][2][0][RTW89_ETSI][1][37] = 46, @@ -45586,6 +47433,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_MKK][1][37] = 48, [3][1][2][0][RTW89_MKK][0][37] = 8, [3][1][2][0][RTW89_IC][1][37] = 30, + [3][1][2][0][RTW89_IC][2][37] = 52, [3][1][2][0][RTW89_KCC][1][37] = 40, [3][1][2][0][RTW89_KCC][0][37] = 12, [3][1][2][0][RTW89_ACMA][1][37] = 46, @@ -45595,6 +47443,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_QATAR][0][37] = 16, [3][1][2][0][RTW89_UK][1][37] = 46, [3][1][2][0][RTW89_UK][0][37] = 16, + [3][1][2][0][RTW89_THAILAND][1][37] = 46, + [3][1][2][0][RTW89_THAILAND][0][37] = 18, [3][1][2][0][RTW89_FCC][1][52] = 30, [3][1][2][0][RTW89_FCC][2][52] = 127, [3][1][2][0][RTW89_ETSI][1][52] = 127, @@ -45602,6 +47452,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_MKK][1][52] = 127, [3][1][2][0][RTW89_MKK][0][52] = 127, [3][1][2][0][RTW89_IC][1][52] = 30, + [3][1][2][0][RTW89_IC][2][52] = 56, [3][1][2][0][RTW89_KCC][1][52] = 48, [3][1][2][0][RTW89_KCC][0][52] = 127, [3][1][2][0][RTW89_ACMA][1][52] = 127, @@ -45611,6 +47462,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_QATAR][0][52] = 127, [3][1][2][0][RTW89_UK][1][52] = 127, [3][1][2][0][RTW89_UK][0][52] = 127, + [3][1][2][0][RTW89_THAILAND][1][52] = 127, + [3][1][2][0][RTW89_THAILAND][0][52] = 127, [3][1][2][0][RTW89_FCC][1][67] = 32, [3][1][2][0][RTW89_FCC][2][67] = 54, [3][1][2][0][RTW89_ETSI][1][67] = 127, @@ -45618,6 +47471,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_MKK][1][67] = 127, [3][1][2][0][RTW89_MKK][0][67] = 127, [3][1][2][0][RTW89_IC][1][67] = 32, + [3][1][2][0][RTW89_IC][2][67] = 54, [3][1][2][0][RTW89_KCC][1][67] = 48, [3][1][2][0][RTW89_KCC][0][67] = 127, [3][1][2][0][RTW89_ACMA][1][67] = 127, @@ -45627,6 +47481,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_QATAR][0][67] = 127, [3][1][2][0][RTW89_UK][1][67] = 127, [3][1][2][0][RTW89_UK][0][67] = 127, + [3][1][2][0][RTW89_THAILAND][1][67] = 127, + [3][1][2][0][RTW89_THAILAND][0][67] = 127, [3][1][2][0][RTW89_FCC][1][82] = 32, [3][1][2][0][RTW89_FCC][2][82] = 127, 
[3][1][2][0][RTW89_ETSI][1][82] = 127, @@ -45634,6 +47490,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_MKK][1][82] = 127, [3][1][2][0][RTW89_MKK][0][82] = 127, [3][1][2][0][RTW89_IC][1][82] = 32, + [3][1][2][0][RTW89_IC][2][82] = 127, [3][1][2][0][RTW89_KCC][1][82] = 24, [3][1][2][0][RTW89_KCC][0][82] = 127, [3][1][2][0][RTW89_ACMA][1][82] = 127, @@ -45643,6 +47500,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_QATAR][0][82] = 127, [3][1][2][0][RTW89_UK][1][82] = 127, [3][1][2][0][RTW89_UK][0][82] = 127, + [3][1][2][0][RTW89_THAILAND][1][82] = 127, + [3][1][2][0][RTW89_THAILAND][0][82] = 127, [3][1][2][0][RTW89_FCC][1][97] = 32, [3][1][2][0][RTW89_FCC][2][97] = 127, [3][1][2][0][RTW89_ETSI][1][97] = 127, @@ -45650,6 +47509,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_MKK][1][97] = 127, [3][1][2][0][RTW89_MKK][0][97] = 127, [3][1][2][0][RTW89_IC][1][97] = 32, + [3][1][2][0][RTW89_IC][2][97] = 127, [3][1][2][0][RTW89_KCC][1][97] = 24, [3][1][2][0][RTW89_KCC][0][97] = 127, [3][1][2][0][RTW89_ACMA][1][97] = 127, @@ -45659,6 +47519,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_QATAR][0][97] = 127, [3][1][2][0][RTW89_UK][1][97] = 127, [3][1][2][0][RTW89_UK][0][97] = 127, + [3][1][2][0][RTW89_THAILAND][1][97] = 127, + [3][1][2][0][RTW89_THAILAND][0][97] = 127, [3][1][2][0][RTW89_FCC][1][112] = 127, [3][1][2][0][RTW89_FCC][2][112] = 127, [3][1][2][0][RTW89_ETSI][1][112] = 127, @@ -45666,6 +47528,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_MKK][1][112] = 127, [3][1][2][0][RTW89_MKK][0][112] = 127, [3][1][2][0][RTW89_IC][1][112] = 127, + [3][1][2][0][RTW89_IC][2][112] = 127, [3][1][2][0][RTW89_KCC][1][112] = 127, [3][1][2][0][RTW89_KCC][0][112] = 127, [3][1][2][0][RTW89_ACMA][1][112] = 127, @@ -45675,6 +47538,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_QATAR][0][112] = 127, [3][1][2][0][RTW89_UK][1][112] = 127, [3][1][2][0][RTW89_UK][0][112] = 127, + [3][1][2][0][RTW89_THAILAND][1][112] = 127, + [3][1][2][0][RTW89_THAILAND][0][112] = 127, [3][1][2][1][RTW89_FCC][1][7] = 32, [3][1][2][1][RTW89_FCC][2][7] = 46, [3][1][2][1][RTW89_ETSI][1][7] = 42, @@ -45682,6 +47547,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_MKK][1][7] = 38, [3][1][2][1][RTW89_MKK][0][7] = 10, [3][1][2][1][RTW89_IC][1][7] = 32, + [3][1][2][1][RTW89_IC][2][7] = 46, [3][1][2][1][RTW89_KCC][1][7] = 40, [3][1][2][1][RTW89_KCC][0][7] = 12, [3][1][2][1][RTW89_ACMA][1][7] = 42, @@ -45691,6 +47557,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_QATAR][0][7] = 6, [3][1][2][1][RTW89_UK][1][7] = 42, [3][1][2][1][RTW89_UK][0][7] = 6, + [3][1][2][1][RTW89_THAILAND][1][7] = 46, + [3][1][2][1][RTW89_THAILAND][0][7] = 6, [3][1][2][1][RTW89_FCC][1][22] = 30, [3][1][2][1][RTW89_FCC][2][22] = 52, [3][1][2][1][RTW89_ETSI][1][22] = 42, @@ -45698,6 +47566,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_MKK][1][22] = 48, [3][1][2][1][RTW89_MKK][0][22] = 8, [3][1][2][1][RTW89_IC][1][22] = 30, + [3][1][2][1][RTW89_IC][2][22] = 52, [3][1][2][1][RTW89_KCC][1][22] = 40, [3][1][2][1][RTW89_KCC][0][22] = 12, [3][1][2][1][RTW89_ACMA][1][22] = 42, @@ -45707,6 +47576,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_QATAR][0][22] = 6, 
[3][1][2][1][RTW89_UK][1][22] = 42, [3][1][2][1][RTW89_UK][0][22] = 6, + [3][1][2][1][RTW89_THAILAND][1][22] = 46, + [3][1][2][1][RTW89_THAILAND][0][22] = 6, [3][1][2][1][RTW89_FCC][1][37] = 30, [3][1][2][1][RTW89_FCC][2][37] = 52, [3][1][2][1][RTW89_ETSI][1][37] = 42, @@ -45714,6 +47585,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_MKK][1][37] = 48, [3][1][2][1][RTW89_MKK][0][37] = 8, [3][1][2][1][RTW89_IC][1][37] = 30, + [3][1][2][1][RTW89_IC][2][37] = 52, [3][1][2][1][RTW89_KCC][1][37] = 40, [3][1][2][1][RTW89_KCC][0][37] = 12, [3][1][2][1][RTW89_ACMA][1][37] = 42, @@ -45723,6 +47595,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_QATAR][0][37] = 6, [3][1][2][1][RTW89_UK][1][37] = 42, [3][1][2][1][RTW89_UK][0][37] = 6, + [3][1][2][1][RTW89_THAILAND][1][37] = 46, + [3][1][2][1][RTW89_THAILAND][0][37] = 6, [3][1][2][1][RTW89_FCC][1][52] = 30, [3][1][2][1][RTW89_FCC][2][52] = 127, [3][1][2][1][RTW89_ETSI][1][52] = 127, @@ -45730,6 +47604,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_MKK][1][52] = 127, [3][1][2][1][RTW89_MKK][0][52] = 127, [3][1][2][1][RTW89_IC][1][52] = 30, + [3][1][2][1][RTW89_IC][2][52] = 56, [3][1][2][1][RTW89_KCC][1][52] = 48, [3][1][2][1][RTW89_KCC][0][52] = 127, [3][1][2][1][RTW89_ACMA][1][52] = 127, @@ -45739,6 +47614,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_QATAR][0][52] = 127, [3][1][2][1][RTW89_UK][1][52] = 127, [3][1][2][1][RTW89_UK][0][52] = 127, + [3][1][2][1][RTW89_THAILAND][1][52] = 127, + [3][1][2][1][RTW89_THAILAND][0][52] = 127, [3][1][2][1][RTW89_FCC][1][67] = 32, [3][1][2][1][RTW89_FCC][2][67] = 54, [3][1][2][1][RTW89_ETSI][1][67] = 127, @@ -45746,6 +47623,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_MKK][1][67] = 127, [3][1][2][1][RTW89_MKK][0][67] = 127, [3][1][2][1][RTW89_IC][1][67] = 32, + [3][1][2][1][RTW89_IC][2][67] = 54, [3][1][2][1][RTW89_KCC][1][67] = 48, [3][1][2][1][RTW89_KCC][0][67] = 127, [3][1][2][1][RTW89_ACMA][1][67] = 127, @@ -45755,6 +47633,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_QATAR][0][67] = 127, [3][1][2][1][RTW89_UK][1][67] = 127, [3][1][2][1][RTW89_UK][0][67] = 127, + [3][1][2][1][RTW89_THAILAND][1][67] = 127, + [3][1][2][1][RTW89_THAILAND][0][67] = 127, [3][1][2][1][RTW89_FCC][1][82] = 32, [3][1][2][1][RTW89_FCC][2][82] = 127, [3][1][2][1][RTW89_ETSI][1][82] = 127, @@ -45762,6 +47642,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_MKK][1][82] = 127, [3][1][2][1][RTW89_MKK][0][82] = 127, [3][1][2][1][RTW89_IC][1][82] = 32, + [3][1][2][1][RTW89_IC][2][82] = 127, [3][1][2][1][RTW89_KCC][1][82] = 24, [3][1][2][1][RTW89_KCC][0][82] = 127, [3][1][2][1][RTW89_ACMA][1][82] = 127, @@ -45771,6 +47652,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_QATAR][0][82] = 127, [3][1][2][1][RTW89_UK][1][82] = 127, [3][1][2][1][RTW89_UK][0][82] = 127, + [3][1][2][1][RTW89_THAILAND][1][82] = 127, + [3][1][2][1][RTW89_THAILAND][0][82] = 127, [3][1][2][1][RTW89_FCC][1][97] = 32, [3][1][2][1][RTW89_FCC][2][97] = 127, [3][1][2][1][RTW89_ETSI][1][97] = 127, @@ -45778,6 +47661,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_MKK][1][97] = 127, [3][1][2][1][RTW89_MKK][0][97] = 127, [3][1][2][1][RTW89_IC][1][97] = 32, + [3][1][2][1][RTW89_IC][2][97] = 127, 
[3][1][2][1][RTW89_KCC][1][97] = 24, [3][1][2][1][RTW89_KCC][0][97] = 127, [3][1][2][1][RTW89_ACMA][1][97] = 127, @@ -45787,6 +47671,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_QATAR][0][97] = 127, [3][1][2][1][RTW89_UK][1][97] = 127, [3][1][2][1][RTW89_UK][0][97] = 127, + [3][1][2][1][RTW89_THAILAND][1][97] = 127, + [3][1][2][1][RTW89_THAILAND][0][97] = 127, [3][1][2][1][RTW89_FCC][1][112] = 127, [3][1][2][1][RTW89_FCC][2][112] = 127, [3][1][2][1][RTW89_ETSI][1][112] = 127, @@ -45794,6 +47680,7 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_MKK][1][112] = 127, [3][1][2][1][RTW89_MKK][0][112] = 127, [3][1][2][1][RTW89_IC][1][112] = 127, + [3][1][2][1][RTW89_IC][2][112] = 127, [3][1][2][1][RTW89_KCC][1][112] = 127, [3][1][2][1][RTW89_KCC][0][112] = 127, [3][1][2][1][RTW89_ACMA][1][112] = 127, @@ -45803,6 +47690,8 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_QATAR][0][112] = 127, [3][1][2][1][RTW89_UK][1][112] = 127, [3][1][2][1][RTW89_UK][0][112] = 127, + [3][1][2][1][RTW89_THAILAND][1][112] = 127, + [3][1][2][1][RTW89_THAILAND][0][112] = 127, }; static @@ -45904,6 +47793,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][0] = 34, [0][0][RTW89_CHILE][0] = 60, [0][0][RTW89_QATAR][0] = 34, + [0][0][RTW89_THAILAND][0] = 34, [0][0][RTW89_FCC][1] = 60, [0][0][RTW89_ETSI][1] = 38, [0][0][RTW89_MKK][1] = 40, @@ -45916,6 +47806,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][1] = 38, [0][0][RTW89_CHILE][1] = 50, [0][0][RTW89_QATAR][1] = 38, + [0][0][RTW89_THAILAND][1] = 38, [0][0][RTW89_FCC][2] = 64, [0][0][RTW89_ETSI][2] = 38, [0][0][RTW89_MKK][2] = 40, @@ -45928,6 +47819,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][2] = 38, [0][0][RTW89_CHILE][2] = 50, [0][0][RTW89_QATAR][2] = 38, + [0][0][RTW89_THAILAND][2] = 38, [0][0][RTW89_FCC][3] = 68, [0][0][RTW89_ETSI][3] = 38, [0][0][RTW89_MKK][3] = 40, @@ -45940,6 +47832,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][3] = 38, [0][0][RTW89_CHILE][3] = 50, [0][0][RTW89_QATAR][3] = 38, + [0][0][RTW89_THAILAND][3] = 38, [0][0][RTW89_FCC][4] = 68, [0][0][RTW89_ETSI][4] = 38, [0][0][RTW89_MKK][4] = 40, @@ -45952,6 +47845,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][4] = 38, [0][0][RTW89_CHILE][4] = 50, [0][0][RTW89_QATAR][4] = 38, + [0][0][RTW89_THAILAND][4] = 38, [0][0][RTW89_FCC][5] = 78, [0][0][RTW89_ETSI][5] = 38, [0][0][RTW89_MKK][5] = 40, @@ -45964,6 +47858,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][5] = 38, [0][0][RTW89_CHILE][5] = 78, [0][0][RTW89_QATAR][5] = 38, + [0][0][RTW89_THAILAND][5] = 38, [0][0][RTW89_FCC][6] = 54, [0][0][RTW89_ETSI][6] = 38, [0][0][RTW89_MKK][6] = 40, @@ -45976,6 +47871,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][6] = 38, [0][0][RTW89_CHILE][6] = 36, [0][0][RTW89_QATAR][6] = 38, + [0][0][RTW89_THAILAND][6] = 38, [0][0][RTW89_FCC][7] = 54, [0][0][RTW89_ETSI][7] = 38, [0][0][RTW89_MKK][7] = 40, @@ -45988,6 +47884,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][7] = 38, [0][0][RTW89_CHILE][7] = 36, [0][0][RTW89_QATAR][7] = 38, + [0][0][RTW89_THAILAND][7] = 38, [0][0][RTW89_FCC][8] = 50, [0][0][RTW89_ETSI][8] = 38, [0][0][RTW89_MKK][8] = 
40, @@ -46000,6 +47897,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][8] = 38, [0][0][RTW89_CHILE][8] = 36, [0][0][RTW89_QATAR][8] = 38, + [0][0][RTW89_THAILAND][8] = 38, [0][0][RTW89_FCC][9] = 46, [0][0][RTW89_ETSI][9] = 38, [0][0][RTW89_MKK][9] = 40, @@ -46012,6 +47910,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][9] = 38, [0][0][RTW89_CHILE][9] = 36, [0][0][RTW89_QATAR][9] = 38, + [0][0][RTW89_THAILAND][9] = 38, [0][0][RTW89_FCC][10] = 46, [0][0][RTW89_ETSI][10] = 38, [0][0][RTW89_MKK][10] = 40, @@ -46024,6 +47923,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][10] = 38, [0][0][RTW89_CHILE][10] = 46, [0][0][RTW89_QATAR][10] = 38, + [0][0][RTW89_THAILAND][10] = 38, [0][0][RTW89_FCC][11] = 26, [0][0][RTW89_ETSI][11] = 38, [0][0][RTW89_MKK][11] = 40, @@ -46036,6 +47936,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][11] = 38, [0][0][RTW89_CHILE][11] = 26, [0][0][RTW89_QATAR][11] = 38, + [0][0][RTW89_THAILAND][11] = 38, [0][0][RTW89_FCC][12] = -20, [0][0][RTW89_ETSI][12] = 34, [0][0][RTW89_MKK][12] = 36, @@ -46048,6 +47949,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][12] = 34, [0][0][RTW89_CHILE][12] = -20, [0][0][RTW89_QATAR][12] = 34, + [0][0][RTW89_THAILAND][12] = 34, [0][0][RTW89_FCC][13] = 127, [0][0][RTW89_ETSI][13] = 127, [0][0][RTW89_MKK][13] = 127, @@ -46060,6 +47962,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][13] = 127, [0][0][RTW89_CHILE][13] = 127, [0][0][RTW89_QATAR][13] = 127, + [0][0][RTW89_THAILAND][13] = 127, [0][1][RTW89_FCC][0] = 56, [0][1][RTW89_ETSI][0] = 22, [0][1][RTW89_MKK][0] = 24, @@ -46072,6 +47975,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][0] = 22, [0][1][RTW89_CHILE][0] = 56, [0][1][RTW89_QATAR][0] = 22, + [0][1][RTW89_THAILAND][0] = 22, [0][1][RTW89_FCC][1] = 56, [0][1][RTW89_ETSI][1] = 24, [0][1][RTW89_MKK][1] = 30, @@ -46084,6 +47988,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][1] = 24, [0][1][RTW89_CHILE][1] = 40, [0][1][RTW89_QATAR][1] = 24, + [0][1][RTW89_THAILAND][1] = 24, [0][1][RTW89_FCC][2] = 60, [0][1][RTW89_ETSI][2] = 24, [0][1][RTW89_MKK][2] = 30, @@ -46096,6 +48001,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][2] = 24, [0][1][RTW89_CHILE][2] = 40, [0][1][RTW89_QATAR][2] = 24, + [0][1][RTW89_THAILAND][2] = 24, [0][1][RTW89_FCC][3] = 64, [0][1][RTW89_ETSI][3] = 24, [0][1][RTW89_MKK][3] = 30, @@ -46108,6 +48014,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][3] = 24, [0][1][RTW89_CHILE][3] = 40, [0][1][RTW89_QATAR][3] = 24, + [0][1][RTW89_THAILAND][3] = 24, [0][1][RTW89_FCC][4] = 68, [0][1][RTW89_ETSI][4] = 24, [0][1][RTW89_MKK][4] = 30, @@ -46120,6 +48027,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][4] = 24, [0][1][RTW89_CHILE][4] = 40, [0][1][RTW89_QATAR][4] = 24, + [0][1][RTW89_THAILAND][4] = 24, [0][1][RTW89_FCC][5] = 76, [0][1][RTW89_ETSI][5] = 24, [0][1][RTW89_MKK][5] = 30, @@ -46132,6 +48040,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][5] = 24, [0][1][RTW89_CHILE][5] = 76, [0][1][RTW89_QATAR][5] = 24, + [0][1][RTW89_THAILAND][5] = 24, [0][1][RTW89_FCC][6] = 54, [0][1][RTW89_ETSI][6] = 24, 
[0][1][RTW89_MKK][6] = 30, @@ -46144,6 +48053,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][6] = 24, [0][1][RTW89_CHILE][6] = 26, [0][1][RTW89_QATAR][6] = 24, + [0][1][RTW89_THAILAND][6] = 24, [0][1][RTW89_FCC][7] = 50, [0][1][RTW89_ETSI][7] = 24, [0][1][RTW89_MKK][7] = 30, @@ -46156,6 +48066,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][7] = 24, [0][1][RTW89_CHILE][7] = 26, [0][1][RTW89_QATAR][7] = 24, + [0][1][RTW89_THAILAND][7] = 24, [0][1][RTW89_FCC][8] = 46, [0][1][RTW89_ETSI][8] = 24, [0][1][RTW89_MKK][8] = 30, @@ -46168,6 +48079,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][8] = 24, [0][1][RTW89_CHILE][8] = 26, [0][1][RTW89_QATAR][8] = 24, + [0][1][RTW89_THAILAND][8] = 24, [0][1][RTW89_FCC][9] = 42, [0][1][RTW89_ETSI][9] = 24, [0][1][RTW89_MKK][9] = 30, @@ -46180,6 +48092,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][9] = 24, [0][1][RTW89_CHILE][9] = 26, [0][1][RTW89_QATAR][9] = 24, + [0][1][RTW89_THAILAND][9] = 24, [0][1][RTW89_FCC][10] = 42, [0][1][RTW89_ETSI][10] = 24, [0][1][RTW89_MKK][10] = 30, @@ -46192,6 +48105,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][10] = 24, [0][1][RTW89_CHILE][10] = 42, [0][1][RTW89_QATAR][10] = 24, + [0][1][RTW89_THAILAND][10] = 24, [0][1][RTW89_FCC][11] = 22, [0][1][RTW89_ETSI][11] = 24, [0][1][RTW89_MKK][11] = 30, @@ -46204,6 +48118,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][11] = 24, [0][1][RTW89_CHILE][11] = 22, [0][1][RTW89_QATAR][11] = 24, + [0][1][RTW89_THAILAND][11] = 24, [0][1][RTW89_FCC][12] = -30, [0][1][RTW89_ETSI][12] = 20, [0][1][RTW89_MKK][12] = 24, @@ -46216,6 +48131,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][12] = 20, [0][1][RTW89_CHILE][12] = -30, [0][1][RTW89_QATAR][12] = 20, + [0][1][RTW89_THAILAND][12] = 20, [0][1][RTW89_FCC][13] = 127, [0][1][RTW89_ETSI][13] = 127, [0][1][RTW89_MKK][13] = 127, @@ -46228,6 +48144,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][13] = 127, [0][1][RTW89_CHILE][13] = 127, [0][1][RTW89_QATAR][13] = 127, + [0][1][RTW89_THAILAND][13] = 127, [1][0][RTW89_FCC][0] = 66, [1][0][RTW89_ETSI][0] = 46, [1][0][RTW89_MKK][0] = 48, @@ -46240,6 +48157,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][0] = 46, [1][0][RTW89_CHILE][0] = 66, [1][0][RTW89_QATAR][0] = 46, + [1][0][RTW89_THAILAND][0] = 46, [1][0][RTW89_FCC][1] = 66, [1][0][RTW89_ETSI][1] = 46, [1][0][RTW89_MKK][1] = 48, @@ -46252,6 +48170,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][1] = 46, [1][0][RTW89_CHILE][1] = 54, [1][0][RTW89_QATAR][1] = 46, + [1][0][RTW89_THAILAND][1] = 46, [1][0][RTW89_FCC][2] = 70, [1][0][RTW89_ETSI][2] = 46, [1][0][RTW89_MKK][2] = 48, @@ -46264,6 +48183,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][2] = 46, [1][0][RTW89_CHILE][2] = 54, [1][0][RTW89_QATAR][2] = 46, + [1][0][RTW89_THAILAND][2] = 46, [1][0][RTW89_FCC][3] = 72, [1][0][RTW89_ETSI][3] = 46, [1][0][RTW89_MKK][3] = 48, @@ -46276,6 +48196,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][3] = 46, [1][0][RTW89_CHILE][3] = 54, [1][0][RTW89_QATAR][3] = 46, + [1][0][RTW89_THAILAND][3] = 46, [1][0][RTW89_FCC][4] = 72, 
[1][0][RTW89_ETSI][4] = 46, [1][0][RTW89_MKK][4] = 48, @@ -46288,6 +48209,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][4] = 46, [1][0][RTW89_CHILE][4] = 54, [1][0][RTW89_QATAR][4] = 46, + [1][0][RTW89_THAILAND][4] = 46, [1][0][RTW89_FCC][5] = 82, [1][0][RTW89_ETSI][5] = 46, [1][0][RTW89_MKK][5] = 48, @@ -46300,6 +48222,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][5] = 46, [1][0][RTW89_CHILE][5] = 82, [1][0][RTW89_QATAR][5] = 46, + [1][0][RTW89_THAILAND][5] = 46, [1][0][RTW89_FCC][6] = 58, [1][0][RTW89_ETSI][6] = 44, [1][0][RTW89_MKK][6] = 48, @@ -46312,6 +48235,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][6] = 44, [1][0][RTW89_CHILE][6] = 40, [1][0][RTW89_QATAR][6] = 44, + [1][0][RTW89_THAILAND][6] = 44, [1][0][RTW89_FCC][7] = 58, [1][0][RTW89_ETSI][7] = 46, [1][0][RTW89_MKK][7] = 48, @@ -46324,6 +48248,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][7] = 46, [1][0][RTW89_CHILE][7] = 40, [1][0][RTW89_QATAR][7] = 46, + [1][0][RTW89_THAILAND][7] = 46, [1][0][RTW89_FCC][8] = 58, [1][0][RTW89_ETSI][8] = 46, [1][0][RTW89_MKK][8] = 48, @@ -46336,6 +48261,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][8] = 46, [1][0][RTW89_CHILE][8] = 40, [1][0][RTW89_QATAR][8] = 46, + [1][0][RTW89_THAILAND][8] = 46, [1][0][RTW89_FCC][9] = 54, [1][0][RTW89_ETSI][9] = 46, [1][0][RTW89_MKK][9] = 48, @@ -46348,6 +48274,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][9] = 46, [1][0][RTW89_CHILE][9] = 40, [1][0][RTW89_QATAR][9] = 46, + [1][0][RTW89_THAILAND][9] = 46, [1][0][RTW89_FCC][10] = 54, [1][0][RTW89_ETSI][10] = 46, [1][0][RTW89_MKK][10] = 48, @@ -46360,6 +48287,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][10] = 46, [1][0][RTW89_CHILE][10] = 54, [1][0][RTW89_QATAR][10] = 46, + [1][0][RTW89_THAILAND][10] = 46, [1][0][RTW89_FCC][11] = 36, [1][0][RTW89_ETSI][11] = 46, [1][0][RTW89_MKK][11] = 48, @@ -46372,6 +48300,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][11] = 46, [1][0][RTW89_CHILE][11] = 36, [1][0][RTW89_QATAR][11] = 46, + [1][0][RTW89_THAILAND][11] = 46, [1][0][RTW89_FCC][12] = 4, [1][0][RTW89_ETSI][12] = 46, [1][0][RTW89_MKK][12] = 46, @@ -46384,6 +48313,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][12] = 46, [1][0][RTW89_CHILE][12] = 4, [1][0][RTW89_QATAR][12] = 46, + [1][0][RTW89_THAILAND][12] = 46, [1][0][RTW89_FCC][13] = 127, [1][0][RTW89_ETSI][13] = 127, [1][0][RTW89_MKK][13] = 127, @@ -46396,6 +48326,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][13] = 127, [1][0][RTW89_CHILE][13] = 127, [1][0][RTW89_QATAR][13] = 127, + [1][0][RTW89_THAILAND][13] = 127, [1][1][RTW89_FCC][0] = 58, [1][1][RTW89_ETSI][0] = 32, [1][1][RTW89_MKK][0] = 34, @@ -46408,6 +48339,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][0] = 32, [1][1][RTW89_CHILE][0] = 58, [1][1][RTW89_QATAR][0] = 32, + [1][1][RTW89_THAILAND][0] = 32, [1][1][RTW89_FCC][1] = 58, [1][1][RTW89_ETSI][1] = 34, [1][1][RTW89_MKK][1] = 34, @@ -46420,6 +48352,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][1] = 34, [1][1][RTW89_CHILE][1] = 40, [1][1][RTW89_QATAR][1] = 34, + [1][1][RTW89_THAILAND][1] = 34, 
[1][1][RTW89_FCC][2] = 62, [1][1][RTW89_ETSI][2] = 34, [1][1][RTW89_MKK][2] = 34, @@ -46432,6 +48365,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][2] = 34, [1][1][RTW89_CHILE][2] = 40, [1][1][RTW89_QATAR][2] = 34, + [1][1][RTW89_THAILAND][2] = 34, [1][1][RTW89_FCC][3] = 66, [1][1][RTW89_ETSI][3] = 34, [1][1][RTW89_MKK][3] = 34, @@ -46444,6 +48378,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][3] = 34, [1][1][RTW89_CHILE][3] = 40, [1][1][RTW89_QATAR][3] = 34, + [1][1][RTW89_THAILAND][3] = 34, [1][1][RTW89_FCC][4] = 70, [1][1][RTW89_ETSI][4] = 34, [1][1][RTW89_MKK][4] = 34, @@ -46456,6 +48391,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][4] = 34, [1][1][RTW89_CHILE][4] = 40, [1][1][RTW89_QATAR][4] = 34, + [1][1][RTW89_THAILAND][4] = 34, [1][1][RTW89_FCC][5] = 82, [1][1][RTW89_ETSI][5] = 34, [1][1][RTW89_MKK][5] = 34, @@ -46468,6 +48404,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][5] = 34, [1][1][RTW89_CHILE][5] = 78, [1][1][RTW89_QATAR][5] = 34, + [1][1][RTW89_THAILAND][5] = 34, [1][1][RTW89_FCC][6] = 60, [1][1][RTW89_ETSI][6] = 34, [1][1][RTW89_MKK][6] = 34, @@ -46480,6 +48417,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][6] = 34, [1][1][RTW89_CHILE][6] = 30, [1][1][RTW89_QATAR][6] = 34, + [1][1][RTW89_THAILAND][6] = 34, [1][1][RTW89_FCC][7] = 56, [1][1][RTW89_ETSI][7] = 34, [1][1][RTW89_MKK][7] = 34, @@ -46492,6 +48430,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][7] = 34, [1][1][RTW89_CHILE][7] = 30, [1][1][RTW89_QATAR][7] = 34, + [1][1][RTW89_THAILAND][7] = 34, [1][1][RTW89_FCC][8] = 52, [1][1][RTW89_ETSI][8] = 34, [1][1][RTW89_MKK][8] = 34, @@ -46504,6 +48443,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][8] = 34, [1][1][RTW89_CHILE][8] = 30, [1][1][RTW89_QATAR][8] = 34, + [1][1][RTW89_THAILAND][8] = 34, [1][1][RTW89_FCC][9] = 48, [1][1][RTW89_ETSI][9] = 34, [1][1][RTW89_MKK][9] = 34, @@ -46516,6 +48456,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][9] = 34, [1][1][RTW89_CHILE][9] = 30, [1][1][RTW89_QATAR][9] = 34, + [1][1][RTW89_THAILAND][9] = 34, [1][1][RTW89_FCC][10] = 48, [1][1][RTW89_ETSI][10] = 34, [1][1][RTW89_MKK][10] = 34, @@ -46528,6 +48469,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][10] = 34, [1][1][RTW89_CHILE][10] = 48, [1][1][RTW89_QATAR][10] = 34, + [1][1][RTW89_THAILAND][10] = 34, [1][1][RTW89_FCC][11] = 30, [1][1][RTW89_ETSI][11] = 34, [1][1][RTW89_MKK][11] = 34, @@ -46540,6 +48482,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][11] = 34, [1][1][RTW89_CHILE][11] = 30, [1][1][RTW89_QATAR][11] = 34, + [1][1][RTW89_THAILAND][11] = 34, [1][1][RTW89_FCC][12] = -6, [1][1][RTW89_ETSI][12] = 34, [1][1][RTW89_MKK][12] = 34, @@ -46552,6 +48495,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][12] = 34, [1][1][RTW89_CHILE][12] = -6, [1][1][RTW89_QATAR][12] = 34, + [1][1][RTW89_THAILAND][12] = 34, [1][1][RTW89_FCC][13] = 127, [1][1][RTW89_ETSI][13] = 127, [1][1][RTW89_MKK][13] = 127, @@ -46564,6 +48508,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][13] = 127, [1][1][RTW89_CHILE][13] = 127, [1][1][RTW89_QATAR][13] = 127, + 
[1][1][RTW89_THAILAND][13] = 127, [2][0][RTW89_FCC][0] = 70, [2][0][RTW89_ETSI][0] = 58, [2][0][RTW89_MKK][0] = 58, @@ -46576,6 +48521,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][0] = 58, [2][0][RTW89_CHILE][0] = 70, [2][0][RTW89_QATAR][0] = 58, + [2][0][RTW89_THAILAND][0] = 58, [2][0][RTW89_FCC][1] = 70, [2][0][RTW89_ETSI][1] = 58, [2][0][RTW89_MKK][1] = 58, @@ -46588,6 +48534,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][1] = 58, [2][0][RTW89_CHILE][1] = 54, [2][0][RTW89_QATAR][1] = 58, + [2][0][RTW89_THAILAND][1] = 58, [2][0][RTW89_FCC][2] = 72, [2][0][RTW89_ETSI][2] = 58, [2][0][RTW89_MKK][2] = 58, @@ -46600,6 +48547,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][2] = 58, [2][0][RTW89_CHILE][2] = 54, [2][0][RTW89_QATAR][2] = 58, + [2][0][RTW89_THAILAND][2] = 58, [2][0][RTW89_FCC][3] = 72, [2][0][RTW89_ETSI][3] = 58, [2][0][RTW89_MKK][3] = 58, @@ -46612,6 +48560,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][3] = 58, [2][0][RTW89_CHILE][3] = 54, [2][0][RTW89_QATAR][3] = 58, + [2][0][RTW89_THAILAND][3] = 58, [2][0][RTW89_FCC][4] = 72, [2][0][RTW89_ETSI][4] = 58, [2][0][RTW89_MKK][4] = 58, @@ -46624,6 +48573,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][4] = 58, [2][0][RTW89_CHILE][4] = 54, [2][0][RTW89_QATAR][4] = 58, + [2][0][RTW89_THAILAND][4] = 58, [2][0][RTW89_FCC][5] = 82, [2][0][RTW89_ETSI][5] = 58, [2][0][RTW89_MKK][5] = 58, @@ -46636,6 +48586,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][5] = 58, [2][0][RTW89_CHILE][5] = 82, [2][0][RTW89_QATAR][5] = 58, + [2][0][RTW89_THAILAND][5] = 58, [2][0][RTW89_FCC][6] = 66, [2][0][RTW89_ETSI][6] = 56, [2][0][RTW89_MKK][6] = 58, @@ -46648,6 +48599,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][6] = 56, [2][0][RTW89_CHILE][6] = 48, [2][0][RTW89_QATAR][6] = 56, + [2][0][RTW89_THAILAND][6] = 56, [2][0][RTW89_FCC][7] = 66, [2][0][RTW89_ETSI][7] = 58, [2][0][RTW89_MKK][7] = 58, @@ -46660,6 +48612,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][7] = 58, [2][0][RTW89_CHILE][7] = 48, [2][0][RTW89_QATAR][7] = 58, + [2][0][RTW89_THAILAND][7] = 58, [2][0][RTW89_FCC][8] = 66, [2][0][RTW89_ETSI][8] = 58, [2][0][RTW89_MKK][8] = 58, @@ -46672,6 +48625,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][8] = 58, [2][0][RTW89_CHILE][8] = 48, [2][0][RTW89_QATAR][8] = 58, + [2][0][RTW89_THAILAND][8] = 58, [2][0][RTW89_FCC][9] = 64, [2][0][RTW89_ETSI][9] = 58, [2][0][RTW89_MKK][9] = 58, @@ -46684,6 +48638,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][9] = 58, [2][0][RTW89_CHILE][9] = 48, [2][0][RTW89_QATAR][9] = 58, + [2][0][RTW89_THAILAND][9] = 58, [2][0][RTW89_FCC][10] = 64, [2][0][RTW89_ETSI][10] = 58, [2][0][RTW89_MKK][10] = 58, @@ -46696,6 +48651,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][10] = 58, [2][0][RTW89_CHILE][10] = 64, [2][0][RTW89_QATAR][10] = 58, + [2][0][RTW89_THAILAND][10] = 58, [2][0][RTW89_FCC][11] = 48, [2][0][RTW89_ETSI][11] = 58, [2][0][RTW89_MKK][11] = 58, @@ -46708,6 +48664,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][11] = 58, [2][0][RTW89_CHILE][11] = 48, [2][0][RTW89_QATAR][11] = 58, + 
[2][0][RTW89_THAILAND][11] = 58, [2][0][RTW89_FCC][12] = 16, [2][0][RTW89_ETSI][12] = 58, [2][0][RTW89_MKK][12] = 58, @@ -46720,6 +48677,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][12] = 58, [2][0][RTW89_CHILE][12] = 16, [2][0][RTW89_QATAR][12] = 58, + [2][0][RTW89_THAILAND][12] = 58, [2][0][RTW89_FCC][13] = 127, [2][0][RTW89_ETSI][13] = 127, [2][0][RTW89_MKK][13] = 127, @@ -46732,6 +48690,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][13] = 127, [2][0][RTW89_CHILE][13] = 127, [2][0][RTW89_QATAR][13] = 127, + [2][0][RTW89_THAILAND][13] = 127, [2][1][RTW89_FCC][0] = 64, [2][1][RTW89_ETSI][0] = 46, [2][1][RTW89_MKK][0] = 46, @@ -46744,6 +48703,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][0] = 46, [2][1][RTW89_CHILE][0] = 64, [2][1][RTW89_QATAR][0] = 46, + [2][1][RTW89_THAILAND][0] = 46, [2][1][RTW89_FCC][1] = 64, [2][1][RTW89_ETSI][1] = 46, [2][1][RTW89_MKK][1] = 46, @@ -46756,6 +48716,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][1] = 46, [2][1][RTW89_CHILE][1] = 44, [2][1][RTW89_QATAR][1] = 46, + [2][1][RTW89_THAILAND][1] = 46, [2][1][RTW89_FCC][2] = 68, [2][1][RTW89_ETSI][2] = 46, [2][1][RTW89_MKK][2] = 46, @@ -46768,6 +48729,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][2] = 46, [2][1][RTW89_CHILE][2] = 44, [2][1][RTW89_QATAR][2] = 46, + [2][1][RTW89_THAILAND][2] = 46, [2][1][RTW89_FCC][3] = 72, [2][1][RTW89_ETSI][3] = 46, [2][1][RTW89_MKK][3] = 46, @@ -46780,6 +48742,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][3] = 46, [2][1][RTW89_CHILE][3] = 44, [2][1][RTW89_QATAR][3] = 46, + [2][1][RTW89_THAILAND][3] = 46, [2][1][RTW89_FCC][4] = 74, [2][1][RTW89_ETSI][4] = 46, [2][1][RTW89_MKK][4] = 46, @@ -46792,6 +48755,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][4] = 46, [2][1][RTW89_CHILE][4] = 44, [2][1][RTW89_QATAR][4] = 46, + [2][1][RTW89_THAILAND][4] = 46, [2][1][RTW89_FCC][5] = 82, [2][1][RTW89_ETSI][5] = 46, [2][1][RTW89_MKK][5] = 46, @@ -46804,6 +48768,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][5] = 46, [2][1][RTW89_CHILE][5] = 78, [2][1][RTW89_QATAR][5] = 46, + [2][1][RTW89_THAILAND][5] = 46, [2][1][RTW89_FCC][6] = 72, [2][1][RTW89_ETSI][6] = 44, [2][1][RTW89_MKK][6] = 46, @@ -46816,6 +48781,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][6] = 44, [2][1][RTW89_CHILE][6] = 42, [2][1][RTW89_QATAR][6] = 44, + [2][1][RTW89_THAILAND][6] = 44, [2][1][RTW89_FCC][7] = 72, [2][1][RTW89_ETSI][7] = 46, [2][1][RTW89_MKK][7] = 46, @@ -46828,6 +48794,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][7] = 46, [2][1][RTW89_CHILE][7] = 42, [2][1][RTW89_QATAR][7] = 46, + [2][1][RTW89_THAILAND][7] = 46, [2][1][RTW89_FCC][8] = 68, [2][1][RTW89_ETSI][8] = 46, [2][1][RTW89_MKK][8] = 46, @@ -46840,6 +48807,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][8] = 46, [2][1][RTW89_CHILE][8] = 42, [2][1][RTW89_QATAR][8] = 46, + [2][1][RTW89_THAILAND][8] = 46, [2][1][RTW89_FCC][9] = 64, [2][1][RTW89_ETSI][9] = 46, [2][1][RTW89_MKK][9] = 46, @@ -46852,6 +48820,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][9] = 46, [2][1][RTW89_CHILE][9] = 42, [2][1][RTW89_QATAR][9] 
= 46, + [2][1][RTW89_THAILAND][9] = 46, [2][1][RTW89_FCC][10] = 64, [2][1][RTW89_ETSI][10] = 46, [2][1][RTW89_MKK][10] = 46, @@ -46864,6 +48833,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][10] = 46, [2][1][RTW89_CHILE][10] = 64, [2][1][RTW89_QATAR][10] = 46, + [2][1][RTW89_THAILAND][10] = 46, [2][1][RTW89_FCC][11] = 46, [2][1][RTW89_ETSI][11] = 46, [2][1][RTW89_MKK][11] = 46, @@ -46876,6 +48846,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][11] = 46, [2][1][RTW89_CHILE][11] = 46, [2][1][RTW89_QATAR][11] = 46, + [2][1][RTW89_THAILAND][11] = 46, [2][1][RTW89_FCC][12] = 6, [2][1][RTW89_ETSI][12] = 44, [2][1][RTW89_MKK][12] = 46, @@ -46888,6 +48859,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][12] = 44, [2][1][RTW89_CHILE][12] = 6, [2][1][RTW89_QATAR][12] = 44, + [2][1][RTW89_THAILAND][12] = 44, [2][1][RTW89_FCC][13] = 127, [2][1][RTW89_ETSI][13] = 127, [2][1][RTW89_MKK][13] = 127, @@ -46900,6 +48872,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][13] = 127, [2][1][RTW89_CHILE][13] = 127, [2][1][RTW89_QATAR][13] = 127, + [2][1][RTW89_THAILAND][13] = 127, }; static @@ -47085,6 +49058,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][0] = 22, [0][0][RTW89_CHILE][0] = 50, [0][0][RTW89_QATAR][0] = 30, + [0][0][RTW89_THAILAND][0] = 30, [0][0][RTW89_FCC][2] = 50, [0][0][RTW89_ETSI][2] = 30, [0][0][RTW89_MKK][2] = 36, @@ -47097,6 +49071,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][2] = 22, [0][0][RTW89_CHILE][2] = 50, [0][0][RTW89_QATAR][2] = 30, + [0][0][RTW89_THAILAND][2] = 30, [0][0][RTW89_FCC][4] = 50, [0][0][RTW89_ETSI][4] = 30, [0][0][RTW89_MKK][4] = 22, @@ -47109,6 +49084,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][4] = 22, [0][0][RTW89_CHILE][4] = 50, [0][0][RTW89_QATAR][4] = 30, + [0][0][RTW89_THAILAND][4] = 30, [0][0][RTW89_FCC][6] = 50, [0][0][RTW89_ETSI][6] = 30, [0][0][RTW89_MKK][6] = 22, @@ -47121,6 +49097,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][6] = 22, [0][0][RTW89_CHILE][6] = 50, [0][0][RTW89_QATAR][6] = 30, + [0][0][RTW89_THAILAND][6] = 30, [0][0][RTW89_FCC][8] = 52, [0][0][RTW89_ETSI][8] = 28, [0][0][RTW89_MKK][8] = 18, @@ -47133,6 +49110,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][8] = 22, [0][0][RTW89_CHILE][8] = 52, [0][0][RTW89_QATAR][8] = 28, + [0][0][RTW89_THAILAND][8] = 28, [0][0][RTW89_FCC][10] = 52, [0][0][RTW89_ETSI][10] = 28, [0][0][RTW89_MKK][10] = 18, @@ -47145,6 +49123,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][10] = 22, [0][0][RTW89_CHILE][10] = 52, [0][0][RTW89_QATAR][10] = 28, + [0][0][RTW89_THAILAND][10] = 28, [0][0][RTW89_FCC][12] = 52, [0][0][RTW89_ETSI][12] = 28, [0][0][RTW89_MKK][12] = 34, @@ -47157,6 +49136,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][12] = 22, [0][0][RTW89_CHILE][12] = 52, [0][0][RTW89_QATAR][12] = 28, + [0][0][RTW89_THAILAND][12] = 28, [0][0][RTW89_FCC][14] = 52, [0][0][RTW89_ETSI][14] = 28, [0][0][RTW89_MKK][14] = 34, @@ -47169,6 +49149,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][14] = 22, [0][0][RTW89_CHILE][14] = 52, [0][0][RTW89_QATAR][14] = 28, + 
[0][0][RTW89_THAILAND][14] = 28, [0][0][RTW89_FCC][15] = 52, [0][0][RTW89_ETSI][15] = 30, [0][0][RTW89_MKK][15] = 56, @@ -47181,6 +49162,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][15] = 22, [0][0][RTW89_CHILE][15] = 52, [0][0][RTW89_QATAR][15] = 30, + [0][0][RTW89_THAILAND][15] = 30, [0][0][RTW89_FCC][17] = 52, [0][0][RTW89_ETSI][17] = 30, [0][0][RTW89_MKK][17] = 58, @@ -47193,6 +49175,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][17] = 22, [0][0][RTW89_CHILE][17] = 52, [0][0][RTW89_QATAR][17] = 30, + [0][0][RTW89_THAILAND][17] = 30, [0][0][RTW89_FCC][19] = 52, [0][0][RTW89_ETSI][19] = 30, [0][0][RTW89_MKK][19] = 58, @@ -47205,6 +49188,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][19] = 22, [0][0][RTW89_CHILE][19] = 52, [0][0][RTW89_QATAR][19] = 30, + [0][0][RTW89_THAILAND][19] = 30, [0][0][RTW89_FCC][21] = 52, [0][0][RTW89_ETSI][21] = 30, [0][0][RTW89_MKK][21] = 58, @@ -47217,6 +49201,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][21] = 22, [0][0][RTW89_CHILE][21] = 52, [0][0][RTW89_QATAR][21] = 30, + [0][0][RTW89_THAILAND][21] = 30, [0][0][RTW89_FCC][23] = 52, [0][0][RTW89_ETSI][23] = 30, [0][0][RTW89_MKK][23] = 58, @@ -47229,6 +49214,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][23] = 22, [0][0][RTW89_CHILE][23] = 52, [0][0][RTW89_QATAR][23] = 30, + [0][0][RTW89_THAILAND][23] = 30, [0][0][RTW89_FCC][25] = 52, [0][0][RTW89_ETSI][25] = 30, [0][0][RTW89_MKK][25] = 58, @@ -47241,6 +49227,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][25] = 22, [0][0][RTW89_CHILE][25] = 52, [0][0][RTW89_QATAR][25] = 30, + [0][0][RTW89_THAILAND][25] = 30, [0][0][RTW89_FCC][27] = 52, [0][0][RTW89_ETSI][27] = 30, [0][0][RTW89_MKK][27] = 58, @@ -47253,6 +49240,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][27] = 22, [0][0][RTW89_CHILE][27] = 52, [0][0][RTW89_QATAR][27] = 30, + [0][0][RTW89_THAILAND][27] = 30, [0][0][RTW89_FCC][29] = 52, [0][0][RTW89_ETSI][29] = 30, [0][0][RTW89_MKK][29] = 58, @@ -47265,6 +49253,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][29] = 22, [0][0][RTW89_CHILE][29] = 52, [0][0][RTW89_QATAR][29] = 30, + [0][0][RTW89_THAILAND][29] = 30, [0][0][RTW89_FCC][31] = 52, [0][0][RTW89_ETSI][31] = 30, [0][0][RTW89_MKK][31] = 58, @@ -47277,6 +49266,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][31] = 22, [0][0][RTW89_CHILE][31] = 52, [0][0][RTW89_QATAR][31] = 30, + [0][0][RTW89_THAILAND][31] = 30, [0][0][RTW89_FCC][33] = 44, [0][0][RTW89_ETSI][33] = 30, [0][0][RTW89_MKK][33] = 58, @@ -47289,6 +49279,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][33] = 22, [0][0][RTW89_CHILE][33] = 44, [0][0][RTW89_QATAR][33] = 30, + [0][0][RTW89_THAILAND][33] = 30, [0][0][RTW89_FCC][35] = 44, [0][0][RTW89_ETSI][35] = 30, [0][0][RTW89_MKK][35] = 58, @@ -47301,6 +49292,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][35] = 22, [0][0][RTW89_CHILE][35] = 44, [0][0][RTW89_QATAR][35] = 30, + [0][0][RTW89_THAILAND][35] = 30, [0][0][RTW89_FCC][37] = 52, [0][0][RTW89_ETSI][37] = 127, [0][0][RTW89_MKK][37] = 58, @@ -47313,6 +49305,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] 
[0][0][RTW89_UKRAINE][37] = 127, [0][0][RTW89_CHILE][37] = 52, [0][0][RTW89_QATAR][37] = 127, + [0][0][RTW89_THAILAND][37] = 127, [0][0][RTW89_FCC][38] = 64, [0][0][RTW89_ETSI][38] = 28, [0][0][RTW89_MKK][38] = 127, @@ -47325,6 +49318,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][38] = 26, [0][0][RTW89_CHILE][38] = 64, [0][0][RTW89_QATAR][38] = 26, + [0][0][RTW89_THAILAND][38] = 28, [0][0][RTW89_FCC][40] = 64, [0][0][RTW89_ETSI][40] = 28, [0][0][RTW89_MKK][40] = 127, @@ -47337,6 +49331,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][40] = 26, [0][0][RTW89_CHILE][40] = 64, [0][0][RTW89_QATAR][40] = 26, + [0][0][RTW89_THAILAND][40] = 28, [0][0][RTW89_FCC][42] = 60, [0][0][RTW89_ETSI][42] = 28, [0][0][RTW89_MKK][42] = 127, @@ -47349,6 +49344,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][42] = 26, [0][0][RTW89_CHILE][42] = 60, [0][0][RTW89_QATAR][42] = 26, + [0][0][RTW89_THAILAND][42] = 28, [0][0][RTW89_FCC][44] = 60, [0][0][RTW89_ETSI][44] = 28, [0][0][RTW89_MKK][44] = 127, @@ -47361,6 +49357,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][44] = 26, [0][0][RTW89_CHILE][44] = 60, [0][0][RTW89_QATAR][44] = 26, + [0][0][RTW89_THAILAND][44] = 28, [0][0][RTW89_FCC][46] = 60, [0][0][RTW89_ETSI][46] = 28, [0][0][RTW89_MKK][46] = 127, @@ -47373,6 +49370,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][46] = 26, [0][0][RTW89_CHILE][46] = 60, [0][0][RTW89_QATAR][46] = 26, + [0][0][RTW89_THAILAND][46] = 28, [0][0][RTW89_FCC][48] = 46, [0][0][RTW89_ETSI][48] = 127, [0][0][RTW89_MKK][48] = 127, @@ -47385,6 +49383,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][48] = 127, [0][0][RTW89_CHILE][48] = 127, [0][0][RTW89_QATAR][48] = 127, + [0][0][RTW89_THAILAND][48] = 127, [0][0][RTW89_FCC][50] = 44, [0][0][RTW89_ETSI][50] = 127, [0][0][RTW89_MKK][50] = 127, @@ -47397,6 +49396,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][50] = 127, [0][0][RTW89_CHILE][50] = 127, [0][0][RTW89_QATAR][50] = 127, + [0][0][RTW89_THAILAND][50] = 127, [0][0][RTW89_FCC][52] = 34, [0][0][RTW89_ETSI][52] = 127, [0][0][RTW89_MKK][52] = 127, @@ -47409,6 +49409,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_UKRAINE][52] = 127, [0][0][RTW89_CHILE][52] = 127, [0][0][RTW89_QATAR][52] = 127, + [0][0][RTW89_THAILAND][52] = 127, [0][1][RTW89_FCC][0] = 30, [0][1][RTW89_ETSI][0] = 18, [0][1][RTW89_MKK][0] = 20, @@ -47421,6 +49422,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][0] = 10, [0][1][RTW89_CHILE][0] = 30, [0][1][RTW89_QATAR][0] = 18, + [0][1][RTW89_THAILAND][0] = 18, [0][1][RTW89_FCC][2] = 32, [0][1][RTW89_ETSI][2] = 18, [0][1][RTW89_MKK][2] = 20, @@ -47433,6 +49435,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][2] = 10, [0][1][RTW89_CHILE][2] = 32, [0][1][RTW89_QATAR][2] = 18, + [0][1][RTW89_THAILAND][2] = 18, [0][1][RTW89_FCC][4] = 30, [0][1][RTW89_ETSI][4] = 18, [0][1][RTW89_MKK][4] = 8, @@ -47445,6 +49448,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][4] = 10, [0][1][RTW89_CHILE][4] = 30, [0][1][RTW89_QATAR][4] = 18, + [0][1][RTW89_THAILAND][4] = 18, [0][1][RTW89_FCC][6] = 30, [0][1][RTW89_ETSI][6] = 18, [0][1][RTW89_MKK][6] = 8, @@ -47457,6 +49461,7 
@@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][6] = 10, [0][1][RTW89_CHILE][6] = 30, [0][1][RTW89_QATAR][6] = 18, + [0][1][RTW89_THAILAND][6] = 18, [0][1][RTW89_FCC][8] = 30, [0][1][RTW89_ETSI][8] = 16, [0][1][RTW89_MKK][8] = 20, @@ -47469,6 +49474,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][8] = 10, [0][1][RTW89_CHILE][8] = 30, [0][1][RTW89_QATAR][8] = 16, + [0][1][RTW89_THAILAND][8] = 16, [0][1][RTW89_FCC][10] = 30, [0][1][RTW89_ETSI][10] = 16, [0][1][RTW89_MKK][10] = 20, @@ -47481,6 +49487,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][10] = 10, [0][1][RTW89_CHILE][10] = 30, [0][1][RTW89_QATAR][10] = 16, + [0][1][RTW89_THAILAND][10] = 16, [0][1][RTW89_FCC][12] = 30, [0][1][RTW89_ETSI][12] = 16, [0][1][RTW89_MKK][12] = 34, @@ -47493,6 +49500,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][12] = 10, [0][1][RTW89_CHILE][12] = 30, [0][1][RTW89_QATAR][12] = 16, + [0][1][RTW89_THAILAND][12] = 16, [0][1][RTW89_FCC][14] = 30, [0][1][RTW89_ETSI][14] = 16, [0][1][RTW89_MKK][14] = 34, @@ -47505,6 +49513,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][14] = 10, [0][1][RTW89_CHILE][14] = 30, [0][1][RTW89_QATAR][14] = 16, + [0][1][RTW89_THAILAND][14] = 16, [0][1][RTW89_FCC][15] = 32, [0][1][RTW89_ETSI][15] = 18, [0][1][RTW89_MKK][15] = 44, @@ -47517,6 +49526,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][15] = 10, [0][1][RTW89_CHILE][15] = 32, [0][1][RTW89_QATAR][15] = 18, + [0][1][RTW89_THAILAND][15] = 18, [0][1][RTW89_FCC][17] = 32, [0][1][RTW89_ETSI][17] = 18, [0][1][RTW89_MKK][17] = 44, @@ -47529,6 +49539,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][17] = 10, [0][1][RTW89_CHILE][17] = 32, [0][1][RTW89_QATAR][17] = 18, + [0][1][RTW89_THAILAND][17] = 18, [0][1][RTW89_FCC][19] = 32, [0][1][RTW89_ETSI][19] = 18, [0][1][RTW89_MKK][19] = 44, @@ -47541,6 +49552,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][19] = 10, [0][1][RTW89_CHILE][19] = 32, [0][1][RTW89_QATAR][19] = 18, + [0][1][RTW89_THAILAND][19] = 18, [0][1][RTW89_FCC][21] = 32, [0][1][RTW89_ETSI][21] = 18, [0][1][RTW89_MKK][21] = 44, @@ -47553,6 +49565,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][21] = 10, [0][1][RTW89_CHILE][21] = 32, [0][1][RTW89_QATAR][21] = 18, + [0][1][RTW89_THAILAND][21] = 18, [0][1][RTW89_FCC][23] = 32, [0][1][RTW89_ETSI][23] = 18, [0][1][RTW89_MKK][23] = 44, @@ -47565,6 +49578,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][23] = 10, [0][1][RTW89_CHILE][23] = 32, [0][1][RTW89_QATAR][23] = 18, + [0][1][RTW89_THAILAND][23] = 18, [0][1][RTW89_FCC][25] = 32, [0][1][RTW89_ETSI][25] = 18, [0][1][RTW89_MKK][25] = 44, @@ -47577,6 +49591,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][25] = 10, [0][1][RTW89_CHILE][25] = 32, [0][1][RTW89_QATAR][25] = 18, + [0][1][RTW89_THAILAND][25] = 18, [0][1][RTW89_FCC][27] = 32, [0][1][RTW89_ETSI][27] = 16, [0][1][RTW89_MKK][27] = 44, @@ -47589,6 +49604,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][27] = 10, [0][1][RTW89_CHILE][27] = 32, [0][1][RTW89_QATAR][27] = 16, + [0][1][RTW89_THAILAND][27] = 16, [0][1][RTW89_FCC][29] = 32, 
[0][1][RTW89_ETSI][29] = 16, [0][1][RTW89_MKK][29] = 44, @@ -47601,6 +49617,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][29] = 10, [0][1][RTW89_CHILE][29] = 32, [0][1][RTW89_QATAR][29] = 16, + [0][1][RTW89_THAILAND][29] = 16, [0][1][RTW89_FCC][31] = 32, [0][1][RTW89_ETSI][31] = 16, [0][1][RTW89_MKK][31] = 44, @@ -47613,6 +49630,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][31] = 10, [0][1][RTW89_CHILE][31] = 32, [0][1][RTW89_QATAR][31] = 16, + [0][1][RTW89_THAILAND][31] = 16, [0][1][RTW89_FCC][33] = 30, [0][1][RTW89_ETSI][33] = 16, [0][1][RTW89_MKK][33] = 44, @@ -47625,6 +49643,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][33] = 10, [0][1][RTW89_CHILE][33] = 30, [0][1][RTW89_QATAR][33] = 16, + [0][1][RTW89_THAILAND][33] = 16, [0][1][RTW89_FCC][35] = 30, [0][1][RTW89_ETSI][35] = 16, [0][1][RTW89_MKK][35] = 44, @@ -47637,6 +49656,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][35] = 10, [0][1][RTW89_CHILE][35] = 30, [0][1][RTW89_QATAR][35] = 16, + [0][1][RTW89_THAILAND][35] = 16, [0][1][RTW89_FCC][37] = 34, [0][1][RTW89_ETSI][37] = 127, [0][1][RTW89_MKK][37] = 44, @@ -47649,6 +49669,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][37] = 127, [0][1][RTW89_CHILE][37] = 34, [0][1][RTW89_QATAR][37] = 127, + [0][1][RTW89_THAILAND][37] = 127, [0][1][RTW89_FCC][38] = 62, [0][1][RTW89_ETSI][38] = 16, [0][1][RTW89_MKK][38] = 127, @@ -47661,6 +49682,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][38] = 14, [0][1][RTW89_CHILE][38] = 62, [0][1][RTW89_QATAR][38] = 14, + [0][1][RTW89_THAILAND][38] = 16, [0][1][RTW89_FCC][40] = 62, [0][1][RTW89_ETSI][40] = 16, [0][1][RTW89_MKK][40] = 127, @@ -47673,6 +49695,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][40] = 14, [0][1][RTW89_CHILE][40] = 62, [0][1][RTW89_QATAR][40] = 14, + [0][1][RTW89_THAILAND][40] = 16, [0][1][RTW89_FCC][42] = 58, [0][1][RTW89_ETSI][42] = 16, [0][1][RTW89_MKK][42] = 127, @@ -47685,6 +49708,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][42] = 14, [0][1][RTW89_CHILE][42] = 58, [0][1][RTW89_QATAR][42] = 14, + [0][1][RTW89_THAILAND][42] = 16, [0][1][RTW89_FCC][44] = 56, [0][1][RTW89_ETSI][44] = 16, [0][1][RTW89_MKK][44] = 127, @@ -47697,6 +49721,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][44] = 14, [0][1][RTW89_CHILE][44] = 56, [0][1][RTW89_QATAR][44] = 14, + [0][1][RTW89_THAILAND][44] = 16, [0][1][RTW89_FCC][46] = 56, [0][1][RTW89_ETSI][46] = 16, [0][1][RTW89_MKK][46] = 127, @@ -47709,6 +49734,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][46] = 14, [0][1][RTW89_CHILE][46] = 56, [0][1][RTW89_QATAR][46] = 14, + [0][1][RTW89_THAILAND][46] = 16, [0][1][RTW89_FCC][48] = 20, [0][1][RTW89_ETSI][48] = 127, [0][1][RTW89_MKK][48] = 127, @@ -47721,6 +49747,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][48] = 127, [0][1][RTW89_CHILE][48] = 127, [0][1][RTW89_QATAR][48] = 127, + [0][1][RTW89_THAILAND][48] = 127, [0][1][RTW89_FCC][50] = 20, [0][1][RTW89_ETSI][50] = 127, [0][1][RTW89_MKK][50] = 127, @@ -47733,6 +49760,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][50] = 127, [0][1][RTW89_CHILE][50] = 127, 
[0][1][RTW89_QATAR][50] = 127, + [0][1][RTW89_THAILAND][50] = 127, [0][1][RTW89_FCC][52] = 8, [0][1][RTW89_ETSI][52] = 127, [0][1][RTW89_MKK][52] = 127, @@ -47745,6 +49773,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_UKRAINE][52] = 127, [0][1][RTW89_CHILE][52] = 127, [0][1][RTW89_QATAR][52] = 127, + [0][1][RTW89_THAILAND][52] = 127, [1][0][RTW89_FCC][0] = 62, [1][0][RTW89_ETSI][0] = 40, [1][0][RTW89_MKK][0] = 48, @@ -47757,6 +49786,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][0] = 32, [1][0][RTW89_CHILE][0] = 62, [1][0][RTW89_QATAR][0] = 40, + [1][0][RTW89_THAILAND][0] = 40, [1][0][RTW89_FCC][2] = 62, [1][0][RTW89_ETSI][2] = 40, [1][0][RTW89_MKK][2] = 48, @@ -47769,6 +49799,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][2] = 32, [1][0][RTW89_CHILE][2] = 62, [1][0][RTW89_QATAR][2] = 40, + [1][0][RTW89_THAILAND][2] = 40, [1][0][RTW89_FCC][4] = 64, [1][0][RTW89_ETSI][4] = 40, [1][0][RTW89_MKK][4] = 40, @@ -47781,6 +49812,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][4] = 32, [1][0][RTW89_CHILE][4] = 64, [1][0][RTW89_QATAR][4] = 40, + [1][0][RTW89_THAILAND][4] = 40, [1][0][RTW89_FCC][6] = 64, [1][0][RTW89_ETSI][6] = 40, [1][0][RTW89_MKK][6] = 40, @@ -47793,6 +49825,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][6] = 32, [1][0][RTW89_CHILE][6] = 64, [1][0][RTW89_QATAR][6] = 40, + [1][0][RTW89_THAILAND][6] = 40, [1][0][RTW89_FCC][8] = 62, [1][0][RTW89_ETSI][8] = 40, [1][0][RTW89_MKK][8] = 34, @@ -47805,6 +49838,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][8] = 32, [1][0][RTW89_CHILE][8] = 62, [1][0][RTW89_QATAR][8] = 40, + [1][0][RTW89_THAILAND][8] = 40, [1][0][RTW89_FCC][10] = 62, [1][0][RTW89_ETSI][10] = 40, [1][0][RTW89_MKK][10] = 34, @@ -47817,6 +49851,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][10] = 32, [1][0][RTW89_CHILE][10] = 62, [1][0][RTW89_QATAR][10] = 40, + [1][0][RTW89_THAILAND][10] = 40, [1][0][RTW89_FCC][12] = 62, [1][0][RTW89_ETSI][12] = 40, [1][0][RTW89_MKK][12] = 46, @@ -47829,6 +49864,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][12] = 32, [1][0][RTW89_CHILE][12] = 62, [1][0][RTW89_QATAR][12] = 40, + [1][0][RTW89_THAILAND][12] = 40, [1][0][RTW89_FCC][14] = 62, [1][0][RTW89_ETSI][14] = 40, [1][0][RTW89_MKK][14] = 46, @@ -47841,6 +49877,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][14] = 32, [1][0][RTW89_CHILE][14] = 62, [1][0][RTW89_QATAR][14] = 40, + [1][0][RTW89_THAILAND][14] = 40, [1][0][RTW89_FCC][15] = 62, [1][0][RTW89_ETSI][15] = 40, [1][0][RTW89_MKK][15] = 62, @@ -47853,6 +49890,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][15] = 32, [1][0][RTW89_CHILE][15] = 62, [1][0][RTW89_QATAR][15] = 40, + [1][0][RTW89_THAILAND][15] = 40, [1][0][RTW89_FCC][17] = 62, [1][0][RTW89_ETSI][17] = 40, [1][0][RTW89_MKK][17] = 68, @@ -47865,6 +49903,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][17] = 32, [1][0][RTW89_CHILE][17] = 62, [1][0][RTW89_QATAR][17] = 40, + [1][0][RTW89_THAILAND][17] = 40, [1][0][RTW89_FCC][19] = 64, [1][0][RTW89_ETSI][19] = 40, [1][0][RTW89_MKK][19] = 68, @@ -47877,6 +49916,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] 
[1][0][RTW89_UKRAINE][19] = 32, [1][0][RTW89_CHILE][19] = 64, [1][0][RTW89_QATAR][19] = 40, + [1][0][RTW89_THAILAND][19] = 40, [1][0][RTW89_FCC][21] = 64, [1][0][RTW89_ETSI][21] = 40, [1][0][RTW89_MKK][21] = 68, @@ -47889,6 +49929,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][21] = 32, [1][0][RTW89_CHILE][21] = 64, [1][0][RTW89_QATAR][21] = 40, + [1][0][RTW89_THAILAND][21] = 40, [1][0][RTW89_FCC][23] = 64, [1][0][RTW89_ETSI][23] = 40, [1][0][RTW89_MKK][23] = 68, @@ -47901,6 +49942,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][23] = 32, [1][0][RTW89_CHILE][23] = 64, [1][0][RTW89_QATAR][23] = 40, + [1][0][RTW89_THAILAND][23] = 40, [1][0][RTW89_FCC][25] = 64, [1][0][RTW89_ETSI][25] = 40, [1][0][RTW89_MKK][25] = 68, @@ -47913,6 +49955,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][25] = 32, [1][0][RTW89_CHILE][25] = 64, [1][0][RTW89_QATAR][25] = 40, + [1][0][RTW89_THAILAND][25] = 40, [1][0][RTW89_FCC][27] = 64, [1][0][RTW89_ETSI][27] = 42, [1][0][RTW89_MKK][27] = 68, @@ -47925,6 +49968,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][27] = 32, [1][0][RTW89_CHILE][27] = 64, [1][0][RTW89_QATAR][27] = 42, + [1][0][RTW89_THAILAND][27] = 42, [1][0][RTW89_FCC][29] = 64, [1][0][RTW89_ETSI][29] = 42, [1][0][RTW89_MKK][29] = 68, @@ -47937,6 +49981,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][29] = 32, [1][0][RTW89_CHILE][29] = 64, [1][0][RTW89_QATAR][29] = 42, + [1][0][RTW89_THAILAND][29] = 42, [1][0][RTW89_FCC][31] = 64, [1][0][RTW89_ETSI][31] = 42, [1][0][RTW89_MKK][31] = 68, @@ -47949,6 +49994,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][31] = 32, [1][0][RTW89_CHILE][31] = 64, [1][0][RTW89_QATAR][31] = 42, + [1][0][RTW89_THAILAND][31] = 42, [1][0][RTW89_FCC][33] = 56, [1][0][RTW89_ETSI][33] = 42, [1][0][RTW89_MKK][33] = 68, @@ -47961,6 +50007,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][33] = 32, [1][0][RTW89_CHILE][33] = 56, [1][0][RTW89_QATAR][33] = 42, + [1][0][RTW89_THAILAND][33] = 42, [1][0][RTW89_FCC][35] = 56, [1][0][RTW89_ETSI][35] = 42, [1][0][RTW89_MKK][35] = 68, @@ -47973,6 +50020,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][35] = 32, [1][0][RTW89_CHILE][35] = 56, [1][0][RTW89_QATAR][35] = 42, + [1][0][RTW89_THAILAND][35] = 42, [1][0][RTW89_FCC][37] = 66, [1][0][RTW89_ETSI][37] = 127, [1][0][RTW89_MKK][37] = 68, @@ -47985,66 +50033,72 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][37] = 127, [1][0][RTW89_CHILE][37] = 66, [1][0][RTW89_QATAR][37] = 127, + [1][0][RTW89_THAILAND][37] = 127, [1][0][RTW89_FCC][38] = 76, [1][0][RTW89_ETSI][38] = 28, [1][0][RTW89_MKK][38] = 127, [1][0][RTW89_IC][38] = 76, [1][0][RTW89_KCC][38] = 54, [1][0][RTW89_ACMA][38] = 76, - [1][0][RTW89_CN][38] = 66, + [1][0][RTW89_CN][38] = 56, [1][0][RTW89_UK][38] = 44, [1][0][RTW89_MEXICO][38] = 76, [1][0][RTW89_UKRAINE][38] = 26, [1][0][RTW89_CHILE][38] = 76, [1][0][RTW89_QATAR][38] = 26, + [1][0][RTW89_THAILAND][38] = 28, [1][0][RTW89_FCC][40] = 76, [1][0][RTW89_ETSI][40] = 28, [1][0][RTW89_MKK][40] = 127, [1][0][RTW89_IC][40] = 76, [1][0][RTW89_KCC][40] = 54, [1][0][RTW89_ACMA][40] = 76, - [1][0][RTW89_CN][40] = 66, + [1][0][RTW89_CN][40] = 56, [1][0][RTW89_UK][40] = 44, [1][0][RTW89_MEXICO][40] = 76, 
[1][0][RTW89_UKRAINE][40] = 26, [1][0][RTW89_CHILE][40] = 76, [1][0][RTW89_QATAR][40] = 26, + [1][0][RTW89_THAILAND][40] = 28, [1][0][RTW89_FCC][42] = 68, [1][0][RTW89_ETSI][42] = 28, [1][0][RTW89_MKK][42] = 127, [1][0][RTW89_IC][42] = 68, [1][0][RTW89_KCC][42] = 54, [1][0][RTW89_ACMA][42] = 68, - [1][0][RTW89_CN][42] = 66, + [1][0][RTW89_CN][42] = 56, [1][0][RTW89_UK][42] = 44, [1][0][RTW89_MEXICO][42] = 68, [1][0][RTW89_UKRAINE][42] = 26, [1][0][RTW89_CHILE][42] = 68, [1][0][RTW89_QATAR][42] = 26, + [1][0][RTW89_THAILAND][42] = 28, [1][0][RTW89_FCC][44] = 70, [1][0][RTW89_ETSI][44] = 28, [1][0][RTW89_MKK][44] = 127, [1][0][RTW89_IC][44] = 70, [1][0][RTW89_KCC][44] = 54, [1][0][RTW89_ACMA][44] = 70, - [1][0][RTW89_CN][44] = 66, + [1][0][RTW89_CN][44] = 56, [1][0][RTW89_UK][44] = 42, [1][0][RTW89_MEXICO][44] = 70, [1][0][RTW89_UKRAINE][44] = 26, [1][0][RTW89_CHILE][44] = 70, [1][0][RTW89_QATAR][44] = 26, + [1][0][RTW89_THAILAND][44] = 28, [1][0][RTW89_FCC][46] = 70, [1][0][RTW89_ETSI][46] = 28, [1][0][RTW89_MKK][46] = 127, [1][0][RTW89_IC][46] = 70, [1][0][RTW89_KCC][46] = 54, [1][0][RTW89_ACMA][46] = 70, - [1][0][RTW89_CN][46] = 66, + [1][0][RTW89_CN][46] = 56, [1][0][RTW89_UK][46] = 42, [1][0][RTW89_MEXICO][46] = 70, [1][0][RTW89_UKRAINE][46] = 26, [1][0][RTW89_CHILE][46] = 70, [1][0][RTW89_QATAR][46] = 26, + [1][0][RTW89_THAILAND][46] = 28, [1][0][RTW89_FCC][48] = 56, [1][0][RTW89_ETSI][48] = 127, [1][0][RTW89_MKK][48] = 127, @@ -48057,6 +50111,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][48] = 127, [1][0][RTW89_CHILE][48] = 127, [1][0][RTW89_QATAR][48] = 127, + [1][0][RTW89_THAILAND][48] = 127, [1][0][RTW89_FCC][50] = 58, [1][0][RTW89_ETSI][50] = 127, [1][0][RTW89_MKK][50] = 127, @@ -48069,6 +50124,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][50] = 127, [1][0][RTW89_CHILE][50] = 127, [1][0][RTW89_QATAR][50] = 127, + [1][0][RTW89_THAILAND][50] = 127, [1][0][RTW89_FCC][52] = 56, [1][0][RTW89_ETSI][52] = 127, [1][0][RTW89_MKK][52] = 127, @@ -48081,6 +50137,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_UKRAINE][52] = 127, [1][0][RTW89_CHILE][52] = 127, [1][0][RTW89_QATAR][52] = 127, + [1][0][RTW89_THAILAND][52] = 127, [1][1][RTW89_FCC][0] = 44, [1][1][RTW89_ETSI][0] = 30, [1][1][RTW89_MKK][0] = 34, @@ -48093,6 +50150,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][0] = 20, [1][1][RTW89_CHILE][0] = 44, [1][1][RTW89_QATAR][0] = 30, + [1][1][RTW89_THAILAND][0] = 30, [1][1][RTW89_FCC][2] = 44, [1][1][RTW89_ETSI][2] = 30, [1][1][RTW89_MKK][2] = 34, @@ -48105,6 +50163,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][2] = 20, [1][1][RTW89_CHILE][2] = 44, [1][1][RTW89_QATAR][2] = 30, + [1][1][RTW89_THAILAND][2] = 30, [1][1][RTW89_FCC][4] = 46, [1][1][RTW89_ETSI][4] = 30, [1][1][RTW89_MKK][4] = 26, @@ -48117,6 +50176,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][4] = 20, [1][1][RTW89_CHILE][4] = 46, [1][1][RTW89_QATAR][4] = 30, + [1][1][RTW89_THAILAND][4] = 30, [1][1][RTW89_FCC][6] = 46, [1][1][RTW89_ETSI][6] = 30, [1][1][RTW89_MKK][6] = 26, @@ -48129,6 +50189,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][6] = 20, [1][1][RTW89_CHILE][6] = 46, [1][1][RTW89_QATAR][6] = 30, + [1][1][RTW89_THAILAND][6] = 30, [1][1][RTW89_FCC][8] = 44, [1][1][RTW89_ETSI][8] = 30, [1][1][RTW89_MKK][8] = 20, @@ 
-48141,6 +50202,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][8] = 20, [1][1][RTW89_CHILE][8] = 44, [1][1][RTW89_QATAR][8] = 30, + [1][1][RTW89_THAILAND][8] = 30, [1][1][RTW89_FCC][10] = 44, [1][1][RTW89_ETSI][10] = 30, [1][1][RTW89_MKK][10] = 20, @@ -48153,6 +50215,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][10] = 20, [1][1][RTW89_CHILE][10] = 44, [1][1][RTW89_QATAR][10] = 30, + [1][1][RTW89_THAILAND][10] = 30, [1][1][RTW89_FCC][12] = 44, [1][1][RTW89_ETSI][12] = 30, [1][1][RTW89_MKK][12] = 34, @@ -48165,6 +50228,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][12] = 20, [1][1][RTW89_CHILE][12] = 44, [1][1][RTW89_QATAR][12] = 30, + [1][1][RTW89_THAILAND][12] = 30, [1][1][RTW89_FCC][14] = 44, [1][1][RTW89_ETSI][14] = 30, [1][1][RTW89_MKK][14] = 34, @@ -48177,6 +50241,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][14] = 20, [1][1][RTW89_CHILE][14] = 44, [1][1][RTW89_QATAR][14] = 30, + [1][1][RTW89_THAILAND][14] = 30, [1][1][RTW89_FCC][15] = 44, [1][1][RTW89_ETSI][15] = 28, [1][1][RTW89_MKK][15] = 56, @@ -48189,6 +50254,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][15] = 20, [1][1][RTW89_CHILE][15] = 44, [1][1][RTW89_QATAR][15] = 28, + [1][1][RTW89_THAILAND][15] = 28, [1][1][RTW89_FCC][17] = 44, [1][1][RTW89_ETSI][17] = 28, [1][1][RTW89_MKK][17] = 58, @@ -48201,6 +50267,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][17] = 20, [1][1][RTW89_CHILE][17] = 44, [1][1][RTW89_QATAR][17] = 28, + [1][1][RTW89_THAILAND][17] = 28, [1][1][RTW89_FCC][19] = 44, [1][1][RTW89_ETSI][19] = 28, [1][1][RTW89_MKK][19] = 58, @@ -48213,6 +50280,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][19] = 20, [1][1][RTW89_CHILE][19] = 44, [1][1][RTW89_QATAR][19] = 28, + [1][1][RTW89_THAILAND][19] = 28, [1][1][RTW89_FCC][21] = 44, [1][1][RTW89_ETSI][21] = 28, [1][1][RTW89_MKK][21] = 58, @@ -48225,6 +50293,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][21] = 20, [1][1][RTW89_CHILE][21] = 44, [1][1][RTW89_QATAR][21] = 28, + [1][1][RTW89_THAILAND][21] = 28, [1][1][RTW89_FCC][23] = 44, [1][1][RTW89_ETSI][23] = 28, [1][1][RTW89_MKK][23] = 58, @@ -48237,6 +50306,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][23] = 20, [1][1][RTW89_CHILE][23] = 44, [1][1][RTW89_QATAR][23] = 28, + [1][1][RTW89_THAILAND][23] = 28, [1][1][RTW89_FCC][25] = 44, [1][1][RTW89_ETSI][25] = 28, [1][1][RTW89_MKK][25] = 58, @@ -48249,6 +50319,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][25] = 20, [1][1][RTW89_CHILE][25] = 44, [1][1][RTW89_QATAR][25] = 28, + [1][1][RTW89_THAILAND][25] = 28, [1][1][RTW89_FCC][27] = 44, [1][1][RTW89_ETSI][27] = 30, [1][1][RTW89_MKK][27] = 58, @@ -48261,6 +50332,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][27] = 20, [1][1][RTW89_CHILE][27] = 44, [1][1][RTW89_QATAR][27] = 30, + [1][1][RTW89_THAILAND][27] = 30, [1][1][RTW89_FCC][29] = 44, [1][1][RTW89_ETSI][29] = 30, [1][1][RTW89_MKK][29] = 58, @@ -48273,6 +50345,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][29] = 20, [1][1][RTW89_CHILE][29] = 44, [1][1][RTW89_QATAR][29] = 30, + [1][1][RTW89_THAILAND][29] = 30, [1][1][RTW89_FCC][31] 
= 44, [1][1][RTW89_ETSI][31] = 30, [1][1][RTW89_MKK][31] = 58, @@ -48285,6 +50358,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][31] = 20, [1][1][RTW89_CHILE][31] = 44, [1][1][RTW89_QATAR][31] = 30, + [1][1][RTW89_THAILAND][31] = 30, [1][1][RTW89_FCC][33] = 38, [1][1][RTW89_ETSI][33] = 30, [1][1][RTW89_MKK][33] = 58, @@ -48297,6 +50371,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][33] = 20, [1][1][RTW89_CHILE][33] = 38, [1][1][RTW89_QATAR][33] = 30, + [1][1][RTW89_THAILAND][33] = 30, [1][1][RTW89_FCC][35] = 38, [1][1][RTW89_ETSI][35] = 30, [1][1][RTW89_MKK][35] = 58, @@ -48309,6 +50384,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][35] = 20, [1][1][RTW89_CHILE][35] = 38, [1][1][RTW89_QATAR][35] = 30, + [1][1][RTW89_THAILAND][35] = 30, [1][1][RTW89_FCC][37] = 46, [1][1][RTW89_ETSI][37] = 127, [1][1][RTW89_MKK][37] = 58, @@ -48321,6 +50397,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][37] = 127, [1][1][RTW89_CHILE][37] = 46, [1][1][RTW89_QATAR][37] = 127, + [1][1][RTW89_THAILAND][37] = 127, [1][1][RTW89_FCC][38] = 74, [1][1][RTW89_ETSI][38] = 16, [1][1][RTW89_MKK][38] = 127, @@ -48333,6 +50410,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][38] = 14, [1][1][RTW89_CHILE][38] = 72, [1][1][RTW89_QATAR][38] = 14, + [1][1][RTW89_THAILAND][38] = 16, [1][1][RTW89_FCC][40] = 74, [1][1][RTW89_ETSI][40] = 16, [1][1][RTW89_MKK][40] = 127, @@ -48345,6 +50423,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][40] = 14, [1][1][RTW89_CHILE][40] = 72, [1][1][RTW89_QATAR][40] = 14, + [1][1][RTW89_THAILAND][40] = 16, [1][1][RTW89_FCC][42] = 74, [1][1][RTW89_ETSI][42] = 16, [1][1][RTW89_MKK][42] = 127, @@ -48357,6 +50436,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][42] = 14, [1][1][RTW89_CHILE][42] = 72, [1][1][RTW89_QATAR][42] = 14, + [1][1][RTW89_THAILAND][42] = 16, [1][1][RTW89_FCC][44] = 74, [1][1][RTW89_ETSI][44] = 16, [1][1][RTW89_MKK][44] = 127, @@ -48369,6 +50449,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][44] = 14, [1][1][RTW89_CHILE][44] = 72, [1][1][RTW89_QATAR][44] = 14, + [1][1][RTW89_THAILAND][44] = 16, [1][1][RTW89_FCC][46] = 74, [1][1][RTW89_ETSI][46] = 16, [1][1][RTW89_MKK][46] = 127, @@ -48381,6 +50462,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][46] = 14, [1][1][RTW89_CHILE][46] = 72, [1][1][RTW89_QATAR][46] = 14, + [1][1][RTW89_THAILAND][46] = 16, [1][1][RTW89_FCC][48] = 34, [1][1][RTW89_ETSI][48] = 127, [1][1][RTW89_MKK][48] = 127, @@ -48393,6 +50475,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][48] = 127, [1][1][RTW89_CHILE][48] = 127, [1][1][RTW89_QATAR][48] = 127, + [1][1][RTW89_THAILAND][48] = 127, [1][1][RTW89_FCC][50] = 34, [1][1][RTW89_ETSI][50] = 127, [1][1][RTW89_MKK][50] = 127, @@ -48405,6 +50488,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][50] = 127, [1][1][RTW89_CHILE][50] = 127, [1][1][RTW89_QATAR][50] = 127, + [1][1][RTW89_THAILAND][50] = 127, [1][1][RTW89_FCC][52] = 30, [1][1][RTW89_ETSI][52] = 127, [1][1][RTW89_MKK][52] = 127, @@ -48417,6 +50501,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_UKRAINE][52] = 127, [1][1][RTW89_CHILE][52] 
= 127, [1][1][RTW89_QATAR][52] = 127, + [1][1][RTW89_THAILAND][52] = 127, [2][0][RTW89_FCC][0] = 68, [2][0][RTW89_ETSI][0] = 52, [2][0][RTW89_MKK][0] = 60, @@ -48429,6 +50514,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][0] = 46, [2][0][RTW89_CHILE][0] = 68, [2][0][RTW89_QATAR][0] = 52, + [2][0][RTW89_THAILAND][0] = 52, [2][0][RTW89_FCC][2] = 64, [2][0][RTW89_ETSI][2] = 52, [2][0][RTW89_MKK][2] = 60, @@ -48441,6 +50527,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][2] = 46, [2][0][RTW89_CHILE][2] = 64, [2][0][RTW89_QATAR][2] = 52, + [2][0][RTW89_THAILAND][2] = 52, [2][0][RTW89_FCC][4] = 68, [2][0][RTW89_ETSI][4] = 52, [2][0][RTW89_MKK][4] = 50, @@ -48453,6 +50540,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][4] = 46, [2][0][RTW89_CHILE][4] = 68, [2][0][RTW89_QATAR][4] = 52, + [2][0][RTW89_THAILAND][4] = 52, [2][0][RTW89_FCC][6] = 68, [2][0][RTW89_ETSI][6] = 52, [2][0][RTW89_MKK][6] = 50, @@ -48465,6 +50553,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][6] = 46, [2][0][RTW89_CHILE][6] = 68, [2][0][RTW89_QATAR][6] = 52, + [2][0][RTW89_THAILAND][6] = 52, [2][0][RTW89_FCC][8] = 68, [2][0][RTW89_ETSI][8] = 52, [2][0][RTW89_MKK][8] = 44, @@ -48477,6 +50566,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][8] = 46, [2][0][RTW89_CHILE][8] = 68, [2][0][RTW89_QATAR][8] = 52, + [2][0][RTW89_THAILAND][8] = 52, [2][0][RTW89_FCC][10] = 68, [2][0][RTW89_ETSI][10] = 52, [2][0][RTW89_MKK][10] = 44, @@ -48489,6 +50579,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][10] = 46, [2][0][RTW89_CHILE][10] = 68, [2][0][RTW89_QATAR][10] = 52, + [2][0][RTW89_THAILAND][10] = 52, [2][0][RTW89_FCC][12] = 68, [2][0][RTW89_ETSI][12] = 52, [2][0][RTW89_MKK][12] = 58, @@ -48501,6 +50592,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][12] = 46, [2][0][RTW89_CHILE][12] = 68, [2][0][RTW89_QATAR][12] = 52, + [2][0][RTW89_THAILAND][12] = 52, [2][0][RTW89_FCC][14] = 68, [2][0][RTW89_ETSI][14] = 52, [2][0][RTW89_MKK][14] = 58, @@ -48513,6 +50605,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][14] = 46, [2][0][RTW89_CHILE][14] = 68, [2][0][RTW89_QATAR][14] = 52, + [2][0][RTW89_THAILAND][14] = 52, [2][0][RTW89_FCC][15] = 68, [2][0][RTW89_ETSI][15] = 52, [2][0][RTW89_MKK][15] = 68, @@ -48525,6 +50618,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][15] = 46, [2][0][RTW89_CHILE][15] = 68, [2][0][RTW89_QATAR][15] = 52, + [2][0][RTW89_THAILAND][15] = 52, [2][0][RTW89_FCC][17] = 68, [2][0][RTW89_ETSI][17] = 52, [2][0][RTW89_MKK][17] = 74, @@ -48537,6 +50631,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][17] = 46, [2][0][RTW89_CHILE][17] = 68, [2][0][RTW89_QATAR][17] = 52, + [2][0][RTW89_THAILAND][17] = 52, [2][0][RTW89_FCC][19] = 70, [2][0][RTW89_ETSI][19] = 52, [2][0][RTW89_MKK][19] = 74, @@ -48549,6 +50644,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][19] = 46, [2][0][RTW89_CHILE][19] = 70, [2][0][RTW89_QATAR][19] = 52, + [2][0][RTW89_THAILAND][19] = 52, [2][0][RTW89_FCC][21] = 70, [2][0][RTW89_ETSI][21] = 52, [2][0][RTW89_MKK][21] = 74, @@ -48561,6 +50657,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] 
[2][0][RTW89_UKRAINE][21] = 46, [2][0][RTW89_CHILE][21] = 70, [2][0][RTW89_QATAR][21] = 52, + [2][0][RTW89_THAILAND][21] = 52, [2][0][RTW89_FCC][23] = 70, [2][0][RTW89_ETSI][23] = 52, [2][0][RTW89_MKK][23] = 74, @@ -48573,6 +50670,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][23] = 46, [2][0][RTW89_CHILE][23] = 70, [2][0][RTW89_QATAR][23] = 52, + [2][0][RTW89_THAILAND][23] = 52, [2][0][RTW89_FCC][25] = 70, [2][0][RTW89_ETSI][25] = 52, [2][0][RTW89_MKK][25] = 74, @@ -48585,6 +50683,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][25] = 46, [2][0][RTW89_CHILE][25] = 70, [2][0][RTW89_QATAR][25] = 52, + [2][0][RTW89_THAILAND][25] = 52, [2][0][RTW89_FCC][27] = 70, [2][0][RTW89_ETSI][27] = 52, [2][0][RTW89_MKK][27] = 74, @@ -48597,6 +50696,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][27] = 46, [2][0][RTW89_CHILE][27] = 70, [2][0][RTW89_QATAR][27] = 52, + [2][0][RTW89_THAILAND][27] = 52, [2][0][RTW89_FCC][29] = 70, [2][0][RTW89_ETSI][29] = 52, [2][0][RTW89_MKK][29] = 74, @@ -48609,6 +50709,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][29] = 46, [2][0][RTW89_CHILE][29] = 70, [2][0][RTW89_QATAR][29] = 52, + [2][0][RTW89_THAILAND][29] = 52, [2][0][RTW89_FCC][31] = 70, [2][0][RTW89_ETSI][31] = 52, [2][0][RTW89_MKK][31] = 74, @@ -48621,6 +50722,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][31] = 46, [2][0][RTW89_CHILE][31] = 70, [2][0][RTW89_QATAR][31] = 52, + [2][0][RTW89_THAILAND][31] = 52, [2][0][RTW89_FCC][33] = 62, [2][0][RTW89_ETSI][33] = 52, [2][0][RTW89_MKK][33] = 74, @@ -48633,6 +50735,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][33] = 46, [2][0][RTW89_CHILE][33] = 62, [2][0][RTW89_QATAR][33] = 52, + [2][0][RTW89_THAILAND][33] = 52, [2][0][RTW89_FCC][35] = 62, [2][0][RTW89_ETSI][35] = 52, [2][0][RTW89_MKK][35] = 74, @@ -48645,6 +50748,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][35] = 46, [2][0][RTW89_CHILE][35] = 62, [2][0][RTW89_QATAR][35] = 52, + [2][0][RTW89_THAILAND][35] = 52, [2][0][RTW89_FCC][37] = 70, [2][0][RTW89_ETSI][37] = 127, [2][0][RTW89_MKK][37] = 74, @@ -48657,66 +50761,72 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][37] = 127, [2][0][RTW89_CHILE][37] = 70, [2][0][RTW89_QATAR][37] = 127, + [2][0][RTW89_THAILAND][37] = 127, [2][0][RTW89_FCC][38] = 82, [2][0][RTW89_ETSI][38] = 28, [2][0][RTW89_MKK][38] = 127, [2][0][RTW89_IC][38] = 82, [2][0][RTW89_KCC][38] = 60, [2][0][RTW89_ACMA][38] = 82, - [2][0][RTW89_CN][38] = 68, + [2][0][RTW89_CN][38] = 56, [2][0][RTW89_UK][38] = 54, [2][0][RTW89_MEXICO][38] = 82, [2][0][RTW89_UKRAINE][38] = 26, [2][0][RTW89_CHILE][38] = 82, [2][0][RTW89_QATAR][38] = 26, + [2][0][RTW89_THAILAND][38] = 28, [2][0][RTW89_FCC][40] = 82, [2][0][RTW89_ETSI][40] = 28, [2][0][RTW89_MKK][40] = 127, [2][0][RTW89_IC][40] = 82, [2][0][RTW89_KCC][40] = 60, [2][0][RTW89_ACMA][40] = 82, - [2][0][RTW89_CN][40] = 68, + [2][0][RTW89_CN][40] = 56, [2][0][RTW89_UK][40] = 54, [2][0][RTW89_MEXICO][40] = 82, [2][0][RTW89_UKRAINE][40] = 26, [2][0][RTW89_CHILE][40] = 82, [2][0][RTW89_QATAR][40] = 26, + [2][0][RTW89_THAILAND][40] = 28, [2][0][RTW89_FCC][42] = 76, [2][0][RTW89_ETSI][42] = 28, [2][0][RTW89_MKK][42] = 127, [2][0][RTW89_IC][42] = 76, [2][0][RTW89_KCC][42] = 60, [2][0][RTW89_ACMA][42] = 76, - 
[2][0][RTW89_CN][42] = 68, + [2][0][RTW89_CN][42] = 56, [2][0][RTW89_UK][42] = 54, [2][0][RTW89_MEXICO][42] = 76, [2][0][RTW89_UKRAINE][42] = 26, [2][0][RTW89_CHILE][42] = 76, [2][0][RTW89_QATAR][42] = 26, + [2][0][RTW89_THAILAND][42] = 28, [2][0][RTW89_FCC][44] = 80, [2][0][RTW89_ETSI][44] = 28, [2][0][RTW89_MKK][44] = 127, [2][0][RTW89_IC][44] = 80, [2][0][RTW89_KCC][44] = 60, [2][0][RTW89_ACMA][44] = 80, - [2][0][RTW89_CN][44] = 68, + [2][0][RTW89_CN][44] = 56, [2][0][RTW89_UK][44] = 54, [2][0][RTW89_MEXICO][44] = 80, [2][0][RTW89_UKRAINE][44] = 26, [2][0][RTW89_CHILE][44] = 80, [2][0][RTW89_QATAR][44] = 26, + [2][0][RTW89_THAILAND][44] = 28, [2][0][RTW89_FCC][46] = 80, [2][0][RTW89_ETSI][46] = 28, [2][0][RTW89_MKK][46] = 127, [2][0][RTW89_IC][46] = 80, [2][0][RTW89_KCC][46] = 60, [2][0][RTW89_ACMA][46] = 80, - [2][0][RTW89_CN][46] = 68, + [2][0][RTW89_CN][46] = 56, [2][0][RTW89_UK][46] = 54, [2][0][RTW89_MEXICO][46] = 80, [2][0][RTW89_UKRAINE][46] = 26, [2][0][RTW89_CHILE][46] = 80, [2][0][RTW89_QATAR][46] = 26, + [2][0][RTW89_THAILAND][46] = 28, [2][0][RTW89_FCC][48] = 64, [2][0][RTW89_ETSI][48] = 127, [2][0][RTW89_MKK][48] = 127, @@ -48729,6 +50839,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][48] = 127, [2][0][RTW89_CHILE][48] = 127, [2][0][RTW89_QATAR][48] = 127, + [2][0][RTW89_THAILAND][48] = 127, [2][0][RTW89_FCC][50] = 64, [2][0][RTW89_ETSI][50] = 127, [2][0][RTW89_MKK][50] = 127, @@ -48741,6 +50852,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][50] = 127, [2][0][RTW89_CHILE][50] = 127, [2][0][RTW89_QATAR][50] = 127, + [2][0][RTW89_THAILAND][50] = 127, [2][0][RTW89_FCC][52] = 64, [2][0][RTW89_ETSI][52] = 127, [2][0][RTW89_MKK][52] = 127, @@ -48753,6 +50865,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_UKRAINE][52] = 127, [2][0][RTW89_CHILE][52] = 127, [2][0][RTW89_QATAR][52] = 127, + [2][0][RTW89_THAILAND][52] = 127, [2][1][RTW89_FCC][0] = 50, [2][1][RTW89_ETSI][0] = 40, [2][1][RTW89_MKK][0] = 44, @@ -48765,6 +50878,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][0] = 34, [2][1][RTW89_CHILE][0] = 50, [2][1][RTW89_QATAR][0] = 40, + [2][1][RTW89_THAILAND][0] = 40, [2][1][RTW89_FCC][2] = 50, [2][1][RTW89_ETSI][2] = 40, [2][1][RTW89_MKK][2] = 44, @@ -48777,6 +50891,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][2] = 34, [2][1][RTW89_CHILE][2] = 50, [2][1][RTW89_QATAR][2] = 40, + [2][1][RTW89_THAILAND][2] = 40, [2][1][RTW89_FCC][4] = 50, [2][1][RTW89_ETSI][4] = 40, [2][1][RTW89_MKK][4] = 36, @@ -48789,6 +50904,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][4] = 34, [2][1][RTW89_CHILE][4] = 50, [2][1][RTW89_QATAR][4] = 40, + [2][1][RTW89_THAILAND][4] = 40, [2][1][RTW89_FCC][6] = 50, [2][1][RTW89_ETSI][6] = 40, [2][1][RTW89_MKK][6] = 36, @@ -48801,6 +50917,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][6] = 34, [2][1][RTW89_CHILE][6] = 50, [2][1][RTW89_QATAR][6] = 40, + [2][1][RTW89_THAILAND][6] = 40, [2][1][RTW89_FCC][8] = 50, [2][1][RTW89_ETSI][8] = 40, [2][1][RTW89_MKK][8] = 32, @@ -48813,6 +50930,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][8] = 34, [2][1][RTW89_CHILE][8] = 50, [2][1][RTW89_QATAR][8] = 40, + [2][1][RTW89_THAILAND][8] = 40, [2][1][RTW89_FCC][10] = 50, [2][1][RTW89_ETSI][10] = 40, [2][1][RTW89_MKK][10] = 32, @@ 
-48825,6 +50943,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][10] = 34, [2][1][RTW89_CHILE][10] = 50, [2][1][RTW89_QATAR][10] = 40, + [2][1][RTW89_THAILAND][10] = 40, [2][1][RTW89_FCC][12] = 48, [2][1][RTW89_ETSI][12] = 40, [2][1][RTW89_MKK][12] = 44, @@ -48837,6 +50956,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][12] = 34, [2][1][RTW89_CHILE][12] = 48, [2][1][RTW89_QATAR][12] = 40, + [2][1][RTW89_THAILAND][12] = 40, [2][1][RTW89_FCC][14] = 48, [2][1][RTW89_ETSI][14] = 40, [2][1][RTW89_MKK][14] = 44, @@ -48849,6 +50969,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][14] = 34, [2][1][RTW89_CHILE][14] = 48, [2][1][RTW89_QATAR][14] = 40, + [2][1][RTW89_THAILAND][14] = 40, [2][1][RTW89_FCC][15] = 50, [2][1][RTW89_ETSI][15] = 40, [2][1][RTW89_MKK][15] = 66, @@ -48861,6 +50982,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][15] = 34, [2][1][RTW89_CHILE][15] = 50, [2][1][RTW89_QATAR][15] = 40, + [2][1][RTW89_THAILAND][15] = 40, [2][1][RTW89_FCC][17] = 50, [2][1][RTW89_ETSI][17] = 40, [2][1][RTW89_MKK][17] = 66, @@ -48873,6 +50995,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][17] = 34, [2][1][RTW89_CHILE][17] = 50, [2][1][RTW89_QATAR][17] = 40, + [2][1][RTW89_THAILAND][17] = 40, [2][1][RTW89_FCC][19] = 50, [2][1][RTW89_ETSI][19] = 40, [2][1][RTW89_MKK][19] = 66, @@ -48885,6 +51008,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][19] = 34, [2][1][RTW89_CHILE][19] = 50, [2][1][RTW89_QATAR][19] = 40, + [2][1][RTW89_THAILAND][19] = 40, [2][1][RTW89_FCC][21] = 50, [2][1][RTW89_ETSI][21] = 40, [2][1][RTW89_MKK][21] = 66, @@ -48897,6 +51021,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][21] = 34, [2][1][RTW89_CHILE][21] = 50, [2][1][RTW89_QATAR][21] = 40, + [2][1][RTW89_THAILAND][21] = 40, [2][1][RTW89_FCC][23] = 50, [2][1][RTW89_ETSI][23] = 40, [2][1][RTW89_MKK][23] = 66, @@ -48909,6 +51034,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][23] = 34, [2][1][RTW89_CHILE][23] = 50, [2][1][RTW89_QATAR][23] = 40, + [2][1][RTW89_THAILAND][23] = 40, [2][1][RTW89_FCC][25] = 50, [2][1][RTW89_ETSI][25] = 40, [2][1][RTW89_MKK][25] = 66, @@ -48921,6 +51047,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][25] = 34, [2][1][RTW89_CHILE][25] = 50, [2][1][RTW89_QATAR][25] = 40, + [2][1][RTW89_THAILAND][25] = 40, [2][1][RTW89_FCC][27] = 50, [2][1][RTW89_ETSI][27] = 40, [2][1][RTW89_MKK][27] = 66, @@ -48933,6 +51060,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][27] = 34, [2][1][RTW89_CHILE][27] = 50, [2][1][RTW89_QATAR][27] = 40, + [2][1][RTW89_THAILAND][27] = 40, [2][1][RTW89_FCC][29] = 50, [2][1][RTW89_ETSI][29] = 40, [2][1][RTW89_MKK][29] = 66, @@ -48945,6 +51073,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][29] = 34, [2][1][RTW89_CHILE][29] = 50, [2][1][RTW89_QATAR][29] = 40, + [2][1][RTW89_THAILAND][29] = 40, [2][1][RTW89_FCC][31] = 50, [2][1][RTW89_ETSI][31] = 40, [2][1][RTW89_MKK][31] = 66, @@ -48957,6 +51086,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][31] = 34, [2][1][RTW89_CHILE][31] = 50, [2][1][RTW89_QATAR][31] = 40, + [2][1][RTW89_THAILAND][31] = 40, 
[2][1][RTW89_FCC][33] = 48, [2][1][RTW89_ETSI][33] = 40, [2][1][RTW89_MKK][33] = 66, @@ -48969,6 +51099,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][33] = 34, [2][1][RTW89_CHILE][33] = 48, [2][1][RTW89_QATAR][33] = 40, + [2][1][RTW89_THAILAND][33] = 40, [2][1][RTW89_FCC][35] = 48, [2][1][RTW89_ETSI][35] = 40, [2][1][RTW89_MKK][35] = 66, @@ -48981,6 +51112,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][35] = 34, [2][1][RTW89_CHILE][35] = 48, [2][1][RTW89_QATAR][35] = 40, + [2][1][RTW89_THAILAND][35] = 40, [2][1][RTW89_FCC][37] = 52, [2][1][RTW89_ETSI][37] = 127, [2][1][RTW89_MKK][37] = 66, @@ -48993,6 +51125,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][37] = 127, [2][1][RTW89_CHILE][37] = 52, [2][1][RTW89_QATAR][37] = 127, + [2][1][RTW89_THAILAND][37] = 127, [2][1][RTW89_FCC][38] = 78, [2][1][RTW89_ETSI][38] = 16, [2][1][RTW89_MKK][38] = 127, @@ -49005,6 +51138,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][38] = 14, [2][1][RTW89_CHILE][38] = 72, [2][1][RTW89_QATAR][38] = 14, + [2][1][RTW89_THAILAND][38] = 16, [2][1][RTW89_FCC][40] = 78, [2][1][RTW89_ETSI][40] = 16, [2][1][RTW89_MKK][40] = 127, @@ -49017,6 +51151,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][40] = 14, [2][1][RTW89_CHILE][40] = 72, [2][1][RTW89_QATAR][40] = 14, + [2][1][RTW89_THAILAND][40] = 16, [2][1][RTW89_FCC][42] = 78, [2][1][RTW89_ETSI][42] = 16, [2][1][RTW89_MKK][42] = 127, @@ -49029,6 +51164,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][42] = 14, [2][1][RTW89_CHILE][42] = 72, [2][1][RTW89_QATAR][42] = 14, + [2][1][RTW89_THAILAND][42] = 16, [2][1][RTW89_FCC][44] = 74, [2][1][RTW89_ETSI][44] = 16, [2][1][RTW89_MKK][44] = 127, @@ -49041,6 +51177,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][44] = 14, [2][1][RTW89_CHILE][44] = 72, [2][1][RTW89_QATAR][44] = 14, + [2][1][RTW89_THAILAND][44] = 16, [2][1][RTW89_FCC][46] = 74, [2][1][RTW89_ETSI][46] = 16, [2][1][RTW89_MKK][46] = 127, @@ -49053,6 +51190,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][46] = 14, [2][1][RTW89_CHILE][46] = 72, [2][1][RTW89_QATAR][46] = 14, + [2][1][RTW89_THAILAND][46] = 16, [2][1][RTW89_FCC][48] = 40, [2][1][RTW89_ETSI][48] = 127, [2][1][RTW89_MKK][48] = 127, @@ -49065,6 +51203,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][48] = 127, [2][1][RTW89_CHILE][48] = 127, [2][1][RTW89_QATAR][48] = 127, + [2][1][RTW89_THAILAND][48] = 127, [2][1][RTW89_FCC][50] = 40, [2][1][RTW89_ETSI][50] = 127, [2][1][RTW89_MKK][50] = 127, @@ -49077,6 +51216,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][50] = 127, [2][1][RTW89_CHILE][50] = 127, [2][1][RTW89_QATAR][50] = 127, + [2][1][RTW89_THAILAND][50] = 127, [2][1][RTW89_FCC][52] = 40, [2][1][RTW89_ETSI][52] = 127, [2][1][RTW89_MKK][52] = 127, @@ -49089,6 +51229,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_UKRAINE][52] = 127, [2][1][RTW89_CHILE][52] = 127, [2][1][RTW89_QATAR][52] = 127, + [2][1][RTW89_THAILAND][52] = 127, }; static @@ -49169,19 +51310,19 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_WW][2][44] = 56, [0][0][RTW89_WW][0][45] = -16, [0][0][RTW89_WW][1][45] = -16, - 
[0][0][RTW89_WW][2][45] = 0, + [0][0][RTW89_WW][2][45] = 56, [0][0][RTW89_WW][0][47] = -18, [0][0][RTW89_WW][1][47] = -18, - [0][0][RTW89_WW][2][47] = 0, + [0][0][RTW89_WW][2][47] = 56, [0][0][RTW89_WW][0][49] = -18, [0][0][RTW89_WW][1][49] = -18, - [0][0][RTW89_WW][2][49] = 0, + [0][0][RTW89_WW][2][49] = 56, [0][0][RTW89_WW][0][51] = -18, [0][0][RTW89_WW][1][51] = -18, - [0][0][RTW89_WW][2][51] = 0, + [0][0][RTW89_WW][2][51] = 56, [0][0][RTW89_WW][0][53] = -16, [0][0][RTW89_WW][1][53] = -16, - [0][0][RTW89_WW][2][53] = 0, + [0][0][RTW89_WW][2][53] = 56, [0][0][RTW89_WW][0][55] = -18, [0][0][RTW89_WW][1][55] = -18, [0][0][RTW89_WW][2][55] = 56, @@ -49361,19 +51502,19 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_WW][2][44] = 32, [0][1][RTW89_WW][0][45] = -40, [0][1][RTW89_WW][1][45] = -40, - [0][1][RTW89_WW][2][45] = 0, + [0][1][RTW89_WW][2][45] = 32, [0][1][RTW89_WW][0][47] = -40, [0][1][RTW89_WW][1][47] = -40, - [0][1][RTW89_WW][2][47] = 0, + [0][1][RTW89_WW][2][47] = 32, [0][1][RTW89_WW][0][49] = -40, [0][1][RTW89_WW][1][49] = -40, - [0][1][RTW89_WW][2][49] = 0, + [0][1][RTW89_WW][2][49] = 32, [0][1][RTW89_WW][0][51] = -40, [0][1][RTW89_WW][1][51] = -40, - [0][1][RTW89_WW][2][51] = 0, + [0][1][RTW89_WW][2][51] = 32, [0][1][RTW89_WW][0][53] = -40, [0][1][RTW89_WW][1][53] = -40, - [0][1][RTW89_WW][2][53] = 0, + [0][1][RTW89_WW][2][53] = 32, [0][1][RTW89_WW][0][55] = -40, [0][1][RTW89_WW][1][55] = -40, [0][1][RTW89_WW][2][55] = 30, @@ -49553,19 +51694,19 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_WW][2][44] = 66, [1][0][RTW89_WW][0][45] = -4, [1][0][RTW89_WW][1][45] = -4, - [1][0][RTW89_WW][2][45] = 0, + [1][0][RTW89_WW][2][45] = 68, [1][0][RTW89_WW][0][47] = -4, [1][0][RTW89_WW][1][47] = -4, - [1][0][RTW89_WW][2][47] = 0, + [1][0][RTW89_WW][2][47] = 68, [1][0][RTW89_WW][0][49] = -4, [1][0][RTW89_WW][1][49] = -4, - [1][0][RTW89_WW][2][49] = 0, + [1][0][RTW89_WW][2][49] = 68, [1][0][RTW89_WW][0][51] = -4, [1][0][RTW89_WW][1][51] = -4, - [1][0][RTW89_WW][2][51] = 0, + [1][0][RTW89_WW][2][51] = 68, [1][0][RTW89_WW][0][53] = -4, [1][0][RTW89_WW][1][53] = -4, - [1][0][RTW89_WW][2][53] = 0, + [1][0][RTW89_WW][2][53] = 68, [1][0][RTW89_WW][0][55] = -4, [1][0][RTW89_WW][1][55] = -4, [1][0][RTW89_WW][2][55] = 68, @@ -49745,19 +51886,19 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_WW][2][44] = 44, [1][1][RTW89_WW][0][45] = -26, [1][1][RTW89_WW][1][45] = -26, - [1][1][RTW89_WW][2][45] = 0, + [1][1][RTW89_WW][2][45] = 44, [1][1][RTW89_WW][0][47] = -28, [1][1][RTW89_WW][1][47] = -28, - [1][1][RTW89_WW][2][47] = 0, + [1][1][RTW89_WW][2][47] = 44, [1][1][RTW89_WW][0][49] = -28, [1][1][RTW89_WW][1][49] = -28, - [1][1][RTW89_WW][2][49] = 0, + [1][1][RTW89_WW][2][49] = 44, [1][1][RTW89_WW][0][51] = -28, [1][1][RTW89_WW][1][51] = -28, - [1][1][RTW89_WW][2][51] = 0, + [1][1][RTW89_WW][2][51] = 44, [1][1][RTW89_WW][0][53] = -26, [1][1][RTW89_WW][1][53] = -26, - [1][1][RTW89_WW][2][53] = 0, + [1][1][RTW89_WW][2][53] = 44, [1][1][RTW89_WW][0][55] = -28, [1][1][RTW89_WW][1][55] = -28, [1][1][RTW89_WW][2][55] = 44, @@ -49901,106 +52042,106 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_WW][2][21] = 60, [2][0][RTW89_WW][0][23] = -2, [2][0][RTW89_WW][1][23] = -2, - [2][0][RTW89_WW][2][23] = 78, + [2][0][RTW89_WW][2][23] = 70, [2][0][RTW89_WW][0][25] = -2, [2][0][RTW89_WW][1][25] = -2, - [2][0][RTW89_WW][2][25] = 78, + [2][0][RTW89_WW][2][25] = 70, [2][0][RTW89_WW][0][27] 
= -2, [2][0][RTW89_WW][1][27] = -2, - [2][0][RTW89_WW][2][27] = 78, + [2][0][RTW89_WW][2][27] = 70, [2][0][RTW89_WW][0][29] = -2, [2][0][RTW89_WW][1][29] = -2, - [2][0][RTW89_WW][2][29] = 78, + [2][0][RTW89_WW][2][29] = 70, [2][0][RTW89_WW][0][30] = -2, [2][0][RTW89_WW][1][30] = -2, - [2][0][RTW89_WW][2][30] = 78, + [2][0][RTW89_WW][2][30] = 70, [2][0][RTW89_WW][0][32] = -2, [2][0][RTW89_WW][1][32] = -2, - [2][0][RTW89_WW][2][32] = 78, + [2][0][RTW89_WW][2][32] = 70, [2][0][RTW89_WW][0][34] = -2, [2][0][RTW89_WW][1][34] = -2, - [2][0][RTW89_WW][2][34] = 78, + [2][0][RTW89_WW][2][34] = 70, [2][0][RTW89_WW][0][36] = -2, [2][0][RTW89_WW][1][36] = -2, - [2][0][RTW89_WW][2][36] = 78, + [2][0][RTW89_WW][2][36] = 70, [2][0][RTW89_WW][0][38] = -2, [2][0][RTW89_WW][1][38] = -2, - [2][0][RTW89_WW][2][38] = 78, + [2][0][RTW89_WW][2][38] = 70, [2][0][RTW89_WW][0][40] = -2, [2][0][RTW89_WW][1][40] = -2, - [2][0][RTW89_WW][2][40] = 78, + [2][0][RTW89_WW][2][40] = 70, [2][0][RTW89_WW][0][42] = -2, [2][0][RTW89_WW][1][42] = -2, - [2][0][RTW89_WW][2][42] = 78, + [2][0][RTW89_WW][2][42] = 70, [2][0][RTW89_WW][0][44] = -2, [2][0][RTW89_WW][1][44] = -2, - [2][0][RTW89_WW][2][44] = 78, + [2][0][RTW89_WW][2][44] = 70, [2][0][RTW89_WW][0][45] = -2, [2][0][RTW89_WW][1][45] = -2, - [2][0][RTW89_WW][2][45] = 0, + [2][0][RTW89_WW][2][45] = 70, [2][0][RTW89_WW][0][47] = -2, [2][0][RTW89_WW][1][47] = -2, - [2][0][RTW89_WW][2][47] = 0, + [2][0][RTW89_WW][2][47] = 70, [2][0][RTW89_WW][0][49] = -2, [2][0][RTW89_WW][1][49] = -2, - [2][0][RTW89_WW][2][49] = 0, + [2][0][RTW89_WW][2][49] = 70, [2][0][RTW89_WW][0][51] = -2, [2][0][RTW89_WW][1][51] = -2, - [2][0][RTW89_WW][2][51] = 0, + [2][0][RTW89_WW][2][51] = 70, [2][0][RTW89_WW][0][53] = -2, [2][0][RTW89_WW][1][53] = -2, - [2][0][RTW89_WW][2][53] = 0, + [2][0][RTW89_WW][2][53] = 70, [2][0][RTW89_WW][0][55] = -2, [2][0][RTW89_WW][1][55] = -2, - [2][0][RTW89_WW][2][55] = 78, + [2][0][RTW89_WW][2][55] = 68, [2][0][RTW89_WW][0][57] = -2, [2][0][RTW89_WW][1][57] = -2, - [2][0][RTW89_WW][2][57] = 78, + [2][0][RTW89_WW][2][57] = 68, [2][0][RTW89_WW][0][59] = -2, [2][0][RTW89_WW][1][59] = -2, - [2][0][RTW89_WW][2][59] = 78, + [2][0][RTW89_WW][2][59] = 68, [2][0][RTW89_WW][0][60] = -2, [2][0][RTW89_WW][1][60] = -2, - [2][0][RTW89_WW][2][60] = 78, + [2][0][RTW89_WW][2][60] = 68, [2][0][RTW89_WW][0][62] = -2, [2][0][RTW89_WW][1][62] = -2, - [2][0][RTW89_WW][2][62] = 78, + [2][0][RTW89_WW][2][62] = 68, [2][0][RTW89_WW][0][64] = -2, [2][0][RTW89_WW][1][64] = -2, - [2][0][RTW89_WW][2][64] = 78, + [2][0][RTW89_WW][2][64] = 68, [2][0][RTW89_WW][0][66] = -2, [2][0][RTW89_WW][1][66] = -2, - [2][0][RTW89_WW][2][66] = 78, + [2][0][RTW89_WW][2][66] = 68, [2][0][RTW89_WW][0][68] = -2, [2][0][RTW89_WW][1][68] = -2, - [2][0][RTW89_WW][2][68] = 78, + [2][0][RTW89_WW][2][68] = 68, [2][0][RTW89_WW][0][70] = -2, [2][0][RTW89_WW][1][70] = -2, - [2][0][RTW89_WW][2][70] = 78, + [2][0][RTW89_WW][2][70] = 68, [2][0][RTW89_WW][0][72] = -2, [2][0][RTW89_WW][1][72] = -2, - [2][0][RTW89_WW][2][72] = 78, + [2][0][RTW89_WW][2][72] = 68, [2][0][RTW89_WW][0][74] = -2, [2][0][RTW89_WW][1][74] = -2, - [2][0][RTW89_WW][2][74] = 78, + [2][0][RTW89_WW][2][74] = 68, [2][0][RTW89_WW][0][75] = -2, [2][0][RTW89_WW][1][75] = -2, - [2][0][RTW89_WW][2][75] = 78, + [2][0][RTW89_WW][2][75] = 68, [2][0][RTW89_WW][0][77] = -2, [2][0][RTW89_WW][1][77] = -2, - [2][0][RTW89_WW][2][77] = 78, + [2][0][RTW89_WW][2][77] = 68, [2][0][RTW89_WW][0][79] = -2, [2][0][RTW89_WW][1][79] = -2, - [2][0][RTW89_WW][2][79] = 78, + 
[2][0][RTW89_WW][2][79] = 68, [2][0][RTW89_WW][0][81] = -2, [2][0][RTW89_WW][1][81] = -2, - [2][0][RTW89_WW][2][81] = 78, + [2][0][RTW89_WW][2][81] = 68, [2][0][RTW89_WW][0][83] = -2, [2][0][RTW89_WW][1][83] = -2, - [2][0][RTW89_WW][2][83] = 78, + [2][0][RTW89_WW][2][83] = 68, [2][0][RTW89_WW][0][85] = -2, [2][0][RTW89_WW][1][85] = -2, - [2][0][RTW89_WW][2][85] = 78, + [2][0][RTW89_WW][2][85] = 68, [2][0][RTW89_WW][0][87] = -2, [2][0][RTW89_WW][1][87] = -2, [2][0][RTW89_WW][2][87] = 0, @@ -50129,19 +52270,19 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_WW][2][44] = 54, [2][1][RTW89_WW][0][45] = -16, [2][1][RTW89_WW][1][45] = -16, - [2][1][RTW89_WW][2][45] = 0, + [2][1][RTW89_WW][2][45] = 56, [2][1][RTW89_WW][0][47] = -16, [2][1][RTW89_WW][1][47] = -16, - [2][1][RTW89_WW][2][47] = 0, + [2][1][RTW89_WW][2][47] = 56, [2][1][RTW89_WW][0][49] = -16, [2][1][RTW89_WW][1][49] = -16, - [2][1][RTW89_WW][2][49] = 0, + [2][1][RTW89_WW][2][49] = 56, [2][1][RTW89_WW][0][51] = -16, [2][1][RTW89_WW][1][51] = -16, - [2][1][RTW89_WW][2][51] = 0, + [2][1][RTW89_WW][2][51] = 56, [2][1][RTW89_WW][0][53] = -16, [2][1][RTW89_WW][1][53] = -16, - [2][1][RTW89_WW][2][53] = 0, + [2][1][RTW89_WW][2][53] = 56, [2][1][RTW89_WW][0][55] = -16, [2][1][RTW89_WW][1][55] = -16, [2][1][RTW89_WW][2][55] = 54, @@ -50254,6 +52395,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][0] = 30, [0][0][RTW89_MKK][0][0] = -8, [0][0][RTW89_IC][1][0] = -16, + [0][0][RTW89_IC][2][0] = 44, [0][0][RTW89_KCC][1][0] = -2, [0][0][RTW89_KCC][0][0] = -2, [0][0][RTW89_ACMA][1][0] = 32, @@ -50263,6 +52405,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][0] = -8, [0][0][RTW89_UK][1][0] = 32, [0][0][RTW89_UK][0][0] = -8, + [0][0][RTW89_THAILAND][1][0] = 30, + [0][0][RTW89_THAILAND][0][0] = -16, [0][0][RTW89_FCC][1][2] = -18, [0][0][RTW89_FCC][2][2] = 44, [0][0][RTW89_ETSI][1][2] = 32, @@ -50270,6 +52414,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][2] = 30, [0][0][RTW89_MKK][0][2] = -8, [0][0][RTW89_IC][1][2] = -18, + [0][0][RTW89_IC][2][2] = 44, [0][0][RTW89_KCC][1][2] = -2, [0][0][RTW89_KCC][0][2] = -2, [0][0][RTW89_ACMA][1][2] = 32, @@ -50279,6 +52424,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][2] = -8, [0][0][RTW89_UK][1][2] = 32, [0][0][RTW89_UK][0][2] = -8, + [0][0][RTW89_THAILAND][1][2] = 30, + [0][0][RTW89_THAILAND][0][2] = -18, [0][0][RTW89_FCC][1][4] = -18, [0][0][RTW89_FCC][2][4] = 44, [0][0][RTW89_ETSI][1][4] = 32, @@ -50286,6 +52433,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][4] = 30, [0][0][RTW89_MKK][0][4] = -8, [0][0][RTW89_IC][1][4] = -18, + [0][0][RTW89_IC][2][4] = 44, [0][0][RTW89_KCC][1][4] = -2, [0][0][RTW89_KCC][0][4] = -2, [0][0][RTW89_ACMA][1][4] = 32, @@ -50295,6 +52443,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][4] = -8, [0][0][RTW89_UK][1][4] = 32, [0][0][RTW89_UK][0][4] = -8, + [0][0][RTW89_THAILAND][1][4] = 30, + [0][0][RTW89_THAILAND][0][4] = -18, [0][0][RTW89_FCC][1][6] = -18, [0][0][RTW89_FCC][2][6] = 44, [0][0][RTW89_ETSI][1][6] = 32, @@ -50302,6 +52452,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][6] = 30, [0][0][RTW89_MKK][0][6] = -8, [0][0][RTW89_IC][1][6] = -18, + [0][0][RTW89_IC][2][6] = 44, [0][0][RTW89_KCC][1][6] = -2, [0][0][RTW89_KCC][0][6] = -2, 
[0][0][RTW89_ACMA][1][6] = 32, @@ -50311,6 +52462,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][6] = -8, [0][0][RTW89_UK][1][6] = 32, [0][0][RTW89_UK][0][6] = -8, + [0][0][RTW89_THAILAND][1][6] = 30, + [0][0][RTW89_THAILAND][0][6] = -18, [0][0][RTW89_FCC][1][8] = -18, [0][0][RTW89_FCC][2][8] = 44, [0][0][RTW89_ETSI][1][8] = 32, @@ -50318,6 +52471,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][8] = 30, [0][0][RTW89_MKK][0][8] = -8, [0][0][RTW89_IC][1][8] = -18, + [0][0][RTW89_IC][2][8] = 44, [0][0][RTW89_KCC][1][8] = -2, [0][0][RTW89_KCC][0][8] = -2, [0][0][RTW89_ACMA][1][8] = 32, @@ -50327,6 +52481,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][8] = -8, [0][0][RTW89_UK][1][8] = 32, [0][0][RTW89_UK][0][8] = -8, + [0][0][RTW89_THAILAND][1][8] = 30, + [0][0][RTW89_THAILAND][0][8] = -18, [0][0][RTW89_FCC][1][10] = -18, [0][0][RTW89_FCC][2][10] = 44, [0][0][RTW89_ETSI][1][10] = 32, @@ -50334,6 +52490,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][10] = 30, [0][0][RTW89_MKK][0][10] = -8, [0][0][RTW89_IC][1][10] = -18, + [0][0][RTW89_IC][2][10] = 44, [0][0][RTW89_KCC][1][10] = -2, [0][0][RTW89_KCC][0][10] = -2, [0][0][RTW89_ACMA][1][10] = 32, @@ -50343,6 +52500,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][10] = -8, [0][0][RTW89_UK][1][10] = 32, [0][0][RTW89_UK][0][10] = -8, + [0][0][RTW89_THAILAND][1][10] = 30, + [0][0][RTW89_THAILAND][0][10] = -18, [0][0][RTW89_FCC][1][12] = -18, [0][0][RTW89_FCC][2][12] = 44, [0][0][RTW89_ETSI][1][12] = 32, @@ -50350,6 +52509,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][12] = 30, [0][0][RTW89_MKK][0][12] = -8, [0][0][RTW89_IC][1][12] = -18, + [0][0][RTW89_IC][2][12] = 44, [0][0][RTW89_KCC][1][12] = -2, [0][0][RTW89_KCC][0][12] = -2, [0][0][RTW89_ACMA][1][12] = 32, @@ -50359,6 +52519,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][12] = -8, [0][0][RTW89_UK][1][12] = 32, [0][0][RTW89_UK][0][12] = -8, + [0][0][RTW89_THAILAND][1][12] = 30, + [0][0][RTW89_THAILAND][0][12] = -18, [0][0][RTW89_FCC][1][14] = -18, [0][0][RTW89_FCC][2][14] = 44, [0][0][RTW89_ETSI][1][14] = 32, @@ -50366,6 +52528,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][14] = 30, [0][0][RTW89_MKK][0][14] = -8, [0][0][RTW89_IC][1][14] = -18, + [0][0][RTW89_IC][2][14] = 44, [0][0][RTW89_KCC][1][14] = -2, [0][0][RTW89_KCC][0][14] = -2, [0][0][RTW89_ACMA][1][14] = 32, @@ -50375,6 +52538,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][14] = -8, [0][0][RTW89_UK][1][14] = 32, [0][0][RTW89_UK][0][14] = -8, + [0][0][RTW89_THAILAND][1][14] = 30, + [0][0][RTW89_THAILAND][0][14] = -18, [0][0][RTW89_FCC][1][15] = -18, [0][0][RTW89_FCC][2][15] = 44, [0][0][RTW89_ETSI][1][15] = 32, @@ -50382,6 +52547,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][15] = 30, [0][0][RTW89_MKK][0][15] = -8, [0][0][RTW89_IC][1][15] = -18, + [0][0][RTW89_IC][2][15] = 44, [0][0][RTW89_KCC][1][15] = -2, [0][0][RTW89_KCC][0][15] = -2, [0][0][RTW89_ACMA][1][15] = 32, @@ -50391,6 +52557,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][15] = -8, [0][0][RTW89_UK][1][15] = 32, [0][0][RTW89_UK][0][15] = -8, + [0][0][RTW89_THAILAND][1][15] = 30, + 
[0][0][RTW89_THAILAND][0][15] = -18, [0][0][RTW89_FCC][1][17] = -18, [0][0][RTW89_FCC][2][17] = 44, [0][0][RTW89_ETSI][1][17] = 32, @@ -50398,6 +52566,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][17] = 30, [0][0][RTW89_MKK][0][17] = -8, [0][0][RTW89_IC][1][17] = -18, + [0][0][RTW89_IC][2][17] = 44, [0][0][RTW89_KCC][1][17] = -2, [0][0][RTW89_KCC][0][17] = -2, [0][0][RTW89_ACMA][1][17] = 32, @@ -50407,6 +52576,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][17] = -8, [0][0][RTW89_UK][1][17] = 32, [0][0][RTW89_UK][0][17] = -8, + [0][0][RTW89_THAILAND][1][17] = 30, + [0][0][RTW89_THAILAND][0][17] = -18, [0][0][RTW89_FCC][1][19] = -18, [0][0][RTW89_FCC][2][19] = 44, [0][0][RTW89_ETSI][1][19] = 32, @@ -50414,6 +52585,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][19] = 30, [0][0][RTW89_MKK][0][19] = -8, [0][0][RTW89_IC][1][19] = -18, + [0][0][RTW89_IC][2][19] = 44, [0][0][RTW89_KCC][1][19] = -2, [0][0][RTW89_KCC][0][19] = -2, [0][0][RTW89_ACMA][1][19] = 32, @@ -50423,6 +52595,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][19] = -8, [0][0][RTW89_UK][1][19] = 32, [0][0][RTW89_UK][0][19] = -8, + [0][0][RTW89_THAILAND][1][19] = 30, + [0][0][RTW89_THAILAND][0][19] = -18, [0][0][RTW89_FCC][1][21] = -18, [0][0][RTW89_FCC][2][21] = 44, [0][0][RTW89_ETSI][1][21] = 32, @@ -50430,6 +52604,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][21] = 30, [0][0][RTW89_MKK][0][21] = -8, [0][0][RTW89_IC][1][21] = -18, + [0][0][RTW89_IC][2][21] = 44, [0][0][RTW89_KCC][1][21] = -2, [0][0][RTW89_KCC][0][21] = -2, [0][0][RTW89_ACMA][1][21] = 32, @@ -50439,6 +52614,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][21] = -8, [0][0][RTW89_UK][1][21] = 32, [0][0][RTW89_UK][0][21] = -8, + [0][0][RTW89_THAILAND][1][21] = 30, + [0][0][RTW89_THAILAND][0][21] = -18, [0][0][RTW89_FCC][1][23] = -18, [0][0][RTW89_FCC][2][23] = 54, [0][0][RTW89_ETSI][1][23] = 32, @@ -50446,6 +52623,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][23] = 30, [0][0][RTW89_MKK][0][23] = -8, [0][0][RTW89_IC][1][23] = -18, + [0][0][RTW89_IC][2][23] = 54, [0][0][RTW89_KCC][1][23] = -2, [0][0][RTW89_KCC][0][23] = -2, [0][0][RTW89_ACMA][1][23] = 32, @@ -50455,6 +52633,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][23] = -8, [0][0][RTW89_UK][1][23] = 32, [0][0][RTW89_UK][0][23] = -8, + [0][0][RTW89_THAILAND][1][23] = 30, + [0][0][RTW89_THAILAND][0][23] = -18, [0][0][RTW89_FCC][1][25] = -18, [0][0][RTW89_FCC][2][25] = 54, [0][0][RTW89_ETSI][1][25] = 32, @@ -50462,6 +52642,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][25] = 30, [0][0][RTW89_MKK][0][25] = -8, [0][0][RTW89_IC][1][25] = -18, + [0][0][RTW89_IC][2][25] = 54, [0][0][RTW89_KCC][1][25] = -2, [0][0][RTW89_KCC][0][25] = -2, [0][0][RTW89_ACMA][1][25] = 32, @@ -50471,6 +52652,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][25] = -8, [0][0][RTW89_UK][1][25] = 32, [0][0][RTW89_UK][0][25] = -8, + [0][0][RTW89_THAILAND][1][25] = 30, + [0][0][RTW89_THAILAND][0][25] = -18, [0][0][RTW89_FCC][1][27] = -18, [0][0][RTW89_FCC][2][27] = 54, [0][0][RTW89_ETSI][1][27] = 32, @@ -50478,6 +52661,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] 
[0][0][RTW89_MKK][1][27] = 30, [0][0][RTW89_MKK][0][27] = -8, [0][0][RTW89_IC][1][27] = -18, + [0][0][RTW89_IC][2][27] = 54, [0][0][RTW89_KCC][1][27] = -2, [0][0][RTW89_KCC][0][27] = -2, [0][0][RTW89_ACMA][1][27] = 32, @@ -50487,6 +52671,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][27] = -8, [0][0][RTW89_UK][1][27] = 32, [0][0][RTW89_UK][0][27] = -8, + [0][0][RTW89_THAILAND][1][27] = 30, + [0][0][RTW89_THAILAND][0][27] = -18, [0][0][RTW89_FCC][1][29] = -18, [0][0][RTW89_FCC][2][29] = 54, [0][0][RTW89_ETSI][1][29] = 32, @@ -50494,6 +52680,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][29] = 30, [0][0][RTW89_MKK][0][29] = -8, [0][0][RTW89_IC][1][29] = -18, + [0][0][RTW89_IC][2][29] = 54, [0][0][RTW89_KCC][1][29] = -2, [0][0][RTW89_KCC][0][29] = -2, [0][0][RTW89_ACMA][1][29] = 32, @@ -50503,6 +52690,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][29] = -8, [0][0][RTW89_UK][1][29] = 32, [0][0][RTW89_UK][0][29] = -8, + [0][0][RTW89_THAILAND][1][29] = 30, + [0][0][RTW89_THAILAND][0][29] = -18, [0][0][RTW89_FCC][1][30] = -18, [0][0][RTW89_FCC][2][30] = 54, [0][0][RTW89_ETSI][1][30] = 32, @@ -50510,6 +52699,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][30] = 30, [0][0][RTW89_MKK][0][30] = -8, [0][0][RTW89_IC][1][30] = -18, + [0][0][RTW89_IC][2][30] = 54, [0][0][RTW89_KCC][1][30] = -2, [0][0][RTW89_KCC][0][30] = -2, [0][0][RTW89_ACMA][1][30] = 32, @@ -50519,6 +52709,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][30] = -8, [0][0][RTW89_UK][1][30] = 32, [0][0][RTW89_UK][0][30] = -8, + [0][0][RTW89_THAILAND][1][30] = 30, + [0][0][RTW89_THAILAND][0][30] = -18, [0][0][RTW89_FCC][1][32] = -18, [0][0][RTW89_FCC][2][32] = 54, [0][0][RTW89_ETSI][1][32] = 32, @@ -50526,6 +52718,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][32] = 30, [0][0][RTW89_MKK][0][32] = -8, [0][0][RTW89_IC][1][32] = -18, + [0][0][RTW89_IC][2][32] = 54, [0][0][RTW89_KCC][1][32] = -2, [0][0][RTW89_KCC][0][32] = -2, [0][0][RTW89_ACMA][1][32] = 32, @@ -50535,6 +52728,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][32] = -8, [0][0][RTW89_UK][1][32] = 32, [0][0][RTW89_UK][0][32] = -8, + [0][0][RTW89_THAILAND][1][32] = 30, + [0][0][RTW89_THAILAND][0][32] = -18, [0][0][RTW89_FCC][1][34] = -18, [0][0][RTW89_FCC][2][34] = 54, [0][0][RTW89_ETSI][1][34] = 32, @@ -50542,6 +52737,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][34] = 30, [0][0][RTW89_MKK][0][34] = -8, [0][0][RTW89_IC][1][34] = -18, + [0][0][RTW89_IC][2][34] = 54, [0][0][RTW89_KCC][1][34] = -2, [0][0][RTW89_KCC][0][34] = -2, [0][0][RTW89_ACMA][1][34] = 32, @@ -50551,6 +52747,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][34] = -8, [0][0][RTW89_UK][1][34] = 32, [0][0][RTW89_UK][0][34] = -8, + [0][0][RTW89_THAILAND][1][34] = 30, + [0][0][RTW89_THAILAND][0][34] = -18, [0][0][RTW89_FCC][1][36] = -18, [0][0][RTW89_FCC][2][36] = 54, [0][0][RTW89_ETSI][1][36] = 32, @@ -50558,6 +52756,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][36] = 30, [0][0][RTW89_MKK][0][36] = -8, [0][0][RTW89_IC][1][36] = -18, + [0][0][RTW89_IC][2][36] = 54, [0][0][RTW89_KCC][1][36] = -2, [0][0][RTW89_KCC][0][36] = -2, [0][0][RTW89_ACMA][1][36] = 32, @@ -50567,6 
+52766,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][36] = -8, [0][0][RTW89_UK][1][36] = 32, [0][0][RTW89_UK][0][36] = -8, + [0][0][RTW89_THAILAND][1][36] = 30, + [0][0][RTW89_THAILAND][0][36] = -18, [0][0][RTW89_FCC][1][38] = -18, [0][0][RTW89_FCC][2][38] = 54, [0][0][RTW89_ETSI][1][38] = 32, @@ -50574,6 +52775,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][38] = 30, [0][0][RTW89_MKK][0][38] = -8, [0][0][RTW89_IC][1][38] = -18, + [0][0][RTW89_IC][2][38] = 54, [0][0][RTW89_KCC][1][38] = -2, [0][0][RTW89_KCC][0][38] = -2, [0][0][RTW89_ACMA][1][38] = 32, @@ -50583,6 +52785,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][38] = -8, [0][0][RTW89_UK][1][38] = 32, [0][0][RTW89_UK][0][38] = -8, + [0][0][RTW89_THAILAND][1][38] = 30, + [0][0][RTW89_THAILAND][0][38] = -18, [0][0][RTW89_FCC][1][40] = -18, [0][0][RTW89_FCC][2][40] = 54, [0][0][RTW89_ETSI][1][40] = 32, @@ -50590,6 +52794,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][40] = 30, [0][0][RTW89_MKK][0][40] = -8, [0][0][RTW89_IC][1][40] = -18, + [0][0][RTW89_IC][2][40] = 54, [0][0][RTW89_KCC][1][40] = -2, [0][0][RTW89_KCC][0][40] = -2, [0][0][RTW89_ACMA][1][40] = 32, @@ -50599,6 +52804,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][40] = -8, [0][0][RTW89_UK][1][40] = 32, [0][0][RTW89_UK][0][40] = -8, + [0][0][RTW89_THAILAND][1][40] = 30, + [0][0][RTW89_THAILAND][0][40] = -18, [0][0][RTW89_FCC][1][42] = -18, [0][0][RTW89_FCC][2][42] = 54, [0][0][RTW89_ETSI][1][42] = 32, @@ -50606,6 +52813,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][42] = 30, [0][0][RTW89_MKK][0][42] = -8, [0][0][RTW89_IC][1][42] = -18, + [0][0][RTW89_IC][2][42] = 54, [0][0][RTW89_KCC][1][42] = -2, [0][0][RTW89_KCC][0][42] = -2, [0][0][RTW89_ACMA][1][42] = 32, @@ -50615,6 +52823,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][42] = -8, [0][0][RTW89_UK][1][42] = 32, [0][0][RTW89_UK][0][42] = -8, + [0][0][RTW89_THAILAND][1][42] = 30, + [0][0][RTW89_THAILAND][0][42] = -18, [0][0][RTW89_FCC][1][44] = -16, [0][0][RTW89_FCC][2][44] = 56, [0][0][RTW89_ETSI][1][44] = 32, @@ -50622,6 +52832,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][44] = 8, [0][0][RTW89_MKK][0][44] = -10, [0][0][RTW89_IC][1][44] = -16, + [0][0][RTW89_IC][2][44] = 56, [0][0][RTW89_KCC][1][44] = -2, [0][0][RTW89_KCC][0][44] = -2, [0][0][RTW89_ACMA][1][44] = 32, @@ -50631,6 +52842,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][44] = -6, [0][0][RTW89_UK][1][44] = 32, [0][0][RTW89_UK][0][44] = -6, + [0][0][RTW89_THAILAND][1][44] = 30, + [0][0][RTW89_THAILAND][0][44] = -16, [0][0][RTW89_FCC][1][45] = -16, [0][0][RTW89_FCC][2][45] = 127, [0][0][RTW89_ETSI][1][45] = 127, @@ -50638,6 +52851,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][45] = 127, [0][0][RTW89_MKK][0][45] = 127, [0][0][RTW89_IC][1][45] = -16, + [0][0][RTW89_IC][2][45] = 56, [0][0][RTW89_KCC][1][45] = -2, [0][0][RTW89_KCC][0][45] = 127, [0][0][RTW89_ACMA][1][45] = 127, @@ -50647,6 +52861,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][45] = 127, [0][0][RTW89_UK][1][45] = 127, [0][0][RTW89_UK][0][45] = 127, + [0][0][RTW89_THAILAND][1][45] = 127, + 
[0][0][RTW89_THAILAND][0][45] = 127, [0][0][RTW89_FCC][1][47] = -18, [0][0][RTW89_FCC][2][47] = 127, [0][0][RTW89_ETSI][1][47] = 127, @@ -50654,6 +52870,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][47] = 127, [0][0][RTW89_MKK][0][47] = 127, [0][0][RTW89_IC][1][47] = -18, + [0][0][RTW89_IC][2][47] = 56, [0][0][RTW89_KCC][1][47] = -2, [0][0][RTW89_KCC][0][47] = 127, [0][0][RTW89_ACMA][1][47] = 127, @@ -50663,6 +52880,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][47] = 127, [0][0][RTW89_UK][1][47] = 127, [0][0][RTW89_UK][0][47] = 127, + [0][0][RTW89_THAILAND][1][47] = 127, + [0][0][RTW89_THAILAND][0][47] = 127, [0][0][RTW89_FCC][1][49] = -18, [0][0][RTW89_FCC][2][49] = 127, [0][0][RTW89_ETSI][1][49] = 127, @@ -50670,6 +52889,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][49] = 127, [0][0][RTW89_MKK][0][49] = 127, [0][0][RTW89_IC][1][49] = -18, + [0][0][RTW89_IC][2][49] = 56, [0][0][RTW89_KCC][1][49] = -2, [0][0][RTW89_KCC][0][49] = 127, [0][0][RTW89_ACMA][1][49] = 127, @@ -50679,6 +52899,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][49] = 127, [0][0][RTW89_UK][1][49] = 127, [0][0][RTW89_UK][0][49] = 127, + [0][0][RTW89_THAILAND][1][49] = 127, + [0][0][RTW89_THAILAND][0][49] = 127, [0][0][RTW89_FCC][1][51] = -18, [0][0][RTW89_FCC][2][51] = 127, [0][0][RTW89_ETSI][1][51] = 127, @@ -50686,6 +52908,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][51] = 127, [0][0][RTW89_MKK][0][51] = 127, [0][0][RTW89_IC][1][51] = -18, + [0][0][RTW89_IC][2][51] = 56, [0][0][RTW89_KCC][1][51] = -2, [0][0][RTW89_KCC][0][51] = 127, [0][0][RTW89_ACMA][1][51] = 127, @@ -50695,6 +52918,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][51] = 127, [0][0][RTW89_UK][1][51] = 127, [0][0][RTW89_UK][0][51] = 127, + [0][0][RTW89_THAILAND][1][51] = 127, + [0][0][RTW89_THAILAND][0][51] = 127, [0][0][RTW89_FCC][1][53] = -16, [0][0][RTW89_FCC][2][53] = 127, [0][0][RTW89_ETSI][1][53] = 127, @@ -50702,6 +52927,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][53] = 127, [0][0][RTW89_MKK][0][53] = 127, [0][0][RTW89_IC][1][53] = -16, + [0][0][RTW89_IC][2][53] = 56, [0][0][RTW89_KCC][1][53] = -2, [0][0][RTW89_KCC][0][53] = 127, [0][0][RTW89_ACMA][1][53] = 127, @@ -50711,6 +52937,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][53] = 127, [0][0][RTW89_UK][1][53] = 127, [0][0][RTW89_UK][0][53] = 127, + [0][0][RTW89_THAILAND][1][53] = 127, + [0][0][RTW89_THAILAND][0][53] = 127, [0][0][RTW89_FCC][1][55] = -18, [0][0][RTW89_FCC][2][55] = 56, [0][0][RTW89_ETSI][1][55] = 127, @@ -50718,6 +52946,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][55] = 127, [0][0][RTW89_MKK][0][55] = 127, [0][0][RTW89_IC][1][55] = -18, + [0][0][RTW89_IC][2][55] = 56, [0][0][RTW89_KCC][1][55] = -2, [0][0][RTW89_KCC][0][55] = 127, [0][0][RTW89_ACMA][1][55] = 127, @@ -50727,6 +52956,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][55] = 127, [0][0][RTW89_UK][1][55] = 127, [0][0][RTW89_UK][0][55] = 127, + [0][0][RTW89_THAILAND][1][55] = 127, + [0][0][RTW89_THAILAND][0][55] = 127, [0][0][RTW89_FCC][1][57] = -18, [0][0][RTW89_FCC][2][57] = 56, [0][0][RTW89_ETSI][1][57] = 127, @@ -50734,6 +52965,7 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][57] = 127, [0][0][RTW89_MKK][0][57] = 127, [0][0][RTW89_IC][1][57] = -18, + [0][0][RTW89_IC][2][57] = 56, [0][0][RTW89_KCC][1][57] = -2, [0][0][RTW89_KCC][0][57] = 127, [0][0][RTW89_ACMA][1][57] = 127, @@ -50743,6 +52975,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][57] = 127, [0][0][RTW89_UK][1][57] = 127, [0][0][RTW89_UK][0][57] = 127, + [0][0][RTW89_THAILAND][1][57] = 127, + [0][0][RTW89_THAILAND][0][57] = 127, [0][0][RTW89_FCC][1][59] = -18, [0][0][RTW89_FCC][2][59] = 56, [0][0][RTW89_ETSI][1][59] = 127, @@ -50750,6 +52984,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][59] = 127, [0][0][RTW89_MKK][0][59] = 127, [0][0][RTW89_IC][1][59] = -18, + [0][0][RTW89_IC][2][59] = 56, [0][0][RTW89_KCC][1][59] = -2, [0][0][RTW89_KCC][0][59] = 127, [0][0][RTW89_ACMA][1][59] = 127, @@ -50759,6 +52994,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][59] = 127, [0][0][RTW89_UK][1][59] = 127, [0][0][RTW89_UK][0][59] = 127, + [0][0][RTW89_THAILAND][1][59] = 127, + [0][0][RTW89_THAILAND][0][59] = 127, [0][0][RTW89_FCC][1][60] = -18, [0][0][RTW89_FCC][2][60] = 56, [0][0][RTW89_ETSI][1][60] = 127, @@ -50766,6 +53003,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][60] = 127, [0][0][RTW89_MKK][0][60] = 127, [0][0][RTW89_IC][1][60] = -18, + [0][0][RTW89_IC][2][60] = 56, [0][0][RTW89_KCC][1][60] = -2, [0][0][RTW89_KCC][0][60] = 127, [0][0][RTW89_ACMA][1][60] = 127, @@ -50775,6 +53013,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][60] = 127, [0][0][RTW89_UK][1][60] = 127, [0][0][RTW89_UK][0][60] = 127, + [0][0][RTW89_THAILAND][1][60] = 127, + [0][0][RTW89_THAILAND][0][60] = 127, [0][0][RTW89_FCC][1][62] = -18, [0][0][RTW89_FCC][2][62] = 56, [0][0][RTW89_ETSI][1][62] = 127, @@ -50782,6 +53022,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][62] = 127, [0][0][RTW89_MKK][0][62] = 127, [0][0][RTW89_IC][1][62] = -18, + [0][0][RTW89_IC][2][62] = 56, [0][0][RTW89_KCC][1][62] = -2, [0][0][RTW89_KCC][0][62] = 127, [0][0][RTW89_ACMA][1][62] = 127, @@ -50791,6 +53032,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][62] = 127, [0][0][RTW89_UK][1][62] = 127, [0][0][RTW89_UK][0][62] = 127, + [0][0][RTW89_THAILAND][1][62] = 127, + [0][0][RTW89_THAILAND][0][62] = 127, [0][0][RTW89_FCC][1][64] = -18, [0][0][RTW89_FCC][2][64] = 56, [0][0][RTW89_ETSI][1][64] = 127, @@ -50798,6 +53041,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][64] = 127, [0][0][RTW89_MKK][0][64] = 127, [0][0][RTW89_IC][1][64] = -18, + [0][0][RTW89_IC][2][64] = 56, [0][0][RTW89_KCC][1][64] = -2, [0][0][RTW89_KCC][0][64] = 127, [0][0][RTW89_ACMA][1][64] = 127, @@ -50807,6 +53051,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][64] = 127, [0][0][RTW89_UK][1][64] = 127, [0][0][RTW89_UK][0][64] = 127, + [0][0][RTW89_THAILAND][1][64] = 127, + [0][0][RTW89_THAILAND][0][64] = 127, [0][0][RTW89_FCC][1][66] = -18, [0][0][RTW89_FCC][2][66] = 56, [0][0][RTW89_ETSI][1][66] = 127, @@ -50814,6 +53060,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][66] = 127, [0][0][RTW89_MKK][0][66] = 127, [0][0][RTW89_IC][1][66] = -18, + [0][0][RTW89_IC][2][66] = 56, 
[0][0][RTW89_KCC][1][66] = -2, [0][0][RTW89_KCC][0][66] = 127, [0][0][RTW89_ACMA][1][66] = 127, @@ -50823,6 +53070,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][66] = 127, [0][0][RTW89_UK][1][66] = 127, [0][0][RTW89_UK][0][66] = 127, + [0][0][RTW89_THAILAND][1][66] = 127, + [0][0][RTW89_THAILAND][0][66] = 127, [0][0][RTW89_FCC][1][68] = -18, [0][0][RTW89_FCC][2][68] = 56, [0][0][RTW89_ETSI][1][68] = 127, @@ -50830,6 +53079,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][68] = 127, [0][0][RTW89_MKK][0][68] = 127, [0][0][RTW89_IC][1][68] = -18, + [0][0][RTW89_IC][2][68] = 56, [0][0][RTW89_KCC][1][68] = -2, [0][0][RTW89_KCC][0][68] = 127, [0][0][RTW89_ACMA][1][68] = 127, @@ -50839,6 +53089,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][68] = 127, [0][0][RTW89_UK][1][68] = 127, [0][0][RTW89_UK][0][68] = 127, + [0][0][RTW89_THAILAND][1][68] = 127, + [0][0][RTW89_THAILAND][0][68] = 127, [0][0][RTW89_FCC][1][70] = -16, [0][0][RTW89_FCC][2][70] = 56, [0][0][RTW89_ETSI][1][70] = 127, @@ -50846,6 +53098,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][70] = 127, [0][0][RTW89_MKK][0][70] = 127, [0][0][RTW89_IC][1][70] = -16, + [0][0][RTW89_IC][2][70] = 56, [0][0][RTW89_KCC][1][70] = -2, [0][0][RTW89_KCC][0][70] = 127, [0][0][RTW89_ACMA][1][70] = 127, @@ -50855,6 +53108,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][70] = 127, [0][0][RTW89_UK][1][70] = 127, [0][0][RTW89_UK][0][70] = 127, + [0][0][RTW89_THAILAND][1][70] = 127, + [0][0][RTW89_THAILAND][0][70] = 127, [0][0][RTW89_FCC][1][72] = -18, [0][0][RTW89_FCC][2][72] = 56, [0][0][RTW89_ETSI][1][72] = 127, @@ -50862,6 +53117,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][72] = 127, [0][0][RTW89_MKK][0][72] = 127, [0][0][RTW89_IC][1][72] = -18, + [0][0][RTW89_IC][2][72] = 56, [0][0][RTW89_KCC][1][72] = -2, [0][0][RTW89_KCC][0][72] = 127, [0][0][RTW89_ACMA][1][72] = 127, @@ -50871,6 +53127,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][72] = 127, [0][0][RTW89_UK][1][72] = 127, [0][0][RTW89_UK][0][72] = 127, + [0][0][RTW89_THAILAND][1][72] = 127, + [0][0][RTW89_THAILAND][0][72] = 127, [0][0][RTW89_FCC][1][74] = -18, [0][0][RTW89_FCC][2][74] = 56, [0][0][RTW89_ETSI][1][74] = 127, @@ -50878,6 +53136,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][74] = 127, [0][0][RTW89_MKK][0][74] = 127, [0][0][RTW89_IC][1][74] = -18, + [0][0][RTW89_IC][2][74] = 56, [0][0][RTW89_KCC][1][74] = -2, [0][0][RTW89_KCC][0][74] = 127, [0][0][RTW89_ACMA][1][74] = 127, @@ -50887,6 +53146,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][74] = 127, [0][0][RTW89_UK][1][74] = 127, [0][0][RTW89_UK][0][74] = 127, + [0][0][RTW89_THAILAND][1][74] = 127, + [0][0][RTW89_THAILAND][0][74] = 127, [0][0][RTW89_FCC][1][75] = -18, [0][0][RTW89_FCC][2][75] = 56, [0][0][RTW89_ETSI][1][75] = 127, @@ -50894,6 +53155,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][75] = 127, [0][0][RTW89_MKK][0][75] = 127, [0][0][RTW89_IC][1][75] = -18, + [0][0][RTW89_IC][2][75] = 56, [0][0][RTW89_KCC][1][75] = -2, [0][0][RTW89_KCC][0][75] = 127, [0][0][RTW89_ACMA][1][75] = 127, @@ -50903,6 +53165,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] 
[0][0][RTW89_QATAR][0][75] = 127, [0][0][RTW89_UK][1][75] = 127, [0][0][RTW89_UK][0][75] = 127, + [0][0][RTW89_THAILAND][1][75] = 127, + [0][0][RTW89_THAILAND][0][75] = 127, [0][0][RTW89_FCC][1][77] = -18, [0][0][RTW89_FCC][2][77] = 56, [0][0][RTW89_ETSI][1][77] = 127, @@ -50910,6 +53174,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][77] = 127, [0][0][RTW89_MKK][0][77] = 127, [0][0][RTW89_IC][1][77] = -18, + [0][0][RTW89_IC][2][77] = 56, [0][0][RTW89_KCC][1][77] = -2, [0][0][RTW89_KCC][0][77] = 127, [0][0][RTW89_ACMA][1][77] = 127, @@ -50919,6 +53184,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][77] = 127, [0][0][RTW89_UK][1][77] = 127, [0][0][RTW89_UK][0][77] = 127, + [0][0][RTW89_THAILAND][1][77] = 127, + [0][0][RTW89_THAILAND][0][77] = 127, [0][0][RTW89_FCC][1][79] = -18, [0][0][RTW89_FCC][2][79] = 56, [0][0][RTW89_ETSI][1][79] = 127, @@ -50926,6 +53193,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][79] = 127, [0][0][RTW89_MKK][0][79] = 127, [0][0][RTW89_IC][1][79] = -18, + [0][0][RTW89_IC][2][79] = 56, [0][0][RTW89_KCC][1][79] = -2, [0][0][RTW89_KCC][0][79] = 127, [0][0][RTW89_ACMA][1][79] = 127, @@ -50935,6 +53203,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][79] = 127, [0][0][RTW89_UK][1][79] = 127, [0][0][RTW89_UK][0][79] = 127, + [0][0][RTW89_THAILAND][1][79] = 127, + [0][0][RTW89_THAILAND][0][79] = 127, [0][0][RTW89_FCC][1][81] = -18, [0][0][RTW89_FCC][2][81] = 56, [0][0][RTW89_ETSI][1][81] = 127, @@ -50942,6 +53212,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][81] = 127, [0][0][RTW89_MKK][0][81] = 127, [0][0][RTW89_IC][1][81] = -18, + [0][0][RTW89_IC][2][81] = 56, [0][0][RTW89_KCC][1][81] = -2, [0][0][RTW89_KCC][0][81] = 127, [0][0][RTW89_ACMA][1][81] = 127, @@ -50951,6 +53222,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][81] = 127, [0][0][RTW89_UK][1][81] = 127, [0][0][RTW89_UK][0][81] = 127, + [0][0][RTW89_THAILAND][1][81] = 127, + [0][0][RTW89_THAILAND][0][81] = 127, [0][0][RTW89_FCC][1][83] = -18, [0][0][RTW89_FCC][2][83] = 56, [0][0][RTW89_ETSI][1][83] = 127, @@ -50958,6 +53231,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][83] = 127, [0][0][RTW89_MKK][0][83] = 127, [0][0][RTW89_IC][1][83] = -18, + [0][0][RTW89_IC][2][83] = 56, [0][0][RTW89_KCC][1][83] = -2, [0][0][RTW89_KCC][0][83] = 127, [0][0][RTW89_ACMA][1][83] = 127, @@ -50967,6 +53241,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][83] = 127, [0][0][RTW89_UK][1][83] = 127, [0][0][RTW89_UK][0][83] = 127, + [0][0][RTW89_THAILAND][1][83] = 127, + [0][0][RTW89_THAILAND][0][83] = 127, [0][0][RTW89_FCC][1][85] = -18, [0][0][RTW89_FCC][2][85] = 56, [0][0][RTW89_ETSI][1][85] = 127, @@ -50974,6 +53250,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][85] = 127, [0][0][RTW89_MKK][0][85] = 127, [0][0][RTW89_IC][1][85] = -18, + [0][0][RTW89_IC][2][85] = 56, [0][0][RTW89_KCC][1][85] = -2, [0][0][RTW89_KCC][0][85] = 127, [0][0][RTW89_ACMA][1][85] = 127, @@ -50983,6 +53260,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][85] = 127, [0][0][RTW89_UK][1][85] = 127, [0][0][RTW89_UK][0][85] = 127, + [0][0][RTW89_THAILAND][1][85] = 127, + [0][0][RTW89_THAILAND][0][85] = 127, 
[0][0][RTW89_FCC][1][87] = -16, [0][0][RTW89_FCC][2][87] = 127, [0][0][RTW89_ETSI][1][87] = 127, @@ -50990,6 +53269,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][87] = 127, [0][0][RTW89_MKK][0][87] = 127, [0][0][RTW89_IC][1][87] = -16, + [0][0][RTW89_IC][2][87] = 127, [0][0][RTW89_KCC][1][87] = -2, [0][0][RTW89_KCC][0][87] = 127, [0][0][RTW89_ACMA][1][87] = 127, @@ -50999,6 +53279,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][87] = 127, [0][0][RTW89_UK][1][87] = 127, [0][0][RTW89_UK][0][87] = 127, + [0][0][RTW89_THAILAND][1][87] = 127, + [0][0][RTW89_THAILAND][0][87] = 127, [0][0][RTW89_FCC][1][89] = -16, [0][0][RTW89_FCC][2][89] = 127, [0][0][RTW89_ETSI][1][89] = 127, @@ -51006,6 +53288,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][89] = 127, [0][0][RTW89_MKK][0][89] = 127, [0][0][RTW89_IC][1][89] = -16, + [0][0][RTW89_IC][2][89] = 127, [0][0][RTW89_KCC][1][89] = -2, [0][0][RTW89_KCC][0][89] = 127, [0][0][RTW89_ACMA][1][89] = 127, @@ -51015,6 +53298,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][89] = 127, [0][0][RTW89_UK][1][89] = 127, [0][0][RTW89_UK][0][89] = 127, + [0][0][RTW89_THAILAND][1][89] = 127, + [0][0][RTW89_THAILAND][0][89] = 127, [0][0][RTW89_FCC][1][90] = -16, [0][0][RTW89_FCC][2][90] = 127, [0][0][RTW89_ETSI][1][90] = 127, @@ -51022,6 +53307,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][90] = 127, [0][0][RTW89_MKK][0][90] = 127, [0][0][RTW89_IC][1][90] = -16, + [0][0][RTW89_IC][2][90] = 127, [0][0][RTW89_KCC][1][90] = -2, [0][0][RTW89_KCC][0][90] = 127, [0][0][RTW89_ACMA][1][90] = 127, @@ -51031,6 +53317,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][90] = 127, [0][0][RTW89_UK][1][90] = 127, [0][0][RTW89_UK][0][90] = 127, + [0][0][RTW89_THAILAND][1][90] = 127, + [0][0][RTW89_THAILAND][0][90] = 127, [0][0][RTW89_FCC][1][92] = -16, [0][0][RTW89_FCC][2][92] = 127, [0][0][RTW89_ETSI][1][92] = 127, @@ -51038,6 +53326,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][92] = 127, [0][0][RTW89_MKK][0][92] = 127, [0][0][RTW89_IC][1][92] = -16, + [0][0][RTW89_IC][2][92] = 127, [0][0][RTW89_KCC][1][92] = -2, [0][0][RTW89_KCC][0][92] = 127, [0][0][RTW89_ACMA][1][92] = 127, @@ -51047,6 +53336,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][92] = 127, [0][0][RTW89_UK][1][92] = 127, [0][0][RTW89_UK][0][92] = 127, + [0][0][RTW89_THAILAND][1][92] = 127, + [0][0][RTW89_THAILAND][0][92] = 127, [0][0][RTW89_FCC][1][94] = -16, [0][0][RTW89_FCC][2][94] = 127, [0][0][RTW89_ETSI][1][94] = 127, @@ -51054,6 +53345,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][94] = 127, [0][0][RTW89_MKK][0][94] = 127, [0][0][RTW89_IC][1][94] = -16, + [0][0][RTW89_IC][2][94] = 127, [0][0][RTW89_KCC][1][94] = -2, [0][0][RTW89_KCC][0][94] = 127, [0][0][RTW89_ACMA][1][94] = 127, @@ -51063,6 +53355,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][94] = 127, [0][0][RTW89_UK][1][94] = 127, [0][0][RTW89_UK][0][94] = 127, + [0][0][RTW89_THAILAND][1][94] = 127, + [0][0][RTW89_THAILAND][0][94] = 127, [0][0][RTW89_FCC][1][96] = -16, [0][0][RTW89_FCC][2][96] = 127, [0][0][RTW89_ETSI][1][96] = 127, @@ -51070,6 +53364,7 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][96] = 127, [0][0][RTW89_MKK][0][96] = 127, [0][0][RTW89_IC][1][96] = -16, + [0][0][RTW89_IC][2][96] = 127, [0][0][RTW89_KCC][1][96] = -2, [0][0][RTW89_KCC][0][96] = 127, [0][0][RTW89_ACMA][1][96] = 127, @@ -51079,6 +53374,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][96] = 127, [0][0][RTW89_UK][1][96] = 127, [0][0][RTW89_UK][0][96] = 127, + [0][0][RTW89_THAILAND][1][96] = 127, + [0][0][RTW89_THAILAND][0][96] = 127, [0][0][RTW89_FCC][1][98] = -16, [0][0][RTW89_FCC][2][98] = 127, [0][0][RTW89_ETSI][1][98] = 127, @@ -51086,6 +53383,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][98] = 127, [0][0][RTW89_MKK][0][98] = 127, [0][0][RTW89_IC][1][98] = -16, + [0][0][RTW89_IC][2][98] = 127, [0][0][RTW89_KCC][1][98] = -2, [0][0][RTW89_KCC][0][98] = 127, [0][0][RTW89_ACMA][1][98] = 127, @@ -51095,6 +53393,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][98] = 127, [0][0][RTW89_UK][1][98] = 127, [0][0][RTW89_UK][0][98] = 127, + [0][0][RTW89_THAILAND][1][98] = 127, + [0][0][RTW89_THAILAND][0][98] = 127, [0][0][RTW89_FCC][1][100] = -16, [0][0][RTW89_FCC][2][100] = 127, [0][0][RTW89_ETSI][1][100] = 127, @@ -51102,6 +53402,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][100] = 127, [0][0][RTW89_MKK][0][100] = 127, [0][0][RTW89_IC][1][100] = -16, + [0][0][RTW89_IC][2][100] = 127, [0][0][RTW89_KCC][1][100] = -2, [0][0][RTW89_KCC][0][100] = 127, [0][0][RTW89_ACMA][1][100] = 127, @@ -51111,6 +53412,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][100] = 127, [0][0][RTW89_UK][1][100] = 127, [0][0][RTW89_UK][0][100] = 127, + [0][0][RTW89_THAILAND][1][100] = 127, + [0][0][RTW89_THAILAND][0][100] = 127, [0][0][RTW89_FCC][1][102] = -16, [0][0][RTW89_FCC][2][102] = 127, [0][0][RTW89_ETSI][1][102] = 127, @@ -51118,6 +53421,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][102] = 127, [0][0][RTW89_MKK][0][102] = 127, [0][0][RTW89_IC][1][102] = -16, + [0][0][RTW89_IC][2][102] = 127, [0][0][RTW89_KCC][1][102] = -2, [0][0][RTW89_KCC][0][102] = 127, [0][0][RTW89_ACMA][1][102] = 127, @@ -51127,6 +53431,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][102] = 127, [0][0][RTW89_UK][1][102] = 127, [0][0][RTW89_UK][0][102] = 127, + [0][0][RTW89_THAILAND][1][102] = 127, + [0][0][RTW89_THAILAND][0][102] = 127, [0][0][RTW89_FCC][1][104] = -16, [0][0][RTW89_FCC][2][104] = 127, [0][0][RTW89_ETSI][1][104] = 127, @@ -51134,6 +53440,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][104] = 127, [0][0][RTW89_MKK][0][104] = 127, [0][0][RTW89_IC][1][104] = -16, + [0][0][RTW89_IC][2][104] = 127, [0][0][RTW89_KCC][1][104] = -2, [0][0][RTW89_KCC][0][104] = 127, [0][0][RTW89_ACMA][1][104] = 127, @@ -51143,6 +53450,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][104] = 127, [0][0][RTW89_UK][1][104] = 127, [0][0][RTW89_UK][0][104] = 127, + [0][0][RTW89_THAILAND][1][104] = 127, + [0][0][RTW89_THAILAND][0][104] = 127, [0][0][RTW89_FCC][1][105] = -16, [0][0][RTW89_FCC][2][105] = 127, [0][0][RTW89_ETSI][1][105] = 127, @@ -51150,6 +53459,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][105] = 127, [0][0][RTW89_MKK][0][105] = 127, 
[0][0][RTW89_IC][1][105] = -16, + [0][0][RTW89_IC][2][105] = 127, [0][0][RTW89_KCC][1][105] = -2, [0][0][RTW89_KCC][0][105] = 127, [0][0][RTW89_ACMA][1][105] = 127, @@ -51159,6 +53469,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][105] = 127, [0][0][RTW89_UK][1][105] = 127, [0][0][RTW89_UK][0][105] = 127, + [0][0][RTW89_THAILAND][1][105] = 127, + [0][0][RTW89_THAILAND][0][105] = 127, [0][0][RTW89_FCC][1][107] = -12, [0][0][RTW89_FCC][2][107] = 127, [0][0][RTW89_ETSI][1][107] = 127, @@ -51166,6 +53478,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][107] = 127, [0][0][RTW89_MKK][0][107] = 127, [0][0][RTW89_IC][1][107] = -12, + [0][0][RTW89_IC][2][107] = 127, [0][0][RTW89_KCC][1][107] = -2, [0][0][RTW89_KCC][0][107] = 127, [0][0][RTW89_ACMA][1][107] = 127, @@ -51175,6 +53488,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][107] = 127, [0][0][RTW89_UK][1][107] = 127, [0][0][RTW89_UK][0][107] = 127, + [0][0][RTW89_THAILAND][1][107] = 127, + [0][0][RTW89_THAILAND][0][107] = 127, [0][0][RTW89_FCC][1][109] = -12, [0][0][RTW89_FCC][2][109] = 127, [0][0][RTW89_ETSI][1][109] = 127, @@ -51182,6 +53497,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][109] = 127, [0][0][RTW89_MKK][0][109] = 127, [0][0][RTW89_IC][1][109] = -12, + [0][0][RTW89_IC][2][109] = 127, [0][0][RTW89_KCC][1][109] = 127, [0][0][RTW89_KCC][0][109] = 127, [0][0][RTW89_ACMA][1][109] = 127, @@ -51191,6 +53507,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][109] = 127, [0][0][RTW89_UK][1][109] = 127, [0][0][RTW89_UK][0][109] = 127, + [0][0][RTW89_THAILAND][1][109] = 127, + [0][0][RTW89_THAILAND][0][109] = 127, [0][0][RTW89_FCC][1][111] = 127, [0][0][RTW89_FCC][2][111] = 127, [0][0][RTW89_ETSI][1][111] = 127, @@ -51198,6 +53516,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][111] = 127, [0][0][RTW89_MKK][0][111] = 127, [0][0][RTW89_IC][1][111] = 127, + [0][0][RTW89_IC][2][111] = 127, [0][0][RTW89_KCC][1][111] = 127, [0][0][RTW89_KCC][0][111] = 127, [0][0][RTW89_ACMA][1][111] = 127, @@ -51207,6 +53526,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][111] = 127, [0][0][RTW89_UK][1][111] = 127, [0][0][RTW89_UK][0][111] = 127, + [0][0][RTW89_THAILAND][1][111] = 127, + [0][0][RTW89_THAILAND][0][111] = 127, [0][0][RTW89_FCC][1][113] = 127, [0][0][RTW89_FCC][2][113] = 127, [0][0][RTW89_ETSI][1][113] = 127, @@ -51214,6 +53535,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][113] = 127, [0][0][RTW89_MKK][0][113] = 127, [0][0][RTW89_IC][1][113] = 127, + [0][0][RTW89_IC][2][113] = 127, [0][0][RTW89_KCC][1][113] = 127, [0][0][RTW89_KCC][0][113] = 127, [0][0][RTW89_ACMA][1][113] = 127, @@ -51223,6 +53545,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][113] = 127, [0][0][RTW89_UK][1][113] = 127, [0][0][RTW89_UK][0][113] = 127, + [0][0][RTW89_THAILAND][1][113] = 127, + [0][0][RTW89_THAILAND][0][113] = 127, [0][0][RTW89_FCC][1][115] = 127, [0][0][RTW89_FCC][2][115] = 127, [0][0][RTW89_ETSI][1][115] = 127, @@ -51230,6 +53554,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][115] = 127, [0][0][RTW89_MKK][0][115] = 127, [0][0][RTW89_IC][1][115] = 127, + [0][0][RTW89_IC][2][115] = 127, [0][0][RTW89_KCC][1][115] = 127, 
[0][0][RTW89_KCC][0][115] = 127, [0][0][RTW89_ACMA][1][115] = 127, @@ -51239,6 +53564,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][115] = 127, [0][0][RTW89_UK][1][115] = 127, [0][0][RTW89_UK][0][115] = 127, + [0][0][RTW89_THAILAND][1][115] = 127, + [0][0][RTW89_THAILAND][0][115] = 127, [0][0][RTW89_FCC][1][117] = 127, [0][0][RTW89_FCC][2][117] = 127, [0][0][RTW89_ETSI][1][117] = 127, @@ -51246,6 +53573,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][117] = 127, [0][0][RTW89_MKK][0][117] = 127, [0][0][RTW89_IC][1][117] = 127, + [0][0][RTW89_IC][2][117] = 127, [0][0][RTW89_KCC][1][117] = 127, [0][0][RTW89_KCC][0][117] = 127, [0][0][RTW89_ACMA][1][117] = 127, @@ -51255,6 +53583,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][117] = 127, [0][0][RTW89_UK][1][117] = 127, [0][0][RTW89_UK][0][117] = 127, + [0][0][RTW89_THAILAND][1][117] = 127, + [0][0][RTW89_THAILAND][0][117] = 127, [0][0][RTW89_FCC][1][119] = 127, [0][0][RTW89_FCC][2][119] = 127, [0][0][RTW89_ETSI][1][119] = 127, @@ -51262,6 +53592,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][1][119] = 127, [0][0][RTW89_MKK][0][119] = 127, [0][0][RTW89_IC][1][119] = 127, + [0][0][RTW89_IC][2][119] = 127, [0][0][RTW89_KCC][1][119] = 127, [0][0][RTW89_KCC][0][119] = 127, [0][0][RTW89_ACMA][1][119] = 127, @@ -51271,6 +53602,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_QATAR][0][119] = 127, [0][0][RTW89_UK][1][119] = 127, [0][0][RTW89_UK][0][119] = 127, + [0][0][RTW89_THAILAND][1][119] = 127, + [0][0][RTW89_THAILAND][0][119] = 127, [0][1][RTW89_FCC][1][0] = -40, [0][1][RTW89_FCC][2][0] = 32, [0][1][RTW89_ETSI][1][0] = 20, @@ -51278,6 +53611,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][0] = 18, [0][1][RTW89_MKK][0][0] = -20, [0][1][RTW89_IC][1][0] = -40, + [0][1][RTW89_IC][2][0] = 32, [0][1][RTW89_KCC][1][0] = -14, [0][1][RTW89_KCC][0][0] = -14, [0][1][RTW89_ACMA][1][0] = 20, @@ -51287,6 +53621,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][0] = -18, [0][1][RTW89_UK][1][0] = 20, [0][1][RTW89_UK][0][0] = -18, + [0][1][RTW89_THAILAND][1][0] = 6, + [0][1][RTW89_THAILAND][0][0] = -40, [0][1][RTW89_FCC][1][2] = -40, [0][1][RTW89_FCC][2][2] = 32, [0][1][RTW89_ETSI][1][2] = 20, @@ -51294,6 +53630,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][2] = 18, [0][1][RTW89_MKK][0][2] = -22, [0][1][RTW89_IC][1][2] = -40, + [0][1][RTW89_IC][2][2] = 32, [0][1][RTW89_KCC][1][2] = -14, [0][1][RTW89_KCC][0][2] = -14, [0][1][RTW89_ACMA][1][2] = 20, @@ -51303,6 +53640,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][2] = -18, [0][1][RTW89_UK][1][2] = 20, [0][1][RTW89_UK][0][2] = -18, + [0][1][RTW89_THAILAND][1][2] = 6, + [0][1][RTW89_THAILAND][0][2] = -40, [0][1][RTW89_FCC][1][4] = -40, [0][1][RTW89_FCC][2][4] = 32, [0][1][RTW89_ETSI][1][4] = 20, @@ -51310,6 +53649,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][4] = 18, [0][1][RTW89_MKK][0][4] = -22, [0][1][RTW89_IC][1][4] = -40, + [0][1][RTW89_IC][2][4] = 32, [0][1][RTW89_KCC][1][4] = -14, [0][1][RTW89_KCC][0][4] = -14, [0][1][RTW89_ACMA][1][4] = 20, @@ -51319,6 +53659,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][4] = -18, 
[0][1][RTW89_UK][1][4] = 20, [0][1][RTW89_UK][0][4] = -18, + [0][1][RTW89_THAILAND][1][4] = 6, + [0][1][RTW89_THAILAND][0][4] = -40, [0][1][RTW89_FCC][1][6] = -40, [0][1][RTW89_FCC][2][6] = 32, [0][1][RTW89_ETSI][1][6] = 20, @@ -51326,6 +53668,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][6] = 18, [0][1][RTW89_MKK][0][6] = -22, [0][1][RTW89_IC][1][6] = -40, + [0][1][RTW89_IC][2][6] = 32, [0][1][RTW89_KCC][1][6] = -14, [0][1][RTW89_KCC][0][6] = -14, [0][1][RTW89_ACMA][1][6] = 20, @@ -51335,6 +53678,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][6] = -18, [0][1][RTW89_UK][1][6] = 20, [0][1][RTW89_UK][0][6] = -18, + [0][1][RTW89_THAILAND][1][6] = 6, + [0][1][RTW89_THAILAND][0][6] = -40, [0][1][RTW89_FCC][1][8] = -40, [0][1][RTW89_FCC][2][8] = 32, [0][1][RTW89_ETSI][1][8] = 20, @@ -51342,6 +53687,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][8] = 18, [0][1][RTW89_MKK][0][8] = -22, [0][1][RTW89_IC][1][8] = -40, + [0][1][RTW89_IC][2][8] = 32, [0][1][RTW89_KCC][1][8] = -14, [0][1][RTW89_KCC][0][8] = -14, [0][1][RTW89_ACMA][1][8] = 20, @@ -51351,6 +53697,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][8] = -18, [0][1][RTW89_UK][1][8] = 20, [0][1][RTW89_UK][0][8] = -18, + [0][1][RTW89_THAILAND][1][8] = 6, + [0][1][RTW89_THAILAND][0][8] = -40, [0][1][RTW89_FCC][1][10] = -40, [0][1][RTW89_FCC][2][10] = 32, [0][1][RTW89_ETSI][1][10] = 20, @@ -51358,6 +53706,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][10] = 18, [0][1][RTW89_MKK][0][10] = -22, [0][1][RTW89_IC][1][10] = -40, + [0][1][RTW89_IC][2][10] = 32, [0][1][RTW89_KCC][1][10] = -14, [0][1][RTW89_KCC][0][10] = -14, [0][1][RTW89_ACMA][1][10] = 20, @@ -51367,6 +53716,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][10] = -18, [0][1][RTW89_UK][1][10] = 20, [0][1][RTW89_UK][0][10] = -18, + [0][1][RTW89_THAILAND][1][10] = 6, + [0][1][RTW89_THAILAND][0][10] = -40, [0][1][RTW89_FCC][1][12] = -40, [0][1][RTW89_FCC][2][12] = 32, [0][1][RTW89_ETSI][1][12] = 20, @@ -51374,6 +53725,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][12] = 18, [0][1][RTW89_MKK][0][12] = -22, [0][1][RTW89_IC][1][12] = -40, + [0][1][RTW89_IC][2][12] = 32, [0][1][RTW89_KCC][1][12] = -14, [0][1][RTW89_KCC][0][12] = -14, [0][1][RTW89_ACMA][1][12] = 20, @@ -51383,6 +53735,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][12] = -18, [0][1][RTW89_UK][1][12] = 20, [0][1][RTW89_UK][0][12] = -18, + [0][1][RTW89_THAILAND][1][12] = 6, + [0][1][RTW89_THAILAND][0][12] = -40, [0][1][RTW89_FCC][1][14] = -40, [0][1][RTW89_FCC][2][14] = 32, [0][1][RTW89_ETSI][1][14] = 20, @@ -51390,6 +53744,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][14] = 18, [0][1][RTW89_MKK][0][14] = -22, [0][1][RTW89_IC][1][14] = -40, + [0][1][RTW89_IC][2][14] = 32, [0][1][RTW89_KCC][1][14] = -14, [0][1][RTW89_KCC][0][14] = -14, [0][1][RTW89_ACMA][1][14] = 20, @@ -51399,6 +53754,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][14] = -18, [0][1][RTW89_UK][1][14] = 20, [0][1][RTW89_UK][0][14] = -18, + [0][1][RTW89_THAILAND][1][14] = 6, + [0][1][RTW89_THAILAND][0][14] = -40, [0][1][RTW89_FCC][1][15] = -40, [0][1][RTW89_FCC][2][15] = 32, [0][1][RTW89_ETSI][1][15] = 20, @@ -51406,6 
+53763,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][15] = 18, [0][1][RTW89_MKK][0][15] = -22, [0][1][RTW89_IC][1][15] = -40, + [0][1][RTW89_IC][2][15] = 32, [0][1][RTW89_KCC][1][15] = -14, [0][1][RTW89_KCC][0][15] = -14, [0][1][RTW89_ACMA][1][15] = 20, @@ -51415,6 +53773,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][15] = -18, [0][1][RTW89_UK][1][15] = 20, [0][1][RTW89_UK][0][15] = -18, + [0][1][RTW89_THAILAND][1][15] = 6, + [0][1][RTW89_THAILAND][0][15] = -40, [0][1][RTW89_FCC][1][17] = -40, [0][1][RTW89_FCC][2][17] = 32, [0][1][RTW89_ETSI][1][17] = 20, @@ -51422,6 +53782,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][17] = 18, [0][1][RTW89_MKK][0][17] = -22, [0][1][RTW89_IC][1][17] = -40, + [0][1][RTW89_IC][2][17] = 32, [0][1][RTW89_KCC][1][17] = -14, [0][1][RTW89_KCC][0][17] = -14, [0][1][RTW89_ACMA][1][17] = 20, @@ -51431,6 +53792,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][17] = -18, [0][1][RTW89_UK][1][17] = 20, [0][1][RTW89_UK][0][17] = -18, + [0][1][RTW89_THAILAND][1][17] = 6, + [0][1][RTW89_THAILAND][0][17] = -40, [0][1][RTW89_FCC][1][19] = -40, [0][1][RTW89_FCC][2][19] = 32, [0][1][RTW89_ETSI][1][19] = 20, @@ -51438,6 +53801,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][19] = 18, [0][1][RTW89_MKK][0][19] = -22, [0][1][RTW89_IC][1][19] = -40, + [0][1][RTW89_IC][2][19] = 32, [0][1][RTW89_KCC][1][19] = -14, [0][1][RTW89_KCC][0][19] = -14, [0][1][RTW89_ACMA][1][19] = 20, @@ -51447,6 +53811,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][19] = -18, [0][1][RTW89_UK][1][19] = 20, [0][1][RTW89_UK][0][19] = -18, + [0][1][RTW89_THAILAND][1][19] = 6, + [0][1][RTW89_THAILAND][0][19] = -40, [0][1][RTW89_FCC][1][21] = -40, [0][1][RTW89_FCC][2][21] = 32, [0][1][RTW89_ETSI][1][21] = 20, @@ -51454,6 +53820,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][21] = 18, [0][1][RTW89_MKK][0][21] = -22, [0][1][RTW89_IC][1][21] = -40, + [0][1][RTW89_IC][2][21] = 32, [0][1][RTW89_KCC][1][21] = -14, [0][1][RTW89_KCC][0][21] = -14, [0][1][RTW89_ACMA][1][21] = 20, @@ -51463,6 +53830,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][21] = -18, [0][1][RTW89_UK][1][21] = 20, [0][1][RTW89_UK][0][21] = -18, + [0][1][RTW89_THAILAND][1][21] = 6, + [0][1][RTW89_THAILAND][0][21] = -40, [0][1][RTW89_FCC][1][23] = -40, [0][1][RTW89_FCC][2][23] = 32, [0][1][RTW89_ETSI][1][23] = 20, @@ -51470,6 +53839,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][23] = 18, [0][1][RTW89_MKK][0][23] = -22, [0][1][RTW89_IC][1][23] = -40, + [0][1][RTW89_IC][2][23] = 32, [0][1][RTW89_KCC][1][23] = -14, [0][1][RTW89_KCC][0][23] = -14, [0][1][RTW89_ACMA][1][23] = 20, @@ -51479,6 +53849,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][23] = -18, [0][1][RTW89_UK][1][23] = 20, [0][1][RTW89_UK][0][23] = -18, + [0][1][RTW89_THAILAND][1][23] = 6, + [0][1][RTW89_THAILAND][0][23] = -40, [0][1][RTW89_FCC][1][25] = -40, [0][1][RTW89_FCC][2][25] = 32, [0][1][RTW89_ETSI][1][25] = 20, @@ -51486,6 +53858,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][25] = -4, [0][1][RTW89_MKK][0][25] = -22, [0][1][RTW89_IC][1][25] = -40, + [0][1][RTW89_IC][2][25] = 32, 
[0][1][RTW89_KCC][1][25] = -14, [0][1][RTW89_KCC][0][25] = -14, [0][1][RTW89_ACMA][1][25] = 20, @@ -51495,6 +53868,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][25] = -18, [0][1][RTW89_UK][1][25] = 20, [0][1][RTW89_UK][0][25] = -18, + [0][1][RTW89_THAILAND][1][25] = 6, + [0][1][RTW89_THAILAND][0][25] = -40, [0][1][RTW89_FCC][1][27] = -40, [0][1][RTW89_FCC][2][27] = 32, [0][1][RTW89_ETSI][1][27] = 20, @@ -51502,6 +53877,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][27] = -4, [0][1][RTW89_MKK][0][27] = -22, [0][1][RTW89_IC][1][27] = -40, + [0][1][RTW89_IC][2][27] = 32, [0][1][RTW89_KCC][1][27] = -14, [0][1][RTW89_KCC][0][27] = -14, [0][1][RTW89_ACMA][1][27] = 20, @@ -51511,6 +53887,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][27] = -18, [0][1][RTW89_UK][1][27] = 20, [0][1][RTW89_UK][0][27] = -18, + [0][1][RTW89_THAILAND][1][27] = 6, + [0][1][RTW89_THAILAND][0][27] = -40, [0][1][RTW89_FCC][1][29] = -40, [0][1][RTW89_FCC][2][29] = 32, [0][1][RTW89_ETSI][1][29] = 20, @@ -51518,6 +53896,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][29] = -4, [0][1][RTW89_MKK][0][29] = -22, [0][1][RTW89_IC][1][29] = -40, + [0][1][RTW89_IC][2][29] = 32, [0][1][RTW89_KCC][1][29] = -14, [0][1][RTW89_KCC][0][29] = -14, [0][1][RTW89_ACMA][1][29] = 20, @@ -51527,6 +53906,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][29] = -18, [0][1][RTW89_UK][1][29] = 20, [0][1][RTW89_UK][0][29] = -18, + [0][1][RTW89_THAILAND][1][29] = 6, + [0][1][RTW89_THAILAND][0][29] = -40, [0][1][RTW89_FCC][1][30] = -40, [0][1][RTW89_FCC][2][30] = 32, [0][1][RTW89_ETSI][1][30] = 20, @@ -51534,6 +53915,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][30] = -4, [0][1][RTW89_MKK][0][30] = -22, [0][1][RTW89_IC][1][30] = -40, + [0][1][RTW89_IC][2][30] = 32, [0][1][RTW89_KCC][1][30] = -14, [0][1][RTW89_KCC][0][30] = -14, [0][1][RTW89_ACMA][1][30] = 20, @@ -51543,6 +53925,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][30] = -18, [0][1][RTW89_UK][1][30] = 20, [0][1][RTW89_UK][0][30] = -18, + [0][1][RTW89_THAILAND][1][30] = 6, + [0][1][RTW89_THAILAND][0][30] = -40, [0][1][RTW89_FCC][1][32] = -40, [0][1][RTW89_FCC][2][32] = 32, [0][1][RTW89_ETSI][1][32] = 20, @@ -51550,6 +53934,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][32] = -4, [0][1][RTW89_MKK][0][32] = -22, [0][1][RTW89_IC][1][32] = -40, + [0][1][RTW89_IC][2][32] = 32, [0][1][RTW89_KCC][1][32] = -14, [0][1][RTW89_KCC][0][32] = -14, [0][1][RTW89_ACMA][1][32] = 20, @@ -51559,6 +53944,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][32] = -18, [0][1][RTW89_UK][1][32] = 20, [0][1][RTW89_UK][0][32] = -18, + [0][1][RTW89_THAILAND][1][32] = 6, + [0][1][RTW89_THAILAND][0][32] = -40, [0][1][RTW89_FCC][1][34] = -40, [0][1][RTW89_FCC][2][34] = 32, [0][1][RTW89_ETSI][1][34] = 20, @@ -51566,6 +53953,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][34] = -4, [0][1][RTW89_MKK][0][34] = -22, [0][1][RTW89_IC][1][34] = -40, + [0][1][RTW89_IC][2][34] = 32, [0][1][RTW89_KCC][1][34] = -14, [0][1][RTW89_KCC][0][34] = -14, [0][1][RTW89_ACMA][1][34] = 20, @@ -51575,6 +53963,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][34] = 
-18, [0][1][RTW89_UK][1][34] = 20, [0][1][RTW89_UK][0][34] = -18, + [0][1][RTW89_THAILAND][1][34] = 6, + [0][1][RTW89_THAILAND][0][34] = -40, [0][1][RTW89_FCC][1][36] = -40, [0][1][RTW89_FCC][2][36] = 32, [0][1][RTW89_ETSI][1][36] = 20, @@ -51582,6 +53972,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][36] = -4, [0][1][RTW89_MKK][0][36] = -22, [0][1][RTW89_IC][1][36] = -40, + [0][1][RTW89_IC][2][36] = 32, [0][1][RTW89_KCC][1][36] = -14, [0][1][RTW89_KCC][0][36] = -14, [0][1][RTW89_ACMA][1][36] = 20, @@ -51591,6 +53982,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][36] = -18, [0][1][RTW89_UK][1][36] = 20, [0][1][RTW89_UK][0][36] = -18, + [0][1][RTW89_THAILAND][1][36] = 6, + [0][1][RTW89_THAILAND][0][36] = -40, [0][1][RTW89_FCC][1][38] = -40, [0][1][RTW89_FCC][2][38] = 32, [0][1][RTW89_ETSI][1][38] = 20, @@ -51598,6 +53991,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][38] = -4, [0][1][RTW89_MKK][0][38] = -22, [0][1][RTW89_IC][1][38] = -40, + [0][1][RTW89_IC][2][38] = 32, [0][1][RTW89_KCC][1][38] = -14, [0][1][RTW89_KCC][0][38] = -14, [0][1][RTW89_ACMA][1][38] = 20, @@ -51607,6 +54001,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][38] = -18, [0][1][RTW89_UK][1][38] = 20, [0][1][RTW89_UK][0][38] = -18, + [0][1][RTW89_THAILAND][1][38] = 6, + [0][1][RTW89_THAILAND][0][38] = -40, [0][1][RTW89_FCC][1][40] = -40, [0][1][RTW89_FCC][2][40] = 32, [0][1][RTW89_ETSI][1][40] = 20, @@ -51614,6 +54010,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][40] = -4, [0][1][RTW89_MKK][0][40] = -22, [0][1][RTW89_IC][1][40] = -40, + [0][1][RTW89_IC][2][40] = 32, [0][1][RTW89_KCC][1][40] = -14, [0][1][RTW89_KCC][0][40] = -14, [0][1][RTW89_ACMA][1][40] = 20, @@ -51623,6 +54020,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][40] = -18, [0][1][RTW89_UK][1][40] = 20, [0][1][RTW89_UK][0][40] = -18, + [0][1][RTW89_THAILAND][1][40] = 6, + [0][1][RTW89_THAILAND][0][40] = -40, [0][1][RTW89_FCC][1][42] = -40, [0][1][RTW89_FCC][2][42] = 32, [0][1][RTW89_ETSI][1][42] = 20, @@ -51630,6 +54029,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][42] = -4, [0][1][RTW89_MKK][0][42] = -22, [0][1][RTW89_IC][1][42] = -40, + [0][1][RTW89_IC][2][42] = 32, [0][1][RTW89_KCC][1][42] = -14, [0][1][RTW89_KCC][0][42] = -14, [0][1][RTW89_ACMA][1][42] = 20, @@ -51639,6 +54039,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][42] = -18, [0][1][RTW89_UK][1][42] = 20, [0][1][RTW89_UK][0][42] = -18, + [0][1][RTW89_THAILAND][1][42] = 6, + [0][1][RTW89_THAILAND][0][42] = -40, [0][1][RTW89_FCC][1][44] = -40, [0][1][RTW89_FCC][2][44] = 32, [0][1][RTW89_ETSI][1][44] = 20, @@ -51646,6 +54048,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][44] = -4, [0][1][RTW89_MKK][0][44] = -22, [0][1][RTW89_IC][1][44] = -40, + [0][1][RTW89_IC][2][44] = 32, [0][1][RTW89_KCC][1][44] = -14, [0][1][RTW89_KCC][0][44] = -14, [0][1][RTW89_ACMA][1][44] = 20, @@ -51655,6 +54058,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][44] = -18, [0][1][RTW89_UK][1][44] = 20, [0][1][RTW89_UK][0][44] = -18, + [0][1][RTW89_THAILAND][1][44] = 6, + [0][1][RTW89_THAILAND][0][44] = -40, [0][1][RTW89_FCC][1][45] = -40, [0][1][RTW89_FCC][2][45] = 127, 
[0][1][RTW89_ETSI][1][45] = 127, @@ -51662,6 +54067,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][45] = 127, [0][1][RTW89_MKK][0][45] = 127, [0][1][RTW89_IC][1][45] = -40, + [0][1][RTW89_IC][2][45] = 32, [0][1][RTW89_KCC][1][45] = -14, [0][1][RTW89_KCC][0][45] = 127, [0][1][RTW89_ACMA][1][45] = 127, @@ -51671,6 +54077,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][45] = 127, [0][1][RTW89_UK][1][45] = 127, [0][1][RTW89_UK][0][45] = 127, + [0][1][RTW89_THAILAND][1][45] = 127, + [0][1][RTW89_THAILAND][0][45] = 127, [0][1][RTW89_FCC][1][47] = -40, [0][1][RTW89_FCC][2][47] = 127, [0][1][RTW89_ETSI][1][47] = 127, @@ -51678,6 +54086,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][47] = 127, [0][1][RTW89_MKK][0][47] = 127, [0][1][RTW89_IC][1][47] = -40, + [0][1][RTW89_IC][2][47] = 32, [0][1][RTW89_KCC][1][47] = -14, [0][1][RTW89_KCC][0][47] = 127, [0][1][RTW89_ACMA][1][47] = 127, @@ -51687,6 +54096,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][47] = 127, [0][1][RTW89_UK][1][47] = 127, [0][1][RTW89_UK][0][47] = 127, + [0][1][RTW89_THAILAND][1][47] = 127, + [0][1][RTW89_THAILAND][0][47] = 127, [0][1][RTW89_FCC][1][49] = -40, [0][1][RTW89_FCC][2][49] = 127, [0][1][RTW89_ETSI][1][49] = 127, @@ -51694,6 +54105,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][49] = 127, [0][1][RTW89_MKK][0][49] = 127, [0][1][RTW89_IC][1][49] = -40, + [0][1][RTW89_IC][2][49] = 32, [0][1][RTW89_KCC][1][49] = -14, [0][1][RTW89_KCC][0][49] = 127, [0][1][RTW89_ACMA][1][49] = 127, @@ -51703,6 +54115,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][49] = 127, [0][1][RTW89_UK][1][49] = 127, [0][1][RTW89_UK][0][49] = 127, + [0][1][RTW89_THAILAND][1][49] = 127, + [0][1][RTW89_THAILAND][0][49] = 127, [0][1][RTW89_FCC][1][51] = -40, [0][1][RTW89_FCC][2][51] = 127, [0][1][RTW89_ETSI][1][51] = 127, @@ -51710,6 +54124,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][51] = 127, [0][1][RTW89_MKK][0][51] = 127, [0][1][RTW89_IC][1][51] = -40, + [0][1][RTW89_IC][2][51] = 32, [0][1][RTW89_KCC][1][51] = -14, [0][1][RTW89_KCC][0][51] = 127, [0][1][RTW89_ACMA][1][51] = 127, @@ -51719,6 +54134,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][51] = 127, [0][1][RTW89_UK][1][51] = 127, [0][1][RTW89_UK][0][51] = 127, + [0][1][RTW89_THAILAND][1][51] = 127, + [0][1][RTW89_THAILAND][0][51] = 127, [0][1][RTW89_FCC][1][53] = -40, [0][1][RTW89_FCC][2][53] = 127, [0][1][RTW89_ETSI][1][53] = 127, @@ -51726,6 +54143,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][53] = 127, [0][1][RTW89_MKK][0][53] = 127, [0][1][RTW89_IC][1][53] = -40, + [0][1][RTW89_IC][2][53] = 32, [0][1][RTW89_KCC][1][53] = -14, [0][1][RTW89_KCC][0][53] = 127, [0][1][RTW89_ACMA][1][53] = 127, @@ -51735,6 +54153,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][53] = 127, [0][1][RTW89_UK][1][53] = 127, [0][1][RTW89_UK][0][53] = 127, + [0][1][RTW89_THAILAND][1][53] = 127, + [0][1][RTW89_THAILAND][0][53] = 127, [0][1][RTW89_FCC][1][55] = -40, [0][1][RTW89_FCC][2][55] = 30, [0][1][RTW89_ETSI][1][55] = 127, @@ -51742,6 +54162,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][55] = 127, [0][1][RTW89_MKK][0][55] 
= 127, [0][1][RTW89_IC][1][55] = -40, + [0][1][RTW89_IC][2][55] = 30, [0][1][RTW89_KCC][1][55] = -14, [0][1][RTW89_KCC][0][55] = 127, [0][1][RTW89_ACMA][1][55] = 127, @@ -51751,6 +54172,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][55] = 127, [0][1][RTW89_UK][1][55] = 127, [0][1][RTW89_UK][0][55] = 127, + [0][1][RTW89_THAILAND][1][55] = 127, + [0][1][RTW89_THAILAND][0][55] = 127, [0][1][RTW89_FCC][1][57] = -40, [0][1][RTW89_FCC][2][57] = 30, [0][1][RTW89_ETSI][1][57] = 127, @@ -51758,6 +54181,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][57] = 127, [0][1][RTW89_MKK][0][57] = 127, [0][1][RTW89_IC][1][57] = -40, + [0][1][RTW89_IC][2][57] = 30, [0][1][RTW89_KCC][1][57] = -14, [0][1][RTW89_KCC][0][57] = 127, [0][1][RTW89_ACMA][1][57] = 127, @@ -51767,6 +54191,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][57] = 127, [0][1][RTW89_UK][1][57] = 127, [0][1][RTW89_UK][0][57] = 127, + [0][1][RTW89_THAILAND][1][57] = 127, + [0][1][RTW89_THAILAND][0][57] = 127, [0][1][RTW89_FCC][1][59] = -40, [0][1][RTW89_FCC][2][59] = 30, [0][1][RTW89_ETSI][1][59] = 127, @@ -51774,6 +54200,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][59] = 127, [0][1][RTW89_MKK][0][59] = 127, [0][1][RTW89_IC][1][59] = -40, + [0][1][RTW89_IC][2][59] = 30, [0][1][RTW89_KCC][1][59] = -14, [0][1][RTW89_KCC][0][59] = 127, [0][1][RTW89_ACMA][1][59] = 127, @@ -51783,6 +54210,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][59] = 127, [0][1][RTW89_UK][1][59] = 127, [0][1][RTW89_UK][0][59] = 127, + [0][1][RTW89_THAILAND][1][59] = 127, + [0][1][RTW89_THAILAND][0][59] = 127, [0][1][RTW89_FCC][1][60] = -40, [0][1][RTW89_FCC][2][60] = 30, [0][1][RTW89_ETSI][1][60] = 127, @@ -51790,6 +54219,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][60] = 127, [0][1][RTW89_MKK][0][60] = 127, [0][1][RTW89_IC][1][60] = -40, + [0][1][RTW89_IC][2][60] = 30, [0][1][RTW89_KCC][1][60] = -14, [0][1][RTW89_KCC][0][60] = 127, [0][1][RTW89_ACMA][1][60] = 127, @@ -51799,6 +54229,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][60] = 127, [0][1][RTW89_UK][1][60] = 127, [0][1][RTW89_UK][0][60] = 127, + [0][1][RTW89_THAILAND][1][60] = 127, + [0][1][RTW89_THAILAND][0][60] = 127, [0][1][RTW89_FCC][1][62] = -40, [0][1][RTW89_FCC][2][62] = 30, [0][1][RTW89_ETSI][1][62] = 127, @@ -51806,6 +54238,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][62] = 127, [0][1][RTW89_MKK][0][62] = 127, [0][1][RTW89_IC][1][62] = -40, + [0][1][RTW89_IC][2][62] = 30, [0][1][RTW89_KCC][1][62] = -14, [0][1][RTW89_KCC][0][62] = 127, [0][1][RTW89_ACMA][1][62] = 127, @@ -51815,6 +54248,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][62] = 127, [0][1][RTW89_UK][1][62] = 127, [0][1][RTW89_UK][0][62] = 127, + [0][1][RTW89_THAILAND][1][62] = 127, + [0][1][RTW89_THAILAND][0][62] = 127, [0][1][RTW89_FCC][1][64] = -40, [0][1][RTW89_FCC][2][64] = 30, [0][1][RTW89_ETSI][1][64] = 127, @@ -51822,6 +54257,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][64] = 127, [0][1][RTW89_MKK][0][64] = 127, [0][1][RTW89_IC][1][64] = -40, + [0][1][RTW89_IC][2][64] = 30, [0][1][RTW89_KCC][1][64] = -14, [0][1][RTW89_KCC][0][64] = 127, [0][1][RTW89_ACMA][1][64] = 127, @@ -51831,6 
+54267,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][64] = 127, [0][1][RTW89_UK][1][64] = 127, [0][1][RTW89_UK][0][64] = 127, + [0][1][RTW89_THAILAND][1][64] = 127, + [0][1][RTW89_THAILAND][0][64] = 127, [0][1][RTW89_FCC][1][66] = -40, [0][1][RTW89_FCC][2][66] = 30, [0][1][RTW89_ETSI][1][66] = 127, @@ -51838,6 +54276,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][66] = 127, [0][1][RTW89_MKK][0][66] = 127, [0][1][RTW89_IC][1][66] = -40, + [0][1][RTW89_IC][2][66] = 30, [0][1][RTW89_KCC][1][66] = -14, [0][1][RTW89_KCC][0][66] = 127, [0][1][RTW89_ACMA][1][66] = 127, @@ -51847,6 +54286,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][66] = 127, [0][1][RTW89_UK][1][66] = 127, [0][1][RTW89_UK][0][66] = 127, + [0][1][RTW89_THAILAND][1][66] = 127, + [0][1][RTW89_THAILAND][0][66] = 127, [0][1][RTW89_FCC][1][68] = -40, [0][1][RTW89_FCC][2][68] = 30, [0][1][RTW89_ETSI][1][68] = 127, @@ -51854,6 +54295,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][68] = 127, [0][1][RTW89_MKK][0][68] = 127, [0][1][RTW89_IC][1][68] = -40, + [0][1][RTW89_IC][2][68] = 30, [0][1][RTW89_KCC][1][68] = -14, [0][1][RTW89_KCC][0][68] = 127, [0][1][RTW89_ACMA][1][68] = 127, @@ -51863,6 +54305,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][68] = 127, [0][1][RTW89_UK][1][68] = 127, [0][1][RTW89_UK][0][68] = 127, + [0][1][RTW89_THAILAND][1][68] = 127, + [0][1][RTW89_THAILAND][0][68] = 127, [0][1][RTW89_FCC][1][70] = -38, [0][1][RTW89_FCC][2][70] = 30, [0][1][RTW89_ETSI][1][70] = 127, @@ -51870,6 +54314,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][70] = 127, [0][1][RTW89_MKK][0][70] = 127, [0][1][RTW89_IC][1][70] = -38, + [0][1][RTW89_IC][2][70] = 30, [0][1][RTW89_KCC][1][70] = -14, [0][1][RTW89_KCC][0][70] = 127, [0][1][RTW89_ACMA][1][70] = 127, @@ -51879,6 +54324,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][70] = 127, [0][1][RTW89_UK][1][70] = 127, [0][1][RTW89_UK][0][70] = 127, + [0][1][RTW89_THAILAND][1][70] = 127, + [0][1][RTW89_THAILAND][0][70] = 127, [0][1][RTW89_FCC][1][72] = -38, [0][1][RTW89_FCC][2][72] = 30, [0][1][RTW89_ETSI][1][72] = 127, @@ -51886,6 +54333,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][72] = 127, [0][1][RTW89_MKK][0][72] = 127, [0][1][RTW89_IC][1][72] = -38, + [0][1][RTW89_IC][2][72] = 30, [0][1][RTW89_KCC][1][72] = -14, [0][1][RTW89_KCC][0][72] = 127, [0][1][RTW89_ACMA][1][72] = 127, @@ -51895,6 +54343,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][72] = 127, [0][1][RTW89_UK][1][72] = 127, [0][1][RTW89_UK][0][72] = 127, + [0][1][RTW89_THAILAND][1][72] = 127, + [0][1][RTW89_THAILAND][0][72] = 127, [0][1][RTW89_FCC][1][74] = -38, [0][1][RTW89_FCC][2][74] = 30, [0][1][RTW89_ETSI][1][74] = 127, @@ -51902,6 +54352,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][74] = 127, [0][1][RTW89_MKK][0][74] = 127, [0][1][RTW89_IC][1][74] = -38, + [0][1][RTW89_IC][2][74] = 30, [0][1][RTW89_KCC][1][74] = -14, [0][1][RTW89_KCC][0][74] = 127, [0][1][RTW89_ACMA][1][74] = 127, @@ -51911,6 +54362,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][74] = 127, [0][1][RTW89_UK][1][74] = 127, [0][1][RTW89_UK][0][74] = 127, + 
[0][1][RTW89_THAILAND][1][74] = 127, + [0][1][RTW89_THAILAND][0][74] = 127, [0][1][RTW89_FCC][1][75] = -38, [0][1][RTW89_FCC][2][75] = 30, [0][1][RTW89_ETSI][1][75] = 127, @@ -51918,6 +54371,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][75] = 127, [0][1][RTW89_MKK][0][75] = 127, [0][1][RTW89_IC][1][75] = -38, + [0][1][RTW89_IC][2][75] = 30, [0][1][RTW89_KCC][1][75] = -14, [0][1][RTW89_KCC][0][75] = 127, [0][1][RTW89_ACMA][1][75] = 127, @@ -51927,6 +54381,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][75] = 127, [0][1][RTW89_UK][1][75] = 127, [0][1][RTW89_UK][0][75] = 127, + [0][1][RTW89_THAILAND][1][75] = 127, + [0][1][RTW89_THAILAND][0][75] = 127, [0][1][RTW89_FCC][1][77] = -38, [0][1][RTW89_FCC][2][77] = 30, [0][1][RTW89_ETSI][1][77] = 127, @@ -51934,6 +54390,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][77] = 127, [0][1][RTW89_MKK][0][77] = 127, [0][1][RTW89_IC][1][77] = -38, + [0][1][RTW89_IC][2][77] = 30, [0][1][RTW89_KCC][1][77] = -14, [0][1][RTW89_KCC][0][77] = 127, [0][1][RTW89_ACMA][1][77] = 127, @@ -51943,6 +54400,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][77] = 127, [0][1][RTW89_UK][1][77] = 127, [0][1][RTW89_UK][0][77] = 127, + [0][1][RTW89_THAILAND][1][77] = 127, + [0][1][RTW89_THAILAND][0][77] = 127, [0][1][RTW89_FCC][1][79] = -38, [0][1][RTW89_FCC][2][79] = 30, [0][1][RTW89_ETSI][1][79] = 127, @@ -51950,6 +54409,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][79] = 127, [0][1][RTW89_MKK][0][79] = 127, [0][1][RTW89_IC][1][79] = -38, + [0][1][RTW89_IC][2][79] = 30, [0][1][RTW89_KCC][1][79] = -14, [0][1][RTW89_KCC][0][79] = 127, [0][1][RTW89_ACMA][1][79] = 127, @@ -51959,6 +54419,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][79] = 127, [0][1][RTW89_UK][1][79] = 127, [0][1][RTW89_UK][0][79] = 127, + [0][1][RTW89_THAILAND][1][79] = 127, + [0][1][RTW89_THAILAND][0][79] = 127, [0][1][RTW89_FCC][1][81] = -38, [0][1][RTW89_FCC][2][81] = 30, [0][1][RTW89_ETSI][1][81] = 127, @@ -51966,6 +54428,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][81] = 127, [0][1][RTW89_MKK][0][81] = 127, [0][1][RTW89_IC][1][81] = -38, + [0][1][RTW89_IC][2][81] = 30, [0][1][RTW89_KCC][1][81] = -14, [0][1][RTW89_KCC][0][81] = 127, [0][1][RTW89_ACMA][1][81] = 127, @@ -51975,6 +54438,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][81] = 127, [0][1][RTW89_UK][1][81] = 127, [0][1][RTW89_UK][0][81] = 127, + [0][1][RTW89_THAILAND][1][81] = 127, + [0][1][RTW89_THAILAND][0][81] = 127, [0][1][RTW89_FCC][1][83] = -38, [0][1][RTW89_FCC][2][83] = 30, [0][1][RTW89_ETSI][1][83] = 127, @@ -51982,6 +54447,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][83] = 127, [0][1][RTW89_MKK][0][83] = 127, [0][1][RTW89_IC][1][83] = -38, + [0][1][RTW89_IC][2][83] = 30, [0][1][RTW89_KCC][1][83] = -14, [0][1][RTW89_KCC][0][83] = 127, [0][1][RTW89_ACMA][1][83] = 127, @@ -51991,6 +54457,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][83] = 127, [0][1][RTW89_UK][1][83] = 127, [0][1][RTW89_UK][0][83] = 127, + [0][1][RTW89_THAILAND][1][83] = 127, + [0][1][RTW89_THAILAND][0][83] = 127, [0][1][RTW89_FCC][1][85] = -38, [0][1][RTW89_FCC][2][85] = 30, [0][1][RTW89_ETSI][1][85] = 127, @@ -51998,6 
+54466,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][85] = 127, [0][1][RTW89_MKK][0][85] = 127, [0][1][RTW89_IC][1][85] = -38, + [0][1][RTW89_IC][2][85] = 30, [0][1][RTW89_KCC][1][85] = -14, [0][1][RTW89_KCC][0][85] = 127, [0][1][RTW89_ACMA][1][85] = 127, @@ -52007,6 +54476,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][85] = 127, [0][1][RTW89_UK][1][85] = 127, [0][1][RTW89_UK][0][85] = 127, + [0][1][RTW89_THAILAND][1][85] = 127, + [0][1][RTW89_THAILAND][0][85] = 127, [0][1][RTW89_FCC][1][87] = -40, [0][1][RTW89_FCC][2][87] = 127, [0][1][RTW89_ETSI][1][87] = 127, @@ -52014,6 +54485,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][87] = 127, [0][1][RTW89_MKK][0][87] = 127, [0][1][RTW89_IC][1][87] = -40, + [0][1][RTW89_IC][2][87] = 127, [0][1][RTW89_KCC][1][87] = -14, [0][1][RTW89_KCC][0][87] = 127, [0][1][RTW89_ACMA][1][87] = 127, @@ -52023,6 +54495,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][87] = 127, [0][1][RTW89_UK][1][87] = 127, [0][1][RTW89_UK][0][87] = 127, + [0][1][RTW89_THAILAND][1][87] = 127, + [0][1][RTW89_THAILAND][0][87] = 127, [0][1][RTW89_FCC][1][89] = -38, [0][1][RTW89_FCC][2][89] = 127, [0][1][RTW89_ETSI][1][89] = 127, @@ -52030,6 +54504,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][89] = 127, [0][1][RTW89_MKK][0][89] = 127, [0][1][RTW89_IC][1][89] = -38, + [0][1][RTW89_IC][2][89] = 127, [0][1][RTW89_KCC][1][89] = -14, [0][1][RTW89_KCC][0][89] = 127, [0][1][RTW89_ACMA][1][89] = 127, @@ -52039,6 +54514,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][89] = 127, [0][1][RTW89_UK][1][89] = 127, [0][1][RTW89_UK][0][89] = 127, + [0][1][RTW89_THAILAND][1][89] = 127, + [0][1][RTW89_THAILAND][0][89] = 127, [0][1][RTW89_FCC][1][90] = -38, [0][1][RTW89_FCC][2][90] = 127, [0][1][RTW89_ETSI][1][90] = 127, @@ -52046,6 +54523,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][90] = 127, [0][1][RTW89_MKK][0][90] = 127, [0][1][RTW89_IC][1][90] = -38, + [0][1][RTW89_IC][2][90] = 127, [0][1][RTW89_KCC][1][90] = -14, [0][1][RTW89_KCC][0][90] = 127, [0][1][RTW89_ACMA][1][90] = 127, @@ -52055,6 +54533,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][90] = 127, [0][1][RTW89_UK][1][90] = 127, [0][1][RTW89_UK][0][90] = 127, + [0][1][RTW89_THAILAND][1][90] = 127, + [0][1][RTW89_THAILAND][0][90] = 127, [0][1][RTW89_FCC][1][92] = -38, [0][1][RTW89_FCC][2][92] = 127, [0][1][RTW89_ETSI][1][92] = 127, @@ -52062,6 +54542,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][92] = 127, [0][1][RTW89_MKK][0][92] = 127, [0][1][RTW89_IC][1][92] = -38, + [0][1][RTW89_IC][2][92] = 127, [0][1][RTW89_KCC][1][92] = -14, [0][1][RTW89_KCC][0][92] = 127, [0][1][RTW89_ACMA][1][92] = 127, @@ -52071,6 +54552,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][92] = 127, [0][1][RTW89_UK][1][92] = 127, [0][1][RTW89_UK][0][92] = 127, + [0][1][RTW89_THAILAND][1][92] = 127, + [0][1][RTW89_THAILAND][0][92] = 127, [0][1][RTW89_FCC][1][94] = -38, [0][1][RTW89_FCC][2][94] = 127, [0][1][RTW89_ETSI][1][94] = 127, @@ -52078,6 +54561,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][94] = 127, [0][1][RTW89_MKK][0][94] = 127, [0][1][RTW89_IC][1][94] = -38, + 
[0][1][RTW89_IC][2][94] = 127, [0][1][RTW89_KCC][1][94] = -14, [0][1][RTW89_KCC][0][94] = 127, [0][1][RTW89_ACMA][1][94] = 127, @@ -52087,6 +54571,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][94] = 127, [0][1][RTW89_UK][1][94] = 127, [0][1][RTW89_UK][0][94] = 127, + [0][1][RTW89_THAILAND][1][94] = 127, + [0][1][RTW89_THAILAND][0][94] = 127, [0][1][RTW89_FCC][1][96] = -38, [0][1][RTW89_FCC][2][96] = 127, [0][1][RTW89_ETSI][1][96] = 127, @@ -52094,6 +54580,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][96] = 127, [0][1][RTW89_MKK][0][96] = 127, [0][1][RTW89_IC][1][96] = -38, + [0][1][RTW89_IC][2][96] = 127, [0][1][RTW89_KCC][1][96] = -14, [0][1][RTW89_KCC][0][96] = 127, [0][1][RTW89_ACMA][1][96] = 127, @@ -52103,6 +54590,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][96] = 127, [0][1][RTW89_UK][1][96] = 127, [0][1][RTW89_UK][0][96] = 127, + [0][1][RTW89_THAILAND][1][96] = 127, + [0][1][RTW89_THAILAND][0][96] = 127, [0][1][RTW89_FCC][1][98] = -38, [0][1][RTW89_FCC][2][98] = 127, [0][1][RTW89_ETSI][1][98] = 127, @@ -52110,6 +54599,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][98] = 127, [0][1][RTW89_MKK][0][98] = 127, [0][1][RTW89_IC][1][98] = -38, + [0][1][RTW89_IC][2][98] = 127, [0][1][RTW89_KCC][1][98] = -14, [0][1][RTW89_KCC][0][98] = 127, [0][1][RTW89_ACMA][1][98] = 127, @@ -52119,6 +54609,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][98] = 127, [0][1][RTW89_UK][1][98] = 127, [0][1][RTW89_UK][0][98] = 127, + [0][1][RTW89_THAILAND][1][98] = 127, + [0][1][RTW89_THAILAND][0][98] = 127, [0][1][RTW89_FCC][1][100] = -38, [0][1][RTW89_FCC][2][100] = 127, [0][1][RTW89_ETSI][1][100] = 127, @@ -52126,6 +54618,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][100] = 127, [0][1][RTW89_MKK][0][100] = 127, [0][1][RTW89_IC][1][100] = -38, + [0][1][RTW89_IC][2][100] = 127, [0][1][RTW89_KCC][1][100] = -14, [0][1][RTW89_KCC][0][100] = 127, [0][1][RTW89_ACMA][1][100] = 127, @@ -52135,6 +54628,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][100] = 127, [0][1][RTW89_UK][1][100] = 127, [0][1][RTW89_UK][0][100] = 127, + [0][1][RTW89_THAILAND][1][100] = 127, + [0][1][RTW89_THAILAND][0][100] = 127, [0][1][RTW89_FCC][1][102] = -38, [0][1][RTW89_FCC][2][102] = 127, [0][1][RTW89_ETSI][1][102] = 127, @@ -52142,6 +54637,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][102] = 127, [0][1][RTW89_MKK][0][102] = 127, [0][1][RTW89_IC][1][102] = -38, + [0][1][RTW89_IC][2][102] = 127, [0][1][RTW89_KCC][1][102] = -14, [0][1][RTW89_KCC][0][102] = 127, [0][1][RTW89_ACMA][1][102] = 127, @@ -52151,6 +54647,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][102] = 127, [0][1][RTW89_UK][1][102] = 127, [0][1][RTW89_UK][0][102] = 127, + [0][1][RTW89_THAILAND][1][102] = 127, + [0][1][RTW89_THAILAND][0][102] = 127, [0][1][RTW89_FCC][1][104] = -38, [0][1][RTW89_FCC][2][104] = 127, [0][1][RTW89_ETSI][1][104] = 127, @@ -52158,6 +54656,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][104] = 127, [0][1][RTW89_MKK][0][104] = 127, [0][1][RTW89_IC][1][104] = -38, + [0][1][RTW89_IC][2][104] = 127, [0][1][RTW89_KCC][1][104] = -14, [0][1][RTW89_KCC][0][104] = 127, [0][1][RTW89_ACMA][1][104] = 127, @@ 
-52167,6 +54666,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][104] = 127, [0][1][RTW89_UK][1][104] = 127, [0][1][RTW89_UK][0][104] = 127, + [0][1][RTW89_THAILAND][1][104] = 127, + [0][1][RTW89_THAILAND][0][104] = 127, [0][1][RTW89_FCC][1][105] = -38, [0][1][RTW89_FCC][2][105] = 127, [0][1][RTW89_ETSI][1][105] = 127, @@ -52174,6 +54675,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][105] = 127, [0][1][RTW89_MKK][0][105] = 127, [0][1][RTW89_IC][1][105] = -38, + [0][1][RTW89_IC][2][105] = 127, [0][1][RTW89_KCC][1][105] = -14, [0][1][RTW89_KCC][0][105] = 127, [0][1][RTW89_ACMA][1][105] = 127, @@ -52183,6 +54685,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][105] = 127, [0][1][RTW89_UK][1][105] = 127, [0][1][RTW89_UK][0][105] = 127, + [0][1][RTW89_THAILAND][1][105] = 127, + [0][1][RTW89_THAILAND][0][105] = 127, [0][1][RTW89_FCC][1][107] = -34, [0][1][RTW89_FCC][2][107] = 127, [0][1][RTW89_ETSI][1][107] = 127, @@ -52190,6 +54694,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][107] = 127, [0][1][RTW89_MKK][0][107] = 127, [0][1][RTW89_IC][1][107] = -34, + [0][1][RTW89_IC][2][107] = 127, [0][1][RTW89_KCC][1][107] = -14, [0][1][RTW89_KCC][0][107] = 127, [0][1][RTW89_ACMA][1][107] = 127, @@ -52199,6 +54704,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][107] = 127, [0][1][RTW89_UK][1][107] = 127, [0][1][RTW89_UK][0][107] = 127, + [0][1][RTW89_THAILAND][1][107] = 127, + [0][1][RTW89_THAILAND][0][107] = 127, [0][1][RTW89_FCC][1][109] = -34, [0][1][RTW89_FCC][2][109] = 127, [0][1][RTW89_ETSI][1][109] = 127, @@ -52206,6 +54713,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][109] = 127, [0][1][RTW89_MKK][0][109] = 127, [0][1][RTW89_IC][1][109] = -34, + [0][1][RTW89_IC][2][109] = 127, [0][1][RTW89_KCC][1][109] = 127, [0][1][RTW89_KCC][0][109] = 127, [0][1][RTW89_ACMA][1][109] = 127, @@ -52215,6 +54723,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][109] = 127, [0][1][RTW89_UK][1][109] = 127, [0][1][RTW89_UK][0][109] = 127, + [0][1][RTW89_THAILAND][1][109] = 127, + [0][1][RTW89_THAILAND][0][109] = 127, [0][1][RTW89_FCC][1][111] = 127, [0][1][RTW89_FCC][2][111] = 127, [0][1][RTW89_ETSI][1][111] = 127, @@ -52222,6 +54732,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][111] = 127, [0][1][RTW89_MKK][0][111] = 127, [0][1][RTW89_IC][1][111] = 127, + [0][1][RTW89_IC][2][111] = 127, [0][1][RTW89_KCC][1][111] = 127, [0][1][RTW89_KCC][0][111] = 127, [0][1][RTW89_ACMA][1][111] = 127, @@ -52231,6 +54742,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][111] = 127, [0][1][RTW89_UK][1][111] = 127, [0][1][RTW89_UK][0][111] = 127, + [0][1][RTW89_THAILAND][1][111] = 127, + [0][1][RTW89_THAILAND][0][111] = 127, [0][1][RTW89_FCC][1][113] = 127, [0][1][RTW89_FCC][2][113] = 127, [0][1][RTW89_ETSI][1][113] = 127, @@ -52238,6 +54751,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][113] = 127, [0][1][RTW89_MKK][0][113] = 127, [0][1][RTW89_IC][1][113] = 127, + [0][1][RTW89_IC][2][113] = 127, [0][1][RTW89_KCC][1][113] = 127, [0][1][RTW89_KCC][0][113] = 127, [0][1][RTW89_ACMA][1][113] = 127, @@ -52247,6 +54761,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] 
[0][1][RTW89_QATAR][0][113] = 127, [0][1][RTW89_UK][1][113] = 127, [0][1][RTW89_UK][0][113] = 127, + [0][1][RTW89_THAILAND][1][113] = 127, + [0][1][RTW89_THAILAND][0][113] = 127, [0][1][RTW89_FCC][1][115] = 127, [0][1][RTW89_FCC][2][115] = 127, [0][1][RTW89_ETSI][1][115] = 127, @@ -52254,6 +54770,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][115] = 127, [0][1][RTW89_MKK][0][115] = 127, [0][1][RTW89_IC][1][115] = 127, + [0][1][RTW89_IC][2][115] = 127, [0][1][RTW89_KCC][1][115] = 127, [0][1][RTW89_KCC][0][115] = 127, [0][1][RTW89_ACMA][1][115] = 127, @@ -52263,6 +54780,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][115] = 127, [0][1][RTW89_UK][1][115] = 127, [0][1][RTW89_UK][0][115] = 127, + [0][1][RTW89_THAILAND][1][115] = 127, + [0][1][RTW89_THAILAND][0][115] = 127, [0][1][RTW89_FCC][1][117] = 127, [0][1][RTW89_FCC][2][117] = 127, [0][1][RTW89_ETSI][1][117] = 127, @@ -52270,6 +54789,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][117] = 127, [0][1][RTW89_MKK][0][117] = 127, [0][1][RTW89_IC][1][117] = 127, + [0][1][RTW89_IC][2][117] = 127, [0][1][RTW89_KCC][1][117] = 127, [0][1][RTW89_KCC][0][117] = 127, [0][1][RTW89_ACMA][1][117] = 127, @@ -52279,6 +54799,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][117] = 127, [0][1][RTW89_UK][1][117] = 127, [0][1][RTW89_UK][0][117] = 127, + [0][1][RTW89_THAILAND][1][117] = 127, + [0][1][RTW89_THAILAND][0][117] = 127, [0][1][RTW89_FCC][1][119] = 127, [0][1][RTW89_FCC][2][119] = 127, [0][1][RTW89_ETSI][1][119] = 127, @@ -52286,6 +54808,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][1][119] = 127, [0][1][RTW89_MKK][0][119] = 127, [0][1][RTW89_IC][1][119] = 127, + [0][1][RTW89_IC][2][119] = 127, [0][1][RTW89_KCC][1][119] = 127, [0][1][RTW89_KCC][0][119] = 127, [0][1][RTW89_ACMA][1][119] = 127, @@ -52295,6 +54818,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_QATAR][0][119] = 127, [0][1][RTW89_UK][1][119] = 127, [0][1][RTW89_UK][0][119] = 127, + [0][1][RTW89_THAILAND][1][119] = 127, + [0][1][RTW89_THAILAND][0][119] = 127, [1][0][RTW89_FCC][1][0] = -4, [1][0][RTW89_FCC][2][0] = 52, [1][0][RTW89_ETSI][1][0] = 46, @@ -52302,6 +54827,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][0] = 42, [1][0][RTW89_MKK][0][0] = 2, [1][0][RTW89_IC][1][0] = -4, + [1][0][RTW89_IC][2][0] = 52, [1][0][RTW89_KCC][1][0] = -2, [1][0][RTW89_KCC][0][0] = -2, [1][0][RTW89_ACMA][1][0] = 46, @@ -52311,6 +54837,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][0] = 6, [1][0][RTW89_UK][1][0] = 46, [1][0][RTW89_UK][0][0] = 6, + [1][0][RTW89_THAILAND][1][0] = 42, + [1][0][RTW89_THAILAND][0][0] = -4, [1][0][RTW89_FCC][1][2] = -4, [1][0][RTW89_FCC][2][2] = 52, [1][0][RTW89_ETSI][1][2] = 46, @@ -52318,6 +54846,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][2] = 42, [1][0][RTW89_MKK][0][2] = 2, [1][0][RTW89_IC][1][2] = -4, + [1][0][RTW89_IC][2][2] = 52, [1][0][RTW89_KCC][1][2] = -2, [1][0][RTW89_KCC][0][2] = -2, [1][0][RTW89_ACMA][1][2] = 46, @@ -52327,6 +54856,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][2] = 6, [1][0][RTW89_UK][1][2] = 46, [1][0][RTW89_UK][0][2] = 6, + [1][0][RTW89_THAILAND][1][2] = 42, + [1][0][RTW89_THAILAND][0][2] = -4, 
[1][0][RTW89_FCC][1][4] = -4, [1][0][RTW89_FCC][2][4] = 52, [1][0][RTW89_ETSI][1][4] = 46, @@ -52334,6 +54865,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][4] = 42, [1][0][RTW89_MKK][0][4] = 2, [1][0][RTW89_IC][1][4] = -4, + [1][0][RTW89_IC][2][4] = 52, [1][0][RTW89_KCC][1][4] = -2, [1][0][RTW89_KCC][0][4] = -2, [1][0][RTW89_ACMA][1][4] = 46, @@ -52343,6 +54875,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][4] = 6, [1][0][RTW89_UK][1][4] = 46, [1][0][RTW89_UK][0][4] = 6, + [1][0][RTW89_THAILAND][1][4] = 42, + [1][0][RTW89_THAILAND][0][4] = -4, [1][0][RTW89_FCC][1][6] = -4, [1][0][RTW89_FCC][2][6] = 52, [1][0][RTW89_ETSI][1][6] = 46, @@ -52350,6 +54884,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][6] = 42, [1][0][RTW89_MKK][0][6] = 2, [1][0][RTW89_IC][1][6] = -4, + [1][0][RTW89_IC][2][6] = 52, [1][0][RTW89_KCC][1][6] = -2, [1][0][RTW89_KCC][0][6] = -2, [1][0][RTW89_ACMA][1][6] = 46, @@ -52359,6 +54894,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][6] = 6, [1][0][RTW89_UK][1][6] = 46, [1][0][RTW89_UK][0][6] = 6, + [1][0][RTW89_THAILAND][1][6] = 42, + [1][0][RTW89_THAILAND][0][6] = -4, [1][0][RTW89_FCC][1][8] = -4, [1][0][RTW89_FCC][2][8] = 52, [1][0][RTW89_ETSI][1][8] = 46, @@ -52366,6 +54903,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][8] = 42, [1][0][RTW89_MKK][0][8] = 2, [1][0][RTW89_IC][1][8] = -4, + [1][0][RTW89_IC][2][8] = 52, [1][0][RTW89_KCC][1][8] = -2, [1][0][RTW89_KCC][0][8] = -2, [1][0][RTW89_ACMA][1][8] = 46, @@ -52375,6 +54913,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][8] = 6, [1][0][RTW89_UK][1][8] = 46, [1][0][RTW89_UK][0][8] = 6, + [1][0][RTW89_THAILAND][1][8] = 42, + [1][0][RTW89_THAILAND][0][8] = -4, [1][0][RTW89_FCC][1][10] = -4, [1][0][RTW89_FCC][2][10] = 52, [1][0][RTW89_ETSI][1][10] = 46, @@ -52382,6 +54922,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][10] = 42, [1][0][RTW89_MKK][0][10] = 2, [1][0][RTW89_IC][1][10] = -4, + [1][0][RTW89_IC][2][10] = 52, [1][0][RTW89_KCC][1][10] = -2, [1][0][RTW89_KCC][0][10] = -2, [1][0][RTW89_ACMA][1][10] = 46, @@ -52391,6 +54932,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][10] = 6, [1][0][RTW89_UK][1][10] = 46, [1][0][RTW89_UK][0][10] = 6, + [1][0][RTW89_THAILAND][1][10] = 42, + [1][0][RTW89_THAILAND][0][10] = -4, [1][0][RTW89_FCC][1][12] = -4, [1][0][RTW89_FCC][2][12] = 52, [1][0][RTW89_ETSI][1][12] = 46, @@ -52398,6 +54941,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][12] = 42, [1][0][RTW89_MKK][0][12] = 2, [1][0][RTW89_IC][1][12] = -4, + [1][0][RTW89_IC][2][12] = 52, [1][0][RTW89_KCC][1][12] = -2, [1][0][RTW89_KCC][0][12] = -2, [1][0][RTW89_ACMA][1][12] = 46, @@ -52407,6 +54951,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][12] = 6, [1][0][RTW89_UK][1][12] = 46, [1][0][RTW89_UK][0][12] = 6, + [1][0][RTW89_THAILAND][1][12] = 42, + [1][0][RTW89_THAILAND][0][12] = -4, [1][0][RTW89_FCC][1][14] = -4, [1][0][RTW89_FCC][2][14] = 52, [1][0][RTW89_ETSI][1][14] = 46, @@ -52414,6 +54960,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][14] = 42, [1][0][RTW89_MKK][0][14] = 2, [1][0][RTW89_IC][1][14] = -4, + [1][0][RTW89_IC][2][14] = 52, 
[1][0][RTW89_KCC][1][14] = -2, [1][0][RTW89_KCC][0][14] = -2, [1][0][RTW89_ACMA][1][14] = 46, @@ -52423,6 +54970,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][14] = 6, [1][0][RTW89_UK][1][14] = 46, [1][0][RTW89_UK][0][14] = 6, + [1][0][RTW89_THAILAND][1][14] = 42, + [1][0][RTW89_THAILAND][0][14] = -4, [1][0][RTW89_FCC][1][15] = -4, [1][0][RTW89_FCC][2][15] = 52, [1][0][RTW89_ETSI][1][15] = 46, @@ -52430,6 +54979,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][15] = 42, [1][0][RTW89_MKK][0][15] = 2, [1][0][RTW89_IC][1][15] = -4, + [1][0][RTW89_IC][2][15] = 52, [1][0][RTW89_KCC][1][15] = -2, [1][0][RTW89_KCC][0][15] = -2, [1][0][RTW89_ACMA][1][15] = 46, @@ -52439,6 +54989,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][15] = 6, [1][0][RTW89_UK][1][15] = 46, [1][0][RTW89_UK][0][15] = 6, + [1][0][RTW89_THAILAND][1][15] = 42, + [1][0][RTW89_THAILAND][0][15] = -4, [1][0][RTW89_FCC][1][17] = -4, [1][0][RTW89_FCC][2][17] = 52, [1][0][RTW89_ETSI][1][17] = 46, @@ -52446,6 +54998,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][17] = 42, [1][0][RTW89_MKK][0][17] = 2, [1][0][RTW89_IC][1][17] = -4, + [1][0][RTW89_IC][2][17] = 52, [1][0][RTW89_KCC][1][17] = -2, [1][0][RTW89_KCC][0][17] = -2, [1][0][RTW89_ACMA][1][17] = 46, @@ -52455,6 +55008,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][17] = 6, [1][0][RTW89_UK][1][17] = 46, [1][0][RTW89_UK][0][17] = 6, + [1][0][RTW89_THAILAND][1][17] = 42, + [1][0][RTW89_THAILAND][0][17] = -4, [1][0][RTW89_FCC][1][19] = -4, [1][0][RTW89_FCC][2][19] = 52, [1][0][RTW89_ETSI][1][19] = 46, @@ -52462,6 +55017,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][19] = 42, [1][0][RTW89_MKK][0][19] = 2, [1][0][RTW89_IC][1][19] = -4, + [1][0][RTW89_IC][2][19] = 52, [1][0][RTW89_KCC][1][19] = -2, [1][0][RTW89_KCC][0][19] = -2, [1][0][RTW89_ACMA][1][19] = 46, @@ -52471,6 +55027,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][19] = 6, [1][0][RTW89_UK][1][19] = 46, [1][0][RTW89_UK][0][19] = 6, + [1][0][RTW89_THAILAND][1][19] = 42, + [1][0][RTW89_THAILAND][0][19] = -4, [1][0][RTW89_FCC][1][21] = -4, [1][0][RTW89_FCC][2][21] = 52, [1][0][RTW89_ETSI][1][21] = 46, @@ -52478,6 +55036,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][21] = 42, [1][0][RTW89_MKK][0][21] = 2, [1][0][RTW89_IC][1][21] = -4, + [1][0][RTW89_IC][2][21] = 52, [1][0][RTW89_KCC][1][21] = -2, [1][0][RTW89_KCC][0][21] = -2, [1][0][RTW89_ACMA][1][21] = 46, @@ -52487,6 +55046,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][21] = 6, [1][0][RTW89_UK][1][21] = 46, [1][0][RTW89_UK][0][21] = 6, + [1][0][RTW89_THAILAND][1][21] = 42, + [1][0][RTW89_THAILAND][0][21] = -4, [1][0][RTW89_FCC][1][23] = -4, [1][0][RTW89_FCC][2][23] = 66, [1][0][RTW89_ETSI][1][23] = 46, @@ -52494,6 +55055,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][23] = 42, [1][0][RTW89_MKK][0][23] = 2, [1][0][RTW89_IC][1][23] = -4, + [1][0][RTW89_IC][2][23] = 66, [1][0][RTW89_KCC][1][23] = -2, [1][0][RTW89_KCC][0][23] = -2, [1][0][RTW89_ACMA][1][23] = 46, @@ -52503,6 +55065,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][23] = 6, [1][0][RTW89_UK][1][23] = 46, 
[1][0][RTW89_UK][0][23] = 6, + [1][0][RTW89_THAILAND][1][23] = 42, + [1][0][RTW89_THAILAND][0][23] = -4, [1][0][RTW89_FCC][1][25] = -4, [1][0][RTW89_FCC][2][25] = 66, [1][0][RTW89_ETSI][1][25] = 46, @@ -52510,6 +55074,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][25] = 42, [1][0][RTW89_MKK][0][25] = 2, [1][0][RTW89_IC][1][25] = -4, + [1][0][RTW89_IC][2][25] = 66, [1][0][RTW89_KCC][1][25] = -2, [1][0][RTW89_KCC][0][25] = -2, [1][0][RTW89_ACMA][1][25] = 46, @@ -52519,6 +55084,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][25] = 6, [1][0][RTW89_UK][1][25] = 46, [1][0][RTW89_UK][0][25] = 6, + [1][0][RTW89_THAILAND][1][25] = 42, + [1][0][RTW89_THAILAND][0][25] = -4, [1][0][RTW89_FCC][1][27] = -4, [1][0][RTW89_FCC][2][27] = 66, [1][0][RTW89_ETSI][1][27] = 46, @@ -52526,6 +55093,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][27] = 42, [1][0][RTW89_MKK][0][27] = 2, [1][0][RTW89_IC][1][27] = -4, + [1][0][RTW89_IC][2][27] = 66, [1][0][RTW89_KCC][1][27] = -2, [1][0][RTW89_KCC][0][27] = -2, [1][0][RTW89_ACMA][1][27] = 46, @@ -52535,6 +55103,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][27] = 6, [1][0][RTW89_UK][1][27] = 46, [1][0][RTW89_UK][0][27] = 6, + [1][0][RTW89_THAILAND][1][27] = 42, + [1][0][RTW89_THAILAND][0][27] = -4, [1][0][RTW89_FCC][1][29] = -4, [1][0][RTW89_FCC][2][29] = 66, [1][0][RTW89_ETSI][1][29] = 46, @@ -52542,6 +55112,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][29] = 42, [1][0][RTW89_MKK][0][29] = 2, [1][0][RTW89_IC][1][29] = -4, + [1][0][RTW89_IC][2][29] = 66, [1][0][RTW89_KCC][1][29] = -2, [1][0][RTW89_KCC][0][29] = -2, [1][0][RTW89_ACMA][1][29] = 46, @@ -52551,6 +55122,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][29] = 6, [1][0][RTW89_UK][1][29] = 46, [1][0][RTW89_UK][0][29] = 6, + [1][0][RTW89_THAILAND][1][29] = 42, + [1][0][RTW89_THAILAND][0][29] = -4, [1][0][RTW89_FCC][1][30] = -4, [1][0][RTW89_FCC][2][30] = 66, [1][0][RTW89_ETSI][1][30] = 46, @@ -52558,6 +55131,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][30] = 42, [1][0][RTW89_MKK][0][30] = 2, [1][0][RTW89_IC][1][30] = -4, + [1][0][RTW89_IC][2][30] = 66, [1][0][RTW89_KCC][1][30] = -2, [1][0][RTW89_KCC][0][30] = -2, [1][0][RTW89_ACMA][1][30] = 46, @@ -52567,6 +55141,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][30] = 6, [1][0][RTW89_UK][1][30] = 46, [1][0][RTW89_UK][0][30] = 6, + [1][0][RTW89_THAILAND][1][30] = 42, + [1][0][RTW89_THAILAND][0][30] = -4, [1][0][RTW89_FCC][1][32] = -4, [1][0][RTW89_FCC][2][32] = 66, [1][0][RTW89_ETSI][1][32] = 46, @@ -52574,6 +55150,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][32] = 42, [1][0][RTW89_MKK][0][32] = 2, [1][0][RTW89_IC][1][32] = -4, + [1][0][RTW89_IC][2][32] = 66, [1][0][RTW89_KCC][1][32] = -2, [1][0][RTW89_KCC][0][32] = -2, [1][0][RTW89_ACMA][1][32] = 46, @@ -52583,6 +55160,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][32] = 6, [1][0][RTW89_UK][1][32] = 46, [1][0][RTW89_UK][0][32] = 6, + [1][0][RTW89_THAILAND][1][32] = 42, + [1][0][RTW89_THAILAND][0][32] = -4, [1][0][RTW89_FCC][1][34] = -4, [1][0][RTW89_FCC][2][34] = 66, [1][0][RTW89_ETSI][1][34] = 46, @@ -52590,6 +55169,7 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][34] = 42, [1][0][RTW89_MKK][0][34] = 2, [1][0][RTW89_IC][1][34] = -4, + [1][0][RTW89_IC][2][34] = 66, [1][0][RTW89_KCC][1][34] = -2, [1][0][RTW89_KCC][0][34] = -2, [1][0][RTW89_ACMA][1][34] = 46, @@ -52599,6 +55179,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][34] = 6, [1][0][RTW89_UK][1][34] = 46, [1][0][RTW89_UK][0][34] = 6, + [1][0][RTW89_THAILAND][1][34] = 42, + [1][0][RTW89_THAILAND][0][34] = -4, [1][0][RTW89_FCC][1][36] = -4, [1][0][RTW89_FCC][2][36] = 66, [1][0][RTW89_ETSI][1][36] = 46, @@ -52606,6 +55188,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][36] = 42, [1][0][RTW89_MKK][0][36] = 2, [1][0][RTW89_IC][1][36] = -4, + [1][0][RTW89_IC][2][36] = 66, [1][0][RTW89_KCC][1][36] = -2, [1][0][RTW89_KCC][0][36] = -2, [1][0][RTW89_ACMA][1][36] = 46, @@ -52615,6 +55198,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][36] = 6, [1][0][RTW89_UK][1][36] = 46, [1][0][RTW89_UK][0][36] = 6, + [1][0][RTW89_THAILAND][1][36] = 42, + [1][0][RTW89_THAILAND][0][36] = -4, [1][0][RTW89_FCC][1][38] = -4, [1][0][RTW89_FCC][2][38] = 66, [1][0][RTW89_ETSI][1][38] = 46, @@ -52622,6 +55207,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][38] = 42, [1][0][RTW89_MKK][0][38] = 2, [1][0][RTW89_IC][1][38] = -4, + [1][0][RTW89_IC][2][38] = 66, [1][0][RTW89_KCC][1][38] = -2, [1][0][RTW89_KCC][0][38] = -2, [1][0][RTW89_ACMA][1][38] = 46, @@ -52631,6 +55217,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][38] = 6, [1][0][RTW89_UK][1][38] = 46, [1][0][RTW89_UK][0][38] = 6, + [1][0][RTW89_THAILAND][1][38] = 42, + [1][0][RTW89_THAILAND][0][38] = -4, [1][0][RTW89_FCC][1][40] = -4, [1][0][RTW89_FCC][2][40] = 66, [1][0][RTW89_ETSI][1][40] = 46, @@ -52638,6 +55226,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][40] = 42, [1][0][RTW89_MKK][0][40] = 2, [1][0][RTW89_IC][1][40] = -4, + [1][0][RTW89_IC][2][40] = 66, [1][0][RTW89_KCC][1][40] = -2, [1][0][RTW89_KCC][0][40] = -2, [1][0][RTW89_ACMA][1][40] = 46, @@ -52647,6 +55236,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][40] = 6, [1][0][RTW89_UK][1][40] = 46, [1][0][RTW89_UK][0][40] = 6, + [1][0][RTW89_THAILAND][1][40] = 42, + [1][0][RTW89_THAILAND][0][40] = -4, [1][0][RTW89_FCC][1][42] = -4, [1][0][RTW89_FCC][2][42] = 66, [1][0][RTW89_ETSI][1][42] = 46, @@ -52654,6 +55245,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][42] = 42, [1][0][RTW89_MKK][0][42] = 2, [1][0][RTW89_IC][1][42] = -4, + [1][0][RTW89_IC][2][42] = 66, [1][0][RTW89_KCC][1][42] = -2, [1][0][RTW89_KCC][0][42] = -2, [1][0][RTW89_ACMA][1][42] = 46, @@ -52663,6 +55255,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][42] = 6, [1][0][RTW89_UK][1][42] = 46, [1][0][RTW89_UK][0][42] = 6, + [1][0][RTW89_THAILAND][1][42] = 42, + [1][0][RTW89_THAILAND][0][42] = -4, [1][0][RTW89_FCC][1][44] = -4, [1][0][RTW89_FCC][2][44] = 66, [1][0][RTW89_ETSI][1][44] = 46, @@ -52670,6 +55264,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][44] = 22, [1][0][RTW89_MKK][0][44] = 4, [1][0][RTW89_IC][1][44] = -4, + [1][0][RTW89_IC][2][44] = 66, [1][0][RTW89_KCC][1][44] = -2, [1][0][RTW89_KCC][0][44] = -2, 
[1][0][RTW89_ACMA][1][44] = 46, @@ -52679,6 +55274,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][44] = 8, [1][0][RTW89_UK][1][44] = 46, [1][0][RTW89_UK][0][44] = 8, + [1][0][RTW89_THAILAND][1][44] = 42, + [1][0][RTW89_THAILAND][0][44] = -4, [1][0][RTW89_FCC][1][45] = -4, [1][0][RTW89_FCC][2][45] = 127, [1][0][RTW89_ETSI][1][45] = 127, @@ -52686,6 +55283,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][45] = 127, [1][0][RTW89_MKK][0][45] = 127, [1][0][RTW89_IC][1][45] = -4, + [1][0][RTW89_IC][2][45] = 68, [1][0][RTW89_KCC][1][45] = -2, [1][0][RTW89_KCC][0][45] = 127, [1][0][RTW89_ACMA][1][45] = 127, @@ -52695,6 +55293,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][45] = 127, [1][0][RTW89_UK][1][45] = 127, [1][0][RTW89_UK][0][45] = 127, + [1][0][RTW89_THAILAND][1][45] = 127, + [1][0][RTW89_THAILAND][0][45] = 127, [1][0][RTW89_FCC][1][47] = -4, [1][0][RTW89_FCC][2][47] = 127, [1][0][RTW89_ETSI][1][47] = 127, @@ -52702,6 +55302,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][47] = 127, [1][0][RTW89_MKK][0][47] = 127, [1][0][RTW89_IC][1][47] = -4, + [1][0][RTW89_IC][2][47] = 68, [1][0][RTW89_KCC][1][47] = -2, [1][0][RTW89_KCC][0][47] = 127, [1][0][RTW89_ACMA][1][47] = 127, @@ -52711,6 +55312,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][47] = 127, [1][0][RTW89_UK][1][47] = 127, [1][0][RTW89_UK][0][47] = 127, + [1][0][RTW89_THAILAND][1][47] = 127, + [1][0][RTW89_THAILAND][0][47] = 127, [1][0][RTW89_FCC][1][49] = -4, [1][0][RTW89_FCC][2][49] = 127, [1][0][RTW89_ETSI][1][49] = 127, @@ -52718,6 +55321,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][49] = 127, [1][0][RTW89_MKK][0][49] = 127, [1][0][RTW89_IC][1][49] = -4, + [1][0][RTW89_IC][2][49] = 68, [1][0][RTW89_KCC][1][49] = -2, [1][0][RTW89_KCC][0][49] = 127, [1][0][RTW89_ACMA][1][49] = 127, @@ -52727,6 +55331,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][49] = 127, [1][0][RTW89_UK][1][49] = 127, [1][0][RTW89_UK][0][49] = 127, + [1][0][RTW89_THAILAND][1][49] = 127, + [1][0][RTW89_THAILAND][0][49] = 127, [1][0][RTW89_FCC][1][51] = -4, [1][0][RTW89_FCC][2][51] = 127, [1][0][RTW89_ETSI][1][51] = 127, @@ -52734,6 +55340,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][51] = 127, [1][0][RTW89_MKK][0][51] = 127, [1][0][RTW89_IC][1][51] = -4, + [1][0][RTW89_IC][2][51] = 68, [1][0][RTW89_KCC][1][51] = -2, [1][0][RTW89_KCC][0][51] = 127, [1][0][RTW89_ACMA][1][51] = 127, @@ -52743,6 +55350,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][51] = 127, [1][0][RTW89_UK][1][51] = 127, [1][0][RTW89_UK][0][51] = 127, + [1][0][RTW89_THAILAND][1][51] = 127, + [1][0][RTW89_THAILAND][0][51] = 127, [1][0][RTW89_FCC][1][53] = -4, [1][0][RTW89_FCC][2][53] = 127, [1][0][RTW89_ETSI][1][53] = 127, @@ -52750,6 +55359,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][53] = 127, [1][0][RTW89_MKK][0][53] = 127, [1][0][RTW89_IC][1][53] = -4, + [1][0][RTW89_IC][2][53] = 68, [1][0][RTW89_KCC][1][53] = -2, [1][0][RTW89_KCC][0][53] = 127, [1][0][RTW89_ACMA][1][53] = 127, @@ -52759,6 +55369,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][53] = 127, [1][0][RTW89_UK][1][53] = 127, 
[1][0][RTW89_UK][0][53] = 127, + [1][0][RTW89_THAILAND][1][53] = 127, + [1][0][RTW89_THAILAND][0][53] = 127, [1][0][RTW89_FCC][1][55] = -4, [1][0][RTW89_FCC][2][55] = 68, [1][0][RTW89_ETSI][1][55] = 127, @@ -52766,6 +55378,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][55] = 127, [1][0][RTW89_MKK][0][55] = 127, [1][0][RTW89_IC][1][55] = -4, + [1][0][RTW89_IC][2][55] = 68, [1][0][RTW89_KCC][1][55] = -2, [1][0][RTW89_KCC][0][55] = 127, [1][0][RTW89_ACMA][1][55] = 127, @@ -52775,6 +55388,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][55] = 127, [1][0][RTW89_UK][1][55] = 127, [1][0][RTW89_UK][0][55] = 127, + [1][0][RTW89_THAILAND][1][55] = 127, + [1][0][RTW89_THAILAND][0][55] = 127, [1][0][RTW89_FCC][1][57] = -4, [1][0][RTW89_FCC][2][57] = 68, [1][0][RTW89_ETSI][1][57] = 127, @@ -52782,6 +55397,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][57] = 127, [1][0][RTW89_MKK][0][57] = 127, [1][0][RTW89_IC][1][57] = -4, + [1][0][RTW89_IC][2][57] = 68, [1][0][RTW89_KCC][1][57] = -2, [1][0][RTW89_KCC][0][57] = 127, [1][0][RTW89_ACMA][1][57] = 127, @@ -52791,6 +55407,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][57] = 127, [1][0][RTW89_UK][1][57] = 127, [1][0][RTW89_UK][0][57] = 127, + [1][0][RTW89_THAILAND][1][57] = 127, + [1][0][RTW89_THAILAND][0][57] = 127, [1][0][RTW89_FCC][1][59] = -4, [1][0][RTW89_FCC][2][59] = 68, [1][0][RTW89_ETSI][1][59] = 127, @@ -52798,6 +55416,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][59] = 127, [1][0][RTW89_MKK][0][59] = 127, [1][0][RTW89_IC][1][59] = -4, + [1][0][RTW89_IC][2][59] = 68, [1][0][RTW89_KCC][1][59] = -2, [1][0][RTW89_KCC][0][59] = 127, [1][0][RTW89_ACMA][1][59] = 127, @@ -52807,6 +55426,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][59] = 127, [1][0][RTW89_UK][1][59] = 127, [1][0][RTW89_UK][0][59] = 127, + [1][0][RTW89_THAILAND][1][59] = 127, + [1][0][RTW89_THAILAND][0][59] = 127, [1][0][RTW89_FCC][1][60] = -4, [1][0][RTW89_FCC][2][60] = 68, [1][0][RTW89_ETSI][1][60] = 127, @@ -52814,6 +55435,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][60] = 127, [1][0][RTW89_MKK][0][60] = 127, [1][0][RTW89_IC][1][60] = -4, + [1][0][RTW89_IC][2][60] = 68, [1][0][RTW89_KCC][1][60] = -2, [1][0][RTW89_KCC][0][60] = 127, [1][0][RTW89_ACMA][1][60] = 127, @@ -52823,6 +55445,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][60] = 127, [1][0][RTW89_UK][1][60] = 127, [1][0][RTW89_UK][0][60] = 127, + [1][0][RTW89_THAILAND][1][60] = 127, + [1][0][RTW89_THAILAND][0][60] = 127, [1][0][RTW89_FCC][1][62] = -4, [1][0][RTW89_FCC][2][62] = 68, [1][0][RTW89_ETSI][1][62] = 127, @@ -52830,6 +55454,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][62] = 127, [1][0][RTW89_MKK][0][62] = 127, [1][0][RTW89_IC][1][62] = -4, + [1][0][RTW89_IC][2][62] = 68, [1][0][RTW89_KCC][1][62] = -2, [1][0][RTW89_KCC][0][62] = 127, [1][0][RTW89_ACMA][1][62] = 127, @@ -52839,6 +55464,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][62] = 127, [1][0][RTW89_UK][1][62] = 127, [1][0][RTW89_UK][0][62] = 127, + [1][0][RTW89_THAILAND][1][62] = 127, + [1][0][RTW89_THAILAND][0][62] = 127, [1][0][RTW89_FCC][1][64] = -4, [1][0][RTW89_FCC][2][64] = 68, [1][0][RTW89_ETSI][1][64] = 
127, @@ -52846,6 +55473,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][64] = 127, [1][0][RTW89_MKK][0][64] = 127, [1][0][RTW89_IC][1][64] = -4, + [1][0][RTW89_IC][2][64] = 68, [1][0][RTW89_KCC][1][64] = -2, [1][0][RTW89_KCC][0][64] = 127, [1][0][RTW89_ACMA][1][64] = 127, @@ -52855,6 +55483,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][64] = 127, [1][0][RTW89_UK][1][64] = 127, [1][0][RTW89_UK][0][64] = 127, + [1][0][RTW89_THAILAND][1][64] = 127, + [1][0][RTW89_THAILAND][0][64] = 127, [1][0][RTW89_FCC][1][66] = -4, [1][0][RTW89_FCC][2][66] = 68, [1][0][RTW89_ETSI][1][66] = 127, @@ -52862,6 +55492,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][66] = 127, [1][0][RTW89_MKK][0][66] = 127, [1][0][RTW89_IC][1][66] = -4, + [1][0][RTW89_IC][2][66] = 68, [1][0][RTW89_KCC][1][66] = -2, [1][0][RTW89_KCC][0][66] = 127, [1][0][RTW89_ACMA][1][66] = 127, @@ -52871,6 +55502,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][66] = 127, [1][0][RTW89_UK][1][66] = 127, [1][0][RTW89_UK][0][66] = 127, + [1][0][RTW89_THAILAND][1][66] = 127, + [1][0][RTW89_THAILAND][0][66] = 127, [1][0][RTW89_FCC][1][68] = -4, [1][0][RTW89_FCC][2][68] = 68, [1][0][RTW89_ETSI][1][68] = 127, @@ -52878,6 +55511,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][68] = 127, [1][0][RTW89_MKK][0][68] = 127, [1][0][RTW89_IC][1][68] = -4, + [1][0][RTW89_IC][2][68] = 68, [1][0][RTW89_KCC][1][68] = -2, [1][0][RTW89_KCC][0][68] = 127, [1][0][RTW89_ACMA][1][68] = 127, @@ -52887,6 +55521,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][68] = 127, [1][0][RTW89_UK][1][68] = 127, [1][0][RTW89_UK][0][68] = 127, + [1][0][RTW89_THAILAND][1][68] = 127, + [1][0][RTW89_THAILAND][0][68] = 127, [1][0][RTW89_FCC][1][70] = -4, [1][0][RTW89_FCC][2][70] = 68, [1][0][RTW89_ETSI][1][70] = 127, @@ -52894,6 +55530,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][70] = 127, [1][0][RTW89_MKK][0][70] = 127, [1][0][RTW89_IC][1][70] = -4, + [1][0][RTW89_IC][2][70] = 68, [1][0][RTW89_KCC][1][70] = -2, [1][0][RTW89_KCC][0][70] = 127, [1][0][RTW89_ACMA][1][70] = 127, @@ -52903,6 +55540,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][70] = 127, [1][0][RTW89_UK][1][70] = 127, [1][0][RTW89_UK][0][70] = 127, + [1][0][RTW89_THAILAND][1][70] = 127, + [1][0][RTW89_THAILAND][0][70] = 127, [1][0][RTW89_FCC][1][72] = -4, [1][0][RTW89_FCC][2][72] = 68, [1][0][RTW89_ETSI][1][72] = 127, @@ -52910,6 +55549,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][72] = 127, [1][0][RTW89_MKK][0][72] = 127, [1][0][RTW89_IC][1][72] = -4, + [1][0][RTW89_IC][2][72] = 68, [1][0][RTW89_KCC][1][72] = -2, [1][0][RTW89_KCC][0][72] = 127, [1][0][RTW89_ACMA][1][72] = 127, @@ -52919,6 +55559,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][72] = 127, [1][0][RTW89_UK][1][72] = 127, [1][0][RTW89_UK][0][72] = 127, + [1][0][RTW89_THAILAND][1][72] = 127, + [1][0][RTW89_THAILAND][0][72] = 127, [1][0][RTW89_FCC][1][74] = -4, [1][0][RTW89_FCC][2][74] = 68, [1][0][RTW89_ETSI][1][74] = 127, @@ -52926,6 +55568,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][74] = 127, [1][0][RTW89_MKK][0][74] = 127, [1][0][RTW89_IC][1][74] = -4, + 
[1][0][RTW89_IC][2][74] = 68, [1][0][RTW89_KCC][1][74] = -2, [1][0][RTW89_KCC][0][74] = 127, [1][0][RTW89_ACMA][1][74] = 127, @@ -52935,6 +55578,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][74] = 127, [1][0][RTW89_UK][1][74] = 127, [1][0][RTW89_UK][0][74] = 127, + [1][0][RTW89_THAILAND][1][74] = 127, + [1][0][RTW89_THAILAND][0][74] = 127, [1][0][RTW89_FCC][1][75] = -4, [1][0][RTW89_FCC][2][75] = 68, [1][0][RTW89_ETSI][1][75] = 127, @@ -52942,6 +55587,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][75] = 127, [1][0][RTW89_MKK][0][75] = 127, [1][0][RTW89_IC][1][75] = -4, + [1][0][RTW89_IC][2][75] = 68, [1][0][RTW89_KCC][1][75] = -2, [1][0][RTW89_KCC][0][75] = 127, [1][0][RTW89_ACMA][1][75] = 127, @@ -52951,6 +55597,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][75] = 127, [1][0][RTW89_UK][1][75] = 127, [1][0][RTW89_UK][0][75] = 127, + [1][0][RTW89_THAILAND][1][75] = 127, + [1][0][RTW89_THAILAND][0][75] = 127, [1][0][RTW89_FCC][1][77] = -4, [1][0][RTW89_FCC][2][77] = 68, [1][0][RTW89_ETSI][1][77] = 127, @@ -52958,6 +55606,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][77] = 127, [1][0][RTW89_MKK][0][77] = 127, [1][0][RTW89_IC][1][77] = -4, + [1][0][RTW89_IC][2][77] = 68, [1][0][RTW89_KCC][1][77] = -2, [1][0][RTW89_KCC][0][77] = 127, [1][0][RTW89_ACMA][1][77] = 127, @@ -52967,6 +55616,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][77] = 127, [1][0][RTW89_UK][1][77] = 127, [1][0][RTW89_UK][0][77] = 127, + [1][0][RTW89_THAILAND][1][77] = 127, + [1][0][RTW89_THAILAND][0][77] = 127, [1][0][RTW89_FCC][1][79] = -4, [1][0][RTW89_FCC][2][79] = 68, [1][0][RTW89_ETSI][1][79] = 127, @@ -52974,6 +55625,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][79] = 127, [1][0][RTW89_MKK][0][79] = 127, [1][0][RTW89_IC][1][79] = -4, + [1][0][RTW89_IC][2][79] = 68, [1][0][RTW89_KCC][1][79] = -2, [1][0][RTW89_KCC][0][79] = 127, [1][0][RTW89_ACMA][1][79] = 127, @@ -52983,6 +55635,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][79] = 127, [1][0][RTW89_UK][1][79] = 127, [1][0][RTW89_UK][0][79] = 127, + [1][0][RTW89_THAILAND][1][79] = 127, + [1][0][RTW89_THAILAND][0][79] = 127, [1][0][RTW89_FCC][1][81] = -4, [1][0][RTW89_FCC][2][81] = 68, [1][0][RTW89_ETSI][1][81] = 127, @@ -52990,6 +55644,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][81] = 127, [1][0][RTW89_MKK][0][81] = 127, [1][0][RTW89_IC][1][81] = -4, + [1][0][RTW89_IC][2][81] = 68, [1][0][RTW89_KCC][1][81] = -2, [1][0][RTW89_KCC][0][81] = 127, [1][0][RTW89_ACMA][1][81] = 127, @@ -52999,6 +55654,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][81] = 127, [1][0][RTW89_UK][1][81] = 127, [1][0][RTW89_UK][0][81] = 127, + [1][0][RTW89_THAILAND][1][81] = 127, + [1][0][RTW89_THAILAND][0][81] = 127, [1][0][RTW89_FCC][1][83] = -4, [1][0][RTW89_FCC][2][83] = 68, [1][0][RTW89_ETSI][1][83] = 127, @@ -53006,6 +55663,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][83] = 127, [1][0][RTW89_MKK][0][83] = 127, [1][0][RTW89_IC][1][83] = -4, + [1][0][RTW89_IC][2][83] = 68, [1][0][RTW89_KCC][1][83] = -2, [1][0][RTW89_KCC][0][83] = 127, [1][0][RTW89_ACMA][1][83] = 127, @@ -53015,6 +55673,8 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][83] = 127, [1][0][RTW89_UK][1][83] = 127, [1][0][RTW89_UK][0][83] = 127, + [1][0][RTW89_THAILAND][1][83] = 127, + [1][0][RTW89_THAILAND][0][83] = 127, [1][0][RTW89_FCC][1][85] = -4, [1][0][RTW89_FCC][2][85] = 68, [1][0][RTW89_ETSI][1][85] = 127, @@ -53022,6 +55682,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][85] = 127, [1][0][RTW89_MKK][0][85] = 127, [1][0][RTW89_IC][1][85] = -4, + [1][0][RTW89_IC][2][85] = 68, [1][0][RTW89_KCC][1][85] = -2, [1][0][RTW89_KCC][0][85] = 127, [1][0][RTW89_ACMA][1][85] = 127, @@ -53031,6 +55692,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][85] = 127, [1][0][RTW89_UK][1][85] = 127, [1][0][RTW89_UK][0][85] = 127, + [1][0][RTW89_THAILAND][1][85] = 127, + [1][0][RTW89_THAILAND][0][85] = 127, [1][0][RTW89_FCC][1][87] = -4, [1][0][RTW89_FCC][2][87] = 127, [1][0][RTW89_ETSI][1][87] = 127, @@ -53038,6 +55701,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][87] = 127, [1][0][RTW89_MKK][0][87] = 127, [1][0][RTW89_IC][1][87] = -4, + [1][0][RTW89_IC][2][87] = 127, [1][0][RTW89_KCC][1][87] = -2, [1][0][RTW89_KCC][0][87] = 127, [1][0][RTW89_ACMA][1][87] = 127, @@ -53047,6 +55711,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][87] = 127, [1][0][RTW89_UK][1][87] = 127, [1][0][RTW89_UK][0][87] = 127, + [1][0][RTW89_THAILAND][1][87] = 127, + [1][0][RTW89_THAILAND][0][87] = 127, [1][0][RTW89_FCC][1][89] = -4, [1][0][RTW89_FCC][2][89] = 127, [1][0][RTW89_ETSI][1][89] = 127, @@ -53054,6 +55720,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][89] = 127, [1][0][RTW89_MKK][0][89] = 127, [1][0][RTW89_IC][1][89] = -4, + [1][0][RTW89_IC][2][89] = 127, [1][0][RTW89_KCC][1][89] = -2, [1][0][RTW89_KCC][0][89] = 127, [1][0][RTW89_ACMA][1][89] = 127, @@ -53063,6 +55730,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][89] = 127, [1][0][RTW89_UK][1][89] = 127, [1][0][RTW89_UK][0][89] = 127, + [1][0][RTW89_THAILAND][1][89] = 127, + [1][0][RTW89_THAILAND][0][89] = 127, [1][0][RTW89_FCC][1][90] = -4, [1][0][RTW89_FCC][2][90] = 127, [1][0][RTW89_ETSI][1][90] = 127, @@ -53070,6 +55739,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][90] = 127, [1][0][RTW89_MKK][0][90] = 127, [1][0][RTW89_IC][1][90] = -4, + [1][0][RTW89_IC][2][90] = 127, [1][0][RTW89_KCC][1][90] = -2, [1][0][RTW89_KCC][0][90] = 127, [1][0][RTW89_ACMA][1][90] = 127, @@ -53079,6 +55749,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][90] = 127, [1][0][RTW89_UK][1][90] = 127, [1][0][RTW89_UK][0][90] = 127, + [1][0][RTW89_THAILAND][1][90] = 127, + [1][0][RTW89_THAILAND][0][90] = 127, [1][0][RTW89_FCC][1][92] = -4, [1][0][RTW89_FCC][2][92] = 127, [1][0][RTW89_ETSI][1][92] = 127, @@ -53086,6 +55758,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][92] = 127, [1][0][RTW89_MKK][0][92] = 127, [1][0][RTW89_IC][1][92] = -4, + [1][0][RTW89_IC][2][92] = 127, [1][0][RTW89_KCC][1][92] = -2, [1][0][RTW89_KCC][0][92] = 127, [1][0][RTW89_ACMA][1][92] = 127, @@ -53095,6 +55768,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][92] = 127, [1][0][RTW89_UK][1][92] = 127, [1][0][RTW89_UK][0][92] = 127, + [1][0][RTW89_THAILAND][1][92] = 127, 
+ [1][0][RTW89_THAILAND][0][92] = 127, [1][0][RTW89_FCC][1][94] = -4, [1][0][RTW89_FCC][2][94] = 127, [1][0][RTW89_ETSI][1][94] = 127, @@ -53102,6 +55777,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][94] = 127, [1][0][RTW89_MKK][0][94] = 127, [1][0][RTW89_IC][1][94] = -4, + [1][0][RTW89_IC][2][94] = 127, [1][0][RTW89_KCC][1][94] = -2, [1][0][RTW89_KCC][0][94] = 127, [1][0][RTW89_ACMA][1][94] = 127, @@ -53111,6 +55787,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][94] = 127, [1][0][RTW89_UK][1][94] = 127, [1][0][RTW89_UK][0][94] = 127, + [1][0][RTW89_THAILAND][1][94] = 127, + [1][0][RTW89_THAILAND][0][94] = 127, [1][0][RTW89_FCC][1][96] = -4, [1][0][RTW89_FCC][2][96] = 127, [1][0][RTW89_ETSI][1][96] = 127, @@ -53118,6 +55796,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][96] = 127, [1][0][RTW89_MKK][0][96] = 127, [1][0][RTW89_IC][1][96] = -4, + [1][0][RTW89_IC][2][96] = 127, [1][0][RTW89_KCC][1][96] = -2, [1][0][RTW89_KCC][0][96] = 127, [1][0][RTW89_ACMA][1][96] = 127, @@ -53127,6 +55806,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][96] = 127, [1][0][RTW89_UK][1][96] = 127, [1][0][RTW89_UK][0][96] = 127, + [1][0][RTW89_THAILAND][1][96] = 127, + [1][0][RTW89_THAILAND][0][96] = 127, [1][0][RTW89_FCC][1][98] = -4, [1][0][RTW89_FCC][2][98] = 127, [1][0][RTW89_ETSI][1][98] = 127, @@ -53134,6 +55815,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][98] = 127, [1][0][RTW89_MKK][0][98] = 127, [1][0][RTW89_IC][1][98] = -4, + [1][0][RTW89_IC][2][98] = 127, [1][0][RTW89_KCC][1][98] = -2, [1][0][RTW89_KCC][0][98] = 127, [1][0][RTW89_ACMA][1][98] = 127, @@ -53143,6 +55825,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][98] = 127, [1][0][RTW89_UK][1][98] = 127, [1][0][RTW89_UK][0][98] = 127, + [1][0][RTW89_THAILAND][1][98] = 127, + [1][0][RTW89_THAILAND][0][98] = 127, [1][0][RTW89_FCC][1][100] = -4, [1][0][RTW89_FCC][2][100] = 127, [1][0][RTW89_ETSI][1][100] = 127, @@ -53150,6 +55834,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][100] = 127, [1][0][RTW89_MKK][0][100] = 127, [1][0][RTW89_IC][1][100] = -4, + [1][0][RTW89_IC][2][100] = 127, [1][0][RTW89_KCC][1][100] = -2, [1][0][RTW89_KCC][0][100] = 127, [1][0][RTW89_ACMA][1][100] = 127, @@ -53159,6 +55844,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][100] = 127, [1][0][RTW89_UK][1][100] = 127, [1][0][RTW89_UK][0][100] = 127, + [1][0][RTW89_THAILAND][1][100] = 127, + [1][0][RTW89_THAILAND][0][100] = 127, [1][0][RTW89_FCC][1][102] = -4, [1][0][RTW89_FCC][2][102] = 127, [1][0][RTW89_ETSI][1][102] = 127, @@ -53166,6 +55853,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][102] = 127, [1][0][RTW89_MKK][0][102] = 127, [1][0][RTW89_IC][1][102] = -4, + [1][0][RTW89_IC][2][102] = 127, [1][0][RTW89_KCC][1][102] = -2, [1][0][RTW89_KCC][0][102] = 127, [1][0][RTW89_ACMA][1][102] = 127, @@ -53175,6 +55863,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][102] = 127, [1][0][RTW89_UK][1][102] = 127, [1][0][RTW89_UK][0][102] = 127, + [1][0][RTW89_THAILAND][1][102] = 127, + [1][0][RTW89_THAILAND][0][102] = 127, [1][0][RTW89_FCC][1][104] = -4, [1][0][RTW89_FCC][2][104] = 127, [1][0][RTW89_ETSI][1][104] = 127, @@ -53182,6 +55872,7 
@@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][104] = 127, [1][0][RTW89_MKK][0][104] = 127, [1][0][RTW89_IC][1][104] = -4, + [1][0][RTW89_IC][2][104] = 127, [1][0][RTW89_KCC][1][104] = -2, [1][0][RTW89_KCC][0][104] = 127, [1][0][RTW89_ACMA][1][104] = 127, @@ -53191,6 +55882,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][104] = 127, [1][0][RTW89_UK][1][104] = 127, [1][0][RTW89_UK][0][104] = 127, + [1][0][RTW89_THAILAND][1][104] = 127, + [1][0][RTW89_THAILAND][0][104] = 127, [1][0][RTW89_FCC][1][105] = -4, [1][0][RTW89_FCC][2][105] = 127, [1][0][RTW89_ETSI][1][105] = 127, @@ -53198,6 +55891,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][105] = 127, [1][0][RTW89_MKK][0][105] = 127, [1][0][RTW89_IC][1][105] = -4, + [1][0][RTW89_IC][2][105] = 127, [1][0][RTW89_KCC][1][105] = -2, [1][0][RTW89_KCC][0][105] = 127, [1][0][RTW89_ACMA][1][105] = 127, @@ -53207,6 +55901,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][105] = 127, [1][0][RTW89_UK][1][105] = 127, [1][0][RTW89_UK][0][105] = 127, + [1][0][RTW89_THAILAND][1][105] = 127, + [1][0][RTW89_THAILAND][0][105] = 127, [1][0][RTW89_FCC][1][107] = 1, [1][0][RTW89_FCC][2][107] = 127, [1][0][RTW89_ETSI][1][107] = 127, @@ -53214,6 +55910,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][107] = 127, [1][0][RTW89_MKK][0][107] = 127, [1][0][RTW89_IC][1][107] = 1, + [1][0][RTW89_IC][2][107] = 127, [1][0][RTW89_KCC][1][107] = -2, [1][0][RTW89_KCC][0][107] = 127, [1][0][RTW89_ACMA][1][107] = 127, @@ -53223,6 +55920,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][107] = 127, [1][0][RTW89_UK][1][107] = 127, [1][0][RTW89_UK][0][107] = 127, + [1][0][RTW89_THAILAND][1][107] = 127, + [1][0][RTW89_THAILAND][0][107] = 127, [1][0][RTW89_FCC][1][109] = 2, [1][0][RTW89_FCC][2][109] = 127, [1][0][RTW89_ETSI][1][109] = 127, @@ -53230,6 +55929,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][109] = 127, [1][0][RTW89_MKK][0][109] = 127, [1][0][RTW89_IC][1][109] = 2, + [1][0][RTW89_IC][2][109] = 127, [1][0][RTW89_KCC][1][109] = 127, [1][0][RTW89_KCC][0][109] = 127, [1][0][RTW89_ACMA][1][109] = 127, @@ -53239,6 +55939,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][109] = 127, [1][0][RTW89_UK][1][109] = 127, [1][0][RTW89_UK][0][109] = 127, + [1][0][RTW89_THAILAND][1][109] = 127, + [1][0][RTW89_THAILAND][0][109] = 127, [1][0][RTW89_FCC][1][111] = 127, [1][0][RTW89_FCC][2][111] = 127, [1][0][RTW89_ETSI][1][111] = 127, @@ -53246,6 +55948,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][111] = 127, [1][0][RTW89_MKK][0][111] = 127, [1][0][RTW89_IC][1][111] = 127, + [1][0][RTW89_IC][2][111] = 127, [1][0][RTW89_KCC][1][111] = 127, [1][0][RTW89_KCC][0][111] = 127, [1][0][RTW89_ACMA][1][111] = 127, @@ -53255,6 +55958,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][111] = 127, [1][0][RTW89_UK][1][111] = 127, [1][0][RTW89_UK][0][111] = 127, + [1][0][RTW89_THAILAND][1][111] = 127, + [1][0][RTW89_THAILAND][0][111] = 127, [1][0][RTW89_FCC][1][113] = 127, [1][0][RTW89_FCC][2][113] = 127, [1][0][RTW89_ETSI][1][113] = 127, @@ -53262,6 +55967,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][113] = 127, 
[1][0][RTW89_MKK][0][113] = 127, [1][0][RTW89_IC][1][113] = 127, + [1][0][RTW89_IC][2][113] = 127, [1][0][RTW89_KCC][1][113] = 127, [1][0][RTW89_KCC][0][113] = 127, [1][0][RTW89_ACMA][1][113] = 127, @@ -53271,6 +55977,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][113] = 127, [1][0][RTW89_UK][1][113] = 127, [1][0][RTW89_UK][0][113] = 127, + [1][0][RTW89_THAILAND][1][113] = 127, + [1][0][RTW89_THAILAND][0][113] = 127, [1][0][RTW89_FCC][1][115] = 127, [1][0][RTW89_FCC][2][115] = 127, [1][0][RTW89_ETSI][1][115] = 127, @@ -53278,6 +55986,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][115] = 127, [1][0][RTW89_MKK][0][115] = 127, [1][0][RTW89_IC][1][115] = 127, + [1][0][RTW89_IC][2][115] = 127, [1][0][RTW89_KCC][1][115] = 127, [1][0][RTW89_KCC][0][115] = 127, [1][0][RTW89_ACMA][1][115] = 127, @@ -53287,6 +55996,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][115] = 127, [1][0][RTW89_UK][1][115] = 127, [1][0][RTW89_UK][0][115] = 127, + [1][0][RTW89_THAILAND][1][115] = 127, + [1][0][RTW89_THAILAND][0][115] = 127, [1][0][RTW89_FCC][1][117] = 127, [1][0][RTW89_FCC][2][117] = 127, [1][0][RTW89_ETSI][1][117] = 127, @@ -53294,6 +56005,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][117] = 127, [1][0][RTW89_MKK][0][117] = 127, [1][0][RTW89_IC][1][117] = 127, + [1][0][RTW89_IC][2][117] = 127, [1][0][RTW89_KCC][1][117] = 127, [1][0][RTW89_KCC][0][117] = 127, [1][0][RTW89_ACMA][1][117] = 127, @@ -53303,6 +56015,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][117] = 127, [1][0][RTW89_UK][1][117] = 127, [1][0][RTW89_UK][0][117] = 127, + [1][0][RTW89_THAILAND][1][117] = 127, + [1][0][RTW89_THAILAND][0][117] = 127, [1][0][RTW89_FCC][1][119] = 127, [1][0][RTW89_FCC][2][119] = 127, [1][0][RTW89_ETSI][1][119] = 127, @@ -53310,6 +56024,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][1][119] = 127, [1][0][RTW89_MKK][0][119] = 127, [1][0][RTW89_IC][1][119] = 127, + [1][0][RTW89_IC][2][119] = 127, [1][0][RTW89_KCC][1][119] = 127, [1][0][RTW89_KCC][0][119] = 127, [1][0][RTW89_ACMA][1][119] = 127, @@ -53319,6 +56034,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_QATAR][0][119] = 127, [1][0][RTW89_UK][1][119] = 127, [1][0][RTW89_UK][0][119] = 127, + [1][0][RTW89_THAILAND][1][119] = 127, + [1][0][RTW89_THAILAND][0][119] = 127, [1][1][RTW89_FCC][1][0] = -26, [1][1][RTW89_FCC][2][0] = 44, [1][1][RTW89_ETSI][1][0] = 32, @@ -53326,6 +56043,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][0] = 30, [1][1][RTW89_MKK][0][0] = -10, [1][1][RTW89_IC][1][0] = -26, + [1][1][RTW89_IC][2][0] = 44, [1][1][RTW89_KCC][1][0] = -14, [1][1][RTW89_KCC][0][0] = -14, [1][1][RTW89_ACMA][1][0] = 32, @@ -53335,6 +56053,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][0] = -6, [1][1][RTW89_UK][1][0] = 32, [1][1][RTW89_UK][0][0] = -6, + [1][1][RTW89_THAILAND][1][0] = 18, + [1][1][RTW89_THAILAND][0][0] = -26, [1][1][RTW89_FCC][1][2] = -28, [1][1][RTW89_FCC][2][2] = 44, [1][1][RTW89_ETSI][1][2] = 32, @@ -53342,6 +56062,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][2] = 30, [1][1][RTW89_MKK][0][2] = -10, [1][1][RTW89_IC][1][2] = -28, + [1][1][RTW89_IC][2][2] = 44, [1][1][RTW89_KCC][1][2] = -14, [1][1][RTW89_KCC][0][2] = 
-14, [1][1][RTW89_ACMA][1][2] = 32, @@ -53351,6 +56072,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][2] = -6, [1][1][RTW89_UK][1][2] = 32, [1][1][RTW89_UK][0][2] = -6, + [1][1][RTW89_THAILAND][1][2] = 18, + [1][1][RTW89_THAILAND][0][2] = -28, [1][1][RTW89_FCC][1][4] = -28, [1][1][RTW89_FCC][2][4] = 44, [1][1][RTW89_ETSI][1][4] = 32, @@ -53358,6 +56081,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][4] = 30, [1][1][RTW89_MKK][0][4] = -10, [1][1][RTW89_IC][1][4] = -28, + [1][1][RTW89_IC][2][4] = 44, [1][1][RTW89_KCC][1][4] = -14, [1][1][RTW89_KCC][0][4] = -14, [1][1][RTW89_ACMA][1][4] = 32, @@ -53367,6 +56091,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][4] = -6, [1][1][RTW89_UK][1][4] = 32, [1][1][RTW89_UK][0][4] = -6, + [1][1][RTW89_THAILAND][1][4] = 18, + [1][1][RTW89_THAILAND][0][4] = -28, [1][1][RTW89_FCC][1][6] = -28, [1][1][RTW89_FCC][2][6] = 44, [1][1][RTW89_ETSI][1][6] = 32, @@ -53374,6 +56100,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][6] = 30, [1][1][RTW89_MKK][0][6] = -10, [1][1][RTW89_IC][1][6] = -28, + [1][1][RTW89_IC][2][6] = 44, [1][1][RTW89_KCC][1][6] = -14, [1][1][RTW89_KCC][0][6] = -14, [1][1][RTW89_ACMA][1][6] = 32, @@ -53383,6 +56110,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][6] = -6, [1][1][RTW89_UK][1][6] = 32, [1][1][RTW89_UK][0][6] = -6, + [1][1][RTW89_THAILAND][1][6] = 18, + [1][1][RTW89_THAILAND][0][6] = -28, [1][1][RTW89_FCC][1][8] = -28, [1][1][RTW89_FCC][2][8] = 44, [1][1][RTW89_ETSI][1][8] = 32, @@ -53390,6 +56119,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][8] = 30, [1][1][RTW89_MKK][0][8] = -10, [1][1][RTW89_IC][1][8] = -28, + [1][1][RTW89_IC][2][8] = 44, [1][1][RTW89_KCC][1][8] = -14, [1][1][RTW89_KCC][0][8] = -14, [1][1][RTW89_ACMA][1][8] = 32, @@ -53399,6 +56129,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][8] = -6, [1][1][RTW89_UK][1][8] = 32, [1][1][RTW89_UK][0][8] = -6, + [1][1][RTW89_THAILAND][1][8] = 18, + [1][1][RTW89_THAILAND][0][8] = -28, [1][1][RTW89_FCC][1][10] = -28, [1][1][RTW89_FCC][2][10] = 44, [1][1][RTW89_ETSI][1][10] = 32, @@ -53406,6 +56138,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][10] = 30, [1][1][RTW89_MKK][0][10] = -10, [1][1][RTW89_IC][1][10] = -28, + [1][1][RTW89_IC][2][10] = 44, [1][1][RTW89_KCC][1][10] = -14, [1][1][RTW89_KCC][0][10] = -14, [1][1][RTW89_ACMA][1][10] = 32, @@ -53415,6 +56148,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][10] = -6, [1][1][RTW89_UK][1][10] = 32, [1][1][RTW89_UK][0][10] = -6, + [1][1][RTW89_THAILAND][1][10] = 18, + [1][1][RTW89_THAILAND][0][10] = -28, [1][1][RTW89_FCC][1][12] = -28, [1][1][RTW89_FCC][2][12] = 44, [1][1][RTW89_ETSI][1][12] = 32, @@ -53422,6 +56157,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][12] = 30, [1][1][RTW89_MKK][0][12] = -10, [1][1][RTW89_IC][1][12] = -28, + [1][1][RTW89_IC][2][12] = 44, [1][1][RTW89_KCC][1][12] = -14, [1][1][RTW89_KCC][0][12] = -14, [1][1][RTW89_ACMA][1][12] = 32, @@ -53431,6 +56167,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][12] = -6, [1][1][RTW89_UK][1][12] = 32, [1][1][RTW89_UK][0][12] = -6, + [1][1][RTW89_THAILAND][1][12] = 18, + 
[1][1][RTW89_THAILAND][0][12] = -28, [1][1][RTW89_FCC][1][14] = -28, [1][1][RTW89_FCC][2][14] = 44, [1][1][RTW89_ETSI][1][14] = 32, @@ -53438,6 +56176,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][14] = 30, [1][1][RTW89_MKK][0][14] = -10, [1][1][RTW89_IC][1][14] = -28, + [1][1][RTW89_IC][2][14] = 44, [1][1][RTW89_KCC][1][14] = -14, [1][1][RTW89_KCC][0][14] = -14, [1][1][RTW89_ACMA][1][14] = 32, @@ -53447,6 +56186,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][14] = -6, [1][1][RTW89_UK][1][14] = 32, [1][1][RTW89_UK][0][14] = -6, + [1][1][RTW89_THAILAND][1][14] = 18, + [1][1][RTW89_THAILAND][0][14] = -28, [1][1][RTW89_FCC][1][15] = -28, [1][1][RTW89_FCC][2][15] = 44, [1][1][RTW89_ETSI][1][15] = 32, @@ -53454,6 +56195,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][15] = 30, [1][1][RTW89_MKK][0][15] = -10, [1][1][RTW89_IC][1][15] = -28, + [1][1][RTW89_IC][2][15] = 44, [1][1][RTW89_KCC][1][15] = -14, [1][1][RTW89_KCC][0][15] = -14, [1][1][RTW89_ACMA][1][15] = 32, @@ -53463,6 +56205,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][15] = -6, [1][1][RTW89_UK][1][15] = 32, [1][1][RTW89_UK][0][15] = -6, + [1][1][RTW89_THAILAND][1][15] = 18, + [1][1][RTW89_THAILAND][0][15] = -28, [1][1][RTW89_FCC][1][17] = -28, [1][1][RTW89_FCC][2][17] = 44, [1][1][RTW89_ETSI][1][17] = 32, @@ -53470,6 +56214,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][17] = 30, [1][1][RTW89_MKK][0][17] = -10, [1][1][RTW89_IC][1][17] = -28, + [1][1][RTW89_IC][2][17] = 44, [1][1][RTW89_KCC][1][17] = -14, [1][1][RTW89_KCC][0][17] = -14, [1][1][RTW89_ACMA][1][17] = 32, @@ -53479,6 +56224,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][17] = -6, [1][1][RTW89_UK][1][17] = 32, [1][1][RTW89_UK][0][17] = -6, + [1][1][RTW89_THAILAND][1][17] = 18, + [1][1][RTW89_THAILAND][0][17] = -28, [1][1][RTW89_FCC][1][19] = -28, [1][1][RTW89_FCC][2][19] = 44, [1][1][RTW89_ETSI][1][19] = 32, @@ -53486,6 +56233,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][19] = 30, [1][1][RTW89_MKK][0][19] = -10, [1][1][RTW89_IC][1][19] = -28, + [1][1][RTW89_IC][2][19] = 44, [1][1][RTW89_KCC][1][19] = -14, [1][1][RTW89_KCC][0][19] = -14, [1][1][RTW89_ACMA][1][19] = 32, @@ -53495,6 +56243,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][19] = -6, [1][1][RTW89_UK][1][19] = 32, [1][1][RTW89_UK][0][19] = -6, + [1][1][RTW89_THAILAND][1][19] = 18, + [1][1][RTW89_THAILAND][0][19] = -28, [1][1][RTW89_FCC][1][21] = -28, [1][1][RTW89_FCC][2][21] = 44, [1][1][RTW89_ETSI][1][21] = 32, @@ -53502,6 +56252,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][21] = 30, [1][1][RTW89_MKK][0][21] = -10, [1][1][RTW89_IC][1][21] = -28, + [1][1][RTW89_IC][2][21] = 44, [1][1][RTW89_KCC][1][21] = -14, [1][1][RTW89_KCC][0][21] = -14, [1][1][RTW89_ACMA][1][21] = 32, @@ -53511,6 +56262,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][21] = -6, [1][1][RTW89_UK][1][21] = 32, [1][1][RTW89_UK][0][21] = -6, + [1][1][RTW89_THAILAND][1][21] = 18, + [1][1][RTW89_THAILAND][0][21] = -28, [1][1][RTW89_FCC][1][23] = -28, [1][1][RTW89_FCC][2][23] = 44, [1][1][RTW89_ETSI][1][23] = 32, @@ -53518,6 +56271,7 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][23] = 32, [1][1][RTW89_MKK][0][23] = -10, [1][1][RTW89_IC][1][23] = -28, + [1][1][RTW89_IC][2][23] = 44, [1][1][RTW89_KCC][1][23] = -14, [1][1][RTW89_KCC][0][23] = -14, [1][1][RTW89_ACMA][1][23] = 32, @@ -53527,6 +56281,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][23] = -6, [1][1][RTW89_UK][1][23] = 32, [1][1][RTW89_UK][0][23] = -6, + [1][1][RTW89_THAILAND][1][23] = 18, + [1][1][RTW89_THAILAND][0][23] = -28, [1][1][RTW89_FCC][1][25] = -28, [1][1][RTW89_FCC][2][25] = 44, [1][1][RTW89_ETSI][1][25] = 32, @@ -53534,6 +56290,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][25] = 32, [1][1][RTW89_MKK][0][25] = -10, [1][1][RTW89_IC][1][25] = -28, + [1][1][RTW89_IC][2][25] = 44, [1][1][RTW89_KCC][1][25] = -14, [1][1][RTW89_KCC][0][25] = -14, [1][1][RTW89_ACMA][1][25] = 32, @@ -53543,6 +56300,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][25] = -6, [1][1][RTW89_UK][1][25] = 32, [1][1][RTW89_UK][0][25] = -6, + [1][1][RTW89_THAILAND][1][25] = 18, + [1][1][RTW89_THAILAND][0][25] = -28, [1][1][RTW89_FCC][1][27] = -28, [1][1][RTW89_FCC][2][27] = 44, [1][1][RTW89_ETSI][1][27] = 32, @@ -53550,6 +56309,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][27] = 32, [1][1][RTW89_MKK][0][27] = -10, [1][1][RTW89_IC][1][27] = -28, + [1][1][RTW89_IC][2][27] = 44, [1][1][RTW89_KCC][1][27] = -14, [1][1][RTW89_KCC][0][27] = -14, [1][1][RTW89_ACMA][1][27] = 32, @@ -53559,6 +56319,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][27] = -6, [1][1][RTW89_UK][1][27] = 32, [1][1][RTW89_UK][0][27] = -6, + [1][1][RTW89_THAILAND][1][27] = 18, + [1][1][RTW89_THAILAND][0][27] = -28, [1][1][RTW89_FCC][1][29] = -28, [1][1][RTW89_FCC][2][29] = 44, [1][1][RTW89_ETSI][1][29] = 32, @@ -53566,6 +56328,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][29] = 32, [1][1][RTW89_MKK][0][29] = -10, [1][1][RTW89_IC][1][29] = -28, + [1][1][RTW89_IC][2][29] = 44, [1][1][RTW89_KCC][1][29] = -14, [1][1][RTW89_KCC][0][29] = -14, [1][1][RTW89_ACMA][1][29] = 32, @@ -53575,6 +56338,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][29] = -6, [1][1][RTW89_UK][1][29] = 32, [1][1][RTW89_UK][0][29] = -6, + [1][1][RTW89_THAILAND][1][29] = 18, + [1][1][RTW89_THAILAND][0][29] = -28, [1][1][RTW89_FCC][1][30] = -28, [1][1][RTW89_FCC][2][30] = 44, [1][1][RTW89_ETSI][1][30] = 32, @@ -53582,6 +56347,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][30] = 32, [1][1][RTW89_MKK][0][30] = -10, [1][1][RTW89_IC][1][30] = -28, + [1][1][RTW89_IC][2][30] = 44, [1][1][RTW89_KCC][1][30] = -14, [1][1][RTW89_KCC][0][30] = -14, [1][1][RTW89_ACMA][1][30] = 32, @@ -53591,6 +56357,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][30] = -6, [1][1][RTW89_UK][1][30] = 32, [1][1][RTW89_UK][0][30] = -6, + [1][1][RTW89_THAILAND][1][30] = 18, + [1][1][RTW89_THAILAND][0][30] = -28, [1][1][RTW89_FCC][1][32] = -28, [1][1][RTW89_FCC][2][32] = 44, [1][1][RTW89_ETSI][1][32] = 32, @@ -53598,6 +56366,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][32] = 32, [1][1][RTW89_MKK][0][32] = -10, [1][1][RTW89_IC][1][32] = -28, + [1][1][RTW89_IC][2][32] = 44, [1][1][RTW89_KCC][1][32] = -14, 
[1][1][RTW89_KCC][0][32] = -14, [1][1][RTW89_ACMA][1][32] = 32, @@ -53607,6 +56376,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][32] = -6, [1][1][RTW89_UK][1][32] = 32, [1][1][RTW89_UK][0][32] = -6, + [1][1][RTW89_THAILAND][1][32] = 18, + [1][1][RTW89_THAILAND][0][32] = -28, [1][1][RTW89_FCC][1][34] = -28, [1][1][RTW89_FCC][2][34] = 44, [1][1][RTW89_ETSI][1][34] = 32, @@ -53614,6 +56385,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][34] = 32, [1][1][RTW89_MKK][0][34] = -10, [1][1][RTW89_IC][1][34] = -28, + [1][1][RTW89_IC][2][34] = 44, [1][1][RTW89_KCC][1][34] = -14, [1][1][RTW89_KCC][0][34] = -14, [1][1][RTW89_ACMA][1][34] = 32, @@ -53623,6 +56395,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][34] = -6, [1][1][RTW89_UK][1][34] = 32, [1][1][RTW89_UK][0][34] = -6, + [1][1][RTW89_THAILAND][1][34] = 18, + [1][1][RTW89_THAILAND][0][34] = -28, [1][1][RTW89_FCC][1][36] = -28, [1][1][RTW89_FCC][2][36] = 44, [1][1][RTW89_ETSI][1][36] = 32, @@ -53630,6 +56404,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][36] = 32, [1][1][RTW89_MKK][0][36] = -10, [1][1][RTW89_IC][1][36] = -28, + [1][1][RTW89_IC][2][36] = 44, [1][1][RTW89_KCC][1][36] = -14, [1][1][RTW89_KCC][0][36] = -14, [1][1][RTW89_ACMA][1][36] = 32, @@ -53639,6 +56414,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][36] = -6, [1][1][RTW89_UK][1][36] = 32, [1][1][RTW89_UK][0][36] = -6, + [1][1][RTW89_THAILAND][1][36] = 18, + [1][1][RTW89_THAILAND][0][36] = -28, [1][1][RTW89_FCC][1][38] = -28, [1][1][RTW89_FCC][2][38] = 44, [1][1][RTW89_ETSI][1][38] = 32, @@ -53646,6 +56423,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][38] = 32, [1][1][RTW89_MKK][0][38] = -10, [1][1][RTW89_IC][1][38] = -28, + [1][1][RTW89_IC][2][38] = 44, [1][1][RTW89_KCC][1][38] = -14, [1][1][RTW89_KCC][0][38] = -14, [1][1][RTW89_ACMA][1][38] = 32, @@ -53655,6 +56433,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][38] = -6, [1][1][RTW89_UK][1][38] = 32, [1][1][RTW89_UK][0][38] = -6, + [1][1][RTW89_THAILAND][1][38] = 18, + [1][1][RTW89_THAILAND][0][38] = -28, [1][1][RTW89_FCC][1][40] = -28, [1][1][RTW89_FCC][2][40] = 44, [1][1][RTW89_ETSI][1][40] = 32, @@ -53662,6 +56442,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][40] = 32, [1][1][RTW89_MKK][0][40] = -10, [1][1][RTW89_IC][1][40] = -28, + [1][1][RTW89_IC][2][40] = 44, [1][1][RTW89_KCC][1][40] = -14, [1][1][RTW89_KCC][0][40] = -14, [1][1][RTW89_ACMA][1][40] = 32, @@ -53671,6 +56452,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][40] = -6, [1][1][RTW89_UK][1][40] = 32, [1][1][RTW89_UK][0][40] = -6, + [1][1][RTW89_THAILAND][1][40] = 18, + [1][1][RTW89_THAILAND][0][40] = -28, [1][1][RTW89_FCC][1][42] = -28, [1][1][RTW89_FCC][2][42] = 44, [1][1][RTW89_ETSI][1][42] = 32, @@ -53678,6 +56461,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][42] = 32, [1][1][RTW89_MKK][0][42] = -10, [1][1][RTW89_IC][1][42] = -28, + [1][1][RTW89_IC][2][42] = 44, [1][1][RTW89_KCC][1][42] = -14, [1][1][RTW89_KCC][0][42] = -14, [1][1][RTW89_ACMA][1][42] = 32, @@ -53687,6 +56471,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][42] = -6, [1][1][RTW89_UK][1][42] = 32, 
[1][1][RTW89_UK][0][42] = -6, + [1][1][RTW89_THAILAND][1][42] = 18, + [1][1][RTW89_THAILAND][0][42] = -28, [1][1][RTW89_FCC][1][44] = -28, [1][1][RTW89_FCC][2][44] = 44, [1][1][RTW89_ETSI][1][44] = 34, @@ -53694,6 +56480,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][44] = 4, [1][1][RTW89_MKK][0][44] = -8, [1][1][RTW89_IC][1][44] = -28, + [1][1][RTW89_IC][2][44] = 44, [1][1][RTW89_KCC][1][44] = -14, [1][1][RTW89_KCC][0][44] = -14, [1][1][RTW89_ACMA][1][44] = 34, @@ -53703,6 +56490,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][44] = -4, [1][1][RTW89_UK][1][44] = 34, [1][1][RTW89_UK][0][44] = -4, + [1][1][RTW89_THAILAND][1][44] = 18, + [1][1][RTW89_THAILAND][0][44] = -28, [1][1][RTW89_FCC][1][45] = -26, [1][1][RTW89_FCC][2][45] = 127, [1][1][RTW89_ETSI][1][45] = 127, @@ -53710,6 +56499,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][45] = 127, [1][1][RTW89_MKK][0][45] = 127, [1][1][RTW89_IC][1][45] = -26, + [1][1][RTW89_IC][2][45] = 44, [1][1][RTW89_KCC][1][45] = -14, [1][1][RTW89_KCC][0][45] = 127, [1][1][RTW89_ACMA][1][45] = 127, @@ -53719,6 +56509,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][45] = 127, [1][1][RTW89_UK][1][45] = 127, [1][1][RTW89_UK][0][45] = 127, + [1][1][RTW89_THAILAND][1][45] = 127, + [1][1][RTW89_THAILAND][0][45] = 127, [1][1][RTW89_FCC][1][47] = -28, [1][1][RTW89_FCC][2][47] = 127, [1][1][RTW89_ETSI][1][47] = 127, @@ -53726,6 +56518,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][47] = 127, [1][1][RTW89_MKK][0][47] = 127, [1][1][RTW89_IC][1][47] = -28, + [1][1][RTW89_IC][2][47] = 44, [1][1][RTW89_KCC][1][47] = -14, [1][1][RTW89_KCC][0][47] = 127, [1][1][RTW89_ACMA][1][47] = 127, @@ -53735,6 +56528,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][47] = 127, [1][1][RTW89_UK][1][47] = 127, [1][1][RTW89_UK][0][47] = 127, + [1][1][RTW89_THAILAND][1][47] = 127, + [1][1][RTW89_THAILAND][0][47] = 127, [1][1][RTW89_FCC][1][49] = -28, [1][1][RTW89_FCC][2][49] = 127, [1][1][RTW89_ETSI][1][49] = 127, @@ -53742,6 +56537,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][49] = 127, [1][1][RTW89_MKK][0][49] = 127, [1][1][RTW89_IC][1][49] = -28, + [1][1][RTW89_IC][2][49] = 44, [1][1][RTW89_KCC][1][49] = -14, [1][1][RTW89_KCC][0][49] = 127, [1][1][RTW89_ACMA][1][49] = 127, @@ -53751,6 +56547,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][49] = 127, [1][1][RTW89_UK][1][49] = 127, [1][1][RTW89_UK][0][49] = 127, + [1][1][RTW89_THAILAND][1][49] = 127, + [1][1][RTW89_THAILAND][0][49] = 127, [1][1][RTW89_FCC][1][51] = -28, [1][1][RTW89_FCC][2][51] = 127, [1][1][RTW89_ETSI][1][51] = 127, @@ -53758,6 +56556,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][51] = 127, [1][1][RTW89_MKK][0][51] = 127, [1][1][RTW89_IC][1][51] = -28, + [1][1][RTW89_IC][2][51] = 44, [1][1][RTW89_KCC][1][51] = -14, [1][1][RTW89_KCC][0][51] = 127, [1][1][RTW89_ACMA][1][51] = 127, @@ -53767,6 +56566,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][51] = 127, [1][1][RTW89_UK][1][51] = 127, [1][1][RTW89_UK][0][51] = 127, + [1][1][RTW89_THAILAND][1][51] = 127, + [1][1][RTW89_THAILAND][0][51] = 127, [1][1][RTW89_FCC][1][53] = -26, [1][1][RTW89_FCC][2][53] = 127, 
[1][1][RTW89_ETSI][1][53] = 127, @@ -53774,6 +56575,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][53] = 127, [1][1][RTW89_MKK][0][53] = 127, [1][1][RTW89_IC][1][53] = -26, + [1][1][RTW89_IC][2][53] = 44, [1][1][RTW89_KCC][1][53] = -14, [1][1][RTW89_KCC][0][53] = 127, [1][1][RTW89_ACMA][1][53] = 127, @@ -53783,6 +56585,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][53] = 127, [1][1][RTW89_UK][1][53] = 127, [1][1][RTW89_UK][0][53] = 127, + [1][1][RTW89_THAILAND][1][53] = 127, + [1][1][RTW89_THAILAND][0][53] = 127, [1][1][RTW89_FCC][1][55] = -28, [1][1][RTW89_FCC][2][55] = 44, [1][1][RTW89_ETSI][1][55] = 127, @@ -53790,6 +56594,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][55] = 127, [1][1][RTW89_MKK][0][55] = 127, [1][1][RTW89_IC][1][55] = -28, + [1][1][RTW89_IC][2][55] = 44, [1][1][RTW89_KCC][1][55] = -14, [1][1][RTW89_KCC][0][55] = 127, [1][1][RTW89_ACMA][1][55] = 127, @@ -53799,6 +56604,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][55] = 127, [1][1][RTW89_UK][1][55] = 127, [1][1][RTW89_UK][0][55] = 127, + [1][1][RTW89_THAILAND][1][55] = 127, + [1][1][RTW89_THAILAND][0][55] = 127, [1][1][RTW89_FCC][1][57] = -28, [1][1][RTW89_FCC][2][57] = 44, [1][1][RTW89_ETSI][1][57] = 127, @@ -53806,6 +56613,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][57] = 127, [1][1][RTW89_MKK][0][57] = 127, [1][1][RTW89_IC][1][57] = -28, + [1][1][RTW89_IC][2][57] = 44, [1][1][RTW89_KCC][1][57] = -14, [1][1][RTW89_KCC][0][57] = 127, [1][1][RTW89_ACMA][1][57] = 127, @@ -53815,6 +56623,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][57] = 127, [1][1][RTW89_UK][1][57] = 127, [1][1][RTW89_UK][0][57] = 127, + [1][1][RTW89_THAILAND][1][57] = 127, + [1][1][RTW89_THAILAND][0][57] = 127, [1][1][RTW89_FCC][1][59] = -28, [1][1][RTW89_FCC][2][59] = 44, [1][1][RTW89_ETSI][1][59] = 127, @@ -53822,6 +56632,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][59] = 127, [1][1][RTW89_MKK][0][59] = 127, [1][1][RTW89_IC][1][59] = -28, + [1][1][RTW89_IC][2][59] = 44, [1][1][RTW89_KCC][1][59] = -14, [1][1][RTW89_KCC][0][59] = 127, [1][1][RTW89_ACMA][1][59] = 127, @@ -53831,6 +56642,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][59] = 127, [1][1][RTW89_UK][1][59] = 127, [1][1][RTW89_UK][0][59] = 127, + [1][1][RTW89_THAILAND][1][59] = 127, + [1][1][RTW89_THAILAND][0][59] = 127, [1][1][RTW89_FCC][1][60] = -28, [1][1][RTW89_FCC][2][60] = 44, [1][1][RTW89_ETSI][1][60] = 127, @@ -53838,6 +56651,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][60] = 127, [1][1][RTW89_MKK][0][60] = 127, [1][1][RTW89_IC][1][60] = -28, + [1][1][RTW89_IC][2][60] = 44, [1][1][RTW89_KCC][1][60] = -14, [1][1][RTW89_KCC][0][60] = 127, [1][1][RTW89_ACMA][1][60] = 127, @@ -53847,6 +56661,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][60] = 127, [1][1][RTW89_UK][1][60] = 127, [1][1][RTW89_UK][0][60] = 127, + [1][1][RTW89_THAILAND][1][60] = 127, + [1][1][RTW89_THAILAND][0][60] = 127, [1][1][RTW89_FCC][1][62] = -28, [1][1][RTW89_FCC][2][62] = 44, [1][1][RTW89_ETSI][1][62] = 127, @@ -53854,6 +56670,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][62] = 127, [1][1][RTW89_MKK][0][62] = 
127, [1][1][RTW89_IC][1][62] = -28, + [1][1][RTW89_IC][2][62] = 44, [1][1][RTW89_KCC][1][62] = -14, [1][1][RTW89_KCC][0][62] = 127, [1][1][RTW89_ACMA][1][62] = 127, @@ -53863,6 +56680,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][62] = 127, [1][1][RTW89_UK][1][62] = 127, [1][1][RTW89_UK][0][62] = 127, + [1][1][RTW89_THAILAND][1][62] = 127, + [1][1][RTW89_THAILAND][0][62] = 127, [1][1][RTW89_FCC][1][64] = -28, [1][1][RTW89_FCC][2][64] = 44, [1][1][RTW89_ETSI][1][64] = 127, @@ -53870,6 +56689,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][64] = 127, [1][1][RTW89_MKK][0][64] = 127, [1][1][RTW89_IC][1][64] = -28, + [1][1][RTW89_IC][2][64] = 44, [1][1][RTW89_KCC][1][64] = -14, [1][1][RTW89_KCC][0][64] = 127, [1][1][RTW89_ACMA][1][64] = 127, @@ -53879,6 +56699,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][64] = 127, [1][1][RTW89_UK][1][64] = 127, [1][1][RTW89_UK][0][64] = 127, + [1][1][RTW89_THAILAND][1][64] = 127, + [1][1][RTW89_THAILAND][0][64] = 127, [1][1][RTW89_FCC][1][66] = -28, [1][1][RTW89_FCC][2][66] = 44, [1][1][RTW89_ETSI][1][66] = 127, @@ -53886,6 +56708,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][66] = 127, [1][1][RTW89_MKK][0][66] = 127, [1][1][RTW89_IC][1][66] = -28, + [1][1][RTW89_IC][2][66] = 44, [1][1][RTW89_KCC][1][66] = -14, [1][1][RTW89_KCC][0][66] = 127, [1][1][RTW89_ACMA][1][66] = 127, @@ -53895,6 +56718,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][66] = 127, [1][1][RTW89_UK][1][66] = 127, [1][1][RTW89_UK][0][66] = 127, + [1][1][RTW89_THAILAND][1][66] = 127, + [1][1][RTW89_THAILAND][0][66] = 127, [1][1][RTW89_FCC][1][68] = -28, [1][1][RTW89_FCC][2][68] = 44, [1][1][RTW89_ETSI][1][68] = 127, @@ -53902,6 +56727,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][68] = 127, [1][1][RTW89_MKK][0][68] = 127, [1][1][RTW89_IC][1][68] = -28, + [1][1][RTW89_IC][2][68] = 44, [1][1][RTW89_KCC][1][68] = -14, [1][1][RTW89_KCC][0][68] = 127, [1][1][RTW89_ACMA][1][68] = 127, @@ -53911,6 +56737,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][68] = 127, [1][1][RTW89_UK][1][68] = 127, [1][1][RTW89_UK][0][68] = 127, + [1][1][RTW89_THAILAND][1][68] = 127, + [1][1][RTW89_THAILAND][0][68] = 127, [1][1][RTW89_FCC][1][70] = -26, [1][1][RTW89_FCC][2][70] = 44, [1][1][RTW89_ETSI][1][70] = 127, @@ -53918,6 +56746,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][70] = 127, [1][1][RTW89_MKK][0][70] = 127, [1][1][RTW89_IC][1][70] = -26, + [1][1][RTW89_IC][2][70] = 44, [1][1][RTW89_KCC][1][70] = -14, [1][1][RTW89_KCC][0][70] = 127, [1][1][RTW89_ACMA][1][70] = 127, @@ -53927,6 +56756,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][70] = 127, [1][1][RTW89_UK][1][70] = 127, [1][1][RTW89_UK][0][70] = 127, + [1][1][RTW89_THAILAND][1][70] = 127, + [1][1][RTW89_THAILAND][0][70] = 127, [1][1][RTW89_FCC][1][72] = -28, [1][1][RTW89_FCC][2][72] = 44, [1][1][RTW89_ETSI][1][72] = 127, @@ -53934,6 +56765,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][72] = 127, [1][1][RTW89_MKK][0][72] = 127, [1][1][RTW89_IC][1][72] = -28, + [1][1][RTW89_IC][2][72] = 44, [1][1][RTW89_KCC][1][72] = -14, [1][1][RTW89_KCC][0][72] = 127, [1][1][RTW89_ACMA][1][72] = 127, @@ -53943,6 
+56775,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][72] = 127, [1][1][RTW89_UK][1][72] = 127, [1][1][RTW89_UK][0][72] = 127, + [1][1][RTW89_THAILAND][1][72] = 127, + [1][1][RTW89_THAILAND][0][72] = 127, [1][1][RTW89_FCC][1][74] = -28, [1][1][RTW89_FCC][2][74] = 44, [1][1][RTW89_ETSI][1][74] = 127, @@ -53950,6 +56784,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][74] = 127, [1][1][RTW89_MKK][0][74] = 127, [1][1][RTW89_IC][1][74] = -28, + [1][1][RTW89_IC][2][74] = 44, [1][1][RTW89_KCC][1][74] = -14, [1][1][RTW89_KCC][0][74] = 127, [1][1][RTW89_ACMA][1][74] = 127, @@ -53959,6 +56794,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][74] = 127, [1][1][RTW89_UK][1][74] = 127, [1][1][RTW89_UK][0][74] = 127, + [1][1][RTW89_THAILAND][1][74] = 127, + [1][1][RTW89_THAILAND][0][74] = 127, [1][1][RTW89_FCC][1][75] = -28, [1][1][RTW89_FCC][2][75] = 44, [1][1][RTW89_ETSI][1][75] = 127, @@ -53966,6 +56803,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][75] = 127, [1][1][RTW89_MKK][0][75] = 127, [1][1][RTW89_IC][1][75] = -28, + [1][1][RTW89_IC][2][75] = 44, [1][1][RTW89_KCC][1][75] = -14, [1][1][RTW89_KCC][0][75] = 127, [1][1][RTW89_ACMA][1][75] = 127, @@ -53975,6 +56813,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][75] = 127, [1][1][RTW89_UK][1][75] = 127, [1][1][RTW89_UK][0][75] = 127, + [1][1][RTW89_THAILAND][1][75] = 127, + [1][1][RTW89_THAILAND][0][75] = 127, [1][1][RTW89_FCC][1][77] = -28, [1][1][RTW89_FCC][2][77] = 44, [1][1][RTW89_ETSI][1][77] = 127, @@ -53982,6 +56822,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][77] = 127, [1][1][RTW89_MKK][0][77] = 127, [1][1][RTW89_IC][1][77] = -28, + [1][1][RTW89_IC][2][77] = 44, [1][1][RTW89_KCC][1][77] = -14, [1][1][RTW89_KCC][0][77] = 127, [1][1][RTW89_ACMA][1][77] = 127, @@ -53991,6 +56832,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][77] = 127, [1][1][RTW89_UK][1][77] = 127, [1][1][RTW89_UK][0][77] = 127, + [1][1][RTW89_THAILAND][1][77] = 127, + [1][1][RTW89_THAILAND][0][77] = 127, [1][1][RTW89_FCC][1][79] = -28, [1][1][RTW89_FCC][2][79] = 44, [1][1][RTW89_ETSI][1][79] = 127, @@ -53998,6 +56841,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][79] = 127, [1][1][RTW89_MKK][0][79] = 127, [1][1][RTW89_IC][1][79] = -28, + [1][1][RTW89_IC][2][79] = 44, [1][1][RTW89_KCC][1][79] = -14, [1][1][RTW89_KCC][0][79] = 127, [1][1][RTW89_ACMA][1][79] = 127, @@ -54007,6 +56851,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][79] = 127, [1][1][RTW89_UK][1][79] = 127, [1][1][RTW89_UK][0][79] = 127, + [1][1][RTW89_THAILAND][1][79] = 127, + [1][1][RTW89_THAILAND][0][79] = 127, [1][1][RTW89_FCC][1][81] = -28, [1][1][RTW89_FCC][2][81] = 44, [1][1][RTW89_ETSI][1][81] = 127, @@ -54014,6 +56860,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][81] = 127, [1][1][RTW89_MKK][0][81] = 127, [1][1][RTW89_IC][1][81] = -28, + [1][1][RTW89_IC][2][81] = 44, [1][1][RTW89_KCC][1][81] = -14, [1][1][RTW89_KCC][0][81] = 127, [1][1][RTW89_ACMA][1][81] = 127, @@ -54023,6 +56870,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][81] = 127, [1][1][RTW89_UK][1][81] = 127, [1][1][RTW89_UK][0][81] = 127, + 
[1][1][RTW89_THAILAND][1][81] = 127, + [1][1][RTW89_THAILAND][0][81] = 127, [1][1][RTW89_FCC][1][83] = -28, [1][1][RTW89_FCC][2][83] = 44, [1][1][RTW89_ETSI][1][83] = 127, @@ -54030,6 +56879,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][83] = 127, [1][1][RTW89_MKK][0][83] = 127, [1][1][RTW89_IC][1][83] = -28, + [1][1][RTW89_IC][2][83] = 44, [1][1][RTW89_KCC][1][83] = -14, [1][1][RTW89_KCC][0][83] = 127, [1][1][RTW89_ACMA][1][83] = 127, @@ -54039,6 +56889,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][83] = 127, [1][1][RTW89_UK][1][83] = 127, [1][1][RTW89_UK][0][83] = 127, + [1][1][RTW89_THAILAND][1][83] = 127, + [1][1][RTW89_THAILAND][0][83] = 127, [1][1][RTW89_FCC][1][85] = -28, [1][1][RTW89_FCC][2][85] = 44, [1][1][RTW89_ETSI][1][85] = 127, @@ -54046,6 +56898,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][85] = 127, [1][1][RTW89_MKK][0][85] = 127, [1][1][RTW89_IC][1][85] = -28, + [1][1][RTW89_IC][2][85] = 44, [1][1][RTW89_KCC][1][85] = -14, [1][1][RTW89_KCC][0][85] = 127, [1][1][RTW89_ACMA][1][85] = 127, @@ -54055,6 +56908,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][85] = 127, [1][1][RTW89_UK][1][85] = 127, [1][1][RTW89_UK][0][85] = 127, + [1][1][RTW89_THAILAND][1][85] = 127, + [1][1][RTW89_THAILAND][0][85] = 127, [1][1][RTW89_FCC][1][87] = -28, [1][1][RTW89_FCC][2][87] = 127, [1][1][RTW89_ETSI][1][87] = 127, @@ -54062,6 +56917,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][87] = 127, [1][1][RTW89_MKK][0][87] = 127, [1][1][RTW89_IC][1][87] = -28, + [1][1][RTW89_IC][2][87] = 127, [1][1][RTW89_KCC][1][87] = -14, [1][1][RTW89_KCC][0][87] = 127, [1][1][RTW89_ACMA][1][87] = 127, @@ -54071,6 +56927,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][87] = 127, [1][1][RTW89_UK][1][87] = 127, [1][1][RTW89_UK][0][87] = 127, + [1][1][RTW89_THAILAND][1][87] = 127, + [1][1][RTW89_THAILAND][0][87] = 127, [1][1][RTW89_FCC][1][89] = -26, [1][1][RTW89_FCC][2][89] = 127, [1][1][RTW89_ETSI][1][89] = 127, @@ -54078,6 +56936,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][89] = 127, [1][1][RTW89_MKK][0][89] = 127, [1][1][RTW89_IC][1][89] = -26, + [1][1][RTW89_IC][2][89] = 127, [1][1][RTW89_KCC][1][89] = -14, [1][1][RTW89_KCC][0][89] = 127, [1][1][RTW89_ACMA][1][89] = 127, @@ -54087,6 +56946,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][89] = 127, [1][1][RTW89_UK][1][89] = 127, [1][1][RTW89_UK][0][89] = 127, + [1][1][RTW89_THAILAND][1][89] = 127, + [1][1][RTW89_THAILAND][0][89] = 127, [1][1][RTW89_FCC][1][90] = -26, [1][1][RTW89_FCC][2][90] = 127, [1][1][RTW89_ETSI][1][90] = 127, @@ -54094,6 +56955,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][90] = 127, [1][1][RTW89_MKK][0][90] = 127, [1][1][RTW89_IC][1][90] = -26, + [1][1][RTW89_IC][2][90] = 127, [1][1][RTW89_KCC][1][90] = -14, [1][1][RTW89_KCC][0][90] = 127, [1][1][RTW89_ACMA][1][90] = 127, @@ -54103,6 +56965,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][90] = 127, [1][1][RTW89_UK][1][90] = 127, [1][1][RTW89_UK][0][90] = 127, + [1][1][RTW89_THAILAND][1][90] = 127, + [1][1][RTW89_THAILAND][0][90] = 127, [1][1][RTW89_FCC][1][92] = -26, [1][1][RTW89_FCC][2][92] = 127, [1][1][RTW89_ETSI][1][92] = 127, @@ 
-54110,6 +56974,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][92] = 127, [1][1][RTW89_MKK][0][92] = 127, [1][1][RTW89_IC][1][92] = -26, + [1][1][RTW89_IC][2][92] = 127, [1][1][RTW89_KCC][1][92] = -14, [1][1][RTW89_KCC][0][92] = 127, [1][1][RTW89_ACMA][1][92] = 127, @@ -54119,6 +56984,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][92] = 127, [1][1][RTW89_UK][1][92] = 127, [1][1][RTW89_UK][0][92] = 127, + [1][1][RTW89_THAILAND][1][92] = 127, + [1][1][RTW89_THAILAND][0][92] = 127, [1][1][RTW89_FCC][1][94] = -26, [1][1][RTW89_FCC][2][94] = 127, [1][1][RTW89_ETSI][1][94] = 127, @@ -54126,6 +56993,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][94] = 127, [1][1][RTW89_MKK][0][94] = 127, [1][1][RTW89_IC][1][94] = -26, + [1][1][RTW89_IC][2][94] = 127, [1][1][RTW89_KCC][1][94] = -14, [1][1][RTW89_KCC][0][94] = 127, [1][1][RTW89_ACMA][1][94] = 127, @@ -54135,6 +57003,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][94] = 127, [1][1][RTW89_UK][1][94] = 127, [1][1][RTW89_UK][0][94] = 127, + [1][1][RTW89_THAILAND][1][94] = 127, + [1][1][RTW89_THAILAND][0][94] = 127, [1][1][RTW89_FCC][1][96] = -26, [1][1][RTW89_FCC][2][96] = 127, [1][1][RTW89_ETSI][1][96] = 127, @@ -54142,6 +57012,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][96] = 127, [1][1][RTW89_MKK][0][96] = 127, [1][1][RTW89_IC][1][96] = -26, + [1][1][RTW89_IC][2][96] = 127, [1][1][RTW89_KCC][1][96] = -14, [1][1][RTW89_KCC][0][96] = 127, [1][1][RTW89_ACMA][1][96] = 127, @@ -54151,6 +57022,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][96] = 127, [1][1][RTW89_UK][1][96] = 127, [1][1][RTW89_UK][0][96] = 127, + [1][1][RTW89_THAILAND][1][96] = 127, + [1][1][RTW89_THAILAND][0][96] = 127, [1][1][RTW89_FCC][1][98] = -26, [1][1][RTW89_FCC][2][98] = 127, [1][1][RTW89_ETSI][1][98] = 127, @@ -54158,6 +57031,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][98] = 127, [1][1][RTW89_MKK][0][98] = 127, [1][1][RTW89_IC][1][98] = -26, + [1][1][RTW89_IC][2][98] = 127, [1][1][RTW89_KCC][1][98] = -14, [1][1][RTW89_KCC][0][98] = 127, [1][1][RTW89_ACMA][1][98] = 127, @@ -54167,6 +57041,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][98] = 127, [1][1][RTW89_UK][1][98] = 127, [1][1][RTW89_UK][0][98] = 127, + [1][1][RTW89_THAILAND][1][98] = 127, + [1][1][RTW89_THAILAND][0][98] = 127, [1][1][RTW89_FCC][1][100] = -26, [1][1][RTW89_FCC][2][100] = 127, [1][1][RTW89_ETSI][1][100] = 127, @@ -54174,6 +57050,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][100] = 127, [1][1][RTW89_MKK][0][100] = 127, [1][1][RTW89_IC][1][100] = -26, + [1][1][RTW89_IC][2][100] = 127, [1][1][RTW89_KCC][1][100] = -14, [1][1][RTW89_KCC][0][100] = 127, [1][1][RTW89_ACMA][1][100] = 127, @@ -54183,6 +57060,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][100] = 127, [1][1][RTW89_UK][1][100] = 127, [1][1][RTW89_UK][0][100] = 127, + [1][1][RTW89_THAILAND][1][100] = 127, + [1][1][RTW89_THAILAND][0][100] = 127, [1][1][RTW89_FCC][1][102] = -26, [1][1][RTW89_FCC][2][102] = 127, [1][1][RTW89_ETSI][1][102] = 127, @@ -54190,6 +57069,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][102] = 127, [1][1][RTW89_MKK][0][102] = 127, 
[1][1][RTW89_IC][1][102] = -26, + [1][1][RTW89_IC][2][102] = 127, [1][1][RTW89_KCC][1][102] = -14, [1][1][RTW89_KCC][0][102] = 127, [1][1][RTW89_ACMA][1][102] = 127, @@ -54199,6 +57079,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][102] = 127, [1][1][RTW89_UK][1][102] = 127, [1][1][RTW89_UK][0][102] = 127, + [1][1][RTW89_THAILAND][1][102] = 127, + [1][1][RTW89_THAILAND][0][102] = 127, [1][1][RTW89_FCC][1][104] = -26, [1][1][RTW89_FCC][2][104] = 127, [1][1][RTW89_ETSI][1][104] = 127, @@ -54206,6 +57088,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][104] = 127, [1][1][RTW89_MKK][0][104] = 127, [1][1][RTW89_IC][1][104] = -26, + [1][1][RTW89_IC][2][104] = 127, [1][1][RTW89_KCC][1][104] = -14, [1][1][RTW89_KCC][0][104] = 127, [1][1][RTW89_ACMA][1][104] = 127, @@ -54215,6 +57098,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][104] = 127, [1][1][RTW89_UK][1][104] = 127, [1][1][RTW89_UK][0][104] = 127, + [1][1][RTW89_THAILAND][1][104] = 127, + [1][1][RTW89_THAILAND][0][104] = 127, [1][1][RTW89_FCC][1][105] = -26, [1][1][RTW89_FCC][2][105] = 127, [1][1][RTW89_ETSI][1][105] = 127, @@ -54222,6 +57107,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][105] = 127, [1][1][RTW89_MKK][0][105] = 127, [1][1][RTW89_IC][1][105] = -26, + [1][1][RTW89_IC][2][105] = 127, [1][1][RTW89_KCC][1][105] = -14, [1][1][RTW89_KCC][0][105] = 127, [1][1][RTW89_ACMA][1][105] = 127, @@ -54231,6 +57117,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][105] = 127, [1][1][RTW89_UK][1][105] = 127, [1][1][RTW89_UK][0][105] = 127, + [1][1][RTW89_THAILAND][1][105] = 127, + [1][1][RTW89_THAILAND][0][105] = 127, [1][1][RTW89_FCC][1][107] = -22, [1][1][RTW89_FCC][2][107] = 127, [1][1][RTW89_ETSI][1][107] = 127, @@ -54238,6 +57126,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][107] = 127, [1][1][RTW89_MKK][0][107] = 127, [1][1][RTW89_IC][1][107] = -22, + [1][1][RTW89_IC][2][107] = 127, [1][1][RTW89_KCC][1][107] = -14, [1][1][RTW89_KCC][0][107] = 127, [1][1][RTW89_ACMA][1][107] = 127, @@ -54247,6 +57136,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][107] = 127, [1][1][RTW89_UK][1][107] = 127, [1][1][RTW89_UK][0][107] = 127, + [1][1][RTW89_THAILAND][1][107] = 127, + [1][1][RTW89_THAILAND][0][107] = 127, [1][1][RTW89_FCC][1][109] = -22, [1][1][RTW89_FCC][2][109] = 127, [1][1][RTW89_ETSI][1][109] = 127, @@ -54254,6 +57145,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][109] = 127, [1][1][RTW89_MKK][0][109] = 127, [1][1][RTW89_IC][1][109] = -22, + [1][1][RTW89_IC][2][109] = 127, [1][1][RTW89_KCC][1][109] = 127, [1][1][RTW89_KCC][0][109] = 127, [1][1][RTW89_ACMA][1][109] = 127, @@ -54263,6 +57155,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][109] = 127, [1][1][RTW89_UK][1][109] = 127, [1][1][RTW89_UK][0][109] = 127, + [1][1][RTW89_THAILAND][1][109] = 127, + [1][1][RTW89_THAILAND][0][109] = 127, [1][1][RTW89_FCC][1][111] = 127, [1][1][RTW89_FCC][2][111] = 127, [1][1][RTW89_ETSI][1][111] = 127, @@ -54270,6 +57164,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][111] = 127, [1][1][RTW89_MKK][0][111] = 127, [1][1][RTW89_IC][1][111] = 127, + [1][1][RTW89_IC][2][111] = 127, [1][1][RTW89_KCC][1][111] = 127, 
[1][1][RTW89_KCC][0][111] = 127, [1][1][RTW89_ACMA][1][111] = 127, @@ -54279,6 +57174,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][111] = 127, [1][1][RTW89_UK][1][111] = 127, [1][1][RTW89_UK][0][111] = 127, + [1][1][RTW89_THAILAND][1][111] = 127, + [1][1][RTW89_THAILAND][0][111] = 127, [1][1][RTW89_FCC][1][113] = 127, [1][1][RTW89_FCC][2][113] = 127, [1][1][RTW89_ETSI][1][113] = 127, @@ -54286,6 +57183,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][113] = 127, [1][1][RTW89_MKK][0][113] = 127, [1][1][RTW89_IC][1][113] = 127, + [1][1][RTW89_IC][2][113] = 127, [1][1][RTW89_KCC][1][113] = 127, [1][1][RTW89_KCC][0][113] = 127, [1][1][RTW89_ACMA][1][113] = 127, @@ -54295,6 +57193,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][113] = 127, [1][1][RTW89_UK][1][113] = 127, [1][1][RTW89_UK][0][113] = 127, + [1][1][RTW89_THAILAND][1][113] = 127, + [1][1][RTW89_THAILAND][0][113] = 127, [1][1][RTW89_FCC][1][115] = 127, [1][1][RTW89_FCC][2][115] = 127, [1][1][RTW89_ETSI][1][115] = 127, @@ -54302,6 +57202,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][115] = 127, [1][1][RTW89_MKK][0][115] = 127, [1][1][RTW89_IC][1][115] = 127, + [1][1][RTW89_IC][2][115] = 127, [1][1][RTW89_KCC][1][115] = 127, [1][1][RTW89_KCC][0][115] = 127, [1][1][RTW89_ACMA][1][115] = 127, @@ -54311,6 +57212,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][115] = 127, [1][1][RTW89_UK][1][115] = 127, [1][1][RTW89_UK][0][115] = 127, + [1][1][RTW89_THAILAND][1][115] = 127, + [1][1][RTW89_THAILAND][0][115] = 127, [1][1][RTW89_FCC][1][117] = 127, [1][1][RTW89_FCC][2][117] = 127, [1][1][RTW89_ETSI][1][117] = 127, @@ -54318,6 +57221,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][117] = 127, [1][1][RTW89_MKK][0][117] = 127, [1][1][RTW89_IC][1][117] = 127, + [1][1][RTW89_IC][2][117] = 127, [1][1][RTW89_KCC][1][117] = 127, [1][1][RTW89_KCC][0][117] = 127, [1][1][RTW89_ACMA][1][117] = 127, @@ -54327,6 +57231,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][117] = 127, [1][1][RTW89_UK][1][117] = 127, [1][1][RTW89_UK][0][117] = 127, + [1][1][RTW89_THAILAND][1][117] = 127, + [1][1][RTW89_THAILAND][0][117] = 127, [1][1][RTW89_FCC][1][119] = 127, [1][1][RTW89_FCC][2][119] = 127, [1][1][RTW89_ETSI][1][119] = 127, @@ -54334,6 +57240,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][1][119] = 127, [1][1][RTW89_MKK][0][119] = 127, [1][1][RTW89_IC][1][119] = 127, + [1][1][RTW89_IC][2][119] = 127, [1][1][RTW89_KCC][1][119] = 127, [1][1][RTW89_KCC][0][119] = 127, [1][1][RTW89_ACMA][1][119] = 127, @@ -54343,6 +57250,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_QATAR][0][119] = 127, [1][1][RTW89_UK][1][119] = 127, [1][1][RTW89_UK][0][119] = 127, + [1][1][RTW89_THAILAND][1][119] = 127, + [1][1][RTW89_THAILAND][0][119] = 127, [2][0][RTW89_FCC][1][0] = 8, [2][0][RTW89_FCC][2][0] = 60, [2][0][RTW89_ETSI][1][0] = 56, @@ -54350,6 +57259,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][0] = 54, [2][0][RTW89_MKK][0][0] = 14, [2][0][RTW89_IC][1][0] = 8, + [2][0][RTW89_IC][2][0] = 60, [2][0][RTW89_KCC][1][0] = -2, [2][0][RTW89_KCC][0][0] = -2, [2][0][RTW89_ACMA][1][0] = 56, @@ -54359,6 +57269,8 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][0] = 18, [2][0][RTW89_UK][1][0] = 56, [2][0][RTW89_UK][0][0] = 18, + [2][0][RTW89_THAILAND][1][0] = 52, + [2][0][RTW89_THAILAND][0][0] = 8, [2][0][RTW89_FCC][1][2] = 8, [2][0][RTW89_FCC][2][2] = 60, [2][0][RTW89_ETSI][1][2] = 56, @@ -54366,6 +57278,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][2] = 54, [2][0][RTW89_MKK][0][2] = 14, [2][0][RTW89_IC][1][2] = 8, + [2][0][RTW89_IC][2][2] = 60, [2][0][RTW89_KCC][1][2] = -2, [2][0][RTW89_KCC][0][2] = -2, [2][0][RTW89_ACMA][1][2] = 56, @@ -54375,6 +57288,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][2] = 18, [2][0][RTW89_UK][1][2] = 56, [2][0][RTW89_UK][0][2] = 18, + [2][0][RTW89_THAILAND][1][2] = 52, + [2][0][RTW89_THAILAND][0][2] = 8, [2][0][RTW89_FCC][1][4] = 8, [2][0][RTW89_FCC][2][4] = 60, [2][0][RTW89_ETSI][1][4] = 56, @@ -54382,6 +57297,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][4] = 54, [2][0][RTW89_MKK][0][4] = 14, [2][0][RTW89_IC][1][4] = 8, + [2][0][RTW89_IC][2][4] = 60, [2][0][RTW89_KCC][1][4] = -2, [2][0][RTW89_KCC][0][4] = -2, [2][0][RTW89_ACMA][1][4] = 56, @@ -54391,6 +57307,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][4] = 18, [2][0][RTW89_UK][1][4] = 56, [2][0][RTW89_UK][0][4] = 18, + [2][0][RTW89_THAILAND][1][4] = 52, + [2][0][RTW89_THAILAND][0][4] = 8, [2][0][RTW89_FCC][1][6] = 8, [2][0][RTW89_FCC][2][6] = 60, [2][0][RTW89_ETSI][1][6] = 56, @@ -54398,6 +57316,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][6] = 54, [2][0][RTW89_MKK][0][6] = 14, [2][0][RTW89_IC][1][6] = 8, + [2][0][RTW89_IC][2][6] = 60, [2][0][RTW89_KCC][1][6] = -2, [2][0][RTW89_KCC][0][6] = -2, [2][0][RTW89_ACMA][1][6] = 56, @@ -54407,6 +57326,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][6] = 18, [2][0][RTW89_UK][1][6] = 56, [2][0][RTW89_UK][0][6] = 18, + [2][0][RTW89_THAILAND][1][6] = 52, + [2][0][RTW89_THAILAND][0][6] = 8, [2][0][RTW89_FCC][1][8] = 8, [2][0][RTW89_FCC][2][8] = 60, [2][0][RTW89_ETSI][1][8] = 56, @@ -54414,6 +57335,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][8] = 54, [2][0][RTW89_MKK][0][8] = 14, [2][0][RTW89_IC][1][8] = 8, + [2][0][RTW89_IC][2][8] = 60, [2][0][RTW89_KCC][1][8] = -2, [2][0][RTW89_KCC][0][8] = -2, [2][0][RTW89_ACMA][1][8] = 56, @@ -54423,6 +57345,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][8] = 18, [2][0][RTW89_UK][1][8] = 56, [2][0][RTW89_UK][0][8] = 18, + [2][0][RTW89_THAILAND][1][8] = 52, + [2][0][RTW89_THAILAND][0][8] = 8, [2][0][RTW89_FCC][1][10] = 8, [2][0][RTW89_FCC][2][10] = 60, [2][0][RTW89_ETSI][1][10] = 56, @@ -54430,6 +57354,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][10] = 54, [2][0][RTW89_MKK][0][10] = 14, [2][0][RTW89_IC][1][10] = 8, + [2][0][RTW89_IC][2][10] = 60, [2][0][RTW89_KCC][1][10] = -2, [2][0][RTW89_KCC][0][10] = -2, [2][0][RTW89_ACMA][1][10] = 56, @@ -54439,6 +57364,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][10] = 18, [2][0][RTW89_UK][1][10] = 56, [2][0][RTW89_UK][0][10] = 18, + [2][0][RTW89_THAILAND][1][10] = 52, + [2][0][RTW89_THAILAND][0][10] = 8, [2][0][RTW89_FCC][1][12] = 8, [2][0][RTW89_FCC][2][12] = 60, [2][0][RTW89_ETSI][1][12] = 56, @@ -54446,6 
+57373,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][12] = 54, [2][0][RTW89_MKK][0][12] = 14, [2][0][RTW89_IC][1][12] = 8, + [2][0][RTW89_IC][2][12] = 60, [2][0][RTW89_KCC][1][12] = -2, [2][0][RTW89_KCC][0][12] = -2, [2][0][RTW89_ACMA][1][12] = 56, @@ -54455,6 +57383,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][12] = 18, [2][0][RTW89_UK][1][12] = 56, [2][0][RTW89_UK][0][12] = 18, + [2][0][RTW89_THAILAND][1][12] = 52, + [2][0][RTW89_THAILAND][0][12] = 8, [2][0][RTW89_FCC][1][14] = 8, [2][0][RTW89_FCC][2][14] = 60, [2][0][RTW89_ETSI][1][14] = 56, @@ -54462,6 +57392,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][14] = 54, [2][0][RTW89_MKK][0][14] = 14, [2][0][RTW89_IC][1][14] = 8, + [2][0][RTW89_IC][2][14] = 60, [2][0][RTW89_KCC][1][14] = -2, [2][0][RTW89_KCC][0][14] = -2, [2][0][RTW89_ACMA][1][14] = 56, @@ -54471,6 +57402,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][14] = 18, [2][0][RTW89_UK][1][14] = 56, [2][0][RTW89_UK][0][14] = 18, + [2][0][RTW89_THAILAND][1][14] = 52, + [2][0][RTW89_THAILAND][0][14] = 8, [2][0][RTW89_FCC][1][15] = 8, [2][0][RTW89_FCC][2][15] = 60, [2][0][RTW89_ETSI][1][15] = 56, @@ -54478,6 +57411,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][15] = 54, [2][0][RTW89_MKK][0][15] = 14, [2][0][RTW89_IC][1][15] = 8, + [2][0][RTW89_IC][2][15] = 60, [2][0][RTW89_KCC][1][15] = -2, [2][0][RTW89_KCC][0][15] = -2, [2][0][RTW89_ACMA][1][15] = 56, @@ -54487,6 +57421,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][15] = 18, [2][0][RTW89_UK][1][15] = 56, [2][0][RTW89_UK][0][15] = 18, + [2][0][RTW89_THAILAND][1][15] = 52, + [2][0][RTW89_THAILAND][0][15] = 8, [2][0][RTW89_FCC][1][17] = 8, [2][0][RTW89_FCC][2][17] = 60, [2][0][RTW89_ETSI][1][17] = 56, @@ -54494,6 +57430,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][17] = 54, [2][0][RTW89_MKK][0][17] = 14, [2][0][RTW89_IC][1][17] = 8, + [2][0][RTW89_IC][2][17] = 60, [2][0][RTW89_KCC][1][17] = -2, [2][0][RTW89_KCC][0][17] = -2, [2][0][RTW89_ACMA][1][17] = 56, @@ -54503,6 +57440,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][17] = 18, [2][0][RTW89_UK][1][17] = 56, [2][0][RTW89_UK][0][17] = 18, + [2][0][RTW89_THAILAND][1][17] = 52, + [2][0][RTW89_THAILAND][0][17] = 8, [2][0][RTW89_FCC][1][19] = 8, [2][0][RTW89_FCC][2][19] = 60, [2][0][RTW89_ETSI][1][19] = 56, @@ -54510,6 +57449,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][19] = 54, [2][0][RTW89_MKK][0][19] = 14, [2][0][RTW89_IC][1][19] = 8, + [2][0][RTW89_IC][2][19] = 60, [2][0][RTW89_KCC][1][19] = -2, [2][0][RTW89_KCC][0][19] = -2, [2][0][RTW89_ACMA][1][19] = 56, @@ -54519,6 +57459,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][19] = 18, [2][0][RTW89_UK][1][19] = 56, [2][0][RTW89_UK][0][19] = 18, + [2][0][RTW89_THAILAND][1][19] = 52, + [2][0][RTW89_THAILAND][0][19] = 8, [2][0][RTW89_FCC][1][21] = 8, [2][0][RTW89_FCC][2][21] = 60, [2][0][RTW89_ETSI][1][21] = 56, @@ -54526,6 +57468,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][21] = 54, [2][0][RTW89_MKK][0][21] = 14, [2][0][RTW89_IC][1][21] = 8, + [2][0][RTW89_IC][2][21] = 60, [2][0][RTW89_KCC][1][21] = -2, [2][0][RTW89_KCC][0][21] = -2, 
[2][0][RTW89_ACMA][1][21] = 56, @@ -54535,13 +57478,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][21] = 18, [2][0][RTW89_UK][1][21] = 56, [2][0][RTW89_UK][0][21] = 18, + [2][0][RTW89_THAILAND][1][21] = 52, + [2][0][RTW89_THAILAND][0][21] = 8, [2][0][RTW89_FCC][1][23] = 8, - [2][0][RTW89_FCC][2][23] = 78, + [2][0][RTW89_FCC][2][23] = 70, [2][0][RTW89_ETSI][1][23] = 56, [2][0][RTW89_ETSI][0][23] = 18, [2][0][RTW89_MKK][1][23] = 56, [2][0][RTW89_MKK][0][23] = 14, [2][0][RTW89_IC][1][23] = 8, + [2][0][RTW89_IC][2][23] = 70, [2][0][RTW89_KCC][1][23] = -2, [2][0][RTW89_KCC][0][23] = -2, [2][0][RTW89_ACMA][1][23] = 56, @@ -54551,13 +57497,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][23] = 18, [2][0][RTW89_UK][1][23] = 56, [2][0][RTW89_UK][0][23] = 18, + [2][0][RTW89_THAILAND][1][23] = 52, + [2][0][RTW89_THAILAND][0][23] = 8, [2][0][RTW89_FCC][1][25] = 8, - [2][0][RTW89_FCC][2][25] = 78, + [2][0][RTW89_FCC][2][25] = 70, [2][0][RTW89_ETSI][1][25] = 56, [2][0][RTW89_ETSI][0][25] = 18, [2][0][RTW89_MKK][1][25] = 56, [2][0][RTW89_MKK][0][25] = 14, [2][0][RTW89_IC][1][25] = 8, + [2][0][RTW89_IC][2][25] = 70, [2][0][RTW89_KCC][1][25] = -2, [2][0][RTW89_KCC][0][25] = -2, [2][0][RTW89_ACMA][1][25] = 56, @@ -54567,13 +57516,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][25] = 18, [2][0][RTW89_UK][1][25] = 56, [2][0][RTW89_UK][0][25] = 18, + [2][0][RTW89_THAILAND][1][25] = 52, + [2][0][RTW89_THAILAND][0][25] = 8, [2][0][RTW89_FCC][1][27] = 8, - [2][0][RTW89_FCC][2][27] = 78, + [2][0][RTW89_FCC][2][27] = 70, [2][0][RTW89_ETSI][1][27] = 56, [2][0][RTW89_ETSI][0][27] = 18, [2][0][RTW89_MKK][1][27] = 56, [2][0][RTW89_MKK][0][27] = 14, [2][0][RTW89_IC][1][27] = 8, + [2][0][RTW89_IC][2][27] = 70, [2][0][RTW89_KCC][1][27] = -2, [2][0][RTW89_KCC][0][27] = -2, [2][0][RTW89_ACMA][1][27] = 56, @@ -54583,13 +57535,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][27] = 18, [2][0][RTW89_UK][1][27] = 56, [2][0][RTW89_UK][0][27] = 18, + [2][0][RTW89_THAILAND][1][27] = 52, + [2][0][RTW89_THAILAND][0][27] = 8, [2][0][RTW89_FCC][1][29] = 8, - [2][0][RTW89_FCC][2][29] = 78, + [2][0][RTW89_FCC][2][29] = 70, [2][0][RTW89_ETSI][1][29] = 56, [2][0][RTW89_ETSI][0][29] = 18, [2][0][RTW89_MKK][1][29] = 56, [2][0][RTW89_MKK][0][29] = 14, [2][0][RTW89_IC][1][29] = 8, + [2][0][RTW89_IC][2][29] = 70, [2][0][RTW89_KCC][1][29] = -2, [2][0][RTW89_KCC][0][29] = -2, [2][0][RTW89_ACMA][1][29] = 56, @@ -54599,13 +57554,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][29] = 18, [2][0][RTW89_UK][1][29] = 56, [2][0][RTW89_UK][0][29] = 18, + [2][0][RTW89_THAILAND][1][29] = 52, + [2][0][RTW89_THAILAND][0][29] = 8, [2][0][RTW89_FCC][1][30] = 8, - [2][0][RTW89_FCC][2][30] = 78, + [2][0][RTW89_FCC][2][30] = 70, [2][0][RTW89_ETSI][1][30] = 56, [2][0][RTW89_ETSI][0][30] = 18, [2][0][RTW89_MKK][1][30] = 56, [2][0][RTW89_MKK][0][30] = 14, [2][0][RTW89_IC][1][30] = 8, + [2][0][RTW89_IC][2][30] = 70, [2][0][RTW89_KCC][1][30] = -2, [2][0][RTW89_KCC][0][30] = -2, [2][0][RTW89_ACMA][1][30] = 56, @@ -54615,13 +57573,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][30] = 18, [2][0][RTW89_UK][1][30] = 56, [2][0][RTW89_UK][0][30] = 18, + [2][0][RTW89_THAILAND][1][30] = 52, + [2][0][RTW89_THAILAND][0][30] = 8, [2][0][RTW89_FCC][1][32] = 8, - [2][0][RTW89_FCC][2][32] = 78, + 
[2][0][RTW89_FCC][2][32] = 70, [2][0][RTW89_ETSI][1][32] = 56, [2][0][RTW89_ETSI][0][32] = 18, [2][0][RTW89_MKK][1][32] = 56, [2][0][RTW89_MKK][0][32] = 14, [2][0][RTW89_IC][1][32] = 8, + [2][0][RTW89_IC][2][32] = 70, [2][0][RTW89_KCC][1][32] = -2, [2][0][RTW89_KCC][0][32] = -2, [2][0][RTW89_ACMA][1][32] = 56, @@ -54631,13 +57592,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][32] = 18, [2][0][RTW89_UK][1][32] = 56, [2][0][RTW89_UK][0][32] = 18, + [2][0][RTW89_THAILAND][1][32] = 52, + [2][0][RTW89_THAILAND][0][32] = 8, [2][0][RTW89_FCC][1][34] = 8, - [2][0][RTW89_FCC][2][34] = 78, + [2][0][RTW89_FCC][2][34] = 70, [2][0][RTW89_ETSI][1][34] = 56, [2][0][RTW89_ETSI][0][34] = 18, [2][0][RTW89_MKK][1][34] = 56, [2][0][RTW89_MKK][0][34] = 14, [2][0][RTW89_IC][1][34] = 8, + [2][0][RTW89_IC][2][34] = 70, [2][0][RTW89_KCC][1][34] = -2, [2][0][RTW89_KCC][0][34] = -2, [2][0][RTW89_ACMA][1][34] = 56, @@ -54647,13 +57611,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][34] = 18, [2][0][RTW89_UK][1][34] = 56, [2][0][RTW89_UK][0][34] = 18, + [2][0][RTW89_THAILAND][1][34] = 52, + [2][0][RTW89_THAILAND][0][34] = 8, [2][0][RTW89_FCC][1][36] = 8, - [2][0][RTW89_FCC][2][36] = 78, + [2][0][RTW89_FCC][2][36] = 70, [2][0][RTW89_ETSI][1][36] = 56, [2][0][RTW89_ETSI][0][36] = 18, [2][0][RTW89_MKK][1][36] = 56, [2][0][RTW89_MKK][0][36] = 14, [2][0][RTW89_IC][1][36] = 8, + [2][0][RTW89_IC][2][36] = 70, [2][0][RTW89_KCC][1][36] = -2, [2][0][RTW89_KCC][0][36] = -2, [2][0][RTW89_ACMA][1][36] = 56, @@ -54663,13 +57630,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][36] = 18, [2][0][RTW89_UK][1][36] = 56, [2][0][RTW89_UK][0][36] = 18, + [2][0][RTW89_THAILAND][1][36] = 52, + [2][0][RTW89_THAILAND][0][36] = 8, [2][0][RTW89_FCC][1][38] = 8, - [2][0][RTW89_FCC][2][38] = 78, + [2][0][RTW89_FCC][2][38] = 70, [2][0][RTW89_ETSI][1][38] = 56, [2][0][RTW89_ETSI][0][38] = 18, [2][0][RTW89_MKK][1][38] = 56, [2][0][RTW89_MKK][0][38] = 14, [2][0][RTW89_IC][1][38] = 8, + [2][0][RTW89_IC][2][38] = 70, [2][0][RTW89_KCC][1][38] = -2, [2][0][RTW89_KCC][0][38] = -2, [2][0][RTW89_ACMA][1][38] = 56, @@ -54679,13 +57649,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][38] = 18, [2][0][RTW89_UK][1][38] = 56, [2][0][RTW89_UK][0][38] = 18, + [2][0][RTW89_THAILAND][1][38] = 52, + [2][0][RTW89_THAILAND][0][38] = 8, [2][0][RTW89_FCC][1][40] = 8, - [2][0][RTW89_FCC][2][40] = 78, + [2][0][RTW89_FCC][2][40] = 70, [2][0][RTW89_ETSI][1][40] = 56, [2][0][RTW89_ETSI][0][40] = 18, [2][0][RTW89_MKK][1][40] = 56, [2][0][RTW89_MKK][0][40] = 14, [2][0][RTW89_IC][1][40] = 8, + [2][0][RTW89_IC][2][40] = 70, [2][0][RTW89_KCC][1][40] = -2, [2][0][RTW89_KCC][0][40] = -2, [2][0][RTW89_ACMA][1][40] = 56, @@ -54695,13 +57668,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][40] = 18, [2][0][RTW89_UK][1][40] = 56, [2][0][RTW89_UK][0][40] = 18, + [2][0][RTW89_THAILAND][1][40] = 52, + [2][0][RTW89_THAILAND][0][40] = 8, [2][0][RTW89_FCC][1][42] = 8, - [2][0][RTW89_FCC][2][42] = 78, + [2][0][RTW89_FCC][2][42] = 70, [2][0][RTW89_ETSI][1][42] = 56, [2][0][RTW89_ETSI][0][42] = 18, [2][0][RTW89_MKK][1][42] = 56, [2][0][RTW89_MKK][0][42] = 14, [2][0][RTW89_IC][1][42] = 8, + [2][0][RTW89_IC][2][42] = 70, [2][0][RTW89_KCC][1][42] = -2, [2][0][RTW89_KCC][0][42] = -2, [2][0][RTW89_ACMA][1][42] = 56, @@ -54711,13 +57687,16 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][42] = 18, [2][0][RTW89_UK][1][42] = 56, [2][0][RTW89_UK][0][42] = 18, + [2][0][RTW89_THAILAND][1][42] = 52, + [2][0][RTW89_THAILAND][0][42] = 8, [2][0][RTW89_FCC][1][44] = 8, - [2][0][RTW89_FCC][2][44] = 78, + [2][0][RTW89_FCC][2][44] = 70, [2][0][RTW89_ETSI][1][44] = 56, [2][0][RTW89_ETSI][0][44] = 18, [2][0][RTW89_MKK][1][44] = 32, [2][0][RTW89_MKK][0][44] = 14, [2][0][RTW89_IC][1][44] = 8, + [2][0][RTW89_IC][2][44] = 70, [2][0][RTW89_KCC][1][44] = -2, [2][0][RTW89_KCC][0][44] = -2, [2][0][RTW89_ACMA][1][44] = 56, @@ -54727,6 +57706,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][44] = 18, [2][0][RTW89_UK][1][44] = 56, [2][0][RTW89_UK][0][44] = 18, + [2][0][RTW89_THAILAND][1][44] = 52, + [2][0][RTW89_THAILAND][0][44] = 8, [2][0][RTW89_FCC][1][45] = 8, [2][0][RTW89_FCC][2][45] = 127, [2][0][RTW89_ETSI][1][45] = 127, @@ -54734,6 +57715,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][45] = 127, [2][0][RTW89_MKK][0][45] = 127, [2][0][RTW89_IC][1][45] = 8, + [2][0][RTW89_IC][2][45] = 70, [2][0][RTW89_KCC][1][45] = -2, [2][0][RTW89_KCC][0][45] = 127, [2][0][RTW89_ACMA][1][45] = 127, @@ -54743,6 +57725,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][45] = 127, [2][0][RTW89_UK][1][45] = 127, [2][0][RTW89_UK][0][45] = 127, + [2][0][RTW89_THAILAND][1][45] = 127, + [2][0][RTW89_THAILAND][0][45] = 127, [2][0][RTW89_FCC][1][47] = 8, [2][0][RTW89_FCC][2][47] = 127, [2][0][RTW89_ETSI][1][47] = 127, @@ -54750,6 +57734,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][47] = 127, [2][0][RTW89_MKK][0][47] = 127, [2][0][RTW89_IC][1][47] = 8, + [2][0][RTW89_IC][2][47] = 70, [2][0][RTW89_KCC][1][47] = -2, [2][0][RTW89_KCC][0][47] = 127, [2][0][RTW89_ACMA][1][47] = 127, @@ -54759,6 +57744,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][47] = 127, [2][0][RTW89_UK][1][47] = 127, [2][0][RTW89_UK][0][47] = 127, + [2][0][RTW89_THAILAND][1][47] = 127, + [2][0][RTW89_THAILAND][0][47] = 127, [2][0][RTW89_FCC][1][49] = 8, [2][0][RTW89_FCC][2][49] = 127, [2][0][RTW89_ETSI][1][49] = 127, @@ -54766,6 +57753,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][49] = 127, [2][0][RTW89_MKK][0][49] = 127, [2][0][RTW89_IC][1][49] = 8, + [2][0][RTW89_IC][2][49] = 70, [2][0][RTW89_KCC][1][49] = -2, [2][0][RTW89_KCC][0][49] = 127, [2][0][RTW89_ACMA][1][49] = 127, @@ -54775,6 +57763,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][49] = 127, [2][0][RTW89_UK][1][49] = 127, [2][0][RTW89_UK][0][49] = 127, + [2][0][RTW89_THAILAND][1][49] = 127, + [2][0][RTW89_THAILAND][0][49] = 127, [2][0][RTW89_FCC][1][51] = 8, [2][0][RTW89_FCC][2][51] = 127, [2][0][RTW89_ETSI][1][51] = 127, @@ -54782,6 +57772,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][51] = 127, [2][0][RTW89_MKK][0][51] = 127, [2][0][RTW89_IC][1][51] = 8, + [2][0][RTW89_IC][2][51] = 70, [2][0][RTW89_KCC][1][51] = -2, [2][0][RTW89_KCC][0][51] = 127, [2][0][RTW89_ACMA][1][51] = 127, @@ -54791,6 +57782,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][51] = 127, [2][0][RTW89_UK][1][51] = 127, [2][0][RTW89_UK][0][51] = 127, + [2][0][RTW89_THAILAND][1][51] = 127, + [2][0][RTW89_THAILAND][0][51] = 127, 
[2][0][RTW89_FCC][1][53] = 8, [2][0][RTW89_FCC][2][53] = 127, [2][0][RTW89_ETSI][1][53] = 127, @@ -54798,6 +57791,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][53] = 127, [2][0][RTW89_MKK][0][53] = 127, [2][0][RTW89_IC][1][53] = 8, + [2][0][RTW89_IC][2][53] = 70, [2][0][RTW89_KCC][1][53] = -2, [2][0][RTW89_KCC][0][53] = 127, [2][0][RTW89_ACMA][1][53] = 127, @@ -54807,13 +57801,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][53] = 127, [2][0][RTW89_UK][1][53] = 127, [2][0][RTW89_UK][0][53] = 127, + [2][0][RTW89_THAILAND][1][53] = 127, + [2][0][RTW89_THAILAND][0][53] = 127, [2][0][RTW89_FCC][1][55] = 8, - [2][0][RTW89_FCC][2][55] = 78, + [2][0][RTW89_FCC][2][55] = 68, [2][0][RTW89_ETSI][1][55] = 127, [2][0][RTW89_ETSI][0][55] = 127, [2][0][RTW89_MKK][1][55] = 127, [2][0][RTW89_MKK][0][55] = 127, [2][0][RTW89_IC][1][55] = 8, + [2][0][RTW89_IC][2][55] = 68, [2][0][RTW89_KCC][1][55] = -2, [2][0][RTW89_KCC][0][55] = 127, [2][0][RTW89_ACMA][1][55] = 127, @@ -54823,13 +57820,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][55] = 127, [2][0][RTW89_UK][1][55] = 127, [2][0][RTW89_UK][0][55] = 127, + [2][0][RTW89_THAILAND][1][55] = 127, + [2][0][RTW89_THAILAND][0][55] = 127, [2][0][RTW89_FCC][1][57] = 8, - [2][0][RTW89_FCC][2][57] = 78, + [2][0][RTW89_FCC][2][57] = 68, [2][0][RTW89_ETSI][1][57] = 127, [2][0][RTW89_ETSI][0][57] = 127, [2][0][RTW89_MKK][1][57] = 127, [2][0][RTW89_MKK][0][57] = 127, [2][0][RTW89_IC][1][57] = 8, + [2][0][RTW89_IC][2][57] = 68, [2][0][RTW89_KCC][1][57] = -2, [2][0][RTW89_KCC][0][57] = 127, [2][0][RTW89_ACMA][1][57] = 127, @@ -54839,13 +57839,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][57] = 127, [2][0][RTW89_UK][1][57] = 127, [2][0][RTW89_UK][0][57] = 127, + [2][0][RTW89_THAILAND][1][57] = 127, + [2][0][RTW89_THAILAND][0][57] = 127, [2][0][RTW89_FCC][1][59] = 8, - [2][0][RTW89_FCC][2][59] = 78, + [2][0][RTW89_FCC][2][59] = 68, [2][0][RTW89_ETSI][1][59] = 127, [2][0][RTW89_ETSI][0][59] = 127, [2][0][RTW89_MKK][1][59] = 127, [2][0][RTW89_MKK][0][59] = 127, [2][0][RTW89_IC][1][59] = 8, + [2][0][RTW89_IC][2][59] = 68, [2][0][RTW89_KCC][1][59] = -2, [2][0][RTW89_KCC][0][59] = 127, [2][0][RTW89_ACMA][1][59] = 127, @@ -54855,13 +57858,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][59] = 127, [2][0][RTW89_UK][1][59] = 127, [2][0][RTW89_UK][0][59] = 127, + [2][0][RTW89_THAILAND][1][59] = 127, + [2][0][RTW89_THAILAND][0][59] = 127, [2][0][RTW89_FCC][1][60] = 8, - [2][0][RTW89_FCC][2][60] = 78, + [2][0][RTW89_FCC][2][60] = 68, [2][0][RTW89_ETSI][1][60] = 127, [2][0][RTW89_ETSI][0][60] = 127, [2][0][RTW89_MKK][1][60] = 127, [2][0][RTW89_MKK][0][60] = 127, [2][0][RTW89_IC][1][60] = 8, + [2][0][RTW89_IC][2][60] = 68, [2][0][RTW89_KCC][1][60] = -2, [2][0][RTW89_KCC][0][60] = 127, [2][0][RTW89_ACMA][1][60] = 127, @@ -54871,13 +57877,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][60] = 127, [2][0][RTW89_UK][1][60] = 127, [2][0][RTW89_UK][0][60] = 127, + [2][0][RTW89_THAILAND][1][60] = 127, + [2][0][RTW89_THAILAND][0][60] = 127, [2][0][RTW89_FCC][1][62] = 8, - [2][0][RTW89_FCC][2][62] = 78, + [2][0][RTW89_FCC][2][62] = 68, [2][0][RTW89_ETSI][1][62] = 127, [2][0][RTW89_ETSI][0][62] = 127, [2][0][RTW89_MKK][1][62] = 127, [2][0][RTW89_MKK][0][62] = 127, [2][0][RTW89_IC][1][62] = 8, + [2][0][RTW89_IC][2][62] = 68, 
[2][0][RTW89_KCC][1][62] = -2, [2][0][RTW89_KCC][0][62] = 127, [2][0][RTW89_ACMA][1][62] = 127, @@ -54887,13 +57896,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][62] = 127, [2][0][RTW89_UK][1][62] = 127, [2][0][RTW89_UK][0][62] = 127, + [2][0][RTW89_THAILAND][1][62] = 127, + [2][0][RTW89_THAILAND][0][62] = 127, [2][0][RTW89_FCC][1][64] = 8, - [2][0][RTW89_FCC][2][64] = 78, + [2][0][RTW89_FCC][2][64] = 68, [2][0][RTW89_ETSI][1][64] = 127, [2][0][RTW89_ETSI][0][64] = 127, [2][0][RTW89_MKK][1][64] = 127, [2][0][RTW89_MKK][0][64] = 127, [2][0][RTW89_IC][1][64] = 8, + [2][0][RTW89_IC][2][64] = 68, [2][0][RTW89_KCC][1][64] = -2, [2][0][RTW89_KCC][0][64] = 127, [2][0][RTW89_ACMA][1][64] = 127, @@ -54903,13 +57915,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][64] = 127, [2][0][RTW89_UK][1][64] = 127, [2][0][RTW89_UK][0][64] = 127, + [2][0][RTW89_THAILAND][1][64] = 127, + [2][0][RTW89_THAILAND][0][64] = 127, [2][0][RTW89_FCC][1][66] = 8, - [2][0][RTW89_FCC][2][66] = 78, + [2][0][RTW89_FCC][2][66] = 68, [2][0][RTW89_ETSI][1][66] = 127, [2][0][RTW89_ETSI][0][66] = 127, [2][0][RTW89_MKK][1][66] = 127, [2][0][RTW89_MKK][0][66] = 127, [2][0][RTW89_IC][1][66] = 8, + [2][0][RTW89_IC][2][66] = 68, [2][0][RTW89_KCC][1][66] = -2, [2][0][RTW89_KCC][0][66] = 127, [2][0][RTW89_ACMA][1][66] = 127, @@ -54919,13 +57934,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][66] = 127, [2][0][RTW89_UK][1][66] = 127, [2][0][RTW89_UK][0][66] = 127, + [2][0][RTW89_THAILAND][1][66] = 127, + [2][0][RTW89_THAILAND][0][66] = 127, [2][0][RTW89_FCC][1][68] = 8, - [2][0][RTW89_FCC][2][68] = 78, + [2][0][RTW89_FCC][2][68] = 68, [2][0][RTW89_ETSI][1][68] = 127, [2][0][RTW89_ETSI][0][68] = 127, [2][0][RTW89_MKK][1][68] = 127, [2][0][RTW89_MKK][0][68] = 127, [2][0][RTW89_IC][1][68] = 8, + [2][0][RTW89_IC][2][68] = 68, [2][0][RTW89_KCC][1][68] = -2, [2][0][RTW89_KCC][0][68] = 127, [2][0][RTW89_ACMA][1][68] = 127, @@ -54935,13 +57953,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][68] = 127, [2][0][RTW89_UK][1][68] = 127, [2][0][RTW89_UK][0][68] = 127, + [2][0][RTW89_THAILAND][1][68] = 127, + [2][0][RTW89_THAILAND][0][68] = 127, [2][0][RTW89_FCC][1][70] = 8, - [2][0][RTW89_FCC][2][70] = 78, + [2][0][RTW89_FCC][2][70] = 68, [2][0][RTW89_ETSI][1][70] = 127, [2][0][RTW89_ETSI][0][70] = 127, [2][0][RTW89_MKK][1][70] = 127, [2][0][RTW89_MKK][0][70] = 127, [2][0][RTW89_IC][1][70] = 8, + [2][0][RTW89_IC][2][70] = 68, [2][0][RTW89_KCC][1][70] = -2, [2][0][RTW89_KCC][0][70] = 127, [2][0][RTW89_ACMA][1][70] = 127, @@ -54951,13 +57972,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][70] = 127, [2][0][RTW89_UK][1][70] = 127, [2][0][RTW89_UK][0][70] = 127, + [2][0][RTW89_THAILAND][1][70] = 127, + [2][0][RTW89_THAILAND][0][70] = 127, [2][0][RTW89_FCC][1][72] = 8, - [2][0][RTW89_FCC][2][72] = 78, + [2][0][RTW89_FCC][2][72] = 68, [2][0][RTW89_ETSI][1][72] = 127, [2][0][RTW89_ETSI][0][72] = 127, [2][0][RTW89_MKK][1][72] = 127, [2][0][RTW89_MKK][0][72] = 127, [2][0][RTW89_IC][1][72] = 8, + [2][0][RTW89_IC][2][72] = 68, [2][0][RTW89_KCC][1][72] = -2, [2][0][RTW89_KCC][0][72] = 127, [2][0][RTW89_ACMA][1][72] = 127, @@ -54967,13 +57991,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][72] = 127, [2][0][RTW89_UK][1][72] = 127, [2][0][RTW89_UK][0][72] = 127, + 
[2][0][RTW89_THAILAND][1][72] = 127, + [2][0][RTW89_THAILAND][0][72] = 127, [2][0][RTW89_FCC][1][74] = 8, - [2][0][RTW89_FCC][2][74] = 78, + [2][0][RTW89_FCC][2][74] = 68, [2][0][RTW89_ETSI][1][74] = 127, [2][0][RTW89_ETSI][0][74] = 127, [2][0][RTW89_MKK][1][74] = 127, [2][0][RTW89_MKK][0][74] = 127, [2][0][RTW89_IC][1][74] = 8, + [2][0][RTW89_IC][2][74] = 68, [2][0][RTW89_KCC][1][74] = -2, [2][0][RTW89_KCC][0][74] = 127, [2][0][RTW89_ACMA][1][74] = 127, @@ -54983,13 +58010,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][74] = 127, [2][0][RTW89_UK][1][74] = 127, [2][0][RTW89_UK][0][74] = 127, + [2][0][RTW89_THAILAND][1][74] = 127, + [2][0][RTW89_THAILAND][0][74] = 127, [2][0][RTW89_FCC][1][75] = 8, - [2][0][RTW89_FCC][2][75] = 78, + [2][0][RTW89_FCC][2][75] = 68, [2][0][RTW89_ETSI][1][75] = 127, [2][0][RTW89_ETSI][0][75] = 127, [2][0][RTW89_MKK][1][75] = 127, [2][0][RTW89_MKK][0][75] = 127, [2][0][RTW89_IC][1][75] = 8, + [2][0][RTW89_IC][2][75] = 68, [2][0][RTW89_KCC][1][75] = -2, [2][0][RTW89_KCC][0][75] = 127, [2][0][RTW89_ACMA][1][75] = 127, @@ -54999,13 +58029,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][75] = 127, [2][0][RTW89_UK][1][75] = 127, [2][0][RTW89_UK][0][75] = 127, + [2][0][RTW89_THAILAND][1][75] = 127, + [2][0][RTW89_THAILAND][0][75] = 127, [2][0][RTW89_FCC][1][77] = 8, - [2][0][RTW89_FCC][2][77] = 78, + [2][0][RTW89_FCC][2][77] = 68, [2][0][RTW89_ETSI][1][77] = 127, [2][0][RTW89_ETSI][0][77] = 127, [2][0][RTW89_MKK][1][77] = 127, [2][0][RTW89_MKK][0][77] = 127, [2][0][RTW89_IC][1][77] = 8, + [2][0][RTW89_IC][2][77] = 68, [2][0][RTW89_KCC][1][77] = -2, [2][0][RTW89_KCC][0][77] = 127, [2][0][RTW89_ACMA][1][77] = 127, @@ -55015,13 +58048,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][77] = 127, [2][0][RTW89_UK][1][77] = 127, [2][0][RTW89_UK][0][77] = 127, + [2][0][RTW89_THAILAND][1][77] = 127, + [2][0][RTW89_THAILAND][0][77] = 127, [2][0][RTW89_FCC][1][79] = 8, - [2][0][RTW89_FCC][2][79] = 78, + [2][0][RTW89_FCC][2][79] = 68, [2][0][RTW89_ETSI][1][79] = 127, [2][0][RTW89_ETSI][0][79] = 127, [2][0][RTW89_MKK][1][79] = 127, [2][0][RTW89_MKK][0][79] = 127, [2][0][RTW89_IC][1][79] = 8, + [2][0][RTW89_IC][2][79] = 68, [2][0][RTW89_KCC][1][79] = -2, [2][0][RTW89_KCC][0][79] = 127, [2][0][RTW89_ACMA][1][79] = 127, @@ -55031,13 +58067,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][79] = 127, [2][0][RTW89_UK][1][79] = 127, [2][0][RTW89_UK][0][79] = 127, + [2][0][RTW89_THAILAND][1][79] = 127, + [2][0][RTW89_THAILAND][0][79] = 127, [2][0][RTW89_FCC][1][81] = 8, - [2][0][RTW89_FCC][2][81] = 78, + [2][0][RTW89_FCC][2][81] = 68, [2][0][RTW89_ETSI][1][81] = 127, [2][0][RTW89_ETSI][0][81] = 127, [2][0][RTW89_MKK][1][81] = 127, [2][0][RTW89_MKK][0][81] = 127, [2][0][RTW89_IC][1][81] = 8, + [2][0][RTW89_IC][2][81] = 68, [2][0][RTW89_KCC][1][81] = -2, [2][0][RTW89_KCC][0][81] = 127, [2][0][RTW89_ACMA][1][81] = 127, @@ -55047,13 +58086,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][81] = 127, [2][0][RTW89_UK][1][81] = 127, [2][0][RTW89_UK][0][81] = 127, + [2][0][RTW89_THAILAND][1][81] = 127, + [2][0][RTW89_THAILAND][0][81] = 127, [2][0][RTW89_FCC][1][83] = 8, - [2][0][RTW89_FCC][2][83] = 78, + [2][0][RTW89_FCC][2][83] = 68, [2][0][RTW89_ETSI][1][83] = 127, [2][0][RTW89_ETSI][0][83] = 127, [2][0][RTW89_MKK][1][83] = 127, [2][0][RTW89_MKK][0][83] = 127, 
[2][0][RTW89_IC][1][83] = 8, + [2][0][RTW89_IC][2][83] = 68, [2][0][RTW89_KCC][1][83] = -2, [2][0][RTW89_KCC][0][83] = 127, [2][0][RTW89_ACMA][1][83] = 127, @@ -55063,13 +58105,16 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][83] = 127, [2][0][RTW89_UK][1][83] = 127, [2][0][RTW89_UK][0][83] = 127, + [2][0][RTW89_THAILAND][1][83] = 127, + [2][0][RTW89_THAILAND][0][83] = 127, [2][0][RTW89_FCC][1][85] = 8, - [2][0][RTW89_FCC][2][85] = 78, + [2][0][RTW89_FCC][2][85] = 68, [2][0][RTW89_ETSI][1][85] = 127, [2][0][RTW89_ETSI][0][85] = 127, [2][0][RTW89_MKK][1][85] = 127, [2][0][RTW89_MKK][0][85] = 127, [2][0][RTW89_IC][1][85] = 8, + [2][0][RTW89_IC][2][85] = 68, [2][0][RTW89_KCC][1][85] = -2, [2][0][RTW89_KCC][0][85] = 127, [2][0][RTW89_ACMA][1][85] = 127, @@ -55079,6 +58124,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][85] = 127, [2][0][RTW89_UK][1][85] = 127, [2][0][RTW89_UK][0][85] = 127, + [2][0][RTW89_THAILAND][1][85] = 127, + [2][0][RTW89_THAILAND][0][85] = 127, [2][0][RTW89_FCC][1][87] = 8, [2][0][RTW89_FCC][2][87] = 127, [2][0][RTW89_ETSI][1][87] = 127, @@ -55086,6 +58133,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][87] = 127, [2][0][RTW89_MKK][0][87] = 127, [2][0][RTW89_IC][1][87] = 8, + [2][0][RTW89_IC][2][87] = 127, [2][0][RTW89_KCC][1][87] = -2, [2][0][RTW89_KCC][0][87] = 127, [2][0][RTW89_ACMA][1][87] = 127, @@ -55095,6 +58143,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][87] = 127, [2][0][RTW89_UK][1][87] = 127, [2][0][RTW89_UK][0][87] = 127, + [2][0][RTW89_THAILAND][1][87] = 127, + [2][0][RTW89_THAILAND][0][87] = 127, [2][0][RTW89_FCC][1][89] = 8, [2][0][RTW89_FCC][2][89] = 127, [2][0][RTW89_ETSI][1][89] = 127, @@ -55102,6 +58152,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][89] = 127, [2][0][RTW89_MKK][0][89] = 127, [2][0][RTW89_IC][1][89] = 8, + [2][0][RTW89_IC][2][89] = 127, [2][0][RTW89_KCC][1][89] = -2, [2][0][RTW89_KCC][0][89] = 127, [2][0][RTW89_ACMA][1][89] = 127, @@ -55111,6 +58162,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][89] = 127, [2][0][RTW89_UK][1][89] = 127, [2][0][RTW89_UK][0][89] = 127, + [2][0][RTW89_THAILAND][1][89] = 127, + [2][0][RTW89_THAILAND][0][89] = 127, [2][0][RTW89_FCC][1][90] = 8, [2][0][RTW89_FCC][2][90] = 127, [2][0][RTW89_ETSI][1][90] = 127, @@ -55118,6 +58171,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][90] = 127, [2][0][RTW89_MKK][0][90] = 127, [2][0][RTW89_IC][1][90] = 8, + [2][0][RTW89_IC][2][90] = 127, [2][0][RTW89_KCC][1][90] = -2, [2][0][RTW89_KCC][0][90] = 127, [2][0][RTW89_ACMA][1][90] = 127, @@ -55127,6 +58181,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][90] = 127, [2][0][RTW89_UK][1][90] = 127, [2][0][RTW89_UK][0][90] = 127, + [2][0][RTW89_THAILAND][1][90] = 127, + [2][0][RTW89_THAILAND][0][90] = 127, [2][0][RTW89_FCC][1][92] = 8, [2][0][RTW89_FCC][2][92] = 127, [2][0][RTW89_ETSI][1][92] = 127, @@ -55134,6 +58190,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][92] = 127, [2][0][RTW89_MKK][0][92] = 127, [2][0][RTW89_IC][1][92] = 8, + [2][0][RTW89_IC][2][92] = 127, [2][0][RTW89_KCC][1][92] = -2, [2][0][RTW89_KCC][0][92] = 127, [2][0][RTW89_ACMA][1][92] = 127, @@ -55143,6 +58200,8 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][92] = 127, [2][0][RTW89_UK][1][92] = 127, [2][0][RTW89_UK][0][92] = 127, + [2][0][RTW89_THAILAND][1][92] = 127, + [2][0][RTW89_THAILAND][0][92] = 127, [2][0][RTW89_FCC][1][94] = 8, [2][0][RTW89_FCC][2][94] = 127, [2][0][RTW89_ETSI][1][94] = 127, @@ -55150,6 +58209,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][94] = 127, [2][0][RTW89_MKK][0][94] = 127, [2][0][RTW89_IC][1][94] = 8, + [2][0][RTW89_IC][2][94] = 127, [2][0][RTW89_KCC][1][94] = -2, [2][0][RTW89_KCC][0][94] = 127, [2][0][RTW89_ACMA][1][94] = 127, @@ -55159,6 +58219,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][94] = 127, [2][0][RTW89_UK][1][94] = 127, [2][0][RTW89_UK][0][94] = 127, + [2][0][RTW89_THAILAND][1][94] = 127, + [2][0][RTW89_THAILAND][0][94] = 127, [2][0][RTW89_FCC][1][96] = 8, [2][0][RTW89_FCC][2][96] = 127, [2][0][RTW89_ETSI][1][96] = 127, @@ -55166,6 +58228,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][96] = 127, [2][0][RTW89_MKK][0][96] = 127, [2][0][RTW89_IC][1][96] = 8, + [2][0][RTW89_IC][2][96] = 127, [2][0][RTW89_KCC][1][96] = -2, [2][0][RTW89_KCC][0][96] = 127, [2][0][RTW89_ACMA][1][96] = 127, @@ -55175,6 +58238,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][96] = 127, [2][0][RTW89_UK][1][96] = 127, [2][0][RTW89_UK][0][96] = 127, + [2][0][RTW89_THAILAND][1][96] = 127, + [2][0][RTW89_THAILAND][0][96] = 127, [2][0][RTW89_FCC][1][98] = 8, [2][0][RTW89_FCC][2][98] = 127, [2][0][RTW89_ETSI][1][98] = 127, @@ -55182,6 +58247,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][98] = 127, [2][0][RTW89_MKK][0][98] = 127, [2][0][RTW89_IC][1][98] = 8, + [2][0][RTW89_IC][2][98] = 127, [2][0][RTW89_KCC][1][98] = -2, [2][0][RTW89_KCC][0][98] = 127, [2][0][RTW89_ACMA][1][98] = 127, @@ -55191,6 +58257,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][98] = 127, [2][0][RTW89_UK][1][98] = 127, [2][0][RTW89_UK][0][98] = 127, + [2][0][RTW89_THAILAND][1][98] = 127, + [2][0][RTW89_THAILAND][0][98] = 127, [2][0][RTW89_FCC][1][100] = 8, [2][0][RTW89_FCC][2][100] = 127, [2][0][RTW89_ETSI][1][100] = 127, @@ -55198,6 +58266,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][100] = 127, [2][0][RTW89_MKK][0][100] = 127, [2][0][RTW89_IC][1][100] = 8, + [2][0][RTW89_IC][2][100] = 127, [2][0][RTW89_KCC][1][100] = -2, [2][0][RTW89_KCC][0][100] = 127, [2][0][RTW89_ACMA][1][100] = 127, @@ -55207,6 +58276,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][100] = 127, [2][0][RTW89_UK][1][100] = 127, [2][0][RTW89_UK][0][100] = 127, + [2][0][RTW89_THAILAND][1][100] = 127, + [2][0][RTW89_THAILAND][0][100] = 127, [2][0][RTW89_FCC][1][102] = 8, [2][0][RTW89_FCC][2][102] = 127, [2][0][RTW89_ETSI][1][102] = 127, @@ -55214,6 +58285,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][102] = 127, [2][0][RTW89_MKK][0][102] = 127, [2][0][RTW89_IC][1][102] = 8, + [2][0][RTW89_IC][2][102] = 127, [2][0][RTW89_KCC][1][102] = -2, [2][0][RTW89_KCC][0][102] = 127, [2][0][RTW89_ACMA][1][102] = 127, @@ -55223,6 +58295,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][102] = 127, [2][0][RTW89_UK][1][102] = 127, [2][0][RTW89_UK][0][102] = 127, + 
[2][0][RTW89_THAILAND][1][102] = 127, + [2][0][RTW89_THAILAND][0][102] = 127, [2][0][RTW89_FCC][1][104] = 8, [2][0][RTW89_FCC][2][104] = 127, [2][0][RTW89_ETSI][1][104] = 127, @@ -55230,6 +58304,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][104] = 127, [2][0][RTW89_MKK][0][104] = 127, [2][0][RTW89_IC][1][104] = 8, + [2][0][RTW89_IC][2][104] = 127, [2][0][RTW89_KCC][1][104] = -2, [2][0][RTW89_KCC][0][104] = 127, [2][0][RTW89_ACMA][1][104] = 127, @@ -55239,6 +58314,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][104] = 127, [2][0][RTW89_UK][1][104] = 127, [2][0][RTW89_UK][0][104] = 127, + [2][0][RTW89_THAILAND][1][104] = 127, + [2][0][RTW89_THAILAND][0][104] = 127, [2][0][RTW89_FCC][1][105] = 8, [2][0][RTW89_FCC][2][105] = 127, [2][0][RTW89_ETSI][1][105] = 127, @@ -55246,6 +58323,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][105] = 127, [2][0][RTW89_MKK][0][105] = 127, [2][0][RTW89_IC][1][105] = 8, + [2][0][RTW89_IC][2][105] = 127, [2][0][RTW89_KCC][1][105] = -2, [2][0][RTW89_KCC][0][105] = 127, [2][0][RTW89_ACMA][1][105] = 127, @@ -55255,6 +58333,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][105] = 127, [2][0][RTW89_UK][1][105] = 127, [2][0][RTW89_UK][0][105] = 127, + [2][0][RTW89_THAILAND][1][105] = 127, + [2][0][RTW89_THAILAND][0][105] = 127, [2][0][RTW89_FCC][1][107] = 10, [2][0][RTW89_FCC][2][107] = 127, [2][0][RTW89_ETSI][1][107] = 127, @@ -55262,6 +58342,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][107] = 127, [2][0][RTW89_MKK][0][107] = 127, [2][0][RTW89_IC][1][107] = 10, + [2][0][RTW89_IC][2][107] = 127, [2][0][RTW89_KCC][1][107] = -2, [2][0][RTW89_KCC][0][107] = 127, [2][0][RTW89_ACMA][1][107] = 127, @@ -55271,6 +58352,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][107] = 127, [2][0][RTW89_UK][1][107] = 127, [2][0][RTW89_UK][0][107] = 127, + [2][0][RTW89_THAILAND][1][107] = 127, + [2][0][RTW89_THAILAND][0][107] = 127, [2][0][RTW89_FCC][1][109] = 12, [2][0][RTW89_FCC][2][109] = 127, [2][0][RTW89_ETSI][1][109] = 127, @@ -55278,6 +58361,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][109] = 127, [2][0][RTW89_MKK][0][109] = 127, [2][0][RTW89_IC][1][109] = 12, + [2][0][RTW89_IC][2][109] = 127, [2][0][RTW89_KCC][1][109] = 127, [2][0][RTW89_KCC][0][109] = 127, [2][0][RTW89_ACMA][1][109] = 127, @@ -55287,6 +58371,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][109] = 127, [2][0][RTW89_UK][1][109] = 127, [2][0][RTW89_UK][0][109] = 127, + [2][0][RTW89_THAILAND][1][109] = 127, + [2][0][RTW89_THAILAND][0][109] = 127, [2][0][RTW89_FCC][1][111] = 127, [2][0][RTW89_FCC][2][111] = 127, [2][0][RTW89_ETSI][1][111] = 127, @@ -55294,6 +58380,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][111] = 127, [2][0][RTW89_MKK][0][111] = 127, [2][0][RTW89_IC][1][111] = 127, + [2][0][RTW89_IC][2][111] = 127, [2][0][RTW89_KCC][1][111] = 127, [2][0][RTW89_KCC][0][111] = 127, [2][0][RTW89_ACMA][1][111] = 127, @@ -55303,6 +58390,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][111] = 127, [2][0][RTW89_UK][1][111] = 127, [2][0][RTW89_UK][0][111] = 127, + [2][0][RTW89_THAILAND][1][111] = 127, + [2][0][RTW89_THAILAND][0][111] = 127, [2][0][RTW89_FCC][1][113] = 127, 
[2][0][RTW89_FCC][2][113] = 127, [2][0][RTW89_ETSI][1][113] = 127, @@ -55310,6 +58399,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][113] = 127, [2][0][RTW89_MKK][0][113] = 127, [2][0][RTW89_IC][1][113] = 127, + [2][0][RTW89_IC][2][113] = 127, [2][0][RTW89_KCC][1][113] = 127, [2][0][RTW89_KCC][0][113] = 127, [2][0][RTW89_ACMA][1][113] = 127, @@ -55319,6 +58409,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][113] = 127, [2][0][RTW89_UK][1][113] = 127, [2][0][RTW89_UK][0][113] = 127, + [2][0][RTW89_THAILAND][1][113] = 127, + [2][0][RTW89_THAILAND][0][113] = 127, [2][0][RTW89_FCC][1][115] = 127, [2][0][RTW89_FCC][2][115] = 127, [2][0][RTW89_ETSI][1][115] = 127, @@ -55326,6 +58418,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][115] = 127, [2][0][RTW89_MKK][0][115] = 127, [2][0][RTW89_IC][1][115] = 127, + [2][0][RTW89_IC][2][115] = 127, [2][0][RTW89_KCC][1][115] = 127, [2][0][RTW89_KCC][0][115] = 127, [2][0][RTW89_ACMA][1][115] = 127, @@ -55335,6 +58428,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][115] = 127, [2][0][RTW89_UK][1][115] = 127, [2][0][RTW89_UK][0][115] = 127, + [2][0][RTW89_THAILAND][1][115] = 127, + [2][0][RTW89_THAILAND][0][115] = 127, [2][0][RTW89_FCC][1][117] = 127, [2][0][RTW89_FCC][2][117] = 127, [2][0][RTW89_ETSI][1][117] = 127, @@ -55342,6 +58437,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][117] = 127, [2][0][RTW89_MKK][0][117] = 127, [2][0][RTW89_IC][1][117] = 127, + [2][0][RTW89_IC][2][117] = 127, [2][0][RTW89_KCC][1][117] = 127, [2][0][RTW89_KCC][0][117] = 127, [2][0][RTW89_ACMA][1][117] = 127, @@ -55351,6 +58447,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][117] = 127, [2][0][RTW89_UK][1][117] = 127, [2][0][RTW89_UK][0][117] = 127, + [2][0][RTW89_THAILAND][1][117] = 127, + [2][0][RTW89_THAILAND][0][117] = 127, [2][0][RTW89_FCC][1][119] = 127, [2][0][RTW89_FCC][2][119] = 127, [2][0][RTW89_ETSI][1][119] = 127, @@ -55358,6 +58456,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][1][119] = 127, [2][0][RTW89_MKK][0][119] = 127, [2][0][RTW89_IC][1][119] = 127, + [2][0][RTW89_IC][2][119] = 127, [2][0][RTW89_KCC][1][119] = 127, [2][0][RTW89_KCC][0][119] = 127, [2][0][RTW89_ACMA][1][119] = 127, @@ -55367,6 +58466,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_QATAR][0][119] = 127, [2][0][RTW89_UK][1][119] = 127, [2][0][RTW89_UK][0][119] = 127, + [2][0][RTW89_THAILAND][1][119] = 127, + [2][0][RTW89_THAILAND][0][119] = 127, [2][1][RTW89_FCC][1][0] = -16, [2][1][RTW89_FCC][2][0] = 54, [2][1][RTW89_ETSI][1][0] = 44, @@ -55374,6 +58475,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][0] = 42, [2][1][RTW89_MKK][0][0] = 2, [2][1][RTW89_IC][1][0] = -16, + [2][1][RTW89_IC][2][0] = 54, [2][1][RTW89_KCC][1][0] = -14, [2][1][RTW89_KCC][0][0] = -14, [2][1][RTW89_ACMA][1][0] = 44, @@ -55383,6 +58485,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][0] = 6, [2][1][RTW89_UK][1][0] = 44, [2][1][RTW89_UK][0][0] = 6, + [2][1][RTW89_THAILAND][1][0] = 28, + [2][1][RTW89_THAILAND][0][0] = -16, [2][1][RTW89_FCC][1][2] = -16, [2][1][RTW89_FCC][2][2] = 54, [2][1][RTW89_ETSI][1][2] = 44, @@ -55390,6 +58494,7 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][2] = 40, [2][1][RTW89_MKK][0][2] = 2, [2][1][RTW89_IC][1][2] = -16, + [2][1][RTW89_IC][2][2] = 54, [2][1][RTW89_KCC][1][2] = -14, [2][1][RTW89_KCC][0][2] = -14, [2][1][RTW89_ACMA][1][2] = 44, @@ -55399,6 +58504,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][2] = 6, [2][1][RTW89_UK][1][2] = 44, [2][1][RTW89_UK][0][2] = 6, + [2][1][RTW89_THAILAND][1][2] = 28, + [2][1][RTW89_THAILAND][0][2] = -16, [2][1][RTW89_FCC][1][4] = -16, [2][1][RTW89_FCC][2][4] = 54, [2][1][RTW89_ETSI][1][4] = 44, @@ -55406,6 +58513,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][4] = 40, [2][1][RTW89_MKK][0][4] = 2, [2][1][RTW89_IC][1][4] = -16, + [2][1][RTW89_IC][2][4] = 54, [2][1][RTW89_KCC][1][4] = -14, [2][1][RTW89_KCC][0][4] = -14, [2][1][RTW89_ACMA][1][4] = 44, @@ -55415,6 +58523,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][4] = 6, [2][1][RTW89_UK][1][4] = 44, [2][1][RTW89_UK][0][4] = 6, + [2][1][RTW89_THAILAND][1][4] = 28, + [2][1][RTW89_THAILAND][0][4] = -16, [2][1][RTW89_FCC][1][6] = -16, [2][1][RTW89_FCC][2][6] = 54, [2][1][RTW89_ETSI][1][6] = 44, @@ -55422,6 +58532,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][6] = 40, [2][1][RTW89_MKK][0][6] = 2, [2][1][RTW89_IC][1][6] = -16, + [2][1][RTW89_IC][2][6] = 54, [2][1][RTW89_KCC][1][6] = -14, [2][1][RTW89_KCC][0][6] = -14, [2][1][RTW89_ACMA][1][6] = 44, @@ -55431,6 +58542,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][6] = 6, [2][1][RTW89_UK][1][6] = 44, [2][1][RTW89_UK][0][6] = 6, + [2][1][RTW89_THAILAND][1][6] = 28, + [2][1][RTW89_THAILAND][0][6] = -16, [2][1][RTW89_FCC][1][8] = -16, [2][1][RTW89_FCC][2][8] = 54, [2][1][RTW89_ETSI][1][8] = 44, @@ -55438,6 +58551,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][8] = 40, [2][1][RTW89_MKK][0][8] = 2, [2][1][RTW89_IC][1][8] = -16, + [2][1][RTW89_IC][2][8] = 54, [2][1][RTW89_KCC][1][8] = -14, [2][1][RTW89_KCC][0][8] = -14, [2][1][RTW89_ACMA][1][8] = 44, @@ -55447,6 +58561,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][8] = 6, [2][1][RTW89_UK][1][8] = 44, [2][1][RTW89_UK][0][8] = 6, + [2][1][RTW89_THAILAND][1][8] = 28, + [2][1][RTW89_THAILAND][0][8] = -16, [2][1][RTW89_FCC][1][10] = -16, [2][1][RTW89_FCC][2][10] = 54, [2][1][RTW89_ETSI][1][10] = 44, @@ -55454,6 +58570,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][10] = 40, [2][1][RTW89_MKK][0][10] = 2, [2][1][RTW89_IC][1][10] = -16, + [2][1][RTW89_IC][2][10] = 54, [2][1][RTW89_KCC][1][10] = -14, [2][1][RTW89_KCC][0][10] = -14, [2][1][RTW89_ACMA][1][10] = 44, @@ -55463,6 +58580,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][10] = 6, [2][1][RTW89_UK][1][10] = 44, [2][1][RTW89_UK][0][10] = 6, + [2][1][RTW89_THAILAND][1][10] = 28, + [2][1][RTW89_THAILAND][0][10] = -16, [2][1][RTW89_FCC][1][12] = -16, [2][1][RTW89_FCC][2][12] = 54, [2][1][RTW89_ETSI][1][12] = 44, @@ -55470,6 +58589,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][12] = 40, [2][1][RTW89_MKK][0][12] = 2, [2][1][RTW89_IC][1][12] = -16, + [2][1][RTW89_IC][2][12] = 54, [2][1][RTW89_KCC][1][12] = -14, [2][1][RTW89_KCC][0][12] = -14, [2][1][RTW89_ACMA][1][12] = 44, @@ -55479,6 
+58599,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][12] = 6, [2][1][RTW89_UK][1][12] = 44, [2][1][RTW89_UK][0][12] = 6, + [2][1][RTW89_THAILAND][1][12] = 28, + [2][1][RTW89_THAILAND][0][12] = -16, [2][1][RTW89_FCC][1][14] = -16, [2][1][RTW89_FCC][2][14] = 54, [2][1][RTW89_ETSI][1][14] = 44, @@ -55486,6 +58608,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][14] = 40, [2][1][RTW89_MKK][0][14] = 2, [2][1][RTW89_IC][1][14] = -16, + [2][1][RTW89_IC][2][14] = 54, [2][1][RTW89_KCC][1][14] = -14, [2][1][RTW89_KCC][0][14] = -14, [2][1][RTW89_ACMA][1][14] = 44, @@ -55495,6 +58618,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][14] = 6, [2][1][RTW89_UK][1][14] = 44, [2][1][RTW89_UK][0][14] = 6, + [2][1][RTW89_THAILAND][1][14] = 28, + [2][1][RTW89_THAILAND][0][14] = -16, [2][1][RTW89_FCC][1][15] = -16, [2][1][RTW89_FCC][2][15] = 54, [2][1][RTW89_ETSI][1][15] = 44, @@ -55502,6 +58627,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][15] = 40, [2][1][RTW89_MKK][0][15] = 2, [2][1][RTW89_IC][1][15] = -16, + [2][1][RTW89_IC][2][15] = 54, [2][1][RTW89_KCC][1][15] = -14, [2][1][RTW89_KCC][0][15] = -14, [2][1][RTW89_ACMA][1][15] = 44, @@ -55511,6 +58637,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][15] = 6, [2][1][RTW89_UK][1][15] = 44, [2][1][RTW89_UK][0][15] = 6, + [2][1][RTW89_THAILAND][1][15] = 28, + [2][1][RTW89_THAILAND][0][15] = -16, [2][1][RTW89_FCC][1][17] = -16, [2][1][RTW89_FCC][2][17] = 54, [2][1][RTW89_ETSI][1][17] = 44, @@ -55518,6 +58646,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][17] = 40, [2][1][RTW89_MKK][0][17] = 2, [2][1][RTW89_IC][1][17] = -16, + [2][1][RTW89_IC][2][17] = 54, [2][1][RTW89_KCC][1][17] = -14, [2][1][RTW89_KCC][0][17] = -14, [2][1][RTW89_ACMA][1][17] = 44, @@ -55527,6 +58656,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][17] = 6, [2][1][RTW89_UK][1][17] = 44, [2][1][RTW89_UK][0][17] = 6, + [2][1][RTW89_THAILAND][1][17] = 28, + [2][1][RTW89_THAILAND][0][17] = -16, [2][1][RTW89_FCC][1][19] = -16, [2][1][RTW89_FCC][2][19] = 54, [2][1][RTW89_ETSI][1][19] = 44, @@ -55534,6 +58665,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][19] = 40, [2][1][RTW89_MKK][0][19] = 2, [2][1][RTW89_IC][1][19] = -16, + [2][1][RTW89_IC][2][19] = 54, [2][1][RTW89_KCC][1][19] = -14, [2][1][RTW89_KCC][0][19] = -14, [2][1][RTW89_ACMA][1][19] = 44, @@ -55543,6 +58675,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][19] = 6, [2][1][RTW89_UK][1][19] = 44, [2][1][RTW89_UK][0][19] = 6, + [2][1][RTW89_THAILAND][1][19] = 28, + [2][1][RTW89_THAILAND][0][19] = -16, [2][1][RTW89_FCC][1][21] = -16, [2][1][RTW89_FCC][2][21] = 54, [2][1][RTW89_ETSI][1][21] = 44, @@ -55550,6 +58684,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][21] = 40, [2][1][RTW89_MKK][0][21] = 2, [2][1][RTW89_IC][1][21] = -16, + [2][1][RTW89_IC][2][21] = 54, [2][1][RTW89_KCC][1][21] = -14, [2][1][RTW89_KCC][0][21] = -14, [2][1][RTW89_ACMA][1][21] = 44, @@ -55559,6 +58694,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][21] = 6, [2][1][RTW89_UK][1][21] = 44, [2][1][RTW89_UK][0][21] = 6, + [2][1][RTW89_THAILAND][1][21] = 28, + [2][1][RTW89_THAILAND][0][21] 
= -16, [2][1][RTW89_FCC][1][23] = -16, [2][1][RTW89_FCC][2][23] = 54, [2][1][RTW89_ETSI][1][23] = 44, @@ -55566,6 +58703,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][23] = 40, [2][1][RTW89_MKK][0][23] = 2, [2][1][RTW89_IC][1][23] = -16, + [2][1][RTW89_IC][2][23] = 54, [2][1][RTW89_KCC][1][23] = -14, [2][1][RTW89_KCC][0][23] = -14, [2][1][RTW89_ACMA][1][23] = 44, @@ -55575,6 +58713,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][23] = 6, [2][1][RTW89_UK][1][23] = 44, [2][1][RTW89_UK][0][23] = 6, + [2][1][RTW89_THAILAND][1][23] = 30, + [2][1][RTW89_THAILAND][0][23] = -16, [2][1][RTW89_FCC][1][25] = -16, [2][1][RTW89_FCC][2][25] = 54, [2][1][RTW89_ETSI][1][25] = 44, @@ -55582,6 +58722,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][25] = 40, [2][1][RTW89_MKK][0][25] = 2, [2][1][RTW89_IC][1][25] = -16, + [2][1][RTW89_IC][2][25] = 54, [2][1][RTW89_KCC][1][25] = -14, [2][1][RTW89_KCC][0][25] = -14, [2][1][RTW89_ACMA][1][25] = 44, @@ -55591,6 +58732,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][25] = 6, [2][1][RTW89_UK][1][25] = 44, [2][1][RTW89_UK][0][25] = 6, + [2][1][RTW89_THAILAND][1][25] = 28, + [2][1][RTW89_THAILAND][0][25] = -16, [2][1][RTW89_FCC][1][27] = -16, [2][1][RTW89_FCC][2][27] = 54, [2][1][RTW89_ETSI][1][27] = 44, @@ -55598,6 +58741,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][27] = 40, [2][1][RTW89_MKK][0][27] = 2, [2][1][RTW89_IC][1][27] = -16, + [2][1][RTW89_IC][2][27] = 54, [2][1][RTW89_KCC][1][27] = -14, [2][1][RTW89_KCC][0][27] = -14, [2][1][RTW89_ACMA][1][27] = 44, @@ -55607,6 +58751,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][27] = 6, [2][1][RTW89_UK][1][27] = 44, [2][1][RTW89_UK][0][27] = 6, + [2][1][RTW89_THAILAND][1][27] = 28, + [2][1][RTW89_THAILAND][0][27] = -16, [2][1][RTW89_FCC][1][29] = -16, [2][1][RTW89_FCC][2][29] = 54, [2][1][RTW89_ETSI][1][29] = 44, @@ -55614,6 +58760,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][29] = 40, [2][1][RTW89_MKK][0][29] = 2, [2][1][RTW89_IC][1][29] = -16, + [2][1][RTW89_IC][2][29] = 54, [2][1][RTW89_KCC][1][29] = -14, [2][1][RTW89_KCC][0][29] = -14, [2][1][RTW89_ACMA][1][29] = 44, @@ -55623,6 +58770,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][29] = 6, [2][1][RTW89_UK][1][29] = 44, [2][1][RTW89_UK][0][29] = 6, + [2][1][RTW89_THAILAND][1][29] = 28, + [2][1][RTW89_THAILAND][0][29] = -16, [2][1][RTW89_FCC][1][30] = -16, [2][1][RTW89_FCC][2][30] = 54, [2][1][RTW89_ETSI][1][30] = 44, @@ -55630,6 +58779,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][30] = 40, [2][1][RTW89_MKK][0][30] = 2, [2][1][RTW89_IC][1][30] = -16, + [2][1][RTW89_IC][2][30] = 54, [2][1][RTW89_KCC][1][30] = -14, [2][1][RTW89_KCC][0][30] = -14, [2][1][RTW89_ACMA][1][30] = 44, @@ -55639,6 +58789,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][30] = 6, [2][1][RTW89_UK][1][30] = 44, [2][1][RTW89_UK][0][30] = 6, + [2][1][RTW89_THAILAND][1][30] = 28, + [2][1][RTW89_THAILAND][0][30] = -16, [2][1][RTW89_FCC][1][32] = -16, [2][1][RTW89_FCC][2][32] = 54, [2][1][RTW89_ETSI][1][32] = 44, @@ -55646,6 +58798,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][32] = 40, 
[2][1][RTW89_MKK][0][32] = 2, [2][1][RTW89_IC][1][32] = -16, + [2][1][RTW89_IC][2][32] = 54, [2][1][RTW89_KCC][1][32] = -14, [2][1][RTW89_KCC][0][32] = -14, [2][1][RTW89_ACMA][1][32] = 44, @@ -55655,6 +58808,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][32] = 6, [2][1][RTW89_UK][1][32] = 44, [2][1][RTW89_UK][0][32] = 6, + [2][1][RTW89_THAILAND][1][32] = 28, + [2][1][RTW89_THAILAND][0][32] = -16, [2][1][RTW89_FCC][1][34] = -16, [2][1][RTW89_FCC][2][34] = 54, [2][1][RTW89_ETSI][1][34] = 44, @@ -55662,6 +58817,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][34] = 40, [2][1][RTW89_MKK][0][34] = 2, [2][1][RTW89_IC][1][34] = -16, + [2][1][RTW89_IC][2][34] = 54, [2][1][RTW89_KCC][1][34] = -14, [2][1][RTW89_KCC][0][34] = -14, [2][1][RTW89_ACMA][1][34] = 44, @@ -55671,6 +58827,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][34] = 6, [2][1][RTW89_UK][1][34] = 44, [2][1][RTW89_UK][0][34] = 6, + [2][1][RTW89_THAILAND][1][34] = 28, + [2][1][RTW89_THAILAND][0][34] = -16, [2][1][RTW89_FCC][1][36] = -16, [2][1][RTW89_FCC][2][36] = 54, [2][1][RTW89_ETSI][1][36] = 44, @@ -55678,6 +58836,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][36] = 40, [2][1][RTW89_MKK][0][36] = 2, [2][1][RTW89_IC][1][36] = -16, + [2][1][RTW89_IC][2][36] = 54, [2][1][RTW89_KCC][1][36] = -14, [2][1][RTW89_KCC][0][36] = -14, [2][1][RTW89_ACMA][1][36] = 44, @@ -55687,6 +58846,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][36] = 6, [2][1][RTW89_UK][1][36] = 44, [2][1][RTW89_UK][0][36] = 6, + [2][1][RTW89_THAILAND][1][36] = 28, + [2][1][RTW89_THAILAND][0][36] = -16, [2][1][RTW89_FCC][1][38] = -16, [2][1][RTW89_FCC][2][38] = 54, [2][1][RTW89_ETSI][1][38] = 44, @@ -55694,6 +58855,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][38] = 40, [2][1][RTW89_MKK][0][38] = 2, [2][1][RTW89_IC][1][38] = -16, + [2][1][RTW89_IC][2][38] = 54, [2][1][RTW89_KCC][1][38] = -14, [2][1][RTW89_KCC][0][38] = -14, [2][1][RTW89_ACMA][1][38] = 44, @@ -55703,6 +58865,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][38] = 6, [2][1][RTW89_UK][1][38] = 44, [2][1][RTW89_UK][0][38] = 6, + [2][1][RTW89_THAILAND][1][38] = 28, + [2][1][RTW89_THAILAND][0][38] = -16, [2][1][RTW89_FCC][1][40] = -16, [2][1][RTW89_FCC][2][40] = 54, [2][1][RTW89_ETSI][1][40] = 44, @@ -55710,6 +58874,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][40] = 40, [2][1][RTW89_MKK][0][40] = 2, [2][1][RTW89_IC][1][40] = -16, + [2][1][RTW89_IC][2][40] = 54, [2][1][RTW89_KCC][1][40] = -14, [2][1][RTW89_KCC][0][40] = -14, [2][1][RTW89_ACMA][1][40] = 44, @@ -55719,6 +58884,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][40] = 6, [2][1][RTW89_UK][1][40] = 44, [2][1][RTW89_UK][0][40] = 6, + [2][1][RTW89_THAILAND][1][40] = 28, + [2][1][RTW89_THAILAND][0][40] = -16, [2][1][RTW89_FCC][1][42] = -16, [2][1][RTW89_FCC][2][42] = 54, [2][1][RTW89_ETSI][1][42] = 44, @@ -55726,6 +58893,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][42] = 40, [2][1][RTW89_MKK][0][42] = 2, [2][1][RTW89_IC][1][42] = -16, + [2][1][RTW89_IC][2][42] = 54, [2][1][RTW89_KCC][1][42] = -14, [2][1][RTW89_KCC][0][42] = -14, [2][1][RTW89_ACMA][1][42] = 44, @@ -55735,6 +58903,8 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][42] = 6, [2][1][RTW89_UK][1][42] = 44, [2][1][RTW89_UK][0][42] = 6, + [2][1][RTW89_THAILAND][1][42] = 28, + [2][1][RTW89_THAILAND][0][42] = -16, [2][1][RTW89_FCC][1][44] = -16, [2][1][RTW89_FCC][2][44] = 54, [2][1][RTW89_ETSI][1][44] = 44, @@ -55742,6 +58912,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][44] = 16, [2][1][RTW89_MKK][0][44] = 2, [2][1][RTW89_IC][1][44] = -16, + [2][1][RTW89_IC][2][44] = 54, [2][1][RTW89_KCC][1][44] = -14, [2][1][RTW89_KCC][0][44] = -14, [2][1][RTW89_ACMA][1][44] = 44, @@ -55751,6 +58922,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][44] = 6, [2][1][RTW89_UK][1][44] = 44, [2][1][RTW89_UK][0][44] = 6, + [2][1][RTW89_THAILAND][1][44] = 28, + [2][1][RTW89_THAILAND][0][44] = -16, [2][1][RTW89_FCC][1][45] = -16, [2][1][RTW89_FCC][2][45] = 127, [2][1][RTW89_ETSI][1][45] = 127, @@ -55758,6 +58931,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][45] = 127, [2][1][RTW89_MKK][0][45] = 127, [2][1][RTW89_IC][1][45] = -16, + [2][1][RTW89_IC][2][45] = 56, [2][1][RTW89_KCC][1][45] = -14, [2][1][RTW89_KCC][0][45] = 127, [2][1][RTW89_ACMA][1][45] = 127, @@ -55767,6 +58941,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][45] = 127, [2][1][RTW89_UK][1][45] = 127, [2][1][RTW89_UK][0][45] = 127, + [2][1][RTW89_THAILAND][1][45] = 127, + [2][1][RTW89_THAILAND][0][45] = 127, [2][1][RTW89_FCC][1][47] = -16, [2][1][RTW89_FCC][2][47] = 127, [2][1][RTW89_ETSI][1][47] = 127, @@ -55774,6 +58950,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][47] = 127, [2][1][RTW89_MKK][0][47] = 127, [2][1][RTW89_IC][1][47] = -16, + [2][1][RTW89_IC][2][47] = 56, [2][1][RTW89_KCC][1][47] = -14, [2][1][RTW89_KCC][0][47] = 127, [2][1][RTW89_ACMA][1][47] = 127, @@ -55783,6 +58960,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][47] = 127, [2][1][RTW89_UK][1][47] = 127, [2][1][RTW89_UK][0][47] = 127, + [2][1][RTW89_THAILAND][1][47] = 127, + [2][1][RTW89_THAILAND][0][47] = 127, [2][1][RTW89_FCC][1][49] = -16, [2][1][RTW89_FCC][2][49] = 127, [2][1][RTW89_ETSI][1][49] = 127, @@ -55790,6 +58969,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][49] = 127, [2][1][RTW89_MKK][0][49] = 127, [2][1][RTW89_IC][1][49] = -16, + [2][1][RTW89_IC][2][49] = 56, [2][1][RTW89_KCC][1][49] = -14, [2][1][RTW89_KCC][0][49] = 127, [2][1][RTW89_ACMA][1][49] = 127, @@ -55799,6 +58979,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][49] = 127, [2][1][RTW89_UK][1][49] = 127, [2][1][RTW89_UK][0][49] = 127, + [2][1][RTW89_THAILAND][1][49] = 127, + [2][1][RTW89_THAILAND][0][49] = 127, [2][1][RTW89_FCC][1][51] = -16, [2][1][RTW89_FCC][2][51] = 127, [2][1][RTW89_ETSI][1][51] = 127, @@ -55806,6 +58988,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][51] = 127, [2][1][RTW89_MKK][0][51] = 127, [2][1][RTW89_IC][1][51] = -16, + [2][1][RTW89_IC][2][51] = 56, [2][1][RTW89_KCC][1][51] = -14, [2][1][RTW89_KCC][0][51] = 127, [2][1][RTW89_ACMA][1][51] = 127, @@ -55815,6 +58998,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][51] = 127, [2][1][RTW89_UK][1][51] = 127, [2][1][RTW89_UK][0][51] = 127, + [2][1][RTW89_THAILAND][1][51] = 127, + 
[2][1][RTW89_THAILAND][0][51] = 127, [2][1][RTW89_FCC][1][53] = -16, [2][1][RTW89_FCC][2][53] = 127, [2][1][RTW89_ETSI][1][53] = 127, @@ -55822,6 +59007,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][53] = 127, [2][1][RTW89_MKK][0][53] = 127, [2][1][RTW89_IC][1][53] = -16, + [2][1][RTW89_IC][2][53] = 56, [2][1][RTW89_KCC][1][53] = -14, [2][1][RTW89_KCC][0][53] = 127, [2][1][RTW89_ACMA][1][53] = 127, @@ -55831,6 +59017,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][53] = 127, [2][1][RTW89_UK][1][53] = 127, [2][1][RTW89_UK][0][53] = 127, + [2][1][RTW89_THAILAND][1][53] = 127, + [2][1][RTW89_THAILAND][0][53] = 127, [2][1][RTW89_FCC][1][55] = -16, [2][1][RTW89_FCC][2][55] = 54, [2][1][RTW89_ETSI][1][55] = 127, @@ -55838,6 +59026,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][55] = 127, [2][1][RTW89_MKK][0][55] = 127, [2][1][RTW89_IC][1][55] = -16, + [2][1][RTW89_IC][2][55] = 54, [2][1][RTW89_KCC][1][55] = -14, [2][1][RTW89_KCC][0][55] = 127, [2][1][RTW89_ACMA][1][55] = 127, @@ -55847,6 +59036,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][55] = 127, [2][1][RTW89_UK][1][55] = 127, [2][1][RTW89_UK][0][55] = 127, + [2][1][RTW89_THAILAND][1][55] = 127, + [2][1][RTW89_THAILAND][0][55] = 127, [2][1][RTW89_FCC][1][57] = -16, [2][1][RTW89_FCC][2][57] = 54, [2][1][RTW89_ETSI][1][57] = 127, @@ -55854,6 +59045,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][57] = 127, [2][1][RTW89_MKK][0][57] = 127, [2][1][RTW89_IC][1][57] = -16, + [2][1][RTW89_IC][2][57] = 54, [2][1][RTW89_KCC][1][57] = -14, [2][1][RTW89_KCC][0][57] = 127, [2][1][RTW89_ACMA][1][57] = 127, @@ -55863,6 +59055,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][57] = 127, [2][1][RTW89_UK][1][57] = 127, [2][1][RTW89_UK][0][57] = 127, + [2][1][RTW89_THAILAND][1][57] = 127, + [2][1][RTW89_THAILAND][0][57] = 127, [2][1][RTW89_FCC][1][59] = -16, [2][1][RTW89_FCC][2][59] = 54, [2][1][RTW89_ETSI][1][59] = 127, @@ -55870,6 +59064,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][59] = 127, [2][1][RTW89_MKK][0][59] = 127, [2][1][RTW89_IC][1][59] = -16, + [2][1][RTW89_IC][2][59] = 54, [2][1][RTW89_KCC][1][59] = -14, [2][1][RTW89_KCC][0][59] = 127, [2][1][RTW89_ACMA][1][59] = 127, @@ -55879,6 +59074,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][59] = 127, [2][1][RTW89_UK][1][59] = 127, [2][1][RTW89_UK][0][59] = 127, + [2][1][RTW89_THAILAND][1][59] = 127, + [2][1][RTW89_THAILAND][0][59] = 127, [2][1][RTW89_FCC][1][60] = -16, [2][1][RTW89_FCC][2][60] = 54, [2][1][RTW89_ETSI][1][60] = 127, @@ -55886,6 +59083,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][60] = 127, [2][1][RTW89_MKK][0][60] = 127, [2][1][RTW89_IC][1][60] = -16, + [2][1][RTW89_IC][2][60] = 54, [2][1][RTW89_KCC][1][60] = -14, [2][1][RTW89_KCC][0][60] = 127, [2][1][RTW89_ACMA][1][60] = 127, @@ -55895,6 +59093,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][60] = 127, [2][1][RTW89_UK][1][60] = 127, [2][1][RTW89_UK][0][60] = 127, + [2][1][RTW89_THAILAND][1][60] = 127, + [2][1][RTW89_THAILAND][0][60] = 127, [2][1][RTW89_FCC][1][62] = -16, [2][1][RTW89_FCC][2][62] = 54, [2][1][RTW89_ETSI][1][62] = 127, @@ -55902,6 +59102,7 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][62] = 127, [2][1][RTW89_MKK][0][62] = 127, [2][1][RTW89_IC][1][62] = -16, + [2][1][RTW89_IC][2][62] = 54, [2][1][RTW89_KCC][1][62] = -14, [2][1][RTW89_KCC][0][62] = 127, [2][1][RTW89_ACMA][1][62] = 127, @@ -55911,6 +59112,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][62] = 127, [2][1][RTW89_UK][1][62] = 127, [2][1][RTW89_UK][0][62] = 127, + [2][1][RTW89_THAILAND][1][62] = 127, + [2][1][RTW89_THAILAND][0][62] = 127, [2][1][RTW89_FCC][1][64] = -16, [2][1][RTW89_FCC][2][64] = 54, [2][1][RTW89_ETSI][1][64] = 127, @@ -55918,6 +59121,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][64] = 127, [2][1][RTW89_MKK][0][64] = 127, [2][1][RTW89_IC][1][64] = -16, + [2][1][RTW89_IC][2][64] = 54, [2][1][RTW89_KCC][1][64] = -14, [2][1][RTW89_KCC][0][64] = 127, [2][1][RTW89_ACMA][1][64] = 127, @@ -55927,6 +59131,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][64] = 127, [2][1][RTW89_UK][1][64] = 127, [2][1][RTW89_UK][0][64] = 127, + [2][1][RTW89_THAILAND][1][64] = 127, + [2][1][RTW89_THAILAND][0][64] = 127, [2][1][RTW89_FCC][1][66] = -16, [2][1][RTW89_FCC][2][66] = 54, [2][1][RTW89_ETSI][1][66] = 127, @@ -55934,6 +59140,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][66] = 127, [2][1][RTW89_MKK][0][66] = 127, [2][1][RTW89_IC][1][66] = -16, + [2][1][RTW89_IC][2][66] = 54, [2][1][RTW89_KCC][1][66] = -14, [2][1][RTW89_KCC][0][66] = 127, [2][1][RTW89_ACMA][1][66] = 127, @@ -55943,6 +59150,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][66] = 127, [2][1][RTW89_UK][1][66] = 127, [2][1][RTW89_UK][0][66] = 127, + [2][1][RTW89_THAILAND][1][66] = 127, + [2][1][RTW89_THAILAND][0][66] = 127, [2][1][RTW89_FCC][1][68] = -16, [2][1][RTW89_FCC][2][68] = 54, [2][1][RTW89_ETSI][1][68] = 127, @@ -55950,6 +59159,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][68] = 127, [2][1][RTW89_MKK][0][68] = 127, [2][1][RTW89_IC][1][68] = -16, + [2][1][RTW89_IC][2][68] = 54, [2][1][RTW89_KCC][1][68] = -14, [2][1][RTW89_KCC][0][68] = 127, [2][1][RTW89_ACMA][1][68] = 127, @@ -55959,6 +59169,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][68] = 127, [2][1][RTW89_UK][1][68] = 127, [2][1][RTW89_UK][0][68] = 127, + [2][1][RTW89_THAILAND][1][68] = 127, + [2][1][RTW89_THAILAND][0][68] = 127, [2][1][RTW89_FCC][1][70] = -16, [2][1][RTW89_FCC][2][70] = 56, [2][1][RTW89_ETSI][1][70] = 127, @@ -55966,6 +59178,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][70] = 127, [2][1][RTW89_MKK][0][70] = 127, [2][1][RTW89_IC][1][70] = -16, + [2][1][RTW89_IC][2][70] = 56, [2][1][RTW89_KCC][1][70] = -14, [2][1][RTW89_KCC][0][70] = 127, [2][1][RTW89_ACMA][1][70] = 127, @@ -55975,6 +59188,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][70] = 127, [2][1][RTW89_UK][1][70] = 127, [2][1][RTW89_UK][0][70] = 127, + [2][1][RTW89_THAILAND][1][70] = 127, + [2][1][RTW89_THAILAND][0][70] = 127, [2][1][RTW89_FCC][1][72] = -16, [2][1][RTW89_FCC][2][72] = 56, [2][1][RTW89_ETSI][1][72] = 127, @@ -55982,6 +59197,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][72] = 127, [2][1][RTW89_MKK][0][72] = 127, [2][1][RTW89_IC][1][72] = -16, + [2][1][RTW89_IC][2][72] = 56, 
[2][1][RTW89_KCC][1][72] = -14, [2][1][RTW89_KCC][0][72] = 127, [2][1][RTW89_ACMA][1][72] = 127, @@ -55991,6 +59207,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][72] = 127, [2][1][RTW89_UK][1][72] = 127, [2][1][RTW89_UK][0][72] = 127, + [2][1][RTW89_THAILAND][1][72] = 127, + [2][1][RTW89_THAILAND][0][72] = 127, [2][1][RTW89_FCC][1][74] = -16, [2][1][RTW89_FCC][2][74] = 56, [2][1][RTW89_ETSI][1][74] = 127, @@ -55998,6 +59216,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][74] = 127, [2][1][RTW89_MKK][0][74] = 127, [2][1][RTW89_IC][1][74] = -16, + [2][1][RTW89_IC][2][74] = 56, [2][1][RTW89_KCC][1][74] = -14, [2][1][RTW89_KCC][0][74] = 127, [2][1][RTW89_ACMA][1][74] = 127, @@ -56007,6 +59226,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][74] = 127, [2][1][RTW89_UK][1][74] = 127, [2][1][RTW89_UK][0][74] = 127, + [2][1][RTW89_THAILAND][1][74] = 127, + [2][1][RTW89_THAILAND][0][74] = 127, [2][1][RTW89_FCC][1][75] = -16, [2][1][RTW89_FCC][2][75] = 56, [2][1][RTW89_ETSI][1][75] = 127, @@ -56014,6 +59235,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][75] = 127, [2][1][RTW89_MKK][0][75] = 127, [2][1][RTW89_IC][1][75] = -16, + [2][1][RTW89_IC][2][75] = 56, [2][1][RTW89_KCC][1][75] = -14, [2][1][RTW89_KCC][0][75] = 127, [2][1][RTW89_ACMA][1][75] = 127, @@ -56023,6 +59245,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][75] = 127, [2][1][RTW89_UK][1][75] = 127, [2][1][RTW89_UK][0][75] = 127, + [2][1][RTW89_THAILAND][1][75] = 127, + [2][1][RTW89_THAILAND][0][75] = 127, [2][1][RTW89_FCC][1][77] = -16, [2][1][RTW89_FCC][2][77] = 56, [2][1][RTW89_ETSI][1][77] = 127, @@ -56030,6 +59254,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][77] = 127, [2][1][RTW89_MKK][0][77] = 127, [2][1][RTW89_IC][1][77] = -16, + [2][1][RTW89_IC][2][77] = 56, [2][1][RTW89_KCC][1][77] = -14, [2][1][RTW89_KCC][0][77] = 127, [2][1][RTW89_ACMA][1][77] = 127, @@ -56039,6 +59264,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][77] = 127, [2][1][RTW89_UK][1][77] = 127, [2][1][RTW89_UK][0][77] = 127, + [2][1][RTW89_THAILAND][1][77] = 127, + [2][1][RTW89_THAILAND][0][77] = 127, [2][1][RTW89_FCC][1][79] = -16, [2][1][RTW89_FCC][2][79] = 56, [2][1][RTW89_ETSI][1][79] = 127, @@ -56046,6 +59273,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][79] = 127, [2][1][RTW89_MKK][0][79] = 127, [2][1][RTW89_IC][1][79] = -16, + [2][1][RTW89_IC][2][79] = 56, [2][1][RTW89_KCC][1][79] = -14, [2][1][RTW89_KCC][0][79] = 127, [2][1][RTW89_ACMA][1][79] = 127, @@ -56055,6 +59283,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][79] = 127, [2][1][RTW89_UK][1][79] = 127, [2][1][RTW89_UK][0][79] = 127, + [2][1][RTW89_THAILAND][1][79] = 127, + [2][1][RTW89_THAILAND][0][79] = 127, [2][1][RTW89_FCC][1][81] = -16, [2][1][RTW89_FCC][2][81] = 56, [2][1][RTW89_ETSI][1][81] = 127, @@ -56062,6 +59292,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][81] = 127, [2][1][RTW89_MKK][0][81] = 127, [2][1][RTW89_IC][1][81] = -16, + [2][1][RTW89_IC][2][81] = 56, [2][1][RTW89_KCC][1][81] = -14, [2][1][RTW89_KCC][0][81] = 127, [2][1][RTW89_ACMA][1][81] = 127, @@ -56071,6 +59302,8 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][81] = 127, [2][1][RTW89_UK][1][81] = 127, [2][1][RTW89_UK][0][81] = 127, + [2][1][RTW89_THAILAND][1][81] = 127, + [2][1][RTW89_THAILAND][0][81] = 127, [2][1][RTW89_FCC][1][83] = -16, [2][1][RTW89_FCC][2][83] = 56, [2][1][RTW89_ETSI][1][83] = 127, @@ -56078,6 +59311,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][83] = 127, [2][1][RTW89_MKK][0][83] = 127, [2][1][RTW89_IC][1][83] = -16, + [2][1][RTW89_IC][2][83] = 56, [2][1][RTW89_KCC][1][83] = -14, [2][1][RTW89_KCC][0][83] = 127, [2][1][RTW89_ACMA][1][83] = 127, @@ -56087,6 +59321,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][83] = 127, [2][1][RTW89_UK][1][83] = 127, [2][1][RTW89_UK][0][83] = 127, + [2][1][RTW89_THAILAND][1][83] = 127, + [2][1][RTW89_THAILAND][0][83] = 127, [2][1][RTW89_FCC][1][85] = -18, [2][1][RTW89_FCC][2][85] = 56, [2][1][RTW89_ETSI][1][85] = 127, @@ -56094,6 +59330,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][85] = 127, [2][1][RTW89_MKK][0][85] = 127, [2][1][RTW89_IC][1][85] = -18, + [2][1][RTW89_IC][2][85] = 56, [2][1][RTW89_KCC][1][85] = -14, [2][1][RTW89_KCC][0][85] = 127, [2][1][RTW89_ACMA][1][85] = 127, @@ -56103,6 +59340,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][85] = 127, [2][1][RTW89_UK][1][85] = 127, [2][1][RTW89_UK][0][85] = 127, + [2][1][RTW89_THAILAND][1][85] = 127, + [2][1][RTW89_THAILAND][0][85] = 127, [2][1][RTW89_FCC][1][87] = -16, [2][1][RTW89_FCC][2][87] = 127, [2][1][RTW89_ETSI][1][87] = 127, @@ -56110,6 +59349,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][87] = 127, [2][1][RTW89_MKK][0][87] = 127, [2][1][RTW89_IC][1][87] = -16, + [2][1][RTW89_IC][2][87] = 127, [2][1][RTW89_KCC][1][87] = -14, [2][1][RTW89_KCC][0][87] = 127, [2][1][RTW89_ACMA][1][87] = 127, @@ -56119,6 +59359,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][87] = 127, [2][1][RTW89_UK][1][87] = 127, [2][1][RTW89_UK][0][87] = 127, + [2][1][RTW89_THAILAND][1][87] = 127, + [2][1][RTW89_THAILAND][0][87] = 127, [2][1][RTW89_FCC][1][89] = -16, [2][1][RTW89_FCC][2][89] = 127, [2][1][RTW89_ETSI][1][89] = 127, @@ -56126,6 +59368,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][89] = 127, [2][1][RTW89_MKK][0][89] = 127, [2][1][RTW89_IC][1][89] = -16, + [2][1][RTW89_IC][2][89] = 127, [2][1][RTW89_KCC][1][89] = -14, [2][1][RTW89_KCC][0][89] = 127, [2][1][RTW89_ACMA][1][89] = 127, @@ -56135,6 +59378,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][89] = 127, [2][1][RTW89_UK][1][89] = 127, [2][1][RTW89_UK][0][89] = 127, + [2][1][RTW89_THAILAND][1][89] = 127, + [2][1][RTW89_THAILAND][0][89] = 127, [2][1][RTW89_FCC][1][90] = -16, [2][1][RTW89_FCC][2][90] = 127, [2][1][RTW89_ETSI][1][90] = 127, @@ -56142,6 +59387,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][90] = 127, [2][1][RTW89_MKK][0][90] = 127, [2][1][RTW89_IC][1][90] = -16, + [2][1][RTW89_IC][2][90] = 127, [2][1][RTW89_KCC][1][90] = -14, [2][1][RTW89_KCC][0][90] = 127, [2][1][RTW89_ACMA][1][90] = 127, @@ -56151,6 +59397,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][90] = 127, [2][1][RTW89_UK][1][90] = 127, [2][1][RTW89_UK][0][90] = 127, + 
[2][1][RTW89_THAILAND][1][90] = 127, + [2][1][RTW89_THAILAND][0][90] = 127, [2][1][RTW89_FCC][1][92] = -16, [2][1][RTW89_FCC][2][92] = 127, [2][1][RTW89_ETSI][1][92] = 127, @@ -56158,6 +59406,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][92] = 127, [2][1][RTW89_MKK][0][92] = 127, [2][1][RTW89_IC][1][92] = -16, + [2][1][RTW89_IC][2][92] = 127, [2][1][RTW89_KCC][1][92] = -14, [2][1][RTW89_KCC][0][92] = 127, [2][1][RTW89_ACMA][1][92] = 127, @@ -56167,6 +59416,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][92] = 127, [2][1][RTW89_UK][1][92] = 127, [2][1][RTW89_UK][0][92] = 127, + [2][1][RTW89_THAILAND][1][92] = 127, + [2][1][RTW89_THAILAND][0][92] = 127, [2][1][RTW89_FCC][1][94] = -16, [2][1][RTW89_FCC][2][94] = 127, [2][1][RTW89_ETSI][1][94] = 127, @@ -56174,6 +59425,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][94] = 127, [2][1][RTW89_MKK][0][94] = 127, [2][1][RTW89_IC][1][94] = -16, + [2][1][RTW89_IC][2][94] = 127, [2][1][RTW89_KCC][1][94] = -14, [2][1][RTW89_KCC][0][94] = 127, [2][1][RTW89_ACMA][1][94] = 127, @@ -56183,6 +59435,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][94] = 127, [2][1][RTW89_UK][1][94] = 127, [2][1][RTW89_UK][0][94] = 127, + [2][1][RTW89_THAILAND][1][94] = 127, + [2][1][RTW89_THAILAND][0][94] = 127, [2][1][RTW89_FCC][1][96] = -16, [2][1][RTW89_FCC][2][96] = 127, [2][1][RTW89_ETSI][1][96] = 127, @@ -56190,6 +59444,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][96] = 127, [2][1][RTW89_MKK][0][96] = 127, [2][1][RTW89_IC][1][96] = -16, + [2][1][RTW89_IC][2][96] = 127, [2][1][RTW89_KCC][1][96] = -14, [2][1][RTW89_KCC][0][96] = 127, [2][1][RTW89_ACMA][1][96] = 127, @@ -56199,6 +59454,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][96] = 127, [2][1][RTW89_UK][1][96] = 127, [2][1][RTW89_UK][0][96] = 127, + [2][1][RTW89_THAILAND][1][96] = 127, + [2][1][RTW89_THAILAND][0][96] = 127, [2][1][RTW89_FCC][1][98] = -16, [2][1][RTW89_FCC][2][98] = 127, [2][1][RTW89_ETSI][1][98] = 127, @@ -56206,6 +59463,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][98] = 127, [2][1][RTW89_MKK][0][98] = 127, [2][1][RTW89_IC][1][98] = -16, + [2][1][RTW89_IC][2][98] = 127, [2][1][RTW89_KCC][1][98] = -14, [2][1][RTW89_KCC][0][98] = 127, [2][1][RTW89_ACMA][1][98] = 127, @@ -56215,6 +59473,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][98] = 127, [2][1][RTW89_UK][1][98] = 127, [2][1][RTW89_UK][0][98] = 127, + [2][1][RTW89_THAILAND][1][98] = 127, + [2][1][RTW89_THAILAND][0][98] = 127, [2][1][RTW89_FCC][1][100] = -16, [2][1][RTW89_FCC][2][100] = 127, [2][1][RTW89_ETSI][1][100] = 127, @@ -56222,6 +59482,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][100] = 127, [2][1][RTW89_MKK][0][100] = 127, [2][1][RTW89_IC][1][100] = -16, + [2][1][RTW89_IC][2][100] = 127, [2][1][RTW89_KCC][1][100] = -14, [2][1][RTW89_KCC][0][100] = 127, [2][1][RTW89_ACMA][1][100] = 127, @@ -56231,6 +59492,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][100] = 127, [2][1][RTW89_UK][1][100] = 127, [2][1][RTW89_UK][0][100] = 127, + [2][1][RTW89_THAILAND][1][100] = 127, + [2][1][RTW89_THAILAND][0][100] = 127, [2][1][RTW89_FCC][1][102] = -16, [2][1][RTW89_FCC][2][102] = 127, 
[2][1][RTW89_ETSI][1][102] = 127, @@ -56238,6 +59501,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][102] = 127, [2][1][RTW89_MKK][0][102] = 127, [2][1][RTW89_IC][1][102] = -16, + [2][1][RTW89_IC][2][102] = 127, [2][1][RTW89_KCC][1][102] = -14, [2][1][RTW89_KCC][0][102] = 127, [2][1][RTW89_ACMA][1][102] = 127, @@ -56247,6 +59511,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][102] = 127, [2][1][RTW89_UK][1][102] = 127, [2][1][RTW89_UK][0][102] = 127, + [2][1][RTW89_THAILAND][1][102] = 127, + [2][1][RTW89_THAILAND][0][102] = 127, [2][1][RTW89_FCC][1][104] = -16, [2][1][RTW89_FCC][2][104] = 127, [2][1][RTW89_ETSI][1][104] = 127, @@ -56254,6 +59520,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][104] = 127, [2][1][RTW89_MKK][0][104] = 127, [2][1][RTW89_IC][1][104] = -16, + [2][1][RTW89_IC][2][104] = 127, [2][1][RTW89_KCC][1][104] = -14, [2][1][RTW89_KCC][0][104] = 127, [2][1][RTW89_ACMA][1][104] = 127, @@ -56263,6 +59530,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][104] = 127, [2][1][RTW89_UK][1][104] = 127, [2][1][RTW89_UK][0][104] = 127, + [2][1][RTW89_THAILAND][1][104] = 127, + [2][1][RTW89_THAILAND][0][104] = 127, [2][1][RTW89_FCC][1][105] = -16, [2][1][RTW89_FCC][2][105] = 127, [2][1][RTW89_ETSI][1][105] = 127, @@ -56270,6 +59539,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][105] = 127, [2][1][RTW89_MKK][0][105] = 127, [2][1][RTW89_IC][1][105] = -16, + [2][1][RTW89_IC][2][105] = 127, [2][1][RTW89_KCC][1][105] = -14, [2][1][RTW89_KCC][0][105] = 127, [2][1][RTW89_ACMA][1][105] = 127, @@ -56279,6 +59549,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][105] = 127, [2][1][RTW89_UK][1][105] = 127, [2][1][RTW89_UK][0][105] = 127, + [2][1][RTW89_THAILAND][1][105] = 127, + [2][1][RTW89_THAILAND][0][105] = 127, [2][1][RTW89_FCC][1][107] = -12, [2][1][RTW89_FCC][2][107] = 127, [2][1][RTW89_ETSI][1][107] = 127, @@ -56286,6 +59558,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][107] = 127, [2][1][RTW89_MKK][0][107] = 127, [2][1][RTW89_IC][1][107] = -12, + [2][1][RTW89_IC][2][107] = 127, [2][1][RTW89_KCC][1][107] = -14, [2][1][RTW89_KCC][0][107] = 127, [2][1][RTW89_ACMA][1][107] = 127, @@ -56295,6 +59568,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][107] = 127, [2][1][RTW89_UK][1][107] = 127, [2][1][RTW89_UK][0][107] = 127, + [2][1][RTW89_THAILAND][1][107] = 127, + [2][1][RTW89_THAILAND][0][107] = 127, [2][1][RTW89_FCC][1][109] = -10, [2][1][RTW89_FCC][2][109] = 127, [2][1][RTW89_ETSI][1][109] = 127, @@ -56302,6 +59577,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][109] = 127, [2][1][RTW89_MKK][0][109] = 127, [2][1][RTW89_IC][1][109] = -10, + [2][1][RTW89_IC][2][109] = 127, [2][1][RTW89_KCC][1][109] = 127, [2][1][RTW89_KCC][0][109] = 127, [2][1][RTW89_ACMA][1][109] = 127, @@ -56311,6 +59587,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][109] = 127, [2][1][RTW89_UK][1][109] = 127, [2][1][RTW89_UK][0][109] = 127, + [2][1][RTW89_THAILAND][1][109] = 127, + [2][1][RTW89_THAILAND][0][109] = 127, [2][1][RTW89_FCC][1][111] = 127, [2][1][RTW89_FCC][2][111] = 127, [2][1][RTW89_ETSI][1][111] = 127, @@ -56318,6 +59596,7 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][111] = 127, [2][1][RTW89_MKK][0][111] = 127, [2][1][RTW89_IC][1][111] = 127, + [2][1][RTW89_IC][2][111] = 127, [2][1][RTW89_KCC][1][111] = 127, [2][1][RTW89_KCC][0][111] = 127, [2][1][RTW89_ACMA][1][111] = 127, @@ -56327,6 +59606,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][111] = 127, [2][1][RTW89_UK][1][111] = 127, [2][1][RTW89_UK][0][111] = 127, + [2][1][RTW89_THAILAND][1][111] = 127, + [2][1][RTW89_THAILAND][0][111] = 127, [2][1][RTW89_FCC][1][113] = 127, [2][1][RTW89_FCC][2][113] = 127, [2][1][RTW89_ETSI][1][113] = 127, @@ -56334,6 +59615,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][113] = 127, [2][1][RTW89_MKK][0][113] = 127, [2][1][RTW89_IC][1][113] = 127, + [2][1][RTW89_IC][2][113] = 127, [2][1][RTW89_KCC][1][113] = 127, [2][1][RTW89_KCC][0][113] = 127, [2][1][RTW89_ACMA][1][113] = 127, @@ -56343,6 +59625,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][113] = 127, [2][1][RTW89_UK][1][113] = 127, [2][1][RTW89_UK][0][113] = 127, + [2][1][RTW89_THAILAND][1][113] = 127, + [2][1][RTW89_THAILAND][0][113] = 127, [2][1][RTW89_FCC][1][115] = 127, [2][1][RTW89_FCC][2][115] = 127, [2][1][RTW89_ETSI][1][115] = 127, @@ -56350,6 +59634,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][115] = 127, [2][1][RTW89_MKK][0][115] = 127, [2][1][RTW89_IC][1][115] = 127, + [2][1][RTW89_IC][2][115] = 127, [2][1][RTW89_KCC][1][115] = 127, [2][1][RTW89_KCC][0][115] = 127, [2][1][RTW89_ACMA][1][115] = 127, @@ -56359,6 +59644,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][115] = 127, [2][1][RTW89_UK][1][115] = 127, [2][1][RTW89_UK][0][115] = 127, + [2][1][RTW89_THAILAND][1][115] = 127, + [2][1][RTW89_THAILAND][0][115] = 127, [2][1][RTW89_FCC][1][117] = 127, [2][1][RTW89_FCC][2][117] = 127, [2][1][RTW89_ETSI][1][117] = 127, @@ -56366,6 +59653,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][117] = 127, [2][1][RTW89_MKK][0][117] = 127, [2][1][RTW89_IC][1][117] = 127, + [2][1][RTW89_IC][2][117] = 127, [2][1][RTW89_KCC][1][117] = 127, [2][1][RTW89_KCC][0][117] = 127, [2][1][RTW89_ACMA][1][117] = 127, @@ -56375,6 +59663,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][117] = 127, [2][1][RTW89_UK][1][117] = 127, [2][1][RTW89_UK][0][117] = 127, + [2][1][RTW89_THAILAND][1][117] = 127, + [2][1][RTW89_THAILAND][0][117] = 127, [2][1][RTW89_FCC][1][119] = 127, [2][1][RTW89_FCC][2][119] = 127, [2][1][RTW89_ETSI][1][119] = 127, @@ -56382,6 +59672,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MKK][1][119] = 127, [2][1][RTW89_MKK][0][119] = 127, [2][1][RTW89_IC][1][119] = 127, + [2][1][RTW89_IC][2][119] = 127, [2][1][RTW89_KCC][1][119] = 127, [2][1][RTW89_KCC][0][119] = 127, [2][1][RTW89_ACMA][1][119] = 127, @@ -56391,6 +59682,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_QATAR][0][119] = 127, [2][1][RTW89_UK][1][119] = 127, [2][1][RTW89_UK][0][119] = 127, + [2][1][RTW89_THAILAND][1][119] = 127, + [2][1][RTW89_THAILAND][0][119] = 127, }; const struct rtw89_phy_table rtw89_8852c_phy_bb_table = { @@ -56425,6 +59718,7 @@ const struct rtw89_phy_table rtw89_8852c_phy_nctl_table = { .rf_path = 0, /* don't care */ }; +static const struct rtw89_txpwr_table 
rtw89_8852c_byr_table = { .data = rtw89_8852c_txpwr_byrate, .size = ARRAY_SIZE(rtw89_8852c_txpwr_byrate), @@ -56452,12 +59746,16 @@ const struct rtw89_txpwr_track_cfg rtw89_8852c_trk_cfg = { const struct rtw89_phy_tssi_dbw_table rtw89_8852c_tssi_dbw_table = { .data[RTW89_TSSI_BANDEDGE_FLAT] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - .data[RTW89_TSSI_BANDEDGE_LOW] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - .data[RTW89_TSSI_BANDEDGE_MID] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - .data[RTW89_TSSI_BANDEDGE_HIGH] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .data[RTW89_TSSI_BANDEDGE_LOW] = {0x1d, 0x1d, 0x1d, 0x2f, 0xf, 0xf, 0x2f, 0x38, + 0x28, 0x18, 0x8, 0x8, 0x18, 0x28, 0x38}, + .data[RTW89_TSSI_BANDEDGE_MID] = {0x24, 0x24, 0x24, 0x3b, 0x13, 0x13, 0x3b, 0x46, + 0x32, 0x1e, 0xa, 0xa, 0x1e, 0x32, 0x46}, + .data[RTW89_TSSI_BANDEDGE_HIGH] = {0x2a, 0x2a, 0x2a, 0x46, 0x17, 0x17, 0x46, 0x53, + 0x3b, 0x24, 0xc, 0xc, 0x24, 0x3b, 0x53}, }; const struct rtw89_rfe_parms rtw89_8852c_dflt_parms = { + .byr_tbl = &rtw89_8852c_byr_table, .rule_2ghz = { .lmt = &rtw89_8852c_txpwr_lmt_2g, .lmt_ru = &rtw89_8852c_txpwr_lmt_ru_2g, @@ -56470,4 +59768,8 @@ const struct rtw89_rfe_parms rtw89_8852c_dflt_parms = { .lmt = &rtw89_8852c_txpwr_lmt_6g, .lmt_ru = &rtw89_8852c_txpwr_lmt_ru_6g, }, + .tx_shape = { + .lmt = &rtw89_8852c_tx_shape_lmt, + .lmt_ru = &rtw89_8852c_tx_shape_lmt_ru, + }, }; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h index 3eb0c4995174..7c9f3ecdc4e7 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h @@ -12,11 +12,8 @@ extern const struct rtw89_phy_table rtw89_8852c_phy_bb_gain_table; extern const struct rtw89_phy_table rtw89_8852c_phy_radioa_table; extern const struct rtw89_phy_table rtw89_8852c_phy_radiob_table; extern const struct rtw89_phy_table rtw89_8852c_phy_nctl_table; -extern const struct rtw89_txpwr_table rtw89_8852c_byr_table; extern const struct rtw89_phy_tssi_dbw_table rtw89_8852c_tssi_dbw_table; extern const struct rtw89_txpwr_track_cfg rtw89_8852c_trk_cfg; -extern const u8 rtw89_8852c_tx_shape[RTW89_BAND_NUM][RTW89_RS_TX_SHAPE_NUM] - [RTW89_REGD_NUM]; extern const struct rtw89_rfe_parms rtw89_8852c_dflt_parms; #endif diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c index dfccae81c380..aed05b026c6c 100644 --- a/drivers/net/wireless/realtek/rtw89/sar.c +++ b/drivers/net/wireless/realtek/rtw89/sar.c @@ -2,9 +2,16 @@ /* Copyright(c) 2019-2020 Realtek Corporation */ +#include "acpi.h" #include "debug.h" +#include "phy.h" +#include "reg.h" #include "sar.h" +#define RTW89_TAS_FACTOR 2 /* unit: 0.25 dBm */ +#define RTW89_TAS_DPR_GAP (1 << RTW89_TAS_FACTOR) +#define RTW89_TAS_DELTA (2 << RTW89_TAS_FACTOR) + static enum rtw89_sar_subband rtw89_sar_get_subband(struct rtw89_dev *rtwdev, u32 center_freq) { @@ -78,17 +85,15 @@ static const struct rtw89_sar_span rtw89_sar_overlapping_6ghz[] = { RTW89_DECL_SAR_6GHZ_SPAN(6885, SUBBAND_7_H, SUBBAND_8), }; -static int rtw89_query_sar_config_common(struct rtw89_dev *rtwdev, s32 *cfg) +static int rtw89_query_sar_config_common(struct rtw89_dev *rtwdev, + u32 center_freq, s32 *cfg) { struct rtw89_sar_cfg_common *rtwsar = &rtwdev->sar.cfg_common; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); - enum rtw89_band band = chan->band_type; - u32 center_freq = chan->freq; const struct rtw89_sar_span *span = NULL; enum 
rtw89_sar_subband subband_l, subband_h; int idx; - if (band == RTW89_BAND_6G) { + if (center_freq >= RTW89_SAR_6GHZ_SPAN_HEAD) { idx = RTW89_SAR_6GHZ_SPAN_IDX(center_freq); /* To decrease size of rtw89_sar_overlapping_6ghz[], * RTW89_SAR_6GHZ_SPAN_IDX() truncates the leading NULLs @@ -108,8 +113,8 @@ static int rtw89_query_sar_config_common(struct rtw89_dev *rtwdev, s32 *cfg) } rtw89_debug(rtwdev, RTW89_DBG_SAR, - "for {band %u, center_freq %u}, SAR subband: {%u, %u}\n", - band, center_freq, subband_l, subband_h); + "center_freq %u: SAR subband {%u, %u}\n", + center_freq, subband_l, subband_h); if (!rtwsar->set[subband_l] && !rtwsar->set[subband_h]) return -ENODATA; @@ -157,11 +162,35 @@ static s8 rtw89_txpwr_sar_to_mac(struct rtw89_dev *rtwdev, u8 fct, s32 cfg) RTW89_SAR_TXPWR_MAC_MAX); } -s8 rtw89_query_sar(struct rtw89_dev *rtwdev) +static s8 rtw89_txpwr_tas_to_sar(const struct rtw89_sar_handler *sar_hdl, + s8 cfg) +{ + const u8 fct = sar_hdl->txpwr_factor_sar; + + if (fct > RTW89_TAS_FACTOR) + return cfg << (fct - RTW89_TAS_FACTOR); + else + return cfg >> (RTW89_TAS_FACTOR - fct); +} + +static s8 rtw89_txpwr_sar_to_tas(const struct rtw89_sar_handler *sar_hdl, + s8 cfg) +{ + const u8 fct = sar_hdl->txpwr_factor_sar; + + if (fct > RTW89_TAS_FACTOR) + return cfg >> (fct - RTW89_TAS_FACTOR); + else + return cfg << (RTW89_TAS_FACTOR - fct); +} + +s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq) { const enum rtw89_sar_sources src = rtwdev->sar.src; /* its members are protected by rtw89_sar_set_src() */ const struct rtw89_sar_handler *sar_hdl = &rtw89_sar_handlers[src]; + struct rtw89_tas_info *tas = &rtwdev->tas; + s8 delta; int ret; s32 cfg; u8 fct; @@ -171,16 +200,30 @@ s8 rtw89_query_sar(struct rtw89_dev *rtwdev) if (src == RTW89_SAR_SOURCE_NONE) return RTW89_SAR_TXPWR_MAC_MAX; - ret = sar_hdl->query_sar_config(rtwdev, &cfg); + ret = sar_hdl->query_sar_config(rtwdev, center_freq, &cfg); if (ret) return RTW89_SAR_TXPWR_MAC_MAX; + if (tas->enable) { + switch (tas->state) { + case RTW89_TAS_STATE_DPR_OFF: + return RTW89_SAR_TXPWR_MAC_MAX; + case RTW89_TAS_STATE_DPR_ON: + delta = rtw89_txpwr_tas_to_sar(sar_hdl, tas->delta); + cfg -= delta; + break; + case RTW89_TAS_STATE_DPR_FORBID: + default: + break; + } + } + fct = sar_hdl->txpwr_factor_sar; return rtw89_txpwr_sar_to_mac(rtwdev, fct, cfg); } -void rtw89_print_sar(struct seq_file *m, struct rtw89_dev *rtwdev) +void rtw89_print_sar(struct seq_file *m, struct rtw89_dev *rtwdev, u32 center_freq) { const enum rtw89_sar_sources src = rtwdev->sar.src; /* its members are protected by rtw89_sar_set_src() */ @@ -199,7 +242,7 @@ void rtw89_print_sar(struct seq_file *m, struct rtw89_dev *rtwdev) seq_printf(m, "source: %d (%s)\n", src, sar_hdl->descr_sar_source); - ret = sar_hdl->query_sar_config(rtwdev, &cfg); + ret = sar_hdl->query_sar_config(rtwdev, center_freq, &cfg); if (ret) { seq_printf(m, "config: return code: %d\n", ret); seq_printf(m, "assign: max setting: %d (unit: 1/%lu dBm)\n", @@ -212,6 +255,19 @@ void rtw89_print_sar(struct seq_file *m, struct rtw89_dev *rtwdev) seq_printf(m, "config: %d (unit: 1/%lu dBm)\n", cfg, BIT(fct)); } +void rtw89_print_tas(struct seq_file *m, struct rtw89_dev *rtwdev) +{ + struct rtw89_tas_info *tas = &rtwdev->tas; + + if (!tas->enable) { + seq_puts(m, "no TAS is applied\n"); + return; + } + + seq_printf(m, "DPR gap: %d\n", tas->dpr_gap); + seq_printf(m, "TAS delta: %d\n", tas->delta); +} + static int rtw89_apply_sar_common(struct rtw89_dev *rtwdev, const struct rtw89_sar_cfg_common *sar) { @@ 
-292,3 +348,145 @@ int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw, return rtw89_apply_sar_common(rtwdev, &sar_common); } + +static void rtw89_tas_state_update(struct rtw89_dev *rtwdev) +{ + const enum rtw89_sar_sources src = rtwdev->sar.src; + /* its members are protected by rtw89_sar_set_src() */ + const struct rtw89_sar_handler *sar_hdl = &rtw89_sar_handlers[src]; + struct rtw89_tas_info *tas = &rtwdev->tas; + s32 txpwr_avg = tas->total_txpwr / RTW89_TAS_MAX_WINDOW / PERCENT; + s32 dpr_on_threshold, dpr_off_threshold, cfg; + enum rtw89_tas_state state = tas->state; + const struct rtw89_chan *chan; + int ret; + + lockdep_assert_held(&rtwdev->mutex); + + if (src == RTW89_SAR_SOURCE_NONE) + return; + + chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + ret = sar_hdl->query_sar_config(rtwdev, chan->freq, &cfg); + if (ret) + return; + + cfg = rtw89_txpwr_sar_to_tas(sar_hdl, cfg); + + if (tas->delta >= cfg) { + rtw89_debug(rtwdev, RTW89_DBG_SAR, + "TAS delta exceed SAR limit\n"); + state = RTW89_TAS_STATE_DPR_FORBID; + goto out; + } + + dpr_on_threshold = cfg; + dpr_off_threshold = cfg - tas->dpr_gap; + rtw89_debug(rtwdev, RTW89_DBG_SAR, + "DPR_ON thold: %d, DPR_OFF thold: %d, txpwr_avg: %d\n", + dpr_on_threshold, dpr_off_threshold, txpwr_avg); + + if (txpwr_avg >= dpr_on_threshold) + state = RTW89_TAS_STATE_DPR_ON; + else if (txpwr_avg < dpr_off_threshold) + state = RTW89_TAS_STATE_DPR_OFF; + +out: + if (tas->state == state) + return; + + rtw89_debug(rtwdev, RTW89_DBG_SAR, + "TAS old state: %d, new state: %d\n", tas->state, state); + tas->state = state; + rtw89_core_set_chip_txpwr(rtwdev); +} + +void rtw89_tas_init(struct rtw89_dev *rtwdev) +{ + struct rtw89_tas_info *tas = &rtwdev->tas; + int ret; + u8 val; + + ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_TAS_EN, &val); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_SAR, + "acpi: cannot get TAS: %d\n", ret); + return; + } + + switch (val) { + case 0: + tas->enable = false; + break; + case 1: + tas->enable = true; + break; + default: + break; + } + + if (!tas->enable) { + rtw89_debug(rtwdev, RTW89_DBG_SAR, "TAS not enable\n"); + return; + } + + tas->dpr_gap = RTW89_TAS_DPR_GAP; + tas->delta = RTW89_TAS_DELTA; +} + +void rtw89_tas_reset(struct rtw89_dev *rtwdev) +{ + struct rtw89_tas_info *tas = &rtwdev->tas; + + if (!tas->enable) + return; + + memset(&tas->txpwr_history, 0, sizeof(tas->txpwr_history)); + tas->total_txpwr = 0; + tas->cur_idx = 0; + tas->state = RTW89_TAS_STATE_DPR_OFF; +} + +static const struct rtw89_reg_def txpwr_regs[] = { + {R_PATH0_TXPWR, B_PATH0_TXPWR}, + {R_PATH1_TXPWR, B_PATH1_TXPWR}, +}; + +void rtw89_tas_track(struct rtw89_dev *rtwdev) +{ + struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + const enum rtw89_sar_sources src = rtwdev->sar.src; + u8 max_nss_num = rtwdev->chip->rf_path_num; + struct rtw89_tas_info *tas = &rtwdev->tas; + s16 tmp, txpwr, instant_txpwr = 0; + u32 val; + int i; + + if (!tas->enable || src == RTW89_SAR_SOURCE_NONE) + return; + + if (env->ccx_watchdog_result != RTW89_PHY_ENV_MON_IFS_CLM) + return; + + for (i = 0; i < max_nss_num; i++) { + val = rtw89_phy_read32_mask(rtwdev, txpwr_regs[i].addr, + txpwr_regs[i].mask); + tmp = sign_extend32(val, 8); + if (tmp <= 0) + return; + instant_txpwr += tmp; + } + + instant_txpwr /= max_nss_num; + /* in unit of 0.25 dBm multiply by percentage */ + txpwr = instant_txpwr * env->ifs_clm_tx_ratio; + tas->total_txpwr += txpwr - tas->txpwr_history[tas->cur_idx]; + tas->txpwr_history[tas->cur_idx] = txpwr; + rtw89_debug(rtwdev, 
RTW89_DBG_SAR, + "instant_txpwr: %d, tx_ratio: %d, txpwr: %d\n", + instant_txpwr, env->ifs_clm_tx_ratio, txpwr); + + tas->cur_idx = (tas->cur_idx + 1) % RTW89_TAS_MAX_WINDOW; + + rtw89_tas_state_update(rtwdev); +} diff --git a/drivers/net/wireless/realtek/rtw89/sar.h b/drivers/net/wireless/realtek/rtw89/sar.h index 7b5484c84eb1..bd7a657188d9 100644 --- a/drivers/net/wireless/realtek/rtw89/sar.h +++ b/drivers/net/wireless/realtek/rtw89/sar.h @@ -13,14 +13,18 @@ struct rtw89_sar_handler { const char *descr_sar_source; u8 txpwr_factor_sar; - int (*query_sar_config)(struct rtw89_dev *rtwdev, s32 *cfg); + int (*query_sar_config)(struct rtw89_dev *rtwdev, u32 center_freq, s32 *cfg); }; extern const struct cfg80211_sar_capa rtw89_sar_capa; -s8 rtw89_query_sar(struct rtw89_dev *rtwdev); -void rtw89_print_sar(struct seq_file *m, struct rtw89_dev *rtwdev); +s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq); +void rtw89_print_sar(struct seq_file *m, struct rtw89_dev *rtwdev, u32 center_freq); +void rtw89_print_tas(struct seq_file *m, struct rtw89_dev *rtwdev); int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw, const struct cfg80211_sar_specs *sar); +void rtw89_tas_init(struct rtw89_dev *rtwdev); +void rtw89_tas_reset(struct rtw89_dev *rtwdev); +void rtw89_tas_track(struct rtw89_dev *rtwdev); #endif diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c index 0462ba693f6f..c1644353053f 100644 --- a/drivers/net/wireless/realtek/rtw89/ser.c +++ b/drivers/net/wireless/realtek/rtw89/ser.c @@ -529,6 +529,9 @@ static void ser_do_hci_st_hdl(struct rtw89_ser *ser, u8 evt) static void ser_mac_mem_dump(struct rtw89_dev *rtwdev, u8 *buf, u8 sel, u32 start_addr, u32 len) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + u32 filter_model_addr = mac->filter_model_addr; + u32 indir_access_addr = mac->indir_access_addr; u32 *ptr = (u32 *)buf; u32 base_addr, start_page, residue; u32 cnt = 0; @@ -536,14 +539,14 @@ static void ser_mac_mem_dump(struct rtw89_dev *rtwdev, u8 *buf, start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE; residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE; - base_addr = rtw89_mac_mem_base_addrs[sel]; + base_addr = mac->mem_base_addrs[sel]; base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE; while (cnt < len) { - rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, base_addr); + rtw89_write32(rtwdev, filter_model_addr, base_addr); - for (i = R_AX_INDIR_ACCESS_ENTRY + residue; - i < R_AX_INDIR_ACCESS_ENTRY + MAC_MEM_DUMP_PAGE_SIZE; + for (i = indir_access_addr + residue; + i < indir_access_addr + MAC_MEM_DUMP_PAGE_SIZE; i += 4, ptr++) { *ptr = rtw89_read32(rtwdev, i); cnt += 4; @@ -585,6 +588,9 @@ static int rtw89_ser_fw_backtrace_dump(struct rtw89_dev *rtwdev, u8 *buf, const struct __fw_backtrace_entry *ent) { struct __fw_backtrace_info *ptr = (struct __fw_backtrace_info *)buf; + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + u32 filter_model_addr = mac->filter_model_addr; + u32 indir_access_addr = mac->indir_access_addr; u32 fwbt_addr = ent->wcpu_addr & RTW89_WCPU_BASE_MASK; u32 fwbt_size = ent->size; u32 fwbt_key = ent->key; @@ -610,10 +616,10 @@ static int rtw89_ser_fw_backtrace_dump(struct rtw89_dev *rtwdev, u8 *buf, } rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace start\n"); - rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, fwbt_addr); + rtw89_write32(rtwdev, filter_model_addr, fwbt_addr); - for (i = R_AX_INDIR_ACCESS_ENTRY; - i < R_AX_INDIR_ACCESS_ENTRY + fwbt_size; + for (i = indir_access_addr; + i < 
indir_access_addr + fwbt_size; i += RTW89_FW_BACKTRACE_INFO_SIZE, ptr++) { *ptr = (struct __fw_backtrace_info){ .ra = rtw89_read32(rtwdev, i), diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h index ec96da36eacc..7142cce167de 100644 --- a/drivers/net/wireless/realtek/rtw89/txrx.h +++ b/drivers/net/wireless/realtek/rtw89/txrx.h @@ -8,19 +8,56 @@ #include "debug.h" #define DATA_RATE_MODE_CTRL_MASK GENMASK(8, 7) +#define DATA_RATE_MODE_CTRL_MASK_V1 GENMASK(10, 8) #define DATA_RATE_NOT_HT_IDX_MASK GENMASK(3, 0) #define DATA_RATE_MODE_NON_HT 0x0 #define DATA_RATE_HT_IDX_MASK GENMASK(4, 0) +#define DATA_RATE_HT_IDX_MASK_V1 GENMASK(4, 0) #define DATA_RATE_MODE_HT 0x1 #define DATA_RATE_VHT_HE_NSS_MASK GENMASK(6, 4) #define DATA_RATE_VHT_HE_IDX_MASK GENMASK(3, 0) +#define DATA_RATE_NSS_MASK_V1 GENMASK(7, 5) +#define DATA_RATE_MCS_MASK_V1 GENMASK(4, 0) #define DATA_RATE_MODE_VHT 0x2 #define DATA_RATE_MODE_HE 0x3 -#define GET_DATA_RATE_MODE(r) FIELD_GET(DATA_RATE_MODE_CTRL_MASK, r) -#define GET_DATA_RATE_NOT_HT_IDX(r) FIELD_GET(DATA_RATE_NOT_HT_IDX_MASK, r) -#define GET_DATA_RATE_HT_IDX(r) FIELD_GET(DATA_RATE_HT_IDX_MASK, r) -#define GET_DATA_RATE_VHT_HE_IDX(r) FIELD_GET(DATA_RATE_VHT_HE_IDX_MASK, r) -#define GET_DATA_RATE_NSS(r) FIELD_GET(DATA_RATE_VHT_HE_NSS_MASK, r) +#define DATA_RATE_MODE_EHT 0x4 + +static inline u8 rtw89_get_data_rate_mode(struct rtw89_dev *rtwdev, u16 hw_rate) +{ + if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) + return u16_get_bits(hw_rate, DATA_RATE_MODE_CTRL_MASK_V1); + + return u16_get_bits(hw_rate, DATA_RATE_MODE_CTRL_MASK); +} + +static inline u8 rtw89_get_data_not_ht_idx(struct rtw89_dev *rtwdev, u16 hw_rate) +{ + return u16_get_bits(hw_rate, DATA_RATE_NOT_HT_IDX_MASK); +} + +static inline u8 rtw89_get_data_ht_mcs(struct rtw89_dev *rtwdev, u16 hw_rate) +{ + if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) + return u16_get_bits(hw_rate, DATA_RATE_HT_IDX_MASK_V1); + + return u16_get_bits(hw_rate, DATA_RATE_HT_IDX_MASK); +} + +static inline u8 rtw89_get_data_mcs(struct rtw89_dev *rtwdev, u16 hw_rate) +{ + if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) + return u16_get_bits(hw_rate, DATA_RATE_MCS_MASK_V1); + + return u16_get_bits(hw_rate, DATA_RATE_VHT_HE_IDX_MASK); +} + +static inline u8 rtw89_get_data_nss(struct rtw89_dev *rtwdev, u16 hw_rate) +{ + if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) + return u16_get_bits(hw_rate, DATA_RATE_NSS_MASK_V1); + + return u16_get_bits(hw_rate, DATA_RATE_VHT_HE_NSS_MASK); +} /* TX WD BODY DWORD 0 */ #define RTW89_TXWD_BODY0_WP_OFFSET GENMASK(31, 24) @@ -100,6 +137,181 @@ /* TX WD INFO DWORD 5 */ +/* TX WD BODY DWORD 0 */ +#define BE_TXD_BODY0_EN_HWSEQ_MODE GENMASK(1, 0) +#define BE_TXD_BODY0_HW_SSN_SEL GENMASK(4, 2) +#define BE_TXD_BODY0_HWAMSDU BIT(5) +#define BE_TXD_BODY0_HW_SEC_IV BIT(6) +#define BE_TXD_BODY0_WD_PAGE BIT(7) +#define BE_TXD_BODY0_CHK_EN BIT(8) +#define BE_TXD_BODY0_WP_INT BIT(9) +#define BE_TXD_BODY0_STF_MODE BIT(10) +#define BE_TXD_BODY0_HDR_LLC_LEN GENMASK(15, 11) +#define BE_TXD_BODY0_CH_DMA GENMASK(19, 16) +#define BE_TXD_BODY0_SMH_EN BIT(20) +#define BE_TXD_BODY0_PKT_OFFSET BIT(21) +#define BE_TXD_BODY0_WDINFO_EN BIT(22) +#define BE_TXD_BODY0_MOREDATA BIT(23) +#define BE_TXD_BODY0_WP_OFFSET_V1 GENMASK(27, 24) +#define BE_TXD_BODY0_AZ_FTM_SEC_V1 BIT(28) +#define BE_TXD_BODY0_WD_SOURCE GENMASK(30, 29) +#define BE_TXD_BODY0_HCI_SEQNUM_MODE BIT(31) + +/* TX WD BODY DWORD 1 */ +#define BE_TXD_BODY1_DMA_TXAGG_NUM GENMASK(6, 0) +#define BE_TXD_BODY1_REUSE_NUM GENMASK(11, 7) 
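[Editorial aside, not part of the patch: the inline helpers above select the rate-field layout by chip generation (RTW89_CHIP_BE uses the "_V1" masks). A minimal usage sketch follows; the function name and pr_debug output are hypothetical, only the helpers and DATA_RATE_MODE_* values come from the diff.]

static inline void rtw89_example_dump_rate(struct rtw89_dev *rtwdev, u16 hw_rate)
{
	switch (rtw89_get_data_rate_mode(rtwdev, hw_rate)) {
	case DATA_RATE_MODE_NON_HT:
		pr_debug("legacy idx %u\n", rtw89_get_data_not_ht_idx(rtwdev, hw_rate));
		break;
	case DATA_RATE_MODE_HT:
		pr_debug("HT MCS %u\n", rtw89_get_data_ht_mcs(rtwdev, hw_rate));
		break;
	default:
		/* VHT/HE/EHT carry NSS and MCS in separate fields */
		pr_debug("NSS %u, MCS %u\n", rtw89_get_data_nss(rtwdev, hw_rate),
			 rtw89_get_data_mcs(rtwdev, hw_rate));
		break;
	}
}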
+#define BE_TXD_BODY1_SEC_TYPE GENMASK(15, 12) +#define BE_TXD_BODY1_SEC_KEYID GENMASK(17, 16) +#define BE_TXD_BODY1_SW_SEC_IV BIT(18) +#define BE_TXD_BODY1_REUSE_SIZE GENMASK(23, 20) +#define BE_TXD_BODY1_REUSE_START_OFFSET GENMASK(25, 24) +#define BE_TXD_BODY1_ADDR_INFO_NUM GENMASK(31, 26) + +/* TX WD BODY DWORD 2 */ +#define BE_TXD_BODY2_TXPKTSIZE GENMASK(13, 0) +#define BE_TXD_BODY2_AGG_EN BIT(14) +#define BE_TXD_BODY2_BK BIT(15) +#define BE_TXD_BODY2_MACID_EXTEND BIT(16) +#define BE_TXD_BODY2_QSEL GENMASK(22, 17) +#define BE_TXD_BODY2_TID_IND BIT(23) +#define BE_TXD_BODY2_MACID GENMASK(31, 24) + +/* TX WD BODY DWORD 3 */ +#define BE_TXD_BODY3_WIFI_SEQ GENMASK(11, 0) +#define BE_TXD_BODY3_MLO_FLAG BIT(12) +#define BE_TXD_BODY3_IS_MLD_SW_EN BIT(13) +#define BE_TXD_BODY3_TRY_RATE BIT(14) +#define BE_TXD_BODY3_RELINK_FLAG_V1 BIT(15) +#define BE_TXD_BODY3_BAND0_SU_TC_V1 GENMASK(21, 16) +#define BE_TXD_BODY3_TOTAL_TC GENMASK(27, 22) +#define BE_TXD_BODY3_RU_RTY BIT(28) +#define BE_TXD_BODY3_MU_PRI_RTY BIT(29) +#define BE_TXD_BODY3_MU_2ND_RTY BIT(30) +#define BE_TXD_BODY3_BAND1_SU_RTY_V1 BIT(31) + +/* TX WD BODY DWORD 4 */ +#define BE_TXD_BODY4_TXDESC_CHECKSUM GENMASK(15, 0) +#define BE_TXD_BODY4_SEC_IV_L0 GENMASK(23, 16) +#define BE_TXD_BODY4_SEC_IV_L1 GENMASK(31, 24) + +/* TX WD BODY DWORD 5 */ +#define BE_TXD_BODY5_SEC_IV_H2 GENMASK(7, 0) +#define BE_TXD_BODY5_SEC_IV_H3 GENMASK(15, 8) +#define BE_TXD_BODY5_SEC_IV_H4 GENMASK(23, 16) +#define BE_TXD_BODY5_SEC_IV_H5 GENMASK(31, 24) + +/* TX WD BODY DWORD 6 */ +#define BE_TXD_BODY6_MU_TC GENMASK(4, 0) +#define BE_TXD_BODY6_RU_TC GENMASK(9, 5) +#define BE_TXD_BODY6_PS160 BIT(10) +#define BE_TXD_BODY6_BMC BIT(11) +#define BE_TXD_BODY6_NO_ACK BIT(12) +#define BE_TXD_BODY6_UPD_WLAN_HDR BIT(13) +#define BE_TXD_BODY6_A4_HDR BIT(14) +#define BE_TXD_BODY6_EOSP_BIT BIT(15) +#define BE_TXD_BODY6_S_IDX GENMASK(23, 16) +#define BE_TXD_BODY6_RU_POS GENMASK(31, 24) + +/* TX WD BODY DWORD 7 */ +#define BE_TXD_BODY7_RTS_TC GENMASK(5, 0) +#define BE_TXD_BODY7_MSDU_NUM GENMASK(9, 6) +#define BE_TXD_BODY7_DATA_ER BIT(10) +#define BE_TXD_BODY7_DATA_BW_ER BIT(11) +#define BE_TXD_BODY7_DATA_DCM BIT(12) +#define BE_TXD_BODY7_GI_LTF GENMASK(15, 13) +#define BE_TXD_BODY7_DATARATE GENMASK(27, 16) +#define BE_TXD_BODY7_DATA_BW GENMASK(30, 28) +#define BE_TXD_BODY7_USERATE_SEL BIT(31) + +/* TX WD INFO DWORD 0 */ +#define BE_TXD_INFO0_MBSSID GENMASK(3, 0) +#define BE_TXD_INFO0_MULTIPORT_ID GENMASK(6, 4) +#define BE_TXD_INFO0_DISRTSFB BIT(9) +#define BE_TXD_INFO0_DISDATAFB BIT(10) +#define BE_TXD_INFO0_DATA_LDPC BIT(11) +#define BE_TXD_INFO0_DATA_STBC BIT(12) +#define BE_TXD_INFO0_DATA_TXCNT_LMT GENMASK(21, 16) +#define BE_TXD_INFO0_DATA_TXCNT_LMT_SEL BIT(22) +#define BE_TXD_INFO0_RESP_PHYSTS_CSI_EN_V1 BIT(23) +#define BE_TXD_INFO0_RLS_TO_CPUIO BIT(30) +#define BE_TXD_INFO0_ACK_CH_INFO BIT(31) + +/* TX WD INFO DWORD 1 */ +#define BE_TXD_INFO1_MAX_AGG_NUM GENMASK(7, 0) +#define BE_TXD_INFO1_BCN_SRCH_SEQ GENMASK(9, 8) +#define BE_TXD_INFO1_NAVUSEHDR BIT(10) +#define BE_TXD_INFO1_A_CTRL_BQR BIT(12) +#define BE_TXD_INFO1_A_CTRL_BSR BIT(14) +#define BE_TXD_INFO1_A_CTRL_CAS BIT(15) +#define BE_TXD_INFO1_DATA_RTY_LOWEST_RATE GENMASK(27, 16) +#define BE_TXD_INFO1_SW_DEFINE GENMASK(31, 28) + +/* TX WD INFO DWORD 2 */ +#define BE_TXD_INFO2_SEC_CAM_IDX GENMASK(7, 0) +#define BE_TXD_INFO2_FORCE_KEY_EN BIT(8) +#define BE_TXD_INFO2_LIFETIME_SEL GENMASK(15, 13) +#define BE_TXD_INFO2_FORCE_TXOP BIT(17) +#define BE_TXD_INFO2_AMPDU_DENSITY GENMASK(20, 18) +#define BE_TXD_INFO2_LSIG_TXOP_EN 
BIT(21) +#define BE_TXD_INFO2_OBW_CTS2SELF_DUP_TYPE GENMASK(29, 26) +#define BE_TXD_INFO2_SPE_RPT_V1 BIT(30) +#define BE_TXD_INFO2_SIFS_TX_V1 BIT(31) + +/* TX WD INFO DWORD 3 */ +#define BE_TXD_INFO3_SPE_PKT GENMASK(3, 0) +#define BE_TXD_INFO3_SPE_PKT_TYPE GENMASK(7, 4) +#define BE_TXD_INFO3_CQI_SND BIT(8) +#define BE_TXD_INFO3_RTT_EN BIT(9) +#define BE_TXD_INFO3_HT_DATA_SND_V1 BIT(10) +#define BE_TXD_INFO3_BT_NULL BIT(11) +#define BE_TXD_INFO3_TRI_FRAME BIT(12) +#define BE_TXD_INFO3_NULL_0 BIT(13) +#define BE_TXD_INFO3_NULL_1 BIT(14) +#define BE_TXD_INFO3_RAW BIT(15) +#define BE_TXD_INFO3_GROUP_BIT_IE_OFFSET GENMASK(23, 16) +#define BE_TXD_INFO3_SIGNALING_TA_PKT_EN BIT(25) +#define BE_TXD_INFO3_BCNPKT_TSF_CTRL BIT(26) +#define BE_TXD_INFO3_SIGNALING_TA_PKT_SC GENMASK(30, 27) +#define BE_TXD_INFO3_FORCE_BSS_CLR BIT(31) + +/* TX WD INFO DWORD 4 */ +#define BE_TXD_INFO4_PUNCTURE_PATTERN GENMASK(15, 0) +#define BE_TXD_INFO4_PUNC_MODE GENMASK(17, 16) +#define BE_TXD_INFO4_SW_TX_OK_0 BIT(18) +#define BE_TXD_INFO4_SW_TX_OK_1 BIT(19) +#define BE_TXD_INFO4_SW_TX_PWR_DBM GENMASK(26, 23) +#define BE_TXD_INFO4_RTS_EN BIT(27) +#define BE_TXD_INFO4_CTS2SELF BIT(28) +#define BE_TXD_INFO4_CCA_RTS GENMASK(30, 29) +#define BE_TXD_INFO4_HW_RTS_EN BIT(31) + +/* TX WD INFO DWORD 5 */ +#define BE_TXD_INFO5_SR_RATE_V1 GENMASK(4, 0) +#define BE_TXD_INFO5_SR_EN_V1 BIT(5) +#define BE_TXD_INFO5_NDPA_DURATION GENMASK(31, 16) + +/* TX WD INFO DWORD 6 */ +#define BE_TXD_INFO6_UL_APEP_LEN GENMASK(11, 0) +#define BE_TXD_INFO6_UL_GI_LTF GENMASK(14, 12) +#define BE_TXD_INFO6_UL_DOPPLER BIT(15) +#define BE_TXD_INFO6_UL_STBC BIT(16) +#define BE_TXD_INFO6_UL_LENGTH_REF GENMASK(21, 18) +#define BE_TXD_INFO6_UL_RF_GAIN_IDX GENMASK(31, 22) + +/* TX WD INFO DWORD 7 */ +#define BE_TXD_INFO7_UL_FIXED_GAIN_EN BIT(0) +#define BE_TXD_INFO7_UL_PRI_EXP_RSSI_DBM GENMASK(7, 1) +#define BE_TXD_INFO7_ELNA_IDX BIT(8) +#define BE_TXD_INFO7_UL_APEP_UNIT GENMASK(10, 9) +#define BE_TXD_INFO7_UL_TRI_PAD GENMASK(13, 11) +#define BE_TXD_INFO7_UL_T_PE GENMASK(15, 14) +#define BE_TXD_INFO7_UL_EHT_USR_PRES BIT(16) +#define BE_TXD_INFO7_UL_HELTF_SYMBOL_NUM GENMASK(19, 17) +#define BE_TXD_INFO7_ULBW GENMASK(21, 20) +#define BE_TXD_INFO7_ULBW_EXT GENMASK(23, 22) +#define BE_TXD_INFO7_USE_WD_UL GENMASK(25, 24) +#define BE_TXD_INFO7_EXTEND_MODE_SEL GENMASK(31, 28) + /* RX WD dword0 */ #define AX_RXD_RPKT_LEN_MASK GENMASK(13, 0) #define AX_RXD_SHIFT_MASK GENMASK(15, 14) @@ -232,6 +444,102 @@ struct rtw89_phy_sts_iehdr { #define RTW89_PHY_STS_IEHDR_TYPE GENMASK(4, 0) #define RTW89_PHY_STS_IEHDR_LEN GENMASK(11, 5) +/* BE RXD dword0 */ +#define BE_RXD_RPKT_LEN_MASK GENMASK(13, 0) +#define BE_RXD_SHIFT_MASK GENMASK(15, 14) +#define BE_RXD_DRV_INFO_SZ_MASK GENMASK(19, 18) +#define BE_RXD_HDR_CNV_SZ_MASK GENMASK(21, 20) +#define BE_RXD_PHY_RPT_SZ_MASK GENMASK(23, 22) +#define BE_RXD_RPKT_TYPE_MASK GENMASK(29, 24) +#define BE_RXD_BB_SEL BIT(30) +#define BE_RXD_LONG_RXD BIT(31) + +/* BE RXD dword1 */ +#define BE_RXD_PKT_ID_MASK GENMASK(11, 0) +#define BE_RXD_FWD_TARGET_MASK GENMASK(23, 16) +#define BE_RXD_BCN_FW_INFO_MASK GENMASK(25, 24) +#define BE_RXD_FW_RLS BIT(26) + +/* BE RXD dword2 */ +#define BE_RXD_MAC_ID_MASK GENMASK(7, 0) +#define BE_RXD_TYPE_MASK GENMASK(11, 10) +#define BE_RXD_LAST_MSDU BIT(12) +#define BE_RXD_AMSDU_CUT BIT(13) +#define BE_RXD_ADDR_CAM_VLD BIT(14) +#define BE_RXD_REORDER BIT(15) +#define BE_RXD_SEQ_MASK GENMASK(27, 16) +#define BE_RXD_TID_MASK GENMASK(31, 28) + +/* BE RXD dword3 */ +#define BE_RXD_SEC_TYPE_MASK GENMASK(3, 0) +#define 
BE_RXD_BIP_KEYID BIT(4) +#define BE_RXD_BIP_ENC BIT(5) +#define BE_RXD_CRC32_ERR BIT(6) +#define BE_RXD_ICV_ERR BIT(7) +#define BE_RXD_HW_DEC BIT(8) +#define BE_RXD_SW_DEC BIT(9) +#define BE_RXD_A1_MATCH BIT(10) +#define BE_RXD_AMPDU BIT(11) +#define BE_RXD_AMPDU_EOF BIT(12) +#define BE_RXD_AMSDU BIT(13) +#define BE_RXD_MC BIT(14) +#define BE_RXD_BC BIT(15) +#define BE_RXD_MD BIT(16) +#define BE_RXD_MF BIT(17) +#define BE_RXD_PWR BIT(18) +#define BE_RXD_QOS BIT(19) +#define BE_RXD_EOSP BIT(20) +#define BE_RXD_HTC BIT(21) +#define BE_RXD_QNULL BIT(22) +#define BE_RXD_A4_FRAME BIT(23) +#define BE_RXD_FRAG_MASK GENMASK(27, 24) +#define BE_RXD_GET_CH_INFO_V1_MASK GENMASK(31, 30) + +/* BE RXD dword4 */ +#define BE_RXD_PPDU_TYPE_MASK GENMASK(7, 0) +#define BE_RXD_PPDU_CNT_MASK GENMASK(10, 8) +#define BE_RXD_BW_MASK GENMASK(14, 12) +#define BE_RXD_RX_GI_LTF_MASK GENMASK(18, 16) +#define BE_RXD_RX_REORDER_FIELD_EN BIT(19) +#define BE_RXD_RX_DATARATE_MASK GENMASK(31, 20) + +/* BE RXD dword5 */ +#define BE_RXD_FREERUN_CNT_MASK GENMASK(31, 0) + +/* BE RXD dword6 */ +#define BE_RXD_ADDR_CAM_MASK GENMASK(7, 0) +#define BE_RXD_SR_EN BIT(13) +#define BE_RXD_NON_SRG_PPDU BIT(14) +#define BE_RXD_INTER_PPDU BIT(15) +#define BE_RXD_USER_ID_MASK GENMASK(21, 16) +#define BE_RXD_RX_STATISTICS BIT(22) +#define BE_RXD_SMART_ANT BIT(23) +#define BE_RXD_SEC_CAM_IDX_MASK GENMASK(31, 24) + +/* BE RXD dword7 */ +#define BE_RXD_PATTERN_IDX_MASK GENMASK(4, 0) +#define BE_RXD_MAGIC_WAKE BIT(5) +#define BE_RXD_UNICAST_WAKE BIT(6) +#define BE_RXD_PATTERN_WAKE BIT(7) +#define BE_RXD_RX_PL_MATCH BIT(8) +#define BE_RXD_RX_PL_ID_MASK GENMASK(15, 12) +#define BE_RXD_HDR_CNV BIT(16) +#define BE_RXD_NAT25_HIT BIT(17) +#define BE_RXD_IS_DA BIT(18) +#define BE_RXD_CHKSUM_OFFLOAD_EN BIT(19) +#define BE_RXD_RXSC_ENTRY_MASK GENMASK(22, 20) +#define BE_RXD_RXSC_HIT BIT(23) +#define BE_RXD_WITH_LLC BIT(24) +#define BE_RXD_RX_AGG_FIELD_EN BIT(25) + +/* BE RXD dword8 */ +#define BE_RXD_MAC_ADDR_MASK GENMASK(31, 0) + +/* BE RXD dword9 */ +#define BE_RXD_MAC_ADDR_H_MASK GENMASK(15, 0) +#define BE_RXD_HDR_OFFSET_MASK GENMASK(20, 16) +#define BE_RXD_WL_HD_IV_LEN_MASK GENMASK(26, 21) + struct rtw89_phy_sts_ie0 { __le32 w0; __le32 w1; diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c index 364e54622150..660bf2ece927 100644 --- a/drivers/net/wireless/realtek/rtw89/wow.c +++ b/drivers/net/wireless/realtek/rtw89/wow.c @@ -40,6 +40,7 @@ static void rtw89_wow_leave_lps(struct rtw89_dev *rtwdev) static int rtw89_wow_config_mac(struct rtw89_dev *rtwdev, bool enable_wow) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; int ret; if (enable_wow) { @@ -49,7 +50,7 @@ static int rtw89_wow_config_mac(struct rtw89_dev *rtwdev, bool enable_wow) return ret; } rtw89_write32_set(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP); - rtw89_write32_clr(rtwdev, R_AX_RX_FLTR_OPT, B_AX_SNIFFER_MODE); + rtw89_write32_clr(rtwdev, mac->rx_fltr, B_AX_SNIFFER_MODE); rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false); rtw89_write32(rtwdev, R_AX_ACTION_FWD0, 0); rtw89_write32(rtwdev, R_AX_ACTION_FWD1, 0); @@ -487,6 +488,8 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow) struct rtw89_wow_param *rtw_wow = &rtwdev->wow; struct ieee80211_vif *wow_vif = rtw_wow->wow_vif; struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv; + const struct rtw89_chip_info *chip = rtwdev->chip; + bool include_bb = !!chip->bbmcu_nr; struct ieee80211_sta *wow_sta; struct rtw89_sta *rtwsta = NULL; bool 
is_conn = true; @@ -500,7 +503,7 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow) else is_conn = false; - ret = rtw89_fw_download(rtwdev, fw_type); + ret = rtw89_fw_download(rtwdev, fw_type, include_bb); if (ret) { rtw89_warn(rtwdev, "download fw failed\n"); return ret; diff --git a/drivers/net/wireless/rsi/rsi_91x_coex.c b/drivers/net/wireless/rsi/rsi_91x_coex.c index 45ac9371f262..372eaaa2b9ef 100644 --- a/drivers/net/wireless/rsi/rsi_91x_coex.c +++ b/drivers/net/wireless/rsi/rsi_91x_coex.c @@ -52,8 +52,7 @@ static void rsi_coex_sched_tx_pkts(struct rsi_coex_ctrl_block *coex_cb) static void rsi_coex_scheduler_thread(struct rsi_common *common) { - struct rsi_coex_ctrl_block *coex_cb = - (struct rsi_coex_ctrl_block *)common->coex_cb; + struct rsi_coex_ctrl_block *coex_cb = common->coex_cb; u32 timeout = EVENT_WAIT_FOREVER; do { @@ -100,9 +99,8 @@ static inline int rsi_map_coex_q(u8 hal_queue) int rsi_coex_send_pkt(void *priv, struct sk_buff *skb, u8 hal_queue) { - struct rsi_common *common = (struct rsi_common *)priv; - struct rsi_coex_ctrl_block *coex_cb = - (struct rsi_coex_ctrl_block *)common->coex_cb; + struct rsi_common *common = priv; + struct rsi_coex_ctrl_block *coex_cb = common->coex_cb; struct skb_info *tx_params = NULL; enum rsi_coex_queues coex_q; int status; @@ -168,8 +166,7 @@ int rsi_coex_attach(struct rsi_common *common) void rsi_coex_detach(struct rsi_common *common) { - struct rsi_coex_ctrl_block *coex_cb = - (struct rsi_coex_ctrl_block *)common->coex_cb; + struct rsi_coex_ctrl_block *coex_cb = common->coex_cb; int cnt; rsi_kill_thread(&coex_cb->coex_tx_thread); diff --git a/drivers/net/wireless/rsi/rsi_91x_debugfs.c b/drivers/net/wireless/rsi/rsi_91x_debugfs.c index bf22fd948276..c528e6ca2c8d 100644 --- a/drivers/net/wireless/rsi/rsi_91x_debugfs.c +++ b/drivers/net/wireless/rsi/rsi_91x_debugfs.c @@ -28,8 +28,7 @@ static int rsi_sdio_stats_read(struct seq_file *seq, void *data) { struct rsi_common *common = seq->private; struct rsi_hw *adapter = common->priv; - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; seq_printf(seq, "total_sdio_interrupts: %d\n", dev->rx_info.sdio_int_counter); diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index d4489b943873..2cebe562a1f4 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -424,7 +424,7 @@ out: int rsi_prepare_beacon(struct rsi_common *common, struct sk_buff *skb) { - struct rsi_hw *adapter = (struct rsi_hw *)common->priv; + struct rsi_hw *adapter = common->priv; struct rsi_data_desc *bcn_frm; struct ieee80211_hw *hw = common->priv->hw; struct ieee80211_conf *conf = &hw->conf; diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index bc1f038d1655..05890536e353 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -1763,8 +1763,8 @@ static int rsi_mac80211_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel *chan, int duration, enum ieee80211_roc_type type) { - struct rsi_hw *adapter = (struct rsi_hw *)hw->priv; - struct rsi_common *common = (struct rsi_common *)adapter->priv; + struct rsi_hw *adapter = hw->priv; + struct rsi_common *common = adapter->priv; int status = 0; rsi_dbg(INFO_ZONE, "***** Remain on channel *****\n"); diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c 
b/drivers/net/wireless/rsi/rsi_91x_main.c index f9f004446b07..2112d8d277a9 100644 --- a/drivers/net/wireless/rsi/rsi_91x_main.c +++ b/drivers/net/wireless/rsi/rsi_91x_main.c @@ -270,14 +270,14 @@ static void rsi_tx_scheduler_thread(struct rsi_common *common) #ifdef CONFIG_RSI_COEX enum rsi_host_intf rsi_get_host_intf(void *priv) { - struct rsi_common *common = (struct rsi_common *)priv; + struct rsi_common *common = priv; return common->priv->rsi_host_intf; } void rsi_set_bt_context(void *priv, void *bt_context) { - struct rsi_common *common = (struct rsi_common *)priv; + struct rsi_common *common = priv; common->bt_adapter = bt_context; } diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index 1911fef3bbad..8e7b757475d2 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -144,8 +144,7 @@ static int rsi_issue_sdiocommand(struct sdio_func *func, static void rsi_handle_interrupt(struct sdio_func *function) { struct rsi_hw *adapter = sdio_get_drvdata(function); - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; if (adapter->priv->fsm_state == FSM_FW_NOT_LOADED) return; @@ -337,8 +336,7 @@ static void rsi_reset_card(struct sdio_func *pfunction) */ static void rsi_setclock(struct rsi_hw *adapter, u32 freq) { - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; struct mmc_host *host = dev->pfunction->card->host; u32 clock; @@ -358,8 +356,7 @@ static void rsi_setclock(struct rsi_hw *adapter, u32 freq) */ static int rsi_setblocklength(struct rsi_hw *adapter, u32 length) { - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; int status; rsi_dbg(INIT_ZONE, "%s: Setting the block length\n", __func__); @@ -380,8 +377,7 @@ static int rsi_setblocklength(struct rsi_hw *adapter, u32 length) */ static int rsi_setupcard(struct rsi_hw *adapter) { - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; int status = 0; rsi_setclock(adapter, 50000); @@ -407,8 +403,7 @@ int rsi_sdio_read_register(struct rsi_hw *adapter, u32 addr, u8 *data) { - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; u8 fun_num = 0; int status; @@ -441,8 +436,7 @@ int rsi_sdio_write_register(struct rsi_hw *adapter, u32 addr, u8 *data) { - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; int status = 0; if (likely(dev->sdio_irq_task != current)) @@ -495,8 +489,7 @@ static int rsi_sdio_read_register_multiple(struct rsi_hw *adapter, u8 *data, u16 count) { - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; u32 status; if (likely(dev->sdio_irq_task != current)) @@ -527,8 +520,7 @@ int rsi_sdio_write_register_multiple(struct rsi_hw *adapter, u8 *data, u16 count) { - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; int status; if (dev->write_fail > 1) { @@ -762,8 +754,7 @@ static int rsi_sdio_host_intf_write_pkt(struct rsi_hw *adapter, u8 *pkt, u32 len) { - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev 
*dev = adapter->rsi_dev; u32 block_size = dev->tx_blk_size; u32 num_blocks, address, length; u32 queueno; @@ -1045,7 +1036,7 @@ static int rsi_probe(struct sdio_func *pfunction, goto fail_free_adapter; } - sdev = (struct rsi_91x_sdiodev *)adapter->rsi_dev; + sdev = adapter->rsi_dev; rsi_init_event(&sdev->rx_thread.event); status = rsi_create_kthread(adapter->priv, &sdev->rx_thread, rsi_sdio_rx_thread, "SDIO-RX-Thread"); @@ -1221,7 +1212,7 @@ static void rsi_disconnect(struct sdio_func *pfunction) if (!adapter) return; - dev = (struct rsi_91x_sdiodev *)adapter->rsi_dev; + dev = adapter->rsi_dev; rsi_kill_thread(&dev->rx_thread); sdio_claim_host(pfunction); @@ -1255,8 +1246,7 @@ static void rsi_disconnect(struct sdio_func *pfunction) #ifdef CONFIG_PM static int rsi_set_sdio_pm_caps(struct rsi_hw *adapter) { - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; struct sdio_func *func = dev->pfunction; int ret; @@ -1407,7 +1397,7 @@ static int rsi_freeze(struct device *dev) return -ENODEV; } common = adapter->priv; - sdev = (struct rsi_91x_sdiodev *)adapter->rsi_dev; + sdev = adapter->rsi_dev; if ((common->wow_flags & RSI_WOW_ENABLED) && (common->wow_flags & RSI_WOW_NO_CONNECTION)) @@ -1457,8 +1447,7 @@ static void rsi_shutdown(struct device *dev) { struct sdio_func *pfunction = dev_to_sdio_func(dev); struct rsi_hw *adapter = sdio_get_drvdata(pfunction); - struct rsi_91x_sdiodev *sdev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *sdev = adapter->rsi_dev; struct ieee80211_hw *hw = adapter->hw; rsi_dbg(ERR_ZONE, "SDIO Bus shutdown =====>\n"); diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c index b2b47a0abcbf..597b238e2294 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c @@ -88,8 +88,7 @@ void rsi_sdio_rx_thread(struct rsi_common *common) static int rsi_process_pkt(struct rsi_common *common) { struct rsi_hw *adapter = common->priv; - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; u8 num_blks = 0; u32 rcv_pkt_len = 0; int status = 0; @@ -147,8 +146,7 @@ static int rsi_process_pkt(struct rsi_common *common) */ int rsi_init_sdio_slave_regs(struct rsi_hw *adapter) { - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; u8 function = 0; u8 byte; int status = 0; @@ -233,8 +231,7 @@ int rsi_init_sdio_slave_regs(struct rsi_hw *adapter) static void rsi_rx_handler(struct rsi_hw *adapter) { struct rsi_common *common = adapter->priv; - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; int status; u8 isr_status = 0; u8 fw_status = 0; @@ -339,8 +336,7 @@ static void rsi_rx_handler(struct rsi_hw *adapter) int rsi_sdio_check_buffer_status(struct rsi_hw *adapter, u8 q_num) { struct rsi_common *common = adapter->priv; - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; u8 buf_status = 0; int status = 0; static int counter = 4; @@ -409,8 +405,7 @@ out: */ int rsi_sdio_determine_event_timeout(struct rsi_hw *adapter) { - struct rsi_91x_sdiodev *dev = - (struct rsi_91x_sdiodev *)adapter->rsi_dev; + struct rsi_91x_sdiodev *dev = adapter->rsi_dev; /* Once buffer full is seen, event timeout to occur every 2 
msecs */ if (dev->rx_info.buffer_full) diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index 66fe386ec9cc..10a465686439 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -43,7 +43,7 @@ static int rsi_usb_card_write(struct rsi_hw *adapter, u16 len, u8 endpoint) { - struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + struct rsi_91x_usbdev *dev = adapter->rsi_dev; int status; u8 *seg = dev->tx_buffer; int transfer; @@ -91,7 +91,7 @@ static int rsi_write_multiple(struct rsi_hw *adapter, if (endpoint == 0) return -EINVAL; - dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + dev = adapter->rsi_dev; if (dev->write_fail) return -ENETDOWN; @@ -109,7 +109,7 @@ static int rsi_write_multiple(struct rsi_hw *adapter, static int rsi_find_bulk_in_and_out_endpoints(struct usb_interface *interface, struct rsi_hw *adapter) { - struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + struct rsi_91x_usbdev *dev = adapter->rsi_dev; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; __le16 buffer_size; @@ -306,7 +306,7 @@ out: static void rsi_rx_urb_kill(struct rsi_hw *adapter, u8 ep_num) { - struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + struct rsi_91x_usbdev *dev = adapter->rsi_dev; struct rx_usb_ctrl_block *rx_cb = &dev->rx_cb[ep_num - 1]; struct urb *urb = rx_cb->rx_urb; @@ -323,7 +323,7 @@ static void rsi_rx_urb_kill(struct rsi_hw *adapter, u8 ep_num) */ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num, gfp_t mem_flags) { - struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + struct rsi_91x_usbdev *dev = adapter->rsi_dev; struct rx_usb_ctrl_block *rx_cb = &dev->rx_cb[ep_num - 1]; struct urb *urb = rx_cb->rx_urb; int status; @@ -362,7 +362,7 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num, gfp_t mem_flags) static int rsi_usb_read_register_multiple(struct rsi_hw *adapter, u32 addr, u8 *data, u16 count) { - struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + struct rsi_91x_usbdev *dev = adapter->rsi_dev; u8 *buf; u16 transfer; int status; @@ -412,7 +412,7 @@ static int rsi_usb_read_register_multiple(struct rsi_hw *adapter, u32 addr, static int rsi_usb_write_register_multiple(struct rsi_hw *adapter, u32 addr, u8 *data, u16 count) { - struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + struct rsi_91x_usbdev *dev = adapter->rsi_dev; u8 *buf; u16 transfer; int status = 0; @@ -559,7 +559,7 @@ static struct rsi_host_intf_ops usb_host_intf_ops = { */ static void rsi_deinit_usb_interface(struct rsi_hw *adapter) { - struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + struct rsi_91x_usbdev *dev = adapter->rsi_dev; rsi_kill_thread(&dev->rx_thread); @@ -572,7 +572,7 @@ static void rsi_deinit_usb_interface(struct rsi_hw *adapter) static int rsi_usb_init_rx(struct rsi_hw *adapter) { - struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + struct rsi_91x_usbdev *dev = adapter->rsi_dev; struct rx_usb_ctrl_block *rx_cb; u8 idx, num_rx_cb; @@ -822,7 +822,7 @@ static int rsi_probe(struct usb_interface *pfunction, goto err1; } - dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + dev = adapter->rsi_dev; status = rsi_usb_reg_read(dev->usbdev, FW_STATUS_REG, &fw_status, 2); if (status < 0) diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c index 
5130b0e72adc..25c2b232394a 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c @@ -28,7 +28,7 @@ void rsi_usb_rx_thread(struct rsi_common *common) { struct rsi_hw *adapter = common->priv; - struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; + struct rsi_91x_usbdev *dev = adapter->rsi_dev; int status; struct sk_buff *skb; diff --git a/drivers/net/wireless/silabs/wfx/bus_sdio.c b/drivers/net/wireless/silabs/wfx/bus_sdio.c index 51a0d58a9070..909d5f346a01 100644 --- a/drivers/net/wireless/silabs/wfx/bus_sdio.c +++ b/drivers/net/wireless/silabs/wfx/bus_sdio.c @@ -10,7 +10,7 @@ #include <linux/mmc/sdio_func.h> #include <linux/mmc/card.h> #include <linux/interrupt.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/of_irq.h> #include <linux/irq.h> #include <linux/align.h> diff --git a/drivers/net/wireless/silabs/wfx/data_tx.c b/drivers/net/wireless/silabs/wfx/data_tx.c index 6a5e52a96d18..a44a7403ce8d 100644 --- a/drivers/net/wireless/silabs/wfx/data_tx.c +++ b/drivers/net/wireless/silabs/wfx/data_tx.c @@ -208,6 +208,36 @@ static bool wfx_is_action_back(struct ieee80211_hdr *hdr) return true; } +struct wfx_tx_priv *wfx_skb_tx_priv(struct sk_buff *skb) +{ + struct ieee80211_tx_info *tx_info; + + if (!skb) + return NULL; + tx_info = IEEE80211_SKB_CB(skb); + return (struct wfx_tx_priv *)tx_info->rate_driver_data; +} + +struct wfx_hif_req_tx *wfx_skb_txreq(struct sk_buff *skb) +{ + struct wfx_hif_msg *hif = (struct wfx_hif_msg *)skb->data; + struct wfx_hif_req_tx *req = (struct wfx_hif_req_tx *)hif->body; + + return req; +} + +struct wfx_vif *wfx_skb_wvif(struct wfx_dev *wdev, struct sk_buff *skb) +{ + struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb); + struct wfx_hif_msg *hif = (struct wfx_hif_msg *)skb->data; + + if (tx_priv->vif_id != hif->interface && hif->interface != 2) { + dev_err(wdev->dev, "corrupted skb"); + return wdev_to_wvif(wdev, hif->interface); + } + return wdev_to_wvif(wdev, tx_priv->vif_id); +} + static u8 wfx_tx_get_link_id(struct wfx_vif *wvif, struct ieee80211_sta *sta, struct ieee80211_hdr *hdr) { @@ -226,53 +256,40 @@ static u8 wfx_tx_get_link_id(struct wfx_vif *wvif, struct ieee80211_sta *sta, static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates) { - int i; - bool finished; + bool has_rate0 = false; + int i, j; - /* Firmware is not able to mix rates with different flags */ - for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { - if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI) - rates[i].flags |= IEEE80211_TX_RC_SHORT_GI; - if (!(rates[0].flags & IEEE80211_TX_RC_SHORT_GI)) + for (i = 1, j = 1; j < IEEE80211_TX_MAX_RATES; j++) { + if (rates[j].idx == -1) + break; + /* The device uses the rates in descending order, whatever the request from minstrel. + * We have to trade off here. The most important thing is to respect the primary rate + * requested by minstrel. So, we drop the entries with a rate higher than the + * previous one. 
+ */ + if (rates[j].idx >= rates[i - 1].idx) { + rates[i - 1].count += rates[j].count; + rates[i - 1].count = min_t(u16, 15, rates[i - 1].count); + } else { + memcpy(rates + i, rates + j, sizeof(rates[i])); + if (rates[i].idx == 0) + has_rate0 = true; + /* The device applies Short GI only to the first rate */ rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI; - if (!(rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)) - rates[i].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS; - } - - /* Sort rates and remove duplicates */ - do { - finished = true; - for (i = 0; i < IEEE80211_TX_MAX_RATES - 1; i++) { - if (rates[i + 1].idx == rates[i].idx && - rates[i].idx != -1) { - rates[i].count += rates[i + 1].count; - if (rates[i].count > 15) - rates[i].count = 15; - rates[i + 1].idx = -1; - rates[i + 1].count = 0; - - finished = false; - } - if (rates[i + 1].idx > rates[i].idx) { - swap(rates[i + 1], rates[i]); - finished = false; - } + i++; } - } while (!finished); + } /* Ensure that MCS0 or 1Mbps is present at the end of the retry list */ - for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { - if (rates[i].idx == 0) - break; - if (rates[i].idx == -1) { - rates[i].idx = 0; - rates[i].count = 8; /* == hw->max_rate_tries */ - rates[i].flags = rates[i - 1].flags & IEEE80211_TX_RC_MCS; - break; - } + if (!has_rate0 && i < IEEE80211_TX_MAX_RATES) { + rates[i].idx = 0; + rates[i].count = 8; /* == hw->max_rate_tries */ + rates[i].flags = rates[0].flags & IEEE80211_TX_RC_MCS; + i++; + } + for (; i < IEEE80211_TX_MAX_RATES; i++) { + memset(rates + i, 0, sizeof(rates[i])); + rates[i].idx = -1; } - /* All retries use long GI */ - for (i = 1; i < IEEE80211_TX_MAX_RATES; i++) - rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI; } static u8 wfx_tx_get_retry_policy_id(struct wfx_vif *wvif, struct ieee80211_tx_info *tx_info) @@ -334,6 +351,7 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta, struct /* Fill tx_priv */ tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data; tx_priv->icv_size = wfx_tx_get_icv_len(hw_key); + tx_priv->vif_id = wvif->id; /* Fill hif_msg */ WARN(skb_headroom(skb) < wmsg_len, "not enough space in skb"); @@ -344,7 +362,10 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta, struct hif_msg = (struct wfx_hif_msg *)skb->data; hif_msg->len = cpu_to_le16(skb->len); hif_msg->id = HIF_REQ_ID_TX; - hif_msg->interface = wvif->id; + if (tx_info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) + hif_msg->interface = 2; + else + hif_msg->interface = wvif->id; if (skb->len > le16_to_cpu(wvif->wdev->hw_caps.size_inp_ch_buf)) { dev_warn(wvif->wdev->dev, "requested frame size (%d) is larger than maximum supported (%d)\n", @@ -365,9 +386,15 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta, struct req->fc_offset = offset; /* Queue index are inverted between firmware and Linux */ req->queue_id = 3 - queue_id; - req->peer_sta_id = wfx_tx_get_link_id(wvif, sta, hdr); - req->retry_policy_index = wfx_tx_get_retry_policy_id(wvif, tx_info); - req->frame_format = wfx_tx_get_frame_format(tx_info); + if (tx_info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { + req->peer_sta_id = HIF_LINK_ID_NOT_ASSOCIATED; + req->retry_policy_index = HIF_TX_RETRY_POLICY_INVALID; + req->frame_format = HIF_FRAME_FORMAT_NON_HT; + } else { + req->peer_sta_id = wfx_tx_get_link_id(wvif, sta, hdr); + req->retry_policy_index = wfx_tx_get_retry_policy_id(wvif, tx_info); + req->frame_format = wfx_tx_get_frame_format(tx_info); + } if (tx_info->driver_rates[0].flags & IEEE80211_TX_RC_SHORT_GI) req->short_gi = 1; if 
(tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) @@ -483,7 +510,7 @@ void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct wfx_hif_cnf_tx *arg) } tx_info = IEEE80211_SKB_CB(skb); tx_priv = wfx_skb_tx_priv(skb); - wvif = wdev_to_wvif(wdev, ((struct wfx_hif_msg *)skb->data)->interface); + wvif = wfx_skb_wvif(wdev, skb); WARN_ON(!wvif); if (!wvif) return; @@ -545,7 +572,6 @@ void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, b struct wfx_dev *wdev = hw->priv; struct sk_buff_head dropped; struct wfx_vif *wvif; - struct wfx_hif_msg *hif; struct sk_buff *skb; skb_queue_head_init(&dropped); @@ -561,8 +587,7 @@ void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, b if (wdev->chip_frozen) wfx_pending_drop(wdev, &dropped); while ((skb = skb_dequeue(&dropped)) != NULL) { - hif = (struct wfx_hif_msg *)skb->data; - wvif = wdev_to_wvif(wdev, hif->interface); + wvif = wfx_skb_wvif(wdev, skb); ieee80211_tx_info_clear_status(IEEE80211_SKB_CB(skb)); wfx_skb_dtor(wvif, skb); } diff --git a/drivers/net/wireless/silabs/wfx/data_tx.h b/drivers/net/wireless/silabs/wfx/data_tx.h index 983470705e4b..0621b82103be 100644 --- a/drivers/net/wireless/silabs/wfx/data_tx.h +++ b/drivers/net/wireless/silabs/wfx/data_tx.h @@ -36,6 +36,7 @@ struct wfx_tx_policy_cache { struct wfx_tx_priv { ktime_t xmit_timestamp; unsigned char icv_size; + unsigned char vif_id; }; void wfx_tx_policy_init(struct wfx_vif *wvif); @@ -45,22 +46,8 @@ void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struc void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct wfx_hif_cnf_tx *arg); void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop); -static inline struct wfx_tx_priv *wfx_skb_tx_priv(struct sk_buff *skb) -{ - struct ieee80211_tx_info *tx_info; - - if (!skb) - return NULL; - tx_info = IEEE80211_SKB_CB(skb); - return (struct wfx_tx_priv *)tx_info->rate_driver_data; -} - -static inline struct wfx_hif_req_tx *wfx_skb_txreq(struct sk_buff *skb) -{ - struct wfx_hif_msg *hif = (struct wfx_hif_msg *)skb->data; - struct wfx_hif_req_tx *req = (struct wfx_hif_req_tx *)hif->body; - - return req; -} +struct wfx_tx_priv *wfx_skb_tx_priv(struct sk_buff *skb); +struct wfx_hif_req_tx *wfx_skb_txreq(struct sk_buff *skb); +struct wfx_vif *wfx_skb_wvif(struct wfx_dev *wdev, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/silabs/wfx/hif_tx.c b/drivers/net/wireless/silabs/wfx/hif_tx.c index 9402503fbde3..9f403d275cb1 100644 --- a/drivers/net/wireless/silabs/wfx/hif_tx.c +++ b/drivers/net/wireless/silabs/wfx/hif_tx.c @@ -45,6 +45,24 @@ static void *wfx_alloc_hif(size_t body_len, struct wfx_hif_msg **hif) return NULL; } +static u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates) +{ + int i; + u32 ret = 0; + /* The device only supports 2GHz */ + struct ieee80211_supported_band *sband = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]; + + for (i = 0; i < sband->n_bitrates; i++) { + if (rates & BIT(i)) { + if (i >= sband->n_bitrates) + dev_warn(wdev->dev, "unsupported basic rate\n"); + else + ret |= BIT(sband->bitrates[i].hw_value); + } + } + return ret; +} + int wfx_cmd_send(struct wfx_dev *wdev, struct wfx_hif_msg *request, void *reply, size_t reply_len, bool no_reply) { @@ -220,6 +238,31 @@ int wfx_hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val, s return ret; } +/* Hijack scan request to implement Remain-On-Channel */ +int wfx_hif_scan_uniq(struct wfx_vif *wvif, struct ieee80211_channel *chan, int duration) +{ 
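	/* Editorial note, not part of the patch: this helper repurposes the
	 * firmware scan command as a remain-on-channel primitive. A
	 * single-channel "scan" with maintain_current_bss and disallow_ps set
	 * parks the radio on @chan without tearing down the current BSS, and
	 * min/max channel time bound the dwell to @duration, with 10% slack
	 * on the upper bound.
	 */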
+ int ret; + struct wfx_hif_msg *hif; + size_t buf_len = sizeof(struct wfx_hif_req_start_scan_alt) + sizeof(u8); + struct wfx_hif_req_start_scan_alt *body = wfx_alloc_hif(buf_len, &hif); + + if (!hif) + return -ENOMEM; + body->num_of_ssids = HIF_API_MAX_NB_SSIDS; + body->maintain_current_bss = 1; + body->disallow_ps = 1; + body->tx_power_level = cpu_to_le32(chan->max_power); + body->num_of_channels = 1; + body->channel_list[0] = chan->hw_value; + body->max_transmit_rate = API_RATE_INDEX_B_1MBPS; + body->min_channel_time = cpu_to_le32(duration); + body->max_channel_time = cpu_to_le32(duration * 110 / 100); + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START_SCAN, buf_len); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + int wfx_hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req, int chan_start_idx, int chan_num) { diff --git a/drivers/net/wireless/silabs/wfx/hif_tx.h b/drivers/net/wireless/silabs/wfx/hif_tx.h index 71817a6571f0..aab54df6aafa 100644 --- a/drivers/net/wireless/silabs/wfx/hif_tx.h +++ b/drivers/net/wireless/silabs/wfx/hif_tx.h @@ -54,6 +54,7 @@ int wfx_hif_beacon_transmit(struct wfx_vif *wvif, bool enable); int wfx_hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len); int wfx_hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req80211, int chan_start, int chan_num); +int wfx_hif_scan_uniq(struct wfx_vif *wvif, struct ieee80211_channel *chan, int duration); int wfx_hif_stop_scan(struct wfx_vif *wvif); int wfx_hif_configuration(struct wfx_dev *wdev, const u8 *conf, size_t len); int wfx_hif_shutdown(struct wfx_dev *wdev); diff --git a/drivers/net/wireless/silabs/wfx/main.c b/drivers/net/wireless/silabs/wfx/main.c index 0b50f7058bbb..e7198520bdff 100644 --- a/drivers/net/wireless/silabs/wfx/main.c +++ b/drivers/net/wireless/silabs/wfx/main.c @@ -151,6 +151,8 @@ static const struct ieee80211_ops wfx_ops = { .change_chanctx = wfx_change_chanctx, .assign_vif_chanctx = wfx_assign_vif_chanctx, .unassign_vif_chanctx = wfx_unassign_vif_chanctx, + .remain_on_channel = wfx_remain_on_channel, + .cancel_remain_on_channel = wfx_cancel_remain_on_channel, }; bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor) @@ -246,6 +248,7 @@ static void wfx_free_common(void *data) mutex_destroy(&wdev->tx_power_loop_info_lock); mutex_destroy(&wdev->rx_stats_lock); + mutex_destroy(&wdev->scan_lock); mutex_destroy(&wdev->conf_mutex); ieee80211_free_hw(wdev->hw); } @@ -288,18 +291,18 @@ struct wfx_dev *wfx_init_common(struct device *dev, const struct wfx_platform_da hw->wiphy->features |= NL80211_FEATURE_AP_SCAN; hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; + hw->wiphy->max_remain_on_channel_duration = 5000; hw->wiphy->max_ap_assoc_sta = HIF_LINK_ID_MAX; hw->wiphy->max_scan_ssids = 2; hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; hw->wiphy->n_iface_combinations = ARRAY_SIZE(wfx_iface_combinations); hw->wiphy->iface_combinations = wfx_iface_combinations; - hw->wiphy->bands[NL80211_BAND_2GHZ] = devm_kmalloc(dev, sizeof(wfx_band_2ghz), GFP_KERNEL); + /* FIXME: also copy wfx_rates and wfx_2ghz_chantable */ + hw->wiphy->bands[NL80211_BAND_2GHZ] = devm_kmemdup(dev, &wfx_band_2ghz, + sizeof(wfx_band_2ghz), GFP_KERNEL); if (!hw->wiphy->bands[NL80211_BAND_2GHZ]) goto err; - /* FIXME: also copy wfx_rates and wfx_2ghz_chantable */ - memcpy(hw->wiphy->bands[NL80211_BAND_2GHZ], &wfx_band_2ghz, sizeof(wfx_band_2ghz)); - wdev = hw->priv; wdev->hw = hw; wdev->dev = dev; @@ 
-315,6 +318,7 @@ struct wfx_dev *wfx_init_common(struct device *dev, const struct wfx_platform_da gpiod_set_consumer_name(wdev->pdata.gpio_wakeup, "wfx wakeup"); mutex_init(&wdev->conf_mutex); + mutex_init(&wdev->scan_lock); mutex_init(&wdev->rx_stats_lock); mutex_init(&wdev->tx_power_loop_info_lock); init_completion(&wdev->firmware_ready); diff --git a/drivers/net/wireless/silabs/wfx/queue.c b/drivers/net/wireless/silabs/wfx/queue.c index 37f492e5d3be..e61b86f211e5 100644 --- a/drivers/net/wireless/silabs/wfx/queue.c +++ b/drivers/net/wireless/silabs/wfx/queue.c @@ -68,13 +68,16 @@ void wfx_tx_queues_init(struct wfx_vif *wvif) for (i = 0; i < IEEE80211_NUM_ACS; ++i) { skb_queue_head_init(&wvif->tx_queue[i].normal); skb_queue_head_init(&wvif->tx_queue[i].cab); + skb_queue_head_init(&wvif->tx_queue[i].offchan); wvif->tx_queue[i].priority = priorities[i]; } } bool wfx_tx_queue_empty(struct wfx_vif *wvif, struct wfx_queue *queue) { - return skb_queue_empty_lockless(&queue->normal) && skb_queue_empty_lockless(&queue->cab); + return skb_queue_empty_lockless(&queue->normal) && + skb_queue_empty_lockless(&queue->cab) && + skb_queue_empty_lockless(&queue->offchan); } void wfx_tx_queues_check_empty(struct wfx_vif *wvif) @@ -103,8 +106,9 @@ static void __wfx_tx_queue_drop(struct wfx_vif *wvif, void wfx_tx_queue_drop(struct wfx_vif *wvif, struct wfx_queue *queue, struct sk_buff_head *dropped) { - __wfx_tx_queue_drop(wvif, &queue->cab, dropped); __wfx_tx_queue_drop(wvif, &queue->normal, dropped); + __wfx_tx_queue_drop(wvif, &queue->cab, dropped); + __wfx_tx_queue_drop(wvif, &queue->offchan, dropped); wake_up(&wvif->wdev->tx_dequeue); } @@ -113,7 +117,9 @@ void wfx_tx_queues_put(struct wfx_vif *wvif, struct sk_buff *skb) { struct wfx_queue *queue = &wvif->tx_queue[skb_get_queue_mapping(skb)]; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); - if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) + if (tx_info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) + skb_queue_tail(&queue->offchan, skb); + else if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) skb_queue_tail(&queue->cab, skb); else skb_queue_tail(&queue->normal, skb); @@ -123,13 +129,11 @@ void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped) { struct wfx_queue *queue; struct wfx_vif *wvif; - struct wfx_hif_msg *hif; struct sk_buff *skb; WARN(!wdev->chip_frozen, "%s should only be used to recover a frozen device", __func__); while ((skb = skb_dequeue(&wdev->tx_pending)) != NULL) { - hif = (struct wfx_hif_msg *)skb->data; - wvif = wdev_to_wvif(wdev, hif->interface); + wvif = wfx_skb_wvif(wdev, skb); if (wvif) { queue = &wvif->tx_queue[skb_get_queue_mapping(skb)]; WARN_ON(skb_get_queue_mapping(skb) > 3); @@ -155,7 +159,7 @@ struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id) if (req->packet_id != packet_id) continue; spin_unlock_bh(&wdev->tx_pending.lock); - wvif = wdev_to_wvif(wdev, hif->interface); + wvif = wfx_skb_wvif(wdev, skb); if (wvif) { queue = &wvif->tx_queue[skb_get_queue_mapping(skb)]; WARN_ON(skb_get_queue_mapping(skb) > 3); @@ -248,6 +252,26 @@ static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev) wvif = NULL; while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { + for (i = 0; i < num_queues; i++) { + skb = skb_dequeue(&queues[i]->offchan); + if (!skb) + continue; + hif = (struct wfx_hif_msg *)skb->data; + /* Offchan frames are assigned to a special interface, + * the only one allowed to send data during a scan. 
+ */ + WARN_ON(hif->interface != 2); + atomic_inc(&queues[i]->pending_frames); + trace_queues_stats(wdev, queues[i]); + return skb; + } + } + + if (mutex_is_locked(&wdev->scan_lock)) + return NULL; + + wvif = NULL; + while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { if (!wvif->after_dtim_tx_allowed) continue; for (i = 0; i < num_queues; i++) { diff --git a/drivers/net/wireless/silabs/wfx/queue.h b/drivers/net/wireless/silabs/wfx/queue.h index 4731debca93d..6857fbd60fba 100644 --- a/drivers/net/wireless/silabs/wfx/queue.h +++ b/drivers/net/wireless/silabs/wfx/queue.h @@ -17,6 +17,7 @@ struct wfx_vif; struct wfx_queue { struct sk_buff_head normal; struct sk_buff_head cab; /* Content After (DTIM) Beacon */ + struct sk_buff_head offchan; atomic_t pending_frames; int priority; }; diff --git a/drivers/net/wireless/silabs/wfx/scan.c b/drivers/net/wireless/silabs/wfx/scan.c index 16f619ed22e0..c3c103ff88cc 100644 --- a/drivers/net/wireless/silabs/wfx/scan.c +++ b/drivers/net/wireless/silabs/wfx/scan.c @@ -95,7 +95,7 @@ void wfx_hw_scan_work(struct work_struct *work) int chan_cur, ret, err; mutex_lock(&wvif->wdev->conf_mutex); - mutex_lock(&wvif->scan_lock); + mutex_lock(&wvif->wdev->scan_lock); if (wvif->join_in_progress) { dev_info(wvif->wdev->dev, "abort in-progress REQ_JOIN"); wfx_reset(wvif); @@ -116,7 +116,7 @@ void wfx_hw_scan_work(struct work_struct *work) ret = -ETIMEDOUT; } } while (ret >= 0 && chan_cur < hw_req->req.n_channels); - mutex_unlock(&wvif->scan_lock); + mutex_unlock(&wvif->wdev->scan_lock); mutex_unlock(&wvif->wdev->conf_mutex); wfx_ieee80211_scan_completed_compat(wvif->wdev->hw, ret < 0); } @@ -145,3 +145,65 @@ void wfx_scan_complete(struct wfx_vif *wvif, int nb_chan_done) wvif->scan_nb_chan_done = nb_chan_done; complete(&wvif->scan_complete); } + +void wfx_remain_on_channel_work(struct work_struct *work) +{ + struct wfx_vif *wvif = container_of(work, struct wfx_vif, remain_on_channel_work); + struct ieee80211_channel *chan = wvif->remain_on_channel_chan; + int duration = wvif->remain_on_channel_duration; + int ret; + + /* Hijack scan request to implement Remain-On-Channel */ + mutex_lock(&wvif->wdev->conf_mutex); + mutex_lock(&wvif->wdev->scan_lock); + if (wvif->join_in_progress) { + dev_info(wvif->wdev->dev, "abort in-progress REQ_JOIN"); + wfx_reset(wvif); + } + wfx_tx_flush(wvif->wdev); + + reinit_completion(&wvif->scan_complete); + ret = wfx_hif_scan_uniq(wvif, chan, duration); + if (ret) + goto end; + ieee80211_ready_on_channel(wvif->wdev->hw); + ret = wait_for_completion_timeout(&wvif->scan_complete, + msecs_to_jiffies(duration * 120 / 100)); + if (!ret) { + wfx_hif_stop_scan(wvif); + ret = wait_for_completion_timeout(&wvif->scan_complete, 1 * HZ); + dev_dbg(wvif->wdev->dev, "roc timeout\n"); + } + if (!ret) + dev_err(wvif->wdev->dev, "roc didn't stop\n"); + ieee80211_remain_on_channel_expired(wvif->wdev->hw); +end: + mutex_unlock(&wvif->wdev->scan_lock); + mutex_unlock(&wvif->wdev->conf_mutex); + wfx_bh_request_tx(wvif->wdev); +} + +int wfx_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_channel *chan, int duration, + enum ieee80211_roc_type type) +{ + struct wfx_dev *wdev = hw->priv; + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + + if (wfx_api_older_than(wdev, 3, 10)) + return -EOPNOTSUPP; + + wvif->remain_on_channel_duration = duration; + wvif->remain_on_channel_chan = chan; + schedule_work(&wvif->remain_on_channel_work); + return 0; +} + +int wfx_cancel_remain_on_channel(struct ieee80211_hw *hw, struct 
ieee80211_vif *vif) +{ + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + + wfx_hif_stop_scan(wvif); + flush_work(&wvif->remain_on_channel_work); + return 0; +} diff --git a/drivers/net/wireless/silabs/wfx/scan.h b/drivers/net/wireless/silabs/wfx/scan.h index 78e3b984f375..995ab8c6cb5e 100644 --- a/drivers/net/wireless/silabs/wfx/scan.h +++ b/drivers/net/wireless/silabs/wfx/scan.h @@ -19,4 +19,10 @@ int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void wfx_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif); void wfx_scan_complete(struct wfx_vif *wvif, int nb_chan_done); +void wfx_remain_on_channel_work(struct work_struct *work); +int wfx_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_channel *chan, int duration, + enum ieee80211_roc_type type); +int wfx_cancel_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif); + #endif diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c index 626dfb4b7a55..1b6c158457b4 100644 --- a/drivers/net/wireless/silabs/wfx/sta.c +++ b/drivers/net/wireless/silabs/wfx/sta.c @@ -20,24 +20,6 @@ #define HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES 2 -u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates) -{ - int i; - u32 ret = 0; - /* The device only supports 2GHz */ - struct ieee80211_supported_band *sband = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]; - - for (i = 0; i < sband->n_bitrates; i++) { - if (rates & BIT(i)) { - if (i >= sband->n_bitrates) - dev_warn(wdev->dev, "unsupported basic rate\n"); - else - ret |= BIT(sband->bitrates[i].hw_value); - } - } - return ret; -} - void wfx_cooling_timeout_work(struct work_struct *work) { struct wfx_dev *wdev = container_of(to_delayed_work(work), struct wfx_dev, @@ -114,10 +96,12 @@ void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, *total_flags &= FIF_BCN_PRBRESP_PROMISC | FIF_ALLMULTI | FIF_OTHER_BSS | FIF_PROBE_REQ | FIF_PSPOLL; + /* Filters are ignored during the scan. No frames are filtered. */ + if (mutex_is_locked(&wdev->scan_lock)) + return; + mutex_lock(&wdev->conf_mutex); while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { - mutex_lock(&wvif->scan_lock); - /* Note: FIF_BCN_PRBRESP_PROMISC covers probe response and * beacons from other BSS */ @@ -144,8 +128,6 @@ void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, else filter_prbreq = true; wfx_hif_set_rx_filter(wvif, filter_bssid, filter_prbreq); - - mutex_unlock(&wvif->scan_lock); } mutex_unlock(&wdev->conf_mutex); } @@ -402,7 +384,12 @@ void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf) { struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + struct wfx_dev *wdev = wvif->wdev; + wvif = NULL; + while ((wvif = wvif_iterate(wdev, wvif)) != NULL) + wfx_update_pm(wvif); + wvif = (struct wfx_vif *)vif->drv_priv; wfx_reset(wvif); } @@ -634,18 +621,14 @@ int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) void wfx_suspend_resume_mc(struct wfx_vif *wvif, enum sta_notify_cmd notify_cmd) { - struct wfx_vif *wvif_it; - if (notify_cmd != STA_NOTIFY_AWAKE) return; /* Device won't be able to honor CAB if a scan is in progress on any interface. Prefer to * skip this DTIM and wait for the next one. 
*/ - wvif_it = NULL; - while ((wvif_it = wvif_iterate(wvif->wdev, wvif_it)) != NULL) - if (mutex_is_locked(&wvif_it->scan_lock)) - return; + if (mutex_is_locked(&wvif->wdev->scan_lock)) + return; if (!wfx_tx_queues_has_cab(wvif) || wvif->after_dtim_tx_allowed) dev_warn(wvif->wdev->dev, "incorrect sequence (%d CAB in queue)", @@ -743,9 +726,9 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) complete(&wvif->set_pm_mode_complete); INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work); - mutex_init(&wvif->scan_lock); init_completion(&wvif->scan_complete); INIT_WORK(&wvif->scan_work, wfx_hw_scan_work); + INIT_WORK(&wvif->remain_on_channel_work, wfx_remain_on_channel_work); wfx_tx_queues_init(wvif); wfx_tx_policy_init(wvif); diff --git a/drivers/net/wireless/silabs/wfx/sta.h b/drivers/net/wireless/silabs/wfx/sta.h index 888db5cd3206..c478ddcb934b 100644 --- a/drivers/net/wireless/silabs/wfx/sta.h +++ b/drivers/net/wireless/silabs/wfx/sta.h @@ -66,6 +66,5 @@ int wfx_update_pm(struct wfx_vif *wvif); /* Other Helpers */ void wfx_reset(struct wfx_vif *wvif); -u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates); #endif diff --git a/drivers/net/wireless/silabs/wfx/wfx.h b/drivers/net/wireless/silabs/wfx/wfx.h index 13ba84b3b2c3..bd0df2e1ea99 100644 --- a/drivers/net/wireless/silabs/wfx/wfx.h +++ b/drivers/net/wireless/silabs/wfx/wfx.h @@ -43,6 +43,7 @@ struct wfx_dev { struct delayed_work cooling_timeout_work; bool poll_irq; bool chip_frozen; + struct mutex scan_lock; struct mutex conf_mutex; struct wfx_hif_cmd hif_cmd; @@ -69,6 +70,7 @@ struct wfx_vif { bool after_dtim_tx_allowed; bool join_in_progress; + struct completion set_pm_mode_complete; struct delayed_work beacon_loss_work; @@ -80,15 +82,15 @@ struct wfx_vif { unsigned long uapsd_mask; - /* avoid some operations in parallel with scan */ - struct mutex scan_lock; struct work_struct scan_work; struct completion scan_complete; int scan_nb_chan_done; bool scan_abort; struct ieee80211_scan_request *scan_req; - struct completion set_pm_mode_complete; + struct ieee80211_channel *remain_on_channel_chan; + int remain_on_channel_duration; + struct work_struct remain_on_channel_work; }; static inline struct ieee80211_vif *wvif_to_vif(struct wfx_vif *wvif) diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c index 6894b919ff94..084d52b11f5b 100644 --- a/drivers/net/wireless/st/cw1200/txrx.c +++ b/drivers/net/wireless/st/cw1200/txrx.c @@ -994,7 +994,7 @@ void cw1200_skb_dtor(struct cw1200_common *priv, txpriv->raw_link_id, txpriv->tid); tx_policy_put(priv, txpriv->rate_id); } - ieee80211_tx_status(priv->hw, skb); + ieee80211_tx_status_skb(priv->hw, skb); } void cw1200_rx_cb(struct cw1200_common *priv, @@ -1166,7 +1166,7 @@ void cw1200_rx_cb(struct cw1200_common *priv, size_t ies_len = skb->len - (ies - (u8 *)(skb->data)); tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies, ies_len); - if (tim_ie) { + if (tim_ie && tim_ie[1] >= sizeof(struct ieee80211_tim_ie)) { struct ieee80211_tim_ie *tim = (struct ieee80211_tim_ie *)&tim_ie[2]; diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index eded284af600..cd9a41f59f32 100644 --- a/drivers/net/wireless/ti/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c @@ -404,7 +404,7 @@ static int wl1251_op_start(struct ieee80211_hw *hw) /* update hw/fw version info in wiphy struct */ wiphy->hw_version = wl->chip_id; - strncpy(wiphy->fw_version, wl->fw_ver, sizeof(wiphy->fw_version)); + 
strscpy(wiphy->fw_version, wl->fw_ver, sizeof(wiphy->fw_version)); out: if (ret < 0) diff --git a/drivers/net/wireless/ti/wl1251/tx.c b/drivers/net/wireless/ti/wl1251/tx.c index e9dc3c72bb11..474b603c121c 100644 --- a/drivers/net/wireless/ti/wl1251/tx.c +++ b/drivers/net/wireless/ti/wl1251/tx.c @@ -434,7 +434,7 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl, result->status, wl1251_tx_parse_status(result->status)); - ieee80211_tx_status(wl->hw, skb); + ieee80211_tx_status_skb(wl->hw, skb); wl->tx_frames[result->id] = NULL; } @@ -566,7 +566,7 @@ void wl1251_tx_flush(struct wl1251 *wl) if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) continue; - ieee80211_tx_status(wl->hw, skb); + ieee80211_tx_status_skb(wl->hw, skb); } for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++) @@ -577,7 +577,7 @@ void wl1251_tx_flush(struct wl1251 *wl) if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) continue; - ieee80211_tx_status(wl->hw, skb); + ieee80211_tx_status_skb(wl->hw, skb); wl->tx_frames[i] = NULL; } } diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c index d06a2c419447..de045fe4ca1e 100644 --- a/drivers/net/wireless/ti/wl12xx/main.c +++ b/drivers/net/wireless/ti/wl12xx/main.c @@ -1919,7 +1919,7 @@ out: return ret; } -static int wl12xx_remove(struct platform_device *pdev) +static void wl12xx_remove(struct platform_device *pdev) { struct wl1271 *wl = platform_get_drvdata(pdev); struct wl12xx_priv *priv; @@ -1928,7 +1928,7 @@ static int wl12xx_remove(struct platform_device *pdev) kfree(priv->rx_mem_addr); - return wlcore_remove(pdev); + wlcore_remove(pdev); } static const struct platform_device_id wl12xx_id_table[] = { @@ -1939,7 +1939,7 @@ MODULE_DEVICE_TABLE(platform, wl12xx_id_table); static struct platform_driver wl12xx_driver = { .probe = wl12xx_probe, - .remove = wl12xx_remove, + .remove_new = wl12xx_remove, .id_table = wl12xx_id_table, .driver = { .name = "wl12xx_driver", diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c index 0b3cf8477c6c..20d9181b3410 100644 --- a/drivers/net/wireless/ti/wl18xx/main.c +++ b/drivers/net/wireless/ti/wl18xx/main.c @@ -1516,12 +1516,9 @@ static int wl18xx_handle_static_data(struct wl1271 *wl, struct wl18xx_static_data_priv *static_data_priv = (struct wl18xx_static_data_priv *) static_data->priv; - strncpy(wl->chip.phy_fw_ver_str, static_data_priv->phy_version, + strscpy(wl->chip.phy_fw_ver_str, static_data_priv->phy_version, sizeof(wl->chip.phy_fw_ver_str)); - /* make sure the string is NULL-terminated */ - wl->chip.phy_fw_ver_str[sizeof(wl->chip.phy_fw_ver_str) - 1] = '\0'; - wl1271_info("PHY firmware version: %s", static_data_priv->phy_version); return 0; @@ -2033,7 +2030,7 @@ MODULE_DEVICE_TABLE(platform, wl18xx_id_table); static struct platform_driver wl18xx_driver = { .probe = wl18xx_probe, - .remove = wlcore_remove, + .remove_new = wlcore_remove, .id_table = wl18xx_id_table, .driver = { .name = "wl18xx_driver", diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c index 85abd0a2d1c9..f481c2e3dbc8 100644 --- a/drivers/net/wireless/ti/wlcore/boot.c +++ b/drivers/net/wireless/ti/wlcore/boot.c @@ -41,12 +41,9 @@ static int wlcore_boot_parse_fw_ver(struct wl1271 *wl, { int ret; - strncpy(wl->chip.fw_ver_str, static_data->fw_version, + strscpy(wl->chip.fw_ver_str, static_data->fw_version, sizeof(wl->chip.fw_ver_str)); - /* make sure the string is NULL-terminated */ - wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0'; - ret = 
sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u", &wl->chip.fw_ver[0], &wl->chip.fw_ver[1], &wl->chip.fw_ver[2], &wl->chip.fw_ver[3], diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c index 46ab69eab26a..1e082d039b82 100644 --- a/drivers/net/wireless/ti/wlcore/event.c +++ b/drivers/net/wireless/ti/wlcore/event.c @@ -229,7 +229,7 @@ void wlcore_event_channel_switch(struct wl1271 *wl, vif = wl12xx_wlvif_to_vif(wlvif); if (wlvif->bss_type == BSS_TYPE_STA_BSS) { - ieee80211_chswitch_done(vif, success); + ieee80211_chswitch_done(vif, success, 0); cancel_delayed_work(&wlvif->channel_switch_work); } else { set_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags); diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index bf21611872a3..fb9ed97774c7 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -1126,7 +1126,7 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode) /* update hw/fw version info in wiphy struct */ wiphy->hw_version = wl->chip.id; - strncpy(wiphy->fw_version, wl->chip.fw_ver_str, + strscpy(wiphy->fw_version, wl->chip.fw_ver_str, sizeof(wiphy->fw_version)); goto out; @@ -2043,7 +2043,7 @@ static void wlcore_channel_switch_work(struct work_struct *work) goto out; vif = wl12xx_wlvif_to_vif(wlvif); - ieee80211_chswitch_done(vif, false); + ieee80211_chswitch_done(vif, false, 0); ret = pm_runtime_resume_and_get(wl->dev); if (ret < 0) @@ -2344,7 +2344,7 @@ power_off: /* update hw/fw version info in wiphy struct */ wiphy->hw_version = wl->chip.id; - strncpy(wiphy->fw_version, wl->chip.fw_ver_str, + strscpy(wiphy->fw_version, wl->chip.fw_ver_str, sizeof(wiphy->fw_version)); /* @@ -3030,7 +3030,7 @@ static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif) struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); wl12xx_cmd_stop_channel_switch(wl, wlvif); - ieee80211_chswitch_done(vif, false); + ieee80211_chswitch_done(vif, false, 0); cancel_delayed_work(&wlvif->channel_switch_work); } @@ -5451,7 +5451,7 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw, if (unlikely(wl->state == WLCORE_STATE_OFF)) { if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) - ieee80211_chswitch_done(vif, false); + ieee80211_chswitch_done(vif, false, 0); goto out; } else if (unlikely(wl->state != WLCORE_STATE_ON)) { goto out; @@ -6737,7 +6737,7 @@ int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev) } EXPORT_SYMBOL_GPL(wlcore_probe); -int wlcore_remove(struct platform_device *pdev) +void wlcore_remove(struct platform_device *pdev) { struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev); struct wl1271 *wl = platform_get_drvdata(pdev); @@ -6752,7 +6752,7 @@ int wlcore_remove(struct platform_device *pdev) if (pdev_data->family && pdev_data->family->nvs_name) wait_for_completion(&wl->nvs_loading_complete); if (!wl->initialized) - return 0; + return; if (wl->wakeirq >= 0) { dev_pm_clear_wake_irq(wl->dev); @@ -6772,8 +6772,6 @@ int wlcore_remove(struct platform_device *pdev) free_irq(wl->irq, wl); wlcore_free_hw(wl); - - return 0; } EXPORT_SYMBOL_GPL(wlcore_remove); diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index cf8d909fa826..f0686635db46 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -442,18 +442,7 @@ static struct sdio_driver wl1271_sdio_driver = { #endif }; -static int __init wl1271_init(void) -{ - return 
sdio_register_driver(&wl1271_sdio_driver); -} - -static void __exit wl1271_exit(void) -{ - sdio_unregister_driver(&wl1271_sdio_driver); -} - -module_init(wl1271_init); -module_exit(wl1271_exit); +module_sdio_driver(wl1271_sdio_driver); module_param(dump, bool, 0600); MODULE_PARM_DESC(dump, "Enable sdio read/write dumps."); diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index 3f88e6a0a510..7d9a139db59e 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -554,7 +554,7 @@ static void wl1271_remove(struct spi_device *spi) static struct spi_driver wl1271_spi_driver = { .driver = { .name = "wl1271_spi", - .of_match_table = of_match_ptr(wlcore_spi_of_match_table), + .of_match_table = wlcore_spi_of_match_table, }, .probe = wl1271_probe, diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h index 81c94d390623..1f8511bf9bb3 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore.h +++ b/drivers/net/wireless/ti/wlcore/wlcore.h @@ -497,7 +497,7 @@ struct wl1271 { }; int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev); -int wlcore_remove(struct platform_device *pdev); +void wlcore_remove(struct platform_device *pdev); struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size, u32 mbox_size); int wlcore_free_hw(struct wl1271 *wl); diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c index f446fd0e8cd0..c7b4414cc6c3 100644 --- a/drivers/net/wireless/virtual/mac80211_hwsim.c +++ b/drivers/net/wireless/virtual/mac80211_hwsim.c @@ -72,15 +72,6 @@ MODULE_PARM_DESC(mlo, "Support MLO"); /** * enum hwsim_regtest - the type of regulatory tests we offer * - * These are the different values you can use for the regtest - * module parameter. This is useful to help test world roaming - * and the driver regulatory_hint() call and combinations of these. - * If you want to do specific alpha2 regulatory domain tests simply - * use the userspace regulatory request as that will be respected as - * well without the need of this module parameter. This is designed - * only for testing the driver regulatory request, world roaming - * and all possible combinations. - * * @HWSIM_REGTEST_DISABLED: No regulatory tests are performed, * this is the default value. * @HWSIM_REGTEST_DRIVER_REG_FOLLOW: Used for testing the driver regulatory @@ -125,6 +116,15 @@ MODULE_PARM_DESC(mlo, "Support MLO"); * domain request * 6 and on - should follow the intersection of the 3rd, 4rth and 5th radio * regulatory requests. + * + * These are the different values you can use for the regtest + * module parameter. This is useful to help test world roaming + * and the driver regulatory_hint() call and combinations of these. + * If you want to do specific alpha2 regulatory domain tests simply + * use the userspace regulatory request as that will be respected as + * well without the need of this module parameter. This is designed + * only for testing the driver regulatory request, world roaming + * and all possible combinations. 
*/ enum hwsim_regtest { HWSIM_REGTEST_DISABLED = 0, @@ -582,9 +582,8 @@ static int mac80211_hwsim_vendor_cmd_test(struct wiphy *wiphy, */ /* Add vendor data */ - err = nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TEST, val + 1); - if (err) - return err; + nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TEST, val + 1); + /* Send the event - this will call nla_nest_end() */ cfg80211_vendor_event(skb, GFP_KERNEL); } @@ -2446,6 +2445,14 @@ static void mac80211_hwsim_vif_info_changed(struct ieee80211_hw *hw, vp->assoc = vif->cfg.assoc; vp->aid = vif->cfg.aid; } + + if (vif->type == NL80211_IFTYPE_STATION && + changed & BSS_CHANGED_MLD_VALID_LINKS) { + u16 usable_links = ieee80211_vif_usable_links(vif); + + if (vif->active_links != usable_links) + ieee80211_set_active_links_async(vif, usable_links); + } } static void mac80211_hwsim_link_info_changed(struct ieee80211_hw *hw, @@ -3171,7 +3178,7 @@ static void mac80211_hwsim_get_et_strings(struct ieee80211_hw *hw, u32 sset, u8 *data) { if (sset == ETH_SS_STATS) - memcpy(data, *mac80211_hwsim_gstrings_stats, + memcpy(data, mac80211_hwsim_gstrings_stats, sizeof(mac80211_hwsim_gstrings_stats)); } @@ -4900,25 +4907,19 @@ static const struct ieee80211_sband_iftype_data sband_capa_6ghz[] = { static void mac80211_hwsim_sband_capab(struct ieee80211_supported_band *sband) { - u16 n_iftype_data; - - if (sband->band == NL80211_BAND_2GHZ) { - n_iftype_data = ARRAY_SIZE(sband_capa_2ghz); - sband->iftype_data = - (struct ieee80211_sband_iftype_data *)sband_capa_2ghz; - } else if (sband->band == NL80211_BAND_5GHZ) { - n_iftype_data = ARRAY_SIZE(sband_capa_5ghz); - sband->iftype_data = - (struct ieee80211_sband_iftype_data *)sband_capa_5ghz; - } else if (sband->band == NL80211_BAND_6GHZ) { - n_iftype_data = ARRAY_SIZE(sband_capa_6ghz); - sband->iftype_data = - (struct ieee80211_sband_iftype_data *)sband_capa_6ghz; - } else { - return; + switch (sband->band) { + case NL80211_BAND_2GHZ: + ieee80211_set_sband_iftype_data(sband, sband_capa_2ghz); + break; + case NL80211_BAND_5GHZ: + ieee80211_set_sband_iftype_data(sband, sband_capa_5ghz); + break; + case NL80211_BAND_6GHZ: + ieee80211_set_sband_iftype_data(sband, sband_capa_6ghz); + break; + default: + break; } - - sband->n_iftype_data = n_iftype_data; } #ifdef CONFIG_MAC80211_MESH @@ -5626,14 +5627,15 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2, frame_data_len = nla_len(info->attrs[HWSIM_ATTR_FRAME]); frame_data = (void *)nla_data(info->attrs[HWSIM_ATTR_FRAME]); + if (frame_data_len < sizeof(struct ieee80211_hdr_3addr) || + frame_data_len > IEEE80211_MAX_DATA_LEN) + goto err; + /* Allocate new skb here */ skb = alloc_skb(frame_data_len, GFP_KERNEL); if (skb == NULL) goto err; - if (frame_data_len > IEEE80211_MAX_DATA_LEN) - goto err; - /* Copy the data */ skb_put_data(skb, frame_data, frame_data_len); @@ -6314,7 +6316,7 @@ static void hwsim_virtio_tx_done(struct virtqueue *vq) spin_lock_irqsave(&hwsim_virtio_lock, flags); while ((skb = virtqueue_get_buf(vq, &len))) - nlmsg_free(skb); + dev_kfree_skb_irq(skb); spin_unlock_irqrestore(&hwsim_virtio_lock, flags); } @@ -6383,14 +6385,14 @@ static void hwsim_virtio_rx_work(struct work_struct *work) spin_lock_irqsave(&hwsim_virtio_lock, flags); if (!hwsim_virtio_enabled) { - nlmsg_free(skb); + dev_kfree_skb_irq(skb); goto out_unlock; } vq = hwsim_vqs[HWSIM_VQ_RX]; sg_init_one(sg, skb->head, skb_end_offset(skb)); err = virtqueue_add_inbuf(vq, sg, 1, skb, GFP_ATOMIC); if (WARN(err, "virtqueue_add_inbuf returned %d\n", err)) - nlmsg_free(skb); + dev_kfree_skb_irq(skb); 
else virtqueue_kick(vq); schedule_work(&hwsim_virtio_rx); diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.h b/drivers/net/wireless/virtual/mac80211_hwsim.h index 92126f02c58f..4676cdaf4cfd 100644 --- a/drivers/net/wireless/virtual/mac80211_hwsim.h +++ b/drivers/net/wireless/virtual/mac80211_hwsim.h @@ -3,7 +3,7 @@ * mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211 * Copyright (c) 2008, Jouni Malinen <j@w1.fi> * Copyright (c) 2011, Javier Lopez <jlopex@gmail.com> - * Copyright (C) 2020, 2022 Intel Corporation + * Copyright (C) 2020, 2022-2023 Intel Corporation */ #ifndef __MAC80211_HWSIM_H @@ -86,7 +86,7 @@ enum hwsim_tx_control_flags { * with %HWSIM_CMD_REPORT_PMSR. * @__HWSIM_CMD_MAX: enum limit */ -enum { +enum hwsim_commands { HWSIM_CMD_UNSPEC, HWSIM_CMD_REGISTER, HWSIM_CMD_FRAME, @@ -117,11 +117,11 @@ enum { * the frame was broadcasted from * @HWSIM_ATTR_FRAME: Data array * @HWSIM_ATTR_FLAGS: mac80211 transmission flags, used to process - properly the frame at user space + * properly the frame at user space * @HWSIM_ATTR_RX_RATE: estimated rx rate index for this frame at user - space + * space * @HWSIM_ATTR_SIGNAL: estimated RX signal for this frame at user - space + * space * @HWSIM_ATTR_TX_INFO: ieee80211_tx_rate array * @HWSIM_ATTR_COOKIE: sk_buff cookie to identify the frame * @HWSIM_ATTR_CHANNELS: u32 attribute used with the %HWSIM_CMD_CREATE_RADIO @@ -140,6 +140,7 @@ enum { * command to force radio removal when process that created the radio dies * @HWSIM_ATTR_RADIO_NAME: Name of radio, e.g. phy666 * @HWSIM_ATTR_NO_VIF: Do not create vif (wlanX) when creating radio. + * @HWSIM_ATTR_PAD: padding attribute for 64-bit values, ignore * @HWSIM_ATTR_FREQ: Frequency at which packet is transmitted or received. * @HWSIM_ATTR_TX_INFO_FLAGS: additional flags for corresponding * rates of %HWSIM_ATTR_TX_INFO @@ -156,9 +157,7 @@ enum { * to provide peer measurement result (nl80211_peer_measurement_attrs) * @__HWSIM_ATTR_MAX: enum limit */ - - -enum { +enum hwsim_attrs { HWSIM_ATTR_UNSPEC, HWSIM_ATTR_ADDR_RECEIVER, HWSIM_ATTR_ADDR_TRANSMITTER, @@ -259,7 +258,7 @@ enum hwsim_tx_rate_flags { * struct hwsim_tx_rate - rate selection/status * * @idx: rate index to attempt to send with - * @count: number of tries in this rate before going to the next rate + * @flags: the rate flags according to &enum hwsim_tx_rate_flags * * A value of -1 for @idx indicates an invalid rate and, if used * in an array of retry rates, that no more rates should be tried. @@ -287,7 +286,7 @@ struct hwsim_tx_rate_flag { * @HWSIM_VQ_RX: receive frames and transmission info reports * @HWSIM_NUM_VQS: enum limit */ -enum { +enum hwsim_vqs { HWSIM_VQ_TX, HWSIM_VQ_RX, HWSIM_NUM_VQS, diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c index a85fe7e4c6d4..2814df1ecc78 100644 --- a/drivers/net/wireless/zydas/zd1201.c +++ b/drivers/net/wireless/zydas/zd1201.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Driver for ZyDAS zd1201 based wireless USB devices. + * Driver for ZyDAS zd1201 based USB wireless devices. 
* * Copyright (c) 2004, 2005 Jeroen Vreeken (pe1rxq@amsat.org) * @@ -23,8 +23,8 @@ #include "zd1201.h" static const struct usb_device_id zd1201_table[] = { - {USB_DEVICE(0x0586, 0x3400)}, /* Peabird Wireless USB Adapter */ - {USB_DEVICE(0x0ace, 0x1201)}, /* ZyDAS ZD1201 Wireless USB Adapter */ + {USB_DEVICE(0x0586, 0x3400)}, /* Peabird USB Wireless Adapter */ + {USB_DEVICE(0x0ace, 0x1201)}, /* ZyDAS ZD1201 USB Wireless Adapter */ {USB_DEVICE(0x050d, 0x6051)}, /* Belkin F5D6051 usb adapter */ {USB_DEVICE(0x0db0, 0x6823)}, /* MSI UB11B usb adapter */ {USB_DEVICE(0x1044, 0x8004)}, /* Gigabyte GN-WLBZ101 */ diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c index 850c26bc9524..8505d84eeed6 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c @@ -1006,7 +1006,7 @@ resubmit: * @usb: the zd1211rw-private USB structure * @skb: a &struct sk_buff pointer * - * This function tranmits a frame to the device. It doesn't wait for + * This function transmits a frame to the device. It doesn't wait for * completion. The frame must contain the control set and have all the * control set information available. * diff --git a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h index e77084e76718..fdc211bbeda7 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h +++ b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h @@ -51,7 +51,7 @@ struct ipc_chnl_cfg { /** * ipc_chnl_cfg_get - Get pipe configuration. * @chnl_cfg: Array of ipc_chnl_cfg struct - * @index: Channel index (upto MAX_CHANNELS) + * @index: Channel index (up to MAX_CHANNELS) * * Return: 0 on success and failure value on error */ diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c index 635301d677e1..829515a601b3 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem.c +++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c @@ -4,7 +4,6 @@ */ #include <linux/delay.h> -#include <linux/pm_runtime.h> #include "iosm_ipc_chnl_cfg.h" #include "iosm_ipc_devlink.h" @@ -632,11 +631,6 @@ static void ipc_imem_run_state_worker(struct work_struct *instance) /* Complete all memory stores after setting bit */ smp_mb__after_atomic(); - if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7560_ID) { - pm_runtime_mark_last_busy(ipc_imem->dev); - pm_runtime_put_autosuspend(ipc_imem->dev); - } - return; err_ipc_mux_deinit: @@ -1240,7 +1234,6 @@ void ipc_imem_cleanup(struct iosm_imem *ipc_imem) /* forward MDM_NOT_READY to listeners */ ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY); - pm_runtime_get_sync(ipc_imem->dev); hrtimer_cancel(&ipc_imem->td_alloc_timer); hrtimer_cancel(&ipc_imem->tdupdate_timer); @@ -1426,16 +1419,6 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id, set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag); } - - if (!pm_runtime_enabled(ipc_imem->dev)) - pm_runtime_enable(ipc_imem->dev); - - pm_runtime_set_autosuspend_delay(ipc_imem->dev, - IPC_MEM_AUTO_SUSPEND_DELAY_MS); - pm_runtime_use_autosuspend(ipc_imem->dev); - pm_runtime_allow(ipc_imem->dev); - pm_runtime_mark_last_busy(ipc_imem->dev); - return ipc_imem; devlink_channel_fail: ipc_devlink_deinit(ipc_imem->ipc_devlink); diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h index 0144b45e2afb..5664ac507c90 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem.h +++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h @@ -103,8 +103,6 @@ struct ipc_chnl_cfg; #define FULLY_FUNCTIONAL 0 #define 
IOSM_DEVLINK_INIT 1 -#define IPC_MEM_AUTO_SUSPEND_DELAY_MS 5000 - /* List of the supported UL/DL pipes. */ enum ipc_mem_pipes { IPC_MEM_PIPE_0 = 0, diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h index 026c5bd0f999..6bd0290e8be7 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h @@ -36,8 +36,8 @@ /** * ipc_imem_sys_port_open - Open a port link to CP. * @ipc_imem: Imem instance. - * @chl_id: Channel Indentifier. - * @hp_id: HP Indentifier. + * @chl_id: Channel Identifier. + * @hp_id: HP Identifier. * * Return: channel instance on success, NULL for failure */ diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.h b/drivers/net/wwan/iosm/iosm_ipc_mux.h index 17ca8d1f9397..db5f1f9ebf26 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mux.h +++ b/drivers/net/wwan/iosm/iosm_ipc_mux.h @@ -432,7 +432,7 @@ int ipc_mux_open_session(struct iosm_mux *ipc_mux, int session_nr); int ipc_mux_close_session(struct iosm_mux *ipc_mux, int session_nr); /** - * ipc_mux_get_max_sessions - Retuns the maximum sessions supported on the + * ipc_mux_get_max_sessions - Returns the maximum sessions supported on the * provided MUX instance.. * @ipc_mux: Pointer to MUX data-struct * diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c index 3a259c9abefd..04517bd3325a 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c +++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c @@ -6,7 +6,6 @@ #include <linux/acpi.h> #include <linux/bitfield.h> #include <linux/module.h> -#include <linux/pm_runtime.h> #include <net/rtnetlink.h> #include "iosm_ipc_imem.h" @@ -438,8 +437,7 @@ static int __maybe_unused ipc_pcie_resume_cb(struct device *dev) return 0; } -static DEFINE_RUNTIME_DEV_PM_OPS(iosm_ipc_pm, ipc_pcie_suspend_cb, - ipc_pcie_resume_cb, NULL); +static SIMPLE_DEV_PM_OPS(iosm_ipc_pm, ipc_pcie_suspend_cb, ipc_pcie_resume_cb); static struct pci_driver iosm_ipc_driver = { .name = KBUILD_MODNAME, diff --git a/drivers/net/wwan/iosm/iosm_ipc_pm.h b/drivers/net/wwan/iosm/iosm_ipc_pm.h index e7c00f388cb0..5f14d7932af9 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_pm.h +++ b/drivers/net/wwan/iosm/iosm_ipc_pm.h @@ -172,7 +172,7 @@ bool ipc_pm_prepare_host_sleep(struct iosm_pm *ipc_pm); bool ipc_pm_prepare_host_active(struct iosm_pm *ipc_pm); /** - * ipc_pm_wait_for_device_active - Wait upto IPC_PM_ACTIVE_TIMEOUT_MS ms + * ipc_pm_wait_for_device_active - Wait up to IPC_PM_ACTIVE_TIMEOUT_MS ms * for the device to reach active state * @ipc_pm: Pointer to power management component * diff --git a/drivers/net/wwan/iosm/iosm_ipc_port.c b/drivers/net/wwan/iosm/iosm_ipc_port.c index 2ba1ddca3945..5d5b4183e14a 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_port.c +++ b/drivers/net/wwan/iosm/iosm_ipc_port.c @@ -3,8 +3,6 @@ * Copyright (C) 2020-21 Intel Corporation. 
*/ -#include <linux/pm_runtime.h> - #include "iosm_ipc_chnl_cfg.h" #include "iosm_ipc_imem_ops.h" #include "iosm_ipc_port.h" @@ -15,16 +13,12 @@ static int ipc_port_ctrl_start(struct wwan_port *port) struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port); int ret = 0; - pm_runtime_get_sync(ipc_port->ipc_imem->dev); ipc_port->channel = ipc_imem_sys_port_open(ipc_port->ipc_imem, ipc_port->chl_id, IPC_HP_CDEV_OPEN); if (!ipc_port->channel) ret = -EIO; - pm_runtime_mark_last_busy(ipc_port->ipc_imem->dev); - pm_runtime_put_autosuspend(ipc_port->ipc_imem->dev); - return ret; } @@ -33,24 +27,15 @@ static void ipc_port_ctrl_stop(struct wwan_port *port) { struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port); - pm_runtime_get_sync(ipc_port->ipc_imem->dev); ipc_imem_sys_port_close(ipc_port->ipc_imem, ipc_port->channel); - pm_runtime_mark_last_busy(ipc_port->ipc_imem->dev); - pm_runtime_put_autosuspend(ipc_port->ipc_imem->dev); } /* transfer control data to modem */ static int ipc_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb) { struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port); - int ret; - pm_runtime_get_sync(ipc_port->ipc_imem->dev); - ret = ipc_imem_sys_cdev_write(ipc_port, skb); - pm_runtime_mark_last_busy(ipc_port->ipc_imem->dev); - pm_runtime_put_autosuspend(ipc_port->ipc_imem->dev); - - return ret; + return ipc_imem_sys_cdev_write(ipc_port, skb); } static const struct wwan_port_ops ipc_wwan_ctrl_ops = { diff --git a/drivers/net/wwan/iosm/iosm_ipc_port.h b/drivers/net/wwan/iosm/iosm_ipc_port.h index 11bc8ed21616..d33c52aebf66 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_port.h +++ b/drivers/net/wwan/iosm/iosm_ipc_port.h @@ -18,7 +18,7 @@ * @pcie: PCIe component * @port_type: WWAN port type * @channel: Channel instance - * @chl_id: Channel Indentifier + * @chl_id: Channel Identifier */ struct iosm_cdev { struct wwan_port *iosm_port; diff --git a/drivers/net/wwan/iosm/iosm_ipc_trace.c b/drivers/net/wwan/iosm/iosm_ipc_trace.c index 4368373797b6..eeecfa3d10c5 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_trace.c +++ b/drivers/net/wwan/iosm/iosm_ipc_trace.c @@ -3,9 +3,7 @@ * Copyright (C) 2020-2021 Intel Corporation. 
*/ -#include <linux/pm_runtime.h> #include <linux/wwan.h> - #include "iosm_ipc_trace.h" /* sub buffer size and number of sub buffer */ @@ -99,8 +97,6 @@ static ssize_t ipc_trace_ctrl_file_write(struct file *filp, if (ret) return ret; - pm_runtime_get_sync(ipc_trace->ipc_imem->dev); - mutex_lock(&ipc_trace->trc_mutex); if (val == TRACE_ENABLE && ipc_trace->mode != TRACE_ENABLE) { ipc_trace->channel = ipc_imem_sys_port_open(ipc_trace->ipc_imem, @@ -121,10 +117,6 @@ static ssize_t ipc_trace_ctrl_file_write(struct file *filp, ret = count; unlock: mutex_unlock(&ipc_trace->trc_mutex); - - pm_runtime_mark_last_busy(ipc_trace->ipc_imem->dev); - pm_runtime_put_autosuspend(ipc_trace->ipc_imem->dev); - return ret; } diff --git a/drivers/net/wwan/iosm/iosm_ipc_trace.h b/drivers/net/wwan/iosm/iosm_ipc_trace.h index 5ebe7790585c..3e7c7f163e1d 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_trace.h +++ b/drivers/net/wwan/iosm/iosm_ipc_trace.h @@ -29,7 +29,7 @@ enum trace_ctrl_mode { * @ipc_imem: Imem instance * @dev: Pointer to device struct * @channel: Channel instance - * @chl_id: Channel Indentifier + * @chl_id: Channel Identifier * @trc_mutex: Mutex used for read and write mode * @mode: Mode for enable and disable trace */ diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c index 93d17de08786..ff747fc79aaf 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_wwan.c +++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c @@ -6,7 +6,6 @@ #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/if_link.h> -#include <linux/pm_runtime.h> #include <linux/rtnetlink.h> #include <linux/wwan.h> #include <net/pkt_sched.h> @@ -52,13 +51,11 @@ static int ipc_wwan_link_open(struct net_device *netdev) struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev); struct iosm_wwan *ipc_wwan = priv->ipc_wwan; int if_id = priv->if_id; - int ret = 0; if (if_id < IP_MUX_SESSION_START || if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist)) return -EINVAL; - pm_runtime_get_sync(ipc_wwan->ipc_imem->dev); /* get channel id */ priv->ch_id = ipc_imem_sys_wwan_open(ipc_wwan->ipc_imem, if_id); @@ -66,8 +63,7 @@ static int ipc_wwan_link_open(struct net_device *netdev) dev_err(ipc_wwan->dev, "cannot connect wwan0 & id %d to the IPC mem layer", if_id); - ret = -ENODEV; - goto err_out; + return -ENODEV; } /* enable tx path, DL data may follow */ @@ -76,11 +72,7 @@ static int ipc_wwan_link_open(struct net_device *netdev) dev_dbg(ipc_wwan->dev, "Channel id %d allocated to if_id %d", priv->ch_id, priv->if_id); -err_out: - pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev); - pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev); - - return ret; + return 0; } /* Bring-down the wwan net link */ @@ -90,12 +82,9 @@ static int ipc_wwan_link_stop(struct net_device *netdev) netif_stop_queue(netdev); - pm_runtime_get_sync(priv->ipc_wwan->ipc_imem->dev); ipc_imem_sys_wwan_close(priv->ipc_wwan->ipc_imem, priv->if_id, priv->ch_id); priv->ch_id = -1; - pm_runtime_mark_last_busy(priv->ipc_wwan->ipc_imem->dev); - pm_runtime_put_autosuspend(priv->ipc_wwan->ipc_imem->dev); return 0; } @@ -117,7 +106,6 @@ static netdev_tx_t ipc_wwan_link_transmit(struct sk_buff *skb, if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist)) return -EINVAL; - pm_runtime_get(ipc_wwan->ipc_imem->dev); /* Send the SKB to device for transmission */ ret = ipc_imem_sys_wwan_transmit(ipc_wwan->ipc_imem, if_id, priv->ch_id, skb); @@ -131,14 +119,9 @@ static netdev_tx_t ipc_wwan_link_transmit(struct sk_buff *skb, ret = NETDEV_TX_BUSY; dev_err(ipc_wwan->dev, "unable to push 
packets"); } else { - pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev); - pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev); goto exit; } - pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev); - pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev); - return ret; exit: diff --git a/drivers/net/wwan/rpmsg_wwan_ctrl.c b/drivers/net/wwan/rpmsg_wwan_ctrl.c index 86b60aadfa11..26756ff0e44d 100644 --- a/drivers/net/wwan/rpmsg_wwan_ctrl.c +++ b/drivers/net/wwan/rpmsg_wwan_ctrl.c @@ -37,7 +37,7 @@ static int rpmsg_wwan_ctrl_start(struct wwan_port *port) .dst = RPMSG_ADDR_ANY, }; - strncpy(chinfo.name, rpwwan->rpdev->id.name, RPMSG_NAME_SIZE); + strscpy(chinfo.name, rpwwan->rpdev->id.name, sizeof(chinfo.name)); rpwwan->ept = rpmsg_create_ept(rpwwan->rpdev, rpmsg_wwan_ctrl_callback, rpwwan, chinfo); if (!rpwwan->ept) diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c index 7162bf38a8c9..cc70360364b7 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c @@ -1066,13 +1066,18 @@ static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl) struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; u32 phy_ao_base, phy_pd_base; - if (md_ctrl->hif_id != CLDMA_ID_MD) - return; - - phy_ao_base = CLDMA1_AO_BASE; - phy_pd_base = CLDMA1_PD_BASE; - hw_info->phy_interrupt_id = CLDMA1_INT; hw_info->hw_mode = MODE_BIT_64; + + if (md_ctrl->hif_id == CLDMA_ID_MD) { + phy_ao_base = CLDMA1_AO_BASE; + phy_pd_base = CLDMA1_PD_BASE; + hw_info->phy_interrupt_id = CLDMA1_INT; + } else { + phy_ao_base = CLDMA0_AO_BASE; + phy_pd_base = CLDMA0_PD_BASE; + hw_info->phy_interrupt_id = CLDMA0_INT; + } + hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base, pbase->pcie_dev_reg_trsl_addr, phy_ao_base); hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base, diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h index 47a35e552da7..4410bac6993a 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h +++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h @@ -34,7 +34,7 @@ /** * enum cldma_id - Identifiers for CLDMA HW units. * @CLDMA_ID_MD: Modem control channel. - * @CLDMA_ID_AP: Application Processor control channel (not used at the moment). + * @CLDMA_ID_AP: Application Processor control channel. * @CLDMA_NUM: Number of CLDMA HW units available. 
*/ enum cldma_id { diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c index f4ff2198b5ef..210d84c67ef9 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c @@ -852,7 +852,7 @@ int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget) if (!ret) { napi_complete_done(napi, work_done); rxq->sleep_lock_pending = true; - napi_reschedule(napi); + napi_schedule(napi); return work_done; } diff --git a/drivers/net/wwan/t7xx/t7xx_mhccif.h b/drivers/net/wwan/t7xx/t7xx_mhccif.h index 209b386bc088..20c50dce9fc3 100644 --- a/drivers/net/wwan/t7xx/t7xx_mhccif.h +++ b/drivers/net/wwan/t7xx/t7xx_mhccif.h @@ -25,6 +25,7 @@ D2H_INT_EXCEPTION_CLEARQ_DONE | \ D2H_INT_EXCEPTION_ALLQ_RESET | \ D2H_INT_PORT_ENUM | \ + D2H_INT_ASYNC_AP_HK | \ D2H_INT_ASYNC_MD_HK) void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val); diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c index 7d0f5e4f0a78..24e7d491468e 100644 --- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c +++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c @@ -44,6 +44,7 @@ #include "t7xx_state_monitor.h" #define RT_ID_MD_PORT_ENUM 0 +#define RT_ID_AP_PORT_ENUM 1 /* Modem feature query identification code - "ICCC" */ #define MD_FEATURE_QUERY_ID 0x49434343 @@ -298,6 +299,7 @@ static void t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage) } t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage); + t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_AP], stage); if (stage == HIF_EX_INIT) t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK); @@ -426,7 +428,7 @@ static int t7xx_parse_host_rt_data(struct t7xx_fsm_ctl *ctl, struct t7xx_sys_inf if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED) return -EINVAL; - if (i == RT_ID_MD_PORT_ENUM) + if (i == RT_ID_MD_PORT_ENUM || i == RT_ID_AP_PORT_ENUM) t7xx_port_enum_msg_handler(ctl->md, rt_feature->data); } @@ -456,12 +458,12 @@ static int t7xx_core_reset(struct t7xx_modem *md) return 0; } -static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_fsm_ctl *ctl, +static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_sys_info *core_info, + struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id, enum t7xx_fsm_event_state err_detect) { struct t7xx_fsm_event *event = NULL, *event_next; - struct t7xx_sys_info *core_info = &md->core_md; struct device *dev = &md->t7xx_dev->pdev->dev; unsigned long flags; int ret; @@ -531,19 +533,33 @@ static void t7xx_md_hk_wq(struct work_struct *work) t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]); t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2); md->core_md.handshake_ongoing = true; - t7xx_core_hk_handler(md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT); + t7xx_core_hk_handler(md, &md->core_md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT); +} + +static void t7xx_ap_hk_wq(struct work_struct *work) +{ + struct t7xx_modem *md = container_of(work, struct t7xx_modem, ap_handshake_work); + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + + /* Clear the HS2 EXIT event appended in t7xx_core_reset(). 
*/ + t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT); + t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]); + t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP]); + t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]); + md->core_ap.handshake_ongoing = true; + t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT); } void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id) { struct t7xx_fsm_ctl *ctl = md->fsm_ctl; - void __iomem *mhccif_base; unsigned int int_sta; unsigned long flags; switch (evt_id) { case FSM_PRE_START: - t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM); + t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM | D2H_INT_ASYNC_MD_HK | + D2H_INT_ASYNC_AP_HK); break; case FSM_START: @@ -556,16 +572,26 @@ void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id) ctl->exp_flg = true; md->exp_id &= ~D2H_INT_EXCEPTION_INIT; md->exp_id &= ~D2H_INT_ASYNC_MD_HK; + md->exp_id &= ~D2H_INT_ASYNC_AP_HK; } else if (ctl->exp_flg) { md->exp_id &= ~D2H_INT_ASYNC_MD_HK; - } else if (md->exp_id & D2H_INT_ASYNC_MD_HK) { - queue_work(md->handshake_wq, &md->handshake_work); - md->exp_id &= ~D2H_INT_ASYNC_MD_HK; - mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base; - iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK); - t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); + md->exp_id &= ~D2H_INT_ASYNC_AP_HK; } else { - t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); + void __iomem *mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base; + + if (md->exp_id & D2H_INT_ASYNC_MD_HK) { + queue_work(md->handshake_wq, &md->handshake_work); + md->exp_id &= ~D2H_INT_ASYNC_MD_HK; + iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK); + t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); + } + + if (md->exp_id & D2H_INT_ASYNC_AP_HK) { + queue_work(md->handshake_wq, &md->ap_handshake_work); + md->exp_id &= ~D2H_INT_ASYNC_AP_HK; + iowrite32(D2H_INT_ASYNC_AP_HK, mhccif_base + REG_EP2RC_SW_INT_ACK); + t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK); + } } spin_unlock_irqrestore(&md->exp_lock, flags); @@ -578,6 +604,7 @@ void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id) case FSM_READY: t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); + t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK); break; default: @@ -629,6 +656,12 @@ static struct t7xx_modem *t7xx_md_alloc(struct t7xx_pci_dev *t7xx_dev) md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK; md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |= FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED); + + INIT_WORK(&md->ap_handshake_work, t7xx_ap_hk_wq); + md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] &= ~FEATURE_MSK; + md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] |= + FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED); + return md; } @@ -640,6 +673,7 @@ int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev) md->exp_id = 0; t7xx_fsm_reset(md); t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]); + t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_AP]); t7xx_port_proxy_reset(md->port_prox); md->md_init_finish = true; return t7xx_core_reset(md); @@ -669,6 +703,10 @@ int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev) if (ret) goto err_destroy_hswq; + ret = t7xx_cldma_alloc(CLDMA_ID_AP, t7xx_dev); + if (ret) + goto err_destroy_hswq; + ret = t7xx_fsm_init(md); if (ret) goto err_destroy_hswq; @@ -681,12 +719,16 @@ int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev) if (ret) goto err_uninit_ccmni; - ret = t7xx_port_proxy_init(md); + ret = 
t7xx_cldma_init(md->md_ctrl[CLDMA_ID_AP]); if (ret) goto err_uninit_md_cldma; + ret = t7xx_port_proxy_init(md); + if (ret) + goto err_uninit_ap_cldma; + ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0); - if (ret) /* fsm_uninit flushes cmd queue */ + if (ret) /* t7xx_fsm_uninit() flushes cmd queue */ goto err_uninit_proxy; t7xx_md_sys_sw_init(t7xx_dev); @@ -696,6 +738,9 @@ int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev) err_uninit_proxy: t7xx_port_proxy_uninit(md->port_prox); +err_uninit_ap_cldma: + t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]); + err_uninit_md_cldma: t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); @@ -722,6 +767,7 @@ void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev) t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION); t7xx_port_proxy_uninit(md->port_prox); + t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]); t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); t7xx_ccmni_exit(t7xx_dev); t7xx_fsm_uninit(md); diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.h b/drivers/net/wwan/t7xx/t7xx_modem_ops.h index 7469ed636ae8..abe633cf7adc 100644 --- a/drivers/net/wwan/t7xx/t7xx_modem_ops.h +++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h @@ -66,10 +66,12 @@ struct t7xx_modem { struct cldma_ctrl *md_ctrl[CLDMA_NUM]; struct t7xx_pci_dev *t7xx_dev; struct t7xx_sys_info core_md; + struct t7xx_sys_info core_ap; bool md_init_finish; bool rgu_irq_asserted; struct workqueue_struct *handshake_wq; struct work_struct handshake_work; + struct work_struct ap_handshake_work; struct t7xx_fsm_ctl *fsm_ctl; struct port_proxy *port_prox; unsigned int exp_id; diff --git a/drivers/net/wwan/t7xx/t7xx_port.h b/drivers/net/wwan/t7xx/t7xx_port.h index 8ea9079af997..4ae8a00a8532 100644 --- a/drivers/net/wwan/t7xx/t7xx_port.h +++ b/drivers/net/wwan/t7xx/t7xx_port.h @@ -36,9 +36,13 @@ /* Channel ID and Message ID definitions. * The channel number consists of peer_id(15:12) , channel_id(11:0) * peer_id: - * 0:reserved, 1: to sAP, 2: to MD + * 0:reserved, 1: to AP, 2: to MD */ enum port_ch { + /* to AP */ + PORT_CH_AP_CONTROL_RX = 0x1000, + PORT_CH_AP_CONTROL_TX = 0x1001, + /* to MD */ PORT_CH_CONTROL_RX = 0x2000, PORT_CH_CONTROL_TX = 0x2001, diff --git a/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c index 68430b130a67..ae632ef96698 100644 --- a/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c +++ b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c @@ -167,8 +167,12 @@ static int control_msg_handler(struct t7xx_port *port, struct sk_buff *skb) case CTL_ID_HS2_MSG: skb_pull(skb, sizeof(*ctrl_msg_h)); - if (port_conf->rx_ch == PORT_CH_CONTROL_RX) { - ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2, skb->data, + if (port_conf->rx_ch == PORT_CH_CONTROL_RX || + port_conf->rx_ch == PORT_CH_AP_CONTROL_RX) { + int event = port_conf->rx_ch == PORT_CH_CONTROL_RX ? 
+ FSM_EVENT_MD_HS2 : FSM_EVENT_AP_HS2; + + ret = t7xx_fsm_append_event(ctl, event, skb->data, le32_to_cpu(ctrl_msg_h->data_length)); if (ret) dev_err(port->dev, "Failed to append Handshake 2 event"); diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.c b/drivers/net/wwan/t7xx/t7xx_port_proxy.c index 894b1d11b2c9..274846d39fbf 100644 --- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c +++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c @@ -48,7 +48,7 @@ i < (proxy)->port_count; \ i++, (p) = &(proxy)->ports[i]) -static const struct t7xx_port_conf t7xx_md_port_conf[] = { +static const struct t7xx_port_conf t7xx_port_conf[] = { { .tx_ch = PORT_CH_UART2_TX, .rx_ch = PORT_CH_UART2_RX, @@ -89,6 +89,14 @@ static const struct t7xx_port_conf t7xx_md_port_conf[] = { .path_id = CLDMA_ID_MD, .ops = &ctl_port_ops, .name = "t7xx_ctrl", + }, { + .tx_ch = PORT_CH_AP_CONTROL_TX, + .rx_ch = PORT_CH_AP_CONTROL_RX, + .txq_index = Q_IDX_CTRL, + .rxq_index = Q_IDX_CTRL, + .path_id = CLDMA_ID_AP, + .ops = &ctl_port_ops, + .name = "t7xx_ap_ctrl", }, }; @@ -428,6 +436,9 @@ static void t7xx_proxy_init_all_ports(struct t7xx_modem *md) if (port_conf->tx_ch == PORT_CH_CONTROL_TX) md->core_md.ctl_port = port; + if (port_conf->tx_ch == PORT_CH_AP_CONTROL_TX) + md->core_ap.ctl_port = port; + port->t7xx_dev = md->t7xx_dev; port->dev = &md->t7xx_dev->pdev->dev; spin_lock_init(&port->port_update_lock); @@ -442,7 +453,7 @@ static void t7xx_proxy_init_all_ports(struct t7xx_modem *md) static int t7xx_proxy_alloc(struct t7xx_modem *md) { - unsigned int port_count = ARRAY_SIZE(t7xx_md_port_conf); + unsigned int port_count = ARRAY_SIZE(t7xx_port_conf); struct device *dev = &md->t7xx_dev->pdev->dev; struct port_proxy *port_prox; int i; @@ -456,7 +467,7 @@ static int t7xx_proxy_alloc(struct t7xx_modem *md) port_prox->dev = dev; for (i = 0; i < port_count; i++) - port_prox->ports[i].port_conf = &t7xx_md_port_conf[i]; + port_prox->ports[i].port_conf = &t7xx_port_conf[i]; port_prox->port_count = port_count; t7xx_proxy_init_all_ports(md); @@ -481,6 +492,7 @@ int t7xx_port_proxy_init(struct t7xx_modem *md) if (ret) return ret; + t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_AP], t7xx_port_proxy_recv_skb); t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb); return 0; } diff --git a/drivers/net/wwan/t7xx/t7xx_reg.h b/drivers/net/wwan/t7xx/t7xx_reg.h index 7c1b81091a0f..c41d7d094c08 100644 --- a/drivers/net/wwan/t7xx/t7xx_reg.h +++ b/drivers/net/wwan/t7xx/t7xx_reg.h @@ -56,7 +56,7 @@ #define D2H_INT_RESUME_ACK BIT(12) #define D2H_INT_SUSPEND_ACK_AP BIT(13) #define D2H_INT_RESUME_ACK_AP BIT(14) -#define D2H_INT_ASYNC_SAP_HK BIT(15) +#define D2H_INT_ASYNC_AP_HK BIT(15) #define D2H_INT_ASYNC_MD_HK BIT(16) /* Register base */ diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c index 0bcca08ff2bd..0bc97430211b 100644 --- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c +++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c @@ -285,8 +285,9 @@ static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl) t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1); t7xx_md_event_notify(md, FSM_START); - wait_event_interruptible_timeout(ctl->async_hk_wq, md->core_md.ready || ctl->exp_flg, - HZ * 60); + wait_event_interruptible_timeout(ctl->async_hk_wq, + (md->core_md.ready && md->core_ap.ready) || + ctl->exp_flg, HZ * 60); dev = &md->t7xx_dev->pdev->dev; if (ctl->exp_flg) @@ -299,6 +300,13 @@ static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl) fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT); 
return -ETIMEDOUT; + } else if (!md->core_ap.ready) { + dev_err(dev, "AP handshake timeout\n"); + if (md->core_ap.handshake_ongoing) + t7xx_fsm_append_event(ctl, FSM_EVENT_AP_HS2_EXIT, NULL, 0); + + fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT); + return -ETIMEDOUT; } t7xx_pci_pm_init_late(md->t7xx_dev); @@ -335,6 +343,7 @@ static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command return; } + t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]); t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]); fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl)); } @@ -436,7 +445,8 @@ int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state ev return -EINVAL; } - event = kmalloc(sizeof(*event) + length, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL); + event = kmalloc(struct_size(event, data, length), + in_interrupt() ? GFP_ATOMIC : GFP_KERNEL); if (!event) return -ENOMEM; diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.h b/drivers/net/wwan/t7xx/t7xx_state_monitor.h index b1af0259d4c5..b0b3662ae6d7 100644 --- a/drivers/net/wwan/t7xx/t7xx_state_monitor.h +++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h @@ -38,10 +38,12 @@ enum t7xx_fsm_state { enum t7xx_fsm_event_state { FSM_EVENT_INVALID, FSM_EVENT_MD_HS2, + FSM_EVENT_AP_HS2, FSM_EVENT_MD_EX, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX_PASS, FSM_EVENT_MD_HS2_EXIT, + FSM_EVENT_AP_HS2_EXIT, FSM_EVENT_MAX }; @@ -100,7 +102,7 @@ struct t7xx_fsm_event { struct list_head entry; enum t7xx_fsm_event_state event_id; unsigned int length; - unsigned char data[]; + unsigned char data[] __counted_by(length); }; struct t7xx_fsm_command { diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c index 284ab1f56391..72e01e550a16 100644 --- a/drivers/net/wwan/wwan_core.c +++ b/drivers/net/wwan/wwan_core.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */ +#include <linux/bitmap.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/debugfs.h> @@ -301,7 +302,7 @@ static void wwan_remove_dev(struct wwan_device *wwandev) static const struct { const char * const name; /* Port type name */ - const char * const devsuf; /* Port devce name suffix */ + const char * const devsuf; /* Port device name suffix */ } wwan_port_types[WWAN_PORT_MAX + 1] = { [WWAN_PORT_AT] = { .name = "AT", @@ -395,7 +396,7 @@ static int __wwan_port_dev_assign_name(struct wwan_port *port, const char *fmt) char buf[0x20]; int id; - idmap = (unsigned long *)get_zeroed_page(GFP_KERNEL); + idmap = bitmap_zalloc(max_ports, GFP_KERNEL); if (!idmap) return -ENOMEM; @@ -414,7 +415,7 @@ static int __wwan_port_dev_assign_name(struct wwan_port *port, const char *fmt) /* Allocate unique id */ id = find_first_zero_bit(idmap, max_ports); - free_page((unsigned long)idmap); + bitmap_free(idmap); snprintf(buf, sizeof(buf), fmt, id); /* Name generation */ @@ -1183,7 +1184,7 @@ void wwan_unregister_ops(struct device *parent) */ put_device(&wwandev->dev); - rtnl_lock(); /* Prevent concurent netdev(s) creation/destroying */ + rtnl_lock(); /* Prevent concurrent netdev(s) creation/destroying */ /* Remove all child netdev(s), using batch removing */ device_for_each_child(&wwandev->dev, &kill_list, diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index f3f2c07423a6..7cff90aa8d24 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -41,8 +41,6 @@ #include <asm/xen/hypercall.h> #include <xen/balloon.h> -#define 
XENVIF_QUEUE_LENGTH 32 - /* Number of bytes allowed on the internal guest Rx queue. */ #define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE) @@ -254,6 +252,9 @@ xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) skb_clear_hash(skb); + /* timestamp packet in software */ + skb_tx_timestamp(skb); + if (!xenvif_rx_queue_tail(queue, skb)) goto drop; @@ -460,7 +461,7 @@ static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data) static const struct ethtool_ops xenvif_ethtool_ops = { .get_link = ethtool_op_get_link, - + .get_ts_info = ethtool_op_get_ts_info, .get_sset_count = xenvif_get_sset_count, .get_ethtool_stats = xenvif_get_ethtool_stats, .get_strings = xenvif_get_strings, @@ -530,8 +531,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, dev->features = dev->hw_features | NETIF_F_RXCSUM; dev->ethtool_ops = &xenvif_ethtool_ops; - dev->tx_queue_len = XENVIF_QUEUE_LENGTH; - dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN; @@ -672,8 +671,7 @@ err: static void xenvif_disconnect_queue(struct xenvif_queue *queue) { if (queue->task) { - kthread_stop(queue->task); - put_task_struct(queue->task); + kthread_stop_put(queue->task); queue->task = NULL; } diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 47d54d8ea59d..ad29f370034e 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -45,7 +45,7 @@ #include <linux/slab.h> #include <net/ip.h> #include <linux/bpf.h> -#include <net/page_pool.h> +#include <net/page_pool/types.h> #include <linux/bpf_trace.h> #include <xen/xen.h>
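
The wl1251, wl18xx, wlcore, and rpmsg_wwan_ctrl hunks above all convert strncpy() to strscpy(). Unlike strncpy(), strscpy() always NUL-terminates the destination (for a non-zero size) and returns -E2BIG on truncation, which is why the wl18xx and wlcore hunks can also drop their manual terminator writes. A minimal sketch of the pattern; the helper and buffer names are illustrative, not taken from the drivers:

#include <linux/printk.h>
#include <linux/string.h>

/* strscpy() NUL-terminates dst and returns the number of characters
 * copied, or -E2BIG if src had to be truncated to fit.
 */
static void copy_version(char *dst, size_t dst_len, const char *src)
{
	ssize_t ret = strscpy(dst, src, dst_len);

	if (ret == -E2BIG)
		pr_warn("version string truncated\n");
}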
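
The wl12xx and wl18xx platform drivers move from .remove to .remove_new, and wlcore_remove() is changed to return void to match. The void-returning callback exists because the int returned by the old .remove was only ever warned about by the platform core; the device was unbound regardless, so an error return could never abort removal. A sketch with a hypothetical driver:

#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;	/* nothing to set up in this sketch */
}

static void foo_remove(struct platform_device *pdev)
{
	/* tear down unconditionally; removal cannot fail, and the old
	 * int return value was ignored by the driver core anyway
	 */
}

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.remove_new = foo_remove,	/* void-returning variant of .remove */
	.driver = {
		.name = "foo",
	},
};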
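
In the mac80211_hwsim virtio path, nlmsg_free() (a thin wrapper around kfree_skb()) is replaced with dev_kfree_skb_irq(), since the virtqueue callbacks can run in hardirq context, where freeing an skb synchronously is not allowed. dev_kfree_skb_irq() instead queues the skb on the per-CPU softnet completion list and lets NET_TX_SOFTIRQ free it later. A sketch loosely modeled on the tx-done callback, with a hypothetical function name:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/virtio.h>

/* may be invoked from hardirq context by the virtio core */
static void tx_done_cb(struct virtqueue *vq)
{
	struct sk_buff *skb;
	unsigned int len;

	while ((skb = virtqueue_get_buf(vq, &len)))
		dev_kfree_skb_irq(skb);	/* defer the free; kfree_skb() here would be unsafe */
}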
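
The t7xx state-monitor hunks replace the open-coded sizeof(*event) + length with struct_size() and annotate the trailing flexible array with __counted_by(length), so overflow in the size calculation saturates instead of wrapping and FORTIFY/UBSAN can bounds-check accesses to data[]. A condensed sketch of the same pattern, with struct and function names invented for illustration:

#include <linux/overflow.h>
#include <linux/slab.h>

struct evt {
	unsigned int length;
	unsigned char data[] __counted_by(length);
};

static struct evt *evt_alloc(unsigned int length, gfp_t gfp)
{
	/* struct_size() saturates to SIZE_MAX on overflow, making the
	 * allocation fail instead of silently undersizing the buffer
	 */
	struct evt *ev = kmalloc(struct_size(ev, data, length), gfp);

	if (ev)
		ev->length = length;	/* must be set before data[] is accessed */
	return ev;
}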
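
wwan_core's port-name allocator likewise switches from get_zeroed_page() to bitmap_zalloc(max_ports, GFP_KERNEL), sizing the scratch bitmap to the actual number of ports rather than a whole page. A sketch of the idiom, with a hypothetical helper name:

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int find_free_id(unsigned int max_ids)
{
	unsigned long *idmap = bitmap_zalloc(max_ids, GFP_KERNEL);
	unsigned long id;

	if (!idmap)
		return -ENOMEM;

	/* ... mark ids already in use with set_bit() ... */

	id = find_first_zero_bit(idmap, max_ids);	/* == max_ids if none free */
	bitmap_free(idmap);

	return id < max_ids ? id : -ENOSPC;
}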
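
Finally, xen-netback's queue teardown adopts kthread_stop_put(), which folds the kthread_stop() + put_task_struct() pair into one call that stops the thread and drops the caller's task reference. A sketch; the wrapper is illustrative only:

#include <linux/kthread.h>

static void stop_and_release(struct task_struct **taskp)
{
	if (*taskp) {
		kthread_stop_put(*taskp);	/* stop + put_task_struct() in one call */
		*taskp = NULL;
	}
}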